content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
library(ggplot2) library(Rmisc) library(Sleuth2) library(ggpubr) set.seed(364) kappa1 <- runif(NP, 0, 4) kappa2 <- runif(NP, 0, 4) sigma1 <- runif(NP, 0.3, 3) sigma2 <- runif(NP, 0.3, 3) gname = c("ESS.eps",sep="") postscript(gname,width=5,height=4,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,1),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(ESSVec, xlab = 'Time', ylab = 'ESS', type = 'b', col = 4, pch = 18) dev.off() gname = c("ParameterBasis.eps",sep="") postscript(gname,width=10,height=5,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,2),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(cbind(kappaStore[[1]][,1], kappaStore[[1]][,2]), col = 1, xlab = expression(kappa[1]), ylab = expression(kappa[2]), pch = 20, xlim = c(-5,5), ylim = c(-0.5, 4)) points(cbind(kappaStore[[40]][,1], kappaStore[[40]][,2]), col = 2, pch = 20) points(cbind(kappaStore[[80]][,1], kappaStore[[80]][,2]), col = 3, pch = 20) points(cbind(kappaStore[[120]][,1], kappaStore[[120]][,2]), col = 4, pch = 20) points(cbind(kappaStore[[160]][,1], kappaStore[[160]][,2]), col = 5, pch = 20) points(cbind(kappaStore[[193]][,1], kappaStore[[193]][,2]), col = 6, pch = 20) plot(cbind(sigmaStore[[1]][,1], sigmaStore[[1]][,2]), col = 1, xlab = expression(sigma[1]), ylab = expression(sigma[2]), pch = 20, ylim = c(0,12), xlim = c(0, 18)) points(cbind(sigmaStore[[40]][,1], sigmaStore[[40]][,2]), col = 2, pch = 20) points(cbind(sigmaStore[[80]][,1], sigmaStore[[80]][,2]), col = 3, pch = 20) points(cbind(sigmaStore[[120]][,1], sigmaStore[[120]][,2]), col = 4, pch = 20) points(cbind(sigmaStore[[160]][,1], sigmaStore[[160]][,2]), col = 5, pch = 20) points(cbind(sigmaStore[[193]][,1], sigmaStore[[193]][,2]), col = 6, pch = 20) legend("topright",c("t=1","t=40","t=80","t=120","t=160", "t=193"),cex=1.2, col=1:6,pch = 20,bty="n") dev.off() gname = c("EstimatedODEwithLambda.eps",sep="") 
postscript(gname,width=10,height=5,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,2),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(output[-1,2], type = 'l', ylim = c(-8, 11), xlab = expression(X[1]), ylab = '', lwd = 3) lines(fitted1, lty = 2, col = 2, lwd = 3) lines(upper1, lty = 3, col =2, lwd = 3) lines(lower1, lty = 3, col =2, lwd = 3) plot(output[-1,3], type = 'l', ylim = c(-80, 100), xlab = expression(X[2]), ylab = '', lwd = 3) lines(fitted2,lty = 2, col = 2, lwd = 3) lines(upper2, lty = 3, col =2, lwd = 3) lines(lower2, lty = 3, col =2, lwd = 3) legend("topright",c("TRUE","MEAN","CI"),cex=1.2, col=c(1,2,2),lty = c(1, 2, 3), lwd = 3,pch = 20,bty="n") dev.off()
/Simulation/simulation5_1/lambdaSMC/SMCl1/FigureBasis.R
no_license
ecustwy/smcDE
R
false
false
2,763
r
library(ggplot2) library(Rmisc) library(Sleuth2) library(ggpubr) set.seed(364) kappa1 <- runif(NP, 0, 4) kappa2 <- runif(NP, 0, 4) sigma1 <- runif(NP, 0.3, 3) sigma2 <- runif(NP, 0.3, 3) gname = c("ESS.eps",sep="") postscript(gname,width=5,height=4,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,1),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(ESSVec, xlab = 'Time', ylab = 'ESS', type = 'b', col = 4, pch = 18) dev.off() gname = c("ParameterBasis.eps",sep="") postscript(gname,width=10,height=5,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,2),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(cbind(kappaStore[[1]][,1], kappaStore[[1]][,2]), col = 1, xlab = expression(kappa[1]), ylab = expression(kappa[2]), pch = 20, xlim = c(-5,5), ylim = c(-0.5, 4)) points(cbind(kappaStore[[40]][,1], kappaStore[[40]][,2]), col = 2, pch = 20) points(cbind(kappaStore[[80]][,1], kappaStore[[80]][,2]), col = 3, pch = 20) points(cbind(kappaStore[[120]][,1], kappaStore[[120]][,2]), col = 4, pch = 20) points(cbind(kappaStore[[160]][,1], kappaStore[[160]][,2]), col = 5, pch = 20) points(cbind(kappaStore[[193]][,1], kappaStore[[193]][,2]), col = 6, pch = 20) plot(cbind(sigmaStore[[1]][,1], sigmaStore[[1]][,2]), col = 1, xlab = expression(sigma[1]), ylab = expression(sigma[2]), pch = 20, ylim = c(0,12), xlim = c(0, 18)) points(cbind(sigmaStore[[40]][,1], sigmaStore[[40]][,2]), col = 2, pch = 20) points(cbind(sigmaStore[[80]][,1], sigmaStore[[80]][,2]), col = 3, pch = 20) points(cbind(sigmaStore[[120]][,1], sigmaStore[[120]][,2]), col = 4, pch = 20) points(cbind(sigmaStore[[160]][,1], sigmaStore[[160]][,2]), col = 5, pch = 20) points(cbind(sigmaStore[[193]][,1], sigmaStore[[193]][,2]), col = 6, pch = 20) legend("topright",c("t=1","t=40","t=80","t=120","t=160", "t=193"),cex=1.2, col=1:6,pch = 20,bty="n") dev.off() gname = c("EstimatedODEwithLambda.eps",sep="") 
postscript(gname,width=10,height=5,horizontal = FALSE, onefile = FALSE, paper = "special") par(mfrow=c(1,2),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5) plot(output[-1,2], type = 'l', ylim = c(-8, 11), xlab = expression(X[1]), ylab = '', lwd = 3) lines(fitted1, lty = 2, col = 2, lwd = 3) lines(upper1, lty = 3, col =2, lwd = 3) lines(lower1, lty = 3, col =2, lwd = 3) plot(output[-1,3], type = 'l', ylim = c(-80, 100), xlab = expression(X[2]), ylab = '', lwd = 3) lines(fitted2,lty = 2, col = 2, lwd = 3) lines(upper2, lty = 3, col =2, lwd = 3) lines(lower2, lty = 3, col =2, lwd = 3) legend("topright",c("TRUE","MEAN","CI"),cex=1.2, col=c(1,2,2),lty = c(1, 2, 3), lwd = 3,pch = 20,bty="n") dev.off()
#' Prints an Example of a QualtRics Configuration File to the Console. #' #' This function is deprecated; use [qualtRics::qualtrics_api_credentials()] instead. #' #' @param ... All arguments for `qualtRicsConfigFile` #' #' @export #' @examples #' \dontrun{ #' # Execute this line to get instructions on how to make a .qualtrics.yml config file. #' qualtRicsConfigFile() #' } #' qualtRicsConfigFile <- function(...) { .Deprecated("qualtrics_api_credentials") } #' Retrieve a data frame containing question IDs and labels #' #' This function is deprecated; use [qualtRics::survey_questions()] #' instead. #' @param ... All arguments for `survey_questions` #' #' @export getSurveyQuestions <- function(...) { .Deprecated("survey_questions") survey_questions(...) } #' Download a survey and import it into R #' #' This function is deprecated; use [qualtRics::fetch_survey()] #' instead. #' @param ... All arguments for `fetch_survey` #' #' @export getSurvey <- function(...) { .Deprecated("fetch_survey") fetch_survey(...) } #' Read a CSV file exported from Qualtrics #' #' This function is deprecated; use [qualtRics::read_survey()] #' instead. #' Reads comma separated CSV files generated by Qualtrics #' software. The second line containing the variable labels is imported. #' Repetitive introductions to matrix questions are automatically removed. #' Variable labels are stored as attributes. #' @param ... All arguments for [qualtRics::read_survey()] #' #' @export readSurvey <- function(...) { .Deprecated("read_survey") read_survey(...) } #' Retrieve a data frame of all active surveys on Qualtrics #' #' This function is deprecated; use [qualtRics::all_surveys()] #' instead. #' @export getSurveys <- function() { .Deprecated("all_surveys") all_surveys() }
/R/deprecated.R
permissive
ropensci/qualtRics
R
false
false
1,790
r
#' Prints an Example of a QualtRics Configuration File to the Console. #' #' This function is deprecated; use [qualtRics::qualtrics_api_credentials()] instead. #' #' @param ... All arguments for `qualtRicsConfigFile` #' #' @export #' @examples #' \dontrun{ #' # Execute this line to get instructions on how to make a .qualtrics.yml config file. #' qualtRicsConfigFile() #' } #' qualtRicsConfigFile <- function(...) { .Deprecated("qualtrics_api_credentials") } #' Retrieve a data frame containing question IDs and labels #' #' This function is deprecated; use [qualtRics::survey_questions()] #' instead. #' @param ... All arguments for `survey_questions` #' #' @export getSurveyQuestions <- function(...) { .Deprecated("survey_questions") survey_questions(...) } #' Download a survey and import it into R #' #' This function is deprecated; use [qualtRics::fetch_survey()] #' instead. #' @param ... All arguments for `fetch_survey` #' #' @export getSurvey <- function(...) { .Deprecated("fetch_survey") fetch_survey(...) } #' Read a CSV file exported from Qualtrics #' #' This function is deprecated; use [qualtRics::read_survey()] #' instead. #' Reads comma separated CSV files generated by Qualtrics #' software. The second line containing the variable labels is imported. #' Repetitive introductions to matrix questions are automatically removed. #' Variable labels are stored as attributes. #' @param ... All arguments for [qualtRics::read_survey()] #' #' @export readSurvey <- function(...) { .Deprecated("read_survey") read_survey(...) } #' Retrieve a data frame of all active surveys on Qualtrics #' #' This function is deprecated; use [qualtRics::all_surveys()] #' instead. #' @export getSurveys <- function() { .Deprecated("all_surveys") all_surveys() }
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AmazonApiGatewayManagementApi #' #' @description #' The Amazon API Gateway Management API allows you to directly manage #' runtime aspects of your deployed APIs. To use it, you must explicitly #' set the SDK's endpoint to point to the endpoint of your deployed API. #' The endpoint will be of the form #' https://\{api-id\}.execute-api.\{region\}.amazonaws.com/\{stage\}, #' or will be the endpoint corresponding to your API's custom domain and #' base path, if applicable. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' \itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} #' \item{\strong{region}:} {The AWS Region used in instantiating the client.} #' \item{\strong{close_connection}:} {Immediately close all HTTP connections.} #' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. 
The default is 60 seconds.} #' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e., `http://s3.amazonaws.com/BUCKET/KEY`.} #' } #' #' @section Service syntax: #' ``` #' svc <- apigatewaymanagementapi( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string", #' close_connection = "logical", #' timeout = "numeric", #' s3_force_path_style = "logical" #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- apigatewaymanagementapi() #' svc$delete_connection( #' Foo = 123 #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=apigatewaymanagementapi_delete_connection]{delete_connection} \tab Delete the connection with the provided id\cr #' \link[=apigatewaymanagementapi_get_connection]{get_connection} \tab Get information about the connection with the provided id\cr #' \link[=apigatewaymanagementapi_post_to_connection]{post_to_connection} \tab Sends the provided data to the specified connection #' } #' #' @return #' A client for the service. You can call the service's operations using #' syntax like `svc$operation(...)`, where `svc` is the name you've assigned #' to the client. The available operations are listed in the #' Operations section. #' #' @rdname apigatewaymanagementapi #' @export apigatewaymanagementapi <- function(config = list()) { svc <- .apigatewaymanagementapi$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, interfaces, etc. 
.apigatewaymanagementapi <- list() .apigatewaymanagementapi$operations <- list() .apigatewaymanagementapi$metadata <- list( service_name = "apigatewaymanagementapi", endpoints = list("*" = list(endpoint = "execute-api.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "execute-api.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "execute-api.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "execute-api.{region}.sc2s.sgov.gov", global = FALSE)), service_id = "ApiGatewayManagementApi", api_version = "2018-11-29", signing_name = "execute-api", json_version = "1.1", target_prefix = "" ) .apigatewaymanagementapi$service <- function(config = list()) { handlers <- new_handlers("restjson", "v4") new_service(.apigatewaymanagementapi$metadata, handlers, config) }
/R/apigatewaymanagementapi_service.R
no_license
cran/paws.networking
R
false
false
4,034
r
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AmazonApiGatewayManagementApi #' #' @description #' The Amazon API Gateway Management API allows you to directly manage #' runtime aspects of your deployed APIs. To use it, you must explicitly #' set the SDK's endpoint to point to the endpoint of your deployed API. #' The endpoint will be of the form #' https://\{api-id\}.execute-api.\{region\}.amazonaws.com/\{stage\}, #' or will be the endpoint corresponding to your API's custom domain and #' base path, if applicable. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' \itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} #' \item{\strong{region}:} {The AWS Region used in instantiating the client.} #' \item{\strong{close_connection}:} {Immediately close all HTTP connections.} #' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. 
The default is 60 seconds.} #' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e., `http://s3.amazonaws.com/BUCKET/KEY`.} #' } #' #' @section Service syntax: #' ``` #' svc <- apigatewaymanagementapi( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string", #' close_connection = "logical", #' timeout = "numeric", #' s3_force_path_style = "logical" #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- apigatewaymanagementapi() #' svc$delete_connection( #' Foo = 123 #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=apigatewaymanagementapi_delete_connection]{delete_connection} \tab Delete the connection with the provided id\cr #' \link[=apigatewaymanagementapi_get_connection]{get_connection} \tab Get information about the connection with the provided id\cr #' \link[=apigatewaymanagementapi_post_to_connection]{post_to_connection} \tab Sends the provided data to the specified connection #' } #' #' @return #' A client for the service. You can call the service's operations using #' syntax like `svc$operation(...)`, where `svc` is the name you've assigned #' to the client. The available operations are listed in the #' Operations section. #' #' @rdname apigatewaymanagementapi #' @export apigatewaymanagementapi <- function(config = list()) { svc <- .apigatewaymanagementapi$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, interfaces, etc. 
.apigatewaymanagementapi <- list() .apigatewaymanagementapi$operations <- list() .apigatewaymanagementapi$metadata <- list( service_name = "apigatewaymanagementapi", endpoints = list("*" = list(endpoint = "execute-api.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "execute-api.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "execute-api.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "execute-api.{region}.sc2s.sgov.gov", global = FALSE)), service_id = "ApiGatewayManagementApi", api_version = "2018-11-29", signing_name = "execute-api", json_version = "1.1", target_prefix = "" ) .apigatewaymanagementapi$service <- function(config = list()) { handlers <- new_handlers("restjson", "v4") new_service(.apigatewaymanagementapi$metadata, handlers, config) }
## Composition estimation with one sample sscomp <- function(x,wt=rep(1,length(x)),alpha=1.4) { ## Check inputs if ((nlvl <- length(x))<3) stop("gss error in sscomp: length of x should be 3 or more") if (length(x)!=length(wt)) stop("gss error in sscomp: x and wt mismatch in lengths") ## Generate terms cnt <- x x <- as.factor(1:nlvl) mf <- model.frame(~x) term <- mkterm(mf,NULL) rk <- term$x$rk ## get basis functions id.basis <- 1:nlvl if (max(abs(wt-mean(wt)))/mean(wt)<.Machine$double.eps) id.basis <- id.basis[cnt>0] if (length(id.basis)==nlvl) id.basis <- id.basis[-nlvl] ## generate matrices r <- rk$fun(x[id.basis],x,nu=1,env=rk$env,out=TRUE) q <- r[,id.basis] qd.wt <- as.vector(wt) ## Fit the model nt <- b.wt <- 1 t.wt <- matrix(1,nlvl,1) bias0 <- list(nt=nt,wt=b.wt,qd.wt=t.wt) z <- sspdsty(NULL,r,q,cnt,NULL,r,qd.wt,1e-7,30,alpha,bias0) ## return fitted probabilities fit <- exp(t(r)%*%z$c)*qd.wt rownames(fit) <- rownames(x) fit/sum(fit) } ## Composition estimation with a matrix input sscomp2 <- function(x,alpha=1.4) { if (!is.matrix(x)) stop("gss error in sscomp2: x should be a matrix") if (min(x)<0) stop("gss error in sscomp2: x should have non-negative entries") if (any(apply(x,2,sum)==0)) stop("gss error in sscomp2: column totals of x must be positive") nlvl <- dim(x)[1] yy <- apply(x,1,sum) p0 <- sscomp(yy) fit <- NULL for (i in 1:dim(x)[2]) { fit <- cbind(fit,sscomp(x[,i],p0,alpha)) } rownames(fit) <- rownames(x) colnames(fit) <- colnames(x) fit }
/R/sscomp.R
no_license
cran/gss
R
false
false
1,687
r
## Composition estimation with one sample sscomp <- function(x,wt=rep(1,length(x)),alpha=1.4) { ## Check inputs if ((nlvl <- length(x))<3) stop("gss error in sscomp: length of x should be 3 or more") if (length(x)!=length(wt)) stop("gss error in sscomp: x and wt mismatch in lengths") ## Generate terms cnt <- x x <- as.factor(1:nlvl) mf <- model.frame(~x) term <- mkterm(mf,NULL) rk <- term$x$rk ## get basis functions id.basis <- 1:nlvl if (max(abs(wt-mean(wt)))/mean(wt)<.Machine$double.eps) id.basis <- id.basis[cnt>0] if (length(id.basis)==nlvl) id.basis <- id.basis[-nlvl] ## generate matrices r <- rk$fun(x[id.basis],x,nu=1,env=rk$env,out=TRUE) q <- r[,id.basis] qd.wt <- as.vector(wt) ## Fit the model nt <- b.wt <- 1 t.wt <- matrix(1,nlvl,1) bias0 <- list(nt=nt,wt=b.wt,qd.wt=t.wt) z <- sspdsty(NULL,r,q,cnt,NULL,r,qd.wt,1e-7,30,alpha,bias0) ## return fitted probabilities fit <- exp(t(r)%*%z$c)*qd.wt rownames(fit) <- rownames(x) fit/sum(fit) } ## Composition estimation with a matrix input sscomp2 <- function(x,alpha=1.4) { if (!is.matrix(x)) stop("gss error in sscomp2: x should be a matrix") if (min(x)<0) stop("gss error in sscomp2: x should have non-negative entries") if (any(apply(x,2,sum)==0)) stop("gss error in sscomp2: column totals of x must be positive") nlvl <- dim(x)[1] yy <- apply(x,1,sum) p0 <- sscomp(yy) fit <- NULL for (i in 1:dim(x)[2]) { fit <- cbind(fit,sscomp(x[,i],p0,alpha)) } rownames(fit) <- rownames(x) colnames(fit) <- colnames(x) fit }
rm(list=ls()) require(cowplot) gg_color_hue <- function(n) { hues = seq(15, 375, length=n+1) hcl(h=hues, l=65, c=100)[1:n] } cols = rev(gg_color_hue(3)) d <- read.table('~/Google Drive/Data/influenza_HA_evolution/data_table/numbering_table_unix.csv', sep=',', head=T, stringsAsFactors = F) d <- d[!is.na(d$pdb.4fnk), ] r.value.1 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer, data=d))$r.squared r.1 <- "RSA" r.value.2 <- summary(lm(FEL.dN.Internal ~ I(1/distance.to.224.all), data=d))$r.squared r.2 <- "1 / Distance" r.value.3 <- summary(lm(FEL.dN.Internal ~ Bush.99, data=d))$r.squared r.3 <- "Bush '99" r.value.4 <- summary(lm(FEL.dN.Internal ~ Meyer.14, data=d))$r.squared r.4 <- "Epitopes" r.value.5 <- summary(lm(FEL.dN.Internal ~ I(1 / distance.to.224.all) + Bush.99, data=d))$r.squared r.5 <- "1 / Distance + Bush '99" r.value.6 <- summary(lm(FEL.dN.Internal ~ I(1 / distance.to.224.all) + Meyer.14 , data=d))$r.squared r.6 <- "1 / Distance + Epitopes" r.value.7 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + Bush.99, data=d))$r.squared r.7 <- "RSA + Bush '99" r.value.8 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + Meyer.14 , data=d))$r.squared r.8 <- "RSA + Epitopes" r.value.9 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all), data=d))$r.squared r.9 <- "RSA + 1 / Distance" r.value.10 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all) + Bush.99, data=d))$r.squared r.10 <- "RSA + 1 / Distance + Bush '99" r.value.11 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all) + Meyer.14, data=d))$r.squared r.11 <- "RSA + 1 / Distance + Epitopes" rs <- c(r.value.1, r.value.4, r.value.2, r.value.3, r.value.8, r.value.7, r.value.6, r.value.5, r.value.9, r.value.11, r.value.10) r.names <- c(r.1, r.4, r.2, r.3, r.8, r.7, r.6, r.5, r.9, r.11, r.10) df <- data.frame(r.square = rs, names = factor(r.names, levels = r.names)) p <- ggplot(aes(x = names, y = r.square), data = df) + geom_bar(stat = 'identity', colour='darkgray', 
fill='darkgray') + ylab(expression(paste("Variance Explained (R"^"2", ')', sep=''))) + xlab("Predictor Variables") + theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1)) + scale_y_continuous(limits = c(0, 0.4)) #ggsave("~/Google Drive/Data/influenza_HA_evolution/figures/r_squared_internal.pdf", p, width=7.5, height=7.5)
/figures/rsquared_internal_barplot.R
no_license
schlogl2017/influenza_HA_evolution
R
false
false
2,365
r
rm(list=ls()) require(cowplot) gg_color_hue <- function(n) { hues = seq(15, 375, length=n+1) hcl(h=hues, l=65, c=100)[1:n] } cols = rev(gg_color_hue(3)) d <- read.table('~/Google Drive/Data/influenza_HA_evolution/data_table/numbering_table_unix.csv', sep=',', head=T, stringsAsFactors = F) d <- d[!is.na(d$pdb.4fnk), ] r.value.1 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer, data=d))$r.squared r.1 <- "RSA" r.value.2 <- summary(lm(FEL.dN.Internal ~ I(1/distance.to.224.all), data=d))$r.squared r.2 <- "1 / Distance" r.value.3 <- summary(lm(FEL.dN.Internal ~ Bush.99, data=d))$r.squared r.3 <- "Bush '99" r.value.4 <- summary(lm(FEL.dN.Internal ~ Meyer.14, data=d))$r.squared r.4 <- "Epitopes" r.value.5 <- summary(lm(FEL.dN.Internal ~ I(1 / distance.to.224.all) + Bush.99, data=d))$r.squared r.5 <- "1 / Distance + Bush '99" r.value.6 <- summary(lm(FEL.dN.Internal ~ I(1 / distance.to.224.all) + Meyer.14 , data=d))$r.squared r.6 <- "1 / Distance + Epitopes" r.value.7 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + Bush.99, data=d))$r.squared r.7 <- "RSA + Bush '99" r.value.8 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + Meyer.14 , data=d))$r.squared r.8 <- "RSA + Epitopes" r.value.9 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all), data=d))$r.squared r.9 <- "RSA + 1 / Distance" r.value.10 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all) + Bush.99, data=d))$r.squared r.10 <- "RSA + 1 / Distance + Bush '99" r.value.11 <- summary(lm(FEL.dN.Internal ~ RSA.Multimer + I(1 / distance.to.224.all) + Meyer.14, data=d))$r.squared r.11 <- "RSA + 1 / Distance + Epitopes" rs <- c(r.value.1, r.value.4, r.value.2, r.value.3, r.value.8, r.value.7, r.value.6, r.value.5, r.value.9, r.value.11, r.value.10) r.names <- c(r.1, r.4, r.2, r.3, r.8, r.7, r.6, r.5, r.9, r.11, r.10) df <- data.frame(r.square = rs, names = factor(r.names, levels = r.names)) p <- ggplot(aes(x = names, y = r.square), data = df) + geom_bar(stat = 'identity', colour='darkgray', 
fill='darkgray') + ylab(expression(paste("Variance Explained (R"^"2", ')', sep=''))) + xlab("Predictor Variables") + theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1)) + scale_y_continuous(limits = c(0, 0.4)) #ggsave("~/Google Drive/Data/influenza_HA_evolution/figures/r_squared_internal.pdf", p, width=7.5, height=7.5)
### Coercion and Methods for Symmetric Packed Matrices dsp2dsy <- function(from) .Call(dspMatrix_as_dsyMatrix, from) dsp2C <- function(from) dsy2C(.Call(dspMatrix_as_dsyMatrix, from)) setAs("dspMatrix", "dsyMatrix", dsp2dsy) ## setAs("dspMatrix", "dsCMatrix", dsp2C) setAs("dspMatrix", "CsparseMatrix", dsp2C) setAs("dspMatrix", "sparseMatrix", dsp2C) ## dge <--> dsp via dsy .dense2sp <- function(from) .dsy2dsp(.dense2sy(from)) setAs("dgeMatrix", "dspMatrix", .dense2sp) setAs("matrix", "dspMatrix", function(from) .dense2sp(..2dge(from))) ## S3-matrix <--> dsp via dsy setAs("dspMatrix", "matrix", function(from) .dsy2mat(dsp2dsy(from))) setMethod("rcond", signature(x = "dspMatrix", norm = "character"), function(x, norm, ...) .Call(dspMatrix_rcond, x, norm), valueClass = "numeric") setMethod("rcond", signature(x = "dspMatrix", norm = "missing"), function(x, norm, ...) .Call(dspMatrix_rcond, x, "O"), valueClass = "numeric") setMethod("BunchKaufman", signature(x = "dspMatrix"), function(x) .Call(dspMatrix_trf, x)) ## Should define multiplication from the right setMethod("solve", signature(a = "dspMatrix", b = "missing"), function(a, b, ...) .Call(dspMatrix_solve, a), valueClass = "dspMatrix") setMethod("solve", signature(a = "dspMatrix", b = "matrix"), function(a, b, ...) .Call(dspMatrix_matrix_solve, a, b), valueClass = "dgeMatrix") setMethod("solve", signature(a = "dspMatrix", b = "ddenseMatrix"), function(a, b, ...) .Call(dspMatrix_matrix_solve, a, b), valueClass = "dgeMatrix") ##setMethod("solve", signature(a = "dspMatrix", b = "numeric"), ## function(a, b, ...) ## .Call(dspMatrix_matrix_solve, a, as.matrix(b)), ## valueClass = "dgeMatrix") ## No longer needed ## setMethod("solve", signature(a = "dspMatrix", b = "integer"), ## function(a, b, ...) { ## storage.mode(b) <- "double" ## .Call(dspMatrix_matrix_solve, a, as.matrix(b)) ## }, valueClass = "dgeMatrix") setMethod("norm", signature(x = "dspMatrix", type = "character"), function(x, type, ...) 
.Call(dspMatrix_norm, x, type), valueClass = "numeric") setMethod("norm", signature(x = "dspMatrix", type = "missing"), function(x, type, ...) .Call(dspMatrix_norm, x, "O"), valueClass = "numeric") ## FIXME: speed up! setMethod("t", signature(x = "dspMatrix"), function(x) as(t(as(x, "dsyMatrix")), "dspMatrix"), valueClass = "dspMatrix") setMethod("diag", signature(x = "dspMatrix"), function(x, nrow, ncol) .Call(dspMatrix_getDiag, x)) setMethod("diag<-", signature(x = "dspMatrix"), function(x, value) .Call(dspMatrix_setDiag, x, value))
/R/dspMatrix.R
no_license
bedatadriven/renjin-matrix
R
false
false
2,722
r
### Coercion and Methods for Symmetric Packed Matrices dsp2dsy <- function(from) .Call(dspMatrix_as_dsyMatrix, from) dsp2C <- function(from) dsy2C(.Call(dspMatrix_as_dsyMatrix, from)) setAs("dspMatrix", "dsyMatrix", dsp2dsy) ## setAs("dspMatrix", "dsCMatrix", dsp2C) setAs("dspMatrix", "CsparseMatrix", dsp2C) setAs("dspMatrix", "sparseMatrix", dsp2C) ## dge <--> dsp via dsy .dense2sp <- function(from) .dsy2dsp(.dense2sy(from)) setAs("dgeMatrix", "dspMatrix", .dense2sp) setAs("matrix", "dspMatrix", function(from) .dense2sp(..2dge(from))) ## S3-matrix <--> dsp via dsy setAs("dspMatrix", "matrix", function(from) .dsy2mat(dsp2dsy(from))) setMethod("rcond", signature(x = "dspMatrix", norm = "character"), function(x, norm, ...) .Call(dspMatrix_rcond, x, norm), valueClass = "numeric") setMethod("rcond", signature(x = "dspMatrix", norm = "missing"), function(x, norm, ...) .Call(dspMatrix_rcond, x, "O"), valueClass = "numeric") setMethod("BunchKaufman", signature(x = "dspMatrix"), function(x) .Call(dspMatrix_trf, x)) ## Should define multiplication from the right setMethod("solve", signature(a = "dspMatrix", b = "missing"), function(a, b, ...) .Call(dspMatrix_solve, a), valueClass = "dspMatrix") setMethod("solve", signature(a = "dspMatrix", b = "matrix"), function(a, b, ...) .Call(dspMatrix_matrix_solve, a, b), valueClass = "dgeMatrix") setMethod("solve", signature(a = "dspMatrix", b = "ddenseMatrix"), function(a, b, ...) .Call(dspMatrix_matrix_solve, a, b), valueClass = "dgeMatrix") ##setMethod("solve", signature(a = "dspMatrix", b = "numeric"), ## function(a, b, ...) ## .Call(dspMatrix_matrix_solve, a, as.matrix(b)), ## valueClass = "dgeMatrix") ## No longer needed ## setMethod("solve", signature(a = "dspMatrix", b = "integer"), ## function(a, b, ...) { ## storage.mode(b) <- "double" ## .Call(dspMatrix_matrix_solve, a, as.matrix(b)) ## }, valueClass = "dgeMatrix") setMethod("norm", signature(x = "dspMatrix", type = "character"), function(x, type, ...) 
.Call(dspMatrix_norm, x, type), valueClass = "numeric") setMethod("norm", signature(x = "dspMatrix", type = "missing"), function(x, type, ...) .Call(dspMatrix_norm, x, "O"), valueClass = "numeric") ## FIXME: speed up! setMethod("t", signature(x = "dspMatrix"), function(x) as(t(as(x, "dsyMatrix")), "dspMatrix"), valueClass = "dspMatrix") setMethod("diag", signature(x = "dspMatrix"), function(x, nrow, ncol) .Call(dspMatrix_getDiag, x)) setMethod("diag<-", signature(x = "dspMatrix"), function(x, value) .Call(dspMatrix_setDiag, x, value))
plot(as.numeric(as.character(tit_new$pclass)),tit_new$fare/as.numeric(as.character(tit_new$family)),ylim=c(-10,550)) plot(as.numeric(as.character(tit_new$pclass)),tit_new$fare,ylim=c(-10,550)) by(as.numeric(as.character(tit_new$fare))/as.numeric(as.character(tit_new$family)),tit_new$pclass,summary) a<-tit_new[tit_new$pclass==2,"fare"] a[order(tit_new[tit_new$pclass==2,"fare"])] hist(a[order(tit_new[tit_new$pclass==2,"fare"])],breaks=20) a<-tit[tit$fare<=5,] median(tit[tit$pclass==1,"fare"]) median(test[test$pclass==1,"fare"]) median(tit[tit$pclass==2,"fare"]) median(tit[tit$pclass==3,"fare"]) \ \ data<-test data$fare/as.numeric(as.character(data$family))
/Code/Raw/test1.R
no_license
astronerma/Titanic
R
false
false
668
r
plot(as.numeric(as.character(tit_new$pclass)),tit_new$fare/as.numeric(as.character(tit_new$family)),ylim=c(-10,550)) plot(as.numeric(as.character(tit_new$pclass)),tit_new$fare,ylim=c(-10,550)) by(as.numeric(as.character(tit_new$fare))/as.numeric(as.character(tit_new$family)),tit_new$pclass,summary) a<-tit_new[tit_new$pclass==2,"fare"] a[order(tit_new[tit_new$pclass==2,"fare"])] hist(a[order(tit_new[tit_new$pclass==2,"fare"])],breaks=20) a<-tit[tit$fare<=5,] median(tit[tit$pclass==1,"fare"]) median(test[test$pclass==1,"fare"]) median(tit[tit$pclass==2,"fare"]) median(tit[tit$pclass==3,"fare"]) \ \ data<-test data$fare/as.numeric(as.character(data$family))
library('csv') library('dplyr') library('readxl') library('aTSA') library('outliers') library('quantmod') library('ggplot2') library('scales') library('xtable') setwd("~/Desktop/15.458 Financial Data Science/PS3") dt <- read.csv("Q3return.csv",header=TRUE) colnames(dt) = c('Date','Long','Short','Combined') dt$Date = as.Date(dt$Date,"%m/%d/%Y") df.fama = read.table("Q3FamaFrench(05-09).txt", sep = ',', header = TRUE) colnames(df.fama) = c('Date','Risk Free Rate', 'Market Excess Return', 'SMB', 'HML', 'UMD') df.fama$Date = as.Date(df.fama$Date,"%Y-%m-%d") #---------------------------------------------------------------------------------------# #1a) annualized return, volatility and SR, assume log return for long-short strategy f <- function(x){ ann_return = mean(x)*252 vol = sqrt(252)*sd(x) sr = ann_return/vol c(ann_return,vol,sr) } df.a = apply(dt[,-1],2,f) rownames(df.a) = c('Annualized Return', 'Volatility', 'Sharpe Ratio') #2b) CAPM df = df= merge(dt,df.fama, by = "Date") answer2b = summary(lm((df$`Combined`-df$`Risk Free Rate`)~df$`Market Excess Return`)) #2d) plot(sort(dt$Combined), type = 'h',ylab = 'Return', xlab = 'Counts') frac_winners = length(which(dt$Combined>0))/length(dt$Combined) frac_losers = length(which(dt$Combined<0))/length(dt$Combined) median_winners = median( dt$Combined[which(dt$Combined>0)]) median_losers = median( dt$Combined[which(dt$Combined<0)]) df.d = data.frame(c(frac_winners,median_winners),c(frac_losers,median_losers)) colnames(df.d) = c('Winners', 'Losers') rownames(df.d) = c('Fracation', 'Median')
/Q2 & Q3/Project_C_#3.R
no_license
irali1994/project-C
R
false
false
1,600
r
## 15.458 PS3: performance statistics and CAPM regression for a long/short
## equity strategy, plus a winners/losers breakdown of the combined return.
library('csv')
library('dplyr')
library('readxl')
library('aTSA')
library('outliers')
library('quantmod')
library('ggplot2')
library('scales')
library('xtable')

setwd("~/Desktop/15.458 Financial Data Science/PS3")

## Daily strategy returns: long leg, short leg, combined.
dt <- read.csv("Q3return.csv", header = TRUE)
colnames(dt) <- c('Date', 'Long', 'Short', 'Combined')
dt$Date <- as.Date(dt$Date, "%m/%d/%Y")

## Fama-French daily factors for 2005-2009.
df.fama <- read.table("Q3FamaFrench(05-09).txt", sep = ',', header = TRUE)
colnames(df.fama) <- c('Date', 'Risk Free Rate', 'Market Excess Return', 'SMB', 'HML', 'UMD')
df.fama$Date <- as.Date(df.fama$Date, "%Y-%m-%d")

#---------------------------------------------------------------------------------------#
#1a) annualized return, volatility and SR, assume log return for long-short strategy
## Annualize a vector of daily log returns: mean return, volatility, and
## Sharpe ratio, using 252 trading days per year.
f <- function(x) {
  ann_return <- mean(x) * 252
  vol <- sqrt(252) * sd(x)
  sr <- ann_return / vol
  c(ann_return, vol, sr)
}
df.a <- apply(dt[, -1], 2, f)
rownames(df.a) <- c('Annualized Return', 'Volatility', 'Sharpe Ratio')

#2b) CAPM: regress combined excess return on the market excess return.
## BUG FIX: the original had a redundant double assignment `df = df= merge(...)`.
df <- merge(dt, df.fama, by = "Date")
answer2b <- summary(lm((df$`Combined` - df$`Risk Free Rate`) ~ df$`Market Excess Return`))

#2d) distribution of daily combined returns: winners vs losers.
plot(sort(dt$Combined), type = 'h', ylab = 'Return', xlab = 'Counts')
frac_winners <- length(which(dt$Combined > 0)) / length(dt$Combined)
frac_losers <- length(which(dt$Combined < 0)) / length(dt$Combined)
median_winners <- median(dt$Combined[which(dt$Combined > 0)])
median_losers <- median(dt$Combined[which(dt$Combined < 0)])
df.d <- data.frame(c(frac_winners, median_winners), c(frac_losers, median_losers))
colnames(df.d) <- c('Winners', 'Losers')
## Typo fix: 'Fracation' -> 'Fraction' (user-facing row label).
rownames(df.d) <- c('Fraction', 'Median')
## Plot Global Active Power over 1-2 Feb 2007 (Exploratory Data Analysis
## course, plot 2) and save it as a 480x480 PNG.

## Getting full dataset
datafile <- "./data/household_power_consumption.txt"
data <- read.table(datafile, header = TRUE, sep = ";", stringsAsFactors = FALSE,
                   na.strings = "?", dec = ".")

## Subsetting the data to the two target days
subsetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]

## clear full dataset to free memory
rm(data)

## Combine Date and Time into a single POSIXlt timestamp per observation.
datetime <- strptime(paste(subsetData$Date, subsetData$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")

png("plot2.png", width = 480, height = 480)
## BUG FIX: the original plotted an undefined object `globalActivePower`;
## the intended series is the Global_active_power column of the subset.
plot(datetime, subsetData$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
/Plot 2.R
no_license
mnoohu28/ExData_Plotting1
R
false
false
541
r
## Plot Global Active Power over 1-2 Feb 2007 (Exploratory Data Analysis
## course, plot 2) and save it as a 480x480 PNG.

## Getting full dataset
datafile <- "./data/household_power_consumption.txt"
data <- read.table(datafile, header = TRUE, sep = ";", stringsAsFactors = FALSE,
                   na.strings = "?", dec = ".")

## Subsetting the data to the two target days
subsetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]

## clear full dataset to free memory
rm(data)

## Combine Date and Time into a single POSIXlt timestamp per observation.
datetime <- strptime(paste(subsetData$Date, subsetData$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")

png("plot2.png", width = 480, height = 480)
## BUG FIX: the original plotted an undefined object `globalActivePower`;
## the intended series is the Global_active_power column of the subset.
plot(datetime, subsetData$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
## Build a force-directed network of IATI organisation funding links:
## map organisation identifiers to publisher names, clean the link table,
## and render the "provides transaction funding to" graph with networkD3.

list.of.packages <- c("data.table", "dplyr", "tidyverse", "network", "visNetwork", "networkD3", "magrittr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
## Idiom fix: TRUE instead of the reassignable shorthand T.
lapply(list.of.packages, require, character.only = TRUE)

setwd("~/git/sdl-mock-data")

## Raw link table plus the publisher lookup table.
dat = fread("organisation_links.csv")
names(dat) = make.names(names(dat))
pubs = fread("iati_publishers_list.csv")
names(pubs) = make.names(names(pubs))
pubs = select(pubs, Publisher, IATI.Organisation.Identifier)

## Replace both organisation identifiers with human-readable publisher names.
dat = dat %>%
  left_join(pubs, by=c("Organisation.1" = "IATI.Organisation.Identifier")) %>%
  select(Publisher, Relationship, Organisation.2) %>%
  rename(Organisation.1 = Publisher) %>%
  left_join(pubs, by=c("Organisation.2" = "IATI.Organisation.Identifier")) %>%
  select(Organisation.1, Relationship, Publisher) %>%
  rename(Organisation.2 = Publisher)

## Drop rows with unmatched identifiers and self-links, then persist.
dat = dat[complete.cases(dat),]
dat = subset(dat, Organisation.1 != Organisation.2)
fwrite(dat, "organisation_links_clean.csv")

## Keep only funding relationships for the visualisation.
dat = subset(dat, Relationship=="provides transaction funding to")

## Node list: every organisation appearing on either end of a link.
sources = dat %>% distinct(Organisation.1) %>% rename(label = Organisation.1)
destinations = dat %>% distinct(Organisation.2) %>% rename(label = Organisation.2)
nodes = full_join(sources, destinations, by = "label")
nodes = nodes %>% rowid_to_column("id")

## Edge list weighted by the number of links between each pair.
route = dat %>% group_by(Organisation.1, Organisation.2) %>% summarize(weight = n()) %>% ungroup()
edges = route %>% left_join(nodes, by = c("Organisation.1" = "label")) %>% rename(from = id)
edges = edges %>% left_join(nodes, by = c("Organisation.2" = "label")) %>% rename(to = id)
edges <- select(edges, from, to, weight)

# routes_network = network(edges, vertex.attr = nodes, matrix.type = "edgelist", ignore.eval=F)
# plot(routes_network)

## networkD3 expects zero-based node indices.
nodes_d3 <- mutate(nodes, id = id - 1)
edges_d3 <- mutate(edges, from = from - 1, to = to - 1)

forceNetwork(Links = edges_d3, Nodes = nodes_d3, Source = "from", Target = "to",
             NodeID = "label", Group = "id", Value = "weight",
             arrows = TRUE, opacity = 1, fontSize = 16, zoom = TRUE) %>%
  saveNetwork(file="force_directed_network.html")
/link_visualization.R
no_license
akmiller01/sdl-mock-data
R
false
false
2,194
r
## Build a force-directed network of IATI organisation funding links:
## map organisation identifiers to publisher names, clean the link table,
## and render the "provides transaction funding to" graph with networkD3.

list.of.packages <- c("data.table", "dplyr", "tidyverse", "network", "visNetwork", "networkD3", "magrittr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
## Idiom fix: TRUE instead of the reassignable shorthand T.
lapply(list.of.packages, require, character.only = TRUE)

setwd("~/git/sdl-mock-data")

## Raw link table plus the publisher lookup table.
dat = fread("organisation_links.csv")
names(dat) = make.names(names(dat))
pubs = fread("iati_publishers_list.csv")
names(pubs) = make.names(names(pubs))
pubs = select(pubs, Publisher, IATI.Organisation.Identifier)

## Replace both organisation identifiers with human-readable publisher names.
dat = dat %>%
  left_join(pubs, by=c("Organisation.1" = "IATI.Organisation.Identifier")) %>%
  select(Publisher, Relationship, Organisation.2) %>%
  rename(Organisation.1 = Publisher) %>%
  left_join(pubs, by=c("Organisation.2" = "IATI.Organisation.Identifier")) %>%
  select(Organisation.1, Relationship, Publisher) %>%
  rename(Organisation.2 = Publisher)

## Drop rows with unmatched identifiers and self-links, then persist.
dat = dat[complete.cases(dat),]
dat = subset(dat, Organisation.1 != Organisation.2)
fwrite(dat, "organisation_links_clean.csv")

## Keep only funding relationships for the visualisation.
dat = subset(dat, Relationship=="provides transaction funding to")

## Node list: every organisation appearing on either end of a link.
sources = dat %>% distinct(Organisation.1) %>% rename(label = Organisation.1)
destinations = dat %>% distinct(Organisation.2) %>% rename(label = Organisation.2)
nodes = full_join(sources, destinations, by = "label")
nodes = nodes %>% rowid_to_column("id")

## Edge list weighted by the number of links between each pair.
route = dat %>% group_by(Organisation.1, Organisation.2) %>% summarize(weight = n()) %>% ungroup()
edges = route %>% left_join(nodes, by = c("Organisation.1" = "label")) %>% rename(from = id)
edges = edges %>% left_join(nodes, by = c("Organisation.2" = "label")) %>% rename(to = id)
edges <- select(edges, from, to, weight)

# routes_network = network(edges, vertex.attr = nodes, matrix.type = "edgelist", ignore.eval=F)
# plot(routes_network)

## networkD3 expects zero-based node indices.
nodes_d3 <- mutate(nodes, id = id - 1)
edges_d3 <- mutate(edges, from = from - 1, to = to - 1)

forceNetwork(Links = edges_d3, Nodes = nodes_d3, Source = "from", Target = "to",
             NodeID = "label", Group = "id", Value = "weight",
             arrows = TRUE, opacity = 1, fontSize = 16, zoom = TRUE) %>%
  saveNetwork(file="force_directed_network.html")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sqladmin_functions.R \name{sql.databases.patch} \alias{sql.databases.patch} \title{Updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.} \usage{ sql.databases.patch(Database, project, instance, database) } \arguments{ \item{Database}{The \link{Database} object to pass to this method} \item{project}{Project ID of the project that contains the instance} \item{instance}{Database instance ID} \item{database}{Name of the database to be updated in the instance} } \description{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}} } \details{ Authentication scopes used by this function are: \itemize{ \item https://www.googleapis.com/auth/cloud-platform \item https://www.googleapis.com/auth/sqlservice.admin } Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/sqlservice.admin"))} Then run \code{googleAuthR::gar_auth()} to authenticate. See \code{\link[googleAuthR]{gar_auth}} for details. } \seealso{ \href{https://cloud.google.com/sql/docs/reference/latest}{Google Documentation} Other Database functions: \code{\link{Database}}, \code{\link{sql.databases.insert}}, \code{\link{sql.databases.update}} }
/googlesqladminv1beta4.auto/man/sql.databases.patch.Rd
permissive
GVersteeg/autoGoogleAPI
R
false
true
1,362
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sqladmin_functions.R \name{sql.databases.patch} \alias{sql.databases.patch} \title{Updates a resource containing information about a database inside a Cloud SQL instance. This method supports patch semantics.} \usage{ sql.databases.patch(Database, project, instance, database) } \arguments{ \item{Database}{The \link{Database} object to pass to this method} \item{project}{Project ID of the project that contains the instance} \item{instance}{Database instance ID} \item{database}{Name of the database to be updated in the instance} } \description{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}} } \details{ Authentication scopes used by this function are: \itemize{ \item https://www.googleapis.com/auth/cloud-platform \item https://www.googleapis.com/auth/sqlservice.admin } Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/sqlservice.admin"))} Then run \code{googleAuthR::gar_auth()} to authenticate. See \code{\link[googleAuthR]{gar_auth}} for details. } \seealso{ \href{https://cloud.google.com/sql/docs/reference/latest}{Google Documentation} Other Database functions: \code{\link{Database}}, \code{\link{sql.databases.insert}}, \code{\link{sql.databases.update}} }
## Bar plots of endoC luciferase reporter assays with significance brackets
## (ggsignif): one on-screen plot plus three PDF variants (full, subset for
## presentation, and a non-bold manuscript version).
library(ggplot2)
library(dplyr)
library(ggsignif)
setwd("~/Desktop/islets CNN/luciferase_results/")
res=read.table("endoC_luciferase.txt",h=T)
## Per-experiment summary statistics (note: this shadows base::summary).
summary <- res %>%                         # the data frame to be summarised
  group_by(experiment) %>%                 # the grouping variable
  summarise(mean = mean(luciferase),       # mean of each group
            sd = sd(luciferase),           # standard deviation of each group
            n = n(),                       # sample size per group
            SE = sd(luciferase)/sqrt(n())) # standard error of each group
## Fix the display order of the experimental conditions.
summary$experiment=factor(summary$experiment, levels=c("GFP","EV","MTNR1B","WT","both_SNPs","rs17712208","rs79687284"))
levels(summary$experiment)[5]="both SNPs"
# NOTE(review): the error bars below use the standard deviation (mean +/- sd),
# not the SE computed above -- the original comment claimed standard error.
text_format <- element_text(face = "bold", color="black",size = 16)
ggplot(summary, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("black","grey","white","grey","indianred","darkred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Luciferase") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, face="bold",size=16), axis.text.y=text_format, axis.title=text_format)
## Paper version: different fill colours, significance brackets vs WT (x=4).
pdf("endoC_luciferase.for_paper.pdf")
ggplot(summary, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","grey","indianred","darkred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Luciferase") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",face="bold",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=4, xmax=5, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=4, xmax=6, tip_length = c(0.05, 0.7), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=4, xmax=7, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
#### subset plot for presentation - skip MTNR1B
pdf("endoC_luciferase.skip_controls.pdf", width=8,height=6.5)
summary_subset=summary[c(1:3,5:7),]
levels(summary_subset$experiment)[6]="rs17712208-A"
levels(summary_subset$experiment)[7]="rs79687284-C"
ggplot(summary_subset, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","indianred","indianred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Relative Luciferase Units") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",face="bold",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=3, xmax=4, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=3, xmax=5, tip_length = c(0.05, 0.8), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=3, xmax=6, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
### for paper, not bold
text_format <- element_text( color="black",size = 16)
pdf("endoC_luciferase.manuscript.pdf", width=6,height=6.5)
summary_subset=summary[c(1:3,5:7),]
levels(summary_subset$experiment)[6]="rs17712208-A"
levels(summary_subset$experiment)[7]="rs79687284-C"
ggplot(summary_subset, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","indianred","indianred","grey"), color=c("black"),width=0.7, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Relative Luciferase Units") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=3, xmax=4, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=3, xmax=5, tip_length = c(0.05, 0.8), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=3, xmax=6, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
/endoC_luciferase_plot.R
permissive
agawes/islet_CNN
R
false
false
4,500
r
## Bar plots of endoC luciferase reporter assays with significance brackets
## (ggsignif): one on-screen plot plus three PDF variants (full, subset for
## presentation, and a non-bold manuscript version).
library(ggplot2)
library(dplyr)
library(ggsignif)
setwd("~/Desktop/islets CNN/luciferase_results/")
res=read.table("endoC_luciferase.txt",h=T)
## Per-experiment summary statistics (note: this shadows base::summary).
summary <- res %>%                         # the data frame to be summarised
  group_by(experiment) %>%                 # the grouping variable
  summarise(mean = mean(luciferase),       # mean of each group
            sd = sd(luciferase),           # standard deviation of each group
            n = n(),                       # sample size per group
            SE = sd(luciferase)/sqrt(n())) # standard error of each group
## Fix the display order of the experimental conditions.
summary$experiment=factor(summary$experiment, levels=c("GFP","EV","MTNR1B","WT","both_SNPs","rs17712208","rs79687284"))
levels(summary$experiment)[5]="both SNPs"
# NOTE(review): the error bars below use the standard deviation (mean +/- sd),
# not the SE computed above -- the original comment claimed standard error.
text_format <- element_text(face = "bold", color="black",size = 16)
ggplot(summary, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("black","grey","white","grey","indianred","darkred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Luciferase") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, face="bold",size=16), axis.text.y=text_format, axis.title=text_format)
## Paper version: different fill colours, significance brackets vs WT (x=4).
pdf("endoC_luciferase.for_paper.pdf")
ggplot(summary, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","grey","indianred","darkred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Luciferase") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",face="bold",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=4, xmax=5, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=4, xmax=6, tip_length = c(0.05, 0.7), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=4, xmax=7, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
#### subset plot for presentation - skip MTNR1B
pdf("endoC_luciferase.skip_controls.pdf", width=8,height=6.5)
summary_subset=summary[c(1:3,5:7),]
levels(summary_subset$experiment)[6]="rs17712208-A"
levels(summary_subset$experiment)[7]="rs79687284-C"
ggplot(summary_subset, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","indianred","indianred","grey"), color=c("black"),width=0.75, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Relative Luciferase Units") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",face="bold",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=3, xmax=4, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=3, xmax=5, tip_length = c(0.05, 0.8), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=3, xmax=6, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
### for paper, not bold
text_format <- element_text( color="black",size = 16)
pdf("endoC_luciferase.manuscript.pdf", width=6,height=6.5)
summary_subset=summary[c(1:3,5:7),]
levels(summary_subset$experiment)[6]="rs17712208-A"
levels(summary_subset$experiment)[7]="rs79687284-C"
ggplot(summary_subset, aes(x=experiment, y=mean)) +
  geom_bar(position=position_dodge(), stat="identity", fill=c("white","white","grey","indianred","indianred","grey"), color=c("black"),width=0.7, size=1.2) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd), width=0.2, size=1.2) +
  ylab("Relative Luciferase Units") + xlab("") +
  theme_classic() +
  theme(axis.text.x=element_text(angle = 45, hjust = 1, color="black",size=16), axis.text.y=text_format, axis.title=text_format) +
  geom_signif(annotation="***", y_position=1.6, xmin=3, xmax=4, tip_length = c(0.05, 0.4), textsize=6) +
  geom_signif(annotation="***", y_position=1.75, xmin=3, xmax=5, tip_length = c(0.05, 0.8), textsize=6) +
  geom_signif(annotation="NS", y_position=1.9, xmin=3, xmax=6, tip_length = c(0.05, 0.3), textsize=6)
dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/graph_to_shp.R \name{graph_to_shp} \alias{graph_to_shp} \title{Export a spatial graph to shapefile layers} \usage{ graph_to_shp( graph, crds, mode = "both", crds_crs, layer_name, dir_path, metrics = FALSE ) } \arguments{ \item{graph}{A graph object of class \code{igraph}} \item{crds}{(if 'mode = 'spatial'') A \code{data.frame} with the spatial coordinates of the graph nodes. It must have three columns: \itemize{ \item{ID: Name of the graph nodes (will be converted into character string). The names must be the same as the node names of the graph object of class \code{igraph} (\code{igraph::V(graph)$name})} \item{x: Longitude (numeric or integer) of the graph nodes in the coordinates reference system indicated with the argument crds_crs.} \item{y: Latitude (numeric or integer) of the graph nodes in the coordinates reference system indicated with the argument crds_crs.} }} \item{mode}{Indicates which shapefile layers will be created \itemize{ \item{If 'mode = 'both'' (default), then two shapefile layers are created, one for the nodes and another for the links.} \item{If 'mode = 'node'', a shapefile layer is created for the nodes only.} \item{If 'mode = 'link'', a shapefile layer is created for the links only.} }} \item{crds_crs}{A character string indicating the Coordinates Reference System of the spatial coordinates of the nodes and of the shapefile layers created. The projection and datum are given in the PROJ.4 format.} \item{layer_name}{A character string indicating the suffix of the name of the layers to be created.} \item{dir_path}{A character string corresponding to the path to the directory in which the shapefile layers will be exported. If \code{dir_path = "wd"}, then the layers are created in the current working directory.} \item{metrics}{(not possible if 'mode = 'link'') Logical. Should metrics be calculated and integrated in the attribute table of the node shapefile layer? 
(default: FALSE) Metrics calculated are degrees, betweenness centrality and sum of inverse weight (if links are weighted)} } \value{ Create shapefile layers in the directory specified with the parameter 'dir_path'. } \description{ The function enables to export a spatial graph to shapefile layers. } \examples{ data(data_tuto) mat_w <- data_tuto[[1]] gp <- gen_graph_topo(mat_w = mat_w, topo = "gabriel") crds_crs1 <- "+proj=lcc +lat_1=49 +lat_2=44 +lat_0=46.5 +lon_0=3 " crds_crs2 <- "+x_0=700000 +y_0=6600000 +ellps=GRS80 +units=m +no_defs" crds_crs <- paste(crds_crs1, crds_crs2, sep = "") crds <- pts_pop_simul layer_name <- "graph_dps_gab" graph_to_shp(graph = gp, crds = pts_pop_simul, mode = "both", crds_crs = crds_crs, layer_name = "test_fonct", dir_path = tempdir(), metrics = TRUE) } \author{ P. Savary }
/man/graph_to_shp.Rd
no_license
jamilleveiga/graph4lg
R
false
true
2,879
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/graph_to_shp.R \name{graph_to_shp} \alias{graph_to_shp} \title{Export a spatial graph to shapefile layers} \usage{ graph_to_shp( graph, crds, mode = "both", crds_crs, layer_name, dir_path, metrics = FALSE ) } \arguments{ \item{graph}{A graph object of class \code{igraph}} \item{crds}{(if 'mode = 'spatial'') A \code{data.frame} with the spatial coordinates of the graph nodes. It must have three columns: \itemize{ \item{ID: Name of the graph nodes (will be converted into character string). The names must be the same as the node names of the graph object of class \code{igraph} (\code{igraph::V(graph)$name})} \item{x: Longitude (numeric or integer) of the graph nodes in the coordinates reference system indicated with the argument crds_crs.} \item{y: Latitude (numeric or integer) of the graph nodes in the coordinates reference system indicated with the argument crds_crs.} }} \item{mode}{Indicates which shapefile layers will be created \itemize{ \item{If 'mode = 'both'' (default), then two shapefile layers are created, one for the nodes and another for the links.} \item{If 'mode = 'node'', a shapefile layer is created for the nodes only.} \item{If 'mode = 'link'', a shapefile layer is created for the links only.} }} \item{crds_crs}{A character string indicating the Coordinates Reference System of the spatial coordinates of the nodes and of the shapefile layers created. The projection and datum are given in the PROJ.4 format.} \item{layer_name}{A character string indicating the suffix of the name of the layers to be created.} \item{dir_path}{A character string corresponding to the path to the directory in which the shapefile layers will be exported. If \code{dir_path = "wd"}, then the layers are created in the current working directory.} \item{metrics}{(not possible if 'mode = 'link'') Logical. Should metrics be calculated and integrated in the attribute table of the node shapefile layer? 
(default: FALSE) Metrics calculated are degrees, betweenness centrality and sum of inverse weight (if links are weighted)} } \value{ Create shapefile layers in the directory specified with the parameter 'dir_path'. } \description{ The function enables to export a spatial graph to shapefile layers. } \examples{ data(data_tuto) mat_w <- data_tuto[[1]] gp <- gen_graph_topo(mat_w = mat_w, topo = "gabriel") crds_crs1 <- "+proj=lcc +lat_1=49 +lat_2=44 +lat_0=46.5 +lon_0=3 " crds_crs2 <- "+x_0=700000 +y_0=6600000 +ellps=GRS80 +units=m +no_defs" crds_crs <- paste(crds_crs1, crds_crs2, sep = "") crds <- pts_pop_simul layer_name <- "graph_dps_gab" graph_to_shp(graph = gp, crds = pts_pop_simul, mode = "both", crds_crs = crds_crs, layer_name = "test_fonct", dir_path = tempdir(), metrics = TRUE) } \author{ P. Savary }
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 3342 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 3342 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query25_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 473 c no.of clauses 3342 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 3342 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query25_1344n.qdimacs 473 3342 E1 [] 0 72 401 3342 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query25_1344n/query24_query25_1344n.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
710
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 3342 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 3342 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query25_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 473 c no.of clauses 3342 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 3342 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query25_1344n.qdimacs 473 3342 E1 [] 0 72 401 3342 NONE
## Scratch script working through Michael Levy's dplyr workshop.
cat("\014")
rm(list = ls())  # NOTE: clearing the workspace is acceptable only in scratch scripts
# exploring dplyr with michael levy's workshop
# http://www.michaellevy.name/blog/dplyr-data-manipulation-in-r-made-easy
###############################################################
install.packages("dplyr")
install.packages("babynames")
library(dplyr)  # library() errors on a missing package, unlike require()
set.seed(1234)

## Toy data: 8 random shapes with a color and an area.
rows <- 8
d <- data.frame(shape = sample(c("circle","square"), rows, replace = TRUE),
                color = sample(c("red","blue"), rows, replace = TRUE),
                area = runif(rows, min = 1, max = 10))

# filter : choose rows
filter(d, shape == "circle")
# choose rows circle & area <2 or not circle (square) & area >3
filter(d, ifelse(shape == "circle", area < 2, area > 3))

# select: choose columns
select(d, shape, area)
select(d, shape:area)
select(d, -color)
select(d, contains("co"))
# the syntax is astoundingly simple

# arrange: ordering rows!
arrange(d, shape, color)
arrange(d, -area)

# mutate: make new columns (1:1)
## BUG FIX: the circle perimeter was written `2&(pi*area)^0.5` (a logical AND);
## the perimeter of a circle with area A is 2*sqrt(pi*A).
mutate(d,
       new.color = sample(rainbow(8)),
       perimeter = ifelse(shape == "square", 4*area^0.5, 2*(pi*area)^0.5),
       side.length = ifelse(shape == "square", perimeter/4, NA))

# summarize() + group_by() -- make new columns (N:1)
d.by.col <- group_by(d, color)
d.by.col
summarize(d.by.col, tot.area = sum(area))

# indent is important
d %>% group_by(shape, color) %>% summarize(max.area = max(area))

## Top-10 baby names per sex for the 1980s.
library(babynames)
d <- tbl_df(babynames)
d %>%
  filter(year >= 1980 & year < 1990) %>%
  group_by(sex, name) %>%
  summarize(count = sum(n)) %>%
  mutate(position = rank(-count)) %>%
  filter(position <= 10) %>%
  arrange(sex, position)

# official vignette: http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
install.packages("nycflights13")
library(nycflights13)
head(flights)
filter(flights, month == 1)
slice(flights, 1:10)
mutate(flights, gain = arr_delay - dep_delay)
/datavis/dplyr_practice.R
no_license
haesunshine/R
R
false
false
1,864
r
## Scratch script working through Michael Levy's dplyr workshop.
cat("\014")
rm(list = ls())  # NOTE: clearing the workspace is acceptable only in scratch scripts
# exploring dplyr with michael levy's workshop
# http://www.michaellevy.name/blog/dplyr-data-manipulation-in-r-made-easy
###############################################################
install.packages("dplyr")
install.packages("babynames")
library(dplyr)  # library() errors on a missing package, unlike require()
set.seed(1234)

## Toy data: 8 random shapes with a color and an area.
rows <- 8
d <- data.frame(shape = sample(c("circle","square"), rows, replace = TRUE),
                color = sample(c("red","blue"), rows, replace = TRUE),
                area = runif(rows, min = 1, max = 10))

# filter : choose rows
filter(d, shape == "circle")
# choose rows circle & area <2 or not circle (square) & area >3
filter(d, ifelse(shape == "circle", area < 2, area > 3))

# select: choose columns
select(d, shape, area)
select(d, shape:area)
select(d, -color)
select(d, contains("co"))
# the syntax is astoundingly simple

# arrange: ordering rows!
arrange(d, shape, color)
arrange(d, -area)

# mutate: make new columns (1:1)
## BUG FIX: the circle perimeter was written `2&(pi*area)^0.5` (a logical AND);
## the perimeter of a circle with area A is 2*sqrt(pi*A).
mutate(d,
       new.color = sample(rainbow(8)),
       perimeter = ifelse(shape == "square", 4*area^0.5, 2*(pi*area)^0.5),
       side.length = ifelse(shape == "square", perimeter/4, NA))

# summarize() + group_by() -- make new columns (N:1)
d.by.col <- group_by(d, color)
d.by.col
summarize(d.by.col, tot.area = sum(area))

# indent is important
d %>% group_by(shape, color) %>% summarize(max.area = max(area))

## Top-10 baby names per sex for the 1980s.
library(babynames)
d <- tbl_df(babynames)
d %>%
  filter(year >= 1980 & year < 1990) %>%
  group_by(sex, name) %>%
  summarize(count = sum(n)) %>%
  mutate(position = rank(-count)) %>%
  filter(position <= 10) %>%
  arrange(sex, position)

# official vignette: http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
install.packages("nycflights13")
library(nycflights13)
head(flights)
filter(flights, month == 1)
slice(flights, 1:10)
mutate(flights, gain = arr_delay - dep_delay)
#' Cross-validation error summary.
#'
#' Aggregates per-fold errors produced by `collateErrorsNonEntropy()` into
#' cross-validated mean errors, their standard errors, and per-class (or
#' overall, for numeric labels) individual errors.
#'
#' @param trueLabels True outcome labels; numeric for regression, otherwise
#'   treated as class labels.
#' @param cvFit Cross-validation fit object; must carry `nSplits`, `nFold`,
#'   and `groups` (a list of per-fold index vectors).
#' @return A list with `errorSE`, `errorCV`, `individualErrors`, and
#'   `errorConfusion`.
crossValErrors <- function(trueLabels, cvFit) {
  nFeatureSets <- cvFit$nSplits
  nUniqueLabels <- length(table(trueLabels))
  ## One row per fold, one column per candidate feature-set size.
  predictionErrors <- matrix(NA, ncol = length(nFeatureSets), nrow = cvFit$nFold)
  errorConfusion <- vector(length = length(nFeatureSets), "numeric")
  ## Numeric labels: one error per feature set; class labels: one row per
  ## observed class, labelled with the class names.
  if (inherits(trueLabels, "numeric")) {
    individualErrors <- vector(length = length(nFeatureSets), "numeric")
  } else {
    individualErrors <- matrix(NA, ncol = length(nFeatureSets), nrow = nUniqueLabels,
                               dimnames = list(dimnames(table(trueLabels))[[1]], NULL))
  }
  ## seq_len() instead of 1:nFold -- safe if nFold is ever 0.
  for (i in seq_len(cvFit$nFold)) {
    ii <- cvFit$groups[[i]]
    tmp <- collateErrorsNonEntropy(trueLabels, nFeatureSets, nUniqueLabels,
                                   cvFit, errorConfusion, individualErrors, ii)
    predictionErrors[i, ] <- tmp$errorConfusion
  }
  ## Final pass (no fold indices) for the pooled error summaries.
  tmp <- collateErrorsNonEntropy(trueLabels, nFeatureSets, nUniqueLabels,
                                 cvFit, errorConfusion, individualErrors)
  ## NOTE(review): removed the unused local `delete <- cvFit$delete`.
  return(list(errorSE = sqrt(apply(predictionErrors, 2, var) / cvFit$nFold),
              errorCV = colMeans(predictionErrors),
              individualErrors = tmp$individualErrors,
              errorConfusion = tmp$errorConfusion))
}
/axioPackages/svmrfe/R/crossValErrors.R
no_license
jjsayleraxio/Axio_rstudio
R
false
false
1,263
r
#' Cross-validation error summary.
#'
#' Aggregates per-fold errors produced by `collateErrorsNonEntropy()` into
#' cross-validated mean errors, their standard errors, and per-class (or
#' overall, for numeric labels) individual errors.
#'
#' @param trueLabels True outcome labels; numeric for regression, otherwise
#'   treated as class labels.
#' @param cvFit Cross-validation fit object; must carry `nSplits`, `nFold`,
#'   and `groups` (a list of per-fold index vectors).
#' @return A list with `errorSE`, `errorCV`, `individualErrors`, and
#'   `errorConfusion`.
crossValErrors <- function(trueLabels, cvFit) {
  nFeatureSets <- cvFit$nSplits
  nUniqueLabels <- length(table(trueLabels))
  ## One row per fold, one column per candidate feature-set size.
  predictionErrors <- matrix(NA, ncol = length(nFeatureSets), nrow = cvFit$nFold)
  errorConfusion <- vector(length = length(nFeatureSets), "numeric")
  ## Numeric labels: one error per feature set; class labels: one row per
  ## observed class, labelled with the class names.
  if (inherits(trueLabels, "numeric")) {
    individualErrors <- vector(length = length(nFeatureSets), "numeric")
  } else {
    individualErrors <- matrix(NA, ncol = length(nFeatureSets), nrow = nUniqueLabels,
                               dimnames = list(dimnames(table(trueLabels))[[1]], NULL))
  }
  ## seq_len() instead of 1:nFold -- safe if nFold is ever 0.
  for (i in seq_len(cvFit$nFold)) {
    ii <- cvFit$groups[[i]]
    tmp <- collateErrorsNonEntropy(trueLabels, nFeatureSets, nUniqueLabels,
                                   cvFit, errorConfusion, individualErrors, ii)
    predictionErrors[i, ] <- tmp$errorConfusion
  }
  ## Final pass (no fold indices) for the pooled error summaries.
  tmp <- collateErrorsNonEntropy(trueLabels, nFeatureSets, nUniqueLabels,
                                 cvFit, errorConfusion, individualErrors)
  ## NOTE(review): removed the unused local `delete <- cvFit$delete`.
  return(list(errorSE = sqrt(apply(predictionErrors, 2, var) / cvFit$nFold),
              errorCV = colMeans(predictionErrors),
              individualErrors = tmp$individualErrors,
              errorConfusion = tmp$errorConfusion))
}
## The german Credit data set contains 25 columns. ## columns 1 - 20 are actual data features ## columns 21 - 24 are indicator columns ## column 25 is the classificator column ## Read in the data set. # apparently only works if we put sep="" df <- read.csv('homework_1.txt', header=FALSE, sep="") ## quick summary of all data head(df) summary(df) ## quick look to see the classification of all 100 ## give some rownames #x <- (1:1000) #qqplot (x, df$V25) ## the above graph showed tha somewhere between 600 - 800 ## of the people in the study had good credit = 1, ## whereas the rest have bad = 2. ## Find out the real number with the table function ## which gives us that it's ## 700 -> 1 ## 300 -> 2 #class <- table(df$V25) #class # let's use a regular linear model first, and plot it against our data set fit <- lm(V25 ~ ., data=df) summary(fit) # remove all the indicator columns ? ## TODO: does this make sense ? fit2 <- update(fit, .~. -V24 -V23 -V22 -V21) # remove the one with lowest t score fit3 <- update(fit2, .~. -V8) summary(fit3) fit4 <- update(fit3, .~. -V13) summary(fit4) fit5 <- update(fit4, .~. -V10) summary(fit5) plot(resid(fit5)) # want to see absence of structure in resid scatterplot ("gaussian white noise") ## not good yet, let's remove more. fit6 <- update(fit5, .~. -V4) summary(fit6) fit7 <- update(fit6, .~. -V14) summary(fit7) ## it's looking kind of good plot(resid(fit7)) qqnorm(resid(fit7)) # let's use a regular linear model first, and plot it against our data set fit <- lm(pairs ~ ped, df) plot(pairs ~ ped, df) abline(fit, col="red") # What's going on here? Do we think this is a good fit of the model? # probably not. 
# Let's try polynomial regression at various levels and compare each
# performance. (Assumes `df` with columns pairs/ped and `fit` from the
# preceding linear model are already in the workspace.)

# Fit with degree two.
fit2 <- lm(pairs ~ poly(ped, 2), df)
points(df$ped, predict(fit2), col="blue", type="l")

# Fit with degree three.
fit3 <- lm(pairs ~ poly(ped, 3), df)
points(df$ped, predict(fit3), col="orange", type="l")

# Fit with degree four.
fit4 <- lm(pairs ~ poly(ped, 4), df)
points(df$ped, predict(fit4), col="green", type="l")

# Which fit performed the best? Why do you think so?
# Check the analysis of variance between each fit: which has the best p value?
anova(fit, fit2, fit3, fit4)
/homeworks/homework1/homework1.R
no_license
GusSand/GADataScience
R
false
false
2,283
r
## The German Credit data set contains 25 columns:
##   columns 1-20  actual data features
##   columns 21-24 indicator columns
##   column 25     the class column (1 = good credit, 2 = bad credit)

## Read in the data set.
# Apparently only works if we put sep = "" (whitespace-separated file).
df <- read.csv('homework_1.txt', header=FALSE, sep="")

## Quick summary of all data.
head(df)
summary(df)

## Quick look to see the classification of all observations:
#x <- (1:1000)
#qqplot (x, df$V25)
## The above graph showed that somewhere between 600-800 of the people in
## the study had good credit = 1, whereas the rest have bad = 2.
## table() gives the exact split: 700 -> 1, 300 -> 2.
#class <- table(df$V25)
#class

# Let's use a regular linear model first on all predictors.
fit <- lm(V25 ~ ., data=df)
summary(fit)

# Remove all the indicator columns?
## TODO: does this make sense?
fit2 <- update(fit, .~. -V24 -V23 -V22 -V21)

# Iteratively remove the predictor with the lowest t score.
fit3 <- update(fit2, .~. -V8)
summary(fit3)
fit4 <- update(fit3, .~. -V13)
summary(fit4)
fit5 <- update(fit4, .~. -V10)
summary(fit5)

# Want to see absence of structure in the residual scatterplot
# ("gaussian white noise").
plot(resid(fit5))

## Not good yet, let's remove more.
fit6 <- update(fit5, .~. -V4)
summary(fit6)
fit7 <- update(fit6, .~. -V14)
summary(fit7)

## It's looking kind of good.
plot(resid(fit7))
qqnorm(resid(fit7))

# NOTE(review): the code below references columns `pairs` and `ped`, which
# are not part of the credit data read above (V1..V25) -- it appears to
# belong to a different data set. Confirm the intended `df` before running.
fit <- lm(pairs ~ ped, df)
plot(pairs ~ ped, df)
abline(fit, col="red")
# What's going on here? Do we think this is a good fit of the model?
# Probably not.
# Let's try polynomial regression at various levels and compare each
# performance. (Assumes `df` with columns pairs/ped and `fit` from the
# preceding linear model are already in the workspace.)

# Fit with degree two.
fit2 <- lm(pairs ~ poly(ped, 2), df)
points(df$ped, predict(fit2), col="blue", type="l")

# Fit with degree three.
fit3 <- lm(pairs ~ poly(ped, 3), df)
points(df$ped, predict(fit3), col="orange", type="l")

# Fit with degree four.
fit4 <- lm(pairs ~ poly(ped, 4), df)
points(df$ped, predict(fit4), col="green", type="l")

# Which fit performed the best? Why do you think so?
# Check the analysis of variance between each fit: which has the best p value?
anova(fit, fit2, fit3, fit4)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{ifind2}
\alias{ifind2}
\title{Item Fit indexes 'chi^2' and 'g^2' statistics that cut scores based on observed score.}
\usage{
ifind2(x, para, theta, Gc = 2, fc = 3, p = 0.05, D = 1.702)
}
\arguments{
\item{x}{data.frame of item response data.}

\item{para}{item parameter data.frame estimated by \code{\link{estip}}.}

\item{theta}{theta parameter vector. Its length must be the same as \code{x}.}

\item{Gc}{the number of sub groups.}

\item{fc}{a first column of item response data.frame.}

\item{p}{\emph{p} value.}

\item{D}{a scale constant.}
}
\description{
Item Fit indexes 'chi^2' and 'g^2' statistics that cut scores based on observed score.
}
/man/ifind2.Rd
permissive
takuizum/irtfun2
R
false
true
739
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{ifind2}
\alias{ifind2}
\title{Item Fit indexes 'chi^2' and 'g^2' statistics that cut scores based on observed score.}
\usage{
ifind2(x, para, theta, Gc = 2, fc = 3, p = 0.05, D = 1.702)
}
\arguments{
\item{x}{data.frame of item response data.}

\item{para}{item parameter data.frame estimated by \code{\link{estip}}.}

\item{theta}{theta parameter vector. Its length must be the same as \code{x}.}

\item{Gc}{the number of sub groups.}

\item{fc}{a first column of item response data.frame.}

\item{p}{\emph{p} value.}

\item{D}{a scale constant.}
}
\description{
Item Fit indexes 'chi^2' and 'g^2' statistics that cut scores based on observed score.
}
install.packages("stringr")  # Install stringr package
library(stringr)
library(dplyr)

#########################################################################
# Solution for question 1
#########################################################################
# Setting the working directory.
# NOTE(review): setwd(getwd()) is a no-op; kept for fidelity.
workingdic <- getwd()
setwd(workingdic)
print(workingdic)

# Read the data from the CSV file.
niPostCodes <- read.csv("NIPostCodesData/NIPostcodes.csv", header = FALSE)

# Displaying first 10 rows of the csv file records.
head(niPostCodes, 10)

# Showing the structure of data from csv file.
str(niPostCodes)

# Getting number of rows and columns.
cat("number of columns: ", length(niPostCodes))
cat("number of rows: ", nrow(niPostCodes))

#########################################################################
# Solution for question 2
#########################################################################
# Creating column names.
niPostCodes_column_name <- c("Organisation Name", "Sub-building Name",
                             "Building Name", "Number", "Primary_Thorfare",
                             "Alt Thorfare", "Secondary Thorfare", "Locality",
                             "Townland", "Town", "County", "PostCode",
                             "X-Cordinates", "Y-Cordinates", "Index")
colnames(niPostCodes) <- niPostCodes_column_name
head(niPostCodes)

#########################################################################
# Solution for question 3
#########################################################################
# Replacing all the blank values with NA.
niPostCodes[niPostCodes == ""] <- NA
head(niPostCodes)
#########################################################################
# Solution for question 4
#########################################################################
# Count missing values per column.
missing_data_count <- sapply(niPostCodes, function(x) sum(is.na(x)))
missing_data_count

#########################################################################
# Solution for question 5
#########################################################################
# Moving the column named Index to the beginning of the dataframe.
niPostCodes <- niPostCodes[, c(which(colnames(niPostCodes) == "Index"),
                               which(colnames(niPostCodes) != "Index"))]
head(niPostCodes)

#########################################################################
# Solution for question 6
#########################################################################
# Create a new dataset called Limavady_data.
# BUG FIX: the original filtered on niPostCodes$Location and
# niPostCodes$TownLand, but the columns assigned in question 2 are named
# "Locality" and "Townland", so those references were NULL and the filter
# failed. Also use bare column names inside filter() instead of df$ prefixes.
Limavady_data <- niPostCodes %>%
  filter(str_detect(Locality, "LIMAVADY") |
           str_detect(Townland, "LIMAVADY") |
           str_detect(Town, "LIMAVADY"))
Limavady_data
cat("number of rows: ", nrow(Limavady_data))
write.csv(Limavady_data, file = "NIPostCodesData/Limavady_data.csv",
          row.names = FALSE)

#########################################################################
# Solution for question 7
#########################################################################
write.csv(niPostCodes, file = "NIPostCodesData/CleanNIPostcodeData.csv",
          row.names = FALSE)
/CA2/NIPostcode.R
no_license
murthysn18/Data_Science_R
R
false
false
3,954
r
install.packages("stringr")  # Install stringr package
library(stringr)
library(dplyr)

#########################################################################
# Solution for question 1
#########################################################################
# Setting the working directory.
# NOTE(review): setwd(getwd()) is a no-op; kept for fidelity.
workingdic <- getwd()
setwd(workingdic)
print(workingdic)

# Read the data from the CSV file.
niPostCodes <- read.csv("NIPostCodesData/NIPostcodes.csv", header = FALSE)

# Displaying first 10 rows of the csv file records.
head(niPostCodes, 10)

# Showing the structure of data from csv file.
str(niPostCodes)

# Getting number of rows and columns.
cat("number of columns: ", length(niPostCodes))
cat("number of rows: ", nrow(niPostCodes))

#########################################################################
# Solution for question 2
#########################################################################
# Creating column names.
niPostCodes_column_name <- c("Organisation Name", "Sub-building Name",
                             "Building Name", "Number", "Primary_Thorfare",
                             "Alt Thorfare", "Secondary Thorfare", "Locality",
                             "Townland", "Town", "County", "PostCode",
                             "X-Cordinates", "Y-Cordinates", "Index")
colnames(niPostCodes) <- niPostCodes_column_name
head(niPostCodes)

#########################################################################
# Solution for question 3
#########################################################################
# Replacing all the blank values with NA.
niPostCodes[niPostCodes == ""] <- NA
head(niPostCodes)
#########################################################################
# Solution for question 4
#########################################################################
# Count missing values per column.
missing_data_count <- sapply(niPostCodes, function(x) sum(is.na(x)))
missing_data_count

#########################################################################
# Solution for question 5
#########################################################################
# Moving the column named Index to the beginning of the dataframe.
niPostCodes <- niPostCodes[, c(which(colnames(niPostCodes) == "Index"),
                               which(colnames(niPostCodes) != "Index"))]
head(niPostCodes)

#########################################################################
# Solution for question 6
#########################################################################
# Create a new dataset called Limavady_data.
# BUG FIX: the original filtered on niPostCodes$Location and
# niPostCodes$TownLand, but the columns assigned in question 2 are named
# "Locality" and "Townland", so those references were NULL and the filter
# failed. Also use bare column names inside filter() instead of df$ prefixes.
Limavady_data <- niPostCodes %>%
  filter(str_detect(Locality, "LIMAVADY") |
           str_detect(Townland, "LIMAVADY") |
           str_detect(Town, "LIMAVADY"))
Limavady_data
cat("number of rows: ", nrow(Limavady_data))
write.csv(Limavady_data, file = "NIPostCodesData/Limavady_data.csv",
          row.names = FALSE)

#########################################################################
# Solution for question 7
#########################################################################
write.csv(niPostCodes, file = "NIPostCodesData/CleanNIPostcodeData.csv",
          row.names = FALSE)
library(shiny)

# UI for the Mauna Loa CO2 forecasting app: a year-range slider plus
# show/hide toggles for the axis labels and title, and the forecast plot.
shinyUI(fluidPage(
  titlePanel("Mauna Loa Atmospheric CO2 Concentration"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("years", "Select the range of years to plot",
                  sep = "", 1959, 1997, value = c(1994, 1997)),
      checkboxInput("show_xlab", "Show/Hide X Axis Label", value = TRUE),
      checkboxInput("show_ylab", "Show/Hide Y Axis Label", value = TRUE),
      checkboxInput("show_title", "Show/Hide Title", value = TRUE)
    ),
    mainPanel(
      h3("Graph of CO2 data and Forecast for next 12 months"),
      h6("Use the slider to select the years by which you would like to forecast to work from. The graph will take the input and recalcuate the forecast based on the date range selected. The data will be forecasted out 12 months using the forecast library. It will use the best model available to fit to the data The forecast is in blue. Expand the years to see more trend."),
      plotOutput("plot1")
    )
  )
))
/coursera_homework/developing_data_products/week_4/ui.R
no_license
tamcknight/coding-exercises
R
false
false
1,011
r
library(shiny)

# UI for the Mauna Loa CO2 forecasting app: a year-range slider plus
# show/hide toggles for the axis labels and title, and the forecast plot.
shinyUI(fluidPage(
  titlePanel("Mauna Loa Atmospheric CO2 Concentration"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("years", "Select the range of years to plot",
                  sep = "", 1959, 1997, value = c(1994, 1997)),
      checkboxInput("show_xlab", "Show/Hide X Axis Label", value = TRUE),
      checkboxInput("show_ylab", "Show/Hide Y Axis Label", value = TRUE),
      checkboxInput("show_title", "Show/Hide Title", value = TRUE)
    ),
    mainPanel(
      h3("Graph of CO2 data and Forecast for next 12 months"),
      h6("Use the slider to select the years by which you would like to forecast to work from. The graph will take the input and recalcuate the forecast based on the date range selected. The data will be forecasted out 12 months using the forecast library. It will use the best model available to fit to the data The forecast is in blue. Expand the years to see more trend."),
      plotOutput("plot1")
    )
  )
))
# plot1: histogram of Global Active Power for two days of the household
# power consumption data, copied to plot1.png.
# (Restored line structure: as a single line, the leading "#plot1" comment
# swallowed the entire script.)

# Read two days' worth (2 * 24 * 60 minute rows) starting at the given
# offset into the raw text file.
mydata <- read.table("household_power_consumption.txt", sep = ";",
                     nrows = 24*60*2, skip = 24*60*46 + 396, header = TRUE)
names(mydata) <- c("Date", "Time", "Global_active_power",
                   "Global_reactive_power", "Voltage", "Global_intensity",
                   "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# Draw the histogram on the screen device, then copy it to a PNG file.
hist(mydata$Global_active_power,
     xlab = "Global Active Power (Kilowatts)",
     col = "Red", main = "Global Active Power")
dev.copy(png, file = "plot1.png")
dev.off()
/plot1.R
no_license
carlawright/ExData_Plotting1
R
false
false
445
r
# plot1: histogram of Global Active Power for two days of the household
# power consumption data, copied to plot1.png.
# (Restored line structure: as a single line, the leading "#plot1" comment
# swallowed the entire script.)

# Read two days' worth (2 * 24 * 60 minute rows) starting at the given
# offset into the raw text file.
mydata <- read.table("household_power_consumption.txt", sep = ";",
                     nrows = 24*60*2, skip = 24*60*46 + 396, header = TRUE)
names(mydata) <- c("Date", "Time", "Global_active_power",
                   "Global_reactive_power", "Voltage", "Global_intensity",
                   "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# Draw the histogram on the screen device, then copy it to a PNG file.
hist(mydata$Global_active_power,
     xlab = "Global Active Power (Kilowatts)",
     col = "Red", main = "Global Active Power")
dev.copy(png, file = "plot1.png")
dev.off()
# The functions in this file are all internal.

# Determine if a set of parameters is within the bounds.
# Returns (via sapply) one logical per parameter name: TRUE when the value
# in `tab` lies inside the [lower, upper] interval from `bounds`.
checkBounds <- function(tab, bounds) {
  return(
    sapply(
      names(bounds),
      function(paramName) {
        tab[[paramName]] >= bounds[[paramName]][[1]] &
          tab[[paramName]] <= bounds[[paramName]][[2]]
      }
    )
  )
}

# Draw random parameters with Latin Hypercube Sampling.
# Attempts (up to 100 times) to accumulate `rPoints` unique parameter sets,
# redrawing only the shortfall on each retry; stops (when FAIL) if it cannot.
randParams <- function(boundsDT, rPoints, FAIL = TRUE) {
  attempt <- 1
  newPars <- data.table()
  poi <- rPoints
  while (attempt <= 100) {
    latinCube <- data.table(lhs::improvedLHS(n = poi, k = nrow(boundsDT)))
    setnames(latinCube, boundsDT$N)
    newPars <- unique(rbind(unMMScale(latinCube, boundsDT), newPars))
    if (nrow(newPars) == rPoints) break else poi <- rPoints - nrow(newPars)
    # Scalar condition: && instead of the vectorized & used originally.
    if (attempt >= 100 && FAIL) stop("Latin Hypercube Sampling could not produce the required distinct parameter sets. \nTry decreasing gsPoints or initPoints.")
    attempt <- attempt + 1
  }
  setnames(newPars, boundsDT$N)
  return(newPars)
}

# Scale parameters to 0-1 between their bounds.
minMaxScale <- function(tabl, boundsDT) {
  # tabl <- newD
  mms <- lapply(boundsDT$N, function(x) (tabl[[x]] - boundsDT[get("N") == x, ]$L) / boundsDT[get("N") == x, ]$R)
  setDT(mms)
  setnames(mms, boundsDT$N)
  return(mms)
}

# Do the reverse of minMaxScale, rounding integer-classed parameters and
# carrying through any extra columns of `tabl` unchanged.
unMMScale <- function(tabl, boundsDT) {
  umms <- lapply(boundsDT$N, function(x) {
    B <- boundsDT[get("N") == x, ]
    n <- tabl[[x]] * B$R + B$L
    if (B$C == "integer") n <- round(n)
    return(n)
  })
  setDT(umms)
  if (!identical(names(tabl), boundsDT$N)) umms <- cbind(umms, tabl[, -boundsDT$N, with = F])
  setnames(umms, names(tabl))
  return(umms)
}

# Scale a vector between 0-1.
zeroOneScale <- function(vec) {
  r <- max(vec) - min(vec)
  # If the scoring function returned the same results
  # this results in the function a vector of 1s.
  if (r == 0) stop("Results from FUN have 0 variance, cannot build GP.")
  vec <- (vec - min(vec)) / r
  return(vec)
}

# Check to see if any rows from tab1 are exact duplicates of rows in tab2.
checkDup <- function(tab1, tab2) {
  # One logical per row of tab1: TRUE if that row duplicates a row of tab2
  # or an earlier row of tab1. seq_len() instead of 1:nrow() (0-row safe).
  sapply(seq_len(nrow(tab1)), function(i) {
    tab2 <- rbind(tab2, tab1[0:(i - 1), ])
    nrow(fintersect(tab2, tab1[i, ])) > 0
  })
}

# Return a data.table from a bounds list. Easier to work with.
# N = name, L = lower, U = upper, R = range, C = class of the bound.
boundsToDT <- function(bounds) {
  data.table(
    N = names(bounds)
    , L = sapply(bounds, function(x) x[1])
    , U = sapply(bounds, function(x) x[2])
    , R = sapply(bounds, function(x) x[2]) - sapply(bounds, function(x) x[1])
    , C = sapply(bounds, function(x) class(x))
  )
}

# Attempt to save bayesOpt object between optimization steps.
saveSoFar <- function(optObj, verbose) {
  if (!is.null(optObj$saveFile)) {
    tryCatch(
      {
        suppressWarnings(saveRDS(optObj, file = optObj$saveFile))
        if (verbose > 0) cat(" 4) Saving Intermediary Results to: \n ", optObj$saveFile, "\n")
      }
      , error = function(e) {
        if (verbose > 0) cat(red(" 4) Failed to save intermediary results. Please check file path.\n"))
      }
    )
  }
}

# Cannot pass `%dopar%` so we recreate it with this function.
ParMethod <- function(x) if (x) {`%dopar%`} else {`%do%`}

# Get information about the acquisition functions.
getAcqInfo <- function(acq) {
  return(
    data.table(
      nam = c("ei", "eips", "poi", "ucb")
      , disp = c("Expected Improvement", "Expct. Imprvmt./Second", "Prob. of Improvement", "Upper Conf. Bound")
      , base = c(0, 0, 0, 1)
    )[get("nam") == acq, ]
  )
}

# Early checks for parameters. Stops with an informative error on the first
# malformed argument.
checkParameters <- function(
    bounds
    , iters.n
    , iters.k
    , otherHalting
    , acq
    , acqThresh
    , errorHandling
    , plotProgress
    , parallel
    , verbose
) {
  if (iters.n < iters.k) stop("iters.n cannot be less than iters.k. See ?bayesOpt for parameter definitions.")
  # Scalar conditions below use &&/|| (the original used the vectorized &/|).
  if (iters.n %% 1 != 0 || iters.k %% 1 != 0) stop("iters.n and iters.k must be integers.")
  if (!any(acq == c("ucb", "ei", "eips", "poi"))) stop("Acquisition function not recognized")
  if (parallel && (getDoParWorkers() == 1)) stop("parallel is set to TRUE but no back end is registered.\n")
  if (!parallel && getDoParWorkers() > 1 && verbose > 0) message("parallel back end is registered, but parallel is set to false. 
Process will not be run in parallel.")
  if (any(!names(otherHalting) %in% c("timeLimit", "minUtility"))) stop("otherHalting element not recognized. Must be one of timeLimit and minUtility.")
  # inherits() instead of class(x) != "list" (robust to multi-class objects).
  if (!inherits(bounds, "list")) stop("bounds must be a list of parameter bounds with the same arguments as FUN.")
  if (any(lengths(bounds) != 2)) stop("Not all elements in bounds are length 2.")
  if (acqThresh > 1 || acqThresh < 0) stop("acqThresh must be in [0,1]")
  if (!is.logical(plotProgress)) stop("plotProgress must be logical")
  if (!errorHandling %in% c("stop", "continue") && !is.numeric(errorHandling)) stop("errorHandling is malformed: Must be one of 'stop', 'continue', or an integer.")
}

# Get the total time run of an object given the time it was started.
totalTime <- function(optObj, startT) {
  optObj$elapsedTime + as.numeric(difftime(Sys.time(), startT, units = "secs"))
}

# Fill in any missing elements of otherHalting we need.
formatOtherHalting <- function(otherHalting) {
  if (is.null(otherHalting$timeLimit)) otherHalting$timeLimit <- Inf
  if (is.null(otherHalting$minUtility)) otherHalting$minUtility <- 0
  return(otherHalting)
}

# When the process stops early it will print this color.
#' @importFrom crayon make_style red
returnEarly <- crayon::make_style("#FF6200")

# Constructor for stopEarlyMsg class.
makeStopEarlyMessage <- function(msg) {
  class(msg) <- "stopEarlyMsg"
  return(msg)
}

# Multiple places the process can stop early. This just prints the message.
printStopStatus <- function(optObj, verbose) {
  if (verbose > 0) cat(returnEarly("\n", optObj$stopStatus, "\n"))
}

# Combining function for foreach. Allows the return of message without scores.
rbindFE <- function(...) rbind(..., fill = TRUE)

# What to do if FUN produced errors?
# Decide whether errors produced by FUN should halt the process.
#
# Args:
#   NewResults:    latest batch of results; errorMessage is NA on success.
#   scoreSummary:  prior results with the same errorMessage convention.
#   errorHandling: "stop", "continue", or a numeric error budget.
#   verbose:       accepted for interface consistency; not used here.
#
# Returns "OK" to continue, or a stopEarlyMsg object explaining the halt.
getEarlyStoppingErrorStatus <- function(NewResults, scoreSummary, errorHandling, verbose) {
  newErrors <- sum(!is.na(NewResults$errorMessage))
  allErrors <- newErrors + sum(!is.na(scoreSummary$errorMessage))
  # Scalar condition: && instead of the vectorized & used originally.
  if (errorHandling == "stop" && allErrors > 0) {
    return(makeStopEarlyMessage("Errors encountered in FUN"))
  } else if (errorHandling == "continue") {
    return("OK")
  } else if (errorHandling <= allErrors) {
    return(makeStopEarlyMessage("Errors from FUN exceeded errorHandling limit"))
  } else {
    return("OK")
  }
}
/R/SmallFuncs.R
no_license
ilyaselitser/ParBayesianOptimization
R
false
false
6,614
r
# The functions in this file are all internal.

# Determine if a set of parameters is within the bounds.
# Returns (via sapply) one logical per parameter name: TRUE when the value
# in `tab` lies inside the [lower, upper] interval from `bounds`.
checkBounds <- function(tab, bounds) {
  return(
    sapply(
      names(bounds),
      function(paramName) {
        tab[[paramName]] >= bounds[[paramName]][[1]] &
          tab[[paramName]] <= bounds[[paramName]][[2]]
      }
    )
  )
}

# Draw random parameters with Latin Hypercube Sampling.
# Attempts (up to 100 times) to accumulate `rPoints` unique parameter sets,
# redrawing only the shortfall on each retry; stops (when FAIL) if it cannot.
randParams <- function(boundsDT, rPoints, FAIL = TRUE) {
  attempt <- 1
  newPars <- data.table()
  poi <- rPoints
  while (attempt <= 100) {
    latinCube <- data.table(lhs::improvedLHS(n = poi, k = nrow(boundsDT)))
    setnames(latinCube, boundsDT$N)
    newPars <- unique(rbind(unMMScale(latinCube, boundsDT), newPars))
    if (nrow(newPars) == rPoints) break else poi <- rPoints - nrow(newPars)
    # Scalar condition: && instead of the vectorized & used originally.
    if (attempt >= 100 && FAIL) stop("Latin Hypercube Sampling could not produce the required distinct parameter sets. \nTry decreasing gsPoints or initPoints.")
    attempt <- attempt + 1
  }
  setnames(newPars, boundsDT$N)
  return(newPars)
}

# Scale parameters to 0-1 between their bounds.
minMaxScale <- function(tabl, boundsDT) {
  # tabl <- newD
  mms <- lapply(boundsDT$N, function(x) (tabl[[x]] - boundsDT[get("N") == x, ]$L) / boundsDT[get("N") == x, ]$R)
  setDT(mms)
  setnames(mms, boundsDT$N)
  return(mms)
}

# Do the reverse of minMaxScale, rounding integer-classed parameters and
# carrying through any extra columns of `tabl` unchanged.
unMMScale <- function(tabl, boundsDT) {
  umms <- lapply(boundsDT$N, function(x) {
    B <- boundsDT[get("N") == x, ]
    n <- tabl[[x]] * B$R + B$L
    if (B$C == "integer") n <- round(n)
    return(n)
  })
  setDT(umms)
  if (!identical(names(tabl), boundsDT$N)) umms <- cbind(umms, tabl[, -boundsDT$N, with = F])
  setnames(umms, names(tabl))
  return(umms)
}

# Scale a vector between 0-1.
zeroOneScale <- function(vec) {
  r <- max(vec) - min(vec)
  # If the scoring function returned the same results
  # this results in the function a vector of 1s.
  if (r == 0) stop("Results from FUN have 0 variance, cannot build GP.")
  vec <- (vec - min(vec)) / r
  return(vec)
}

# Check to see if any rows from tab1 are exact duplicates of rows in tab2.
checkDup <- function(tab1, tab2) {
  # One logical per row of tab1: TRUE if that row duplicates a row of tab2
  # or an earlier row of tab1. seq_len() instead of 1:nrow() (0-row safe).
  sapply(seq_len(nrow(tab1)), function(i) {
    tab2 <- rbind(tab2, tab1[0:(i - 1), ])
    nrow(fintersect(tab2, tab1[i, ])) > 0
  })
}

# Return a data.table from a bounds list. Easier to work with.
# N = name, L = lower, U = upper, R = range, C = class of the bound.
boundsToDT <- function(bounds) {
  data.table(
    N = names(bounds)
    , L = sapply(bounds, function(x) x[1])
    , U = sapply(bounds, function(x) x[2])
    , R = sapply(bounds, function(x) x[2]) - sapply(bounds, function(x) x[1])
    , C = sapply(bounds, function(x) class(x))
  )
}

# Attempt to save bayesOpt object between optimization steps.
saveSoFar <- function(optObj, verbose) {
  if (!is.null(optObj$saveFile)) {
    tryCatch(
      {
        suppressWarnings(saveRDS(optObj, file = optObj$saveFile))
        if (verbose > 0) cat(" 4) Saving Intermediary Results to: \n ", optObj$saveFile, "\n")
      }
      , error = function(e) {
        if (verbose > 0) cat(red(" 4) Failed to save intermediary results. Please check file path.\n"))
      }
    )
  }
}

# Cannot pass `%dopar%` so we recreate it with this function.
ParMethod <- function(x) if (x) {`%dopar%`} else {`%do%`}

# Get information about the acquisition functions.
getAcqInfo <- function(acq) {
  return(
    data.table(
      nam = c("ei", "eips", "poi", "ucb")
      , disp = c("Expected Improvement", "Expct. Imprvmt./Second", "Prob. of Improvement", "Upper Conf. Bound")
      , base = c(0, 0, 0, 1)
    )[get("nam") == acq, ]
  )
}

# Early checks for parameters. Stops with an informative error on the first
# malformed argument.
checkParameters <- function(
    bounds
    , iters.n
    , iters.k
    , otherHalting
    , acq
    , acqThresh
    , errorHandling
    , plotProgress
    , parallel
    , verbose
) {
  if (iters.n < iters.k) stop("iters.n cannot be less than iters.k. See ?bayesOpt for parameter definitions.")
  # Scalar conditions below use &&/|| (the original used the vectorized &/|).
  if (iters.n %% 1 != 0 || iters.k %% 1 != 0) stop("iters.n and iters.k must be integers.")
  if (!any(acq == c("ucb", "ei", "eips", "poi"))) stop("Acquisition function not recognized")
  if (parallel && (getDoParWorkers() == 1)) stop("parallel is set to TRUE but no back end is registered.\n")
  if (!parallel && getDoParWorkers() > 1 && verbose > 0) message("parallel back end is registered, but parallel is set to false. 
Process will not be run in parallel.")
  if (any(!names(otherHalting) %in% c("timeLimit", "minUtility"))) stop("otherHalting element not recognized. Must be one of timeLimit and minUtility.")
  # inherits() instead of class(x) != "list" (robust to multi-class objects).
  if (!inherits(bounds, "list")) stop("bounds must be a list of parameter bounds with the same arguments as FUN.")
  if (any(lengths(bounds) != 2)) stop("Not all elements in bounds are length 2.")
  if (acqThresh > 1 || acqThresh < 0) stop("acqThresh must be in [0,1]")
  if (!is.logical(plotProgress)) stop("plotProgress must be logical")
  if (!errorHandling %in% c("stop", "continue") && !is.numeric(errorHandling)) stop("errorHandling is malformed: Must be one of 'stop', 'continue', or an integer.")
}

# Get the total time run of an object given the time it was started.
totalTime <- function(optObj, startT) {
  optObj$elapsedTime + as.numeric(difftime(Sys.time(), startT, units = "secs"))
}

# Fill in any missing elements of otherHalting we need.
formatOtherHalting <- function(otherHalting) {
  if (is.null(otherHalting$timeLimit)) otherHalting$timeLimit <- Inf
  if (is.null(otherHalting$minUtility)) otherHalting$minUtility <- 0
  return(otherHalting)
}

# When the process stops early it will print this color.
#' @importFrom crayon make_style red
returnEarly <- crayon::make_style("#FF6200")

# Constructor for stopEarlyMsg class.
makeStopEarlyMessage <- function(msg) {
  class(msg) <- "stopEarlyMsg"
  return(msg)
}

# Multiple places the process can stop early. This just prints the message.
printStopStatus <- function(optObj, verbose) {
  if (verbose > 0) cat(returnEarly("\n", optObj$stopStatus, "\n"))
}

# Combining function for foreach. Allows the return of message without scores.
rbindFE <- function(...) rbind(..., fill = TRUE)

# What to do if FUN produced errors?
# Decide whether errors produced by FUN should halt the process.
#
# Args:
#   NewResults:    latest batch of results; errorMessage is NA on success.
#   scoreSummary:  prior results with the same errorMessage convention.
#   errorHandling: "stop", "continue", or a numeric error budget.
#   verbose:       accepted for interface consistency; not used here.
#
# Returns "OK" to continue, or a stopEarlyMsg object explaining the halt.
getEarlyStoppingErrorStatus <- function(NewResults, scoreSummary, errorHandling, verbose) {
  newErrors <- sum(!is.na(NewResults$errorMessage))
  allErrors <- newErrors + sum(!is.na(scoreSummary$errorMessage))
  # Scalar condition: && instead of the vectorized & used originally.
  if (errorHandling == "stop" && allErrors > 0) {
    return(makeStopEarlyMessage("Errors encountered in FUN"))
  } else if (errorHandling == "continue") {
    return("OK")
  } else if (errorHandling <= allErrors) {
    return(makeStopEarlyMessage("Errors from FUN exceeded errorHandling limit"))
  } else {
    return("OK")
  }
}
# Read two days of the household power consumption data and save a line plot
# of Global Active Power to plot2.png.
#
# Args:
#   data: path to the raw household_power_consumption.txt file.
#
# FIX: the original ignored its `data` argument and hard-coded the file name
# in both read.table() calls; the parameter is now actually used. The sole
# caller passes the same file name, so behavior is unchanged.
testread <- function(data) {
  # Grab the real header row, then read the 2 * 24 * 60 minute rows for the
  # two target days, treating "?" as NA. T/F spelled out as TRUE/FALSE.
  header <- read.table(data, nrows = 1, header = TRUE, sep = ";")
  datasource_p2 <- read.table(data, skip = 66636, nrows = (2*24*60),
                              header = TRUE, sep = ";",
                              stringsAsFactors = FALSE, na.strings = "?")
  a <- colnames(header)
  colnames(datasource_p2) <- paste(a)
  datasource_p2$Date_Time <- strptime(paste(datasource_p2$Date, datasource_p2$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  datasource_p2$Weekday <- weekdays(datasource_p2$Date_Time)
  # colnames(datasource_p2)
  # head(datasource_p2,10)
  # nrow(datasource_p2)

  # Prints the plot in png file.
  png("plot2.png", width = 480, height = 480)
  plot(datasource_p2$Date_Time, as.numeric(datasource_p2$Global_active_power),
       main = "Weekday Based Power Consumption", xlab = "",
       ylab = "Global Active Power (kilowats)", type = "l")
  dev.off()
}

testread("household_power_consumption.txt")
/plot2.R
no_license
arghyaditya/ExData_Plotting1
R
false
false
1,009
r
# Read two days of the household power consumption data and save a line plot
# of Global Active Power to plot2.png.
#
# Args:
#   data: path to the raw household_power_consumption.txt file.
#
# FIX: the original ignored its `data` argument and hard-coded the file name
# in both read.table() calls; the parameter is now actually used. The sole
# caller passes the same file name, so behavior is unchanged.
testread <- function(data) {
  # Grab the real header row, then read the 2 * 24 * 60 minute rows for the
  # two target days, treating "?" as NA. T/F spelled out as TRUE/FALSE.
  header <- read.table(data, nrows = 1, header = TRUE, sep = ";")
  datasource_p2 <- read.table(data, skip = 66636, nrows = (2*24*60),
                              header = TRUE, sep = ";",
                              stringsAsFactors = FALSE, na.strings = "?")
  a <- colnames(header)
  colnames(datasource_p2) <- paste(a)
  datasource_p2$Date_Time <- strptime(paste(datasource_p2$Date, datasource_p2$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  datasource_p2$Weekday <- weekdays(datasource_p2$Date_Time)
  # colnames(datasource_p2)
  # head(datasource_p2,10)
  # nrow(datasource_p2)

  # Prints the plot in png file.
  png("plot2.png", width = 480, height = 480)
  plot(datasource_p2$Date_Time, as.numeric(datasource_p2$Global_active_power),
       main = "Weekday Based Power Consumption", xlab = "",
       ylab = "Global Active Power (kilowats)", type = "l")
  dev.off()
}

testread("household_power_consumption.txt")
context("agree_test")

testthat::test_that("Simple Use Run Through", {
  # Example data from Shrout and Fleiss (1979): 6 subjects, 4 judges.
  sf <- matrix(
    c(9, 2, 5, 8,
      6, 1, 3, 2,
      8, 4, 6, 8,
      7, 1, 2, 6,
      10, 5, 6, 9,
      6, 2, 4, 7),
    ncol = 4, byrow = TRUE
  )
  colnames(sf) <- paste("J", 1:4, sep = "")
  rownames(sf) <- paste("S", 1:6, sep = "")
  #sf
  dat = as.data.frame(sf)

  # Wide-format interface should agree with the jamovi implementation.
  test1 = reli_stats(data = dat, wide = TRUE, col.names = c("J1", "J2", "J3", "J4"))
  jmvtest2 = jmvreli(data = dat, vars = c("J1", "J2", "J3", "J4"), desc = TRUE, plots = TRUE)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$icc, test1$icc$icc)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$lower.ci, test1$icc$lower.ci)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$upper.ci, test1$icc$upper.ci)
  pr_test = print(test1)
  p = plot(test1)

  # Same data in long format must give identical statistics.
  df = data.frame(
    id = c(1,1,1,1, 2,2,2,2, 3,3,3,3, 4,4,4,4, 5,5,5,5, 6,6,6,6),
    it = c(1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4),
    va = c(9,2,5,8, 6,1,3,2, 8,4,6,8, 7,1,2,6, 10,5,6,9, 6,2,4,7)
  )
  test2 = reli_stats(data = df, measure = "va", item = "it", id = "id")
  pr_test = print(test2)
  p = plot(test2)
  testthat::expect_equal(test1$icc, test2$icc)
  testthat::expect_equal(test1$anova, test2$anova)
  testthat::expect_equal(test1$var_comp, test2$var_comp)
  testthat::expect_equal(test1$cv, test2$cv)
  testthat::expect_equal(test1$SEM, test2$SEM)
  testthat::expect_equal(test1$SEE, test2$SEE)
  testthat::expect_equal(test1$SEP, test2$SEP)
})
/tests/testthat/test-reli_stats.R
no_license
hyunsooseol/SimplyAgree
R
false
false
2,418
r
context("agree_test")

testthat::test_that("Simple Use Run Through", {
  # Example data from Shrout and Fleiss (1979): 6 subjects, 4 judges.
  sf <- matrix(
    c(9, 2, 5, 8,
      6, 1, 3, 2,
      8, 4, 6, 8,
      7, 1, 2, 6,
      10, 5, 6, 9,
      6, 2, 4, 7),
    ncol = 4, byrow = TRUE
  )
  colnames(sf) <- paste("J", 1:4, sep = "")
  rownames(sf) <- paste("S", 1:6, sep = "")
  #sf
  dat = as.data.frame(sf)

  # Wide-format interface should agree with the jamovi implementation.
  test1 = reli_stats(data = dat, wide = TRUE, col.names = c("J1", "J2", "J3", "J4"))
  jmvtest2 = jmvreli(data = dat, vars = c("J1", "J2", "J3", "J4"), desc = TRUE, plots = TRUE)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$icc, test1$icc$icc)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$lower.ci, test1$icc$lower.ci)
  testthat::expect_equivalent(jmvtest2$icctab$asDF$upper.ci, test1$icc$upper.ci)
  pr_test = print(test1)
  p = plot(test1)

  # Same data in long format must give identical statistics.
  df = data.frame(
    id = c(1,1,1,1, 2,2,2,2, 3,3,3,3, 4,4,4,4, 5,5,5,5, 6,6,6,6),
    it = c(1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4),
    va = c(9,2,5,8, 6,1,3,2, 8,4,6,8, 7,1,2,6, 10,5,6,9, 6,2,4,7)
  )
  test2 = reli_stats(data = df, measure = "va", item = "it", id = "id")
  pr_test = print(test2)
  p = plot(test2)
  testthat::expect_equal(test1$icc, test2$icc)
  testthat::expect_equal(test1$anova, test2$anova)
  testthat::expect_equal(test1$var_comp, test2$var_comp)
  testthat::expect_equal(test1$cv, test2$cv)
  testthat::expect_equal(test1$SEM, test2$SEM)
  testthat::expect_equal(test1$SEE, test2$SEE)
  testthat::expect_equal(test1$SEP, test2$SEP)
})
# Reference: http://www.tutorialspoint.com/r/r_csv_files.htm

#[1] CSV Files -------------------------------------------------------
# Read in the data file.
data <- read.csv("input.csv")
print(data)

# Examine its contents.
print(is.data.frame(data))
print(ncol(data))
print(nrow(data))

# List the variables in this dataset.
names(data)

# Perform data analysis: maximum salary.
sal <- max(data$salary)
print(sal)

# Get the person detail having max salary.
retval <- subset(data, salary == max(salary))
print(retval)

# Get all people working in the IT Dep.
retval <- subset(data, dept == "IT")
print(retval)

# Get all people working in the IT Dep. whose salary > $600.
info <- subset(data, salary > 600 & dept == "IT")
print(info)

# Get all people who joined on or after 2014.
retval <- subset(data, as.Date(start_date) > as.Date("2014-01-01"))
print(retval)

# Write into a csv file.
write.csv(retval, "output.csv")

# Verify contents of output.csv.
newdata <- read.csv("output.csv")
print(newdata)

#------------------------------------------------------------------------
#[2] Excel Files --------------------------------------------------------
# Need to load a library:
#library("xlsx")
library(readxl)

# Read the first worksheet in the file input.xlsx.
#data <- read.xlsx("input.xlsx", sheetIndex = 1) data <- read_excel('input.xlsx') print(data) #[3] Data Tables --------------------------------------------------------- #library(data.table) require(data.table) #[4] R Web Data --------------------------------------------------------- #install.packages("RCurl") #install.packages("XML") #install.packages("stringr") #install.packages("plyr") # Need to load libraries: library('XML') library('RCurl') library('plyr') library("quantmod") require("financeR") require("xts") library('tseries') library(RJSONIO) # Getting Data from FRED #install.packages("Quandl") library(Quandl) #Quandl.api_key("EUfG5wVrxfdxmbdfDs5G") #EUfG5wVrxfdxmbdfDs5G #Get US GDP data from FRED and put it into a data frame: us_gdp = Quandl("FRED/GDP") #Get US GDP data from FRED in time-series format us_gdp_ts = Quandl("FRED/GDP", type="ts") plot(us_gdp_ts) #To change the sampling frequency: us_gdp = Quandl("FRED/GDP", collapse="annual") # Download data directly download.file("http://www.geos.ed.ac.uk/~weather/jcmb_ws/JCMB_2015_Oct.csv","downloaded_file") # Get all historical prices (adjusted close) for the S&P500 sp500 = get.hist.quote("^GSPC", quote = "AdjClose", compression = "d") # Get World Bank Development Indicator data library(WDI) DF <- WDI(country=c("US","CA","MX"), indicator="NY.GDP.MKTP.KD.ZG", start=1990, end=2016) # Get data from the BLS # http://www.bls.gov/developers/api_signature.htm (Notice that STATA is not supported) # API Check ************** library(rjson) library(blsAPI) library(blscrapeR) df <- get_bls_county() bls_gg <- bls_map_county(map_data = df, fill_rate = "unemployed_rate",labtitle = "Unemployment Rate") bls_gg df <- get_bls_county(stateName = c("Florida", "California")) bls_gg <- bls_map_county(map_data=df, fill_rate = "unemployed_rate",stateName = c("Florida", "California")) bls_gg # State employment statistics for April 2016. 
df <- get_bls_state("April 2016", seasonality = TRUE) bls_gg <- map_bls(map_data = df, fill_rate = "unemployed_rate",labtitle = "Unemployment Rate") bls_gg #Below are some addtions made by Melody: #------------------------------------------------------------------------------- #Author(s): Melody Huang #Last modified: 09/06/2018 #Common Data Pull Functions & Examples #------------------------------------------------------------------------------- rm(list=ls(all=TRUE)) #------------------------------------------------------------------------------- library(tseries) library(vars) library(tis) library(quantmod) library(dplyr) #-------------------------------------------------------------------------------------------------------- #Certain R Libraries have functions that allow you to pull data from various sources #This is helpful when we want to work with large sources of economic data: #-------------------------------------------------------------------------------------------------------- #get.hist.quote() #Example 1: sp500<-get.hist.quote('^GSPC', start ="1963-12-01", end="2017-02-28", compression='m') #This returns data in a data.frame type structure #We can preview it using the head() function: head(sp500) #To call its names: names(sp500) sp500_returns<-timeSeries::getReturns(sp500$Close) plot(sp500$Close, type='l', ylab="S&P 500 Closing Price", main="S&P 500", xlab='Time') plot(sp500_returns, type='l', ylab="S&P 500 Returns", main="S&P 500", xlab='Time') #Example 2: irx<-get.hist.quote('^IRX', start ="1965-01-01", compression='m') tnx<-get.hist.quote('^TNX', start ="1965-01-01", compression='m') spread<-ts(tnx$Close - irx$Close, freq=12) time<-seq(1965, length=length(spread), by=1/12) plot(time, spread, type='l', xlab="Time", ylab="Spread (Ten Year - Three Month Treasury)", main="Ten Year - Three Month Treasury Spread") nberShade() abline(h=0) lines(time, spread) 
#-------------------------------------------------------------------------------------------------------- #getsymbols() #getSymbols() can pull data from different sources, like #the Federal Reserve's Economic Database #You can change which source you pull from by using the "src" flag #Example: getSymbols('AAA', src="FRED") getSymbols('BAA', src="FRED") #What's a little annoying about getSymbols() is that it's default to download the data into your global environment #So the data will be stored in an object created for you already, named after the #symbol you put in #Note: for FRED data, copy paste the string appended to the end of the URL #i.e., the link for the AAA data is: https://fred.stlouisfed.org/series/AAA #If we check the type of object that getSymbols() returns to us, notice that it isn't a data.frame class(AAA) corporate_bonds<-data.frame(dates = as.Date(row.names(as.data.frame(AAA))), AAA[,1], BAA[,1]) row.names(corporate_bonds)<-NULL head(corporate_bonds) #-------------------------------------------------------------------------------------------------------- #Quandl() library(Quandl) #Takes data from the Quandl library: https://www.quandl.com/search #Helpful for financial data #Go to the website and search for the data you want #They will provide you with an R command that you can copy/paste to pull data #Note: #There is a limit to how much data you can pull from Quandl() in one go #So you don't use up all of my pulls, go on Quandl's website and set up an API key (it's free!!!) #Put the API key they email you here: my_api_key = "sBTb9YZ2pF-6NyfSBSxA" #Example: df_pmi<-Quandl("ISM/MAN_PMI", api_key=my_api_key) #Quandl() will return data in reverse chronological order #You will have to flip the order pmi<-rev(df_pmi[,2]) plot(seq(1948, by=1/12, length=length(pmi)), pmi, type='l', xlab="Time", ylab="PMI", main="Purchasing Manager's Index")
/R Bootcamp/R_Bootcamp_Data_Formats.R
permissive
dustinoakes/UCLA-MAE
R
false
false
6,986
r
# Reference: http://www.tutorialspoint.com/r/r_csv_files.htm #[1] CSV Files ------------------------------------------------------- # Read in the data file data <- read.csv("input.csv") print(data) # Examine its contents print(is.data.frame(data)) print(ncol(data)) print(nrow(data)) # List the variables in this dataset names(data) # Perform data analysis sal <- max(data$salary) print(sal) # Get the person detail having max salary. retval <- subset(data, salary == max(salary)) print(retval) # Get all people working in the IT Dep. retval <- subset( data, dept == "IT") print(retval) # Get all people working in the IT Dep. whose salary > $600 info <- subset(data, salary > 600 & dept == "IT") print(info) # Get all people who joined on or after 2014 retval <- subset(data, as.Date(start_date) > as.Date("2014-01-01")) print(retval) # Write into a csv file write.csv(retval,"output.csv") # Verify contents of output.csv newdata <- read.csv("output.csv") print(newdata) #------------------------------------------------------------------------ #[2] Excel Files -------------------------------------------------------- # Need to load a library: #library("xlsx") library(readxl) # Read the first worksheet in the file input.xlsx. 
#data <- read.xlsx("input.xlsx", sheetIndex = 1) data <- read_excel('input.xlsx') print(data) #[3] Data Tables --------------------------------------------------------- #library(data.table) require(data.table) #[4] R Web Data --------------------------------------------------------- #install.packages("RCurl") #install.packages("XML") #install.packages("stringr") #install.packages("plyr") # Need to load libraries: library('XML') library('RCurl') library('plyr') library("quantmod") require("financeR") require("xts") library('tseries') library(RJSONIO) # Getting Data from FRED #install.packages("Quandl") library(Quandl) #Quandl.api_key("EUfG5wVrxfdxmbdfDs5G") #EUfG5wVrxfdxmbdfDs5G #Get US GDP data from FRED and put it into a data frame: us_gdp = Quandl("FRED/GDP") #Get US GDP data from FRED in time-series format us_gdp_ts = Quandl("FRED/GDP", type="ts") plot(us_gdp_ts) #To change the sampling frequency: us_gdp = Quandl("FRED/GDP", collapse="annual") # Download data directly download.file("http://www.geos.ed.ac.uk/~weather/jcmb_ws/JCMB_2015_Oct.csv","downloaded_file") # Get all historical prices (adjusted close) for the S&P500 sp500 = get.hist.quote("^GSPC", quote = "AdjClose", compression = "d") # Get World Bank Development Indicator data library(WDI) DF <- WDI(country=c("US","CA","MX"), indicator="NY.GDP.MKTP.KD.ZG", start=1990, end=2016) # Get data from the BLS # http://www.bls.gov/developers/api_signature.htm (Notice that STATA is not supported) # API Check ************** library(rjson) library(blsAPI) library(blscrapeR) df <- get_bls_county() bls_gg <- bls_map_county(map_data = df, fill_rate = "unemployed_rate",labtitle = "Unemployment Rate") bls_gg df <- get_bls_county(stateName = c("Florida", "California")) bls_gg <- bls_map_county(map_data=df, fill_rate = "unemployed_rate",stateName = c("Florida", "California")) bls_gg # State employment statistics for April 2016. 
df <- get_bls_state("April 2016", seasonality = TRUE) bls_gg <- map_bls(map_data = df, fill_rate = "unemployed_rate",labtitle = "Unemployment Rate") bls_gg #Below are some addtions made by Melody: #------------------------------------------------------------------------------- #Author(s): Melody Huang #Last modified: 09/06/2018 #Common Data Pull Functions & Examples #------------------------------------------------------------------------------- rm(list=ls(all=TRUE)) #------------------------------------------------------------------------------- library(tseries) library(vars) library(tis) library(quantmod) library(dplyr) #-------------------------------------------------------------------------------------------------------- #Certain R Libraries have functions that allow you to pull data from various sources #This is helpful when we want to work with large sources of economic data: #-------------------------------------------------------------------------------------------------------- #get.hist.quote() #Example 1: sp500<-get.hist.quote('^GSPC', start ="1963-12-01", end="2017-02-28", compression='m') #This returns data in a data.frame type structure #We can preview it using the head() function: head(sp500) #To call its names: names(sp500) sp500_returns<-timeSeries::getReturns(sp500$Close) plot(sp500$Close, type='l', ylab="S&P 500 Closing Price", main="S&P 500", xlab='Time') plot(sp500_returns, type='l', ylab="S&P 500 Returns", main="S&P 500", xlab='Time') #Example 2: irx<-get.hist.quote('^IRX', start ="1965-01-01", compression='m') tnx<-get.hist.quote('^TNX', start ="1965-01-01", compression='m') spread<-ts(tnx$Close - irx$Close, freq=12) time<-seq(1965, length=length(spread), by=1/12) plot(time, spread, type='l', xlab="Time", ylab="Spread (Ten Year - Three Month Treasury)", main="Ten Year - Three Month Treasury Spread") nberShade() abline(h=0) lines(time, spread) 
#-------------------------------------------------------------------------------------------------------- #getsymbols() #getSymbols() can pull data from different sources, like #the Federal Reserve's Economic Database #You can change which source you pull from by using the "src" flag #Example: getSymbols('AAA', src="FRED") getSymbols('BAA', src="FRED") #What's a little annoying about getSymbols() is that it's default to download the data into your global environment #So the data will be stored in an object created for you already, named after the #symbol you put in #Note: for FRED data, copy paste the string appended to the end of the URL #i.e., the link for the AAA data is: https://fred.stlouisfed.org/series/AAA #If we check the type of object that getSymbols() returns to us, notice that it isn't a data.frame class(AAA) corporate_bonds<-data.frame(dates = as.Date(row.names(as.data.frame(AAA))), AAA[,1], BAA[,1]) row.names(corporate_bonds)<-NULL head(corporate_bonds) #-------------------------------------------------------------------------------------------------------- #Quandl() library(Quandl) #Takes data from the Quandl library: https://www.quandl.com/search #Helpful for financial data #Go to the website and search for the data you want #They will provide you with an R command that you can copy/paste to pull data #Note: #There is a limit to how much data you can pull from Quandl() in one go #So you don't use up all of my pulls, go on Quandl's website and set up an API key (it's free!!!) #Put the API key they email you here: my_api_key = "sBTb9YZ2pF-6NyfSBSxA" #Example: df_pmi<-Quandl("ISM/MAN_PMI", api_key=my_api_key) #Quandl() will return data in reverse chronological order #You will have to flip the order pmi<-rev(df_pmi[,2]) plot(seq(1948, by=1/12, length=length(pmi)), pmi, type='l', xlab="Time", ylab="PMI", main="Purchasing Manager's Index")
\name{toUnambig} \title{Generates unambiguous base sequences} \description{ Given a string containing ambiguous bases, generate a list of all possible unambiguous sequences that could match that string } \usage{ toUnambig(ambigBases) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{ambigBases}{a character string, or a character vector} } \details{ If ambigBases is a vector, then a list will be output containing the unambiguous translations for each element in ambigBases. } \value{ A character vector containing all possible unambiguous translations for the given input string, using standard IUPAC base classifications: \item{A;C;G;T}{A;C;G;T} \item{S;W}{C/G;A/T} \item{Y;R}{C/T;A/G} \item{M;K}{A/C;G/T} \item{B}{C/G/T} \item{D}{A/G/T} \item{H}{A/C/T} \item{V}{A/C/G} \item{N}{A/C/G/T} } \references{ %% ~put references to the literature/web site here ~ } \author{ David Eccles (gringer) <bioinformatics@gringene.org>} \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ## Select 10 random translations of an ambiguous sequence gringene.backtran <- "GGNMGNATHAAYGGNGARAAYGAR"; sample(toUnambig("GGNMGNATHAAYGGNGARAAYGAR"),10); ## Show all the needles in a haystack toUnambig(c("HAYSTACK")); ## Display all translations of the sequences "BAH","HUMBUG" toUnambig(c("BAH","HUMBUG")); } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~manip }% __ONLY ONE__ keyword per line
/man/toUnambig.Rd
no_license
gringer/RichPoreTK
R
false
false
1,629
rd
\name{toUnambig} \title{Generates unambiguous base sequences} \description{ Given a string containing ambiguous bases, generate a list of all possible unambiguous sequences that could match that string } \usage{ toUnambig(ambigBases) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{ambigBases}{a character string, or a character vector} } \details{ If ambigBases is a vector, then a list will be output containing the unambiguous translations for each element in ambigBases. } \value{ A character vector containing all possible unambiguous translations for the given input string, using standard IUPAC base classifications: \item{A;C;G;T}{A;C;G;T} \item{S;W}{C/G;A/T} \item{Y;R}{C/T;A/G} \item{M;K}{A/C;G/T} \item{B}{C/G/T} \item{D}{A/G/T} \item{H}{A/C/T} \item{V}{A/C/G} \item{N}{A/C/G/T} } \references{ %% ~put references to the literature/web site here ~ } \author{ David Eccles (gringer) <bioinformatics@gringene.org>} \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ## Select 10 random translations of an ambiguous sequence gringene.backtran <- "GGNMGNATHAAYGGNGARAAYGAR"; sample(toUnambig("GGNMGNATHAAYGGNGARAAYGAR"),10); ## Show all the needles in a haystack toUnambig(c("HAYSTACK")); ## Display all translations of the sequences "BAH","HUMBUG" toUnambig(c("BAH","HUMBUG")); } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~manip }% __ONLY ONE__ keyword per line
\name{h2o.parseRaw.VA} \alias{h2o.parseRaw.VA} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Parse Raw Data File to ValueArray } \description{ Parses a raw data file, returning an object containing the identifying hex key. } \usage{ h2o.parseRaw.VA(data, key = "", header, sep = "", col.names) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ An \code{\linkS4class{H2ORawDataVA}} object to be parsed. } \item{key}{ (Optional) The hex key assigned to the parsed file. } \item{header}{ (Optional) A logical value indicating whether the first row is the column header. If missing, H2O will automatically try to detect the presence of a header. } \item{sep}{ (Optional) The field separator character. Values on each line of the file are separated by this character. If \code{sep = ""}, the parser will automatically detect the separator. } \item{col.names}{ (Optional) An \code{\linkS4class{H2OParsedDataVA}} object containing the column header as its first and only row. } } \details{ This method should only be used to parse raw data imported using \code{\link{h2o.importFile.VA}}, \code{\link{h2o.importFolder.VA}}, or one of its variants. After the raw data file is parsed, it will be automatically deleted from the H2O server. } \value{ An object of class \code{\linkS4class{H2ORawDataVA}}, representing the unparsed ValueArray dataset that was imported. %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... 
} \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ \code{\link{h2o.importFile.VA}, \link{h2o.importFolder.VA}, \link{h2o.importHDFS.VA}, \link{h2o.importURL.VA}, \link{h2o.uploadFile.VA}} } \examples{ library(h2o) localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE) prosPath = system.file("extdata", "prostate.csv", package="h2oRClient") prostate.raw = h2o.importFile.VA(localH2O, path = prosPath, parse = FALSE) # Do not modify prostate.csv on disk at this point! prostate.hex = h2o.parseRaw.VA(data = prostate.raw, key = "prostate.hex") # After parsing, it is okay to modify or delete prostate.csv } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/R/h2oRClient-package/man/h2o.parseRaw.VA.Rd
permissive
cloudtrends/h2o
R
false
false
2,422
rd
\name{h2o.parseRaw.VA} \alias{h2o.parseRaw.VA} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Parse Raw Data File to ValueArray } \description{ Parses a raw data file, returning an object containing the identifying hex key. } \usage{ h2o.parseRaw.VA(data, key = "", header, sep = "", col.names) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ An \code{\linkS4class{H2ORawDataVA}} object to be parsed. } \item{key}{ (Optional) The hex key assigned to the parsed file. } \item{header}{ (Optional) A logical value indicating whether the first row is the column header. If missing, H2O will automatically try to detect the presence of a header. } \item{sep}{ (Optional) The field separator character. Values on each line of the file are separated by this character. If \code{sep = ""}, the parser will automatically detect the separator. } \item{col.names}{ (Optional) An \code{\linkS4class{H2OParsedDataVA}} object containing the column header as its first and only row. } } \details{ This method should only be used to parse raw data imported using \code{\link{h2o.importFile.VA}}, \code{\link{h2o.importFolder.VA}}, or one of its variants. After the raw data file is parsed, it will be automatically deleted from the H2O server. } \value{ An object of class \code{\linkS4class{H2ORawDataVA}}, representing the unparsed ValueArray dataset that was imported. %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... 
} \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ \code{\link{h2o.importFile.VA}, \link{h2o.importFolder.VA}, \link{h2o.importHDFS.VA}, \link{h2o.importURL.VA}, \link{h2o.uploadFile.VA}} } \examples{ library(h2o) localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE) prosPath = system.file("extdata", "prostate.csv", package="h2oRClient") prostate.raw = h2o.importFile.VA(localH2O, path = prosPath, parse = FALSE) # Do not modify prostate.csv on disk at this point! prostate.hex = h2o.parseRaw.VA(data = prostate.raw, key = "prostate.hex") # After parsing, it is okay to modify or delete prostate.csv } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
# set working directory to the priors-for-separation subdirectory # setwd("~/Dropbox/projects/priors-for-separation") # clear workspace rm(list = ls()) # paramaters install_packages <- FALSE n_sims <- 200000 # 200,000 in paper---use 200 as the initial run n_burnin <- 50000 # 50,0000 in paper---use 50 as the initial run n_chains <- 4 # 4 in paper (note: Windows users should change this to 1) # create subdirectories (if missing) to store output if (!file.exists("doc/figs")){ dir.create(file.path("doc", "figs")) } if (!file.exists("doc/tabs")){ dir.create(file.path("doc", "tabs")) } if (!file.exists("output")){ dir.create("output") } # install packages if (install_packages) { # install all needed packages from CRAN install.packages(c("devtools", "arm", "coda", "logistf", "lme4", "xtable")) # install MCMCpack dependencies not on CRAN source("https://bioconductor.org/biocLite.R") biocLite("graph") biocLite("Rgraphviz") # install packages from github devtools::install_github("carlislerainey/separation", ref = "cbe6e1fa12ff4507465e98fbe58740ae928bc9da") devtools::install_github("carlislerainey/compactr", ref = "67a0df74d497a18f36affdc54533ef141c0e2bde") } # load packages library(arm) library(coda) library(logistf) library(separation) library(compactr) library(xtable) # set seed set.seed(8742570) # theorem 1 illustrated source("R/thm-1-illustrated.R") # note: glm.fit warning expected # barrilleaux and rainey replication source("br-replication/R/br.R") # bell and miller replication source("bm-replication/R/partial-prior.R") # (note: glm.fit warning expected) source("bm-replication/R/mcmc.R") # (note: glm.fit warning expected) source("bm-replication/R/plots.R")
/do-all.R
no_license
hal2001/priors-for-separation
R
false
false
1,720
r
# set working directory to the priors-for-separation subdirectory # setwd("~/Dropbox/projects/priors-for-separation") # clear workspace rm(list = ls()) # paramaters install_packages <- FALSE n_sims <- 200000 # 200,000 in paper---use 200 as the initial run n_burnin <- 50000 # 50,0000 in paper---use 50 as the initial run n_chains <- 4 # 4 in paper (note: Windows users should change this to 1) # create subdirectories (if missing) to store output if (!file.exists("doc/figs")){ dir.create(file.path("doc", "figs")) } if (!file.exists("doc/tabs")){ dir.create(file.path("doc", "tabs")) } if (!file.exists("output")){ dir.create("output") } # install packages if (install_packages) { # install all needed packages from CRAN install.packages(c("devtools", "arm", "coda", "logistf", "lme4", "xtable")) # install MCMCpack dependencies not on CRAN source("https://bioconductor.org/biocLite.R") biocLite("graph") biocLite("Rgraphviz") # install packages from github devtools::install_github("carlislerainey/separation", ref = "cbe6e1fa12ff4507465e98fbe58740ae928bc9da") devtools::install_github("carlislerainey/compactr", ref = "67a0df74d497a18f36affdc54533ef141c0e2bde") } # load packages library(arm) library(coda) library(logistf) library(separation) library(compactr) library(xtable) # set seed set.seed(8742570) # theorem 1 illustrated source("R/thm-1-illustrated.R") # note: glm.fit warning expected # barrilleaux and rainey replication source("br-replication/R/br.R") # bell and miller replication source("bm-replication/R/partial-prior.R") # (note: glm.fit warning expected) source("bm-replication/R/mcmc.R") # (note: glm.fit warning expected) source("bm-replication/R/plots.R")
ttsplot <- function (stn, hy, ..., stagetics = seq(0, 4, 0.1), adj = T, number = T, units = "cfs", split = 0.35, grid1, grid2) { arglist <- as.list(match.call()[-1]) #collect Args #Collect ... args (i.e. turbsrc objects): predobj <- as.list(match.call(expand.dots = F)$...) print(units) msc.n <- 0 tts.n <- 0 n <- 0 tts <- NULL for (name in predobj) { n <- n + 1 obj <- eval(name) if (data.class(obj) == "list" && "meth" %in% names(obj)) { if (obj$meth == 1 && obj$type != "pairs") { tts.n <- tts.n + 1 if (tts.n > 2) stop("No more than 2 turbsrc objects permitted") objname <- paste("tts", tts.n, sep = "") } else if (obj$meth %in% 2:3 || obj$type == "pairs") { msc.n <- msc.n + 1 if (msc.n > 3) stop("No more than 3 flowsrc and lineartime objects allowed") objname <- paste("msc", msc.n, sep = "") } else stop(paste("Invalid meth component for object", name)) names(arglist)[n + 2] <- objname } else stop(paste("Inappropriate prediction object", deparse(substitute(name)))) } #print(arglist) do.call("oldttsplot", arglist) }
/r/ttsplot.r
no_license
adamkc/HydroRSL
R
false
false
1,341
r
ttsplot <- function (stn, hy, ..., stagetics = seq(0, 4, 0.1), adj = T, number = T, units = "cfs", split = 0.35, grid1, grid2) { arglist <- as.list(match.call()[-1]) #collect Args #Collect ... args (i.e. turbsrc objects): predobj <- as.list(match.call(expand.dots = F)$...) print(units) msc.n <- 0 tts.n <- 0 n <- 0 tts <- NULL for (name in predobj) { n <- n + 1 obj <- eval(name) if (data.class(obj) == "list" && "meth" %in% names(obj)) { if (obj$meth == 1 && obj$type != "pairs") { tts.n <- tts.n + 1 if (tts.n > 2) stop("No more than 2 turbsrc objects permitted") objname <- paste("tts", tts.n, sep = "") } else if (obj$meth %in% 2:3 || obj$type == "pairs") { msc.n <- msc.n + 1 if (msc.n > 3) stop("No more than 3 flowsrc and lineartime objects allowed") objname <- paste("msc", msc.n, sep = "") } else stop(paste("Invalid meth component for object", name)) names(arglist)[n + 2] <- objname } else stop(paste("Inappropriate prediction object", deparse(substitute(name)))) } #print(arglist) do.call("oldttsplot", arglist) }
#' @title Plot percent spliced-in (PSI) values for exon-level alternative splicing events #' #' @description #' \code{PlotPSI} computes percent spliced-in (PSI) at each genomic coordinate for exon-level alternative splicing events. #' #' @details #' This function computes percent spliced-in (PSI) at each genomic coordinate for exon-level alternative splicing events, namely skipped-exon (SE), mutually-exclusive exons (MXE), retained-intron (RI), alternative 5' splice site (A5SS), and alternative 3' splice site (A3SS). Formula for computing PSI is number of reads with non-N CIGAR operation divided by the total number of reads. Total number of reads is the sum of reads with non-N CIGAR operation and reads with N-CIGAR operation #' #' @param tran_id Character string. Splicing event nomenclature. #' @param event.type Character string. Specify \code{"SE"}, \code{"MXE"}, \code{"RI"}, \code{"A5SS"} or \code{"A3SS"}. #' @param strand Character string. Specify \code{"positive"} or \code{"negative"} to indicate forward or negative strand, respectively. #' @param Bam Character string. Path to folder where the BAM files and their corresponding index files are located. #' @param BamPheno object of class data.frame. Mandatory columns are \code{bam.file.name} and \code{cell.type}. \code{bam.file.name} column indicates BAM file names as per that found in the \code{Bam} folder. \code{cell.type} column indicates the cell group names. #' @param cell.types Character string. Cell types to plot. Should be the same number of cell groups or less than the \code{cell.type} column of the \code{BamPheno} argument. #' @param min.coverage Numeric value. Coverage (Total reads) threshold below which the PSI value of the genomic coordinate is annotate as missing value, i.e. no coverage. #' @param cons.exon.cutoff Numeric value. Limit the number of bases to plot for the constitutive exons. This allow users to focus the plots on the alternative exon. #' @param method Character string. 
Statistical test to compare the PSI values across the different cell types. \code{"wilcox"}, \code{"t.test"}, and \code{"ks"} available for 2-group comparison. \code{"ANOVA"} and \code{"kw"} available for 3- or more group comparison. \code{"ks"} and \code{"kw"} represent Kolmogorov–Smirnov test and Kruskal-Wallis test, respectively. #' @param method.adj Character string. Adjust p-values for multiple testing. Options available as per \code{p.adjust} function. #' @param cell.types.colors Character string. Legend colors for each cell type. Should be of same length as \code{cell.types} argument. To use ggplot2 default color scheme, please specify \code{"ggplot.default"}. #' @param plot.title Character string. Main title for plot. Examples are gene ID, gene names, splicing ID etc.. #' @param plot.width Numeric value. Width of plot. #' @param plot.height Numeric value. Height of plot. #' @param plot.out Character string. Path to folder to output plot. #' @export #' @return A plot in PDF format located in the folder specified by \code{plot.out} argument. 
#' @author Sean Wen <sean.wenwx@gmail.com> #' @importFrom plyr join #' @import GenomicAlignments #' @import GenomicRanges #' @import IRanges #' @import Rsamtools #' @import ggplot2 #' @import pheatmap #' @import ggplotify #' @import ggpubr #' @import scales #' @importFrom reshape2 dcast #' @import grDevices #' @examples #' # Read sample metadata #' path_to_file <- system.file("extdata", "BAM_PhenoData_Small.txt", package="VALERIE") #' BamPheno <- read.table(path_to_file, sep="\t", header=TRUE, stringsAsFactors=FALSE) #' head(BamPheno) #' #' # Plot #' PlotPSI( #' tran_id="chr18:82554580:82554750:+@chr18:82561778:82561855:+@chr18:82572825:82572926", #' event.type="SE", #' strand="positive", #' Bam=system.file("extdata/BAM", package="VALERIE"), #' BamPheno=BamPheno, #' cell.types=c("Ctrl", "EAE"), #' min.coverage=10, #' cons.exon.cutoff=100, #' method="t.test", #' method.adj="bonferroni", #' cell.types.colors="ggplot.default", #' plot.title="Mbp", #' plot.width=5, #' plot.height=8, #' plot.out=paste(tempdir(), "Plot.pdf", sep="") #' ) PlotPSI <- function(tran_id, event.type, strand, Bam, BamPheno, cell.types, min.coverage, cons.exon.cutoff, method, method.adj, cell.types.colors, plot.title, plot.width, plot.height, plot.out) { if(event.type=="SE" & strand=="positive") { PlotPSI.SE.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="SE" & strand=="negative") { PlotPSI.SE.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="MXE" & 
strand=="positive") { PlotPSI.MXE.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="MXE" & strand=="negative") { PlotPSI.MXE.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="RI" & strand=="positive") { PlotPSI.RI.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="RI" & strand=="negative") { PlotPSI.RI.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A5SS" & strand=="positive") { PlotPSI.A5SS.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A5SS" & strand=="negative") { PlotPSI.A5SS.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, 
method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A3SS" & strand=="positive") { PlotPSI.A3SS.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A3SS" & strand=="negative") { PlotPSI.A3SS.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } }
/R/Script_01_0_PlotPSI.R
no_license
HSCBWIMMlab/VALERIE
R
false
false
9,399
r
#' @title Plot percent spliced-in (PSI) values for exon-level alternative splicing events #' #' @description #' \code{PlotPSI} computes percent spliced-in (PSI) at each genomic coordinate for exon-level alternative splicing events. #' #' @details #' This function computes percent spliced-in (PSI) at each genomic coordinate for exon-level alternative splicing events, namely skipped-exon (SE), mutually-exclusive exons (MXE), retained-intron (RI), alternative 5' splice site (A5SS), and alternative 3' splice site (A3SS). Formula for computing PSI is number of reads with non-N CIGAR operation divided by the total number of reads. Total number of reads is the sum of reads with non-N CIGAR operation and reads with N-CIGAR operation #' #' @param tran_id Character string. Splicing event nomenclature. #' @param event.type Character string. Specify \code{"SE"}, \code{"MXE"}, \code{"RI"}, \code{"A5SS"} or \code{"A3SS"}. #' @param strand Character string. Specify \code{"positive"} or \code{"negative"} to indicate forward or negative strand, respectively. #' @param Bam Character string. Path to folder where the BAM files and their corresponding index files are located. #' @param BamPheno object of class data.frame. Mandatory columns are \code{bam.file.name} and \code{cell.type}. \code{bam.file.name} column indicates BAM file names as per that found in the \code{Bam} folder. \code{cell.type} column indicates the cell group names. #' @param cell.types Character string. Cell types to plot. Should be the same number of cell groups or less than the \code{cell.type} column of the \code{BamPheno} argument. #' @param min.coverage Numeric value. Coverage (Total reads) threshold below which the PSI value of the genomic coordinate is annotate as missing value, i.e. no coverage. #' @param cons.exon.cutoff Numeric value. Limit the number of bases to plot for the constitutive exons. This allow users to focus the plots on the alternative exon. #' @param method Character string. 
Statistical test to compare the PSI values across the different cell types. \code{"wilcox"}, \code{"t.test"}, and \code{"ks"} available for 2-group comparison. \code{"ANOVA"} and \code{"kw"} available for 3- or more group comparison. \code{"ks"} and \code{"kw"} represent Kolmogorov–Smirnov test and Kruskal-Wallis test, respectively. #' @param method.adj Character string. Adjust p-values for multiple testing. Options available as per \code{p.adjust} function. #' @param cell.types.colors Character string. Legend colors for each cell type. Should be of same length as \code{cell.types} argument. To use ggplot2 default color scheme, please specify \code{"ggplot.default"}. #' @param plot.title Character string. Main title for plot. Examples are gene ID, gene names, splicing ID etc.. #' @param plot.width Numeric value. Width of plot. #' @param plot.height Numeric value. Height of plot. #' @param plot.out Character string. Path to folder to output plot. #' @export #' @return A plot in PDF format located in the folder specified by \code{plot.out} argument. 
#' @author Sean Wen <sean.wenwx@gmail.com> #' @importFrom plyr join #' @import GenomicAlignments #' @import GenomicRanges #' @import IRanges #' @import Rsamtools #' @import ggplot2 #' @import pheatmap #' @import ggplotify #' @import ggpubr #' @import scales #' @importFrom reshape2 dcast #' @import grDevices #' @examples #' # Read sample metadata #' path_to_file <- system.file("extdata", "BAM_PhenoData_Small.txt", package="VALERIE") #' BamPheno <- read.table(path_to_file, sep="\t", header=TRUE, stringsAsFactors=FALSE) #' head(BamPheno) #' #' # Plot #' PlotPSI( #' tran_id="chr18:82554580:82554750:+@chr18:82561778:82561855:+@chr18:82572825:82572926", #' event.type="SE", #' strand="positive", #' Bam=system.file("extdata/BAM", package="VALERIE"), #' BamPheno=BamPheno, #' cell.types=c("Ctrl", "EAE"), #' min.coverage=10, #' cons.exon.cutoff=100, #' method="t.test", #' method.adj="bonferroni", #' cell.types.colors="ggplot.default", #' plot.title="Mbp", #' plot.width=5, #' plot.height=8, #' plot.out=paste(tempdir(), "Plot.pdf", sep="") #' ) PlotPSI <- function(tran_id, event.type, strand, Bam, BamPheno, cell.types, min.coverage, cons.exon.cutoff, method, method.adj, cell.types.colors, plot.title, plot.width, plot.height, plot.out) { if(event.type=="SE" & strand=="positive") { PlotPSI.SE.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="SE" & strand=="negative") { PlotPSI.SE.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="MXE" & 
strand=="positive") { PlotPSI.MXE.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="MXE" & strand=="negative") { PlotPSI.MXE.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="RI" & strand=="positive") { PlotPSI.RI.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="RI" & strand=="negative") { PlotPSI.RI.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A5SS" & strand=="positive") { PlotPSI.A5SS.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A5SS" & strand=="negative") { PlotPSI.A5SS.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, 
method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A3SS" & strand=="positive") { PlotPSI.A3SS.Pos(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } else if(event.type=="A3SS" & strand=="negative") { PlotPSI.A3SS.Neg(tran_id=tran_id, Bam=Bam, BamPheno=BamPheno, cell.types=cell.types, min.coverage=min.coverage, cons.exon.cutoff=cons.exon.cutoff, method=method, method.adj=method.adj, cell.types.colors=cell.types.colors, plot.title=plot.title, plot.width=plot.width, plot.height=plot.height, plot.out=plot.out) } }
library(data.table) library(dplyr) library(GenomicRanges) library(BuenColors) library(diffloop) library(Biostrings) library(BSgenome.Hsapiens.UCSC.hg19) "%ni%" <- Negate("%in%") # Import annotation x01 <- bedToGRanges("../annotations/UCSC_3primeUTR.bed") x02 <- bedToGRanges("../annotations/UCSC_Exons.bed") x03 <- bedToGRanges("../annotations/UCSC_5primeUTR.bed") x04 <- bedToGRanges("../annotations/UCSC_Introns.bed") # Take a sample and run with it getProportions <- function(file, sample){ gr_t <- addchr(makeGRangesFromDataFrame(read.table(file, header = TRUE))) seq_t <- getSeq(BSgenome.Hsapiens.UCSC.hg19, gr_t) # Do overlaps ov_1 <- findOverlaps(gr_t, x01) ov_2 <- findOverlaps(gr_t, x02) ov_3 <- findOverlaps(gr_t, x03) ov_4 <- findOverlaps(gr_t, x04) # Classify each variant class <- ifelse(1:length(gr_t) %in% queryHits(ov_1), "3UTR", ifelse(1:length(gr_t) %in% queryHits(ov_2), "Exon", ifelse(1:length(gr_t) %in% queryHits(ov_3), "5UTR", ifelse(1:length(gr_t) %in% queryHits(ov_4), "Intron", "other")))) data.frame(class) %>% group_by(class) %>% summarise (n = n()) %>% filter(class != "other") %>% mutate(freq = n / sum(n)) %>% reshape2::melt() %>% filter(variable == "freq") %>% mutate(sample = sample) } propdf <- rbind(getProportions("../output/LIN28B-IDR.bed", "Consensus"), getProportions("../output/LIN28B-Rep1.bed", "Rep1"), getProportions("../output/LIN28B-Rep2.bed", "Rep2")) easter_colors <- c("3UTR" = "#e0cdff", "Exon" = "#dcf9a8", "Intron" = "#c1f0fb") ggplot(propdf, aes(x = sample, y = value*100, fill = class)) + geom_bar(stat = "identity", color = "black") + scale_fill_manual(values = c( "gray30", "gray40", "lightgrey")) + labs(x = "", y = "% of peak annotations", fill = "") + pretty_plot(fontsize = 8) + L_border() -> propPlot cowplot::ggsave(propPlot, file = "../output/plots/proportion_plot.pdf", width = 2.2, height = 2)
/code/02_annotationProportions.R
no_license
sankaranlab/translation-regulation-bcl11a
R
false
false
2,012
r
library(data.table) library(dplyr) library(GenomicRanges) library(BuenColors) library(diffloop) library(Biostrings) library(BSgenome.Hsapiens.UCSC.hg19) "%ni%" <- Negate("%in%") # Import annotation x01 <- bedToGRanges("../annotations/UCSC_3primeUTR.bed") x02 <- bedToGRanges("../annotations/UCSC_Exons.bed") x03 <- bedToGRanges("../annotations/UCSC_5primeUTR.bed") x04 <- bedToGRanges("../annotations/UCSC_Introns.bed") # Take a sample and run with it getProportions <- function(file, sample){ gr_t <- addchr(makeGRangesFromDataFrame(read.table(file, header = TRUE))) seq_t <- getSeq(BSgenome.Hsapiens.UCSC.hg19, gr_t) # Do overlaps ov_1 <- findOverlaps(gr_t, x01) ov_2 <- findOverlaps(gr_t, x02) ov_3 <- findOverlaps(gr_t, x03) ov_4 <- findOverlaps(gr_t, x04) # Classify each variant class <- ifelse(1:length(gr_t) %in% queryHits(ov_1), "3UTR", ifelse(1:length(gr_t) %in% queryHits(ov_2), "Exon", ifelse(1:length(gr_t) %in% queryHits(ov_3), "5UTR", ifelse(1:length(gr_t) %in% queryHits(ov_4), "Intron", "other")))) data.frame(class) %>% group_by(class) %>% summarise (n = n()) %>% filter(class != "other") %>% mutate(freq = n / sum(n)) %>% reshape2::melt() %>% filter(variable == "freq") %>% mutate(sample = sample) } propdf <- rbind(getProportions("../output/LIN28B-IDR.bed", "Consensus"), getProportions("../output/LIN28B-Rep1.bed", "Rep1"), getProportions("../output/LIN28B-Rep2.bed", "Rep2")) easter_colors <- c("3UTR" = "#e0cdff", "Exon" = "#dcf9a8", "Intron" = "#c1f0fb") ggplot(propdf, aes(x = sample, y = value*100, fill = class)) + geom_bar(stat = "identity", color = "black") + scale_fill_manual(values = c( "gray30", "gray40", "lightgrey")) + labs(x = "", y = "% of peak annotations", fill = "") + pretty_plot(fontsize = 8) + L_border() -> propPlot cowplot::ggsave(propPlot, file = "../output/plots/proportion_plot.pdf", width = 2.2, height = 2)
context("pkg-options") # Setup ------------------------------------------------------------------------ source("setup.R") skip_on_cran_windows() # Test package options --------------------------------------------------------- test_that("workflowr does not overwrite user-defined package options", { sysgit <- callr::r_safe(function() { options(workflowr.sysgit = "/git") library(workflowr) getOption("workflowr.sysgit") }) expect_identical(sysgit, "/git") }) test_that("workflowr does not overwrite user-defined package options in .Rprofile", { if (!interactive()) skip("These tests don't work in R CMD check") # Thought this would fix it but it didn't: https://github.com/r-lib/callr/issues/20 # Interestingly, this worked fine on Windows path <- test_setup() on.exit(test_teardown(path)) cwd <- setwd(path) on.exit(setwd(cwd), add = TRUE) # Have to load workflowr after setting options to properly test this, thus # can't append to default .Rprofile that loads workflowr writeLines(c("options(workflowr.autosave = FALSE)", "options(workflowr.sysgit = \"/git\")", "options(workflowr.view = \"bananas\")", "library(workflowr)"), con = ".Rprofile") autosave <- callr::r_safe(function() getOption("workflowr.autosave"), user_profile = TRUE) expect_false(autosave) sysgit <- callr::r_safe(function() getOption("workflowr.sysgit"), user_profile = TRUE) expect_identical(sysgit, "/git") view <- callr::r_safe(function() getOption("workflowr.view"), user_profile = TRUE) expect_identical(view, "bananas") }) test_that("Invalid workflowr.autosave does not crash workflowr", { path <- test_setup() on.exit(test_teardown(path)) expect_silent( callr::r_safe(function(path) { options(workflowr.autosave = "not-a-logical") library(workflowr) wflow_status(project = path) }, args = list(path = path)) ) })
/tests/testthat/test-pkg-options.R
permissive
workflowr/workflowr
R
false
false
2,031
r
context("pkg-options") # Setup ------------------------------------------------------------------------ source("setup.R") skip_on_cran_windows() # Test package options --------------------------------------------------------- test_that("workflowr does not overwrite user-defined package options", { sysgit <- callr::r_safe(function() { options(workflowr.sysgit = "/git") library(workflowr) getOption("workflowr.sysgit") }) expect_identical(sysgit, "/git") }) test_that("workflowr does not overwrite user-defined package options in .Rprofile", { if (!interactive()) skip("These tests don't work in R CMD check") # Thought this would fix it but it didn't: https://github.com/r-lib/callr/issues/20 # Interestingly, this worked fine on Windows path <- test_setup() on.exit(test_teardown(path)) cwd <- setwd(path) on.exit(setwd(cwd), add = TRUE) # Have to load workflowr after setting options to properly test this, thus # can't append to default .Rprofile that loads workflowr writeLines(c("options(workflowr.autosave = FALSE)", "options(workflowr.sysgit = \"/git\")", "options(workflowr.view = \"bananas\")", "library(workflowr)"), con = ".Rprofile") autosave <- callr::r_safe(function() getOption("workflowr.autosave"), user_profile = TRUE) expect_false(autosave) sysgit <- callr::r_safe(function() getOption("workflowr.sysgit"), user_profile = TRUE) expect_identical(sysgit, "/git") view <- callr::r_safe(function() getOption("workflowr.view"), user_profile = TRUE) expect_identical(view, "bananas") }) test_that("Invalid workflowr.autosave does not crash workflowr", { path <- test_setup() on.exit(test_teardown(path)) expect_silent( callr::r_safe(function(path) { options(workflowr.autosave = "not-a-logical") library(workflowr) wflow_status(project = path) }, args = list(path = path)) ) })
Prova
/Prova.R
no_license
SimoneArrigoni/Time-Series
R
false
false
8
r
Prova
#Q1 library(rattle) library(AppliedPredictiveModeling) data(segmentationOriginal) suppressMessages(library(caret)) inTrain=createDataPartition(segmentationOriginal$Case,p=0.65,list=F) training=segmentationOriginal[inTrain,] testing=segmentationOriginal[-inTrain,] set.seed(125) modFit=train(Class ~ .,method="rpart",data=training) fancyRpartPlot(modFit$finalModel) #Q3 library(pgmm) data(olive) olive = olive[, -1] inTrain=createDataPartition(olive$Area,p=0.65,list=F) training=olive[inTrain,] testing=olive[-inTrain,] modFit=train(Area ~ .,method="rpart",data=training) newdata = as.data.frame(t(colMeans(olive))) predict(modFit,newdata) #Q4 library(ElemStatLearn) data(SAheart) set.seed(8484) train = sample(1:dim(SAheart)[1], size = dim(SAheart)[1] / 2, replace = F) trainSA = SAheart[train, ] testSA = SAheart[-train, ] set.seed(13234) modFit=train(chd ~ age+alcohol+obesity+tobacco+typea+ldl, method="glm",family="binomial",data=trainSA) missClass = function(values, prediction){ sum(((prediction > 0.5) * 1) != values) / length(values)} missClass(trainSA$chd,predict(modFit,newdata=trainSA)) missClass(testSA$chd,predict(modFit,newdata=testSA)) #Q5 library(ElemStatLearn) data(vowel.train) data(vowel.test) vowel.train$y=as.factor(vowel.train$y) vowel.test$y=as.factor(vowel.test$y) set.seed(33833) # modFit=train(y~ .,data=vowel.train,method="rf",prox=F) # varImp(modFit,scale=F) modFit=randomForest(y~.,data=vowel.train) order(varImp(modFit,scale=F),decreasing = T)
/MachineQuiz3.R
no_license
hawaiidaniel/Practical-Machine-Learning
R
false
false
1,550
r
#Q1 library(rattle) library(AppliedPredictiveModeling) data(segmentationOriginal) suppressMessages(library(caret)) inTrain=createDataPartition(segmentationOriginal$Case,p=0.65,list=F) training=segmentationOriginal[inTrain,] testing=segmentationOriginal[-inTrain,] set.seed(125) modFit=train(Class ~ .,method="rpart",data=training) fancyRpartPlot(modFit$finalModel) #Q3 library(pgmm) data(olive) olive = olive[, -1] inTrain=createDataPartition(olive$Area,p=0.65,list=F) training=olive[inTrain,] testing=olive[-inTrain,] modFit=train(Area ~ .,method="rpart",data=training) newdata = as.data.frame(t(colMeans(olive))) predict(modFit,newdata) #Q4 library(ElemStatLearn) data(SAheart) set.seed(8484) train = sample(1:dim(SAheart)[1], size = dim(SAheart)[1] / 2, replace = F) trainSA = SAheart[train, ] testSA = SAheart[-train, ] set.seed(13234) modFit=train(chd ~ age+alcohol+obesity+tobacco+typea+ldl, method="glm",family="binomial",data=trainSA) missClass = function(values, prediction){ sum(((prediction > 0.5) * 1) != values) / length(values)} missClass(trainSA$chd,predict(modFit,newdata=trainSA)) missClass(testSA$chd,predict(modFit,newdata=testSA)) #Q5 library(ElemStatLearn) data(vowel.train) data(vowel.test) vowel.train$y=as.factor(vowel.train$y) vowel.test$y=as.factor(vowel.test$y) set.seed(33833) # modFit=train(y~ .,data=vowel.train,method="rf",prox=F) # varImp(modFit,scale=F) modFit=randomForest(y~.,data=vowel.train) order(varImp(modFit,scale=F),decreasing = T)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rm_noninfo_vars.r \name{rm_near_zero_vars} \alias{rm_near_zero_vars} \alias{rm_high_correlated} \alias{rm_linear_combos} \title{Remove non informative variables} \usage{ rm_near_zero_vars(data, keep = NULL, freqCut = 95/5, uniqueCut = 10, verbose = FALSE) rm_high_correlated(data, keep = NULL, cutoff = 0.9, verbose = FALSE) rm_linear_combos(data, keep = NULL, verbose = FALSE) } \arguments{ \item{data}{dataframe} \item{keep}{vector of variables to kept} \item{freqCut}{first/second, default 95/5} \item{uniqueCut}{nfac/nrow, default 10(\%)} \item{verbose}{default FALSE} \item{cutoff}{default 0.9} } \description{ This is wrap of `nearZeroVar()`,`findCorrelation()` from caret } \details{ The output of `rm_near_zero_vars()` will be a dataframe obj }
/dapre/man/rm_noninfo_vars.Rd
no_license
peter159/RpackageDev
R
false
true
840
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rm_noninfo_vars.r \name{rm_near_zero_vars} \alias{rm_near_zero_vars} \alias{rm_high_correlated} \alias{rm_linear_combos} \title{Remove non informative variables} \usage{ rm_near_zero_vars(data, keep = NULL, freqCut = 95/5, uniqueCut = 10, verbose = FALSE) rm_high_correlated(data, keep = NULL, cutoff = 0.9, verbose = FALSE) rm_linear_combos(data, keep = NULL, verbose = FALSE) } \arguments{ \item{data}{dataframe} \item{keep}{vector of variables to kept} \item{freqCut}{first/second, default 95/5} \item{uniqueCut}{nfac/nrow, default 10(\%)} \item{verbose}{default FALSE} \item{cutoff}{default 0.9} } \description{ This is wrap of `nearZeroVar()`,`findCorrelation()` from caret } \details{ The output of `rm_near_zero_vars()` will be a dataframe obj }
rm(list=ls()) # Loading R packages library(rEDM) library(Synth) library(astsa) source("ccm_function.R") # prepare covariates for artificial states: permutation index artcov_prep <-function(){ country_name = advers$country new_country = paste0("f_",country_name) advers$country = new_country advers$unit.num = advers$unit.num + 31 ## permute index for consumption consumption_index = sample(31) consumption_mat_v = matrix(advers$consumption,nrow = 96,ncol = 31) consumption_mat = consumption_mat_v[, consumption_index] advers$consumption = c(consumption_mat) ## permute index for investment investment_index = sample(31) investment_mat_v = matrix(advers$investment,nrow = 96,ncol = 31) investment_mat = investment_mat_v[, investment_index] advers$investment = c(investment_mat) ## permute index for export export_index = sample(31) export_mat_v = matrix(advers$export,nrow = 96,ncol = 31) export_mat = export_mat_v[, export_index] advers$export = c(export_mat) ## permute index for short_interest short_interest_index = sample(31) short_interest_mat_v = matrix(advers$short_interest,nrow = 96,ncol = 31) short_interest_mat = short_interest_mat_v[, short_interest_index] advers$short_interest = c(short_interest_mat) ## permute index for exchange exchange_index = sample(31) exchange_mat_v = matrix(advers$exchange,nrow = 96,ncol = 31) exchange_mat = exchange_mat_v[, exchange_index] advers$exchange = c(exchange_mat) ## permute index for inflation inflation_index = sample(31) inflation_mat_v = matrix(advers$inflation,nrow = 96,ncol = 31) inflation_mat = inflation_mat_v[, inflation_index] advers$inflation = c(inflation_mat) return(advers) } # prepare cigsale data for artificial countries: using daily call data artdata_prep <- function(advers_mat, advers, sc, k){ ## add extra term to make daily call data scale fit gdp data year = c(1:96)+k advers_gdp = advers_mat[year,]*sc advers$gdp = c(advers_gdp) gdp_advers = rbind(gdp, advers) return(gdp_advers) } synth_ccm_M0<-function(data){ ## 2005Q1: 
40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 l=96 controls = c(1:29,31:62) # create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = controls, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } synth_ccm_M1<-function(data){ ## 2005Q1: 40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 num_units = 62 
## Data normalization for all data instead of preintervention gdp_mat = matrix(gdp_advers$gdp,nrow = 96,ncol = num_units) gdp_mat_n = scale(gdp_mat) cntl_index = c(1:29,31:num_units) Y0 = gdp_mat_n Y1 = gdp_mat_n[,30] l=length(Y1) index_pre=c(1:tr.in) index_post=c((tr.in+1):96) # preintervention Y00 : 20 X 38 matrix (20 years of smoking data for 38 control states) Y00 = Y0[index_pre,] # postintervention Y01 : 11 X 38 matrix (20 years of smoking data for 38 control states) Y01 = Y0[index_post,] # preintervention Y10 : 20 X 1 matrix (11 years of smoking data for 1 treated state) Y10 = Y1[index_pre] # postintervention Y11 : 11 X 1 matrix (11 years of smoking data for 1 treated state) Y11 = Y1[index_post] ## Data normalization for preintervention data Y10_n = Y10 #((Y10-mean(Y10))/sd(Y10)) Y00_n = Y00 ## Simplex projection for control and treatment to select optimal E index_cut = 50 data_name = "gdp_advers_M1" ## Select the most informative control units for MTV. The ranking is robust. rank_index = ccm_all(data_name, Y10_n, Y00_n, index_cut, 3) print(rank_index) ccm_index = setdiff(rank_index, c(30)) ## fix. 
# create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = ccm_index, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } synth_ccm_M2<-function(data){ ## 2005Q1: 40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 num_units = 62 controls = c(1:29,31:62) ## Data normalization for all data instead of 
preintervention gdp_mat = matrix(gdp_advers$gdp,nrow = 96,ncol = num_units) gdp_mat_n = scale(gdp_mat) cntl_index = c(1:29,31:num_units) Y0 = gdp_mat_n Y1 = gdp_mat_n[,30] l=length(Y1) index_pre=c(1:tr.in) index_post=c((tr.in+1):96) # preintervention Y00 : 20 X 38 matrix (20 years of smoking data for 38 control states) Y00 = Y0[index_pre,] # postintervention Y01 : 11 X 38 matrix (20 years of smoking data for 38 control states) Y01 = Y0[index_post,] # preintervention Y10 : 20 X 1 matrix (11 years of smoking data for 1 treated state) Y10 = Y1[index_pre] # postintervention Y11 : 11 X 1 matrix (11 years of smoking data for 1 treated state) Y11 = Y1[index_post] ## Data normalization for preintervention data Y10_n = Y10 #((Y10-mean(Y10))/sd(Y10)) Y00_n = Y00 ## Simplex projection for control and treatment to select optimal E index_cut = 50 data_name = "gdp_advers_M1" ## Select the most informative control units for MTV. The ranking is robust. rank_index = ccm_all(data_name, Y10_n, Y00_n, index_cut, 3) print(rank_index) ## Step 1: get the selected index by CCM ccm_index = setdiff(rank_index, c(30)) ## fix. 
# create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = controls, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w ## Step 2: get the selected index by SCM scm_index = a$unit.numbers[which(a$w.weights>0.000)] print(paste("scm_index",scm_index)) print(paste("ccm_index",ccm_index)) ## Step 3: get the new index by intersecting ccm_index and scm_index inter_index = intersect(ccm_index,scm_index) print(paste("inter_index",inter_index)) if(length(inter_index)<2){ return(c(0,0,0,0)) }else{ ## Step 4: run SCM again by using inter_index as control units dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, 
"mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = inter_index, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) ## Do CCM plot a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } } load("data/brexit.RData") advers = gdp advers=artcov_prep() # dat = read.csv("data/daily-calls.csv",header = F) # dat = dat$V1 # # dat = dat[25:131]/dat[1] # dat = dat[25:130] # dat = dat/dat[1] # log_dat = log(dat) * 50 # log_dat[55:73] = log_dat[55:73] + 4 # # plot(log_dat, type = "l", col="red") # l = length(log_dat) # x = log_dat # tsx = ts(x) # ## Fit time series with AR(p) model # x_fit <- sarima(x, p = 2, d = 1, q = 1) # x_fitted = x - x_fit$fit$residuals # # plot(tsx,col="green") # # lines(c(1:l),x_fitted, col="red") # n_art = 106 # advers_mat = matrix(0, nrow = n_art, ncol = 31) # for(i in c(1:31)){ # advers_mat[,i] = x_fitted[1:n_art] + 
2*rnorm(n_art) # } # len=106 # pdf(file="fig/scdata_art.pdf", width = 15, height = 5) # par(mfrow=c(4,5),mar=c(4,4,1,1), mgp = c(2.5, 1, 0)) # for(i in c(1:31)){ # plot(1:len, advers_mat[,i], main = paste0(i),type = "l", col = "blue",lwd=2, xlab = "Time Index", ylab = "Value") # } # dev.off() # save(advers,advers_mat, file = "data/call_base.RData") load("data/call_base.RData") y_l = c(0:10) sc_l = c(1) for(sc in sc_l){ M0_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M0(gdp_advers) M0_res = c(M0_res, result) } M0_res_mat = matrix(unlist(M0_res), ncol = length(y_l), byrow = F) save(M0_res_mat,file = paste0("res/M0_",sc,".RData")) } for(sc in sc_l){ M1_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M1(gdp_advers) M1_res = c(M1_res, result) } M1_res_mat = matrix(unlist(M1_res), ncol = length(y_l), byrow = F) save(M1_res_mat,file = paste0("res/M1_",sc,".RData")) } for(sc in sc_l){ M2_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M2(gdp_advers) if(sum(result[1:2])!=0){ M2_res = c(M2_res, result) } } M2_res_mat = matrix(unlist(M2_res), ncol = length(y_l), byrow = F) save(M2_res_mat,file = paste0("res/M2_",sc,".RData")) }
/jasa_figure10.R
permissive
y-ding/DYNSCM
R
false
false
14,831
r
rm(list=ls()) # Loading R packages library(rEDM) library(Synth) library(astsa) source("ccm_function.R") # prepare covariates for artificial states: permutation index artcov_prep <-function(){ country_name = advers$country new_country = paste0("f_",country_name) advers$country = new_country advers$unit.num = advers$unit.num + 31 ## permute index for consumption consumption_index = sample(31) consumption_mat_v = matrix(advers$consumption,nrow = 96,ncol = 31) consumption_mat = consumption_mat_v[, consumption_index] advers$consumption = c(consumption_mat) ## permute index for investment investment_index = sample(31) investment_mat_v = matrix(advers$investment,nrow = 96,ncol = 31) investment_mat = investment_mat_v[, investment_index] advers$investment = c(investment_mat) ## permute index for export export_index = sample(31) export_mat_v = matrix(advers$export,nrow = 96,ncol = 31) export_mat = export_mat_v[, export_index] advers$export = c(export_mat) ## permute index for short_interest short_interest_index = sample(31) short_interest_mat_v = matrix(advers$short_interest,nrow = 96,ncol = 31) short_interest_mat = short_interest_mat_v[, short_interest_index] advers$short_interest = c(short_interest_mat) ## permute index for exchange exchange_index = sample(31) exchange_mat_v = matrix(advers$exchange,nrow = 96,ncol = 31) exchange_mat = exchange_mat_v[, exchange_index] advers$exchange = c(exchange_mat) ## permute index for inflation inflation_index = sample(31) inflation_mat_v = matrix(advers$inflation,nrow = 96,ncol = 31) inflation_mat = inflation_mat_v[, inflation_index] advers$inflation = c(inflation_mat) return(advers) } # prepare cigsale data for artificial countries: using daily call data artdata_prep <- function(advers_mat, advers, sc, k){ ## add extra term to make daily call data scale fit gdp data year = c(1:96)+k advers_gdp = advers_mat[year,]*sc advers$gdp = c(advers_gdp) gdp_advers = rbind(gdp, advers) return(gdp_advers) } synth_ccm_M0<-function(data){ ## 2005Q1: 
40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 l=96 controls = c(1:29,31:62) # create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = controls, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } synth_ccm_M1<-function(data){ ## 2005Q1: 40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 num_units = 62 
## Data normalization for all data instead of preintervention gdp_mat = matrix(gdp_advers$gdp,nrow = 96,ncol = num_units) gdp_mat_n = scale(gdp_mat) cntl_index = c(1:29,31:num_units) Y0 = gdp_mat_n Y1 = gdp_mat_n[,30] l=length(Y1) index_pre=c(1:tr.in) index_post=c((tr.in+1):96) # preintervention Y00 : 20 X 38 matrix (20 years of smoking data for 38 control states) Y00 = Y0[index_pre,] # postintervention Y01 : 11 X 38 matrix (20 years of smoking data for 38 control states) Y01 = Y0[index_post,] # preintervention Y10 : 20 X 1 matrix (11 years of smoking data for 1 treated state) Y10 = Y1[index_pre] # postintervention Y11 : 11 X 1 matrix (11 years of smoking data for 1 treated state) Y11 = Y1[index_post] ## Data normalization for preintervention data Y10_n = Y10 #((Y10-mean(Y10))/sd(Y10)) Y00_n = Y00 ## Simplex projection for control and treatment to select optimal E index_cut = 50 data_name = "gdp_advers_M1" ## Select the most informative control units for MTV. The ranking is robust. rank_index = ccm_all(data_name, Y10_n, Y00_n, index_cut, 3) print(rank_index) ccm_index = setdiff(rank_index, c(30)) ## fix. 
# create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = ccm_index, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } synth_ccm_M2<-function(data){ ## 2005Q1: 40 ; 2008Q1: 52 ; 2009Q3: 58 ; 2012Q1: 68 tr.in = 85 num_units = 62 controls = c(1:29,31:62) ## Data normalization for all data instead of 
preintervention gdp_mat = matrix(gdp_advers$gdp,nrow = 96,ncol = num_units) gdp_mat_n = scale(gdp_mat) cntl_index = c(1:29,31:num_units) Y0 = gdp_mat_n Y1 = gdp_mat_n[,30] l=length(Y1) index_pre=c(1:tr.in) index_post=c((tr.in+1):96) # preintervention Y00 : 20 X 38 matrix (20 years of smoking data for 38 control states) Y00 = Y0[index_pre,] # postintervention Y01 : 11 X 38 matrix (20 years of smoking data for 38 control states) Y01 = Y0[index_post,] # preintervention Y10 : 20 X 1 matrix (11 years of smoking data for 1 treated state) Y10 = Y1[index_pre] # postintervention Y11 : 11 X 1 matrix (11 years of smoking data for 1 treated state) Y11 = Y1[index_post] ## Data normalization for preintervention data Y10_n = Y10 #((Y10-mean(Y10))/sd(Y10)) Y00_n = Y00 ## Simplex projection for control and treatment to select optimal E index_cut = 50 data_name = "gdp_advers_M1" ## Select the most informative control units for MTV. The ranking is robust. rank_index = ccm_all(data_name, Y10_n, Y00_n, index_cut, 3) print(rank_index) ## Step 1: get the selected index by CCM ccm_index = setdiff(rank_index, c(30)) ## fix. 
# create matrices from panel data that provide inputs for synth() dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, "mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = controls, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) # ccm plots for control with non-zero weights a=synth.tables$tab.w ## Step 2: get the selected index by SCM scm_index = a$unit.numbers[which(a$w.weights>0.000)] print(paste("scm_index",scm_index)) print(paste("ccm_index",ccm_index)) ## Step 3: get the new index by intersecting ccm_index and scm_index inter_index = intersect(ccm_index,scm_index) print(paste("inter_index",inter_index)) if(length(inter_index)<2){ return(c(0,0,0,0)) }else{ ## Step 4: run SCM again by using inter_index as control units dataprep.out<- dataprep( foo = gdp_advers, predictors = c("consumption","investment","export","short_interest","exchange","inflation"), predictors.op = "mean", dependent = "gdp", unit.variable = "unit.num", time.variable = "quarter_index", special.predictors = list( list("gdp", 1, "mean"), list("gdp", 20, "mean"), list("gdp", 40, "mean"), list("gdp", 60, "mean"), list("gdp", 65, "mean"), list("gdp", 70, 
"mean"), list("gdp", 80, "mean") ), treatment.identifier = 30, controls.identifier = inter_index, time.predictors.prior = c(1:tr.in), time.optimize.ssr = c(1:tr.in), unit.names.variable = c("country"), time.plot = c(1:96) ) synth.out <- synth(dataprep.out) synth.out <- synth(dataprep.out) ## there are two ways to summarize the results ## we can either access the output from synth.out directly round(synth.out$solution.w,2) # contains the unit weights or synth.out$solution.v gaps<- dataprep.out$Y1plot-(dataprep.out$Y0plot%*%synth.out$solution.w) synth.tables <- synth.tab(dataprep.res = dataprep.out,synth.res = synth.out) print(synth.tables) ## Do CCM plot a=synth.tables$tab.w nonZeroIndex = a$unit.numbers[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroWeightAll = a$w.weights[which(a$w.weights>0.002)] nonZeroWeightAdv = a$w.weights[which(a$w.weights>0.002 & a$unit.numbers>31)] nonZeroState = a$unit.names[which(a$w.weights>0.002 & a$unit.numbers>31)] # N_yc: # artificial states selected N_yc = length(nonZeroIndex) # ATE = synthCA - realCA ATE = -sum(gaps[(tr.in+1):l])/(l+1-tr.in) # AWALL: average weights for all states AWALL = sum(nonZeroWeightAll)/length(nonZeroWeightAll) # AWADV: average weights for all adversarial states AWADV = sum(nonZeroWeightAdv)/length(nonZeroWeightAdv) return(c(N_yc, ATE, AWALL, AWADV)) } } load("data/brexit.RData") advers = gdp advers=artcov_prep() # dat = read.csv("data/daily-calls.csv",header = F) # dat = dat$V1 # # dat = dat[25:131]/dat[1] # dat = dat[25:130] # dat = dat/dat[1] # log_dat = log(dat) * 50 # log_dat[55:73] = log_dat[55:73] + 4 # # plot(log_dat, type = "l", col="red") # l = length(log_dat) # x = log_dat # tsx = ts(x) # ## Fit time series with AR(p) model # x_fit <- sarima(x, p = 2, d = 1, q = 1) # x_fitted = x - x_fit$fit$residuals # # plot(tsx,col="green") # # lines(c(1:l),x_fitted, col="red") # n_art = 106 # advers_mat = matrix(0, nrow = n_art, ncol = 31) # for(i in c(1:31)){ # advers_mat[,i] = x_fitted[1:n_art] + 
2*rnorm(n_art) # } # len=106 # pdf(file="fig/scdata_art.pdf", width = 15, height = 5) # par(mfrow=c(4,5),mar=c(4,4,1,1), mgp = c(2.5, 1, 0)) # for(i in c(1:31)){ # plot(1:len, advers_mat[,i], main = paste0(i),type = "l", col = "blue",lwd=2, xlab = "Time Index", ylab = "Value") # } # dev.off() # save(advers,advers_mat, file = "data/call_base.RData") load("data/call_base.RData") y_l = c(0:10) sc_l = c(1) for(sc in sc_l){ M0_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M0(gdp_advers) M0_res = c(M0_res, result) } M0_res_mat = matrix(unlist(M0_res), ncol = length(y_l), byrow = F) save(M0_res_mat,file = paste0("res/M0_",sc,".RData")) } for(sc in sc_l){ M1_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M1(gdp_advers) M1_res = c(M1_res, result) } M1_res_mat = matrix(unlist(M1_res), ncol = length(y_l), byrow = F) save(M1_res_mat,file = paste0("res/M1_",sc,".RData")) } for(sc in sc_l){ M2_res = list() for(y in y_l){ gdp_advers = artdata_prep(advers_mat, advers, sc, y) result = synth_ccm_M2(gdp_advers) if(sum(result[1:2])!=0){ M2_res = c(M2_res, result) } } M2_res_mat = matrix(unlist(M2_res), ncol = length(y_l), byrow = F) save(M2_res_mat,file = paste0("res/M2_",sc,".RData")) }
######################################################################################################################################################################################################### ##Funktion f?r die Berechnung der ?berlebenswahrscheinlichkeit f?r einen bestimmten Zeitpunkt ########################################################################################################################################################################################################## #' Function to calculate the probability of survival for a point in time #' #' @param S xx #' @param totaltimes xx #' @return one vector with the first non-NA value #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' x<-kmsurv(S,totaltimes) #' } #' #'@export kmsurv <- function(S, totaltimes) { f <- survfit.km(factor(rep(1,nrow(S))), S) tt <- c(0, f$totaltimes) ss <- c(1, f$surv) # add first point to survival curve approx(tt, ss, xout=totaltimes, method='constant', f=0)$y } ########################################################################################################################################################################################################## ##Funktion fuer das extrahieren des p-Wertes aus der surfdiff funktion ########################################################################################################################################################################################################## #' Function to exctract p-Value from surfdiff object #' #' accepts a list of vectors of identical length and returns one vector with the first non-NA value #' #' @param daten Data.Frame with survival datacontaining min. two columns with a) survival time b) survival status #' @param group columnsname to be groubed by #' @param time name of column containing the intervall data. default="time" #' @param status name of colmn containig status data. 
default="status" #' #' @return a numeric vector representing the p-value #' #' @import survival #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' pvalue<-p.value.survdiff(daten=df,group="Treatment",time="duration",status="survivalstatus") #' } #' #'@export p.value.survdiff<-function(daten,group=NULL, time="time",status="status") { sdf<-survdiff(Surv(daten[[time]],daten[[status]])~daten[,group], data =daten) p.val <- 1 - pchisq(sdf$chisq, length(sdf$n) - 1) return(p.val) } ########################################################################################################################################################################################################## ##Funktion f?r eine zum erstellen eines einheitlichen BoxPlots ########################################################################################################################################################################################################## #' R Function to streamline the generation of (grouped) boxplots #' #' @title bbBoxPlot - streamlined BoxPlot #' #' @param daten the vector of numeric data #' @param gruppe if given the factors to be grouped by #' @param filename if given output will be delivered to file #' @param ylab label of y axis #' @param titel title of plot #' #' @return a base boxplot #' #' @import bbhelper #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' bp<-bbBoxPlot(iris$Petal.Width,gruppe=iris$Species,ylab="Petal Width") #' #'@export bbBoxPlot<- function(daten,gruppe=NULL,filename=NULL,ylab,titel=NULL) { mypalette<-bbhelper::getBBColors(length(levels(gruppe))) if (is.null(filename)==FALSE){png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600) } boxplot(daten ~ gruppe,ylab=ylab,col=mypalette, horizontal=FALSE,cex.axis=0.7, las=3,show.names=FALSE) legend("topright",levels(gruppe),fill=mypalette,cex=0.8) if (is.null(titel)==FALSE) { title(main = list(titel,cex= 
0.8, col="#60759B")) } text(0.2,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6) if (is.null(filename)==FALSE){dev.off()} } ########################################################################################################################################################################################################## ##Funktion f?r das erzeugen eines angepassten Farbthemas ########################################################################################################################################################################################################## #' R fuction to set a unified layout for base plots #' #' accepts a list of vectors of identical length and returns one vector with the first non-NA value #' #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' bbTheme() #' } #' #'@export bbTheme <- function() { par <- col.whitebg() par$strip.background$col <- rep("#60759B", 7) par$add.text$col <- "#eeeeaa" par$add.text$font <- 2 par$background$col <- "#ffffff" par$superpose.line$lty <- rep(1, 7) par$superpose.line$col[1:2] <- c("#880000", "#008800") par$superpose.symbol$col[1:2] <- c("#880000", "#008800") par } ########################################################################################################################################################################################################## ## Function to streamline survival plots on base of survminer ########################################################################################################################################################################################################## #' R Function to streamline the generation of survival plots #' #' accepts a data frame containing survival data an #' #' @title bbggkmplot - streamlined KM survival curve #' #' @param daten the data.frame with survival data #' @param gruppe optional: the factor the plot has to be grouped by #' @param time optional: 
data holding the the time #' @param status optional: data holding survival status (0/1) #' @param title the title of the plot #' @param survtime the time intervall in month to show the survival rate for (e.g. 24 for 2-year survival-rate) #' @param survtimetext the text to label the survival rate #' @param risk.table if TRUE the the risk table is ploted beneath the graph. Defalut is true. #' @param showmedian if true the median value is shown in plot /legend (for more than one group). Default is true. #' @param median.dig to how many digits the median should be rounded. default is 2 #' @param xmax MAx Value for X-axis #' @param xlab Label for x-axis (default="Monate (Anzeige bis max. 5 Jahre)") #' @param cex.lab Fontsize for label (default=1) #' @param cex.axis Fontsize for axis (default=1) #' @param watermark if TRUE the biobits watermark will be printet on plot #' @param ylab the label for y-axis #' @param logrank if true a logrank test is performed and the p-value will be printet on the plot (default=FALSE) #' @param ggtheme function, ggplot2 theme name. Default value is theme_light. #' Allowed values include ggplot2 official themes: see theme #' @param conf.int logical value. If TRUE, plots confidence interval #' @param legend character specifying legend position. Allowed values are one of c("top", "bottom", "left", "right", "none"). 
#' #' @return a survival plot #' #' @import survminer survival bbhelper dplyr ggplot2 #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' data("myeloma") #' bbggkmplot(daten = myeloma ,time=time #' ,gruppe = molecular_group #' ,status=event,logrank=T #' ,watermark = T,risk.table = T #' ,showmedian = T #' ,survtime=60 #' ,survtimetext="5-Y SR") #' } #' #'@export bbggkmplot<-function(daten,gruppe=NULL,time=time,status=status,xlab="Time in months" ,cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",title="",survtime=NULL,survtimetext=NULL ,risk.table = TRUE,logrank=FALSE,xmax=100,showmedian=T,median.dig=2,ggtheme= theme_light(),conf.int=TRUE ,legend="right"){ qTIME <- dplyr::enquo(time) # Create quosure qSTATUS <- dplyr::enquo(status) # Create quosure qLevels<-1 qMedian<-"none" qLegend<-legend groupnames<-NULL subtext<-"" if(!missing(gruppe)){ qGRUPPE <- dplyr::enquo(gruppe) Sdata<-daten%>%dplyr::select(time= !! dplyr::as_label(qTIME),status= !! dplyr::as_label(qSTATUS),group= !! dplyr::as_label(qGRUPPE)) Sdata$group<-as.factor(Sdata$group) fit <- survival::survfit(survival::Surv(time=time, event=status) ~ group, data=Sdata) groupnames<-levels(droplevels(Sdata$group)) qLevels<-length(groupnames) if (showmedian){ gMedian<-as.vector(survminer::surv_median(fit)[["median"]]) medianText<-paste0("\nMedian = ", as.vector(unlist(round(gMedian,median.dig)))) groupnames<-paste0(groupnames,medianText) } }else{ Sdata<-daten%>%dplyr::select(time= !! dplyr::as_label(qTIME),status= !! 
dplyr::as_label(qSTATUS)) fit <- survival::survfit(survival::Surv(time=time, event=status)~1 , data=Sdata) groupnames<-"" if(showmedian){ qMedian<-"hv" gMedian<-as.vector(survminer::surv_median(fit)[["median"]]) subtext<-paste("Median=",round(gMedian,median.dig)) } qLegend<-"none" } mypalette<-bbhelper::getBBColors(qLevels) #survivalprobability at timepoint if(!is.null(survtime)){ survtimepoint<-formatC((summary(fit, times=survtime,extend=TRUE)$surv),digits=2,format="f") # Wenn wir nur eine Gruppe haben Angabe als "caption" if(missing(gruppe)){ subtext<-paste0(subtext,paste("\n",survtimetext," = ",survtimepoint)) }else{ groupnames<-paste0(groupnames,paste("\n",survtimetext," = ",survtimepoint,"\n")) } } survplot<-survminer::ggsurvplot( fit, # fitted survfit object data=Sdata, risk.table = risk.table, # include risk table? conf.int = conf.int, # add confidence intervals? pval = logrank, # add p-value to the plot? pval.size=3, title=title, break.time.by = 12, #default because we mostly use months legend=qLegend, xlim = c(0, xmax), xlab = xlab, caption =subtext, show.legend = FALSE, ylab=ylab, risk.table.col = "strata", risk.table.fontsize=3, palette=mypalette, ggtheme = ggtheme, legend.title = "", surv.median.line = qMedian, # median survival in plot conf.int.style = "step", # customize style of confidence intervals risk.table.y.text = FALSE,# show bars instead of names in text annotations # in legend of risk table. 
legend.labs = groupnames, font.x = c(8), # font for the x axis font.legend = c(8), font.caption =c(7) ) if (watermark){ # survplot$plot<- survplot$plot + ggplot2::annotate("text", x = Inf, y = -Inf, label = paste(" created by biobits\n",Sys.Date()), hjust=1.1, vjust=-0.2, col="grey", cex=2.5, alpha = 0.8) } if(risk.table){ # survplot$table <- ggpubr::ggpar( # survplot$table, # font.title = c(8), # font.x = c(8), # font.xtickslab = c(8) #) survplot$table<-survplot$table + theme(plot.title = element_text(size=8), axis.title.x = element_text(size=8), axis.text.x = element_text(size=8) ) } survplot } ########################################################################################################################################################################################################## ##Funktion zum erstellen eines einheitlichen KM-Plots ########################################################################################################################################################################################################## #' R deprecated ! Function to streamline the generation of survival plots #' #' accepts a data frame containing survival data an #' @title bbKMplot - streamlined KM survival curve #' #' @param daten the data.frame with survival data #' @param gruppe optional: the factor the plot has to be grouped by #' @param titel the title of the plot #' @param filename the filename if the plot should be saved as file #' @param survtime the time intervall in month to show the survival rate for (e.g. 
24 for 2-year survival-rate) #' @param survtimetext the text to label the survival rate #' @param legendeout if TRUE the legend will be printet outside the plot margins #' @param file.out if TRUE the plot will be saved to file #' @param legfontsize fontsize of legend (default=1) #' @param subtext if needed a subtext to be placed beneath the plot #' @param xmax MAx Value for X-axis #' @param xlab Label for x-axis (default="Monate (Anzeige bis max. 5 Jahre)") #' @param cex.lab Fontsize for label (default=1) #' @param cex.axis Fontsize for axis (default=1) #' @param watermark if TRUE the biobits watermark will be printet on plot #' @param ylab the label for y-axis #' @param logrank if true a logrank test is performed for two(!) survival curves and the result will be printet on the plot (default=FALSE) #' #' #' @return a survival plot #' #' @import survival bbhelper #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' p<-bbKMPlot(daten,gruppe=NULL,titel,filename,survtime=NULL,survtimetext=NULL,legendeout=NULL,file.out=TRUE,legfontsize=NULL,subtext="", #' xmax=61.25,xlab="Beobachtungszeit in Monaten",cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",logrank=FALSE) #' } #' #'@export bbKMPlot <- function (daten,gruppe=NULL,titel,filename,survtime=NULL,survtimetext=NULL,legendeout=NULL,file.out=TRUE,legfontsize=NULL,subtext="", xmax=61.25,xlab="Beobachtungszeit in Monaten",cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",logrank=FALSE) { #status als numeric definieren daten$status<-as.numeric(daten$status) blwd<-2 #Schriftgroesse der Legende if (is.null(legfontsize)==TRUE){leg.cex<-0.6} else {leg.cex<-legfontsize} if ((is.null(gruppe)==TRUE) || ((length(levels(as.factor(daten[[gruppe]])))==1))) { #aucount <- length(daten$time) if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600)} fitg<-survival::survfit(survival::Surv(time, status)~1 ,data = daten,type="kaplan-meier") 
survmedian<-(summary(fitg)$table["median"][[1]]) aucount<-formatC((summary(fitg)$table["records"][[1]]),digits=0,format="f") survmedian_text<-formatC(survmedian,digits = 1,format = "f") plot(fitg,xlab=xlab,ylab=ylab,xmax=xmax,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) if(watermark==TRUE){text(0,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6)} # median einzeichnen segments(0, 0.5, survmedian,0.5, col = "#a6202a", lty = 2, lwd = 1.5) segments(survmedian, 0.5, survmedian,0, col = "#a6202a", lty = 2, lwd = 1.5) text(survmedian,-0.02,paste(c("Median=\n\n",survmedian_text)),cex=0.6,col = "#a6202a") if (is.null(survtime)==FALSE) { survtimepoint<-formatC((summary(fitg, times=survtime)$surv),digits=2,format="f") title(main = list(c(titel,"\n",paste("n =",aucount)),cex= 0.8, col="#60759B"), sub=paste(survtimetext," = ",survtimepoint)) } else{ title(main = list(c(titel,"\n",paste("n =",aucount)),cex= 0.8, col="#60759B")) } # ausgabe ende if(file.out==TRUE) {dev.off()} } else { daten[,gruppe]<-as.factor(daten[[gruppe]]) Acount <- length(daten$time) fits<-survival::survfit(survival::Surv(time,status) ~daten[[gruppe]], data =daten) gruppen_list <- levels(daten[[gruppe]]) MEDIANE <-list() ANZAHL<-list() SURVTIMEPOINT<-list() {if (length(gruppen_list)==1) { MEDIANE[gruppen_list[c]]<-formatC((summary(fits)$table["median"][[c]]),digits=1,format="f") ANZAHL[gruppen_list[c]]<-formatC((summary(fits)$table["records"][[c]]),digits=0,format="f") if (is.null(survtime)==FALSE) { SURVTIMEPOINT[gruppen_list[c]]<-formatC((summary(fits, times=survtime)$surv[c]),digits=2,format="f") } } else{ for (c in 1:length(gruppen_list)) { MEDIANE[gruppen_list[c]]<-formatC((summary(fits)$table[,"median"][[c]]),digits=1,format="f") ANZAHL[gruppen_list[c]]<-formatC((summary(fits)$table[,"records"][[c]]),digits=0,format="f") if (is.null(survtime)==FALSE) { #MIT Errorhandling, da bei zu gro?em Zeitraum ein fehler geworfen wird 
res<-try((SURVTIMEPOINT[gruppen_list[c]]<-formatC((summary(fits, times=survtime)$surv[c]),digits=2,format="f")),silent=TRUE) if(class(res) == "try-error"){SURVTIMEPOINT[gruppen_list[c]]<-NA} } } }} mypalette<-bbhelper::getBBColors(length(MEDIANE)) # Legende ausserhalb des Plotbereichs if(is.null(legendeout)==FALSE) { if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1800,height=1600)} par(mar=c(5, 4, 4, 10)+.1,xpd=TRUE)#oma=c(0,0,0,2)plt=c(1,1,1,1.5), plot(fits,xlab=xlab,ylab=ylab,xmax=xmax,col=mypalette,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) tmp.u <- par('usr') leg.pos<-list(x=tmp.u[2], y=tmp.u[4], xjust=0, yjust=0,outer=TRUE) } else { leg.pos<-"topright" if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600)} plot(fits,xlab=xlab,ylab=ylab,xmax=xmax,col=mypalette,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) } if(watermark==TRUE){text(0,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6)} title(main = list(paste(titel,sep="" ),cex= 0.8, col="#60759B"),sub=list(paste(subtext,sep="" ),cex= 0.6)) if (is.null(survtime)==FALSE) { legend(leg.pos,paste(names(MEDIANE),"\nMedian =",MEDIANE," n =",ANZAHL,"\n",survtimetext,"=",SURVTIMEPOINT,"\n"),lty=1,col=mypalette, adj = c(0, .6),cex= leg.cex,bty="n") } else { legend(leg.pos,paste(names(MEDIANE),"\nMedian =",MEDIANE," n =",ANZAHL,"\n"),lty=1,col=mypalette, adj = c(0, .6),cex= leg.cex,bty="n") } #Logrank test lograng_txt<-"" if (logrank==TRUE){ cdiff<-survdiff(Surv(time,status) ~daten[,gruppe], data =daten) cp.diff<-pchisq(cdiff$chisq, df=1, lower=FALSE) if(cp.diff<0.00001) {lograng_txt="Logrank: p < 0.00001"} else {if (cp.diff>0.001) {lograng_txt<-paste("Logrank: p = ",formatC(cp.diff,digits = 3,format = "f"))} else {lograng_txt<-paste("Logrank: p = ",formatC(cp.diff,digits = 5,format = "f"))} } pypos<-(par('xaxp')[2]/par('xaxp')[3])/2 pxpos<-0.12 text(pypos,pxpos,lograng_txt,pos=4,cex=leg.cex) } 
if(file.out==TRUE) {dev.off()} } }
/bbsurvr/R/survival.R
permissive
biobits/bbsurvr
R
false
false
19,197
r
######################################################################################################################################################################################################### ##Funktion f?r die Berechnung der ?berlebenswahrscheinlichkeit f?r einen bestimmten Zeitpunkt ########################################################################################################################################################################################################## #' Function to calculate the probability of survival for a point in time #' #' @param S xx #' @param totaltimes xx #' @return one vector with the first non-NA value #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' x<-kmsurv(S,totaltimes) #' } #' #'@export kmsurv <- function(S, totaltimes) { f <- survfit.km(factor(rep(1,nrow(S))), S) tt <- c(0, f$totaltimes) ss <- c(1, f$surv) # add first point to survival curve approx(tt, ss, xout=totaltimes, method='constant', f=0)$y } ########################################################################################################################################################################################################## ##Funktion fuer das extrahieren des p-Wertes aus der surfdiff funktion ########################################################################################################################################################################################################## #' Function to exctract p-Value from surfdiff object #' #' accepts a list of vectors of identical length and returns one vector with the first non-NA value #' #' @param daten Data.Frame with survival datacontaining min. two columns with a) survival time b) survival status #' @param group columnsname to be groubed by #' @param time name of column containing the intervall data. default="time" #' @param status name of colmn containig status data. 
default="status" #' #' @return a numeric vector representing the p-value #' #' @import survival #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' pvalue<-p.value.survdiff(daten=df,group="Treatment",time="duration",status="survivalstatus") #' } #' #'@export p.value.survdiff<-function(daten,group=NULL, time="time",status="status") { sdf<-survdiff(Surv(daten[[time]],daten[[status]])~daten[,group], data =daten) p.val <- 1 - pchisq(sdf$chisq, length(sdf$n) - 1) return(p.val) } ########################################################################################################################################################################################################## ##Funktion f?r eine zum erstellen eines einheitlichen BoxPlots ########################################################################################################################################################################################################## #' R Function to streamline the generation of (grouped) boxplots #' #' @title bbBoxPlot - streamlined BoxPlot #' #' @param daten the vector of numeric data #' @param gruppe if given the factors to be grouped by #' @param filename if given output will be delivered to file #' @param ylab label of y axis #' @param titel title of plot #' #' @return a base boxplot #' #' @import bbhelper #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' bp<-bbBoxPlot(iris$Petal.Width,gruppe=iris$Species,ylab="Petal Width") #' #'@export bbBoxPlot<- function(daten,gruppe=NULL,filename=NULL,ylab,titel=NULL) { mypalette<-bbhelper::getBBColors(length(levels(gruppe))) if (is.null(filename)==FALSE){png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600) } boxplot(daten ~ gruppe,ylab=ylab,col=mypalette, horizontal=FALSE,cex.axis=0.7, las=3,show.names=FALSE) legend("topright",levels(gruppe),fill=mypalette,cex=0.8) if (is.null(titel)==FALSE) { title(main = list(titel,cex= 
0.8, col="#60759B")) } text(0.2,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6) if (is.null(filename)==FALSE){dev.off()} } ########################################################################################################################################################################################################## ##Funktion f?r das erzeugen eines angepassten Farbthemas ########################################################################################################################################################################################################## #' R fuction to set a unified layout for base plots #' #' accepts a list of vectors of identical length and returns one vector with the first non-NA value #' #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' bbTheme() #' } #' #'@export bbTheme <- function() { par <- col.whitebg() par$strip.background$col <- rep("#60759B", 7) par$add.text$col <- "#eeeeaa" par$add.text$font <- 2 par$background$col <- "#ffffff" par$superpose.line$lty <- rep(1, 7) par$superpose.line$col[1:2] <- c("#880000", "#008800") par$superpose.symbol$col[1:2] <- c("#880000", "#008800") par } ########################################################################################################################################################################################################## ## Function to streamline survival plots on base of survminer ########################################################################################################################################################################################################## #' R Function to streamline the generation of survival plots #' #' accepts a data frame containing survival data an #' #' @title bbggkmplot - streamlined KM survival curve #' #' @param daten the data.frame with survival data #' @param gruppe optional: the factor the plot has to be grouped by #' @param time optional: 
data holding the the time #' @param status optional: data holding survival status (0/1) #' @param title the title of the plot #' @param survtime the time intervall in month to show the survival rate for (e.g. 24 for 2-year survival-rate) #' @param survtimetext the text to label the survival rate #' @param risk.table if TRUE the the risk table is ploted beneath the graph. Defalut is true. #' @param showmedian if true the median value is shown in plot /legend (for more than one group). Default is true. #' @param median.dig to how many digits the median should be rounded. default is 2 #' @param xmax MAx Value for X-axis #' @param xlab Label for x-axis (default="Monate (Anzeige bis max. 5 Jahre)") #' @param cex.lab Fontsize for label (default=1) #' @param cex.axis Fontsize for axis (default=1) #' @param watermark if TRUE the biobits watermark will be printet on plot #' @param ylab the label for y-axis #' @param logrank if true a logrank test is performed and the p-value will be printet on the plot (default=FALSE) #' @param ggtheme function, ggplot2 theme name. Default value is theme_light. #' Allowed values include ggplot2 official themes: see theme #' @param conf.int logical value. If TRUE, plots confidence interval #' @param legend character specifying legend position. Allowed values are one of c("top", "bottom", "left", "right", "none"). 
#' #' @return a survival plot #' #' @import survminer survival bbhelper dplyr ggplot2 #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' data("myeloma") #' bbggkmplot(daten = myeloma ,time=time #' ,gruppe = molecular_group #' ,status=event,logrank=T #' ,watermark = T,risk.table = T #' ,showmedian = T #' ,survtime=60 #' ,survtimetext="5-Y SR") #' } #' #'@export bbggkmplot<-function(daten,gruppe=NULL,time=time,status=status,xlab="Time in months" ,cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",title="",survtime=NULL,survtimetext=NULL ,risk.table = TRUE,logrank=FALSE,xmax=100,showmedian=T,median.dig=2,ggtheme= theme_light(),conf.int=TRUE ,legend="right"){ qTIME <- dplyr::enquo(time) # Create quosure qSTATUS <- dplyr::enquo(status) # Create quosure qLevels<-1 qMedian<-"none" qLegend<-legend groupnames<-NULL subtext<-"" if(!missing(gruppe)){ qGRUPPE <- dplyr::enquo(gruppe) Sdata<-daten%>%dplyr::select(time= !! dplyr::as_label(qTIME),status= !! dplyr::as_label(qSTATUS),group= !! dplyr::as_label(qGRUPPE)) Sdata$group<-as.factor(Sdata$group) fit <- survival::survfit(survival::Surv(time=time, event=status) ~ group, data=Sdata) groupnames<-levels(droplevels(Sdata$group)) qLevels<-length(groupnames) if (showmedian){ gMedian<-as.vector(survminer::surv_median(fit)[["median"]]) medianText<-paste0("\nMedian = ", as.vector(unlist(round(gMedian,median.dig)))) groupnames<-paste0(groupnames,medianText) } }else{ Sdata<-daten%>%dplyr::select(time= !! dplyr::as_label(qTIME),status= !! 
dplyr::as_label(qSTATUS)) fit <- survival::survfit(survival::Surv(time=time, event=status)~1 , data=Sdata) groupnames<-"" if(showmedian){ qMedian<-"hv" gMedian<-as.vector(survminer::surv_median(fit)[["median"]]) subtext<-paste("Median=",round(gMedian,median.dig)) } qLegend<-"none" } mypalette<-bbhelper::getBBColors(qLevels) #survivalprobability at timepoint if(!is.null(survtime)){ survtimepoint<-formatC((summary(fit, times=survtime,extend=TRUE)$surv),digits=2,format="f") # Wenn wir nur eine Gruppe haben Angabe als "caption" if(missing(gruppe)){ subtext<-paste0(subtext,paste("\n",survtimetext," = ",survtimepoint)) }else{ groupnames<-paste0(groupnames,paste("\n",survtimetext," = ",survtimepoint,"\n")) } } survplot<-survminer::ggsurvplot( fit, # fitted survfit object data=Sdata, risk.table = risk.table, # include risk table? conf.int = conf.int, # add confidence intervals? pval = logrank, # add p-value to the plot? pval.size=3, title=title, break.time.by = 12, #default because we mostly use months legend=qLegend, xlim = c(0, xmax), xlab = xlab, caption =subtext, show.legend = FALSE, ylab=ylab, risk.table.col = "strata", risk.table.fontsize=3, palette=mypalette, ggtheme = ggtheme, legend.title = "", surv.median.line = qMedian, # median survival in plot conf.int.style = "step", # customize style of confidence intervals risk.table.y.text = FALSE,# show bars instead of names in text annotations # in legend of risk table. 
legend.labs = groupnames, font.x = c(8), # font for the x axis font.legend = c(8), font.caption =c(7) ) if (watermark){ # survplot$plot<- survplot$plot + ggplot2::annotate("text", x = Inf, y = -Inf, label = paste(" created by biobits\n",Sys.Date()), hjust=1.1, vjust=-0.2, col="grey", cex=2.5, alpha = 0.8) } if(risk.table){ # survplot$table <- ggpubr::ggpar( # survplot$table, # font.title = c(8), # font.x = c(8), # font.xtickslab = c(8) #) survplot$table<-survplot$table + theme(plot.title = element_text(size=8), axis.title.x = element_text(size=8), axis.text.x = element_text(size=8) ) } survplot } ########################################################################################################################################################################################################## ##Funktion zum erstellen eines einheitlichen KM-Plots ########################################################################################################################################################################################################## #' R deprecated ! Function to streamline the generation of survival plots #' #' accepts a data frame containing survival data an #' @title bbKMplot - streamlined KM survival curve #' #' @param daten the data.frame with survival data #' @param gruppe optional: the factor the plot has to be grouped by #' @param titel the title of the plot #' @param filename the filename if the plot should be saved as file #' @param survtime the time intervall in month to show the survival rate for (e.g. 
24 for 2-year survival-rate) #' @param survtimetext the text to label the survival rate #' @param legendeout if TRUE the legend will be printet outside the plot margins #' @param file.out if TRUE the plot will be saved to file #' @param legfontsize fontsize of legend (default=1) #' @param subtext if needed a subtext to be placed beneath the plot #' @param xmax MAx Value for X-axis #' @param xlab Label for x-axis (default="Monate (Anzeige bis max. 5 Jahre)") #' @param cex.lab Fontsize for label (default=1) #' @param cex.axis Fontsize for axis (default=1) #' @param watermark if TRUE the biobits watermark will be printet on plot #' @param ylab the label for y-axis #' @param logrank if true a logrank test is performed for two(!) survival curves and the result will be printet on the plot (default=FALSE) #' #' #' @return a survival plot #' #' @import survival bbhelper #' #' @author Stefan Bartels, \email{email@biobits.eu} #' #' @examples #' \dontrun{ #' p<-bbKMPlot(daten,gruppe=NULL,titel,filename,survtime=NULL,survtimetext=NULL,legendeout=NULL,file.out=TRUE,legfontsize=NULL,subtext="", #' xmax=61.25,xlab="Beobachtungszeit in Monaten",cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",logrank=FALSE) #' } #' #'@export bbKMPlot <- function (daten,gruppe=NULL,titel,filename,survtime=NULL,survtimetext=NULL,legendeout=NULL,file.out=TRUE,legfontsize=NULL,subtext="", xmax=61.25,xlab="Beobachtungszeit in Monaten",cex.lab=1,cex.axis=1,watermark=TRUE,ylab="",logrank=FALSE) { #status als numeric definieren daten$status<-as.numeric(daten$status) blwd<-2 #Schriftgroesse der Legende if (is.null(legfontsize)==TRUE){leg.cex<-0.6} else {leg.cex<-legfontsize} if ((is.null(gruppe)==TRUE) || ((length(levels(as.factor(daten[[gruppe]])))==1))) { #aucount <- length(daten$time) if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600)} fitg<-survival::survfit(survival::Surv(time, status)~1 ,data = daten,type="kaplan-meier") 
survmedian<-(summary(fitg)$table["median"][[1]]) aucount<-formatC((summary(fitg)$table["records"][[1]]),digits=0,format="f") survmedian_text<-formatC(survmedian,digits = 1,format = "f") plot(fitg,xlab=xlab,ylab=ylab,xmax=xmax,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) if(watermark==TRUE){text(0,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6)} # median einzeichnen segments(0, 0.5, survmedian,0.5, col = "#a6202a", lty = 2, lwd = 1.5) segments(survmedian, 0.5, survmedian,0, col = "#a6202a", lty = 2, lwd = 1.5) text(survmedian,-0.02,paste(c("Median=\n\n",survmedian_text)),cex=0.6,col = "#a6202a") if (is.null(survtime)==FALSE) { survtimepoint<-formatC((summary(fitg, times=survtime)$surv),digits=2,format="f") title(main = list(c(titel,"\n",paste("n =",aucount)),cex= 0.8, col="#60759B"), sub=paste(survtimetext," = ",survtimepoint)) } else{ title(main = list(c(titel,"\n",paste("n =",aucount)),cex= 0.8, col="#60759B")) } # ausgabe ende if(file.out==TRUE) {dev.off()} } else { daten[,gruppe]<-as.factor(daten[[gruppe]]) Acount <- length(daten$time) fits<-survival::survfit(survival::Surv(time,status) ~daten[[gruppe]], data =daten) gruppen_list <- levels(daten[[gruppe]]) MEDIANE <-list() ANZAHL<-list() SURVTIMEPOINT<-list() {if (length(gruppen_list)==1) { MEDIANE[gruppen_list[c]]<-formatC((summary(fits)$table["median"][[c]]),digits=1,format="f") ANZAHL[gruppen_list[c]]<-formatC((summary(fits)$table["records"][[c]]),digits=0,format="f") if (is.null(survtime)==FALSE) { SURVTIMEPOINT[gruppen_list[c]]<-formatC((summary(fits, times=survtime)$surv[c]),digits=2,format="f") } } else{ for (c in 1:length(gruppen_list)) { MEDIANE[gruppen_list[c]]<-formatC((summary(fits)$table[,"median"][[c]]),digits=1,format="f") ANZAHL[gruppen_list[c]]<-formatC((summary(fits)$table[,"records"][[c]]),digits=0,format="f") if (is.null(survtime)==FALSE) { #MIT Errorhandling, da bei zu gro?em Zeitraum ein fehler geworfen wird 
res<-try((SURVTIMEPOINT[gruppen_list[c]]<-formatC((summary(fits, times=survtime)$surv[c]),digits=2,format="f")),silent=TRUE) if(class(res) == "try-error"){SURVTIMEPOINT[gruppen_list[c]]<-NA} } } }} mypalette<-bbhelper::getBBColors(length(MEDIANE)) # Legende ausserhalb des Plotbereichs if(is.null(legendeout)==FALSE) { if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1800,height=1600)} par(mar=c(5, 4, 4, 10)+.1,xpd=TRUE)#oma=c(0,0,0,2)plt=c(1,1,1,1.5), plot(fits,xlab=xlab,ylab=ylab,xmax=xmax,col=mypalette,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) tmp.u <- par('usr') leg.pos<-list(x=tmp.u[2], y=tmp.u[4], xjust=0, yjust=0,outer=TRUE) } else { leg.pos<-"topright" if(file.out==TRUE) {png(file=paste(c(filename,".png"),collapse = ""),bg="white",res=300,width=1600,height=1600)} plot(fits,xlab=xlab,ylab=ylab,xmax=xmax,col=mypalette,cex.lab=cex.lab,cex.axis=cex.axis,lwd = blwd) } if(watermark==TRUE){text(0,0,paste(" created by biobits\n",Sys.Date()),col="grey",pos=4,cex=0.6)} title(main = list(paste(titel,sep="" ),cex= 0.8, col="#60759B"),sub=list(paste(subtext,sep="" ),cex= 0.6)) if (is.null(survtime)==FALSE) { legend(leg.pos,paste(names(MEDIANE),"\nMedian =",MEDIANE," n =",ANZAHL,"\n",survtimetext,"=",SURVTIMEPOINT,"\n"),lty=1,col=mypalette, adj = c(0, .6),cex= leg.cex,bty="n") } else { legend(leg.pos,paste(names(MEDIANE),"\nMedian =",MEDIANE," n =",ANZAHL,"\n"),lty=1,col=mypalette, adj = c(0, .6),cex= leg.cex,bty="n") } #Logrank test lograng_txt<-"" if (logrank==TRUE){ cdiff<-survdiff(Surv(time,status) ~daten[,gruppe], data =daten) cp.diff<-pchisq(cdiff$chisq, df=1, lower=FALSE) if(cp.diff<0.00001) {lograng_txt="Logrank: p < 0.00001"} else {if (cp.diff>0.001) {lograng_txt<-paste("Logrank: p = ",formatC(cp.diff,digits = 3,format = "f"))} else {lograng_txt<-paste("Logrank: p = ",formatC(cp.diff,digits = 5,format = "f"))} } pypos<-(par('xaxp')[2]/par('xaxp')[3])/2 pxpos<-0.12 text(pypos,pxpos,lograng_txt,pos=4,cex=leg.cex) } 
if(file.out==TRUE) {dev.off()} } }
################################################################################ ### Voorbereidingen.R ################################################################################ ### R code voor Tentamenanalyse Vrije Universiteit Amsterdam ### ### Bestandsnaam: Voorbereidingen.R ### Doel: De working directory wordt bepaald door de locatie van het project ### (vu-toetsanalyse) ### De specifieke functies en libraries voor dit project worden ingeladen ### ### Afhankelijkheden: geen ### ### Gebruikte datasets: geen ### ### Opmerkingen: geen ### ################################################################################ ### TODO: ### 1) Geen ### ################################################################################ ### Geschiedenis: ### 06-03-2018: DD: Aanmaken bestand ### 21-06-2018: DD: Functies toegevoegd om data te prepareren en analyseren ################################################################################ # installeren en laden benodigde packages ------------------------------------------ if(!require(pacman)){install.packages("pacman")} pacman::p_load(CTT, stringr, dplyr, psych, ggplot2, readxl, purrr, knitr, reshape2, kableExtra, tibble, PASWR, ggrepel, devtools, magrittr, profvis, data.table, XLConnect, tidyr) # # if(!require(XLConnectJars)){install.packages("XLConnectJars", dependencies = TRUE)} # if(!require(XLConnect)){install.packages("XLConnect", dependencies = TRUE)} # # # # laden libraries --------------------------------------------------------- # library(XLConnect) # Bepaal de netwerk directory op basis van het besturingsssyteem: windows = VU vunetid <- Sys.getenv("USERNAME") # Bepaal de netwerk directory op basis van het besturingsssyteem: windows = VU Network_directory_WIN <- paste0("G:/DSZ/OKZ/OTIR/Toetsen/",vunetid,"/") Network_directory_MAC <- "/Volumes/groups/DSZ/OKZ/OTIR/Toetsen/Werkmap/" if (.Platform$OS.type == "windows") { Network_directory <- Network_directory_WIN } else { Network_directory <- 
Network_directory_MAC } Network_directory # Functie om vragen na te kijken (met meerdere antwoorden goed) ----------- score_mc <- function (items, key, output.scored = TRUE, ID = NA, rel = TRUE, multiKeySep = "none", multiKeyScore = c("or", "dich")) { t <- as.vector(ID) t <- table(ID) if (any(t > 1)) { for (i in 1:length(ID)) { for (j in 1:nrow(subset(t, t > 1))) { if (ID[i] == (rownames(subset(t, t > 1)))[j]) { ID[i] <- paste(ID[i], "/", i) } } } warning("Duplicate ID exists; the duplicate ID has been renamed and retained in the calculation") } if (!missing(ID)) { if (length(ID) == nrow(items)) rownames(items) <- ID else warning("The length of ID vector does not match the sample size.") } if (missing(key)) { warning("No key provided, assuming pre-scored data.") scored <- apply(items, 2, function(XXX) { if (!is.numeric(XXX)) XXX <- as.numeric(XXX) XXX }) } else { if (length(key) == ncol(items)) { if (multiKeySep == "none") { scored <- t(apply(items, 1, function(X) { ifelse(X == (key), 1, 0) })) } else { scored <- array(0, dim = dim(items)) key <- purrr:: map_df(key, as.character) items <- purrr:: map_df(items, as.character) %>% as.data.frame() for (colcol in 1:ncol(items)) { thisKey <- strsplit(key[[colcol]], multiKeySep)[[1]] thisAnswer <- strsplit(items[, colcol], multiKeySep) thisScore <- lapply(thisAnswer, function(XXX, myKey = thisKey) { compare <- XXX %in% myKey oot <- all(c(compare, compare)) * 1 oot }) scored[, colcol] <- unlist(thisScore) } } } else stop("Number of items is not equal to the length of key.") } scores <- rowSums(scored) names(scores) <- paste("P", c(seq(1:nrow(items))), sep = "") if (!rel == FALSE) reli <- CTT:: reliability(scored) if (output.scored == FALSE & rel == FALSE) out <- list(score = scores) if (output.scored == FALSE & rel == TRUE) out <- list(score = scores, reliability = reli) if (output.scored == TRUE & rel == FALSE) out <- list(score = scores, scored = scored) if (output.scored == TRUE & rel == TRUE) out <- list(score = 
scores, reliability = reli, scored = scored) out } # Functie om teleformdata met 2 versies te prepareren --------------------- prep_mc_2 <- function (teleformdatabestand, aantal_vragen = NULL, aantal_alternatieven = NULL) { ## Zet data in goede volgorde teleformdata <- teleformdata %>% dplyr:: select(stud_nr, stud_naam, everything()) ##defineer aantal columns nrc <- nrq+2 ##Bepaal gokkans gk <- 1/nra #Maak 2 datasets op basis van versie en verwijder studenten zonder versie teleformdataA <- teleformdata %>% dplyr:: filter(Toetsversie == 1) teleformdataB <- teleformdata %>% dplyr:: filter(Toetsversie == 2) teleformdata_onbekend <- teleformdata %>% dplyr:: filter(Toetsversie >2) ##Maak bestand met studentnummer + Toetsversie voor latere koppeling aan score student_versies <- dplyr:: select(teleformdata, studentnummers=stud_nr, Toetsversie) %>% dplyr::filter(studentnummers > 0) ##Maak ruwe data file: letter data + sleutel teleformdata_new <- teleformdata[ c(1:nrc) ] teleformdataA_new <- teleformdataA[ c(1:nrc) ] teleformdataB_new <- teleformdataB[ c(1:nrc) ] ##Defineer vraagnamen aanwezige vragen vrn <- colnames(teleformdata_new[3:nrc]) ##Extraheer sleutel sleutel <- teleformdataA_new %>% dplyr:: filter(stud_nr == 0) %>% dplyr:: select(-c(stud_nr, stud_naam)) write.csv2(sleutel, file=paste0(Network_directory,"sleutel.csv"), row.names=FALSE) ##Bepaal nieuwe volgorde vragen B naar A versie volgorde <- read.csv2(paste0(Network_directory,"Volgordeomzetting.csv")) # volgorde <- read_xlsx(paste0(Network_directory,"Volgordeomzetting.xlsx")) %>% map_df(as.integer) orderB <- as.vector(volgorde$Bversie) ##Verwijder eerste twee kolommen (=studentnamen en studentnummers) teleformdataB_new <- teleformdataB_new %>% dplyr:: select(-c(stud_nr, stud_naam)) teleformdataA_new <- teleformdataA_new %>% dplyr:: select(-c(stud_nr, stud_naam)) ##Zet data B versie in volgorde Aversie en verander kolomnamen zodat deze ##overeen komen met A versie teleformdataB_correct <- 
teleformdataB_new[,orderB] names = c(colnames(sleutel[1:nrq])) colnames(teleformdataB_correct) = names ##Toevoegen studentnummers aan juiste volgorde b versies teleformdataB_correct <- cbind(teleformdataB$stud_nr, teleformdataB$stud_naam, teleformdataB_correct) %>% dplyr:: rename(stud_nr = 'teleformdataB$stud_nr', stud_naam = 'teleformdataB$stud_naam') ##Toevoegen studentnummers aan a versie teleformdataA <- cbind(teleformdataA$stud_nr, teleformdataA$stud_naam, teleformdataA_new) %>% dplyr:: rename(stud_nr = 'teleformdataA$stud_nr', stud_naam = 'teleformdataA$stud_naam') %>% dplyr:: filter(stud_nr > 0) ##Voeg data versie A en B samen teleformdata_correct <- rbind(teleformdataA, teleformdataB_correct) ###Extraheer data en verwijder eerste twee kolommen ## (=studentnamen en studentnummers) data <- teleformdata_correct %>% dplyr:: select(-c(stud_nr, stud_naam)) list(teleformdata_correct = teleformdata_correct, data = data, sleutel = sleutel, gokkans = gk, student_versies = student_versies) } # Functie om data van een tentamen met 2 versies te analyseren ------------ analyze_2 <- function (data, sleutel, teleformdata_correct, aantal_vragen, cesuur, gokkans, student_versies) { ## Vervang lege cellen met NA zodat deze goed gescoord worden data[] <- lapply(data, str_trim) is.na(data) <- data=='' ##Transformeren van ruwe letter_data naar score data + basale analyse scored_data <- score_mc(data, sleutel, multiKeySep = ",", output.scored = TRUE, rel = TRUE) studentnummers_namen <- teleformdata_correct[1:2] ##Toevoegen studentnummers en namen aan score data scored_datax <- cbind(studentnummers_namen, scored_data$scored) ##Toevoegen studentnummers aan totaalscore student total_score <- cbind(studentnummers_namen, scored_data[1]) total_score <- dplyr::rename(total_score, studentnummers = stud_nr) ##Transformeer scores naar cijfers total_score <- mutate(total_score, cijfer = (10-(nrq-total_score$score)/(nrq-cesuur)*(10-5.5))) total_score <- total_score %>% mutate(cijfer = 
replace(cijfer, cijfer<1, 1)) ##Wegschrijven score per student naar csv file write.csv2(total_score, file=paste0(Network_directory,"results_student.csv"), row.names=FALSE) ## Toon cronbachs alpha KR20 <- purrr:: pluck(scored_data, 2, "alpha") # KR20 <- scored_data$reliability$alpha ##Bereken KR-20 (75) ifactor <- 75/nrq KR20_75 <- round(CTT:: spearman.brown(KR20, input = ifactor, n.or.r = "n")$r.new, digits = 2) ##Maak itemanalyse itemanalyse <- itemAnalysis(as.data.frame(scored_data$scored), NA.Delete=FALSE)$itemReport %>% dplyr:: select(-bis) %>% dplyr::rename(P_waarde = itemMean, rir = pBis, "New Alpha" = alphaIfDeleted) ##NA vervangen met nullen itemanalyse[is.na(itemanalyse)] <- 0 ##Voeg P' column toe aan itemanalyse itemanalyse["Rel_P"] <- NA ##Bereken relatieve p-waarde for ( i in 1:nrq ) itemanalyse$Rel_P[i] <- ((-1/(gokkans-1))*itemanalyse$P_waarde[i]+1-(-1/(gokkans-1))) ##Toetswaarden wegschrijven geslaagd <- filter(total_score, cijfer >= 5.5) %>% nrow() toets <- tbl_df(scored_data$reliability[1:5]) %>% round(digits = 2) toets <- mutate(toets, KR20_75 = KR20_75) %>% dplyr:: select(nItem, nPerson, alpha, KR20_75, scaleMean, scaleSD) %>% dplyr:: mutate(meanRelP = round(summarise(itemanalyse, mean(Rel_P))$`mean(Rel_P)`, digits = 2), meanP = round(summarise(itemanalyse, mean(P_waarde))$`mean(P_waarde)`, digits = 2), perc_geslaagd = paste0(round(geslaagd/nrow(total_score)*100),"%"), cesuur = cesuur) ##Berekenen kappa kappa <- round(((KR20)*(toets$scaleSD^2)+(toets$scaleMean-cesuur)^2)/((toets$scaleSD^2) + (toets$scaleMean-cesuur)^2), digits = 2) toets <- mutate(toets, kappa = as.numeric(kappa)) ##Bepaal aantal studenten nrst <- toets$nPerson ## Vervang NA in data door lege cel data[is.na(data)] <- " " ##Toevoegen A-waarde aan itemanalyse itemanalyse["A"] <- NA itemanalyse["B"] <- NA if (nra >= 3) { itemanalyse["C"] <- NA } if (nra >= 4 ) { itemanalyse["D"] <- NA } if (nra >= 5) { itemanalyse["E"] <- NA } if (nra >= 6) { itemanalyse["F"] <- NA } for ( i in 
1:nrq ) itemanalyse$A[i] <- (sum(str_count(data[,i], "A"))/nrst) for ( i in 1:nrq ) itemanalyse$B[i] <- (sum(str_count(data[,i], "B"))/nrst) if (nra >= 3) { for ( i in 1:nrq ) itemanalyse$C[i] <- (sum(str_count(data[,i], "C"))/nrst) } if (nra >= 4) { for ( i in 1:nrq ) itemanalyse$D[i] <- (sum(str_count(data[,i], "D"))/nrst) } if (nra >= 5) { for ( i in 1:nrq ) itemanalyse$E[i] <- (sum(str_count(data[,i], "E"))/nrst) } if (nra >= 6) { for ( i in 1:nrq ) itemanalyse$'F'[i] <- (sum(str_count(data[,i], "F"))/nrst) } ##Voeg advies column toe aan itemanalyse itemanalyse[".A"] <- NA itemanalyse[".B"] <- NA itemanalyse[".C"] <- NA itemanalyse[".D"] <- NA itemanalyse[".E"] <- NA ##Genereer advies op basis van P- en rirwaarden for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] + itemanalyse$rir[i] < 0.4) ){ itemanalyse$.E[i] <- "E" } for ( i in 1:nrq ) if( (itemanalyse$P_waarde[i] < (gokkans+0.04))&(itemanalyse$rir[i] > 0.05) ){ itemanalyse$.D[i] <- "D" } for ( i in 1:nrq ) if( (itemanalyse$P_waarde[i] < 0.3)&((itemanalyse$rir[i] <= 0.05)&(itemanalyse$rir[i] >= -0.05)) ){ itemanalyse$.C[i] <- "C" } for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] < 0.4)&(itemanalyse$rir[i] <= 0.10) ){ itemanalyse$.A[i] <- "A" } for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] < 0.8)&(itemanalyse$rir[i] < -0.10) ){ itemanalyse$.B[i] <- "B" } ##Verander kolom volgorde itemanalyse if (nra == 2) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 3) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 4) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 5) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 6) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, 
E, 'F', P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } ##Verwijder NA's uit itemanalyse if (nra == 2) { itemanalyse[,8:12] <- sapply(itemanalyse[,8:12], as.character) itemanalyse[,8:12][is.na(itemanalyse[,8:12])] <- " " } if (nra == 3) { itemanalyse[,9:13] <- sapply(itemanalyse[,9:13], as.character) itemanalyse[,9:13][is.na(itemanalyse[,9:13])] <- " " } if (nra == 4) { itemanalyse[,10:14] <- sapply(itemanalyse[,10:14], as.character) itemanalyse[,10:14][is.na(itemanalyse[,10:14])] <- " " } if (nra == 5) { itemanalyse[,11:15] <- sapply(itemanalyse[,11:15], as.character) itemanalyse[,11:15][is.na(itemanalyse[,11:15])] <- " " } if (nra == 6) { itemanalyse[,12:16] <- sapply(itemanalyse[,12:16], as.character) itemanalyse[,12:16][is.na(itemanalyse[,12:16])] <- " " } ## Voeg gebruikte sleutel toe aan itemanalyse tsleutel <- as.data.frame(t(sleutel)) itemanalyse <- cbind(tsleutel, itemanalyse) %>% dplyr:: rename(Key = V1) itemanalyse <- dplyr:: mutate(itemanalyse, itemName = colnames(sleutel)) ##Bereken gemiddelde score en sd per toetsversie versie_score <- inner_join(total_score, student_versies, by = "studentnummers") %>% group_by(Toetsversie) %>% summarise(mean=mean(score), sd=sd(score), n=n()) ttest <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1], mean.y=versie_score$mean[2], s.y=versie_score$sd[2], n.y=versie_score$n[2]) try(if(ttest$p.value < 0.05) stop("Gemiddelde score versies verschillen significant")) list(itemanalyse = itemanalyse, toetswaarden = toets, cijfers = total_score) }
/Voorbereidingen.R
permissive
Dritty/toetsanalyse
R
false
false
15,779
r
################################################################################ ### Voorbereidingen.R ################################################################################ ### R code voor Tentamenanalyse Vrije Universiteit Amsterdam ### ### Bestandsnaam: Voorbereidingen.R ### Doel: De working directory wordt bepaald door de locatie van het project ### (vu-toetsanalyse) ### De specifieke functies en libraries voor dit project worden ingeladen ### ### Afhankelijkheden: geen ### ### Gebruikte datasets: geen ### ### Opmerkingen: geen ### ################################################################################ ### TODO: ### 1) Geen ### ################################################################################ ### Geschiedenis: ### 06-03-2018: DD: Aanmaken bestand ### 21-06-2018: DD: Functies toegevoegd om data te prepareren en analyseren ################################################################################ # installeren en laden benodigde packages ------------------------------------------ if(!require(pacman)){install.packages("pacman")} pacman::p_load(CTT, stringr, dplyr, psych, ggplot2, readxl, purrr, knitr, reshape2, kableExtra, tibble, PASWR, ggrepel, devtools, magrittr, profvis, data.table, XLConnect, tidyr) # # if(!require(XLConnectJars)){install.packages("XLConnectJars", dependencies = TRUE)} # if(!require(XLConnect)){install.packages("XLConnect", dependencies = TRUE)} # # # # laden libraries --------------------------------------------------------- # library(XLConnect) # Bepaal de netwerk directory op basis van het besturingsssyteem: windows = VU vunetid <- Sys.getenv("USERNAME") # Bepaal de netwerk directory op basis van het besturingsssyteem: windows = VU Network_directory_WIN <- paste0("G:/DSZ/OKZ/OTIR/Toetsen/",vunetid,"/") Network_directory_MAC <- "/Volumes/groups/DSZ/OKZ/OTIR/Toetsen/Werkmap/" if (.Platform$OS.type == "windows") { Network_directory <- Network_directory_WIN } else { Network_directory <- 
Network_directory_MAC } Network_directory # Functie om vragen na te kijken (met meerdere antwoorden goed) ----------- score_mc <- function (items, key, output.scored = TRUE, ID = NA, rel = TRUE, multiKeySep = "none", multiKeyScore = c("or", "dich")) { t <- as.vector(ID) t <- table(ID) if (any(t > 1)) { for (i in 1:length(ID)) { for (j in 1:nrow(subset(t, t > 1))) { if (ID[i] == (rownames(subset(t, t > 1)))[j]) { ID[i] <- paste(ID[i], "/", i) } } } warning("Duplicate ID exists; the duplicate ID has been renamed and retained in the calculation") } if (!missing(ID)) { if (length(ID) == nrow(items)) rownames(items) <- ID else warning("The length of ID vector does not match the sample size.") } if (missing(key)) { warning("No key provided, assuming pre-scored data.") scored <- apply(items, 2, function(XXX) { if (!is.numeric(XXX)) XXX <- as.numeric(XXX) XXX }) } else { if (length(key) == ncol(items)) { if (multiKeySep == "none") { scored <- t(apply(items, 1, function(X) { ifelse(X == (key), 1, 0) })) } else { scored <- array(0, dim = dim(items)) key <- purrr:: map_df(key, as.character) items <- purrr:: map_df(items, as.character) %>% as.data.frame() for (colcol in 1:ncol(items)) { thisKey <- strsplit(key[[colcol]], multiKeySep)[[1]] thisAnswer <- strsplit(items[, colcol], multiKeySep) thisScore <- lapply(thisAnswer, function(XXX, myKey = thisKey) { compare <- XXX %in% myKey oot <- all(c(compare, compare)) * 1 oot }) scored[, colcol] <- unlist(thisScore) } } } else stop("Number of items is not equal to the length of key.") } scores <- rowSums(scored) names(scores) <- paste("P", c(seq(1:nrow(items))), sep = "") if (!rel == FALSE) reli <- CTT:: reliability(scored) if (output.scored == FALSE & rel == FALSE) out <- list(score = scores) if (output.scored == FALSE & rel == TRUE) out <- list(score = scores, reliability = reli) if (output.scored == TRUE & rel == FALSE) out <- list(score = scores, scored = scored) if (output.scored == TRUE & rel == TRUE) out <- list(score = 
scores, reliability = reli, scored = scored) out } # Functie om teleformdata met 2 versies te prepareren --------------------- prep_mc_2 <- function (teleformdatabestand, aantal_vragen = NULL, aantal_alternatieven = NULL) { ## Zet data in goede volgorde teleformdata <- teleformdata %>% dplyr:: select(stud_nr, stud_naam, everything()) ##defineer aantal columns nrc <- nrq+2 ##Bepaal gokkans gk <- 1/nra #Maak 2 datasets op basis van versie en verwijder studenten zonder versie teleformdataA <- teleformdata %>% dplyr:: filter(Toetsversie == 1) teleformdataB <- teleformdata %>% dplyr:: filter(Toetsversie == 2) teleformdata_onbekend <- teleformdata %>% dplyr:: filter(Toetsversie >2) ##Maak bestand met studentnummer + Toetsversie voor latere koppeling aan score student_versies <- dplyr:: select(teleformdata, studentnummers=stud_nr, Toetsversie) %>% dplyr::filter(studentnummers > 0) ##Maak ruwe data file: letter data + sleutel teleformdata_new <- teleformdata[ c(1:nrc) ] teleformdataA_new <- teleformdataA[ c(1:nrc) ] teleformdataB_new <- teleformdataB[ c(1:nrc) ] ##Defineer vraagnamen aanwezige vragen vrn <- colnames(teleformdata_new[3:nrc]) ##Extraheer sleutel sleutel <- teleformdataA_new %>% dplyr:: filter(stud_nr == 0) %>% dplyr:: select(-c(stud_nr, stud_naam)) write.csv2(sleutel, file=paste0(Network_directory,"sleutel.csv"), row.names=FALSE) ##Bepaal nieuwe volgorde vragen B naar A versie volgorde <- read.csv2(paste0(Network_directory,"Volgordeomzetting.csv")) # volgorde <- read_xlsx(paste0(Network_directory,"Volgordeomzetting.xlsx")) %>% map_df(as.integer) orderB <- as.vector(volgorde$Bversie) ##Verwijder eerste twee kolommen (=studentnamen en studentnummers) teleformdataB_new <- teleformdataB_new %>% dplyr:: select(-c(stud_nr, stud_naam)) teleformdataA_new <- teleformdataA_new %>% dplyr:: select(-c(stud_nr, stud_naam)) ##Zet data B versie in volgorde Aversie en verander kolomnamen zodat deze ##overeen komen met A versie teleformdataB_correct <- 
teleformdataB_new[,orderB] names = c(colnames(sleutel[1:nrq])) colnames(teleformdataB_correct) = names ##Toevoegen studentnummers aan juiste volgorde b versies teleformdataB_correct <- cbind(teleformdataB$stud_nr, teleformdataB$stud_naam, teleformdataB_correct) %>% dplyr:: rename(stud_nr = 'teleformdataB$stud_nr', stud_naam = 'teleformdataB$stud_naam') ##Toevoegen studentnummers aan a versie teleformdataA <- cbind(teleformdataA$stud_nr, teleformdataA$stud_naam, teleformdataA_new) %>% dplyr:: rename(stud_nr = 'teleformdataA$stud_nr', stud_naam = 'teleformdataA$stud_naam') %>% dplyr:: filter(stud_nr > 0) ##Voeg data versie A en B samen teleformdata_correct <- rbind(teleformdataA, teleformdataB_correct) ###Extraheer data en verwijder eerste twee kolommen ## (=studentnamen en studentnummers) data <- teleformdata_correct %>% dplyr:: select(-c(stud_nr, stud_naam)) list(teleformdata_correct = teleformdata_correct, data = data, sleutel = sleutel, gokkans = gk, student_versies = student_versies) } # Functie om data van een tentamen met 2 versies te analyseren ------------ analyze_2 <- function (data, sleutel, teleformdata_correct, aantal_vragen, cesuur, gokkans, student_versies) { ## Vervang lege cellen met NA zodat deze goed gescoord worden data[] <- lapply(data, str_trim) is.na(data) <- data=='' ##Transformeren van ruwe letter_data naar score data + basale analyse scored_data <- score_mc(data, sleutel, multiKeySep = ",", output.scored = TRUE, rel = TRUE) studentnummers_namen <- teleformdata_correct[1:2] ##Toevoegen studentnummers en namen aan score data scored_datax <- cbind(studentnummers_namen, scored_data$scored) ##Toevoegen studentnummers aan totaalscore student total_score <- cbind(studentnummers_namen, scored_data[1]) total_score <- dplyr::rename(total_score, studentnummers = stud_nr) ##Transformeer scores naar cijfers total_score <- mutate(total_score, cijfer = (10-(nrq-total_score$score)/(nrq-cesuur)*(10-5.5))) total_score <- total_score %>% mutate(cijfer = 
replace(cijfer, cijfer<1, 1)) ##Wegschrijven score per student naar csv file write.csv2(total_score, file=paste0(Network_directory,"results_student.csv"), row.names=FALSE) ## Toon cronbachs alpha KR20 <- purrr:: pluck(scored_data, 2, "alpha") # KR20 <- scored_data$reliability$alpha ##Bereken KR-20 (75) ifactor <- 75/nrq KR20_75 <- round(CTT:: spearman.brown(KR20, input = ifactor, n.or.r = "n")$r.new, digits = 2) ##Maak itemanalyse itemanalyse <- itemAnalysis(as.data.frame(scored_data$scored), NA.Delete=FALSE)$itemReport %>% dplyr:: select(-bis) %>% dplyr::rename(P_waarde = itemMean, rir = pBis, "New Alpha" = alphaIfDeleted) ##NA vervangen met nullen itemanalyse[is.na(itemanalyse)] <- 0 ##Voeg P' column toe aan itemanalyse itemanalyse["Rel_P"] <- NA ##Bereken relatieve p-waarde for ( i in 1:nrq ) itemanalyse$Rel_P[i] <- ((-1/(gokkans-1))*itemanalyse$P_waarde[i]+1-(-1/(gokkans-1))) ##Toetswaarden wegschrijven geslaagd <- filter(total_score, cijfer >= 5.5) %>% nrow() toets <- tbl_df(scored_data$reliability[1:5]) %>% round(digits = 2) toets <- mutate(toets, KR20_75 = KR20_75) %>% dplyr:: select(nItem, nPerson, alpha, KR20_75, scaleMean, scaleSD) %>% dplyr:: mutate(meanRelP = round(summarise(itemanalyse, mean(Rel_P))$`mean(Rel_P)`, digits = 2), meanP = round(summarise(itemanalyse, mean(P_waarde))$`mean(P_waarde)`, digits = 2), perc_geslaagd = paste0(round(geslaagd/nrow(total_score)*100),"%"), cesuur = cesuur) ##Berekenen kappa kappa <- round(((KR20)*(toets$scaleSD^2)+(toets$scaleMean-cesuur)^2)/((toets$scaleSD^2) + (toets$scaleMean-cesuur)^2), digits = 2) toets <- mutate(toets, kappa = as.numeric(kappa)) ##Bepaal aantal studenten nrst <- toets$nPerson ## Vervang NA in data door lege cel data[is.na(data)] <- " " ##Toevoegen A-waarde aan itemanalyse itemanalyse["A"] <- NA itemanalyse["B"] <- NA if (nra >= 3) { itemanalyse["C"] <- NA } if (nra >= 4 ) { itemanalyse["D"] <- NA } if (nra >= 5) { itemanalyse["E"] <- NA } if (nra >= 6) { itemanalyse["F"] <- NA } for ( i in 
1:nrq ) itemanalyse$A[i] <- (sum(str_count(data[,i], "A"))/nrst) for ( i in 1:nrq ) itemanalyse$B[i] <- (sum(str_count(data[,i], "B"))/nrst) if (nra >= 3) { for ( i in 1:nrq ) itemanalyse$C[i] <- (sum(str_count(data[,i], "C"))/nrst) } if (nra >= 4) { for ( i in 1:nrq ) itemanalyse$D[i] <- (sum(str_count(data[,i], "D"))/nrst) } if (nra >= 5) { for ( i in 1:nrq ) itemanalyse$E[i] <- (sum(str_count(data[,i], "E"))/nrst) } if (nra >= 6) { for ( i in 1:nrq ) itemanalyse$'F'[i] <- (sum(str_count(data[,i], "F"))/nrst) } ##Voeg advies column toe aan itemanalyse itemanalyse[".A"] <- NA itemanalyse[".B"] <- NA itemanalyse[".C"] <- NA itemanalyse[".D"] <- NA itemanalyse[".E"] <- NA ##Genereer advies op basis van P- en rirwaarden for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] + itemanalyse$rir[i] < 0.4) ){ itemanalyse$.E[i] <- "E" } for ( i in 1:nrq ) if( (itemanalyse$P_waarde[i] < (gokkans+0.04))&(itemanalyse$rir[i] > 0.05) ){ itemanalyse$.D[i] <- "D" } for ( i in 1:nrq ) if( (itemanalyse$P_waarde[i] < 0.3)&((itemanalyse$rir[i] <= 0.05)&(itemanalyse$rir[i] >= -0.05)) ){ itemanalyse$.C[i] <- "C" } for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] < 0.4)&(itemanalyse$rir[i] <= 0.10) ){ itemanalyse$.A[i] <- "A" } for ( i in 1:nrq ) if( (itemanalyse$Rel_P[i] < 0.8)&(itemanalyse$rir[i] < -0.10) ){ itemanalyse$.B[i] <- "B" } ##Verander kolom volgorde itemanalyse if (nra == 2) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 3) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 4) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 5) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } if (nra == 6) { itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, 
E, 'F', P_waarde, Rel_P, rir, `New Alpha`, .A, .B, .C, .D, .E) } ##Verwijder NA's uit itemanalyse if (nra == 2) { itemanalyse[,8:12] <- sapply(itemanalyse[,8:12], as.character) itemanalyse[,8:12][is.na(itemanalyse[,8:12])] <- " " } if (nra == 3) { itemanalyse[,9:13] <- sapply(itemanalyse[,9:13], as.character) itemanalyse[,9:13][is.na(itemanalyse[,9:13])] <- " " } if (nra == 4) { itemanalyse[,10:14] <- sapply(itemanalyse[,10:14], as.character) itemanalyse[,10:14][is.na(itemanalyse[,10:14])] <- " " } if (nra == 5) { itemanalyse[,11:15] <- sapply(itemanalyse[,11:15], as.character) itemanalyse[,11:15][is.na(itemanalyse[,11:15])] <- " " } if (nra == 6) { itemanalyse[,12:16] <- sapply(itemanalyse[,12:16], as.character) itemanalyse[,12:16][is.na(itemanalyse[,12:16])] <- " " } ## Voeg gebruikte sleutel toe aan itemanalyse tsleutel <- as.data.frame(t(sleutel)) itemanalyse <- cbind(tsleutel, itemanalyse) %>% dplyr:: rename(Key = V1) itemanalyse <- dplyr:: mutate(itemanalyse, itemName = colnames(sleutel)) ##Bereken gemiddelde score en sd per toetsversie versie_score <- inner_join(total_score, student_versies, by = "studentnummers") %>% group_by(Toetsversie) %>% summarise(mean=mean(score), sd=sd(score), n=n()) ttest <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1], mean.y=versie_score$mean[2], s.y=versie_score$sd[2], n.y=versie_score$n[2]) try(if(ttest$p.value < 0.05) stop("Gemiddelde score versies verschillen significant")) list(itemanalyse = itemanalyse, toetswaarden = toets, cijfers = total_score) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_rls_data.R \name{get_rls_data} \alias{get_rls_data} \title{get_rls_data} \usage{ get_rls_data() } \value{ } \description{ get_rls_data }
/man/get_rls_data.Rd
permissive
SimonCoulombe/covidtwitterbot
R
false
true
220
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_rls_data.R \name{get_rls_data} \alias{get_rls_data} \title{get_rls_data} \usage{ get_rls_data() } \value{ } \description{ get_rls_data }
#' Transform a footnote mark to an HTML representation #' #' @noRd footnote_mark_to_html <- function(mark) { if (is.na(mark)) return("") # Generate the CSS classes needed on the basis of whether the # mark is one or more asterisk characters or anything else if (!grepl("^[\\*]+?$", mark)) { sup_class <- "gt_footnote_marks" } else { sup_class <- "gt_footnote_marks gt_asterisk" } as.character(htmltools::tags$sup(class = sup_class, mark)) } styles_to_html <- function(styles) { vapply( styles, FUN.VALUE = character(1), USE.NAMES = FALSE, FUN = function(x) { if (any(is.null(names(x)))) { style <- gsub(":", ": ", x, fixed = TRUE) } else if (all(names(x) != "")) { x <- cell_style_to_html(x) style <- paste0(names(x), ": ", x, ";", collapse = " ") %>% tidy_gsub(";;", ";") } else { style <- as.character(x) } style } ) %>% paste(collapse = " ") %>% tidy_gsub("\n", " ") } cell_style_to_html <- function(style) { UseMethod("cell_style_to_html") } cell_style_to_html.default <- function(style) { utils::str(style) cli::cli_abort("Implement `cell_style_to_html()` for the object above.") } # Upgrade `_styles` to gain a `html_style` column with CSS style rules add_css_styles <- function(data) { styles_tbl <- dt_styles_get(data = data) styles_tbl$html_style <- vapply(styles_tbl$styles, styles_to_html, character(1)) dt_styles_set(data = data, styles = styles_tbl) } #' For a given location, reduce the footnote marks to a single string #' #' @param fn_tbl The table containing all of the resolved footnote information. #' @param locname The location name for the footnotes. #' @param delimiter The delimiter to use for the coalesced footnote marks. 
#' @noRd coalesce_marks <- function( fn_tbl, locname, delimiter = "," ) { dplyr::filter(fn_tbl, locname == !!locname) %>% dplyr::summarize(fs_id_c = paste(fs_id, collapse = delimiter)) } # Get the attributes for the table tag get_table_defs <- function(data) { boxh <- dt_boxhead_get(data = data) # Get the `table-layout` value, which is set in `_options` table_style <- paste0( "table-layout: ", dt_options_get_value( data = data, option = "table_layout" ), ";" ) # In the case that column widths are not set for any columns, # there should not be a `<colgroup>` tag requirement if (length(unlist(boxh$column_width)) < 1) { return(list(table_style = NULL, table_colgroups = NULL)) } # Get the table's width (which or may not have been set) table_width <- dt_options_get_value( data = data, option = "table_width" ) widths <- boxh %>% dplyr::filter(type %in% c("default", "stub")) %>% dplyr::arrange(dplyr::desc(type)) %>% # This ensures that the `stub` is first .$column_width %>% unlist() # Stop function if all length dimensions (where provided) # don't conform to accepted CSS length definitions validate_css_lengths(widths) # If all of the widths are defined as px values for all columns, # then ensure that the width values are strictly respected as # absolute width values (even if a table width has already been set) if (all(grepl("px", widths)) && table_width == "auto") { table_width <- "0px" } if (all(grepl("%", widths)) && table_width == "auto") { table_width <- "100%" } if (table_width != "auto") { table_style <- paste(table_style, paste0("width: ", table_width), sep = "; ") } # Create the `<colgroup>` tag table_colgroups <- htmltools::tags$colgroup( lapply( widths, FUN = function(width) { htmltools::tags$col(style = htmltools::css(width = width)) }) ) list( table_style = table_style, table_colgroups = table_colgroups ) } create_caption_component_h <- function(data) { # Create the table caption if available table_caption <- dt_options_get_value(data = data, option = 
"table_caption") if (!is.null(table_caption)) { table_caption <- process_text(table_caption, context = "html") if (isTRUE(getOption("knitr.in.progress"))) { table_caption <- kable_caption(label = NULL, table_caption, "html") } if (!getOption("htmltools.preserve.raw", FALSE)) { # <!--/html_preserve--> ... <!--html_preserve--> is because bookdown scans # the .md file, looking for references in the form of: # <caption>(#tab:mytable) # Ref: # https://github.com/rstudio/bookdown/blob/00987215b7572def2f5cd73a623efc38f4f30ab7/R/html.R#L629 # https://github.com/rstudio/bookdown/blob/00987215b7572def2f5cd73a623efc38f4f30ab7/R/html.R#L667 # # Normally, the gt table in its entirety is excluded from the .md, to # prevent it from being corrupted by pandoc's md-to-html rendering. We do # this by wrapping the whole table in htmltools::htmlPreserve (I think this # actually happens in htmlwidgets). So the extra markup here is used to # temporarily suspend that protection, emit the caption (including the HTML # <caption> tag, which bookdown searches for), and then resume protection. htmltools::HTML(paste0( "<!--/html_preserve--><caption>", table_caption, "</caption><!--html_preserve-->" )) } else { htmltools::HTML(paste0("<caption>", table_caption, "</caption>")) } } else { NULL } } #' Create the heading component of a table #' #' The table heading component contains the title and possibly a subtitle; if #' there are no heading components defined this function will return an empty #' string. 
#' #' @noRd create_heading_component_h <- function(data) { # If there is no title or heading component, then return an empty string if (!dt_heading_has_title(data = data)) { return("") } heading <- dt_heading_get(data = data) footnotes_tbl <- dt_footnotes_get(data = data) styles_tbl <- dt_styles_get(data = data) subtitle_defined <- dt_heading_has_subtitle(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the footnote marks for the title if ("title" %in% footnotes_tbl$locname) { footnote_title_marks <- coalesce_marks( fn_tbl = footnotes_tbl, locname = "title" ) footnote_title_marks <- footnote_mark_to_html(mark = footnote_title_marks$fs_id_c) } else { footnote_title_marks <- "" } # Get the style attrs for the title if ("title" %in% styles_tbl$locname) { title_style_rows <- dplyr::filter(styles_tbl, locname == "title") if (nrow(title_style_rows) > 0) { title_styles <- title_style_rows$html_style } else { title_styles <- NULL } } else { title_styles <- NA_character_ } # Get the footnote marks for the subtitle if (subtitle_defined && "subtitle" %in% footnotes_tbl$locname) { footnote_subtitle_marks <- coalesce_marks( fn_tbl = footnotes_tbl, locname = "subtitle" ) footnote_subtitle_marks <- footnote_mark_to_html(mark = footnote_subtitle_marks$fs_id_c) } else { footnote_subtitle_marks <- "" } # Get the style attrs for the subtitle if (subtitle_defined && "subtitle" %in% styles_tbl$locname) { subtitle_style_rows <- dplyr::filter(styles_tbl, locname == "subtitle") if (nrow(subtitle_style_rows) > 0) { subtitle_styles <- subtitle_style_rows$html_style } else { subtitle_styles <- NULL } } else { subtitle_styles <- NA_character_ } title_classes <- c("gt_heading", "gt_title", "gt_font_normal") subtitle_classes <- title_classes %>% tidy_sub("title", "subtitle") if (!subtitle_defined) { title_classes <- c(title_classes, "gt_bottom_border") } else { subtitle_classes <- c(subtitle_classes, "gt_bottom_border") } 
title_row <- htmltools::tags$tr( htmltools::tags$td( colspan = n_cols_total, class = paste(title_classes, collapse = " "), style = title_styles, htmltools::HTML( paste0(heading$title, footnote_title_marks) ) ) ) if (subtitle_defined) { subtitle_row <- htmltools::tags$tr( htmltools::tags$td( colspan = n_cols_total, class = paste(subtitle_classes, collapse = " "), style = subtitle_styles, htmltools::HTML( paste0(heading$subtitle, footnote_subtitle_marks) ) ) ) } else { subtitle_row <- "" } htmltools::tags$thead( class = "gt_header", title_row, subtitle_row ) } #' Create the columns component of a table (HTML) #' #' @noRd create_columns_component_h <- function(data) { # Should the column labels be hidden? column_labels_hidden <- dt_options_get_value( data = data, option = "column_labels_hidden" ) if (column_labels_hidden) { return("") } stubh <- dt_stubhead_get(data = data) styles_tbl <- dt_styles_get(data = data) # Get vector representation of stub layout stub_layout <- get_stub_layout(data = data) # Determine the finalized number of spanner rows spanner_row_count <- dt_spanners_matrix_height( data = data, omit_columns_row = TRUE ) # Get the column alignments and also the alignment class names col_alignment <- dt_boxhead_get_vars_align_default(data = data) # Get the column headings headings_vars <- dt_boxhead_get_vars_default(data = data) headings_labels <- dt_boxhead_get_vars_labels_default(data = data) # Get the style attrs for the stubhead label stubhead_style_attrs <- subset(styles_tbl, locname == "stubhead") # Get the style attrs for the spanner column headings spanner_style_attrs <- subset(styles_tbl, locname == "columns_groups") # Get the style attrs for the spanner column headings column_style_attrs <- subset(styles_tbl, locname == "columns_columns") # If columns are present in the stub, then replace with a set stubhead # label or nothing if (length(stub_layout) > 0 && length(stubh$label) > 0) { headings_labels <- prepend_vec(headings_labels, stubh$label) 
headings_vars <- prepend_vec(headings_vars, "::stub") } else if (length(stub_layout) > 0) { headings_labels <- prepend_vec(headings_labels, "") headings_vars <- prepend_vec(headings_vars, "::stub") } stubhead_label_alignment <- "left" table_col_headings <- list() if (spanner_row_count < 1) { # Create the cell for the stubhead label if (length(stub_layout) > 0) { stubhead_style <- if (nrow(stubhead_style_attrs) > 0) { stubhead_style_attrs$html_style } else { NULL } table_col_headings[[length(table_col_headings) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", stubhead_label_alignment)), collapse = " "), rowspan = 1, colspan = length(stub_layout), style = stubhead_style, scope = ifelse(length(stub_layout) > 1, "colgroup", "col"), htmltools::HTML(headings_labels[1]) ) headings_vars <- headings_vars[-1] headings_labels <- headings_labels[-1] } for (i in seq_along(headings_vars)) { styles_column <- subset(column_style_attrs, colnum == i) column_style <- if (nrow(styles_column) > 0) { styles_column$html_style } else { NULL } table_col_headings[[length(table_col_headings) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", col_alignment[i])), collapse = " "), rowspan = 1, colspan = 1, style = column_style, scope = "col", htmltools::HTML(headings_labels[i]) ) } table_col_headings <- htmltools::tags$tr(table_col_headings) } if (spanner_row_count > 0) { spanners <- dt_spanners_print_matrix( data = data, include_hidden = FALSE ) spanner_ids <- dt_spanners_print_matrix( data = data, include_hidden = FALSE, ids = TRUE ) level_1_index <- nrow(spanners) - 1L # A list of <th> elements that will go in the first level; this # includes spanner labels and column labels for solo columns (don't # have spanner labels above them) level_1_spanners <- list() # A list of <th> elements that will go in the second row. This is # all column labels that DO have spanners above them. 
spanned_column_labels <- list() # Create the cell for the stubhead label if (length(stub_layout) > 0) { stubhead_style <- if (nrow(stubhead_style_attrs) > 0) { stubhead_style_attrs$html_style } else { NULL } level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", stubhead_label_alignment)), collapse = " "), rowspan = 2, colspan = length(stub_layout), style = stubhead_style, scope = ifelse(length(stub_layout) > 1, "colgroup", "col"), htmltools::HTML(headings_labels[1]) ) headings_vars <- headings_vars[-1] headings_labels <- headings_labels[-1] } # NOTE: `rle()` treats NA values as distinct from each other; # in other words, each NA value starts a new run of length 1 spanners_rle <- rle(spanner_ids[level_1_index, ]) # The `sig_cells` vector contains the indices of spanners' elements # where the value is either NA, or, is different than the previous value; # because NAs are distinct, every NA element will be present sig_cells sig_cells <- c(1, utils::head(cumsum(spanners_rle$lengths) + 1, -1)) # `colspans` matches `spanners` in length; each element is the # number of columns that the <th> at that position should span; if 0, # then skip the <th> at that position colspans <- ifelse( seq_along(spanners[level_1_index, ]) %in% sig_cells, # Index back into the rle result, working backward through sig_cells spanners_rle$lengths[match(seq_along(spanner_ids[level_1_index, ]), sig_cells)], 0 ) for (i in seq_along(headings_vars)) { if (is.na(spanner_ids[level_1_index, ][i])) { styles_heading <- dplyr::filter( styles_tbl, locname == "columns_columns", colname == headings_vars[i] ) heading_style <- if (nrow(styles_heading) > 0) { styles_heading$html_style } else { NULL } first_set_alignment <- dt_boxhead_get_alignment_by_var(data = data, headings_vars[i]) level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_col_heading", "gt_columns_bottom_border", 
paste0("gt_", first_set_alignment) ), collapse = " "), rowspan = 2, colspan = 1, style = heading_style, scope = "col", htmltools::HTML(headings_labels[i]) ) } else if (!is.na(spanner_ids[level_1_index, ][i])) { # If colspans[i] == 0, it means that a previous cell's # `colspan` will cover us if (colspans[i] > 0) { styles_spanners <- dplyr::filter( spanner_style_attrs, locname == "columns_groups", grpname == spanner_ids[level_1_index, ][i] ) spanner_style <- if (nrow(styles_spanners) > 0) { styles_spanners$html_style } else { NULL } level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_center", "gt_columns_top_border", "gt_column_spanner_outer" ), collapse = " " ), rowspan = 1, colspan = colspans[i], style = spanner_style, scope = ifelse(colspans[i] > 1, "colgroup", "col"), htmltools::tags$span( class = "gt_column_spanner", htmltools::HTML(spanners[level_1_index, ][i]) ) ) } } } solo_headings <- headings_vars[is.na(spanner_ids[level_1_index, ])] remaining_headings <- headings_vars[!(headings_vars %in% solo_headings)] remaining_headings_labels <- dt_boxhead_get(data = data) %>% dplyr::filter(var %in% remaining_headings) %>% dplyr::pull(column_label) %>% unlist() col_alignment <- col_alignment[-1][!(headings_vars %in% solo_headings)] if (length(remaining_headings) > 0) { spanned_column_labels <- c() for (j in seq(remaining_headings)) { styles_remaining <- dplyr::filter( styles_tbl, locname == "columns_columns", colname == remaining_headings[j] ) remaining_style <- if (nrow(styles_remaining) > 0) { styles_remaining$html_style } else { NULL } remaining_alignment <- dt_boxhead_get_alignment_by_var(data = data, remaining_headings[j]) spanned_column_labels[[length(spanned_column_labels) + 1]] <- htmltools::tags$th( class = paste( c( "gt_col_heading", "gt_columns_bottom_border", paste0("gt_", remaining_alignment) ), collapse = " " ), rowspan = 1, colspan = 1, style = remaining_style, scope = "col", 
htmltools::HTML(remaining_headings_labels[j]) ) } table_col_headings <- htmltools::tagList( htmltools::tags$tr(level_1_spanners), htmltools::tags$tr(spanned_column_labels) ) } else { # Create the `table_col_headings` HTML component table_col_headings <- htmltools::tags$tr(level_1_spanners) } } if (dt_spanners_matrix_height(data = data) > 2) { higher_spanner_rows_idx <- seq_len(nrow(spanner_ids) - 2) higher_spanner_rows <- htmltools::tagList() for (i in higher_spanner_rows_idx) { spanner_ids_row <- spanner_ids[i, ] spanners_row <- spanners[i, ] spanners_vars <- unique(spanner_ids_row[!is.na(spanner_ids_row)]) # Replace NA values with an empty string ID spanner_ids_row[is.na(spanner_ids_row)] <- "" spanners_rle <- rle(spanner_ids_row) sig_cells <- c(1, utils::head(cumsum(spanners_rle$lengths) + 1, -1)) colspans <- ifelse( seq_along(spanner_ids_row) %in% sig_cells, # Index back into the rle result, working backward through sig_cells spanners_rle$lengths[match(seq_along(spanner_ids_row), sig_cells)], 0 ) level_i_spanners <- list() for (j in seq_along(colspans)) { if (colspans[j] > 0) { styles_spanners <- dplyr::filter( styles_tbl, locname == "columns_groups", grpname == spanners_vars ) spanner_style <- if (nrow(styles_spanners) > 0) { styles_spanners$html_style } else { NULL } level_i_spanners[[length(level_i_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_center", "gt_columns_top_border", "gt_column_spanner_outer" ), collapse = " " ), rowspan = 1, colspan = colspans[j], style = spanner_style, scope = ifelse(colspans[j] > 1, "colgroup", "col"), if (spanner_ids_row[j] != "") { htmltools::tags$span( class = "gt_column_spanner", htmltools::HTML(spanners_row[j]) ) } ) } } if (length(stub_layout) > 0 && i == 1) { level_i_spanners <- htmltools::tagList( htmltools::tags$th( rowspan = max(higher_spanner_rows_idx), colspan = length(stub_layout), scope = ifelse(length(stub_layout) > 1, "colgroup", "col") ), level_i_spanners ) } higher_spanner_rows <- 
htmltools::tagList( higher_spanner_rows, htmltools::tagList(htmltools::tags$tr(level_i_spanners)) ) } table_col_headings <- htmltools::tagList( higher_spanner_rows, table_col_headings, ) } htmltools::tags$thead( class = "gt_col_headings", table_col_headings ) } #' Create the table body component (HTML) #' #' @noRd create_body_component_h <- function(data) { summaries_present <- dt_summary_exists(data = data) list_of_summaries <- dt_summary_df_get(data = data) groups_rows_df <- dt_groups_rows_get(data = data) styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the number of columns for the body cells only n_data_cols <- get_number_of_visible_data_columns(data = data) # Get vector representation of stub layout stub_layout <- get_stub_layout(data = data) has_stub_column <- "rowname" %in% stub_layout # Get a matrix of all cells in the body (not including summary cells) cell_matrix <- get_body_component_cell_matrix(data = data) # Get the number of rows in the body n_rows <- nrow(cell_matrix) # Get the column alignments and also the alignment class names col_alignment <- c( rep("right", length(stub_layout)), dt_boxhead_get_vars_align_default(data = data) ) alignment_classes <- paste0("gt_", col_alignment) # Define function to get a character vector of formatted cell # data (this includes the stub, if it is present) output_df_row_as_vec <- function(i) { cell_matrix <- cell_matrix[i, ] if ( "group_label" %in% stub_layout && !(i %in% groups_rows_df$row_start) ) { cell_matrix <- cell_matrix[-1] } cell_matrix } # Replace an NA group with an empty string if (any(is.na(groups_rows_df$group_label))) { groups_rows_df <- groups_rows_df %>% dplyr::mutate(group_label = ifelse(is.na(group_label), "", group_label)) } # Is the stub to be striped? table_stub_striped <- dt_options_get_value( data = data, option = "row_striping_include_stub" ) # Are the rows in the table body to be striped? 
table_body_striped <- dt_options_get_value( data = data, option = "row_striping_include_table_body" ) extra_classes_1 <- rep_len(list(NULL), n_cols_total) extra_classes_2 <- rep_len(list(if (table_body_striped) "gt_striped" else NULL), n_cols_total) if (length(stub_layout) > 0) { if ("rowname" %in% stub_layout) { row_label_col <- which(stub_layout == "rowname") extra_classes_1[[row_label_col]] <- "gt_stub" extra_classes_2[[row_label_col]] <- c("gt_stub", if (table_stub_striped) "gt_striped" else NULL) } } # Create a default vector of row span values for group labels as a column row_span_vals <- rep_len(list(NULL), n_cols_total) body_rows <- lapply( seq_len(n_rows), function(i) { body_section <- list() group_info <- groups_rows_df[groups_rows_df$row_start == i, c("group_id", "group_label")] if (nrow(group_info) == 0) { group_info <- NULL } group_id <- group_info[["group_id"]] group_label <- group_info[["group_label"]] # # Create a group heading row # if ( !is.null(group_id) && !("group_label" %in% stub_layout) ) { row_style <- dt_styles_pluck(styles_tbl, locname = "row_groups", grpname = group_id)$html_style group_class <- if (group_label == "") { "gt_empty_group_heading" } else { "gt_group_heading" } group_heading_row <- htmltools::tags$tr( class = "gt_group_heading_row", htmltools::tags$td( colspan = n_cols_total, class = group_class, style = row_style, htmltools::HTML(group_label) ) ) body_section <- append(body_section, list(group_heading_row)) } # # Create a body row # extra_classes <- if (i %% 2 == 0) extra_classes_2 else extra_classes_1 styles_row <- dt_styles_pluck(styles_tbl, locname = c("data", "stub"), rownum = i) row_styles <- build_row_styles( styles_resolved_row = styles_row, include_stub = has_stub_column, n_cols = n_data_cols ) if ("group_label" %in% stub_layout) { if (i %in% groups_rows_df$row_start) { # Modify the `extra_classes` list to include a class for # the row group column extra_classes[[1]] <- "gt_stub_row_group" # Obtain a one-row table 
that contains the beginning and # ending row index for the row group row_limits <- groups_rows_df %>% dplyr::filter(row_start == i) %>% dplyr::select(group_id, row_start, row_end) summary_rows_group_df <- list_of_summaries[["summary_df_display_list"]][[row_limits$group_id]] if (!is.null(summary_rows_group_df) && "rowname" %in% stub_layout) { summary_row_count <- nrow(summary_rows_group_df) } else { summary_row_count <- 0L } # Modify the `row_span_vals` list such that the first # element (the row group column) contains the number of rows to span row_span_vals[[1]] <- row_limits$row_end - row_limits$row_start + 1 + summary_row_count # Process row group styles if there is an indication that some # are present row_group_style <- dt_styles_pluck(styles_tbl, locname = "row_groups", grpname = group_id)$html_style # Add style of row group cell to vector row_styles <- c(list(row_group_style), row_styles) } else { # Remove first element of `alignment_classes` vector alignment_classes <- alignment_classes[-1] row_span_vals[[1]] <- NULL extra_classes[[1]] <- NULL } } body_row <- htmltools::tags$tr( class = if (!is.null(group_info)) "gt_row_group_first", htmltools::HTML( paste0( mapply( SIMPLIFY = FALSE, USE.NAMES = FALSE, output_df_row_as_vec(i = i), row_span_vals, alignment_classes, extra_classes, row_styles, FUN = function(x, row_span, alignment_class, extra_class, cell_style) { sprintf( "<%s %sclass=\"%s\"%s>%s</%s>", if ("gt_stub" %in% extra_class) { "th scope=\"row\"" } else { "td" }, if (is.null(row_span)) { "" } else { paste0( "rowspan=\"", htmltools::htmlEscape(row_span, attribute = TRUE), "\" " ) }, paste( c( "gt_row", htmltools::htmlEscape(alignment_class, attribute = TRUE), htmltools::htmlEscape(extra_class, attribute = TRUE) ), collapse = " " ), if (!any(nzchar(cell_style))) { "" } else { paste0( " style=\"", htmltools::htmlEscape(cell_style, attribute = TRUE), "\"" ) }, as.character(x), if ("gt_stub" %in% extra_class) { "th" } else { "td" } ) } ), collapse = "\n" 
) ) ) body_section <- append(body_section, list(body_row)) # # Add groupwise summary rows # if (summaries_present && i %in% groups_rows_df$row_end) { group_id <- groups_rows_df[ stats::na.omit(groups_rows_df$row_end == i), "group_id", drop = TRUE ] summary_section <- summary_row_tags_i( data = data, group_id = group_id ) body_section <- append(body_section, summary_section) } body_section } ) body_rows <- flatten_list(body_rows) # # Add grand summary rows # if (summaries_present && grand_summary_col %in% names(list_of_summaries$summary_df_display_list)) { grand_summary_section <- summary_row_tags_i( data = data, group_id = grand_summary_col ) body_rows <- c(body_rows, grand_summary_section) } htmltools::tags$tbody( class = "gt_table_body", body_rows ) } #' Create the table source note component (HTML) #' #' @noRd create_source_notes_component_h <- function(data) { source_notes <- dt_source_notes_get(data = data) if (is.null(source_notes)) { return("") } styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the style attrs for the source notes if ("source_notes" %in% styles_tbl$locname) { source_notes_style <- dplyr::filter(styles_tbl, locname == "source_notes") source_notes_styles <- if (nrow(source_notes_style) > 0) { paste(source_notes_style$html_style, collapse = " ") } else { NULL } } else { source_notes_styles <- NULL } # Get the source note multiline option multiline <- dt_options_get_value(data = data, option = "source_notes_multiline") # Get the source note separator option separator <- dt_options_get_value(data = data, option = "source_notes_sep") # Handle the multiline source notes case (each footnote takes up one line) if (multiline) { # Create the source notes component as a series of `<tr><td>` (one per # source note) inside of a `<tfoot>` return( htmltools::tags$tfoot( class = "gt_sourcenotes", lapply( source_notes, function(x) { htmltools::tags$tr( 
htmltools::tags$td( class = "gt_sourcenote", style = source_notes_styles, colspan = n_cols_total, htmltools::HTML(x) ) ) } ) ) ) } # Perform HTML escaping on the separator text and transform space # characters to non-breaking spaces separator <- gsub(" (?= )", "&nbsp;", separator, perl = TRUE) # Create the source notes component as a single `<tr><td>` inside # of a `<tfoot>` htmltools::tags$tfoot( htmltools::tags$tr( class = "gt_sourcenotes", style = source_notes_styles, htmltools::tags$td( class = "gt_sourcenote", colspan = n_cols_total, htmltools::tags$div( style = htmltools::css(`padding-bottom` = "2px"), htmltools::HTML(paste(source_notes, collapse = separator)) ) ) ) ) } #' Create the table footnote component (HTML) #' #' @noRd create_footnotes_component_h <- function(data) { footnotes_tbl <- dt_footnotes_get(data = data) # If the `footnotes_resolved` object has no # rows, then return an empty footnotes component if (nrow(footnotes_tbl) == 0) { return("") } styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) footnotes_tbl <- footnotes_tbl %>% dplyr::select(fs_id, footnotes) %>% dplyr::distinct() # Get the style attrs for the footnotes if ("footnotes" %in% styles_tbl$locname) { footnotes_style <- dplyr::filter(styles_tbl, locname == "footnotes") footnotes_styles <- if (nrow(footnotes_style) > 0) { paste(footnotes_style$html_style, collapse = " ") } else { NULL } } else { footnotes_styles <- NULL } # Get the footnote multiline option multiline <- dt_options_get_value(data = data, option = "footnotes_multiline") # Get the footnote separator option separator <- dt_options_get_value(data = data, option = "footnotes_sep") # Obtain vectors of footnote ID values (prerendered glyphs) and # the associated text footnote_ids <- footnotes_tbl[["fs_id"]] footnote_text <- footnotes_tbl[["footnotes"]] # Create a vector of HTML footnotes footnotes <- unlist( mapply( SIMPLIFY = FALSE, USE.NAMES 
= FALSE, footnote_ids, footnote_text, FUN = function(x, footnote_text) { as.character( htmltools::tagList( htmltools::HTML( paste0( footnote_mark_to_html(x), " ", process_text(footnote_text, context = "html") ), .noWS = c("after", "before") ) ) ) } ) ) # Handle the multiline footnotes case (each footnote takes up one line) if (multiline) { # Create the footnotes component as a series of `<tr><td>` (one per # footnote) inside of a `<tfoot>` return( htmltools::tags$tfoot( class = "gt_footnotes", lapply( footnotes, function(x) { htmltools::tags$tr( htmltools::tags$td( class = "gt_footnote", style = footnotes_styles, colspan = n_cols_total, htmltools::HTML(x) ) ) } ) ) ) } # Perform HTML escaping on the separator text and transform space # characters to non-breaking spaces separator <- gsub(" (?= )", "&nbsp;", separator, perl = TRUE) # Create the footnotes component as a single `<tr><td>` inside # of a `<tfoot>` htmltools::tags$tfoot( htmltools::tags$tr( class = "gt_footnotes", style = footnotes_styles, htmltools::tags$td( class = "gt_footnote", colspan = n_cols_total, htmltools::tags$div( style = htmltools::css(`padding-bottom` = "2px"), htmltools::HTML(paste(footnotes, collapse = separator)) ) ) ) ) } # Get a matrix of all body cells get_body_component_cell_matrix <- function(data) { body <- dt_body_get(data = data) stub_layout <- get_stub_layout(data = data) default_vars <- dt_boxhead_get_vars_default(data = data) body_matrix <- unname(as.matrix(body[, default_vars])) if (length(stub_layout) == 0) { return(body_matrix) } if ("rowname" %in% stub_layout) { body_matrix <- cbind( unname(as.matrix(body[, dt_boxhead_get_var_stub(data = data)])), body_matrix ) } if ("group_label" %in% stub_layout) { groups_rows_df <- dt_groups_rows_get(data = data) %>% dplyr::select(group_id, group_label, row_start) group_label_matrix <- dt_stub_df_get(data = data) %>% dplyr::select(-rowname, -group_label) %>% dplyr::inner_join(groups_rows_df, by = "group_id") %>% dplyr::mutate( row = 
dplyr::row_number(), built = dplyr::if_else(row_start != row, "", built) ) %>% dplyr::select(built) %>% as.matrix %>% unname() body_matrix <- cbind(group_label_matrix, body_matrix) } body_matrix } summary_row_tags_i <- function(data, group_id) { # Check that `group_id` isn't NULL and that length is exactly 1 if (is.null(group_id) || length(group_id) != 1) { cli::cli_abort("`group_id` cannot be `NULL` and must be of length 1.") } list_of_summaries <- dt_summary_df_get(data = data) styles_tbl <- dt_styles_get(data = data) # Obtain all of the visible (`"default"`), non-stub column names # for the table from the `boxh` object default_vars <- dt_boxhead_get_vars_default(data = data) stub_layout <- get_stub_layout(data = data) stub_is_2 <- length(stub_layout) > 1 summary_row_lines <- list() # In the below conditions # - `grand_summary_col` is a global variable (`"::GRAND_SUMMARY"`, assigned # in `dt_summary.R`) # - `group_id` might be passed in as NA when there are unnamed groups (this # can happen usually when using `tab_row_group()` to build these row groups) # and you cannot create summary rows for unnamed groups if (is.na(group_id)) { return(summary_row_lines) } else if ( group_id %in% names(list_of_summaries$summary_df_display_list) && group_id != grand_summary_col ) { summary_row_type <- "group" } else if (group_id == grand_summary_col) { summary_row_type <- "grand" } else { return(summary_row_lines) } # Obtain the summary data table specific to the group ID and # select the column named `rowname` and all of the visible columns summary_df <- dplyr::select( list_of_summaries$summary_df_display_list[[group_id]], .env$rowname_col_private, .env$default_vars ) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the number of columns for the body cells only n_data_cols <- get_number_of_visible_data_columns(data = data) if (stub_is_2) { n_cols_total <- n_cols_total - 1 } extra_classes <- rep_len(list(NULL), n_cols_total) 
extra_classes[[1]] <- "gt_stub" # Create a default list of colspan values for the summary row col_span_vals <- rep_len(list(NULL), n_cols_total) if (stub_is_2 && summary_row_type == "grand") { col_span_vals[[1]] <- 2L } # Get the column alignments and also the alignment class names col_alignment <- c("right", dt_boxhead_get_vars_align_default(data = data)) alignment_classes <- paste0("gt_", col_alignment) for (j in seq_len(nrow(summary_df))) { last_row_class <- "gt_last_summary_row" if (summary_row_type == "grand") { styles_resolved_row <- dt_styles_pluck(styles_tbl, locname = "grand_summary_cells", grpname = group_id, rownum = j) summary_row_class <- "gt_grand_summary_row" first_row_class <- "gt_first_grand_summary_row" } else { styles_resolved_row <- dt_styles_pluck(styles_tbl, locname = "summary_cells", grpname = group_id, grprow = j) summary_row_class <- "gt_summary_row" first_row_class <- if ("rowname" %in% stub_layout) "gt_first_summary_row thick" else "gt_first_summary_row" } row_styles <- build_row_styles( styles_resolved_row = styles_resolved_row, include_stub = TRUE, n_cols = n_data_cols ) summary_row_lines[[length(summary_row_lines) + 1]] <- htmltools::tags$tr( htmltools::HTML( paste0( mapply( SIMPLIFY = FALSE, USE.NAMES = FALSE, unname(unlist(summary_df[j, ])), col_span_vals, alignment_classes, extra_classes, row_styles, FUN = function(x, col_span, alignment_class, extra_class, cell_style) { extra_class <- c(extra_class, summary_row_class) if (j == 1) { extra_class <- c(extra_class, first_row_class) } if (j == nrow(summary_df)) { extra_class <- c(extra_class, last_row_class) } sprintf( "<td %sclass=\"%s\"%s>%s</td>", if (is.null(col_span)) { "" } else { paste0( "colspan=\"", htmltools::htmlEscape(col_span, attribute = TRUE), "\" ") }, htmltools::htmlEscape( paste( c("gt_row", alignment_class, extra_class), collapse = " " ), attribute = TRUE ), if (!any(nzchar(cell_style))) { "" } else { paste0( " style=\"", htmltools::htmlEscape(cell_style, attribute = 
TRUE), "\"" ) }, as.character(x) ) } ), collapse = "\n" ) ) ) } summary_row_lines } build_row_styles <- function( styles_resolved_row, include_stub, n_cols ) { # The styles_resolved_row data frame should contain the columns `colnum` and # `html_style`. Each colnum should match the number of a data column in the # output table; the first data column is number 1. No colnum should appear # more than once in styles_resolved_row. It's OK for a column not to appear in # styles_resolved_row, and it's OK for styles_resolved_row to have 0 rows. # # If `include_stub` is TRUE, then a row with column==0 will be used as the # stub style. # This function's implementation can't tolerate colnum of NA, or illegal # colnum values. Check and throw early. if (!isTRUE(all(styles_resolved_row$colnum %in% c(0, seq_len(n_cols)))) || any(duplicated(styles_resolved_row$colnum))) { cli::cli_abort( "`build_row_styles()` was called with invalid `colnum` values." ) } # This will hold the resulting styles result <- rep_len(list(NULL), n_cols) # The subset of styles_resolved_row that applies to data data_styles <- styles_resolved_row[styles_resolved_row$colnum > 0,] result[data_styles$colnum] <- data_styles$html_style # If a stub exists, we need to prepend a style (or NULL) to the result. if (include_stub) { stub_style <- styles_resolved_row[styles_resolved_row$colnum == 0,] result <- c(list(stub_style$html_style), result) } result } as_css_font_family_attr <- function(font_vec, value_only = FALSE) { fonts_spaces <- grepl(" ", font_vec) font_vec[fonts_spaces] <- paste_between( x = font_vec[fonts_spaces], x_2 = c("'", "'") ) value <- paste(font_vec, collapse = ", ") if (value_only) return(value) paste_between(value, x_2 = c("font-family: ", ";")) }
/R/utils_render_html.R
permissive
jooyoungseo/gt
R
false
false
44,060
r
#' Transform a footnote mark to an HTML representation #' #' @noRd footnote_mark_to_html <- function(mark) { if (is.na(mark)) return("") # Generate the CSS classes needed on the basis of whether the # mark is one or more asterisk characters or anything else if (!grepl("^[\\*]+?$", mark)) { sup_class <- "gt_footnote_marks" } else { sup_class <- "gt_footnote_marks gt_asterisk" } as.character(htmltools::tags$sup(class = sup_class, mark)) } styles_to_html <- function(styles) { vapply( styles, FUN.VALUE = character(1), USE.NAMES = FALSE, FUN = function(x) { if (any(is.null(names(x)))) { style <- gsub(":", ": ", x, fixed = TRUE) } else if (all(names(x) != "")) { x <- cell_style_to_html(x) style <- paste0(names(x), ": ", x, ";", collapse = " ") %>% tidy_gsub(";;", ";") } else { style <- as.character(x) } style } ) %>% paste(collapse = " ") %>% tidy_gsub("\n", " ") } cell_style_to_html <- function(style) { UseMethod("cell_style_to_html") } cell_style_to_html.default <- function(style) { utils::str(style) cli::cli_abort("Implement `cell_style_to_html()` for the object above.") } # Upgrade `_styles` to gain a `html_style` column with CSS style rules add_css_styles <- function(data) { styles_tbl <- dt_styles_get(data = data) styles_tbl$html_style <- vapply(styles_tbl$styles, styles_to_html, character(1)) dt_styles_set(data = data, styles = styles_tbl) } #' For a given location, reduce the footnote marks to a single string #' #' @param fn_tbl The table containing all of the resolved footnote information. #' @param locname The location name for the footnotes. #' @param delimiter The delimiter to use for the coalesced footnote marks. 
#' @noRd coalesce_marks <- function( fn_tbl, locname, delimiter = "," ) { dplyr::filter(fn_tbl, locname == !!locname) %>% dplyr::summarize(fs_id_c = paste(fs_id, collapse = delimiter)) } # Get the attributes for the table tag get_table_defs <- function(data) { boxh <- dt_boxhead_get(data = data) # Get the `table-layout` value, which is set in `_options` table_style <- paste0( "table-layout: ", dt_options_get_value( data = data, option = "table_layout" ), ";" ) # In the case that column widths are not set for any columns, # there should not be a `<colgroup>` tag requirement if (length(unlist(boxh$column_width)) < 1) { return(list(table_style = NULL, table_colgroups = NULL)) } # Get the table's width (which or may not have been set) table_width <- dt_options_get_value( data = data, option = "table_width" ) widths <- boxh %>% dplyr::filter(type %in% c("default", "stub")) %>% dplyr::arrange(dplyr::desc(type)) %>% # This ensures that the `stub` is first .$column_width %>% unlist() # Stop function if all length dimensions (where provided) # don't conform to accepted CSS length definitions validate_css_lengths(widths) # If all of the widths are defined as px values for all columns, # then ensure that the width values are strictly respected as # absolute width values (even if a table width has already been set) if (all(grepl("px", widths)) && table_width == "auto") { table_width <- "0px" } if (all(grepl("%", widths)) && table_width == "auto") { table_width <- "100%" } if (table_width != "auto") { table_style <- paste(table_style, paste0("width: ", table_width), sep = "; ") } # Create the `<colgroup>` tag table_colgroups <- htmltools::tags$colgroup( lapply( widths, FUN = function(width) { htmltools::tags$col(style = htmltools::css(width = width)) }) ) list( table_style = table_style, table_colgroups = table_colgroups ) } create_caption_component_h <- function(data) { # Create the table caption if available table_caption <- dt_options_get_value(data = data, option = 
"table_caption") if (!is.null(table_caption)) { table_caption <- process_text(table_caption, context = "html") if (isTRUE(getOption("knitr.in.progress"))) { table_caption <- kable_caption(label = NULL, table_caption, "html") } if (!getOption("htmltools.preserve.raw", FALSE)) { # <!--/html_preserve--> ... <!--html_preserve--> is because bookdown scans # the .md file, looking for references in the form of: # <caption>(#tab:mytable) # Ref: # https://github.com/rstudio/bookdown/blob/00987215b7572def2f5cd73a623efc38f4f30ab7/R/html.R#L629 # https://github.com/rstudio/bookdown/blob/00987215b7572def2f5cd73a623efc38f4f30ab7/R/html.R#L667 # # Normally, the gt table in its entirety is excluded from the .md, to # prevent it from being corrupted by pandoc's md-to-html rendering. We do # this by wrapping the whole table in htmltools::htmlPreserve (I think this # actually happens in htmlwidgets). So the extra markup here is used to # temporarily suspend that protection, emit the caption (including the HTML # <caption> tag, which bookdown searches for), and then resume protection. htmltools::HTML(paste0( "<!--/html_preserve--><caption>", table_caption, "</caption><!--html_preserve-->" )) } else { htmltools::HTML(paste0("<caption>", table_caption, "</caption>")) } } else { NULL } } #' Create the heading component of a table #' #' The table heading component contains the title and possibly a subtitle; if #' there are no heading components defined this function will return an empty #' string. 
#' #' @noRd create_heading_component_h <- function(data) { # If there is no title or heading component, then return an empty string if (!dt_heading_has_title(data = data)) { return("") } heading <- dt_heading_get(data = data) footnotes_tbl <- dt_footnotes_get(data = data) styles_tbl <- dt_styles_get(data = data) subtitle_defined <- dt_heading_has_subtitle(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the footnote marks for the title if ("title" %in% footnotes_tbl$locname) { footnote_title_marks <- coalesce_marks( fn_tbl = footnotes_tbl, locname = "title" ) footnote_title_marks <- footnote_mark_to_html(mark = footnote_title_marks$fs_id_c) } else { footnote_title_marks <- "" } # Get the style attrs for the title if ("title" %in% styles_tbl$locname) { title_style_rows <- dplyr::filter(styles_tbl, locname == "title") if (nrow(title_style_rows) > 0) { title_styles <- title_style_rows$html_style } else { title_styles <- NULL } } else { title_styles <- NA_character_ } # Get the footnote marks for the subtitle if (subtitle_defined && "subtitle" %in% footnotes_tbl$locname) { footnote_subtitle_marks <- coalesce_marks( fn_tbl = footnotes_tbl, locname = "subtitle" ) footnote_subtitle_marks <- footnote_mark_to_html(mark = footnote_subtitle_marks$fs_id_c) } else { footnote_subtitle_marks <- "" } # Get the style attrs for the subtitle if (subtitle_defined && "subtitle" %in% styles_tbl$locname) { subtitle_style_rows <- dplyr::filter(styles_tbl, locname == "subtitle") if (nrow(subtitle_style_rows) > 0) { subtitle_styles <- subtitle_style_rows$html_style } else { subtitle_styles <- NULL } } else { subtitle_styles <- NA_character_ } title_classes <- c("gt_heading", "gt_title", "gt_font_normal") subtitle_classes <- title_classes %>% tidy_sub("title", "subtitle") if (!subtitle_defined) { title_classes <- c(title_classes, "gt_bottom_border") } else { subtitle_classes <- c(subtitle_classes, "gt_bottom_border") } 
title_row <- htmltools::tags$tr( htmltools::tags$td( colspan = n_cols_total, class = paste(title_classes, collapse = " "), style = title_styles, htmltools::HTML( paste0(heading$title, footnote_title_marks) ) ) ) if (subtitle_defined) { subtitle_row <- htmltools::tags$tr( htmltools::tags$td( colspan = n_cols_total, class = paste(subtitle_classes, collapse = " "), style = subtitle_styles, htmltools::HTML( paste0(heading$subtitle, footnote_subtitle_marks) ) ) ) } else { subtitle_row <- "" } htmltools::tags$thead( class = "gt_header", title_row, subtitle_row ) } #' Create the columns component of a table (HTML) #' #' @noRd create_columns_component_h <- function(data) { # Should the column labels be hidden? column_labels_hidden <- dt_options_get_value( data = data, option = "column_labels_hidden" ) if (column_labels_hidden) { return("") } stubh <- dt_stubhead_get(data = data) styles_tbl <- dt_styles_get(data = data) # Get vector representation of stub layout stub_layout <- get_stub_layout(data = data) # Determine the finalized number of spanner rows spanner_row_count <- dt_spanners_matrix_height( data = data, omit_columns_row = TRUE ) # Get the column alignments and also the alignment class names col_alignment <- dt_boxhead_get_vars_align_default(data = data) # Get the column headings headings_vars <- dt_boxhead_get_vars_default(data = data) headings_labels <- dt_boxhead_get_vars_labels_default(data = data) # Get the style attrs for the stubhead label stubhead_style_attrs <- subset(styles_tbl, locname == "stubhead") # Get the style attrs for the spanner column headings spanner_style_attrs <- subset(styles_tbl, locname == "columns_groups") # Get the style attrs for the spanner column headings column_style_attrs <- subset(styles_tbl, locname == "columns_columns") # If columns are present in the stub, then replace with a set stubhead # label or nothing if (length(stub_layout) > 0 && length(stubh$label) > 0) { headings_labels <- prepend_vec(headings_labels, stubh$label) 
headings_vars <- prepend_vec(headings_vars, "::stub") } else if (length(stub_layout) > 0) { headings_labels <- prepend_vec(headings_labels, "") headings_vars <- prepend_vec(headings_vars, "::stub") } stubhead_label_alignment <- "left" table_col_headings <- list() if (spanner_row_count < 1) { # Create the cell for the stubhead label if (length(stub_layout) > 0) { stubhead_style <- if (nrow(stubhead_style_attrs) > 0) { stubhead_style_attrs$html_style } else { NULL } table_col_headings[[length(table_col_headings) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", stubhead_label_alignment)), collapse = " "), rowspan = 1, colspan = length(stub_layout), style = stubhead_style, scope = ifelse(length(stub_layout) > 1, "colgroup", "col"), htmltools::HTML(headings_labels[1]) ) headings_vars <- headings_vars[-1] headings_labels <- headings_labels[-1] } for (i in seq_along(headings_vars)) { styles_column <- subset(column_style_attrs, colnum == i) column_style <- if (nrow(styles_column) > 0) { styles_column$html_style } else { NULL } table_col_headings[[length(table_col_headings) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", col_alignment[i])), collapse = " "), rowspan = 1, colspan = 1, style = column_style, scope = "col", htmltools::HTML(headings_labels[i]) ) } table_col_headings <- htmltools::tags$tr(table_col_headings) } if (spanner_row_count > 0) { spanners <- dt_spanners_print_matrix( data = data, include_hidden = FALSE ) spanner_ids <- dt_spanners_print_matrix( data = data, include_hidden = FALSE, ids = TRUE ) level_1_index <- nrow(spanners) - 1L # A list of <th> elements that will go in the first level; this # includes spanner labels and column labels for solo columns (don't # have spanner labels above them) level_1_spanners <- list() # A list of <th> elements that will go in the second row. This is # all column labels that DO have spanners above them. 
spanned_column_labels <- list() # Create the cell for the stubhead label if (length(stub_layout) > 0) { stubhead_style <- if (nrow(stubhead_style_attrs) > 0) { stubhead_style_attrs$html_style } else { NULL } level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c("gt_col_heading", "gt_columns_bottom_border", paste0("gt_", stubhead_label_alignment)), collapse = " "), rowspan = 2, colspan = length(stub_layout), style = stubhead_style, scope = ifelse(length(stub_layout) > 1, "colgroup", "col"), htmltools::HTML(headings_labels[1]) ) headings_vars <- headings_vars[-1] headings_labels <- headings_labels[-1] } # NOTE: `rle()` treats NA values as distinct from each other; # in other words, each NA value starts a new run of length 1 spanners_rle <- rle(spanner_ids[level_1_index, ]) # The `sig_cells` vector contains the indices of spanners' elements # where the value is either NA, or, is different than the previous value; # because NAs are distinct, every NA element will be present sig_cells sig_cells <- c(1, utils::head(cumsum(spanners_rle$lengths) + 1, -1)) # `colspans` matches `spanners` in length; each element is the # number of columns that the <th> at that position should span; if 0, # then skip the <th> at that position colspans <- ifelse( seq_along(spanners[level_1_index, ]) %in% sig_cells, # Index back into the rle result, working backward through sig_cells spanners_rle$lengths[match(seq_along(spanner_ids[level_1_index, ]), sig_cells)], 0 ) for (i in seq_along(headings_vars)) { if (is.na(spanner_ids[level_1_index, ][i])) { styles_heading <- dplyr::filter( styles_tbl, locname == "columns_columns", colname == headings_vars[i] ) heading_style <- if (nrow(styles_heading) > 0) { styles_heading$html_style } else { NULL } first_set_alignment <- dt_boxhead_get_alignment_by_var(data = data, headings_vars[i]) level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_col_heading", "gt_columns_bottom_border", 
paste0("gt_", first_set_alignment) ), collapse = " "), rowspan = 2, colspan = 1, style = heading_style, scope = "col", htmltools::HTML(headings_labels[i]) ) } else if (!is.na(spanner_ids[level_1_index, ][i])) { # If colspans[i] == 0, it means that a previous cell's # `colspan` will cover us if (colspans[i] > 0) { styles_spanners <- dplyr::filter( spanner_style_attrs, locname == "columns_groups", grpname == spanner_ids[level_1_index, ][i] ) spanner_style <- if (nrow(styles_spanners) > 0) { styles_spanners$html_style } else { NULL } level_1_spanners[[length(level_1_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_center", "gt_columns_top_border", "gt_column_spanner_outer" ), collapse = " " ), rowspan = 1, colspan = colspans[i], style = spanner_style, scope = ifelse(colspans[i] > 1, "colgroup", "col"), htmltools::tags$span( class = "gt_column_spanner", htmltools::HTML(spanners[level_1_index, ][i]) ) ) } } } solo_headings <- headings_vars[is.na(spanner_ids[level_1_index, ])] remaining_headings <- headings_vars[!(headings_vars %in% solo_headings)] remaining_headings_labels <- dt_boxhead_get(data = data) %>% dplyr::filter(var %in% remaining_headings) %>% dplyr::pull(column_label) %>% unlist() col_alignment <- col_alignment[-1][!(headings_vars %in% solo_headings)] if (length(remaining_headings) > 0) { spanned_column_labels <- c() for (j in seq(remaining_headings)) { styles_remaining <- dplyr::filter( styles_tbl, locname == "columns_columns", colname == remaining_headings[j] ) remaining_style <- if (nrow(styles_remaining) > 0) { styles_remaining$html_style } else { NULL } remaining_alignment <- dt_boxhead_get_alignment_by_var(data = data, remaining_headings[j]) spanned_column_labels[[length(spanned_column_labels) + 1]] <- htmltools::tags$th( class = paste( c( "gt_col_heading", "gt_columns_bottom_border", paste0("gt_", remaining_alignment) ), collapse = " " ), rowspan = 1, colspan = 1, style = remaining_style, scope = "col", 
htmltools::HTML(remaining_headings_labels[j]) ) } table_col_headings <- htmltools::tagList( htmltools::tags$tr(level_1_spanners), htmltools::tags$tr(spanned_column_labels) ) } else { # Create the `table_col_headings` HTML component table_col_headings <- htmltools::tags$tr(level_1_spanners) } } if (dt_spanners_matrix_height(data = data) > 2) { higher_spanner_rows_idx <- seq_len(nrow(spanner_ids) - 2) higher_spanner_rows <- htmltools::tagList() for (i in higher_spanner_rows_idx) { spanner_ids_row <- spanner_ids[i, ] spanners_row <- spanners[i, ] spanners_vars <- unique(spanner_ids_row[!is.na(spanner_ids_row)]) # Replace NA values with an empty string ID spanner_ids_row[is.na(spanner_ids_row)] <- "" spanners_rle <- rle(spanner_ids_row) sig_cells <- c(1, utils::head(cumsum(spanners_rle$lengths) + 1, -1)) colspans <- ifelse( seq_along(spanner_ids_row) %in% sig_cells, # Index back into the rle result, working backward through sig_cells spanners_rle$lengths[match(seq_along(spanner_ids_row), sig_cells)], 0 ) level_i_spanners <- list() for (j in seq_along(colspans)) { if (colspans[j] > 0) { styles_spanners <- dplyr::filter( styles_tbl, locname == "columns_groups", grpname == spanners_vars ) spanner_style <- if (nrow(styles_spanners) > 0) { styles_spanners$html_style } else { NULL } level_i_spanners[[length(level_i_spanners) + 1]] <- htmltools::tags$th( class = paste( c( "gt_center", "gt_columns_top_border", "gt_column_spanner_outer" ), collapse = " " ), rowspan = 1, colspan = colspans[j], style = spanner_style, scope = ifelse(colspans[j] > 1, "colgroup", "col"), if (spanner_ids_row[j] != "") { htmltools::tags$span( class = "gt_column_spanner", htmltools::HTML(spanners_row[j]) ) } ) } } if (length(stub_layout) > 0 && i == 1) { level_i_spanners <- htmltools::tagList( htmltools::tags$th( rowspan = max(higher_spanner_rows_idx), colspan = length(stub_layout), scope = ifelse(length(stub_layout) > 1, "colgroup", "col") ), level_i_spanners ) } higher_spanner_rows <- 
htmltools::tagList( higher_spanner_rows, htmltools::tagList(htmltools::tags$tr(level_i_spanners)) ) } table_col_headings <- htmltools::tagList( higher_spanner_rows, table_col_headings, ) } htmltools::tags$thead( class = "gt_col_headings", table_col_headings ) } #' Create the table body component (HTML) #' #' @noRd create_body_component_h <- function(data) { summaries_present <- dt_summary_exists(data = data) list_of_summaries <- dt_summary_df_get(data = data) groups_rows_df <- dt_groups_rows_get(data = data) styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the number of columns for the body cells only n_data_cols <- get_number_of_visible_data_columns(data = data) # Get vector representation of stub layout stub_layout <- get_stub_layout(data = data) has_stub_column <- "rowname" %in% stub_layout # Get a matrix of all cells in the body (not including summary cells) cell_matrix <- get_body_component_cell_matrix(data = data) # Get the number of rows in the body n_rows <- nrow(cell_matrix) # Get the column alignments and also the alignment class names col_alignment <- c( rep("right", length(stub_layout)), dt_boxhead_get_vars_align_default(data = data) ) alignment_classes <- paste0("gt_", col_alignment) # Define function to get a character vector of formatted cell # data (this includes the stub, if it is present) output_df_row_as_vec <- function(i) { cell_matrix <- cell_matrix[i, ] if ( "group_label" %in% stub_layout && !(i %in% groups_rows_df$row_start) ) { cell_matrix <- cell_matrix[-1] } cell_matrix } # Replace an NA group with an empty string if (any(is.na(groups_rows_df$group_label))) { groups_rows_df <- groups_rows_df %>% dplyr::mutate(group_label = ifelse(is.na(group_label), "", group_label)) } # Is the stub to be striped? table_stub_striped <- dt_options_get_value( data = data, option = "row_striping_include_stub" ) # Are the rows in the table body to be striped? 
table_body_striped <- dt_options_get_value( data = data, option = "row_striping_include_table_body" ) extra_classes_1 <- rep_len(list(NULL), n_cols_total) extra_classes_2 <- rep_len(list(if (table_body_striped) "gt_striped" else NULL), n_cols_total) if (length(stub_layout) > 0) { if ("rowname" %in% stub_layout) { row_label_col <- which(stub_layout == "rowname") extra_classes_1[[row_label_col]] <- "gt_stub" extra_classes_2[[row_label_col]] <- c("gt_stub", if (table_stub_striped) "gt_striped" else NULL) } } # Create a default vector of row span values for group labels as a column row_span_vals <- rep_len(list(NULL), n_cols_total) body_rows <- lapply( seq_len(n_rows), function(i) { body_section <- list() group_info <- groups_rows_df[groups_rows_df$row_start == i, c("group_id", "group_label")] if (nrow(group_info) == 0) { group_info <- NULL } group_id <- group_info[["group_id"]] group_label <- group_info[["group_label"]] # # Create a group heading row # if ( !is.null(group_id) && !("group_label" %in% stub_layout) ) { row_style <- dt_styles_pluck(styles_tbl, locname = "row_groups", grpname = group_id)$html_style group_class <- if (group_label == "") { "gt_empty_group_heading" } else { "gt_group_heading" } group_heading_row <- htmltools::tags$tr( class = "gt_group_heading_row", htmltools::tags$td( colspan = n_cols_total, class = group_class, style = row_style, htmltools::HTML(group_label) ) ) body_section <- append(body_section, list(group_heading_row)) } # # Create a body row # extra_classes <- if (i %% 2 == 0) extra_classes_2 else extra_classes_1 styles_row <- dt_styles_pluck(styles_tbl, locname = c("data", "stub"), rownum = i) row_styles <- build_row_styles( styles_resolved_row = styles_row, include_stub = has_stub_column, n_cols = n_data_cols ) if ("group_label" %in% stub_layout) { if (i %in% groups_rows_df$row_start) { # Modify the `extra_classes` list to include a class for # the row group column extra_classes[[1]] <- "gt_stub_row_group" # Obtain a one-row table 
that contains the beginning and # ending row index for the row group row_limits <- groups_rows_df %>% dplyr::filter(row_start == i) %>% dplyr::select(group_id, row_start, row_end) summary_rows_group_df <- list_of_summaries[["summary_df_display_list"]][[row_limits$group_id]] if (!is.null(summary_rows_group_df) && "rowname" %in% stub_layout) { summary_row_count <- nrow(summary_rows_group_df) } else { summary_row_count <- 0L } # Modify the `row_span_vals` list such that the first # element (the row group column) contains the number of rows to span row_span_vals[[1]] <- row_limits$row_end - row_limits$row_start + 1 + summary_row_count # Process row group styles if there is an indication that some # are present row_group_style <- dt_styles_pluck(styles_tbl, locname = "row_groups", grpname = group_id)$html_style # Add style of row group cell to vector row_styles <- c(list(row_group_style), row_styles) } else { # Remove first element of `alignment_classes` vector alignment_classes <- alignment_classes[-1] row_span_vals[[1]] <- NULL extra_classes[[1]] <- NULL } } body_row <- htmltools::tags$tr( class = if (!is.null(group_info)) "gt_row_group_first", htmltools::HTML( paste0( mapply( SIMPLIFY = FALSE, USE.NAMES = FALSE, output_df_row_as_vec(i = i), row_span_vals, alignment_classes, extra_classes, row_styles, FUN = function(x, row_span, alignment_class, extra_class, cell_style) { sprintf( "<%s %sclass=\"%s\"%s>%s</%s>", if ("gt_stub" %in% extra_class) { "th scope=\"row\"" } else { "td" }, if (is.null(row_span)) { "" } else { paste0( "rowspan=\"", htmltools::htmlEscape(row_span, attribute = TRUE), "\" " ) }, paste( c( "gt_row", htmltools::htmlEscape(alignment_class, attribute = TRUE), htmltools::htmlEscape(extra_class, attribute = TRUE) ), collapse = " " ), if (!any(nzchar(cell_style))) { "" } else { paste0( " style=\"", htmltools::htmlEscape(cell_style, attribute = TRUE), "\"" ) }, as.character(x), if ("gt_stub" %in% extra_class) { "th" } else { "td" } ) } ), collapse = "\n" 
) ) ) body_section <- append(body_section, list(body_row)) # # Add groupwise summary rows # if (summaries_present && i %in% groups_rows_df$row_end) { group_id <- groups_rows_df[ stats::na.omit(groups_rows_df$row_end == i), "group_id", drop = TRUE ] summary_section <- summary_row_tags_i( data = data, group_id = group_id ) body_section <- append(body_section, summary_section) } body_section } ) body_rows <- flatten_list(body_rows) # # Add grand summary rows # if (summaries_present && grand_summary_col %in% names(list_of_summaries$summary_df_display_list)) { grand_summary_section <- summary_row_tags_i( data = data, group_id = grand_summary_col ) body_rows <- c(body_rows, grand_summary_section) } htmltools::tags$tbody( class = "gt_table_body", body_rows ) } #' Create the table source note component (HTML) #' #' @noRd create_source_notes_component_h <- function(data) { source_notes <- dt_source_notes_get(data = data) if (is.null(source_notes)) { return("") } styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the style attrs for the source notes if ("source_notes" %in% styles_tbl$locname) { source_notes_style <- dplyr::filter(styles_tbl, locname == "source_notes") source_notes_styles <- if (nrow(source_notes_style) > 0) { paste(source_notes_style$html_style, collapse = " ") } else { NULL } } else { source_notes_styles <- NULL } # Get the source note multiline option multiline <- dt_options_get_value(data = data, option = "source_notes_multiline") # Get the source note separator option separator <- dt_options_get_value(data = data, option = "source_notes_sep") # Handle the multiline source notes case (each footnote takes up one line) if (multiline) { # Create the source notes component as a series of `<tr><td>` (one per # source note) inside of a `<tfoot>` return( htmltools::tags$tfoot( class = "gt_sourcenotes", lapply( source_notes, function(x) { htmltools::tags$tr( 
htmltools::tags$td( class = "gt_sourcenote", style = source_notes_styles, colspan = n_cols_total, htmltools::HTML(x) ) ) } ) ) ) } # Perform HTML escaping on the separator text and transform space # characters to non-breaking spaces separator <- gsub(" (?= )", "&nbsp;", separator, perl = TRUE) # Create the source notes component as a single `<tr><td>` inside # of a `<tfoot>` htmltools::tags$tfoot( htmltools::tags$tr( class = "gt_sourcenotes", style = source_notes_styles, htmltools::tags$td( class = "gt_sourcenote", colspan = n_cols_total, htmltools::tags$div( style = htmltools::css(`padding-bottom` = "2px"), htmltools::HTML(paste(source_notes, collapse = separator)) ) ) ) ) } #' Create the table footnote component (HTML) #' #' @noRd create_footnotes_component_h <- function(data) { footnotes_tbl <- dt_footnotes_get(data = data) # If the `footnotes_resolved` object has no # rows, then return an empty footnotes component if (nrow(footnotes_tbl) == 0) { return("") } styles_tbl <- dt_styles_get(data = data) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) footnotes_tbl <- footnotes_tbl %>% dplyr::select(fs_id, footnotes) %>% dplyr::distinct() # Get the style attrs for the footnotes if ("footnotes" %in% styles_tbl$locname) { footnotes_style <- dplyr::filter(styles_tbl, locname == "footnotes") footnotes_styles <- if (nrow(footnotes_style) > 0) { paste(footnotes_style$html_style, collapse = " ") } else { NULL } } else { footnotes_styles <- NULL } # Get the footnote multiline option multiline <- dt_options_get_value(data = data, option = "footnotes_multiline") # Get the footnote separator option separator <- dt_options_get_value(data = data, option = "footnotes_sep") # Obtain vectors of footnote ID values (prerendered glyphs) and # the associated text footnote_ids <- footnotes_tbl[["fs_id"]] footnote_text <- footnotes_tbl[["footnotes"]] # Create a vector of HTML footnotes footnotes <- unlist( mapply( SIMPLIFY = FALSE, USE.NAMES 
= FALSE, footnote_ids, footnote_text, FUN = function(x, footnote_text) { as.character( htmltools::tagList( htmltools::HTML( paste0( footnote_mark_to_html(x), " ", process_text(footnote_text, context = "html") ), .noWS = c("after", "before") ) ) ) } ) ) # Handle the multiline footnotes case (each footnote takes up one line) if (multiline) { # Create the footnotes component as a series of `<tr><td>` (one per # footnote) inside of a `<tfoot>` return( htmltools::tags$tfoot( class = "gt_footnotes", lapply( footnotes, function(x) { htmltools::tags$tr( htmltools::tags$td( class = "gt_footnote", style = footnotes_styles, colspan = n_cols_total, htmltools::HTML(x) ) ) } ) ) ) } # Perform HTML escaping on the separator text and transform space # characters to non-breaking spaces separator <- gsub(" (?= )", "&nbsp;", separator, perl = TRUE) # Create the footnotes component as a single `<tr><td>` inside # of a `<tfoot>` htmltools::tags$tfoot( htmltools::tags$tr( class = "gt_footnotes", style = footnotes_styles, htmltools::tags$td( class = "gt_footnote", colspan = n_cols_total, htmltools::tags$div( style = htmltools::css(`padding-bottom` = "2px"), htmltools::HTML(paste(footnotes, collapse = separator)) ) ) ) ) } # Get a matrix of all body cells get_body_component_cell_matrix <- function(data) { body <- dt_body_get(data = data) stub_layout <- get_stub_layout(data = data) default_vars <- dt_boxhead_get_vars_default(data = data) body_matrix <- unname(as.matrix(body[, default_vars])) if (length(stub_layout) == 0) { return(body_matrix) } if ("rowname" %in% stub_layout) { body_matrix <- cbind( unname(as.matrix(body[, dt_boxhead_get_var_stub(data = data)])), body_matrix ) } if ("group_label" %in% stub_layout) { groups_rows_df <- dt_groups_rows_get(data = data) %>% dplyr::select(group_id, group_label, row_start) group_label_matrix <- dt_stub_df_get(data = data) %>% dplyr::select(-rowname, -group_label) %>% dplyr::inner_join(groups_rows_df, by = "group_id") %>% dplyr::mutate( row = 
dplyr::row_number(), built = dplyr::if_else(row_start != row, "", built) ) %>% dplyr::select(built) %>% as.matrix %>% unname() body_matrix <- cbind(group_label_matrix, body_matrix) } body_matrix } summary_row_tags_i <- function(data, group_id) { # Check that `group_id` isn't NULL and that length is exactly 1 if (is.null(group_id) || length(group_id) != 1) { cli::cli_abort("`group_id` cannot be `NULL` and must be of length 1.") } list_of_summaries <- dt_summary_df_get(data = data) styles_tbl <- dt_styles_get(data = data) # Obtain all of the visible (`"default"`), non-stub column names # for the table from the `boxh` object default_vars <- dt_boxhead_get_vars_default(data = data) stub_layout <- get_stub_layout(data = data) stub_is_2 <- length(stub_layout) > 1 summary_row_lines <- list() # In the below conditions # - `grand_summary_col` is a global variable (`"::GRAND_SUMMARY"`, assigned # in `dt_summary.R`) # - `group_id` might be passed in as NA when there are unnamed groups (this # can happen usually when using `tab_row_group()` to build these row groups) # and you cannot create summary rows for unnamed groups if (is.na(group_id)) { return(summary_row_lines) } else if ( group_id %in% names(list_of_summaries$summary_df_display_list) && group_id != grand_summary_col ) { summary_row_type <- "group" } else if (group_id == grand_summary_col) { summary_row_type <- "grand" } else { return(summary_row_lines) } # Obtain the summary data table specific to the group ID and # select the column named `rowname` and all of the visible columns summary_df <- dplyr::select( list_of_summaries$summary_df_display_list[[group_id]], .env$rowname_col_private, .env$default_vars ) # Get effective number of columns n_cols_total <- get_effective_number_of_columns(data = data) # Get the number of columns for the body cells only n_data_cols <- get_number_of_visible_data_columns(data = data) if (stub_is_2) { n_cols_total <- n_cols_total - 1 } extra_classes <- rep_len(list(NULL), n_cols_total) 
extra_classes[[1]] <- "gt_stub" # Create a default list of colspan values for the summary row col_span_vals <- rep_len(list(NULL), n_cols_total) if (stub_is_2 && summary_row_type == "grand") { col_span_vals[[1]] <- 2L } # Get the column alignments and also the alignment class names col_alignment <- c("right", dt_boxhead_get_vars_align_default(data = data)) alignment_classes <- paste0("gt_", col_alignment) for (j in seq_len(nrow(summary_df))) { last_row_class <- "gt_last_summary_row" if (summary_row_type == "grand") { styles_resolved_row <- dt_styles_pluck(styles_tbl, locname = "grand_summary_cells", grpname = group_id, rownum = j) summary_row_class <- "gt_grand_summary_row" first_row_class <- "gt_first_grand_summary_row" } else { styles_resolved_row <- dt_styles_pluck(styles_tbl, locname = "summary_cells", grpname = group_id, grprow = j) summary_row_class <- "gt_summary_row" first_row_class <- if ("rowname" %in% stub_layout) "gt_first_summary_row thick" else "gt_first_summary_row" } row_styles <- build_row_styles( styles_resolved_row = styles_resolved_row, include_stub = TRUE, n_cols = n_data_cols ) summary_row_lines[[length(summary_row_lines) + 1]] <- htmltools::tags$tr( htmltools::HTML( paste0( mapply( SIMPLIFY = FALSE, USE.NAMES = FALSE, unname(unlist(summary_df[j, ])), col_span_vals, alignment_classes, extra_classes, row_styles, FUN = function(x, col_span, alignment_class, extra_class, cell_style) { extra_class <- c(extra_class, summary_row_class) if (j == 1) { extra_class <- c(extra_class, first_row_class) } if (j == nrow(summary_df)) { extra_class <- c(extra_class, last_row_class) } sprintf( "<td %sclass=\"%s\"%s>%s</td>", if (is.null(col_span)) { "" } else { paste0( "colspan=\"", htmltools::htmlEscape(col_span, attribute = TRUE), "\" ") }, htmltools::htmlEscape( paste( c("gt_row", alignment_class, extra_class), collapse = " " ), attribute = TRUE ), if (!any(nzchar(cell_style))) { "" } else { paste0( " style=\"", htmltools::htmlEscape(cell_style, attribute = 
TRUE),
                "\""
              )
            },
            as.character(x)
          ) # close sprintf() for this summary cell
        }
      ), # close mapply() over the summary row's cells
      collapse = "\n"
    )
  )
) # close htmltools::tags$tr() for this summary row
} # end for (j in seq_len(nrow(summary_df)))

summary_row_lines
}

# build_row_styles(): assemble the list of inline CSS style strings for a
# single rendered row, one element per data column (with the stub's style
# prepended when `include_stub` is TRUE). Elements are NULL where no style
# applies, so callers can splice the result directly into per-cell rendering.
build_row_styles <- function(
    styles_resolved_row,
    include_stub,
    n_cols
) {
  # The styles_resolved_row data frame should contain the columns `colnum` and
  # `html_style`. Each colnum should match the number of a data column in the
  # output table; the first data column is number 1. No colnum should appear
  # more than once in styles_resolved_row. It's OK for a column not to appear in
  # styles_resolved_row, and it's OK for styles_resolved_row to have 0 rows.
  #
  # If `include_stub` is TRUE, then a row with column==0 will be used as the
  # stub style.

  # This function's implementation can't tolerate colnum of NA, or illegal
  # colnum values. Check and throw early.
  if (!isTRUE(all(styles_resolved_row$colnum %in% c(0, seq_len(n_cols)))) ||
      any(duplicated(styles_resolved_row$colnum))) {
    cli::cli_abort(
      "`build_row_styles()` was called with invalid `colnum` values."
    )
  }

  # This will hold the resulting styles
  result <- rep_len(list(NULL), n_cols)

  # The subset of styles_resolved_row that applies to data
  data_styles <- styles_resolved_row[styles_resolved_row$colnum > 0,]
  result[data_styles$colnum] <- data_styles$html_style

  # If a stub exists, we need to prepend a style (or NULL) to the result.
  if (include_stub) {
    stub_style <- styles_resolved_row[styles_resolved_row$colnum == 0,]
    result <- c(list(stub_style$html_style), result)
  }

  result
}

# as_css_font_family_attr(): collapse a vector of font names into a CSS
# `font-family` declaration; names containing spaces are wrapped in single
# quotes. When `value_only = TRUE`, only the comma-separated value (without
# the "font-family: " prefix and trailing ";") is returned.
# NOTE(review): relies on paste_between(), defined elsewhere in the package.
as_css_font_family_attr <- function(font_vec, value_only = FALSE) {
  # Quote any font name that contains a space so the CSS value stays valid
  fonts_spaces <- grepl(" ", font_vec)
  font_vec[fonts_spaces] <-
    paste_between(
      x = font_vec[fonts_spaces],
      x_2 = c("'", "'")
    )
  value <- paste(font_vec, collapse = ", ")
  if (value_only) return(value)
  paste_between(value, x_2 = c("font-family: ", ";"))
}
#' Complete Random Sampling
#'
#' complete_rs implements a random sampling procedure in which fixed numbers of
#' units are sampled. The canonical example of complete random sampling is a
#' procedure in which exactly n of N units are sampled.\cr \cr
#' Users can set the exact number of units to sample with n. Alternatively,
#' users can specify the probability of being sampled with prob and complete_rs
#' will infer the correct number of units to sample.
#' complete_rs will either sample floor(N*prob) or ceiling(N*prob) units,
#' choosing between these two values to ensure that the overall probability of
#' being sampled is exactly prob.
#' Users should specify N and not more than one of n or prob. \cr \cr
#' If only N is specified, N/2 units will be sampled. If N is odd, either
#' floor(N/2) units or ceiling(N/2) units will be sampled.
#'
#'
#' @param N The number of units. N must be a positive integer. (required)
#' @param n Use for a design in which exactly n units are sampled. (optional)
#' @param prob Use for a design in which either floor(N*prob) or ceiling(N*prob)
#'   units are sampled. The probability of being sampled is exactly prob because
#'   with probability 1-prob, floor(N*prob) units will be sampled and with
#'   probability prob, ceiling(N*prob) units will be sampled. prob must be a
#'   real number between 0 and 1 inclusive. (optional)
#' @param check_inputs logical. Defaults to TRUE.
#'
#' @return A numeric vector of length N that indicates if a unit is sampled (1)
#'   or not (0).
#' @export
#'
#' @importFrom stats rbinom
#'
#' @examples
#' S <- complete_rs(N = 100)
#' table(S)
#'
#' S <- complete_rs(N = 100, n = 50)
#' table(S)
#'
#' S <- complete_rs(N = 100, prob = .111)
#' table(S)
#'
#' # If N = n, sample with 100% probability...
#' complete_rs(N = 2, n = 2)
#'
#' # except if N = n = 1, in which case sample with 50% probability
#' complete_rs(N = 1, n = 1)
#'
complete_rs <- function(N, n = NULL, prob = NULL, check_inputs = TRUE) {
  # Validate N/n/prob combinations; called purely for its error side effects.
  if (check_inputs) {
    check_samplr_arguments(N = N, n = n, prob = prob)
  }

  if (N == 1) {
    # With a single unit, the floor/ceiling machinery below does not apply;
    # handle each way of specifying the design explicitly.
    if (is.null(n) && is.null(prob)) {
      # Neither n nor prob given: the single unit is sampled w.p. 0.5.
      return(simple_rs(N, prob = 0.5))
    }
    if (!is.null(n)) {
      if (!n %in% c(0, 1)) {
        stop(
          "The number of units sampled (n) must be less than or equal to the total number of units (N)"
        )
      }
      if (n == 0) {
        return(0)
      }
      # n == 1 with N == 1: by convention, sample w.p. 0.5 rather than 1.
      return(simple_rs(N, prob = 0.5, check_inputs = check_inputs))
    }
    # prob supplied: delegate to simple random sampling of the single unit.
    # Bug fix: the original assigned the result but fell through without
    # returning it, so the function body ended with a FALSE `if (N > 1)`
    # and complete_rs(N = 1, prob = p) returned invisible NULL instead of
    # the documented length-1 vector.
    return(simple_rs(N, prob = prob, check_inputs = check_inputs))
  }

  # N > 1 from here on.
  if (is.null(n) && is.null(prob)) {
    # Default design: sample half the units. When N is odd, randomize between
    # floor(N/2) and ceiling(N/2) so the expected count is exactly N/2.
    n_floor <- floor(N / 2)
    n_ceiling <- ceiling(N / 2)
    prob_fix_up <- if (n_ceiling > n_floor) {
      ((N * 0.5) - n_floor) / (n_ceiling - n_floor)
    } else {
      0.5
    }
    n <- if (simple_rs(1, prob_fix_up, check_inputs = check_inputs) == 0) {
      n_floor
    } else {
      n_ceiling
    }
    return(sample(rep(c(0, 1), c(N - n, n))))
  }

  if (!is.null(n)) {
    if (n == N) {
      # Sampling every unit: no randomization needed.
      return(rep(1, N))
    }
    # Random permutation placing exactly n ones among the N units.
    return(sample(rep(c(0, 1), c(N - n, n))))
  }

  # prob supplied with N > 1.
  n_floor <- floor(N * prob)
  n_ceiling <- ceiling(N * prob)
  if (n_ceiling == N) {
    # When ceiling(N*prob) would take every unit, always use the floor count.
    return(sample(rep(c(0, 1), c(N - n_floor, n_floor))))
  }
  # Randomize between the floor and ceiling counts so that the marginal
  # probability of any unit being sampled is exactly prob.
  prob_fix_up <- if (n_ceiling > n_floor) {
    ((N * prob) - n_floor) / (n_ceiling - n_floor)
  } else {
    0.5
  }
  n <- if (simple_rs(1, prob_fix_up, check_inputs = check_inputs) == 0) {
    n_floor
  } else {
    n_ceiling
  }
  sample(rep(c(0, 1), c(N - n, n)))
}
/R/complete_rs.R
no_license
lukesonnet/randomizr
R
false
false
4,163
r
#' Complete Random Sampling
#'
#' Draws a sample in which a fixed number of the N units is selected. The
#' canonical case samples exactly n of N units.\cr \cr
#' The sample size can be fixed directly via n, or inferred from a sampling
#' probability via prob: the function samples either floor(N*prob) or
#' ceiling(N*prob) units, randomizing between the two counts so that each
#' unit's marginal probability of selection is exactly prob.
#' Supply N and at most one of n or prob. \cr \cr
#' With only N given, half of the units are sampled; for odd N, either
#' floor(N/2) or ceiling(N/2) units are drawn.
#'
#'
#' @param N The number of units. N must be a positive integer. (required)
#' @param n Use for a design in which exactly n units are sampled. (optional)
#' @param prob Use for a design sampling floor(N*prob) or ceiling(N*prob)
#'   units, chosen at random so the probability of being sampled is exactly
#'   prob. prob must be a real number between 0 and 1 inclusive. (optional)
#' @param check_inputs logical. Defaults to TRUE.
#'
#' @return A numeric vector of length N that indicates if a unit is sampled (1)
#'   or not (0).
#' @export
#'
#' @importFrom stats rbinom
#'
#' @examples
#' S <- complete_rs(N = 100)
#' table(S)
#'
#' S <- complete_rs(N = 100, n = 50)
#' table(S)
#'
#' S <- complete_rs(N = 100, prob = .111)
#' table(S)
#'
complete_rs <- function(N, n = NULL, prob = NULL, check_inputs = TRUE) {
  if (check_inputs) {
    input_check <- check_samplr_arguments(N = N, n = n, prob = prob)
  }

  if (N == 1) {
    if (is.null(n) && is.null(prob)) {
      # No design given: the lone unit is sampled with probability 0.5.
      return(simple_rs(N, prob = 0.5))
    }
    if (!is.null(n)) {
      if (!n %in% c(0, 1)) {
        stop(
          "The number of units sampled (n) must be less than or equal to the total number of units (N)"
        )
      }
      if (n == 0) {
        return(0)
      }
      if (n == 1) {
        # By convention, n = N = 1 samples with probability 0.5, not 1.
        return(simple_rs(N, prob = 0.5, check_inputs = check_inputs))
      }
    }
    if (!is.null(prob)) {
      # Mirrors the original control flow: the draw is computed here but not
      # returned explicitly; execution falls through to the `N > 1` block
      # below (which is skipped when N == 1).
      assignment <- simple_rs(N, prob = prob, check_inputs = check_inputs)
    }
  }

  if (N > 1) {
    if (is.null(n) && is.null(prob)) {
      # Sample half the units; for odd N, randomize between the two
      # neighboring counts so the expected count is N/2.
      lower <- floor(N / 2)
      upper <- ceiling(N / 2)
      tie_break <- if (upper > lower) ((N * 0.5) - lower) / (upper - lower) else 0.5
      n <- if (simple_rs(1, tie_break, check_inputs = check_inputs) == 0) lower else upper
      return(sample(rep(c(0, 1), c(N - n, n))))
    }

    if (!is.null(n)) {
      if (n == N) {
        # Every unit is sampled.
        return(rep(1, N))
      }
      # Shuffle exactly n ones into a vector of length N.
      return(sample(rep(c(0, 1), c(N - n, n))))
    }

    if (!is.null(prob)) {
      lower <- floor(N * prob)
      upper <- ceiling(N * prob)
      if (upper == N) {
        # Rounding up would take every unit; use the floor count.
        return(sample(rep(c(0, 1), c(N - lower, lower))))
      }
      # Randomize between the floor and ceiling counts so each unit's
      # marginal selection probability is exactly prob.
      tie_break <- if (upper > lower) ((N * prob) - lower) / (upper - lower) else 0.5
      n <- if (simple_rs(1, tie_break, check_inputs = check_inputs) == 0) lower else upper
      return(sample(rep(c(0, 1), c(N - n, n))))
    }
  }
}
################################################################ #Title: Thermal History (TH) Analysis With Real Tide Pool Temp Values #Purpose: Determine relationship between Q10 and thermal history - all species - across all timepoints #Created by: R. E. Rangel #Created: August 2018 #Last edited: 15 April 2022 ################################################################ ##### Packages ##### library(tidyverse) library(ggplot2) library(dplyr, warn.conflicts = FALSE) library(lme4) library(lmerTest) library(emmeans) library(pastecs) library(lattice) library(ggplot2) library(ggbiplot) library(FactoMineR) library(factoextra) library(ggthemes) library(nortest) #For ad.test for normality ###Working Directory###-------------------------------------------------------- setwd("/Users/racinerangel/Desktop/Thermal History") #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All_Updated.csv", na.strings = "nd", header=T) str(TH_Meta) #---------DAILY MAX------------------------------------------------------------------ #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS Daily Max---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+Timepoint*Species+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ 
M3.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop term anova(M3.RV, M3.RV.2) # Yes, can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 139.4217 #M3.RV.2 12 133.1245 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL************** M3.RV.3<-lmer(Q10~dailymax+Timepoint+Species+Species*dailymax+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC # M3.RV.2 12 133.1245 #M3.RV.3 10 123.5571 #DROPPING TERMS ------------------------------------------------ M3.RV.4<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=ThreeMo) summary(M3.RV.4) anova(M3.RV.4) #Check to see if you can drop term anova(M3.RV.3, M3.RV.4) # No, cannot drop qqnorm(resid(M3.RV.4)) qqline(resid(M3.RV.4)) hist(resid(M3.RV.4)) ad.test(resid(M3.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+Timepoint*Species+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ 
M2.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 140.6011 #M2.RV.2 12 135.4057 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M2.RV.3<-lmer(Q10~dailymax+Timepoint+Species+Species*dailymax+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 12 135.4057 #M2.RV.3 10 124.9988 #DROPPING TERMS ------------------------------------------------ M2.RV.4<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # No, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneMo) 
summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 135.7675 #M1.RV.2 10 121.5423 #DROPPING TERMS ------------------------------------------------ M1.RV.3<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 121.5423 #M1.RV.3 8 111.2050 M1.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1.Q10.emm.n emmeans(M1.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.103 0.0875 93.1 1.182 0.4669 #Hermit - Mussel 0.256 0.0818 91.6 3.133 0.0065 #Littorine - Mussel 0.153 0.0820 93.7 1.862 0.1555 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) 
ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC #M1WK.RV 12 137.0267 #M1WK.RV.2 10 121.9377 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1WK.RV.3<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 121.9377 #M1WK.RV.3 8 112.3737 M1WK.Q10.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.Q10.emm.n emmeans(M1WK.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.093 0.0870 90.8 1.070 0.5352 #Hermit - Mussel 0.259 0.0814 90.7 3.183 0.0056 #Littorine - Mussel 0.166 0.0813 91.0 2.043 0.1079 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 DAY--------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1DY.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 133.7402 #M1DY.RV.2 10 118.4012 
#Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*dailymax) pairs(Spp.1day, by="dailymax") #contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0464 0.188 87.5 -0.247 0.9669 #(19-Jul) - (19-Sep) -0.1986 0.131 84.2 -1.520 0.2869 #(19-Mar) - (19-Sep) -0.1521 0.181 84.8 -0.842 0.6779 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.156 0.0904 96.4 1.722 0.2024 #Hermit - Mussel 0.256 0.0858 91.1 2.987 0.0100 #Littorine - Mussel 0.100 0.0879 94.5 1.144 0.4897 ################################################################ #DAILY 95 TEMP ONLY ################################################################ #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All.csv", na.strings = "nd", header=T) str(TH_Meta) #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*daily95+Species*daily95+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M3.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop 
term anova(M3.RV, M3.RV.2) # Yes, can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 139.0296 #M3.RV.2 12 132.8001 #DROPPING TERMS ------------------------------------------------Removed Timepoint*daily95---SIMPLEST MODEL M3.RV.3<-lmer(Q10~daily95+Timepoint+Species+Species*daily95+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC #M3.RV.2 12 132.8001 #M3.RV.3 10 123.4620 #DROPPING TERMS ------------------------------------------------Removed Species*dailymax95 M3.RV.4<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=ThreeMo) #Simplest model summary(M3.RV.4) anova(M3.RV.4) #Check to see if you can drop term anova(M3.RV.3, M3.RV.4) # NO, cannot drop qqnorm(resid(M3.RV.4)) qqline(resid(M3.RV.4)) hist(resid(M3.RV.4)) ad.test(resid(M3.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M2.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you 
can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 140.1660 #M2.RV.2 12 135.1132 #DROPPING TERMS ------------------------------------------------Removed Timepoint*daily95 ---- -- SIMPLEST MODEL-- M2.RV.3<-lmer(Q10~daily95+Timepoint+Species+Species*daily95+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 12 135.1132 #M2.RV.3 10 124.9252 #DROPPING TERMS ------------------------------------------------Removed Species*daily95 M2.RV.4<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # NOPE, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneMo) summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop 
qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 135.3689 #M1.RV.2 10 121.1813 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1.RV.3<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 121.1813 #M1.RV.3 8 111.1456 M1DY.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1DY.Q10.emm.n emmeans(M1.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.104 0.0875 93.2 1.186 0.4646 #Hermit - Mussel 0.256 0.0817 91.5 3.134 0.0065 #Littorine - Mussel 0.152 0.0821 93.8 1.857 0.1571 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC # df AIC #M1WK.RV 12 136.4234 
#M1WK.RV.2 10 121.2463 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1WK.RV.3<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 121.2463 #M1WK.RV.3 8 111.9167 #Post hoc - emmeans------------------------------------------- # M1WK.RV.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.RV.emm.n emmeans(M1WK.RV.3, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0918 0.0865 90.2 1.061 0.5405 #Hermit - Mussel 0.2583 0.0810 90.5 3.188 0.0055 #Littorine - Mussel 0.1665 0.0808 90.4 2.061 0.1039 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1DY.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 132.9109 #M1DY.RV.2 10 117.8557 #Post hoc - 
emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*daily95) pairs(Spp.1day, by="daily95") # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0532 0.185 90.5 -0.288 0.9555 #(19-Jul) - (19-Sep) -0.2359 0.134 83.3 -1.767 0.1870 #(19-Mar) - (19-Sep) -0.1827 0.179 89.1 -1.019 0.5670 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.158 0.0921 97.0 1.715 0.2050 #Hermit - Mussel 0.266 0.0859 91.5 3.095 0.0073 #Littorine - Mussel 0.108 0.0880 95.2 1.225 0.4414 ################################################################ #RANGE ONLY ################################################################ #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All.csv", na.strings = "nd", header=T) str(TH_Meta) #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Range M3.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop term anova(M3.RV, M3.RV.2) # Yes, 
can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 148.6283 #M3.RV.2 14 134.1816 #DROPPING TERMS ------------------------------------------------Removed Timepoint*Range---SIMPLEST MODEL M3.RV.3<-lmer(Q10~Range+Timepoint+Species+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC #M3.RV.2 14 134.1816 #M3.RV.3 12 120.5765 #Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M3.RV.3, ~Timepoint*Species) pairs(Spp.1day, by="Timepoint") #Timepoint = 19-Jul: # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0974 0.144 84.8 0.678 0.7772 #Hermit - Mussel 0.2359 0.134 82.7 1.765 0.1876 #Littorine - Mussel 0.1385 0.138 84.3 1.004 0.5764 #Timepoint = 19-Mar: # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.1780 0.135 97.7 1.318 0.3887 #Hermit - Mussel 0.1078 0.127 93.7 0.850 0.6728 #Littorine - Mussel -0.0702 0.125 93.6 -0.563 0.8399 #Timepoint = 19-Sep: # contrast estimate SE df t.ratio p.value #Hermit - Littorine -0.0592 0.141 80.2 -0.421 0.9071 #Hermit - Mussel 0.4567 0.128 82.9 3.571 0.0017 #Littorine - Mussel 0.5159 0.142 84.9 3.624 0.0014 #Interested in overall effect of Species Spp.3mo<- emmeans(M3.RV.3, ~Timepoint*Species) pairs(Spp.3mo, by="Species") #Species = Hermit: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.0485 0.169 90.2 0.287 0.9557 #(19-Jul) - (19-Sep) -0.1985 0.150 99.9 -1.326 0.3841 #(19-Mar) - (19-Sep) -0.2469 0.135 93.8 -1.822 0.1679 #Species = Littorine: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.1291 0.194 63.5 0.664 0.7849 #(19-Jul) - (19-Sep) -0.3551 0.170 99.8 -2.086 
0.0979 #(19-Mar) - (19-Sep) -0.4841 0.156 99.1 -3.103 0.0070 #Species = Mussel: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0795 0.161 73.5 -0.493 0.8748 #(19-Jul) - (19-Sep) 0.0224 0.145 99.4 0.155 0.9869 #(19-Mar) - (19-Sep) 0.1019 0.129 99.1 0.790 0.7102 #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Range M2.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 148.8095 #M2.RV.2 14 134.4633 #DROPPING TERMS ------------------------------------------------Removed Timepoint*Range ---- -- SIMPLEST MODEL-- M2.RV.3<-lmer(Q10~Range+Timepoint+Species+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 14 134.4633 #M2.RV.3 12 120.2325 Spp.2mo<- emmeans(M2.RV.3, ~Timepoint*Species) pairs(Spp.2mo, by="Species") Spp.2mo<- emmeans(M2.RV.3, 
~Timepoint*Species) pairs(Spp.2mo, by="Timepoint") #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M2.RV.4<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # Cannot, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneMo) summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 138.8697 #M1.RV.2 10 123.4780 #DROPPING TERMS ------------------------------------------------ M1.RV.3<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 123.4780 #M1.RV.3 8 113.6754 M1Mo.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1Mo.Q10.emm.n emmeans(M1.RV.3, 
pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0956 0.0872 91.5 1.096 0.5191 #Hermit - Mussel 0.2589 0.0821 90.9 3.152 0.0062 #Littorine - Mussel 0.1633 0.0824 94.1 1.981 0.1226 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MWEEK--------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC # df AIC #M1WK.RV 12 138.3163 #M1WK.RV.2 10 124.3536 #DROPPING TERMS ------------------------------------------------ M1WK.RV.3<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 124.3536 #M1WK.RV.3 8 113.1174 #Post hoc - emmeans------------------------------------------- # M1WK.RV.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.RV.emm.n emmeans(M1WK.RV.3, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0997 0.0871 
89.2 1.146 0.4888 #Hermit - Mussel 0.2627 0.0816 90.4 3.220 0.0050 #Littorine - Mussel 0.1629 0.0811 90.4 2.010 0.1157 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 DAY---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1DY.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 126.7225 #M1DY.RV.2 10 115.9732 #Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*Range) pairs(Spp.1day, by="Range") #contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.0555 0.0820 89.5 0.677 0.7775 #(19-Jul) - (19-Sep) -0.0932 0.0825 79.7 -1.129 0.4993 #(19-Mar) - (19-Sep) -0.1487 0.0807 87.8 -1.843 0.1616 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.1415 0.0879 94.8 1.609 0.2467 #Hermit - Mussel 0.2359 0.0867 90.6 2.720 0.0212 #Littorine - Mussel 0.0943 0.0852 94.2 1.107 0.5119
/RangelandSorte_TH_Values.R
no_license
racine-rangel/RangelandSorte_ThermalHistory
R
false
false
32,610
r
################################################################ #Title: Thermal History (TH) Analysis With Real Tide Pool Temp Values #Purpose: Determine relationship between Q10 and thermal history - all species - across all timepoints #Created by: R. E. Rangel #Created: August 2018 #Last edited: 15 April 2022 ################################################################ ##### Packages ##### library(tidyverse) library(ggplot2) library(dplyr, warn.conflicts = FALSE) library(lme4) library(lmerTest) library(emmeans) library(pastecs) library(lattice) library(ggplot2) library(ggbiplot) library(FactoMineR) library(factoextra) library(ggthemes) library(nortest) #For ad.test for normality ###Working Directory###-------------------------------------------------------- setwd("/Users/racinerangel/Desktop/Thermal History") #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All_Updated.csv", na.strings = "nd", header=T) str(TH_Meta) #---------DAILY MAX------------------------------------------------------------------ #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS Daily Max---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+Timepoint*Species+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ 
M3.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop term anova(M3.RV, M3.RV.2) # Yes, can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 139.4217 #M3.RV.2 12 133.1245 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL************** M3.RV.3<-lmer(Q10~dailymax+Timepoint+Species+Species*dailymax+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC # M3.RV.2 12 133.1245 #M3.RV.3 10 123.5571 #DROPPING TERMS ------------------------------------------------ M3.RV.4<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=ThreeMo) summary(M3.RV.4) anova(M3.RV.4) #Check to see if you can drop term anova(M3.RV.3, M3.RV.4) # No, cannot drop qqnorm(resid(M3.RV.4)) qqline(resid(M3.RV.4)) hist(resid(M3.RV.4)) ad.test(resid(M3.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+Timepoint*Species+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ 
M2.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 140.6011 #M2.RV.2 12 135.4057 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M2.RV.3<-lmer(Q10~dailymax+Timepoint+Species+Species*dailymax+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 12 135.4057 #M2.RV.3 10 124.9988 #DROPPING TERMS ------------------------------------------------ M2.RV.4<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # No, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneMo) 
summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 135.7675 #M1.RV.2 10 121.5423 #DROPPING TERMS ------------------------------------------------ M1.RV.3<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 121.5423 #M1.RV.3 8 111.2050 M1.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1.Q10.emm.n emmeans(M1.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.103 0.0875 93.1 1.182 0.4669 #Hermit - Mussel 0.256 0.0818 91.6 3.133 0.0065 #Littorine - Mussel 0.153 0.0820 93.7 1.862 0.1555 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) 
ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC #M1WK.RV 12 137.0267 #M1WK.RV.2 10 121.9377 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1WK.RV.3<-lmer(Q10~dailymax+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 121.9377 #M1WK.RV.3 8 112.3737 M1WK.Q10.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.Q10.emm.n emmeans(M1WK.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.093 0.0870 90.8 1.070 0.5352 #Hermit - Mussel 0.259 0.0814 90.7 3.183 0.0056 #Littorine - Mussel 0.166 0.0813 91.0 2.043 0.1079 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 DAY--------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+Species*dailymax+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1DY.RV.2<-lmer(Q10~dailymax+Timepoint+Species+Timepoint*dailymax+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 133.7402 #M1DY.RV.2 10 118.4012 
#Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*dailymax) pairs(Spp.1day, by="dailymax") #contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0464 0.188 87.5 -0.247 0.9669 #(19-Jul) - (19-Sep) -0.1986 0.131 84.2 -1.520 0.2869 #(19-Mar) - (19-Sep) -0.1521 0.181 84.8 -0.842 0.6779 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.156 0.0904 96.4 1.722 0.2024 #Hermit - Mussel 0.256 0.0858 91.1 2.987 0.0100 #Littorine - Mussel 0.100 0.0879 94.5 1.144 0.4897 ################################################################ #DAILY 95 TEMP ONLY ################################################################ #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All.csv", na.strings = "nd", header=T) str(TH_Meta) #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*daily95+Species*daily95+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M3.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop 
term anova(M3.RV, M3.RV.2) # Yes, can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 139.0296 #M3.RV.2 12 132.8001 #DROPPING TERMS ------------------------------------------------Removed Timepoint*daily95---SIMPLEST MODEL M3.RV.3<-lmer(Q10~daily95+Timepoint+Species+Species*daily95+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC #M3.RV.2 12 132.8001 #M3.RV.3 10 123.4620 #DROPPING TERMS ------------------------------------------------Removed Species*dailymax95 M3.RV.4<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=ThreeMo) #Simplest model summary(M3.RV.4) anova(M3.RV.4) #Check to see if you can drop term anova(M3.RV.3, M3.RV.4) # NO, cannot drop qqnorm(resid(M3.RV.4)) qqline(resid(M3.RV.4)) hist(resid(M3.RV.4)) ad.test(resid(M3.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M2.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you 
can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 140.1660 #M2.RV.2 12 135.1132 #DROPPING TERMS ------------------------------------------------Removed Timepoint*daily95 ---- -- SIMPLEST MODEL-- M2.RV.3<-lmer(Q10~daily95+Timepoint+Species+Species*daily95+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 12 135.1132 #M2.RV.3 10 124.9252 #DROPPING TERMS ------------------------------------------------Removed Species*daily95 M2.RV.4<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # NOPE, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneMo) summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop 
qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 135.3689 #M1.RV.2 10 121.1813 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1.RV.3<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 121.1813 #M1.RV.3 8 111.1456 M1DY.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1DY.Q10.emm.n emmeans(M1.RV.3, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.104 0.0875 93.2 1.186 0.4646 #Hermit - Mussel 0.256 0.0817 91.5 3.134 0.0065 #Littorine - Mussel 0.152 0.0821 93.8 1.857 0.1571 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC # df AIC #M1WK.RV 12 136.4234 
#M1WK.RV.2 10 121.2463 #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1WK.RV.3<-lmer(Q10~daily95+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 121.2463 #M1WK.RV.3 8 111.9167 #Post hoc - emmeans------------------------------------------- # M1WK.RV.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.RV.emm.n emmeans(M1WK.RV.3, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0918 0.0865 90.2 1.061 0.5405 #Hermit - Mussel 0.2583 0.0810 90.5 3.188 0.0055 #Littorine - Mussel 0.1665 0.0808 90.4 2.061 0.1039 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+Species*daily95+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1DY.RV.2<-lmer(Q10~daily95+Timepoint+Species+Timepoint*daily95+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 132.9109 #M1DY.RV.2 10 117.8557 #Post hoc - 
emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*daily95) pairs(Spp.1day, by="daily95") # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0532 0.185 90.5 -0.288 0.9555 #(19-Jul) - (19-Sep) -0.2359 0.134 83.3 -1.767 0.1870 #(19-Mar) - (19-Sep) -0.1827 0.179 89.1 -1.019 0.5670 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.158 0.0921 97.0 1.715 0.2050 #Hermit - Mussel 0.266 0.0859 91.5 3.095 0.0073 #Littorine - Mussel 0.108 0.0880 95.2 1.225 0.4414 ################################################################ #RANGE ONLY ################################################################ #---------------------------------------------------------------------------- #THERMAL HISTORY ANALYSIS--------------------------------------------------- TH_Meta<-read.csv("RangelandSorte_ThermalHistory_All.csv", na.strings = "nd", header=T) str(TH_Meta) #------------------------------------------------------- #------------------------ #3MONTH ONLY------------------------------------------------------ #--------------------------------- ThreeMo<-filter(TH_Meta, TempDate == "3Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #3 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M3.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV) anova(M3.RV) qqnorm(resid(M3.RV)) qqline(resid(M3.RV)) hist(resid(M3.RV)) ad.test(resid(M3.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Range M3.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV.2) anova(M3.RV.2) #Check to see if you can drop term anova(M3.RV, M3.RV.2) # Yes, 
can drop qqnorm(resid(M3.RV.2)) qqline(resid(M3.RV.2)) hist(resid(M3.RV.2)) ad.test(resid(M3.RV.2)) #Checking normality AIC(M3.RV, M3.RV.2) #df AIC #M3.RV 16 148.6283 #M3.RV.2 14 134.1816 #DROPPING TERMS ------------------------------------------------Removed Timepoint*Range---SIMPLEST MODEL M3.RV.3<-lmer(Q10~Range+Timepoint+Species+Species*Timepoint+(1|Pool), data=ThreeMo) summary(M3.RV.3) anova(M3.RV.3) #Check to see if you can drop term anova(M3.RV.2, M3.RV.3) # Yes, can drop qqnorm(resid(M3.RV.3)) qqline(resid(M3.RV.3)) hist(resid(M3.RV.3)) ad.test(resid(M3.RV.3)) #Checking normality AIC(M3.RV.2, M3.RV.3) #df AIC #M3.RV.2 14 134.1816 #M3.RV.3 12 120.5765 #Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M3.RV.3, ~Timepoint*Species) pairs(Spp.1day, by="Timepoint") #Timepoint = 19-Jul: # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0974 0.144 84.8 0.678 0.7772 #Hermit - Mussel 0.2359 0.134 82.7 1.765 0.1876 #Littorine - Mussel 0.1385 0.138 84.3 1.004 0.5764 #Timepoint = 19-Mar: # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.1780 0.135 97.7 1.318 0.3887 #Hermit - Mussel 0.1078 0.127 93.7 0.850 0.6728 #Littorine - Mussel -0.0702 0.125 93.6 -0.563 0.8399 #Timepoint = 19-Sep: # contrast estimate SE df t.ratio p.value #Hermit - Littorine -0.0592 0.141 80.2 -0.421 0.9071 #Hermit - Mussel 0.4567 0.128 82.9 3.571 0.0017 #Littorine - Mussel 0.5159 0.142 84.9 3.624 0.0014 #Interested in overall effect of Species Spp.3mo<- emmeans(M3.RV.3, ~Timepoint*Species) pairs(Spp.3mo, by="Species") #Species = Hermit: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.0485 0.169 90.2 0.287 0.9557 #(19-Jul) - (19-Sep) -0.1985 0.150 99.9 -1.326 0.3841 #(19-Mar) - (19-Sep) -0.2469 0.135 93.8 -1.822 0.1679 #Species = Littorine: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.1291 0.194 63.5 0.664 0.7849 #(19-Jul) - (19-Sep) -0.3551 0.170 99.8 -2.086 
0.0979 #(19-Mar) - (19-Sep) -0.4841 0.156 99.1 -3.103 0.0070 #Species = Mussel: # contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) -0.0795 0.161 73.5 -0.493 0.8748 #(19-Jul) - (19-Sep) 0.0224 0.145 99.4 0.155 0.9869 #(19-Mar) - (19-Sep) 0.1019 0.129 99.1 0.790 0.7102 #------------------------------------------------------- #------------------------ #2MONTH ONLY------------------------------------------------------ #--------------------------------- TwoMo<-filter(TH_Meta, TempDate == "2Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #2 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M2.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV) anova(M2.RV) qqnorm(resid(M2.RV)) qqline(resid(M2.RV)) hist(resid(M2.RV)) ad.test(resid(M2.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------Removed Species*Range M2.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV.2) anova(M2.RV.2) #Check to see if you can drop term anova(M2.RV, M2.RV.2) # Yes, can drop qqnorm(resid(M2.RV.2)) qqline(resid(M2.RV.2)) hist(resid(M2.RV.2)) ad.test(resid(M2.RV.2)) #Checking normality AIC(M2.RV, M2.RV.2) #df AIC #M2.RV 16 148.8095 #M2.RV.2 14 134.4633 #DROPPING TERMS ------------------------------------------------Removed Timepoint*Range ---- -- SIMPLEST MODEL-- M2.RV.3<-lmer(Q10~Range+Timepoint+Species+Species*Timepoint+(1|Pool), data=TwoMo) summary(M2.RV.3) anova(M2.RV.3) #Check to see if you can drop term anova(M2.RV.2, M2.RV.3) # Yes, can drop qqnorm(resid(M2.RV.3)) qqline(resid(M2.RV.3)) hist(resid(M2.RV.3)) ad.test(resid(M2.RV.3)) #Checking normality AIC(M2.RV.2, M2.RV.3) #df AIC #M2.RV.2 14 134.4633 #M2.RV.3 12 120.2325 Spp.2mo<- emmeans(M2.RV.3, ~Timepoint*Species) pairs(Spp.2mo, by="Species") Spp.2mo<- emmeans(M2.RV.3, 
~Timepoint*Species) pairs(Spp.2mo, by="Timepoint") #DROPPING TERMS ------------------------------------------------Removed Species*Timepoint M2.RV.4<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=TwoMo) summary(M2.RV.4) anova(M2.RV.4) #Check to see if you can drop term anova(M2.RV.3, M2.RV.4) # Cannot, cannot drop qqnorm(resid(M2.RV.4)) qqline(resid(M2.RV.4)) hist(resid(M2.RV.4)) ad.test(resid(M2.RV.4)) #Checking normality #------------------------------------------------------- #------------------------ #1MONTH ONLY------------------------------------------------------ #--------------------------------- OneMo<-filter(TH_Meta, TempDate == "1Month") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MONTHS---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneMo) summary(M1.RV) anova(M1.RV) qqnorm(resid(M1.RV)) qqline(resid(M1.RV)) hist(resid(M1.RV)) ad.test(resid(M1.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneMo) summary(M1.RV.2) anova(M1.RV.2) #Check to see if you can drop term anova(M1.RV, M1.RV.2) # Yes, can drop qqnorm(resid(M1.RV.2)) qqline(resid(M1.RV.2)) hist(resid(M1.RV.2)) ad.test(resid(M1.RV.2)) #Checking normality AIC(M1.RV, M1.RV.2) #df AIC #M1.RV 12 138.8697 #M1.RV.2 10 123.4780 #DROPPING TERMS ------------------------------------------------ M1.RV.3<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=OneMo) summary(M1.RV.3) anova(M1.RV.3) #Check to see if you can drop term anova(M1.RV.2, M1.RV.3) # Yes, can drop qqnorm(resid(M1.RV.3)) qqline(resid(M1.RV.3)) hist(resid(M1.RV.3)) ad.test(resid(M1.RV.3)) #Checking normality AIC(M1.RV.2, M1.RV.3) #df AIC #M1.RV.2 10 123.4780 #M1.RV.3 8 113.6754 M1Mo.Q10.emm.n<-emmeans(M1.RV.3, "Species") M1Mo.Q10.emm.n emmeans(M1.RV.3, 
pairwise~Species) #contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0956 0.0872 91.5 1.096 0.5191 #Hermit - Mussel 0.2589 0.0821 90.9 3.152 0.0062 #Littorine - Mussel 0.1633 0.0824 94.1 1.981 0.1226 #------------------------------------------------------- #------------------------ #1WEEK ONLY------------------------------------------------------ #--------------------------------- OneWk<-filter(TH_Meta, TempDate == "1Week") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 MWEEK--------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1WK.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneWk) summary(M1WK.RV) anova(M1WK.RV) qqnorm(resid(M1WK.RV)) qqline(resid(M1WK.RV)) hist(resid(M1WK.RV)) ad.test(resid(M1WK.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------ M1WK.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneWk) summary(M1WK.RV.2) anova(M1WK.RV.2) #Check to see if you can drop term anova(M1WK.RV, M1WK.RV.2) # Yes, can drop qqnorm(resid(M1WK.RV.2)) qqline(resid(M1WK.RV.2)) hist(resid(M1WK.RV.2)) ad.test(resid(M1WK.RV.2)) #Checking normality AIC(M1WK.RV, M1WK.RV.2) #df AIC # df AIC #M1WK.RV 12 138.3163 #M1WK.RV.2 10 124.3536 #DROPPING TERMS ------------------------------------------------ M1WK.RV.3<-lmer(Q10~Range+Timepoint+Species+(1|Pool), data=OneWk) summary(M1WK.RV.3) anova(M1WK.RV.3) #Check to see if you can drop term anova(M1WK.RV.2, M1WK.RV.3) # Yes, can drop qqnorm(resid(M1WK.RV.3)) qqline(resid(M1WK.RV.3)) hist(resid(M1WK.RV.3)) ad.test(resid(M1WK.RV.3)) #Checking normality AIC(M1WK.RV.2, M1WK.RV.3) #df AIC #M1WK.RV.2 10 124.3536 #M1WK.RV.3 8 113.1174 #Post hoc - emmeans------------------------------------------- # M1WK.RV.emm.n<-emmeans(M1WK.RV.3, "Species") M1WK.RV.emm.n emmeans(M1WK.RV.3, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.0997 0.0871 
89.2 1.146 0.4888 #Hermit - Mussel 0.2627 0.0816 90.4 3.220 0.0050 #Littorine - Mussel 0.1629 0.0811 90.4 2.010 0.1157 #------------------------------------------------------- #------------------------ #1DAY ONLY------------------------------------------------------ #--------------------------------- OneDay<-filter(TH_Meta, TempDate == "1Day") #count(ThreeMo$Pool) #Check counts #count(ThreeMo$Species) #1 DAY---------Full model ----> Simplify----------------------Using Real Pool Temp Values---------------------------- M1DY.RV<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+Species*Range+(1|Pool), data=OneDay) summary(M1DY.RV) anova(M1DY.RV) qqnorm(resid(M1DY.RV)) qqline(resid(M1DY.RV)) hist(resid(M1DY.RV)) ad.test(resid(M1DY.RV)) #Checking normality #DROPPING TERMS ------------------------------------------------SIMPLEST MODEL M1DY.RV.2<-lmer(Q10~Range+Timepoint+Species+Timepoint*Range+(1|Pool), data=OneDay) summary(M1DY.RV.2) anova(M1DY.RV.2) #Check to see if you can drop term anova(M1DY.RV, M1DY.RV.2) # Yes, can drop qqnorm(resid(M1DY.RV.2)) qqline(resid(M1DY.RV.2)) hist(resid(M1DY.RV.2)) ad.test(resid(M1DY.RV.2)) #Checking normality AIC(M1DY.RV, M1DY.RV.2) #df AIC #M1DY.RV 12 126.7225 #M1DY.RV.2 10 115.9732 #Post hoc - emmeans-------------------------------------------- #Interested in overall effect of Season Spp.1day<- emmeans(M1DY.RV.2, ~Timepoint*Range) pairs(Spp.1day, by="Range") #contrast estimate SE df t.ratio p.value #(19-Jul) - (19-Mar) 0.0555 0.0820 89.5 0.677 0.7775 #(19-Jul) - (19-Sep) -0.0932 0.0825 79.7 -1.129 0.4993 #(19-Mar) - (19-Sep) -0.1487 0.0807 87.8 -1.843 0.1616 M1DY.Q10.emm.n<-emmeans(M1DY.RV.2, "Species") M1DY.Q10.emm.n emmeans(M1DY.RV.2, pairwise~Species) # contrast estimate SE df t.ratio p.value #Hermit - Littorine 0.1415 0.0879 94.8 1.609 0.2467 #Hermit - Mussel 0.2359 0.0867 90.6 2.720 0.0212 #Littorine - Mussel 0.0943 0.0852 94.2 1.107 0.5119
rm(list=ls()) install.packages("sp") library(sp) xa <- round(runif(10),2) ya <- round(runif(10),2) xy <- cbind(xa,ya) xy.sp <- SpatialPoints(xy) xy.sp plot(xy.sp, axes=T, pch=2) x1 <-c(1,2,3,3) y1 <-c(4,3,3,4) l11 <-cbind(x1,y1) l12<- cbind(l11[,1]+.05,l11[,2]+.05) sl11<-Line(l11) sl12<-Line(l12) s1 <- Lines(list(sl11,sl12),ID='a') s2 <-Lines(list(sl12),ID='b') sl<-SpatialLines(list(s1,s2)) summary(sl) plot(sl,axes=T,col=c("red","blue"),lty=c(1,2),lwd=2) sr1<-Polygon(cbind(c(1,3,3,1,1),c(1,1,2,2,1))) sr2<-Polygon(cbind(c(1,3,2,1),c(2,2,3,2))) sr3<-Polygon(cbind(c(3,3,2,4,6,6,3),c(1,2,3,4,3,1,1))) sr4<-Polygon(cbind(c(4,4,5,5,4),c(2,3,3,2,2)),hole=TRUE) Srs1 <-Polygons(list(sr1),"s1") Srs2 <-Polygons(list(sr2),"s2") Srs3 <-Polygons(list(sr3,sr4),"s3/4") SP <- SpatialPolygons(list(Srs1,Srs2,Srs3),1:3) plot(SP,col=1:3,pbg="white",axes=T) title("Spatial Polygon Plot")
/06_29_05.R
no_license
yujinnnnn/R_practice
R
false
false
886
r
rm(list=ls()) install.packages("sp") library(sp) xa <- round(runif(10),2) ya <- round(runif(10),2) xy <- cbind(xa,ya) xy.sp <- SpatialPoints(xy) xy.sp plot(xy.sp, axes=T, pch=2) x1 <-c(1,2,3,3) y1 <-c(4,3,3,4) l11 <-cbind(x1,y1) l12<- cbind(l11[,1]+.05,l11[,2]+.05) sl11<-Line(l11) sl12<-Line(l12) s1 <- Lines(list(sl11,sl12),ID='a') s2 <-Lines(list(sl12),ID='b') sl<-SpatialLines(list(s1,s2)) summary(sl) plot(sl,axes=T,col=c("red","blue"),lty=c(1,2),lwd=2) sr1<-Polygon(cbind(c(1,3,3,1,1),c(1,1,2,2,1))) sr2<-Polygon(cbind(c(1,3,2,1),c(2,2,3,2))) sr3<-Polygon(cbind(c(3,3,2,4,6,6,3),c(1,2,3,4,3,1,1))) sr4<-Polygon(cbind(c(4,4,5,5,4),c(2,3,3,2,2)),hole=TRUE) Srs1 <-Polygons(list(sr1),"s1") Srs2 <-Polygons(list(sr2),"s2") Srs3 <-Polygons(list(sr3,sr4),"s3/4") SP <- SpatialPolygons(list(Srs1,Srs2,Srs3),1:3) plot(SP,col=1:3,pbg="white",axes=T) title("Spatial Polygon Plot")
/code/analizaEscenarios2013.r
no_license
csteclaire/mex-open-map
R
false
false
157,672
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/types_nimbleFunctionList.R \docType{class} \name{nimbleFunctionList-class} \alias{nimbleFunctionList} \alias{nimbleFunctionList-class} \title{Create a list of nimbleFunctions} \description{ Create an empty list of nimbleFunctions that all will inherit from a base class. } \details{ See the User Manual for information about creating and populating a \code{nimbleFunctionList}. } \author{ NIMBLE development team }
/packages/nimble/man/nimbleFunctionList-class.Rd
permissive
DRJP/nimble
R
false
true
494
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/types_nimbleFunctionList.R \docType{class} \name{nimbleFunctionList-class} \alias{nimbleFunctionList} \alias{nimbleFunctionList-class} \title{Create a list of nimbleFunctions} \description{ Create an empty list of nimbleFunctions that all will inherit from a base class. } \details{ See the User Manual for information about creating and populating a \code{nimbleFunctionList}. } \author{ NIMBLE development team }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/7-aldex2propr.R \name{aldex.cor} \alias{aldex.cor} \title{Correlate CLR Data with a Continuous Measurement} \usage{ aldex.cor(clr, conditions, ...) } \arguments{ \item{clr}{An \code{aldex.clr} object.} \item{conditions}{A numeric vector of a continuous variable.} \item{...}{Arguments passed to \code{cor.test}.} } \value{ Returns a data.frame of the average correlation statistic (e.g., \code{r}) and p-value (\code{p}) for each log-ratio transformed feature, with FDR appended as the \code{BH} column. } \description{ This function uses the Monte Carlo instances from an \code{aldex.clr} object to correlate each log-ratio transformed feature vector with a continuous numeric variable. See \code{\link{lr2cor}}. }
/man/aldex.cor.Rd
no_license
tpq/propr
R
false
true
802
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/7-aldex2propr.R \name{aldex.cor} \alias{aldex.cor} \title{Correlate CLR Data with a Continuous Measurement} \usage{ aldex.cor(clr, conditions, ...) } \arguments{ \item{clr}{An \code{aldex.clr} object.} \item{conditions}{A numeric vector of a continuous variable.} \item{...}{Arguments passed to \code{cor.test}.} } \value{ Returns a data.frame of the average correlation statistic (e.g., \code{r}) and p-value (\code{p}) for each log-ratio transformed feature, with FDR appended as the \code{BH} column. } \description{ This function uses the Monte Carlo instances from an \code{aldex.clr} object to correlate each log-ratio transformed feature vector with a continuous numeric variable. See \code{\link{lr2cor}}. }
library(lme4) library(dplyr) library(MuMIn) processed_data <- read.csv("~/Downloads/distances_data.csv") soa_200 <- read.csv("~/Documents/cs-clps-final_project/200SOA.csv") soa_1200 <- read.csv("~/Documents/cs-clps-final_project/1200SOA.csv") small_data <- processed_data %>% dplyr::select(Unnamed..0,prime, target, rel,jc_similarity, path_similarity, cosine_distance) small_data$jc_similarity[which(small_data$jc_similarity > 1)] <- 1 soa_200_complete <- merge(soa_200, small_data, by=c("prime", "target")) # get specific relationship column soa_200_complete$spec_rel <- as.numeric(!is.na(soa_200_complete$relation)) # get a dataset where all values are not NA for wordnet dataset_200 <- soa_200_complete %>% filter(!is.na(jc_similarity)) %>% filter (!is.na(cosine_distance)) # get the same data for 1200 soa_1200_complete <- merge(soa_1200, small_data, by=c("prime", "target")) soa_1200_complete$spec_rel <- as.numeric(!is.na(soa_1200_complete$relation)) dataset_1200 <- soa_1200_complete %>% filter(!is.na(jc_similarity)) %>% filter (!is.na(cosine_distance)) # model with just jc_similarity for soa200 model_wn_jc_200 <- lmer(formula = logRT ~ jc_similarity + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_wn_jc_200) # model with both jc_similarity and relatedness for soa200 model_wn_jc_rel_200 <- lmer(formula = logRT ~ rel.y + jc_similarity + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_wn_jc_rel_200) # model with interaction term b/w jc_similarity & specific relation for soa200 model_wn_interaction_200 <- lmer(formula = logRT ~ rel.y + jc_similarity + jc_similarity*spec_rel + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_wn_interaction_200) # model with just cosine for 200 model_cosine_200 <- lmer(formula = 
logRT ~ cosine_distance + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_cosine_200) # model with cosine and relatedness for 200 model_cosine_rel_200 <- lmer(formula = logRT ~ cosine_distance + rel.y + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_cosine_rel_200) # model with interaction term for 200 model_cosine_interaction_200 <- lmer(formula = logRT ~ cosine_distance + rel.y + cosine_distance*spec_rel + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) summary(model_cosine_interaction_200) # repeat it all for 1200 # model with just jc_similarity for soa1200 model_wn_jc_1200 <- lmer(formula = logRT ~ jc_similarity + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_wn_jc_1200) # model with both jc_similarity and relatedness for soa1200 model_wn_jc_rel_1200 <- lmer(formula = logRT ~ rel.y + jc_similarity + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_wn_jc_rel_1200) # model with interaction term b/w jc_similarity & specific relation for soa1200 model_wn_interaction_1200 <- lmer(formula = logRT ~ rel.y + jc_similarity + jc_similarity*spec_rel + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_wn_interaction_1200) # model with just cosine for soa1200 model_cosine_1200 <- lmer(formula = logRT ~ cosine_distance + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_cosine_1200) # model with cosine and relatedness for soa1200 model_cosine_rel_1200 <- lmer(formula = 
logRT ~ cosine_distance + rel.y + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_cosine_rel_1200) # model with interaction term for soa1200 model_cosine_interaction_1200 <- lmer(formula = logRT ~ cosine_distance + rel.y + cosine_distance*spec_rel + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) summary(model_cosine_interaction_1200) # retrain baseline models on the smaller datasets baseline_200 <- lmer(formula=logRT ~ rel.y + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_200) baseline_1200 <- lmer(formula=logRT ~ rel.y + t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN + (1|prime) + (1|target) + (1|Subject), data=dataset_1200) # r-squared for different models r.squaredGLMM(model_wn_jc_200) r.squaredGLMM(model_wn_jc_rel_200) r.squaredGLMM(model_wn_interaction_200) r.squaredGLMM(model_cosine_200) r.squaredGLMM(model_cosine_rel_200) r.squaredGLMM(model_cosine_interaction_200) r.squaredGLMM(model_wn_jc_1200) r.squaredGLMM(model_wn_jc_rel_1200) r.squaredGLMM(model_wn_interaction_1200) r.squaredGLMM(model_cosine_1200) r.squaredGLMM(model_cosine_rel_1200) r.squaredGLMM(model_cosine_interaction_1200) r.squaredGLMM(baseline_200) r.squaredGLMM(baseline_1200) # anova comparisons # baseline-200-wn vs model_wn_jc_rel_200 a1 <- anova(baseline_200, model_wn_jc_rel_200) # model_wn_jc_rel_200 vs model_wn_interaction_200 a2 <- anova(model_wn_jc_rel_200, model_wn_interaction_200) # and same for the other datasets a3 <- anova(baseline_1200, model_wn_jc_rel_1200) a4 <- anova(model_wn_jc_rel_1200, model_wn_interaction_1200) a5 <- anova(baseline_200, model_cosine_rel_200) a6 <- anova(model_cosine_rel_200, model_cosine_interaction_200) a7 <- anova(baseline_1200, model_cosine_rel_1200) a8 <- anova(model_cosine_rel_1200, 
model_cosine_interaction_1200) # use AIC to determine whether wordnet or cosine is better comp1 <- AIC(model_wn_jc_200, model_cosine_200) comp2 <- AIC(model_wn_jc_1200, model_cosine_1200) comp3 <- AIC(model_wn_jc_rel_200, model_cosine_rel_200) comp4 <- AIC(model_wn_jc_rel_1200, model_cosine_rel_1200) comp5 <- AIC(model_wn_interaction_200, model_cosine_interaction_200) comp6 <- AIC(model_wn_interaction_1200, model_cosine_interaction_1200) AIC(model_wn_jc_200, model_cosine_200, model_wn_jc_rel_200, model_cosine_rel_200, model_wn_interaction_200, model_cosine_interaction_200) AIC(model_wn_jc_1200, model_cosine_1200, model_wn_jc_rel_1200, model_cosine_rel_1200, model_wn_interaction_1200, model_cosine_interaction_1200)
/semantic_priming_analysis.R
no_license
annanakai/semantic_priming_analysis
R
false
false
6,734
r
# Semantic priming analysis: mixed-effects models relating log RT to
# WordNet (Jiang-Conrath) similarity and embedding cosine distance,
# for the 200 ms and 1200 ms SOA datasets.
#
# Refactor: the original fitted 14 near-identical lmer models by copy-paste;
# the shared covariate/random-effect structure is now factored into fit_lmm(),
# and the dataset preparation into prepare_dataset(). Model objects keep
# their original names and all printed output (summaries, R^2, anova, AIC)
# is produced in the original order.
library(lme4)
library(dplyr)
library(MuMIn)

# ---- Load data ----
processed_data <- read.csv("~/Downloads/distances_data.csv")
soa_200 <- read.csv("~/Documents/cs-clps-final_project/200SOA.csv")
soa_1200 <- read.csv("~/Documents/cs-clps-final_project/1200SOA.csv")

small_data <- processed_data %>%
  dplyr::select(Unnamed..0, prime, target, rel, jc_similarity,
                path_similarity, cosine_distance)
# jc_similarity is bounded above by 1; clip occasional out-of-range values
small_data$jc_similarity[which(small_data$jc_similarity > 1)] <- 1

# Merge an SOA dataset with the similarity measures (by prime/target pair),
# flag pairs that have a specific annotated relation, and keep only rows
# where both similarity measures are available.
# NOTE: both inputs carry a `rel` column, so merge() produces rel.x/rel.y;
# the models below use rel.y (the relatedness column from small_data).
prepare_dataset <- function(soa_df) {
  complete <- merge(soa_df, small_data, by = c("prime", "target"))
  # spec_rel = 1 when a specific relation is annotated, 0 otherwise
  complete$spec_rel <- as.numeric(!is.na(complete$relation))
  complete %>%
    filter(!is.na(jc_similarity)) %>%
    filter(!is.na(cosine_distance))
}
dataset_200 <- prepare_dataset(soa_200)
dataset_1200 <- prepare_dataset(soa_1200)

# ---- Model fitting ----
# Lexical covariates and random intercepts shared by every model.
controls <- paste(
  "t_length + p_length + p_logfreq + t_logfreq + p_orthoN + t_orthoN",
  "+ (1|prime) + (1|target) + (1|Subject)"
)

# Fit logRT ~ <predictors> + controls on the given dataset.
fit_lmm <- function(predictors, data) {
  lmer(as.formula(paste("logRT ~", predictors, "+", controls)), data = data)
}

# WordNet (Jiang-Conrath) models, SOA 200
model_wn_jc_200 <- fit_lmm("jc_similarity", dataset_200)
summary(model_wn_jc_200)
model_wn_jc_rel_200 <- fit_lmm("rel.y + jc_similarity", dataset_200)
summary(model_wn_jc_rel_200)
model_wn_interaction_200 <-
  fit_lmm("rel.y + jc_similarity + jc_similarity*spec_rel", dataset_200)
summary(model_wn_interaction_200)

# Cosine-distance models, SOA 200
model_cosine_200 <- fit_lmm("cosine_distance", dataset_200)
summary(model_cosine_200)
model_cosine_rel_200 <- fit_lmm("cosine_distance + rel.y", dataset_200)
summary(model_cosine_rel_200)
model_cosine_interaction_200 <-
  fit_lmm("cosine_distance + rel.y + cosine_distance*spec_rel", dataset_200)
summary(model_cosine_interaction_200)

# Same six models for SOA 1200
model_wn_jc_1200 <- fit_lmm("jc_similarity", dataset_1200)
summary(model_wn_jc_1200)
model_wn_jc_rel_1200 <- fit_lmm("rel.y + jc_similarity", dataset_1200)
summary(model_wn_jc_rel_1200)
model_wn_interaction_1200 <-
  fit_lmm("rel.y + jc_similarity + jc_similarity*spec_rel", dataset_1200)
summary(model_wn_interaction_1200)
model_cosine_1200 <- fit_lmm("cosine_distance", dataset_1200)
summary(model_cosine_1200)
model_cosine_rel_1200 <- fit_lmm("cosine_distance + rel.y", dataset_1200)
summary(model_cosine_rel_1200)
model_cosine_interaction_1200 <-
  fit_lmm("cosine_distance + rel.y + cosine_distance*spec_rel", dataset_1200)
summary(model_cosine_interaction_1200)

# Baselines: relatedness + covariates only, refit on the filtered datasets
baseline_200 <- fit_lmm("rel.y", dataset_200)
baseline_1200 <- fit_lmm("rel.y", dataset_1200)

# ---- Model assessment ----
# Marginal/conditional R^2 for each model (same order as the original script)
r.squaredGLMM(model_wn_jc_200)
r.squaredGLMM(model_wn_jc_rel_200)
r.squaredGLMM(model_wn_interaction_200)
r.squaredGLMM(model_cosine_200)
r.squaredGLMM(model_cosine_rel_200)
r.squaredGLMM(model_cosine_interaction_200)
r.squaredGLMM(model_wn_jc_1200)
r.squaredGLMM(model_wn_jc_rel_1200)
r.squaredGLMM(model_wn_interaction_1200)
r.squaredGLMM(model_cosine_1200)
r.squaredGLMM(model_cosine_rel_1200)
r.squaredGLMM(model_cosine_interaction_1200)
r.squaredGLMM(baseline_200)
r.squaredGLMM(baseline_1200)

# Likelihood-ratio comparisons between nested models
# baseline vs +similarity, and +similarity vs +interaction, per measure/SOA
a1 <- anova(baseline_200, model_wn_jc_rel_200)
a2 <- anova(model_wn_jc_rel_200, model_wn_interaction_200)
a3 <- anova(baseline_1200, model_wn_jc_rel_1200)
a4 <- anova(model_wn_jc_rel_1200, model_wn_interaction_1200)
a5 <- anova(baseline_200, model_cosine_rel_200)
a6 <- anova(model_cosine_rel_200, model_cosine_interaction_200)
a7 <- anova(baseline_1200, model_cosine_rel_1200)
a8 <- anova(model_cosine_rel_1200, model_cosine_interaction_1200)

# Use AIC to determine whether WordNet or cosine is the better predictor
comp1 <- AIC(model_wn_jc_200, model_cosine_200)
comp2 <- AIC(model_wn_jc_1200, model_cosine_1200)
comp3 <- AIC(model_wn_jc_rel_200, model_cosine_rel_200)
comp4 <- AIC(model_wn_jc_rel_1200, model_cosine_rel_1200)
comp5 <- AIC(model_wn_interaction_200, model_cosine_interaction_200)
comp6 <- AIC(model_wn_interaction_1200, model_cosine_interaction_1200)

AIC(model_wn_jc_200, model_cosine_200,
    model_wn_jc_rel_200, model_cosine_rel_200,
    model_wn_interaction_200, model_cosine_interaction_200)
AIC(model_wn_jc_1200, model_cosine_1200,
    model_wn_jc_rel_1200, model_cosine_rel_1200,
    model_wn_interaction_1200, model_cosine_interaction_1200)
# Patricia Moran 03/03/2019
# Compile per-sample RSEM gene-level quantifications into a single
# expected-count matrix and a single TPM matrix, aligned on gene order.
#
# Fixes over the original:
# - `for (i in 2:length(files))` failed when only one file matched
#   (2:1 iterates 2 then 1, causing a subscript error / duplicate column);
#   replaced with lapply over all files.
# - matrices were grown with cbind() inside the loop (O(n^2) copies);
#   now built once with do.call(cbind, ...).
options(stringsAsFactors = FALSE)
setwd("./pmlosada/CUL3/RSEM_files")

files <- dir(pattern = "*.RSEM_Quant.genes.results", recursive = TRUE)
stopifnot(length(files) > 0)

# The first file fixes the reference gene order for every sample.
genes <- row.names(read.delim(files[[1]], row.names = 1))

# Read each file once, re-ordering its rows to the reference gene order.
all_quants <- lapply(files, function(f) {
  message("Reading ", f)
  x <- read.delim(f, row.names = 1)
  x[match(genes, rownames(x)), ]
})

counts <- do.call(cbind, lapply(all_quants, `[[`, "expected_count"))
tpm    <- do.call(cbind, lapply(all_quants, `[[`, "TPM"))

# Trim Ensembl version suffixes (keep the 15-char stable gene ID) and
# derive sample names from the file paths.
rownames(counts) <- rownames(tpm) <- substr(genes, 1, 15)
colnames(counts) <- colnames(tpm) <-
  gsub("/", "_", gsub(".RSEM_Quant.genes.results", "", files))

save(file = "./RSEM_Quant.genes.counts.RData", counts)
save(file = "./RSEM_Quant.genes.tpm.RData", tpm)
/2.Compile_RSEM.R
no_license
IakouchevaLab/CUL3
R
false
false
784
r
# Patricia Moran 03/03/2019
# Compile per-sample RSEM gene-level quantifications into a single
# expected-count matrix and a single TPM matrix, aligned on gene order.
#
# Fixes over the original:
# - `for (i in 2:length(files))` failed when only one file matched
#   (2:1 iterates 2 then 1, causing a subscript error / duplicate column);
#   replaced with lapply over all files.
# - matrices were grown with cbind() inside the loop (O(n^2) copies);
#   now built once with do.call(cbind, ...).
options(stringsAsFactors = FALSE)
setwd("./pmlosada/CUL3/RSEM_files")

files <- dir(pattern = "*.RSEM_Quant.genes.results", recursive = TRUE)
stopifnot(length(files) > 0)

# The first file fixes the reference gene order for every sample.
genes <- row.names(read.delim(files[[1]], row.names = 1))

# Read each file once, re-ordering its rows to the reference gene order.
all_quants <- lapply(files, function(f) {
  message("Reading ", f)
  x <- read.delim(f, row.names = 1)
  x[match(genes, rownames(x)), ]
})

counts <- do.call(cbind, lapply(all_quants, `[[`, "expected_count"))
tpm    <- do.call(cbind, lapply(all_quants, `[[`, "TPM"))

# Trim Ensembl version suffixes (keep the 15-char stable gene ID) and
# derive sample names from the file paths.
rownames(counts) <- rownames(tpm) <- substr(genes, 1, 15)
colnames(counts) <- colnames(tpm) <-
  gsub("/", "_", gsub(".RSEM_Quant.genes.results", "", files))

save(file = "./RSEM_Quant.genes.counts.RData", counts)
save(file = "./RSEM_Quant.genes.tpm.RData", tpm)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dejong-map.R \name{dejong_map} \alias{dejong_map} \title{The De Jong map.} \usage{ dejong_map(n = 1000, xi = -5, yi = 0.1, a = 1.4, b = -2.3, c = 2.4, d = -2.1, add.timestep = FALSE, plot = FALSE) } \arguments{ \item{n}{Number of points in the time series.} \item{xi}{The initial value for x} \item{yi}{The initial value for y} \item{a}{The value of the parameter a} \item{b}{The value of the parameter b} \item{c}{The value of the parameter c} \item{d}{The value of the parameter d} \item{add.timestep}{Add a column indicating the time step?} \item{plot}{Plot the time series?} } \value{ An n-by-2 data frame containing the values of the x and y components. } \description{ The De Jong map. }
/man/dejong_map.Rd
no_license
kahaaga/chaoticmaps
R
false
true
782
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dejong-map.R \name{dejong_map} \alias{dejong_map} \title{The De Jong map.} \usage{ dejong_map(n = 1000, xi = -5, yi = 0.1, a = 1.4, b = -2.3, c = 2.4, d = -2.1, add.timestep = FALSE, plot = FALSE) } \arguments{ \item{n}{Number of points in the time series.} \item{xi}{The initial value for x} \item{yi}{The initial value for y} \item{a}{The value of the parameter a} \item{b}{The value of the parameter b} \item{c}{The value of the parameter c} \item{d}{The value of the parameter d} \item{add.timestep}{Add a column indicating the time step?} \item{plot}{Plot the time series?} } \value{ An n-by-2 data frame containing the values of the x and y components. } \description{ The De Jong map. }
# QGIS Processing R script: leave-one-out cross-validation of an ordinary
# kriging model (gstat::krige.cv) using a variogram model previously fitted
# and saved as an .RData file in the chosen folder.
# The '##' lines below are QGIS Processing parameter directives — do not edit.
##[Geoestatistica]=group
##load_vector_using_rgdal
##showplots
##Camada=vector
##Variavel=Field Camada
##Pasta=folder
##Modelo_de_Variograma=string
##Vizinhanca_max=number 0
##Vizinhanca_min=number 0
##Distancia_Maxima=number 0.1
##CrossValidation= output vector
library(sp)
library(gstat)
library(rgdal)

# Input layer and the attribute (field) to cross-validate.
Layer = Camada
Field = Variavel
str(Layer)

# Load "<Modelo_de_Variograma>.RData" from the chosen folder; it is
# expected to define the fitted variogram object `vgm1.fit` used below
# (TODO confirm against the companion variogram-fitting script).
setwd(Pasta)
s = Modelo_de_Variograma
x = c(s,"RData")
s = paste(x,collapse=".")
load(s)

# Kriging neighbourhood parameters.
nmax = Vizinhanca_max
nmin = Vizinhanca_min
maxdist = Distancia_Maxima

# Rename the chosen attribute to the fixed name "Field" so the kriging
# formula below can refer to it, and coerce it to numeric.
Field <- make.names(Field)
names(Layer)[names(Layer)==Field]="Field"
Layer$Field <- as.numeric(as.character(Layer$Field))
str(Layer)

# Drop duplicated locations and missing values before cross-validation.
Layer = remove.duplicates(Layer)
Layer = Layer[!is.na(Layer$Field),]

# Leave-one-out cross-validation of ordinary kriging (Field ~ 1).
crossval = krige.cv(Field~1,Layer,vgm1.fit,nmax=nmax,nmin=nmin,maxdist=maxdist,verbose=FALSE)
proj4string(crossval) = proj4string(Layer)
CrossValidation = crossval

# Predicted vs. observed scatter plot, with the correlation coefficient in
# the subtitle, the regression line in red and the medians in gray.
r = cor(crossval$var1.pred,crossval$observed)
plot(crossval$var1.pred,crossval$observed,xlab="Valor Estimado",ylab="Valor Real",main="Validacao Cruzada",sub=paste(" coef.corr = ",round(r, 3)))
abline(lm(crossval$observed~crossval$var1.pred),col="red")
abline(h = median(crossval$observed), v = median(crossval$var1.pred), col = "gray60")
/scripts/Validação Cruzada.rsx
no_license
felipe-geo95/introd_geoestat
R
false
false
1,224
rsx
# QGIS Processing R script: leave-one-out cross-validation of an ordinary
# kriging model (gstat::krige.cv) using a variogram model previously fitted
# and saved as an .RData file in the chosen folder.
# The '##' lines below are QGIS Processing parameter directives — do not edit.
##[Geoestatistica]=group
##load_vector_using_rgdal
##showplots
##Camada=vector
##Variavel=Field Camada
##Pasta=folder
##Modelo_de_Variograma=string
##Vizinhanca_max=number 0
##Vizinhanca_min=number 0
##Distancia_Maxima=number 0.1
##CrossValidation= output vector
library(sp)
library(gstat)
library(rgdal)

# Input layer and the attribute (field) to cross-validate.
Layer = Camada
Field = Variavel
str(Layer)

# Load "<Modelo_de_Variograma>.RData" from the chosen folder; it is
# expected to define the fitted variogram object `vgm1.fit` used below
# (TODO confirm against the companion variogram-fitting script).
setwd(Pasta)
s = Modelo_de_Variograma
x = c(s,"RData")
s = paste(x,collapse=".")
load(s)

# Kriging neighbourhood parameters.
nmax = Vizinhanca_max
nmin = Vizinhanca_min
maxdist = Distancia_Maxima

# Rename the chosen attribute to the fixed name "Field" so the kriging
# formula below can refer to it, and coerce it to numeric.
Field <- make.names(Field)
names(Layer)[names(Layer)==Field]="Field"
Layer$Field <- as.numeric(as.character(Layer$Field))
str(Layer)

# Drop duplicated locations and missing values before cross-validation.
Layer = remove.duplicates(Layer)
Layer = Layer[!is.na(Layer$Field),]

# Leave-one-out cross-validation of ordinary kriging (Field ~ 1).
crossval = krige.cv(Field~1,Layer,vgm1.fit,nmax=nmax,nmin=nmin,maxdist=maxdist,verbose=FALSE)
proj4string(crossval) = proj4string(Layer)
CrossValidation = crossval

# Predicted vs. observed scatter plot, with the correlation coefficient in
# the subtitle, the regression line in red and the medians in gray.
r = cor(crossval$var1.pred,crossval$observed)
plot(crossval$var1.pred,crossval$observed,xlab="Valor Estimado",ylab="Valor Real",main="Validacao Cruzada",sub=paste(" coef.corr = ",round(r, 3)))
abline(lm(crossval$observed~crossval$var1.pred),col="red")
abline(h = median(crossval$observed), v = median(crossval$var1.pred), col = "gray60")
\name{integrateSphereStroud11} \alias{integrateSphereStroud11} %- Also NEED an '\alias' for EACH other topic documented here. \title{Integrate a function over the sphere in n-dimensions. } \description{Approximate the integral of a function f(x)=f(x[1],...,x[n]) over the unit sphere in n-space using Stroud's method of degree 11. } \usage{ integrateSphereStroud11(f, n, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{f}{function f(x)=f(x[1],...,x[n]) to integrate } \item{n}{dimension of the space, implemented for n in the range 3:16. } \item{\dots}{optional arguments passed to f( ). If these are specified, they should be labeled with a tag, e.g. param1=3.4 } } \details{This method works if the integrand is smooth. If the function changes rapidly, adaptive integration can be tried as described in 'See Also' below. } \value{A single number, the approximation to the integral. } \references{ Stroud integration and related functions, adapted from fortran code by John Burkardt found at \cr http://people.sc.fsu.edu/~jburkardt/f77_src/stroud/stroud.html \cr Based on the book by A. H. Stroud, Approximate Calculation of multiple integrals, 1971, page 296-297. } \seealso{ \code{\link{adaptIntegrateSpherePolar}}, \code{\link{adaptIntegrateBallPolar}}, \code{\link{adaptIntegrateSphereTri}} } \examples{ f2 <- function( x ) { return(x[1]^2) } integrateSphereStroud11( f2, n=3 ) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ multivariate integration } \keyword{ cubature }
/man/integrateSphereStroud11.Rd
no_license
cran/SphericalCubature
R
false
false
1,625
rd
\name{integrateSphereStroud11} \alias{integrateSphereStroud11} %- Also NEED an '\alias' for EACH other topic documented here. \title{Integrate a function over the sphere in n-dimensions. } \description{Approximate the integral of a function f(x)=f(x[1],...,x[n]) over the unit sphere in n-space using Stroud's method of degree 11. } \usage{ integrateSphereStroud11(f, n, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{f}{function f(x)=f(x[1],...,x[n]) to integrate } \item{n}{dimension of the space, implemented for n in the range 3:16. } \item{\dots}{optional arguments passed to f( ). If these are specified, they should be labeled with a tag, e.g. param1=3.4 } } \details{This method works if the integrand is smooth. If the function changes rapidly, adaptive integration can be tried as described in 'See Also' below. } \value{A single number, the approximation to the integral. } \references{ Stroud integration and related functions, adapted from fortran code by John Burkardt found at \cr http://people.sc.fsu.edu/~jburkardt/f77_src/stroud/stroud.html \cr Based on the book by A. H. Stroud, Approximate Calculation of multiple integrals, 1971, page 296-297. } \seealso{ \code{\link{adaptIntegrateSpherePolar}}, \code{\link{adaptIntegrateBallPolar}}, \code{\link{adaptIntegrateSphereTri}} } \examples{ f2 <- function( x ) { return(x[1]^2) } integrateSphereStroud11( f2, n=3 ) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ multivariate integration } \keyword{ cubature }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{ENSURE_2A} \alias{ENSURE_2A} \title{ENSURE, figure 2A} \format{ A data frame of 217 observations and 3 variables: \tabular{lll}{ \tab \code{time} \tab event time (in months) \cr \tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr \tab \code{arm} \tab treatment arms (erlotinib, gp) \cr } } \source{ Wu Y-L, Zhou C, Liam C-K, et al. First-line erlotinib versus gemcitabine/cisplatin in patients with advanced EGFR mutation-positive non-small-cell lung cancer: analyses from the phase III, randomized, open-label, ENSURE study. Ann Oncol 2015; 26: 1883–9. } \usage{ ENSURE_2A } \description{ Kaplan-Meier digitized data from ENSURE, figure 2A (PMID 26105600). A reported sample size of 217 for a primary endpoint of PFS in lung cancer. } \examples{ summary(ENSURE_2A) kmplot(ENSURE_2A) } \keyword{datasets}
/man/ENSURE_2A.Rd
no_license
Owain-S/kmdata
R
false
true
947
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{ENSURE_2A} \alias{ENSURE_2A} \title{ENSURE, figure 2A} \format{ A data frame of 217 observations and 3 variables: \tabular{lll}{ \tab \code{time} \tab event time (in months) \cr \tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr \tab \code{arm} \tab treatment arms (erlotinib, gp) \cr } } \source{ Wu Y-L, Zhou C, Liam C-K, et al. First-line erlotinib versus gemcitabine/cisplatin in patients with advanced EGFR mutation-positive non-small-cell lung cancer: analyses from the phase III, randomized, open-label, ENSURE study. Ann Oncol 2015; 26: 1883–9. } \usage{ ENSURE_2A } \description{ Kaplan-Meier digitized data from ENSURE, figure 2A (PMID 26105600). A reported sample size of 217 for a primary endpoint of PFS in lung cancer. } \examples{ summary(ENSURE_2A) kmplot(ENSURE_2A) } \keyword{datasets}
# 5.4
# Doing 10.000.000 simulations for some X_n, where X_n takes the value
# 1/n with probability 1 - 1/n^2 and the value n with probability 1/n^2,
# and comparing simulated mean/variance against the theoretical values.
#
# Refactor: the original repeated the same simulate-and-compare code for
# n = 10, 50, 200; it is now parameterized by n.

NSIM <- 1e7

# Theoretical moments of X_n for this two-point distribution.
theoretical_mean_xn <- function(n) 2 / n - 1 / n^3
theoretical_var_xn  <- function(n) 1 - 3 / n^2 + 3 / n^4 - 1 / n^6

# Draw nsim samples of X_n and print simulated vs. theoretical moments
# (same four printed values, in the same order, as the original script).
# Invisibly returns the sample for reuse.
compare_moments <- function(n, nsim = NSIM) {
  xn <- sample(c(1 / n, n), size = nsim, replace = TRUE,
               prob = c(1 - 1 / n^2, 1 / n^2))
  # Simulated vs. Theoretical
  print(mean(xn))
  print(theoretical_mean_xn(n))
  print(var(xn))
  print(theoretical_var_xn(n))
  invisible(xn)
}

for (n in c(10, 50, 200)) {
  compare_moments(n)
}
/code/5.4.R
no_license
christophersalim/AllStatistics
R
false
false
604
r
# 5.4
# Doing 10.000.000 simulations for some X_n, where X_n takes the value
# 1/n with probability 1 - 1/n^2 and the value n with probability 1/n^2,
# and comparing simulated mean/variance against the theoretical values.
#
# Refactor: the original repeated the same simulate-and-compare code for
# n = 10, 50, 200; it is now parameterized by n.

NSIM <- 1e7

# Theoretical moments of X_n for this two-point distribution.
theoretical_mean_xn <- function(n) 2 / n - 1 / n^3
theoretical_var_xn  <- function(n) 1 - 3 / n^2 + 3 / n^4 - 1 / n^6

# Draw nsim samples of X_n and print simulated vs. theoretical moments
# (same four printed values, in the same order, as the original script).
# Invisibly returns the sample for reuse.
compare_moments <- function(n, nsim = NSIM) {
  xn <- sample(c(1 / n, n), size = nsim, replace = TRUE,
               prob = c(1 - 1 / n^2, 1 / n^2))
  # Simulated vs. Theoretical
  print(mean(xn))
  print(theoretical_mean_xn(n))
  print(var(xn))
  print(theoretical_var_xn(n))
  invisible(xn)
}

for (n in c(10, 50, 200)) {
  compare_moments(n)
}
# Extracted documentation example for argonR::argonButton.
library(argonR)

### Name: argonButton
### Title: Create a Bootstrap 4 argon button
### Aliases: argonButton

### ** Examples

# Only build the button in an interactive session (the HTML tag is
# printed/rendered, which is not useful in batch runs of the examples).
if(interactive()){
 library(argonR)
 argonButton(
  name = "Click me!",
  status = "danger",
  icon = "atom",
  size = "lg",
  toggle_modal = TRUE,
  modal_id = "modal1"
 )
}
/data/genthat_extracted_code/argonR/examples/argonButton.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
306
r
library(argonR) ### Name: argonButton ### Title: Create a Boostrap 4 argon button ### Aliases: argonButton ### ** Examples if(interactive()){ library(argonR) argonButton( name = "Click me!", status = "danger", icon = "atom", size = "lg", toggle_modal = TRUE, modal_id = "modal1" ) }
#' Reads xml data from lattes zip file
#'
#' @param zip.in A (single) name of zip file containing a xml file
#'
#' @return A list with the following items:
#' \describe{
#'   \item{tpesq}{A dataframe with information about researchers}
#'   \item{tpublic.published}{A dataframe with published papers}
#'   \item{tpublic.accepted}{A dataframe with papers accepted for publication}
#'   \item{tsupervisions}{A dataframe with completed and ongoing supervisions}
#'   \item{tbooks}{A dataframe with published books and book chapters}
#'   \item{tconferences}{A dataframe with conference papers}
#' }
#' @export
#'
#' @examples
#'
#' f.in <- system.file('extdata/3262699324398819.zip', package = 'GetLattesData')
#' my.l <- gld_read_zip(f.in)
#' my.l
gld_read_zip <- function(zip.in){

  # error checking: only a single, existing zip file is accepted
  if (length(zip.in)>1) {
    stop('Function gld_read_zip only reads one zip file at a time..')
  }

  if (!file.exists(zip.in)) {
    stop('File ', zip.in, ' does not exists..')
  }

  cat('\nReading ', basename(zip.in))

  # set temp dir for unzipping files
  my.tempdir <- tempdir()
  utils::unzip(zip.in, exdir = my.tempdir)

  # start reading files using XML (Lattes XML files are Latin-1 encoded)
  my.l <- XML::xmlToList(XML::xmlParse(file.path(my.tempdir, 'curriculo.xml'),
                                       encoding = 'ISO-8859-1') )

  # Do RESEARCHERS table: CV log attributes, PhD record, general data
  LATTES.LOG <- do.call(c,list(my.l$.attrs))

  # the DOUTORADO (PhD) node may be a flat attribute vector or a list
  # whose attributes live in $.attrs — handle both shapes
  if (!is.list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO)) {
    DOUTORADO <- do.call(c,list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO))
  } else {
    DOUTORADO <- do.call(c,list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO$.attrs))
  }

  DADOS.GERAIS <- do.call(c, list(my.l$`DADOS-GERAIS`$.attrs))
  AREAS <- do.call(c, list(my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`))

  # researcher without a PhD record: keep a flag instead of the missing node
  if (is.null(DOUTORADO)) DOUTORADO <- c(NO.DOC=TRUE)

  # major (GArea) and minor (AArea) research fields from the first
  # AREAS-DE-ATUACAO entry
  GArea <- my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`[[1]][2]
  AArea <- my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`[[1]][3]

  # fix for GArea and AArea when the node is absent
  if (is.null(GArea)) GArea <- NA
  if (is.null(AArea)) AArea <- NA

  data.tpesq <- cbind(data.frame(t(LATTES.LOG)),
                      data.frame(t(DOUTORADO)),
                      data.frame(t(DADOS.GERAIS)),
                      data.frame(GArea = GArea),
                      data.frame(AArea= AArea))

  # all cols (full set available in the parsed XML; kept for reference,
  # immediately overwritten by the shorter selection below)
  cols.to.keep <- c("NOME.COMPLETO" ,"NUMERO.IDENTIFICADOR","DATA.ATUALIZACAO","CODIGO.INSTITUICAO","NOME.INSTITUICAO" ,
                    "ANO.DE.INICIO","ANO.DE.CONCLUSAO","FLAG.BOLSA","NOME.COMPLETO.DO.ORIENTADOR" ,
                    "NUMERO.ID.ORIENTADOR", "CODIGO.INSTITUICAO.DOUT", "NOME.INSTITUICAO.DOUT",
                    "PAIS.DE.NACIONALIDADE", "CODIGO.INSTITUICAO.SANDUICHE","PERMISSAO.DE.DIVULGACAO",'GArea','AArea')

  # those to keep
  cols.to.keep <- c("NOME.COMPLETO" ,"DATA.ATUALIZACAO", "NOME.INSTITUICAO" ,
                    "ANO.DE.INICIO","ANO.DE.CONCLUSAO",
                    "PAIS.DE.NACIONALIDADE", 'GArea','AArea')

  # set cols to change name (English names, same order as cols.to.keep)
  better.names <- c('name', 'last.update', 'phd.institution',
                    'phd.start.year', 'phd.end.year',
                    'country.origin', 'major.field', 'minor.field')

  # keep only the columns actually present in this CV
  idx <- cols.to.keep %in% names(data.tpesq)
  data.tpesq <- data.tpesq[, cols.to.keep[idx]]

  # fix names to eng
  names(data.tpesq) <- better.names[idx]

  # clean data (dates are stored as ddmmYYYY)
  data.tpesq$last.update <- as.Date(data.tpesq$last.update, '%d%m%Y')
  rownames(data.tpesq) <- NULL
  data.tpesq$id.file <- basename(zip.in)

  # fix issue with no PhD: guarantee the PhD columns always exist
  if (is.null(data.tpesq$phd.institution)){
    data.tpesq$phd.institution <- NA
    data.tpesq$phd.start.year <- NA
    data.tpesq$phd.end.year <- NA
  }

  # PUBLISHED PAPERS
  my.name <- as.character(data.tpesq$name)
  Encoding(my.name) <- 'UTF-8'
  cat(' - ', my.name)

  published.papers <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`ARTIGOS-PUBLICADOS`
  # gld.get.papers.info is a package-internal helper defined elsewhere
  data.tpublic.published <- gld.get.papers.info(published.papers,
                                                name.author = data.tpesq$name,
                                                id.author = basename(zip.in))
  cat(paste0('\n\tFound ',nrow(data.tpublic.published), ' published papers'))

  # ACCEPTED PAPERS
  accpt.papers <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`ARTIGOS-ACEITOS-PARA-PUBLICACAO`
  data.tpublic.accepted <- gld.get.papers.info(accpt.papers,
                                               name.author = data.tpesq$name,
                                               id.author = basename(zip.in))
  cat(paste0('\n\tFound ', nrow(data.tpublic.accepted), ' accepted paper(s)'))

  # SUPERVISIONS: completed first, then ongoing
  ORIENTACOES <- my.l$`OUTRA-PRODUCAO`$`ORIENTACOES-CONCLUIDAS`
  ORIENTACOES.active <- my.l$`DADOS-COMPLEMENTARES`$`ORIENTACOES-EM-ANDAMENTO`
  cat(paste0('\n\tFound ', length(ORIENTACOES), ' supervisions'))

  data.supervisions <- data.frame()
  if (!is.null(ORIENTACOES)) {
    for (i.orient in ORIENTACOES) {
      # NOTE(review): the next two expressions have no effect —
      # presumably leftovers from interactive debugging
      i.orient[[1]]
      i.orient[[2]]
      course <- i.orient[[1]]['NATUREZA']
      type.course <- i.orient[[1]]['TIPO']
      std.name <- i.orient[[2]]['NOME-DO-ORIENTADO']
      year.supervision <- as.numeric(i.orient[[1]]['ANO'])
      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            situation = 'CONCLUIDA',
                            type.course, course, std.name, year.supervision)
      rownames(temp.df) <- NULL
      data.supervisions <- rbind(data.supervisions, temp.df)
    }
  }

  data.supervisions.active <- data.frame()
  if (!is.null(ORIENTACOES.active)) {
    for (i.orient in ORIENTACOES.active) {
      course <- i.orient[[1]]['NATUREZA']
      type.course <- i.orient[[1]]['TIPO']
      # note the different XML tag for ongoing supervisions (ORIENTANDO)
      std.name <- i.orient[[2]]['NOME-DO-ORIENTANDO']
      year.supervision <- as.numeric(i.orient[[1]]['ANO'])
      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            situation = 'EM ANDAMENTO',
                            type.course, course, std.name, year.supervision)
      rownames(temp.df) <- NULL
      data.supervisions.active <- rbind(data.supervisions.active, temp.df)
    }
    data.supervisions <- rbind(data.supervisions, data.supervisions.active)
  }

  # books (published or organized)
  LIVROS.PUBLICADOS <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`LIVROS-PUBLICADOS-OU-ORGANIZADOS`
  cat(paste0('\n\tFound ',length(LIVROS.PUBLICADOS), ' published books'))

  data.books.published <- data.frame()
  if (!is.null(LIVROS.PUBLICADOS)) {
    for (i.book in LIVROS.PUBLICADOS) {
      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            book.title = i.book$`DADOS-BASICOS-DO-LIVRO`['TITULO-DO-LIVRO'],
                            book.year = i.book$`DADOS-BASICOS-DO-LIVRO`['ANO'],
                            book.type = i.book$`DADOS-BASICOS-DO-LIVRO`['TIPO'],
                            book.lan = i.book$`DADOS-BASICOS-DO-LIVRO`['IDIOMA'],
                            book.issn = i.book$`DETALHAMENTO-DO-LIVRO`['ISBN'],
                            book.npages = i.book$`DETALHAMENTO-DO-LIVRO`['NUMERO-DE-PAGINAS'],
                            book.edition = i.book$`DETALHAMENTO-DO-LIVRO`['NUMERO-DA-EDICAO-REVISAO'],
                            book.editor = i.book$`DETALHAMENTO-DO-LIVRO`['NOME-DA-EDITORA'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL
      data.books.published <- rbind(data.books.published, temp.df)
    }
  }

  # books chapters
  LIVROS.CAPITULOS <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`CAPITULOS-DE-LIVROS-PUBLICADOS`
  cat(paste0('\n\tFound ',length(LIVROS.CAPITULOS), ' book chapters'))

  data.books.chapters <- data.frame()
  if (!is.null(LIVROS.CAPITULOS)) {
    for (i.book in LIVROS.CAPITULOS) {
      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            book.title = i.book$`DETALHAMENTO-DO-CAPITULO`['TITULO-DO-LIVRO'],
                            book.chapter = i.book$`DADOS-BASICOS-DO-CAPITULO`['TITULO-DO-CAPITULO-DO-LIVRO'],
                            book.year = i.book$`DADOS-BASICOS-DO-CAPITULO`['ANO'],
                            book.type = i.book$`DADOS-BASICOS-DO-CAPITULO`['TIPO'],
                            book.lan = i.book$`DADOS-BASICOS-DO-CAPITULO`['IDIOMA'],
                            book.issn = i.book$`DETALHAMENTO-DO-CAPITULO`['ISBN'],
                            book.edition = i.book$`DETALHAMENTO-DO-CAPITULO`['NUMERO-DA-EDICAO-REVISAO'],
                            book.editor = i.book$`DETALHAMENTO-DO-CAPITULO`['NOME-DA-EDITORA'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL
      data.books.chapters <- rbind(data.books.chapters, temp.df)
    }
  }

  # books and chapters go into a single table
  data.books <- dplyr::bind_rows(data.books.published, data.books.chapters)

  # conferences
  CONFERENCES <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`TRABALHOS-EM-EVENTOS`
  cat(paste0('\n\tFound ',length(CONFERENCES), ' conference papers'))

  data.conferences <- data.frame()
  if (!is.null(CONFERENCES)) {
    for (i.conf in CONFERENCES) {
      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            article.title = i.conf$`DADOS-BASICOS-DO-TRABALHO`['TITULO-DO-TRABALHO'],
                            article.year = i.conf$`DADOS-BASICOS-DO-TRABALHO`['ANO-DO-TRABALHO'],
                            event.classification = i.conf$`DETALHAMENTO-DO-TRABALHO`['CLASSIFICACAO-DO-EVENTO'],
                            event.name = i.conf$`DETALHAMENTO-DO-TRABALHO`['NOME-DO-EVENTO'],
                            event.isbn = i.conf$`DETALHAMENTO-DO-TRABALHO`['ISBN'],
                            event.city = i.conf$`DETALHAMENTO-DO-TRABALHO`['CIDADE-DO-EVENTO'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL
      data.conferences <- rbind(data.conferences, temp.df)
    }
  }

  # output: one list with all extracted tables
  my.l <- list(tpesq = data.tpesq,
               tpublic.published = data.tpublic.published,
               tpublic.accepted = data.tpublic.accepted,
               tsupervisions = data.supervisions,
               tbooks = data.books,
               tconferences = data.conferences)

  return(my.l)
}
/R/gld_read_lattes_zip.R
no_license
pcbrom/GetLattesData
R
false
false
10,061
r
#' Reads xml data from lattes zip file
#'
#' Unzips a single Lattes curriculum zip, parses the contained
#' \code{curriculo.xml} and extracts researcher information, papers,
#' supervisions, books/chapters and conference papers.
#'
#' @param zip.in A (single) name of zip file containing a xml file
#'
#' @return A list with the following items:
#' \describe{
#'   \item{tpesq}{A dataframe with information about researchers}
#'   \item{tpublic}{A dataframe with information about publications}
#' }
#' @export
#'
#' @examples
#'
#' f.in <- system.file('extdata/3262699324398819.zip', package = 'GetLattesData')
#' my.l <- gld_read_zip(f.in)
#' my.l
gld_read_zip <- function(zip.in){

  # error checking: exactly one existing zip file is accepted
  if (length(zip.in)>1) {
    stop('Function gld_read_zip only reads one zip file at a time..')
  }

  if (!file.exists(zip.in)) {
    stop('File ', zip.in, ' does not exists..')
  }

  cat('\nReading ', basename(zip.in))

  # set temp dir for unzipping files
  my.tempdir <- tempdir()
  utils::unzip(zip.in, exdir = my.tempdir)

  # start reading files using XML
  # (Lattes XML files are declared as ISO-8859-1, not UTF-8)
  my.l <- XML::xmlToList(XML::xmlParse(file.path(my.tempdir, 'curriculo.xml'),
                                       encoding = 'ISO-8859-1') )

  # Do RESEARCHERS

  # top-level attributes of the curriculum (log/update info)
  LATTES.LOG <- do.call(c,list(my.l$.attrs))

  # The DOUTORADO (PhD) node may be a plain attribute vector or a list
  # carrying a .attrs component, depending on the curriculum.
  if (!is.list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO)) {
    DOUTORADO <- do.call(c,list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO))
  } else {
    DOUTORADO <- do.call(c,list(my.l$`DADOS-GERAIS`$`FORMACAO-ACADEMICA-TITULACAO`$DOUTORADO$.attrs))
  }

  DADOS.GERAIS <- do.call(c, list(my.l$`DADOS-GERAIS`$.attrs))
  AREAS <- do.call(c, list(my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`))

  # flag curricula that carry no PhD information at all
  if (is.null(DOUTORADO)) DOUTORADO <- c(NO.DOC=TRUE)

  # major (GArea) and minor (AArea) field taken from the first area entry
  GArea <- my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`[[1]][2]
  AArea <- my.l$`DADOS-GERAIS`$`AREAS-DE-ATUACAO`[[1]][3]

  # fix for GArea and AArea
  if (is.null(GArea)) GArea <- NA
  if (is.null(AArea)) AArea <- NA

  # one-row data.frame describing this researcher
  data.tpesq <- cbind(data.frame(t(LATTES.LOG)),
                      data.frame(t(DOUTORADO)),
                      data.frame(t(DADOS.GERAIS)),
                      data.frame(GArea = GArea),
                      data.frame(AArea= AArea))

  # all cols
  cols.to.keep <- c("NOME.COMPLETO" ,"NUMERO.IDENTIFICADOR","DATA.ATUALIZACAO","CODIGO.INSTITUICAO","NOME.INSTITUICAO" ,
                    "ANO.DE.INICIO","ANO.DE.CONCLUSAO","FLAG.BOLSA","NOME.COMPLETO.DO.ORIENTADOR" ,
                    "NUMERO.ID.ORIENTADOR", "CODIGO.INSTITUICAO.DOUT", "NOME.INSTITUICAO.DOUT",
                    "PAIS.DE.NACIONALIDADE", "CODIGO.INSTITUICAO.SANDUICHE","PERMISSAO.DE.DIVULGACAO",
                    'GArea','AArea')

  # those to keep (NOTE: this overrides the vector above; only these
  # columns are actually used)
  cols.to.keep <- c("NOME.COMPLETO" ,"DATA.ATUALIZACAO", "NOME.INSTITUICAO" ,
                    "ANO.DE.INICIO","ANO.DE.CONCLUSAO",
                    "PAIS.DE.NACIONALIDADE", 'GArea','AArea')

  # set cols to change name (English equivalents, same order as cols.to.keep)
  better.names <- c('name', 'last.update', 'phd.institution', 'phd.start.year',
                    'phd.end.year', 'country.origin', 'major.field', 'minor.field')

  # keep only columns that exist in this particular curriculum
  idx <- cols.to.keep %in% names(data.tpesq)
  data.tpesq <- data.tpesq[, cols.to.keep[idx]]

  # fix names to eng
  names(data.tpesq) <- better.names[idx]

  # clean data
  data.tpesq$last.update <- as.Date(data.tpesq$last.update, '%d%m%Y')
  rownames(data.tpesq) <- NULL
  data.tpesq$id.file <- basename(zip.in)

  # fix issue with no PhD: keep the output schema stable
  if (is.null(data.tpesq$phd.institution)){
    data.tpesq$phd.institution <- NA
    data.tpesq$phd.start.year <- NA
    data.tpesq$phd.end.year <- NA
  }

  # PUBLISHED PAPERS
  my.name <- as.character(data.tpesq$name)
  Encoding(my.name) <- 'UTF-8'
  cat(' - ', my.name)

  published.papers <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`ARTIGOS-PUBLICADOS`

  data.tpublic.published <- gld.get.papers.info(published.papers,
                                                name.author = data.tpesq$name,
                                                id.author = basename(zip.in))

  cat(paste0('\n\tFound ',nrow(data.tpublic.published), ' published papers'))

  # ACCEPTED PAPERS
  accpt.papers <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`ARTIGOS-ACEITOS-PARA-PUBLICACAO`

  data.tpublic.accepted <- gld.get.papers.info(accpt.papers,
                                               name.author = data.tpesq$name,
                                               id.author = basename(zip.in))

  cat(paste0('\n\tFound ', nrow(data.tpublic.accepted), ' accepted paper(s)'))

  # SUPERVISIONS (completed, then in progress)
  ORIENTACOES <- my.l$`OUTRA-PRODUCAO`$`ORIENTACOES-CONCLUIDAS`
  ORIENTACOES.active <- my.l$`DADOS-COMPLEMENTARES`$`ORIENTACOES-EM-ANDAMENTO`

  cat(paste0('\n\tFound ', length(ORIENTACOES), ' supervisions'))

  data.supervisions <- data.frame()
  if (!is.null(ORIENTACOES)) {

    for (i.orient in ORIENTACOES) {

      # NOTE(review): the next two bare expressions are no-ops (their
      # values are discarded); kept as-is.
      i.orient[[1]]
      i.orient[[2]]

      course <- i.orient[[1]]['NATUREZA']
      type.course <- i.orient[[1]]['TIPO']
      std.name <- i.orient[[2]]['NOME-DO-ORIENTADO']
      year.supervision <- as.numeric(i.orient[[1]]['ANO'])

      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            situation = 'CONCLUIDA',
                            type.course, course, std.name, year.supervision)
      rownames(temp.df) <- NULL

      data.supervisions <- rbind(data.supervisions, temp.df)
    }
  }

  data.supervisions.active <- data.frame()
  if (!is.null(ORIENTACOES.active)) {

    for (i.orient in ORIENTACOES.active) {

      course <- i.orient[[1]]['NATUREZA']
      type.course <- i.orient[[1]]['TIPO']
      # NOTE: the active-supervision node uses 'NOME-DO-ORIENTANDO'
      # (vs. 'NOME-DO-ORIENTADO' for completed ones).
      std.name <- i.orient[[2]]['NOME-DO-ORIENTANDO']
      year.supervision <- as.numeric(i.orient[[1]]['ANO'])

      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            situation = 'EM ANDAMENTO',
                            type.course, course, std.name, year.supervision)
      rownames(temp.df) <- NULL

      data.supervisions.active <- rbind(data.supervisions.active, temp.df)
    }

    data.supervisions <- rbind(data.supervisions, data.supervisions.active)
  }

  # books
  LIVROS.PUBLICADOS <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`LIVROS-PUBLICADOS-OU-ORGANIZADOS`

  cat(paste0('\n\tFound ',length(LIVROS.PUBLICADOS), ' published books'))

  data.books.published <- data.frame()
  if (!is.null(LIVROS.PUBLICADOS)) {

    for (i.book in LIVROS.PUBLICADOS) {

      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            book.title = i.book$`DADOS-BASICOS-DO-LIVRO`['TITULO-DO-LIVRO'],
                            book.year = i.book$`DADOS-BASICOS-DO-LIVRO`['ANO'],
                            book.type = i.book$`DADOS-BASICOS-DO-LIVRO`['TIPO'],
                            book.lan = i.book$`DADOS-BASICOS-DO-LIVRO`['IDIOMA'],
                            book.issn = i.book$`DETALHAMENTO-DO-LIVRO`['ISBN'],
                            book.npages = i.book$`DETALHAMENTO-DO-LIVRO`['NUMERO-DE-PAGINAS'],
                            book.edition = i.book$`DETALHAMENTO-DO-LIVRO`['NUMERO-DA-EDICAO-REVISAO'],
                            book.editor = i.book$`DETALHAMENTO-DO-LIVRO`['NOME-DA-EDITORA'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL

      data.books.published <- rbind(data.books.published, temp.df)
    }
  }

  # books chapters
  LIVROS.CAPITULOS <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`CAPITULOS-DE-LIVROS-PUBLICADOS`

  cat(paste0('\n\tFound ',length(LIVROS.CAPITULOS), ' book chapters'))

  data.books.chapters <- data.frame()
  if (!is.null(LIVROS.CAPITULOS)) {

    for (i.book in LIVROS.CAPITULOS) {

      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            book.title = i.book$`DETALHAMENTO-DO-CAPITULO`['TITULO-DO-LIVRO'],
                            book.chapter = i.book$`DADOS-BASICOS-DO-CAPITULO`['TITULO-DO-CAPITULO-DO-LIVRO'],
                            book.year = i.book$`DADOS-BASICOS-DO-CAPITULO`['ANO'],
                            book.type = i.book$`DADOS-BASICOS-DO-CAPITULO`['TIPO'],
                            book.lan = i.book$`DADOS-BASICOS-DO-CAPITULO`['IDIOMA'],
                            book.issn = i.book$`DETALHAMENTO-DO-CAPITULO`['ISBN'],
                            book.edition = i.book$`DETALHAMENTO-DO-CAPITULO`['NUMERO-DA-EDICAO-REVISAO'],
                            book.editor = i.book$`DETALHAMENTO-DO-CAPITULO`['NOME-DA-EDITORA'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL

      data.books.chapters <- rbind(data.books.chapters, temp.df)
    }
  }

  # combine published books and chapters into one table
  data.books <- dplyr::bind_rows(data.books.published, data.books.chapters)

  # conferences
  CONFERENCES <- my.l$`PRODUCAO-BIBLIOGRAFICA`$`TRABALHOS-EM-EVENTOS`

  cat(paste0('\n\tFound ',length(CONFERENCES), ' conference papers'))

  data.conferences <- data.frame()
  if (!is.null(CONFERENCES)) {

    for (i.conf in CONFERENCES) {

      temp.df <- data.frame(id.file = basename(zip.in),
                            name = data.tpesq$name,
                            article.title = i.conf$`DADOS-BASICOS-DO-TRABALHO`['TITULO-DO-TRABALHO'],
                            article.year = i.conf$`DADOS-BASICOS-DO-TRABALHO`['ANO-DO-TRABALHO'],
                            event.classification = i.conf$`DETALHAMENTO-DO-TRABALHO`['CLASSIFICACAO-DO-EVENTO'],
                            event.name = i.conf$`DETALHAMENTO-DO-TRABALHO`['NOME-DO-EVENTO'],
                            event.isbn = i.conf$`DETALHAMENTO-DO-TRABALHO`['ISBN'],
                            event.city = i.conf$`DETALHAMENTO-DO-TRABALHO`['CIDADE-DO-EVENTO'],
                            stringsAsFactors = F)
      rownames(temp.df) <- NULL

      data.conferences <- rbind(data.conferences, temp.df)
    }
  }

  # output (note: my.l is reused as the name of the result list)
  my.l <- list(tpesq = data.tpesq,
               tpublic.published = data.tpublic.published,
               tpublic.accepted = data.tpublic.accepted,
               tsupervisions = data.supervisions,
               tbooks = data.books,
               tconferences = data.conferences)

  return(my.l)
}
# Scheffe Post Hoc - Chapter 11
chap11 <- function() {
  ## Scheffe post hoc pairwise comparisons after a one-way ANOVA on three
  ## store groups.  Prints the raw data, group summary statistics, the
  ## ANOVA table, the critical F and the Scheffe statistic for each
  ## pairwise contrast.  Called for its printed output; returns NULL.

  # Enter MS error and degrees of freedom from ANOVA table
  mserror <- 22.22
  dfnum <- 2
  dferror <- 9

  # Enter sales data, group vectors, j = 3 groups and save as data frame
  sales <- c(30,30,40,40,25,20,25,30,15,20,25,20)
  store <- c(1,1,1,1,2,2,2,2,3,3,3,3)
  j <- 3
  data <- data.frame(sales,store)

  # Group Summary Statistics
  store <- factor(store)
  grpmean <- tapply(sales,store,mean)
  grpsd <- round(tapply(sales,store,sd),digits=2)
  N <- tapply(sales,store,length)

  # One Way Analysis of Variance
  out <- summary(aov(sales~store))

  # Tabled F for Scheffe F comparison
  tableF <- round(qf(.05,dfnum,dferror,lower.tail=FALSE),digits=2)

  # Scheffe statistic for the contrast between groups a and b:
  #   F_S = (mean_a - mean_b)^2 / (MSerror * (1/n_a + 1/n_b) * (j - 1))
  # which is compared against the tabled F above.
  # FIX: the original wrote `contrast / mserror*gpn*(j-1)`, which, by
  # operator precedence, MULTIPLIES by gpn*(j-1) instead of dividing.
  # With equal group sizes here gpn*(j-1) == 1, so the printed values are
  # unchanged, but the parenthesized form below is correct in general.
  scheffe <- function(a, b) {
    contrast <- (grpmean[a] - grpmean[b])^2
    gpn <- 1/N[a] + 1/N[b]
    round(contrast / (mserror * gpn * (j - 1)), digits = 2)
  }

  scheffe1 <- scheffe(1, 2)  # Contrast Store 1 vs. Store 2
  scheffe2 <- scheffe(1, 3)  # Contrast Store 1 vs. Store 3
  scheffe3 <- scheffe(2, 3)  # Contrast Store 2 vs. Store 3

  print(data)
  cat("\n","Group Means =",grpmean,"\n","Group SD =",grpsd,"\n","Sample Size =",N,"\n")
  cat("\n","One-way Analysis of Variance Summary Table","\n")
  cat("\n")
  print(out)
  cat("\n","Critical F for Scheffe Comparison =",tableF,"\n")
  cat("\n","Scheffe Post Hoc Comparisons","\n")
  cat("\n","Store 1 vs. Store 2 =",scheffe1)
  cat("\n","Store 1 vs. Store 3 =",scheffe2)
  cat("\n","Store 2 vs. Store 3 =",scheffe3,"\n")
}
chap11()
/understanding_stats_using_r/Chap11b_Scheffe.R
no_license
alexmerk/courses
R
false
false
1,746
r
# Scheffe Post Hoc - Chapter 11
chap11 <- function() {
  ## Scheffe post hoc pairwise comparisons after a one-way ANOVA on three
  ## store groups.  Prints the raw data, group summary statistics, the
  ## ANOVA table, the critical F and the Scheffe statistic for each
  ## pairwise contrast.  Called for its printed output; returns NULL.

  # Enter MS error and degrees of freedom from ANOVA table
  mserror <- 22.22
  dfnum <- 2
  dferror <- 9

  # Enter sales data, group vectors, j = 3 groups and save as data frame
  sales <- c(30,30,40,40,25,20,25,30,15,20,25,20)
  store <- c(1,1,1,1,2,2,2,2,3,3,3,3)
  j <- 3
  data <- data.frame(sales,store)

  # Group Summary Statistics
  store <- factor(store)
  grpmean <- tapply(sales,store,mean)
  grpsd <- round(tapply(sales,store,sd),digits=2)
  N <- tapply(sales,store,length)

  # One Way Analysis of Variance
  out <- summary(aov(sales~store))

  # Tabled F for Scheffe F comparison
  tableF <- round(qf(.05,dfnum,dferror,lower.tail=FALSE),digits=2)

  # Scheffe statistic for the contrast between groups a and b:
  #   F_S = (mean_a - mean_b)^2 / (MSerror * (1/n_a + 1/n_b) * (j - 1))
  # which is compared against the tabled F above.
  # FIX: the original wrote `contrast / mserror*gpn*(j-1)`, which, by
  # operator precedence, MULTIPLIES by gpn*(j-1) instead of dividing.
  # With equal group sizes here gpn*(j-1) == 1, so the printed values are
  # unchanged, but the parenthesized form below is correct in general.
  scheffe <- function(a, b) {
    contrast <- (grpmean[a] - grpmean[b])^2
    gpn <- 1/N[a] + 1/N[b]
    round(contrast / (mserror * gpn * (j - 1)), digits = 2)
  }

  scheffe1 <- scheffe(1, 2)  # Contrast Store 1 vs. Store 2
  scheffe2 <- scheffe(1, 3)  # Contrast Store 1 vs. Store 3
  scheffe3 <- scheffe(2, 3)  # Contrast Store 2 vs. Store 3

  print(data)
  cat("\n","Group Means =",grpmean,"\n","Group SD =",grpsd,"\n","Sample Size =",N,"\n")
  cat("\n","One-way Analysis of Variance Summary Table","\n")
  cat("\n")
  print(out)
  cat("\n","Critical F for Scheffe Comparison =",tableF,"\n")
  cat("\n","Scheffe Post Hoc Comparisons","\n")
  cat("\n","Store 1 vs. Store 2 =",scheffe1)
  cat("\n","Store 1 vs. Store 3 =",scheffe2)
  cat("\n","Store 2 vs. Store 3 =",scheffe3,"\n")
}
chap11()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/isPositiveIntegerOrNaOrNanOrInfScalarOrNull.R \name{isPositiveIntegerOrNaOrNanOrInfScalarOrNull} \alias{isPositiveIntegerOrNaOrNanOrInfScalarOrNull} \title{Wrapper for the checkarg function, using specific parameter settings.} \usage{ isPositiveIntegerOrNaOrNanOrInfScalarOrNull(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) } \arguments{ \item{argument}{See checkarg function.} \item{default}{See checkarg function.} \item{stopIfNot}{See checkarg function.} \item{message}{See checkarg function.} \item{argumentName}{See checkarg function.} } \value{ See checkarg function. } \description{ This function can be used in 3 ways:\enumerate{ \item Return TRUE or FALSE depending on whether the argument checks are passed. This is suitable e.g. for if statements that take further action if the argument does not pass the checks.\cr \item Throw an exception if the argument does not pass the checks. This is suitable e.g. when no further action needs to be taken other than throwing an exception if the argument does not pass the checks.\cr \item Same as (2) but by supplying a default value, a default can be assigned in a single statement, when the argument is NULL. 
The checks are still performed on the returned value, and an exception is thrown when not passed.\cr } } \details{ Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = TRUE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName) } \examples{ isPositiveIntegerOrNaOrNanOrInfScalarOrNull(2) # returns TRUE (argument is valid) isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X") # returns FALSE (argument is invalid) #isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", stopIfNot = TRUE) # throws exception with message defined by message and argumentName parameters isPositiveIntegerOrNaOrNanOrInfScalarOrNull(2, default = 1) # returns 2 (the argument, rather than the default, since it is not NULL) #isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", default = 1) # throws exception with message defined by message and argumentName parameters isPositiveIntegerOrNaOrNanOrInfScalarOrNull(NULL, default = 1) # returns 1 (the default, rather than the argument, since it is NULL) }
/man/isPositiveIntegerOrNaOrNanOrInfScalarOrNull.Rd
no_license
cran/checkarg
R
false
true
2,573
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/isPositiveIntegerOrNaOrNanOrInfScalarOrNull.R \name{isPositiveIntegerOrNaOrNanOrInfScalarOrNull} \alias{isPositiveIntegerOrNaOrNanOrInfScalarOrNull} \title{Wrapper for the checkarg function, using specific parameter settings.} \usage{ isPositiveIntegerOrNaOrNanOrInfScalarOrNull(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) } \arguments{ \item{argument}{See checkarg function.} \item{default}{See checkarg function.} \item{stopIfNot}{See checkarg function.} \item{message}{See checkarg function.} \item{argumentName}{See checkarg function.} } \value{ See checkarg function. } \description{ This function can be used in 3 ways:\enumerate{ \item Return TRUE or FALSE depending on whether the argument checks are passed. This is suitable e.g. for if statements that take further action if the argument does not pass the checks.\cr \item Throw an exception if the argument does not pass the checks. This is suitable e.g. when no further action needs to be taken other than throwing an exception if the argument does not pass the checks.\cr \item Same as (2) but by supplying a default value, a default can be assigned in a single statement, when the argument is NULL. 
The checks are still performed on the returned value, and an exception is thrown when not passed.\cr } } \details{ Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = TRUE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName) } \examples{ isPositiveIntegerOrNaOrNanOrInfScalarOrNull(2) # returns TRUE (argument is valid) isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X") # returns FALSE (argument is invalid) #isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", stopIfNot = TRUE) # throws exception with message defined by message and argumentName parameters isPositiveIntegerOrNaOrNanOrInfScalarOrNull(2, default = 1) # returns 2 (the argument, rather than the default, since it is not NULL) #isPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", default = 1) # throws exception with message defined by message and argumentName parameters isPositiveIntegerOrNaOrNanOrInfScalarOrNull(NULL, default = 1) # returns 1 (the default, rather than the argument, since it is NULL) }
# Harmonise the rural/urban indicator for a PNAD persons data.table.
# Adds a numeric column `ruralUrban` to `Data` by reference (data.table
# `:=`): 0 = rural, 1 = urban, NA where the source value matches neither
# code list in the year's crosswalk.  Returns `Data` invisibly-by-value.
build_geography_ruralUrban_pnad <- function(Data){

  # Loading the crosswalk that maps, per survey year, the source variable
  # name and the codes meaning "rural" and "urban"
  file_location <- system.file("extdata", "crosswalk_pnad_ruralUrban.csv",
                               package = "harmonizePNAD")
  crosswalk <- data.table::fread(file_location, colClasses = "character")

  # Selecting the appropriate crosswalk for the current year
  metadata <- harmonizePNAD:::get_metadata(Data)
  crosswalk_i <- crosswalk[year == metadata$year]

  # Checking the variable availability; in these early editions the
  # rural/urban flag lives in the household file, not the persons file
  if(metadata$year %in% c(1976, 1978, 1979)){
    warning(paste0("For the 1976, 1978, and 1979 editions, the variable about rural/urban\n",
                   "situation is not originally contained in the persons data file. You\n",
                   "must import it from the household data file first."))
  }
  harmonizePNAD:::check_necessary_vars(Data, crosswalk_i$var_ruralUrban)

  # Recoding: start from NA, then set 0/1 where the source variable
  # matches the crosswalk's rural/urban code lists.
  Data[ , ruralUrban := as.numeric(NA)]

  # Build filter expressions like "V1 %in% c( 1, 3 )" from the crosswalk
  # text columns.  NOTE(review): assumes value_rural / value_urban are
  # comma-separated code lists -- confirm against the packaged csv.
  expr_rural <- with(crosswalk_i, paste(var_ruralUrban,"%in% c(",value_rural, ")"))
  expr_urban <- with(crosswalk_i, paste(var_ruralUrban,"%in% c(",value_urban, ")"))

  Data[eval(parse(text = expr_rural)), ruralUrban := 0]
  Data[eval(parse(text = expr_urban)), ruralUrban := 1]

  gc()
  Data
}
/R/build_geography_ruralUrban_pnad.R
no_license
arthurwelle/harmonizePNAD
R
false
false
1,416
r
# Harmonise the rural/urban indicator for a PNAD persons data.table.
# Adds a numeric column `ruralUrban` to `Data` by reference (data.table
# `:=`): 0 = rural, 1 = urban, NA where the source value matches neither
# code list in the year's crosswalk.  Returns `Data` invisibly-by-value.
build_geography_ruralUrban_pnad <- function(Data){

  # Loading the crosswalk that maps, per survey year, the source variable
  # name and the codes meaning "rural" and "urban"
  file_location <- system.file("extdata", "crosswalk_pnad_ruralUrban.csv",
                               package = "harmonizePNAD")
  crosswalk <- data.table::fread(file_location, colClasses = "character")

  # Selecting the appropriate crosswalk for the current year
  metadata <- harmonizePNAD:::get_metadata(Data)
  crosswalk_i <- crosswalk[year == metadata$year]

  # Checking the variable availability; in these early editions the
  # rural/urban flag lives in the household file, not the persons file
  if(metadata$year %in% c(1976, 1978, 1979)){
    warning(paste0("For the 1976, 1978, and 1979 editions, the variable about rural/urban\n",
                   "situation is not originally contained in the persons data file. You\n",
                   "must import it from the household data file first."))
  }
  harmonizePNAD:::check_necessary_vars(Data, crosswalk_i$var_ruralUrban)

  # Recoding: start from NA, then set 0/1 where the source variable
  # matches the crosswalk's rural/urban code lists.
  Data[ , ruralUrban := as.numeric(NA)]

  # Build filter expressions like "V1 %in% c( 1, 3 )" from the crosswalk
  # text columns.  NOTE(review): assumes value_rural / value_urban are
  # comma-separated code lists -- confirm against the packaged csv.
  expr_rural <- with(crosswalk_i, paste(var_ruralUrban,"%in% c(",value_rural, ")"))
  expr_urban <- with(crosswalk_i, paste(var_ruralUrban,"%in% c(",value_urban, ")"))

  Data[eval(parse(text = expr_rural)), ruralUrban := 0]
  Data[eval(parse(text = expr_urban)), ruralUrban := 1]

  gc()
  Data
}
# Prediction of breast cancer diagnosis using the WBCD dataset.
# k-nearest-neighbour classification via knn() from the 'class' package.
library(class)
# NOTE(review): CrossTable() used below is from the 'gmodels' package,
# which is never loaded in this script -- confirm library(gmodels) is
# attached before running.

wbcd <- read.csv('data/wisc_bc_data.csv', stringsAsFactors=FALSE)

# Remove the ID attribute
wbcd = wbcd[-1]
table(wbcd$diagnosis)

# Recode the diagnosis as a factor
wbcd$diagnosis <- factor(wbcd$diagnosis,
                         levels=c('M', 'B'),
                         labels=c('Malignant', 'Benign'))

# The max-min normalization: rescale a numeric vector to [0, 1]
normalize <- function(x) {
  return ((x - min(x)) / (max(x) - min(x)))
}

# We need to normalize the 30 numeric columns of the dataframe.
# lapply takes a list and a function, and returns a list of the function
# applied to each of the elements in the input list. Then we convert this
# output list to a dataframe.
wbcd_n <- as.data.frame(lapply(wbcd[-1], normalize))

# Split the data into 469 training examples and 100 test examples.
wbcd_train <- wbcd_n[1:469,]
wbcd_test <- wbcd_n[470:569,]

# Store the target labels in the same division and order.
wbcd_train_labels <- wbcd[1:469, 1]
wbcd_test_labels <- wbcd[470:569, 1]

# ---------------------------------------
# Use k = int(sqrt(469)) = 21
wbcd_test_pred <- knn(train=wbcd_train, test=wbcd_test,
                      cl=wbcd_train_labels, k=21)
# ---------------------------------------

# See the performance
CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq=FALSE)

# Confusion-matrix helpers: 'actual' and 'predicted' are 0/1 (or logical)
# vectors of equal length.

# Select those elements from predicted which correspond to actual == 1
# Check how many of these selected elements equal 1.
true_pos <- function(actual, predicted) sum(predicted[actual == 1] == 1)

# Select those elements from predicted which correspond to actual == 1
# Check how many of these selected elements equal 0.
false_neg <- function(actual, predicted) sum(predicted[actual == 1] == 0)

# Select those elements from predicted which correspond to actual == 0
# Check how many of these selected elements equal 1.
false_pos <- function(actual, predicted) sum(predicted[actual == 0] == 1)

# Recall is the fraction of Malignant cases that we successfully classified
# as such.
# recall = TP / (TP + FN)
recall <- function(actual, predicted) {
  tp <- true_pos(actual, predicted)
  fn <- false_neg(actual, predicted)
  return (tp / (tp + fn))
}

# precision = TP / (TP + FP): fraction of predicted positives that are real
precision <- function(actual, predicted) {
  tp <- true_pos(actual, predicted)
  fp <- false_pos(actual, predicted)
  return (tp / (tp + fp))
}

# F-score: harmonic mean of precision and recall
f_score <- function(actual, predicted) {
  p <- precision(actual, predicted)
  r <- recall(actual, predicted)
  return (2 * p * r / (p + r))
}

# See how our model performed!
act <- wbcd_test_labels == 'Malignant'
pred <- wbcd_test_pred == 'Malignant'
f_score(act, pred)
# A whopping 97% !!

# --- Improvement
# Since we see that the errors our model makes are dangerous false negatives(
# telling someone they don't have cancer when they in reality do),
# we need to improve.
# Use z-score standardization instead of max-min normalization.
wbcd_z <- as.data.frame(scale(wbcd[-1]))  # Omit the diagnosis column.

wbcd_train_z <- wbcd_z[1:469,]
wbcd_test_z <- wbcd_z[470:569,]

# Predict using k = 21 now
wbcd_test_pred_z <- knn(train=wbcd_train_z, test=wbcd_test_z,
                        cl=wbcd_train_labels, k=21)

CrossTable(x=wbcd_test_labels, y=wbcd_test_pred_z, prop.chisq=FALSE)

# See how our model performed!
act <- wbcd_test_labels == 'Malignant'
pred <- wbcd_test_pred_z == 'Malignant'
# NOTE(review): the script ends without calling f_score(act, pred) for the
# z-scored model -- confirm whether that final call was intended.
/kNN/wbcd.r
no_license
niclupfer/mlr
R
false
false
3,363
r
# Prediction of breast cancer diagnosis using the WBCD dataset.
# k-nearest-neighbour classification via knn() from the 'class' package.
library(class)
# NOTE(review): CrossTable() used below is from the 'gmodels' package,
# which is never loaded in this script -- confirm library(gmodels) is
# attached before running.

wbcd <- read.csv('data/wisc_bc_data.csv', stringsAsFactors=FALSE)

# Remove the ID attribute
wbcd = wbcd[-1]
table(wbcd$diagnosis)

# Recode the diagnosis as a factor
wbcd$diagnosis <- factor(wbcd$diagnosis,
                         levels=c('M', 'B'),
                         labels=c('Malignant', 'Benign'))

# The max-min normalization: rescale a numeric vector to [0, 1]
normalize <- function(x) {
  return ((x - min(x)) / (max(x) - min(x)))
}

# We need to normalize the 30 numeric columns of the dataframe.
# lapply takes a list and a function, and returns a list of the function
# applied to each of the elements in the input list. Then we convert this
# output list to a dataframe.
wbcd_n <- as.data.frame(lapply(wbcd[-1], normalize))

# Split the data into 469 training examples and 100 test examples.
wbcd_train <- wbcd_n[1:469,]
wbcd_test <- wbcd_n[470:569,]

# Store the target labels in the same division and order.
wbcd_train_labels <- wbcd[1:469, 1]
wbcd_test_labels <- wbcd[470:569, 1]

# ---------------------------------------
# Use k = int(sqrt(469)) = 21
wbcd_test_pred <- knn(train=wbcd_train, test=wbcd_test,
                      cl=wbcd_train_labels, k=21)
# ---------------------------------------

# See the performance
CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq=FALSE)

# Confusion-matrix helpers: 'actual' and 'predicted' are 0/1 (or logical)
# vectors of equal length.

# Select those elements from predicted which correspond to actual == 1
# Check how many of these selected elements equal 1.
true_pos <- function(actual, predicted) sum(predicted[actual == 1] == 1)

# Select those elements from predicted which correspond to actual == 1
# Check how many of these selected elements equal 0.
false_neg <- function(actual, predicted) sum(predicted[actual == 1] == 0)

# Select those elements from predicted which correspond to actual == 0
# Check how many of these selected elements equal 1.
false_pos <- function(actual, predicted) sum(predicted[actual == 0] == 1)

# Recall is the fraction of Malignant cases that we successfully classified
# as such.
# recall = TP / (TP + FN)
recall <- function(actual, predicted) {
  tp <- true_pos(actual, predicted)
  fn <- false_neg(actual, predicted)
  return (tp / (tp + fn))
}

# precision = TP / (TP + FP): fraction of predicted positives that are real
precision <- function(actual, predicted) {
  tp <- true_pos(actual, predicted)
  fp <- false_pos(actual, predicted)
  return (tp / (tp + fp))
}

# F-score: harmonic mean of precision and recall
f_score <- function(actual, predicted) {
  p <- precision(actual, predicted)
  r <- recall(actual, predicted)
  return (2 * p * r / (p + r))
}

# See how our model performed!
act <- wbcd_test_labels == 'Malignant'
pred <- wbcd_test_pred == 'Malignant'
f_score(act, pred)
# A whopping 97% !!

# --- Improvement
# Since we see that the errors our model makes are dangerous false negatives(
# telling someone they don't have cancer when they in reality do),
# we need to improve.
# Use z-score standardization instead of max-min normalization.
wbcd_z <- as.data.frame(scale(wbcd[-1]))  # Omit the diagnosis column.

wbcd_train_z <- wbcd_z[1:469,]
wbcd_test_z <- wbcd_z[470:569,]

# Predict using k = 21 now
wbcd_test_pred_z <- knn(train=wbcd_train_z, test=wbcd_test_z,
                        cl=wbcd_train_labels, k=21)

CrossTable(x=wbcd_test_labels, y=wbcd_test_pred_z, prop.chisq=FALSE)

# See how our model performed!
act <- wbcd_test_labels == 'Malignant'
pred <- wbcd_test_pred_z == 'Malignant'
# NOTE(review): the script ends without calling f_score(act, pred) for the
# z-scored model -- confirm whether that final call was intended.
#' Apply the MMRDetect classifier to a set of mutation variables
#'
#' Rescales the count-like input variables and appends the classifier's
#' predicted probability as a new column.
#'
#' @param mutationVariable A data.frame containing the input variables
#'   "Del_rep_mean", "RepIndel_num", "MMR_sum" and "maxcossim".
#' @param classifier A fitted glm used for prediction; defaults to the
#'   packaged MMRDclassifier (trained on ~270 colorectal cancers).
#' @return The input data.frame with an added numeric column `glm_prob`
#'   holding the predicted probabilities.
#' @export
MMRDetect.classify <- function(mutationVariable, classifier = MMRDclassifier) {
  # Predictor columns the classifier expects, in a fixed order.
  predictor_names <- c("Del_rep_mean", "RepIndel_num", "MMR_sum", "maxcossim")
  predictors <- mutationVariable[, predictor_names]

  # Rescale the two count-like predictors to [0, 1] relative to their
  # maximum within this cohort (presumably matching the scaling used when
  # the classifier was trained -- TODO confirm).
  predictors$RepIndel_num <- predictors$RepIndel_num / max(predictors$RepIndel_num)
  predictors$MMR_sum <- predictors$MMR_sum / max(predictors$MMR_sum)

  # Append the classifier's response-scale prediction for every row.
  mutationVariable$glm_prob <- stats::predict.glm(classifier,
                                                  newdata = predictors,
                                                  type = "response")
  return(mutationVariable)
}
/R/MMRDetect.classify.R
no_license
Nik-Zainal-Group/MMRDetect
R
false
false
820
r
#' Apply the MMRDetect classifier to a set of mutation variables
#'
#' Rescales the count-like input variables and appends the classifier's
#' predicted probability as a new column.
#'
#' @param mutationVariable A data.frame containing the input variables
#'   "Del_rep_mean", "RepIndel_num", "MMR_sum" and "maxcossim".
#' @param classifier A fitted glm used for prediction; defaults to the
#'   packaged MMRDclassifier (trained on ~270 colorectal cancers).
#' @return The input data.frame with an added numeric column `glm_prob`
#'   holding the predicted probabilities.
#' @export
MMRDetect.classify <- function(mutationVariable, classifier = MMRDclassifier) {
  # Predictor columns the classifier expects, in a fixed order.
  predictor_names <- c("Del_rep_mean", "RepIndel_num", "MMR_sum", "maxcossim")
  predictors <- mutationVariable[, predictor_names]

  # Rescale the two count-like predictors to [0, 1] relative to their
  # maximum within this cohort (presumably matching the scaling used when
  # the classifier was trained -- TODO confirm).
  predictors$RepIndel_num <- predictors$RepIndel_num / max(predictors$RepIndel_num)
  predictors$MMR_sum <- predictors$MMR_sum / max(predictors$MMR_sum)

  # Append the classifier's response-scale prediction for every row.
  mutationVariable$glm_prob <- stats::predict.glm(classifier,
                                                  newdata = predictors,
                                                  type = "response")
  return(mutationVariable)
}
context('add_line_unity')

data("example_data")

test_that('basic', {
  # Moderated t-test on the example dataset, then flag significant hits.
  mod_stats <- calc_mod_ttest(example_data)
  significant <- id_significant_proteins(mod_stats)

  # Build the basic scatter plot and overlay the bait protein.
  scatter <- plot_scatter_basic(significant)
  with_bait <- plot_overlay(scatter, as.bait('BCL2'))

  # Convert to an interactive (plotly) plot and add the unity line.
  interactive_plot <- make_interactive(with_bait)
  result <- add_plotly_line_unity(interactive_plot)

  expect_true(!is.null(result$x))
})
/tests/testthat/test.add_line_unity.R
permissive
lagelab/Genoppi
R
false
false
499
r
context('add_line_unity')

data("example_data")

test_that('basic', {
  # Moderated t-test on the example dataset, then flag significant hits.
  mod_stats <- calc_mod_ttest(example_data)
  significant <- id_significant_proteins(mod_stats)

  # Build the basic scatter plot and overlay the bait protein.
  scatter <- plot_scatter_basic(significant)
  with_bait <- plot_overlay(scatter, as.bait('BCL2'))

  # Convert to an interactive (plotly) plot and add the unity line.
  interactive_plot <- make_interactive(with_bait)
  result <- add_plotly_line_unity(interactive_plot)

  expect_true(!is.null(result$x))
})
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital in `state` with the given rank for
  ## 30-day mortality on `outcome`.
  ##
  ## Args:
  ##   state:   two-letter state abbreviation, e.g. "TX".
  ##   outcome: outcome name, e.g. "heart attack" (matched
  ##            case-insensitively against the data's column names).
  ##   num:     rank to return -- "best", "worst", or a positive integer.
  ##            A rank beyond the number of hospitals yields NA.
  ##
  ## Read outcome data (all columns as character; the rate column is cast
  ## to numeric below).
  filename <- "rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv"
  data <- read.csv(filename, colClasses = "character")

  ## Check that state and outcome are valid
  state_mask <- data[, "State"] == state
  if (sum(state_mask) == 0) stop("invalid state")
  col_names <- colnames(data)
  ## Column names use "." where the outcome argument has spaces.
  ## FIX: replace every space (gsub), not just the first (sub), so
  ## multi-word outcomes are handled robustly.
  outcome <- gsub(" ", ".", outcome, fixed = TRUE)
  outcome_matches <- grep(pattern = outcome, x = col_names, value = TRUE,
                          ignore.case = TRUE)
  if (length(outcome_matches) == 0) stop("invalid outcome")

  ## Trim to just the hospital name and its rate for the specified outcome.
  state_data <- data[state_mask, c("Hospital.Name", outcome_matches[1])]
  colnames(state_data) <- c("name", "outcome")
  ## "Not Available" entries become NA; the coercion warning is expected,
  ## so silence it and drop the incomplete rows.
  state_data[, "outcome"] <- suppressWarnings(as.numeric(state_data[, "outcome"]))
  state_data <- na.omit(state_data)

  ## Return hospital name in that state with the given rank for the
  ## 30-day death rate; ties are broken alphabetically by hospital name.
  if (num == "best") num <- 1
  if (num == "worst") num <- nrow(state_data)
  ordering <- with(state_data, order(outcome, name, na.last = NA))
  state_data[ordering, ][num, "name"]
}
/r-programming/PA3/rankhospital.R
no_license
snelsen/datasciencecoursera
R
false
false
1,177
r
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital in `state` with the given rank for
  ## 30-day mortality on `outcome`.
  ##
  ## Args:
  ##   state:   two-letter state abbreviation, e.g. "TX".
  ##   outcome: outcome name, e.g. "heart attack" (matched
  ##            case-insensitively against the data's column names).
  ##   num:     rank to return -- "best", "worst", or a positive integer.
  ##            A rank beyond the number of hospitals yields NA.
  ##
  ## Read outcome data (all columns as character; the rate column is cast
  ## to numeric below).
  filename <- "rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv"
  data <- read.csv(filename, colClasses = "character")

  ## Check that state and outcome are valid
  state_mask <- data[, "State"] == state
  if (sum(state_mask) == 0) stop("invalid state")
  col_names <- colnames(data)
  ## Column names use "." where the outcome argument has spaces.
  ## FIX: replace every space (gsub), not just the first (sub), so
  ## multi-word outcomes are handled robustly.
  outcome <- gsub(" ", ".", outcome, fixed = TRUE)
  outcome_matches <- grep(pattern = outcome, x = col_names, value = TRUE,
                          ignore.case = TRUE)
  if (length(outcome_matches) == 0) stop("invalid outcome")

  ## Trim to just the hospital name and its rate for the specified outcome.
  state_data <- data[state_mask, c("Hospital.Name", outcome_matches[1])]
  colnames(state_data) <- c("name", "outcome")
  ## "Not Available" entries become NA; the coercion warning is expected,
  ## so silence it and drop the incomplete rows.
  state_data[, "outcome"] <- suppressWarnings(as.numeric(state_data[, "outcome"]))
  state_data <- na.omit(state_data)

  ## Return hospital name in that state with the given rank for the
  ## 30-day death rate; ties are broken alphabetically by hospital name.
  if (num == "best") num <- 1
  if (num == "worst") num <- nrow(state_data)
  ordering <- with(state_data, order(outcome, name, na.last = NA))
  state_data[ordering, ][num, "name"]
}
\name{psi.rks} \alias{psi.rks} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Random samples from Kolmogorov distribution } \description{ Generate random samples from Kolmogorov distribution. } \usage{ psi.rks(n, df, randgen = runif, cdf = punif, ...) } \arguments{ \item{n}{ number of observations. } \item{df}{ degrees of freedom of the Kolmogorov distribution } \item{randgen}{a function or a name of one that generates random numbers from the hypothesised distribution. } \item{cdf}{ the cdf of the hypothesised distribution. } \item{\dots}{ parameters to be passed down to \code{randgen} and \code{cdf} } } \details{ Instances of the \eqn{D_{df}} statistic are obtained by generating random samples of length \code{df} from the hypothesised distribution and computing \eqn{D_{df}} on them. In the case of a simple null hypothesis from a continuous distribution the distribution of the statistic does not depend on the underlying distribution. So, only the arguments \code{n} and \code{df} are really necessary for the Kolmogorov-Smirnov test. Imaginative settings for this argument may be used to generate samples in the case of composite hypotheses. } \value{ a numeric vector representing a sample from the distribution of the Kolmogorov statistic for the specified distribution. } % \references{ ~put references to the literature/web site here ~ } \author{Georgi N. Boshnakov} %\note{ ~~further notes~~} \seealso{ \code{\link{psi.pks}}% %, \code{\link{psi.rlks}}% %, etc. } \examples{ x1 <- psi.rks(1000,10) hist(x1) plot(ecdf(x1)) # simulation estimate of the dist of D_10 f1 <- function(x) psi.pks(x,10) curve(f1,0,1,col="blue",add=TRUE) # overlay the exact cdf } \keyword{ distribution }
/man/psi.rks.Rd
no_license
GeoBosh/psistat
R
false
false
1,796
rd
\name{psi.rks} \alias{psi.rks} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Random samples from Kolmogorov distribution } \description{ Generate random samples from Kolmogorov distribution. } \usage{ psi.rks(n, df, randgen = runif, cdf = punif, ...) } \arguments{ \item{n}{ number of observations. } \item{df}{ degrees of freedom of the Kolmogorov distribution } \item{randgen}{a function or a name of one that generates random numbers from the hypothesised distribution. } \item{cdf}{ the cdf of the hypothesised distribution. } \item{\dots}{ parameters to be passed down to \code{randgen} and \code{cdf} } } \details{ Instances of the \eqn{D_{df}} statistic are obtained by generating random samples of length \code{df} from the hypothesised distribution and computing \eqn{D_{df}} on them. In the case of a simple null hypothesis from a continuous distribution the distribution of the statistic does not depend on the underlying distribution. So, only the arguments \code{n} and \code{df} are really necessary for the Kolmogorov-Smirnov test. Imaginative settings for this argument may be used to generate samples in the case of composite hypotheses. } \value{ a numeric vector representing a sample from the distribution of the Kolmogorov statistic for the specified distribution. } % \references{ ~put references to the literature/web site here ~ } \author{Georgi N. Boshnakov} %\note{ ~~further notes~~} \seealso{ \code{\link{psi.pks}}% %, \code{\link{psi.rlks}}% %, etc. } \examples{ x1 <- psi.rks(1000,10) hist(x1) plot(ecdf(x1)) # simulation estimate of the dist of D_10 f1 <- function(x) psi.pks(x,10) curve(f1,0,1,col="blue",add=TRUE) # overlay the exact cdf } \keyword{ distribution }
# PCA -- step-by-step principal components analysis, following:
# https://davetang.org/muse/2012/02/01/step-by-step-principal-components-analysis-using-r/

# use a simple two dimensional dataset to illustrate PCA
x <- c(2.5, 0.5, 2.2, 1.9, 3.1, 2.3, 2, 1, 1.5, 1.1)
y <- c(2.4, 0.7, 2.9, 2.2, 3.0, 2.7, 1.6, 1.1, 1.6, 0.9)
plot(x, y, pch = 19)

# Centre each variable by subtracting its mean
mean(x)
mean(y)
(x1 <- x - mean(x))   # outer parentheses print the assigned value
summary(x1)
(y1 <- y - mean(y))
summary(y1)

# Our standardised dataset visualised on the x-y coordinates.
plot(x1, y1, pch = 19)

# Pairwise covariances of the centred variables
cov(x1, y1)
cov(x1, x1)
cov(y1, y1)

# Assemble the 2x2 covariance matrix by hand
m <- matrix(c(cov(x1, x1), cov(x1, y1), cov(y1, x1),cov(y1, y1)),
            nrow=2, ncol=2, byrow=TRUE,
            dimnames=list(c("x","y"),c("x","y")))
m

# Eigendecomposition: eigenvectors are the principal directions,
# eigenvalues the variance captured along each
e = eigen(m)
e

# Project the centred data onto the eigenvectors to obtain the PC scores
pc1 = x1 * e$vectors[1,1] + y1 * e$vectors[2,1]
pc1
pc2 = x1 * e$vectors[1,2] + y1 * e$vectors[2,2]
pc2
data.frame(PC1 = pc1, PC2=pc2)
plot(pc1,pc2, pch=19)

# Same analysis using the built-in prcomp()
(data = data.frame(x,y))
data.pca = prcomp(data)
data.pca
names(data.pca)
# scores from prcomp; NOTE(review): these may differ from the manual
# projection above by the sign of each component
data.pca$x
plot(data.pca$x[,1], data.pca$x[,2], pch=19)
eigen(m)
data.pca
x

# Second toy example: princomp() on a 6x4 integer matrix
cc = matrix(1:24, ncol=4)
cc
PCAcc = princomp(cc, scores=T, cor=T)
PCAcc$loadings
PCAcc$scores

# Reproduce the scores manually: first centre each column...
apply(cc, 2, mean)
cc
t(cc)
t(cc) -apply(cc, 2, mean)
t(t(cc) -apply(cc, 2, mean))
rescaled = t ( t(cc) - apply(cc, 2, mean))
# ...then project onto the loadings; compare with predict() and with
# scale() using the fitted centre/scale
rescaled %*% PCAcc$loadings
predict(PCAcc, newdata=cc)
scale(cc, PCAcc$center, PCAcc$scale) %*% PCAcc$loadings
/pca/pca-eg5.R
no_license
dupadhyaya/dspgmsc2017
R
false
false
1,352
r
# PCA -- step-by-step principal components analysis, following:
# https://davetang.org/muse/2012/02/01/step-by-step-principal-components-analysis-using-r/

# use a simple two dimensional dataset to illustrate PCA
x <- c(2.5, 0.5, 2.2, 1.9, 3.1, 2.3, 2, 1, 1.5, 1.1)
y <- c(2.4, 0.7, 2.9, 2.2, 3.0, 2.7, 1.6, 1.1, 1.6, 0.9)
plot(x, y, pch = 19)

# Centre each variable by subtracting its mean
mean(x)
mean(y)
(x1 <- x - mean(x))   # outer parentheses print the assigned value
summary(x1)
(y1 <- y - mean(y))
summary(y1)

# Our standardised dataset visualised on the x-y coordinates.
plot(x1, y1, pch = 19)

# Pairwise covariances of the centred variables
cov(x1, y1)
cov(x1, x1)
cov(y1, y1)

# Assemble the 2x2 covariance matrix by hand
m <- matrix(c(cov(x1, x1), cov(x1, y1), cov(y1, x1),cov(y1, y1)),
            nrow=2, ncol=2, byrow=TRUE,
            dimnames=list(c("x","y"),c("x","y")))
m

# Eigendecomposition: eigenvectors are the principal directions,
# eigenvalues the variance captured along each
e = eigen(m)
e

# Project the centred data onto the eigenvectors to obtain the PC scores
pc1 = x1 * e$vectors[1,1] + y1 * e$vectors[2,1]
pc1
pc2 = x1 * e$vectors[1,2] + y1 * e$vectors[2,2]
pc2
data.frame(PC1 = pc1, PC2=pc2)
plot(pc1,pc2, pch=19)

# Same analysis using the built-in prcomp()
(data = data.frame(x,y))
data.pca = prcomp(data)
data.pca
names(data.pca)
# scores from prcomp; NOTE(review): these may differ from the manual
# projection above by the sign of each component
data.pca$x
plot(data.pca$x[,1], data.pca$x[,2], pch=19)
eigen(m)
data.pca
x

# Second toy example: princomp() on a 6x4 integer matrix
cc = matrix(1:24, ncol=4)
cc
PCAcc = princomp(cc, scores=T, cor=T)
PCAcc$loadings
PCAcc$scores

# Reproduce the scores manually: first centre each column...
apply(cc, 2, mean)
cc
t(cc)
t(cc) -apply(cc, 2, mean)
t(t(cc) -apply(cc, 2, mean))
rescaled = t ( t(cc) - apply(cc, 2, mean))
# ...then project onto the loadings; compare with predict() and with
# scale() using the fitted centre/scale
rescaled %*% PCAcc$loadings
predict(PCAcc, newdata=cc)
scale(cc, PCAcc$center, PCAcc$scale) %*% PCAcc$loadings
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotingFunctions.R
\name{precisionPlot}
\alias{precisionPlot}
\title{Precision plot}
\usage{
precisionPlot(RES, byCond = FALSE, nullSet = c(-1, 1), avgCond = FALSE,
  ptm = FALSE)
}
\arguments{
\item{RES}{A results list generated from the function \code{\link{compBayes}},
which will be found in either the first (for protein summaries) or second
(for ptm's) component of the result list object.}

\item{byCond}{A boolean parameter which determines if separate plots should
be made for each condition. The default is FALSE which results in the
creation of a single plot.}

\item{nullSet}{An interval which will determine the color scheme of the plot.
The probability of being in the nullSet interval is approximated and points
are colored according to categories of this probability. If a researcher is
only interested in fold-changes greater than 2 then this interval should be
set to (-1, 1).}

\item{avgCond}{A boolean parameter that determines whether or not plots will
be based on hierarchical population level mean parameters or average within
sample parameters. The default is to use the population level parameter,
however with very small sample sizes this plot may not be useful.}
}
\description{
This function takes a results summary from the \code{\link{compBayes}}
function. The output is a plot showing posterior means on the x-axis and
precision on the y-axis, where precision is plotted as the inverse of the
posterior coefficient of variation.
}
/man/precisionPlot.Rd
no_license
ColtoCaro/compositionalMS
R
false
true
1,544
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotingFunctions.R
\name{precisionPlot}
\alias{precisionPlot}
\title{Precision plot}
\usage{
precisionPlot(RES, byCond = FALSE, nullSet = c(-1, 1), avgCond = FALSE,
  ptm = FALSE)
}
\arguments{
\item{RES}{A results list generated from the function \code{\link{compBayes}},
which will be found in either the first (for protein summaries) or second
(for ptm's) component of the result list object.}

\item{byCond}{A boolean parameter which determines if separate plots should
be made for each condition. The default is FALSE which results in the
creation of a single plot.}

\item{nullSet}{An interval which will determine the color scheme of the plot.
The probability of being in the nullSet interval is approximated and points
are colored according to categories of this probability. If a researcher is
only interested in fold-changes greater than 2 then this interval should be
set to (-1, 1).}

\item{avgCond}{A boolean parameter that determines whether or not plots will
be based on hierarchical population level mean parameters or average within
sample parameters. The default is to use the population level parameter,
however with very small sample sizes this plot may not be useful.}
}
\description{
This function takes a results summary from the \code{\link{compBayes}}
function. The output is a plot showing posterior means on the x-axis and
precision on the y-axis, where precision is plotted as the inverse of the
posterior coefficient of variation.
}
# Build the "domestic situation trends" dashboard box: a collapsible
# boxPlus widget holding one tab per time series, with each tab's UI
# sourced from its own file under COMPONENT_PATH/Main.
Component.Tendency <- function() {
  boxPlus(
    title = tagList(icon("chart-line"), i18n$t("国内状況推移")),
    closable = FALSE,
    collapsible = TRUE,
    collapsed = FALSE,
    enable_label = TRUE,
    label_text = tagList(i18n$t("もっと見る"), icon("hand-point-right")),
    label_status = "warning",
    footer = tags$small(icon("lightbulb"), i18n$t("凡例クリックすると表示・非表示の切替ができます。")),
    width = 12,
    tabsetPanel(
      id = "linePlot",
      # Confirmed-cases trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Confirmed.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # PCR test-count trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Test.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # Discharged-patients trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Discharged.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # Call-centre response tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.CallCenter.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value
    )
  )
}
/03_Components/Main/Tendency.ui.R
permissive
takewiki/2019-ncov-japan
R
false
false
1,230
r
# Build the "domestic situation trends" dashboard box: a collapsible
# boxPlus widget holding one tab per time series, with each tab's UI
# sourced from its own file under COMPONENT_PATH/Main.
Component.Tendency <- function() {
  boxPlus(
    title = tagList(icon("chart-line"), i18n$t("国内状況推移")),
    closable = FALSE,
    collapsible = TRUE,
    collapsed = FALSE,
    enable_label = TRUE,
    label_text = tagList(i18n$t("もっと見る"), icon("hand-point-right")),
    label_status = "warning",
    footer = tags$small(icon("lightbulb"), i18n$t("凡例クリックすると表示・非表示の切替ができます。")),
    width = 12,
    tabsetPanel(
      id = "linePlot",
      # Confirmed-cases trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Confirmed.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # PCR test-count trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Test.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # Discharged-patients trend tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.Discharged.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value,
      # Call-centre response tab.
      source(
        file = paste0(COMPONENT_PATH, "/Main/Tendency.CallCenter.ui.R"),
        local = TRUE,
        encoding = "UTF-8"
      )$value
    )
  )
}
#' draw an oncoprint similar to the cBioportal output.
#' @details takes output generated by \code{read.maf} and draws an oncoprint
#' using ComplexHeatmap.
#'
#' @param maf output generated by \code{read.maf}
#' @param genes draw oncoprint for these genes. default NULL. Plots top 5 genes.
#' @param sort logical sort oncomatrix for enhanced visualization. Defaults to TRUE.
#' @param annotation \code{data.frame} with first column containing Tumor_Sample_Barcodes and rest of columns with annotations.
#' @param top how many top genes to be drawn. defaults to 5.
#' @param removeNonMutated Logical. If \code{TRUE} removes samples with no mutations for better visualization.
#' @param showTumorSampleBarcodes logical to include sample names.
#' @return NULL
#' @export
oncoprint = function(maf, genes = NULL, sort = TRUE, annotation = NULL, removeNonMutated = FALSE, top = 5, showTumorSampleBarcodes = FALSE){

  mat_origin = maf$numericMatrix

  require(package = "ComplexHeatmap", quietly = TRUE, warn.conflicts = FALSE)
  require(package = "RColorBrewer", quietly = TRUE, warn.conflicts = FALSE)

  # If the user doesn't provide a gene vector, use the top `top` genes.
  # FIX: `drop = FALSE` keeps single-row/column selections as matrices; the
  # original code collapsed them to vectors, breaking colnames()/rownames()
  # downstream when only one gene (or one sample) remained.  Also clamp
  # `top` so asking for more genes than exist no longer errors.
  if(is.null(genes)){
    mat = mat_origin[seq_len(min(top, nrow(mat_origin))), , drop = FALSE]
  } else{
    mat = mat_origin[genes, , drop = FALSE]
  }

  # Remove non-mutated samples to improve visualization.
  if(removeNonMutated){
    tsb = colnames(mat)
    tsb.include = tsb[colSums(mat) != 0]
    mat = mat[, tsb.include, drop = FALSE]
  }

  # Sort (memo-sort) so that mutated samples group together.
  if(sort){
    mat[mat != 0] = 1 # replacing all non-zero integers with 1 improves sorting (& grouping)
    tmat = t(mat)
    mat = t(tmat[do.call(order, c(as.list(as.data.frame(tmat)), decreasing = TRUE)), , drop = FALSE])
  }

  # Switch to the character (variant classification) matrix, reordered to
  # match the numeric matrix just computed; this is the matrix plotted.
  char.mat = maf$oncoMatrix
  char.mat = char.mat[rownames(mat), , drop = FALSE]
  char.mat = char.mat[, colnames(mat), drop = FALSE]
  mat = char.mat

  # Fixed colour per variant classification.
  # (FIX: dropped the stray `names = c(...)` assignment that shadowed
  # base::names with a local character vector.)
  col = c(brewer.pal(12, name = "Paired"), brewer.pal(11, name = "Spectral")[1:3], "maroon")
  names(col) = c("Nonstop_Mutation", "Frame_Shift_Del", "Intron", "Missense_Mutation",
                 "IGR", "Nonsense_Mutation", "RNA", "Splice_Site", "In_Frame_Del",
                 "Frame_Shift_Ins", "Silent", "In_Frame_Ins", "ITD", "3'UTR",
                 "Translation_Start_Site", "two_hit")

  # Keep only classifications that actually occur in the plotted matrix.
  tc = unique(unlist(apply(mat, 1, unique)))
  tc = tc[!tc == '']
  type_col = col[tc]
  type_name = names(type_col)
  names(type_name) = type_name

  # Build the bottom sample-level annotation, if one was supplied.
  if(!is.null(annotation)){
    annotation[, 1] = gsub(pattern = '-', replacement = '.', x = annotation[, 1])
    if(nrow(annotation[duplicated(annotation$Tumor_Sample_Barcode), ]) > 0){
      annotation = annotation[!duplicated(annotation$Tumor_Sample_Barcode), ]
    }
    rownames(annotation) = annotation[, 1]
    annotation = annotation[colnames(mat_origin), ]
    annotation = annotation[complete.cases(annotation), ]
    anno.df = data.frame(row.names = annotation[, 1])
    anno.df = cbind(anno.df, annotation[, 2:ncol(annotation)])
    colnames(anno.df) = colnames(annotation)[2:ncol(annotation)]
    bot.anno = HeatmapAnnotation(anno.df)
  }

  # Cell drawing callback (adapted from ComplexHeatmap's oncoPrint source):
  # a coloured rectangle per variant type, grey for wild-type cells.
  add_oncoprint = function(type, x, y, width, height) {
    for(i in seq_along(type_name)){
      if(any(type %in% type_name[i])) {
        grid.rect(x, y, width - unit(0.5, "mm"), height - unit(1, "mm"),
                  gp = gpar(col = NA, fill = type_col[type_name[i]]))
      }
    }
    if(any(type %in% "")) {
      grid.rect(x, y, width - unit(0.5, "mm"), height - unit(1, "mm"),
                gp = gpar(col = NA, fill = "#CCCCCC"))
    }
  }

  if(is.null(annotation)){
    ht = Heatmap(mat, rect_gp = gpar(type = "none"),
                 cell_fun = function(j, i, x, y, width, height, fill) {
                   type = mat[i, j]
                   add_oncoprint(type, x, y, width, height)
                 },
                 row_names_gp = gpar(fontsize = 10),
                 show_column_names = showTumorSampleBarcodes,
                 show_heatmap_legend = FALSE,
                 top_annotation_height = unit(2, "cm"))
  }else{
    ht = Heatmap(mat, rect_gp = gpar(type = "none"),
                 cell_fun = function(j, i, x, y, width, height, fill) {
                   type = mat[i, j]
                   add_oncoprint(type, x, y, width, height)
                 },
                 row_names_gp = gpar(fontsize = 10),
                 show_column_names = showTumorSampleBarcodes,
                 show_heatmap_legend = FALSE,
                 top_annotation_height = unit(2, "cm"),
                 bottom_annotation = bot.anno)
  }

  # Shared legend for the variant-type colours, placed at the bottom.
  legend = legendGrob(labels = type_name[names(type_col)], pch = 15,
                      gp = gpar(col = type_col), nrow = 2)
  draw(ht, newpage = FALSE, annotation_legend_side = "bottom",
       annotation_legend_list = list(legend))
}
/R/oncoprint.R
no_license
xtmgah/maftools
R
false
false
4,501
r
#' draw an oncoprint similar to the cBioportal output.
#' @details takes output generated by \code{read.maf} and draws an oncoprint
#' using ComplexHeatmap.
#'
#' @param maf output generated by \code{read.maf}
#' @param genes draw oncoprint for these genes. default NULL. Plots top 5 genes.
#' @param sort logical sort oncomatrix for enhanced visualization. Defaults to TRUE.
#' @param annotation \code{data.frame} with first column containing Tumor_Sample_Barcodes and rest of columns with annotations.
#' @param top how many top genes to be drawn. defaults to 5.
#' @param removeNonMutated Logical. If \code{TRUE} removes samples with no mutations for better visualization.
#' @param showTumorSampleBarcodes logical to include sample names.
#' @return NULL
#' @export
oncoprint = function(maf, genes = NULL, sort = TRUE, annotation = NULL, removeNonMutated = FALSE, top = 5, showTumorSampleBarcodes = FALSE){

  mat_origin = maf$numericMatrix

  require(package = "ComplexHeatmap", quietly = TRUE, warn.conflicts = FALSE)
  require(package = "RColorBrewer", quietly = TRUE, warn.conflicts = FALSE)

  # If the user doesn't provide a gene vector, use the top `top` genes.
  # FIX: `drop = FALSE` keeps single-row/column selections as matrices; the
  # original code collapsed them to vectors, breaking colnames()/rownames()
  # downstream when only one gene (or one sample) remained.  Also clamp
  # `top` so asking for more genes than exist no longer errors.
  if(is.null(genes)){
    mat = mat_origin[seq_len(min(top, nrow(mat_origin))), , drop = FALSE]
  } else{
    mat = mat_origin[genes, , drop = FALSE]
  }

  # Remove non-mutated samples to improve visualization.
  if(removeNonMutated){
    tsb = colnames(mat)
    tsb.include = tsb[colSums(mat) != 0]
    mat = mat[, tsb.include, drop = FALSE]
  }

  # Sort (memo-sort) so that mutated samples group together.
  if(sort){
    mat[mat != 0] = 1 # replacing all non-zero integers with 1 improves sorting (& grouping)
    tmat = t(mat)
    mat = t(tmat[do.call(order, c(as.list(as.data.frame(tmat)), decreasing = TRUE)), , drop = FALSE])
  }

  # Switch to the character (variant classification) matrix, reordered to
  # match the numeric matrix just computed; this is the matrix plotted.
  char.mat = maf$oncoMatrix
  char.mat = char.mat[rownames(mat), , drop = FALSE]
  char.mat = char.mat[, colnames(mat), drop = FALSE]
  mat = char.mat

  # Fixed colour per variant classification.
  # (FIX: dropped the stray `names = c(...)` assignment that shadowed
  # base::names with a local character vector.)
  col = c(brewer.pal(12, name = "Paired"), brewer.pal(11, name = "Spectral")[1:3], "maroon")
  names(col) = c("Nonstop_Mutation", "Frame_Shift_Del", "Intron", "Missense_Mutation",
                 "IGR", "Nonsense_Mutation", "RNA", "Splice_Site", "In_Frame_Del",
                 "Frame_Shift_Ins", "Silent", "In_Frame_Ins", "ITD", "3'UTR",
                 "Translation_Start_Site", "two_hit")

  # Keep only classifications that actually occur in the plotted matrix.
  tc = unique(unlist(apply(mat, 1, unique)))
  tc = tc[!tc == '']
  type_col = col[tc]
  type_name = names(type_col)
  names(type_name) = type_name

  # Build the bottom sample-level annotation, if one was supplied.
  if(!is.null(annotation)){
    annotation[, 1] = gsub(pattern = '-', replacement = '.', x = annotation[, 1])
    if(nrow(annotation[duplicated(annotation$Tumor_Sample_Barcode), ]) > 0){
      annotation = annotation[!duplicated(annotation$Tumor_Sample_Barcode), ]
    }
    rownames(annotation) = annotation[, 1]
    annotation = annotation[colnames(mat_origin), ]
    annotation = annotation[complete.cases(annotation), ]
    anno.df = data.frame(row.names = annotation[, 1])
    anno.df = cbind(anno.df, annotation[, 2:ncol(annotation)])
    colnames(anno.df) = colnames(annotation)[2:ncol(annotation)]
    bot.anno = HeatmapAnnotation(anno.df)
  }

  # Cell drawing callback (adapted from ComplexHeatmap's oncoPrint source):
  # a coloured rectangle per variant type, grey for wild-type cells.
  add_oncoprint = function(type, x, y, width, height) {
    for(i in seq_along(type_name)){
      if(any(type %in% type_name[i])) {
        grid.rect(x, y, width - unit(0.5, "mm"), height - unit(1, "mm"),
                  gp = gpar(col = NA, fill = type_col[type_name[i]]))
      }
    }
    if(any(type %in% "")) {
      grid.rect(x, y, width - unit(0.5, "mm"), height - unit(1, "mm"),
                gp = gpar(col = NA, fill = "#CCCCCC"))
    }
  }

  if(is.null(annotation)){
    ht = Heatmap(mat, rect_gp = gpar(type = "none"),
                 cell_fun = function(j, i, x, y, width, height, fill) {
                   type = mat[i, j]
                   add_oncoprint(type, x, y, width, height)
                 },
                 row_names_gp = gpar(fontsize = 10),
                 show_column_names = showTumorSampleBarcodes,
                 show_heatmap_legend = FALSE,
                 top_annotation_height = unit(2, "cm"))
  }else{
    ht = Heatmap(mat, rect_gp = gpar(type = "none"),
                 cell_fun = function(j, i, x, y, width, height, fill) {
                   type = mat[i, j]
                   add_oncoprint(type, x, y, width, height)
                 },
                 row_names_gp = gpar(fontsize = 10),
                 show_column_names = showTumorSampleBarcodes,
                 show_heatmap_legend = FALSE,
                 top_annotation_height = unit(2, "cm"),
                 bottom_annotation = bot.anno)
  }

  # Shared legend for the variant-type colours, placed at the bottom.
  legend = legendGrob(labels = type_name[names(type_col)], pch = 15,
                      gp = gpar(col = type_col), nrow = 2)
  draw(ht, newpage = FALSE, annotation_legend_side = "bottom",
       annotation_legend_list = list(legend))
}
% Generated by roxygen2 (4.0.1): do not edit by hand \name{add_tooltip} \alias{add_tooltip} \title{Add tooltips to a plot.} \usage{ add_tooltip(vis, html, on = c("hover", "click")) } \arguments{ \item{vis}{Visualisation to add tooltips to.} \item{html}{A function that takes a single argument as input. This argument will be a list containing the data in the mark currently under the mouse. It should return a string containing HTML.} \item{on}{Should tooltips appear on hover, or on click?} } \description{ Add tooltips to a plot. } \examples{ \donttest{ all_values <- function(x) { if(is.null(x)) return(NULL) paste0(names(x), ": ", format(x), collapse = "<br />") } base <- mtcars \%>\% ggvis(x = ~wt, y = ~mpg) \%>\% layer_points() base \%>\% add_tooltip(all_values, "hover") base \%>\% add_tooltip(all_values, "click") } }
/man/add_tooltip.Rd
no_license
harryprince/ggvis
R
false
false
838
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{add_tooltip} \alias{add_tooltip} \title{Add tooltips to a plot.} \usage{ add_tooltip(vis, html, on = c("hover", "click")) } \arguments{ \item{vis}{Visualisation to add tooltips to.} \item{html}{A function that takes a single argument as input. This argument will be a list containing the data in the mark currently under the mouse. It should return a string containing HTML.} \item{on}{Should tooltips appear on hover, or on click?} } \description{ Add tooltips to a plot. } \examples{ \donttest{ all_values <- function(x) { if(is.null(x)) return(NULL) paste0(names(x), ": ", format(x), collapse = "<br />") } base <- mtcars \%>\% ggvis(x = ~wt, y = ~mpg) \%>\% layer_points() base \%>\% add_tooltip(all_values, "hover") base \%>\% add_tooltip(all_values, "click") } }
library(shiny)

# Two-column demo page: an interactive Venn diagram on the left and a
# companion data table on the right.
shinyUI(
  fluidPage(
    titlePanel("d3vennR demo"),
    fluidRow(
      column(
        width = 6,
        d3vennROutput("venn")
      ),
      column(
        width = 6,
        tableOutput("table")
      )
    )
  )
)
/ui.R
no_license
lifan0127/r-mini-talk
R
false
false
223
r
library(shiny)

# Two-column demo page: an interactive Venn diagram on the left and a
# companion data table on the right.
shinyUI(
  fluidPage(
    titlePanel("d3vennR demo"),
    fluidRow(
      column(
        width = 6,
        d3vennROutput("venn")
      ),
      column(
        width = 6,
        tableOutput("table")
      )
    )
  )
)
#' @title Aggregate Group-Time Average Treatment Effects
#'
#' @description A function to take group-time average treatment effects
#'   and aggregate them into a smaller number of parameters. There are
#'   several possible aggregations including "simple", "dynamic", "group",
#'   and "calendar."
#'
#' @param MP an MP object (i.e., the results of the \code{att_gt} method)
#' @param type Which type of aggregated treatment effect parameter to compute.
#'   One option is "simple" (this just computes a weighted average of all
#'   group-time average treatment effects with weights proportional to group
#'   size). Other options are "dynamic" (this computes average effects across
#'   different lengths of exposure to the treatment and is similar to an
#'   "event study"; here the overall effect averages the effect of the
#'   treatment across all positive lengths of exposure); "group" (this is the
#'   default option and computes average treatment effects across different
#'   groups; here the overall effect averages the effect across different
#'   groups); and "calendar" (this computes average treatment effects across
#'   different time periods; here the overall effect averages the effect
#'   across each time period).
#' @param balance_e If set (and if one computes dynamic effects), it balances
#'   the sample with respect to event time. For example, if \code{balance.e=2},
#'   \code{aggte} will drop groups that are not exposed to treatment for
#'   at least three periods (the initial period when \code{e=0} as well as the
#'   next two periods when \code{e=1} and the \code{e=2}). This ensures that
#'   the composition of groups does not change when event time changes.
#' @param min_e For event studies, this is the smallest event time to compute
#'   dynamic effects for. By default, \code{min_e = -Inf} so that effects at
#'   all lengths of exposure are computed.
#' @param max_e For event studies, this is the largest event time to compute
#'   dynamic effects for. By default, \code{max_e = Inf} so that effects at
#'   all lengths of exposure are computed.
#' @param na.rm Logical value if we are to remove missing values from
#'   analyses. Default is FALSE.
#' @param bstrap Boolean for whether or not to compute standard errors using
#'   the multiplier bootstrap. If standard errors are clustered, then one
#'   must set \code{bstrap=TRUE}. Default is the value set in the MP object.
#'   If bstrap is \code{FALSE}, then analytical standard errors are reported.
#' @param biters The number of bootstrap iterations to use. The default is
#'   the value set in the MP object, and this is only applicable if
#'   \code{bstrap=TRUE}.
#' @param cband Boolean for whether or not to compute a uniform confidence
#'   band that covers all of the group-time average treatment effects with
#'   fixed probability \code{1-alp}. In order to compute uniform confidence
#'   bands, \code{bstrap} must also be set to \code{TRUE}. The default is the
#'   value set in the MP object.
#' @param alp the significance level, default is the value set in the MP
#'   object.
#' @param clustervars A vector of variables to cluster on. At most, there can
#'   be two variables (otherwise will throw an error) and one of these must
#'   be the same as idname which allows for clustering at the individual
#'   level. Default is the variables set in the MP object.
#'
#' @return An \code{\link{AGGTEobj}} object that holds the results from the
#'   aggregation
#'
#' @export
aggte <- function(MP,
                  type = "group",
                  balance_e = NULL,
                  min_e = -Inf,
                  max_e = Inf,
                  na.rm = FALSE,
                  bstrap = NULL,
                  biters = NULL,
                  cband = NULL,
                  alp = NULL,
                  clustervars = NULL) {
  # Capture how aggte() was invoked so the call can be stored on the result.
  aggte_call <- match.call()

  # Delegate all of the actual work to the internal computation routine.
  compute.aggte(MP = MP,
                type = type,
                balance_e = balance_e,
                min_e = min_e,
                max_e = max_e,
                na.rm = na.rm,
                bstrap = bstrap,
                biters = biters,
                cband = cband,
                alp = alp,
                clustervars = clustervars,
                call = aggte_call)
}
/R/aggte.R
no_license
advancehs/did
R
false
false
4,331
r
#' @title Aggregate Group-Time Average Treatment Effects
#'
#' @description A function to take group-time average treatment effects
#'   and aggregate them into a smaller number of parameters. There are
#'   several possible aggregations including "simple", "dynamic", "group",
#'   and "calendar."
#'
#' @param MP an MP object (i.e., the results of the \code{att_gt} method)
#' @param type Which type of aggregated treatment effect parameter to compute.
#'   One option is "simple" (this just computes a weighted average of all
#'   group-time average treatment effects with weights proportional to group
#'   size). Other options are "dynamic" (this computes average effects across
#'   different lengths of exposure to the treatment and is similar to an
#'   "event study"; here the overall effect averages the effect of the
#'   treatment across all positive lengths of exposure); "group" (this is the
#'   default option and computes average treatment effects across different
#'   groups; here the overall effect averages the effect across different
#'   groups); and "calendar" (this computes average treatment effects across
#'   different time periods; here the overall effect averages the effect
#'   across each time period).
#' @param balance_e If set (and if one computes dynamic effects), it balances
#'   the sample with respect to event time. For example, if \code{balance.e=2},
#'   \code{aggte} will drop groups that are not exposed to treatment for
#'   at least three periods (the initial period when \code{e=0} as well as the
#'   next two periods when \code{e=1} and the \code{e=2}). This ensures that
#'   the composition of groups does not change when event time changes.
#' @param min_e For event studies, this is the smallest event time to compute
#'   dynamic effects for. By default, \code{min_e = -Inf} so that effects at
#'   all lengths of exposure are computed.
#' @param max_e For event studies, this is the largest event time to compute
#'   dynamic effects for. By default, \code{max_e = Inf} so that effects at
#'   all lengths of exposure are computed.
#' @param na.rm Logical value if we are to remove missing values from
#'   analyses. Default is FALSE.
#' @param bstrap Boolean for whether or not to compute standard errors using
#'   the multiplier bootstrap. If standard errors are clustered, then one
#'   must set \code{bstrap=TRUE}. Default is the value set in the MP object.
#'   If bstrap is \code{FALSE}, then analytical standard errors are reported.
#' @param biters The number of bootstrap iterations to use. The default is
#'   the value set in the MP object, and this is only applicable if
#'   \code{bstrap=TRUE}.
#' @param cband Boolean for whether or not to compute a uniform confidence
#'   band that covers all of the group-time average treatment effects with
#'   fixed probability \code{1-alp}. In order to compute uniform confidence
#'   bands, \code{bstrap} must also be set to \code{TRUE}. The default is the
#'   value set in the MP object.
#' @param alp the significance level, default is the value set in the MP
#'   object.
#' @param clustervars A vector of variables to cluster on. At most, there can
#'   be two variables (otherwise will throw an error) and one of these must
#'   be the same as idname which allows for clustering at the individual
#'   level. Default is the variables set in the MP object.
#'
#' @return An \code{\link{AGGTEobj}} object that holds the results from the
#'   aggregation
#'
#' @export
aggte <- function(MP,
                  type = "group",
                  balance_e = NULL,
                  min_e = -Inf,
                  max_e = Inf,
                  na.rm = FALSE,
                  bstrap = NULL,
                  biters = NULL,
                  cband = NULL,
                  alp = NULL,
                  clustervars = NULL) {
  # Capture how aggte() was invoked so the call can be stored on the result.
  aggte_call <- match.call()

  # Delegate all of the actual work to the internal computation routine.
  compute.aggte(MP = MP,
                type = type,
                balance_e = balance_e,
                min_e = min_e,
                max_e = max_e,
                na.rm = na.rm,
                bstrap = bstrap,
                biters = biters,
                cband = cband,
                alp = alp,
                clustervars = clustervars,
                call = aggte_call)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_reports.R
\name{write_reports}
\alias{write_reports}
\title{Write NSSP BioSense Platform Data Quality Summary Reports}
\usage{
write_reports(username, password, table, mft, start, end, directory = "",
  nexamples = 0, offset)
}
\arguments{
\item{username}{Your BioSense username, as a string. This is the same
username you may use to log into RStudio or Adminer.}

\item{password}{Your BioSense password, as a string. This is the same
password you may use to log into RStudio or Adminer.}

\item{table}{The table that you want to retrieve the data from, as a string.}

\item{mft}{The MFT (master facilities table) from where the facility names
will be retrieved, as a string.}

\item{start}{The start date time that you wish to begin pulling data from,
as a string.}

\item{end}{The end date time that you wish to stop pulling data from, as a
string.}

\item{directory}{The directory where you would like to write the reports to
(i.e., "~/Documents/MyReports"), as a string.}

\item{nexamples}{An integer number of examples you would like for each type
of invalid or null field in the examples workbooks for each facility. This
defaults to 0, which will not generate these example workbooks.}

\item{offset}{The number of hours you wish to offset Arrived_Date_Time
(which is in UTC). The offset value is how far off your local time zone is
from UTC. For example, the Central Time Zone would set this to 5 or 6,
depending on if it is daylight savings or not. This value should be an
integer. This is used for timeliness reports using the `va_lag` function.}
}
\description{
This function calls upon all of the other functions in the package to write
a large number of Excel workbooks. First, it generates a state summary
workbook that shows percents and counts of nulls and invalids at both the
facility and statewide level, as well as message delivery lag (i.e.,
timeliness reports).
Second, it generates a summary workbook for every single facility that includes only information for that facility. Third, it generates an example workbook for every single facility, including detailed information on records and visits that are null or invalid. These example workbooks make the function longer to run, so by default this function does not generate them (see `nexamples` input below). }
/man/write_reports.Rd
no_license
markhwhiteii/biosensequality
R
false
true
2,374
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_reports.R
\name{write_reports}
\alias{write_reports}
\title{Write NSSP BioSense Platform Data Quality Summary Reports}
\usage{
write_reports(username, password, table, mft, start, end, directory = "",
  nexamples = 0, offset)
}
\arguments{
\item{username}{Your BioSense username, as a string. This is the same
username you may use to log into RStudio or Adminer.}

\item{password}{Your BioSense password, as a string. This is the same
password you may use to log into RStudio or Adminer.}

\item{table}{The table that you want to retrieve the data from, as a string.}

\item{mft}{The MFT (master facilities table) from where the facility names
will be retrieved, as a string.}

\item{start}{The start date time that you wish to begin pulling data from,
as a string.}

\item{end}{The end date time that you wish to stop pulling data from, as a
string.}

\item{directory}{The directory where you would like to write the reports to
(i.e., "~/Documents/MyReports"), as a string.}

\item{nexamples}{An integer number of examples you would like for each type
of invalid or null field in the examples workbooks for each facility. This
defaults to 0, which will not generate these example workbooks.}

\item{offset}{The number of hours you wish to offset Arrived_Date_Time
(which is in UTC). The offset value is how far off your local time zone is
from UTC. For example, the Central Time Zone would set this to 5 or 6,
depending on if it is daylight savings or not. This value should be an
integer. This is used for timeliness reports using the `va_lag` function.}
}
\description{
This function calls upon all of the other functions in the package to write
a large number of Excel workbooks. First, it generates a state summary
workbook that shows percents and counts of nulls and invalids at both the
facility and statewide level, as well as message delivery lag (i.e.,
timeliness reports).
Second, it generates a summary workbook for every single facility that includes only information for that facility. Third, it generates an example workbook for every single facility, including detailed information on records and visits that are null or invalid. These example workbooks make the function longer to run, so by default this function does not generate them (see `nexamples` input below). }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/correctSigns.R \name{applyFix} \alias{applyFix} \title{Apply flips and swaps to a record.} \usage{ applyFix(flips, swaps, r) } \arguments{ \item{flips}{Index vector in r. r will be sign-flipped at flips} \item{swaps}{nx2 matrix denoting value swaps in r.} \item{r}{numerical record.} } \value{ r, with flips and swaps applied } \description{ Apply flips and swaps to a record. } \keyword{internal}
/man/applyFix.Rd
no_license
cran/deducorrect
R
false
false
487
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/correctSigns.R \name{applyFix} \alias{applyFix} \title{Apply flips and swaps to a record.} \usage{ applyFix(flips, swaps, r) } \arguments{ \item{flips}{Index vector in r. r will be sign-flipped at flips} \item{swaps}{nx2 matrix denoting value swaps in r.} \item{r}{numerical record.} } \value{ r, with flips and swaps applied } \description{ Apply flips and swaps to a record. } \keyword{internal}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/alpha_to_num.R \name{alpha_to_num} \alias{alpha_to_num} \title{Convert alphabet to numbers 1-26} \usage{ alpha_to_num(x) } \arguments{ \item{x}{character} } \value{ number from 1-26 (a = 1, b = 2, etc.) } \description{ Convert alphabet to numbers 1-26 } \examples{ abc <- c("a", "b", "c") fed <- c("f", "e", "d") aaa <- c("a", "a", "a") alpha_to_num(LETTERS) alpha_to_num(letters) alpha_to_num(abc) alpha_to_num(fed) alpha_to_num(aaa) }
/man/alpha_to_num.Rd
permissive
njtierney/njtmisc
R
false
true
516
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/alpha_to_num.R \name{alpha_to_num} \alias{alpha_to_num} \title{Convert alphabet to numbers 1-26} \usage{ alpha_to_num(x) } \arguments{ \item{x}{character} } \value{ number from 1-26 (a = 1, b = 2, etc.) } \description{ Convert alphabet to numbers 1-26 } \examples{ abc <- c("a", "b", "c") fed <- c("f", "e", "d") aaa <- c("a", "a", "a") alpha_to_num(LETTERS) alpha_to_num(letters) alpha_to_num(abc) alpha_to_num(fed) alpha_to_num(aaa) }
# Parametric estimation of the noncentrality-parameter (NCP) distribution of
# t-statistics.  The NCP is modelled as a mixture: pi0 * point mass at zero
# plus (1 - pi0) * Normal(mu.ncp, sd.ncp^2).
#
# tstat:    numeric vector of observed t-statistics.
# df:       degrees of freedom (scalar or vector); must all be > 0.
# zeromean: if TRUE, the normal component is constrained to mean zero.
# ...:      passed on to the underlying L-BFGS-B fitting routine.
# Returns an object of class c('parncpt', 'ncpest'), or NA_real_ when the
# optimization fails (see parncpt.bfgs.non0mean).
parncpt=function(tstat, df, zeromean=TRUE, ...)
{
  stopifnot(all(df>0))
  # The non-zero-mean fit cannot handle a mix of finite and infinite df;
  # cap infinite df at 500 (effectively normal) in that case.
  if(!zeromean && any(is.infinite(df)) && !all(is.infinite(df)) ) {
    df[is.infinite(df)]=500
  }
  method=c('L-BFGS-B')
  # method=match.arg(method)
  if (method=='EM') {
    stop("EM algorithm not implemented")
    # parncpt.em(tstat,df,zeromean,...)
  }else if (method=='NR') {
    stop("Newton-Raphson algorithm not implemented")
    # parncpt.nr(tstat,df,zeromean,...)
  }else if (method=='L-BFGS-B') {
    if(zeromean) parncpt.bfgs.0mean(tstat,df,...) else parncpt.bfgs.non0mean(tstat,df,...)
  }
}

# L-BFGS-B fit of the three-parameter model (pi0, mu.ncp, sd.ncp).
# starts: optional starting values; when missing, a grid search over
#   `grids` (lower/upper/ngrid, merged over built-in defaults) chooses them.
# approximation: method passed to dtn.mix() for the noncentral-t density.
parncpt.bfgs.non0mean=function(tstat,df,starts, grids, approximation='int2',...)
{
  G=max(c(length(tstat),length(df)))
  dt.null=dt(tstat,df)    # central-t density under H0 (constant; hoisted)

  # Negative log-likelihood of the two-component mixture.
  obj=function(parms){
    pi0=parms[1]; mu.ncp=parms[2]; sd.ncp=parms[3]
    scale.fact=sqrt(1+sd.ncp*sd.ncp)
    Lik=pi0*dt.null+(1-pi0)*dtn.mix(tstat,df,mu.ncp,sd.ncp,FALSE,approximation)
    ans=-sum(log(Lik))
    if(!is.finite(ans)){
      # Fall back to the exact (slower) density if the approximation underflows.
      ans=-sum(log(pi0*dt.null+(1-pi0)*dtn.mix(tstat,df,mu.ncp,sd.ncp,FALSE,approximation='none')))
    }
    ans
  }

  # Analytic gradient of the negative log-likelihood w.r.t. (pi0, mu, sd).
  deriv.non0mean=function(parms) {
    pi0=parms[1]; mu.ncp=parms[2]; sd.ncp=parms[3]
    scale.fact=sqrt(1+sd.ncp*sd.ncp); s2=scale.fact*scale.fact
    dt.alt=dtn.mix(tstat, df, mu.ncp, sd.ncp, FALSE, approximation)
    f=(pi0*dt.null+(1-pi0)*dt.alt)
    der.pi0=sum( (dt.alt-dt.null) / f )
    if(all(is.infinite(df))){
      # Normal limit: closed-form derivatives.
      z.std=(tstat-mu.ncp)/scale.fact
      der.mu=-(1-pi0)*sum( dt.alt*z.std/scale.fact / f)
      der.scale=(1-pi0)*sum( dt.alt*(1-z.std*z.std)/scale.fact / f)
    }else{
      df[is.infinite(df)]=500   # local copy only; keeps formulas finite
      df.half=df/2; t2=tstat*tstat; t2vs2=t2+df*s2
      logK2=df.half*log(df.half)-.5*log(pi/2)-lgamma(df.half)
      logC=logK2-(df.half+.5)*log(t2/s2+df)- df.half*mu.ncp*mu.ncp/t2vs2
      # E[X^(df+1)] under a truncated normal; needed by both derivatives.
      integral.xv1=mTruncNorm.int2(r=df+1, mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2),
                                   sd=1, lower=0, upper=Inf, takeLog=TRUE, ndiv=8)
      der.mu=-sum((1-pi0)/f/s2*(tstat*exp(logC)/sqrt(t2vs2)*integral.xv1-mu.ncp*dt.alt))
      der.scale=-sum((1-pi0)/f /s2/scale.fact/t2vs2*(
        dt.alt*(s2*df*(t2-s2)+mu.ncp*mu.ncp*t2vs2)-exp(logC)*mu.ncp*tstat*(t2vs2+df*s2)/sqrt(t2vs2)*integral.xv1) )
    }
    der.sd=sd.ncp/scale.fact*der.scale
    c(pi0=der.pi0, mu.ncp=der.mu, sd.ncp=der.sd)
  }

  # Choose starting values by grid search when none are supplied.
  if(missing(starts)) {
    default.grids=list(lower=c(1e-3, -2, 1e-3), upper=c(1-1e-3, 2, 2), ngrid=c(5,5,5))
    if(!missing(grids)) for(nn in names(grids)) default.grids[[nn]]=grids[[nn]]
    starts=grid.search(obj, default.grids$lower, default.grids$upper, default.grids$ngrid)
  }
  names(starts)=c('pi0','mu.ncp','sd.ncp')

  optimFit=try(optim(starts,obj,gr=deriv.non0mean, method='L-BFGS-B',
                     lower=c(0,-Inf,0),upper=c(1,Inf,Inf),hessian=TRUE,...))
  # FIX: use inherits() instead of class(x)=='try-error' (fragile when an
  # object carries more than one class).
  if(inherits(optimFit, 'try-error')){
    optimFit=try(nlminb(starts,obj,deriv.non0mean,lower=c(0,-Inf,0),upper=c(1,Inf,Inf), ...))
    if(!inherits(optimFit, 'try-error')){
      # FIX: nlminb() reports the minimum as $objective, not $value; without
      # this, the fallback path produced a broken log-likelihood below.
      optimFit$value=optimFit$objective
    }
  }
  if(inherits(optimFit, 'try-error')){
    return(NA_real_)
  }

  # Package the maximized log-likelihood as a 'logLik' object.
  ll=-optimFit$value
  attr(ll,'df')=3
  attr(ll,'nobs')=G
  class(ll)='logLik'

  # NOTE: 'gradiant' is a historical misspelling kept for backward
  # compatibility with callers that access ans$gradiant.
  ans=list(pi0=optimFit$par[1], mu.ncp=optimFit$par[2], sd.ncp=optimFit$par[3],
           data=list(tstat=tstat, df=df), logLik=ll, enp=3, par=optimFit$par,
           obj=obj, gradiant=deriv.non0mean(optimFit$par),
           hessian=optimFit$hessian,  # NULL when the nlminb fallback was used
           nobs=G)
  class(ans)=c('parncpt','ncpest')
  ans
}

parncpt.bfgs.0mean=function(tstat,df, starts, grids, approximation='int2',...)
{ G=max(c(length(tstat),length(df))) dt.null=dt(tstat,df) obj=function(parms){ pi0=parms[1]; mu.ncp=0; sd.ncp=parms[2]; s2=1+sd.ncp*sd.ncp; scale.fact=sqrt(s2) Lik=pi0*dt(tstat,df)+(1-pi0)*dt(tstat/scale.fact,df)/scale.fact -sum(log(Lik)) } deriv.0mean=function(parms){ pi0=parms[1]; mu.ncp=0; sd.ncp=parms[2]; s2=1+sd.ncp*sd.ncp; scale.fact=sqrt(s2) dt.alt=dt(tstat/scale.fact,df)/scale.fact f=(pi0*dt.null+(1-pi0)*dt.alt) der.pi0=sum( (dt.alt-dt.null) / f ) ## correct if(all(is.infinite(df))){ z.std=(tstat-mu.ncp)/scale.fact # der.mu=-(1-pi0)*sum( dt.alt*z.std/scale.fact / f) der.scale=(1-pi0)*sum( dt.alt*(1-z.std*z.std)/scale.fact / f) }else{ df[is.infinite(df)]=500 df.half=df/2; t2=tstat*tstat; t2vs2=t2+df*s2 logK2=df.half*log(df.half)-.5*log(pi/2)-lgamma(df.half) logC=logK2-(df.half+.5)*log(t2/s2+df)- df.half*mu.ncp*mu.ncp/t2vs2 # integral.xv1=.C(C_intTruncNormVec,n=as.integer(G),r=rep(as.integer(df+1),length=G), mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), # sd=rep(as.double(1),length=G), lower=numeric(G), upper=rep(Inf,length=G), ans=numeric(G),NAOK=TRUE)$ans integral.xv1=mTruncNorm.int2(r=df+1, mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), sd=1, lower=0, upper=Inf, takeLog=TRUE, ndiv=8) # der.mu=-sum((1-pi0)/f/s2*(tstat*exp(logC)/sqrt(t2vs2)*integral.xv1-mu.ncp*dt.alt)) ## correct der.scale=-sum((1-pi0)/f /s2/scale.fact/t2vs2*(#*dhs.ds) dt.alt*(s2*df*(t2-s2)+mu.ncp*mu.ncp*t2vs2)-exp(logC)*mu.ncp*tstat*(t2vs2+df*s2)/sqrt(t2vs2)*integral.xv1) ) } der.sd=sd.ncp/scale.fact*der.scale c(der.pi0, der.sd) } if(missing(starts)) { default.grids=list(lower=c(1e-3, 1e-3), upper=c(1-1e-3, 2), ngrid=c(15,15)) if(!missing(grids)) for(nn in names(grids)) default.grids[[nn]]=grids[[nn]] starts=grid.search(obj, default.grids$lower, default.grids$upper, default.grids$ngrid) } names(starts)=c('pi0','sd.ncp') optimFit=optim(starts,obj,gr=deriv.0mean, method='L-BFGS-B', lower=c(0,0),upper=c(1,Inf), hessian=TRUE,...) 
ll=-optimFit$value attr(ll,'df')=2 attr(ll,'nobs')=G class(ll)='logLik' #library("numDeriv") # tmp=make.link('logit'); logit=tmp$linkfun; logitinv=tmp$linkinv; dlogitinv=tmp$mu.eta # obj.nobound=function(par)obj(c(logitinv(par[1]),exp(par[2]))) # app.hess.nobound=hessian(obj.nobound, c(logit(optimFit$par[1]), log(optimFit$par[2]))) ## need to consider hitting boundaries # app.hess=app.hess.nobound/tcrossprod(c(dlogitinv(logit(optimFit$par[1])), optimFit$par[2])) # ans=list(pi0=optimFit$par[1], mu.ncp=0, sd.ncp=optimFit$par[2], data=list(tstat=tstat, df=df), logLik=ll, enp=2, par=optimFit$par, obj=obj, gradiant=deriv.0mean(optimFit$par), hessian=optimFit$hessian,nobs=G) class(ans)=c('parncpt','ncpest') ans } ## vcov and logLik are defined for the ncpest class in nparncp.R #vcov.parncp=function(obj) #{ # obj$hessian #} #logLik.parncp=function(obj) #{ # obj$logLik #} #coef.parncp=#coefficients.parncp= #function(obj) #{ # obj$par #} fitted.parncpt=#fitted.values.parncpt= function(object, ...) { object$pi0*dt(object$data$tstat, object$data$df)+(1-object$pi0)*dtn.mix(object$data$tstat, object$data$df,object$mu.ncp,object$sd.ncp,FALSE,...) } #lfdr=ppee=function(object)UseMethod('lfdr') #lfdr.parncpt=ppee.parncpt=function(object) #{ # pmin(pmax(object$pi0*dt(object$data$tstat, object$data$df)/fitted(object), 0), 1) #} summary.parncpt=function(object,...) { cat("pi0 (proportion of null hypotheses) =", object$pi0, fill=TRUE) cat("mu.ncp (mean of noncentrality parameters) =", object$mu.ncp, fill=TRUE) cat("sd.ncp (SD of noncentrality parameters) =", object$sd.ncp, fill=TRUE) invisible(object) } print.parncpt=function(x,...) { summary.parncpt(x,...) } plot.parncpt=function(x,...) 
{ # dev.new(width=8, height=4) op=par(mfrow=c(1,2)) hist(x$data$tstat, pr=TRUE, br=min(c(max(c(20, length(x$data$tstat)/100)), 200)), xlab='t',main='t-statistics') ord=order(x$data$tstat) lines(x$data$tstat[ord], fitted.parncpt(x)[ord], col='red', lwd=2) d.ncp=function(d) dnorm(d, x$mu.ncp, x$sd.ncp) curve(d.ncp, min(x$data$tstat), max(x$data$tstat), 500, xlab=expression(delta), ylab='density',main='noncentrality parameters') abline(v=c(0, x$mu.ncp), lty=1:2) par(op) invisible(x) } grid.search=function(obj, lower, upper, ngrid, ...) { p=max(c(length(lower),length(upper),length(ngrid))) lower=rep(lower, length=p) upper=rep(upper, length=p) ngrid=rep(ngrid, length=p) knot.list=list() for(i in 1:p) knot.list[[i]]=seq(from=lower[i], to=upper[i], length=ngrid[i]) names(knot.list)=paste(names(lower),names(upper),names(ngrid),sep='.') grids=do.call(expand.grid, knot.list) ans=apply(grids, 1, obj, ...) if(sum(ans==min(ans))>1) warning('multiple minimums found in grid search') return(unlist(grids[which.min(ans),])) }
/R/parncp.R
no_license
gitlongor/pi0
R
false
false
9,890
r
parncpt=function(tstat, df, zeromean=TRUE, ...) { stopifnot(all(df>0)) if(!zeromean && any(is.infinite(df)) && !all(is.infinite(df)) ) { df[is.infinite(df)]=500 } method=c('L-BFGS-B') # method=match.arg(method) if (method=='EM') { stop("EM algorithm not implemented") # parncpt.em(tstat,df,zeromean,...) }else if (method=='NR') { stop("Newton-Raphson algorithm not implemented") # parncpt.nr(tstat,df,zeromean,...) }else if (method=='L-BFGS-B') { if(zeromean) parncpt.bfgs.0mean(tstat,df,...) else parncpt.bfgs.non0mean(tstat,df,...) } } parncpt.bfgs.non0mean=function(tstat,df,starts, grids, approximation='int2',...) { G=max(c(length(tstat),length(df))) dt.null=dt(tstat,df) obj=function(parms){ pi0=parms[1]; mu.ncp=parms[2]; sd.ncp=parms[3]; scale.fact=sqrt(1+sd.ncp*sd.ncp) Lik=pi0*dt.null+(1-pi0)*dtn.mix(tstat,df,mu.ncp,sd.ncp,FALSE,approximation) # Lik=pi0*dt.null+(1-pi0)*dt.int(tstat/scale.fact,df,mu.ncp/scale.fact)/scale.fact ans=-sum(log(Lik)) if(!is.finite(ans)){ ans=-sum(log(pi0*dt.null+(1-pi0)*dtn.mix(tstat,df,mu.ncp,sd.ncp,FALSE,approximation='none'))) } ans } deriv.non0mean=function(parms) { pi0=parms[1]; mu.ncp=parms[2]; sd.ncp=parms[3]; scale.fact=sqrt(1+sd.ncp*sd.ncp); s2=scale.fact*scale.fact dt.alt=dtn.mix(tstat, df, mu.ncp, sd.ncp, FALSE, approximation) # dt.alt=dt.int(tstat/scale.fact,df,mu.ncp/scale.fact)/scale.fact f=(pi0*dt.null+(1-pi0)*dt.alt) der.pi0=sum( (dt.alt-dt.null) / f ) ## correct if(all(is.infinite(df))){ z.std=(tstat-mu.ncp)/scale.fact der.mu=-(1-pi0)*sum( dt.alt*z.std/scale.fact / f) der.scale=(1-pi0)*sum( dt.alt*(1-z.std*z.std)/scale.fact / f) }else{ df[is.infinite(df)]=500 df.half=df/2; t2=tstat*tstat; t2vs2=t2+df*s2 logK2=df.half*log(df.half)-.5*log(pi/2)-lgamma(df.half) logC=logK2-(df.half+.5)*log(t2/s2+df)- df.half*mu.ncp*mu.ncp/t2vs2 # integral.xv1=.C(C_intTruncNormVec,n=as.integer(G),r=rep(as.integer(df+1),length=G), mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), # sd=rep(as.double(1),length=G), lower=numeric(G), upper=rep(Inf,length=G), 
ans=numeric(G),NAOK=TRUE)$ans integral.xv1=mTruncNorm.int2(r=df+1, mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), sd=1, lower=0, upper=Inf, takeLog=TRUE, ndiv=8) der.mu=-sum((1-pi0)/f/s2*(tstat*exp(logC)/sqrt(t2vs2)*integral.xv1-mu.ncp*dt.alt)) ## correct der.scale=-sum((1-pi0)/f /s2/scale.fact/t2vs2*(#*dhs.ds) dt.alt*(s2*df*(t2-s2)+mu.ncp*mu.ncp*t2vs2)-exp(logC)*mu.ncp*tstat*(t2vs2+df*s2)/sqrt(t2vs2)*integral.xv1) ) } der.sd=sd.ncp/scale.fact*der.scale c(pi0=der.pi0, mu.ncp=der.mu, sd.ncp=der.sd) } if(missing(starts)) { default.grids=list(lower=c(1e-3, -2, 1e-3), upper=c(1-1e-3, 2, 2), ngrid=c(5,5,5)) if(!missing(grids)) for(nn in names(grids)) default.grids[[nn]]=grids[[nn]] starts=grid.search(obj, default.grids$lower, default.grids$upper, default.grids$ngrid) } names(starts)=c('pi0','mu.ncp','sd.ncp') optimFit=try(optim(starts,obj,gr=deriv.non0mean, method='L-BFGS-B',lower=c(0,-Inf,0),upper=c(1,Inf,Inf),hessian=TRUE,...)) if(class(optimFit)=='try-error'){ optimFit=try(nlminb(starts,obj,deriv.non0mean,lower=c(0,-Inf,0),upper=c(1,Inf,Inf), ...)) } if(class(optimFit)=='try-error'){ return(NA_real_) } ll=-optimFit$value attr(ll,'df')=3 attr(ll,'nobs')=G class(ll)='logLik' #library("numDeriv") # tmp=make.link('logit'); logit=tmp$linkfun; logitinv=tmp$linkinv; dlogitinv=tmp$mu.eta # obj.nobound=function(par)obj(c(logitinv(par[1]),par[2], exp(par[3]))) # app.hess.nobound=hessian(obj.nobound, c(logit(optimFit$par[1]), optimFit$par[2], log(optimFit$par[3]))) ## need to consider hitting boundaries # app.hess=app.hess.nobound/tcrossprod(c(dlogitinv(logit(optimFit$par[1])), 1, optimFit$par[3])) # ans=list(pi0=optimFit$par[1], mu.ncp=optimFit$par[2], sd.ncp=optimFit$par[3], data=list(tstat=tstat, df=df), logLik=ll, enp=3, par=optimFit$par, obj=obj, gradiant=deriv.non0mean(optimFit$par), hessian=optimFit$hessian,nobs=G) class(ans)=c('parncpt','ncpest') ans } parncpt.bfgs.0mean=function(tstat,df, starts, grids, approximation='int2',...) 
{ G=max(c(length(tstat),length(df))) dt.null=dt(tstat,df) obj=function(parms){ pi0=parms[1]; mu.ncp=0; sd.ncp=parms[2]; s2=1+sd.ncp*sd.ncp; scale.fact=sqrt(s2) Lik=pi0*dt(tstat,df)+(1-pi0)*dt(tstat/scale.fact,df)/scale.fact -sum(log(Lik)) } deriv.0mean=function(parms){ pi0=parms[1]; mu.ncp=0; sd.ncp=parms[2]; s2=1+sd.ncp*sd.ncp; scale.fact=sqrt(s2) dt.alt=dt(tstat/scale.fact,df)/scale.fact f=(pi0*dt.null+(1-pi0)*dt.alt) der.pi0=sum( (dt.alt-dt.null) / f ) ## correct if(all(is.infinite(df))){ z.std=(tstat-mu.ncp)/scale.fact # der.mu=-(1-pi0)*sum( dt.alt*z.std/scale.fact / f) der.scale=(1-pi0)*sum( dt.alt*(1-z.std*z.std)/scale.fact / f) }else{ df[is.infinite(df)]=500 df.half=df/2; t2=tstat*tstat; t2vs2=t2+df*s2 logK2=df.half*log(df.half)-.5*log(pi/2)-lgamma(df.half) logC=logK2-(df.half+.5)*log(t2/s2+df)- df.half*mu.ncp*mu.ncp/t2vs2 # integral.xv1=.C(C_intTruncNormVec,n=as.integer(G),r=rep(as.integer(df+1),length=G), mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), # sd=rep(as.double(1),length=G), lower=numeric(G), upper=rep(Inf,length=G), ans=numeric(G),NAOK=TRUE)$ans integral.xv1=mTruncNorm.int2(r=df+1, mu=tstat*mu.ncp/scale.fact/sqrt(t2vs2), sd=1, lower=0, upper=Inf, takeLog=TRUE, ndiv=8) # der.mu=-sum((1-pi0)/f/s2*(tstat*exp(logC)/sqrt(t2vs2)*integral.xv1-mu.ncp*dt.alt)) ## correct der.scale=-sum((1-pi0)/f /s2/scale.fact/t2vs2*(#*dhs.ds) dt.alt*(s2*df*(t2-s2)+mu.ncp*mu.ncp*t2vs2)-exp(logC)*mu.ncp*tstat*(t2vs2+df*s2)/sqrt(t2vs2)*integral.xv1) ) } der.sd=sd.ncp/scale.fact*der.scale c(der.pi0, der.sd) } if(missing(starts)) { default.grids=list(lower=c(1e-3, 1e-3), upper=c(1-1e-3, 2), ngrid=c(15,15)) if(!missing(grids)) for(nn in names(grids)) default.grids[[nn]]=grids[[nn]] starts=grid.search(obj, default.grids$lower, default.grids$upper, default.grids$ngrid) } names(starts)=c('pi0','sd.ncp') optimFit=optim(starts,obj,gr=deriv.0mean, method='L-BFGS-B', lower=c(0,0),upper=c(1,Inf), hessian=TRUE,...) 
ll=-optimFit$value attr(ll,'df')=2 attr(ll,'nobs')=G class(ll)='logLik' #library("numDeriv") # tmp=make.link('logit'); logit=tmp$linkfun; logitinv=tmp$linkinv; dlogitinv=tmp$mu.eta # obj.nobound=function(par)obj(c(logitinv(par[1]),exp(par[2]))) # app.hess.nobound=hessian(obj.nobound, c(logit(optimFit$par[1]), log(optimFit$par[2]))) ## need to consider hitting boundaries # app.hess=app.hess.nobound/tcrossprod(c(dlogitinv(logit(optimFit$par[1])), optimFit$par[2])) # ans=list(pi0=optimFit$par[1], mu.ncp=0, sd.ncp=optimFit$par[2], data=list(tstat=tstat, df=df), logLik=ll, enp=2, par=optimFit$par, obj=obj, gradiant=deriv.0mean(optimFit$par), hessian=optimFit$hessian,nobs=G) class(ans)=c('parncpt','ncpest') ans } ## vcov and logLik are defined for the ncpest class in nparncp.R #vcov.parncp=function(obj) #{ # obj$hessian #} #logLik.parncp=function(obj) #{ # obj$logLik #} #coef.parncp=#coefficients.parncp= #function(obj) #{ # obj$par #} fitted.parncpt=#fitted.values.parncpt= function(object, ...) { object$pi0*dt(object$data$tstat, object$data$df)+(1-object$pi0)*dtn.mix(object$data$tstat, object$data$df,object$mu.ncp,object$sd.ncp,FALSE,...) } #lfdr=ppee=function(object)UseMethod('lfdr') #lfdr.parncpt=ppee.parncpt=function(object) #{ # pmin(pmax(object$pi0*dt(object$data$tstat, object$data$df)/fitted(object), 0), 1) #} summary.parncpt=function(object,...) { cat("pi0 (proportion of null hypotheses) =", object$pi0, fill=TRUE) cat("mu.ncp (mean of noncentrality parameters) =", object$mu.ncp, fill=TRUE) cat("sd.ncp (SD of noncentrality parameters) =", object$sd.ncp, fill=TRUE) invisible(object) } print.parncpt=function(x,...) { summary.parncpt(x,...) } plot.parncpt=function(x,...) 
{ # dev.new(width=8, height=4) op=par(mfrow=c(1,2)) hist(x$data$tstat, pr=TRUE, br=min(c(max(c(20, length(x$data$tstat)/100)), 200)), xlab='t',main='t-statistics') ord=order(x$data$tstat) lines(x$data$tstat[ord], fitted.parncpt(x)[ord], col='red', lwd=2) d.ncp=function(d) dnorm(d, x$mu.ncp, x$sd.ncp) curve(d.ncp, min(x$data$tstat), max(x$data$tstat), 500, xlab=expression(delta), ylab='density',main='noncentrality parameters') abline(v=c(0, x$mu.ncp), lty=1:2) par(op) invisible(x) } grid.search=function(obj, lower, upper, ngrid, ...) { p=max(c(length(lower),length(upper),length(ngrid))) lower=rep(lower, length=p) upper=rep(upper, length=p) ngrid=rep(ngrid, length=p) knot.list=list() for(i in 1:p) knot.list[[i]]=seq(from=lower[i], to=upper[i], length=ngrid[i]) names(knot.list)=paste(names(lower),names(upper),names(ngrid),sep='.') grids=do.call(expand.grid, knot.list) ans=apply(grids, 1, obj, ...) if(sum(ans==min(ans))>1) warning('multiple minimums found in grid search') return(unlist(grids[which.min(ans),])) }
#### Data cleansing script #### ## This script encapsulates the data cleansing operations ## outlined in 1_Exploration.R library(dplyr) source("./Code/Processing/Processing_Functions.R") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Clean Missing Values #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ clean_dataset <- function(new.data) { # Manually set column typing on specific columns: factor.columns <- c("MSSubClass") new.data[, factor.columns] <- sapply(new.data[, factor.columns], as.factor) # Re-label NA records amend.cols <- c("PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu", "GarageFinish", "GarageQual", "GarageCond", "GarageType", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "BsmtQual", "BsmtCond", "MasVnrType") new.data[,amend.cols] <- sapply(new.data[,amend.cols], amend_na) # Impute missing values new.data$LotFrontage[is.na(new.data$LotFrontage)] <- mean(new.data$LotFrontage, na.rm = TRUE) new.data$Electrical[is.na(new.data$Electrical)] <- "SBrkr" # This is the most common electrical type new.data$MasVnrArea[is.na(new.data$MasVnrArea)] <- 0 new.data$BsmtFinSF1[is.na(new.data$BsmtFinSF1)] <- 0 new.data$BsmtUnfSF[is.na(new.data$BsmtUnfSF)] <- 0 new.data$BsmtFullBath[is.na(new.data$BsmtFullBath)] <- 0 new.data$BsmtHalfBath[is.na(new.data$BsmtHalfBath)] <- 0 new.data$GarageCars[is.na(new.data$GarageCars)] <- 0 # Re-set classes data.types <- sapply(new.data, class) char.columns <- colnames(new.data[,which(data.types == "character")]) new.data[, char.columns] <- lapply(new.data[, char.columns], as.factor) ## Return return(new.data) }
/Capstone/Code/Modeling/Data_Cleanser.R
no_license
DannyGsGit/UW_DS450
R
false
false
1,722
r
#### Data cleansing script #### ## This script encapsulates the data cleansing operations ## outlined in 1_Exploration.R library(dplyr) source("./Code/Processing/Processing_Functions.R") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Clean Missing Values #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ clean_dataset <- function(new.data) { # Manually set column typing on specific columns: factor.columns <- c("MSSubClass") new.data[, factor.columns] <- sapply(new.data[, factor.columns], as.factor) # Re-label NA records amend.cols <- c("PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu", "GarageFinish", "GarageQual", "GarageCond", "GarageType", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "BsmtQual", "BsmtCond", "MasVnrType") new.data[,amend.cols] <- sapply(new.data[,amend.cols], amend_na) # Impute missing values new.data$LotFrontage[is.na(new.data$LotFrontage)] <- mean(new.data$LotFrontage, na.rm = TRUE) new.data$Electrical[is.na(new.data$Electrical)] <- "SBrkr" # This is the most common electrical type new.data$MasVnrArea[is.na(new.data$MasVnrArea)] <- 0 new.data$BsmtFinSF1[is.na(new.data$BsmtFinSF1)] <- 0 new.data$BsmtUnfSF[is.na(new.data$BsmtUnfSF)] <- 0 new.data$BsmtFullBath[is.na(new.data$BsmtFullBath)] <- 0 new.data$BsmtHalfBath[is.na(new.data$BsmtHalfBath)] <- 0 new.data$GarageCars[is.na(new.data$GarageCars)] <- 0 # Re-set classes data.types <- sapply(new.data, class) char.columns <- colnames(new.data[,which(data.types == "character")]) new.data[, char.columns] <- lapply(new.data[, char.columns], as.factor) ## Return return(new.data) }
file_path <- "/Users/blahiri/healthcare/data/tcga/Raw_Data/v3" load(paste(file_path, "/vital_logr.rda", sep = "")) drug_vars <- c("Avastin", "BCNU", "CCNU", "CPT.11", "Dexamethasone", "Gliadel.Wafer", "Other_drug", "Tarceva", "Temozolomide", "VP.16") radiation_vars <- c("EXTERNAL.BEAM", "Other_radiation") inputs_for_demog_ch <<- list() #The input is a bit vector that specifies a chromosome, which essentially #tells which of the treatment options will be used. The output is the negative of the #linear combination generated by logistic regression. The negative is taken because #rbga.bin only minimizes the objective function. evalFunc <- function(bit_string) { file_path <- "/Users/blahiri/healthcare/data/tcga/Raw_Data/v3" load(paste(file_path, "/vital_logr.rda", sep = "")) obj_func <- as.numeric(vital.logr$coefficients[1:16]) %*% as.numeric(inputs_for_demog_ch) obj_func <- obj_func + as.numeric(vital.logr$coefficients[17:28]) %*% bit_string n_bits <- length(bit_string) if ((sum(bit_string) > 3) || (sum(bit_string[(n_bits-1): n_bits]) == 2)) { return(0) } else { return(-obj_func) } } genetic_algorithm_for_optimal <- function(input_data) { library(genalg) treatment_options <- sort(append(drug_vars, radiation_vars)) n_options <- length(treatment_options) inputs_for_demog_ch[["intercept_coeff"]] <<- 1 inputs_for_demog_ch[["age_at_initial_pathologic_diagnosis"]] <<- input_data[["age_at_initial_pathologic_diagnosis"]] inputs_for_demog_ch[["ethnicityHISPANIC OR LATINO"]] <<- as.numeric(input_data[["ethnicity"]] == 'HISPANIC OR LATINO') inputs_for_demog_ch[["genderMALE"]] <<- as.numeric(input_data[["gender"]] == 'MALE') inputs_for_demog_ch[["histological_typeTreated primary GBM"]] <<- as.numeric(input_data[["histological_type"]] == 'Treated primary GBM') inputs_for_demog_ch[["histological_typeGlioblastoma Multiforme (GBM)"]] <<- as.numeric(input_data[["histological_type"]] == 'Glioblastoma Multiforme (GBM)') inputs_for_demog_ch[["history_of_neoadjuvant_treatmentNo"]] <<- 
as.numeric(input_data[["history_of_neoadjuvant_treatment"]] == 'No') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodExcisional Biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Excisional Biopsy') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodIncisional Biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Incisional Biopsy') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodOther method, specify"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Other method, specify') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodFine needle aspiration biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Fine needle aspiration biopsy') inputs_for_demog_ch[["karnofsky_performance_score"]] <<- input_data[["karnofsky_performance_score"]] inputs_for_demog_ch[["person_neoplasm_cancer_statusTUMOR FREE"]] <<- as.numeric(input_data[["person_neoplasm_cancer_status"]] == 'TUMOR FREE') inputs_for_demog_ch[["prior_gliomaYES"]] <<- as.numeric(input_data[["prior_glioma"]] == 'YES') inputs_for_demog_ch[["raceBLACK OR AFRICAN AMERICAN"]] <<- as.numeric(input_data[["race"]] == 'BLACK OR AFRICAN AMERICAN') inputs_for_demog_ch[["raceASIAN"]] <<- as.numeric(input_data[["race"]] == 'ASIAN') GAmodel <- rbga.bin(size = n_options, popSize = 200, iters = 6, mutationChance = 1/(n_options + 1), elitism = T, evalFunc = evalFunc, verbose = TRUE) df <- process_output_of_genetic(GAmodel, treatment_options) } #Rank the chromosomes of the final population by evaluation. Since the GA #minimizes the objective function, the lower the evaluation, the higher they should #be ranked. 
Survival probability = sigmoid(-evaluation) process_output_of_genetic <- function(GAmodel, treatment_options) { library(boot) df <- as.data.frame(GAmodel$population) df$evaluation <- GAmodel$evaluations df$surv_prob <- inv.logit(-df$evaluation) df <- df[order(-df[, "surv_prob"]),] df <- df[!duplicated(df), ] df$combination <- apply(df, 1, function(row) convert_bit_string_to_text(row[paste("V", 1:12, sep = "")], treatment_options)) df <- df[,!(names(df) %in% c("evaluation", paste("V", 1:12, sep = "")))] row.names(df) <- NULL df <- df[c(2,1)] colnames(df) <- c("Combination", "1-year survival probability") df } convert_bit_string_to_text <- function(bit_string, treatment_options) { paste(treatment_options[which(bit_string == 1)], collapse = ", ") } # Define server logic required to do the hill climbing for a given patient shinyServer(function(input, output) { datasetInput <- reactive({ age_at_initial_pathologic_diagnosis <- input$age_at_initial_pathologic_diagnosis ethnicity <- input$ethnicity gender <- input$gender race <- input$race histological_type <- input$histological_type history_of_neoadjuvant_treatment <- input$history_of_neoadjuvant_treatment initial_pathologic_diagnosis_method <- input$initial_pathologic_diagnosis_method karnofsky_performance_score <- input$karnofsky_performance_score person_neoplasm_cancer_status <- input$person_neoplasm_cancer_status prior_glioma <- input$prior_glioma list("age_at_initial_pathologic_diagnosis" = age_at_initial_pathologic_diagnosis, "ethnicity" = ethnicity, "gender" = gender, "race" = race, "histological_type" = histological_type, "history_of_neoadjuvant_treatment" = history_of_neoadjuvant_treatment, "initial_pathologic_diagnosis_method" = initial_pathologic_diagnosis_method, "karnofsky_performance_score" = karnofsky_performance_score, "person_neoplasm_cancer_status" = person_neoplasm_cancer_status, "prior_glioma" = prior_glioma) }) output$ranked_results <- renderTable({ input_data <- datasetInput() 
genetic_algorithm_for_optimal(input_data) }) })
/code/recomm_system/combi_chem/server.R
no_license
bibudhlahiri/healthcare
R
false
false
6,017
r
file_path <- "/Users/blahiri/healthcare/data/tcga/Raw_Data/v3" load(paste(file_path, "/vital_logr.rda", sep = "")) drug_vars <- c("Avastin", "BCNU", "CCNU", "CPT.11", "Dexamethasone", "Gliadel.Wafer", "Other_drug", "Tarceva", "Temozolomide", "VP.16") radiation_vars <- c("EXTERNAL.BEAM", "Other_radiation") inputs_for_demog_ch <<- list() #The input is a bit vector that specifies a chromosome, which essentially #tells which of the treatment options will be used. The output is the negative of the #linear combination generated by logistic regression. The negative is taken because #rbga.bin only minimizes the objective function. evalFunc <- function(bit_string) { file_path <- "/Users/blahiri/healthcare/data/tcga/Raw_Data/v3" load(paste(file_path, "/vital_logr.rda", sep = "")) obj_func <- as.numeric(vital.logr$coefficients[1:16]) %*% as.numeric(inputs_for_demog_ch) obj_func <- obj_func + as.numeric(vital.logr$coefficients[17:28]) %*% bit_string n_bits <- length(bit_string) if ((sum(bit_string) > 3) || (sum(bit_string[(n_bits-1): n_bits]) == 2)) { return(0) } else { return(-obj_func) } } genetic_algorithm_for_optimal <- function(input_data) { library(genalg) treatment_options <- sort(append(drug_vars, radiation_vars)) n_options <- length(treatment_options) inputs_for_demog_ch[["intercept_coeff"]] <<- 1 inputs_for_demog_ch[["age_at_initial_pathologic_diagnosis"]] <<- input_data[["age_at_initial_pathologic_diagnosis"]] inputs_for_demog_ch[["ethnicityHISPANIC OR LATINO"]] <<- as.numeric(input_data[["ethnicity"]] == 'HISPANIC OR LATINO') inputs_for_demog_ch[["genderMALE"]] <<- as.numeric(input_data[["gender"]] == 'MALE') inputs_for_demog_ch[["histological_typeTreated primary GBM"]] <<- as.numeric(input_data[["histological_type"]] == 'Treated primary GBM') inputs_for_demog_ch[["histological_typeGlioblastoma Multiforme (GBM)"]] <<- as.numeric(input_data[["histological_type"]] == 'Glioblastoma Multiforme (GBM)') inputs_for_demog_ch[["history_of_neoadjuvant_treatmentNo"]] <<- 
as.numeric(input_data[["history_of_neoadjuvant_treatment"]] == 'No') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodExcisional Biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Excisional Biopsy') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodIncisional Biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Incisional Biopsy') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodOther method, specify"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Other method, specify') inputs_for_demog_ch[["initial_pathologic_diagnosis_methodFine needle aspiration biopsy"]] <<- as.numeric(input_data[["initial_pathologic_diagnosis_method"]] == 'Fine needle aspiration biopsy') inputs_for_demog_ch[["karnofsky_performance_score"]] <<- input_data[["karnofsky_performance_score"]] inputs_for_demog_ch[["person_neoplasm_cancer_statusTUMOR FREE"]] <<- as.numeric(input_data[["person_neoplasm_cancer_status"]] == 'TUMOR FREE') inputs_for_demog_ch[["prior_gliomaYES"]] <<- as.numeric(input_data[["prior_glioma"]] == 'YES') inputs_for_demog_ch[["raceBLACK OR AFRICAN AMERICAN"]] <<- as.numeric(input_data[["race"]] == 'BLACK OR AFRICAN AMERICAN') inputs_for_demog_ch[["raceASIAN"]] <<- as.numeric(input_data[["race"]] == 'ASIAN') GAmodel <- rbga.bin(size = n_options, popSize = 200, iters = 6, mutationChance = 1/(n_options + 1), elitism = T, evalFunc = evalFunc, verbose = TRUE) df <- process_output_of_genetic(GAmodel, treatment_options) } #Rank the chromosomes of the final population by evaluation. Since the GA #minimizes the objective function, the lower the evaluation, the higher they should #be ranked. 
Survival probability = sigmoid(-evaluation) process_output_of_genetic <- function(GAmodel, treatment_options) { library(boot) df <- as.data.frame(GAmodel$population) df$evaluation <- GAmodel$evaluations df$surv_prob <- inv.logit(-df$evaluation) df <- df[order(-df[, "surv_prob"]),] df <- df[!duplicated(df), ] df$combination <- apply(df, 1, function(row) convert_bit_string_to_text(row[paste("V", 1:12, sep = "")], treatment_options)) df <- df[,!(names(df) %in% c("evaluation", paste("V", 1:12, sep = "")))] row.names(df) <- NULL df <- df[c(2,1)] colnames(df) <- c("Combination", "1-year survival probability") df } convert_bit_string_to_text <- function(bit_string, treatment_options) { paste(treatment_options[which(bit_string == 1)], collapse = ", ") } # Define server logic required to do the hill climbing for a given patient shinyServer(function(input, output) { datasetInput <- reactive({ age_at_initial_pathologic_diagnosis <- input$age_at_initial_pathologic_diagnosis ethnicity <- input$ethnicity gender <- input$gender race <- input$race histological_type <- input$histological_type history_of_neoadjuvant_treatment <- input$history_of_neoadjuvant_treatment initial_pathologic_diagnosis_method <- input$initial_pathologic_diagnosis_method karnofsky_performance_score <- input$karnofsky_performance_score person_neoplasm_cancer_status <- input$person_neoplasm_cancer_status prior_glioma <- input$prior_glioma list("age_at_initial_pathologic_diagnosis" = age_at_initial_pathologic_diagnosis, "ethnicity" = ethnicity, "gender" = gender, "race" = race, "histological_type" = histological_type, "history_of_neoadjuvant_treatment" = history_of_neoadjuvant_treatment, "initial_pathologic_diagnosis_method" = initial_pathologic_diagnosis_method, "karnofsky_performance_score" = karnofsky_performance_score, "person_neoplasm_cancer_status" = person_neoplasm_cancer_status, "prior_glioma" = prior_glioma) }) output$ranked_results <- renderTable({ input_data <- datasetInput() 
genetic_algorithm_for_optimal(input_data) }) })
#----------------------------------#
#  Estimation Using Delta Normal   #
#     & Historical Simulation      #
#                                  #
#          Dr Aric LaBarr          #
#----------------------------------#

# Needed Libraries for Analysis #
library(graphics)
library(haven)
library(ks)
library(scales)

# Load Stock Data From SAS #
stocks <- read_sas('E:/Courses/Simulation and Risk/Data/stocks.sas7bdat')

# Stock Information #
msft.holding <- 1700
aapl.holding <- 2500
VaR.percentile <- 0.05

# Calculate Needed Variances and Covariances #
var.msft <- var(stocks$msft_r, na.rm = TRUE)
var.aapl <- var(stocks$aapl_r, na.rm = TRUE)
cov.m.a <- cov(stocks$msft_r, stocks$aapl_r, use = "pairwise.complete.obs")
cor.m.a <- cor(stocks$msft_r, stocks$aapl_r, use = "pairwise.complete.obs")

# Calculate Current Price of Holdings (Portfolio): last observed price #
msft.p <- stocks$msft_p[length(stocks$msft_p)]
aapl.p <- stocks$aapl_p[length(stocks$aapl_p)]

# Monte Carlo Simulation Approach #
n.simulations <- 10000

# Correlation matrix and its (lower-triangular) Cholesky factor
R <- matrix(data = cbind(1, cor.m.a, cor.m.a, 1), nrow = 2)
U <- t(chol(R))

# BUG FIX: the original drew each return series with its own sd, built
# correlated returns in `port.r`, and then never used `port.r` -- the
# portfolio was valued with the UNCORRELATED draws. Here we draw standard
# normals, correlate them with the Cholesky factor, and only then scale by
# each stock's own sd, so both the correlation and the marginal variances
# are preserved.
msft.z <- rnorm(n = n.simulations, mean = 0, sd = 1)
aapl.z <- rnorm(n = n.simulations, mean = 0, sd = 1)
Both.r <- cbind(msft.z, aapl.z)
port.r <- t(U %*% t(Both.r))        # correlated standard normals (n x 2)
msft.r <- sqrt(var.msft) * port.r[, 1]
aapl.r <- sqrt(var.aapl) * port.r[, 2]

# One-day portfolio value and change in value (log-return price update)
value <- msft.holding * (exp(msft.r + log(msft.p))) +
  aapl.holding * (exp(aapl.r + log(aapl.p)))
value.change <- value - (msft.holding * msft.p + aapl.holding * aapl.p)

VaR <- quantile(value.change, VaR.percentile, na.rm = TRUE)
VaR.label <- dollar(VaR)

hist(value.change, breaks = 50, main = '1 Day Value Change Distribution',
     xlab = 'Value Change')
abline(v = VaR, col = "red", lwd = 2)
mtext(paste("Value at Risk", VaR.label, sep = " = "), at = VaR, col = "red")

# Expected shortfall: mean loss beyond the VaR threshold
ES <- mean(value.change[value.change < VaR], na.rm = TRUE)
dollar(ES)

# Confidence Intervals for Value at Risk & Expected Shortfall - Bootstrap Approach #
n.bootstraps <- 1000
sample.size <- 1000

VaR.boot <- rep(0, n.bootstraps)
ES.boot <- rep(0, n.bootstraps)
for (i in seq_len(n.bootstraps)) {
  bootstrap.sample <- sample(value.change, size = sample.size)
  VaR.boot[i] <- quantile(bootstrap.sample, VaR.percentile, na.rm = TRUE)
  ES.boot[i] <- mean(bootstrap.sample[bootstrap.sample < VaR.boot[i]], na.rm = TRUE)
}

VaR.boot.U <- quantile(VaR.boot, 0.975, na.rm = TRUE)
VaR.boot.L <- quantile(VaR.boot, 0.025, na.rm = TRUE)
dollar(VaR.boot.L)
dollar(VaR)
dollar(VaR.boot.U)

ES.boot.U <- quantile(ES.boot, 0.975, na.rm = TRUE)
ES.boot.L <- quantile(ES.boot, 0.025, na.rm = TRUE)
dollar(ES.boot.L)
dollar(ES)
dollar(ES.boot.U)
/MSA/Estimation Using Monte Carlo Simulation.R
no_license
nadolsw/R
R
false
false
2,599
r
#----------------------------------#
#  Estimation Using Delta Normal   #
#     & Historical Simulation      #
#                                  #
#          Dr Aric LaBarr          #
#----------------------------------#

# Needed Libraries for Analysis #
library(graphics)
library(haven)
library(ks)
library(scales)

# Load Stock Data From SAS #
stocks <- read_sas('E:/Courses/Simulation and Risk/Data/stocks.sas7bdat')

# Stock Information #
msft.holding <- 1700
aapl.holding <- 2500
VaR.percentile <- 0.05

# Calculate Needed Variances and Covariances #
var.msft <- var(stocks$msft_r, na.rm = TRUE)
var.aapl <- var(stocks$aapl_r, na.rm = TRUE)
cov.m.a <- cov(stocks$msft_r, stocks$aapl_r, use = "pairwise.complete.obs")
cor.m.a <- cor(stocks$msft_r, stocks$aapl_r, use = "pairwise.complete.obs")

# Calculate Current Price of Holdings (Portfolio): last observed price #
msft.p <- stocks$msft_p[length(stocks$msft_p)]
aapl.p <- stocks$aapl_p[length(stocks$aapl_p)]

# Monte Carlo Simulation Approach #
n.simulations <- 10000

# Correlation matrix and its (lower-triangular) Cholesky factor
R <- matrix(data = cbind(1, cor.m.a, cor.m.a, 1), nrow = 2)
U <- t(chol(R))

# BUG FIX: the original drew each return series with its own sd, built
# correlated returns in `port.r`, and then never used `port.r` -- the
# portfolio was valued with the UNCORRELATED draws. Here we draw standard
# normals, correlate them with the Cholesky factor, and only then scale by
# each stock's own sd, so both the correlation and the marginal variances
# are preserved.
msft.z <- rnorm(n = n.simulations, mean = 0, sd = 1)
aapl.z <- rnorm(n = n.simulations, mean = 0, sd = 1)
Both.r <- cbind(msft.z, aapl.z)
port.r <- t(U %*% t(Both.r))        # correlated standard normals (n x 2)
msft.r <- sqrt(var.msft) * port.r[, 1]
aapl.r <- sqrt(var.aapl) * port.r[, 2]

# One-day portfolio value and change in value (log-return price update)
value <- msft.holding * (exp(msft.r + log(msft.p))) +
  aapl.holding * (exp(aapl.r + log(aapl.p)))
value.change <- value - (msft.holding * msft.p + aapl.holding * aapl.p)

VaR <- quantile(value.change, VaR.percentile, na.rm = TRUE)
VaR.label <- dollar(VaR)

hist(value.change, breaks = 50, main = '1 Day Value Change Distribution',
     xlab = 'Value Change')
abline(v = VaR, col = "red", lwd = 2)
mtext(paste("Value at Risk", VaR.label, sep = " = "), at = VaR, col = "red")

# Expected shortfall: mean loss beyond the VaR threshold
ES <- mean(value.change[value.change < VaR], na.rm = TRUE)
dollar(ES)

# Confidence Intervals for Value at Risk & Expected Shortfall - Bootstrap Approach #
n.bootstraps <- 1000
sample.size <- 1000

VaR.boot <- rep(0, n.bootstraps)
ES.boot <- rep(0, n.bootstraps)
for (i in seq_len(n.bootstraps)) {
  bootstrap.sample <- sample(value.change, size = sample.size)
  VaR.boot[i] <- quantile(bootstrap.sample, VaR.percentile, na.rm = TRUE)
  ES.boot[i] <- mean(bootstrap.sample[bootstrap.sample < VaR.boot[i]], na.rm = TRUE)
}

VaR.boot.U <- quantile(VaR.boot, 0.975, na.rm = TRUE)
VaR.boot.L <- quantile(VaR.boot, 0.025, na.rm = TRUE)
dollar(VaR.boot.L)
dollar(VaR)
dollar(VaR.boot.U)

ES.boot.U <- quantile(ES.boot, 0.975, na.rm = TRUE)
ES.boot.L <- quantile(ES.boot, 0.025, na.rm = TRUE)
dollar(ES.boot.L)
dollar(ES)
dollar(ES.boot.U)
# Word-count tracker backed by Dropbox CSV storage.
library(ggplot2)
library(reshape2)
library(rdrop2)
library(googlesheets)

# Authenticate against Dropbox and persist the token for server-side reuse.
token <- drop_auth()
saveRDS(token, "droptoken.rds")
# Upload droptoken to your server
# ******** WARNING ********
# Losing this file will give anyone
# complete control of your Dropbox account
# You can then revoke the rdrop2 app from your
# dropbox account and start over.
# ******** WARNING ********
# read it back with readRDS
token <- readRDS("droptoken.rds")
# Then pass the token to each drop_ function
drop_acc(dtoken = token)

outputDir <- "wordcounttrackerdata"

# Persist one data frame of submissions as a uniquely named CSV in Dropbox.
saveData <- function(data) {
  #data <- t(data)
  # Create a unique file name (timestamp + content digest)
  fileName <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(data))
  # Write the data to a temporary file locally
  filePath <- file.path(tempdir(), fileName)
  write.csv(data, filePath, row.names = FALSE, quote = TRUE)
  # Upload the file to Dropbox
  drop_upload(filePath, dest = outputDir, dtoken = token)
}

# Read every CSV in the Dropbox folder and row-bind them into one data frame.
loadData <- function() {
  filesInfo <- drop_dir(outputDir, dtoken = token)
  filePaths <- filesInfo$path
  data <- lapply(filePaths, drop_read_csv, stringsAsFactors = FALSE)
  # Concatenate all data together into one data.frame
  data <- do.call(rbind, data)
  return(data)
}

# Load historical records into the globals used by the functions below.
my_records <<- loadData()
my_records$Date <- as.Date(my_records$Date)
all_records <<- my_records
MinDate <<- min(all_records$Date)

# Validate and record one submission; returns a confirmation string.
recordWC <- function(wordcount, date, project, writer) {
  date <- as.Date(date)
  if (wordcount <= 0) {
    return()
  }
  if (project == "Enter the name of your project...") {
    project <- "Untitled"
  }
  if (writer == "Enter your name...") {
    writer <- "anonymous"
  }
  new_record <- data.frame(Date = date, Project = project,
                           WordCount = wordcount, Writer = writer)
  all_records <<- rbind(all_records, new_record)
  saveData(new_record)
  paste(writer, " submitted ", wordcount, " for ", project, " on ", date, sep = "")
}

# Scatter-plot word counts over a date window, coloured by Project or Writer.
plotWC_proj <- function(minDate = MinDate, maxDate = Sys.Date(), plotby = "Project") {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  # BUG FIX: the original read `minDate = maxDate = 60`, which assigned the
  # number 60 to BOTH bounds and produced an empty plot window. The intent is
  # to default to the most recent 60 days whenever the full history spans
  # more than 700 days.
  if (minDate == MinDate & maxDate - minDate > 700) {
    minDate <- maxDate - 60
  }
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  if (plotby == "Writer") {
    ggplot(data = rawData, aes(x = Date, y = WordCount, group = Writer, colour = Writer)) +
      geom_point() +
      scale_colour_hue(l = 45)
  } else {
    ggplot(data = rawData, aes(x = Date, y = WordCount, group = Project, colour = Project)) +
      geom_point() +
      scale_colour_hue(l = 45)
  }
}

# One-row summary: average/max submission and the top (project, writer) pair.
getWC_Summary <- function(minDate = MinDate, maxDate = Sys.Date()) {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  avg_day <- sum(rawData$WordCount) / length(unique(rawData$Date))
  max_wc <- max(rawData$WordCount)
  # Total word count per (Project, Writer); keep the single largest total.
  topproj <- dcast(rawData, Project + Writer ~ ., value.var = "WordCount", sum)
  topproj <- topproj[topproj[, 3] == max(topproj[, 3]), ]
  topproj <- topproj[1, ]
  summary <- data.frame(AverageWCSubmission = avg_day,
                        MaxWCSubmission = max_wc,
                        TopWriter = topproj[1, 2],
                        TopProject = topproj[1, 1],
                        TopProjectWC = topproj[1, 3])
  return(summary)
}

# Top-5 table of total word count per (Project, Writer).
getWC_Table <- function(minDate = MinDate, maxDate = Sys.Date()) {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  projs <- dcast(rawData, Project + Writer ~ ., value.var = "WordCount", sum, na.rm = TRUE)
  names(projs)[3] <- "Word Count"
  return(head(projs, n = 5))
}
/wordCountTracker.R
no_license
nicaless/wordcounttracker
R
false
false
3,900
r
# Word-count tracker backed by Dropbox CSV storage.
library(ggplot2)
library(reshape2)
library(rdrop2)
library(googlesheets)

# Authenticate against Dropbox and persist the token for server-side reuse.
token <- drop_auth()
saveRDS(token, "droptoken.rds")
# Upload droptoken to your server
# ******** WARNING ********
# Losing this file will give anyone
# complete control of your Dropbox account
# You can then revoke the rdrop2 app from your
# dropbox account and start over.
# ******** WARNING ********
# read it back with readRDS
token <- readRDS("droptoken.rds")
# Then pass the token to each drop_ function
drop_acc(dtoken = token)

outputDir <- "wordcounttrackerdata"

# Persist one data frame of submissions as a uniquely named CSV in Dropbox.
saveData <- function(data) {
  #data <- t(data)
  # Create a unique file name (timestamp + content digest)
  fileName <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(data))
  # Write the data to a temporary file locally
  filePath <- file.path(tempdir(), fileName)
  write.csv(data, filePath, row.names = FALSE, quote = TRUE)
  # Upload the file to Dropbox
  drop_upload(filePath, dest = outputDir, dtoken = token)
}

# Read every CSV in the Dropbox folder and row-bind them into one data frame.
loadData <- function() {
  filesInfo <- drop_dir(outputDir, dtoken = token)
  filePaths <- filesInfo$path
  data <- lapply(filePaths, drop_read_csv, stringsAsFactors = FALSE)
  # Concatenate all data together into one data.frame
  data <- do.call(rbind, data)
  return(data)
}

# Load historical records into the globals used by the functions below.
my_records <<- loadData()
my_records$Date <- as.Date(my_records$Date)
all_records <<- my_records
MinDate <<- min(all_records$Date)

# Validate and record one submission; returns a confirmation string.
recordWC <- function(wordcount, date, project, writer) {
  date <- as.Date(date)
  if (wordcount <= 0) {
    return()
  }
  if (project == "Enter the name of your project...") {
    project <- "Untitled"
  }
  if (writer == "Enter your name...") {
    writer <- "anonymous"
  }
  new_record <- data.frame(Date = date, Project = project,
                           WordCount = wordcount, Writer = writer)
  all_records <<- rbind(all_records, new_record)
  saveData(new_record)
  paste(writer, " submitted ", wordcount, " for ", project, " on ", date, sep = "")
}

# Scatter-plot word counts over a date window, coloured by Project or Writer.
plotWC_proj <- function(minDate = MinDate, maxDate = Sys.Date(), plotby = "Project") {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  # BUG FIX: the original read `minDate = maxDate = 60`, which assigned the
  # number 60 to BOTH bounds and produced an empty plot window. The intent is
  # to default to the most recent 60 days whenever the full history spans
  # more than 700 days.
  if (minDate == MinDate & maxDate - minDate > 700) {
    minDate <- maxDate - 60
  }
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  if (plotby == "Writer") {
    ggplot(data = rawData, aes(x = Date, y = WordCount, group = Writer, colour = Writer)) +
      geom_point() +
      scale_colour_hue(l = 45)
  } else {
    ggplot(data = rawData, aes(x = Date, y = WordCount, group = Project, colour = Project)) +
      geom_point() +
      scale_colour_hue(l = 45)
  }
}

# One-row summary: average/max submission and the top (project, writer) pair.
getWC_Summary <- function(minDate = MinDate, maxDate = Sys.Date()) {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  avg_day <- sum(rawData$WordCount) / length(unique(rawData$Date))
  max_wc <- max(rawData$WordCount)
  # Total word count per (Project, Writer); keep the single largest total.
  topproj <- dcast(rawData, Project + Writer ~ ., value.var = "WordCount", sum)
  topproj <- topproj[topproj[, 3] == max(topproj[, 3]), ]
  topproj <- topproj[1, ]
  summary <- data.frame(AverageWCSubmission = avg_day,
                        MaxWCSubmission = max_wc,
                        TopWriter = topproj[1, 2],
                        TopProject = topproj[1, 1],
                        TopProjectWC = topproj[1, 3])
  return(summary)
}

# Top-5 table of total word count per (Project, Writer).
getWC_Table <- function(minDate = MinDate, maxDate = Sys.Date()) {
  rawData <- all_records
  rawData$Date <- as.Date(rawData$Date)
  rawData$WordCount <- as.numeric(rawData$WordCount)
  rawData <- subset(rawData, Date >= minDate)
  rawData <- subset(rawData, Date <= maxDate)
  projs <- dcast(rawData, Project + Writer ~ ., value.var = "WordCount", sum, na.rm = TRUE)
  names(projs)[3] <- "Word Count"
  return(head(projs, n = 5))
}
setwd("/Users/carlybarbera/Desktop/Biocomputing_Junk") iris <- read.csv("iris.csv") # function to return #odd rows of a dataframe nootevenstevens <- function(x){ x[seq(1,nrow(x), by=2),] } nootevenstevens(iris) # function to return number of rows for a given species rowcount <- function(x, y){ count <- sum(x == y) return(count) } rowcount(x=iris, y="setosa") # function to return a dataframe with sepalwidth # greater than specified value widthcount <- function(x,y,z){ width <- (x[y > z,]) return(width) } widthcount(iris, iris$Sepal.Width, 3.5) # write the data for a given species to a commma del file # with the given species name as the file name speciescsv <- function(x,y,z){ write.csv(x[y == z,], file = paste(z, "csv", sep = ".")) } speciescsv(x = iris, y = iris$Species, z = "setosa")
/BarberaNorthIBC07.R
no_license
dnorthnd/IBC_Exercise_07
R
false
false
841
r
setwd("/Users/carlybarbera/Desktop/Biocomputing_Junk") iris <- read.csv("iris.csv") # function to return #odd rows of a dataframe nootevenstevens <- function(x){ x[seq(1,nrow(x), by=2),] } nootevenstevens(iris) # function to return number of rows for a given species rowcount <- function(x, y){ count <- sum(x == y) return(count) } rowcount(x=iris, y="setosa") # function to return a dataframe with sepalwidth # greater than specified value widthcount <- function(x,y,z){ width <- (x[y > z,]) return(width) } widthcount(iris, iris$Sepal.Width, 3.5) # write the data for a given species to a commma del file # with the given species name as the file name speciescsv <- function(x,y,z){ write.csv(x[y == z,], file = paste(z, "csv", sep = ".")) } speciescsv(x = iris, y = iris$Species, z = "setosa")
## These functions will cache the inverse of a matrix for future use ## makeCacheMatrix creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinverse <- function(inverse) m <<- inverse getinverse <- function() m list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve ## should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { m <- x$getinverse() if (!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data) x$setinverse(m) m }
/cachematrix.R
no_license
MsTiggy/ProgrammingAssignment2
R
false
false
1,025
r
## These functions will cache the inverse of a matrix for future use ## makeCacheMatrix creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinverse <- function(inverse) m <<- inverse getinverse <- function() m list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve ## should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { m <- x$getinverse() if (!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data) x$setinverse(m) m }
plotSampleClusteringDendro <- function(dendro, fontSize = 15, fileName = "SampleClustering.pdf"){ sampleClusteringQC <- ggdendrogram(dendro, rotate = TRUE)+ ggtitle("Sample Clustering to Detect Outliers")+ xlab("Sample")+ ylab("Height")+ coord_flip() theme_bw() theme_bw(base_size = fontSize) ggsave(filename = fileName, path = "Results", plot = last_plot()) message("Sample dendrogram created") }
/R/plotSampleClusteringDendro.R
permissive
avcarr2/MetaNetworkDownloadable
R
false
false
451
r
plotSampleClusteringDendro <- function(dendro, fontSize = 15, fileName = "SampleClustering.pdf"){ sampleClusteringQC <- ggdendrogram(dendro, rotate = TRUE)+ ggtitle("Sample Clustering to Detect Outliers")+ xlab("Sample")+ ylab("Height")+ coord_flip() theme_bw() theme_bw(base_size = fontSize) ggsave(filename = fileName, path = "Results", plot = last_plot()) message("Sample dendrogram created") }
library(shiny) library(shinyjs) ui <- fluidPage( useShinyjs(), # Include shinyjs actionButton("button", "Click me"), textInput("text", "Text") ) server <- function(input, output) { observeEvent(input$button, { toggle("text") # toggle is a shinyjs function }) } shinyApp(ui, server)
/js.R
no_license
avijandiran/My-shiny-dashboards
R
false
false
319
r
library(shiny) library(shinyjs) ui <- fluidPage( useShinyjs(), # Include shinyjs actionButton("button", "Click me"), textInput("text", "Text") ) server <- function(input, output) { observeEvent(input$button, { toggle("text") # toggle is a shinyjs function }) } shinyApp(ui, server)
testlist <- list(data = structure(3.52936705200262e+30, .Dim = c(1L, 1L)), q = 0) result <- do.call(biwavelet:::rcpp_row_quantile,testlist) str(result)
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554720-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
156
r
testlist <- list(data = structure(3.52936705200262e+30, .Dim = c(1L, 1L)), q = 0) result <- do.call(biwavelet:::rcpp_row_quantile,testlist) str(result)
shinyUI( pageWithSidebar( headerPanel("Diabetes prediction"), sidebarPanel( numericInput('glucose', 'Glucose mg/dl', 90, min = 50, max = 200, step = 5), submitButton('Submit') ), mainPanel( h3('Results of prediction'), h4('You entered'), verbatimTextOutput("inputValue"), h4('Which resulted in a prediction of '), verbatimTextOutput("prediction") ) ) )
/Tutorials & Trails/jhp_1/ui.R
no_license
AaronRanAn/JHU_Developing_Data_Products
R
false
false
539
r
shinyUI( pageWithSidebar( headerPanel("Diabetes prediction"), sidebarPanel( numericInput('glucose', 'Glucose mg/dl', 90, min = 50, max = 200, step = 5), submitButton('Submit') ), mainPanel( h3('Results of prediction'), h4('You entered'), verbatimTextOutput("inputValue"), h4('Which resulted in a prediction of '), verbatimTextOutput("prediction") ) ) )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/burk-data.R \docType{data} \name{burk} \alias{burk} \title{Burkitt's lymphoma in Uganda} \format{\code{burk} is a named list with three members: \describe{ \item{\code{$cases}}{ An object of class \code{\link[spatstat.geom]{ppp}} giving the spatial locations (eastings/northings) of the 188 cases of Burkitt's lymphoma recorded in individuals of various ages (mostly children); the spatial study region as a polygonal \code{\link[spatstat.geom]{owin}}; as well as the time (in days since 1/1/1960) of each observation stored as the \code{marks} of the points. } \item{\code{$cases.age}}{ A numeric vector of length 188 giving the age of each individual in \code{$cases}. } \item{\code{$controls}}{ An object of class \code{\link[spatstat.geom]{ppp}} giving 500 \bold{artificially simulated} spatial-only observations to pose as a `control' data set representing the at-risk population. The data were generated from a smooth kernel estimate of the spatial margin of the cases. The similarity between the case point distribution and the true at-risk population dispersion can be seen in e.g. Figure 2 of Middleton and Greenland (1954). } }} \source{ The case data were extracted from the \code{\link[splancs]{burkitt}} object of the \code{splancs} R package; see \cr\cr Rowlingson B. and Diggle P.J. (2017), splancs: Spatial and Space-Time Point Pattern Analysis, R package version 2.01-40; \url{https://CRAN.R-project.org/package=splancs}. } \description{ Data of the spatiotemporal locations of Burkitt's lymphoma in the Western Nile district of Uganda from 1960 to 1975. } \examples{ data(burk) summary(burk$cases) par(mfrow=c(1,3)) plot(burk$cases) plot(burk$controls) plot(density(marks(burk$cases)),xlim=range(marks(burk$cases))) } \references{ Bailey, T.C. and Gatrell, A.C. (1995), \emph{Interactive spatial data analysis}, Longman; Harlow. Middleton, J.F.M. and Greenland, D.J. 
(1954), Land and population in West Nile District, Uganda, \emph{The Geographical Journal}, \bold{120}, 446--455. } \keyword{data}
/sparr/man/burk.Rd
no_license
albrizre/spatstat.revdep
R
false
true
2,101
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/burk-data.R \docType{data} \name{burk} \alias{burk} \title{Burkitt's lymphoma in Uganda} \format{\code{burk} is a named list with three members: \describe{ \item{\code{$cases}}{ An object of class \code{\link[spatstat.geom]{ppp}} giving the spatial locations (eastings/northings) of the 188 cases of Burkitt's lymphoma recorded in individuals of various ages (mostly children); the spatial study region as a polygonal \code{\link[spatstat.geom]{owin}}; as well as the time (in days since 1/1/1960) of each observation stored as the \code{marks} of the points. } \item{\code{$cases.age}}{ A numeric vector of length 188 giving the age of each individual in \code{$cases}. } \item{\code{$controls}}{ An object of class \code{\link[spatstat.geom]{ppp}} giving 500 \bold{artificially simulated} spatial-only observations to pose as a `control' data set representing the at-risk population. The data were generated from a smooth kernel estimate of the spatial margin of the cases. The similarity between the case point distribution and the true at-risk population dispersion can be seen in e.g. Figure 2 of Middleton and Greenland (1954). } }} \source{ The case data were extracted from the \code{\link[splancs]{burkitt}} object of the \code{splancs} R package; see \cr\cr Rowlingson B. and Diggle P.J. (2017), splancs: Spatial and Space-Time Point Pattern Analysis, R package version 2.01-40; \url{https://CRAN.R-project.org/package=splancs}. } \description{ Data of the spatiotemporal locations of Burkitt's lymphoma in the Western Nile district of Uganda from 1960 to 1975. } \examples{ data(burk) summary(burk$cases) par(mfrow=c(1,3)) plot(burk$cases) plot(burk$controls) plot(density(marks(burk$cases)),xlim=range(marks(burk$cases))) } \references{ Bailey, T.C. and Gatrell, A.C. (1995), \emph{Interactive spatial data analysis}, Longman; Harlow. Middleton, J.F.M. and Greenland, D.J. 
(1954), Land and population in West Nile District, Uganda, \emph{The Geographical Journal}, \bold{120}, 446--455. } \keyword{data}
library(sfsmisc) ### Name: tkdensity ### Title: GUI Density Estimation using Tcl/Tk ### Aliases: tkdensity ### Keywords: hplot dynamic ### ** Examples if (dev.interactive(TRUE)) ## does really not make sense otherwise if(try(require("tcltk"))) { ## sometimes (rarely) there, but broken data(faithful) tkdensity(faithful $ eruptions) set.seed(7) if(require("nor1mix")) tkdensity(rnorMix(1000, MW.nm9), kernels = c("gaussian", "epanechnikov")) }
/data/genthat_extracted_code/sfsmisc/examples/tkdensity.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
468
r
library(sfsmisc) ### Name: tkdensity ### Title: GUI Density Estimation using Tcl/Tk ### Aliases: tkdensity ### Keywords: hplot dynamic ### ** Examples if (dev.interactive(TRUE)) ## does really not make sense otherwise if(try(require("tcltk"))) { ## sometimes (rarely) there, but broken data(faithful) tkdensity(faithful $ eruptions) set.seed(7) if(require("nor1mix")) tkdensity(rnorMix(1000, MW.nm9), kernels = c("gaussian", "epanechnikov")) }
# /usr/bin/rscript ## Coursera Getting and Cleaning Data ## by Jeff Leek, PhD, Roger D. Peng, PhD, Brian Caffo, PhD ## Johns Hopkins University ## ## Question 4 ## ## How many characters are in the 10th, 20th, 30th and 100th lines of HTML from ## this page: ## ## http://biostat.jhsph.edu/~jleek/contact.html ## ## (Hint: the nchar() function in R may be helpful) ## ## 43 99 8 6 ## 43 99 7 25 ## 45 92 7 2 ## 45 31 2 25 ## 45 31 7 31 ## 45 31 7 25 ## 45 0 2 2 ## ## Description: ## This script attempts to answer the above question. ## ## Author: ## Min Wang (min.wang@depi.vic.gov.au) ## ## Date Created: ## 17 June 2015 ## ## Date modified and reason: ## ## Execution: ## Rscript <MODULE_NAME> ## ## Answer: ## 45 31 7 25 con = url("http://biostat.jhsph.edu/~jleek/contact.html") htmlCode = readLines(con) close(con) htmlCode lapply(as.vector(htmlCode[c(10, 20, 30, 100)]), nchar)
/03.GettingAndCleaningData/quizzes/quiz2/ques4.R
no_license
minw2828/datasciencecoursera
R
false
false
899
r
# /usr/bin/rscript ## Coursera Getting and Cleaning Data ## by Jeff Leek, PhD, Roger D. Peng, PhD, Brian Caffo, PhD ## Johns Hopkins University ## ## Question 4 ## ## How many characters are in the 10th, 20th, 30th and 100th lines of HTML from ## this page: ## ## http://biostat.jhsph.edu/~jleek/contact.html ## ## (Hint: the nchar() function in R may be helpful) ## ## 43 99 8 6 ## 43 99 7 25 ## 45 92 7 2 ## 45 31 2 25 ## 45 31 7 31 ## 45 31 7 25 ## 45 0 2 2 ## ## Description: ## This script attempts to answer the above question. ## ## Author: ## Min Wang (min.wang@depi.vic.gov.au) ## ## Date Created: ## 17 June 2015 ## ## Date modified and reason: ## ## Execution: ## Rscript <MODULE_NAME> ## ## Answer: ## 45 31 7 25 con = url("http://biostat.jhsph.edu/~jleek/contact.html") htmlCode = readLines(con) close(con) htmlCode lapply(as.vector(htmlCode[c(10, 20, 30, 100)]), nchar)
context("Test query functions")

orig.options <- options(test_context = TRUE)

## Disabled test kept for reference:
## test_that("regions and compilations are mandatory args", {
##   expect_error(query_jx(compilation = "tcga"), "argument \"regions\" is missing, with no default")
##   expect_error(query_jx(regions = "CD99"), "argument \"compilation\" is missing, with no default")
## })

test_that("simple junction query", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("srav2")
  sb$regions("CD99")
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/srav2/snaptron?regions=CD99")
})

test_that("using genomic ranges", {
  x1 <- "chr2:100-200:-"
  g_range <- GenomicRanges::GRanges(x1)
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions(g_range)
  sb <- set_column_filters(sb, SMTS == "Brain")
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=chr2:100-200&rfilter=strand:-&sfilter=SMTS:Brain")
})

test_that("junction query with NSE sample filter", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb <- set_column_filters(sb, SMTS == "Brain")
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&sfilter=SMTS:Brain")
})

test_that("junction query with mutiple NSE sample filters", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("srav2")
  sb$regions("CD99")
  sb <- set_column_filters(sb, library_name == "HG00115.6",
                           study_accession == "ERP001942")
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/srav2/snaptron?regions=CD99&sfilter=library_name:HG00115.6&sfilter=study_accession:ERP001942")
})

test_that("junction query with one NSE range filter", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb <- set_row_filters(sb, samples_count >= 5)
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&rfilter=samples_count>:5")
})

test_that("junction query with mutiple NSE range filters", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb <- set_row_filters(sb, samples_count <= 10, coverage_sum < 3)
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&rfilter=samples_count<:10&rfilter=coverage_sum<3")
})

test_that("invalid sample filter name", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb <- set_column_filters(sb, SNTS == "Brain")
  expect_error(query_jx(sb), "`SNTS' is not a valid sample filter")
})

test_that("invalid sample filter value", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb <- set_column_filters(sb, SMTS == 2)
  expect_error(query_jx(sb),
               "`SMTS' filter expects value of type String, but got Integer")
})

test_that("junction query with sids", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb$sids(1:3)
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&sids=1,2,3")
})

test_that("query with non-numeric sids", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("tcga")
  sb$regions("CD99")
  expect_error(sb$sids(c("1", "2", "3")), msg = "sids should be whole numbers")
})

test_that("test coordinate Coordinates$Exact", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb$coordinate_modifier(Coordinates$Exact)
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&exact=1")
})

test_that("test coordinate Coordinate$Within", {
  sb <- SnaptronQueryBuilder$new()
  sb$compilation("gtex")
  sb$regions("CD99")
  sb$coordinate_modifier(Coordinates$Within)
  query_jx(sb)
  expect_equal(uri_of_last_successful_request(),
               "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&contains=1")
})
/tests/testthat/test-query-functions.R
permissive
langmead-lab/snapcount
R
false
false
5,101
r
# Tests for the Snaptron query interface: each test builds a
# SnaptronQueryBuilder, runs query_jx() in test mode, and asserts on the
# exact URI that was sent to the Snaptron web service.
context("Test query functions")

# Switch the package into test mode; keep the previous options so they
# can be restored at the bottom of this file.
orig.options <- options(test_context = TRUE)

## test_that("regions and compilations are mandatory args", {
##   expect_error(query_jx(compilation = "tcga"), "argument \"regions\" is missing, with no default")
##   expect_error(query_jx(regions = "CD99"), "argument \"compilation\" is missing, with no default")
## })

# Minimal query: compilation plus a gene symbol only.
test_that("simple junction query", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("srav2")
    sb$regions("CD99")
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/srav2/snaptron?regions=CD99")
})

# A GRanges region should be decomposed into a coordinate range plus a
# strand row filter in the generated URI.
test_that("using genomic ranges", {
    x1 <- "chr2:100-200:-"
    g_range <- GenomicRanges::GRanges(x1)
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions(g_range)
    sb <- set_column_filters(sb, SMTS == "Brain")
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=chr2:100-200&rfilter=strand:-&sfilter=SMTS:Brain")
})

# Non-standard-evaluation (NSE) column filters become &sfilter= terms.
test_that("junction query with NSE sample filter", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb <- set_column_filters(sb, SMTS == "Brain")
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&sfilter=SMTS:Brain")
})

# Multiple column filters each get their own &sfilter= term, in order.
test_that("junction query with mutiple NSE sample filters", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("srav2")
    sb$regions("CD99")
    sb <- set_column_filters(sb, library_name == "HG00115.6",
                             study_accession == "ERP001942")
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/srav2/snaptron?regions=CD99&sfilter=library_name:HG00115.6&sfilter=study_accession:ERP001942")
})

# Row (range) filters: >= is encoded as ">:" in the Snaptron URI syntax.
test_that("junction query with one NSE range filter", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb <- set_row_filters(sb, samples_count >= 5)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&rfilter=samples_count>:5")
})

# NOTE(review): below, "<=" is encoded as "<:" but strict "<" stays bare
# ("coverage_sum<3") -- confirm this asymmetry is the intended encoding.
test_that("junction query with mutiple NSE range filters", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb <- set_row_filters(sb, samples_count <= 10, coverage_sum < 3)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&rfilter=samples_count<:10&rfilter=coverage_sum<3")
})

# Unknown sample-filter names are rejected when the query is executed,
# not when the filter is set.
test_that("invalid sample filter name", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb <- set_column_filters(sb, SNTS == "Brain")
    expect_error(
        query_jx(sb),
        "`SNTS' is not a valid sample filter")
})

# Filter values are type-checked against the compilation's schema.
test_that("invalid sample filter value", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb <- set_column_filters(sb, SMTS == 2)
    expect_error(
        query_jx(sb),
        "`SMTS' filter expects value of type String, but got Integer")
})

# Sample ids are serialised as a comma-separated &sids= parameter.
test_that("junction query with sids", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb$sids(1:3)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&sids=1,2,3")
})

# sids must be whole numbers; strings are rejected at set time.
test_that("query with non-numeric sids", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("tcga")
    sb$regions("CD99")
    expect_error(sb$sids(c("1", "2", "3")),
                 msg = "sids should be whole numbers")
})

# Coordinate modifiers map onto distinct URI parameters: Exact -> exact=1,
# Within -> contains=1, StartIsExactOrWithin -> either=1,
# EndIsExactOrWithin -> either=2.
test_that("test coordinate Coordinates$Exact", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb$coordinate_modifier(Coordinates$Exact)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&exact=1")
})

test_that("test coordinate Coordinate$Within", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb$coordinate_modifier(Coordinates$Within)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&contains=1")
})

test_that("test coordinate Coordinates$StartIsExactorWithin", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb$coordinate_modifier(Coordinates$StartIsExactOrWithin)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&either=1")
})

test_that("test coordinate Coordinates$EndIsExactOrWithin", {
    sb <- SnaptronQueryBuilder$new()
    sb$compilation("gtex")
    sb$regions("CD99")
    sb$coordinate_modifier(Coordinates$EndIsExactOrWithin)
    query_jx(sb)
    expect_equal(uri_of_last_successful_request(),
                 "http://snaptron.cs.jhu.edu/gtex/snaptron?regions=CD99&either=2")
})

# Restore the options captured at the top of the file.
options(orig.options)
S.basis=function(tt,basis,lambda=0,Lfdobj=vec2Lfd(c(0,0)),w=NULL,...){ phi=getbasismatrix(tt,basis,...) np<-length(tt) if (is.null(w)) w<-rep(1,np) if (!is.matrix(w)) w<-diag(w) if (lambda!=0) { R=getbasispenalty(basis,Lfdobj) S=phi%*%solve(t(phi)%*%w%*%phi+lambda*R)%*%t(phi)%*%w} else {S=phi%*%solve(t(phi)%*%w%*%phi)%*%t(phi)%*%w} return(S) }
/R/S.basis.R
no_license
dgorbachev/fda.usc
R
false
false
398
r
S.basis=function(tt,basis,lambda=0,Lfdobj=vec2Lfd(c(0,0)),w=NULL,...){ phi=getbasismatrix(tt,basis,...) np<-length(tt) if (is.null(w)) w<-rep(1,np) if (!is.matrix(w)) w<-diag(w) if (lambda!=0) { R=getbasispenalty(basis,Lfdobj) S=phi%*%solve(t(phi)%*%w%*%phi+lambda*R)%*%t(phi)%*%w} else {S=phi%*%solve(t(phi)%*%w%*%phi)%*%t(phi)%*%w} return(S) }
priorTau <- list(tau0 = 0, v0 = 1000) priorPsi <- list(psi0 = 500, eta0 = 1) priorVar <- list(s0 = 500, kappa0 = 1) priorBeta <- list(b0 = c(0,0), bMat = matrix(c(1000,100,100,1000), nc = 2)) data(hiermeanRegTest.df) data.df <- hiermeanRegTest.df design <- list(y = data.df$y, group = data.df$group, x = as.matrix(data.df[,3:4])) r<-hierMeanReg(design, priorTau, priorPsi, priorVar, priorBeta)
/Bolstad2/demo/hiermeanReg.r
no_license
ingted/R-Examples
R
false
false
422
r
priorTau <- list(tau0 = 0, v0 = 1000) priorPsi <- list(psi0 = 500, eta0 = 1) priorVar <- list(s0 = 500, kappa0 = 1) priorBeta <- list(b0 = c(0,0), bMat = matrix(c(1000,100,100,1000), nc = 2)) data(hiermeanRegTest.df) data.df <- hiermeanRegTest.df design <- list(y = data.df$y, group = data.df$group, x = as.matrix(data.df[,3:4])) r<-hierMeanReg(design, priorTau, priorPsi, priorVar, priorBeta)
#' Performs support vectors analysis for data sets with survival outcome. #' Three approaches are available in the package: #' The regression approach takes censoring into account when formulating the inequality constraints of the support vector problem. #' In the ranking approach, the inequality constraints set the objective to maximize the concordance index for comparable pairs of observations. #' The hybrid approach combines the regression and ranking constraints in the same model. #' #' The following denotations are used for the models implemented: #' \itemize{ #' \item \code{'regression'} referring to the regression approach, named \code{SVCR} in Van Belle et al. (2011b), #' \item \code{'vanbelle1'} according to the first version of survival surpport vector machines based on ranking constraints, #' named \code{RANKSVMC} by Van Belle et al. (2011b), #' \item \code{'vanbelle2'} according to the second version of survival surpport vector machines based on ranking constraints #' like presented in \code{model1} by Van Belle et al. (2011b) and #' \item \code{'hybrid'} combines simultaneously the regression and ranking constraints in the same model. Hybrid model is labeled #' \code{model2} by Van Belle et al. (2011b). #' #' } #' The argument \code{'type'} of the function \code{survivalsvm} is used to set the type of model to be fitted. #' For the models \code{vanbelle1}, \code{vanbelle2} and \code{hybrid}, differences between comparable #' pairs of observations are required. Each observation is compared with its nearest neighbor according to the survival time, and the #' three possible comparison approaches \link{makediff1}, \link{makediff2} and \link{makediff3} are offered to compute the #' differences between comparable neighbors. #' #' The current version of \code{survivalsvm} uses the solvers \code{\link{ipop}} and \code{\link{quadprog}} to solve the dual #' optimization problems deduced from the suport vector formulations of the models presented above. 
Notice that for using \code{quadprog} #' the kernel matrix needs to be symmetric and positive definite. Therefore when the conditions are not met, the kernel matrix needs be slightly perturbed to obtain the nearest positive definite kernel matrix. #' The alternative to \code{quadprog} is \code{ipop}, that can also handle a non-negative definite kernel matrix, however more time may be #' required to solve the quadratic optimization dual problem. The argument \code{opt.meth} is used to select the solver. #' #' The \code{survivalsvm} command can be called giving a formula, in which the survival time and the status are grouped into a #' two colunm matrix using the command \code{\link{Surv}} from the package \code{survival}. An alternative is to pass the data #' frame of training data points as an argument using \code{data}, to mention the name of the survival time variable and #' the name of the status variable as illustrated in the third example below. #' #' @title survivalsvm #' @param formula [\code{formula(1)}]\cr #' Object of class \code{formula}. See \code{\link{formula}} for more details. #' @param data [\code{data.frame(1)}]\cr #' Object of class \code{data.frame} containing data points that will be used to fit the model. #' @param subset [\code{vector(1)}]\cr #' An index vector specifying the cases to be used in the training sample. #' @param type [\code{character(1)}]\cr #' String indicating which type of survival support vectors model is desired. This must be one #' of the following strings: 'regression', 'vanbelle1', 'vanbelle2' or 'hybrid'. #' @param diff.meth [\code{character(1)}]\cr #' String indicating which of \code{'makediff1'}, \code{'makediff2'} or \code{'makediff3'} #' is used in case of 'vanbelle1', 'vanbelle2' and 'hybrid'. #' @param gamma.mu [\code{numeric(1)|vector(1)}]\cr #' Parameters of regularization. Note that a vector with two parameters is required in case of \code{hybrid} approach. 
Just #' one value is required in case of \code{regression}, \code{vanbelle1} or \code{vanbelle2}. #' @param opt.meth [\code{character(1)}]\cr #' Program used to solve the quadratic optimization problem. Either "\code{\link{quadprog}}" or "\code{\link{ipop}}". #' @param kernel [\code{\link{Kernel}(1)}]\cr #' Kernel used to fit the model: linear kern ('lin_kernel'), additive kernel ('add_kernel'), #' radial basis kernels ('rbf_kernel') and the polynomial kernel ('poly_kernel'). #' @param kernel.pars [\code{vector(1)}]\cr #' Parameters of kernel, when required. #' @param time.variable.name [\code{character}]\cr #' Name of the survival time variable in \code{data}, when given in argument. #' @param status.variable.name [\code{character(1)}]\cr #' Name of the status variable in \code{data}. #' @param sgf.sv [\code{character(1)}]\cr #' Number of decimal digits in the solution of the quadratic optimization problem. #' @param sigf [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param maxiter [\code{integer(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param margin [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param bound [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param eig.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. #' @param conv.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. #' @param posd.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. 
#' #' @return \code{survivalsvm} #' Object of class \code{survivalsvm}, with elements: #' \tabular{ll}{ #' \code{call} \tab command calling this program, \cr #' \code{typeofsurvivalsvm} \tab type of survival support vector machines approach,\cr #' \code{model.fit} \tab the fitted survival model,\cr #' \code{var.names} \tab names of variables used.\cr #' } #' @export #' #' @seealso \link{predict.survivalsvm} #' @examples #' survivalsvm(Surv(time, status) ~ ., veteran, gamma.mu = 0.1) #' #' survsvm.reg <- survivalsvm(formula = Surv(diagtime, status) ~ ., data = veteran, #' type = "regression", gamma.mu = 0.1, #' opt.meth = "ipop", kernel = "add_kernel") #' #' survsvm.vb2 <- survivalsvm(data = veteran, time.variable.name = "diagtime", #' status.variable.name = "status", #' type = "vanbelle2", gamma.mu = 0.1, #' opt.meth = "quadprog", diff.meth = "makediff3", #' kernel = "lin_kernel", #' sgf.sv = 5, sigf = 7, maxiter = 20, #' margin = 0.05, bound = 10) #' #' @author Cesaire J. K. Fouodo #' #' @note This implementation is in part inspired by the \code{Matlab} toolbox \code{Survlab} #' (\href{http://user.it.uu.se/~kripe367/survlab/instruction.html}{\code{A Survival Analysis Toolbox}}). #' @references #' \itemize{ #' \item Van Belle, V., Pelcmans, K., Van Huffel S. and Suykens J. A.K. (2011a). #' Improved performance on high-dimensional survival data by application of Survival-SVM. #' Bioinformatics (Oxford, England) 27, 87-94. #' \item Van Belle, V., Pelcmans, K., Van Huffel S. and Suykens J. A.K. (2011b). #' Support vector methods for survival analysis: a comparaison between ranking and regression approaches. #' Artificial Intelligence in medecine 53, 107-118. 
#' }
#' @import survival
#' @importFrom utils packageVersion
survivalsvm <- function (formula = NULL, data = NULL, subset = NULL,
                         type = "regression", diff.meth = NULL, gamma.mu = NULL,
                         opt.meth = "quadprog", kernel = "lin_kernel",
                         kernel.pars = NULL, time.variable.name = NULL,
                         status.variable.name = NULL, sgf.sv = 5, sigf = 7,
                         maxiter = 20, margin = 0.05, bound = 10,
                         eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08) {
  ## ---- argument validation -------------------------------------------------
  if (!(tolower(type) %in% c("regression", "vanbelle1", "vanbelle2", "hybrid"))) {
    stop("Error: 'type' must be either 'regression', 'vanbelle1', 'vanbelle2' or 'hybrid'.")
  }
  if (!is.null(diff.meth)) {
    if (tolower(type) != "regression") {
      if (!(tolower(diff.meth) %in% c("makediff1", "makediff2", "makediff3"))) {
        stop("'diff.meth' must be either 'makediff1', 'makediff2' or 'makediff3'.")
      }
    }
  } else {
    # Ranking-based models compare neighbouring observations and
    # therefore require a difference-building method.
    if (tolower(type) %in% c("vanbelle1", "vanbelle2", "hybrid")) {
      stop("Types 'vanbelle1', 'vanbell2' and 'hybrid' require an argument diff.meth.")
    }
  }
  if (is.null(gamma.mu)) {
    stop("gamma.mu can not be NULL.")
  }
  if (any(gamma.mu <= 0)) {
    stop("gamma.mu: only positive values allowed.")
  }
  # The hybrid model needs two regularization parameters; all others one.
  if (tolower(type) == "hybrid") {
    if (length(gamma.mu) != 2) {
      stop("'gamma.mu' must be a vector of two numeric values.")
    }
  } else {
    if (length(gamma.mu) == 2) {
      warning("The second element of 'gamma.mu' has been ignored because of not hybrid type.")
    }
  }
  if (is.null(kernel.pars)) {
    kernel.pars <- NA
  }
  if (!(tolower(opt.meth) %in% c("quadprog", "ipop"))) {
    stop("Error: opt.meth must be either 'quadprog' or 'ipop'.")
  }
  if (!(tolower(kernel) %in% c("lin_kernel", "add_kernel", "rbf_kernel",
                               "rbf4_kernel", "poly_kernel"))) {
    stop("'kernel' must be either 'lin_kern', 'add_kernel', 'rbf_kernel', 'rbf4_kernel' or 'poly_kernel'.")
  }
  if (!inherits(sgf.sv, "numeric")) {
    stop("'sgf.sv' must be a numeric.")
  } else {
    if (sgf.sv <= 0) {
      stop("'sgf.sv' must be greather than 0.")
    }
  }
  ## ---- assemble Y (time), delta (status) and X (covariate matrix) ---------
  if (is.null(formula)) {
    # Variable-name interface: time/status columns are named explicitly.
    if (is.null(time.variable.name)) {
      stop("Error: Please give a formula or dependent variable names.")
    }
    if (is.null(data)) {
      stop("'data' can not be NULL.")
    }
    if (!(inherits(data, "data.frame"))) {
      stop("data must be a data.frame")
    }
    if (is.null(status.variable.name)) {
      # No status column given: every observation is treated as an event.
      status.variable.name <- "none"
      response.vars <- time.variable.name
    } else {
      response.vars <- c(time.variable.name, status.variable.name)
    }
    # BUGFIX: the original no-status branch never built `covar`/`traindata`/
    # `X`, so the function later failed with "object 'X' not found".  The
    # covariate extraction below is now shared by both cases.
    rows <- if (is.null(subset)) seq_len(nrow(data)) else subset
    response <- data[rows, response.vars]
    covar <- setdiff(names(data), response.vars)
    if (length(covar) == 0) {
      stop("No covariable found.")
    }
    # BUGFIX: drop = FALSE keeps a one-covariate selection a data.frame so
    # the factor scan below still works column-wise.
    traindata <- data[rows, covar, drop = FALSE]
    # Factor columns only matter for the additive kernel.
    index.factor <- if (kernel == "add_kernel") which(sapply(traindata, is.factor)) else integer(0)
    X <- data.matrix(traindata)
    X <- X[stats::complete.cases(X), , drop = FALSE]
    if (prod(dim(X)) == 0) {
      stop("No observation in the data frame given in argument.")
    }
    Y <- if (status.variable.name == "none") response else response[, 1]
    delta <- if (status.variable.name == "none") rep(1, length(Y)) else response[, 2]
  } else {
    # Formula interface: response is typically Surv(time, status) ~ ...
    formula <- formula(formula)
    if (!inherits(formula, "formula")) {
      stop("Error: Invalid formula.")
    }
    data.selected <- stats::model.frame(formula, data)
    response <- data.selected[[1]]
    if (inherits(response, "matrix")) {
      is.named <- FALSE
      if (!is.null(status.variable.name) && !is.null(time.variable.name)) {
        # BUGFIX: matrices carry colnames(), not names(); the original
        # test on names(response) was always FALSE for a matrix response.
        if (status.variable.name %in% colnames(response)) {
          delta <- response[, status.variable.name]
          if (time.variable.name %in% colnames(response)) {
            Y <- response[, time.variable.name]
            is.named <- TRUE
          }
        }
      }
      if (!is.named) {
        if (ncol(response) > 2) {
          stop("Error: Please names the intresting column by 'time' and 'status'.")
        }
        Y <- response[, 1]
        delta <- response[, 2]
      }
    } # the response was a matrix
    if (inherits(response, "Surv")) {
      response <- as.matrix(response)
      Y <- response[, "time"]
      delta <- response[, "status"]
    }
    traindata <- data.selected[-1]
    # Extract the subset of interest, when given.
    if (!is.null(subset)) {
      if (!all(subset %in% seq_along(Y))) {
        stop("Error: invalid subset.")
      }
      Y <- Y[subset]
      delta <- delta[subset]
      traindata <- traindata[subset, , drop = FALSE]
    }
    covar <- names(traindata)
    index.factor <- if (kernel == "add_kernel") which(sapply(traindata, is.factor)) else integer(0)
    X <- data.matrix(traindata)
    X <- X[stats::complete.cases(X), , drop = FALSE]
    if (ncol(X) == 0) {
      stop("No variable in the data frame given in argument.")
    }
    if (nrow(X) == 0) {
      stop("No observation in the data frame given in argument.")
    }
  }
  # BUGFIX: inherits(Y, "numeric") rejected integer survival times; test
  # the mode instead (is.numeric is TRUE for both integer and double).
  if (!is.numeric(Y)) {
    stop("The time must be a numeric vector.")
  }
  if (!all(delta %in% c(0, 1))) {
    stop("Error: Status must either be 0 or 1.")
  }
  ## ---- dispatch to the requested fitting routine --------------------------
  model.fit <- switch(tolower(type),
    regression = regFit(X = X, Y = Y, delta = delta, meth_par = gamma.mu,
                        kernel_type = kernel, kernel_pars = kernel.pars,
                        bin_cat = index.factor, opt_alg = opt.meth,
                        sgf_sv = sgf.sv, sigf = sigf, maxiter = maxiter,
                        margin = margin, bound = bound, eig.tol = eig.tol,
                        conv.tol = conv.tol, posd.tol = posd.tol),
    vanbelle1 = vanbelle1Fit(X = X, Y = Y, delta = delta, meth_par = gamma.mu,
                             kernel_type = kernel, kernel_pars = kernel.pars,
                             bin_cat = index.factor,
                             makediff = match.fun(diff.meth),
                             opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf,
                             maxiter = maxiter, margin = margin, bound = bound,
                             eig.tol = eig.tol, conv.tol = conv.tol,
                             posd.tol = posd.tol),
    vanbelle2 = vanbelle2Fit(X = X, Y = Y, delta = delta, meth_par = gamma.mu,
                             kernel_type = kernel, kernel_pars = kernel.pars,
                             bin_cat = index.factor,
                             makediff = match.fun(diff.meth),
                             opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf,
                             maxiter = maxiter, margin = margin, bound = bound,
                             eig.tol = eig.tol, conv.tol = conv.tol,
                             posd.tol = posd.tol),
    hybrid = hybridFit(X = X, Y = Y, delta = delta, meth_par = gamma.mu,
                       kernel_type = kernel, kernel_pars = kernel.pars,
                       bin_cat = index.factor,
                       makediff = match.fun(diff.meth),
                       opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf,
                       maxiter = maxiter, margin = margin, bound = bound,
                       eig.tol = eig.tol, conv.tol = conv.tol,
                       posd.tol = posd.tol)
  )
  result <- list(
    call = sys.call(),
    typeofsurvivalsvm = tolower(type),
    model.fit = model.fit,
    var.names = covar
  )
  class(result) <- "survivalsvm"
  result$package.version <- unlist(packageVersion("survivalsvm"))
  return(result)
}
/output/sources/authors/6913/survivalsvm/survivalsvm.R
no_license
Irbis3/crantasticScrapper
R
false
false
15,816
r
#' Performs support vectors analysis for data sets with survival outcome. #' Three approaches are available in the package: #' The regression approach takes censoring into account when formulating the inequality constraints of the support vector problem. #' In the ranking approach, the inequality constraints set the objective to maximize the concordance index for comparable pairs of observations. #' The hybrid approach combines the regression and ranking constraints in the same model. #' #' The following denotations are used for the models implemented: #' \itemize{ #' \item \code{'regression'} referring to the regression approach, named \code{SVCR} in Van Belle et al. (2011b), #' \item \code{'vanbelle1'} according to the first version of survival surpport vector machines based on ranking constraints, #' named \code{RANKSVMC} by Van Belle et al. (2011b), #' \item \code{'vanbelle2'} according to the second version of survival surpport vector machines based on ranking constraints #' like presented in \code{model1} by Van Belle et al. (2011b) and #' \item \code{'hybrid'} combines simultaneously the regression and ranking constraints in the same model. Hybrid model is labeled #' \code{model2} by Van Belle et al. (2011b). #' #' } #' The argument \code{'type'} of the function \code{survivalsvm} is used to set the type of model to be fitted. #' For the models \code{vanbelle1}, \code{vanbelle2} and \code{hybrid}, differences between comparable #' pairs of observations are required. Each observation is compared with its nearest neighbor according to the survival time, and the #' three possible comparison approaches \link{makediff1}, \link{makediff2} and \link{makediff3} are offered to compute the #' differences between comparable neighbors. #' #' The current version of \code{survivalsvm} uses the solvers \code{\link{ipop}} and \code{\link{quadprog}} to solve the dual #' optimization problems deduced from the suport vector formulations of the models presented above. 
Notice that for using \code{quadprog} #' the kernel matrix needs to be symmetric and positive definite. Therefore when the conditions are not met, the kernel matrix needs be slightly perturbed to obtain the nearest positive definite kernel matrix. #' The alternative to \code{quadprog} is \code{ipop}, that can also handle a non-negative definite kernel matrix, however more time may be #' required to solve the quadratic optimization dual problem. The argument \code{opt.meth} is used to select the solver. #' #' The \code{survivalsvm} command can be called giving a formula, in which the survival time and the status are grouped into a #' two colunm matrix using the command \code{\link{Surv}} from the package \code{survival}. An alternative is to pass the data #' frame of training data points as an argument using \code{data}, to mention the name of the survival time variable and #' the name of the status variable as illustrated in the third example below. #' #' @title survivalsvm #' @param formula [\code{formula(1)}]\cr #' Object of class \code{formula}. See \code{\link{formula}} for more details. #' @param data [\code{data.frame(1)}]\cr #' Object of class \code{data.frame} containing data points that will be used to fit the model. #' @param subset [\code{vector(1)}]\cr #' An index vector specifying the cases to be used in the training sample. #' @param type [\code{character(1)}]\cr #' String indicating which type of survival support vectors model is desired. This must be one #' of the following strings: 'regression', 'vanbelle1', 'vanbelle2' or 'hybrid'. #' @param diff.meth [\code{character(1)}]\cr #' String indicating which of \code{'makediff1'}, \code{'makediff2'} or \code{'makediff3'} #' is used in case of 'vanbelle1', 'vanbelle2' and 'hybrid'. #' @param gamma.mu [\code{numeric(1)|vector(1)}]\cr #' Parameters of regularization. Note that a vector with two parameters is required in case of \code{hybrid} approach. 
Just #' one value is required in case of \code{regression}, \code{vanbelle1} or \code{vanbelle2}. #' @param opt.meth [\code{character(1)}]\cr #' Program used to solve the quadratic optimization problem. Either "\code{\link{quadprog}}" or "\code{\link{ipop}}". #' @param kernel [\code{\link{Kernel}(1)}]\cr #' Kernel used to fit the model: linear kern ('lin_kernel'), additive kernel ('add_kernel'), #' radial basis kernels ('rbf_kernel') and the polynomial kernel ('poly_kernel'). #' @param kernel.pars [\code{vector(1)}]\cr #' Parameters of kernel, when required. #' @param time.variable.name [\code{character}]\cr #' Name of the survival time variable in \code{data}, when given in argument. #' @param status.variable.name [\code{character(1)}]\cr #' Name of the status variable in \code{data}. #' @param sgf.sv [\code{character(1)}]\cr #' Number of decimal digits in the solution of the quadratic optimization problem. #' @param sigf [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param maxiter [\code{integer(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param margin [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param bound [\code{numeric(1)}]\cr #' Used by \code{\link{ipop}}. See \code{\link{ipop}} for details. #' @param eig.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. #' @param conv.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. #' @param posd.tol [\code{numeric(1)}]\cr #' Used by \code{nearPD} for adjusting positive definiteness. See \code{\link{nearPD}} for detail. 
#' #' @return \code{survivalsvm} #' Object of class \code{survivalsvm}, with elements: #' \tabular{ll}{ #' \code{call} \tab command calling this program, \cr #' \code{typeofsurvivalsvm} \tab type of survival support vector machines approach,\cr #' \code{model.fit} \tab the fitted survival model,\cr #' \code{var.names} \tab names of variables used.\cr #' } #' @export #' #' @seealso \link{predict.survivalsvm} #' @examples #' survivalsvm(Surv(time, status) ~ ., veteran, gamma.mu = 0.1) #' #' survsvm.reg <- survivalsvm(formula = Surv(diagtime, status) ~ ., data = veteran, #' type = "regression", gamma.mu = 0.1, #' opt.meth = "ipop", kernel = "add_kernel") #' #' survsvm.vb2 <- survivalsvm(data = veteran, time.variable.name = "diagtime", #' status.variable.name = "status", #' type = "vanbelle2", gamma.mu = 0.1, #' opt.meth = "quadprog", diff.meth = "makediff3", #' kernel = "lin_kernel", #' sgf.sv = 5, sigf = 7, maxiter = 20, #' margin = 0.05, bound = 10) #' #' @author Cesaire J. K. Fouodo #' #' @note This implementation is in part inspired by the \code{Matlab} toolbox \code{Survlab} #' (\href{http://user.it.uu.se/~kripe367/survlab/instruction.html}{\code{A Survival Analysis Toolbox}}). #' @references #' \itemize{ #' \item Van Belle, V., Pelcmans, K., Van Huffel S. and Suykens J. A.K. (2011a). #' Improved performance on high-dimensional survival data by application of Survival-SVM. #' Bioinformatics (Oxford, England) 27, 87-94. #' \item Van Belle, V., Pelcmans, K., Van Huffel S. and Suykens J. A.K. (2011b). #' Support vector methods for survival analysis: a comparaison between ranking and regression approaches. #' Artificial Intelligence in medecine 53, 107-118. 
#' } #' @import survival #' @importFrom utils packageVersion survivalsvm <- function (formula = NULL, data = NULL, subset = NULL, type = "regression", diff.meth = NULL, gamma.mu = NULL, opt.meth = "quadprog", kernel = "lin_kernel", kernel.pars = NULL, time.variable.name = NULL, status.variable.name = NULL, sgf.sv = 5, sigf = 7, maxiter = 20, margin = 0.05, bound = 10, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08) { if (!(tolower(type) %in% c("regression", "vanbelle1", "vanbelle2", "hybrid"))) { stop("Error: 'type' must be either 'regression', 'vanbelle1', 'vanbelle2' or 'hybrid'.") } if (!is.null(diff.meth)) { if (tolower(type) != "regression") { if (!(tolower(diff.meth) %in% c("makediff1", "makediff2", "makediff3"))) { stop("'diff.meth' must be either 'makediff1', 'makediff2' or 'makediff3'.") } } } else { if (tolower(type) %in% c("vanbelle1", "vanbelle2", "hybrid")) { stop("Types 'vanbelle1', 'vanbell2' and 'hybrid' require an argument diff.meth.") } } if (is.null(gamma.mu)) { stop("gamma.mu can not be NULL.") } if (any(gamma.mu <= 0)) { stop("gamma.mu: only positive values allowed.") } if (tolower(type) == "hybrid") { if(length(gamma.mu) != 2) { stop("'gamma.mu' must be a vector of two numeric values.") } } else { if(length(gamma.mu) == 2) { warning("The second element of 'gamma.mu' has been ignored because of not hybrid type.") } } if (is.null(kernel.pars)){ kernel.pars <- NA } if (!(tolower(opt.meth) %in% c("quadprog", "ipop"))) { stop("Error: opt.meth must be either 'quadprog' or 'ipop'.") } if (!(tolower(kernel) %in% c("lin_kernel", "add_kernel", "rbf_kernel", "rbf4_kernel", "poly_kernel"))) { stop("'kernel' must be either 'lin_kern', 'add_kernel', 'rbf_kernel', 'rbf4_kernel' or 'poly_kernel'.") } if (!inherits(sgf.sv, "numeric")) { stop("'sgf.sv' must be a numeric.") } else { if(sgf.sv <= 0) { stop("'sgf.sv' must be greather than 0.") } } # test on the formula if (is.null(formula)) { if (is.null(time.variable.name)) { stop("Error: Please give a 
formula or dependent variable names.") } if (is.null(data)) { stop("'data' can not be NULL.") } if (!(inherits(data, "data.frame"))) { stop("data must be a data.frame") } if (is.null(status.variable.name)) { status.variable.name <- "none" response <- data[, time.variable.name] } else { if (is.null(subset)) { # the given data is a data frame response <- data[, c(time.variable.name, status.variable.name)] covar <- setdiff(names(data), c(time.variable.name, status.variable.name)) if (length(covar) == 0) stop("No covariable found.") traindata <- data[, setdiff(names(data), c(time.variable.name, status.variable.name))] } else { response <- data[subset, c(time.variable.name, status.variable.name)] covar <- setdiff(names(data), c(time.variable.name, status.variable.name)) if (length(covar) == 0) { stop("No covariable hat been found.") } traindata <- data[subset, setdiff(names(data), c(time.variable.name, status.variable.name))] } index.factor <- if (kernel == "add_kernel") which(sapply(traindata, is.factor)) else integer(0) X <- data.matrix(traindata) X <- X[stats::complete.cases(X), , drop = FALSE] if (prod(dim(X)) == 0) { stop("No observation in the data frame given in argument.") } Y <- if (status.variable.name == "none") response else response[,1] delta <- if (status.variable.name == "none") rep(1, length(Y)) else response[, 2] } } else { formula <- formula(formula) if (!inherits(formula,"formula")) { stop("Error: Invalid formula.") } data.selected <- stats::model.frame(formula, data) response <- data.selected[[1]] if (inherits(response, "matrix")) { is.named <- FALSE if (!is.null(status.variable.name) && !is.null(time.variable.name)) { if (status.variable.name %in% names(response)) { delta <- response[, status.variable.name] if (time.variable.name %in% names(response)) { Y <- response[, time.variable.name] is.named <- TRUE } } } if (!is.named) { if (ncol(response) > 2) { stop("Error: Please names the intresting column by 'time' and 'status'.") } Y <- response[, 1] 
delta <- response[, 2] } }# the response was a matrix if (inherits(response, "Surv")) { response <- as.matrix(response) Y <- response[, "time"] delta <- response[, "status"] } traindata <- data.selected[-1] # extract the subset of interest data points, when given if (!is.null(subset)) { if (!all(subset %in% 1:length(Y))) { stop("Error: invalid subset.") } Y <- Y[subset] delta <- delta[subset] traindata <- traindata[subset, , drop = FALSE] } covar <- names(traindata) index.factor <- if(kernel == "add_kernel") which(sapply(traindata, is.factor)) else integer(0) X <- data.matrix(traindata) X <- X[stats::complete.cases(X), , drop = FALSE] if (ncol(X) == 0) { stop("No variable in the data frame given in argument.") } if (nrow(X) == 0) { stop("No observation in the data frame given in argument.") } } if (!inherits(Y, "numeric")) { stop("The time must be a numeric vector.") } if (!all(delta %in% c(0,1))) { stop("Error: Status must either be 0 or 1.") } #selection of a method if (tolower(type) == "regression") { model.fit <- regFit(X = X, Y = Y, delta = delta, meth_par = gamma.mu, kernel_type = kernel, kernel_pars = kernel.pars, bin_cat = index.factor, opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf, maxiter = maxiter, margin = margin, bound = bound, eig.tol = eig.tol, conv.tol = conv.tol, posd.tol = posd.tol) } if (tolower(type) == "vanbelle1") { model.fit <- vanbelle1Fit(X = X, Y = Y, delta = delta, meth_par = gamma.mu, kernel_type = kernel, kernel_pars = kernel.pars, bin_cat = index.factor, makediff = match.fun(diff.meth), opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf, maxiter = maxiter, margin = margin, bound = bound, eig.tol = eig.tol, conv.tol = conv.tol, posd.tol = posd.tol) } if (tolower(type) == "vanbelle2") { model.fit <- vanbelle2Fit(X = X, Y = Y, delta = delta, meth_par = gamma.mu, kernel_type = kernel, kernel_pars = kernel.pars, bin_cat = index.factor, makediff = match.fun(diff.meth), opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf, maxiter = maxiter, 
margin = margin, bound = bound, eig.tol = eig.tol, conv.tol = conv.tol, posd.tol = posd.tol) } if (tolower(type) == "hybrid") { model.fit <- hybridFit(X = X, Y = Y, delta = delta, meth_par = gamma.mu, kernel_type = kernel, kernel_pars = kernel.pars, bin_cat = index.factor, makediff = match.fun(diff.meth), opt_alg = opt.meth, sgf_sv = sgf.sv, sigf = sigf, maxiter = maxiter, margin = margin, bound = bound, eig.tol = eig.tol, conv.tol = conv.tol, posd.tol = posd.tol) } result <- list( call = sys.call(), typeofsurvivalsvm = tolower(type), model.fit = model.fit, var.names = covar ) class(result) <- "survivalsvm" result$package.version <- unlist(packageVersion("survivalsvm")) return(result) }
\name{importCufflinksFiles} \alias{importCufflinksFiles} \title{ Import CuffDiff (Cufflinks) Data Into R } \description{ This function enables users to run Cufflinks/Cuffdiff on Galaxy and then afterwards import the result into R for post analysis with isoformSwitchAnalyzeR. The user just has to download (some of) the Cuffdiff result files from galaxy and input the paths to this function. The data is then imported into R, massaged and returned as a switchAnalyzeRlist enabling a full analysis with IsoformSwitchAnalyzeR. } \usage{ importCufflinksFiles( pathToGTF, pathToGeneDEanalysis, pathToIsoformDEanalysis, pathToGeneFPKMtracking, pathToIsoformFPKMtracking, pathToIsoformReadGroupTracking, pathToSplicingAnalysis=NULL, pathToReadGroups, pathToRunInfo, fixCufflinksAnnotationProblem=TRUE, quiet=FALSE ) } \arguments{ \item{pathToGTF}{ A string indicating the path to the GTF file used as input to Cuffdiff file (downloaded from fx galaxy). } \item{pathToGeneDEanalysis}{ A string indicating the path to the file "gene differential expression testing" file (downloaded from fx galaxy). } \item{pathToIsoformDEanalysis}{ A string indicating the path to the file "transcript differential expression testing" file (downloaded from fx galaxy). } \item{pathToGeneFPKMtracking}{ A string indicating the path to the file "gene FPKM tracking" file (downloaded from fx galaxy). } \item{pathToIsoformReadGroupTracking}{ A string indicating the path to the file "isoform read group tracking" file (downloaded from fx galaxy). } \item{pathToIsoformFPKMtracking}{ A string indicating the path to the file "transcript FPKM tracking" file (downloaded from fx galaxy). } \item{pathToSplicingAnalysis}{ A string indicating the path to the file "splicing differential expression testing" file (downloaded from fx galaxy).. Only needed if the splicing analysis should be added. Default is NULL (not added). 
} \item{pathToReadGroups}{ A string indicating the path to the file "Read groups" file (downloaded from fx galaxy). } \item{pathToRunInfo}{ A string indicating the path to the file "Run details" file (downloaded from fx galaxy). } \item{fixCufflinksAnnotationProblem}{ A logic indicating whether to fix the problem with Cufflinks gene symbol annotation. Please see the details for additional information. Default is TRUE. } \item{quiet}{ A logic indicating whether to avoid printing progress messages. Default is FALSE} } \details{ One problem with cufflinks is that it considers islands of overlapping transcripts - this means that sometimes multiple genes (defined by gene short name) as combined into one cufflinks gene (XLOC_XXXXXX) and this gene is quantified and tested for differential expression. Setting fixCufflinksAnnotationProblem to TRUE will make the import function modify the data so that false conclusions are not made in downstream analysis. More specificly this cause the function to re-calculate expression values, set gene standard error (of mean) to NA and the p-value and q-value of the differential expression analysis to 1 whereby false conclusions can be prevented. Cuffdiff performs a statistical test for changes in alternative splicing between transcripts that utilize the same transcription start site (TSS). If evidence for alternative splicing, resulting in alternative isoforms, are found within a gene then there must per definition also be isoform switching occurring within that gene. Therefore we have implemented the \code{addCufflinksSwichTest} parameter which will add the FDR corrected p-value (q-value) of Cuffdiffs splicing test as the gene-level evidence for isoform switching (the \code{gene_switch_q_value} column). By coupling this evidence with a cutoff on minimum switch size (which is measured a gene-level and controlled via \code{dIFcutoff}) in the downstream analysis, switches that are not negligible at gene-level will be ignored. 
Note that CuffDiff have a parameter ('-min-reps-for-js-test) which controls how many replicates (default is 3) are needed for the test of alternative splicing is performed and that the test requires TSSs are annotated in the GTF file supplied to Cuffmerge via the '-g/-ref-gtf' parameter. } \value{ A \code{switchAnalyzeRlist} containing all the gene and transcript information as well as the isoform structure. See ?switchAnalyzeRlist for more details. If \code{addCufflinksSwichTest=TRUE} a data.frame with the result of cuffdiff's test for alternative splicing is also added to the switchAnalyzeRlist under the entry 'isoformSwitchAnalysis' (only if analysis was performed). } \references{ Vitting-Seerup et al. The Landscape of Isoform Switches in Human Cancers. Mol. Cancer Res. (2017). } \author{ Kristoffer Vitting-Seerup } \seealso{ \code{\link{createSwitchAnalyzeRlist}}\cr \code{\link{importCufflinksCummeRbund}}\cr \code{\link{preFilter}} } \note{ Note that since there was an error in Cufflinks/Cuffdiff's estimation of standard errors that was not corrected until cufflinks 2.2.1. This function will give a warning if the cufflinks version used is older than this. Note that it will not be possible to test for differential isoform usage (isoform switches) with data from older versions of cufflinks (because the test amongst other uses the standard errors. 
} \examples{ ### Use the files from the cummeRbund example data testSwitchList <- importCufflinksFiles( pathToGTF = system.file('extdata/chr1_snippet.gtf', package = "cummeRbund"), pathToGeneDEanalysis = system.file('extdata/gene_exp.diff', package = "cummeRbund"), pathToIsoformDEanalysis = system.file('extdata/isoform_exp.diff', package = "cummeRbund"), pathToGeneFPKMtracking = system.file('extdata/genes.fpkm_tracking', package = "cummeRbund"), pathToIsoformFPKMtracking = system.file('extdata/isoforms.fpkm_tracking', package = "cummeRbund"), pathToIsoformReadGroupTracking = system.file('extdata/isoforms.read_group_tracking', package = "cummeRbund"), pathToSplicingAnalysis = system.file('extdata/splicing.diff', package = "cummeRbund"), pathToReadGroups = system.file('extdata/read_groups.info', package = "cummeRbund"), pathToRunInfo = system.file('extdata/run.info', package = "cummeRbund"), fixCufflinksAnnotationProblem=TRUE, quiet=TRUE ) testSwitchList }
/man/importCufflinksGalaxyData.Rd
no_license
afonsoguerra/IsoformSwitchAnalyzeR
R
false
false
6,522
rd
\name{importCufflinksFiles} \alias{importCufflinksFiles} \title{ Import CuffDiff (Cufflinks) Data Into R } \description{ This function enables users to run Cufflinks/Cuffdiff on Galaxy and then afterwards import the result into R for post analysis with isoformSwitchAnalyzeR. The user just has to download (some of) the Cuffdiff result files from galaxy and input the paths to this function. The data is then imported into R, massaged and returned as a switchAnalyzeRlist enabling a full analysis with IsoformSwitchAnalyzeR. } \usage{ importCufflinksFiles( pathToGTF, pathToGeneDEanalysis, pathToIsoformDEanalysis, pathToGeneFPKMtracking, pathToIsoformFPKMtracking, pathToIsoformReadGroupTracking, pathToSplicingAnalysis=NULL, pathToReadGroups, pathToRunInfo, fixCufflinksAnnotationProblem=TRUE, quiet=FALSE ) } \arguments{ \item{pathToGTF}{ A string indicating the path to the GTF file used as input to Cuffdiff file (downloaded from fx galaxy). } \item{pathToGeneDEanalysis}{ A string indicating the path to the file "gene differential expression testing" file (downloaded from fx galaxy). } \item{pathToIsoformDEanalysis}{ A string indicating the path to the file "transcript differential expression testing" file (downloaded from fx galaxy). } \item{pathToGeneFPKMtracking}{ A string indicating the path to the file "gene FPKM tracking" file (downloaded from fx galaxy). } \item{pathToIsoformReadGroupTracking}{ A string indicating the path to the file "isoform read group tracking" file (downloaded from fx galaxy). } \item{pathToIsoformFPKMtracking}{ A string indicating the path to the file "transcript FPKM tracking" file (downloaded from fx galaxy). } \item{pathToSplicingAnalysis}{ A string indicating the path to the file "splicing differential expression testing" file (downloaded from fx galaxy).. Only needed if the splicing analysis should be added. Default is NULL (not added). 
} \item{pathToReadGroups}{ A string indicating the path to the file "Read groups" file (downloaded from fx galaxy). } \item{pathToRunInfo}{ A string indicating the path to the file "Run details" file (downloaded from fx galaxy). } \item{fixCufflinksAnnotationProblem}{ A logic indicating whether to fix the problem with Cufflinks gene symbol annotation. Please see the details for additional information. Default is TRUE. } \item{quiet}{ A logic indicating whether to avoid printing progress messages. Default is FALSE} } \details{ One problem with cufflinks is that it considers islands of overlapping transcripts - this means that sometimes multiple genes (defined by gene short name) as combined into one cufflinks gene (XLOC_XXXXXX) and this gene is quantified and tested for differential expression. Setting fixCufflinksAnnotationProblem to TRUE will make the import function modify the data so that false conclusions are not made in downstream analysis. More specificly this cause the function to re-calculate expression values, set gene standard error (of mean) to NA and the p-value and q-value of the differential expression analysis to 1 whereby false conclusions can be prevented. Cuffdiff performs a statistical test for changes in alternative splicing between transcripts that utilize the same transcription start site (TSS). If evidence for alternative splicing, resulting in alternative isoforms, are found within a gene then there must per definition also be isoform switching occurring within that gene. Therefore we have implemented the \code{addCufflinksSwichTest} parameter which will add the FDR corrected p-value (q-value) of Cuffdiffs splicing test as the gene-level evidence for isoform switching (the \code{gene_switch_q_value} column). By coupling this evidence with a cutoff on minimum switch size (which is measured a gene-level and controlled via \code{dIFcutoff}) in the downstream analysis, switches that are not negligible at gene-level will be ignored. 
Note that CuffDiff have a parameter ('-min-reps-for-js-test) which controls how many replicates (default is 3) are needed for the test of alternative splicing is performed and that the test requires TSSs are annotated in the GTF file supplied to Cuffmerge via the '-g/-ref-gtf' parameter. } \value{ A \code{switchAnalyzeRlist} containing all the gene and transcript information as well as the isoform structure. See ?switchAnalyzeRlist for more details. If \code{addCufflinksSwichTest=TRUE} a data.frame with the result of cuffdiff's test for alternative splicing is also added to the switchAnalyzeRlist under the entry 'isoformSwitchAnalysis' (only if analysis was performed). } \references{ Vitting-Seerup et al. The Landscape of Isoform Switches in Human Cancers. Mol. Cancer Res. (2017). } \author{ Kristoffer Vitting-Seerup } \seealso{ \code{\link{createSwitchAnalyzeRlist}}\cr \code{\link{importCufflinksCummeRbund}}\cr \code{\link{preFilter}} } \note{ Note that since there was an error in Cufflinks/Cuffdiff's estimation of standard errors that was not corrected until cufflinks 2.2.1. This function will give a warning if the cufflinks version used is older than this. Note that it will not be possible to test for differential isoform usage (isoform switches) with data from older versions of cufflinks (because the test amongst other uses the standard errors. 
} \examples{ ### Use the files from the cummeRbund example data testSwitchList <- importCufflinksFiles( pathToGTF = system.file('extdata/chr1_snippet.gtf', package = "cummeRbund"), pathToGeneDEanalysis = system.file('extdata/gene_exp.diff', package = "cummeRbund"), pathToIsoformDEanalysis = system.file('extdata/isoform_exp.diff', package = "cummeRbund"), pathToGeneFPKMtracking = system.file('extdata/genes.fpkm_tracking', package = "cummeRbund"), pathToIsoformFPKMtracking = system.file('extdata/isoforms.fpkm_tracking', package = "cummeRbund"), pathToIsoformReadGroupTracking = system.file('extdata/isoforms.read_group_tracking', package = "cummeRbund"), pathToSplicingAnalysis = system.file('extdata/splicing.diff', package = "cummeRbund"), pathToReadGroups = system.file('extdata/read_groups.info', package = "cummeRbund"), pathToRunInfo = system.file('extdata/run.info', package = "cummeRbund"), fixCufflinksAnnotationProblem=TRUE, quiet=TRUE ) testSwitchList }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DataSet.R, R/DataSetList.R \name{get_FV_summary} \alias{get_FV_summary} \alias{get_FV_summary.DataSet} \alias{get_FV_summary.DataSetList} \title{Get Function Value Summary} \usage{ get_FV_summary(ds, ...) \method{get_FV_summary}{DataSet}(ds, runtime, ...) \method{get_FV_summary}{DataSetList}(ds, runtime, algorithm = "all", ...) } \arguments{ \item{ds}{A DataSet or DataSetList object} \item{...}{Arguments passed to other methods} \item{runtime}{A Numerical vector. Runtimes at which function values are reached} \item{algorithm}{Which algorithms in the DataSetList to consider.} } \value{ A data.table containing the function value statistics for each provided target runtime value } \description{ Get Function Value Summary } \examples{ get_FV_summary(dsl, 100) get_FV_summary(dsl[[1]], 100) }
/man/get_FV_summary.Rd
permissive
nojhan/IOHanalyzer
R
false
true
881
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DataSet.R, R/DataSetList.R \name{get_FV_summary} \alias{get_FV_summary} \alias{get_FV_summary.DataSet} \alias{get_FV_summary.DataSetList} \title{Get Function Value Summary} \usage{ get_FV_summary(ds, ...) \method{get_FV_summary}{DataSet}(ds, runtime, ...) \method{get_FV_summary}{DataSetList}(ds, runtime, algorithm = "all", ...) } \arguments{ \item{ds}{A DataSet or DataSetList object} \item{...}{Arguments passed to other methods} \item{runtime}{A Numerical vector. Runtimes at which function values are reached} \item{algorithm}{Which algorithms in the DataSetList to consider.} } \value{ A data.table containing the function value statistics for each provided target runtime value } \description{ Get Function Value Summary } \examples{ get_FV_summary(dsl, 100) get_FV_summary(dsl[[1]], 100) }
\name{college} \alias{college} \title{ Statistics on colleges in 15 states. } \description{ The data frame college contains statistics relating to colleges from 15 states. This is a sample of fifteen states and certain statistics taken from the Chronicle of Higher Education (most data is for 1992). All entries are in thousands so that Arkansas (first row) has a population of 2,399,000, a yearly per capita income of \$15,400, 85,700 undergraduates students, 7,000 graduate students, and average cost of tuition and fees at public universities of \$1,540, and is located in the south (s for south). } \format{ A data frame with 15 observations on the following 7 variables (all data in thousands). \describe{ \item{school:}{State in which school is located.} \item{pop:}{State population.} \item{inc:}{Yearly per capita income.} \item{undergrad:}{Total number of undergraduate students.} \item{graduate:}{Total number of graduate students.} \item{fees:}{Average cost of tuition and fees.} \item{loc:}{Area of the country (s for south, w for west, ne for northeast, mw for midwest).} } } \keyword{datasets}
/man/college.Rd
no_license
cran/Rlab
R
false
false
1,172
rd
\name{college} \alias{college} \title{ Statistics on colleges in 15 states. } \description{ The data frame college contains statistics relating to colleges from 15 states. This is a sample of fifteen states and certain statistics taken from the Chronicle of Higher Education (most data is for 1992). All entries are in thousands so that Arkansas (first row) has a population of 2,399,000, a yearly per capita income of \$15,400, 85,700 undergraduates students, 7,000 graduate students, and average cost of tuition and fees at public universities of \$1,540, and is located in the south (s for south). } \format{ A data frame with 15 observations on the following 7 variables (all data in thousands). \describe{ \item{school:}{State in which school is located.} \item{pop:}{State population.} \item{inc:}{Yearly per capita income.} \item{undergrad:}{Total number of undergraduate students.} \item{graduate:}{Total number of graduate students.} \item{fees:}{Average cost of tuition and fees.} \item{loc:}{Area of the country (s for south, w for west, ne for northeast, mw for midwest).} } } \keyword{datasets}
# Shiny app: relative-risk forest plots, selectable by exposure, outcome,
# and age category. Pre-built ggplot objects are stored in the data itself.
# Note, this app can't be developed in bluevelvet.
# (Removed `rm(list = ls())`: clearing the user's workspace from a script
#  is an anti-pattern and is unnecessary for a Shiny app.)

library(shiny)
library(tidyverse)

# One row per exposure/outcome/age combination; the `plot` column holds
# the pre-rendered ggplot object for that combination.
d <- readRDS("/figures/risk-factor/figure-data/all_forest_plot_RR.RDS")
df <- d  # the original piped through a no-op filter(); kept as a plain copy

# Dropdown choices ordered by frequency (most common first).
# `[[1]]` extracts the value column as a plain vector — passing the
# 1-column data frame from `count()[1]` would make selectInput render
# a single option group labeled with the column name.
intervention_options <- dplyr::count(df, intervention_variable, sort = TRUE)[[1]]
outcome_options <- dplyr::count(df, outcome_variable, sort = TRUE)[[1]]
age_options <- dplyr::count(df, agecat, sort = TRUE)[[1]]

ui <- fluidPage(
  titlePanel("Relative risk forest plots"),
  sidebarLayout(
    sidebarPanel(
      selectInput("exposure", "Pick an exposure: ", intervention_options),
      selectInput("outcome", "Pick an outcome: ", outcome_options),
      selectInput("age", "Pick an age: ", age_options),
      h5("Note: No plot will be displayed with invalid inputs.")
    ),
    mainPanel(
      plotOutput("selected_plot")
    )
  )
)

server <- function(input, output, session) {
  # Rows matching the currently selected exposure/outcome/age triple.
  datasetInput <- reactive({
    df %>%
      filter(
        intervention_variable == input$exposure &
          outcome_variable == input$outcome &
          agecat == input$age
      )
  })

  output$selected_plot <- renderPlot({
    dataset <- datasetInput()
    dataset$plot  # stored ggplot object for the selected combination
  })
}

shinyApp(ui, server)
/app.R
permissive
child-growth/causes
R
false
false
1,280
r
# Shiny app: relative-risk forest plots, selectable by exposure, outcome,
# and age category. Pre-built ggplot objects are stored in the data itself.
# Note, this app can't be developed in bluevelvet.
# (Removed `rm(list = ls())`: clearing the user's workspace from a script
#  is an anti-pattern and is unnecessary for a Shiny app.)

library(shiny)
library(tidyverse)

# One row per exposure/outcome/age combination; the `plot` column holds
# the pre-rendered ggplot object for that combination.
d <- readRDS("/figures/risk-factor/figure-data/all_forest_plot_RR.RDS")
df <- d  # the original piped through a no-op filter(); kept as a plain copy

# Dropdown choices ordered by frequency (most common first).
# `[[1]]` extracts the value column as a plain vector — passing the
# 1-column data frame from `count()[1]` would make selectInput render
# a single option group labeled with the column name.
intervention_options <- dplyr::count(df, intervention_variable, sort = TRUE)[[1]]
outcome_options <- dplyr::count(df, outcome_variable, sort = TRUE)[[1]]
age_options <- dplyr::count(df, agecat, sort = TRUE)[[1]]

ui <- fluidPage(
  titlePanel("Relative risk forest plots"),
  sidebarLayout(
    sidebarPanel(
      selectInput("exposure", "Pick an exposure: ", intervention_options),
      selectInput("outcome", "Pick an outcome: ", outcome_options),
      selectInput("age", "Pick an age: ", age_options),
      h5("Note: No plot will be displayed with invalid inputs.")
    ),
    mainPanel(
      plotOutput("selected_plot")
    )
  )
)

server <- function(input, output, session) {
  # Rows matching the currently selected exposure/outcome/age triple.
  datasetInput <- reactive({
    df %>%
      filter(
        intervention_variable == input$exposure &
          outcome_variable == input$outcome &
          agecat == input$age
      )
  })

  output$selected_plot <- renderPlot({
    dataset <- datasetInput()
    dataset$plot  # stored ggplot object for the selected combination
  })
}

shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generics.R \name{grbase_generics} \alias{grbase_generics} \alias{fit} \alias{compile} \alias{propagate} \alias{stepwise} \title{Compile and propagate functions} \usage{ fit(object, ...) compile(object, ...) propagate(object, ...) stepwise(object, ...) } \arguments{ \item{object}{An object} \item{\dots}{Additional arguments which depends on the class of the object} } \value{ The value returned depends on the class of the first argument. } \description{ \code{compile} and \code{propagate} are generic functions which invoke particular methods which depend on the class of the first argument } \references{ Højsgaard, Søren; Edwards, David; Lauritzen, Steffen (2012): Graphical Models with R, Springer } \author{ Søren Højsgaard, \email{sorenh@math.aau.dk} } \keyword{utilities}
/gRbase/man/grbase_generics.Rd
no_license
akhikolla/TestedPackages-NoIssues
R
false
true
879
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generics.R \name{grbase_generics} \alias{grbase_generics} \alias{fit} \alias{compile} \alias{propagate} \alias{stepwise} \title{Compile and propagate functions} \usage{ fit(object, ...) compile(object, ...) propagate(object, ...) stepwise(object, ...) } \arguments{ \item{object}{An object} \item{\dots}{Additional arguments which depends on the class of the object} } \value{ The value returned depends on the class of the first argument. } \description{ \code{compile} and \code{propagate} are generic functions which invoke particular methods which depend on the class of the first argument } \references{ Højsgaard, Søren; Edwards, David; Lauritzen, Steffen (2012): Graphical Models with R, Springer } \author{ Søren Højsgaard, \email{sorenh@math.aau.dk} } \keyword{utilities}
# Exploring built-in datasets: USArrests, euro, and eurodist.
# Idiom fixes: `<-` instead of `=` for assignment; the opaque mask name `L`
# renamed to `high_murder`. All operations and their order are unchanged.

?USArrests                       # help page for the USArrests dataset
nrow(USArrests)                  # number of rows (one per US state)
ncol(USArrests)                  # number of variables
fix(USArrests)                   # interactive data editor (edits a local copy)
head(USArrests)                  # first 6 rows
head(USArrests, 10)              # first 10 rows
USArrests$Murder                 # murder-rate column as a numeric vector

# Logical mask of states with murder rate > 3, then their Assault values.
high_murder <- USArrests$Murder > 3
high_murder
USArrests[high_murder, ]$Assault

?euro                            # euro conversion-rate vector
fix(euro)
df <- data.frame(euro)           # one-column data frame of conversion rates
df
nrow(df)
ncol(df)

?eurodist                        # road distances as a 'dist' object
fix(eurodist)
df <- data.matrix(eurodist)      # coerce the distances to a numeric matrix
df
/task_10_datasets.r
no_license
Nivas138/LearningRStudio
R
false
false
284
r
# Exploring built-in datasets: USArrests, euro, and eurodist.
# Idiom fixes: `<-` instead of `=` for assignment; the opaque mask name `L`
# renamed to `high_murder`. All operations and their order are unchanged.

?USArrests                       # help page for the USArrests dataset
nrow(USArrests)                  # number of rows (one per US state)
ncol(USArrests)                  # number of variables
fix(USArrests)                   # interactive data editor (edits a local copy)
head(USArrests)                  # first 6 rows
head(USArrests, 10)              # first 10 rows
USArrests$Murder                 # murder-rate column as a numeric vector

# Logical mask of states with murder rate > 3, then their Assault values.
high_murder <- USArrests$Murder > 3
high_murder
USArrests[high_murder, ]$Assault

?euro                            # euro conversion-rate vector
fix(euro)
df <- data.frame(euro)           # one-column data frame of conversion rates
df
nrow(df)
ncol(df)

?eurodist                        # road distances as a 'dist' object
fix(eurodist)
df <- data.matrix(eurodist)      # coerce the distances to a numeric matrix
df
west <- c( 'Western Europe', 'Northern Europe', 'Southern Europe', 'Northern America', 'Australia and New Zealand' ) countries2 <- c( 'United Kingdom', 'Portugal' ) dat <- gapminder %>% filter( year %in% c( 2010, 2015 ) & region %in% west & !is.na( life_expectancy ) & population > 10^7 ) head( dat ) dat %>% mutate( location = ifelse( year == 2010, 1, 2 ), location = ifelse( year == 2015 & country %in% countries2, location + 0.22, location ), hjust = ifelse( year == 2010, 1, 0 ) ) %>% mutate( year = as.factor( year ) ) %>% ggplot( aes( year, life_expectancy, group = country ) ) + geom_line( aes( color = country ), show.legend = FALSE ) + geom_text( aes( x = location, label = country, hjust = hjust, check_overlap = TRUE, color = country ), show.legend = FALSE ) + xlab( '' ) + ylab( 'Life Expectancy' ) # Vaccines case study # Data related to the impact of vaccines data( "us_contagious_diseases" ) str( us_contagious_diseases ) states3 <- c( 'Hawaii', 'Alaska' ) # data object contains all measles data # it includes a per 100.000 rate, orders states by average value of disease, and removes Alaska and Hawaii, since they only vecame states in the late 50s the_disease <- 'Measles' dat <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) # plot disease rates for per year dat %>% filter( state == 'California' ) %>% ggplot( aes( year, rate ) ) + geom_line() + ylab( 'Cases per 10.000' ) + geom_vline( xintercept = 1963, col = 'blue' ) class( dat ) head( dat ) # show all states in one graph library( RColorBrewer ) #display.brewer.all( type = 'seq' ) # sequential colors #display.brewer.all( type = 'div' ) # divergent colors dat %>% ggplot( aes( year, state, fill = rate ) ) + geom_tile( color = 'grey50') + scale_x_continuous( expand = c( 0,0 ) ) + scale_fill_gradientn( colors = brewer.pal( 9, 'Reds' ), trans = 'sqrt') + geom_vline( xintercept = 1963, col = 
'blue' ) + theme_minimal() + theme( panel.grid = element_blank() ) + ggtitle( the_disease ) + ylab( '' ) + xlab( '' ) head( us_contagious_diseases ) avg <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% #filter( disease == the_disease ) %>% group_by( year ) %>% summarize( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) avg %>% ggplot( aes( year, us_rate ) ) + geom_line( color = 'black' ) dat3 <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) # libraries library( tidyverse ) library( dslabs ) data( "gapminder" ) args( ggplot ) library( RColorBrewer ) data( "us_contagious_diseases" ) states3 <- c( 'Hawaii', 'Alaska' ) the_disease <- 'Measles' greyColors <- c( '#dcdcdc', '#d3d3d3', '#c0c0c0', '#a9a9a9', '#808080', '#696969', '#778899', '#708090', '#2f4f4f' ) head(us_contagious_diseases) avg <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) %>% group_by( year ) %>% summarize( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) avg dat3 <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) head(avg) head(dat3) class( dat3 ) class( avg ) ceiling( runif( 1, 1, 9 ) ) ### ### Linel graph ### dat4 <- ggplot() + # dat3 plot geom_line( data = dat3, aes( year, rate, group = state ), color = 'darkgray', alpha = 0.75 ) + # avg plot geom_line( data = avg, aes( year, us_rate ), color = 'black', size = 1.15 ) + scale_y_continuous( trans = 'sqrt', limits = c( 0, 300), breaks = c( 5, 25, 125, 300 ) ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' ) dat4 ### ### SMOTTH GRAPH ### dat5 <- 
ggplot() + # dat3 plot geom_smooth( data = dat3, aes( year, rate, group = state ), color = 'gray' ) + # avg plot geom_smooth( data = avg, aes( year, us_rate ), color = 'black' ) + scale_y_continuous( breaks = c( 5, 25, 125, 300 ) ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' ) dat5 # plot disease rates for per year dat3 %>% ggplot( aes( year, rate, group = state ) ) + #scale_y_continuous( trans = 'logit', limit = c( 0.1, 300 ), # breaks = c( 5, 25, 125, 300 ) ) + scale_y_continuous( breaks = c( 5, 25, 125, 300 ) ) + geom_line( color = 'grey' ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' )
/HarvardX_PH125.2x_Visualization/1.dataVisualization.R
no_license
JayChart/HarvardX_DataScienceProfessional
R
false
false
4,989
r
west <- c( 'Western Europe', 'Northern Europe', 'Southern Europe', 'Northern America', 'Australia and New Zealand' ) countries2 <- c( 'United Kingdom', 'Portugal' ) dat <- gapminder %>% filter( year %in% c( 2010, 2015 ) & region %in% west & !is.na( life_expectancy ) & population > 10^7 ) head( dat ) dat %>% mutate( location = ifelse( year == 2010, 1, 2 ), location = ifelse( year == 2015 & country %in% countries2, location + 0.22, location ), hjust = ifelse( year == 2010, 1, 0 ) ) %>% mutate( year = as.factor( year ) ) %>% ggplot( aes( year, life_expectancy, group = country ) ) + geom_line( aes( color = country ), show.legend = FALSE ) + geom_text( aes( x = location, label = country, hjust = hjust, check_overlap = TRUE, color = country ), show.legend = FALSE ) + xlab( '' ) + ylab( 'Life Expectancy' ) # Vaccines case study # Data related to the impact of vaccines data( "us_contagious_diseases" ) str( us_contagious_diseases ) states3 <- c( 'Hawaii', 'Alaska' ) # data object contains all measles data # it includes a per 100.000 rate, orders states by average value of disease, and removes Alaska and Hawaii, since they only vecame states in the late 50s the_disease <- 'Measles' dat <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) # plot disease rates for per year dat %>% filter( state == 'California' ) %>% ggplot( aes( year, rate ) ) + geom_line() + ylab( 'Cases per 10.000' ) + geom_vline( xintercept = 1963, col = 'blue' ) class( dat ) head( dat ) # show all states in one graph library( RColorBrewer ) #display.brewer.all( type = 'seq' ) # sequential colors #display.brewer.all( type = 'div' ) # divergent colors dat %>% ggplot( aes( year, state, fill = rate ) ) + geom_tile( color = 'grey50') + scale_x_continuous( expand = c( 0,0 ) ) + scale_fill_gradientn( colors = brewer.pal( 9, 'Reds' ), trans = 'sqrt') + geom_vline( xintercept = 1963, col = 
'blue' ) + theme_minimal() + theme( panel.grid = element_blank() ) + ggtitle( the_disease ) + ylab( '' ) + xlab( '' ) head( us_contagious_diseases ) avg <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% #filter( disease == the_disease ) %>% group_by( year ) %>% summarize( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) avg %>% ggplot( aes( year, us_rate ) ) + geom_line( color = 'black' ) dat3 <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) # libraries library( tidyverse ) library( dslabs ) data( "gapminder" ) args( ggplot ) library( RColorBrewer ) data( "us_contagious_diseases" ) states3 <- c( 'Hawaii', 'Alaska' ) the_disease <- 'Measles' greyColors <- c( '#dcdcdc', '#d3d3d3', '#c0c0c0', '#a9a9a9', '#808080', '#696969', '#778899', '#708090', '#2f4f4f' ) head(us_contagious_diseases) avg <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) %>% group_by( year ) %>% summarize( us_rate = sum( count, na.rm = TRUE ) / sum( population, na.rm = TRUE ) * 10000 ) avg dat3 <- us_contagious_diseases %>% filter( !state %in% states3 & disease == the_disease ) %>% mutate( rate = count / population * 10000 ) %>% mutate( state = reorder( state, rate ) ) head(avg) head(dat3) class( dat3 ) class( avg ) ceiling( runif( 1, 1, 9 ) ) ### ### Linel graph ### dat4 <- ggplot() + # dat3 plot geom_line( data = dat3, aes( year, rate, group = state ), color = 'darkgray', alpha = 0.75 ) + # avg plot geom_line( data = avg, aes( year, us_rate ), color = 'black', size = 1.15 ) + scale_y_continuous( trans = 'sqrt', limits = c( 0, 300), breaks = c( 5, 25, 125, 300 ) ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' ) dat4 ### ### SMOTTH GRAPH ### dat5 <- 
ggplot() + # dat3 plot geom_smooth( data = dat3, aes( year, rate, group = state ), color = 'gray' ) + # avg plot geom_smooth( data = avg, aes( year, us_rate ), color = 'black' ) + scale_y_continuous( breaks = c( 5, 25, 125, 300 ) ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' ) dat5 # plot disease rates for per year dat3 %>% ggplot( aes( year, rate, group = state ) ) + #scale_y_continuous( trans = 'logit', limit = c( 0.1, 300 ), # breaks = c( 5, 25, 125, 300 ) ) + scale_y_continuous( breaks = c( 5, 25, 125, 300 ) ) + geom_line( color = 'grey' ) + geom_vline( xintercept = 1963, col = 'blue' ) + ylab( '' ) + xlab( '' )
# Chapter 8: Categorical data analysis
# (qualitative data whose values cannot be computed on directly)
# Related tests: one-sample t / one-sample proportion, two-sample t,
# paired t, ANOVA (which require homogeneity of variance and normality).

## Example 1: Mendel's law -----------------------------------------------
# Observed counts for the four phenotype classes; H0 ratio is 9:3:3:1.
x <- c(315, 101, 108, 32)
chisq.test(x, p = c(9, 3, 3, 1) / 16)  # p-value = 0.9254 -> fail to reject H0

# NOTE(review): a broken first plotting attempt was removed here.
# `curve(x, ychi, ...)` errors because curve() requires an expression or
# function, and the bare `chisq.plot` referenced an undefined symbol.
# The corrected version of the same figure follows below.

# Chi-square(3) reference density with the 5% rejection region shaded.
x <- seq(0, 15, by = 0.01)
dc <- dchisq(x, df = 3)
alpha <- 0.05
tol <- qchisq(0.95, df = 3)            # upper 5% critical value
par(mar = c(0, 1, 1, 1))
plot(x, dc, type = "l", axes = FALSE, ylim = c(-0.03, 0.25), xlab = "", ylab = "")
abline(h = 0)
tol.g <- round(tol, 2)
# Rejection region (upper tail beyond the critical value).
polygon(c(tol.g, x[x > tol.g], 15), c(0, dc[x > tol.g], 0), col = "yellow")
text(0, -0.03, "0", cex = 0.8)
text(tol, -0.03, expression(chi[0.05]^{2} == 2.14), cex = 0.8)
# Tail area corresponding to the observed p-value (0.9254).
tol2 <- qchisq(1 - 0.9254, df = 3)
tol2.g <- round(tol2, 2)
polygon(c(tol2.g, x[x > tol2.g], 15), c(0, dc[x > tol2.g], 0),
        col = "red", density = 20, angle = 305)
text(0, -0.03, "0", cex = 0.8)
text(tol2, -0.03, expression(chi[0.9254]^{2} == 0.47), cex = 0.8)

## 2. Homogeneity and independence tests ---------------------------------

## Example 2: Homogeneity of SNS usage rates across age groups
sns.c <- read.csv("./data/snsbyage.csv", header = TRUE, stringsAsFactors = FALSE)
str(sns.c)
head(sns.c)
# Label the age groups and give the service factor an explicit level order.
# (Factor labels are kept exactly as in the original data/analysis.)
sns.c <- transform(sns.c,
                   age.c = factor(age, levels = c(1, 2, 3),
                                  labels = c("20대", "30대", "40대")))
sns.c <- transform(sns.c,
                   service.c = factor(service,
                                      levels = c("F", "T", "K", "C", "E"),
                                      ordered = TRUE))
head(sns.c)

# Observed contingency table: age group x service.
c.tab <- table(sns.c$age.c, sns.c$service.c)
(a.n <- margin.table(c.tab, margin = 1))   # row (age) totals
(s.n <- margin.table(c.tab, margin = 2))   # column (service) totals
(s.p <- s.n / margin.table(c.tab))         # service proportions
(expected <- a.n %*% t(s.p))               # expected counts under homogeneity
(o.e <- c.tab - expected)                  # observed - expected
(t.t <- sum((o.e)^2 / expected))           # chi-square test statistic
qchisq(0.95, df = 8)                       # critical value, df = (3-1)(5-1)
1 - pchisq(t.t, df = 8)                    # p-value
chisq.test(c.tab)

# Keep the htest object for inspection.
# (Originally assigned to `names`, which shadowed base::names() and left
#  the following `result$...` lines referring to an undefined object.)
result <- chisq.test(c.tab)
addmargins(result$expected)
result$expected
str(result)
result$p.value

## Example 3: Independence of graduate admission and gender --------------
data("UCBAdmissions")
UCBAdmissions
# Collapse over department: 2x2 table of Admit x Gender.
ucba.tab <- apply(UCBAdmissions, c(1, 2), sum)
ucba.tab
round(prop.table(ucba.tab, margin = 2) * 100, 1)  # column percentages

# Independence test computed by hand.
a.n <- margin.table(ucba.tab, margin = 1)
g.n <- margin.table(ucba.tab, margin = 2)
(a.p <- a.n / margin.table(ucba.tab))
(g.p <- g.n / margin.table(ucba.tab))
(expected <- margin.table(ucba.tab) * (a.p %*% t(g.p)))
1 - pchisq(112.250, df = 1)  # p-value for the known chi-square statistic

o.e <- (ucba.tab - expected)^2 / expected
addmargins(o.e)
chisq.t <- sum(o.e)              # test statistic (was `chi sq.t`: a syntax error)
chisq.t
1 - pchisq(chisq.t, df = 1)      # p-value (was qchisq(), the quantile function)
chisq.test(ucba.tab)
## continued...
/ch8/점주형 자료분석.R
no_license
joeychoi12/R_Statistics
R
false
false
2,765
r
#Chapter 8 범주형 자료분석 #값으로부터 직접 계산을 할수 없는 질적자료의 경우 #1 Sample T And Sample Prop #2 sample T #Paired T #ANOVA #등분산성, 정규성 #예제 1. 멘델의 법칙 x <- c(315,101,108,32) chisq.test(x,p=c(9,3,3,1)/16) #p-value = 0.9254 ychi <- dchisq(x,3) alpha <- 0.05 tol <- qchisq(0.95, df=3) par(mar=c(0,1,1,1)) curve(x,ychi,type = "l", axes=F, ylim=c(-0.03,0.05),xlab="",ylab="") chisq.plot curve(dchisq(x,3),col = "black", main= "",xlim=c(0,8)) abline(h=0) x <- seq(0, 15, by = 0.01) dc <- dchisq(x, df=3) alpha <- 0.05 tol <- qchisq(0.95,df=3) par(mar=c(0,1,1,1)) plot(x, dc, type = "l", axes=F, ylim=c(-0.03,0.25), xlab="",ylab="") abline(h=0) tol.g <- round(tol,2) polygon(c(tol.g, x[x>tol.g],15), c(0,dc[x>tol.g],0), col="yellow") text(0,-0.03,"0", cex=0.8) text(tol, -0.03, expression(chi[0.05]^{2}==2.14), cex=0.8) tol2 <- qchisq(1-0.9254, df=3) tol2.g <-round(tol2, 2) polygon(c(tol2.g, x[x>tol2.g],15), c(0,dc[x>tol2.g],0),col = "red", density=20, angle=305) text(0,-0.03, "0", cex=0.8) text(tol2, -0.03, expression(chi[0.9254]^{2} ==0.47), cex=0.8) #2 동질성 검정과 독립성 검정 #예제-2 연령대별 SNS 이용률의 동질성 검정 sns.c <- read.csv("./data/snsbyage.csv", header=T, stringsAsFactors=FALSE) str( sns.c ) head(sns.c) sns.c <- transform(sns.c, age.c = factor(age, levels=c(1, 2, 3), labels=c("20대", "30대", "40대"))) sns.c <- transform(sns.c, service.c = factor(service, levels=c("F", "T", "K", "C", "E"), ordered=TRUE)) head(sns.c) c.tab <- table(sns.c$age.c, sns.c$service.c) (a.n <- margin.table(c.tab, margin=1)) (s.n <- margin.table(c.tab, margin=2)) (s.p <- s.n / margin.table(c.tab)) (expected <- a.n %*% t(s.p)) (o.e <- c.tab-expected) (t.t <- sum( (o.e)^2 / expected )) qchisq(0.95, df=8) 1-pchisq(t.t, df=8) #p-value chisq.test(c.tab) names <-chisq.test(c.tab) addmargins(chisq.test(c.tab)$expected) result$expected str(result) result$p.value #독립성 검정 #에제-3 성별에 따른 대학원 입학 여부의 독립성 검정 data("UCBAdmissions") UCBAdmissions ucba.tab <- apply(UCBAdmissions, c(1,2), sum) ucba.tab round(prop.table(ucba.tab,margin =2) *100,1) #독립성 검정 a.n <- 
margin.table(ucba.tab, margin =1) g.n <- margin.table(ucba.tab, margin= 2) (a.p <- a.n/margin.table(ucba.tab)) (g.p <- g.n/margin.table(ucba.tab)) (expected <- margin.table(ucba.tab) * (a.p %*% t(g.p))) 1 - pchisq(112.250, df = 1) #chi-squared statistics o.e <- (ucba.tab - expected)^2 / expected addmargins(o.e) chi sq.t <- sum(o.e) #검정 통계량 chisq.t qchisq(chisq.t, df=1) #p value chisq.test(ucba.tab) ##continu
############################################################################## # Copyright (c) 2012-2016 Russell V. Lenth # # # # This file is part of the emmeans package for R (*emmeans*) # # # # *emmeans* is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 2 of the License, or # # (at your option) any later version. # # # # *emmeans* is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with R and *emmeans*. If not, see # # <https://www.r-project.org/Licenses/> and/or # # <http://www.gnu.org/licenses/>. # ############################################################################## ### support for the ordinal package recover_data.clm = function(object, mode = "latent", ...) { if (!is.na(pmatch(mode, "scale"))) { if (is.null(trms <- object$S.terms)) return("Specified mode=\"scale\", but no scale model is present") # ref_grid's error handler takes it from here recover_data(object$call, trms, object$na.action, ...) } else if (is.null(object$S.terms) && is.null(object$nom.terms)) recover_data.lm(object, ...) else { # bring-in predictors from loc, scale, and nom models trms = delete.response(object$terms) x.preds = union(.all.vars(object$S.terms), .all.vars(object$nom.terms)) x.trms = terms(update(trms, .reformulate(c(".", x.preds)))) recover_data(object$call, x.trms, object$na.action, ...) } } # For now at least, clmm doesn't cover scale, nominal options recover_data.clmm = function(object, ...) recover_data.lm(object, ...) 
# Note: For ALL thresholds, object$Theta has all the threshold values # for the different cuts (same as object$alpha when threshold=="flexible") # and object$tJac is s.t. tJac %*% alpha = Theta # Note also that some functions of cut are constrained to be zero when # threshold != "flexible". Can get basis using nonest.basis(t(tJac)) # # opt arg 'mode' - determines what goes into ref_grid # 'rescale' - (loc, scale) for linear transformation of latent result emm_basis.clm = function (object, trms, xlev, grid, mode = c("latent", "linear.predictor", "cum.prob", "exc.prob", "prob", "mean.class", "scale"), rescale = c(0,1), ...) { # general stuff mode = match.arg(mode) if (mode == "scale") return (.emm_basis.clm.scale(object, trms, xlev, grid, ...)) # if (is.null(object$contrasts)) # warning("Contrasts used to fit the model are unknown.\n", # "Defaulting to system option, but results may be wrong.") bhat = coef(object) V = .my.vcov(object, ...) tJac = object$tJac dffun = function(...) Inf link = as.character(object$info$link) cnm = dimnames(object$tJac)[[1]] if (is.null(cnm)) cnm = paste(seq_len(nrow(tJac)), "|", 1 + seq_len(nrow(tJac)), sep = "") misc = list() # My strategy is to piece together the needed matrices for each threshold parameter # Then assemble the results ### ----- Location part ----- ### contrasts = object$contrasts # Remember trms was trumped-up to include scale and nominal predictors. # Recover the actual terms for the principal model trms = delete.response(object$terms) m = model.frame(trms, grid, na.action = na.pass, xlev = object$xlevels) X = model.matrix(trms, m, contrasts.arg = contrasts) # Need following code because clmm objects don't have NAs for dropped columns... 
nms.needed = c(names(object$alpha), setdiff(colnames(X), "(Intercept)")) if (length(setdiff(nms.needed, bnm <- names(bhat))) > 0) { bhat = seq_along(nms.needed) * NA names(bhat) = nms.needed bhat[bnm] = coef(object) object$coefficients = bhat # will be needed by model.matrix object$beta = bhat[setdiff(nms.needed, names(object$alpha))] } xint = match("(Intercept)", colnames(X), nomatch = 0L) if (xint > 0L) { X = X[, -xint, drop = FALSE] } ### ----- Nominal part ----- ### if (is.null(object$nom.terms)) NOM = matrix(1, nrow = nrow(X)) else { mn = model.frame(object$nom.terms, grid, na.action = na.pass, xlev = object$nom.xlevels) NOM = model.matrix(object$nom.terms, mn, contrasts.arg = object$nom.contrasts) } bigNom = kronecker(tJac, NOM) # cols are in wrong order... I'll get the indexes by transposing a matrix of subscripts if (ncol(NOM) > 1) bigNom = bigNom[, as.numeric(t(matrix(seq_len(ncol(bigNom)), nrow=ncol(NOM))))] ### ----- Scale part ----- ### if (!is.null(object$S.terms)) { ms = model.frame(object$S.terms, grid, na.action = na.pass, xlev = object$S.xlevels) S = model.matrix(object$S.terms, ms, contrasts.arg = object$S.contrasts) S = S[, names(object$zeta), drop = FALSE] if (!is.null(attr(object$S.terms, "offset"))) { soff = .get.offset(object$S.terms, grid) # we'll add a column to S and adjust bhat and V accordingly S = cbind(S, offset = soff) bhat = c(bhat, offset = 1) V = rbind(cbind(V, offset = 0), offset = 0) } si = misc$scale.idx = length(object$alpha) + length(object$beta) + seq_len(ncol(S)) # Make sure there are no name clashes names(bhat)[si] = paste(".S", names(object$zeta), sep=".") misc$estHook = ".clm.estHook" misc$vcovHook = ".clm.vcovHook" } else S = NULL ### ----- Get non-estimability basis ----- ### nbasis = snbasis = estimability::all.estble if (any(is.na(bhat))) { obj = object # work around fact that model.matrix.clmm doesn't work class(obj) = "clm" mm = try(model.matrix(obj), silent = TRUE) if (inherits(mm, "try-error")) stop("Currently, it 
is not possible to construct a reference grid for this\n", "object, because it is rank-deficient and no model matrix is available.") # note: mm has components X, NOM, and S if (any(is.na(c(object$alpha, object$beta)))) { NOMX = if (is.null(mm$NOM)) mm$X else cbind(mm$NOM, mm$X[, -1]) nbasis = estimability::nonest.basis(NOMX) # replicate and reverse the sign of the NOM parts nomcols = seq_len(ncol(NOM)) nbasis = apply(nbasis, 2, function(x) c(rep(-x[nomcols], each = nrow(NOM)), x[-nomcols])) } if (!is.null(mm$S)) { if (any(is.na(object$zeta))) { snbasis = estimability::nonest.basis(mm$S) # put intercept part at end snbasis = rbind(snbasis[-1, , drop=FALSE], snbasis[1, ]) if (!is.null(attr(object$S.terms, "offset"))) snbasis = rbind(snbasis, 0) snbasis = rbind(matrix(0, ncol=ncol(snbasis), nrow=min(si)-1), snbasis) # Note scale intercept is included, so tack it on to the end of everything S = cbind(S, .S.intcpt = 1) bhat = c(bhat, .S.intcpt = 0) V = rbind(cbind(V, .S.intcpt = 0), .S.intcpt = 0) si = misc$scale.idx = c(si, 1 + max(si)) } } if (is.na(nbasis[1])) # then only nonest part is scale nbasis = snbasis else { if (!is.null(S)) # pad nbasis with zeros when there's a scale model nbasis = rbind(nbasis, matrix(0, nrow=length(si), ncol=ncol(nbasis))) if (!is.na(snbasis[1])) nbasis = cbind(nbasis, snbasis) } } if (mode == "latent") { # Create constant columns for means of scale and nominal parts J = matrix(1, nrow = nrow(X)) nomm = rescale[2] * apply(bigNom, 2, mean) X = rescale[2] * X if (!is.null(S)) { sm = apply(S, 2, mean) X = cbind(X, kronecker(-J, matrix(sm, nrow = 1))) } bigX = cbind(kronecker(-J, matrix(nomm, nrow = 1)), X) misc$offset.mult = misc$offset.mult * rescale[2] intcpt = seq_len(ncol(tJac)) bhat[intcpt] = bhat[intcpt] - rescale[1] / rescale[2] } else { ### ----- Piece together big matrix for each threshold ----- ### misc$ylevs = list(cut = cnm) # support for links not in make.link if (is.character(link) && !(link %in% c("logit", "probit", "cauchit", 
"cloglog"))) { setLinks = get("setLinks", asNamespace("ordinal")) env = new.env() setLinks(env, link) link = list(linkfun = quote(stop), linkinv=env$pfun, mu.eta = env$dfun, name = env$link, lambda = env$lambda) } misc$tran = link misc$inv.lbl = "cumprob" misc$offset.mult = -1 if (!is.null(S)) X = cbind(X, S) J = matrix(1, nrow=nrow(tJac)) bigX = cbind(bigNom, kronecker(-J, X)) if (mode != "linear.predictor") { misc$mode = mode misc$respName = as.character.default(object$terms)[2] misc$postGridHook = ".clm.postGrid" } } dimnames(bigX)[[2]] = names(bhat) list(X = bigX, bhat = bhat, nbasis = nbasis, V = V, dffun = dffun, dfargs = list(), misc = misc) } # function called at end of ref_grid # I use this for polr as well # Also used for stanreg result of stan_polr & potentially other MCMC ordinal models .clm.postGrid = function(object, ...) { mode = object@misc$mode object@misc$postGridHook = object@misc$mode = NULL object = regrid(object, transform = "response", ...) if(object@misc$estName == "exc.prob") { # back-transforming yields exceedance probs object@bhat = 1 - object@bhat if(!is.null(object@post.beta[1])) object@post.beta = 1 - object@post.beta object@misc$estName = "cum.prob" } if (mode == "prob") { object = .clm.prob.grid(object, ...) } else if (mode == "mean.class") { object = .clm.mean.class(object, ...) } else if (mode == "exc.prob") { object@bhat = 1 - object@bhat if(!is.null(object@post.beta[1])) object@post.beta = 1 - object@post.beta object@misc$estName = "exc.prob" } # (else mode == "cum.prob" and it's all OK) object@misc$respName = NULL # cleanup object } # Make the linear-predictor ref_grid into one for class probabilities # This assumes that object has already been re-gridded and back-transformed .clm.prob.grid = function(object, thresh = "cut", newname = object@misc$respName, ...) { byv = setdiff(names(object@levels), thresh) newrg = contrast(object, ".diff_cum", by = byv, ...) newrg@grid$.offset. 
= (apply(newrg@linfct, 1, sum) < 0) + 0 if (!is.null(wgt <- object@grid[[".wgt."]])) { km1 = length(object@levels[[thresh]]) wgt = wgt[seq_len(length(wgt) / km1)] # unique weights for byv combs newrg = force_regular(newrg) key = do.call(paste, object@grid[byv])[seq_along(wgt)] tgt = do.call(paste, newrg@grid[byv]) for (i in seq_along (wgt)) newrg@grid[[".wgt."]][tgt == key[i]] = wgt[i] } # proceed to disavow that this was ever exposed to 'emmeans' or 'contrast' ## class(newrg) = "ref.grid" misc = newrg@misc if(!is.null(misc$display) && all(misc$display)) misc$display = NULL misc$is.new.rg = TRUE misc$infer = c(FALSE,FALSE) misc$estName = "prob" misc$pri.vars = misc$by.vars = misc$con.coef = misc$orig.grid = NULL newrg@misc = misc conid = which(names(newrg@levels) == "contrast") names(newrg@levels)[conid] = names(newrg@grid)[conid] = newname newrg@roles = object@roles newrg@roles$multresp = newname newrg } # special 'contrast' fcn used by .clm.mean.class .meanclass.emmc = function(levs, lf, ...) data.frame(mean = lf) .clm.mean.class = function(object, ...) { prg = .clm.prob.grid(object, newname = "class", ...) byv = setdiff(names(prg@levels), "class") lf = as.numeric(prg@levels$class) newrg = contrast(prg, ".meanclass", lf = lf, by = byv, ...) newrg = update(newrg, infer = c(FALSE, FALSE), pri.vars = NULL, by.vars = NULL, estName = "mean.class") newrg@levels$contrast = newrg@grid$contrast = NULL prg@roles$multresp = NULL newrg@roles = prg@roles ## class(newrg) = "ref.grid" update(force_regular(newrg), is.new.rg = TRUE) } # Contrast fcn for turning estimates of cumulative probabilities # into cell probabilities .diff_cum.emmc = function(levs, sep = "|", ...) 
{ plevs = unique(setdiff(unlist(strsplit(levs, sep, TRUE)), sep)) k = 1 + length(levs) if (length(plevs) != k) plevs = seq_len(k) M = matrix(0, nrow = length(levs), ncol = k) for (i in seq_along(levs)) M[i, c(i,i+1)] = c(1,-1) dimnames(M) = list(levs, plevs) M = as.data.frame(M) attr(M, "desc") = "Differences of cumulative probabilities" attr(M, "adjust") = "none" attr(M, "offset") = c(rep(0, k-1), 1) M } #### replacement estimation routines for cases with a scale param ## workhorse for estHook and vcovHook functions .clm.hook = function(object, tol = 1e-8, ...) { scols = object@misc$scale.idx bhat = object@bhat active = !is.na(bhat) bhat[!active] = 0 linfct = object@linfct estble = estimability::is.estble(linfct, object@nbasis, tol) ###apply(linfct, 1, .is.estble, object@nbasis, tol) estble[!estble] = NA rsigma = estble * as.numeric(linfct[, scols, drop = FALSE] %*% object@bhat[scols]) rsigma = exp(rsigma) * estble # I'll do the scaling later eta = as.numeric(linfct[, -scols, drop = FALSE] %*% bhat[-scols]) if (!is.null(object@grid$.offset.)) eta = eta + object@grid$.offset. for (j in scols) linfct[, j] = eta * linfct[, j] linfct = (.diag(rsigma) %*% linfct) [, active, drop = FALSE] list(est = eta * rsigma, V = linfct %*% tcrossprod(object@V, linfct)) } .clm.estHook = function(object, do.se = TRUE, tol = 1e-8, ...) { raw.matl = .clm.hook(object, tol, ...) SE = if (do.se) sqrt(diag(raw.matl$V)) else NA cbind(est = raw.matl$est, SE = SE, df = Inf) } .clm.vcovHook = function(object, tol = 1e-8, ...) { .clm.hook(object, tol, ...)$V } ### Special emm_basis fcn for the scale model .emm_basis.clm.scale = function(object, trms, xlev, grid, ...) 
{ m = model.frame(trms, grid, na.action = na.pass, xlev = xlev) X = model.matrix(trms, m, contrasts.arg = object$S.contrasts) bhat = c(`(intercept)` = 0, object$zeta) nbasis = estimability::all.estble if (any(is.na(bhat))) nbasis = estimability::nonest.basis(model.matrix(object)$S) k = sum(!is.na(bhat)) - 1 V = .my.vcov(object, ...) pick = nrow(V) - k + seq_len(k) V = V[pick, pick, drop = FALSE] V = cbind(0, rbind(0,V)) misc = list(tran = "log") list(X = X, bhat = bhat, nbasis = nbasis, V = V, dffun = function(...) Inf, dfargs = list(), misc = misc) } emm_basis.clmm = function (object, trms, xlev, grid, ...) { if(is.null(object$Hessian)) { message("Updating the model to obtain the Hessian...") object = update(object, Hess = TRUE) } # borrowed from Maxime's code -- need to understand this better, e.g. when it happens H = object$Hessian if (any(apply(object$Hessian, 1, function(x) all(x == 0)))) { H = H[names(coef(object)), names(coef(object))] object$Hessian = H } result = emm_basis.clm(object, trms, xlev, grid, ...) # strip off covariances of random effects keep = seq_along(result$bhat[!is.na(result$bhat)]) result$V = result$V[keep,keep] result }
/R/ordinal-support.R
no_license
hauselin/emmeans
R
false
false
16,812
r
############################################################################## # Copyright (c) 2012-2016 Russell V. Lenth # # # # This file is part of the emmeans package for R (*emmeans*) # # # # *emmeans* is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 2 of the License, or # # (at your option) any later version. # # # # *emmeans* is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with R and *emmeans*. If not, see # # <https://www.r-project.org/Licenses/> and/or # # <http://www.gnu.org/licenses/>. # ############################################################################## ### support for the ordinal package recover_data.clm = function(object, mode = "latent", ...) { if (!is.na(pmatch(mode, "scale"))) { if (is.null(trms <- object$S.terms)) return("Specified mode=\"scale\", but no scale model is present") # ref_grid's error handler takes it from here recover_data(object$call, trms, object$na.action, ...) } else if (is.null(object$S.terms) && is.null(object$nom.terms)) recover_data.lm(object, ...) else { # bring-in predictors from loc, scale, and nom models trms = delete.response(object$terms) x.preds = union(.all.vars(object$S.terms), .all.vars(object$nom.terms)) x.trms = terms(update(trms, .reformulate(c(".", x.preds)))) recover_data(object$call, x.trms, object$na.action, ...) } } # For now at least, clmm doesn't cover scale, nominal options recover_data.clmm = function(object, ...) recover_data.lm(object, ...) 
# Note: For ALL thresholds, object$Theta has all the threshold values # for the different cuts (same as object$alpha when threshold=="flexible") # and object$tJac is s.t. tJac %*% alpha = Theta # Note also that some functions of cut are constrained to be zero when # threshold != "flexible". Can get basis using nonest.basis(t(tJac)) # # opt arg 'mode' - determines what goes into ref_grid # 'rescale' - (loc, scale) for linear transformation of latent result emm_basis.clm = function (object, trms, xlev, grid, mode = c("latent", "linear.predictor", "cum.prob", "exc.prob", "prob", "mean.class", "scale"), rescale = c(0,1), ...) { # general stuff mode = match.arg(mode) if (mode == "scale") return (.emm_basis.clm.scale(object, trms, xlev, grid, ...)) # if (is.null(object$contrasts)) # warning("Contrasts used to fit the model are unknown.\n", # "Defaulting to system option, but results may be wrong.") bhat = coef(object) V = .my.vcov(object, ...) tJac = object$tJac dffun = function(...) Inf link = as.character(object$info$link) cnm = dimnames(object$tJac)[[1]] if (is.null(cnm)) cnm = paste(seq_len(nrow(tJac)), "|", 1 + seq_len(nrow(tJac)), sep = "") misc = list() # My strategy is to piece together the needed matrices for each threshold parameter # Then assemble the results ### ----- Location part ----- ### contrasts = object$contrasts # Remember trms was trumped-up to include scale and nominal predictors. # Recover the actual terms for the principal model trms = delete.response(object$terms) m = model.frame(trms, grid, na.action = na.pass, xlev = object$xlevels) X = model.matrix(trms, m, contrasts.arg = contrasts) # Need following code because clmm objects don't have NAs for dropped columns... 
nms.needed = c(names(object$alpha), setdiff(colnames(X), "(Intercept)")) if (length(setdiff(nms.needed, bnm <- names(bhat))) > 0) { bhat = seq_along(nms.needed) * NA names(bhat) = nms.needed bhat[bnm] = coef(object) object$coefficients = bhat # will be needed by model.matrix object$beta = bhat[setdiff(nms.needed, names(object$alpha))] } xint = match("(Intercept)", colnames(X), nomatch = 0L) if (xint > 0L) { X = X[, -xint, drop = FALSE] } ### ----- Nominal part ----- ### if (is.null(object$nom.terms)) NOM = matrix(1, nrow = nrow(X)) else { mn = model.frame(object$nom.terms, grid, na.action = na.pass, xlev = object$nom.xlevels) NOM = model.matrix(object$nom.terms, mn, contrasts.arg = object$nom.contrasts) } bigNom = kronecker(tJac, NOM) # cols are in wrong order... I'll get the indexes by transposing a matrix of subscripts if (ncol(NOM) > 1) bigNom = bigNom[, as.numeric(t(matrix(seq_len(ncol(bigNom)), nrow=ncol(NOM))))] ### ----- Scale part ----- ### if (!is.null(object$S.terms)) { ms = model.frame(object$S.terms, grid, na.action = na.pass, xlev = object$S.xlevels) S = model.matrix(object$S.terms, ms, contrasts.arg = object$S.contrasts) S = S[, names(object$zeta), drop = FALSE] if (!is.null(attr(object$S.terms, "offset"))) { soff = .get.offset(object$S.terms, grid) # we'll add a column to S and adjust bhat and V accordingly S = cbind(S, offset = soff) bhat = c(bhat, offset = 1) V = rbind(cbind(V, offset = 0), offset = 0) } si = misc$scale.idx = length(object$alpha) + length(object$beta) + seq_len(ncol(S)) # Make sure there are no name clashes names(bhat)[si] = paste(".S", names(object$zeta), sep=".") misc$estHook = ".clm.estHook" misc$vcovHook = ".clm.vcovHook" } else S = NULL ### ----- Get non-estimability basis ----- ### nbasis = snbasis = estimability::all.estble if (any(is.na(bhat))) { obj = object # work around fact that model.matrix.clmm doesn't work class(obj) = "clm" mm = try(model.matrix(obj), silent = TRUE) if (inherits(mm, "try-error")) stop("Currently, it 
is not possible to construct a reference grid for this\n", "object, because it is rank-deficient and no model matrix is available.") # note: mm has components X, NOM, and S if (any(is.na(c(object$alpha, object$beta)))) { NOMX = if (is.null(mm$NOM)) mm$X else cbind(mm$NOM, mm$X[, -1]) nbasis = estimability::nonest.basis(NOMX) # replicate and reverse the sign of the NOM parts nomcols = seq_len(ncol(NOM)) nbasis = apply(nbasis, 2, function(x) c(rep(-x[nomcols], each = nrow(NOM)), x[-nomcols])) } if (!is.null(mm$S)) { if (any(is.na(object$zeta))) { snbasis = estimability::nonest.basis(mm$S) # put intercept part at end snbasis = rbind(snbasis[-1, , drop=FALSE], snbasis[1, ]) if (!is.null(attr(object$S.terms, "offset"))) snbasis = rbind(snbasis, 0) snbasis = rbind(matrix(0, ncol=ncol(snbasis), nrow=min(si)-1), snbasis) # Note scale intercept is included, so tack it on to the end of everything S = cbind(S, .S.intcpt = 1) bhat = c(bhat, .S.intcpt = 0) V = rbind(cbind(V, .S.intcpt = 0), .S.intcpt = 0) si = misc$scale.idx = c(si, 1 + max(si)) } } if (is.na(nbasis[1])) # then only nonest part is scale nbasis = snbasis else { if (!is.null(S)) # pad nbasis with zeros when there's a scale model nbasis = rbind(nbasis, matrix(0, nrow=length(si), ncol=ncol(nbasis))) if (!is.na(snbasis[1])) nbasis = cbind(nbasis, snbasis) } } if (mode == "latent") { # Create constant columns for means of scale and nominal parts J = matrix(1, nrow = nrow(X)) nomm = rescale[2] * apply(bigNom, 2, mean) X = rescale[2] * X if (!is.null(S)) { sm = apply(S, 2, mean) X = cbind(X, kronecker(-J, matrix(sm, nrow = 1))) } bigX = cbind(kronecker(-J, matrix(nomm, nrow = 1)), X) misc$offset.mult = misc$offset.mult * rescale[2] intcpt = seq_len(ncol(tJac)) bhat[intcpt] = bhat[intcpt] - rescale[1] / rescale[2] } else { ### ----- Piece together big matrix for each threshold ----- ### misc$ylevs = list(cut = cnm) # support for links not in make.link if (is.character(link) && !(link %in% c("logit", "probit", "cauchit", 
"cloglog"))) { setLinks = get("setLinks", asNamespace("ordinal")) env = new.env() setLinks(env, link) link = list(linkfun = quote(stop), linkinv=env$pfun, mu.eta = env$dfun, name = env$link, lambda = env$lambda) } misc$tran = link misc$inv.lbl = "cumprob" misc$offset.mult = -1 if (!is.null(S)) X = cbind(X, S) J = matrix(1, nrow=nrow(tJac)) bigX = cbind(bigNom, kronecker(-J, X)) if (mode != "linear.predictor") { misc$mode = mode misc$respName = as.character.default(object$terms)[2] misc$postGridHook = ".clm.postGrid" } } dimnames(bigX)[[2]] = names(bhat) list(X = bigX, bhat = bhat, nbasis = nbasis, V = V, dffun = dffun, dfargs = list(), misc = misc) } # function called at end of ref_grid # I use this for polr as well # Also used for stanreg result of stan_polr & potentially other MCMC ordinal models .clm.postGrid = function(object, ...) { mode = object@misc$mode object@misc$postGridHook = object@misc$mode = NULL object = regrid(object, transform = "response", ...) if(object@misc$estName == "exc.prob") { # back-transforming yields exceedance probs object@bhat = 1 - object@bhat if(!is.null(object@post.beta[1])) object@post.beta = 1 - object@post.beta object@misc$estName = "cum.prob" } if (mode == "prob") { object = .clm.prob.grid(object, ...) } else if (mode == "mean.class") { object = .clm.mean.class(object, ...) } else if (mode == "exc.prob") { object@bhat = 1 - object@bhat if(!is.null(object@post.beta[1])) object@post.beta = 1 - object@post.beta object@misc$estName = "exc.prob" } # (else mode == "cum.prob" and it's all OK) object@misc$respName = NULL # cleanup object } # Make the linear-predictor ref_grid into one for class probabilities # This assumes that object has already been re-gridded and back-transformed .clm.prob.grid = function(object, thresh = "cut", newname = object@misc$respName, ...) { byv = setdiff(names(object@levels), thresh) newrg = contrast(object, ".diff_cum", by = byv, ...) newrg@grid$.offset. 
= (apply(newrg@linfct, 1, sum) < 0) + 0 if (!is.null(wgt <- object@grid[[".wgt."]])) { km1 = length(object@levels[[thresh]]) wgt = wgt[seq_len(length(wgt) / km1)] # unique weights for byv combs newrg = force_regular(newrg) key = do.call(paste, object@grid[byv])[seq_along(wgt)] tgt = do.call(paste, newrg@grid[byv]) for (i in seq_along (wgt)) newrg@grid[[".wgt."]][tgt == key[i]] = wgt[i] } # proceed to disavow that this was ever exposed to 'emmeans' or 'contrast' ## class(newrg) = "ref.grid" misc = newrg@misc if(!is.null(misc$display) && all(misc$display)) misc$display = NULL misc$is.new.rg = TRUE misc$infer = c(FALSE,FALSE) misc$estName = "prob" misc$pri.vars = misc$by.vars = misc$con.coef = misc$orig.grid = NULL newrg@misc = misc conid = which(names(newrg@levels) == "contrast") names(newrg@levels)[conid] = names(newrg@grid)[conid] = newname newrg@roles = object@roles newrg@roles$multresp = newname newrg } # special 'contrast' fcn used by .clm.mean.class .meanclass.emmc = function(levs, lf, ...) data.frame(mean = lf) .clm.mean.class = function(object, ...) { prg = .clm.prob.grid(object, newname = "class", ...) byv = setdiff(names(prg@levels), "class") lf = as.numeric(prg@levels$class) newrg = contrast(prg, ".meanclass", lf = lf, by = byv, ...) newrg = update(newrg, infer = c(FALSE, FALSE), pri.vars = NULL, by.vars = NULL, estName = "mean.class") newrg@levels$contrast = newrg@grid$contrast = NULL prg@roles$multresp = NULL newrg@roles = prg@roles ## class(newrg) = "ref.grid" update(force_regular(newrg), is.new.rg = TRUE) } # Contrast fcn for turning estimates of cumulative probabilities # into cell probabilities .diff_cum.emmc = function(levs, sep = "|", ...) 
{ plevs = unique(setdiff(unlist(strsplit(levs, sep, TRUE)), sep)) k = 1 + length(levs) if (length(plevs) != k) plevs = seq_len(k) M = matrix(0, nrow = length(levs), ncol = k) for (i in seq_along(levs)) M[i, c(i,i+1)] = c(1,-1) dimnames(M) = list(levs, plevs) M = as.data.frame(M) attr(M, "desc") = "Differences of cumulative probabilities" attr(M, "adjust") = "none" attr(M, "offset") = c(rep(0, k-1), 1) M } #### replacement estimation routines for cases with a scale param ## workhorse for estHook and vcovHook functions .clm.hook = function(object, tol = 1e-8, ...) { scols = object@misc$scale.idx bhat = object@bhat active = !is.na(bhat) bhat[!active] = 0 linfct = object@linfct estble = estimability::is.estble(linfct, object@nbasis, tol) ###apply(linfct, 1, .is.estble, object@nbasis, tol) estble[!estble] = NA rsigma = estble * as.numeric(linfct[, scols, drop = FALSE] %*% object@bhat[scols]) rsigma = exp(rsigma) * estble # I'll do the scaling later eta = as.numeric(linfct[, -scols, drop = FALSE] %*% bhat[-scols]) if (!is.null(object@grid$.offset.)) eta = eta + object@grid$.offset. for (j in scols) linfct[, j] = eta * linfct[, j] linfct = (.diag(rsigma) %*% linfct) [, active, drop = FALSE] list(est = eta * rsigma, V = linfct %*% tcrossprod(object@V, linfct)) } .clm.estHook = function(object, do.se = TRUE, tol = 1e-8, ...) { raw.matl = .clm.hook(object, tol, ...) SE = if (do.se) sqrt(diag(raw.matl$V)) else NA cbind(est = raw.matl$est, SE = SE, df = Inf) } .clm.vcovHook = function(object, tol = 1e-8, ...) { .clm.hook(object, tol, ...)$V } ### Special emm_basis fcn for the scale model .emm_basis.clm.scale = function(object, trms, xlev, grid, ...) 
{ m = model.frame(trms, grid, na.action = na.pass, xlev = xlev) X = model.matrix(trms, m, contrasts.arg = object$S.contrasts) bhat = c(`(intercept)` = 0, object$zeta) nbasis = estimability::all.estble if (any(is.na(bhat))) nbasis = estimability::nonest.basis(model.matrix(object)$S) k = sum(!is.na(bhat)) - 1 V = .my.vcov(object, ...) pick = nrow(V) - k + seq_len(k) V = V[pick, pick, drop = FALSE] V = cbind(0, rbind(0,V)) misc = list(tran = "log") list(X = X, bhat = bhat, nbasis = nbasis, V = V, dffun = function(...) Inf, dfargs = list(), misc = misc) } emm_basis.clmm = function (object, trms, xlev, grid, ...) { if(is.null(object$Hessian)) { message("Updating the model to obtain the Hessian...") object = update(object, Hess = TRUE) } # borrowed from Maxime's code -- need to understand this better, e.g. when it happens H = object$Hessian if (any(apply(object$Hessian, 1, function(x) all(x == 0)))) { H = H[names(coef(object)), names(coef(object))] object$Hessian = H } result = emm_basis.clm(object, trms, xlev, grid, ...) # strip off covariances of random effects keep = seq_along(result$bhat[!is.na(result$bhat)]) result$V = result$V[keep,keep] result }