blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
4dc85041b994045d4827f545a4c2277b1e5f37d6
594471d77b19ca6686054eedb90d90b0680d3e4c
/man/sample.id_s.sc.Rd
f7777d0becb6460923366e5b2296f0fa35ed136f
[]
no_license
NCKU-Bioinformatic-Club/deconvSeq
076a79a91c132e00712288a3027bf018afd8763e
dc7dfdc0ae9668b6780d8be9b1f54589107dd8e7
refs/heads/master
2022-12-04T18:41:59.174971
2020-08-17T21:15:17
2020-08-17T21:15:17
null
0
0
null
null
null
null
UTF-8
R
false
true
322
rd
sample.id_s.sc.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/withDoc.R \docType{data} \name{sample.id_s.sc} \alias{sample.id_s.sc} \title{sample IDs (scRNAseq)} \format{ An object of class \code{character} of length 238. } \usage{ sample.id_s.sc } \description{ sample IDs (scRNAseq) } \keyword{datasets}
c94da6a7b0e973d0d92a2852bf067e160d123967
d8d149c243a5c8888679096d619d10a7c06b8127
/R/bal.ms.psa.R
fccbe0651c197a1492229ce1c75f03b2054af802
[]
no_license
cran/PSAgraphics
ed4997e10989cb08c3c0b8f042593d9cf442d995
33b1eed4d75374f05b171f5751c9fe5db00c6621
refs/heads/master
2023-04-13T19:25:56.884945
2023-03-21T11:30:02
2023-03-21T11:30:02
17,681,448
0
0
null
null
null
null
UTF-8
R
false
false
4,667
r
bal.ms.psa.R
#' Balance for Continuous Covariate: Random Strata as part of a PSA #' #' Function provides a measure (based on the trimmed mean) of the balance #' achieved between control and treatment groups for a continuous covariate #' from user defined strata. This statistic is compared to the same measure for #' randomly permuted strata. #' #' This function measures the balance achieved across K strata for a continuous #' covariate. If \eqn{ \mu_{ik} } is the covariate trimmed (as specified by #' user) mean of cases in stratum k, treatment i, then the statistic is the sum #' over all K of \eqn{ |\mu_{0k} - \mu_{1k}| }. A permutation distribution is #' generated by randomly assigning cases to strata, thus generating B permuted #' stratifications and the associated B permutation statistics. The #' permutation stratifications are generated under a fixed marginals model to #' retain comparability with the original stratification. A histogram of the #' permutation statistics is produced with the original statistic referenced as #' a red dot. #' #' @param continuous Quantitative covariate that is being balanced within #' strata in a PSA. If \code{continuous} has three columns, then the second #' and third are assumed to be the treatment and strata respectively. Missing #' values are not allowed. #' @param treatment Binary variable of same length as \code{continuous}; #' generally 0 for 'control,' 1 for 'treatment.' #' @param strata Integer variable; a vector of same length as \code{continuous} #' indicating the derived strata from estimated propensity scores. #' @param trim Fraction (0 to 0.5) of observations to be trimmed from each end #' of stratum-treatment level before the mean is computed. See #' \code{\link{mean}}. #' @param B Numeric; number of randomly generated iterations of the balance #' measure are created for the comparison distribution. #' @param main Title passed to \code{histogram}. 
#' @return In addition to the histogram, a list with the following components #' is returned: \item{balance.orig }{Balance measure of user defined strata.} #' \item{rank.orig}{Rank of original balance measure in comparison with the B #' randomly generated values.} #' @author James E. Helmreich \email{ James.Helmreich@@Marist.edu} #' #' Robert M. Pruzek \email{RMPruzek@@yahoo.com} #' @seealso \code{bal.ks.psa}, \code{bal.cws.psa}, \code{bal.cs.psa} #' @keywords htest #' @examples #' #Balance stat should be close to zero #' meas<-rnorm(500) #' continuous<-c(meas,meas+rnorm(500,0,.1)) #' treatment<-c(rep(0,500),rep(1,500)) #' strata<-rep(c(rep(1,100),rep(2,100),rep(3,100),rep(4,100),rep(5,100)),2) #' bal.ms.psa(continuous,treatment,strata) #' #' #' #Balance stat should be close to .4 #' meas<-rnorm(500) #' continuous<-c(meas, meas[1:250] + runif(250,0,.2), #' meas[251:500]-runif(250,0,.2)) #' treatment<-c(rep(0,500),rep(1,500)) #' strata<-rep(c(rep(1,100), rep(2,100), rep(3,100), #' rep(4,100),rep(5,100)),2) #' bal.ms.psa(continuous, treatment, strata, B=200) #' #' @export bal.ms.psa bal.ms.psa <- function(continuous, treatment = NULL, strata = NULL, trim = 0, B = 1000, main = NULL) { #Compares means within randomly generated strata for a continuous covariate. #The analogue of bal.cs.psa n <- length(continuous) nstrat <- dim(table(strata)) #If "continuous" has three columns, treat as c, t, s. 
if (dim(as.data.frame(continuous))[2] == 3) { treatment <- continuous[, 2] strata <- continuous[, 3] continuous <- continuous[, 1] } meas.mns <- tapply(continuous, list(treatment, strata), mean, trim = trim) sum.abs.diff.original <- sum((abs(meas.mns[1, ] - meas.mns[2, ])) * table(strata) / n) sum.abs.diff <- NULL for (i in 1:B) { rstrat <- sample(strata, n) meas.mns <- tapply(continuous, list(treatment, rstrat), mean, trim = trim) sum.abs.diff <- c(sum.abs.diff, sum((abs( meas.mns[1, ] - meas.mns[2, ] )) * table(strata) / n)) } res <- c(sum.abs.diff.original, sum.abs.diff) rnk <- NULL rnk <- rank(c(sum.abs.diff.original[1], sum.abs.diff))[1] hist( c(sum.abs.diff.original, sum.abs.diff), xlab = paste( "Balance Statistics for", B, "Randomly Permuted Stratifications" ), main = main ) points( sum.abs.diff.original, 0, col = "red", cex = 4, pch = 20 ) legend( x = "topright", legend = list( paste("Original Balance:", round(sum.abs.diff.original, 2)), paste("Rank of Original:", round(rnk, 2), "of", B + 1) ), pch = c(19, 19), col = c(2, 0) ) out <- list(balance.orig = sum.abs.diff.original, rank.orig = rnk) return(out) }
ab2d07ecff37cbc06a8807f7077828e17c5968b7
d5e3596f41df8aba3ee20a6fe10d0685f3ec7634
/Clase 3/Clase3.R
df4e2118c552cca0a9c6424dbe0b8c6d679e58b8
[]
no_license
JesusCuellar00/MCF202
41da172105dd75b3356a6789e03f2d1ff8b03c34
bf56ab7a12ca1bb77d171653a4f8f577f077348f
refs/heads/master
2020-06-30T03:48:14.734513
2019-08-09T20:39:56
2019-08-09T20:39:56
200,714,563
0
0
null
null
null
null
UTF-8
R
false
false
1,609
r
Clase3.R
# Jesús Cuéllar Loera # 7/Agosto/2019 # Clase 3 Grupo<- gl(2,12, labels= c("Fotografia", "Araña")) Ansiedad <- c(30, 35, 45, 40, 50, 35, 55, 25, 30, 45, 40, 50, 40, 35, 50, 55, 65, 55, 50, 35, 30, 50, 60, 39) Datos<- data.frame(Grupo, Ansiedad) head(Datos) summary(Datos) boxplot(Datos$Ansiedad ~ Datos$Grupo, col="green", ylab= "Nivel de ansiedad") length(Datos$Grupo) tapply(Datos$Ansiedad,Datos$Grupo, mean) shapiro.test(Datos$Ansiedad) bartlett.test(Datos$Ansiedad ~ Datos$Grupo) library(pastecs) by(Datos$Ansiedad, Datos$Grupo, stat.desc, basic= FALSE, norm= TRUE) gr.t<- t.test(Datos$Ansiedad ~ Datos$Grupo, var.equal = TRUE) t.test(Datos$Ansiedad ~ Datos$Grupo, var.equal = TRUE) #En promedio los participantes experimentaron niveles de ansiedad mas alto en el grupo # en el que tuvieron una tarantula en mano (M= 47) # Ejercicio 2 ------------------------------------------------------------- # H0 no existe diferencia entre medias(80 kg) y la alternativa #es que la media es menor a 80 kg costal <- c(87.7, 80.01, 77.28, 78.76, 81.52, 74.2, 80.71, 79.5, 77.87, 81.94, 80.7, 82.32, 75.78, 80.19, 83.91, 79.4, 77.52, 77.62, 81.4, 74.89, 82.95, 73.59, 77.92, 77.18, 79.83, 81.23, 79.28, 78.44, 79.01, 80.47, 76.23, 78.89, 77.14, 69.94, 78.54, 79.7, 82.45, 77.29, 75.52, 77.21, 75.99, 81.94, 80.41, 77.7) #observaciones n<- length(costal) #Media mean.cos <- mean(costal) #desv cos.sd <- sd(costal) cost.se<- cos.sd/sqrt(n) cos.t<- (mean.cos - 80)/ cost.se pt(cos.t, df= n-1) t.test(costal, mu = 80, alternative = "less")
afbba86c920f89b8d27b2638b158e71828ce09e8
f2d3a834eb614c444e4c4d2f863577e804d9fb70
/R/check_quality.R
2e8b722434bcdc398a85e8bd57595c647bb7b1b9
[]
no_license
David-Hervas/clickR
150669cc67575659258e2bb44f429544e52e809c
cb738e505375376d91ac37eb01813ac3fb0e1432
refs/heads/master
2023-08-14T05:06:15.095067
2023-08-07T17:01:53
2023-08-07T17:01:53
90,495,146
3
3
null
null
null
null
UTF-8
R
false
false
10,432
r
check_quality.R
#' Checks if each value might be numeric #' #' @description Checks if each value from a vector might be numeric #' @param x A vector #' @return A logical vector may.numeric <- function(x) suppressWarnings(!is.na(numeros(x))) #' Extreme values from a numeric vector #' #' @description Returns the nth lowest and highest values from a vector #' @param x A vector #' @param n Number of extreme values to return #' @param id ID column to reference the found extreme values #' @return A matrix with the lowest and highest values from a vector extreme_values <- function(x, n=5, id=NULL){ h<-matrix(rep("", n*2), ncol=n) m<-matrix(sort(na.omit(x))[c(1:n, (length(na.omit(x))-(n-1)):length(na.omit(x)))], nrow=2, byrow=TRUE) if(!is.null(id)){ h<- matrix(id[order(x)][c(1:n, (length(na.omit(x))-(n-1)):length(na.omit(x)))], nrow=2, byrow=TRUE) } return(list(Values=setNames(data.frame(t(m)), c("Low", "High")), IDs=setNames(data.frame(t(h)), c("Low", "High")))) } #' Brute numeric coercion #' #' @description If possible, coerces values from a vector to numeric #' @param x A vector #' @return A numeric vector numeros <- function(x){ suppressWarnings( as.numeric( gsub( paste(c(",", "\\.\\.", ",,", "\\.,", ",\\.", "\\."), collapse = "|"), ".", gsub( "[A-Za-z]", "", iconv( gsub( "^ *|(?<= ) | *$", "", gsub("\\$|\u20ac|\u00A3", "", tolower(as.character(x))), perl = TRUE), to = "ASCII//TRANSLIT"))))) } #' True TRUE #' #' @description Makes possible vectorized logical comparisons against NULL and NA values #' @param x A logical vector #' @return A logical vector #' @export ttrue <- function(x){ x[is.na(x)] <- FALSE if(length(x) == 0L) x <- FALSE x } #' greater & NA #' #' @description '>' operator where NA values return FALSE #' @param x Vector for the left side of the operator #' @param y A Scalar or vector of the same length as x for the right side of the operator #' @return A logical vector of the same length as x #' @export `%>NA%` <- function(x, y){ ttrue(x > y) } #' less & NA #' #' 
@description '<' operator where NA values return FALSE #' @param x Vector for the left side of the operator #' @param y A Scalar or vector of the same length as x for the right side of the operator #' @return A logical vector of the same length as x #' @export `%<NA%` <- function(x, y){ ttrue(x < y) } #' geq & not NA #' #' @description '>=' operator where NA values return FALSE #' @param x Vector for the left side of the operator #' @param y A Scalar or vector of the same length as x for the right side of the operator #' @return A logical vector of the same length as x #' @export `%>=NA%` <- function(x, y){ ttrue(x >= y) } #' leq & not NA #' #' @description '<=' operator where NA values return FALSE #' @param x Vector for the left side of the operator #' @param y A Scalar or vector of the same length as x for the right side of the operator #' @return A logical vector of the same length as x #' @export `%<=NA%` <- function(x, y){ ttrue(x <= y) } #' Checks data quality of a variable #' #' @description Returns different data quality details of a numeric or categorical variable #' @param x A variable from a data.frame #' @param id ID column to reference the found extreme values #' @param plot If the variable is numeric, should a boxplot be drawn? #' @param numeric If set to TRUE, forces the variable to be considered numeric #' @param k Number of different numeric values in a variable to be considered as numeric #' @param n Number of extreme values to extract #' @param output Format of the output. If TRUE, optimize for exporting as csv #' @param ... 
further arguments passed to boxplot() #' @return A list of a data.frame with information about data quality of the variable #' @importFrom graphics boxplot dotchart #' @importFrom stats setNames #' @export #' @examples #' check_quality(airquality$Ozone) #For one variable #' lapply(airquality, check_quality) #For a data.frame #' lapply(airquality, check_quality, output=TRUE) #For a data.frame, one row per variable check_quality <- function(x, id=1:length(x), plot=TRUE, numeric=NULL, k=5, n=ifelse(is.numeric(x) | ttrue(numeric) | class(x) %in% "Date", 5, 2), output=FALSE, ...){ call_n <- !is.null(as.list(match.call())$n) num <- numeric date <- class(x) %in% "Date" numbers <- sum(may.numeric(x)) offending_values<-NA if(is.null(numeric)){ if(numbers>(length(x)/10) & length(unique(numeros(x))) > k) { num<-TRUE n<-max(c(n, 5*!call_n))} else num <- FALSE } if(num & !is.numeric(x)){ warning("Numeric variable encoded as a factor. Use fix_numerics() to amend", call.=FALSE) off<-table(x)[is.na(numeros(names(table(x))))] offending_values<-paste(paste(names(off), " (", off, ")", sep=""), collapse="; ", sep="") } if(plot & num) boxplot(numeros(x), col="gray", outcol="darkred", pch=16, las=1, ylab=as.character(as.list(match.call())$x)[3], ...) 
if(plot & !num & !date) dotchart(sort(setNames(as.numeric(table(x)), names(table(x)))), pch=16) if(plot & date) plot(sort(x, na.last = TRUE), 1:length(x), pch=16, col=rgb(50, 50, 50, 100, maxColorValue = 255), las=1, xlab="Date", ylab="") Extremes_low<-NA Extremes_high<-NA Tabla2<-NA if(num){ Extremes <- extreme_values(x, n, id) } if(!num & !date){ Table <- data.frame(sort(table(x))[1:n]) Tabla2<-paste(apply(data.frame(sort(table(x))[1:n]), 1, function(x) paste(x[1], " (", x[2], ")", sep="")), collapse="; ") } if(date){ h<-matrix(rep("", n*2), ncol=2) m<-data.frame(Low=sort(na.omit(x))[1:n], high=sort(na.omit(x))[(length(na.omit(x))-(n-1)):length(na.omit(x))]) if(!is.null(id)){ h<- t(matrix(id[order(x)][c(1:n, (length(na.omit(x))-(n-1)):length(na.omit(x)))], nrow=2, byrow=TRUE)) } Extremes <- list(Values=m, IDs=h) } if(num | date){ Extremes_low <- gsub(";", "];", paste(paste(Extremes$Values[, 1], Extremes$IDs[, 1], sep=" ["), "; ", collapse="", sep="")) Extremes_high <- gsub(";", "];", paste(paste(Extremes$Values[, 2], Extremes$IDs[, 2], sep=" ["), "; ", collapse="", sep="")) } N.Categories <- length(table(x)) Strings <- sum(grepl("[[:alpha:]]", x)) NAs<-sum(is.na(x)) whitespace<-sum(x %in% "" | x %in% " ", na.rm=TRUE) if(output){ res<-data.frame(n=length(x), NAs=NAs, whitespace=whitespace, numbers=numbers, Strings=Strings, N.Categories=N.Categories, Extremes_low=Extremes_low, Extremes_high=Extremes_high, Table=strtrim(Tabla2, 150), Offenders=strtrim(offending_values, 150)) } else{ res<-list(Summary=data.frame(n=length(x), NAs=NAs, whitespace=whitespace, numbers=numbers, strings=Strings, class=class(x)), Extremes=if(num | date) Extremes else cbind(Table, N.Categories=c(rep("", n-1), length(table(x)))), Offending=offending_values) } return(res) } #' Explores global environment workspace #' #' @description Returns information regarding the different objects in global environment #' @param table If TRUE a table with the frequencies of each type of object is given #' 
@return A list of object names by class or a table with frequencies if table = TRUE #' @export #' @examples #' df1 <- data.frame(x=rnorm(10), y=rnorm(10, 1, 2)) #' df2 <- data.frame(x=rnorm(20), y=rnorm(20, 1, 2)) #' workspace(table=TRUE) #Frequency table of the different object classes #' workspace() #All objects in the global object separated by class workspace <- function(table=FALSE) { list_obj <- split(objects(envir=.GlobalEnv), sapply(objects(envir=.GlobalEnv), function(x) class(get(x, envir=.GlobalEnv))[length(class(get(x, envir=.GlobalEnv)))])) if(table) sapply(list_obj, function(x) length(x)) else list_obj } #' Applies a function over objects of a specific class #' #' @description Applies a function over all objects of a specific class in the global environment #' @param object_class Class of the objects where the function is to be applied #' @param action Name of the function to apply #' @return Results of the function #' @export #' @examples #' df1 <- data.frame(x=rnorm(10), y=rnorm(10, 1, 2)) #' df2 <- data.frame(x=rnorm(20), y=rnorm(20, 1, 2)) #' workspace_sapply("data.frame", "summary") #Gives a summary of each data.frame workspace_sapply <- function(object_class, action="summary"){ sapply(workspace()[[object_class]], function(x) get(action)(get(x)), simplify=FALSE) } #' Check for bivariate outliers #' #' @description Checks for bivariate outliers in a data.frame #' @param x A data.frame object #' @param threshold_r Threshold for the case of two continuous variables #' @param threshold_b Threshold for the case of one continuous and one categorical variable #' @return A data frame with all the observations considered as bivariate outliers #' @importFrom stats cooks.distance #' @importFrom utils combn #' @export #' @examples #' bivariate_outliers(iris) bivariate_outliers <- function(x, threshold_r=10, threshold_b=1.5){ pairwise_comb <- combn(1:ncol(x), 2) outliers <- apply(pairwise_comb, 2, function(y){ if(all(sapply(x[,y], is.numeric))){ data_l <- 
data.frame(x=x[ , y[1]], y=x[ , y[2]]) mod_a <- stats::rstudent(lm(x ~ y + I(y^2) + I(y^3), data=data_l, na.action = "na.exclude"))^2 mod_b <- stats::rstudent(lm(y ~ x + I(x^2) + I(x^3), data=data_l, na.action = "na.exclude"))^2 rs <- (mod_a+mod_b)/mean(mod_a+mod_b, na.rm=TRUE) if(any(rs %>NA% threshold_r)){ data.frame(row=rownames(x)[which(rs %>NA% threshold_r)], variable1=names(x)[y[1]], value1=x[,y[1]][which(rs %>NA% threshold_r)], variable2=names(x)[y[2]], value2=x[,y[2]][which(rs %>NA% threshold_r)]) } } else{ if(sum(sapply(x[,y], is.numeric) * rev(sapply(x[,y], is.factor))) == 1){ factor <- sapply(x[,y], is.factor) case <- unsplit(lapply(split(x[,y][,!factor], x[,y][,factor]), function(x) outliers(x, threshold_b)), x[,y][,factor]) if(any(case)){ data.frame(row=rownames(x)[ttrue(case)], variable1=names(x)[y[1]], value1=as.character(x[,y[1]][ttrue(case)]), variable2=names(x)[y[2]], value2=as.character(x[,y[2]][ttrue(case)])) } } } }) output <- do.call(rbind, as.list(outliers)) rownames(output) <- NULL output }
6c0f574d6958b785df5b6485e8c9326fcc3a74b8
61754c7594c605059883710506bef4a2679992bc
/inst/src/tabs/analysis/svr_analysis.R
e86c0cc1d5265edc40c5d535d5fc047ae06b359e
[ "MIT" ]
permissive
genignored/dgeAnalysis
5065eb4416fd3ba55b4c6cdff37c3ba33182ae5c
8546d1d189adff4d4188b61d5fe8573edfdc6079
refs/heads/master
2023-01-21T23:41:45.867416
2020-11-30T21:13:35
2020-11-30T21:13:35
314,323,981
0
0
MIT
2020-11-30T20:58:18
2020-11-19T17:38:52
null
UTF-8
R
false
false
5,639
r
svr_analysis.R
## Set deTab table output[["detab_table"]] <- DT::renderDataTable({ tryCatch({ checkReload() if (input$setdeTab == "all") { DT::datatable(inUse_deTab, options = list(pageLength = 50, scrollX = TRUE)) } else { DT::datatable(inUse_deTab[inUse_deTab$DE != 0,], options = list(pageLength = 50, scrollX = TRUE)) } }, error = function(err) { return(DT::datatable(data.frame(c( "No data available in table" )), rownames = FALSE, colnames = "")) }) }) ## DE ratio output[["de_ratio"]] <- renderPlotly({ tryCatch({ checkReload() deRatioPlot(inUse_deTab) }, error = function(err) { return(NULL) }) }) ## Mean-Difference (MA) plots output[["ma_plot"]] <- renderPlotly({ tryCatch({ checkReload() ma_plot(inUse_deTab) }, error = function(err) { return(NULL) }) }) ## Genes selected in MA plot output[["selected_ma"]] <- DT::renderDataTable({ tryCatch({ s <- event_data(event = "plotly_selected", source = "analysis_ma") if (is.null(s)) { s <- "" } DT::datatable(inUse_deTab[s$key,], options = list(pageLength = 15, scrollX = TRUE)) }, error = function(err) { return(DT::datatable(data.frame(c( "No data available in table" )), rownames = FALSE, colnames = "")) }) }) ## Volcano plots output[["volcano_plot"]] <- renderPlotly({ tryCatch({ checkReload() volcanoPlot(inUse_deTab, input$vulcanoLogCut, -log10(input$vulcanoPCut)) }, error = function(err) { return(NULL) }) }) ## Genes selected in volcano plot output[["selected_volcano"]] <- DT::renderDataTable({ tryCatch({ s <- event_data(event = "plotly_selected", source = "analysis_volcano") if (is.null(s)) { s <- "" } DT::datatable(inUse_deTab[s$key,], options = list(pageLength = 15, scrollX = TRUE)) }, error = function(err) { return(DT::datatable(data.frame(c( "No data available in table" )), rownames = FALSE, colnames = "")) }) }) ## Barcode plot output[["barcode_plot"]] <- renderPlotly({ tryCatch({ checkReload() barcodePlot( inUse_deTab, inUse_normDge, input$group_analysis_bar, input$slider_barcode, input$selected_analysis_bar ) }, error = 
function(err) { return(NULL) }) }) ## Set color of barcode output[["group_analysis_bar"]] <- renderUI({ tryCatch({ selectInput( inputId = "group_analysis_bar", label = "Color by:", choices = colnames(data_samples()) ) }, error = function(err) { return(NULL) }) }) ## Add specific gene to barplot observe({ tryCatch({ updateSelectizeInput( session = session, inputId = 'selected_analysis_bar', choices = rownames(inUse_deTab), server = TRUE ) }, error = function(err) { return(NULL) }) }) ## P value plots output[["p_val_plot"]] <- renderPlotly({ tryCatch({ checkReload() pValuePlot(inUse_deTab) }, error = function(err) { return(NULL) }) }) ## INFORMATION BOXES output[["de_ratio_info"]] <- renderUI({ infoText <- "The DE ratio plot gives a quick overview of the amount of differentially expressed genes. (down, not and up-regulated) The exact amount of genes are shown inside the plot. The dispersion of the Y-axis is based on the percentages. This is calculated based on the total amount of genes after filtering." informationBox(infoText) }) output[["ma_plot_info"]] <- renderUI({ infoText <- "The MA plot (Mean average), plots all genes after filtering. This plot creates a figure with log-intensity ratios (M-values) and log-intensity averages (A-values). The MA plot can give a view of all genes and their amount of expression in a comparison of two groups. On the X-axis the Log2CPM values (A) are plotted against the Log2FC values on the Y-axis (M). In this plot, the genes are colored based on the DE results (down, not and up-regulated). Also, the trend of expression is shown. This trend shows the average expression flow." informationBox(infoText) }) output[["volcano_plot_info"]] <- renderUI({ infoText <- "The volcano plot also shows the most expressed genes and can give a good view of the expression results. The X-axis contains the Log2FC (magnitude of change) and the Y-axis shows -Log10 p-value (statistical significance). 
A great benefit of the volcano plot is that it makes genes with a high fold-change, who are statistically significant, well visible. If genes are down-regulated, they will be visible towards the left of the plot and up-regulated genes more towards the right. The most statistically significant genes are placed towards the top." informationBox(infoText) }) output[["barcode_plot_info"]] <- renderUI({ infoText <- "The barcode plot shows the most expressed genes. The X-axis shows the Log2CPM for every sample per gene. The Y-axis shows most expressed genes (from top to bottom). Every bar stands for a sample. With this plot differences between samples for a specific gene is visible." informationBox(infoText) }) output[["p_val_plot_info"]] <- renderUI({ infoText <- "The p-value plot is used as a quality control plot. When all bars, for example, are all the same height, then the analysis can be described as “noisy”. The ideal pattern would be that bars around a p-value of zero are high. Next, the bars should get lower in a nice (steep) curve and should be (almost) zero at the p-value of one." informationBox(infoText) })
e009be5577e8f227b1201cc96c2735f033787ec6
918030d6c9437ae7c6abfae4b9bf29240eb7bec5
/track2/presentation-src.R
5c6d3a63d5a6e1d1a6a4c834be525b1e62abd968
[]
no_license
USCbiostats/rbootcamp
ad32982d71f91df1770ec9c6c10e9379b4ec0e81
c449e23c765ca049c494e42b46bcf8aafcfbae4c
refs/heads/master
2023-08-26T13:03:46.746674
2023-08-11T15:00:45
2023-08-11T15:00:45
131,031,250
30
18
null
2020-08-11T06:59:33
2018-04-25T16:01:27
HTML
UTF-8
R
false
false
3,003
r
presentation-src.R
## ------------------------------------------------------------------------ mymat <- matrix(1:9, ncol=3) # A matrix with numbers 1 through 9 with 3 columns mymat str(mymat) ## ---- collapse=TRUE------------------------------------------------------ mymat[1, 2] # The element in the (1, 2) location of the matrix mymat[4] # The fourth element in the vector... which is (1, 2) ## ----eg-ols-sim---------------------------------------------------------- # Simulating an Linear Model set.seed(181) X <- cbind(1, rnorm(1000)) y <- X %*% cbind(c(2.3, 5)) + rnorm(1000) # Estimating the model using the lm function coef(lm(y ~ -1 + X)) # O a la linear algebra :) solve(t(X) %*% X) %*% t(X) %*% y ## ---- R.options=list(digits=2)------------------------------------------- set.seed(122) A <- matrix(rnorm(12), ncol=3) B <- matrix(rnorm(12), nrow=3) C <- A %*% cbind(1:3) + rnorm(4) # Adding a vector of length 4! ## ---- eval=FALSE, echo=FALSE--------------------------------------------- ## # Matrix multiplication: ## A %*% B ## ## # Transpose ## t(A) ## ## # Element-wise product ## A*t(B) ## ## # Inverse ## solve(t(A) %*% A) ## ## # OLS ## solve(t(A) %*% A) %*% t(A) %*% C ## ----eg-indexing-columns-dgp1-------------------------------------------- set.seed(11122) dat <- matrix(runif(100*5), nrow = 100) head(dat) ## ----eg-indexing-columns-sol1, include=FALSE----------------------------- # To get the min is equivalent to get the max of the negative! idx <- max.col(-dat) head(idx) # Let's just see the first observations cbind( head(dat), lowest = head(dat[cbind(1:100, idx)]) ) ## ----eg-indexing-columns-dgp2-------------------------------------------- set.seed(881) dat <- matrix(runif(100*5), nrow = 5) str(dat) ## ----eg-indexing-columns-sol2, include = FALSE--------------------------- # Well... we don't have a max.row... but we can always transpose it! 
idx <- max.col(-t(dat)) head(idx) cbind( head(dat), lowest = head(dat[cbind(idx, 1:100)]) ) ## ----solution-er, eval=FALSE, echo=FALSE--------------------------------- ## set.seed(81) ## # How many edges ## n <- 1e3 ## m <- rbinom(1, n^2, .05) ## ## # Where are the edges? ## idx <- sample.int(n^2, m, FALSE) ## mat <- matrix(0, 1e3, 1e3) ## mat[idx] <- 1L ## ## image(mat) ## ----solution-er2-------------------------------------------------------- set.seed(81) # How many edges n <- 5e4 m <- rbinom(1, n^2, .0005) # Where are the edges? idx <- sample.int(n^2, m, FALSE) library(Matrix) # Getting the row idx_row <- (idx - 1) %% n + 1 idx_col <- (idx - 1) %/% n + 1 # Empty sparse mat <- sparseMatrix( i = idx_row, j = idx_col, x = rep(1, length(idx_row)), dims = c(n, n) ) # Is it working? sum(mat) == m ## ----er-space-saver------------------------------------------------------ print(object.size(mat), units = "Mb") # Sparse matrix (8 * n^2) / 1024^2 # Dense matrix ## ------------------------------------------------------------------------ devtools::session_info()
ea81922db621829817fbae7b87ed7de2cd441225
d675f6523862bdabdf2ae570b8c8f30628e352c3
/man/ComputeAExp.Rd
7bbc1d7d6c645e8c603236542b2f337108d60e59
[ "MIT" ]
permissive
valcourgeau/ev-trawl
70d9d58b31b28d6036bed283df5aac55ac460405
9fea733039ffcbb01f1ad3b53114953cc989b073
refs/heads/master
2020-03-26T11:31:08.768540
2019-05-01T11:38:21
2019-05-01T11:38:21
null
0
0
null
null
null
null
UTF-8
R
false
true
541
rd
ComputeAExp.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pairwise_latent_trawl.R \name{ComputeAExp} \alias{ComputeAExp} \title{Wrapper to compute total area under exponential trawl function.} \usage{ ComputeAExp(rho) } \arguments{ \item{rho}{Exponential trawl parameter (should be non-negative).} } \value{ Total area under exponential trawl function (1/rho). } \description{ Wrapper to compute total area under exponential trawl function. } \examples{ ComputeAExp(1) # should be 1.0 ComputeAExp(0.2) # should be 5.0. }
9eebe974fa4e0b82c3d34209916bbd806123fa4a
15b7edbff872f46a3029c00061177d3e9898ae18
/run_analysis.R
3b5d2a6e29129e7840a037df2f22c79ca705637b
[]
no_license
Tau-Sigma/Getting_and_cleaning_data-CourseProject
dd6be97e63e954880a733b4323e551046aa591d2
00f63997b4344b2973f758b9a5d85c622fa06017
refs/heads/master
2021-01-19T00:44:24.160242
2015-07-25T17:26:13
2015-07-25T17:26:13
39,650,980
0
0
null
null
null
null
UTF-8
R
false
false
2,799
r
run_analysis.R
#Global settings library(plyr) setwd("/home/sascha/Dokumente/Coursera/Getting and cleaning data/Course Project/UCI HAR Dataset") #set working directory #Common data activity_labels=read.table("./activity_labels.txt",header=FALSE,col.names = c("ID","ActivityLabel"),stringsAsFactors = FALSE) #load activity_labels.txt file for activity labels features=read.table("./features.txt",stringsAsFactors = FALSE) #load features.txt file for column names xlabels<-features[,2] #load feature names into vector for column names of x_train data #Training data subject_train=read.table("./train/subject_train.txt",header=FALSE,col.names = "Subject") #load subject_train.txt file y_train=read.table("./train/y_train.txt",header=FALSE,col.names = "Activity") #load y_train.txt file x_train=read.table("./train/X_train.txt",header=FALSE,col.names = xlabels) #load x_train.txt file combinedTable_train<-cbind(subject_train,y_train,x_train) #create complete table with subject, activity and measurements combinedTable_train=merge(combinedTable_train,activity_labels,by.x="Activity",by.y="ID") #add activity labels by merging combinedTable_train$Source<-"Train" rm(list=c("y_train","x_train","subject_train")) #object not needed anymore are removed from memory #Test data subject_test=read.table("./test/subject_test.txt",header=FALSE,col.names = "Subject") #load subject_test.txt file y_test=read.table("./test/y_test.txt",header=FALSE,col.names = "Activity") #load y_test.txt file x_test=read.table("./test/X_test.txt",header=FALSE,col.names = xlabels) #load x_test.txt file combinedTable_test<-cbind(subject_test,y_test,x_test) #create complete table with subject, activity and measurements combinedTable_test=merge(combinedTable_test,activity_labels,by.x="Activity",by.y="ID") #add activity labels by merging combinedTable_test$Source<-"Test" rm(list=c("y_test","x_test","subject_test")) #object not needed anymore are removed from memory #Combined training and test data 
combinedTable=rbind(combinedTable_test,combinedTable_train) combinedTable=combinedTable[c(2,564,3:563)] #Resort columns selectedColumns<-combinedTable[,c(1:4,grep("mean|MEAN|std",colnames(combinedTable)))] #Select columns 1-3 and columns with either mean, Mean or std in the column name #Column means of data grouped by subject and activity GroupedColMeans<-ddply(selectedColumns, .(Subject, ActivityLabel), colwise(mean)) #group by subject and activity and calculate the mean for each remaining column rm(list=c("activity_labels","combinedTable","combinedTable_train","combinedTable_test","features","xlabels")) #object not needed anymore are removed from memory #Create output file write.table(GroupedColMeans, file="/home/sascha/Dokumente/Coursera/Getting and cleaning data/Course Project/Solution/tidy_dataset.txt", row.name=FALSE)
c7bfd1990b3b65fcb4a746a8bd5a53dfb81ce719
403189a2a53554f008180dc8e0395daac7d6a9c1
/individual_cutoff.R
604e9cb03ecf3ffbefb3512972f22c2079fd575f
[]
no_license
PeerChristensen/RT_analysis_SA
2edea510a27bbbdf0217e67d7d285d14636f2032
06dd1d9a56ddcf80f2eaad2c1f08fe3604a81264
refs/heads/master
2020-03-15T04:15:21.884706
2018-10-17T16:09:24
2018-10-17T16:09:24
131,961,016
1
0
null
null
null
null
UTF-8
R
false
false
723
r
individual_cutoff.R
# HOW TO SUBSET DATA WITH INDIVIDUAL RT CUT-OFFS ####### LOAD PACKAGES AND DATA ################################################### library(tidyverse) library(lme4) library(lmerTest) library(viridis) library(janitor) library(Rmisc) library(jtools) library(effects) library(sjPlot) library(ggridges) library(gridExtra) library(retimes) library(data.table) df = read_csv("RT_task2_CLEAN") limits = df %>% filter(accuracy == "1") %>% ddply(.(participant), summarise, m = mean(RT), limit = mean(RT) + (2.5*sd(RT))) new = df %>% filter(accuracy == "1") %>% group_by(participant) %>% inner_join(limits, by = "participant") #%>% #select(-Gender , -RTinv, - age, -RTlag, -logRT, -trialCount) nrow(df) nrow(new)
75ab746a65513f8372632a5f4bafb20148affbc5
68ac24961548233e5bf09fca52d24e277c0d52bd
/man/lcsm_data.Rd
3cb117fd45c5a1fb2764af00ddc8ce10f9d2f2c8
[ "MIT" ]
permissive
milanwiedemann/lcsm
437219c79431aa807ffd3f282c391875a228718d
501d0d242fb54d3ee9b83736972a2dff8164fd65
refs/heads/main
2023-03-18T14:23:49.335544
2023-02-25T23:20:25
2023-02-25T23:20:25
146,106,026
20
3
NOASSERTION
2023-02-04T19:41:04
2018-08-25T15:18:39
R
UTF-8
R
false
true
918
rd
lcsm_data.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{lcsm_data} \alias{lcsm_data} \title{Longitudinal dataset with repeated measures of two constructs} \format{ A longitudinal dataset in wide format: \itemize{ \item{id}{: ID variable, unique identifier for each person} \item{x1}{: x value at time point 1} \item{x2}{: x value at time point 2} \item{x3}{: x value at time point 3} \item{x4}{: x value at time point 4} \item{x5}{: x value at time point 5} \item{y1}{: y value at time point 1} \item{y2}{: y value at time point 2} \item{y3}{: y value at time point 3} \item{y4}{: y value at time point 4} \item{y5}{: y value at time point 5} } } \usage{ data(lcsm_data) } \description{ Example dataset with 5 repeated measures of two constructs to illustrate how the package works. } \examples{ # Load data into global environment data(lcsm_data) } \keyword{dataset}
d9eb59b42ba74f947afaf2fac351c78962e4309b
979ca3b56b2f358bf2866d14b233103e90a0db20
/run_analysis.R
8d1576e8a8321c118b9b5877c44165a8c7f9774d
[]
no_license
tbel/getdata-012-prj
beb0cfc6d78de00b3e733c8cb94cfe9ca4a76074
9a7220ac279f119f9447e1fa3b409d978bd2099a
refs/heads/master
2021-01-24T06:22:54.933473
2015-04-26T20:30:17
2015-04-26T20:30:17
32,673,265
0
0
null
null
null
null
UTF-8
R
false
false
2,143
r
run_analysis.R
# Reading raw data activity <- read.csv("dataset/activity_labels.txt", header=FALSE, col.names=c("aID", "activity"), sep=" ") features <- read.csv("dataset/features.txt", header=FALSE, col.names=c("fID", "feature"), sep=" ") s_train <- read.csv("dataset/train/subject_train.txt", header=FALSE, col.names=c("subject")) s_test <- read.csv("dataset/test/subject_test.txt", header=FALSE, col.names=c("subject")) train_act <- read.csv("dataset/train/Y_train.txt", header=FALSE, col.names=c("aID")) test_act <- read.csv("dataset/test/Y_test.txt", header=FALSE, col.names=c("aID")) train_data <- read.table("dataset/train/X_train.txt", heade=FALSE) test_data <- read.table("dataset/test/X_test.txt", heade=FALSE) ########################## # taking only means and stds library(dplyr) # features<-filter(features, (grepl("mean", features$feature) | grepl("std", features$feature)) & !grepl("meanFreq", features$feature)) features<-filter(features, (grepl("mean", features$feature) | grepl("std", features$feature) | grepl("Mean", features$feature))) # nrow(features) train_data <- select(train_data, features$fID) test_data <- select(test_data, features$fID) ########################## # denormalize (tidy) data<-rbind(train_data, test_data) subj <- rbind(s_train, s_test) activities <- rbind(train_act, test_act) # nrow(data) == nrow(train_data) + nrow(test_data) # nrow(subj) # nrow(activities) ########################## # proper names for s & a activities <- merge(activities, activity, by="aID", all = FALSE, sort=FALSE) # head(activities) colnames(data) <- features$feature data <- cbind(subj, activities) %>% select(c(1,3)) %>% cbind(data) # head(data) ########################## # avg for activity,subject #head(data) #names(data) aggdata<-aggregate(data[,c(-1,-2)], by=list(subject=data$subject, activity=data$activity), mean) aggdata<-arrange(aggdata, subject, activity) all_comb<-expand.grid(subject=sort(unique(aggdata$subject)), activity=sort(unique(aggdata$activity))) aggdata <- 
merge(all_comb, aggdata, by=c("subject","activity"),all=TRUE) write.table(aggdata, file="step5dataset.txt", row.name=FALSE)
785e9c75fb5b90696749f5fe856b143b51fc985a
c2c1c9e685d66b565df17498b4e14e83ceee89ab
/src/r-scripts/simulation-one.r
316f5c36fea150013acd39be8d137c37d5690eea
[]
no_license
crsl4/GAMuT
0fd298bf5f24921102644a97ee6e922a99e91fac
15e74e20eb2192f9e7e5eb359ebc02297d202d9a
refs/heads/master
2020-03-19T14:41:40.463971
2019-01-24T16:00:34
2019-01-24T16:00:34
136,635,514
0
0
null
null
null
null
UTF-8
R
false
false
13,939
r
simulation-one.r
library(MASS) library(corpcor) ##------------------------------------------------------------------------------ ## Variables: ##------------------------------------------------------------------------------ ## Past from Julia better maf = 0.25 traitcor = "med" nassoc = 1 npheno = 2 n_unrelated = 5000 n_variants = 500 causal_var = 0.01 ##------------------------------------------------------------------------------ ## Preliminary Functions: ##------------------------------------------------------------------------------ minorAlleleCountsBinomial = function(numtrios,MAF){ mom1 = rbinom(numtrios, 1, MAF) mom1[mom1==0]=2 ##to match cosi mom2 = rbinom(numtrios, 1, MAF) mom2[mom2==0]=2 dad1 = rbinom(numtrios, 1, MAF) dad1[dad1==0]=2 dad2 = rbinom(numtrios, 1, MAF) dad2[dad2==0]=2 eur = cbind(mom1,mom2,dad1,dad2) kids1 <- sample(c(1,2), numtrios, replace=TRUE) kids2 <- sample(c(3,4), numtrios, replace=TRUE) kids <- matrix(numeric(numtrios*2), nrow=numtrios) for(id in 1:numtrios){ kids[id,1] <- eur[id, kids1[id]] kids[id,2] <- eur[id, kids2[id]] } eur_kid <- (kids[,1] == 1) + (kids[,2] == 1) eur_mom <- (eur[,1] == 1) + (eur[,1] == 1) eur_dad <- (eur[,3] == 1) + (eur[,4] == 1) return( list(G_kid=eur_kid, G_mom=eur_mom, G_dad=eur_dad) ) } # builds a block matrix whose diagonals are the square matrices provided. # m1=matrix(runif(10*10),nrow=10,ncol=10) # m2=matrix(runif(5*5),nrow=5,ncol=5) # blockMatrix<-blockMatrixDiagonal(m1,m2,m2,m1) # or # blockMatrix<-blockMatrixDiagonal(list(m1,m2,m2,m1)) # C.Ladroue # http://chrisladroue.com/2011/04/block-diagonal-matrices-in-r/ blockMatrixDiagonal<-function(...){ matrixList<-list(...) 
if(is.list(matrixList[[1]])) matrixList<-matrixList[[1]] dimensions<-sapply(matrixList,FUN=function(x) dim(x)[1]) finalDimension<-sum(dimensions) finalMatrix<-matrix(0,nrow=finalDimension,ncol=finalDimension) index<-1 for(k in 1:length(dimensions)){ finalMatrix[index:(index+dimensions[k]-1),index:(index+dimensions[k]-1)]<-matrixList[[k]] index<-index+dimensions[k] } return(finalMatrix) } ## input only one matrix (and size of block) that will have the block diagonal ## replaced by identities blockMatrixAntiDiagonal<-function(M,n){ ntotal = dim(M)[1] if(ntotal %% n != 0) stop("Error: dimension of matrix M should be a multiple of n") numI = ntotal/n matrixList <- vector("list", numI) for(i in 1:numI) matrixList[[i]] = diag(n) dimensions<-sapply(matrixList,FUN=function(x) dim(x)[1]) index<-1 for(k in 1:length(dimensions)){ M[index:(index+dimensions[k]-1),index:(index+dimensions[k]-1)]<-matrixList[[k]] index<-index+dimensions[k] } return(M) } ## function to create the phenotype covariance matrix ## it takes as input the number of phenotypes: npheno, and the type of correlation: ## "none","low", "med", "high" ## "block" = 1-2, 3-4, "antiblock": only correlation btw phenotype matrices, not within createCovMatrix = function(npheno, traitcor){ if(traitcor == "block" || traitcor == "antiblock"){ if(npheno %% 2 != 0) stop("cannot do block covariance matrix if npheno%%2 != 0") } if(traitcor == "none"){ phencor_ll <- 0 phencor_ul <- 0 }else if (traitcor=="low") { phencor_ll <- 0 phencor_ul <- 0.3 } else { if (traitcor=="med" || traitcor == "block" || traitcor == "antiblock") { phencor_ll <- 0.3 phencor_ul <- 0.5 } else { if (traitcor=="high") { phencor_ll <- 0.5 phencor_ul <- 0.7 } } } if(npheno == 1){ mat = as.matrix(runif(1,min=phencor_ll,max=phencor_ul)) }else{ if(traitcor != 'block' && traitcor != 'antiblock'){ cor = runif((npheno*(npheno-1))/2,min=phencor_ll,max=phencor_ul) mat <- matrix(0, npheno,npheno) mat[lower.tri(mat, diag=FALSE)] <- cor mat = mat + t(mat) + diag(1, 
nrow=npheno) }else{ if(traitcor == 'block'){ m = npheno/2 cor1 = runif((m*(m-1))/2,min=phencor_ll,max=phencor_ul) mat1 <- matrix(0, m,m) mat1[lower.tri(mat1, diag=FALSE)] <- cor1 mat1 = mat1 + t(mat1) + diag(1, nrow=m) cor2 = runif((m*(m-1))/2,min=phencor_ll,max=phencor_ul) mat2 <- matrix(0, m,m) mat2[lower.tri(mat2, diag=FALSE)] <- cor2 mat2 = mat2 + t(mat2) + diag(1, nrow=m) mat = blockMatrixDiagonal(mat1,mat2) }else{ cor = runif((npheno*(npheno-1))/2,min=phencor_ll,max=phencor_ul) mat1 <- matrix(0, npheno,npheno) mat1[lower.tri(mat1, diag=FALSE)] <- cor mat1 = mat1 + t(mat1) + diag(1, nrow=npheno) mat = blockMatrixAntiDiagonal(mat1,npheno/2) } } } return(mat) } ## function to set up the parameters to simulate the phenotypes associated with genotypes ## it returns the beta matrix and the covariance matrix parameters4phenotypeSimulation = function(npheno, traitcor, causal.ind, nassoc, variant, MAF_unr){ if(npheno < nassoc) stop("Error: npheno<nassoc") MAF_C_unr<-MAF_unr[causal.ind] ## pairwise similarity randomly generated from unif(phencor_ll, phencor_ul) cov_unr= createCovMatrix(npheno, traitcor) ## beta matrix: npheno by num of causal variants betamat_unr <- matrix(0,nrow=npheno,ncol=length(causal.ind)) if(nassoc > 0){ hvec_unr<-rep(0.0,nassoc) for (i in 1:nassoc) { if (variant=="rare"){ betamat_unr[i,] <- (0.4 + rnorm(length(causal.ind), 0, 0.1))*abs(log(MAF_C_unr, base=10)) }else if (variant=="common") betamat_unr[i,] <- rep(log(1.5),length(causal.ind)) hvec_unr[i] <- sum(betamat_unr[i,]^2*2*MAF_C_unr*(1-MAF_C_unr)) } ## note: the first nassoc phenotypes are the ones that are associated with the genotype for (i in 1:nassoc) { for (ii in 1:nassoc) { if (i==ii){ cov_unr[i,ii] <- 1-hvec_unr[i] } else if(ii>i) { cov_unr[i,ii] <- cov_unr[i,ii]*((1-hvec_unr[i])^0.5)*((1-hvec_unr[ii])^0.5) cov_unr[ii,i] <- cov_unr[i,ii] } } } if(!is.positive.definite(cov_unr)) cov_unr <- make.positive.definite(cov_unr) } return( list(betamat=betamat_unr, cov=cov_unr) ) } 
simulatePhenotypes = function(npheno, traitcor, causal.ind, nassoc, variant, MAF, n_unrelated, G){ ## Creating parameters to simulate phenotype out2 = parameters4phenotypeSimulation(npheno, traitcor, causal.ind, nassoc, variant, MAF) betamat_unr = out2$betamat cov_unr = out2$cov ## print(betamat_unr) ## print(cov_unr) ## print(nrow(UNR_OBS[1,causal.ind])) ## print(ncol(UNR_OBS[1,causal.ind])) ## Actual phenotype simulation: P0_UNR <- matrix(numeric(n_unrelated*npheno), ncol=npheno) for(i in 1:n_unrelated) P0_UNR[i,] <- mvrnorm(1,betamat_unr %*% G[i,causal.ind], cov_unr) return( P0_UNR ) } ## Get Y1 and Y2 from phenotype matrix splitPhenotypeMatrix = function(P,nassoc1,nassoc2, npheno1,npheno2){ n = nrow(P) m = ncol(P) if(nassoc1>npheno1) stop("Error: nassoc1>npheno1") if(nassoc2>npheno2) stop("Error: nassoc2>npheno2") if(npheno1+npheno2 != m) stop("Error: npheno1+npheno2 != npheno") if(nassoc1 == 0){ Y2 = P[,1:npheno2] Y1 = P[,(npheno2+1):m] }else if(nassoc2 == 0){ Y1 = P[,1:npheno1] Y2 = P[,(npheno1+1):m] } else { Y1 = P[,c(1:nassoc1,(nassoc1+nassoc2+1):(nassoc1+nassoc2+(npheno1-nassoc1)))] Y2 = P[,c((nassoc1+1):(nassoc1+nassoc2),(nassoc1+nassoc2+npheno1-nassoc1+1):m)] } return(list(Y1=Y1,Y2=Y2)) } ## Each row in Y1 is simulated as normal with the row of Y2 as mean (assuming beta=1), and cov=I simulateFullyMediatedPhenotype = function(Y2,npheno1,n_unrelated,traitcor){ Y1 <- matrix(numeric(npheno1*n_unrelated), ncol=npheno1) cov = createCovMatrix(npheno1, traitcor) for(i in 1:n_unrelated){ if( ncol(Y2) <= npheno1 ){ mu = rep(0,npheno1) for(j in 1:ncol(Y2)) mu[j] = Y2[i,j] }else { mu = Y2[i,1:npheno1] } Y1[i,] <- mvrnorm(1,mu, cov) } return( Y1 ) } ## Each row in Y1 is simulated as function of Y2 and G simulatePartiallyMediatedPhenotype = function(Y2,npheno1,n_unrelated, traitcor,causal.ind,nassoc1,variant,MAF_unr,UNR_OBS){ ## Creating parameters to simulate phenotype out2 = parameters4phenotypeSimulation(npheno1, traitcor, causal.ind, nassoc1, variant, MAF_unr) 
betamat_unr = out2$betamat cov_unr = out2$cov ## Actual phenotype simulation: Y1 <- matrix(numeric(n_unrelated*npheno1), ncol=npheno1) for(i in 1:n_unrelated){ if( ncol(Y2) <= npheno1 ){ mu = rep(0,npheno1) for(j in 1:ncol(Y2)) mu[j] = Y2[i,j] }else { mu = Y2[i,1:npheno1] } Y1[i,] <- mvrnorm(1,betamat_unr %*% UNR_OBS[i,causal.ind]+mu, cov_unr) } return( Y1 ) } simulatePhenotypesMediation = function(npheno1, npheno2,traitcor, nassoc1, nassoc2, causal.ind, MAF_unr, n_unrelated, variant,UNR_OBS, approach=1, numpcs=1, ignoreZ=FALSE){ if(approach == 1){ P0_UNR = simulatePhenotypes(npheno1+npheno2, traitcor, causal.ind, nassoc1+nassoc2, variant, MAF_unr, n_unrelated, UNR_OBS) ## Now, we want to split this matrix into two to test for mediation out3 = splitPhenotypeMatrix(P0_UNR,nassoc1, nassoc2,npheno1,npheno2) Y1 = out3$Y1 Y2 = out3$Y2 }else if(approach == 20){ ##approach2: no mediation Y1 = simulatePhenotypes(npheno1, traitcor, causal.ind, nassoc1, variant, MAF_unr, n_unrelated, UNR_OBS) Y2 = simulatePhenotypes(npheno2, traitcor, causal.ind, nassoc2, variant, MAF_unr, n_unrelated, UNR_OBS) }else if(approach == 2 && nassoc1 == 0){ ##approach2: full mediation Y2 = simulatePhenotypes(npheno2, traitcor, causal.ind, nassoc2, variant, MAF_unr, n_unrelated, UNR_OBS) Y1 = simulateFullyMediatedPhenotype(Y2, npheno1, n_unrelated, traitcor) }else if(approach == 2 && nassoc1 > 0){ ##approach2: partial mediation Y2 = simulatePhenotypes(npheno2, traitcor, causal.ind, nassoc2, variant, MAF_unr, n_unrelated, UNR_OBS) Y1 = simulatePartiallyMediatedPhenotype(Y2,npheno1,n_unrelated, traitcor,causal.ind,nassoc1,variant,MAF_unr,UNR_OBS) }else if(approach == 3){ Z = as.matrix(rnorm(n_unrelated)) if(nassoc2 == 0){ ##approach3: no association with G if(nassoc1 != 0) stop("nassoc1 cannot be != 0 if nassoc2 is ==0") P2 = simulateFullyMediatedPhenotype(Z, npheno2, n_unrelated, traitcor) P1 = simulateFullyMediatedPhenotype(Z, npheno1, n_unrelated, traitcor) }else if(nassoc2 > 0){ ##approach3: Y2 
associated with G P2 = simulatePartiallyMediatedPhenotype(Z,npheno2,n_unrelated, traitcor,causal.ind,nassoc2,variant,MAF_unr,UNR_OBS) if(nassoc1 == 0){ P1 = simulateFullyMediatedPhenotype(Z, npheno1, n_unrelated, traitcor) }else if(nassoc1 > 0){ P1 = simulatePartiallyMediatedPhenotype(Z,npheno1,n_unrelated, traitcor,causal.ind,nassoc1,variant,MAF_unr,UNR_OBS) }else{ stop("nassoc1 should be >=0") } } if(ignoreZ){ Y1 = P1 Y2 = P2 }else{ ## PCA --------------------------------------- P = cbind(P1,P2) pc = prcomp(P, scale. = TRUE, center = TRUE) prop = pc$sdev[1]/sum(pc$sdev) print(paste("Proportion of variance explained by PC1:",prop)) Z = as.matrix(pc$x[,1:numpcs]) ## ------------------------------------------- Y2 = matrix(numeric(nrow(P2)*ncol(P2)), ncol=ncol(P2)) for(i in 1:ncol(P2)){ f = lm(P2[,i]~Z) Y2[,i] = residuals(f) } Y1 = matrix(numeric(nrow(P1)*ncol(P1)), ncol=ncol(P1)) for(i in 1:ncol(P1)){ f = lm(P1[,i]~Z) Y1[,i] = residuals(f) } } } return( list(Y1=Y1, Y2=Y2)) } ##------------------------------------------------------------------------------ ## Simulating genotypes: ##------------------------------------------------------------------------------ UNR_OBS = matrix(rep(NA,n_variants*n_unrelated),nrow=n_unrelated) for(i in 1:n_variants){ out = minorAlleleCountsBinomial(n_unrelated, maf) UNR_OBS[,i]= as.matrix(out$G_mom) } n_causal = floor(causal_var*n_variants) causal.ind = sample(1:n_variants,n_causal,replace=FALSE) ## determine the MAF of each variant in the sample MAF_unr = colMeans(UNR_OBS)/2 beta_weight = dbeta(MAF,1,25)/dbeta(0,1,25) # assume beta-distribution weights ## Note: one can use other weight functions by simply recoding beta_weight as desired G0 = as.matrix(UNR_OBS)%*%diag(beta_weight) # Weighted rare variants G = as.matrix(scale(G0,center=T,scale=F) ## we set "variant" to use the same functions as with cosi if(maf > 0.05){ variant = 'common' }else{ variant = 'rare' } X = UNR_OBS 
##------------------------------------------------------------------------------ ## Simulating phenotypes: ##------------------------------------------------------------------------------ Y = simulatePhenotypes(npheno, traitcor, causal.ind, nassoc, variant, MAF_unr, n_unrelated, X)
f79dcac6d4b401d9219c032fe8f730f91e0d201b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Rdimtools/examples/nonlinear_CRCA.Rd.R
059f0b63d8456f6a4f9c04a99404b8eb104e4a7c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
544
r
nonlinear_CRCA.Rd.R
library(Rdimtools) ### Name: do.crca ### Title: Curvilinear Component Analysis ### Aliases: do.crca ### ** Examples ## Not run: ##D ## generate sample data ##D X <- aux.gensamples(n=200) ##D ##D ## different initial learning rates ##D out1 <- do.crca(X,alpha=1) ##D out2 <- do.crca(X,alpha=5) ##D out3 <- do.crca(X,alpha=10) ##D ##D ## visualize ##D par(mfrow=c(1,3)) ##D plot(out1$Y[,1],out1$Y[,2],main="alpha=1.0") ##D plot(out2$Y[,1],out2$Y[,2],main="alpha=5.0") ##D plot(out3$Y[,1],out3$Y[,2],main="alpha=10.0") ## End(Not run)
a1e67b6ab8f7fd437de528d543a71bf6fa6edd48
952ede43b43ee71b3b48f700c26adc0b5ee3ba45
/plot3.R
2b1f725e23ae0442b938d74fc8f9d3e873017676
[]
no_license
lisha7677/ExData_Plotting1
b884a9f33434662d2b65367916475689cad4bcaf
f8092274cae970e8fa748b491024214772c51536
refs/heads/master
2020-06-29T18:40:51.355942
2016-11-22T04:04:40
2016-11-22T04:04:40
74,418,073
0
0
null
2016-11-22T00:25:30
2016-11-22T00:25:30
null
UTF-8
R
false
false
1,189
r
plot3.R
#load data.table package to use fast file reader function fread() library(data.table) #download and unzip file download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "exdata.zip") unzip("exdata.zip") #read 2075259 rows and 9 columns quickly, subset a dataframe of 2880 rows and 9 columns with two dates power<-fread("household_power_consumption.txt", header = TRUE, na.strings = "?", stringsAsFactors = FALSE, data.table = FALSE) powerData<-power[power$Date %in% c("1/2/2007","2/2/2007"),] #plot sub_metering_1 according to different times of two days plot(dateTime, powerData$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering") #add line graph for sub_metering_2 to existing plot lines(dateTime, powerData$Sub_metering_2, col="red") #add line graph for sub_metering_3 to existing plot lines(dateTime, powerData$Sub_metering_3, col="blue") #add legend to graph legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c("black","red","blue"), lty = 1, cex = 0.75) #send plot to graphic device png dev.copy(png, file="plot3.png", height=480, width=480) dev.off()
5cf660f4f9c0e726975bde74615dce374c569a70
8a5829091098d120bf520b2345141ae64808694a
/man/list_tsvs.Rd
ec679e53d48442c129d64c8ad2cd3902a142b092
[]
no_license
dennismalandro/remisc
97e7bf175763bac29623cc4abbd2c0bc61f33ab8
65f21f82be01d7f6e901a8c6c4e78857253c6d30
refs/heads/master
2021-01-10T11:55:46.620324
2016-11-24T15:46:53
2016-11-24T15:46:53
44,412,174
0
0
null
null
null
null
UTF-8
R
false
true
638
rd
list_tsvs.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/remisc.R \name{list_tsvs} \alias{list_csvs} \alias{list_tsvs} \alias{list_txts} \alias{list_vbrs} \title{Read common file types} \usage{ list_tsvs(path = ".", full.names = FALSE, recursive = FALSE) list_csvs(path = ".", full.names = FALSE, recursive = FALSE) list_txts(path = ".", full.names = FALSE, recursive = FALSE) list_vbrs(path = ".", full.names = FALSE, recursive = FALSE) } \arguments{ \item{path}{Path to files} \item{full.names}{Return full file names?} \item{recursive}{Recurse into sub-directories?} } \description{ Read common file types }
7d0015ffc97b16c3acd6654369ba9a3eefcd323e
6154f648b34e1dda486345298c34a60754b0e26d
/R slides/4_MVA/code for r-course.R
ab33a6c8edbb3778e13d3c1aa6b8da1012b43769
[]
no_license
MichaelBelias/My-Complete-Book-In-R
c2715604283f0d0411eca431d03e22fe0374fbda
cb2968352557d2999d4319c9bede6718d4406543
refs/heads/master
2021-01-18T22:47:23.793744
2020-03-12T20:38:03
2020-03-12T20:38:03
87,069,215
1
0
null
null
null
null
UTF-8
R
false
false
9,269
r
code for r-course.R
# mentioned in this tutorial RatAUEB <- c("HDclassif", "corrgram", "cluster", "mclust","FactMixtAnalysis","nnet","class","tree") # Install the packages install.packages(RatAUEB, repos = "http://cran.rstudio.com/") #packages installed # we need now to call them library(corrgram) library(HDclassif) library(cluster) library(mclust) library(FactMixtAnalysis) library(nnet) library(class) library(tree) ###### START #### distances in R dist1<-dist(winedata[,2:4]) dist2<-daisy(winedata[,2:4]) x0<-winedata[,-1] dM2 <- as.dist(apply(x0, 1, function(i) mahalanobis(x0, i, cov = cov(x0)))) corrgram(winedata[,-1]) pairs(winedata[,-1]) pairs(winedata[,-1], col=winedata[,1]) hclust(dist(winedata[,-1]),method="complete") hc1<-hclust(dist(winedata[,-1]),method="complete") summary(hc1) plot(hc1$height) ### run with other linkage hc2<-hclust(dist(winedata[,-1]),method="ward.D") hc3<-hclust(dist(winedata[,-1]),method="single") hc4<-hclust(dist(winedata[,-1]),method="average") #### create clasifications clas1<-cutree(hc1, k=2:5) clas2<-cutree(hc2, k=2:5) clas3<-cutree(hc3, k=2:5) clas4<-cutree(hc4, k=2:5) ##### random noise data ######## fakedata<-mvrnorm(100,mu=rep(0,5), Sigma=diag(1,5)) fake<- hclust(dist(fakedata),method="ward.D") plot(fake) plot(fake$height) ####replicate fake data fakedata<-mvrnorm(100,mu=rep(0,5), Sigma=diag(1,5)) fake<- hclust(dist(fakedata),method="ward.D") points(fake$height) ##### data with two clusters fakedata<-mvrnorm(100,mu=rep(0,5), Sigma=diag(1,5)) fakedata<-rbind(fakedata,mvrnorm(100,mu=rep(5,5), Sigma=diag(1,5))) fake<- hclust(dist(fakedata),method="ward.D") plot(fake) plot(fake$height) ##################################################### plot(silhouette(clas1[,1], dist(winedata[,-1]))) wine<-as.matrix(winedata[,-1]) m <- manova(wine~clas1[,2]) summary(m,test="Wilks") mywilks<- summary(m,test="Wilks")$stats[1,2] ###################################################### adjustedRandIndex(clas1[,2],clas2[,2]) indices<-NULL for ( i in 3:28) { usevar<- 
winedata[,2:i] myclust<-hclust(dist(usevar),method="complete") myclass<- cutree(myclust,3) indices<-c(indices,adjustedRandIndex(myclass,winedata[,1])) } ############################################################# ### K-MEANS ############################################################# km1<- kmeans(wine, 3) km1$cluster km1$centers km1$totss km1$withinss km1$tot.withinss km1$betweenss km1$size km1$iter km1$ifault table(km1$cluster,clas1[,2]) km2<- kmeans(wine, 3, algorithm="Lloyd") table(km1$cluster, km2$cluster) ################################################################# ### mclust ################################################################# wine<-winedata[,2:7] mc1<-Mclust(wine, G=2:5, modelNames=c("EII", "VII", "EEI", "EVI", "VEI", "VVI")) mc1$G #The optimal number of mixture components. mc1$BIC #All BIC values. mc1$bic #Optimal BIC value. mc1$loglik #The loglikelihood corresponding to the optimal BIC. mc1$df #The number of estimated parameters. mc1$parameters #A list with the following components: mc1$pro #A vector whose kth component is the mixing proportion for the kth component mc1$mean #The mean for each component. mc1$variance #A list of variance parameters for the model. mc1$z #posterior probabilities. mc1$classification #map(z): The classification corresponding to z. mc1$uncertainty #The uncertainty associated with the classification. plot(mc1) summary(mc1) #### FActore analyzers mymfa<-fma(wine, 3, 2) mymfa$H #The estimated factor loading matrix. mymfa$lik #The log-likelihood computed at each iteration of the EM algorithm. mymfa$w #A matrix with the estimated weights of the mixture. mymfa$Beta # estimated component means of the mixture. mymfa$phi # coefficients of the covariates mymfa$sigma #An array of dimension k x r x r which contains the estimated component covariance mymfa$psi #The noise diagonal variance matrix. mymfa$index #The allocation vector. mymfa$bic #The BIC value. mymfa$aic #The AIC value. 
mymfa$elapsed #Computational time in seconds. ##### how many parameters we need to estimate? mymfaall<-fma(winedata[,2:14], 4, 2) ############################################################### ncol <- 304 nrow <- 268 ##pict <- matrix(scan("c:\\paulia.rgb"), ncol=3, byrow=T) row<-rep(1:nrow,each=ncol) column<-rep(1:ncol,nrow) pixels <- which(apply(pict, 1, function(x) any(x)>0 & all(x<255))) npixels <- length(pixels) metr<-0 plot(c(0,nrow),c(0,ncol),type="n",xlab="",ylab="") for (i in 1:nrow) { for (j in 1:ncol){ metr<-metr+1 points(i,j,col=rgb(pict[metr,1]/255,pict[metr,2]/255,pict[metr,3]/255)) }} plot(c(0,nrow),c(0,ncol),type="n",xlab="",ylab="",axes=FALSE) points(row[pixels],column[pixels],col=rgb(pict[pixels,1]/255, pict[pixels,2]/255,pict[pixels,3]/255)) mydata<-cbind(pict[pixels,],row[pixels],column[pixels]) ### Change the next lines as appropriate setsize <- 2000 trset <- sample(npixels, setsize, replace=FALSE) minclus <- 3 maxclus <- 15 Modelnames<- c("EII","VII","EEI","VEI","EVI","VVI","EEE","EEV","VEV","VVV") ### Go! 
X <- pict[pixels[trset], ] bicvals <- mclustBIC(X, G=minclus:maxclus, modelNames = Modelnames) sumry <- summary(bicvals, X) znew <- do.call("estep", c(list(data=pict[pixels,]), sumry)) classif <- map(znew$z) newcolors<-cbind(tapply(pict[pixels,1],classif,mean), tapply(pict[pixels,2],classif,mean), tapply(pict[pixels,3],classif,mean) ) plot(c(0,nrow),c(0,ncol),type="n",xlab="",ylab="",axes=FALSE) points(row[pixels],column[pixels],col=rgb(newcolors[classif,1]/255, newcolors[classif,2]/255,newcolors[classif,3]/255)) ################################################################################## wines<-as.data.frame(winedata[,c(1,2,15,28)]) ########### STEP 2: LDA m1<-lda(Type~., data=wines) m2<-predict(m1) table(wines[,1],m2$class) m3<-lda(Type~., data=wines, CV=TRUE) table(wines[,1],m3$class) par(mfrow=c(3,1)) plot(m3$posterior[,1], col=winedata[,1]) plot(m3$posterior[,2], col=winedata[,1]) plot(m3$posterior[,3], col=winedata[,1]) plot(m2$x,col=winedata[,1]) plot(m1) ########### QDA mq1<-qda(Type~., data=wines) mq2<-predict(mq1) table(wines$Type,mq2$class) ########################### multinomial logistic mult <- multinom(Type ~ ., wines) summary(mult) plot(mult$fitted,m2$posterior) mult.class<- apply(mult$fitted,1,which.max) table(mult.class,m2$class) ############################ knn library(class) km1<-knn(winedata[,2:7],winedata[,2:7], cl=winedata[,1],k=4) table(winedata[,1],km1) km2<-knn(winedata[,2:7],winedata[,2:7], cl=winedata[,1],k=7) table(winedata[,1],km2) knn.cv(winedata[,2:7], cl=winedata[,1],k=7) ######################## tree wines<-as.data.frame(winedata[,1:15]) fit1<-tree(as.factor(Type)~.,data=wines) fit1$frame fit1$where table(wines$Type, predict(fit1,type='class')) plot(fit1) summary(fit1) text(fit1) fit2<-tree(as.factor(Type)~.,data=wines, split="gini") plot(fit2) text(fit2) predict(fit1,type='vector') predict(fit1,type='class') predict(fit1,type='tree') ##################################################################### ### variable selection 
using the diabetes data ### the variables are ### ### Number of times pregnant ### Plasma glucose concentration a 2 hours in an oral glucose tolerance test ### Diastolic blood pressure (mm Hg) ### Triceps skin fold thickness (mm) ### 2-Hour serum insulin (mu U/ml) ### Body mass index (weight in kg/(height in m)^2) ### Diabetes pedigree function ### Age (years) ### Class variable (1 or 2) re<-NULL for (k in 1:20) { print(k) t<-NULL for (i in 1:100) { te<- sample(1:768,k) train <- diabetes[-te,2:4] test <- diabetes[te,2:4] cl <- factor(diabetes[-te,9]) z <- lda(train, cl) pr<- predict(z, test)$class t<- c(t, sum(diabetes[te,9] == pr) /dim(test)[1]) } re<-c(re,mean(t)) } ########################################################### ### ### k-fold cross validation ### ############################################################ deiktes<-sample(1:768) variab<-2:8 re<-NULL for (k in c(1,2,3,4,6,8,12,16,24)) { print(k) omades<- 768/k t<-NULL for (i in 1:omades) { te<- deiktes[ ((i-1)*k+1):(i*k)] train <- diabetes[-te,variab] test <- diabetes[te,variab] cl <- factor(diabetes[-te,9]) z <- qda(train, cl) pr<- predict(z, test)$class t<- c(t, sum(diabetes[te,9] == pr) /dim(test)[1]) } re<-c(re,mean(t)) } ########################################################### ### ### variable selction using WILKS LAMBDA ### (we can use minor changes to use accuracy ### ############################################################ allcomb<-combn(2:8,7) tw<-NULL for (i in 1:dim(allcomb)[2]) { model<-manova(as.matrix(diabetes[,allcomb[,i]])~diabetes[,9]) tw<-c(tw,summary(model,test="Wilks")$stats[1,2]) } cbind(t(allcomb),tw) cbind(t(allcomb),tw)[which.min(tw),]
59aa9b8d9d41f0f287001bd9e6b4b2ebeef3b19b
b97454dda53628e218771a0324f7ab8e8de08d3a
/R/6_Export_to_DF.R
d0cc22267024fe5adc30b6aaf3a11b0d2561e77e
[]
no_license
camilodlt/ML_Gutenberg
1e137bd4b87fbc5fac8d0657823e9f8eb93e3eec
67e3bbc89aa3cd19871f9914fe4c6a61eaba60bb
refs/heads/main
2023-07-04T17:03:51.682137
2021-08-23T10:41:39
2021-08-23T10:42:26
374,972,247
0
0
null
null
null
null
UTF-8
R
false
false
3,405
r
6_Export_to_DF.R
#################################################################
## Step 5: Create synthetic mistakes                           ##
#################################################################
# Purpose: read the disk.frame chunks produced by the previous
# pipeline step (synthetic spelling mistakes paired with their
# ground truth), load them into memory in parallel, and export them
# as plain txt files (full set + 1000-row sample) for ML training.

######
# One input directory per mistake-generation strategy, plus a
# matching human-readable label used to name the output folder.
PATHS <- c("data_working/5_ML_formatted_synthetic", "data_working/5_unbias_constant_synthetic","data_working/5_unbias_multiply_synthetic")
SOURCES <- c("Normal", "Constant", "Unbiased")
#####
# Project helpers: `logger`, `info`, `banner`, `boxup` come from these.
source("R/common.R")
source("R/functions_synthetic_mistakes.R")
# Silence furrr/future's warning about unreliable parallel RNG.
options(future.rng.onMisuse="ignore")
# Caller
step <- "Step 5: Read synthetic mistakes"
# Purpose
explanations <- " # Mistakes are now created # This script reads them and binds them into a txt file "
print(banner(step))
print(boxup(explanations, centre = F))

# DIRECTORY FN ------
# Ensure `path` exists as a fresh (empty) directory.
# Reads the global `override` flag defined below:
#   - directory exists and override is FALSE  -> warn and quit;
#   - otherwise                               -> wipe and recreate it.
# NOTE(review): `&`/`|` are vectorised; with scalar flags `&&`/`||`
# would be conventional here (behavior unchanged for scalars).
# NOTE(review): `quit(save="ask")` only prompts in interactive
# sessions -- confirm intended behavior for batch runs.
# NOTE(review): typo "ALDEADY" is in the runtime message; left as-is.
dir_check<- function(path){
  info(logger, "Checking directory")
  if (dir.exists(path) & override==FALSE ){ # Exists & not to override
    warning("DIRECTORY ALDEADY EXIST")
    quit(save="ask")
  } else if (!dir.exists(path) | (dir.exists(path) & isTRUE(override))){ # does not exist OR exist & override
    unlink(path,recursive = TRUE)
    dir.create(path,showWarnings = FALSE,recursive = TRUE)
    info(logger, "Directory created")
  }
}

# LIBRARIES ------
suppressMessages(library(dplyr,quietly = TRUE))
suppressMessages(library(disk.frame,quietly = TRUE))
suppressMessages(library(purrr,quietly = TRUE))
suppressMessages(library(furrr,quietly = TRUE))
suppressMessages(library(pryr,quietly = TRUE))

# HYPER ------
# `override` = TRUE lets dir_check wipe an existing output directory.
override<- TRUE
source_choice<- 1 # 1-3
PATH<- PATHS[source_choice] # 1 - 3
SOURCE <- SOURCES[source_choice]

##---------------------------------------------------------------
## Apply synthethic mistakes -
##---------------------------------------------------------------
info(logger, "Reading disk.frames")

# FILES ------
df_no_touched <- disk.frame(PATH)

# INFORMATION ------
info(logger, "Print information about disk.frame")
print(df_no_touched)

# LOAD TO MEMORY AND CONVERT TO DF ------
# Read every chunk of the disk.frame in parallel (6 workers) and
# row-bind them into a single in-memory data.frame.
info(logger, "LOAD TO MEMORY")
n_chunk<- seq(nchunks(df_no_touched))
plan(multisession,workers = 6)
Text_data <-furrr::future_map_dfr(n_chunk, function(x){ as.data.frame(get_chunk(df_no_touched, x))})
# Sanity stats. Presumably `eq` flags rows where Truth == Mistake,
# so 1 - mean(eq) is the fraction of rows carrying a mistake --
# TODO confirm against functions_synthetic_mistakes.R.
cat("NUMBER OF ROWS:", nrow(Text_data),"\n")
cat("SIZE OF DF IN MEMORY:", object_size(Text_data)/(1024*1024),"\n") # bytes -> MiB
cat("% OF MISTAKES IN LINES:", round(1-mean(Text_data$eq),3),"\n")

# SAVING -----
info(logger, "SAVING")
base_path<- paste0(getwd(),"/data_output/",SOURCE)
dir_check(base_path) # Create, check or override
message("Saving to", base_path)
# Full training file: every Truth/Mistake pair, ", ," as separator.
# write.csv2(Text_data[,c("Truth","Mistake")], # Just Truth and Mistake
#            file=paste0(base_path,"/READY_ML.csv"),
#            row.names = FALSE, quote= FALSE)
write.table(Text_data[,c("Truth","Mistake")], # Just Truth and Mistake
            file=paste0(base_path,"/READY_ML.txt"),
            sep=", ,", quote=FALSE,
            row.names = FALSE, col.names = TRUE)
# Write sample (first 1000 rows only, same format).
# write.csv2(Text_data[1:1000,c("Truth","Mistake")], # Just Truth and Mistake
#            file=paste0(base_path,"/Sample_ML.csv"),
#            row.names = FALSE, quote= FALSE)
write.table(Text_data[1:1000,c("Truth","Mistake")], # Just Truth and Mistake
            file=paste0(base_path,"/Sample_ML.txt"),
            sep=", ,", quote=FALSE,
            row.names = FALSE, col.names = TRUE)
cat("FINISHED")
6db4d1df220ff8c9d4b388c62a33697b2abac15c
fabd46df1d09027c5f61000cf89266ee4a68c447
/plot3.R
bd395160288bb6d6d85e170f62f954d12e9c0471
[]
no_license
hhulme/ExData_Plotting1
2315425bdc09847371602b4a9a9424a845902a5b
0ac5154bb1149a9d1db27c628274cb8892b1617e
refs/heads/master
2021-01-21T06:00:55.466722
2014-11-07T23:38:05
2014-11-07T23:38:05
null
0
0
null
null
null
null
UTF-8
R
false
false
1,158
r
plot3.R
# Exploratory Data Analysis course -- Plot 3.
# Downloads the UCI "household power consumption" data set, keeps the
# two days 2007-02-01 and 2007-02-02, and plots the three sub-metering
# series against time into plot3.png.

# Download the zip to a temp file and read the txt inside it.
# Fields are ";"-separated and "?" marks missing values.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
data <- read.table(unz(temp, "household_power_consumption.txt"), sep = ";", header=TRUE, na.strings="?", stringsAsFactors = FALSE)
unlink(temp)

# Quick sanity checks; observed results kept as comments.
head(data)
head(data$Date)
dim(data)
#[1] 2075259 9
sum(data$Date == "1/2/2007")
#[1] 1440
sum(data$Date == "2/2/2007")
#[1] 1440

# Keep only the two days of interest (dates are d/m/Y strings).
data1 <-data[data$Date == "1/2/2007",]
data2 <-data[data$Date == "2/2/2007",]
data3 <- rbind(data1, data2)
data3$Global_active_power<-as.numeric(data3$Global_active_power)

# Build proper date and datetime columns with lubridate.
library(lubridate)
data3$DateLub <- dmy(data3$Date)
data3$DateTimeLub <-dmy_hms(paste(data3$Date, data3$Time))

# Overlay the three sub-metering series as lines into a 480x480 PNG.
png(filename = "plot3.png", width = 480, height = 480)
plot(data3$DateTimeLub, data3$Sub_metering_1, type="l",xlab ="", ylab = "Energy sub metering")
points(data3$DateTimeLub, data3$Sub_metering_2, type="l", col="red")
points(data3$DateTimeLub, data3$Sub_metering_3, type="l", col="blue")
legend("topright", col = c("black", "red", "blue"), lty=1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
6d5c508d70e7b85409aaeab50b1adc77313b4e69
1f092c22a5746af10ce574af15002d53881c6ef7
/man/getSetOfSchemes.Rd
8a16a8b7a2b9f3de0c89fc32dffaf87300c99d26
[]
no_license
cran/microsamplingDesign
50ce4ca2e1439049c20733194c5962e0e3b696c8
49a02faf102dfc9abef4d34bbdd7041a251f64f8
refs/heads/master
2021-10-27T09:12:27.351890
2021-10-13T12:52:04
2021-10-13T12:52:04
131,901,288
0
0
null
null
null
null
UTF-8
R
false
true
3,935
rd
getSetOfSchemes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/schemeGenerator.R \name{getSetOfSchemes} \alias{getSetOfSchemes} \title{Generate a \code{\link{SetOfSchemes-class}} object of speficified dimensions ( subjects, observations per t) for a given set of time points which meets user specified constraints} \usage{ getSetOfSchemes(minNSubjects, maxNSubjects, minObsPerSubject, maxObsPerSubject, timePoints, constraints = NULL, maxRepetitionIndSchemes = 1, maxNumberOfSchemesBeforeChecks = 10^5, returnNSchemesBeforeConstraints = FALSE) } \arguments{ \item{minNSubjects}{numeric, the mimimum number of subjects per scheme} \item{maxNSubjects}{numeric, the maximum number of subjects per scheme} \item{minObsPerSubject}{numeric, the mimimum number of sampling occasions per subject} \item{maxObsPerSubject}{numeric, the maximum number of sampling occasions per subject} \item{timePoints}{numeric vector of time points larger then zero, at which subject can be sampled} \item{constraints}{data.frame specifying constraints the scheme should meet. with columns: \itemize{ \item check: identifier of the function to perform the check \item level: the level at wich the check is applied: either at the subject level or scheme level \item value: input value used by the check function } (a user can add constraint functions following naming convention \code{check_[level]_[check]} see examples: ( \code{\link{check_scheme_minObsPerTimePoint}} and \code{\link{check_subject_maxConsecSamples}}) ) remark: number of subjects per scheme or number of observations per scheme should not be specified in \code{constraints}} \item{maxRepetitionIndSchemes}{the maximum number of times an individual subject scheme can be repeated, defaults to 1} \item{maxNumberOfSchemesBeforeChecks}{the maximum number of schemes to consider before applying scheme constraints, to avoid to long processing and using up memory. 
defaults to 10^5} \item{returnNSchemesBeforeConstraints}{if \code{TRUE} return only number of schemes before checking constraints instead of the schemes themselves, defaults to \code{FALSE}} } \description{ Generate a \code{\link{SetOfSchemes-class}} object of speficified dimensions ( subjects, observations per t) for a given set of time points which meets user specified constraints } \note{ keep number of subjects , range of number of subjects and observations per subject and number of timep points restricted to avoid a large number of potential schemes slowing down computation and increasing memory usage only schemes with minimal one observation per subject are contained even if not specified in constraints } \examples{ timePoints <- c( 1.2 , 1.3 , 2, 5 ) constraints <- getConstraintsExample() ex1 <- getSetOfSchemes( minNSubjects = 4 , maxNSubjects = 4 , minObsPerSubject = 3 , maxObsPerSubject = 3 , timePoints , constraints ) ex2 <- getSetOfSchemes( minNSubjects = 4 , maxNSubjects = 4 , minObsPerSubject = 3 , maxObsPerSubject = 3 , timePoints , constraints , maxRepetitionIndSchemes = 1 ) ex3 <- getSetOfSchemes( minNSubjects = 4 , maxNSubjects = 4 , minObsPerSubject = 2 , maxObsPerSubject = 3 , timePoints , constraints , maxRepetitionIndSchemes = 1 ) ex4 <- getSetOfSchemes( minNSubjects = 2 , maxNSubjects = 5 , minObsPerSubject = 2 , maxObsPerSubject = 3 , timePoints , constraints , maxRepetitionIndSchemes = 1 ) ex5 <- getSetOfSchemes( minNSubjects = 2 , maxNSubjects = 5 , minObsPerSubject = 2 , maxObsPerSubject = 3 , timePoints , maxRepetitionIndSchemes = 2 ) \dontrun{ # this should trow an error (to many combinations required ) ex6 <- getSetOfSchemes( minNSubjects = 2 , maxNSubjects = 5 , minObsPerSubject = 2 , maxObsPerSubject = 3 , timePoints , maxRepetitionIndSchemes = 2 , maxNumberOfSchemesBeforeChecks = 1000 ) } }
59fda72e1304d1ec0a2d75d9a70f1706925f3b76
f9481740429df0426c353d017a207b16e20394b9
/Statistics-With-R-Specialization/Capstone/week 5/week5quiz.R
e955d4ff024ab0388f077c3b217d637e9fe31283
[]
no_license
ashutoshtiwari13/Statistical-Learning-Hub
524accc79c131e1db0de74d5ef226d00aaa06e43
9c26772fbb843b26b240760cfa0a8412a6c847e4
refs/heads/master
2022-11-22T22:14:31.400391
2020-07-13T17:44:46
2020-07-13T17:44:46
255,832,052
0
0
null
null
null
null
UTF-8
R
false
false
3,533
r
week5quiz.R
# Statistics with R Capstone -- week 5 quiz worksheet.
# Assumes `ames_train` (Ames housing training data) is loaded, plus
# MASS (for stepAIC) and dplyr -- TODO confirm against course setup.
# FIXES applied to the original console transcript:
#   - Q2: missing closing parenthesis on the first summary(lm(...)).
#   - Q3/Q7: stray console continuation "+" after %>% removed (it made
#     mutate() a bare unary-plus call with no data argument).
#   - Q3: the model fit was missing and the residual assignment was
#     reversed (`question3$residuals <- q3r` with both undefined);
#     reconstructed from the question text and the later
#     `summary(question3)` call.

##### QUESTION 1
## Suppose you are regressing log(price) on log(area), log(Lot.Area), Bedroom.AbvGr, Overall.Qual, and Land.Slope. Which of the following variables are included with stepwise variable selection using AIC but not BIC?
question1_full <- lm(log(price) ~ log(area) + log(Lot.Area) + Bedroom.AbvGr + Overall.Qual + Land.Slope, data=ames_train)
# AIC (default penalty k = 2)
stepAIC(question1_full, direction = "backward", trace = TRUE)
# BIC (penalty k = log(n))
stepAIC(question1_full, direction = "backward", k=log(nrow(ames_train)), trace = TRUE)
# Answer: Land.Slope

##### QUESTION 2
## When regressing log(price) on Bedroom.AbvGr, the coefficient for Bedroom.AbvGr is strongly positive. However, once log(area) is added to the model, the coefficient for Bedroom.AbvGr becomes strongly negative. Which of the following best explains this phenomenon?
summary(lm(log(price) ~ Bedroom.AbvGr, data=ames_train)) # FIX: closing ")" was missing
summary(lm(log(price) ~ Bedroom.AbvGr + log(area), data=ames_train))
# Answer: Larger houses on average have more bedrooms and sell for higher prices. However, holding constant the size of a house, the number of bedrooms decreases property valuation.

##### QUESTION 3
## Run a simple linear model for log(price), with log(area) as the independent variable. Which of the following neighborhoods has the highest average residuals?
question3 <- lm(log(price) ~ log(area), data=ames_train) # FIX: fit was missing from transcript
q3r <- residuals(question3)                              # FIX: assignment direction was reversed
## Method 1
ames_train <- ames_train %>% # FIX: stray console "+" removed
  mutate(question3residuals = q3r)
question3means <- ames_train %>%
  select(Neighborhood, question3residuals) %>%
  group_by(Neighborhood) %>%
  summarize(mean=mean(question3residuals))
## Method 2
sort(tapply(ames_train$question3residuals, ames_train$Neighborhood, mean), decreasing=TRUE)
# Answer: GrnHill

##### QUESTION 4
# We are interested in determining how well the model fits the data for each neighborhood. The model from Question 3 does the worst at predicting prices in which of the following neighborhoods?
# Answer: GrnHill

#####
# Missing-value counts across the quality variables.
length(which(is.na(ames_train$Garage.Qual))) + length(which(is.na(ames_train$Bsmt.Qual))) + length(which(is.na(ames_train$Overall.Qual)))
# intersection..... length(which(is.na(ames_train$Overall.Qual))) = 0
length(which(is.na(ames_train$Bsmt.Qual) & is.na(ames_train$Garage.Qual) ))
# Answer: 64

##### QUESTION 7
question7 <- lm(log(price) ~ Overall.Cond + Overall.Qual, data=ames_train)
q7 <- question7$fitted.values
ames_train <- ames_train %>% # FIX: stray console "+" removed
  mutate(question7values = q7)
sort(tapply(ames_train$question7values, ames_train$MS.SubClass, median), decreasing=TRUE)

##### QUESTION 8
# Leverage: index of the observation with the largest hat value.
q8 <- hatvalues(question7)
which.max(q8)
# Answer: 268

##### QUESTION 9
## Which of the following corresponds to a correct interpretation of the coefficient k of Bedroom.AbvGr, where log(price) is the dependent variable?
# Answer: Holding constant all other variables in the model, on average, an additional bedroom will increase housing price by k percent.

##### QUESTION 10
## Which of the following sale condition categories shows significant differences from the normal selling condition?
lm(price ~ Sale.Condition, data=ames_train)
# Answer: Abnorm and Partial

##### QUESTION 11
## Subset ames_train to only include houses sold under normal sale conditions. What percent of the original observations remain?
length(which(ames_train$Sale.Condition=="Normal"))
# Answer: 83.4%

##### QUESTION 12
q12ames <- subset(ames_train, ames_train$Sale.Condition=="Normal")
q12model <- lm(log(price) ~ log(area), data=q12ames)
summary(q12model)
summary(question3)
a450954fda242ff6da475aa43fd631cadf406d12
3d9de2ab4797d89a64e37028bf3427eed3e2c866
/aprox/nuts/modelos/temp.R
8fb0a43ffeb79639cc00d9bf3cf5728262362851
[]
no_license
anyosa/binreg
dfe2fdd8e504640cccd550ec329a972a132840ef
7dfe63ae27777959b9ce206b26857c0733931b1e
refs/heads/master
2021-05-11T06:16:40.975209
2018-08-05T23:08:14
2018-08-05T23:08:14
117,982,147
0
0
null
null
null
null
UTF-8
R
false
false
3,379
r
temp.R
# Collection of Stan model strings for Bayesian binary (logistic)
# regression, held as R character vectors (to be compiled by rstan).
#
# FIX: mod1..mod4 were delimited with Python-style triple quotes
# ("""..."""), which is a syntax error in R (it parses as adjacent
# string literals). They are now ordinary quoted strings; the Stan
# code inside each string is unchanged. Top-level `=` assignments
# were also changed to the conventional `<-`.
# NOTE(review): the embedded Stan code uses the old `<-` assignment
# and `#` comments, both deprecated in current Stan -- left verbatim.

# Simple Bernoulli-logit model: y ~ logit(b0 + b1*x1 + b2*x2),
# with N(0, 100) priors on all coefficients.
mod.stan <- '
data {
  int<lower=0> N; # number of observations
  int<lower=0,upper=1> y[N]; # setting the dependent variable (vote) as binary
  vector[N] x1; # independent variable 1
  vector[N] x2; # independent variable 2
}
#transformed data {
#vector[N] age_sq; # create new variable (4), age squared (no dots in the variable name)
#age_sq <- age .* age; # formula for the variable, do not forget the . before multiplication
#}
parameters {
  real b0; # intercept
  real b1; # beta for educate, etc
  real b2;
}
model {
  b0 ~ normal(0,100); # you can set priors for all betas
  b1 ~ normal(0,100); # if you prefer not to, uniform priors will be used
  b2 ~ normal(0,100);
  y ~ bernoulli_logit(b0 + b1 * x1 + b2 * x2 ); # model
}
'

# Logit model for vote ~ educate + income + age + age^2, with a
# generated-quantities block predicting one covariate profile.
# NOTE(review): the data block declares x1/x2 but the model references
# vote/educate/income/age -- inconsistent in the original; kept verbatim.
mod1 <- "
data {
  int<lower=0> N; # number of observations
  int<lower=0,upper=1> y[N]; # setting the dependent variable (vote) as binary
  vector[N] x1; # independent variable 1
  vector[N] x2; # independent variable 2
}
transformed data {
  vector[N] age_sq; # create new variable (4), age squared (no dots in the variable name)
  age_sq <- age .* age; # formula for the variable, do not forget the . before multiplication
}
parameters {
  real alpha; # intercept
  real b_educate; # beta for educate, etc
  real b_income;
  real b_age;
  real b_age_sq;
}
model {
  alpha ~ normal(0,100); # you can set priors for all betas
  b_educate ~ normal(0,100); # if you prefer not to, uniform priors will be used
  b_income ~ normal(0,100);
  b_age ~ normal(0,100);
  b_age_sq ~ normal(0,100);
  vote ~ bernoulli_logit(alpha + b_educate * educate + b_income * income + b_age * age + b_age_sq * age_sq); # model
}
generated quantities { # simulate quantities of interest
  real y_hat; # create a new variable for the predicted values
  y_hat <- inv_logit(alpha + b_educate * 10 + b_income * 15 + b_age * 40 + b_age_sq * 1600); # model
}
"

# Bernoulli model for death with the probability built explicitly
# from odds in a transformed-parameters block.
mod2 <- "
data {
  int<lower=0> N;
  int<lower=0> p;
  int death[N];
  int<lower=0> qsmk[N];
  int<lower=0> sex[N];
  real<lower=0> age[N];
  int<lower=0> race[N];
  real<lower=0> smokeyrs[N];
}
parameters {
  real beta[p];
}
transformed parameters {
  real<lower=0> odds[N];
  real<lower=0, upper=1> prob[N];
  for (i in 1:N) {
    odds[i] <- exp(beta[1] + beta[2]*qsmk[i] + beta[3]*sex[i] + beta[4]*age[i] + beta[5]*race[i] + beta[6]*smokeyrs[i]);
    prob[i] <- odds[i] / (odds[i] + 1);
  }
}
model {
  death ~ bernoulli(prob);
}
"

# Random-intercept (multilevel) logistic regression: observations
# (pregnancies) grouped within women via the mapping g[].
mod3 <- "
data {
  int N; // number of obs (pregnancies)
  int M; // number of groups (women)
  int K; // number of predictors
  int y[N]; // outcome
  row_vector[K] x[N]; // predictors
  int g[N]; // map obs to groups (pregnancies to women)
}
parameters {
  real alpha;
  real a[M];
  vector[K] beta;
  real<lower=0,upper=10> sigma;
}
model {
  alpha ~ normal(0,100);
  a ~ normal(0,sigma);
  beta ~ normal(0,100);
  for(n in 1:N) {
    y[n] ~ bernoulli(inv_logit( alpha + a[g[n]] + x[n]*beta));
  }
}
"

# Minimal two-predictor Bernoulli-logit model with flat priors.
mod4 <- "
data {
  int<lower=0> N;
  int<lower=0,upper=1> y[N];
  vector[N] x1;
  vector[N] x2;
}
parameters {
  real beta_0;
  real beta_1;
  real beta_2;
}
model {
  y ~ bernoulli_logit(beta_0 + beta_1 * x1 + beta_2 * x2);
}
"
196bdeb978dcac483a1c485caa0c4c9682c8e893
f0c6380692ce00322e2ef31c3b6ec2e2b4ca6d97
/run_analysis.R
b3b3194aca612c2a44a7450e035ebfbb8091781d
[]
no_license
rou07812/Peer-graded-Assignment-Getting-and-Cleaning-Data-Course-Project
64350b1fc6b60f8cd3803e44946ba71f89805559
0e25bf93491fe25c791df1a24166c5bf79f3cf39
refs/heads/master
2020-09-28T21:02:59.918385
2019-12-10T05:53:10
2019-12-10T05:53:10
226,864,317
0
0
null
null
null
null
UTF-8
R
false
false
2,860
r
run_analysis.R
# Getting & Cleaning Data course project: build a tidy summary of the
# UCI HAR ("Human Activity Recognition using Smartphones") data set.
# Steps: read train/test splits, merge them, keep mean/std features,
# label activities, expand abbreviated variable names, then write the
# per-Subject, per-Activity means to TidyData.txt.
# NOTE(review): `setwd()` with a Windows-style relative path makes this
# script location-specific; it must be run from a directory containing
# "R\\data\\UCI HAR Dataset".

#load libraries
library(data.table)
library(dplyr)
setwd(".\\R\\data\\UCI HAR Dataset")

##Read supporting files: feature names and the activity code/label map.
featureNames <- read.table(".\\features.txt")
activityLabels <- read.table(".\\activity_labels.txt", header = FALSE)

#read Training and Test sets (subject ids, activity codes, feature matrix)
subjTrain <- read.table(".\\train\\subject_train.txt", header = FALSE)
actvTrain <- read.table(".\\train\\y_train.txt", header = FALSE)
featTrain <- read.table(".\\train\\X_train.txt", header = FALSE)
subjTest <- read.table(".\\test\\subject_test.txt", header = FALSE)
actvTest <- read.table(".\\test\\y_test.txt", header = FALSE)
featTest <- read.table(".\\test\\X_test.txt", header = FALSE)

#Merge Training and Test data sets (train rows first, then test rows;
#the three tables stay row-aligned with each other)
subjMerge <- rbind(subjTrain,subjTest)
actvMerge <- rbind(actvTrain,actvTest)
featMerge <- rbind(featTrain,featTest)

#Name columns: feature columns take their names from features.txt col 2
colnames(subjMerge) <- "Subject"
colnames(actvMerge) <- "Activity"
colnames(featMerge) <- t(featureNames[2])

#create completed Data Frame: features + Activity + Subject
completedData <- cbind(featMerge,actvMerge,subjMerge)

#extract mean and std dev columns; 562/563 are Activity and Subject
columnsWithMeanSTD <- grep(".*Mean.*|.*Std.*", names(completedData), ignore.case=TRUE)
requiredColumns <- c(columnsWithMeanSTD,562,563)
extractedData <- completedData[,requiredColumns]

#name activities in dataset: replace numeric codes 1..6 with labels
#1 = WALKING etc...
extractedData$Activity <- as.character(extractedData$Activity)
for (x in 1:6){
  extractedData$Activity[extractedData$Activity == x] <- as.character(activityLabels[x,2])
}
extractedData$Activity <- as.factor(extractedData$Activity)

#rename variable names: expand abbreviations into descriptive names
# NOTE(review): patterns like "-mean()" are regexes, so "()" matches an
# empty capture group rather than literal parentheses -- the literal
# "()" in column names is therefore left behind; confirm intended.
names(extractedData)<-gsub("Acc", "Accelerometer", names(extractedData))
names(extractedData)<-gsub("Gyro", "Gyroscope", names(extractedData))
names(extractedData)<-gsub("BodyBody", "Body", names(extractedData))
names(extractedData)<-gsub("Mag", "Magnitude", names(extractedData))
names(extractedData)<-gsub("^t", "Time", names(extractedData))
names(extractedData)<-gsub("^f", "Frequency", names(extractedData))
names(extractedData)<-gsub("tBody", "TimeBody", names(extractedData))
names(extractedData)<-gsub("-mean()", "Mean", names(extractedData), ignore.case = TRUE)
names(extractedData)<-gsub("-std()", "STD", names(extractedData), ignore.case = TRUE)
names(extractedData)<-gsub("-freq()", "Frequency", names(extractedData), ignore.case = TRUE)
names(extractedData)<-gsub("angle", "Angle", names(extractedData))
names(extractedData)<-gsub("gravity", "Gravity", names(extractedData))

#create tidy data set: mean of every variable per Subject x Activity,
#ordered by Subject then Activity
extractedData$Subject <- as.factor(extractedData$Subject)
extractedData <- data.table(extractedData)
tidyData <- aggregate(. ~Subject + Activity, extractedData, mean)
tidyData <- tidyData[order(tidyData$Subject,tidyData$Activity),]
write.table(tidyData, file = "TidyData.txt", row.names = FALSE)

#description of variables of the final data frame
str(tidyData)
da84da22d55490b3a627ac9f9a5e92d0e5240ca7
064fdebe446ace3596ed1af13c92f6ce613a3e66
/man/Rmd_bind.Rd
1175b3c9ef4c40566dafea9cec39f7640a0e58be
[]
no_license
curso-r/puBuild
6dd704527d3731ac516d110e07e39b58f29bce74
2a998876a1b393b5c5339e0b0db38ef91bf0d310
refs/heads/master
2021-01-11T23:24:58.011253
2017-10-10T02:46:47
2017-10-10T02:46:47
78,579,218
0
1
null
2017-01-12T13:57:12
2017-01-10T22:13:03
R
UTF-8
R
false
true
333
rd
Rmd_bind.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/render_pu.R \name{Rmd_bind} \alias{Rmd_bind} \title{Concatena Capítulos De Um Power Up} \usage{ Rmd_bind(dir = ".", titulo, output_file = NULL, output_dir = NULL) } \description{ Concatena os capítulos de um power up em um único arquivo index.html. }
170fc1047c50cd3d9cc067fc37793a94a3c8c547
ebbe711114abaed7a4c7e97c4bfb78b630d45364
/man/incomplete.Rd
696c1d78c221a8fefca98defc6b3a0a7c59ffa6e
[]
no_license
cran/imputeMDR
79a9a94f4d742acd9ecf3522f08b4ab520e9bbd8
d8bac5d21cb1c822166142d0171e8e0fe0360f22
refs/heads/master
2020-05-26T09:04:52.034454
2011-06-28T00:00:00
2011-06-28T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,280
rd
incomplete.Rd
\name{incomplete} \alias{incomplete} \docType{data} \title{A simulated example data containing missing values} \description{ This data is an example of a simulated case-control study data with 10% missing genotype values. 10 biallelic marker genotypes (ex : single nucleotide polymorphisms; SNPs) for 200 cases and 200 controls are included. Class variable (response; phenotype; disease status) is in the first column. } \usage{data(incomplete)} \format{ A data frame with 400 observations on the following 11 variables. \describe{ \item{\code{class}}{a numeric vector} \item{\code{snp1}}{a numeric vector} \item{\code{snp2}}{a numeric vector} \item{\code{snp3}}{a numeric vector} \item{\code{snp4}}{a numeric vector} \item{\code{snp5}}{a numeric vector} \item{\code{snp6}}{a numeric vector} \item{\code{snp7}}{a numeric vector} \item{\code{snp8}}{a numeric vector} \item{\code{snp9}}{a numeric vector} \item{\code{snp10}}{a numeric vector} } } \references{ Namkung J, Elston RC, Yang JM, Park T. "Identification of gene-gene interactions in the presence of missing data using the multifactor dimensionality reduction method" Genet Epidemiol. 2009 Nov;33(7):646-56. } \examples{ data(incomplete) } \keyword{datasets}
ecb53c54479b9fc1abf99e80fe7a8bc232e24798
4c3c7bedd64dae0d8726739393f38d6863b32d8f
/R/INSPEcT-GUI-functions.R
5c19c7d992f26333bcf40180b9b3ebff5856b1dd
[]
no_license
ste-depo/INSPEcT
11425638964219b0b619cc75b13f17dc1e4682e6
e56818c3166c95809c1b05ead6b36b7395559932
refs/heads/master
2022-12-20T01:06:23.322215
2020-09-29T15:22:08
2020-09-29T15:22:08
113,035,680
5
2
null
2020-08-09T08:13:42
2017-12-04T11:45:43
R
UTF-8
R
false
false
67,572
r
INSPEcT-GUI-functions.R
#function for the indicator "Loading...". This function was taken from #xiaodaigh and dcurrier (https://github.com/AnalytixWare/ShinySky/blob/master/R/busy-indicator.r) #and corrected. Maybe the real time is inside setInterval function .busyIndicator <- function(text = "Processing..." , image = "http://i.giphy.com/l3V0EQrPMh1nnfbFe.gif" , wait=1000) { tagList( singleton(tags$head( tags$link(rel = "stylesheet" , type = "text/css" ,href = file.path("panel","inst","extdata","busyIndicator.css") ))) ,div(class = "mybusyindicator",p(text)) #,img(src=image)) ,tags$script(sprintf( " setInterval(function(){ if ($('html').hasClass('shiny-busy')) { setTimeout(function() { if ($('html').hasClass('shiny-busy')) { $('div.mybusyindicator').show() } }, %d) } else { $('div.mybusyindicator').hide() } },1000)",wait) ) ) } ########################## getK1params <- function(input) switch(input$k1_function, "Constant" = paste0("starting levels=",input$k1_h0), "Sigmoidal" = c(paste0("starting levels=",input$k1_h0,"; final levels=",input$k1_h1), paste0("first response time=",input$k1_t1,"; slope=",input$k1_beta)), "Impulsive" = c(paste0("starting levels=",input$k1_h0,"; intermediate levels=",input$k1_h1), paste0("final levels=",input$k1_h2,"; first response time=",input$k1_t1), paste0("second response time=",input$k1_t2,"; slope:",input$k1_beta)) ) getK2params <- function(input) switch(input$k2_function, "Constant" = paste0("starting levels=",input$k2_h0), "Sigmoidal" = c(paste0("starting levels=",input$k2_h0,"; final levels=",input$k2_h1), paste0("first response time=",input$k2_t1,"; slope=",input$k2_beta)), "Impulsive" = c(paste0("starting levels=",input$k2_h0,"; intermediate levels=",input$k2_h1), paste0("final levels=",input$k2_h2,"; first response time=",input$k2_t1), paste0("second response time=",input$k2_t2,"; slope:",input$k2_beta)) ) getK3params <- function(input) switch(input$k3_function, "Constant" = paste0("starting levels=",input$k3_h0), "Sigmoidal" = c(paste0("starting 
levels=",input$k3_h0,"; final levels=",input$k3_h1), paste0("first response time=",input$k3_t1,"; slope=",input$k3_beta)), "Impulsive" = c(paste0("starting levels=",input$k3_h0,"; intermediate levels=",input$k3_h1), paste0("final levels=",input$k3_h2,"; first response time=",input$k3_t1), paste0("second response time=",input$k3_t2,"; slope:",input$k3_beta)) ) # getK2params <- function(input) # switch(input$k2_function, # "Constant" = c("starting levels:"=input$k2_h0), # "Sigmoidal" = c("starting levels:"=input$k2_h0, # "final levels:"=input$k2_h1, # "first response time:"=input$k2_t1, # "slope:"=input$k2_beta), # "Impulsive" = c("starting levels:"=input$k2_h0, # "intermediate levels:"=input$k2_h1, # "final levels:"=input$k2_h2, # "first response time:"=input$k2_t1, # "second response time:"=input$k2_t2, # "slope:"=input$k2_beta) # ) # # getK3params <- function(input) # switch(input$k3_function, # "Constant" = c("starting levels:"=input$k3_h0), # "Sigmoidal" = c("starting levels:"=input$k3_h0, # "final levels:"=input$k3_h1, # "first response time:"=input$k3_t1, # "slope:"=input$k3_beta), # "Impulsive" = c("starting levels:"=input$k3_h0, # "intermediate levels:"=input$k3_h1, # "final levels:"=input$k3_h2, # "first response time:"=input$k3_t1, # "second response time:"=input$k3_t2, # "slope:"=input$k3_beta) # ) modeling_report <- function(modeling, inspect, experiment, input, ranges) { modeling_strategy <- if( experiment$steady_state ) { mode <- 0 'stedy-state data, integrative framework' } else { if( inspect$mod_method == 'int' ) { mode <- 1 'time-course data, integrative framework' } else { mode <- 2 'time-course data, derivative framework' } } first_rate_name <- if( mode == 1 ) 'Synthesis rate' else 'Mature RNA' legend_text <- c() legend_text <- c(legend_text, paste("Usage:" , input$data_selection)) legend_text <- c(legend_text, '') legend_text <- c(legend_text, '---- Modeling parameters ----') legend_text <- c(legend_text, '') legend_text <- c(legend_text, 
paste("Modeling strategy:" , modeling_strategy)) legend_text <- c(legend_text, paste("Time range:" , paste('[',ranges$time_min,',',ranges$time_max,']'))) legend_text <- c(legend_text, '') legend_text <- c(legend_text, paste(first_rate_name, "modeled with" , input$k1_function, "functional form, with parameters:")) legend_text <- c(legend_text, getK1params(input)) legend_text <- c(legend_text, '') legend_text <- c(legend_text, paste("Processing rate modeled with" , input$k2_function, "functional form, with parameters:")) legend_text <- c(legend_text, getK2params(input)) # legend_text <- c(legend_text, paste(getK2params(input), collapse='; ')) legend_text <- c(legend_text, '') legend_text <- c(legend_text, paste("Degradation rate modeled with" , input$k3_function, "functional form, with parameters:")) legend_text <- c(legend_text, getK3params(input)) # legend_text <- c(legend_text, paste(getK3params(input), collapse='; ')) if( input$data_selection != 'User defined' ) { legend_text <- c(legend_text, '') legend_text <- c(legend_text, '---- Fit results ----') legend_text <- c(legend_text, '') legend_text <- c(legend_text, paste("goodness of fit (p-value):", as.character(signif(isolate(modeling$simdata$scores$pchisq),3)))) legend_text <- c(legend_text, paste("Akaike information criterion:", as.character(signif(isolate(modeling$simdata$scores$aic),3)))) } return(legend_text) } output_pars <- function(modeling, inspect, experiment, input, ranges) { legend_text <- modeling_report(modeling, inspect, experiment, input, ranges) par(mfrow=c(1,1), mar=c(1,1,1,1)) plot.new() legend('center', legend=legend_text, bty='n') } ########################## # convert_gene_classes <- function(gene_classes) { # diz <- c('0'='KKK', 'a'='VKK', 'b'='KKV', 'c'='KVK', # 'ab'='VKV', 'ac'='VVK', 'bc'='KVV', 'abc'='VVV') # unname(diz[gene_classes]) # } # reconvert_gene_classes <- function(gene_classes) { diz <- c('KKK'='0','VKK'='a','KKV'='b','KVK'='c', 'VKV'='ab','VVK'='ac','KVV'='bc','VVV'='abc') 
unname(diz[gene_classes]) } ########################### ## function for ranges #### ########################### define_parameter_ranges <- function(ids) { range_k1_h_pars <- quantile( unlist(lapply(ids@model@ratesSpecs, function(gene) { rate <- gene[[1]][[ 1 ]] switch(rate$type, "constant" = rate$params[1], "sigmoid" = rate$params[1:2], "impulse" = rate$params[1:3] ) })) #, probs=c(.025, .975)) , probs=c(0, 1)) range_k1_h_pars <- c( floor(range_k1_h_pars[1]), ceiling(range_k1_h_pars[2]) ) range_k2_h_pars <- quantile( unlist(lapply(ids@model@ratesSpecs, function(gene) { rate <- gene[[1]][[ 3 ]] switch(rate$type, "constant" = rate$params[1], "sigmoid" = rate$params[1:2], "impulse" = rate$params[1:3] ) })) # , probs=c(.025, .975)) , probs=c(0, 1)) range_k2_h_pars <- c( floor(range_k2_h_pars[1]), ceiling(range_k2_h_pars[2]) ) range_k3_h_pars <- quantile( unlist(lapply(ids@model@ratesSpecs, function(gene) { rate <- gene[[1]][[ 2 ]] switch(rate$type, "constant" = rate$params[1], "sigmoid" = rate$params[1:2], "impulse" = rate$params[1:3] ) })) # , probs=c(.025, .975)) , probs=c(0, 1)) range_k3_h_pars <- c( floor(range_k3_h_pars[1]), ceiling(range_k3_h_pars[2]) ) range_t_pars <- quantile( unlist(lapply(ids@model@ratesSpecs, function(gene) { rate_k1 <- gene[[1]][[ 1 ]] k1_t <- switch(rate_k1$type, "constant" = NULL, "sigmoid" = rate_k1$params[3], "impulse" = rate_k1$params[4:5] ) rate_k2 <- gene[[1]][[ 2 ]] k2_t <- switch(rate_k2$type, "constant" = NULL, "sigmoid" = rate_k2$params[3], "impulse" = rate_k2$params[4:5] ) rate_k3 <- gene[[1]][[ 3 ]] k3_t <- switch(rate_k3$type, "constant" = NULL, "sigmoid" = rate_k3$params[3], "impulse" = rate_k3$params[4:5] ) c(k1_t, k2_t, k3_t) })) # , probs=c(.025, .975)) , probs=c(0, 1)) # range_t_pars <- timetransf_inv(range_t_pars, logshift, linshift) range_t_pars <- c( floor(range_t_pars[1]), # (arrotonda per difetto al secondo decimale) ceiling(range_t_pars[2]) ) range_beta_pars <- quantile( unlist(lapply(ids@model@ratesSpecs, 
function(gene) { rate_k1 <- gene[[1]][[ 1 ]] k1_t <- switch(rate_k1$type, "constant" = NULL, "sigmoid" = rate_k1$params[4], "impulse" = rate_k1$params[6] ) rate_k2 <- gene[[1]][[ 2 ]] k2_t <- switch(rate_k2$type, "constant" = NULL, "sigmoid" = rate_k2$params[4], "impulse" = rate_k2$params[6] ) rate_k3 <- gene[[1]][[ 3 ]] k3_t <- switch(rate_k3$type, "constant" = NULL, "sigmoid" = rate_k3$params[4], "impulse" = rate_k3$params[6] ) c(k1_t, k2_t, k3_t) })) # , probs=c(.025, .975)) , probs=c(0, 1)) range_beta_pars <- c( floor(range_beta_pars[1]), ceiling(range_beta_pars[2]) ) return(list( k1_h_pars=range_k1_h_pars, k2_h_pars=range_k2_h_pars, k3_h_pars=range_k3_h_pars, t_pars=range_t_pars, beta_pars=range_beta_pars )) } ####################### ## PLOT FUNCTION ###### ####################### RNAdynamicsAppMake <- function(data_selection, time_min, time_max, experiment, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params, mod_method) { if( data_selection == 'Experimental data' ) { reference_mRNA <- experiment$mRNA secondary_mRNA <- experiment$mRNA_smooth } else { reference_mRNA <- experiment$mRNA_smooth secondary_mRNA <- experiment$mRNA } if( data_selection == 'Experimental data' ) { reference_preMRNA <- experiment$preMRNA secondary_preMRNA <- experiment$preMRNA_smooth } else { reference_preMRNA <- experiment$preMRNA_smooth secondary_preMRNA <- experiment$preMRNA } if( data_selection == 'Experimental data' ) { reference_synthesis <- experiment$synthesis secondary_synthesis <- experiment$synthesis_smooth } else { reference_synthesis <- experiment$synthesis_smooth secondary_synthesis <- experiment$synthesis } experimental_mRNAsd <- experiment$mRNAsd experimental_preMRNAsd <- experiment$preMRNAsd experimental_synthesissd <- experiment$synthesissd if( !experiment$steady_state ) { simulation_time <- experiment_tpts <- experiment$tpts if( data_selection == 'User defined' ) { simulation_time <- seq(min(simulation_time),max(simulation_time),length.out=1000) # 
simulation_time <- sort(unique(c(simulation_time, experiment_tpts))) } } else { experiment_tpts <- 0 simulation_time <- seq(0,16,length.out=1000) } if( mod_method == 'int' ) { sim <- deterministic_simulation( simulation_time, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params) } else { # mod_method == 'der' gene_class <- paste0( switch(k1_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V"), switch(k2_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V"), switch(k3_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V") ) sim <- derivative_solution( simulation_time, gene_class, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params) } # calculate the scores of the modeling and assign to output if( data_selection != 'User defined' & !experiment$steady_state ) { scores <- list() loglik_score <- logLikelihoodFunction(reference_preMRNA, sim[simulation_time %in% experiment_tpts,'p'], experimental_preMRNAsd^2) + logLikelihoodFunction(reference_mRNA, sim[simulation_time %in% experiment_tpts,'m'], experimental_mRNAsd^2) + ifelse(experiment$no_nascent, 0, logLikelihoodFunction(reference_synthesis, sim[simulation_time %in% experiment_tpts,'k1'], experimental_synthesissd^2)) chisq_score <- chisqFunction(reference_preMRNA, sim[simulation_time %in% experiment_tpts,'p'], experimental_preMRNAsd^2) + chisqFunction(reference_mRNA, sim[simulation_time %in% experiment_tpts,'m'], experimental_mRNAsd^2) + ifelse(experiment$no_nascent, 0, chisqFunction(reference_synthesis, sim[simulation_time %in% experiment_tpts,'k1'], experimental_synthesissd^2)) k <- length(c(ifelse(experiment$no_nascent, 0, k1_params), k2_params, k3_params)) scores$pchisq <- pchisq( chisq_score, 3*length(experiment_tpts) - k ) scores$aic <- 2*k - 2*loglik_score } else { scores <- list() scores$pchisq <- NA scores$aic <- NA } conf_int <- list( k1 = cbind(left=rep(NA, length(simulation_time)),right=rep(NA, length(simulation_time))), k2 = cbind(left=rep(NA, 
length(simulation_time)),right=rep(NA, length(simulation_time))), k3 = cbind(left=rep(NA, length(simulation_time)),right=rep(NA, length(simulation_time))) ) return(list(sim = sim, conf_int = conf_int, scores = scores)) } RNAdynamicsAppMakeConfInt <- function(data_selection, time_min, time_max, experiment, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params, mod_method) { if( data_selection == 'Experimental data' ) { reference_mRNA <- experiment$mRNA secondary_mRNA <- experiment$mRNA_smooth } else { reference_mRNA <- experiment$mRNA_smooth secondary_mRNA <- experiment$mRNA } if( data_selection == 'Experimental data' ) { reference_preMRNA <- experiment$preMRNA secondary_preMRNA <- experiment$preMRNA_smooth } else { reference_preMRNA <- experiment$preMRNA_smooth secondary_preMRNA <- experiment$preMRNA } if( data_selection == 'Experimental data' ) { reference_synthesis <- experiment$synthesis secondary_synthesis <- experiment$synthesis_smooth } else { reference_synthesis <- experiment$synthesis_smooth secondary_synthesis <- experiment$synthesis } experimental_mRNAsd <- experiment$mRNAsd experimental_preMRNAsd <- experiment$preMRNAsd experimental_synthesissd <- experiment$synthesissd if( !experiment$steady_state ) { simulation_time <- experiment_tpts <- experiment$tpts # simulation_time <- seq(time_min,time_max,length.out=1000) # simulation_time <- sort(unique(c(simulation_time, experiment_tpts))) } else { experiment_tpts <- 0 simulation_time <- seq(0,16,length.out=1000) } gene_class <- paste0( switch(k1_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V"), switch(k2_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V"), switch(k3_function, "Constant"="K", "Sigmoidal"="V", "Impulsive"="V") ) if( mod_method == 'int' ) { conf_int <- compute_ci_Integrative_Nascent(c(k1_params, k2_params, k3_params), tpts = experiment_tpts, model_tpts = simulation_time, classTmp = gene_class, experimentalP = reference_preMRNA, experimentalM = reference_mRNA, 
experimentalA = reference_synthesis, varianceP = experiment$preMRNAsd^2, varianceM = experiment$mRNAsd^2, varianceA = experiment$synthesissd^2, confidenceThreshold = qchisq(.95,1) ) } else { # mod_method == 'der' conf_int <- compute_ci_Derivative_Nascent(c(k1_params, k2_params, k3_params), tpts = experiment_tpts, model_tpts = simulation_time, classTmp = reconvert_gene_classes(gene_class), experimentalP = reference_preMRNA, experimentalM = reference_mRNA, experimentalA = reference_synthesis, varianceP = experiment$preMRNAsd^2, varianceM = experiment$mRNAsd^2, varianceA = if(is.null(experiment$synthesissd)) NULL else experiment$synthesissd^2, confidenceThreshold = qchisq(.95,1) ) } # calculate the scores of the modeling and assign to output p_k1 <- rate_var_p(conf_int$k1[simulation_time %in% experiment_tpts,]) p_k2 <- rate_var_p(conf_int$k2[simulation_time %in% experiment_tpts,]) p_k3 <- rate_var_p(conf_int$k3[simulation_time %in% experiment_tpts,]) rate_p <- c(k1=p_k1, k2=p_k2, k3=p_k3) return(list(conf_int = conf_int, rate_p = rate_p)) } RNAdynamicsAppPlot <- function(data_selection, show_logtime, show_relexpr, logshift, linshift, time_min, time_max, experiment, simdata, ylims, rate_p ) { # get experimental values if( data_selection == 'Experimental data' ) { reference_mRNA <- experiment$mRNA secondary_mRNA <- experiment$mRNA_smooth } else { reference_mRNA <- experiment$mRNA_smooth secondary_mRNA <- experiment$mRNA } if( data_selection == 'Experimental data' ) { reference_preMRNA <- experiment$preMRNA secondary_preMRNA <- experiment$preMRNA_smooth } else { reference_preMRNA <- experiment$preMRNA_smooth secondary_preMRNA <- experiment$preMRNA } if( data_selection == 'Experimental data' ) { reference_synthesis <- experiment$synthesis secondary_synthesis <- experiment$synthesis_smooth } else { reference_synthesis <- experiment$synthesis_smooth secondary_synthesis <- experiment$synthesis } experimental_mRNAsd <- experiment$mRNAsd experimental_preMRNAsd <- 
experiment$preMRNAsd experimental_synthesissd <- experiment$synthesissd if( !experiment$steady_state ) { simulation_time <- experiment_tpts <- experiment$tpts if( data_selection == 'User defined' ) { simulation_time <- seq(min(simulation_time),max(simulation_time),length.out=1000) # simulation_time <- sort(unique(c(simulation_time, experiment_tpts))) } } else { experiment_tpts <- seq(0,16,by=4) simulation_time <- seq(0,16,length.out=1000) reference_mRNA <- c(reference_mRNA, rep(NA, 4)) secondary_mRNA <- c(secondary_mRNA, rep(NA, 4)) reference_preMRNA <- c(reference_preMRNA, rep(NA, 4)) secondary_preMRNA <- c(secondary_preMRNA, rep(NA, 4)) reference_synthesis <- c(reference_synthesis, rep(NA, 4)) secondary_synthesis <- c(secondary_synthesis, rep(NA, 4)) experimental_mRNAsd <- c(experimental_mRNAsd, rep(NA, 4)) experimental_preMRNAsd <- c(experimental_preMRNAsd, rep(NA, 4)) experimental_synthesissd <- c(experimental_synthesissd, rep(NA, 4)) } # make the simulation if( !show_logtime ) { simtimeplot <- simulation_time exptimeplot <- experiment_tpts } else { simtimeplot <- timetransf(simulation_time, logshift) exptimeplot <- timetransf(experiment_tpts, logshift) } sim <- simdata$sim conf_int <- simdata$conf_int # start plot routine par(mfrow=c(5,1)) par(mar=c(2.5,8,0,1)+.1) # plot k1 plot_k1_experiment = ! 
(data_selection == 'User defined' | experiment$no_nascent) k1_ylim <- plotSingleRNADynamic( 'synthesis', 's', simtimeplot, sim[,'k1'], conf_int$k1[,'left'], conf_int$k1[,'right'], plot_k1_experiment, exptimeplot, reference_synthesis, secondary_synthesis, experimental_synthesissd, show_relexpr, ylims$k1_ylim, rate_p = rate_p['k1'] ) # plot pre-RNA dynamics p_ylim <- plotSingleRNADynamic( 'pre-RNA', '', simtimeplot, sim[,'p'], rep(NA, length(simtimeplot)), rep(NA, length(simtimeplot)), data_selection != 'User defined', exptimeplot, reference_preMRNA, secondary_preMRNA, experimental_preMRNAsd, show_relexpr, ylims$p_ylim ) # plot k2 k2_ylim <- plotSingleRNADynamic( 'processing', 'p', simtimeplot, sim[,'k2'], conf_int$k2[,'left'], conf_int$k2[,'right'], FALSE, show_relexpr = show_relexpr, ylim = ylims$k2_ylim, rate_p = rate_p['k2'] )#, exptimeplot, reference_synthesis, secondary_synthesis, experimental_synthesissd ) # plot mRNA dynamics m_ylim <- plotSingleRNADynamic( 'mature-RNA', '', simtimeplot, sim[,'m'], rep(NA, length(simtimeplot)), rep(NA, length(simtimeplot)), data_selection != 'User defined', exptimeplot, reference_mRNA, secondary_mRNA, experimental_mRNAsd, show_relexpr, ylims$m_ylim ) # plot k3 k3_ylim <- plotSingleRNADynamic( 'degradation', 'd', simtimeplot, sim[,'k3'], conf_int$k3[,'left'], conf_int$k3[,'right'], FALSE, show_relexpr = show_relexpr, ylim = ylims$k3_ylim, rate_p = rate_p['k3'] )#, exptimeplot, reference_synthesis, secondary_synthesis, experimental_synthesissd ) # draw x-axis if( show_logtime ) { axis(1, at=exptimeplot, labels = signif(experiment_tpts,2) , cex.axis = 1.3) } else { axis(1, at=experiment_tpts, labels = signif(experiment_tpts,2) , cex.axis = 1.3) } # return ylims upon request ylims <- list( k1_ylim = k1_ylim, k2_ylim = k2_ylim, k3_ylim = k3_ylim, p_ylim = p_ylim, m_ylim = m_ylim ) } deltaylim <- function( yrange ) { deltarange <- yrange[2] * .05 ylim <- yrange + c(-deltarange, deltarange) } plotSingleRNADynamic <- function( 
dyn_name, tag, simtimeplot, simprofile, ci_left, ci_right, plot_exp, exptimeplot, ref_exp, sec_exp, ssd_exp, show_relexpr = FALSE, ylim, rate_p = NULL, command_line = FALSE, col = 1, prior = NULL, constant = NULL) { if( !is.null(rate_p) ) { p_name <- paste0('(p=',signif(rate_p,2),')') } else { p_name <- '' } if( tag != '' ) { dyn_name <- gsub("^","paste('", gsub("$",")", gsub("\\)", "'),')'", gsub("\\(", "(', bold('", paste(dyn_name, paste0('(', tag, ')')))))) } if( is.null(prior) ) { prior <- rep(NA, length(simtimeplot)) plot_prior <- FALSE } else plot_prior <- TRUE if( is.null(constant) ) { constant <- rep(NA, length(simtimeplot)) plot_constant <- FALSE } else plot_constant <- TRUE if( plot_exp ) { sec_exp_plus_ssd <- sec_exp + ssd_exp ref_exp_plus_ssd <- ref_exp + ssd_exp sec_exp_minus_ssd <- sec_exp - ssd_exp ref_exp_minus_ssd <- ref_exp - ssd_exp } if(show_relexpr) { refexpression <- simprofile[1] if( is.na(refexpression) ) if( plot_exp ) refexpression <- ref_exp[1] else refexpression <- prior[1] simprofile <- simprofile/refexpression ci_left <- ci_left/refexpression ci_right <- ci_right/refexpression prior <- prior/refexpression constant <- constant/refexpression if( plot_exp ) { sec_exp <- sec_exp/refexpression ref_exp <- ref_exp/refexpression sec_exp_plus_ssd <- sec_exp_plus_ssd/refexpression ref_exp_plus_ssd <- ref_exp_plus_ssd/refexpression sec_exp_minus_ssd <- sec_exp_minus_ssd/refexpression ref_exp_minus_ssd <- ref_exp_minus_ssd/refexpression } } if( is.null(ylim) ) { if( plot_exp ) { yrange <- range(c(simprofile, sec_exp_plus_ssd, ref_exp_plus_ssd, sec_exp_minus_ssd, ref_exp_minus_ssd, prior), na.rm=TRUE) ylim <- deltaylim(yrange) } else { ylim <- deltaylim( range(c(simprofile, ci_left, ci_right, prior), na.rm=TRUE) ) } } if( command_line ) { lwd <- 3; cex.lab <- 1; cex.axis <- 1 } else { lwd <- 2; cex.lab <- 1.7; cex.axis <- 1.3 } plot(simtimeplot, simprofile, xaxs='i', yaxs='i', xaxt = 'n', ylab='', type='l', xlab='', lwd = lwd, cex.lab = cex.lab, 
cex.axis=cex.axis, xlim = range(simtimeplot) + diff(range(simtimeplot)) * c(-.05, .05), ylim = ylim, col = col ) if( !command_line ) { mtext(parse(text=dyn_name), 2, 4) mtext(p_name, 2, 3) } ci_matrix <- cbind(ci_left, ci_right) if( !all(is.na(ci_matrix)) ) matlines(simtimeplot, ci_matrix, lty=2, col=col) if( plot_exp ) { if( !all(is.na(sec_exp)) ) points( exptimeplot, sec_exp, pch=1, col='grey') if( command_line ) pch <- 21 else pch <- 19 points( exptimeplot, ref_exp, pch = pch, col=col) segments( exptimeplot , ref_exp_minus_ssd , exptimeplot , ref_exp_plus_ssd , col = col) } if( plot_prior ) { lines(simtimeplot, prior, col=col) } if( plot_constant ) { lines(simtimeplot, constant, col=col, lty=3, lwd=3) } # return ylim upon request ylim <- ylim } rate_var_p <- function(rate_conf_int) { k_start <- mean(rate_conf_int[,2],na.rm=TRUE) if(!is.finite(k_start)) return(NaN) #return(list(par=NaN, value=NaN)) k_scores_out <- optim(k_start, k_score_fun, method='BFGS', rate_conf_int=rate_conf_int) pchisq(k_scores_out$value,nrow(rate_conf_int)-1,lower.tail=FALSE) } constantModelRNApp <- function(x , par, log_shift, lin_shift) rep(par, length(x)) sigmoidModelRNApp <- function(x, par, log_shift, lin_shift=0) par[1]+(par[2]-par[1])*(1/(1+exp(-par[4]*(timetransf(x,log_shift,lin_shift)-timetransf(par[3],log_shift,lin_shift))))) impulseModelRNApp <- function(x, par, log_shift, lin_shift=0) 1/par[2]*(par[1]+(par[2]-par[1])*(1/(1+exp(-par[6]*(timetransf(x,log_shift,lin_shift)-timetransf(par[4],log_shift,lin_shift))))))* (par[3]+(par[2]-par[3])*(1/(1+exp(par[6]*(timetransf(x,log_shift,lin_shift)-timetransf(par[5],log_shift,lin_shift)))))) ############################# ## SIMULATION FUNCTION ###### ############################# rxnrateMatureRNA <- function(t,c,parms){ alpha <- parms$alpha beta <- parms$beta gamma <- parms$gamma r=rep(0,length(c)) r[1] <- alpha(t) - gamma(t) * c["p"] r[2] <- gamma(t) * c["p"] - beta(t) * c["m"] return(list(r)) } deterministic_simulation <- 
function(simulation_time, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params ) # in this version of the function, # all three rates are modeled as impulse models { tpts <- simulation_time params <- list( alpha = function(x) switch(k1_function, "Constant" = constantModel(x, k1_params), "Sigmoidal" = sigmoidModel(x, k1_params), "Impulsive" = impulseModel(x, k1_params) ) , beta = function(x) switch(k3_function, "Constant" = constantModel(x, k3_params), "Sigmoidal" = sigmoidModel(x, k3_params), "Impulsive" = impulseModel(x, k3_params) ) , gamma = function(x) switch(k2_function, "Constant" = constantModel(x, k2_params), "Sigmoidal" = sigmoidModel(x, k2_params), "Impulsive" = impulseModel(x, k2_params) ) ) cinit <- c(params$alpha(tpts[1]) / params$gamma(tpts[1]) , params$alpha(tpts[1]) / params$beta(tpts[1])) names(cinit) <- c('p', 'm') model <- ode(y=cinit, times=tpts, func=rxnrateMatureRNA, parms=params) #model[,2:3] <- t(t(model[,2:3])/model[1,2:3]) synthesis <- params$alpha(tpts) processing <- params$gamma(tpts) degradation <- params$beta(tpts) model <- data.frame( time=model[,1] , k1=synthesis , k2=processing , k3=degradation , p=model[,2] , m=model[,3] ) return(model) } derivative_solution <- function(simulation_time, gene_class, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params ) # in this version of the function, # all three rates are modeled as impulse models { tpts <- simulation_time model <- data.frame( time = tpts, k1 = switch(gene_class, "KKK"=k1KKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=k1VKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=k1KVK_Der_App(tpts, c(k1_params, k2_params, k3_params)), "KKV"=k1KKV_Der_App(tpts, c(k1_params, k2_params, k3_params)), "VVK"=k1VVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=k1VKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=k1KVV_Der_App(tpts, c(k1_params, k2_params, k3_params)), "VVV"=k1VVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , k2 
= switch(k2_function, "Constant" = constantModel(tpts, k2_params), "Sigmoidal" = sigmoidModel(tpts, k2_params), "Impulsive" = impulseModel(tpts, k2_params) ) , k3 = switch(k3_function, "Constant" = constantModel(tpts, k3_params), "Sigmoidal" = sigmoidModel(tpts, k3_params), "Impulsive" = impulseModel(tpts, k3_params) ) , p = switch(gene_class, "KKK"=prematureKKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=prematureVKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=prematureKVK_Der_App(tpts, c(k1_params, k2_params, k3_params)), "KKV"=prematureKKV_Der_App(tpts, c(k1_params, k2_params, k3_params)), "VVK"=prematureVVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=prematureVKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=prematureKVV_Der_App(tpts, c(k1_params, k2_params, k3_params)), "VVV"=prematureVVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , m = switch(k1_function, "Constant" = constantModel(tpts, k1_params), "Sigmoidal" = sigmoidModel(tpts, k1_params), "Impulsive" = impulseModel(tpts, k1_params) ) ) return(model) } derivative_solution_no_nascent <- function(simulation_time, gene_class, k1_function, k2_function, k3_function, k1_params, k2_params, k3_params ) # in this version of the function, # all three rates are modeled as impulse models { tpts <- simulation_time model <- data.frame( time = tpts, k1 = switch(gene_class, "KKK"=k1KKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=k1VKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=k1KVK_Der(tpts, c(k1_params, k2_params, k3_params)), "KKV"=k1KKV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVK"=k1VVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=k1VKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=k1KVV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVV"=k1VVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , k2 = switch(gene_class, "KKK"=k2KKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=k2VKK_Der(tpts, c(k1_params, k2_params, k3_params)), 
"KVK"=k2KVK_Der(tpts, c(k1_params, k2_params, k3_params)), "KKV"=k2KKV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVK"=k2VVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=k2VKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=k2KVV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVV"=k2VVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , k3 = switch(gene_class, "KKK"=k3KKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=k3VKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=k3KVK_Der(tpts, c(k1_params, k2_params, k3_params)), "KKV"=k3KKV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVK"=k3VVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=k3VKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=k3KVV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVV"=k3VVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , p = switch(gene_class, "KKK"=prematureKKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=prematureVKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=prematureKVK_Der(tpts, c(k1_params, k2_params, k3_params)), "KKV"=prematureKKV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVK"=prematureVVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=prematureVKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=prematureKVV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVV"=prematureVVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) , m = switch(gene_class, "KKK"=matureKKK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKK"=matureVKK_Der(tpts, c(k1_params, k2_params, k3_params)), "KVK"=matureKVK_Der(tpts, c(k1_params, k2_params, k3_params)), "KKV"=matureKKV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVK"=matureVVK_Der(tpts, c(k1_params, k2_params, k3_params)), "VKV"=matureVKV_Der(tpts, c(k1_params, k2_params, k3_params)), "KVV"=matureKVV_Der(tpts, c(k1_params, k2_params, k3_params)), "VVV"=matureVVV_Der(tpts, c(k1_params, k2_params, k3_params)) ) ) return(model) } smoothModel <- function(tpts, experiment, 
nInit=10, nIter=500, seed=1234) { if( !is.null(seed) ) set.seed(seed) optimFailOut <- function(e) list(par=NA, value=NA, counts=NA, convergence=1, message=e) im.parguess <- function(tpts , values, log_shift ) { # tpts <- timetransf(tpts, log_shift) ntp <- length(tpts) peaks <- which(diff(sign(diff(values)))!=0)+1 if( length(peaks) == 1 ) peak <- peaks if( length(peaks) > 1 ) peak <- sample(peaks, 1) if( length(peaks) == 0 ) peak <- round(length(tpts)/2) # initial_values <- runif( 1, min=min(values[1:3]) , max=max(values[1:3])) intermediate_values <- values[peak] if( intermediate_values==0 ) intermediate_values <- mean(values[seq(peak-1,peak+1)]) end_values <- runif( 1, min=min(values[(ntp-2):ntp]) , max=max(values[(ntp-2):ntp])) time_of_first_response <- tpts[peak-1] time_of_second_response <- tpts[peak+1] slope_of_response <- diff(range(tpts)) / (time_of_second_response-time_of_first_response) # return(c(h0=initial_values, h1=intermediate_values , h2=end_values, t1=time_of_first_response , t2=time_of_second_response, b=slope_of_response)) } im.chisq <- function(par, tpts, experiment, log_shift) { model <- impulseModelRNApp(tpts, par, log_shift) chisqFunction(experiment, model, 1) } log_shift <- findttpar(tpts) outIM <- sapply(1:nInit, function(x) suppressWarnings(tryCatch(optim( par=im.parguess(tpts, experiment, log_shift) , fn=im.chisq, tpts=tpts , experiment=experiment , log_shift=log_shift , control=list(maxit=nIter) ), error=function(e) optimFailOut(e)))) bestIM <- which.min(unlist(outIM[2,])) impulseModelRNApp( tpts, outIM[,bestIM]$par, log_shift) } chisqFunction <- function(experiment, model, variance=NULL) { if( is.null(variance)) variance <- stats::var(experiment) sum((experiment - model )^2/variance ) } logLikelihoodFunction <- function(experiment, model, variance=NULL) { if( is.null(variance)) variance <- stats::var(experiment) sum(log(2*pnorm(-abs(experiment-model),mean=0,sd=sqrt(variance)))) } ############################### ## MINIMIZATION FUNCTION 
###### 
###############################

## modelChisqMatureRNA
## --------------------------------------------------------------------------
## Objective function for the integrative ('int') modeling framework.
## Splits the flat parameter vector `par` into the alpha/beta/gamma rate
## parameterizations (sizes taken from `df`), builds the three time-dependent
## rate functions from `fun`, integrates the premature/mature RNA ODE system
## (rxnrateMatureRNA, solved with deSolve's ode) from a steady-state initial
## condition at the first time point, and returns the total chi-squared of
## the modeled profiles against the experimental ones.
##
## Arguments:
##   par        - concatenated parameters for the alpha, beta and gamma rates
##   tpts       - experimental time points (tpts[1] defines the steady state)
##   fun        - per-rate list; each element carries the `value` evaluator
##   df         - per-rate degrees of freedom, used to split `par`
##   alpha_exp / alpha_var     - nascent (synthesis) data and variance
##   mature_exp / mature_var   - mature RNA data and variance
##   preMRNA_exp / preMRNA_var - premature RNA data and variance
##   no_nascent - if TRUE the nascent (alpha) term is excluded from the score
## Returns: a single numeric chi-squared value (lower is better).
modelChisqMatureRNA <- function(par, tpts, fun, df, alpha_exp, alpha_var #, pval
    , mature_exp, mature_var, preMRNA_exp, preMRNA_var #, log_shift, lin_shift
    , no_nascent)
{
    # split the flat parameter vector according to the df of each rate
    splitpar <- split(par
        , c(rep('alpha',df['alpha']), rep('beta',df['beta']), rep('gamma',df['gamma']))
        )
    #
    # rate functions of time, each closed over its own parameter sub-vector
    params <- list()
    params$alpha <- function(x)
        fun$alpha$value(x, splitpar$alpha)#, log_shift, lin_shift)
    params$beta <- function(x)
        fun$beta$value(x, splitpar$beta)#, log_shift, lin_shift)
    params$gamma <- function(x)
        fun$gamma$value(x, splitpar$gamma)#, log_shift, lin_shift)
    #
    # steady-state initial condition at the first time point:
    # premature p0 = alpha/gamma, mature m0 = alpha/beta
    cinit <- c(params$alpha(tpts[1]) / params$gamma(tpts[1])
        , params$alpha(tpts[1]) / params$beta(tpts[1]))
    names(cinit) <- c('p', 'm')
    model <- ode(y=cinit, times=tpts, func=rxnrateMatureRNA, parms=params)
    #
    alpha_model <- params$alpha(tpts)
    matureodel <- model[,'m']   # NOTE(review): name presumably a typo for "mature_model"
    preMRNA_model <- model[,'p']
    #
    # total chi-squared; the alpha term is dropped when no nascent data is used
    ifelse(no_nascent, 0, chisqFunction(alpha_exp, alpha_model, alpha_var)) +
        chisqFunction(mature_exp, matureodel, mature_var) +
        chisqFunction(preMRNA_exp, preMRNA_model, preMRNA_var)
}

## optimParamsMatureRNA
## --------------------------------------------------------------------------
## Fits the rate parameters of one gene by minimizing a chi-squared with
## stats::optim, in one of two frameworks selected by `mod_method`:
##   'int' : integrative modeling - the full ODE system is integrated at
##           every evaluation (objective: modelChisqMatureRNA)
##   else  : derivative modeling - closed-form error functions chosen from
##           the gene class (K = constant, V = variable rate, one letter per
##           rate in the order k1/k2/k3, i.e. synthesis/processing/degradation)
##
## Arguments:
##   interpRates - list with elements alpha/beta/gamma, each carrying
##                 $params (start values), $df, $fun and $type
##   tpts_exp    - experimental time points
##   alpha_exp/alpha_var, mature_exp/mature_var, preMRNA_exp/preMRNA_var -
##                 data and variances per RNA species
##   maxit       - optim iteration budget ('der' branch multiplies it by 1000)
##   method      - optim method; 'NM' is translated to 'Nelder-Mead'
##   no_nascent  - TRUE when no nascent RNA data is available
##   mod_method  - 'int' for integrative, anything else for derivative
## Returns: a list(alpha, beta, gamma, counts, convergence, message); on any
## error the input rates are returned unchanged with convergence = 1.
optimParamsMatureRNA <- function(interpRates, tpts_exp, alpha_exp, alpha_var, mature_exp
    , mature_var, preMRNA_exp, preMRNA_var, maxit=500, method='NM' #, log_shift, lin_shift
    , no_nascent, mod_method)
{
    if( method == 'NM' ) method <- 'Nelder-Mead'
    if( mod_method == 'int') {
        tryCatch({
            optOut <- optim(
                par=c(interpRates$alpha$params, interpRates$beta$params, interpRates$gamma$params)
                , fn=modelChisqMatureRNA
                , tpts=tpts_exp
                , fun=sapply(interpRates, '[[', 'fun')
                , df=unlist(sapply(interpRates, '[[', 'df'))
                , alpha_exp=alpha_exp, mature_exp=mature_exp
                , preMRNA_exp=preMRNA_exp
                , alpha_var=alpha_var, mature_var=mature_var
                , preMRNA_var=preMRNA_var
                # , log_shift = log_shift
                # , lin_shift = lin_shift
                , no_nascent = no_nascent
                , control = list(maxit = maxit)
                , method = method
                )
            # write the optimized parameters back into the rate descriptors
            splitpar <- split(optOut$par
                , c(rep('alpha',interpRates$alpha$df)
                , rep('beta',interpRates$beta$df)
                , rep('gamma',interpRates$gamma$df))
                )
            interpRates$alpha$params <- splitpar$alpha
            interpRates$beta$params <- splitpar$beta
            interpRates$gamma$params <- splitpar$gamma
            return(list(
                alpha=interpRates$alpha
                , beta=interpRates$beta
                , gamma=interpRates$gamma
                , counts=optOut$counts[1]
                , convergence=optOut$convergence
                , message=optOut$message
                ))}
            , error=function(e) {
                print(e)
                # fall back to the unmodified input rates, flagged as not converged
                return(list(
                    alpha=interpRates$alpha
                    , beta=interpRates$beta
                    , gamma=interpRates$gamma
                    , counts=0
                    , convergence=1
                    , message=e
                    ))
            })
    } else { ## derivative modeling
        tryCatch({
            # one letter per rate: K = constant, V = variable (sigmoid/impulse)
            k1_function <- interpRates[['alpha']]$type
            k2_function <- interpRates[['gamma']]$type
            k3_function <- interpRates[['beta']]$type
            gene_class <- paste0(
                switch(k1_function, "constant"="K", "sigmoid"="V", "impulse"="V"),
                switch(k2_function, "constant"="K", "sigmoid"="V", "impulse"="V"),
                switch(k3_function, "constant"="K", "sigmoid"="V", "impulse"="V")
            )
            if(gene_class=='KKK') {
                ## KKK error function requires less arguments
                optOut <- optim(unname(unlist(lapply(interpRates, '[[', 'params')))
                    , errorKKK_Der
                    , tpts = tpts_exp
                    , premature = preMRNA_exp
                    , mature = mature_exp
                    , alpha = alpha_exp
                    , prematureVariance = preMRNA_var
                    , matureVariance = mature_var
                    , alphaVariance = alpha_var
                    , control = list(maxit = maxit * 1000)
                    , method = method
                    )
            } else {
                # pick the closed-form error function matching the gene class;
                # classes centered on mature RNA use the _App variants
                error_fun_Der <- switch(gene_class,
                    "VKK"=errorVKK_Der,
                    "KVK"=errorKVK_Der_App,
                    "KKV"=errorKKV_Der_App,
                    "VVK"=errorVVK_Der,
                    "VKV"=errorVKV_Der,
                    "KVV"=errorKVV_Der_App,
                    "VVV"=errorVVV_Der
                )
                if( no_nascent ) {
                    ## in case of no nascent optimization the native error function (not the _App) should
                    ## be used also for K-- models and KKK, initialChisquare and initialDistances should
                    ## be set. In order to skip them, all their values are set to 1 and
                    ## initialPenalityRelevance to 0
                    optOut <- optim(unname(unlist(lapply(interpRates, '[[', 'params')))
                        , error_fun_Der
                        , tpts = tpts_exp
                        , premature = preMRNA_exp
                        , mature = mature_exp
                        , alpha = alpha_exp
                        , prematureVariance = preMRNA_var
                        , matureVariance = mature_var
                        , alphaVariance = alpha_var
                        , KKK = c(1,1,1)
                        , initialChisquare = 1
                        , initialDistances = 1
                        , initialPenalityRelevance = 0
                        , derivativePenalityRelevance = 10^-50
                        , clean = FALSE
                        , control = list(maxit = maxit * 1000))
                } else { # with nascent
                    optOut <- optim(unname(unlist(lapply(interpRates, '[[', 'params')))
                        , error_fun_Der
                        , tpts = tpts_exp
                        , premature = preMRNA_exp
                        , mature = mature_exp
                        , alpha = alpha_exp
                        , prematureVariance = preMRNA_var
                        , matureVariance = mature_var
                        , alphaVariance = alpha_var
                        , KKK = NULL
                        , initialChisquare = NULL
                        , initialDistances = NULL
                        , initialPenalityRelevance = 1
                        , derivativePenalityRelevance = 10^-50
                        , clean = FALSE
                        , control = list(maxit = maxit * 1000))
                }
            }
            # NOTE(review): here the split order is alpha/gamma/beta, unlike the
            # alpha/beta/gamma order used in the 'int' branch - presumably it
            # matches the layout produced by unlist(lapply(interpRates, ...)) in
            # derivative mode; verify against the callers.
            splitpar <- split(optOut$par
                , c(rep('alpha',interpRates$alpha$df)
                , rep('gamma',interpRates$gamma$df)
                , rep('beta',interpRates$beta$df)
                )
                )
            interpRates$alpha$params <- splitpar$alpha
            interpRates$beta$params <- splitpar$beta
            interpRates$gamma$params <- splitpar$gamma
            return(list(
                alpha=interpRates$alpha
                , beta=interpRates$beta
                , gamma=interpRates$gamma
                , counts=optOut$counts[1]
                , convergence=optOut$convergence
                , message=optOut$message
                ))}
            , error=function(e) {
                print(e)
                # fall back to the unmodified input rates, flagged as not converged
                return(list(
                    alpha=interpRates$alpha
                    , beta=interpRates$beta
                    , gamma=interpRates$gamma
                    , counts=0
                    , convergence=1
                    , message=e
                    ))
            })
    }
}

#####################################
########## confidence intervals #####
#####################################

## compute_ci_Integrative_Nascent
## --------------------------------------------------------------------------
## Profile-likelihood confidence intervals for the parameters of an
## integrative model fitted with nascent RNA data.  For every parameter the
## log-likelihood boundary (at `confidenceThreshold`) is searched with
## rootSolve's multiroot from five starting points (1e-2x, 0.5x, 1x, 1.5x,
## 1e2x the fitted value); admissible roots define the left/right parameter
## extremes.  Each extreme is then propagated through
## rates_integrativeModels to turn parameter intervals into per-time-point
## rate envelopes.
## Returns: list(k1, k2, k3) - synthesis/processing/degradation - each a
## matrix with columns left/opt/right evaluated at `model_tpts`.
## NOTE(review): `emptyList` used as the failure fallback is defined
## elsewhere in the package - confirm it is in scope.
compute_ci_Integrative_Nascent <- function(parameters, tpts, model_tpts = tpts, classTmp,
    experimentalP, experimentalM, experimentalA, varianceP, varianceM, varianceA,
    confidenceThreshold )
{
    if( is.null(names(parameters)) ) {
        names(parameters) <- as.character(seq_along(parameters))
    }
    foe <- capture.output({ # Just to capture the output of multiroot function
    suppressWarnings({
        intervals <- sapply(names(parameters),function(parname)
        {
            par <- parameters[parname]
            # root search of the likelihood boundary from five starting points
            mOut = list(
                left_1 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1e-2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = FALSE),error=function(e)return(emptyList)),
                left_2 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1/2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = FALSE),error=function(e)return(emptyList)),
                center = tryCatch(multiroot(f = logLikelihoodCIerror, start = par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = FALSE),error=function(e)return(emptyList)),
                right_1 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1.5*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = FALSE),error=function(e)return(emptyList)),
                right_2 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1e2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = FALSE),error=function(e)return(emptyList))
            )
            # keep only the roots that actually reached the boundary (residual < 1e-2)
            precis = sapply(mOut, '[[', 'f.root')
            if( length(which(precis<1e-2))>0 )
            {
                conf_int = sapply(mOut[which(precis<1e-2)], '[[', 'root')
                low_int = min(conf_int)
                high_int = max(conf_int)
                left = ifelse( low_int < par, low_int, NA)
                right = ifelse( high_int > par, high_int, NA)
                left = unname(left)
                right = unname(right)
            } else {
                left = NA
                right = NA
            }
            return(c(left,right))
        })
        # a one-sided interval is considered unreliable: invalidate both sides
        intervals[1,!is.finite(intervals[2,])] <- NaN
        intervals[2,!is.finite(intervals[1,])] <- NaN
    })
    })
    # propagate every parameter extreme to the modeled rate profiles
    perturbedRates <- matrix(rep(NaN,3*length(model_tpts)),ncol=1)
    for(parname in names(parameters))
    {
        for(extremePar in intervals[,parname])
        {
            perturbedParameters <- parameters
            perturbedParameters[parname] <- extremePar
            perturbedRates <- cbind(perturbedRates,rates_integrativeModels(tpts=model_tpts,
                class=classTmp, parameters=perturbedParameters))
        }
    };perturbedRates <- perturbedRates[,-1]  # drop the NaN seed column
    perturbedRates[perturbedRates<0] <- 0    # rates cannot be negative
    optTmp <- rates_integrativeModels(tpts=model_tpts, class=classTmp, parameters=parameters)
    # per-time-point envelopes of the perturbed profiles, one triplet per rate
    k1left <- apply(perturbedRates[grep("alpha",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k1TC <- optTmp[grep("alpha",names(optTmp))]
    k1right <- apply(perturbedRates[grep("alpha",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    k2left <- apply(perturbedRates[grep("gamma",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k2TC <- optTmp[grep("gamma",names(optTmp))]
    k2right <- apply(perturbedRates[grep("gamma",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    k3left <- apply(perturbedRates[grep("beta",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k3TC <- optTmp[grep("beta",names(optTmp))]
    k3right <- apply(perturbedRates[grep("beta",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    return(list(
        k1 = cbind(left=k1left, opt=k1TC, right=k1right),
        k2 = cbind(left=k2left, opt=k2TC, right=k2right),
        k3 = cbind(left=k3left, opt=k3TC, right=k3right)
    ))
}

## compute_ci_Derivative_Nascent
## --------------------------------------------------------------------------
## Same profile-likelihood procedure as compute_ci_Integrative_Nascent but
## for models fitted in the derivative framework (logLikelihoodCIerror is
## called with derivative = TRUE, app = TRUE, and the rate profiles come
## from rates_derivativeModels).  On top of the raw envelopes it performs
## two repair passes:
##   1) a missing/degenerate side of an interval is mirrored from the other
##      side around the optimum;
##   2) fully collapsed intervals (left == opt == right) are widened by the
##      median (or, if zero, the mean) relative interval width of that rate
##      across time points.
## Returns: list(k1, k2, k3), each a matrix with columns left/opt/right.
compute_ci_Derivative_Nascent <- function(parameters, tpts, model_tpts = tpts, classTmp,
    experimentalP, experimentalM, experimentalA, varianceP, varianceM, varianceA,
    confidenceThreshold )
{
    if( is.null(names(parameters)) ) {
        names(parameters) <- as.character(seq_along(parameters))
    }
    foe <- capture.output({ # Just to capture the output of multiroot function
    suppressWarnings({
        intervals <- sapply(names(parameters),function(parname)
        {
            par <- parameters[parname]
            # root search of the likelihood boundary from five starting points
            mOut = list(
                left_1 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1e-2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = TRUE, app=TRUE),error=function(e)return(emptyList)),
                left_2 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1/2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = TRUE, app=TRUE),error=function(e)return(emptyList)),
                center = tryCatch(multiroot(f = logLikelihoodCIerror, start = par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = TRUE, app=TRUE),error=function(e)return(emptyList)),
                right_1 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1.5*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = TRUE, app=TRUE),error=function(e)return(emptyList)),
                right_2 = tryCatch(multiroot(f = logLikelihoodCIerror, start = 1e2*par,
                    name = parname, parameters = parameters, class = classTmp, tpts = tpts,
                    experimentalP = experimentalP, experimentalM = experimentalM,
                    experimentalA = experimentalA, varianceP = varianceP,
                    varianceM = varianceM, varianceA = varianceA,
                    confidenceThreshold = confidenceThreshold,
                    derivative = TRUE, app=TRUE),error=function(e)return(emptyList))
            )
            # keep only the roots that actually reached the boundary (residual < 1e-2)
            precis = sapply(mOut, '[[', 'f.root')
            if( length(which(precis<1e-2))>0 )
            {
                conf_int = sapply(mOut[which(precis<1e-2)], '[[', 'root')
                low_int = min(conf_int)
                high_int = max(conf_int)
                left = ifelse( low_int < par, low_int, NA)
                right = ifelse( high_int > par, high_int, NA)
                left = unname(left)
                right = unname(right)
            } else {
                left = NA
                right = NA
            }
            return(c(left,right))
        })
        # a one-sided interval is considered unreliable: invalidate both sides
        intervals[1,!is.finite(intervals[2,])] <- NaN
        intervals[2,!is.finite(intervals[1,])] <- NaN
    })
    })
    optTmp <- rates_derivativeModels(tpts=model_tpts, class=classTmp, parameters=parameters,
        app=TRUE)
    # propagate every parameter extreme to the modeled rate profiles
    perturbedRates <- matrix(rep(NaN,3*length(model_tpts)),ncol=1)
    for(parname in names(parameters))
    {
        for(extremePar in intervals[,parname])
        {
            perturbedParameters <- parameters
            perturbedParameters[parname] <- extremePar
            perturbedRates <- cbind(perturbedRates,rates_derivativeModels(tpts=model_tpts,
                class=classTmp, parameters=perturbedParameters, app=TRUE))
        }
    };perturbedRates <- perturbedRates[,-1]  # drop the NaN seed column
    perturbedRates[perturbedRates<0] <- 0    # rates cannot be negative
    # per-time-point envelopes of the perturbed profiles, one triplet per rate
    k1left <- apply(perturbedRates[grep("alpha",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k1TC <- optTmp[grep("alpha",names(optTmp))]
    k1right <- apply(perturbedRates[grep("alpha",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    k2left <- apply(perturbedRates[grep("gamma",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k2TC <- optTmp[grep("gamma",names(optTmp))]
    k2right <- apply(perturbedRates[grep("gamma",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    k3left <- apply(perturbedRates[grep("beta",rownames(perturbedRates)),],1,min,na.rm=TRUE)
    k3TC <- optTmp[grep("beta",names(optTmp))]
    k3right <- apply(perturbedRates[grep("beta",rownames(perturbedRates)),],1,max,na.rm=TRUE)
    confidenceIntervals <- list(
        k1 = cbind(left=k1left, opt=k1TC, right=k1right),
        k2 = cbind(left=k2left, opt=k2TC, right=k2right),
        k3 = cbind(left=k3left, opt=k3TC, right=k3right)
    )
    # repair pass 1: mirror a missing/degenerate side around the optimum
    # when the other side is informative
    for(r in 1:3)
    {
        confidenceIntervals[[r]] <- t(apply(confidenceIntervals[[r]],1,function(row)
        {
            if((!is.finite(row[1])|row[1]==row[2])&(is.finite(row[3])&row[3]!=row[2]))
                row[1] <- row[2] - (row[3]-row[2])
            if((!is.finite(row[3])|row[3]==row[2])&(is.finite(row[1])&row[1]!=row[2]))
                row[3] <- row[2] + (row[2]-row[1])
            row
        }))
    }
    # typical relative half-widths per rate, used to widen collapsed intervals
    k1_low <- median(abs(confidenceIntervals[[1]][,2] -
        confidenceIntervals[[1]][,1])/confidenceIntervals[[1]][,1],na.rm=TRUE)
    k1_high <- median(abs(confidenceIntervals[[1]][,3] -
        confidenceIntervals[[1]][,1])/confidenceIntervals[[1]][,1],na.rm=TRUE)
    k2_low <- median(abs(confidenceIntervals[[2]][,2] -
        confidenceIntervals[[2]][,1])/confidenceIntervals[[2]][,1],na.rm=TRUE)
    k2_high <- median(abs(confidenceIntervals[[2]][,3] -
        confidenceIntervals[[2]][,1])/confidenceIntervals[[2]][,1],na.rm=TRUE)
    k3_low <- median(abs(confidenceIntervals[[3]][,2] -
        confidenceIntervals[[3]][,1])/confidenceIntervals[[3]][,1],na.rm=TRUE)
    k3_high <- median(abs(confidenceIntervals[[3]][,3] -
        confidenceIntervals[[3]][,1])/confidenceIntervals[[3]][,1],na.rm=TRUE)
    ### Possible for very few genes: the median width can be zero, fall back
    ### to the mean width instead
    #
    if(k1_low==0)k1_low <- mean(abs(confidenceIntervals[[1]][,2] -
        confidenceIntervals[[1]][,1])/confidenceIntervals[[1]][,1],na.rm=TRUE)
    if(k1_high==0)k1_high <- mean(abs(confidenceIntervals[[1]][,3] -
        confidenceIntervals[[1]][,1])/confidenceIntervals[[1]][,1],na.rm=TRUE)
    if(k2_low==0)k2_low <- mean(abs(confidenceIntervals[[2]][,2] -
        confidenceIntervals[[2]][,1])/confidenceIntervals[[2]][,1],na.rm=TRUE)
    if(k2_high==0)k2_high <- mean(abs(confidenceIntervals[[2]][,3] -
        confidenceIntervals[[2]][,1])/confidenceIntervals[[2]][,1],na.rm=TRUE)
    if(k3_low==0)k3_low <- mean(abs(confidenceIntervals[[3]][,2] -
        confidenceIntervals[[3]][,1])/confidenceIntervals[[3]][,1],na.rm=TRUE)
    if(k3_high==0)k3_high <- mean(abs(confidenceIntervals[[3]][,3] -
        confidenceIntervals[[3]][,1])/confidenceIntervals[[3]][,1],na.rm=TRUE)
    median_low <- c(k1=k1_low,k2=k2_low,k3=k3_low)
    median_high <- c(k1=k1_high,k2=k2_high,k3=k3_high)
    # repair pass 2: widen fully collapsed intervals (left == opt == right)
    # by the typical relative width of that rate
    for(r in 1:3)
    {
        confidenceIntervals[[r]] <- t(apply(confidenceIntervals[[r]],1,function(row)
        {
            if(is.finite(row[2])) {
                if(row[1]==row[2] & row[1]==row[3])
                    {row[1] <- row[2]*(1-median_low[[r]]); row[3] <- row[2]*(1+median_high[[r]])}
            }
            row
        }))
    }
    return(confidenceIntervals)
}

## functions to solve the derivative system specific for the app
## (always centered on mature RNA).  In every *_Der_App function the
## parameter vector layout depends on its length: the longer layout uses
## impulse models for the variable rates, the shorter one sigmoid models.

##### KVK (k1 variable is derived; k2 variable, k3 constant)

## Synthesis rate k1(x) implied by a constant mature level and a variable
## processing rate k2 (impulse when length(parameters)==8, sigmoid otherwise).
k1KVK_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8]
        return( ( k3Parameters * matureParameters ) *
            ( 1 + .DimpulseModel(x, k2Parameters) / impulseModel(x, k2Parameters)^2 ) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6]
        return( ( k3Parameters * matureParameters ) *
            ( 1 + .DsigmoidModel(x, k2Parameters) / sigmoidModel(x, k2Parameters)^2 ) )
    }
}

## Processing rate k2(x): the variable (impulse or sigmoid) model itself.
k2KVK_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8]
        return( impulseModel(x, k2Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6]
        return( sigmoidModel(x, k2Parameters) )
    }
}

## Degradation rate k3(x): constant.
## NOTE(review): uses length(tpts), not length(x) - `tpts` is not an argument
## here, so this relies on a variable from the calling/global environment;
## verify this is intended.
k3KVK_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8]
        return( rep(k3Parameters, length(tpts)) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6]
        return( rep(k3Parameters, length(tpts)) )
    }
}

## Premature RNA profile implied by the KVK model: P(x) = k3 * M / k2(x).
prematureKVK_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8]
        return( ( k3Parameters * matureParameters ) / impulseModel(x, k2Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6]
        return( ( k3Parameters * matureParameters ) / sigmoidModel(x, k2Parameters) )
    }
}

## Chi-squared error of the KVK derivative model ("app" variant, constant
## mature RNA).  Adds two optional penalties on top of the data chi-squared:
##  - a derivative penalty |d/dt at t=0| pushing the solution towards a
##    steady state at time zero (ignored when negligible w.r.t. the
##    chi-squared, threshold `derivativePenalityRelevance`);
##  - an initial-condition penalty tying the rates at t=0 to those of a
##    reference KKK (all-constant) fit, weighted by
##    initialChisquare/initialDistances and `initialPenalityRelevance`.
## When KKK/initialChisquare/initialDistances are NULL and nascent data is
## available, the nascent chi-squared is used instead of the penalty.
## With clean=TRUE only the raw chi-squared is returned.
## Returns NaN whenever any estimated profile or derivative is not finite.
errorKVK_Der_App <- function(parameters, tpts
    , premature, mature, alpha
    , prematureVariance, matureVariance, alphaVariance
    , KKK = NULL
    , initialChisquare = NULL
    , initialDistances = NULL
    , initialPenalityRelevance = 1
    , derivativePenalityRelevance = 10^-50
    , clean)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- .DimpulseModel(0,k2Parameters)
        D0_k3 <- 0
        D0_P <- k3Parameters * matureParameters *
            .DimpulseModel(0, k2Parameters) / impulseModel(0, k2Parameters)^2
    } else {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- .DsigmoidModel(0,k2Parameters)
        D0_k3 <- 0
        D0_P <- k3Parameters * matureParameters *
            .DsigmoidModel(0, k2Parameters) / sigmoidModel(0, k2Parameters)^2
    }
    matureEstimated <- rep(matureParameters, length(tpts))
    prematureEstimated <- prematureKVK_Der_App(x = tpts, parameters = parameters)
    alphaEstimated <- k1KVK_Der_App(x = tpts, parameters = parameters)
    # negative profiles are unphysical: turn them into NaN to reject the fit
    alphaEstimated[alphaEstimated<0] <- NaN
    prematureEstimated[prematureEstimated<0] <- NaN
    matureEstimated[matureEstimated<0] <- NaN
    if(any(!is.finite(alphaEstimated)) |
       any(!is.finite(prematureEstimated)) |
       any(!is.finite(matureEstimated)) |
       !is.finite(D0_M) |
       !is.finite(D0_k2) |
       !is.finite(D0_k3) |
       !is.finite(D0_P) ) return(NaN)
    prematureChiSquare <- sum((premature - prematureEstimated )^2/prematureVariance)
    matureChiSquare <- sum((mature - matureEstimated)^2/matureVariance)
    if(is.null(KKK)&is.null(initialChisquare)&is.null(initialDistances)&!is.null(alpha)&!is.null(alphaVariance))
    {
        # nascent data available: score alpha directly, no initial penalty
        alphaChiSquare <- sum((alpha - alphaEstimated)^2/alphaVariance)
        initialPenality <- 0
    }else{
        # stop('errorKVK_Der_App: KKK version not implemented')
        # no nascent data: anchor the t=0 rates to the reference KKK fit
        if(clean){initialPenality <- 0}else{
            initialPenality <- initialPenalityRelevance*(initialChisquare/initialDistances)*
                ((k1KKK_Der(0,KKK)-k1KVK_Der(0,parameters))^2 +
                 (k2KKK_Der(0,KKK)-k2KVK_Der(0,parameters))^2 +
                 (k3KKK_Der(0,KKK)-k3KVK_Der(0,parameters))^2)
        }
        alphaChiSquare <- 0
    }
    chiSquare <- sum(c(prematureChiSquare,matureChiSquare,alphaChiSquare))
    # steady-state-at-zero penalty, dropped when negligible
    penalty <- abs(D0_M)+abs(D0_P)+abs(D0_k2)+abs(D0_k3)
    if(penalty <= chiSquare*derivativePenalityRelevance){penalty <- 0}
    if(clean){return(chiSquare)}else{return(chiSquare+penalty+initialPenality)}
}

###### KKV (k1 derived; k2 constant, k3 variable)

## Synthesis rate k1(x) implied by a constant mature level, constant k2 and
## a variable degradation rate k3 (impulse when length(parameters)==8,
## sigmoid otherwise).
k1KKV_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:8]
        return( matureParameters *
            ( .DimpulseModel(x, k3Parameters) / k2Parameters + impulseModel(x, k3Parameters) ) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:6]
        return( matureParameters *
            ( .DsigmoidModel(x, k3Parameters) / k2Parameters + sigmoidModel(x, k3Parameters) ) )
    }
}

## Processing rate k2(x): constant.
## NOTE(review): uses length(tpts), not length(x) - `tpts` is not an argument
## here, so this relies on a variable from the calling/global environment;
## verify this is intended.
k2KKV_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:8]
        return( rep(k2Parameters, length(tpts)) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:6]
        return( rep(k2Parameters, length(tpts)) )
    }
}

## Degradation rate k3(x): the variable (impulse or sigmoid) model itself.
k3KKV_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:8]
        return( impulseModel(x, k3Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:6]
        return( sigmoidModel(x, k3Parameters) )
    }
}

## Premature RNA profile implied by the KKV model: P(x) = k3(x) * M / k2.
prematureKKV_Der_App <- function(x, parameters)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:8]
        return( ( impulseModel(x, k3Parameters) * matureParameters ) / k2Parameters )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:6]
        return( ( sigmoidModel(x, k3Parameters) * matureParameters ) / k2Parameters )
    }
}

## Chi-squared error of the KKV derivative model ("app" variant).  Same
## structure and penalty scheme as errorKVK_Der_App (see its header).
errorKKV_Der_App <- function(parameters, tpts
    , premature, mature, alpha
    , prematureVariance, matureVariance, alphaVariance
    , KKK = NULL
    , initialChisquare = NULL
    , initialDistances = NULL
    , initialPenalityRelevance = 1
    , derivativePenalityRelevance = 10^-50
    , clean)
{
    if(length(parameters)==8)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:8]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- 0
        D0_k3 <- .DimpulseModel(0,k3Parameters)
        D0_P <- .DimpulseModel(0, k3Parameters) * matureParameters / k2Parameters
    } else {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2]
        k3Parameters <- parameters[3:6]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- 0
        D0_k3 <- .DsigmoidModel(0,k3Parameters)
        D0_P <- .DsigmoidModel(0, k3Parameters) * matureParameters / k2Parameters
    }
    matureEstimated <- rep(matureParameters, length(tpts))
    prematureEstimated <- prematureKKV_Der_App(x = tpts, parameters = parameters)
    alphaEstimated <- k1KKV_Der_App(x = tpts, parameters = parameters)
    # negative profiles are unphysical: turn them into NaN to reject the fit
    alphaEstimated[alphaEstimated<0] <- NaN
    prematureEstimated[prematureEstimated<0] <- NaN
    matureEstimated[matureEstimated<0] <- NaN
    if(any(!is.finite(alphaEstimated)) |
       any(!is.finite(prematureEstimated)) |
       any(!is.finite(matureEstimated)) |
       !is.finite(D0_M) |
       !is.finite(D0_k2) |
       !is.finite(D0_k3) |
       !is.finite(D0_P) ) return(NaN)
    prematureChiSquare <- sum((premature - prematureEstimated )^2/prematureVariance)
    matureChiSquare <- sum((mature - matureEstimated)^2/matureVariance)
    if(is.null(KKK)&is.null(initialChisquare)&is.null(initialDistances)&!is.null(alpha)&!is.null(alphaVariance))
    {
        # nascent data available: score alpha directly, no initial penalty
        alphaChiSquare <- sum((alpha - alphaEstimated)^2/alphaVariance)
        initialPenality <- 0
    }else{
        # stop('errorKKV_Der_App: KKK version not implemented')
        # no nascent data: anchor the t=0 rates to the reference KKK fit
        if(clean){initialPenality <- 0}else{
            initialPenality <- initialPenalityRelevance*(initialChisquare/initialDistances)*
                ((k1KKK_Der(0,KKK)-k1KKV_Der(0,parameters))^2 +
                 (k2KKK_Der(0,KKK)-k2KKV_Der(0,parameters))^2 +
                 (k3KKK_Der(0,KKK)-k3KKV_Der(0,parameters))^2)
        }
        alphaChiSquare <- 0
    }
    chiSquare <- sum(c(prematureChiSquare,matureChiSquare,alphaChiSquare))
    # steady-state-at-zero penalty, dropped when negligible
    penalty <- abs(D0_M)+abs(D0_P)+abs(D0_k2)+abs(D0_k3)
    if(penalty <= chiSquare*derivativePenalityRelevance){penalty <- 0}
    if(clean){return(chiSquare)}else{return(chiSquare+penalty+initialPenality)}
}

########## KVV (k1 derived; k2 and k3 both variable)

## Synthesis rate k1(x) implied by a constant mature level with variable
## k2 and k3 (impulse when length(parameters)==13, sigmoid otherwise).
k1KVV_Der_App <- function(x, parameters)
{
    if(length(parameters)==13)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8:13]
        return( matureParameters * ( impulseModel(x, k3Parameters) +
            ( impulseModel(x, k2Parameters) * .DimpulseModel(x, k3Parameters) -
              impulseModel(x, k3Parameters) * .DimpulseModel(x, k2Parameters) ) /
            impulseModel(x, k2Parameters)^2 ) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6:9]
        return( matureParameters * ( sigmoidModel(x, k3Parameters) +
            ( sigmoidModel(x, k2Parameters) * .DsigmoidModel(x, k3Parameters) -
              sigmoidModel(x, k3Parameters) * .DsigmoidModel(x, k2Parameters) ) /
            sigmoidModel(x, k2Parameters)^2 ) )
    }
}

## Processing rate k2(x): the variable (impulse or sigmoid) model itself.
k2KVV_Der_App <- function(x, parameters)
{
    if(length(parameters)==13)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8:13]
        return( impulseModel(x, k2Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6:9]
        return( sigmoidModel(x, k2Parameters) )
    }
}

## Degradation rate k3(x): the variable (impulse or sigmoid) model itself.
k3KVV_Der_App <- function(x, parameters)
{
    if(length(parameters)==13)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8:13]
        return( impulseModel(x, k3Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6:9]
        return( sigmoidModel(x, k3Parameters) )
    }
}

## Premature RNA profile implied by the KVV model: P(x) = k3(x) * M / k2(x).
prematureKVV_Der_App <- function(x, parameters)
{
    if(length(parameters)==13)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8:13]
        return( ( impulseModel(x, k3Parameters) * matureParameters ) /
            impulseModel(x, k2Parameters) )
    }else{
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6:9]
        return( ( sigmoidModel(x, k3Parameters) * matureParameters ) /
            sigmoidModel(x, k2Parameters) )
    }
}

## Chi-squared error of the KVV derivative model ("app" variant).  Same
## structure and penalty scheme as errorKVK_Der_App (see its header).
## NOTE(review): in the sigmoid branch D0_k2 is set to 0 whereas the impulse
## branch uses .DimpulseModel(0, k2Parameters) - asymmetry kept as found;
## confirm whether the sigmoid branch should use .DsigmoidModel instead.
errorKVV_Der_App <- function(parameters, tpts
    , premature, mature, alpha
    , prematureVariance, matureVariance, alphaVariance
    , KKK = NULL
    , initialChisquare = NULL
    , initialDistances = NULL
    , initialPenalityRelevance = 1
    , derivativePenalityRelevance = 10^-50
    , clean)
{
    if(length(parameters)==13)
    {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:7]
        k3Parameters <- parameters[8:13]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- .DimpulseModel(0,k2Parameters)
        D0_k3 <- .DimpulseModel(0,k3Parameters)
        D0_P <- matureParameters * ( ( impulseModel(0, k2Parameters) *
            .DimpulseModel(0, k3Parameters) - impulseModel(0, k3Parameters) *
            .DimpulseModel(0, k2Parameters) ) / impulseModel(0, k2Parameters)^2 )
    } else {
        matureParameters <- parameters[1]
        k2Parameters <- parameters[2:5]
        k3Parameters <- parameters[6:9]
        # derivatives of the modeled quantities at t = 0
        D0_M <- 0
        D0_k2 <- 0
        D0_k3 <- .DsigmoidModel(0,k3Parameters)
        D0_P <- matureParameters * ( ( sigmoidModel(0, k2Parameters) *
            .DsigmoidModel(0, k3Parameters) - sigmoidModel(0, k3Parameters) *
            .DsigmoidModel(0, k2Parameters) ) / sigmoidModel(0, k2Parameters)^2 )
    }
    matureEstimated <- rep(matureParameters, length(tpts))
    prematureEstimated <- prematureKVV_Der_App(x = tpts, parameters = parameters)
    alphaEstimated <- k1KVV_Der_App(x = tpts, parameters = parameters)
    # negative profiles are unphysical: turn them into NaN to reject the fit
    alphaEstimated[alphaEstimated<0] <- NaN
    prematureEstimated[prematureEstimated<0] <- NaN
    matureEstimated[matureEstimated<0] <- NaN
    if(any(!is.finite(alphaEstimated)) |
       any(!is.finite(prematureEstimated)) |
       any(!is.finite(matureEstimated)) |
       !is.finite(D0_M) |
       !is.finite(D0_k2) |
       !is.finite(D0_k3) |
       !is.finite(D0_P) ) return(NaN)
    prematureChiSquare <- sum((premature - prematureEstimated )^2/prematureVariance)
    matureChiSquare <- sum((mature - matureEstimated)^2/matureVariance)
    if(is.null(KKK)&is.null(initialChisquare)&is.null(initialDistances)&!is.null(alpha)&!is.null(alphaVariance))
    {
        # nascent data available: score alpha directly, no initial penalty
        alphaChiSquare <- sum((alpha - alphaEstimated)^2/alphaVariance)
        initialPenality <- 0
    }else{
        # stop('errorKVV_Der_App: KKK version not implemented')
        # no nascent data: anchor the t=0 rates to the reference KKK fit
        if(clean){initialPenality <- 0}else{
            initialPenality <- initialPenalityRelevance*(initialChisquare/initialDistances)*
                ((k1KKK_Der(0,KKK)-k1KVV_Der(0,parameters))^2 +
                 (k2KKK_Der(0,KKK)-k2KVV_Der(0,parameters))^2 +
                 (k3KKK_Der(0,KKK)-k3KVV_Der(0,parameters))^2)
        }
        alphaChiSquare <- 0
    }
    chiSquare <- sum(c(prematureChiSquare,matureChiSquare,alphaChiSquare))
    # steady-state-at-zero penalty, dropped when negligible
    penalty <- abs(D0_M)+abs(D0_P)+abs(D0_k2)+abs(D0_k3)
    if(penalty <= chiSquare*derivativePenalityRelevance){penalty <- 0}
    if(clean){return(chiSquare)}else{return(chiSquare+penalty+initialPenality)}
}
bb3ae46cc07237d72f63f75bfd3b9026dfd21354
85ee7e5ce48cf3884c5601e554266fac112de14b
/man/spearman_brown.Rd
7ef047779f11fd6c7ac73aa55a99633158a2f253
[]
no_license
cran/splithalfr
274b4b813ea28666b2a7a491e8e6b725aba79d9f
02c387bf8fc2dcc4cc429764a47d3cf7c5dd85e8
refs/heads/master
2021-10-09T13:41:49.940719
2021-09-29T03:30:01
2021-09-29T03:30:01
236,901,406
0
0
null
null
null
null
UTF-8
R
false
true
1,351
rd
spearman_brown.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coefficients.R \name{spearman_brown} \alias{spearman_brown} \title{Calculate Spearman-brown coefficient} \usage{ spearman_brown(x, y, fn_cor = cor, ...) } \arguments{ \item{x}{(vector) a numeric vector} \item{y}{(vector) a numeric vector with compatible dimensions to x} \item{fn_cor}{(function) a function returning a correlation coefficient} \item{...}{Arguments passed to \code{fn_cor}} } \value{ (numeric) Spearman-Brown coefficient } \description{ Spearman-Brown reliability coefficient for doubling test length. Formula obtained from Warrens (2015) <\doi{10.1007/s11634-015-0198-6}> } \examples{ # Generate two variables with different means, variances and a correlation of about 0.5 library(MASS) vars = mvrnorm(30, mu = c(0, 2), Sigma = matrix(c(5, 2, 2, 3), ncol = 2), empirical = TRUE) # Calculate coefficient based on Pearson correlation spearman_brown(vars[,1], vars[,2]) # Calculate coefficient based on ICC, two-way, random effects, absolute agreement, single rater spearman_brown(vars[,1], vars[,2], short_icc, type = "ICC1", lmer = FALSE) } \seealso{ Other splithalfr coefficients: \code{\link{angoff_feldt}()}, \code{\link{assmd}()}, \code{\link{flanagan_rulon}()}, \code{\link{sdregi}()}, \code{\link{short_icc}()} } \concept{splithalfr coefficients}
38513a38639af4911a947f9674e0bea5357738a8
1204edc5a9b264e2efd282aba9fc7a72dc0f1c5a
/server.R
43f27cbf8e9c55ae8c5f9f9c7bd9ddce7b525118
[]
no_license
gpagliuca/surge_calculator
fc15b3e426594c3a39a7e80e0c250ae7d2d3569f
206b205c8d20f2d75add60b9cfdd72348701d98d
refs/heads/master
2021-05-03T11:59:22.782705
2018-02-06T17:03:01
2018-02-06T17:03:01
120,491,500
0
0
null
null
null
null
UTF-8
R
false
false
3,625
r
server.R
library(shiny)
library(ggplot2)
library(dplyr)
library(grid)

## Shiny server for a surge-volume calculator.
## The UI supplies: a CSV file (`datafile`, first column = time [s], second
## column = liquid flowrate [bbl/d]), a drain capacity (`drain`, read here in
## kbbl/d) and a percentage of the simulated time span (`time`).
## The server renders one stacked figure (liquid outflow + calculated surge
## volumes) and a one-row summary table.
shinyServer(function(input, output) {

    output$plot <- renderPlot({

        # Input from the ui; bail out until a file has been uploaded
        inFile <- input$datafile
        if (is.null(inFile))
            return(NULL)
        data <- read.csv(inFile$datapath, header = TRUE, stringsAsFactors = FALSE)
        drain <- input$drain
        surge_time <- input$time

        # Function to calculate the surge volume. It assumes the liquid
        # flowrate in bbl/d and it returns the surge in m3.
        # A centered 2-point moving average (stats::filter) smooths the
        # flowrate; only the excess above the drain capacity accumulates.
        # 0.1589873 converts bbl to m3; dt[1]/(24*3600) converts the
        # (assumed constant) time step from seconds to days.
        surge_calc <- function(time, liq, drain) {
            dt <- diff(time)
            mav <- function(x,n=2) {stats::filter(x,rep(1/n,n), sides=2)}
            acc <- (mav(liq) - drain)
            acc[acc<0] <- 0
            cum_acc <- cumsum(acc) * (dt[1]/(24*3600))
            return(cum_acc*0.1589873)
        }

        # Here a df for the plotting is created: the base surge plus
        # +/-20% sensitivity bands (drain*1000 converts kbbl/d to bbl/d)
        surge <- surge_calc(data[, 1], data[, 2], drain*1000)
        df <- data.frame(data)
        df <- cbind(df, 'Surge'=surge)
        df <- cbind(df, 'Surge_12'=surge*1.2)
        df <- cbind(df, 'Surge_08'=surge*0.8)

        # Some calcs for the plots: the reference time (66% of the selected
        # fraction of the simulated span, in hours) and the surge value at
        # the closest simulated time point
        xintercept = 0.66*surge_time/100*(tail(df['Time'], n=1)/3600)
        discreteXintercept <- findInterval(xintercept, df["Time"][, 1]/3600)
        s <- df['Surge'][, 1][discreteXintercept]

        # Create the two plots.
        # Top: liquid flowrate [kbbl/d] with the drain capacity as a
        # horizontal dashed line.
        plot1 <- df %>%
            select(Time, Liq) %>%
            na.omit() %>%
            ggplot() +
            geom_line(aes(x = Time/3600, y = Liq/1000, color='Liq. flow'),
                size = 1.5, alpha = 1) +
            labs(y="Liquid flowrate [kbbl/d]", x="Time [h]",
                title='Simulated liquid outflow [kbbl/d] (input data)') +
            scale_color_manual(values=c("#CC6666", "#9999CC")) +
            guides(fill=guide_legend(title=NULL)) +
            theme(legend.title=element_blank()) +
            geom_hline(yintercept=drain, linetype="longdash", color = "blue", size=.75) +
            theme(axis.text=element_text(size=14),
                axis.title=element_text(size=14,face="bold"))

        # Bottom: surge volume with +/-20% bands and the reference time as a
        # vertical dashed line.
        plot2 <- df %>%
            select(Time, Surge, Surge_12, Surge_08) %>%
            na.omit() %>%
            ggplot() +
            geom_line(aes(x = Time/3600, y = Surge, color='Surge vol.'),
                size = 1.5, alpha = 1) +
            geom_line(aes(x = Time/3600, y = Surge_12, color='Surge vol. + 20%'),
                size = 1.5, alpha = 0.75, linetype = "dashed") +
            geom_line(aes(x = Time/3600, y = Surge_08, color='Surge vol. - 20%'),
                size = 1.5, alpha = 0.75, linetype = "dotdash") +
            labs(y="Surge [m3]", x="Time [h]", title='Calculated surge volumes') +
            scale_color_manual(values=c("#000000", "#666666", "#666666")) +
            theme(legend.title=element_blank()) +
            geom_vline(xintercept=xintercept, linetype="longdash", color = "blue", size=.75) +
            theme(axis.text=element_text(size=14),
                axis.title=element_text(size=14,face="bold"))

        # Stack the two plots in a single device, sharing the x axis width
        grid.newpage()
        g <- grid.draw(rbind(ggplotGrob(plot1), ggplotGrob(plot2), size = "last"))
        print(g)

        # Data for the table.
        # NOTE(review): defining a reactive and assigning output$surges from
        # inside renderPlot is unconventional shiny style (outputs are usually
        # declared at the server top level); kept as found since it ties the
        # table refresh to the plot refresh.
        surgesValues <- reactive({
            data.frame('Drain' = drain,
                       'Surge' = s)
        })
        output$surges <- renderTable({ surgesValues()})
    })
})
f187fa1c278f5fbf288f5de9a7bc8dc4bd26e062
67de61805dd839979d8226e17d1316c821f9b1b4
/inst/models/nightly/ifa-ms.R
87393613a3811f1445240afa4c9a1a3623c174ca
[ "Apache-2.0" ]
permissive
falkcarl/OpenMx
f22ac3e387f6e024eae77b73341e222d532d0794
ee2940012403fd94258de3ec8bfc8718d3312c20
refs/heads/master
2021-01-14T13:39:31.630260
2016-01-17T03:08:46
2016-01-17T03:08:46
49,652,924
1
0
null
2016-01-14T14:41:06
2016-01-14T14:41:05
null
UTF-8
R
false
false
7,026
r
ifa-ms.R
# Nightly regression test for BA81 item factor analysis (OpenMx) on
# multiple-category (nominal/graded) items, checked against reference
# values attributed to flexMIRT in the original comments.
library(OpenMx)
library(rpf)

# Fixed seed: item starting values below are drawn with sample()
set.seed(1)

# Data path differs between the package tree and a local checkout; try
# the first location and fall back to the second.
m2.data <- suppressWarnings(try(read.table("models/nightly/data/ms-data.csv"), silent=TRUE))
if (is(m2.data, "try-error")) m2.data <- read.table("data/ms-data.csv")
m2.data[m2.data==-9] <- NA   # -9 is the missing-data code
m2.data <- m2.data + 1       # shift responses to 1-based categories

# Generalized partial credit parameterization via the nominal model:
# T.c is a lower-triangular -1 contrast over the outcome thresholds.
gpcm <- function(outcomes) {
  rpf.nrm(outcomes, T.c=lower.tri(diag(outcomes-1),TRUE) * -1)
#  rpf.nrm(outcomes, T.c=diag(outcomes-1))
}

# 22 items; most have 5 outcomes, a few have 3 or 4
m2.spec <- list()
m2.spec[1:22] <- gpcm(5)
m2.spec[2] <- gpcm(4)
m2.spec[5] <- gpcm(3)
m2.spec[6] <- gpcm(4)
m2.spec[13:14] <- gpcm(4)

m2.numItems <- length(m2.spec)
# NOTE: loop variable `c` shadows base::c inside the loop body
for (c in 1:m2.numItems) {
  m2.data[[c]] <- mxFactor(m2.data[[c]], levels=1:m2.spec[[c]]$outcomes)
}

m2.maxParam <-max(sapply(m2.spec, rpf.numParam))

# Item parameter matrix: row 1 is the common slope 'a1' (equated across
# items via a shared label); remaining rows are per-item category params.
ip.mat <- mxMatrix(name="item", nrow=m2.maxParam, ncol=m2.numItems,
                   values=c(1, 1, rep(0, m2.maxParam-2)), free=FALSE)
colnames(ip.mat) <- colnames(m2.data)
rownames(ip.mat) <- c('f1', rep('n', nrow(ip.mat)-1))
ip.mat$labels[1,] <- 'a1'
ip.mat$free[1,] <- TRUE

# Random starting values for the free threshold parameters of each item
rstart <- lapply(m2.spec, rpf.rparam, version=1)
for (ix in 1:m2.numItems) {
  thr <- m2.spec[[ix]]$outcomes - 1
  ip.mat$free[(2+thr):(1+2*thr), ix] <- TRUE
  ip.mat$values[ip.mat$free[,ix],ix] <- rstart[[ix]][ip.mat$free[,ix]]
}
# Pick a single random start for the equated slope
ip.mat$values[!is.na(ip.mat$labels) & ip.mat$labels == 'a1'] <-
  sample(ip.mat$values[!is.na(ip.mat$labels) & ip.mat$labels == 'a1'], 1)

# Reference parameter estimates (9 params x 22 items), originally
# exported from flexMIRT via the commented-out lines below.
# m2.fmfit <- read.flexmirt("~/2012/sy/fm/ms-rasch-prm.txt")
# cat(deparse(round(m2.fmfit$G1$param,6)))
fmfit <- structure(c(0.941583, 1, 0, 0, 0, -0.676556, 0.758794, -0.802595,
1.28891, 0.941583, 1, 0, 0, -0.182632, 0.897435, 1.30626, NA,
NA, 0.941583, 1, 0, 0, 0, 0.177835, -1.82185, 0.005832, -0.81109,
0.941583, 1, 0, 0, 0, -1.15962, -1.229, 0.032677, 0.4922, 0.941583,
1, 0, 0.457533, 0.324595, NA, NA, NA, NA, 0.941583, 1, 0, 0,
-2.69186, -1.04012, 1.61232, NA, NA, 0.941583, 1, 0, 0, 0, -1.38231,
0.034368, -1.214, -0.648291, 0.941583, 1, 0, 0, 0, -1.85655,
-1.17135, -0.262079, -0.531158, 0.941583, 1, 0, 0, 0, -1.29475,
-0.376539, 0.02024, 0.135187, 0.941583, 1, 0, 0, 0, -1.38279,
0.524151, -0.508742, 0.633671, 0.941583, 1, 0, 0, 0, -0.979595,
-0.048528, 0.659669, 0.544857, 0.941583, 1, 0, 0, 0, -2.09039,
-1.45472, -0.472137, -0.666386, 0.941583, 1, 0, 0, 0.174682,
0.645437, 0.907132, NA, NA, 0.941583, 1, 0, 0, -0.842216, 0.490717,
1.28034, NA, NA, 0.941583, 1, 0, 0, 0, -0.913355, -0.319602,
-0.310164, -0.15536, 0.941583, 1, 0, 0, 0, 0.567085, -1.56762,
0.884553, 0.122113, 0.941583, 1, 0, 0, 0, -0.152985, -0.341317,
-0.183837, 1.17952, 0.941583, 1, 0, 0, 0, 0.168869, -0.490354,
0.373892, 1.29714, 0.941583, 1, 0, 0, 0, -0.827385, 0.626197,
-1.52994, 0.494209, 0.941583, 1, 0, 0, 0, 0.511263, -0.750358,
1.01852, 0.840026, 0.941583, 1, 0, 0, 0, 0.968905, -0.009671,
1.52297, 1.69255, 0.941583, 1, 0, 0, 0, 1.89582, 0.051828, 2.25758,
1.52469), .Dim = c(9L, 22L), .Dimnames = list(NULL, c("i1", "i2",
"i3", "i4", "i5", "i6", "i7", "i8", "i9", "i10", "i11", "i12",
"i13", "i14", "i15", "i16", "i17", "i18", "i19", "i20", "i21",
"i22")))

# ip.mat$values <- m2.fmfit$G1$param

# Sanity check: the fit at the reference parameters must reproduce the
# reference -2LL (50661.38) before estimating anything.
if (1) {
  cip.mat <- ip.mat
  cip.mat$values <- fmfit
  cM <- mxModel(model="ms", cip.mat,
                mxData(observed=m2.data, type="raw"),
                mxExpectationBA81(ItemSpec=m2.spec),
                mxFitFunctionML(),
                mxComputeOnce('fitfunction', 'fit'))
  cM <- mxRun(cM, silent=TRUE)
  omxCheckCloseEnough(cM$fitfunction$result, 50661.38, .01)
}

# EM with Newton-Raphson M-steps; SEs from the Mislevy/Raghavan (mr1991)
# supplemented-EM information, plus Hessian diagnostics.
plan <- mxComputeSequence(steps=list(
  mxComputeEM('expectation', 'scores',
              mxComputeNewtonRaphson(freeSet='item', verbose=0L),
              information="mr1991",
              infoArgs=list(fitfunction='fitfunction')),
  mxComputeStandardError(),
  mxComputeHessianQuality()))

m2 <- mxModel(model="m2", ip.mat,
              mxData(observed=m2.data, type="raw"),
              mxExpectationBA81(ItemSpec=m2.spec),
              mxFitFunctionML(),
              plan)
# m2 <- mxOption(m2, "Number of Threads", 1)
m2 <- mxRun(m2, silent=TRUE)

omxCheckCloseEnough(m2$output$minimum, 50661.377, .01)
omxCheckCloseEnough(log(m2$output$conditionNumber), 6.57, .5)
#omxCheckTrue(is.na(m2$output$conditionNumber))

# Reference SEM-based standard errors
#cat(deparse(round(c(m2$output$standardErrors), 3)))
semse <- c(0.022, 0.095, 0.116, 0.116, 0.108, 0.176, 0.222, 0.305, 0.382,
0.359, 0.244, 0.215, 0.105, 0.082, 0.067, 0.07, 0.185, 0.215,
0.134, 0.061, 0.071, 0.25, 0.244, 0.231, 0.155, 0.328, 0.209,
0.177, 0.16, 0.211, 0.176, 0.182, 0.185, 0.187, 0.189, 0.201,
0.194, 0.174, 0.161, 0.2, 0.234, 0.409, 0.236, 0.179, 0.154,
0.064, 0.078, 0.092, 0.084, 0.074, 0.092, 0.584, 0.493, 0.441,
0.362, 0.1, 0.097, 0.079, 0.085, 0.113, 0.115, 0.102, 0.111,
0.079, 0.082, 0.076, 0.092, 0.541, 0.607, 0.554, 0.337, 0.081,
0.083, 0.083, 0.098, 0.072, 0.084, 0.103, 0.138, 0.084, 0.103,
0.141, 0.178)
#max(abs(c(m2$output$standardErrors) - semse))
omxCheckCloseEnough(c(m2$output$standardErrors), semse, .01)  # similar to flexMIRT

# Optimizer bookkeeping must stay within expected bounds
emstat <- m2$compute$steps[[1]]$output
omxCheckCloseEnough(emstat$EMcycles, 19, 2)
omxCheckCloseEnough(emstat$totalMstep, 73, 10)
omxCheckCloseEnough(emstat$semProbeCount / length(semse), 3, .1)
omxCheckCloseEnough(m2$output$evaluations, 1062, 5)

#print(m2$matrices$item$values - fmfit)
print(m2$output$backendTime)

# Per-item response counts (computed but not used below)
n <- apply(!is.na(m2.data), 2, sum)

# Re-derive SEs from the "meat" (observed cross-product) information and
# compare against the flexMIRT reference values.
i1 <- mxModel(m2,
              mxComputeSequence(steps=list(
                mxComputeOnce('fitfunction', 'information', "meat"),
                mxComputeStandardError(),
                mxComputeHessianQuality())))
i1 <- mxRun(i1, silent=TRUE)

omxCheckTrue(i1$output$infoDefinite)
omxCheckCloseEnough(log(i1$output$conditionNumber), 7.3, .5)

#cat(deparse(round(c(i1$output$standardErrors), 3)))
se <- c(0.019, 0.1, 0.123, 0.121, 0.119, 0.237, 0.246, 0.33, 0.417,
0.386, 0.281, 0.24, 0.108, 0.086, 0.072, 0.076, 0.221, 0.265,
0.138, 0.068, 0.085, 0.275, 0.267, 0.263, 0.196, 0.359, 0.237,
0.208, 0.203, 0.227, 0.191, 0.199, 0.225, 0.21, 0.215, 0.232,
0.235, 0.184, 0.179, 0.218, 0.254, 0.437, 0.26, 0.201, 0.194,
0.07, 0.083, 0.101, 0.089, 0.079, 0.096, 0.649, 0.549, 0.507,
0.421, 0.106, 0.102, 0.084, 0.093, 0.125, 0.124, 0.112, 0.127,
0.088, 0.089, 0.087, 0.109, 0.633, 0.704, 0.61, 0.415, 0.089,
0.089, 0.09, 0.112, 0.083, 0.092, 0.115, 0.17, 0.095, 0.11,
0.16, 0.192)
omxCheckCloseEnough(c(i1$output$standardErrors), se, .001)  # matches flexmirt

# Disabled cross-check of the same model in the mirt package
if (0) {
  library(mirt)
  rdata <- sapply(m2.data, unclass)-1
  # for flexMIRT, write CSV
  #write.table(rdata, file="ifa-drm-mg.csv", quote=FALSE, row.names=FALSE, col.names=FALSE)
  pars <- mirt(rdata, 1, itemtype="Rasch", D=1, quadpts=49, pars='values')
  # pars[pars$name=="a1",'value'] <- 1
  # pars[pars$name=="a1",'est'] <- FALSE
  # pars[pars$name=="COV_11",'est'] <- TRUE
  fit <- mirt(rdata, 1, itemtype="Rasch", D=1, quadpts=49, pars=pars,
              SE=TRUE, SE.type="crossprod")
  # LL -25330.691 * -2 = 50661.38
  got <- coef(fit)
}
56f6776e99f0054e6d5e1ec3f5cdab4c68d58348
3ea8066910a8b32d9a4b3204e720a45f06405efb
/man/predict.HDPMcdensity.Rd
d0b82ba224b10245d7df85ec86136a6ff83c6643
[]
no_license
cran/DPpackage
ae76a06a3f35dc88d1f18476e2470473a67e2277
33af05b258c49ae4826655dd196d0ecbf5a008b1
refs/heads/master
2020-05-16T23:58:39.158728
2018-01-06T07:39:08
2018-01-06T07:39:08
17,678,687
3
7
null
null
null
null
UTF-8
R
false
false
3,796
rd
predict.HDPMcdensity.Rd
\name{predict.HDPMcdensity} \alias{predict.HDPMcdensity} \title{Predictive Information for the Dependent Random Probability Measures.} \description{ Plot the probability measures arising from a HDPM of normals model for conditional density estimation. Support provided by the NIH/NCI R01CA75981 grant. } \usage{ \method{predict}{HDPMcdensity}(object,pred,i,r,ask=TRUE,nfigr=2,nfigc=2, ...) } \arguments{ \item{object}{ \code{HDPMcdensity} fitted model object.} \item{pred}{ indicator for the values of the predictors, given by the row pred in xpred, for which the conditional densities must be drawn.} \item{i}{ study indicator.} \item{r}{ indicator for including (0) or not (1) the common measure.} \item{ask}{ logical variable indicating whether the plots must be displayed sequentially or not.} \item{nfigr}{ number of rows in the figure.} \item{nfigc}{ number of columns in the figure.} \item{...}{ further arguments to be passed.} } \details{ Must run \code{\link{HDPMcdensity}} first to generate posterior simulations. } \seealso{ \code{\link{HDPMcdensity}} } \references{ Mueller, P., Quintana, F. and Rosner, G. (2004). A Method for Combining Inference over Related Nonparametric Bayesian Models. Journal of the Royal Statistical Society, Series B, 66: 735-749. 
} \examples{ \dontrun{ # Data data(calgb) attach(calgb) y <- cbind(Z1,Z2,Z3,T1,T2,B0,B1) x <- cbind(CTX,GM,AMOF) z <- cbind(y,x) # Data for prediction data(calgb.pred) xpred <- as.matrix(calgb.pred[,8:10]) # Prior information prior <- list(pe1=0.1, pe0=0.1, ae=1, be=1, a0=rep(1,3), b0=rep(1,3), nu=12, tinv=0.25*var(z), m0=apply(z,2,mean), S0=var(z), nub=12, tbinv=var(z)) # Initial state state <- NULL # MCMC parameters mcmc <- list(nburn=5000, nsave=5000, nskip=3, ndisplay=100) # Fitting the model fit1 <- HDPMcdensity(formula=y~x, study=~study, xpred=xpred, prior=prior, mcmc=mcmc, state=state, status=TRUE) # Posterior inference fit1 summary(fit1) # Plot the parameters # (to see the plots gradually set ask=TRUE) plot(fit1,ask=FALSE) # Plot the a specific parameters # (to see the plots gradually set ask=TRUE) plot(fit1,ask=FALSE,param="eps",nfigr=1,nfigc=2) # Plot the measure for each study # under first values for the predictors, xpred[1,] predict(fit1,pred=1,i=1,r=1) # pred1, study 1 predict(fit1,pred=1,i=2,r=1) # pred1, study 2 # Plot the measure for each study # under second values for the predictors, xpred[2,] predict(fit1,pred=2,i=1,r=1) # pred2, study 1 predict(fit1,pred=2,i=2,r=1) # pred2, study 2 # Plot the idiosyncratic measure for each study # under first values for the predictors, xpred[1,] predict(fit1,pred=1,i=1,r=0) # study 1 predict(fit1,pred=1,i=2,r=0) # study 2 # Plot the common measure # under first values for the predictors, xpred[1,] predict(fit1,pred=1,i=0) } } \author{ Alejandro Jara \email{<atjara@uc.cl>} Peter Mueller \email{<pmueller@mdanderson.org>} } \keyword{models} \keyword{nonparametric}
0bb0ac6868c6b9a4a4acadecd7d56ba859c43826
679e1c340bff1f310c25cc689bcf481ff592f680
/man/add_intervals.Rd
4b3f9bb71e7668b7fb2bdc6eeea3aada74ca11da
[]
no_license
deruncie/PLS205_package
cc47b19dc8961575dbc11784b7ea6c731b5b120e
6bbc73ccbcf5b73a67a6b6da858c78bd235f1083
refs/heads/master
2022-03-04T20:03:12.922151
2022-02-22T21:31:55
2022-02-22T21:31:55
75,047,152
1
0
null
null
null
null
UTF-8
R
false
true
779
rd
add_intervals.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_intervals.R
\name{add_intervals}
\alias{add_intervals}
\title{Add model predictions and an SE band to a data.frame}
\usage{
add_intervals(data, fitted_model, SEs = 2)
}
\arguments{
\item{data}{A data.frame object with all terms used in the model}

\item{fitted_model}{a model object (e.g., from lm or lmer) compatible with the `predict` function}

\item{SEs}{width of confidence interval band (in se's)}
}
\value{
dataframe augmented with columns \code{y_pred}, \code{ymin}, and \code{ymax}
}
\description{
Adds columns y_pred, ymax and ymin to a data.frame based on predictions
from a previously fitted linear model (lm or lmer).
Note: lmer models don't provide SEs, so those values are set to NA.
}
69233dd74479febb891bba462c4c6de93af33e7f
37c0a409c4f06dfac2365fb792a953f59758f245
/man/GetFixedParams.Rd
2ce081d5c50edd4da328927cf4001c67712ecf72
[ "MIT" ]
permissive
cmlegault/ASAPplots
8a3aee8a79137dd8911305397430965aa18ae683
75adfd7cf889a5a2b66b6ef0a4dbe22b51aa2084
refs/heads/master
2021-07-13T00:48:41.370521
2021-03-22T21:02:40
2021-03-22T21:02:40
87,811,600
3
4
MIT
2021-03-19T14:02:42
2017-04-10T13:04:09
R
UTF-8
R
false
true
383
rd
GetFixedParams.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_fixed_params.R
\name{GetFixedParams}
\alias{GetFixedParams}
\title{GetFixedParams}
\usage{
GetFixedParams(asap.dat)
}
\arguments{
\item{asap.dat}{asap dat object from ReadASAP3DatFile}
}
\description{
Returns data frame of parameters for a given ASAP run as either "fixed" or "estimated."
}
936463dd3ba506f14363f6eb149d63ffcdbfd911
9ec885208283c21c0fe6d714ef89534158a00237
/man/tryTryTry.Rd
51699b6e7d8f3434741198b12cb9c8b4ec520fb1
[ "MIT" ]
permissive
bi-sdal/sdalr
21bcbf3f637bad697022852262d7c3d00a5bab87
8fb2c78c1b4e326dfde955570313dbfe2ad3a3bc
refs/heads/master
2021-01-20T01:22:34.308419
2018-07-16T18:52:27
2018-07-16T18:52:27
89,265,260
0
1
null
2017-11-13T17:28:32
2017-04-24T16:53:50
R
UTF-8
R
false
true
364
rd
tryTryTry.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trytrytry.R \name{tryTryTry} \alias{tryTryTry} \title{Try an expression n times before quitting} \usage{ tryTryTry(x, n = 3L) } \arguments{ \item{x}{The expression to try.} \item{n}{The number of times to try. Default is 3.} } \description{ Try an expression n times before quitting }
7abb02b3ca1fb3cbba47783818b4d5ec744b2294
082ae6acef6cfafb85ccc8c98c67c670ffd934ee
/man/ragtag_html.Rd
35896b5c88dbf977fed1637abf49e5f17093500c
[ "MIT" ]
permissive
AnirudhHimself/ragtag
9825b3905e79a14e0b08f1f6051c4f38923dd86f
6b0eda50ad886860e0bd653f8c516a7d53e101a7
refs/heads/master
2023-02-05T20:48:25.921055
2020-12-25T21:36:54
2020-12-25T21:36:54
267,073,259
0
0
null
null
null
null
UTF-8
R
false
true
718
rd
ragtag_html.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htmlrmd.R
\name{ragtag_html}
\alias{ragtag_html}
\title{Personal Rmd Template. It only converts to HTML right now.}
\usage{
ragtag_html(number_sections = FALSE, ...)
}
\arguments{
\item{number_sections}{Should sections be numbered?}

\item{...}{additional arguments sent to \code{html_document}}
}
\description{
It essentially uses the base rmarkdown html_document function but adds in
additional CSS and a footer template file.

It essentially uses the base rmarkdown html_document function, but adds in
additional CSS and a footer template file. I haven't added much to this
footer template so it's more of a skeleton for later.
}
4f136005220d0c7d852582c2eea278f754737388
ac968965c2093c38542eb0db597df82b971dd3f3
/Tidyverse Webinar/umn_rmcc_tidy2020_part1_morning_lesson/tidy2020_part1_morning_lesson_starter.R
b0f48ebf93be8222e7d9c030405382cb65e02e21
[]
no_license
nf-exphys/UMN-classes
6a4e0aa2790ff32cfa8eb03850d1170fe33b252a
379fb7c623eacf921a9db3b74278a30179945187
refs/heads/master
2023-03-29T22:29:52.617463
2021-04-18T17:09:55
2021-04-18T17:09:55
348,120,741
0
0
null
null
null
null
UTF-8
R
false
false
2,772
r
tidy2020_part1_morning_lesson_starter.R
########################################################
# Into the Mud: Data Wrangling with R and the Tidyverse
# Jeffrey K. Bye, Ph.D. & Ethan C. Brown, Ph.D.
# Friday, December 11, 2020; 8:30 am–12:30 pm
########################################################

# 09:00 - 09:30 Morning Zoom synchronous -- 30 minute lecture

## OPEN PROJECT, OPEN FILE
# Notice the handy .csv file

# load tidyverse!
library(tidyverse)

# Read in data
test <- read_csv("student_data.csv")
# a benefit of RProjects is that if we keep everything in the same folder,
# R can find the file *relative* to where the .RProj file is sitting
# (i.e., inside its own folder)

# Examine data:
# Note: this is completely fictional, random data
# with the 'theme' that it's CEHD people answering test Qs about R
summary(test) # summarize
str(test)     # structure
test

# oh no -- this data isn't tidy! One row per student, one column per
# question; we want one row per student-question pair.
test %>%
  pivot_longer(
    cols = c(Q1_MultipleChoice, Q2_MultipleChoice, Q3_ShortAnswer, Q4_ShortAnswer), # list columns to go from wide to long
    names_to = "Question", # the old column names become a Question column
    values_to = "Score"    # the cell values become a Score column
  )

test %>%
  pivot_longer(
    cols = -Student_ID, # same as above but cleaner than listing all column names
    names_to = "Question",
    values_to = "Score"
  )

test_long <- test %>% # keeping the original data and saving new data like this is known as "breadcrumbing"
  pivot_longer(
    cols = starts_with("Q"), # finds all columns that start with the letter Q
    names_to = "Question",
    values_to = "Score"
  )

# Split "Q1_MultipleChoice" etc. into number and type
test_long %>%
  separate(
    col = Question,
    into = c("Question_Number", "Question_Type"),
    sep = "_"
  )

# Doing all of this in a single pivot_longer call
test_long2 <- test %>%
  pivot_longer(
    cols = starts_with("Q"),                       # finds all columns that start with the letter Q
    names_to = c("Question_Number", "Question_Type"), # where the two name parts go
    values_to = "Score",                           # where the cell values go
    names_sep = "_"                                # where to split the old names
  )

test_long2 %>% write_csv("test_long.csv")

survey <- read_csv("r_survey.csv")
survey # we have 2 measures at pre and 2 measures at post

survey_longish <- survey %>%
  pivot_longer(
    cols = starts_with("P"),
    names_to = c("Time", "Question"),
    names_sep = "_",
    values_to = "Rating" # generally, new column names need quotes; existing column names don't need quotes
  ) %>%
  pivot_wider( # gives us a partially long format, may be useful for some analyses
    names_from = Question,
    values_from = Rating
  )

survey_longish %>% write_csv("survey_longish.csv")

# FYI: pivot_longer is the successor to gather and reshape/melt
6a825fb641e6d14443a129f376430c109a102235
dc7549847fa8fe32a2d3e0d181ed2fd2c4030144
/man/extractTS.Rd
3aadb520a4f93d55f2f740aeb8c2896f335b11bb
[]
no_license
RRemelgado/CAWaR
5e312fa7ad6f72ae21c1f9e55cc0f53e1426b2ad
7536d06ce5073b9b686263871135add46adca8b1
refs/heads/master
2021-07-14T19:58:47.962666
2020-06-04T11:18:34
2020-06-04T11:18:34
141,430,724
2
2
null
null
null
null
UTF-8
R
false
true
2,288
rd
extractTS.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extractTS.R \name{extractTS} \alias{extractTS} \title{extractTS} \usage{ extractTS(x, y, z, id) } \arguments{ \item{x}{Object of class \emph{SpatialPolygons}, \emph{SpatialPolygonsDataFrame}, \emph{SpatialPoints} or \emph{SpatialPointsDataFrame}.} \item{y}{A \emph{raster} object, a list of \emph{RasterLayer} objects or a numeric element.} \item{z}{\emph{Numeric} vector with weights for each element in \emph{x} (when points).} \item{id}{\emph{Numeric} vector with unique identifiers for \emph{x} (when points).} } \value{ A \emph{list}. } \description{ Extracts time series data from a \emph{RasterStack} for a \emph{SpatialPolygons} or a \emph{SpatialPolygonsDataFrame} object. } \details{ {For each polygon in \emph{x} - if \emph{x} is a \emph{SpatialPolygons} and \emph{SpatialPolygonsDataFrame} object - the function identifies the overlapping pixels in \emph{y} and, for each pixel, estimates the percentage area covered by the polygon. Using this data as weights, the function calculates the weighted mean for each band in \emph{y}. If \emph{y} is a numeric element, the function will build a raster with resolution equal to \emph{y} over which the pixel cover will be estimated. Moreover, if \emph{x} is a \emph{SpatialPoints} or a \emph{SpatialPointsDataFrame} object, the function will skip the pixel extraction step. In this case, the user may provide a vector with sample weights through \emph{z} and a vector of unique identifiers (reporting on e.g. 
the polygon membership) The function returns a list of three \emph{data.frame} objects where each row represents a different polygon in \emph{x}: \itemize{ \item{\emph{pixel.info} - \emph{SpatialPointsDataFrame} with pixel-wise samples for each polygon (identified by the field \emph{id}).} \item{\emph{polygon.info} - Mean, min, max and standard deviation of the pixel cover; centroid coordinates.} \item{\emph{weighted.mean} - Weighted mean raster values (if \emph{y} is a raster object).}}} } \examples{ { require(raster) require(fieldRS) # read raster data r <- brick(system.file("extdata", "ndvi.tif", package="fieldRS")) # read field data data(fieldData) extractTS(fieldData[1:5,], r) } } \seealso{ \code{\link{analyseTS}} }
b9e95fe794c04f147552c2e2912726ee089833ab
1d80ea56e9759f87ef9819ed92a76526691a5c3b
/R/utils_ci.R
799e8667fd607ca32112eece8288254fc5fc0704
[]
no_license
cran/effectsize
5ab4be6e6b9c7f56d74667e52162c2ca65976516
e8baef181cc221dae96f60b638ed49d116384041
refs/heads/master
2023-08-16T21:23:58.750452
2023-08-09T18:40:02
2023-08-09T19:30:51
236,590,396
0
0
null
null
null
null
UTF-8
R
false
false
3,217
r
utils_ci.R
# NCP -------------------------
# Helpers that invert the noncentral F / t / chi-square CDFs numerically:
# given an observed statistic, find the noncentrality parameters whose
# distributions place the statistic at the lower/upper tail probabilities
# implied by conf.level. Used to build CIs for effect sizes.

#' @keywords internal
# Returns c(lower, upper) NCP bounds for an observed F statistic.
.get_ncp_F <- function(f, df, df_error, conf.level = 0.9) {
  if (!is.finite(f) || !is.finite(df) || !is.finite(df_error)) {
    return(c(NA, NA))
  }

  alpha <- 1 - conf.level
  probs <- c(alpha / 2, 1 - alpha / 2)

  # starting guess: the observed noncentrality estimate f * df
  lambda <- f * df
  # search over a pair of NCPs so that pf(f; ncp) hits both tail probs
  ncp <- suppressWarnings(stats::optim(
    par = 1.1 * rep(lambda, 2),
    fn = function(x) {
      p <- stats::pf(q = f, df, df_error, ncp = x)

      abs(max(p) - probs[2]) +
        abs(min(p) - probs[1])
    },
    control = list(abstol = 1e-09)
  ))
  f_ncp <- sort(ncp$par)

  # clamp bounds to 0 when f is small enough that the central F already
  # exceeds the requested tail probability
  if (f <= stats::qf(probs[1], df, df_error)) {
    f_ncp[2] <- 0
  }

  if (f <= stats::qf(probs[2], df, df_error)) {
    f_ncp[1] <- 0
  }

  return(f_ncp)
}

#' @keywords internal
# Returns c(lower, upper) NCP bounds for an observed t statistic.
.get_ncp_t <- function(t, df_error, conf.level = 0.95) {
  # # Note: these aren't actually needed - all t related functions would fail earlier
  # if (!is.finite(t) || !is.finite(df_error)) {
  #   return(c(NA, NA))
  # }

  alpha <- 1 - conf.level
  probs <- c(alpha / 2, 1 - alpha / 2)

  # same bracketing idea as .get_ncp_F, but the t NCP can be negative,
  # so no clamping at 0 is done here
  ncp <- suppressWarnings(stats::optim(
    par = 1.1 * rep(t, 2),
    fn = function(x) {
      p <- stats::pt(q = t, df = df_error, ncp = x)

      abs(max(p) - probs[2]) +
        abs(min(p) - probs[1])
    },
    control = list(abstol = 1e-09)
  ))
  t_ncp <- unname(sort(ncp$par))

  return(t_ncp)
}

#' @keywords internal
# Returns c(lower, upper) NCP bounds for an observed chi-square statistic.
.get_ncp_chi <- function(chi, df, conf.level = 0.95) {
  # # Note: these aren't actually needed - all chisq related functions would fail earlier
  # if (!is.finite(chi) || !is.finite(df)) {
  #   return(c(NA, NA))
  # }

  alpha <- 1 - conf.level
  probs <- c(alpha / 2, 1 - alpha / 2)

  ncp <- suppressWarnings(stats::optim(
    par = 1.1 * rep(chi, 2),
    fn = function(x) {
      p <- stats::pchisq(q = chi, df, ncp = x)

      abs(max(p) - probs[2]) +
        abs(min(p) - probs[1])
    },
    control = list(abstol = 1e-09)
  ))
  chi_ncp <- sort(ncp$par)

  # clamp bounds to 0 when chi is small relative to the central quantiles
  if (chi <= stats::qchisq(probs[1], df)) {
    chi_ncp[2] <- 0
  }

  if (chi <= stats::qchisq(probs[2], df)) {
    chi_ncp[1] <- 0
  }

  chi_ncp
}

# Validators --------------------------------------

#' @keywords internal
# TRUE if a CI was requested, FALSE if ci is NULL; errors on invalid ci.
.test_ci <- function(ci) {
  if (is.null(ci)) {
    return(FALSE)
  }

  # length check runs before the range comparisons, so `||` never sees a
  # vector condition
  if (!is.numeric(ci) || length(ci) != 1L || ci < 0 || ci > 1) {
    insight::format_error("ci must be a single numeric value between (0, 1)")
  }

  return(TRUE)
}

#' @keywords internal
# Widen a one-sided CI: a one-sided level ci corresponds to a two-sided
# level of 2*ci - 1 (e.g. 0.95 one-sided -> 0.90 two-sided).
.adjust_ci <- function(ci, alternative) {
  if (alternative == "two.sided") {
    return(ci)
  }

  2 * ci - 1
}

#' @keywords internal
# Replace the unbounded side of a one-sided CI with the parameter's
# theoretical limit (lb/ub).
.limit_ci <- function(out, alternative, lb, ub) {
  if (alternative == "two.sided") {
    return(out)
  }

  if (alternative == "less") {
    out$CI_low <- lb
  } else if (alternative == "greater") {
    out$CI_high <- ub
  }

  out
}

#' @keywords internal
# Resolve an alternative-hypothesis argument; NULL defaults to
# "two.sided" (or "greater" when two.sided = FALSE).
.match.alt <- function(alternative, two.sided = TRUE) {
  if (is.null(alternative)) {
    if (two.sided) {
      return("two.sided")
    } else {
      return("greater")
    }
  }

  match.arg(alternative, c("two.sided", "less", "greater"))
}
d3991ce51a1e18b515c9c3cd27ef5c2b79fb1b56
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/handlr/examples/HandlrClient.Rd.R
09a17c7abd92cf6f22adbc7ca2cbc7efead80ebf
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
2,636
r
HandlrClient.Rd.R
# Extracted example code for handlr::HandlrClient: round-trips citation
# metadata between formats (citeproc, ris, bibtex, rdfxml, codemeta, ...).
library(handlr)

### Name: HandlrClient
### Title: handlr client
### Aliases: HandlrClient
### Keywords: datasets

### ** Examples

# read() can be run with format specified or not
# if format not given, we attempt to guess the format and then read
z <- system.file('extdata/citeproc.json', package = "handlr")
(x <- HandlrClient$new(x = z))
x$read()
x$read("citeproc")
x$parsed

# you can run read() then write()
# or just run write(), and read() will be run for you if possible
z <- system.file('extdata/citeproc.json', package = "handlr")
(x <- HandlrClient$new(x = z))
cat(x$write("ris"))

# read from a DOI as a url (network access, so interactive only)
if (interactive()) {
  (x <- HandlrClient$new('https://doi.org/10.7554/elife.01567'))
  x$parsed
  x$read()
  x$write('bibtex')
}

# read from a bare DOI
if (interactive()) {
  (x <- HandlrClient$new('10.7554/elife.01567'))
  x$parsed
  x$read()
  x$write('bibtex')
}

# read in citeproc, write out bibtex
z <- system.file('extdata/citeproc.json', package = "handlr")
(x <- HandlrClient$new(x = z))
x$path
x$ext
x$read("citeproc")
x$parsed
x$write("bibtex")
f <- tempfile(fileext = ".bib")
x$write("bibtex", file = f)
readLines(f)
unlink(f)

# read in ris, write out ris
z <- system.file('extdata/peerj.ris', package = "handlr")
(x <- HandlrClient$new(x = z))
x$path
x$format
x$read("ris")
x$parsed
x$write("ris")
cat(x$write("ris"))

# read in bibtex, write out ris
(z <- system.file('extdata/bibtex.bib', package = "handlr"))
(x <- HandlrClient$new(x = z))
x$path
x$format
x$read("bibtex")
x$parsed
x$write("ris")
cat(x$write("ris"))

# read in bibtex, write out RDF XML
if (interactive()) {
  (z <- system.file('extdata/bibtex.bib', package = "handlr"))
  (x <- HandlrClient$new(x = z))
  x$path
  x$format
  x$read("bibtex")
  x$parsed
  x$write("rdfxml")
  cat(x$write("rdfxml"))
}

# codemeta
(z <- system.file('extdata/codemeta.json', package = "handlr"))
(x <- HandlrClient$new(x = z))
x$path
x$format
x$read("codemeta")
x$parsed
x$write("codemeta")

# more than one citation in a single input file
z <- system.file('extdata/citeproc-many.json', package = "handlr")
(x <- HandlrClient$new(x = z))
x$parsed
x$read()
x$parsed
## schema org
x$write("schema_org")
## bibtex
x$write("bibtex")
## bibtex to file
f <- tempfile(fileext=".bib")
x$write("bibtex", f)
readLines(f)
unlink(f)
## to RIS
x$write("ris")
### only one per file, so not combined
files <- replicate(2, tempfile(fileext=".ris"))
x$write("ris", files)
lapply(files, readLines)

# handle strings instead of files
z <- system.file('extdata/citeproc-crossref.json', package = "handlr")
(x <- HandlrClient$new(x = readLines(z)))
x$read("citeproc")
x$parsed
cat(x$write("bibtex"), sep = "\n")
89dd6608c00aa2ccab8f3ed3681f3c398f7c1225
ce8cebb9873ded8f0f6b8c1366551fe810f9071c
/server.R
f3cfe93019a16c4bcecf10c0d124f9a4532cc84c
[]
no_license
black-tea/lacity-centerline-locate
a374fd7e34f7562b63e1ac8544baa38eaf95087b
71de3dd7b700cbf894bf5cf054755187f1125622
refs/heads/master
2020-03-27T09:43:33.294170
2018-08-30T00:15:54
2018-08-30T00:15:54
146,366,970
0
0
null
null
null
null
UTF-8
R
false
false
7,847
r
server.R
#######################################
# Infrastructure Geocoder Server Code #
#######################################
# Shiny server for locating treatments on the LA centerline network:
# the user picks an intersection (and optionally a cross-street segment)
# on a leaflet map; council/DOT districts are looked up by spatial join.
#
# NOTE(review): leaflet and dplyr functions (leaflet(), filter(), %>%)
# are used below but not attached here -- presumably loaded in ui.R or
# global.R; confirm before running this file standalone.
library(shiny)
library(sf)
library(googlesheets)

#### Functions
# Build a leaflet marker icon in the given color (fixed set of choices)
createIcon <- function(color) {
  custom_icon <- awesomeIcons(
    icon = 'circle-o',
    iconColor = '#ffffff',
    library = 'fa',
    # The markercolor is from a fixed set of color choices
    markerColor = color
  )
  return(custom_icon)
}

#### Prep Code
# Load intersections, street centerlines, and district boundaries
intersections <- read_sf('data/Intersections/Intersections.shp')
streets <- read_sf('data/Centerline/Streets.shp')
councilDistricts <- read_sf('data/CouncilDistricts/CnclDist_July2012_wgs84.shp')
dotDistricts <- read_sf('data/DOTDistricts/LADOT_District_wgs84.shp')

#### Server Code
server <- function(input, output, session) {

  ### UI Elements
  # Treatment Type Selection (currently only one choice)
  output$treatment_type <- renderUI({
    # Selection Input
    selectInput("treatment_type",
                label = "Treatment Type",
                c("Intersection Tightening"))
  })

  # Intersection Selection
  output$int_select <- renderUI({
    # Selection Input
    selectizeInput(inputId = "int",
                   label = "Intersection",
                   choices = intersections$TOOLTIP,
                   selected = NULL,
                   multiple = FALSE)
  })

  # Message Object
  # NOTE(review): this renderText is immediately overridden by the
  # renderUI assignment to the same output id below; only the renderUI
  # version takes effect.
  output$message <- renderText({rv_msg$msg})

  # here we should render a list for each item in the msg
  output$message <- renderUI({
    #HTML(paste(rv_msg$msg, sep = '<br/>'))
    #HTML(lapply(rv_msg$msg, paste, collapse = " "))
    HTML(paste(rv_msg$msg, collapse ='<br/>'))
  })

  ### Reactive Objects
  # RV for location objects (selected intersection and street segment)
  rv_location <- reactiveValues(Intersection=list(), Segment=list())
  # RV storing UI message variable
  rv_msg <- reactiveValues()

  # Reactive expression to grab intersection data based on user selection
  intersection_r <- reactive({
    if(!is.null(input$int) && input$int != "" && length(input$int) > 0){
      # previous DB-backed implementation, kept for reference:
      # int_query <- paste0("SELECT * FROM intersections WHERE tooltip=","'",toString(input$int),"'")
      # intersection_r <- sqlQuery(int_query, type = 'spatial')
      intersection_r <- intersections %>% filter(TOOLTIP == toString(input$int))
    } else {return(NULL)}
  })

  # Reactive expression to grab the council district containing the point
  cd_r <- reactive({
    if(!is.null(rv_location$Intersection)){
      cd_r <- sf::st_join(rv_location$Intersection, councilDistricts)
      return(cd_r$DISTRICT)
    } else {
      return(NULL)
    }
  })

  # Reactive expression to grab the DOT district containing the point
  dotR <- reactive({
    if(!is.null(rv_location$Intersection)){
      dotR <- sf::st_join(rv_location$Intersection, dotDistricts)
      return(dotR$DOT_DIST)
    } else {
      return(NULL)
    }
  })

  # Reactive expression to grab cross streets from the selected
  # intersection (segments whose from/to node matches the node id)
  xstreet_r <- reactive({
    if(!is.null(input$int) && input$int != "" && length(input$int) > 0){
      # Grab selected intersection information
      intersection_r <- intersection_r()
      # previous DB-backed implementation, kept for reference:
      # xstreet_query = paste0("SELECT *
      #                        FROM streets
      #                        WHERE int_id_fro=",intersection_r$cl_node_id," OR int_id_to=",intersection_r$cl_node_id)
      # xstreet <- sqlQuery(xstreet_query, type='spatial')
      xstreet <- streets %>%
        filter(INT_ID_FRO == intersection_r$CL_NODE_ID | INT_ID_TO == intersection_r$CL_NODE_ID)
    } else {return(NULL)}
  })

  ### Map
  output$map <- renderLeaflet({
    # Base map centered on Los Angeles
    map <- leaflet() %>%
      addProviderTiles(providers$CartoDB.Positron,
                       options = providerTileOptions(minZoom = 10, maxZoom = 18)) %>%
      setView(lng = -118.329327, lat = 34.0546143, zoom = 11)
  })

  # Map observer that updates based on the intersection selection
  observeEvent(input$int, {
    if(!is.null(input$int) && input$int != "" && length(input$int) > 0){
      # Get intersection reactive var, clear markers, clear RV
      intersection_r <- intersection_r()
      rv_location$Segment <- NULL
      proxy <- leafletProxy("map") %>% clearMarkers() %>% clearShapes()

      # If there is exactly one matching node, select it directly (blue)
      if(nrow(intersection_r) == 1 && length(intersection_r) > 0) {
        # Add intersection to RV object
        rv_location$Intersection <- intersection_r
        # Add intersection marker to map
        proxy %>% addAwesomeMarkers(
          data = intersection_r,
          icon = createIcon('darkblue')
        )

        if(input$treatment_type == "Intersection Tightening") {
          rv_msg$msg <- c(paste0("Coordinates: ", toString(intersection_r$geometry)),
                          paste0("Council District: ",toString(cd_r())),
                          paste0("DOT District: ", toString(dotR())))
        } else {
          # Get cross streets
          xstreet_r <- xstreet_r()
          # Update message to choose a street
          rv_msg$msg <- c('Select a Cross Street')
          # If there is at least one related segment, add it
          if(length(xstreet_r) > 0) {
            proxy %>% addPolylines(
              data = xstreet_r,
              layerId = as.numeric(rownames(xstreet_r)),
              color = "gray"
            )
          }
        }

      # If there is > 1 matching node, show all in gray for disambiguation
      } else if(nrow(intersection_r) > 1) {
        proxy %>% addAwesomeMarkers(
          data = intersection_r,
          layerId = as.numeric(rownames(intersection_r)),
          icon = createIcon("gray")
        )
        rv_msg$msg <- c('Select One Intersection Node')
      }

      # Update the map zoom bounds to the selected feature(s)
      proxy %>% fitBounds(lng1 = as.double(st_bbox(intersection_r)[1]),
                          lat1 = as.double(st_bbox(intersection_r)[2]),
                          lng2 = as.double(st_bbox(intersection_r)[3]),
                          lat2 = as.double(st_bbox(intersection_r)[4]))
    }
  })

  # Map observer: disambiguate between multiple candidate nodes by click
  observeEvent(input$map_marker_click, {
    if(nrow(intersection_r()) > 1) {
      # Grab ID of the marker that was clicked
      click_id <- input$map_marker_click$id
      # Filter intersections based on the click
      intS <- intersection_r() %>% filter(rownames(.) == click_id)
      rv_location$Intersection <- intS
      # Add selected marker on top
      proxy <- leafletProxy("map") %>%
        # Add selected intersection to map
        addAwesomeMarkers(
          data = intS,
          layerId = "intselected",
          icon = createIcon('darkblue')
        )
      rv_msg$msg <- c(paste0("Coordinates: ", toString(rv_location$Intersection$geometry)),
                      paste0("Council District: ",toString(cd_r())),
                      paste0("DOT District: ", toString(dotR())))
    }
  })

  # Map observer: select a cross-street segment by clicking a polyline
  observeEvent(input$map_shape_click, {
    if(!is.null(xstreet_r())){
      # Grab ID of the shape that was clicked
      click_id <- input$map_shape_click$id
      # Filter polylines based on the click
      polyline_s <- xstreet_r() %>% filter(rownames(.) == click_id )
      rv_location$Segment <- polyline_s
      # Add selected line on top in another color
      proxy <- leafletProxy("map") %>%
        # Add selected line shape to map
        addPolylines(
          data = polyline_s,
          layerId = "selected",
          color = "#0066a1",
          opacity = 1
        )
      # Once user has selected the street segment, message becomes '.'
      rv_msg$msg <- c('.')
    }
  })
}
c5d7a18d0decbcaad7fad6e69adba142d20990d0
5151abd43e2b8ea407ff19fb51531d9f50c31f87
/miRNA_theory.R
fba6fb39ccea6f8a422cbd6baea11a2290ee4c42
[]
no_license
amandazheutlin/NAPLS_RNA
096b43e51f1d5573b2f16a7959dbc02ab20b9447
c576bea8ed0619345c1917e5332949f5d8bc67d4
refs/heads/master
2021-01-01T18:27:22.021352
2017-08-07T20:06:54
2017-08-07T20:06:54
98,345,951
0
0
null
null
null
null
UTF-8
R
false
false
2,083
r
miRNA_theory.R
# RNA-seq data from NAPLS - miRNAs # correlations with gray matter longitudinal phenotype # plasticity-related miRNA #### housekeeping workdir <- "/data/swe_gwas/ABZ/NAPLS/RNAseq/" setwd(workdir) # source("http://bioconductor.org/biocLite.R") libs <- c("dplyr", "psych", "ggplot2", "CALF", "glmnet", "caret", "nlme", "car", "mediation") invisible(lapply(libs, require, character.only = TRUE)) #### load in data # 136 miRNAs; 3 groups (NP, P, UC) data <- read.table("miRNA_norm.txt",header=T) gm <- read.table("GMpheno.txt",header=T) covar <- read.table("age-sex.txt",header=T) # data$cc <- data$ProdromeStatus # levels(data$cc) <- c(0,1) gm$SiteSubjID <- paste(gm$SiteNumber,gm$SubjectNumber,sep="") %>% as.integer() covar$SiteSubjID <- paste(covar$SiteNumber,covar$SubjectNumber,sep="") %>% as.integer() data$gm <- gm[match(data$SiteSubjID,gm$SiteSubjID),3] data$age <- covar[match(data$SiteSubjID,covar$SiteSubjID),1] data$sex <- covar[match(data$SiteSubjID,covar$SiteSubjID),2] # review: Mellios & Sur, 2012, Frontiers in Psychiatry # miR-21 (2), miR-30a (1), miR-30d (1), miR-34a (0), miR-128 (1), # miR-132 (1), miR-134 (0), miR-137 (0), miR-138 (0), miR-181b (1), # miR-195 (0), miR-212 (0), miR-219 (0), miR-346 (0) miRNA <- c("miR.21.5p", "miR.21.3p", "miR.30a.5p", "miR.30d.5p", "miR.128", "miR.132.3p", "miR.181b.5p") # multiple regression m1 <- lm(gm ~ miR.21.5p + miR.21.3p + miR.30a.5p + miR.30d.5p + miR.128 + miR.132.3p + miR.181b.5p, data = data, na.action = na.omit) # univariate analyses models <- lapply(miRNA, function(x) { lm(eval(substitute(gm ~ i, list(i = as.name(x)))),data = data, na.action = na.omit) }) # stats model_stats <- lapply(models, function(x) summary(x)) # list of overall effects gm = NULL for (i in 1:7) { temp <- model_stats[[i]]$coefficients %>% as.data.frame() temp$R2 <- model_stats[[i]]$r.squared gm <- rbind(gm,temp[2,3:5]) } colnames(gm)[1:2] <- c("tvalue","pvalue") gm$marker <- rownames(gm) gm <- gm[order(gm$pvalue),]
49f2134ce14772ee6ddb5c78e568938a623d62ea
1cd18bed03f132588465f8763be10c7fc33e9457
/tests/testthat/test-format-u.R
6dc38c2ad2e5d66a9c8f0315c55a3ec9d5581c49
[]
no_license
jrnold/fivemat
5c4931c4a25dfffce3826145b74b7a360a2999da
98fbdf174e9e4ab3c4bc4e3c345dd429275db8bd
refs/heads/master
2021-01-19T04:38:45.368705
2017-03-23T16:58:20
2017-03-23T16:58:20
84,902,811
2
0
null
null
null
null
UTF-8
R
false
false
638
r
test-format-u.R
context("format u") test_that("fmt_new(\"u\") unicode character", { expect_equal(fmt_new("u")(0x2603), "\u2603") expect_equal(fmt_new("020u")(0x2603), "0000000000000000000\u2603") expect_equal(fmt_new(" ^20u")(0x2603), " \u2603 ") expect_equal(fmt_new("$u")(0x2603), "$\u2603") }) test_that("fmt_new(\"u\") does not localize a decimal point", { expect_equal(fmt(46, spec = "u", locale = fmt_locale(decimal_mark = "/")), ".") }) test_that("fmt_new(\"u\") works with special values", { expect_equal(fmt(c(-Inf, Inf, NA, NaN), spec = "u"), c("-Inf", "Inf", "NA", "NaN")) })
8c482219b5c76864e35c103509a663e1d282f22e
9550cea67bef21d3295d85c0a4681ce6b0e72b89
/R/smartsizer.R
bbc22350b6fcebcf05c799c34af5cf586afd29c9
[]
no_license
cran/smartsizer
3ad1d3bc4856386480b981c3d220462dbd45471b
985af5ca550027501741c3ae386ab2ae0f37a3f2
refs/heads/master
2021-06-16T11:23:55.933768
2021-01-06T01:10:02
2021-01-06T01:10:02
129,715,700
1
0
null
null
null
null
UTF-8
R
false
false
617
r
smartsizer.R
#' smartsizer: A package for Sizing SMART Designs #' #' The smartsizer package is designed to assist investigators with sizing sequential, #' multiple assignment, randomized trial (SMART) for determination of the optimal #' dynamic treatment regime (DTR). smartsizer includes functions which permit #'calculation of the minimum number of individuals to enroll in a SMART #' in order to be able to detect a specified effect size between the best and #' inferior embedded DTR, with a specified power. #' smartsizer is designed for an arbitrary SMART design. #' #' @docType package #' @name smartsizer NULL
32583d313f200590b9712cc4e9e057b2b675d8b4
29585dff702209dd446c0ab52ceea046c58e384e
/fullfact/R/barMANA.R
a2406cb1932e5ccee6c8e1978c6055fff0c07918
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,998
r
barMANA.R
barMANA <- function(ci_dat,type="perc",bar_len=0.1,ymax=NULL,ymin=NULL,yunit=NULL,leg="topright", #ci object cex_ylab=1,cex_yaxis=1,cex_names=1) { error.bar <- function(x, y, upper, lower=upper, length=bar_len,...){ if(length(x) != length(y) | length(y) !=length(lower) | length(lower) != length(upper)) stop("vectors must be same length") arrows(x,upper, x, lower, angle=90, code=3, length=length, ...)} if (type == "perc") { dat<- ci_dat$percentage; label<- "phenotypic variance (%)" } if (type == "raw") { dat<- ci_dat$raw; label<- "phenotypic variance" } if (is.null(dat$trait)) { num <- 1; name_lab<- "" ord<- matrix(0,ncol=1,nrow=3) ord[,1][1]<- which(dat$component=="additive") ord[,1][2]<- which(dat$component=="nonadd") ord[,1][3]<- which(dat$component=="maternal") } if (!is.null(dat$trait)) { num <- length(levels(dat$trait)); name_lab<- levels(dat$trait) ord<- matrix(0,ncol=1,nrow=3*num) ord[,1][seq(1,3*num,3)]<- which(dat$component=="additive") ord[,1][seq(2,3*num,3)]<- which(dat$component=="nonadd") ord[,1][seq(3,3*num,3)]<- which(dat$component=="maternal") } dat_ci<- matrix(dat[,3][ord],ncol=num,nrow=3) #median/mean lwr_ci<- matrix(dat$lower[ord],ncol=num,nrow=3) upp_ci<- matrix(dat$upper[ord],ncol=num,nrow=3) if (is.null(ymax)) { ymax<- max(dat$upper[ord]) } if (!is.null(ymax)) { ymax<- ymax } if (is.null(ymin)) { ymin<- 0 } if (!is.null(ymin)) { ymin<- ymin } ci_plot<- barplot(dat_ci,beside=T,ylab=label,col=c("gray55","gray","gray95"), names.arg=name_lab,cex.names=cex_names, yaxt='n',ylim=c(ymin,ymax),cex.lab=cex_ylab) error.bar(ci_plot,dat_ci,upper=upp_ci,lower=lwr_ci) legend(paste(leg),c("additive","non-additive","maternal"),fill=c("gray55","gray","gray95")) if (is.null(yunit)) { yunit<- (ymax-ymin)/5 } if (!is.null(yunit)) { yunit<- yunit } axis(1, at=c(0,4*num),labels=FALSE) axis(2, at=seq(ymin,ymax,yunit),labels=seq(ymin,ymax,yunit),las=1,cex.axis=cex_yaxis) }
4f0831c6bb3059e74eef4617e8675f5501c1261f
a3011e4901d9cda7fa28b5c53079743273a34ab2
/source/fns_technicalAnalysis.R
a582b03bf85dd2f567d19cc96656d31e56626e84
[]
no_license
amit-agni/FiMATS
ee56427132989d2c0beacdf9be9225699a4c02b7
78d42982c7391f8fa9c80ab2ef2677202e8a4c44
refs/heads/master
2021-05-21T09:26:16.056174
2020-05-18T06:08:02
2020-05-18T06:08:02
252,637,321
0
0
null
null
null
null
UTF-8
R
false
false
6,348
r
fns_technicalAnalysis.R
#UI and Server function definitions for the Technical Analysis page ############### Called from ui.R ################ fnUI_technicalAnalysis <- function(){ fluidPage(fluidRow(box(width = 12,solidHeader = T,background = "navy" ,column(width = 2,h3("Technical Analysis for :")) ,column(width = 3,uiOutput("lovCharts_all"))) ) ,fluidRow(column(width = 6,style='padding:0px;margin:0px;' ,box(width = 12,solidHeader = T,background = "navy",height = PLOT_HEIGHT*1.7 ,plotOutput('plotCharts_technicalChart') ,title = "Technical Indicators")) ,column(width = 6 ,fluidRow(column(width=12,style='padding:0px;' ,box(width = 12,solidHeader = T,background = "navy" ,title = "Prices for the last six sessions + Highs and Lows" ,tableHTML_output('charts_tblKPI'))) ,column(width=12,style='padding:0px;' ,box(width = 12,solidHeader = T,background = "navy" ,collapsible = T,collapsed = F ,span(htmlOutput('txt_chartingNotes'), style="color:white;font-size:11px") ,style = paste("height:",PLOT_HEIGHT*1.2,"px;overflow-y: scroll;",sep="") ,title = "Notes on Technical Indicators")) ,column(width = 12,style='padding:0px;margin:0px;' ,box(width = 12,solidHeader = T,background = "navy" #,height = PLOT_HEIGHT*0.9 ,collapsible = T,collapsed = T ,title = "Candlestick Cheat sheet" ,imageOutput('charts_Image'))) ,column(width=12,style='padding:0px;' ,box(width = 12,solidHeader = F,collapsible = T,collapsed = T ,title = "OHLC Data" ,dataTableOutput('charts_tblOHLCdata'))) ) ) ) ) } ############### Called from server.R ################ fnServer_technicalAnalysis <- function(input,output,session){ output$lovCharts_all <- renderUI({ req(DT_stats()$category) selectInput("lovCharts_all",label = "" #,choices = split(DT_stats()[,.N,.(category,name)][,-"N"],by="category",keep.by=F,flatten=F) ,choices = list("Stocks" = DT_stats()[category == 'stock']$name ,"Indices" = DT_stats()[category == 'index']$name ,"Currencies" = DT_stats()[category == 'currency']$name ,"Commodities" = DT_stats()[category == 'commodity']$name) 
,selected = DT_stats()[category=='stock' & country == 'Australia'][1, ]$name) }) output$charts_tblKPI <- render_tableHTML({ req(input$lovCharts_all) fn_tblKPI(DT_hist(), DT_stats(), input$lovCharts_all) %>% tableHTML(rownames = F,border=2,widths = rep(100,10),spacing = "10px") #%>% add_theme('rshiny-blue') }) output$charts_tblOHLCdata <- renderDataTable({ req(input$lovCharts_all) datatable(DT_hist()[name == input$lovCharts_all & date >= input$dt_start & date <= input$dt_end][ order(-date)][,.(date,open,high,low,close,volume,adjusted)],rownames = F) %>% formatRound(.,c(2:8), 0) # formatStyle(columns = c(1:3), 'text-align' = 'center') },options=list(pageLength = 5 #,lengthMenu = c(2, 12, 18) ,searching= FALSE ,columnDefs = list(list(className = 'dt-center')) ,rowCallback = JS("function(r,d) {$(r).attr('height', '20px')}") #,class="compact" #,class = 'white-space: nowrap stripe hover' ) ) output$txt_chartingNotes <- renderText({ read_file(here::here(DATA_FOLDER,'charting-notes.html')) }) output$charts_Image <- renderImage({ return(list(src = here::here(DATA_FOLDER,"Candlestick-Cheat-Sheet_web-01.jpg") ,contentType = "image/jpg" ,height = "100%" ,width = "100%")) },deleteFile = F) observeEvent(input$lovCharts_all,{ print("in chart") req(input$lovCharts_all) output$plotCharts_technicalChart <- renderPlot({ temp <- DT_hist()[name == input$lovCharts_all & date >= input$dt_start & date <= input$dt_end] xts(temp[,.(open,high,low,close,volume)],order.by =temp$date) %>% chartSeries(TA='addBBands();addRSI();addMACD();addVo()' ,theme = "white" ,multi.col = F ,up.col="white" ,dn.col ="darkslategray1") #name = 'Technical Charts') # myPars <-chart_pars() # myPars$cex<-2 # mychartTheme <- chart_theme() # xts(temp[,.(open,high,low,close,volume)],order.by =temp$date) %>% # chart_Series(TA='addBBands();' # #TA='addRSI(on=1)' # ,theme = "white" # #,theme = mychartTheme # #,pars = myPars # ) },height = PLOT_HEIGHT*1.5) }) }
c4c69f7ec67cba0d8e1089637628a37ba54ecaa3
6526ee470658c2f1d6837f7dc86a81a0fbdcffd5
/man/setVars.mwIPM.Rd
63cd2bb269bca60d0f11595a245a4330a1326790
[]
no_license
mdlama/milkweed
c7e8a24021a35eb6fbef13360400d2d4069b4649
b791c8b39802f33471f8e827f369afa47c06d6af
refs/heads/master
2023-09-06T03:00:45.554997
2022-09-14T15:25:58
2022-09-14T15:25:58
76,479,540
0
0
null
2021-09-21T19:04:44
2016-12-14T16:59:01
R
UTF-8
R
false
true
317
rd
setVars.mwIPM.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mw_ipm.R \name{setVars.mwIPM} \alias{setVars.mwIPM} \title{Sets model variables.} \usage{ \method{setVars}{mwIPM}(obj) } \arguments{ \item{obj}{A mwIPM model object.} } \value{ A mwIPM model object. } \description{ Sets model variables. }
aef2ccb9b2a34035c8735d96a2d381d61d709ea9
3eae9a446a3311ffbc68fe33992239ac47f0e76d
/R/predict.R
42b390cba3d96e0d184149a389cea7c07cba1707
[]
no_license
cran/AmpGram
5389f3a0fc0b5709a415359446913c2baa17bd0d
2163942d3f35ef061ed20ecb584c387056432c30
refs/heads/master
2022-09-11T06:39:45.428988
2020-05-31T09:10:03
2020-05-31T09:10:03
268,352,629
0
0
null
null
null
null
UTF-8
R
false
false
3,715
r
predict.R
#' Predict antimicrobial peptides #' #' Recognizes antimicrobial peptides using the AmpGram algorithm. #' @param object \code{ampgram_model} object. #' @param newdata \code{list} of sequences (for example as given by #' \code{\link[biogram]{read_fasta}} or \code{\link{read_txt}}). #' @param ... further arguments passed to or from other methods. #' @return \code{list} of objects of class \code{single_ampgram_pred}. Each object #' of this class contains analyzed sequence, values of predictions for 10-mers and #' result of the prediction for the whole peptide/protein. #' @export #' @details AmpGram requires the external package, AmpGramModel, which #' contains models necessary to perform the prediction. The model #' can be installed using \code{\link{install_AmpGramModel}}. #' #' Predictions for each protein are stored in objects of class #' \code{single_ampgram_pred}. It consists of three elements: #' \describe{ #' \item{seq}{Character vector of amino acid sequence of an analyzed peptide/protein} #' \item{all_mers_pred}{Numeric vector of predictions for each 10-mer (subsequence #' of 10 amino acids) of a sequence. Prediction value indicates probability that #' a 10-mer possesses antimicrobial activity and ranges from 0 (non-AMP) to 1 #' (AMP).} #' \item{single_prot_pred}{Named numeric vector of a single prediction value for #' a whole peptide/protein. Its value corresponds to the probability that a #' peptide/protein exhibits antimicrobial activity. It assumes name \code{TRUE} #' if probability is equal or greater than 0.5, i.e. peptide/protein is classified #' as antimicrobial (AMP), and \code{FALSE} if probability is less that 0.5, #' i.e. peptide/protein is classified as non-antimicrobial (non-AMP).} #' } #' @importFrom biogram binarize decode_ngrams #' @importFrom pbapply pblapply #' @importFrom ranger ranger #' @importFrom stats predict #' @importFrom stringi stri_count predict.ampgram_model <- function(object, newdata, ...) 
{ require_AmpGramModel() ngrams <- object[["imp_features"]] decoded_ngrams <- gsub(pattern = "_", replacement = ".", x = decode_ngrams(ngrams), fixed = TRUE) all_preds <- pblapply(newdata, function(ith_seq) { ngram_count <- find_ngrams(seq = ith_seq, decoded_ngrams = decoded_ngrams) colnames(ngram_count) <- ngrams all_mers_pred <- predict(object[["rf_mers"]], ngram_count)[["predictions"]][, 2] single_prot_pred <- predict(object[["rf_peptides"]], calculate_statistics(all_mers_pred))[["predictions"]][, 2] res <- list(seq = ith_seq, all_mers_pred = all_mers_pred, single_prot_pred = single_prot_pred) class(res) <- "single_ampgram_pred" res }) if(is.null(names(all_preds))) names(all_preds) <- paste0("seq", 1L:length(all_preds)) all_preds } # data(AmpGram_model) # sample_seq <- list(seq1 = c("F", "E", "N", "C", "N", "I", "T", "M", "G", "N", "M", "V", # "R", "H", "I", "R", "W", "Y", "R", "D", "R", "Q", "K", "G", "D", # "Y", "W", "W", "Y", "T", "I", "K", "Y", "S", "M", "A", "M", "I", # "A", "C", "N", "I", "N", "V", "T", "I", "N", "Q", "C", "V"), # seq2 = c("Q", "Y", "T", "S", "I", "M", "F", "L", "T", "A", "G", "H", # "L", "A", "P", "W", "D", "R", "W", "C", "R", "S", "L", "T", "T", # "W", "F", "G", "A", "P", "S", "A", "T", "Y", "P", "F", "F", "W", # "E", "P", "E", "D", "I", "I", "I", "K", "P", "N", "T", "A")) # predict(AmpGram_model, sample_seq)
ec1819afd9f0a6628a1152fef1678a5735661285
02ce497b24e5aed144fd915257252ff62d0d44d2
/cachematrix.R
ce87510c98f61900639506a9f40051ead509d029
[]
no_license
meelik/ProgrammingAssignment2
0445842a414ef805c4604145bbb963a388645128
1fdb8dc3ef029ce7b08505c868d617dc1b32dc1c
refs/heads/master
2021-01-17T05:14:34.000953
2015-10-25T13:43:48
2015-10-25T13:43:48
44,909,993
0
0
null
2015-10-25T12:54:45
2015-10-25T12:54:45
null
UTF-8
R
false
false
1,384
r
cachematrix.R
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than compute it repeatedly. ## Those functions cache the inverse of a matrix. ## makeCacheMatrix() creates a special "matrix" object that caches its inverse makeCacheMatrix <- function(x = matrix()) { if ((nrow(x) <= 0) || (ncol(x) <= 0)) { message("Warning! At least one dimension of your matrix is missing!") } else if (nrow(x) != ncol(x)) { message("Warning! Your matrix is not a squared matrix!") } inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinv <- function(inversed) inv <<- inversed getinv <- function() inv list(set = set, get = get, setinv = setinv, getinv = getinv) } ## cacheSolve() computes the inverse of the special "matrix" returned by makeCacheMatrix. ## If the inverse has already been calculated, then the cachesolve retrieves the inverse from the cache. cacheSolve <- function(x, ...) { inv <- x$getinv() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() if (nrow(data) != ncol(data)) { message("Error! Your matrix is not a squared matrix!") return } inv <- solve(data, ...) x$setinv(inv) inv }
5ffd51c48eb0fc4e7ca9602b6c6d47491256e76a
0bbead7aac0c282df7f84ee70f8a83018a3b5d3c
/0ShinyBook-梅津/ShinyBook-master/chapter05/06-ReporteRs/server.R
40f20c0eefdef92aa856eb2700b3de2f88e59556
[]
no_license
luka3117/JcShiny
284fa9ee29c5b9b86ee022b179e41e3cca153911
2c7c9fb410eac2c0f3acf6350a6daca780648d5a
refs/heads/master
2022-12-12T00:35:35.684502
2020-09-17T17:42:00
2020-09-17T17:42:00
290,563,226
0
0
null
null
null
null
UTF-8
R
false
false
1,481
r
server.R
library(shiny) library(DT) library(ReporteRs) library(rJava) library(ggplot2) shinyServer(function(input, output, session) { output_plot_fun <- reactive({ data <- data.frame(x = iris[, input$select], y = iris$Sepal.Length) ggplot(data, aes(x = x, y = y)) + geom_point(colour = input$color) }) output$plot <- renderPlot({ print(output_plot_fun()) }) output$downloadData <- downloadHandler(filename = "testfile.pptx", content <- function(file) { doc <- pptx() # Slide 1 doc <- addSlide(doc, "Title Slide") doc <- addTitle(doc, "Rから作ったパワポです") doc <- addSubtitle(doc, "皆さん使ってください") # Slide 2 doc <- addSlide(doc, "Title and Content") doc <- addTitle(doc, "2ページ目") doc <- addPlot(doc, fun = print, x = output_plot_fun()) writeDoc(doc, file) }) })
1b9d2719b357b29ae6803aea5cb17c112d69d1a2
44cf65e7ab4c487535d8ba91086b66b0b9523af6
/data/Newspapers/2000.10.12.editorial.58916.0462.r
62a0da029eca4d2542120c46d09b26c64985bd07
[]
no_license
narcis96/decrypting-alpha
f14a746ca47088ec3182d610bfb68d0d4d3b504e
5c665107017922d0f74106c13d097bfca0516e66
refs/heads/master
2021-08-22T07:27:31.764027
2017-11-29T12:00:20
2017-11-29T12:00:20
111,142,761
0
1
null
null
null
null
UTF-8
R
false
false
4,490
r
2000.10.12.editorial.58916.0462.r
gata ! Mugur Isarescu si - a anuntat candidatura . cei care au facut pariu ca nu va intra in jocul pentru Cotroceni au pierdut . puteau la fel de bine sa cistige pentru ca premierul s - a lansat de parca ar fi renuntat . ieri , pe pajistea insorita de la Palatul Elisabeta , intr - o discretie ca de receptie in stil american , Mugur Isarescu a sustinut o conferinta de presa . pe fundal era o fotografie mare si frumoasa pe care scria " Mugur Isarescu - presedinte " . evenimentul n - a fost cu nimic diferit de unul organizat de General Motors cu ocazia intrarii in Romania . la fel de bine ai fi putut crede ca se lansa o retea de banci , un produs electronic sau se anunta o asociere . fata de bilciul cu urale , strigaturi , muzica , mititei , orchestre , cintareti , perii si slugarnicii , familiare tuturor lansarilor politice din Romania , Mugur Isarescu a preferat o formula socanta numai prin firescul si bunul ei simt . chiar nu dadea impresia ca ar dori sa faca din asta un moment aparte . premierul nu vrea sa joace rolul unui actor politic . in aerul molcom de toamna parea un om neatins de paranoia puterii , putin mirat de ceea ce face si a fost extrem de zgircit in promisiuni . convingerea mea este ca Mugur Isarescu nu si - a dorit niciodata sa devina presedintele Romaniei . nu arata ca un ins dispus sa mearga in patru labe sau sa se maimutareasca numai si numai pentru a obtine un vot in plus . nu arata nici disperat sa smulga reactii de simpatie . discursul sau este atit de masurat incit risca sa treaca neobservat . cu greu propozitii din el ar putea ajunge la nevoiasi , la periferia oraselor sau la tarani . iar precizarea ca nu apartine nici unui partid politic a fost facuta cu aceeasi discretie incit ai putea crede ca e al nimanui , dar si al tuturor . as zice ca discursul de ieri al premierului contine chiar si o greseala . el a spus ca deviza sa va fi " O tara cu frontiere sigure si cu strazi sigure ! " . 
aceste cuvinte ne trimit , mai degraba , la ministrul apararii si la ministrul de interne , adica la doua domenii care sint si acum in subordinea sa . daca la capitolul " frontiere " nu am putea spune ceva de rau , in ceea ce priveste siguranta strazilor , guvernul Mugur Isarescu , desi ne - a oferit spectacolul de opereta intitulat " Luna " ( regizor Constantin Dudu Ionescu ) , a ramas dator . Centrarea campaniei sale pe " frontiere si strazi sigure " presupune o intelegere destul de generoasa a atributiilor prezidentiale . la fel si insistenta lui Mugur Isarescu legata de situatia economica . din acest punct de vedere , mesajul seamana cu al lui Theodor Stolojan . amindoi o tin sus si tare cu economia pentru ca se pricep si pentru ca populatia resimte din plin rezultatele proaste din domeniu . atita doar ca aceasta componenta vitala a societatii romanesti nu intra in competenta presedintelui , ci a executivului . presedintele poate doar sa nu fie de acord cu numirea unui figurant intr - o pozitie - cheie ( cum nu s - a intimplat cu Emil Constantinescu in 1996 si 1998 ) . Mugur Isarescu a generat , prin discretia de pina acum , un mare orizont de asteptare . tacerea sa , intrerupta din cind in cind de cite o declaratie prin intermediul agentiilor de presa sau purtatorului de cuvint , a contrariat si i - a incitat pe contracandidati . iesirea sa la rampa nu ne arata o schimbare socanta de abordare . cine e obisnuit cu lansarile de candidaturi in alte tari europene sau in SUA nu poate sa nu - si arate surprinderea . asa ceva n - au mai vazut niciodata ! pare de - a dreptul original . nicaieri in lume , un politician n - a soptit populatiei ca el vrea sa devina numarul unu intr - o tara si cu aceasta atitudine sa stirneasca un val de simpatie si incredere si sa si cistige . or fi romanii atit de abili incit sa recepteze cu mare entuziasm un mesaj rostit ca un " buna ziua " ? 
sa dea Domnul ca nevoia romanilor de seriozitate si profesionalism , de bun simt si tact sa se regaseasca la numaratoarea voturilor . dar maindoiesc . daca Mugur Isarescu , la sfirsitul acestei campanii , nu va ajunge in virf , strategia sa bazata pe discretie , in loc sa capete girul originalitatii , va fi taxata ca o inadmisibila naivitate . p . S . Ieri , la conferinta de presa cu pricina , pagubiti FNI , iritati , au venit sa - si apere cauza . pe citiva dintre ei i - am invitat la redactie dupa ce termina cu huiduitul spre a le publica punctele de vedere . ca prin minune , au disparut !
a80547ec54282509bb7d82947dca357862346ce0
3a28197b0054bb9ec0aeb7b9cd4e0804b67019f0
/data cleaning code.R
c5d1ede737454bb126129242c49046b1f511e586
[]
no_license
soilhealthfeedback/SHEAF_DATA_CREATION
c73ce3b40a71c2816b29d28e02cbfd3a7f99bbd2
bcc16c433630aba7bc51c6f15a4b5e70ec8f747b
refs/heads/master
2021-07-12T16:55:36.674760
2020-07-03T17:58:21
2020-07-03T17:58:21
165,294,891
1
0
null
null
null
null
UTF-8
R
false
false
10,400
r
data cleaning code.R
# !diagnostics off soilnonscaled<-MIDWEST_CORN_SOYBEANS_Model2_nonscaled_new ##rescaled farm cost variables to be dollars per acre soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012_peracre<-(soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012_peracre<-(soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012_peracre<-(soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMVALUE_farmincome_peracre<-(soilnonscaled$FARMVALUE_income_farmsources_gross*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$PAYMENTS_payments_reserve_total_peracre<-(soilnonscaled$PAYMENTS_payments_reserve_total_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_peracre<-(soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres 
soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012_peracre<-(soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012_peracre<-(soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012_peracre<-(soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012*1000)/soilnonscaled$AGCENSUS_Cropland_Acres ####reduce skew in dependent variables#### ####cover crops skewness(soilnonscaled$AGCENSUS_Cover_Acres_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Cover_Acres_Ratio) h<-hist(soilnonscaled$AGCENSUS_Cover_Acres_Ratio) h soilnonscaled$AGCENSUS_Cover_Acres_Ratio[soilnonscaled$AGCENSUS_Cover_Acres_Ratio>=1] <- NA skewness(soilnonscaled$AGCENSUS_Cover_Acres_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Cover_Acres_Ratio) ## to find optimal lambda lambda = BoxCox.lambda(soilnonscaled$AGCENSUS_Cover_Acres_Ratio) lambda ## now to transform vector soilnonscaled$AGCENSUS_Cover_Acres_Ratio_transformed<-BoxCox(soilnonscaled$AGCENSUS_Cover_Acres_Ratio, lambda=0) skewness(soilnonscaled$AGCENSUS_Cover_Acres_Ratio_transformed, na.rm=T) summary(soilnonscaled$AGCENSUS_Cover_Acres_Ratio_transformed) hist(soilnonscaled$AGCENSUS_Cover_Acres_Ratio_transformed) ###no till skewness(soilnonscaled$AGCENSUS_Notill_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Notill_Ratio) h<-hist(soilnonscaled$AGCENSUS_Notill_Ratio) h soilnonscaled$AGCENSUS_Notill_Ratio[soilnonscaled$AGCENSUS_Notill_Ratio>=.5] <- NA skewness(soilnonscaled$AGCENSUS_Notill_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Notill_Ratio) ## to find optimal lambda lambda = 
BoxCox.lambda(soilnonscaled$AGCENSUS_Notill_Ratio) lambda ## now to transform vector soilnonscaled$AGCENSUS_Notill_Ratio_transformed<-BoxCox(soilnonscaled$AGCENSUS_Notill_Ratio, lambda=0) skewness(soilnonscaled$AGCENSUS_Notill_Ratio_transformed, na.rm=T) summary(soilnonscaled$AGCENSUS_Notill_Ratio_transformed) hist(soilscaled$AGCENSUS_Notill_Ratio_transformed) ###multitill skewness(soilnonscaled$AGCENSUS_Multitill_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Multitill_Ratio) h<-hist(soilnonscaled$AGCENSUS_Multitill_Ratio) h soilnonscaled$AGCENSUS_Multitill_Ratio[soilnonscaled$AGCENSUS_Multitill_Ratio>=.8] <- NA skewness(soilnonscaled$AGCENSUS_Multitill_Ratio, na.rm=T) summary(soilnonscaled$AGCENSUS_Multitill_Ratio) ## to find optimal lambda lambda = BoxCox.lambda(soilnonscaled$AGCENSUS_Multitill_Ratio) lambda ## now to transform vector soilnonscaled$AGCENSUS_Multitill_Ratio_transformed<-BoxCox(soilnonscaled$AGCENSUS_Multitill_Ratio, lambda=0) skewness(soilnonscaled$AGCENSUS_Multitill_Ratio_transformed, na.rm=T) summary(soilnonscaled$AGCENSUS_Multitill_Ratio_transformed) hist(soilnonscaled$AGCENSUS_Multitill_Ratio_transformed) hist(soilscaled$AGCENSUS_Multitill_Ratio_transformed) ##look at distributions and remove outliers #chemicals hist(soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012_peracre) summary(soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012_peracre) soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012_peracre[soilnonscaled$FARMCOSTS_Chemicals_1000Doll_2012_peracre>=100] <- NA #rent hist(soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012_peracre) summary(soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012_peracre) soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012_peracre[soilnonscaled$FARMCOSTS_cashrent_land_building_pasture_1000Doll_2012_peracre>=200] <- NA #fertilizer hist(soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012_peracre) 
summary(soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012_peracre) soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012_peracre[soilnonscaled$FARMCOSTS_Fert_lime_soilcond_purchased_1000dolls_2012_peracre>=200] <- NA #feed hist(soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_FeedExpenses_1000Dolls_2012_peracre>=600] <- NA #interest hist(soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_Interest_Expenses_1000Dolls_2012_peracre>=100] <- NA #fuel hist(soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_gas_fuel_oil_purchased_1000Dolls_2012_peracre>=120] <- NA #utilities hist(soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_Utilities_expenses_1000Dolls_2012_peracre>=60] <- NA #depreciation hist(soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012_peracre) summary(soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012_peracre) soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012_peracre[soilnonscaled$FARMCOSTS_Depreciation_expenses_1000Doll_2012_peracre>=200] <- NA #contract labor hist(soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012_peracre) summary(soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012_peracre) 
soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012_peracre[soilnonscaled$FARMCOSTS_ContractLaborExpense_1000Doll_2012_peracre>=20] <- NA #property taxes hist(soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_Property_taxes_paid_1000Dolls_2012_peracre>=60] <- NA #rent machinery hist(soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012_peracre) summary(soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012_peracre) soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012_peracre[soilnonscaled$FARMCOSTS_rent_lease_machinery_equipment_Expense_1000Dolls_2012_peracre>=20] <- NA #hauling hist(soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012_peracre) summary(soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012_peracre) soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012_peracre[soilnonscaled$FARMCOSTS_Customwork_hauling_expenses_1000Doll_2012_peracre>=50] <- NA #net income per acre hist(soilnonscaled$FARMVALUE_farmincome_peracre) summary(soilnonscaled$FARMVALUE_farmincome_peracre) soilnonscaled$FARMVALUE_farmincome_peracre[soilnonscaled$FARMVALUE_farmincome_peracre>=600] <- NA #reserve program payments hist(soilnonscaled$PAYMENTS_payments_reserve_total_peracre) summary(soilnonscaled$PAYMENTS_payments_reserve_total_peracre) soilnonscaled$PAYMENTS_payments_reserve_total_peracre[soilnonscaled$PAYMENTS_payments_reserve_total_peracre>=200] <- NA #federal farm program payments hist(soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_peracre) summary(soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_peracre) soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_peracre[soilnonscaled$PAYMENTS_payments_fedfarmprograms_total_peracre>=150] <- NA #race 
hist(soilnonscaled$RACE_Entropy) summary(soilnonscaled$RACE_Entropy) #farmvalue hist(soilnonscaled$FARMVALUE_marketvalue_aveperacre) summary(soilnonscaled$FARMVALUE_marketvalue_aveperacre) #look at farm income distribution hist(soilnonscaled$FARMVALUE_farmincome_peracre) summary(soilnonscaled$FARMVALUE_farmincome_peracre) soilnonscaled$FARMVALUE_farmincome_peracre[soilnonscaled$FARMVALUE_farmincome_peracre>=400] <- NA #save write.csv(soilnonscaled,"soilnonscaledupdated11_22.csv")
01828c7b6eb9ce827673af6df86746e94fa7148b
ee8dd63922e47711a5911d282472e6784c5d67c0
/man/quantify.Rd
691c722d21b8efb2953d7fea92da7fc9462118f7
[ "MIT" ]
permissive
atusy/qntmap
07ff96149b4d8fb5ee2386b0892d524d1f55aa34
5b6a349ac12b600daad7e806e22982e514150b86
refs/heads/master
2021-06-04T06:01:19.809161
2021-04-06T13:54:15
2021-04-06T13:54:15
97,662,265
2
0
MIT
2021-04-06T13:54:15
2017-07-19T02:07:09
R
UTF-8
R
false
true
2,623
rd
quantify.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quantify.R \name{quantify} \alias{quantify} \title{Quantify X-ray maps} \usage{ quantify( xmap, qnt, cluster, maps_x = attr(xmap, "pixel")[1L], maps_y = attr(xmap, "pixel")[2L], phase = everything(), element = everything(), fine_phase = NULL, fine_th = 0.9, fix = NULL, se = FALSE, saving = TRUE, ... ) } \arguments{ \item{xmap}{Object returned by \code{\link[=read_xmap]{read_xmap()}}.} \item{qnt}{Object returned by \code{\link[=read_qnt]{read_qnt()}}.} \item{cluster}{Object returned by \code{\link[=cluster_xmap]{cluster_xmap()}}.} \item{maps_x, maps_y}{Sizes of maps along x- and y-axes comprising guide net map. (default: \code{NULL}).} \item{phase}{Selected ones are referenced to detect outliers. Default selects everything. Tidy selection is available. For example \code{c(Si, Ti)} selects them, and \code{c(-Si, -Ti)} selects everything except them.} \item{element}{Selected ones are referenced to detect outliers. Default selects everything. Tidy selection is available. For example \code{c(Si, Ti)} selects them, and \code{c(-Si, -Ti)} selects everything except them.} \item{fine_phase}{Deprecated as of qntmap > 0.4.0. Use \code{phase} instead.} \item{fine_th}{A threshold of membership degrees to 0.9} \item{fix}{A path to the file specifying chemical compositions of some elements in some phases (default: \code{NULL}).} \item{se}{\code{FALSE} in default, and is forced when \code{fix} is specified. \code{TRUE} calculates standard errors, but require large memories.} \item{saving}{\code{TRUE} (default) saves the results into \code{qntmap} directory under the directory \code{xmap} is read from. \code{FALSE} does not save.`} \item{...}{ Arguments passed on to \code{\link[=find_outlier]{find_outlier}} \describe{ \item{\code{interval}}{A type of the interval. Data points outside intervals are treated as outliers. 
If \code{"prediction"} (default), prediction intervals are used based on Poisson process. If `"tukey"``, conditional lower and upper whiskers are used where the lower is $Q_{1/4} - 1.5 IQR$ and the upper is $Q_{3/4} + 1.5 IQR$.} \item{\code{method}}{Applicable when \code{interval = "prediction"}. If \code{"rq"} (default), quantile regression is performed (\code{\link[quantreg:rq]{quantreg::rq()}}). If \code{"lsfit"}, least square regression is performed (\code{\link[stats:lsfit]{stats::lsfit()}}).} \item{\code{percentile}}{A percentile of predictive interaval. Applicable when \code{interval = "prediction"}.} }} } \description{ Quantify X-ray maps }
58caba98d9a8d66fdcbe794537221f1f1078a7f0
83ace7c3562159301606e510dc9f98c92af60deb
/src/new/bestProducts.R
e5afa0d1af93f7d25390425e98c8c34c5d251c02
[]
no_license
avz-cmf/bigdata
0c18cba04dd3a26231abb0efcd83c7d6f5b2adb1
b43c25892865a0cb479780789bc00eebd2922cd4
refs/heads/master
2023-08-11T05:32:02.549562
2016-08-16T11:14:25
2016-08-16T11:14:25
52,428,755
1
2
null
null
null
null
UTF-8
R
false
false
676
r
bestProducts.R
# какие столбцы # title, count_sold getBestProduct<-function() { # проверят есть ли новые данные сохраняя id последнего спарсеного товара # { queryProduct =paste("select * ", "from ", myDbname, ".best_product;", sep = ""); }# создаем запросы { res = readTable(queryProduct); }# считывем таблицу в которой храниться список самых продаваемых товаров { res = transform(res, id = 1:nrow(res)) }# добавляем поле id к таблице return(res) }
fb1d4cb134bab7e3caa1adc28aa9c190227b7b3d
29585dff702209dd446c0ab52ceea046c58e384e
/RcmdrPlugin.SM/R/globals.R
c540cc7f62ae5b0962e7234105f8e2a0074cdbb8
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
545
r
globals.R
# created 2012-08-28 by J. Fox if (getRversion() >= '2.15.1') globalVariables(c('top', 'buttonsFrame', 'slider.env','scaleVariable','scaleFrame','boxPlot', 'groupsFrame','meanVariable','.activeDataSet','checkBoxFrame','identifyVariable','identifyFrame','subwin', 'scatterPlot','optionsFrame','jitterXVariable','jitterYVariable','logXVariable','logYVariable','subButtonFrame','boxplotsVariable', 'lsLineVariable','smoothLineVariable','spreadVariable','subsetVariable','percentsVariable','subsetFrame','percentsFrame', 'subButtonsFrame'))
3bc46d56931076f63fe82561dcf8a0d52c872ee0
2726e7bdfc2b688ccf7ecaac75a259bcaa0e9817
/R/analysis/complaints/top_allegations.R
581da1f6505b25b2023bd0ef557a068269367b62
[ "MIT", "GPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.0-or-later", "GPL-3.0-or-later", "GPL-1.0-or-later", "LGPL-2.1-or-later", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "AGPL-3.0-or-later" ]
permissive
marvinmarnold/oipm_annual_report_2018
e62c292c4398bc8f1222b75bb1003b6c5bc0100f
da5e5bf06b38280552ea8029cfa0ab4d6dc17c66
refs/heads/master
2023-03-12T06:24:14.192280
2022-02-14T19:47:34
2022-02-14T19:56:01
172,810,954
1
1
MIT
2023-02-28T12:16:21
2019-02-27T00:09:56
HTML
UTF-8
R
false
false
2,444
r
top_allegations.R
check.vars(c("allegations.for.year")) ######################################################################################################## ######################################################################################################## title <- "Top allegations" alleg.by.alleg <- allegations.for.year %>% group_by(Allegation.simple) count.by.alleg <- summarise(alleg.by.alleg, alleg.count = n()) p.top.alleg <- plot_ly(count.by.alleg, type = 'pie', name = title, labels = ~Allegation.simple, values = ~alleg.count, textposition = 'inside', textinfo = 'label+value+percent', insidetextfont = list(color = '#FFFFFF')) p.top.alleg ######################################################################################################## ######################################################################################################## title <- "Top sustained" sustained.alleg.by.alleg <- allegations.for.year %>% filter(Disposition.OIPM.by.officer == "Sustained") %>% group_by(Allegation.simple) sustained.count.by.alleg <- summarise(sustained.alleg.by.alleg, count = n()) p.top.sustained.alleg <- plot_ly(sustained.count.by.alleg, type = 'pie', name = title, labels = ~Allegation.simple, values = ~count, textposition = 'inside', textinfo = 'label+value+percent', insidetextfont = list(color = '#FFFFFF')) p.top.sustained.alleg ######################################################################################################## ######################################################################################################## title <- "Top DI2" di2.alleg.by.alleg <- allegations.for.year %>% filter(Disposition.OIPM.by.officer == "DI-2") %>% group_by(Allegation.simple) di2.count.by.alleg <- summarise(di2.alleg.by.alleg, count = n()) p.top.di2.alleg <- plot_ly(di2.count.by.alleg, type = 'pie', name = title, labels = ~Allegation.simple, values = ~count, textposition = 'inside', textinfo = 'label+value+percent', insidetextfont = list(color = '#FFFFFF')) 
p.top.di2.alleg
67a7ccab619a8adcf26c51c8d0322ac12d3388df
042cb2b8204b16a09e4ebd0e5142f89018b2f868
/R/internalFunc.R
44077f3b187142741dba4dfef88d39771b8760ef
[]
no_license
ijazic/FreqIDSpline
3a07cc7123c17af3527c918cc723c8620d8f5b07
80861ab0b38bd3be5cd8997fe4988a127e16f819
refs/heads/master
2020-04-15T14:39:47.422169
2019-01-11T01:35:05
2019-01-11T01:35:05
164,758,630
0
0
null
null
null
null
UTF-8
R
false
false
1,000
r
internalFunc.R
## function for sandwich estimator. sandwichEstCheese() is an Rcpp export sandwich.var <- function(fit, y1, y2, delta1, delta2, x1, x2, x3, wts, b1.pred, b2.pred, b3.pred){ x <- fit$x jac <- fit$jac A.inv.mat <- solve(jac) B.mat <- sandwichEstCheese(xvec = as.double(x), y1 = as.double(y1), y2 = as.double(y2), delta1 = as.integer(delta1), delta2 = as.integer(delta2), x1 = as.matrix(x1), x2 = as.matrix(x2), x3 = as.matrix(x3), wts = as.double(wts), m1pred = as.matrix(b1.pred), m2pred = as.matrix(b2.pred), m3pred = as.matrix(b3.pred)) varcov.mat <- A.inv.mat%*%B.mat%*%A.inv.mat return(varcov.mat) }
ba0099e542f9d375a2d9b99bb39272f601e62018
08457a9bcd1fa7bf10d1f22116ce42f8696dad32
/examples/ds.job.R
4377fb6e902e74c45d4a6e8e2cbb0f1825dfba26
[]
no_license
ogutu/clim.pact
8a249acc5d4e3c68c0293fca0f3ec6effbfef910
dd4a01bb38f0f8d92fcdc443ab38ecec16f95523
refs/heads/master
2021-01-18T16:30:56.801697
2011-11-22T00:00:00
2011-11-22T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,848
r
ds.job.R
rm(list=ls()) library(clim.pact) source("ds_one.R") elems <- c(101,601) scens <- c("sresa1b","sresb1","sresa2") test <- FALSE options(device="png") rcm.locs <- c("oslo","berg","trom","stoc","hels","koeb","reyk","tors") for (ele in elems) { stations1 <- avail.locs(ele=ele)$name[is.element(avail.locs(ele=ele)$ident,"NORDKLIM")] countries1 <- avail.locs(ele=ele)$country[is.element(avail.locs(ele=ele)$ident,"NORDKLIM")] countries1 <- strip(countries1) # number 31 is 'Ship' and causes an error... stations1 <- stations1[-c(31)] countries1 <- countries1[-c(31)] srt.1 <- c(23,5,6,54,18,22,28,26,27,31,34,35,36,33) srt.1 <- c(srt.1,(1:length(stations1))[-srt.1]) stations2 <- avail.locs(ele=ele)$name[is.element(avail.locs(ele=ele)$ident,"NACD")] stations3 <- getnarp()$number; stations3 <- stations3[-2] for (scen in scens) { is <- 0 print("NORDKLIM") for (station in stations1) { is <- is + 1 if (countries1[is]=="N") predictand <- "nordklim+metno" else predictand <- "nordklim" do.rcm <- (1:length(rcm.locs))[is.element(lower.case(substr(station,1,4)),rcm.locs)] #print(do.rcm) if ((ele!=101) | (is.null(do.rcm )) | (length(do.rcm)==0)) do.rcm <- 0 ds.one(ele=ele,scen=scen,predictand=predictand,station=station, do.rcm=FALSE,test=test,silent=TRUE) } print("NACD") for (station in stations2) { do.rcm <- (1:length(rcm.locs))[is.element(lower.case(substr(station,1,4)),rcm.locs)] if ((ele!=101) | (is.null(do.rcm )) | (length(do.rcm)==0)) do.rcm <- 0 ds.one(ele=ele,scen=scen,predictand="nacd",station=station,do.rcm=FALSE) } print("NARP") for (station in stations3) { ds.one(ele=ele,scen=scen,predictand="narp",station=station) } } } source("ds_tomas.R")
e00a59a444128bf4b6a3bfa20cafdf3ff1252b12
8915dc6d03526b6e015738d70298a22f3716d5ce
/folds.R
aa8389e9a7d083e5bbacfb8994d917e6480f5a5e
[]
no_license
philippeback/practical-machine-learning
caf81342591f214f07597ae8874ae0441cc38f5d
3666fc2865ca57a11e488013e2b7c5706b74ebf5
refs/heads/master
2021-01-23T03:48:14.127660
2015-06-21T22:05:02
2015-06-21T22:05:02
37,826,018
0
0
null
null
null
null
UTF-8
R
false
false
374
r
folds.R
# Let's have some fun with partitions and folds, to see how things are going. # Always nice to have several entries to test the model against. folds <- createFolds(trainingSet$classe, k=5) training1<-trainingSet[folds$Fold1,] training2<-trainingSet[folds$Fold2,] training3<-trainingSet[folds$Fold3,] training4<-trainingSet[folds$Fold4,] training5<-trainingSet[folds$Fold5,]
0da5a3a5904b181efc0ec106300bc9d9eda27ce4
5965c2779ae9a801c6cb00e937b80f9af7fef9a2
/plot1.R
ac45aafeb27edf30950584c1931c69b62e8a3638
[]
no_license
sundaramesh/ExData_Plotting1
c93accdc9a5c909de0e646d8afc54811aca65266
fa5138a3d1e8e173c1df264c8f92af206bda05e6
refs/heads/master
2020-07-03T13:32:21.146721
2016-11-18T21:07:28
2016-11-18T21:07:28
74,165,939
0
0
null
2016-11-18T21:00:22
2016-11-18T21:00:22
null
UTF-8
R
false
false
1,096
r
plot1.R
########################################################### # Exploratory Data Analysis - Project 1 ########################################################### # Author : Ramesh SUndaram # Code : plot1.R # Date : 11/17/2016 # github : https://github.com/sundaramesh/ExData_Plotting1 ########################################################### # Setting Read file variable PowerDataFile <- "household_power_consumption.txt" # Reading the file as table into a variable PowerData <- read.table(PowerDataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") # Filtering and Reading Data for Date Range 2007-02-01 and 2007-02-02. OneMonthData <- PowerData[PowerData$Date %in% c("1/2/2007","2/2/2007") ,] # Getting Active Power Data GlobalActivePower <- as.numeric(OneMonthData$Global_active_power) # Setting the output device as png png("plot1.png", width=480, height=480) # Plotting the Graph hist(GlobalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") # Close the output device dev.off()
cbba4e2d88150768f8bb3f166a6b6305e6850bff
d217e37eec62c7bd990f7aad46ae39758910f0e4
/02_voting_example.R
8a82a4bdc37c4323827446f538c5caa57a722bb8
[]
no_license
vfonsecad/ensemble_methods_workshop
545e232536b4fa45425e3c2923ad2d80e079108b
bf25333579a96050c34628e2ca48954763f93445
refs/heads/master
2022-02-04T23:01:38.854151
2019-08-04T20:21:57
2019-08-04T20:21:57
199,320,292
1
0
null
null
null
null
UTF-8
R
false
false
6,384
r
02_voting_example.R
# ----------------------------------------------------------------------- # ------------- VOTING (PLS MODELS) ---------------------- # ----------------------------------------------------------------------- rm(list = ls()) # --- Libraries --- library(data.table) library(caret) library(ggplot2) # --- Read data sets --- source("01_read_data.R") # --- Select one current dataset for work current_dataset_train <- spectra_train current_dataset_test <- spectra_test current_response_var <- "Y" yvar <- current_dataset_train[[current_response_var]] yvar_test <- current_dataset_test[[current_response_var]] # -------------------------- PLS MODEL TRAINING ----------------------------- # --- train Model by Tag in Caret my_formula <- as.formula(paste0(current_response_var, " ~ .")) # --- Model 0 # Fit a regular pls model. No preprocessing because this data is already centered and no scaling is needed. # In PLS models (partial least squares) the tuning parameter is the number of components for dimension reduction 'ncomp' # - CV my_pls0_cv <- caret::train(my_formula, current_dataset_train ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = TRUE), tuneGrid=expand.grid(ncomp=1:20) ) # CV tuning plots my_pls0_cv_dt <- melt(my_pls0_cv$results, id.vars = "ncomp") ggplot(my_pls0_cv_dt, aes(x = ncomp, y = value)) + geom_line(color = "white", size = 2)+ facet_wrap(~variable, nrow = 3, ncol=2, scales = "free")+ theme_dark()+ ggtitle("PLS tuning number of components") # - Train my_pls0 <- caret::train(my_formula, current_dataset_train ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = FALSE), tuneGrid=expand.grid(ncomp=10) ) my_pls0 # - Predicted values on test my_pls0_pred <-predict(my_pls0,current_dataset_test) my_pls0_rmsep <- round(sqrt(mean((yvar_test - my_pls0_pred)^2)),6) # Predicted values plot my_pls0_pred_dt <- data.table(observed = yvar_test, predicted = my_pls0_pred) ggplot(my_pls0_pred_dt, aes(x = observed, 
y = predicted))+ geom_point(colour = "red", size = 2)+ geom_segment(x = min(my_pls0_pred_dt[["observed"]]), y = min(my_pls0_pred_dt[["predicted"]]), xend = max(my_pls0_pred_dt[["observed"]]), yend = max(my_pls0_pred_dt[["predicted"]]), colour = "blue")+ ggtitle("Observed vs Predicted in test set")+ geom_text(aes(x = quantile(yvar_test,0.3),y = quantile(my_pls0_pred,0.9), label = paste0("RMSEP: ", my_pls0_rmsep))) # -------------------------- VOTING STRATEGY --------------------- # --- train data subsets id1 <- (rbinom(length(yvar),1,0.5) == 1) id2 <- (yvar >= median(yvar)) current_dataset_train1 <- current_dataset_train[id1] current_dataset_train2 <- current_dataset_train[id2] # --- Model 1 # Fit a regular pls model my_pls1_cv <- caret::train(my_formula, current_dataset_train1 ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = TRUE), tuneGrid=expand.grid(ncomp=1:20) ) my_pls1_cv_dt <- melt(my_pls1_cv$results, id.vars = "ncomp") ggplot(my_pls1_cv_dt, aes(x = ncomp, y = value)) + geom_line(color = "white", size = 2)+ facet_wrap(~variable, nrow = 3, ncol=2, scales = "free")+ theme_dark()+ ggtitle("PLS tuning number of components train1") # - Train my_pls1 <- caret::train(my_formula, current_dataset_train1 ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = FALSE), tuneGrid=expand.grid(ncomp=10) ) my_pls1 # - Predicted values on test my_pls1_pred <-predict(my_pls1,current_dataset_test) # --- Model 2 # Fit a regular pls model my_pls2_cv <- caret::train(my_formula, current_dataset_train2 ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = TRUE), tuneGrid=expand.grid(ncomp=1:20) ) my_pls2_cv_dt <- melt(my_pls2_cv$results, id.vars = "ncomp") ggplot(my_pls2_cv_dt, aes(x = ncomp, y = value)) + geom_line(color = "white", size = 2)+ facet_wrap(~variable, nrow = 3, ncol=2, scales = "free")+ theme_dark()+ ggtitle("PLS tuning number of components train2") # - Train my_pls2 
<- caret::train(my_formula, current_dataset_train2 ,method = "simpls", trControl = trainControl( method = "cv", number = 10, verboseIter = FALSE), tuneGrid=expand.grid(ncomp=9) ) my_pls2 # - Predicted values on test my_pls2_pred <-predict(my_pls2,current_dataset_test) # --- Voting my_pls12_pred <- 0.5*my_pls1_pred + 0.5*my_pls2_pred my_pls12_rmsep <- round(sqrt(mean((yvar_test - my_pls12_pred)^2)),6) # Predicted values plot my_pls12_pred_dt <- data.table(observed = current_dataset_test[[current_response_var]], predicted = my_pls12_pred) ggplot(my_pls12_pred_dt, aes(x = observed, y = predicted))+ geom_point(colour = "red", size = 2)+ geom_segment(x = min(my_pls0_pred_dt[["observed"]]), y = min(my_pls0_pred_dt[["predicted"]]), xend = max(my_pls0_pred_dt[["observed"]]), yend = max(my_pls0_pred_dt[["predicted"]]), colour = "blue")+ ggtitle("Observed vs Predicted in test set by voting")+ geom_text(aes(x = quantile(yvar_test,0.3),y = quantile(my_pls12_pred,0.9), label = paste0("RMSEP: ", my_pls12_rmsep))) # ------------ Optional my_pls0_pred_dt[["model"]] <- "unique" my_pls12_pred_dt[["model"]] <- "ensemble" my_pls012_pred_dt <- rbind(my_pls0_pred_dt, my_pls12_pred_dt) p <- ggplot(my_pls012_pred_dt, aes(x = observed, y = predicted))+ geom_point(colour = "red", size = 2)+ geom_segment(x = min(my_pls0_pred_dt[["observed"]]), y = min(my_pls0_pred_dt[["predicted"]]), xend = max(my_pls0_pred_dt[["observed"]]), yend = max(my_pls0_pred_dt[["predicted"]]), colour = "blue")+ ggtitle("Observed vs Predicted in test set - animation- ")+ facet_wrap(~model) p
f44006742999989bb71c36396e21089c0d56c7cd
7e323ebc12c514729ff4df23ff7fe6d8d2c3e395
/man/add.hopar.Rd
35be1590c82b79232cfc1aab3e57452b6482403c
[]
no_license
einarhjorleifsson/fjolst2
05fc78df588d4d749983dde53123e28effaad9f6
a7385f789086e1e8c8e00452aa001e3dbc0259a2
refs/heads/master
2021-01-19T09:44:53.857406
2015-07-15T11:46:04
2015-07-15T11:48:54
39,079,259
0
0
null
null
null
null
UTF-8
R
false
false
615
rd
add.hopar.Rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/add.hopar.R \name{add.hopar} \alias{add.hopar} \title{Add prey information to a dataframe} \usage{ add.hopar(faeduhopur, flokkur, col.names = c("thyngd"), isl = F, return.on.error = F, hopur = NULL, tmptable = F, oracle = fjolstOracle, hopar.col = hopar.col) } \arguments{ \item{faeduhopur}{xxx} \item{flokkur}{xxx} \item{col.names}{xxx} \item{isl}{xxx} \item{return.on.error}{xxx} \item{hopur}{xxx} \item{tmptable}{xxx} \item{oracle}{xxx} \item{hopar.col}{xxx} } \description{ Add prey information to a dataframe }
5291b23285a44bbc39b51fa95b855dbf6ee5cf41
77157987168fc6a0827df2ecdd55104813be77b1
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968331-test.R
9615ab779704c9f602d900c327854e4fd0ec9b59
[]
no_license
akhikolla/updatedatatype-list2
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
a3a519440e02d89640c75207c73c1456cf86487d
refs/heads/master
2023-03-21T13:17:13.762823
2021-03-20T15:46:49
2021-03-20T15:46:49
349,766,184
0
0
null
null
null
null
UTF-8
R
false
false
415
r
1612968331-test.R
testlist <- list(x1 = numeric(0), x2 = NA_real_, y1 = NaN, y2 = c(NaN, 2.90856445706742e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(palm:::euc_distances,testlist) str(result)
daef9570d55ea89c579a6bd9317edc5a6bf42442
2c38fc71287efd16e70eb69cf44127a5f5604a81
/tests/testthat/test-class_branch.R
5df4efbef0a03048616d31725d57a9e0474361ca
[ "MIT", "Apache-2.0" ]
permissive
ropensci/targets
4ceef4b2a3cf7305972c171227852338dd4f7a09
a906886874bc891cfb71700397eb9c29a2e1859c
refs/heads/main
2023-09-04T02:27:37.366455
2023-09-01T15:18:21
2023-09-01T15:18:21
200,093,430
612
57
NOASSERTION
2023-08-28T16:24:07
2019-08-01T17:33:25
R
UTF-8
R
false
false
4,248
r
test-class_branch.R
tar_test("branch$pedigree", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "y", 1L ) expect_true(inherits(branch, "tar_branch")) }) tar_test("branch$pedigree", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "x_1", 1L ) expect_silent(pedigree_validate(branch$pedigree)) expect_equal(settings$name, "x") expect_equal(branch$settings$name, "x_1") }) tar_test("branch priority", { command <- command_init(quote(1 + 1)) settings <- settings_init( name = "x", pattern = quote(map(y)), priority = 0.5 ) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "y", 1L ) expect_equal(branch$settings$priority, 0.5) }) tar_test("branches are not branchable", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "y", 1L ) expect_false(target_is_branchable(branch)) }) tar_test("target_get_name()", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "y", 1L ) expect_equal(settings$name, "x") expect_equal(target_get_name(branch), "y") }) tar_test("target_get_parent(branch)", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "y", 1L ) expect_equal(target_get_parent(branch), "x") }) tar_test("target_deps_deep()", { pipeline <- pipeline_init( list( target_init( name = "data0", expr = quote(seq_len(3L)) ), target_init( name = "data", expr = quote(seq_len(3L)) ), target_init( name = 
"map", expr = quote(c(data0, data)), pattern = quote(map(data)) ) ) ) local <- local_init(pipeline) local$run() name <- target_get_children(pipeline_get_target(pipeline, "map"))[2] branch <- pipeline_get_target(pipeline, name) bud <- target_get_children(pipeline_get_target(pipeline, "data"))[2] expect_equal( sort(target_deps_deep(branch, pipeline)), sort(c("data0", "data", bud)) ) }) tar_test("branch$produce_record() of a successful branch", { stem <- target_init("x", quote(sample.int(4))) map <- target_init("y", quote(x), pattern = quote(map(x))) pipeline <- pipeline_init(list(stem, map), clone_targets = FALSE) local <- local_init(pipeline) local$run() meta <- local$meta target <- pipeline_get_target(pipeline, target_get_children(map)[2L]) record <- target_produce_record(target, pipeline, meta) expect_silent(record_validate(record)) expect_true(grepl("^y_", record$name)) expect_equal(record$parent, "y") expect_equal(record$type, "branch") expect_equal(nchar(record$command), 16L) expect_equal(nchar(record$depend), 16L) name <- target_get_name(target) expect_equal(record$path, file.path("_targets", "objects", name)) expect_equal(nchar(record$data), 16L) expect_true(record$bytes > 0) expect_true(record$time > 0) expect_equal(record$format, "rds") expect_equal(record$iteration, "vector") expect_equal(record$children, NA_character_) expect_true(is.numeric(record$seconds)) expect_equal(record$warnings, NA_character_) expect_equal(record$error, NA_character_) }) tar_test("branch_validate()", { command <- command_init(quote(1 + 1)) settings <- settings_init(name = "x", pattern = quote(map(y))) cue <- cue_init() branch <- branch_init( command, settings, cue, NULL, character(0), "x_f4acd87c52d4e62b", 1L ) expect_silent(target_validate(branch)) })
5fead71a28caccf84feec0412d0a26280dc6b944
21f043f9c54a9e17d32afe3ba03d16dc8c031105
/stats/Rfiles/stats_per_day_graph.R
e6ccd0d5a5fa8f2d314168748539b76bdc37f09f
[]
no_license
mquezada/tesis
569d08ef26e25c9e9f72ed304c762d2eab4f4b84
f2cdf46a9e2e129dfe7654db3ffb63cddc7ef871
refs/heads/master
2016-09-05T13:33:56.228985
2013-06-05T00:22:34
2013-06-05T00:22:34
null
0
0
null
null
null
null
UTF-8
R
false
false
422
r
stats_per_day_graph.R
plot(c(16,22), c(200000,350000), type='n', xlab='day', ylab='frequency', ylim=c(0, max(tw)), xlim=c(16,22)) lines(x, ht, col='cyan', lwd=.5) lines(x, mt, col='red', lwd=.5) lines(x, tw, col='green', lwd=.5) lines(x, ur, col='blue', lwd=.5) lines(x, us, col='yellow', lwd=.3) legend("topleft", c('Hashtags', 'Tweets', 'Mentions', 'URLs', 'Users'), lty=c(1,1,1,1,1), col=c('cyan', 'green', 'red', 'blue', 'yellow'), cex=0.5)
216300ee085ab98b25c09239fa08327c45c56bbf
c523971f97f27f169b3e996d390667dd80459e8f
/Lab3/Final_report/RL_Lab1.R
f94d25e327826d8d4a77473c5cee50ab106c65c7
[ "MIT" ]
permissive
hodfa840/Adv-Machine-Learning
c40fb59ece6d256c8685ddddd200b7574d37f2bb
d68016ae56431a605616ec761f1acf26bdaff924
refs/heads/main
2023-08-21T14:14:17.404502
2021-10-21T17:42:45
2021-10-21T17:42:45
null
0
0
null
null
null
null
UTF-8
R
false
false
8,507
r
RL_Lab1.R
# By Jose M. Peña and Joel Oskarsson. # For teaching purposes. # jose.m.pena@liu.se. ##################################################################################################### # Q-learning ##################################################################################################### # install.packages("ggplot2") # install.packages("vctrs") library(ggplot2) arrows <- c("^", ">", "v", "<") action_deltas <- list(c(1,0), # up c(0,1), # right c(-1,0), # down c(0,-1)) # left vis_environment <- function(iterations=0, epsilon = 0.5, alpha = 0.1, gamma = 0.95, beta = 0){ # Visualize an environment with rewards. # Q-values for all actions are displayed on the edges of each tile. # The (greedy) policy for each state is also displayed. # # Args: # iterations, epsilon, alpha, gamma, beta (optional): for the figure title. # reward_map (global variable): a HxW array containing the reward given at each state. # q_table (global variable): a HxWx4 array containing Q-values for each state-action pair. # H, W (global variables): environment dimensions. 
df <- expand.grid(x=1:H,y=1:W) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,q_table[x,y,1],NA),df$x,df$y) df$val1 <- as.vector(round(foo, 2)) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,q_table[x,y,2],NA),df$x,df$y) df$val2 <- as.vector(round(foo, 2)) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,q_table[x,y,3],NA),df$x,df$y) df$val3 <- as.vector(round(foo, 2)) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,q_table[x,y,4],NA),df$x,df$y) df$val4 <- as.vector(round(foo, 2)) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,arrows[GreedyPolicy(x,y)],reward_map[x,y]),df$x,df$y) df$val5 <- as.vector(foo) foo <- mapply(function(x,y) ifelse(reward_map[x,y] == 0,max(q_table[x,y,]), ifelse(reward_map[x,y]<0,NA,reward_map[x,y])),df$x,df$y) df$val6 <- as.vector(foo) print(ggplot(df,aes(x = y,y = x)) + scale_fill_gradient(low = "white", high = "green", na.value = "red", name = "") + geom_tile(aes(fill=val6)) + geom_text(aes(label = val1),size = 4,nudge_y = .35,na.rm = TRUE) + geom_text(aes(label = val2),size = 4,nudge_x = .35,na.rm = TRUE) + geom_text(aes(label = val3),size = 4,nudge_y = -.35,na.rm = TRUE) + geom_text(aes(label = val4),size = 4,nudge_x = -.35,na.rm = TRUE) + geom_text(aes(label = val5),size = 10) + geom_tile(fill = 'transparent', colour = 'black') + ggtitle(paste("Q-table after ",iterations," iterations\n", "(epsilon = ",epsilon,", alpha = ",alpha,"gamma = ",gamma,", beta = ",beta,")")) + theme(plot.title = element_text(hjust = 0.5)) + scale_x_continuous(breaks = c(1:W),labels = c(1:W)) + scale_y_continuous(breaks = c(1:H),labels = c(1:H))) } GreedyPolicy <- function(x, y){ # Get a greedy action for state (x,y) from q_table. # # Args: # x, y: state coordinates. # q_table (global variable): a HxWx4 array containing Q-values for each state-action pair. # # Returns: # An action, i.e. integer in {1,2,3,4}. # Your code here. 
# NOTE(review): this chunk begins inside GreedyPolicy(), whose header is defined
# earlier in the file. The lines below are its tail: return the arg-max action
# for state (x, y) from the global q_table.
action <- which.max(q_table[x, y,])
  return(action)
}

EpsilonGreedyPolicy <- function(x, y, epsilon){
  # Get an epsilon-greedy action for state (x,y) from q_table.
  #
  # Args:
  #   x, y: state coordinates.
  #   epsilon: probability of acting greedily.
  #     NOTE(review): the original comment said "probability of acting
  #     randomly", but rbinom(1, 1, epsilon) == 1 selects the greedy arg-max
  #     branch below, so this implementation exploits with probability epsilon
  #     and explores uniformly with probability 1 - epsilon. This matches the
  #     documentation of q_learning()'s epsilon argument.
  #
  # Returns:
  #   An action, i.e. integer in {1,2,3,4}.

  # Bernoulli(epsilon) coin flip: 1 -> greedy action, 0 -> uniform random action.
  seed <- rbinom(1, 1, epsilon)
  if (seed == 1) {
    action <- which.max(q_table[x, y,])
  }else{
    action = sample(c(1,2,3,4), 1)
  }
  return(action)
}

transition_model <- function(x, y, action, beta){
  # Computes the new state after given action is taken. The agent will follow the action
  # with probability (1-beta) and slip to the right or left with probability beta/2 each.
  #
  # Args:
  #   x, y: state coordinates.
  #   action: which action the agent takes (in {1,2,3,4}).
  #   beta: probability of the agent slipping to the side when trying to move.
  #   H, W (global variables): environment dimensions.
  #
  # Returns:
  #   The new state after the action has been taken.

  # delta in {-1, 0, 1}: slip one step "left", go straight, slip one step "right".
  delta <- sample(-1:1, size = 1, prob = c(0.5*beta,1-beta,0.5*beta))
  # Rotate the intended action by delta, wrapping around the 1..4 action set.
  final_action <- ((action + delta + 3) %% 4) + 1
  foo <- c(x,y) + unlist(action_deltas[final_action])
  # Clamp so the agent cannot step outside the H x W grid.
  foo <- pmax(c(1,1),pmin(foo,c(H,W)))

  return (foo)
}

q_learning <- function(start_state, epsilon = 0.5, alpha = 0.1, gamma = 0.95,
                       beta = 0){
  # Perform one episode of Q-learning. The agent should move around in the
  # environment using the given transition model and update the Q-table.
  # The episode ends when the agent reaches a terminal state.
  #
  # Args:
  #   start_state: array with two entries, describing the starting position of the agent.
  #   epsilon (optional): probability of acting greedily.
  #   alpha (optional): learning rate.
  #   gamma (optional): discount factor.
  #   beta (optional): slipping factor.
  #   reward_map (global variable): a HxW array containing the reward given at each state.
  #   q_table (global variable): a HxWx4 array containing Q-values for each state-action pair.
  #
  # Returns:
  #   reward: reward received in the episode.
  #   correction: the temporal-difference correction of the final (terminal) step.
  #     NOTE(review): episode_correction is overwritten on every iteration of the
  #     repeat loop, so despite the original comment ("sum of the temporal
  #     difference correction terms over the episode") only the LAST TD error is
  #     returned; for non-zero rewards only at terminal states this is the
  #     terminal-step correction.
  #   q_table (global variable): Recall that R passes arguments by value. So, q_table being
  #   a global variable can be modified with the superassigment operator <<-.

  repeat{
    # Follow policy, execute action, get reward.
    x <- start_state[1]
    y <- start_state[2]
    action <- EpsilonGreedyPolicy(x, y, epsilon)
    start_state <- transition_model(x, y, action, beta)
    new_x <- start_state[1]
    new_y <- start_state[2]

    # Q-table update (one-step TD target: r + gamma * max_a Q(s', a)).
    reward <- reward_map[new_x, new_y]
    episode_correction <- reward + gamma * max(q_table[new_x, new_y, ]) - q_table[x, y, action]
    q_table[x, y, action] <<- q_table[x, y, action] + alpha * episode_correction

    if(reward!=0)
      # End episode: any non-zero reward marks a terminal state.
      return (c(reward,episode_correction))
  }
}

#####################################################################################################
# Q-Learning Environments
#####################################################################################################

# Environment A (learning): goal at (3,6) with reward 10, a wall of -1 states in column 3.

H <- 5
W <- 7

reward_map <- matrix(0, nrow = H, ncol = W)
reward_map[3,6] <- 10
reward_map[2:4,3] <- -1

q_table <- array(0,dim = c(H,W,4))

vis_environment()

for(i in 1:10000){
  foo <- q_learning(start_state = c(3,1))

  # Snapshot the learned policy/values at a few milestones.
  if(any(i==c(10,100,1000,10000)))
    vis_environment(i)
}

# Environment B (the effect of epsilon and gamma): two goals (5 and 10) with
# punishing -1 borders on the top and bottom rows.

H <- 7
W <- 8

reward_map <- matrix(0, nrow = H, ncol = W)
reward_map[1,] <- -1
reward_map[7,] <- -1
reward_map[4,5] <- 5
reward_map[4,8] <- 10

q_table <- array(0,dim = c(H,W,4))

vis_environment()

MovingAverage <- function(x, n){
  # Simple moving average of x with window length n (used to smooth the
  # per-episode reward/correction traces before plotting).
  cx <- c(0,cumsum(x))
  rsum <- (cx[(n+1):length(cx)] - cx[1:(length(cx) - n)]) / n

  return (rsum)
}

# Default epsilon (0.5), varying the discount factor gamma.
for(j in c(0.5,0.75,0.95)){
  q_table <- array(0,dim = c(H,W,4))
  reward <- NULL
  correction <- NULL

  for(i in 1:30000){
    foo <- q_learning(gamma = j, start_state = c(4,1))
    reward <- c(reward,foo[1])
    correction <- c(correction,foo[2])
  }

  vis_environment(i, gamma = j)
  plot(MovingAverage(reward,100),type = "l")
  plot(MovingAverage(correction,100),type = "l")
}

# Same sweep with a mostly-exploring policy (epsilon = 0.1, i.e. greedy only
# 10% of the time under this implementation's convention).
for(j in c(0.5,0.75,0.95)){
  q_table <- array(0,dim = c(H,W,4))
  reward <- NULL
  correction <- NULL

  for(i in 1:30000){
    foo <- q_learning(epsilon = 0.1, gamma = j, start_state = c(4,1))
    reward <- c(reward,foo[1])
    correction <- c(correction,foo[2])
  }

  vis_environment(i, epsilon = 0.1, gamma = j)
  plot(MovingAverage(reward,100),type = "l")
  plot(MovingAverage(correction,100),type = "l")
}

# Environment C (the effect of beta): narrow corridor with -1 states below the
# path and the goal (10) at the far end; beta controls slipping.

H <- 3
W <- 6

reward_map <- matrix(0, nrow = H, ncol = W)
reward_map[1,2:5] <- -1
reward_map[1,6] <- 10

q_table <- array(0,dim = c(H,W,4))

vis_environment()

for(j in c(0,0.2,0.4,0.66)){
  q_table <- array(0,dim = c(H,W,4))

  for(i in 1:10000)
    foo <- q_learning(gamma = 0.6, beta = j, start_state = c(1,1))

  vis_environment(i, gamma = 0.6, beta = j)
}
c8ce88e8b46645f467fa6f6a4c40ffbc62927a35
769778ca8129fdd210b008d74110a77af8e38c5e
/man/scale_quantitative.Rd
640e46daa1b60bca9cdebc302f5d65b75595f8ea
[]
no_license
wch/ggvis
c36f1a6d95174fa15b2ef106db930d1d9ea86c8f
1a7732c7e57eebc2f08ae28588276418e99649a7
refs/heads/master
2023-03-17T00:39:41.002979
2014-01-13T18:24:54
2014-01-13T18:24:54
11,941,640
0
1
null
null
null
null
UTF-8
R
false
false
2,618
rd
scale_quantitative.Rd
\name{scale_quantitative}
\alias{scale_quantitative}
\title{Create a quantitative scale}
\usage{
scale_quantitative(name, trans = "linear", exponent = NULL, clamp = FALSE,
  nice = TRUE, zero = FALSE, domain = NULL, range = NULL, reverse = FALSE,
  round = FALSE)
}
\arguments{
\item{trans}{A scale transformation: one of "linear", "log", "pow", "sqrt", "quantile", "quantize", "threshold"}

\item{exponent}{Sets the exponent of the scale transformation. For pow transform only.}

\item{clamp}{If \code{TRUE}, values that exceed the data domain are clamped to either the minimum or maximum range value.}

\item{nice}{If \code{TRUE}, modifies the scale domain to use a more human-friendly number range (e.g., 7 instead of 6.96).}

\item{zero}{If \code{TRUE}, ensures that a zero baseline value is included in the scale domain. This option is ignored for non-quantitative scales.}

\item{name}{name of the scale.}

\item{domain}{The domain of the scale, representing the set of data values. For ordinal scales, a character vector; for quantitative scales, a numeric vector of length two. Either value (but not both) may be missing, in which case \code{domainMin} or \code{domainMax} is set.}

\item{range}{The range of the scale, representing the set of visual values. For numeric values, the range can take the form of a two-element array with minimum and maximum values. For ordinal data, the range may be an array of desired output values, which are mapped to elements in the specified domain. The following range literals are also available: "width", "height", "shapes", "category10", "category20".}

\item{reverse}{If true, flips the scale range.}

\item{round}{If true, rounds numeric output values to integers. This can be helpful for snapping to the pixel grid.}
}
\description{
A quantitative scale controls the mapping of continuous variables to visual properties.
} \details{ Generally, you should create new scales with \code{\link{dscale}} because that will automatically set the range to a reasonable default, and it will automatically pick the correct type of scale given the variable type. } \examples{ scale_quantitative("y") dscale("y", "numeric") scale_quantitative("y", "pow", 0.5) dscale("y", "numeric", trans = "pow", exp = 0.5) scale_quantitative("x", clamp = TRUE, nice = FALSE, zero = TRUE) dscale("x", "numeric", clamp = TRUE, nice = FALSE, zero = TRUE) } \seealso{ \url{https://github.com/trifacta/vega/wiki/Scales#quantitative-scale-properties} Other vega scales: \code{\link{scale_ordinal}}; \code{\link{scale_time}} }
89ea205ab942c7350cc0e585a4d47f519132104c
d3c500e5204d2a7d8965f25c8c61e46d24d4fe28
/tests/testthat/test-site2sf.R
45e671f52940d62c8d16d4cefc839e5fa05ec6f5
[ "MIT" ]
permissive
WeiquanLuo/rivertopo
c7d4f7fd9617ebdeff79d2115aad47d5fab1e788
e0e533c8e3d58b8c7b8d10dc3145ad37c76a684c
refs/heads/master
2020-09-09T18:20:21.296681
2020-01-27T05:52:11
2020-01-27T05:52:11
221,524,684
1
0
null
null
null
null
UTF-8
R
false
false
421
r
test-site2sf.R
context("test-site2sf") test_that("check site2sf.R", { df <- data.frame(id = c("Point1", "Point2", "Point3"), lat = c(42.032974, 44.032323, 47.123123), long = c(-93.581543, -92.58345343, -96.2324543), stringsAsFactors = FALSE) result <- site2sf(df=df,id_cn = "id", Lon_cn= "long", Lat_cn= "lat") expect_is(result, "sf") expect_is(result, "data.frame") })
907324355ccd4706dd0a5cde1b4007f4e3820add
9f9fc5049f7ce3e86b5a19127aa1fb1d56e7d805
/R/get_prop_alt_rctr_know.R
ca048c5b43a50bc295820a5b0fbe43a9a60d40f7
[]
no_license
jcfisher/hkestimator
567c009ca5908bf03efcc6390fc3891f7e185029
2c3b21e576568c1b6c90260c910e7b872702a5de
refs/heads/master
2021-01-19T04:08:34.665069
2016-06-30T13:24:51
2016-06-30T13:24:51
62,176,476
0
0
null
null
null
null
UTF-8
R
false
false
515
r
get_prop_alt_rctr_know.R
#' Calculates the proportion of a recruiter's contacts that the focal node knows
#'
#' For each row of \code{data}, looks up the recruiter (column 2) in the
#' adjacency matrix, collects that recruiter's contacts, and computes the share
#' of those contacts that are also tied to the focal node (column 1).
#'
#' @param data: a data.frame; column 1 is the focal node index, column 2 the
#'   recruiter index (assumption inferred from the indexing below -- confirm)
#' @param net: an adjacency matrix (logical, since \code{which()} requires a
#'   logical vector)
#'
#' @export getpropaltrctrknow
#'
#' @return a matrix with one row per row of \code{data}, little phi?
getpropaltrctrknow <- function(data, net) {
  r <- nrow(data)
  lilphi <- matrix(0, nrow = r, ncol = 1)
  # seq_len(r) is safe when data has zero rows; the original 1:r would
  # iterate over c(1, 0) and fail on the empty data frame.
  for (k in seq_len(r)) {
    # Contacts of this row's recruiter. which() on the adjacency row replaces
    # the original apply(t(net[data[k, 2], ]), 1, which), which built a 1xN
    # matrix only to apply which() to its single row.
    potential.recruitees <- which(net[data[k, 2], ])
    # Share of the recruiter's contacts tied to the focal node; the -1 in the
    # denominator presumably excludes the recruiter/focal node itself -- TODO
    # confirm against the estimator's derivation.
    lilphi[k, 1] <- sum(net[potential.recruitees, data[k, 1]]) /
      (NROW(potential.recruitees) - 1)
  }
  return(lilphi)
}
b5c9ead0b39a440ce1ce7792b142e351c45bb1a6
3ad815d8a9a7ae03acef9e58080e5e7a7901b153
/man/working_life_remuneration.Rd
75345752910d935893fecf2cbc349c2b12148658
[]
no_license
HughParsonage/CRIMpp
0942503e5c11ecbff728fed62b2237d8caa92e3d
511e7256709ee63a8a35ee0415298dba0cddaa3e
refs/heads/master
2020-04-12T02:15:17.764726
2018-10-29T10:50:00
2018-10-29T10:50:00
68,201,653
0
1
null
null
null
null
UTF-8
R
false
true
1,719
rd
working_life_remuneration.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/working_life_remuneration.R \name{working_life_remuneration} \alias{working_life_remuneration} \title{Working life remuneration} \usage{ working_life_remuneration(starting_age = 30L, retirement_age = 70L, start_fy = "2015-16", wage.method = c("Constant \% of AWOTE", "Lifetime percentile"), wage.method.control = list(AWOTE.start = 78200, AWOTE.multiple = 1, percentile = NULL), wage.inflator = c("grattan::wage_inflator()", "Treasury"), forecast.series = "mean", forecast.level = 95) } \arguments{ \item{starting_age}{An integer (thus \code{30L} not \code{30}) given the starting year of the individual.} \item{retirement_age}{An integer. Youngest age of retirement age, not oldest age of work.} \item{start_fy}{The financial year corresponding to \code{starting_age}.} \item{wage.method}{One of \code{"Constant \% of AWOTE"} or \code{"Lifetime percentile"}.} \item{wage.method.control}{A \code{list}: \describe{ \item{\code{"Constant \% of AWOTE"}}{\describe{ \item{\code{AWOTE.start}}{the AWOTE at \code{starting_age};} \item{\code{AWOTE.multiple}}{the multiple of AWOTE to use.} }} \item{\code{"Lifetime percentile"}}{\code{percentile} the percentile to use.} } Using one set of controls in the wrong \code{wage.method} is a warning.} \item{wage.inflator}{One of \code{"grattan::wage_inflator()"} or \code{"Treasury"}.} \item{forecast.series}{See \code{"grattan::wage_inflator()"}.} \item{forecast.level}{See above.} } \value{ A \code{data.table} of information about the individual's remuneration from \code{starting_age} till the year before \code{retirement_age}. } \description{ Working life remuneration }
d7a96a1e509d3f42f6cf7de09d819e6c2e737111
274599f0c79becbb07a121414ef5161e192d219e
/code/rscripts/rqa/hri/v05/Ba_timeseries_aH.R
a44434c2357249c883822e9d95f938c6e936fc33
[ "MIT" ]
permissive
mxochicale/phd-thesis-code-data
44ab2f34a8407eca7c88e390a5acd19f18c0e3ea
6da4eea7643ff747ac84d5b2ed542fd7cb19db31
refs/heads/master
2022-01-09T17:58:13.132829
2019-05-12T19:00:56
2019-05-12T19:00:56
142,457,831
0
0
null
null
null
null
UTF-8
R
false
false
8,612
r
Ba_timeseries_aH.R
###############################################################################
# Ba_timeseries_aH.R
# Plots windowed, preprocessed gyroscope time series (horizontal-movement
# Z-axes) per participant/activity/sensor for the RQA/HRI study.
#
# Written by Miguel P Xochicale [http://mxochicale.github.io]
# If you see any errors or have any questions
# please create an issue at https://github.com/mxochicale/phd-thesis-code-data/issues
###############################################################################
# OUTLINE:
# (0) Definifing paths
# (1) Loading libraries and functions
# (2) Reading
# (3) Creating paths
# (4) Selecting Variables in data.table
#     (4.1) Selecting Participants
# (5) Adding vectors
#     (5.1) Deleting some Magnetomer and quaternion data
#     (5.2) zero mean and unit variance
#     (5.3) Savitzky-Golay filter
# (6) Selecting Axis after postprocessing
# (7) Creating preprocessed data path
# (8) Writing data.table object to a file

#################
# Start the clock!
start.time <- Sys.time()

################################################################################
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
# NOTE(review): navigation is done with relative setwd() calls, so the script
# must be launched from its own directory; r_scripts_path is restored at the end.
r_scripts_path <- getwd()
setwd("../../../../../")
github_repo_path <- getwd()
setwd("../")
github_path <- getwd()

##VERSION
version <- '05'
feature_path <- '/rqa/hri'

## Outcomes Plot Path
outcomes_plot_path <- paste(github_path,"/phd-thesis/figs/results", feature_path, '/v', version,sep="")

## Data Path
data_path <- paste(github_repo_path,'/data-outputs', feature_path, '/v', version, sep="")
setwd(file.path(data_path))

################################################################################
# (1) Loading Functions and Libraries and Setting up digits
library(data.table) # for manipulating data
library(signal)     # for butterworth filter and sgolay
source( paste(github_repo_path,'/code/rfunctions/extra_rqa.R',sep='') )

################################################################################
# (2) Reading data
file_ext <- paste('xdata_v', version, '.dt',sep='')
data <- fread( file_ext, header=TRUE)

# Keep only the Savitzky-Golay smoothed, zero-mean/unit-variance gyroscope
# Z-axis columns (the axes used for horizontal movements, "aH").
data <- data[,.(
  sg0zmuvGyroZ,
  sg1zmuvGyroZ,
  sg2zmuvGyroZ
), by=.(Participant,Activity,Sensor,Sample)]

################################################################################
################################################################################
### (4.1) Windowing Data  [xdata[,.SD[1:2],by=.(Participant,Activity,Sensor)]]
#
############################
####### one window lenght
#windowsl <- c(100)
#windowsn <- c('w2')
#
###########################
###### one window lenght  (ACTIVE configuration: a single 10-second window)
windowsl <- c(500)
windowsn <- c('w10')

##########################
##### two windows lenght
#windowsl <- c(500, 750)
#windowsn <- c('w10', 'w15')

############################
###### four window lenghts
#windowsl <- c(100,250,500,750)
#windowsn <- c('w2', 'w5', 'w10', 'w15')

########################################
#### w2,  2-second window  (100 samples):  100 to 200
#### w5,  5-second window  (250 samples):  100 to 350
#### w10, 10-second window (500 samples):  100 to 600
#### w15, 15-second window (750 samples):  100 to 850

for ( wk in 1:(length(windowsl)) ) {

  xdata <- data
  windowlengthnumber <- windowsl[wk]
  windowksams <- paste('w', windowlengthnumber, sep='')
  windowksecs <- windowsn[wk]

  message('****************')
  message('****************')
  message('****************')
  message('****************')
  message('*** window:', windowksams)

  # general variables for window legnth: the window starts at sample 100 to
  # skip the initial transient of each recording.
  wstar=100
  wend=wstar+windowlengthnumber
  windowlength=wend-wstar
  windowframe =wstar:wend

  wkdata <- xdata[,.SD[windowframe],by=.(Participant,Activity,Sensor)];

  ################################
  #### (4.2.2) Sensor Selection
  #sensors <- c('HS01') # HumanSensor01
  sensors <- c('RS01','HS01')# RobotSensor01 and HumanSensor01

  #########################################################
  for (sensor_k in 1:length(sensors) ) {

    swkdata <- wkdata
    sensork <- sensors[sensor_k]
    message(sensork)

    # Subset rows for the current sensor via a keyed data.table join.
    if (sensork == 'RS01' ) {
      setkey(swkdata, Sensor)
      skwkdata <- swkdata[.(c('RS01'))]
    } else if (sensork == 'HS01' ) {
      setkey(swkdata, Sensor)
      skwkdata <- swkdata[.(c('HS01'))]
    } else {
      message('no valid movement_variable')
    }

    ##########################
    ## (4.2.3) Activities Selection -- keep the horizontal-movement
    ## activities HN (near) and HF (far); "aH" tags the output filenames.
    setkey(skwkdata, Activity)
    activityMovement <- 'aH'
    skwkdata <- skwkdata[.(c('HN', 'HF'))]

    ################# Reorder Factor so HN plots before HF in facet columns:
    skwkdata$Activity <- factor(skwkdata$Activity)
    skwkdata$Activity <- factor(skwkdata$Activity, levels= levels (skwkdata$Activity)[c(2,1)] )

    ################################################################################
    # (3) Outcomes Plots Path (created on demand)
    if (file.exists(outcomes_plot_path)){
      setwd(file.path(outcomes_plot_path))
    } else {
      dir.create(outcomes_plot_path, recursive=TRUE)
      setwd(file.path(outcomes_plot_path))
    }

    ### (4.2.4) Axis Selection: data columns start at position 5
    ### (after Participant, Activity, Sensor, Sample).
    axis <- names(skwkdata)[5: ( length(skwkdata)) ]

    ####### Axisk
    for (axis_k in c(1:length(axis) )){
      axisk<- axis[axis_k]
      message('#### axis:' , axisk )

      ################################################################################
      # (5.0) Creating and Changing to PlotPath
      plot_path <- paste(outcomes_plot_path, '/timeseries_plots',sep="")
      if (file.exists(plot_path)){
        setwd(file.path(plot_path))
      } else {
        dir.create(plot_path, recursive=TRUE)
        setwd(file.path(plot_path))
      }

      plotlinewidtg <- 1.5
      basefontsize <- 18

      # One time-series panel per Participant x Activity; both sensors are
      # overlaid by colour. y-limits fixed to [-5, 5] (zmuv-scaled data).
      pts <- ggplot(skwkdata)+
        geom_line( aes(x=Sample,y=get(axisk), color=Sensor), size=plotlinewidtg )+
        facet_grid(Participant~Activity)+
        scale_y_continuous()+
        #geom_segment(aes(x = 100, y = -125, xend = 100, yend = 125)) +
        coord_cartesian(xlim=c(wstar,wend), ylim=c(-5,5)) +
        theme_bw(base_size=basefontsize)+
        theme(legend.position='none')+
        labs(y=axisk, x='Sample')

      ### Save Picture
      width = 800
      height = 1200
      text.factor = 1
      dpi <- text.factor * 100
      width.calc <- width / dpi
      height.calc <- height / dpi

      file_ext <- paste(activityMovement, '-', axisk,'-', sensork, '-', windowksams, '.png', sep='')

      ggsave(filename = file_ext,
             dpi = dpi,
             width = width.calc,
             height = height.calc,
             units = 'in',
             bg = "transparent",
             device = "png",
             pts)

    }##end##for (axis_k in c(1:length(axis) )){

  }##end##for (sensor_k in 1:length(sensors) ) {

} ##end## for ( wk in 1:(length(windowsl)) ) {

################################################################################
#################
# Stop the clock!
end.time <- Sys.time()
end.time - start.time
# message('Execution Time: ', end.time - start.time)

################################################################################
setwd(r_scripts_path) ## go back to the r-script source path
d5d85db70e14a60fafc01fa127509bacb380a3bf
eec115235405a54b642d3286863dcca14786c9e8
/selectiveModel/man/dot-hit_run_next_point_radial.Rd
7500e5127cdba02e896fd68d691035be3dba7af4
[ "MIT" ]
permissive
linnykos/selectiveModel
5f5c95f84c006dc8bd83fdc2032ea251a9f4d3e6
75e42567a9a4fe1daf4d80a54cc15e47e549f60d
refs/heads/master
2023-01-21T18:24:22.665232
2020-12-01T16:45:43
2020-12-01T16:45:43
110,089,332
0
0
null
null
null
null
UTF-8
R
false
true
482
rd
dot-hit_run_next_point_radial.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hitrun_radial.R \name{.hit_run_next_point_radial} \alias{.hit_run_next_point_radial} \title{Output the next point for hit-and-run sampler, Radial} \usage{ .hit_run_next_point_radial(y, null_mat, polyhedra) } \arguments{ \item{y}{data} \item{null_mat}{matrix to represent the null space of \code{.segments}} \item{polyhedra}{\code{polyhedra} object} } \value{ vector } \description{ For unknown sigma }
0174a569967ae41122a1ddfd9d40909787c95207
a626e42ef9c0a192f5e014fcfd15523a89d8d555
/man/mle2d_svc.Rd
603091af58b1e637dce7363de968b31788217f50
[]
no_license
cran/sEparaTe
b01342b39377dfa5083db35df52b93aa1e7e8327
002ab63c3fc8b48c5d8ece2b3b282cc8d1a1eb5b
refs/heads/master
2023-08-20T05:28:14.255591
2023-08-18T07:50:02
2023-08-18T08:31:07
60,652,434
1
0
null
null
null
null
UTF-8
R
false
true
3,092
rd
mle2d_svc.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mle2d_svc.R \name{mle2d_svc} \alias{mle2d_svc} \title{Maximum likelihood estimation of the parameters of a matrix normal distribution} \usage{ mle2d_svc(value2d, Id1, Id2, subject, data_2d, eps, maxiter, startmat) } \arguments{ \item{value2d}{from the formula value2d ~ Id1 + Id2} \item{Id1}{from the formula value2d ~ Id1 + Id2} \item{Id2}{from the formula value2d ~ Id1 + Id2} \item{subject}{the replicate, also called individual} \item{data_2d}{the name of the matrix data} \item{eps}{the threshold in the stopping criterion for the iterative mle algorithm} \item{maxiter}{the maximum number of iterations for the iterative mle algorithm} \item{startmat}{the value of the second factor variance-covariance matrix used for initialization, i.e., to start the algorithm and obtain the initial estimate of the first factor variance-covariance matrix} } \description{ Maximum likelihood estimation for the parameters of a matrix normal distribution \bold{X}, which is characterized by a simply separable variance-covariance structure. In the general case, which is the case considered here, two unstructured factor variance-covariance matrices determine the covariability of random matrix entries, depending on the row (one factor matrix) and the column (the other factor matrix) where two \bold{X}-entries are. In the required function, the Id1 and Id2 variables correspond to the row and column subscripts, respectively; \dQuote{value2d} indicates the observed variable. 
}
\section{Output}{

\dQuote{Convergence}, TRUE or FALSE

\dQuote{Iter}, the number of iterations needed for the mle algorithm to converge

\dQuote{Xmeanhat}, the estimated mean matrix (i.e., the sample mean)

\dQuote{First}, the row subscript, or the second column in the data file

\dQuote{U1hat}, the estimated variance-covariance matrix for the rows

\dQuote{Standardized.U1hat}, the standardized estimated variance-covariance matrix for the rows; the standardization is performed by dividing each entry of U1hat by entry(1, 1) of U1hat

\dQuote{Second}, the column subscript, or the third column in the data file

\dQuote{U2hat}, the estimated variance-covariance matrix for the columns

\dQuote{Standardized.U2hat}, the standardized estimated variance-covariance matrix for the columns; the standardization is performed by multiplying each entry of U2hat by entry(1, 1) of U1hat

\dQuote{Shat}, the sample variance-covariance matrix computed from the vectorized data matrices
}

\section{References}{

Dutilleul P. 1990. Apport en analyse spectrale d'un periodogramme modifie et modelisation des series chronologiques avec repetitions en vue de leur comparaison en frequence. D.Sc. Dissertation, Universite catholique de Louvain, Departement de mathematique.

Dutilleul P. 1999. The mle algorithm for the matrix normal distribution. Journal of Statistical Computation and Simulation 64: 105-123.
}

\examples{
output <- mle2d_svc(data2d$value2d, data2d$Id1, data2d$Id2,
  data2d$K, data_2d = data2d)
output
}
92379f998209963f73bfb214444f1f26d3b314a1
db0b537705b0671f527a8d406f13fb1de5b49616
/data/test_dataset/R/Max-Min-of-Vector.R
3e2384c7fef73b6d7bde0bfa6dde3f8643b1c8b0
[]
no_license
rakeshamireddy/Automatic-Code-Translation
7e43d9232b1af2f9e1c62e76c1720f9469bdd842
9c4d0b040ee2eaccdcec8d8321f262d748c5c4b0
refs/heads/master
2023-04-22T23:29:55.205930
2021-05-15T03:06:03
2021-05-15T03:06:03
299,511,652
0
0
null
null
null
null
UTF-8
R
false
false
215
r
Max-Min-of-Vector.R
# Print a fixed demonstration vector along with its maximum and minimum
# values. Invisibly returns the last printed string (the minimum message).
max_min_vector <- function() {
  values <- c(10, 20, 30, 40, 50, 60)
  print('Original vector:')
  print(values)
  max_msg <- paste('Maximum value of the said vector:', max(values))
  print(max_msg)
  min_msg <- paste('Minimum value of the said vector:', min(values))
  print(min_msg)
}
91cd63c9462e872b5ff0d0642580785c43e94c42
5c601d4d005328a2a52fa542f00bc38b64de7eb2
/R/effect_sizes.r
a40af57c6c0591f801d45c15dad053fef6865d85
[]
no_license
skranz/regtools
012ef2e250160473ff4f7898c1003e06781c34c1
426fb244b238bb30b3f0bc3c6b9f6ba66a030cd9
refs/heads/master
2020-12-25T17:34:08.353180
2019-04-11T13:48:19
2019-04-11T13:48:19
22,969,689
1
2
null
null
null
null
UTF-8
R
false
false
17,733
r
effect_sizes.r
#' Change units of columns in a data frame, e.g. measure relative to one standard deviation
#'
#' useful to make coefficients in regressions better comparable
scale.data.cols = function(dat,unit="sd",cols = setdiff(colnames(dat),exclude), exclude=NULL, dummy01=TRUE) {
  # Start every column with a neutral "original" unit record.
  units = lapply(colnames(dat), function(col) {
    list(unit = "original",size=1, descr="")
  })
  dummy.unit = list(unit="dummy",size=1,descr="unit: dummy")
  # NOTE(review): dead assignment -- binds the function `rep` itself and is
  # never used; looks like an abandoned edit.
  unit.val = rep
  names(units)=cols
  for (col in cols) {
    val = dat[[col]]
    # Dummies are left unscaled; only the unit attribute is tagged.
    if (is.dummy.val(val, dummy01)) {
      attr(dat[[col]],"unit") = dummy.unit
      next
    }
    nval = scale.values(val, unit=unit, var.name=col)
    dat[[col]] = nval
  }
  attr(dat,"units") = units
  dat
}

# TRUE if val should be treated as a dummy: non-numeric, or (when dummy01)
# numeric with only 0/1/NA values.
is.dummy.val = function(val, dummy01=TRUE) {
  if (!is.numeric(val)) {
    return(TRUE)
  }
  if (dummy01) {
    if (all(unique(val) %in% c(0,1,NA))) {
      return(TRUE)
    }
  }
  return(FALSE)
}

# Build a unit descriptor (code, size, human-readable descr) for val from a
# unit code such as "2sd", "10-90", "min-max", "dummy" or "original".
make.val.unit = function(val, unit.code=c("sd","1sd","2sd","4sd","6sd","10-90","20-80","25-75","min-max","dummy", "original")) {
  #unit = "10-90"; val = dat$account_days
  # NOTE(review): this comparison runs before unit.code is reduced to its
  # first element, so with the full default vector `if` sees a length-9
  # logical (warning in old R, error in R >= 4.2).
  if (unit.code=="original") {
    return(list(code = "original",size=1, descr=""))
  }
  val = na.omit(val)
  unit.code = unit.code[1]
  if (unit.code=="sd") unit.code="1sd"
  if (substring(unit.code,2)=="sd") {
    # "<k>sd": k standard deviations.
    denom = sd(val) * as.numeric(substring(unit.code,1,1))
  } else if (unit.code=="min-max") {
    denom = max(val)-min(val)
  } else if (unit.code=="dummy") {
    denom = 1
  } else {
    # "<lo>-<hi>" percentile codes, e.g. "10-90".
    start = as.numeric(substring(unit.code,1,2))
    end = as.numeric(substring(unit.code,4,5))
    denom = diff(quantile(val, c(start, end)/100))
  }
  list(code=unit.code, size=denom, descr=paste0("unit ", unit.code, ": ", signif(denom,3)))
}

# Divide val by the size of the chosen unit and attach the unit descriptor
# as an attribute.
scale.values = function(val, unit.code=c("sd","1sd","2sd","4sd","6sd","10-90","20-80","25-75","nochange"), var.name="" ) {
  restore.point("scale.values")
  unit = make.val.unit(val, unit.code)
  ret = val / unit$size
  attr(ret,"unit") = unit
  ret
}

# Fill in baseline/low/high values of an effect descriptor from the data
# vector val. With overwrite=FALSE, values already present in `effect` win.
add.values.for.effect = function(val, effect, overwrite=TRUE) {
  restore.point("add.values.for.effect")
  val = na.omit(val)
  if (!overwrite)
    old.effect = effect
  if (effect$type=="quantile") {
    effect$baseline.val = median(val)
    effect$low.val = quantile(val,effect$low.percent)
    effect$high.val = quantile(val,effect$high.percent)
  } else if (effect$type == "sd") {
    # Symmetric band of `size` standard deviations around the mean.
    m = mean(val)
    s = sd(val)
    effect$baseline.val = m
    effect$low.val = m-s*0.5*effect$size
    effect$high.val = m+s*0.5*effect$size
  } else if (effect$type == "dummy") {
    effect$baseline.val = median(val)
    effect$low.val = 0
    effect$high.val = 1
  # original values
  } else {
    m = mean(val)
    effect$baseline.val = m
    effect$low.val = m-0.5*effect$size
    effect$high.val = m+0.5*effect$size
  }
  if (!overwrite) {
    effect[names(old.effect)] = old.effect
  }
  effect
}

# Translate an effect code string into an effect descriptor list; when val is
# supplied the baseline/low/high values are computed immediately.
get.effect.base = function(val=NULL, effect=c("sd","1sd","2sd","4sd","6sd","10-90","20-80","25-75","min-max","dummy", "original"), var=NULL) {
  if (is.character(effect)) {
    code= effect[1]
    if (code=="sd") code="1sd"
    if (code=="original") {
      effect=list(code = "original",type="original",var=var, descr="")
    } else if (substring(code,2)=="sd") {
      effect=list(code = code,type="sd",var=var, size= as.numeric(substring(code,1,1)))
    } else if (code=="dummy") {
      effect = list(code=code, type=code)
    } else {
      start = as.numeric(substring(code,1,2))
      end = as.numeric(substring(code,4,5))
      effect = list(code = code,type="quantile",var=var, low.percent=start/100, high.percent=end/100)
    }
  }
  if (!is.null(val)) effect=add.values.for.effect(val, effect)
  effect
}

# One row per column of dat: effect code/type plus baseline, low and high
# values, with a compact textual description in val.descr.
get.effect.base.df = function(dat, numeric.effect = "10-90", dummy01 = TRUE, effect.bases=NULL) {
  restore.point("get.effect.base.df")
  # NOTE(review): dummy.effect and this first `val` are never used.
  dummy.effect = list(type="dummy")
  val = dat[[1]]
  li = lapply(colnames(dat), function(col) {
    val = dat[[col]]
    if (is.null(effect.bases[[col]])) {
      if (is.dummy.val(val)) {
        effect= get.effect.base(val, "dummy", var=col)
      } else {
        effect = get.effect.base(val, numeric.effect, var=col)
      }
    } else {
      # A user-supplied effect base takes precedence; only missing fields
      # are filled from the data (overwrite=FALSE).
      effect = add.values.for.effect(val,effect.bases[[col]], overwrite=FALSE)
    }
    as.data.frame(c(list(var=col,code=effect$code, type=effect$type),
      effect[c("baseline.val","low.val","high.val")]
    ))
  })
  df = do.call(rbind,li)
  rownames(df)=NULL
  #df = rbindlist(li)
  df$val.descr = paste0(signif(df$low.val,3)," ",signif(df$baseline.val,3)," ",signif(df$high.val,3))
  df
}

#' Simple function to compute effect sizes used by effectplot
#' @export
get.effect.sizes = function(reg, dat, vars=intersect(colnames(dat), names(coef(reg))), scale.depvar=NULL, depvar = names(reg$model)[[1]],data.fun = NULL,numeric.effect="10-90", dummy01=TRUE, predict.type="response", effect.bases=NULL, compute.se=FALSE, ci.prob=0.95, repl.for.se=10000) {
  restore.point("get.effect.sizes")
  library(tidyr)
  ebd = get.effect.base.df(dat, numeric.effect=numeric.effect, dummy01=dummy01, effect.bases=effect.bases)
  nr = length(vars)*2
  # One prediction row per (var, low/high) pair; all other columns are held
  # at their baseline values.
  base.df = as.data.frame(t(ebd$baseline.val))
  colnames(base.df) = colnames(dat)
  # NOTE(review): bare `base.df` below is a no-op (leftover debug print).
  base.df
  key.df = expand.grid(list(level=c("low","high"),var=vars))
  df = cbind(key.df,base.df)
  var.ebd = match(vars,ebd$var)
  names(var.ebd) = vars
  row = 0
  for (var in vars) {
    row = row+1
    df[row, var] = ebd$low.val[var.ebd[var]]
    row = row+1
    df[row, var] = ebd$high.val[var.ebd[var]]
  }
  # NOTE(review): bare `df` below is a no-op (leftover debug print).
  df
  newdata = df[,-(1:3)]
  # compute values of dependent data like
  # df$x_sqr = df$x^2
  if (!is.null(data.fun)) {
    df = data.fun(df)
  }
  if (is(reg,"felm")) {
    pred = predict.felm(reg,newdata=newdata, use.fe = FALSE)
  } else {
    pred = predict(reg,newdata=newdata, type=predict.type)
  }
  scale.y =1
  if (!is.null(scale.depvar)) {
    # Express effects in units of the dependent variable's chosen scale.
    scale.y = make.val.unit(dat[[depvar]], scale.depvar)$size
    pred = pred / scale.y
  }
  rdf = cbind(key.df, pred)
  d = spread(rdf, key = level, value=pred)
  d$effect = d$high-d$low
  d$abs.effect = abs(d$effect)
  d$effect.sign = sign(d$effect)
  d$base.descr = ebd$val.descr[var.ebd]
  if (compute.se) {
    newdf = model.matrix.from.new.data(newdata,reg)
    se.df = compute.effect.size.se(reg, repl.for.se,newdata=newdf,scale=scale.y)
    d = cbind(d, se.df)
  }
  d
}

# Build the model matrix for `newdata` using the terms, factor levels and
# contrasts stored in the fitted regression `reg`.
model.matrix.from.new.data = function(newdata, reg, na.action=na.pass,...)
{
  object=reg
  tt <- terms(object)
  Terms <- delete.response(tt)
  na.action=na.pass
  m <- model.frame(Terms, newdata, na.action = na.action, xlev = reg$xlevels)
  X <- model.matrix(Terms, m, contrasts.arg = reg$contrasts)
  X
}

# Parametric-bootstrap standard errors and CI bounds for the effect sizes:
# draw coefficient vectors from the estimated sampling distribution, predict
# at the low/high design rows, and summarize the resulting effect draws.
compute.effect.size.se = function(reg, repl.for.se,newdata,scale=1, add.intercept = FALSE, ci.prob=c(0.05,0.95),...) {
  restore.point("compute.effect.size.se")
  newmatrix = as.matrix(newdata)
  if (add.intercept) newmatrix=cbind(intercept=1,newmatrix)
  pred.fun = get.predict.from.coef.fun(reg)
  # check if pred.fun is null
  if (is.null(pred.fun)) {
    warning("No predict.from.coef function for model of class ", class(reg)[1], " skip computation of se and confidence intervals for effect.")
    return(NULL)
  }
  coef.mat = draw.from.estimator(n=repl.for.se,coef=reg$coef, vcov=vcov(reg))
  # P rows of newmatrix (number of predictions)
  # R number of draws from estimator
  # P x R
  mat = apply(coef.mat,1,pred.fun,newmatrix=newmatrix, reg=reg )
  # NOTE(review): `mat[,1:2]` below is a no-op (leftover debug inspection);
  # `mat` itself is also unused -- the predictions are recomputed next line.
  mat[,1:2]
  pred.vec = as.numeric(apply(coef.mat,1,pred.fun,newmatrix=newmatrix, reg=reg ))
  # Pair consecutive predictions into (low, high) per variable and draw.
  pred.mat = matrix(pred.vec,ncol=2, byrow=TRUE)
  pred.effect = (pred.mat[,2]-pred.mat[,1]) / scale
  effect.mat = matrix(pred.effect, nrow=repl.for.se, byrow=TRUE)
  effect.se = apply(effect.mat,2,sd, na.rm=TRUE)
  # A scalar ci.prob is interpreted as a two-sided coverage level.
  if (length(ci.prob)==1) ci.prob = c((1-ci.prob)/2, 1- (1-ci.prob)/2)
  ci = apply(effect.mat,2, quantile, probs=ci.prob, na.rm=TRUE)
  data.frame(effect.se = effect.se, ci.low=ci[1,], ci.high=ci[2,])
}

# Interactive usage examples for effectplot (not run automatically; relies on
# simulated data and the felm/expanded.regression helpers).
examples.effectplot = function() {
  # simulate some data
  set.seed(12345)
  n = 1000
  x = rnorm(n)
  z = rnorm(n)
  q = rnorm(n)

  # binary outcome
  y = ifelse(pnorm(1 + 0.5*x + 0.25*x^2 - 0.5*z + rnorm(n))>0.5, 1, 0)

  data = data.frame(y,x,z,q)

  # Logit regression
  reg = glm(y~x + x^2 + z +q, data=data, family="binomial")
  effectplot(reg,main="Effects", horizontal=TRUE, show.ci=TRUE)
  coefplot(reg)

  # An example with factors
  T = 10
  x = sample(1:4, T, replace = TRUE)
  y = x^2 + rnorm(T)
  xf = as.character(x)

  # effectplot can currently not handle factor variables
  # in the regression formula
  reg = lm(y~xf)
  effectplot(reg)

  # Workaround: first explicitly generate a
  # data.frame with all dummy variables
  df = data.frame(y,xf,x)
  dat = expanded.regression.data(y~xf,df)
  reg = lm(regression.formula(dat), data=dat)
  reg
  effectplot(reg)

  # Short-cut
  reg = expanded.regression(lm(y~xf))
  effectplot(reg)

  # An example for felm
  T = 120
  x1 <- rnorm(T)
  x2 <- rnorm(T)
  fe1 = sample(c("yes","no"), T, replace=TRUE)
  fe2 = sample(c("a","b","c"), T, replace=TRUE)
  y <- -1*x1 + 3*x2 + (fe1=="yes") + 2*(fe2=="a") + (fe2=="b")+ rnorm(T)
  dat = data.frame(y,x1,x2,fe1,fe2)
  reg = felm(y~x1+x2 | fe1+fe2)
  effectplot(reg)
}

# Name of the dependent variable of a fitted model (felm stores it in $lhs).
name.of.depvar = function(reg) {
  if (is(reg,"felm")) return(reg$lhs)
  names(reg$model)[[1]]
}

#' Plot for regressions to compare effects sizes of normalized changes in the explanatory variables
#'
#' The plot shall help to compare magnitudes of the influence of different explanatory variables. The default effect is "10-90", i.e. the effect of when -ceteris paribus- changing an (numeric) explanatory variable from its 10% quantile value to its 90% quantile. For dummy variables, we just consider the effect from changing it from 0 to 1.
#'
#' @param reg the results from a regression, e.g. from a call to lm or glm
#' @param dat default = the data frame the regression was estimated from
#' @param vars the explanatory variables that shall be shown in the plot
#' @param numeric.effect a code describing the lowest and highest values of numeric explanatory variables used to calculate the effect, e.g. "05-95" means taking the effect of moving from the 5 percent to the 95 percent quantile.
#' @param dummy01 shall numeric variables that have only 0 and 1 as values be treated as a dummy variables?
#' @param sort if TRUE (default) sort the effects by size
#' @param scale.depvar a scaling for the dependent variable
#' @param depvar name of the dependent variable
#' @param xlab,ylab labels
#' @param colors colors for positive values (pos) and negative values (neg)
#' @param horizontal shall bars be shown horizontally?
#' @param show.ci shall confidence intervals be shown?
#' @param ci.prob left and right probability level for confidence intervals
#' @param num.ticks the number of tick marks on the effect size axis
#' @param add.numbers shall the effect sizes be added as numbers in the plot?
#' @param numbers.align How shall the effect size numbers be aligned: "left","center", "right" align all numbers at the same horizontal posistion for all variables. "left_of_bar_end" and "right_of_bar_end" align them at the end of each bar.
#' @param numbers.vjust Vertical adjustment of the numbers
#' @param left.margin extra margin left of bars as fraction of maximum bar width
#' @param right.margin extra margin right of bars as fraction of maximum bar width
#' @param signif.digits number of significant digits for effect sizes
#' @param round.digits number of digits effect sizes shall be rounded to
#' @param ... further arguments passed to qplot. E.g. you can set "main" to specify a title of the plot.
#' @export
effectplot = function(reg, dat=get.regression.data(reg,source.data=source.data),source.data = NULL, main = NULL, vars=intersect(colnames(dat), names(coef(reg))), ignore.vars = NULL, numeric.effect="10-90", dummy01=TRUE, sort = TRUE, scale.depvar=NULL, depvar = name.of.depvar(reg), xlab="Explanatory variables\n(low baseline high)", ylab=paste0("Effect on ", depvar,""), colors = c("pos" = "#11AAAA", "neg" = "#EE3355"), effect.sizes=NULL, effect.bases = NULL, horizontal=TRUE, show.ci = FALSE, ci.prob =c(0.05,0.95), num.ticks=NULL, add.numbers=TRUE, numbers.align = c("center","left","right","left_of_bar_end","right_of_bar_end")[1], numbers.vjust = ifelse(show.ci,0,0.5), left.margin = 0, right.margin=0, signif.digits = 2, round.digits=8, alpha = 0.8,... ) {
  library(ggplot2)
  #
  # org.dat = dat
  #
  # if (missing(model.matrix)) {
  #   model.matrix = dat
  #   try({
  #     model.matrix <- model.matrix(reg)
  #     if (all(model.matrix[,1]==1))
  #       model.matrix <- model.matrix[,-1, drop=FALSE]
  #   },silent=TRUE)
  # }
  # if (!is.null(model.matrix)) {
  #   dat = cbind(dat[[depvar]],as.data.frame(model.matrix))
  #   colnames(dat)[1] = depvar
  # }
  # rownames(dat) = NULL
  # if (missing(vars))
  #   vars = colnames(dat)

  restore.point("effectplot")

  vars = setdiff(vars, ignore.vars)
  if (is.null(effect.sizes)) {
    es = get.effect.sizes(reg,dat, vars,depvar=depvar, scale.depvar=scale.depvar,numeric.effect=numeric.effect, dummy01=dummy01, effect.bases=effect.bases, compute.se=show.ci, ci.prob=ci.prob)
  } else {
    es = effect.sizes
  }
  # Axis label per variable: name plus "low baseline high" description.
  es$coef.name = paste0(es$var,"\n",es$base.descr)
  if (sort) es = es[order(es$abs.effect), ]
  es$sign = ifelse(sign(es$effect)>=0,"pos","neg")

  # Set factor name in order to show sorted plot
  es$name = factor(es$coef.name, es$coef.name)
  es$round.effect = round(es$effect,round.digits)
  es$round.effect = sapply(es$round.effect, signif, digits=signif.digits)
  # Pad non-negative numbers with a leading space so labels line up.
  add.str = ifelse(es$round.effect>=0, " ","")
  es$round.effect = paste0(add.str,es$round.effect)

  #qplot(data=es, y=abs.effect, x=name, fill=sign, geom="bar", stat="identity",xlab=xlab,ylab=ylab) + coord_flip() +
  if (show.ci) {
    # CI bounds flipped to match the absolute-effect orientation of the bars.
    es$abs.ci.low = es$ci.low * es$effect.sign
    es$abs.ci.high = es$ci.high * es$effect.sign
  }

  #p = qplot(data=es, y=abs.effect, x=name, fill=sign, geom="bar", stat="identity",xlab=xlab,ylab=ylab,...) + scale_fill_manual(values=colors)
  p = ggplot(data=es, aes(y=abs.effect, x=name, fill=sign)) + geom_bar(stat = "identity", alpha=alpha) + scale_fill_manual(values=colors) + xlab(xlab) + ylab(ylab)
  if (!is.null(main)) p = p + ggtitle(main)
  if (horizontal) p = p+coord_flip() #+ theme_wsj()
  if (show.ci) {
    p = p +geom_errorbar(aes(ymin=abs.ci.low, ymax=abs.ci.high), position="dodge", width=0.25, colour=gray(0.3, alpha=0.6))
  }
  if (!is.null(num.ticks)) {
    number_ticks <- function(n) {function(limits) pretty(limits, n)}
    p = p + scale_y_continuous(breaks=number_ticks(num.ticks))
  }
  if (add.numbers) {
    if (numbers.align=="left_of_bar_end") {
      p = p + geom_text(aes(x=name, y=abs.effect, ymax=max(abs.effect)*(1+right.margin), ymin=-(left.margin*max(abs.effect)), label=round.effect, hjust=1, vjust=numbers.vjust), position = position_dodge(width=1))
    } else if (numbers.align=="right_of_bar_end") {
      p = p + geom_text(aes(x=name, y=abs.effect, ymax=max(abs.effect)*(1+right.margin), ymin=-(left.margin*max(abs.effect)), label=round.effect, hjust=0,vjust=numbers.vjust))
    } else {
      # Fixed horizontal position as a fraction of the longest bar.
      .POS = 0.5
      if (numbers.align == "left") {
        .POS = 0
      } else if (numbers.align=="center") {
        .POS = 0.5
      } else if (numbers.align=="right") {
        .POS = 1
      } else if (is.numeric(numbers.align)) {
        .POS = numbers.align
      }
      p = p + geom_text(aes(x = name, y = .POS*max(abs.effect), ymax=max(abs.effect)*(1+right.margin), ymin=-(left.margin*max(abs.effect)), label=round.effect, hjust=0,vjust=numbers.vjust))
    }
  }
  #numbers.align = "left_of_bar_end","right_of_bar_end","center","left","right"
  p
}

#' Graphically compare sizes of regression coefficient
#'
#' mainly useful if regression is run on data that has been normalized by set.data.units
#' @export
coefplot = function(reg, data=NULL, sort=TRUE, remove.intercept=TRUE, show.units=!is.null(data), xlab="Explanatory variables", ylab="Coefficient", colors = c("pos" = "#11AAAA", "neg" = "#EE3355"), horizontal=TRUE, vars=names(coef(reg)), ignore.vars = NULL, alpha = 1) {
  # Draw a bar chart of absolute coefficient sizes, colored by sign.
  #
  # reg:              a fitted regression object (e.g. from lm)
  # data:             optional data frame the regression was estimated from;
  #                   used to look up each column's "unit" attribute
  # sort:             order bars by absolute coefficient size (default TRUE)
  # remove.intercept: drop the first coefficient, assumed to be the
  #                   intercept (default TRUE)
  # show.units:       append each variable's unit description to its label;
  #                   defaults to TRUE whenever data is supplied
  # xlab, ylab:       axis labels
  # colors:           fill colors for positive ("pos") / negative ("neg")
  # horizontal:       draw bars horizontally (default TRUE)
  # vars:             coefficients to include; ignore.vars: to exclude
  # alpha:            bar fill transparency (new parameter, default 1 keeps
  #                   the previous behavior)
  #
  # Returns a ggplot object.
  library(ggplot2)
  # Fix: the original called restore.point twice with two different tags
  # ("coefplot" and "coef.plot"); a single debug checkpoint suffices.
  restore.point("coefplot")

  coef = coef(reg)
  if (remove.intercept) coef = coef[-1]

  # Keep only the requested coefficients.
  vars = setdiff(vars, ignore.vars)
  coef = coef[names(coef) %in% vars]

  df = data.frame(coef.name=names(coef), coef=coef, abs.coef = abs(coef),
                  sign=ifelse(sign(coef)>=0,"pos","neg"), stringsAsFactors=FALSE)

  if (show.units) {
    # Append the unit description stored on each data column (e.g. by
    # set.data.units) to the axis label of the corresponding coefficient.
    cols = intersect(names(coef), colnames(data))
    units = lapply(df$coef.name, function(col) {
      if (col %in% cols) return(attr(data[[col]],"unit"))
      # Neutral fallback for coefficients without a matching data column.
      return(list(unit = "original", size=1, descr=""))
    })
    units.descr = lapply(units, function(unit) unit$descr)
    df$coef.name = paste0(df$coef.name,"\n",units.descr)
  }

  # Fix: sort on the data frame's own column rather than the outer `coef`
  # vector, so the ordering stays correct if the construction above changes.
  if (sort) df = df[order(df$abs.coef), ]

  # Freeze the factor level order so ggplot keeps the sorted bar order.
  df$name = factor(df$coef.name, df$coef.name)

  p = ggplot(data=df, aes(y=abs.coef, x=name, fill=sign)) +
    geom_bar(stat = "identity", alpha=alpha) +
    scale_fill_manual(values=colors) +
    xlab(xlab) + ylab(ylab)
  if (horizontal) p = p + coord_flip()
  p
}
be7f9119b20ef6f3fdbf222e07bc7b82c5e40b76
2ff2b73ee9b446747ffe79c1afd1790a6d4dccb2
/src/matchDegree.R
ffee85033225c3f7a48d1eb3fbf3fdcdc91d0f17
[]
no_license
AlpAribal/gradcafestats
6d79418485592944832b2d1a0bbd59864a9595e3
998ac372619be6ed58f87d614cd17fb4a1c20f7f
refs/heads/master
2022-04-13T05:19:01.242493
2020-04-02T11:26:14
2020-04-02T11:26:14
170,407,202
5
0
null
null
null
null
UTF-8
R
false
false
1,885
r
matchDegree.R
# matchDegree.R — infer the degree type (Master's vs. PhD) for each gradcafe
# submission. Strategy: normalize the explicit "degree" field first, then fall
# back to regex matches against the free-text "major" field, and finally
# against the "notes" field for rows that are still unclassified.
require(data.table)
require(stringr)

# Load only the columns needed for classification.
submissions <- fread(input = "../data/submissions.csv", sep = ";", header = T, select = c("submissionId", "institution", "major", "degree", "notes"), quote = "")

# Normalize the explicit degree field: any value containing "m" is taken as a
# Master's, any value containing "d" as a PhD; "Other" becomes missing so the
# fallback passes below can try to classify it.
submissions[str_detect(string = degree, pattern = regex("m", ignore_case = T)), `:=`(degree, "Master's")]
submissions[str_detect(string = degree, pattern = regex("d", ignore_case = T)), `:=`(degree, "PhD")]
submissions[degree == "Other", `:=`(degree, NA)]

# Regexes covering common Master's abbreviations (MSc, MBA, MEng, MPhil,
# LLM, ...) and doctoral abbreviations (PhD, DPhil, PsyD, EdD, ...).
# NOTE(review): order matters — the PhD match below overrides a prior
# Master's match when a major mentions both.
rgxMasters <- regex("\\bmaster|\\b(?:m[bfph\\.]?a|m\\.?p\\.?p|m\\.?s[cw]?|mcs|m[\\. ]*eng|mmath|m[\\. ]*(?:phil|arch[I1S]*)|MPS|MAUD|M[AS]?PH|M\\.?Div|M[S\\.]Ed|M\\.?St|MEM|MES|MALD|MSFS|MHSc?|MTS|MDesS?|MIA|MLA(?:UD)?|LL\\.?M|MCLS|MFE|MIB|MLS|MRes|MSIM|MSPPM|Th\\.?M)\\b", ignore_case = T)
rgxPhD <- regex("\\bdoctor|\\b(?:ph[\\. ]*d|d[\\. ]*phil|A[\\. ]*u[\\. ]*D|Psy[\\. ]*D|DDes|Dr?PH|dr|Th\\.?D|DBA|DMA|DNP|DPT|ScD|DHSc?|DLA|DMP|EdD)\\b", ignore_case = T)
submissions[str_detect(string = major, pattern = rgxMasters), `:=`(degree, "Master's")]
submissions[str_detect(string = major, pattern = rgxPhD), `:=`(degree, "PhD")]

# Second pass over the free-text notes, applied only to rows that are still
# unclassified. Notes containing "march" are excluded from the Master's match
# because "M.Arch" is indistinguishable from the month March in free text.
rgxMasters <- regex('\\bmaster|\\b(?:m[bfph\\.]?a|m\\.?p\\.?p|m\\.?s[cw]?|mcs|m[\\. ]*eng|mmath|m[\\. ]*(?:phil|arch[I1S]*)|MPS|MAUD|M[AS]?PH|M\\.?Div|M[S\\.]Ed|M\\.?St|MEM|MES|MALD|MSFS|MHSc?|MTS|MDesS?|MIA|MLA(?:UD)?|LL\\.?M|MCLS|MFE|MIB|MLS|MRes|MSIM|MSPPM|Th\\.?M)[\\. ]*\\b', ignore_case = T)
submissions[is.na(degree) & !str_detect(string = notes, pattern = regex("march", ignore_case = T)) & str_detect(string = notes, pattern = rgxMasters), `:=`(degree, "Master's")]
submissions[is.na(degree) & str_detect(string = notes, pattern = rgxPhD), `:=`(degree, "PhD")]

# Persist only the submissionId -> degree mapping.
write_res <- submissions[, .(submissionId, degree)]
write.table(x = write_res, file = "../data/matchDegrees.csv", append = F, sep = ";", row.names = F, col.names = T, quote = F)
c1b632c5fecdf83efc6fcb2bd328d4bb3cb29070
8e540ab736ac0682d95b3b8a3776782bd69bdafa
/man/as.abucount.Rd
ed2be9bb6cee8f1798c6ea20d6961d899b06a246
[]
no_license
JohnsonHsieh/iNEXT
4b71515a0c1a6a3143661fb351eadcaaf485fad4
9a2ceb13fb39bcf8fc6d4804f61b8b3c4d66bbe4
refs/heads/master
2022-08-31T06:49:43.096499
2022-08-29T10:57:46
2022-08-29T10:57:46
13,438,394
49
30
null
2022-08-29T12:01:15
2013-10-09T09:34:41
R
UTF-8
R
false
true
633
rd
as.abucount.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/invChat.R
\name{as.abucount}
\alias{as.abucount}
\title{Transform abundance raw data to abundance row-sum counts (iNEXT input format)}
\usage{
as.abucount(x)
}
\arguments{
\item{x}{a \code{data.frame} or \code{matrix} (species by sites matrix).}
}
\value{
a \code{vector} of species abundance row-sum counts.
}
\description{
\code{as.abucount}: transform species abundance raw data (a species by sites matrix) to row-sum counts (iNEXT input format) as species abundances.
}
\examples{
data(ciliates)
lapply(ciliates, as.abucount)
}
70f6ec809a33e615295e53ce3218d0cd824021b7
f26ea5d9d7488fd41ea17c17bf52ae3cf4f093c7
/R/tslarsP.R
3829a1c95bf048a09f05b27d59cf8e7772800436
[]
no_license
cran/robustHD
b62265881b537151dd5990d64dc0a63379d52030
f348809d3fc47b880050183225863908867aca05
refs/heads/master
2023-01-30T18:45:24.631406
2023-01-18T13:50:02
2023-01-18T13:50:02
17,699,287
0
1
null
null
null
null
UTF-8
R
false
false
14,434
r
tslarsP.R
# -------------------------------------- # Author: Andreas Alfons # Erasmus Universiteit Rotterdam # # based on code by Sarah Gelper # -------------------------------------- #' (Robust) least angle regression for time series data with fixed lag length #' #' (Robustly) sequence groups of candidate predictors and their respective #' lagged values according to their predictive content and find the optimal #' model along the sequence. Note that lagged values of the response are #' included as a predictor group as well. #' #' @aliases print.tslarsP #' #' @param formula a formula describing the full model. #' @param data an optional data frame, list or environment (or object coercible #' to a data frame by \code{\link{as.data.frame}}) containing the variables in #' the model. If not found in data, the variables are taken from #' \code{environment(formula)}, typically the environment from which #' \code{tslarsP} or \code{rtslarsP} is called. #' @param x a numeric matrix or data frame containing the candidate predictor #' series. #' @param y a numeric vector containing the response series. #' @param h an integer giving the forecast horizon (defaults to 1). #' @param p an integer giving the number of lags in the model (defaults to 2). #' @param sMax an integer giving the number of predictor series to be #' sequenced. If it is \code{NA} (the default), predictor groups are sequenced #' as long as there are twice as many observations as predictor variables. #' @param centerFun a function to compute a robust estimate for the center #' (defaults to \code{\link[stats]{median}}). #' @param scaleFun a function to compute a robust estimate for the scale #' (defaults to \code{\link[stats]{mad}}). #' @param regFun a function to compute robust linear regressions that can be #' interpreted as weighted least squares (defaults to #' \code{\link[robustbase]{lmrob}}). #' @param regArgs a list of arguments to be passed to \code{regFun}. 
#' @param combine a character string specifying how to combine the data
#' cleaning weights from the robust regressions with each predictor group.
#' Possible values are \code{"min"} for taking the minimum weight for each
#' observation, \code{"euclidean"} for weights based on Euclidean distances
#' of the multivariate set of standardized residuals (i.e., multivariate
#' winsorization of the standardized residuals assuming independence), or
#' \code{"mahalanobis"} for weights based on Mahalanobis distances of the
#' multivariate set of standardized residuals (i.e., multivariate winsorization
#' of the standardized residuals).
#' @param winsorize a logical indicating whether to clean the data by
#' multivariate winsorization.
#' @param const numeric; tuning constant for multivariate winsorization to be
#' used in the initial correlation estimates based on adjusted univariate
#' winsorization (defaults to 2).
#' @param prob numeric; probability for the quantile of the
#' \eqn{\chi^{2}}{chi-squared} distribution to be used in multivariate
#' winsorization (defaults to 0.95).
#' @param fit a logical indicating whether to fit submodels along the sequence
#' (\code{TRUE}, the default) or to simply return the sequence (\code{FALSE}).
#' @param s an integer vector of length two giving the first and last
#' step along the sequence for which to compute submodels. The default
#' is to start with a model containing only an intercept (step 0) and
#' iteratively add all series along the sequence (step \code{sMax}). If
#' the second element is \code{NA}, predictor groups are added to the
#' model as long as there are twice as many observations as predictor
#' variables. If only one value is supplied, it is recycled.
#' @param crit a character string specifying the optimality criterion to be
#' used for selecting the final model. Currently, only \code{"BIC"} for the
#' Bayes information criterion is implemented.
#' @param ncores a positive integer giving the number of processor cores to be #' used for parallel computing (the default is 1 for no parallelization). If #' this is set to \code{NA}, all available processor cores are used. For #' obtaining the data cleaning weights and for fitting models along the #' sequence, parallel computing is implemented on the \R level using package #' \pkg{parallel}. Otherwise parallel computing for some of of the more #' computer-intensive computations in the sequencing step is implemented on the #' C++ level via OpenMP (\url{https://www.openmp.org/}). #' @param cl a \pkg{parallel} cluster for parallel computing as generated by #' \code{\link[parallel]{makeCluster}}. This is preferred over \code{ncores} #' for tasks that are parallelized on the \R level, in which case \code{ncores} #' is only used for tasks that are parallelized on the C++ level. #' @param seed optional initial seed for the random number generator #' (see \code{\link{.Random.seed}}), which is useful because many robust #' regression functions (including \code{\link[robustbase]{lmrob}}) involve #' randomness. On parallel \R worker processes, random number streams are #' used and the seed is set via \code{\link{clusterSetRNGStream}}. #' @param model a logical indicating whether the model data should be included #' in the returned object. #' @param \dots additional arguments to be passed down. #' #' @return #' If \code{fit} is \code{FALSE}, an integer vector containing the indices of #' the sequenced predictor series. 
#' #' Otherwise an object of class \code{"tslarsP"} (inheriting from classes #' \code{"grplars"} and \code{"seqModel"}) with the following components: #' \describe{ #' \item{\code{active}}{an integer vector containing the sequence of predictor #' series.} #' \item{\code{s}}{an integer vector containing the steps for which submodels #' along the sequence have been computed.} #' \item{\code{coefficients}}{a numeric matrix in which each column contains the #' regression coefficients of the corresponding submodel along the sequence.} #' \item{\code{fitted.values}}{a numeric matrix in which each column contains #' the fitted values of the corresponding submodel along the sequence.} #' \item{\code{residuals}}{a numeric matrix in which each column contains #' the residuals of the corresponding submodel along the sequence.} #' \item{\code{df}}{an integer vector containing the degrees of freedom of the #' submodels along the sequence (i.e., the number of estimated coefficients).} #' \item{\code{robust}}{a logical indicating whether a robust fit was computed.} #' \item{\code{scale}}{a numeric vector giving the robust residual scale #' estimates for the submodels along the sequence (only returned for a robust #' fit).} #' \item{\code{crit}}{an object of class \code{"bicSelect"} containing the BIC #' values and indicating the final model (only returned if argument \code{crit} #' is \code{"BIC"} and argument \code{s} indicates more than one step along the #' sequence).} #' \item{\code{muX}}{a numeric vector containing the center estimates of the #' predictor variables.} #' \item{\code{sigmaX}}{a numeric vector containing the scale estimates of the #' predictor variables.} #' \item{\code{muY}}{numeric; the center estimate of the response.} #' \item{\code{sigmaY}}{numeric; the scale estimate of the response.} #' \item{\code{x}}{the matrix of candidate predictor series (if \code{model} is #' \code{TRUE}).} #' \item{\code{y}}{the response series (if \code{model} is \code{TRUE}).} 
#' \item{\code{assign}}{an integer vector giving the predictor group to which #' each predictor variable belongs.} #' \item{\code{w}}{a numeric vector giving the data cleaning weights (only #' returned for a robust fit).} #' \item{\code{h}}{the forecast horizon.} #' \item{\code{p}}{the number of lags in the model.} #' \item{\code{call}}{the matched function call.} #' } #' #' @note The predictor group of lagged values of the response is indicated by #' the index 0. #' #' @author Andreas Alfons, based on code by Sarah Gelper #' #' @references #' Alfons, A., Croux, C. and Gelper, S. (2016) Robust groupwise least angle #' regression. \emph{Computational Statistics & Data Analysis}, \bold{93}, #' 421--435. #' #' @seealso \code{\link[=coef.seqModel]{coef}}, #' \code{\link[=fitted.seqModel]{fitted}}, #' \code{\link[=plot.seqModel]{plot}}, #' \code{\link[=predict.seqModel]{predict}}, #' \code{\link[=residuals.seqModel]{residuals}}, #' \code{\link{tslars}}, \code{\link[robustbase]{lmrob}} #' #' @keywords regression robust ts #' #' @export tslarsP <- function(x, ...) UseMethod("tslarsP") #' @rdname tslarsP #' @method tslarsP formula #' @export tslarsP.formula <- function(formula, data, ...) { ## initializations call <- match.call() # get function call call[[1]] <- as.name("tslarsP") # prepare model frame mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") if(is.empty.model(mt)) stop("empty model") # extract response and candidate predictors from model frame y <- model.response(mf, "numeric") x <- model.matrix(mt, mf) # remove first column for intercept, if existing if(attr(mt, "intercept")) x <- x[, -1, drop=FALSE] ## call default method out <- tslarsP.default(x, y, ...) 
if(inherits(out, "tslarsP")) { out$call <- call # add call to return object out$terms <- mt # add model terms to return object } out } #' @rdname tslarsP #' @method tslarsP default #' @export tslarsP.default <- function(x, y, h = 1, p = 2, sMax = NA, fit = TRUE, s = c(0, sMax), crit = "BIC", ncores = 1, cl = NULL, model = TRUE, ...) { ## call fit function with classical functions for center, scale, ## correlation and regression call <- match.call() # get function call call[[1]] <- as.name("tslarsP") out <- tslarsPFit(x, y, h=h, p=p, sMax=sMax, robust=FALSE, centerFun=mean, scaleFun=sd, fit=fit, s=s, crit=crit, ncores=ncores, cl=cl, model=model) if(inherits(out, "tslarsP")) out$call <- call # add call to return object out } #' @rdname tslarsP #' @export rtslarsP <- function(x, ...) UseMethod("rtslarsP") #' @rdname tslarsP #' @method rtslarsP formula #' @export rtslarsP.formula <- function(formula, data, ...) { ## initializations call <- match.call() # get function call call[[1]] <- as.name("rtslarsP") # prepare model frame mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") if(is.empty.model(mt)) stop("empty model") # extract response and candidate predictors from model frame y <- model.response(mf, "numeric") x <- model.matrix(mt, mf) # remove first column for intercept, if existing if(attr(mt, "intercept")) x <- x[, -1, drop=FALSE] ## call default method and modify return object out <- rtslarsP.default(x, y, ...) 
if(inherits(out, "tslarsP")) { out$call <- call # add call to return object out$terms <- mt # add model terms to return object } out } #' @rdname tslarsP #' @method rtslarsP default #' @export rtslarsP.default <- function(x, y, h = 1, p = 2, sMax = NA, centerFun = median, scaleFun = mad, regFun = lmrob, regArgs = list(), combine = c("min", "euclidean", "mahalanobis"), winsorize = FALSE, const = 2, prob = 0.95, fit = TRUE, s = c(0, sMax), crit = "BIC", ncores = 1, cl = NULL, seed = NULL, model = TRUE, ...) { ## call fit function with classical functions for center, scale, ## correlation and regression call <- match.call() # get function call call[[1]] <- as.name("rtslarsP") out <- tslarsPFit(x, y, h=h, p=p, sMax=sMax, robust=TRUE, centerFun=centerFun, scaleFun=scaleFun, regFun=regFun, regArgs=regArgs, combine=combine, winsorize=winsorize, const=const, prob=prob, fit=fit, s=s, crit=crit, ncores=ncores, cl=cl, seed=seed, model=model) if(inherits(out, "tslarsP")) out$call <- call # add call to return object out } ## fit function for fixed lag length that allows to specify functions for ## center, scale, correlation and regression tslarsPFit <- function(x, y, h = 1, p = 2, sMax = NA, robust = FALSE, centerFun = mean, scaleFun = sd, regFun = lm.fit, regArgs = list(), combine = c("min", "euclidean", "mahalanobis"), winsorize = FALSE, const = 2, prob = 0.95, fit = TRUE, s = c(0, sMax), crit = "BIC", ncores = 1, cl = NULL, seed = NULL, model = TRUE) { ## initializations n <- length(y) x <- as.matrix(x) if(nrow(x) != n) stop(sprintf("'x' must have %d rows", n)) m <- ncol(x) assign <- rep(seq_len(m+1), each=p) crit <- match.arg(crit) model <- isTRUE(model) ## call workhorse function and modify return object out <- grouplars(fitBlocks(x, y, h, p), y[(p+h):n], sMax=sMax, assign=assign, robust=robust, centerFun=centerFun, scaleFun=scaleFun, regFun=regFun, regArgs=regArgs, combine=combine, winsorize=winsorize, const=const, prob=prob, fit=fit, s=s, crit=crit, ncores=ncores, cl=cl, 
seed=seed, model=model) # modify return object (lagged response should have index 0) if(inherits(out, "grplars")) { out[c("active", "assign", "h", "p")] <- list(out$active - 1, out$assign - 1, h, p) # include original data rather than derived data if(isTRUE(model)) out[c("x", "y")] <- list(x=x, y=y) class(out) <- c("tslarsP", class(out)) # inherits from "grplars" } else out <- out - 1 out }
4cba79dcbcb3f43981086616649997323dd4d2b4
d641a33455772b681975596383ab7b99e967665d
/code/07_plot_fig02_substate_all_opioids.R
74e3b9cd88d9376796e0607556d6e38e7b38d60c
[]
no_license
mkiang/opioid_inequities
e0271498dd764422d6dbe51a43dcbac7742912f4
5da5d38b423b2298cae1f9d0d250cdde818badf4
refs/heads/master
2023-04-06T16:34:06.488240
2021-10-19T02:47:40
2021-10-19T02:47:40
330,014,401
0
0
null
null
null
null
UTF-8
R
false
false
1,480
r
07_plot_fig02_substate_all_opioids.R
# Figure 2: opioid-related mortality in the 16 substate areas with the
# highest modeled rates, faceted by area and stratified by race/ethnicity.
## Imports ----
library(tidyverse)
library(geofacet)
library(patchwork)
library(here)
# Project helpers: ggplot theme and the categorize_*/plot_substate_facets
# functions used below.
source(here::here("code", "mk_nytimes.R"))
source(here::here("code", "utils.R"))

## Data ----
results_df <- readr::read_csv(here::here("data", "joinpoint_estimates_suppressed.csv"))
# Attach readable opioid-type and race/ethnicity labels.
results_df <- results_df %>%
    categorize_opioids() %>%
    categorize_race()

# Substate areas have abbreviations longer than 2 characters (state rows are
# exactly 2). Pick the 16 areas with the highest modeled all-opioid rate
# among race-specific (non-total) observations.
top_16_substate <- results_df %>%
    filter(nchar(abbrev) > 2,
           opioid_type == "opioid",
           race_eth != "total") %>%
    arrange(desc(model_rate)) %>%
    select(name, abbrev, st_fips) %>%
    distinct() %>%
    slice(1:16) %>%
    pull(st_fips)

# return_data = TRUE makes the helper hand back both the plot object and the
# underlying plotted data (saved alongside the figure below).
p2_all <- plot_substate_facets(
    results_df %>%
        dplyr::filter(st_fips %in% top_16_substate),
    opioid_x = "opioid",
    include_total_pop = FALSE,
    return_data = TRUE
)

p2 <- p2_all$plot +
    facet_wrap( ~ abbrev,
                nrow = 2,
                labeller = label_wrap_gen(width = 16)) +
    ggplot2::scale_y_continuous(
        paste("Opioid-related mortality rate,",
              "per 100,000 (truncated)"),
        expand = c(0, 0),
        breaks = c(0, 30, 60, 90)
    )
p2_data <- p2_all$data

## Save ----
# cairo_pdf keeps embedded fonts rendering correctly in the PDF output.
ggsave(
    here("plots", "fig02_substate_all_opioids.pdf"),
    p2,
    width = 7.5,
    height = 3,
    scale = 1.3,
    device = cairo_pdf
)
ggsave(
    here("plots", "fig02_substate_all_opioids.jpg"),
    p2,
    width = 7.5,
    height = 3,
    scale = 1.3,
    dpi = 300
)
# Export the exact data behind the figure for reproducibility.
write_csv(p2_data, here("output", "fig02_data.csv"))
2a911c18c97268a51544562516a309b9ffe3427b
0319959633999a8b5da37dd02dce06b227349f73
/R/reference.R
bb1b0b04669c11c66d9cc4be08e4ed297514ebba
[]
no_license
nyuhuyang/scRNAseq-Lymphoma
b11d02192210691101259325636ee1726e191272
e85d9e0fc0809926164749d84c037efc1e1c3fc0
refs/heads/master
2021-04-04T22:01:18.048964
2019-03-26T15:34:32
2019-03-26T15:34:32
124,987,723
0
0
null
null
null
null
UTF-8
R
false
false
9,002
r
reference.R
# Build a SingleR reference object from the E-GEOD-43717 expression atlas
# (Sertoli cells / spermatogonia / spermatocytes), for annotating scRNA-seq
# clusters. This section can be skipped if the GSE43717 reference below is
# available.
library(SingleR)
library(genefilter)
library(dplyr)
source("../R/Seurat_functions.R")
#---------------------------
#  E-GEOD-43717 (skip if GSE43717 is available)
#---------------------------
# TPM table exported from Expression Atlas; first two columns are gene id
# and gene name.
E_GEOD_43717 <- read.table(file = './data/GeneSets/E-GEOD-43717/E-GEOD-43717-query-results.tpms.tsv',
                           sep = '\t', header = TRUE)
# Keep one row per gene name so gene names can serve as rownames.
dup <- duplicated(E_GEOD_43717$Gene.Name)
E_GEOD_43717 <- E_GEOD_43717[!dup,]
rownames(E_GEOD_43717) <- E_GEOD_43717$Gene.Name
# Drop the gene id / gene name columns, keep the expression values only.
E_GEOD_43717 <- E_GEOD_43717[,-c(1:2)]
E_GEOD_43717[is.na(E_GEOD_43717)] <- 0
# log(1 + TPM) transform; boxplot is a quick sanity check of the
# per-sample distributions.
E_GEOD_43717 <- log1p(E_GEOD_43717)
boxplot(E_GEOD_43717)

name = 'Sertoli cells, spermatogonia, spermatocytes'
expr = as.matrix(E_GEOD_43717) # the expression matrix
types = gsub("\\."," ",colnames(E_GEOD_43717)) # a character list of the types. Samples from the same type should have the same name.
main_types = as.character(types) # a character list of the main types.
ref_E_GEOD_43717 = list(name=name,data = expr, types=types, main_types=main_types)

# if using the de method, we can predefine the variable genes
ref_E_GEOD_43717$de.genes.main = CreateVariableGeneSet(expr,main_types,300)
# if using the sd method, we need to define an sd threshold
sd = rowSds(expr)
sd.thres = sort(sd, decreasing = T)[4000] # or any other threshold
ref_E_GEOD_43717$sd.thres = sd.thres

save(ref_E_GEOD_43717,file='./data/GeneSets/E-GEOD-43717/ref_E_GEOD_43717.RData')
# it is best to name the object and the file with the same name.
############################ # GSE43717 ############################ library(GEOquery) library(dplyr) eList <- getGEOSuppFiles("GSE43717",baseDir = paste0(getwd(),"/data/GeneSets")) #it takes some time eList list.files("./data/GeneSets/GSE43717") untar("./data/GeneSets/GSE43717/GSE43717_RAW.tar", exdir = "./data/GeneSets/GSE43717/") list.files("./data/GeneSets/GSE43717/") FPKM_GSE43717_list <- grep("FPKM",list.files("./data/GeneSets/GSE43717/"),value = T) FPKM_GSE43717_txt_list <-lapply(paste0("./data/GeneSets/GSE43717/",FPKM_GSE43717_list),read.table,sep = '\t', header = TRUE) head(FPKM_GSE43717_txt_list[[1]]) FPKM_GSE43717_txt_list <-lapply(FPKM_GSE43717_txt_list,function(x) x[,c("gene_id","FPKM")]) FPKM_GSE43717_list <- gsub(".txt.gz","",FPKM_GSE43717_list) for(i in 1:length(FPKM_GSE43717_txt_list)) colnames(FPKM_GSE43717_txt_list[[i]])[2] <- FPKM_GSE43717_list[i] head(FPKM_GSE43717_txt_list[[1]]) FPKM_GSE43717_txt <- Reduce(merge,FPKM_GSE43717_txt_list) head(FPKM_GSE43717_txt) dim(FPKM_GSE43717_txt) boxplot(FPKM_GSE43717_txt) library(biomaRt) listMarts() ensembl=useMart("ensembl") listDatasets(ensembl) ensembl = useMart("ensembl",dataset="mmusculus_gene_ensembl") filters = listFilters(ensembl) filters[1:20,] attributes = listAttributes(ensembl) attributes[grepl("Gene name",attributes$description),] attributes[grepl("Transcript stable ID",attributes$description),] selected_attri <- c('ensembl_gene_id', 'ensembl_transcript_id', 'external_gene_name') lapply(selected_attri,function(x) attributes[grepl(x,attributes$name),]) biomart_results <- getBM(attributes=selected_attri, filters="ensembl_gene_id", values=FPKM_GSE43717_txt$gene_id, mart = ensembl) colnames(biomart_results)[which(colnames(biomart_results)=="ensembl_gene_id")] <- "gene_id" head(FPKM_GSE43717_txt) head(biomart_results) dim(FPKM_GSE43717_txt) dim(biomart_results) FPKM_GSE43717 <- inner_join(FPKM_GSE43717_txt,biomart_results,by="gene_id") dim(FPKM_GSE43717) dup <- 
duplicated(FPKM_GSE43717$external_gene_name) FPKM_GSE43717 <- FPKM_GSE43717[!dup,] dim(FPKM_GSE43717) rownames(FPKM_GSE43717) <- FPKM_GSE43717$external_gene_name FPKM_GSE43717 <- FPKM_GSE43717[,grep("FPKM",colnames(FPKM_GSE43717))] boxplot(FPKM_GSE43717) table(rowSums(FPKM_GSE43717)>1500000) FPKM_GSE43717 <- FPKM_GSE43717[rowSums(FPKM_GSE43717)<1500000,] # remove dim(FPKM_GSE43717) Log_FPKM_GSE43717 <- log1p(FPKM_GSE43717) GSE43717_types <- c("Sertoli cells","Spermatogonia","Spermatocytes","Spermatids","Spermatozoa") name = "GSE43717" ref_GSE43717 = CreateSinglerReference(name = "GSE43717", expr = as.matrix(Log_FPKM_GSE43717), types = GSE43717_types, main_types = GSE43717_types) save(ref_GSE43717,file='./data/GeneSets/GSE43717/ref_GSE43717.RData') # it is best to name the object and the file with the same name. ############################ # GSE83264 ############################ eList <- getGEOSuppFiles("GSE83264",baseDir = paste0(getwd(),"/data/GeneSets")) #it takes some time eList list.files("./data/GeneSets/GSE83264") GSE83264 <- read.table(file = './data/GeneSets/GSE83264/GSE83264_mRNA_genes_merged_counts.txt.gz', sep = '\t', header = TRUE) dup <- duplicated(GSE83264$gene) table(dup) GSE83264 <- GSE83264[!dup,] rownames(GSE83264) <- GSE83264$gene GSE83264 <- GSE83264[,-1] GSE83264[is.na(GSE83264)] <- 0 boxplot(GSE83264) dim(GSE83264) head(GSE83264) #convert counts to TPM lname <- load("../SingleR/data/gene_lengths.RData") lname head(mouse_lengths) length(mouse_lengths) TPM_GSE83264 <- TPM(GSE83264, mouse_lengths) Log_TPM_GSE83264 <- log1p(TPM_GSE83264) par(mfrow=c(1,2)) boxplot(Log_TPM_GSE83264) rownames(Log_TPM_GSE83264) = Hmisc::capitalize(tolower(rownames(Log_TPM_GSE83264))) name = "whole testis at day 1-7 postpartum" colnames(Log_TPM_GSE83264) GSE83264_types = c("Day1 pp whole testis","Day1 pp whole testis","Day3 pp whole testis", "Day3 pp whole testis","Day7 pp whole testis","Day7 pp whole testis", "leptotene/zygotene spermatocytes","leptotene/zygotene 
spermatocytes", "pachytene spermatocytes","pachytene spermatocytes") # a character list of the types. Samples from the same type should have the same name. GSE83264_main_types = as.character(GSE83264_types) # a character list of the main types. ref_GSE83264 = CreateSinglerReference(name = "GSE83264", expr = as.matrix(Log_TPM_GSE83264), types = GSE83264_types, main_types = GSE83264_main_types) save(ref_GSE83264,file='./data/GeneSets/GSE83264/ref_GSE83264.RData') # it is best to name the object and the file with the same name. ################################################ # merge hpca_blueprint_encode GSE43717 and GSE83264 ################################################ # load hpca_blueprint_encode reference database Iname = load(file='../SingleR/data/ref_Mouse.RData');Iname Iname = load(file='./data/GeneSets/GSE43717/ref_GSE43717.RData');Iname Iname = load(file='./data/GeneSets/GSE83264/ref_GSE83264.RData');Iname #ref_GSE43717_GSE83264 <- reshape::merge_all(list(ref_immgen_mouse.rnaseq$data, Ref_GSE43717 <- merge(ref_immgen_mouse.rnaseq$data, ref_GSE43717$data, by='row.names') rownames(Ref_GSE43717) = Ref_GSE43717$Row.names Ref_GSE43717 = Ref_GSE43717[,-1] ref_GSE43717_GSE83264 <- merge(Ref_GSE43717, ref_GSE83264$data, by='row.names') rownames(ref_GSE43717_GSE83264) = ref_GSE43717_GSE83264$Row.names ref_GSE43717_GSE83264 = ref_GSE43717_GSE83264[,-1] dim(ref_GSE43717_GSE83264) testMMM(ref_GSE43717_GSE83264) boxplot(ref_GSE43717_GSE83264) # too slow!! types = c(ref_immgen_mouse.rnaseq$types,ref_GSE43717$types,ref_GSE83264$types) main_types = c(ref_immgen_mouse.rnaseq$main_types,ref_GSE43717$main_types, ref_GSE83264$main_types) Ref_GSE43717_GSE83264 = CreateSinglerReference(name = "immgen_mouse.rnaseq_GSE43717_GSE83264", expr = as.matrix(ref_GSE43717_GSE83264), types = types, main_types = main_types) save(Ref_GSE43717_GSE83264,file='./data/GeneSets/Ref_GSE43717_GSE83264.RData') # it is best to name the object and the file with the same name. 
################################################ # merge GSE43717 and GSE83264 ################################################ # load GSE43717 and GSE83264 reference database Iname = load(file='./data/GeneSets/GSE43717/ref_GSE43717.RData');Iname Iname = load(file='./data/GeneSets/GSE83264/ref_GSE83264.RData');Iname ref_GSE43717_GSE83264 <- merge(ref_GSE43717$data, ref_GSE83264$data, by='row.names') rownames(ref_GSE43717_GSE83264) = ref_GSE43717_GSE83264$Row.names ref_GSE43717_GSE83264 = ref_GSE43717_GSE83264[,-1] dim(ref_GSE43717_GSE83264) testMMM(ref_GSE43717_GSE83264) boxplot(ref_GSE43717_GSE83264) ref_GSE43717_GSE83264 = CreateSinglerReference(name = "GSE43717_GSE83264", expr = as.matrix(ref_GSE43717_GSE83264), types = c(ref_GSE43717$types,ref_GSE83264$types), main_types = c(ref_GSE43717$main_types,ref_GSE83264$main_types)) save(ref_GSE43717_GSE83264,file='./data/GeneSets/GSE43717_GSE83264.RData') # it is best to name the object and the file with the same name.
1584306b570a981db18304307ffff61ad834a634
5535aebd21d291783f77aa8d35e127d66022acb9
/tests/testthat/test-open-spreadsheets.R
cf10abf95a85c82f248af7dbb46dc96ef9d8eefc
[]
no_license
arturochian/gspreadr
41cb4e276a703ba7bfdf3006153cc08793aa5b99
e7b0b0a155bc2d1b8e3aa858fee79143530a5dad
refs/heads/master
2020-12-11T03:21:03.254666
2015-01-13T00:14:38
2015-01-13T00:14:38
null
0
0
null
null
null
null
UTF-8
R
false
false
1,395
r
test-open-spreadsheets.R
# Integration tests for the spreadsheet-opening helpers (testthat).
# Each test opens the same sheet three ways -- by title, by key, and by full
# URL -- and asserts all three routes return the same spreadsheet object.
# NOTE(review): these tests hit the live Google Sheets API and require valid
# credentials for the account that owns the example sheets.
context("opening spreadsheets")

# A sheet owned by the authenticated account (private visibility).
test_that("Open my private spreadsheet", {
  sheet1 <- open_spreadsheet("Private Sheet Example")
  sheet2 <- open_by_key("1bd5wjZQI8XjPrVNUFbTLI-zhpS8qLJ1scPq1v4v3mWs")
  sheet3 <- open_by_url("https://docs.google.com/spreadsheets/d/1bd5wjZQI8XjPrVNUFbTLI-zhpS8qLJ1scPq1v4v3mWs/edit#gid=0")

  expect_equal(sheet1, sheet2)
  expect_equal(sheet1, sheet3)
})

# A published (public) sheet owned by the authenticated account.
test_that("Open my public spreadsheet", {
  sheet1 <- open_spreadsheet("Public Sheet Example")
  sheet2 <- open_by_key("1hff6AzFAZgFdb5-onYc1FZySxTP4hlrcsPSkR0dG3qk")
  sheet3 <- open_by_url("https://docs.google.com/spreadsheets/d/1hff6AzFAZgFdb5-onYc1FZySxTP4hlrcsPSkR0dG3qk/pubhtml")

  expect_equal(sheet1, sheet2)
  expect_equal(sheet1, sheet3)
})

# Disabled: requires a sheet shared from another account.
# test_that("Open a spreadsheet shared with me", {
#   sheet1 <- open_spreadsheet("Private Sheet Example Shared")
#   sheet2 <- open_by_url("https://docs.google.com/spreadsheets/d/1WpFeaRU_9bBEuK8fI21e5TcbCjQZy90dQYgXF_0JvyQ/edit?usp=sharing")
#
#   expect_equal(sheet1, sheet2)
# })
#
# Disabled: must set visibility to "public" to open a public spreadsheet
# that is not shared with the authenticated account.
# test_that("Open a public spreadsheet not shared with me", {
#   sheet1 <- open_by_url("https://docs.google.com/spreadsheets/d/11j3LvNgiwzw4CdYeKoRULfyqpOlPJb-OzyUur3qX63I/pubhtml", visibility = "public")
#
#   expect_equal(class(sheet1), "spreadsheet")
# })
#
40eb72f63208dff9336e82dc631a8f1fe6c71bd5
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/GenEst/man/estM.Rd
2fa059ac3cf327f76f7579aa307d65b6e61f4a49
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
true
2,670
rd
estM.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortality_functions.R
\name{estM}
\alias{estM}
\title{Estimate mortality}
\usage{
estM(data_CO, data_SS, data_DWP = NULL, frac = 1, COdate = "DateFound",
model_SE, model_CP, model_DWP = NULL, unitCol = NULL, SSdate = NULL,
sizeCol = NULL, IDcol = NULL, DWPCol = NULL, nsim = 1000,
max_intervals = 8)
}
\arguments{
\item{data_CO}{Carcass Observation data}
\item{data_SS}{Search Schedule data}
\item{data_DWP}{Survey unit (rows) by carcass class (columns) density weighted proportion table}
\item{frac}{fraction of carcasses on the ground that was surveyed but not accounted for in DWP}
\item{COdate}{Column name for the date found data}
\item{model_SE}{Searcher Efficiency model (or list of models if there are multiple carcass classes)}
\item{model_CP}{Carcass Persistence model (or list of models if there are multiple carcass classes)}
\item{model_DWP}{fitted dwp model (optional)}
\item{unitCol}{Column name for the unit indicator (optional)}
\item{SSdate}{Column name for the date searched data}
\item{sizeCol}{Name of column in \code{data_CO} where the carcass classes are recorded. Optional. If none provided, it is assumed there are no distinctions among carcass classes.}
\item{IDcol}{column with unique carcass (CO) identifier}
\item{DWPCol}{Column name for the DWP values in the DWP table when no carcass class is used and there is more than one column in \code{data_DWP} that could be interpreted as DWP.}
\item{nsim}{the number of simulation draws}
\item{max_intervals}{maximum number of arrival intervals to consider for each carcass}
}
\value{
list of Mhat, Aj, ghat, DWP (by carcass), and Xtot = total number of carcasses observed
}
\description{
Given fitted Searcher Efficiency and Carcass Persistence models; Search Schedule, Density Weighted Proportion, and Carcass Observation data; and information about the fraction of the facility that was surveyed. 
} \examples{ \dontrun{ data(mock) model_SE <- pkm(formula_p = p ~ HabitatType, formula_k = k ~ 1, data = mock$SE ) model_CP <- cpm(formula_l = l ~ Visibility, formula_s = s ~ Visibility, data = mock$CP, dist = "weibull", left = "LastPresentDecimalDays", right = "FirstAbsentDecimalDays" ) eM <- estM(nsim = 1000, data_CO = mock$CO, data_SS = mock$SS, data_DWP = mock$DWP, frac = 1, model_SE = model_SE, model_CP = model_CP, COdate = "DateFound", DWPCol = "S", sizeCol = NULL ) } }
8254f3b9c5e8038b2093f59107cc856557a36dda
60e830955164b8f0ff21a3dbd850319bae68567e
/WaveformCode/PopulationAnalysisCdfs.R
5f222917dcc1a61233153a179a132286efd8220b
[]
no_license
BenLopez/UHSM_BHF
739ed912182c36f2bf719d771c6392a1dfbe6f47
4afad6662f95f1d20f7270113c80a1836d035028
refs/heads/master
2021-05-26T07:31:07.476669
2019-12-03T09:52:56
2019-12-03T09:52:56
127,924,523
0
0
null
2018-10-10T11:03:39
2018-04-03T14:53:28
R
UTF-8
R
false
false
5,055
r
PopulationAnalysisCdfs.R
# Population-level analysis of RR-interval CDFs for AF (atrial fibrillation)
# detection. Loads per-patient R-peak and precomputed CDF files, computes a
# baseline CDF per patient, then contrasts the baseline CDFs of AF and
# non-AF patients. Relies on project helpers (DP_*, AFD_*, PlotFun_*) made
# available by the setup scripts sourced below.
{
  # One-time setup: either run the project's default-settings script, or ask
  # the user to locate the source folder and load libraries/settings manually.
  if(file.exists('CheckforDefaultsScript.R')){
    source('CheckforDefaultsScript.R')
  }else{
    pathFiles <- setwd(paste0(choose.dir(caption="Select folder with source code."), "\\"))
    source("LibrariesAndSettings.R" , print.eval = TRUE )
    DP_LoadPatientIndex()
    DP_ChooseDataReps()
    FilestoProcess <- DP_ChooseECGstoProcess()
    HoursBeforeandAfter <- DP_SelectHoursBeforeandAfter()
  }
}

# Filter patients according to the configured filtering options.
listAllPatients <- DP_FilterPatients(listAllPatients = listAllPatients , PatIndex2017 = PatIndex2017, HowtoFilterops = HowtoFilterops, path = path, FilestoProcess = FilestoProcess)

# Accumulators: per-patient CDF matrices, RR series, pseudo-IDs, and an AF
# indicator.
# NOTE(review): AFLogical is hard-coded to 728 rows -- assumes at most 728
# eligible patients; confirm against the filtered patient list.
# NOTE(review): "PatinetNames" is a typo kept as-is for compatibility.
PatientCDFs <- list()
PatientRR <- list()
PatinetNames <- list()
AFLogical <- matrix(0 , 728 , 1)
counter <- 1

for(ii in 1:length(listAllPatients)){
  sub_pat <- DP_ExtractPatientRecordforIndex(PatIndex2017 = PatIndex2017 , PatientCode = listAllPatients[[ii]])
  # Keep a patient only when the R-peaks have been processed and the
  # precomputed "<id>_CDFs" file exists.
  # NOTE(review): DP_checkRpeaksfilesprocessed is passed the whole patient
  # list rather than listAllPatients[[ii]] -- verify this is intended.
  if(DP_checkRpeaksfilesprocessed(path , PatientsId = listAllPatients) && DP_CheckFileExists(path , PatientsID = listAllPatients[[ii]] , paste0(listAllPatients[[ii]] , '_CDFs' )) ){
    tmp <- DP_LoadRpeaksfile(path , listAllPatients[[ii]])$RRCombined
    # Skip short records (fewer than 15000 RR samples).
    if(length(tmp$t) < 15000){next}
    PatientRR[[counter]] = tmp
    PatientCDFs[[counter]] = DP_LoadFile(path , PatientsID = listAllPatients[[ii]] , paste0(listAllPatients[[ii]] , '_CDFs' ))
    PatinetNames[[counter]] <- sub_pat$PseudoId[1]
    if(DP_CheckIfAFPatient(sub_pat)){
      AFLogical[counter,1] <- 1
    }
    counter <- counter + 1
  }
  DP_WaitBar(ii/length(listAllPatients))
}

# Name the per-patient lists by pseudo-ID and compute a baseline CDF for
# each patient.
PatientRR <- setNames( PatientRR , unlist(PatinetNames) )
PatientCDFs <- setNames( PatientCDFs , unlist(PatinetNames) )
BaselineCDF <- lapply( PatientCDFs , function(X){AFD_CDFCalulateBaseline(X)} )

# Exploratory plots for one patient: the RR series with two highlighted
# 1000-beat windows, and the baseline-subtracted CDFs inside those windows.
{patienttoexamine <- 'z1027'
  # NOTE(review): med is computed but never used below.
  med <- as.numeric(apply( PatientCDFs[[patienttoexamine]][,] , 1 , function(X){AFD_CalulateMedianfromBinMatrix(as.numeric(colnames(PatientCDFs[[1]])) , X)}))
  index1 <- 10000
  index2 <- 20000
  x11()
  par(mfrow = c(2 , 1))
  plot(PatientRR[[patienttoexamine]]$t , PatientRR[[patienttoexamine]]$RR , col = rgb(0,0,1,alpha = 0.01) , pch = 16 , ylab = 'RR' , xlab = 't' )
  title('RR Times')
  # Window boundaries: red = first window, green = second window.
  abline(v = as.numeric(PatientRR[[patienttoexamine]]$t[index1]) , col = 'red' )
  abline(v = as.numeric(PatientRR[[patienttoexamine]]$t[index1 + 1000]) , col = 'red')
  abline(v = as.numeric(PatientRR[[patienttoexamine]]$t[index2]) , col = 'green')
  abline(v = as.numeric(PatientRR[[patienttoexamine]]$t[index2 + 1000]) , col = 'green')
  # Baseline-subtracted CDFs, drawn with heavy alpha blending so dense
  # regions appear darker.
  plot(as.numeric(colnames(PatientCDFs[[1]])) , PatientCDFs[[patienttoexamine]][index1,] - BaselineCDF[[patienttoexamine]] , col = rgb(1,0,0 , alpha = 0.01) , type ='l' , xlab = 'x' , ylab = 'P(X - median(X) < x)', ylim = c(-1,1) , xlim =c(0,2))
  for( i in 1:1000){
    lines(as.numeric(colnames(PatientCDFs[[1]])) , PatientCDFs[[patienttoexamine]][index1 + i,] - BaselineCDF[[patienttoexamine]] , col = rgb(1,0,0 , alpha = 0.01) , type ='l')
    lines(as.numeric(colnames(PatientCDFs[[1]])) , PatientCDFs[[patienttoexamine]][index2 + i,] - BaselineCDF[[patienttoexamine]], col = rgb(0,1,0 , alpha = 0.01) , type ='l')
  }
}

# Stack the per-patient baseline CDFs into one matrix per group
# (AF vs non-AF), one row per patient.
AFBaselineCDFMatrices <- matrix(0 , sum(AFLogical ==1) , length( PatientCDFs[[1]][1,] ) )
NAFBaselineCDFMatrices <- matrix(0 , sum(AFLogical ==0) , length( PatientCDFs[[1]][1,] ) )
counter1 <- 1
counter2 <- 1
for( ii in 1:length(PatientCDFs) ){
  if(AFLogical[ii] == 1){
    AFBaselineCDFMatrices[counter1,] <- BaselineCDF[[ii]]
    counter1 <- counter1 + 1
  }else{
    NAFBaselineCDFMatrices[counter2,] <- BaselineCDF[[ii]]
    counter2 <- counter2 + 1
  }
}

# Group means and covariances over CDF bins 5:27, plus a Mahalanobis-style
# separation statistic between the two group means.
m_BL_AF <- apply(AFBaselineCDFMatrices[ ,5:27 ] , 2 , function(X){mean(X[!is.na(X)])})
v_BL_AF <- cov(AFBaselineCDFMatrices[ ,5:27 ])
m_BL_NAF <- apply(NAFBaselineCDFMatrices[ ,5:27 ] , 2 , function(X){mean(X[!is.na(X)])})
v_BL_NAF <- cov(NAFBaselineCDFMatrices[ ,5:27 ])
diffindit <- (m_BL_AF - m_BL_NAF)%*%solve(v_BL_NAF + v_BL_AF )%*%(m_BL_AF - m_BL_NAF)

# Plot the group means with +/- 2 sd bands.
# NOTE(review): both sets of bands use diag(v_BL_NAF); the AF bands
# presumably should use diag(v_BL_AF) -- confirm.
plot( m_BL_AF , type ='l' , col = 'red' )
lines(m_BL_AF + 2*sqrt( diag(v_BL_NAF) ) , type ='l' , col = 'blue')
lines(m_BL_AF - 2*sqrt( diag(v_BL_NAF) ) , type ='l' , col = 'blue')
lines( m_BL_NAF , type ='l' , col = 'black' )
lines(m_BL_NAF + 2*sqrt( diag(v_BL_NAF) ) , type ='l' , col = 'green')
lines(m_BL_NAF - 2*sqrt( diag(v_BL_NAF) ) , type ='l' , col = 'green')

# Per-bin histograms comparing AF (red) and non-AF (blue) baseline values.
binlims = c( seq(from = 0 , to = 1 , 0.1 ))
par(mfrow = c(8,2))
for( ii in 5:27 ){
  x11(20,10)
  hist(AFBaselineCDFMatrices[ , ii] , col = rgb(1 , 0 , 0 , alpha = 0.25) , breaks = binlims , axes = FALSE , freq=FALSE)
  hist(NAFBaselineCDFMatrices[ , ii] , col = rgb(0 , 0 , 1 , alpha = 0.25) , breaks = binlims, add = TRUE , axes = FALSE , freq=FALSE)
}

# Pairwise scatter plots of selected bins for each group; NAs zeroed first
# so pairs() does not drop rows.
AFBaselineCDFMatrices[is.na(AFBaselineCDFMatrices)] <- 0
pairs(AFBaselineCDFMatrices[ ,7:20 ],col = rgb(1 , 0 , 0 , alpha = 0.1) , pch = 16)
pairs(NAFBaselineCDFMatrices[ ,7:20 ],col = rgb(0 , 1 , 0 , alpha = 0.1) , pch = 16)
x11()
PlotFun_DoublePairs(AFBaselineCDFMatrices[ ,7:20 ] , NAFBaselineCDFMatrices[ ,7:20 ])
e7644c9cf00da6a46f1581bc37d8cc380e8ea2f3
39b9897c3e110aed4f8f0a62b616f7e14d2c91ec
/Monte_Carlo_Bayesian_Model.R
4ef95c1051612d2b9df28caae3f1f6e5e90c13e3
[]
no_license
planetx316/Monte-Carlo-simulation-Bayesian-estimation
60bf6ca7ab9ba428d247079bb93e52202ee583a7
2b4dc588a166001ce568a5f7d749514b2ceec5ae
refs/heads/main
2023-09-05T09:37:28.949643
2021-11-18T09:39:22
2021-11-18T09:39:22
429,372,533
0
0
null
null
null
null
UTF-8
R
false
false
441
r
Monte_Carlo_Bayesian_Model.R
# Monte Carlo approximation for a Gamma(shape = 2, rate = 1/3) distribution.
# Draws samples, then compares Monte Carlo estimates of the mean, a tail
# probability, and the 90th percentile against the exact gamma quantities.
set.seed(32)

n_draws <- 1000
shape <- 2.0
rate <- 1 / 3.0

# Simulate gamma values.
theta <- rgamma(n_draws, shape, rate)

# Visualise the draws against the exact density.
hist(theta, freq = FALSE)
curve(dgamma(x, shape, rate), col = 'blue', add = TRUE)

# By the law of large numbers, the sample mean of the simulated values
# approaches the true mean.
mean(theta)

# Monte Carlo estimate of P(theta < 5): the proportion of draws below 5.
ind <- theta < 5
ind
mean(ind)

# Exact value of P(0 < theta < 5) -- close to the MC estimate above.
pgamma(q = 5.0, shape, rate)

# 90th percentile: Monte Carlo quantile vs the exact gamma quantile.
quantile(theta, probs = 0.9)
qgamma(p = 0.9, shape, rate)
e6da48517717db1a6d68e1472ebbaecff6b0e916
aad7c7b66c10940ab3cb23cb24192b2417e74fef
/man/load_chromosome_lengths.Rd
67751dca275ae18ef50a9271de54b06381159ac5
[]
no_license
TransmissibleCancerGroup/dftdLowCov
7b029a3a2b62e359b60343d6579c3a8be9136099
2f884d69654b4289ef322932ba5077940c260ebe
refs/heads/master
2021-01-02T04:49:03.263038
2020-05-28T16:15:21
2020-05-28T16:15:21
239,495,322
0
0
null
null
null
null
UTF-8
R
false
true
280
rd
load_chromosome_lengths.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{load_chromosome_lengths}
\alias{load_chromosome_lengths}
\title{Load chromosome lengths into a list}
\usage{
load_chromosome_lengths()
}
\description{
Load chromosome lengths into a list
}
5d84dceb55a0e7a79944f6b23e466a587826216c
d93a089c03d265fd0458c29f16851b09807db3c2
/man/asciicast_renderer.Rd
b410038db57da3617016ccf913b0ceddca3e74b1
[ "MIT" ]
permissive
coolbutuseless/asciicast
1a1e20659779c721135170eb1cf13dbff28b05d6
8e88fefaeff3079e953b15f88e29e9ac4af0e592
refs/heads/master
2020-05-19T11:05:42.999817
2019-05-05T05:32:30
2019-05-05T05:32:30
184,983,600
6
0
null
null
null
null
UTF-8
R
false
true
614
rd
asciicast_renderer.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gganimate.R
\name{asciicast_renderer}
\alias{asciicast_renderer}
\title{A 'renderer' to use with \code{gganimate::animate()}}
\usage{
asciicast_renderer(filename = tempfile(fileext = ".cast"), ...)
}
\arguments{
\item{filename}{output file for the asciicast animation}

\item{...}{further arguments passed to \code{asciicast::create_asciicast_flipbook()}}
}
\value{
Returns a function which takes 2 arguments: \code{frames} and \code{fps}
}
\description{
Warning: this only currently works with a hacked, custom version of gganimate.
}
e9a78deb214b9a748701a5634f94aac10f1883f4
71c38902f491f8031015d8d6664ddd7e80e0db43
/FLBIEA_sim_cod_had_whg/prepare_input_fishing_effort_data_00.R
8cf15d90c49e8099fb17411d573394642c8fa389
[]
no_license
GhassenH/FLBEIA_GIT
d5707bdf5cb0969d51cf387e0af33909813c381b
37607124f96ba6c1033dce001317ae94eec5347c
refs/heads/master
2020-09-21T15:33:54.029693
2019-12-03T09:11:19
2019-12-03T09:11:19
224,831,367
0
0
null
null
null
null
UTF-8
R
false
false
6,143
r
prepare_input_fishing_effort_data_00.R
####################################################################
# Prepare data for FLBEIA simulations
# Fishing effort by fleet and effort share for each fleet and métier
# Ghassen Halouani
# February 2019
######################################################################

## 1. Fishing effort by fleet ------------------------------------------------

# Dimension labels for the FLBEIA-style input table (age/unit/season/area are
# collapsed to a single level; one row per year 2000-2016, one iteration).
age <- "all"
year <- seq(2000, 2016)
unit <- "unique"
season <- "all"
area <- "unique"
iter <- 1

# Observed nominal effort per fleet.
eff_fleet <- read.csv("data/fleet component/effort by fleets.csv")

# Split by fleet code: Irish, British and French demersal trawlers over 10 m,
# plus an "other" aggregate.
eff_fl1 <- subset(eff_fleet, fleets == "IRL_DTS_O10M")
eff_fl2 <- subset(eff_fleet, fleets == "GBR_DTS_O10M")
eff_fl3 <- subset(eff_fleet, fleets == "FRA_DTS_O10M")
eff_fl4 <- subset(eff_fleet, fleets == "OTH_DTS_O10M")

# Construction of the table: expand dimension labels to one entry per year.
age2 <- rep(age, each = length(year))
year2 <- rep(year, times = length(age))
unit2 <- rep(unit, times = length(age2))
season2 <- rep(season, times = length(age2))
area2 <- rep(area, times = length(age2))
iter2 <- rep(iter, times = length(age2))

# Effort series: three leading zeros (presumably the first three years with
# no observed effort) followed by the observed nominal effort.
# NOTE(review): the commented alternative below padded with NA instead of 0
# -- confirm which convention the FLBEIA conditioning expects.
data_fl1 <- c(c(0,0,0), eff_fl1$nominal_effort)
data_fl2 <- c(c(0,0,0), eff_fl2$nominal_effort)
data_fl3 <- c(c(0,0,0), eff_fl3$nominal_effort)
data_fl4 <- c(c(0,0,0), eff_fl4$nominal_effort)
# data_fl1 <- c(NA, NA, NA, eff_fl1$nominal_effort)
# data_fl2 <- c(NA, NA, NA, eff_fl2$nominal_effort)
# data_fl3 <- c(NA, NA, NA, eff_fl3$nominal_effort)
# data_fl4 <- c(NA, NA, NA, eff_fl4$nominal_effort)

# Assemble one long-format table per fleet.
eff_fl1 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = data_fl1)
eff_fl2 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = data_fl2)
eff_fl3 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = data_fl3)
eff_fl4 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = data_fl4)

# Save data (one CSV per fleet; file names are read by the FLBEIA input step).
write.csv(eff_fl1, "data/fleet component/fl1_effort.csv", row.names = FALSE)
write.csv(eff_fl2, "data/fleet component/fl2_effort.csv", row.names = FALSE)
write.csv(eff_fl3, "data/fleet component/fl3_effort.csv", row.names = FALSE)
write.csv(eff_fl4, "data/fleet component/fl4_effort.csv", row.names = FALSE)

## 2. Effort share by fleet and métiers ---------------------------------------

# Same dimension labels as section 1.
age <- "all"
year <- seq(2000, 2016)
unit <- "unique"
season <- "all"
area <- "unique"
iter <- 1

# Effort share of the TR1/TR2 métiers within each fleet.
eff_share <- read.csv("data/fleet component/effort share TR1_TR2.csv")

# transform the percentage into values between 0 and 1
eff_share$TR1_eff_share <- eff_share$TR1_eff_share/100
eff_share$TR2_eff_share <- eff_share$TR2_eff_share/100

# Split by fleet code, same order as section 1.
eff_share1 <- subset(eff_share, fleets == "IRL_DTS_O10M")
eff_share2 <- subset(eff_share, fleets == "GBR_DTS_O10M")
eff_share3 <- subset(eff_share, fleets == "FRA_DTS_O10M")
eff_share4 <- subset(eff_share, fleets == "OTH_DTS_O10M")

# Construction of the table (same expansion as section 1).
age2 <- rep(age, each = length(year))
year2 <- rep(year, times = length(age))
unit2 <- rep(unit, times = length(age2))
season2 <- rep(season, times = length(age2))
area2 <- rep(area, times = length(age2))
iter2 <- rep(iter, times = length(age2))

# Share series padded with NA for the first three years (no observations).
share_fl1_TR1 <- c(NA, NA, NA, eff_share1$TR1_eff_share)
share_fl2_TR1 <- c(NA, NA, NA, eff_share2$TR1_eff_share)
share_fl3_TR1 <- c(NA, NA, NA, eff_share3$TR1_eff_share)
share_fl4_TR1 <- c(NA, NA, NA, eff_share4$TR1_eff_share)

share_fl1_TR2 <- c(NA, NA, NA, eff_share1$TR2_eff_share)
share_fl2_TR2 <- c(NA, NA, NA, eff_share2$TR2_eff_share)
share_fl3_TR2 <- c(NA, NA, NA, eff_share3$TR2_eff_share)
share_fl4_TR2 <- c(NA, NA, NA, eff_share4$TR2_eff_share)

# One long-format table per fleet/métier combination
# (met1 = TR1, met2 = TR2).
effshare_fl1_met1 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl1_TR1)
effshare_fl2_met1 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl2_TR1)
effshare_fl3_met1 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl3_TR1)
effshare_fl4_met1 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl4_TR1)

effshare_fl1_met2 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl1_TR2)
effshare_fl2_met2 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl2_TR2)
effshare_fl3_met2 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl3_TR2)
effshare_fl4_met2 <- data.frame(age = age2, year = year2, unit = unit2, season = season2, area = area2, iter = iter2, data = share_fl4_TR2)

# Save data (one CSV per fleet/métier).
write.csv(effshare_fl1_met1, "data/fleet component/fl1.met1_effshare.csv", row.names = FALSE)
write.csv(effshare_fl2_met1, "data/fleet component/fl2.met1_effshare.csv", row.names = FALSE)
write.csv(effshare_fl3_met1, "data/fleet component/fl3.met1_effshare.csv", row.names = FALSE)
write.csv(effshare_fl4_met1, "data/fleet component/fl4.met1_effshare.csv", row.names = FALSE)

write.csv(effshare_fl1_met2, "data/fleet component/fl1.met2_effshare.csv", row.names = FALSE)
write.csv(effshare_fl2_met2, "data/fleet component/fl2.met2_effshare.csv", row.names = FALSE)
write.csv(effshare_fl3_met2, "data/fleet component/fl3.met2_effshare.csv", row.names = FALSE)
write.csv(effshare_fl4_met2, "data/fleet component/fl4.met2_effshare.csv", row.names = FALSE)
b222c3270f07156394217b9b4ab5580fc2d43dd8
7c6b5502b56e7be64f87dcb474f5f01ac88992a5
/man/var.in.colour.Rd
9fa1a60c8e976f1094d3c1711f1b3e27b7646a6c
[]
no_license
rxmenezes/rscreenorm
c4cc575c3c5c1f05aec3d7c14448a13452a7c310
88c8abca441fd8894d026a351c232ae832d5b228
refs/heads/master
2020-03-24T17:21:31.899634
2018-08-05T06:43:08
2018-08-05T06:43:08
141,115,438
0
0
null
null
null
null
UTF-8
R
false
true
1,769
rd
var.in.colour.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_functions.R \name{var.in.colour} \alias{var.in.colour} \title{Creates a colour variable corresponding to a factor.} \usage{ var.in.colour(myclass, mystart = 0.1, myend = 0.9) } \arguments{ \item{myclass}{a factor, or a variable that can be considered as (and transformed into) a factor. Observations that are NA for the input variable are also NA for the resulting colour variable.} \item{mystart}{the starting value for the colour range from \code{\link{rainbow}}. Must be a value between 0 and 1.} \item{myend}{the end value for the colour range from \code{\link{rainbow}}. Must be a value between 0 and 1.} } \value{ A list with the following slots: col_var, containing the variable with the colours, with as many entries as the input variable myclass; list_cols, a vector of colours used, as long as the number of levels/distinct values in myclass; and levels_class, a vector containing the categories of myclass, in the same order as in list_cols. } \description{ Creates a variable with each entry assuming a different colour, corresponding to levels of a factor. Such a variable is convenient for use with plotting. The colours chosen are picked by \code{\link{rainbow}}, given the number of factor levels. } \examples{ # Create a factor fcl <- factor(rep(5:3,each=3)) # Now create a vector of colours, one corresponding to each factor level # as well as objects to be used in a legend col.cl <- var.in.colour(fcl) # Make a simple plot displaying the colours and add a legend plot(as.numeric(as.character(fcl)), col=col.cl[["col_var"]], pch=20, cex=2) legend("topright", legend=col.cl[["levels_class"]], pch=20, col=col.cl[["list_cols"]] ) } \seealso{ \code{\link{rainbow}} }
23457c67e6d73a6c9e95fd2f9541edfd54b43dc3
59d501a829468e393db33cc38a192c1ed154f8ef
/R/ci_single_mean_theo.R
bb64d3b4825b77cb734020f865eb87fc4986a110
[]
no_license
aaronbaggett/labs4316
9687d80b2db2a73a80478bd343b75111c8821510
e467139cd2d14c0b11561db4a2146e7d969bbbce
refs/heads/master
2020-04-21T08:39:50.833663
2019-09-17T18:27:27
2019-09-17T18:27:27
169,426,048
1
0
null
null
null
null
UTF-8
R
false
false
1,678
r
ci_single_mean_theo.R
ci_single_mean_theo <- function(y, conf_level, y_name,
                                show_var_types, show_summ_stats, show_res,
                                show_eda_plot, show_inf_plot){
  # Theoretical (t-based) confidence interval for a single mean.
  #
  # y              : numeric sample
  # conf_level     : confidence level as a proportion, e.g. 0.95
  # y_name         : label for the x-axis of the EDA plot
  # show_var_types, show_summ_stats, show_res : print the corresponding
  #                  text summaries when TRUE
  # show_eda_plot  : print a histogram of the sample when TRUE
  # show_inf_plot  : no inference plot exists for this method; a warning is
  #                  issued when TRUE
  #
  # Returns (invisibly nothing special -- a plain list):
  #   df = degrees of freedom, SE = standard error,
  #   ME = margin of error, CI = two-element confidence interval

  # Sample summaries.
  n <- length(y)
  y_bar <- mean(y)
  s <- sd(y)

  # t-based interval: df = n - 1, critical value at the upper tail
  # percentile implied by the confidence level.
  df <- n - 1
  perc_crit_value <- conf_level + ((1 - conf_level) / 2)
  t_star <- qt(perc_crit_value, df)
  se <- s / sqrt(n)
  me <- t_star * se
  ci <- y_bar + c(-1, 1) * me

  # Optional text output.
  if (show_var_types == TRUE) {
    cat("Single numerical variable\n")
  }
  if (show_summ_stats == TRUE) {
    cat(paste0("n = ", n, ", y-bar = ", round(y_bar, 4), ", s = ", round(s, 4), "\n"))
  }
  if (show_res == TRUE) {
    cat(paste0(conf_level * 100, "% CI: (", round(ci[1], 4), " , ", round(ci[2], 4), ")\n"))
  }

  # EDA plot: histogram of the sample with the mean marked.
  # (Built unconditionally, printed only on request, as in the original.)
  d_eda <- data.frame(y = y)
  eda_plot <- ggplot2::ggplot(data = d_eda, ggplot2::aes(x = y), environment = environment()) +
    ggplot2::geom_histogram(fill = "#8FDEE1", binwidth = diff(range(y)) / 20) +
    ggplot2::xlab(y_name) +
    ggplot2::ylab("") +
    ggplot2::ggtitle("Sample Distribution") +
    ggplot2::geom_vline(xintercept = y_bar, col = "#1FBEC3", lwd = 1.5)
  if (show_eda_plot) {
    print(eda_plot)
  }
  if (show_inf_plot) {
    warning("No inference plot available.", call. = FALSE)
  }

  list(df = df, SE = se, ME = me, CI = ci)
}
2a87734cedb73b542c11a24f66f6eb15dd1db642
a07745b632ba07b4f66a4529ea8d3642b283a3e8
/Sentiment Analysis/Twitter.R
0b3e34270f1c97b452e807805ab2b5d7144f70d1
[]
no_license
siri1696/DataScience
f21ae3f0ba808f738be4c0be862522091d29a428
68c15bc4fd9e6a865b873e5650e08ea4a853fcb9
refs/heads/master
2022-12-11T22:17:44.722303
2020-09-13T15:53:32
2020-09-13T15:53:32
293,814,927
0
0
null
null
null
null
UTF-8
R
false
false
2,203
r
Twitter.R
# Twitter text-mining pipeline: authenticate against the Twitter API, pull a
# user timeline, clean the text with tm, build term matrices, and draw a
# word cloud.
#
# SECURITY NOTE(review): API consumer keys, secrets and access tokens are
# hard-coded below. They are credentials and should be revoked and moved to
# environment variables or a secrets file, never committed to source control.
library("twitteR")
library("ROAuth")

# Build and persist an OAuth credential object.
cred <- OAuthFactory$new(consumerKey='zh5iKDe3s4q2QERnp9ubr7RmR', # Consumer Key (API Key)
                         consumerSecret='SAGa0Pv703f1HHLJH1lLl7byGsXgMx4VxgGRnVb4J1Apq5mR1A', # Consumer Secret (API Secret)
                         requestURL='https://api.twitter.com/oauth/request_token',
                         accessURL='https://api.twitter.com/oauth/access_token',
                         authURL='https://api.twitter.com/oauth/authorize')
save(cred, file="twitter authentication.Rdata")
load("twitter authentication.Rdata")

#install.packages("base64enc")
library(base64enc)
#install.packages("httpuv")
library(httpuv)

# Authenticate the twitteR session.
setup_twitter_oauth("zh5iKDe3s4q2QERnp9ubr7RmR", # Consumer Key (API Key)
                    "SAGa0Pv703f1HHLJH1lLl7byGsXgMx4VxgGRnVb4J1Apq5mR1A", # Consumer Secret (API Secret)
                    "1273539865687101440-E8YTkeQBk23O7roZxvZI2azs9eUZfF", # Access Token
                    "1AjaLdIOGIp4CfbLjBxgc7jFQTsbqtSmKpjA0QI3Fp5VZ") # Access Token Secret
#registerTwitterOAuth(cred)

# Fetch up to 600 tweets (including retweets) from the timeline and save
# them to CSV for offline processing.
Tweets <- userTimeline('@narendramodi', n = 600,includeRts = T)
TweetsDF <- twListToDF(Tweets)
dim(TweetsDF)
View(TweetsDF)
write.csv(TweetsDF, "Tweets.csv",row.names = F)

# Reload the tweets interactively (first column = tweet text).
tweets_data=read.csv(file.choose())
tweets_data=as.data.frame(tweets_data[,1],drop = FALSE)

library(tm)
x=as.character(tweets_data$`tweets_data[, 1]`)

# Build a tm corpus from the tweet text.
x=Corpus(VectorSource(x))
inspect(x[2])
inspect(x[262])

# Data cleansing: lower-case, strip punctuation/numbers/stopwords.
# inspect() calls are interactive spot checks after each step.
x1=tm_map(x,tolower)
inspect(x1[2])
inspect(x1[600])
x1=tm_map(x1,removePunctuation)
inspect(x1[2])
inspect(x1[600])
x1=tm_map(x1,removeNumbers)
inspect(x1[2])
inspect(x1[600])
x1=tm_map(x1,removeWords,stopwords('english'))
inspect(x1[2])
inspect(x1[600])

# Stemming followed by lemmatisation.
library(SnowballC)
x2=tm_map(x1,stemDocument)
inspect(x2[2])
library(textstem)
x3=tm_map(x2,lemmatize_strings)
inspect(x3[2])
inspect(x3[262])

# Strip extra whitespace left by the removals above.
x4=tm_map(x3,stripWhitespace)
inspect(x4[262])

# Term-document matrix and per-term frequency table (sorted descending).
tdm=TermDocumentMatrix(x4)
tdm
m=as.matrix(tdm)
v= sort(rowSums(m),decreasing = TRUE)
d=data.frame(word=names(v),freq=v)
head(d,10)

# Document-term matrix (transposed view, kept for reference).
dtm=DocumentTermMatrix(x4)
dtm

# Word cloud of term frequencies.
library(wordcloud2)
wordcloud2(data = d,size = 1.6,shape = 'star')
ea74533be95c9345e3f98cac7720a4a0f4586f38
3a4dc294c0e19742daa2dd4f81249576d3712326
/200403/200403_text_mining.R
1b7c7c57a10602b6aa4382239e60877bc276ed9d
[]
no_license
Johnkim92/r_workspaces
e2bcd58e36e36db96597172e78a29b444ab8845e
fe9ed936879a590fe93cd41f7ce4c54bd57030f1
refs/heads/master
2021-05-18T22:50:24.839083
2020-04-08T01:47:15
2020-04-08T01:47:15
251,462,811
0
0
null
null
null
null
UTF-8
R
false
false
4,046
r
200403_text_mining.R
# Text mining of hip-hop lyrics (Korean), using KoNLP for noun extraction.

# Install packages (one-time setup).
install.packages("rJava")
install.packages("memoise")
install.packages("KoNLP")

# Load packages.
library(KoNLP)
library(dplyr)

# Configure the NIA dictionary for Korean morphological analysis.
useNIADic()

# Prepare the data.
txt <- readLines("../datas/hiphop.txt")
# A warning about an "incomplete final line" is expected: the file's last
# line is not newline-terminated.
head(txt)

# Remove special characters (replace every non-word character with a space).
install.packages("stringr")
library(stringr)
txt <- str_replace_all(txt, "\\W", " ")
class(txt)
dim(txt)
View(txt)

# Find the most frequently used words.
# Noun extraction demo (article 3 of the Korean constitution as input).
extractNoun("대한민국의 영토는 한반도와 그 부속도서로 한다.")

# Extract nouns from the lyrics.
nouns <- extractNoun(txt)
# extractNoun() returns a list (one character vector per input line).
class(nouns)
dim(nouns)

# Flatten the noun list to a character vector and build a frequency table;
# unlist() is required before table() can tabulate the words.
wordcount <- table(unlist(nouns))
class(wordcount)
dim(wordcount)
wordcount

# Build a frequency table of commonly used words.
# Convert to a data frame.
df_word <- as.data.frame(wordcount, stringsAsFactors = F)
# Rename the columns.
df_word <- rename(df_word, word=Var1, freq = Freq)
class(df_word)
dim(df_word)

# Keep only words of two or more characters.
df_word <- filter(df_word, nchar(word) >= 2)

top_20 <- df_word %>%
  arrange(desc(freq)) %>%
  head(20)

# Word-cloud packages.
library(wordcloud)
library(RColorBrewer)

# Word colour palette.
pal <- brewer.pal(8, "Dark2")

# Generate the word cloud.
set.seed(1234)                     # fix the random layout
wordcloud(words = df_word$word,
          freq = df_word$freq,
          min.freq = 2,            # minimum word frequency
          max.words = 200,         # number of words displayed
          random.order = F,        # place frequent words in the centre
          rot.per = .1,            # proportion of rotated words
          scale = c(4, 0.3),       # word size range
          colors = pal)            # colour palette

###############################################################################
# Text mining of NIS (National Intelligence Service) tweets.
# - The tweets were published online by the independent outlet Newstapa in
#   June 2013, when the NIS election-interference scandal broke.
# - 3,744 tweets written from NIS accounts.

# Load the data.
twitter <- read.csv("../datas/twitter.csv",
                    header = T,
                    stringsAsFactors = F,
                    fileEncoding = "UTF-8")
# Rename the (Korean) columns: 번호 = number, 계정이름 = account name,
# 작성일 = date written, 내용 = content.
twitter <- rename(twitter,
                  no=번호,
                  id=계정이름,
                  date=작성일,
                  tw= 내용)
head(twitter)
class(twitter)
str(twitter)
dim(twitter)
names(twitter)
# When column names are missing, R automatically names the columns "X...".

# Remove special characters.
twitter$tw <- str_replace_all(twitter$tw, "\\W", " ")
head(twitter)

# Extract nouns from the tweets.
nouns1 <- extractNoun(twitter$tw)

# Flatten the noun list to a character vector and build a frequency table.
nouns1 <- table(unlist(nouns1))

# Convert to a data frame.
df_twords <- as.data.frame(nouns1, stringsAsFactors = F)
# Rename the columns.
df_twords <- rename(df_twords, word=Var1, freq = Freq)

# Keep only words of three or more characters.
df_twords <- filter(df_twords, nchar(word) >= 3)

top_40 <- df_twords %>%
  arrange(desc(freq)) %>%
  head(40)

# Word cloud of the 40 most frequent words.
wordcloud(words = top_40$word,
          freq = top_40$freq,
          min.freq = 2,
          max.words = 200,
          random.order = F,
          scale = c(4,0.3),
          rot.per = .1,
          colors = pal)

# Bar chart of word frequencies.
library(ggplot2)
order <- arrange(top_40, freq)$word          # word order by frequency
ggplot(data=top_40, aes(x=word, y=freq))+
  ylim(0,2500)+
  geom_col()+
  coord_flip()+
  scale_x_discrete(limit = order)+           # sort bars by frequency
  geom_text(aes(label=freq), hjust=-0.3)     # label each bar with its count
5365b31474a2841b6f375d6f171d0c2e68968f91
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/quRan/examples/quran_en_yusufali.Rd.R
5140fe94939c0462af1561e2b5e69a47854bf463
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
363
r
quran_en_yusufali.Rd.R
# Extracted example script for the `quran_en_yusufali` dataset from the
# quRan package (the Yusuf Ali English translation, one row per ayah).
library(quRan)

### Name: quran_en_yusufali
### Title: The Yusuf Ali translation of the Qur'an (English)
### Aliases: quran_en_yusufali
### Keywords: datasets

### ** Examples

# See first few rows
head(quran_en_yusufali)

# How many ayahs are in each surah? Group by surah title and count rows.
library(dplyr)
quran_en_yusufali %>%
  group_by(surah_title_en) %>%
  summarize(total_ayaat = n())
6a94833aca4ed404be237813c8fda3cb544f9e17
57a791c35698e39313c97598d7d528843a0b0f1f
/01-t-test-tables.R
17bf2ca078c2cc193aa849eb36010f7ed33c20bb
[]
no_license
arnsbarger/opioid-mine-proj
87d290723c551c382c36ba87ef80e2f4c1278cf3
b4385c7b12606243d07a4987ee6df7555cd83ccf
refs/heads/master
2023-08-04T15:47:32.994278
2021-09-17T17:22:25
2021-09-17T17:22:25
235,850,624
0
0
null
null
null
null
UTF-8
R
false
false
8,937
r
01-t-test-tables.R
# Goal: show that counties with mine closings are systematically different
# from counties without closings, via Welch t-tests on county characteristics.

# libraries
library(ggplot2)
library(usmap)
library(rgdal)
library(reshape2)
library(acs)
library(dplyr)
library(stargazer)
options(scipen=999)   # suppress scientific notation in output

# Load mines data frame (defines `mines`).
setwd("~/git/opioid-mine-proj/")
source("00-clean-msha.R") # mines
mines <- mines %>% select(MINE_ID, CURRENT_MINE_NAME, County.Code)

# Load quarterly mine data (defines `mine_qtrly`, `mine_closings`).
setwd("~/git/opioid-mine-proj/")
source("00-clean-msha-closings.R") # mine_qtrly , mine_closings

accidents <- read.delim("~/Documents/Pitt/Data/msha_mine_quarterly_employment_production/Accidents.txt", header = TRUE, sep = "|") # load mine accidents info (since 2000)
violations <- read.delim("~/Documents/Pitt/Data/msha_mine_quarterly_employment_production/Violations.txt", header = TRUE, sep = "|") # load mine violations info (since 2000)

# Zero-pad mine IDs to 7 characters so they join cleanly with `mines`.
accidents$MINE_ID <- str_pad(accidents$MINE_ID, 7, pad = "0")
violations$MINE_ID <- str_pad(violations$MINE_ID, 7, pad = "0")

# Per-mine accident and violation summaries since 2000.
acc_info <- data.frame(accidents %>% group_by(MINE_ID) %>% summarise(injuries_since_2000_cnt = sum(NO_INJURIES), accidents_since_2000_cnt = n()))
vio_info <- data.frame(violations %>% group_by(MINE_ID) %>% summarise(violations_since_2000_cnt = n(), total_cnt_affected_empl = sum(NO_AFFECTED, na.rm = TRUE)))

# Coal mines only ("C"): per-mine-per-quarter totals across subunits, then
# per-mine averages over quarters.
prod_info <- mine_qtrly %>%
  filter(COAL_METAL_IND == "C") %>%
  group_by(MINE_ID, CAL_PROD_QTR) %>%
  summarise(total_coal_production = sum(COAL_PRODUCTION, na.rm = TRUE), # total coal production over all subunits
            total_hours_worked = sum(HOURS_WORKED, na.rm = TRUE),
            total_employees = sum(AVG_EMPLOYEE_CNT, na.rm = TRUE))

avg_prod_info <- prod_info %>%
  group_by(MINE_ID) %>%
  summarise(avg_total_coal_production = mean(total_coal_production, na.rm = TRUE),
            avg_total_hours_worked = mean(total_hours_worked, na.rm = TRUE),
            avg_total_employees = mean(total_employees, na.rm = TRUE),
            earliest_yr_qtr = min(CAL_PROD_QTR))

# ATTEMPT 1 (abandoned): flag closure from the employee count alone.
# mine_closings$ever_closed <- ifelse(mine_closings$AVG_EMPLOYEE_CNT > 0, 0, 1)
# mine_closings$ever_closed_name <- ifelse(mine_closings$AVG_EMPLOYEE_CNT > 0, "Open", "Closed")
# mine_closings <- mine_closings %>% select(MINE_ID, CAL_PROD_QTR, COAL_METAL_IND, ever_closed, ever_closed_name)

# ATTEMPT 2 (abandoned): last run of constant employment per mine SUBUNIT.
# mine_closings <- mine_qtrly %>%
#   filter(COAL_METAL_IND == "C") %>% # same reason as above
#   group_by(MINE_ID, SUBUNIT) %>%
#   arrange(CAL_PROD_QTR) %>%
#   slice(which.max(rleid(AVG_EMPLOYEE_CNT)))
# mine_closings$ever_closed <- ifelse(test = as.numeric(mine_closings$CAL_PROD_QTR) < 20194, yes = 1, no = 0)
# temp = mine_closings %>% group_by(MINE_ID) %>% summarise(length(unique(ever_closed)))
# table(temp$`length(unique(ever_closed))`) # 478 mines had one portion close and another still open

# ATTEMPT 3 (used): per MINE, take the start of the last run of constant
# employment; a mine is "ever closed" if that run starts before 2019 Q4.
mine_closings <- mine_qtrly %>%
  filter(COAL_METAL_IND == "C") %>% # same reason as above
  group_by(MINE_ID) %>%
  arrange(CAL_PROD_QTR) %>%
  slice(which.max(rleid(AVG_EMPLOYEE_CNT)))
mine_closings$ever_closed <- ifelse(test = as.numeric(mine_closings$CAL_PROD_QTR) < 20194, yes = 1, no = 0)
mine_closings <- mine_closings %>% select(MINE_ID, CAL_PROD_QTR, ever_closed)

# Assemble the per-mine table: production averages + county code +
# accidents + violations + closing flag.
mine_data <- merge(x = avg_prod_info, y = mines, by = "MINE_ID", all.x = TRUE)
mine_data <- merge(x = mine_data, y = acc_info, by = "MINE_ID", all.x = TRUE)
mine_data <- merge(x = mine_data, y = vio_info, by = "MINE_ID", all.x = TRUE)
mine_data <- merge(x = mine_data, y = mine_closings, by = "MINE_ID", all.x = TRUE)
# mine_data$avg_coal_prod[is.nan(mine_data$avg_coal_prod)] <- NA
# mine_data[is.na(mine_data)] <- 0

# Collapse to the county level.
county_data <- mine_data %>%
  group_by(County.Code) %>%
  summarise(num_mines = length(unique(MINE_ID)),
            avg_total_coal_prod_cnty = mean(avg_total_coal_production, na.rm = TRUE),
            avg_total_emp_cnty = mean(avg_total_employees, na.rm = TRUE),
            earliest_yr_qtr_cnty = min(earliest_yr_qtr),
            #avg_earliest_yr = mean(earliest_yr, na.rm = TRUE),
            total_injuries_since2000 = sum(injuries_since_2000_cnt),
            total_accidents_since2000 = sum(accidents_since_2000_cnt),
            total_violations_since2000 = sum(violations_since_2000_cnt),
            total_empl_affected_viol2000 = sum(total_cnt_affected_empl),
            total_ever_closed = sum(ever_closed),
            percent_ever_closed = total_ever_closed / num_mines
            #cnt_metal_mines = sum(COAL_METAL_IND == "M"),
            #cnt_coal_mines = sum(COAL_METAL_IND == "C")
            )

# CDC mortality data (defines `cdc_na`).
setwd("~/git/opioid-mine-proj/")
source("00-clean-cdc.R")
cdc_na %>% group_by(Year) %>% summarise(sum(!is.na(Crude.Rate))) # no clear jump; run t-test for all years

# Back out point estimates of the crude rate from each confidence bound
# (bound -/+ 1.96 * SE), for counties where the rate itself is suppressed.
cdc_na$est_crude.rate_from_upper <- as.numeric(cdc_na$Crude.Rate.Upper.95..Confidence.Interval) - 1.96*cdc_na$Crude.Rate.Standard.Error
cdc_na$est_crude.rate_from_lower <- cdc_na$Crude.Rate.Lower.95..Confidence.Interval + 1.96*cdc_na$Crude.Rate.Standard.Error
cdcna <- cdc_na[,-c(14:17)]
# estimated crude.rate from lower CI is closer to the reported crude rates, so I'll put this estimate in the t-table too.

# Pivot county x year crude rates to wide format (first 19 years kept).
test2 <- cdc_na %>% select(County.Code, Year, Crude.Rate)
cdc_table_data2 <- reshape2::dcast(data = test2, formula = County.Code ~ Year, value.var = "Crude.Rate", fun.aggregate = mean)[,1:20]
names(cdc_table_data2) <- c("County.Code",paste0("Crude.Rate.",names(cdc_table_data2)[-1]))

test3 <- cdc_na %>% select(County.Code, Year, est_crude.rate_from_lower)
cdc_table_data3 <- reshape2::dcast(data = test3, formula = County.Code ~ Year, value.var = "est_crude.rate_from_lower", fun.aggregate = mean)[,1:20]
names(cdc_table_data3) <- c("County.Code",paste0("est_crude.rate_from_lower",names(cdc_table_data3)[-1]))

cdc_table_data <- merge(cdc_table_data2, cdc_table_data3, by = "County.Code")

# ACS county characteristics data.
acs <- read.csv("~/Documents/Pitt/Data/acs_output/acs_mine_sample.csv")#[,-1]
acs$fips <- str_pad(acs$fips, 5, pad = "0")

### MERGE
cdc_acs <- merge(x = cdc_table_data, y = acs, by.x = "County.Code", by.y = "fips", all = TRUE) # merge cdc and acs
data <- merge(x = county_data, y = cdc_acs, by = "County.Code", all.x = TRUE) # merge county mine stats with county characteristics
# data$per_coal_mines <- data$cnt_coal_mines / data$num_mines
# data$per_metal_mines <- data$cnt_metal_mines / data$num_mines

# Treatment indicator: county had at least one mine ever close.
data$any_mines_closed <- ifelse(test = data$total_ever_closed > 0, yes = 1, no = 0)
table(data$any_mines_closed)

# NOTE(review): this zero-fills every remaining NA across all columns,
# including suppressed mortality rates -- confirm this is the intended
# treatment of missingness before interpreting the t-tests.
data[is.na(data)] <- 0

# Column indices of the outcome variables to t-test.
variables <- c(2:4,6,7,8,23:30,42:50,55:58,73:78,81)

# t-tests: establish differences between counties with and without closings.
# (Earlier loop kept for reference.)
# results <- data.frame(variable = names(data[,variables]), t_stat = NA, p_value = NA)
# for (i in 2:(ncol(data)-1)) {
#   result <- t.test(data[,i] ~ any_mines_closed, data, var.equal = FALSE)
#   # results$t_stat[i-1] <- result$statistic
#   # results$p_value[i-1] <- result$p.value
#   # results$mean_0[i-1] <- result$estimate[1]
#   # results$mean_1[i-1] <- result$estimate[2]
#   # results$degrees_of_freedom[i-1] <- result$parameter
#   #
# }

# Welch two-sample t-test for each selected variable; each result is
# prepended to `results`, so the seeded NA row ends up last and is dropped.
results <- data.frame(t_stat = NA, p_value = NA, mean_0 = NA, mean_1 = NA, degrees_of_freedom = NA)
for (i in variables) {
  result <- t.test(data[,i] ~ any_mines_closed, data, var.equal = FALSE)
  row <- c(result$statistic, result$p.value, result$estimate[1], result$estimate[2], result$parameter)
  results <- rbind(row, results)
}
results <- results[-nrow(results),]
# NOTE(review): rbind() above prepends, so rows are in REVERSE order of
# `variables`; this assignment labels them in forward order -- verify the
# variable names line up with the statistics.
results$variable <- names(data[,variables])

# Keep suggestive differences (p <= .15) and emit a LaTeX table.
presentation <- results %>% filter(p_value <= .15) %>% arrange(p_value)
stargazer(presentation, summary = FALSE, rownames = FALSE)

# 250 counties with mines...
sum(!is.na(data$num_mines))
length(unique(mine_data$County.Code))
124dd5fe376ee30b0a36dea1d6c9539440904a27
02b9550223b3125811ce5d559622be191ec62e7e
/man/est_paras.Rd
8cb862a8d539793ca9754db65016bbfd344426d7
[]
no_license
xinyiyu/MR-APSS
67ce7446f30c453be5fccbe14f29bf48ee13f211
ddb918905fe3ffedab46f38454c72cec725643b9
refs/heads/master
2023-07-17T20:50:17.456871
2021-08-25T14:13:50
2021-08-25T14:13:50
null
0
0
null
null
null
null
UTF-8
R
false
true
1,306
rd
est_paras.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/est_paras.R \name{est_paras} \alias{est_paras} \title{A function harmonising datasets and estimate background parameters by LD score regression.} \usage{ est_paras( dat1, dat2, trait1.name = "exposure", trait2.name = "outcome", LDSC = T, h2.fix.intercept = F, ldscore.dir = NULLL ) } \arguments{ \item{dat1:}{formmated summary statistics for trait 1.} \item{dat2:}{formmated summary statistics for trait 2.} \item{trait1.name:}{specify the name of trait 1, default `exposure`.} \item{trait2.name:}{specify the name of trait 2, default `outcome`.} \item{LDSC:}{whether to run LD score regression, default `TRUE`. If `FALSE`, the function will not give the parameter estimates but will do harmonising.} \item{h2.fix.intercept:}{whether to fix LD score regression intercept to 1, default `FALSE`.} \item{ldscore.dir:}{specify the path to the LD score files.} } \value{ List with the following elements: \describe{ \item{Mdat}{Homonised data set } \item{C}{the estimated C matrix capturing the effects of sample structure } \item{Omega}{the estimated variance-covariance matrix for polygenic effects} } } \description{ A function harmonising datasets and estimate background parameters by LD score regression. }
6e1d30d3dee3ee073c92d2cf6e80d5b4bb4cc83b
5a79eecb3ec54fa71022d07e0b61bfbe4f2e55d8
/diamonds-price-regression.R
9a626f4f728ad437504ddc301e3bbf441f1d3c91
[]
no_license
cdsmax/regression_models
c05122a68fad0aa6a015cbb570a6a82432b38981
75d88fc29de1ead762618c4bef7327c56ef549bd
refs/heads/master
2020-06-02T19:34:56.486419
2015-02-15T18:07:45
2015-02-15T18:07:45
30,831,571
0
0
null
null
null
null
UTF-8
R
false
false
2,298
r
diamonds-price-regression.R
# example of regression with confidence intervals for the coefficients # based on the diamond data # full calc example for this: # fit <- lm(y ~ x) # summary(fit)$coefficients library(UsingR) data(diamond) # we assume price of diamonds depends on their mass in carats x <- diamond$carat y <- diamond$price n <- length(y) # unbiased degrees of freedom freedom <- n - 2 # generate the coefficients # y = beta0 + beta1*x + e # beta0 is estimated price for a 0-carat diamond # beta1 is estimated price increase for 1 carat change beta1 <- cor(y, x) * sd(y)/sd(x) beta0 <- mean(y) - beta1 * mean(x) # get the residuals e <- y - beta0 - beta1 * x # unbiased (n-2) residual variance sigma <- sqrt(sum(e^2) / freedom) # sum of squared error for X ssx <- sum( (x - mean(x))^2 ) # standard error for beta0 seBeta0 <- (1/n + mean(x)^2 / ssx)^0.5 * sigma # standard error for beta1 seBeta1 <- sigma / sqrt(ssx) # t-distributions for beta0 and beta1 tBeta0 <- beta0 / seBeta0 tBeta1 <- beta1 / seBeta1 # p-values for beta0 and beta1 pBeta0 <- 2 * pt( abs(tBeta0), df = freedom, lower.tail=FALSE ) pBeta1 <- 2 * pt( abs(tBeta1), df = freedom, lower.tail=FALSE ) # make a good looking summary -- summary(fit)$coeffients coefTable <- rbind( c(beta0, seBeta0, tBeta0, pBeta0), c(beta1, seBeta1, tBeta1, pBeta1) ) colnames(coefTable) <- c("Estimate", "Std Error", "T values", "p-values for P(>|t)") rownames(coefTable) <- c("Intercept", "X") # 95% confidence intervals for beta0 and beta1 - two sided t-test confBeta0 <- coefTable[1, 1] + c(-1, 1) * qt(0.975, df=freedom) * coefTable[1, 2] confBeta1 <- coefTable[2, 1] + c(-1, 1) * qt(0.975, df=freedom) * coefTable[2, 2] # prediction: what is the expected price at average carat value, with confidence interval fit <- lm(y ~ x) predict(fit, data.frame(x=mean(x)), interval="confidence") # prediction: what is the expected price for a 1 carat diamond, with prediction interval predict(fit, data.frame(x=1), interval="prediction") # scaling a coefficient # what if 
we're working with deci-carats, ie carat * 10 # notice that we / fit_default <- lm(y ~ x) fit_deci <- lm(y ~ I(x/10)) fit_default$coefficients fit_deci$coefficients # slope is *10 # what about working in 10th of a carat # notice is * fit_tenth <- lm(y ~ I(x*10)) fit_tenth$coefficients
dd20a1d09563d66bd9521bbba0cfb175447b92d9
0079d55b6d1178f390b333cc6afc36e39c87ac87
/man/boundary.Rd
00ab1cb689ec8112d18c4675548adb9157a705c8
[]
no_license
cran/rWMBAT
08e88ec256fd6125b11ff16e9b510f888568c96a
1b5961730491fa9cea2bd4625b7fc0da6911fbf1
refs/heads/master
2018-12-29T21:26:46.553640
2009-10-28T00:00:00
2009-10-28T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
185
rd
boundary.Rd
\name{boundary} \alias{boundary} \docType{data} \title{Example Data} \description{ The location used to bin the data to get max MI } \usage{data(boundary)} \keyword{datasets}
948811073aa860c6f0f3aeabcdb8582e936869a3
075a864bb66f0339d033b62441bc6f7f3fea6a31
/varimp_rf.r
fd1d37bf382ee2124a70e60dc7e922565bf7df0d
[]
no_license
1enemyleft/Kaggle
130a470df6f61196d139319f4d14deb792d7c96d
90bcce2eb1a57f30b1257b8077350ad6c2ed9c19
refs/heads/master
2021-01-12T22:17:38.884153
2015-05-04T18:27:27
2015-05-04T18:27:27
35,021,837
1
1
null
null
null
null
UTF-8
R
false
false
1,050
r
varimp_rf.r
library(ggplot2) library(randomForest) library(readr) set.seed(1) train <- read_csv("train.csv") test <- read_csv("test.csv") submission <- data.frame(id=test$id, Class_1=NA, Class_2=NA, Class_3=NA, Class_4=NA, Class_5=NA, Class_6=NA, Class_7=NA, Class_8=NA, Class_9=NA) rf <- randomForest(train[,c(-1,-95)], as.factor(train$target), ntree=25, importance=TRUE) submission[,2:10] <- (predict(rf, test[,-1], type="prob")+0.01)/1.09 gz_out <- gzfile("1_random_forest_benchmark.csv.gz", "w") writeChar(write_csv(submission, ""), gz_out, eos=NULL) close(gz_out) imp <- importance(rf, type=1) featureImportance <- data.frame(Feature=row.names(imp), Importance=imp[,1]) p <- ggplot(featureImportance, aes(x=reorder(Feature, Importance), y=Importance)) + geom_bar(stat="identity", fill="#53cfff") + coord_flip() + theme_light(base_size=20) + xlab("Importance") + ylab("") + ggtitle("Random Forest Feature Importance\n") + theme(plot.title=element_text(size=18)) ggsave("2_feature_importance.png", p, height=20, width=8, units="in")
30fbfb9bcdb46e81b19802e6e90c1cfa81dccf28
5f3eed2db2d0e11017524dad7a7890278ab5bc3c
/rnn_cbg_v2.R
735e8a695d6def0ffc3b075b4d4ce5e1a612d9c3
[]
no_license
csainsbury/rnn_inpatient_cbg
c8e28922f6dc239834de2ba9baaf3eecee24ed78
e9d6bd8d618e23832eaf9a4d2d799f063f962e78
refs/heads/master
2020-12-30T13:20:32.506879
2017-05-22T20:02:19
2017-05-22T20:02:19
91,202,788
0
0
null
null
null
null
UTF-8
R
false
false
10,249
r
rnn_cbg_v2.R
library(data.table) returnUnixDateTime<-function(date) { returnVal<-as.numeric(as.POSIXct(date, format="%Y-%m-%d", tz="GMT")) return(returnVal) } ## DM summaryOutputName <- paste("~/R/GlCoSy/source/DTwithPerIDdata.csv",sep="") DT<-read.csv(summaryOutputName, header=TRUE , sep="," , row.names=NULL) DT<-data.table(DT) #list primes - to generate lookup table prime = function(n) { n = as.integer(n) if(n > 1e8) stop("n too large") primes = rep(TRUE, n) primes[1] = FALSE last.prime = 2L fsqr = floor(sqrt(n)) while (last.prime <= fsqr) { primes[seq.int(2L*last.prime, n, last.prime)] = FALSE sel = which(primes[(last.prime+1):(fsqr+1)]) if(any(sel)){ last.prime = last.prime + min(sel) }else last.prime = fsqr+1 } which(primes) } prime(10000) # take each day of admission # calcalate median and IQR of day # encode as single number using godel coding - plot # T1 only T1_DT <- DT[DiabetesMellitusType_Mapped == "Type 1 Diabetes Mellitus"] # interested in the 15th day of admission cut_DT <- T1_DT[admissionDurationDays >= 15] # mark dateTimeInDays cut_DT[, c("dateTimeDays") := (dateplustime1 - min(dateplustime1)) / (60*60*24) , by=.(ID, admissionNumberFlag)] cut_DT$dateTimeDays_floor <- floor(cut_DT$dateTimeDays) cut_DT[, c("day_median") := median(yyyy) , by=.(ID, admissionNumberFlag, dateTimeDays_floor)] cut_DT[, c("day_IQR") := quantile(yyyy)[4] - quantile(yyyy)[2], by=.(ID, admissionNumberFlag, dateTimeDays_floor)] # cut_DT <- DT[admissionDurationDays >= 5 & admissionDurationDays < 30 & nCBGperAdmission > 20] ## need to be able to take all admissions greater than n days and look at hypo probability on day n dayN_ofInterest = 10 daySeconds = (60*60*24) dayN_ofInterestSeconds <- dayN_ofInterest * daySeconds # limit dataset to admissions that will have the required data ie at least 5 days of data cut_DT <- DT[admissionDurationDays > dayN_ofInterest] # flag values within the window of interest - here 5 days (dayN_ofInterest == 5) cut_DT[, c("flagWithinNdays") := 
(ifelse(dateplustime1 < (min(dateplustime1) + dayN_ofInterestSeconds), 1, 0)) , by=.(ID, admissionNumberFlag)] # function and execution to ensure that there are more than 1 CBG values in the runin period (ie in days 0-4) - to make sure the code won't crash due to not being able to allocate a scaled time point. numberOfCBGsBeforeDayN <- function(dateplustime1) { x <- dateplustime1[dateplustime1 < (min(dateplustime1) + (dayN_ofInterestSeconds - (60*60*24)))] return(length(x)) } cut_DT[, c("numberOfCBGsBeforeDayN") := numberOfCBGsBeforeDayN(dateplustime1) , by=.(ID, admissionNumberFlag)] # find the odd case where there are only duplicate values in the analysis section check_all_dateplustime1_different <- function(dateplustime1) { x <- dateplustime1[dateplustime1 < (min(dateplustime1) + (dayN_ofInterestSeconds - (60*60*24)))] y <- ifelse(sum(diff(x)) == 0, 0, 1) return(y) } cut_DT[, c("check_all_dateplustime1_different") := check_all_dateplustime1_different(dateplustime1) , by=.(ID, admissionNumberFlag)] # cut the data to values within 5 days, and with enough runin CBGs cut_DT <- cut_DT[flagWithinNdays == 1 & numberOfCBGsBeforeDayN > 1 & check_all_dateplustime1_different == 1] # optional cut for type of DM # cut_DT <- cut_DT[DiabetesMellitusType_Mapped == "Type 1 Diabetes Mellitus"] # take off last time period for calculation of y timePeriodDays <- 1 # time of interest is last 24 hours in dataset - that corresponds to day 5 of admission timePeriodSeconds <- timePeriodDays * (60*60*24) flag_CBGbelow_n_inLastTime <- function(flagWithinLastTime, yyyy, n) { lastimeValues <- yyyy[flagWithinLastTime == 1] returnVals <- ifelse(lastimeValues < n, 1, 0) flagPositiveResult <- ifelse(sum(returnVals) > 0, 1, 0) return(rep(flagPositiveResult, length(yyyy))) } # flag_CBGbelow_n_in24h_before_LastTime <- function(flagWithin_24h_prior_LastTime, yyyy, n) { # # lastimeValues <- yyyy[flagWithin_24h_prior_LastTime == 1] # returnVals <- ifelse(lastimeValues < n, 1, 0) # # flagPositiveResult 
<- ifelse(sum(returnVals) > 0, 1, 0) # # return(rep(flagPositiveResult, length(yyyy))) # } # # isDead_n_years <- function(cut_DT_ID, dateplustime1, n) { # # if (nrow(deathFrameDT[ID == cut_DT_ID]) > 0) { # isDead <- deathFrameDT[ID == cut_DT_ID]$isDead # deathDate <- deathFrameDT[ID == cut_DT_ID]$deathDateUnix # # returnValue <- ifelse(deathDate > 0 & deathDate <= (min(dateplustime1) + n * (60*60*24*365.25)), 1, 0) # # returnValue <- rep(returnValue, length(dateplustime1)) # } # # if (nrow(deathFrameDT[ID == cut_DT_ID]) == 0) { returnValue <- rep(0, length(dateplustime1)) } # # return(returnValue) # # } cut_DT[, c("flagWithinLastTime") := (ifelse( dateplustime1 >= (min(dateplustime1) + ((dayN_ofInterest - 1) * daySeconds)) & dateplustime1 < (min(dateplustime1) + ((dayN_ofInterest) * daySeconds)), 1, 0)) , by=.(ID, admissionNumberFlag)] cut_DT[, c("lessThan4_withinLastTime") := flag_CBGbelow_n_inLastTime(flagWithinLastTime, yyyy, 4), by=.(ID, admissionNumberFlag)] cut_DT[, c("lessThan3_withinLastTime") := flag_CBGbelow_n_inLastTime(flagWithinLastTime, yyyy, 3), by=.(ID, admissionNumberFlag)] cut_DT[, c("lessThan2p88_withinLastTime") := flag_CBGbelow_n_inLastTime(flagWithinLastTime, yyyy, 2.88), by=.(ID, admissionNumberFlag)] # cut_DT[, c("flagWithin_24h_prior_LastTime") := (ifelse((dateplustime1 >= (min(dateplustime1) + ((max(dateplustime1) - min(dateplustime1)) - 2*timePeriodSeconds))) & (dateplustime1 < (min(dateplustime1) + ((max(dateplustime1) - min(dateplustime1)) - timePeriodSeconds))), 1, 0)) , by=.(ID, admissionNumberFlag)] # cut_DT[, c("lessThan4_within_24h_prior_LastTime") := flag_CBGbelow_n_in24h_before_LastTime(flagWithin_24h_prior_LastTime, yyyy, 4), by=.(ID, admissionNumberFlag)] # for death analysis run this ##****## # cut_DT[, c("isDead_3y") := isDead_n_years(ID, dateplustime1, 3), by=.(ID, admissionNumberFlag)] ##****## # cut_DT[, c("firstCBGperID") := (dateplustime1 == min(dateplustime1)), by=.(ID)] ##****## # cut_DT[, c("flagPartOfFirstAdmission") 
:= ifelse(max(firstCBGperID) == 1, 1, 0), by=.(ID, admissionNumberFlag)] cut_DT[, c("flagLastCBG") := (ifelse(CBGinSequencePerAdmission == length(yyyy), 1, 0)), by=.(ID, admissionNumberFlag)] report_y_hypo4 <- data.frame(cut_DT[flagLastCBG == 1]$ID, cut_DT[flagLastCBG == 1]$lessThan4_withinLastTime) colnames(report_y_hypo4) <- c("ID", "hypo_4") report_y_hypo3 <- data.frame(cut_DT[flagLastCBG == 1]$ID, cut_DT[flagLastCBG == 1]$lessThan3_withinLastTime) colnames(report_y_hypo3) <- c("ID", "hypo_3") # report_y_hypo4_24hPrior <- data.frame(cut_DT[flagLastCBG == 1]$ID, cut_DT[flagLastCBG == 1]$lessThan4_within_24h_prior_LastTime) # colnames(report_y_hypo4_24hPrior) <- c("ID", "hypo_4") ## code to test using prior hypo as a predictor # test <- report_y_hypo4[39502:49346, ]; test_24hprior <- report_y_hypo4_24hPrior[39502:49346, ] # library(pROC); auc(test$hypo_4, test_24hprior$hypo_4) ##****## report_y_death_3y <- data.frame(cut_DT[firstCBGperID == 1]$ID, cut_DT[firstCBGperID == 1]$isDead_3y) ##****## colnames(report_y_death_3y) <- c("ID", "dead_3y") # scale admissions to n points # cut_DT[, c("flag_for_processing") := (ifelse(dateplustime1 < (min(dateplustime1) + (admissionDuration[1] - timePeriodSeconds)), 1, 0)) , by=.(ID, admissionNumberFlag)] ##****## # for death analysis - only analyse a single CBG dataset per ID - taking the first admission in the dataset ##****## # cut_DT_processSegment <- cut_DT[flagPartOfFirstAdmission == 1] cut_DT_processSegment <- cut_DT[flagWithinLastTime == 0] process_DT <- data.table(cut_DT_processSegment$ID, cut_DT_processSegment$admissionNumberFlag, cut_DT_processSegment$dateplustime1, cut_DT_processSegment$yyyy); colnames(process_DT) <- c("ID", "admissionNumberFlag", "dateplustime1", "yyyy") # remove the unusual admissions where all CBGs occur in last day/time period # ensure that at least 5 cbgs to work with # process_DT[, c("nrows_post_lasttimeRemoval") := .N , by=.(ID, admissionNumberFlag)] # process_DT <- 
process_DT[nrows_post_lasttimeRemoval > 4] process_DT[, c("scaled_dateplustime1") := (dateplustime1 - min(dateplustime1)) / (max(dateplustime1) - min(dateplustime1)) , by=.(ID, admissionNumberFlag)] n_points = 500 process_X <- data.frame(matrix(nrow = 0, ncol = n_points + 2)) idVector <- unique(process_DT$ID) for (i in seq(1, length(idVector), 1)) { if (i%%100 == 0) {print(i)} id_sub <- process_DT[ID == idVector[i]] admissionVector <- as.numeric(levels(as.data.frame(table(id_sub$admissionNumberFlag))$Var1))[as.data.frame(table(id_sub$admissionNumberFlag))$Var1] for (j in seq(1, length(admissionVector), 1)) { id_admission_sub <- id_sub[admissionNumberFlag == admissionVector[j]] output_X <- approx(id_admission_sub$scaled_dateplustime1, id_admission_sub$yyyy, n = n_points) # first col = ID, second col = admissionFlag, rest is cbg values concat_X <- c(idVector[i], admissionVector[j], output_X[[2]]) process_X <- rbind(process_X, concat_X) } } # plot 1000 points per admission # generate for each admission, and write into a row # y = approx(x$scaled_dateplustime1, x$yyyy, n = 1000) # ensure that report_y in same order as process_X id_diff <- process_X[, 1] - report_y_hypo4[, 1] ifelse(sum(id_diff) > 0, sum(id_diff), print("id match")) # save out input data (X) and (y) save_X <- process_X[, -1] save_X <- save_X[, -1] save_X <- round(save_X, 2) save_y_hypo4 <- report_y_hypo4$hypo_4 save_y_hypo3 <- report_y_hypo3$hypo_3 # save_y_dead3 <- report_y_death_3y$isDead_3y # randomisingSequence <- runif(nrow(report_y_hypo4), 0, 1) ## writeout files for tensorflow # write out sequence for analysis write.table(save_X, file = "~/R/_workingDirectory/rnn_inpatient_cbg/data/4thday_processX.csv", sep=",", row.names = FALSE) # write out dep variable (y) write.table(save_y_hypo4, file = "~/R/_workingDirectory/rnn_inpatient_cbg/data/4thday_report_y_hypo4.csv", sep = ",", row.names = FALSE) write.table(save_y_hypo3, file = 
"~/R/_workingDirectory/rnn_inpatient_cbg/data/4thday_report_y_hypo3.csv", sep = ",", row.names = FALSE)
9a9a5a7e6c58b8942af6760ad4a6edc37bc8f69b
2c61ff990535e204d2edc233ffbac928e93cb8b3
/man/pt_create_pParams.Rd
817c5fb7d6ce76e9c4f08570c5a891bb61f17526
[]
no_license
bernhard-da/ptable
5a7ff510adf579fd2e6a2c8a510ed35b42ae6b8d
05f604b67115bd672146e3f69e462b7c8e36c9f4
refs/heads/master
2020-03-22T17:24:13.684014
2019-10-01T07:28:44
2019-10-01T07:28:44
139,999,143
0
0
null
2018-07-06T15:07:32
2018-07-06T15:07:32
null
UTF-8
R
false
true
1,828
rd
pt_create_pParams.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pt_create_pParams.R \name{pt_create_pParams} \alias{pt_create_pParams} \title{pt_create_pParams} \usage{ pt_create_pParams(D, V, js = 0, pstay = NULL, optim = 1, mono = TRUE, table = "cnts", step = 2, icat = NULL, type = "all", label = paste("D", D, "V", V * 100, sep = "")) } \arguments{ \item{D}{perturbation parameter for maximum noise/perturbation (scalar or vector)} \item{V}{perturbation parameter for variance (scalar)} \item{js}{treshold value for blocking of small frequencies (i.e. the perturbation will not produce positive cell values that are equal to or smaller than the treshold value).} \item{pstay}{optional parameter to set the probability (0 < p < 1) of an original frequency to remain unperturbed: NA (default) no preset probability (i.e. produces the maximum entropy solution)} \item{optim}{optimization parameter: \code{1} standard approach (default)} \item{mono}{(logical) vector specifying optimization parameter for monotony condition} \item{table}{(character) type of the table: frequency or magnitude table} \item{step}{(integer) step width} \item{icat}{(integer) categorized original frequencies i} \item{type}{(character) indicator for the extra column 'type' used for magnitude tables: 'even', 'odd' or 'all' (default)} \item{label}{(character) label of the Output} } \value{ an object of \code{\linkS4class{ptable_params}} } \description{ creates the required input for \code{\linkS4class{ptable}}. } \examples{ # parameter setting for frequency tables pt_create_pParams(D=5, V=2, js=2, pstay=0.5, optim=1, mono=TRUE) # parameter setting for magnitude tables pt_create_pParams(D=5, V=2, table="nums", step=4, icat=c(1,3,5)) } \author{ Tobias Enderle, \email{tobias.enderle@destatis.de} } \keyword{data}
3ae994a55f5c91dec0e6101a20bc1c6704f1a097
7161c0ba36757e59c685553bfa3477cb4b6cb916
/Exploratorios.r
64dd55be170be940c2ae1fb99d7f6ca41c63339a
[]
no_license
grupo-gien/Exploratorios-en-R
436ac3f2ee3388a9392739914817adde32a45710
3c3610ce5b41de5cc4a18f03da1a7be26f6cfdf0
refs/heads/main
2023-02-02T20:41:27.737610
2020-12-17T00:20:32
2020-12-17T00:20:32
318,847,566
0
1
null
null
null
null
ISO-8859-1
R
false
false
10,157
r
Exploratorios.r
#------------------------------------- # Base de datos datos <- read.csv2("Insectos.csv",row.names=1) datos = read.csv2("Insectos.csv",row.names=1) datos <- read.csv2(file.choose(),row.names=1) #------------------------------------- # LIBRERÍAS REQUERIDAS library(lattice) library(ellipse) require(SciViews) require(stats) # Estructura de la base de datos str(datos) datos$cuenca=as.factor(datos$cuenca) str(datos) summary(datos[,2:9]) # 1. Gráfica por pares pairs(datos[,2:8]) pairs(log10(datos[,2:8])) # 2.Figura elipses plotcorr(cor(datos[,2:9])) help(plotcorr) corr.mtcars <- cor(datos[,2:9]) ord <- order(corr.mtcars[1,]) xc <- corr.mtcars[ord, ord] colors <- c("#A50F15","#DE2D26","#FB6A4A","#FCAE91","#FEE5D9","white", "#EFF3FF","#BDD7E7","#6BAED6","#3182BD","#08519C") plotcorr(xc, col=colors[5*xc + 6]) # 3. Figuras de pares pairs ((datos[,c(2:9)]),panel=function(x,y) {abline(lsfit(x,y)$coef,lwd=2,col=3) lines(lowess(x,y),lty=2,lwd=2,col=2) points(x,y,cex=1)}) pairs ((datos[,c(2,6,7,9)]),panel=function(x,y) {abline(lsfit(x,y)$coef,lwd=2,col=3) lines(lowess(x,y),lty=2,lwd=2,col=2) points(x,y,col=datos$cuenca, cex=1.4)}) # 4. x11() pairs(datos[, 2:9], diag.panel = panel.hist, upper.panel = panel.smooth, lower.panel = panel.cor) # 5. Figura con tres variables (Función: coplot) with(datos,coplot(Efem~pH|temp)) with(datos, { coplot(Efem~pH|temp, number = 3, panel = function(x, y, ...) panel.smooth(x, y, span = .8, ...)) coplot(Efem~pH|temp, panel = panel.smooth) }) # 6. Coplot summary(datos[,2:8]) clasetemp<-cut(datos$temp,seq(15,20,1.2),include.lowest=T) clasetemp clasepH<-cut(datos$pH,seq(5,8,1,include.lowest=T)) clasepH coplot(Efem~pH | clasetemp, pch=19, panel = panel.lm, data=datos) # 7. panel.lm = function(x, y, ...) 
{ tmp<-lm(y~x,na.action=na.omit) abline(tmp, lwd = 1.5, col= 2) points(x,y, ...)} coplot(Efem ~ pH | clasetemp, pch=19, panel = panel.lm, data=datos) # Categorizando variables continuas splom(~datos[,4:8]|clasepH,pscales=0) splom(~datos[,4:8]|clasepH+clasetemp,pscales=0) # Reación por niveles del factor Cuenca # datos$cuenca<-factor(datos$cuenca, levels=c("cuen3","cuen4","cuen1","cuen2")) # 7. Figuras xyplot xyplot(Efem~pH|cuenca,data=datos, panel = panel.lm, data=datos) # 8. Histogramas histogram (~Ab,data=datos, ylab="Porcentaje del Total", xlab="Abundancia de insectos") x11() histogram (~Ab|cuenca,data=datos, ylab="Porcentaje del Total", xlab="Abundancia de insectos") # 8. Figuras de densidad densityplot(~Ab,data=datos, ylab="Porcentaje del Total", xlab="Abundancia de insectos") densityplot(~Ab|cuenca,data=datos, ylab="Porcentaje del Total", xlab="Abundancia de insectos") # 9. qqplot panel<-par(mfrow=c(1,2), mar=c(4,3,3,2)) # figura con datos crudos qqnorm (datos$Ab, main="Abundancia de Insectos", ylab="Cuantiles de la muestra", xlab="Cuantiles teóricos") qqline(datos$Ab) # 10. figura con raíz log de abndancias Ab.log <- log10(datos$Ab+1) qqnorm (Ab.log, main="Log de Abundancia de Insectos", ylab="Cuantiles de la muestra", xlab="Cuantiles teóricos") qqline(Ab.log) par(panel) panel<-par(mfrow=c(1,1)) # 11. plot(Efem~Plec,col=as.integer(cuenca),data=datos,ylab="", xlab="Plecópteros") legend(0,27,legend=levels(datos$cuenca),pch=19,col=1:4,cex=0.8) lines(abline(lm(datos$Efem~datos$Plec),lwd=2,col=2, lty=2)) par(panel) # 14. Colocar colores al grafico por cuenca xyplot(Efem~Plec,group=cuenca,auto.key=T,data=datos) # 15. plot(Efem~Plec,col=as.integer(cuenca),data=datos) legend(0,25,legend=levels(datos$cuenca),pch=19,col=1:4,cex=0.8) lines(abline(lm(datos$Efem~datos$Plec),lwd=2,col=2, lty=2)) # 16. 
Figuras de Cajas y cinturas datos$cuenca<-factor(datos$cuenca, levels=c("cuen1","cuen2","cuen3","cuen4")) boxplot(Ab~cuenca,data=datos, xlab="Cuencas",ylab="Abundancia", col="lightgray", cex.lab=1.3) boxplot(Ab~cuenca,data=datos,notch=TRUE, xlab="Cuencas",ylab="Abundancia", col="lightgray", cex.lab=1.3) #------------- # PRUEBA DE HIPÓTESIS # 1) Anova 1 vía (Abundancia vs. cuenca) # Qué es el Anova a una vía? # R./ # Cuál será la hipótesis nula (Ho)? # R./ Ab.anov <- aov(Ab ~ cuenca , data=datos) summary(Ab.anov) # Se acepta Ho? Por qué? # R./ # 1.1) Supuesto de Normalidad shapiro.test(Ab.anov$residuals) # Se acepta el supuesto? Por qué? # Ho Hay normalidad en los residuales del Anova # R./ # 1.2) Homogeneidad de varianzas bartlett.test (Ab ~ cuenca , data=datos) # Se acepta el supuesto? Por qué? # R./ # 1.3) Independencia de errores - Durbin Watson library(car) # Se debe generar un modelo lineal relacionado con el Anova modelo<-lm(Ab ~ cuenca, data=datos) names(modelo) durbinWatsonTest(modelo) # Se acepta el supuesto? Por qué? # R./ # Grafico de incremento de los residuales de Durbin Watson plot(residuals(modelo),type="l",ylab="Residuales", xlab="Indice") # Otros tipos de Anovas # 1) Prueba Kruskal-Wallis # Qué es estadístico de Kruskal Wallis? # R./ Ab.kw <- kruskal.test (Ab ~ cuenca , data=datos) Ab.kw # Se acepta Ho? Por qué? # R./ # 2) Prueba de Welch (p.333 Murray) # Qué es estadístico de Welch? # R./ Ab.welch <- oneway.test(Ab ~ cuenca , data=datos, var.equal = F) Ab.welch #-------- # Resumen de la prueba de Hipótesis # Anova, K-W y Welch diferencias entre cuencas= # Normalidad = # Homogeneidad = # Comparación múltiple de medianas boxplot(Ab ~ cuenca , data=datos, col = "lightgray", notch=TRUE, xlab= "Cuenca", ylab="Abundancia", cex.lab=1.5) #------------ # INVESTIGAR: Comparación multiple o prueba # "Contraste no paramétrico de efectos relativos" - Frank Konietschke (2008) # Aplicar al presente ejercicio. 
library(nparcomp) multi.comp<-nparcomp(Ab ~ cuenca, data=datos, asy.method = "probit", type = "Tukey", alternative = "two.sided", plot.simci = TRUE, info = FALSE,correlation=TRUE) names(multi.comp) multi.comp$Analysis multi.comp$Contrast summary(multi.comp) #========================================== #------------------------------------- # LIBRERÍAS REQUERIDAS library(lattice) library(ellipse) library(plotrix) require(SciViews) require(stats) #------------------------------------ # Figura 1 # Figuras de tortas (Función "pie") datos<-read.csv2("Datos1.csv") str(datos) # Suma de las biomasas datos1 <- colSums(datos[,5:9]) datos1 #Piechart par(mfrow = c(2,2), mar = c(3, 3, 2, 1)) # pie(datos1 , main = "Figura Circular Ordinaria") # pie(datos1 , col = gray(seq(0.4,1.0,length=6)), clockwise=TRUE, main = "Escala de Grises", angle=45) # pie(datos1 , col = rainbow(6),clockwise = TRUE, main="Colores de Arcoiris") # 3D pie3D(datos1 , labels = names(datos1), explode = 0.1, main = "Figura Circular en 3D", labelcex=0.8) # par(bentos) #----------------------------------------- # Grupos Funcionales de Invertebrados Acuáticos "gfun" datos<-read.csv2("Datos1.csv") # Datos del tramo A datosA=datos[1:10,] datosA # Datos de Biomasa total del tramo A tramoA=datosA$BIOM.TOT tramoA names(tramoA) <- datosA[,3] tramoA # Datos del tramo A datosB=datos[11:20,] datosB # Datos de Biomasa total del tramo B tramoB=datosB$BIOM.TOT tramoB names(tramoB) <- datosB[,3] tramoB # Tabla de biomasa total para los dos tramos tramos <- cbind(tramoA, tramoB) tramos # Panel con 4 figuras de barras par(mfrow = c(2,2), mar = c(3, 3, 2, 1)) barplot(tramoB , main = "Biomasas") barplot(tramos) barplot(t(tramos), col = gray(c(0.5,1))) barplot(t(tramos), beside = TRUE) par(mfrow = c(1,1) #------------------------------- #Example 2 # Figuras de columnas y desviaciones, para grupos funcionales # Por tramo y periodos climáticos. 
datos<-read.csv2("Datos2.csv") head(datos) # Promedios y desviaciones por cada GF datos.m <- tapply(datos$Ab, INDEX=bentos$GF, FUN=mean) datos.de <- tapply(bentos$Ab, INDEX=bentos$GF, FUN=sd) # Tabla de de medias y desviaciones por cada GFA datos1<- cbind(Bent.m, Bent.de) datos1 # Figura de barras con líneas acotadas par(mfrow = c(2,1), mar = c(3, 5, 2, 1)) barplot(Bent.m, xlab = "GFA", ylab = "Abuandancia de GFA", ylim=c(0,400)) arrows(bp, Bent.m, bp, Bent.m + Bent.de, lwd = 1.5,angle=90,length=0.1) barplot(datos.m, xlab = "GFA",ylab = "Abundancias (Indv)", col=rainbow(9), ylim=c(0,700)) arrows(bp, datos.m, bp, datos.m + datos.de, lwd = 1.5, angle=90,length=0.1) box() par(mfrow = c(1,1)) #-------------------------------- # Figuras de tiras # Figura de líneas acotadas con errores estandar (.es) # Error estandar = desviación estandar/raiz de tamaño de la muestra (.le) # Tamaño de la muestra (tm) para los datos de abundancias por cada GF = 8 muestreos datos.tm <- tapply(datos$Ab, INDEX=datos$GF, FUN=length) # datos.ee <- Bent.de / sqrt(datos.tm) # Operación "random jittering (variación)" # # stripchart(datos$Biom ~ datos$GF, vert = TRUE, pch=1, method = "jitter", jit = 0.05, xlab = "Grupos Funcionales",ylab = "Abundancias (Indv)") points (1:5,datos.m, pch = 16, cex = 1.5) # Líneas acotadas simbolizan los errores estandar (ee) arrows (1:8, datos.m,1:8, datos.m + datos.de, lwd = 1.5, angle=90, length=0.1) arrows (1:8, datos.m,1:8, datos.m - datos.de, lwd = 1.5, angle=90, length=0.1) #--------------------------------------- #Figuras de Cajas y Bigotes datos<-read.csv2("Datos2.csv") library(latti) # par(mfrow = c(2,2), mar = c(3, 5, 2, 1)) # boxplot(Ab~GF, data = datos, ylab ="Abundancia (Indv)", cex.lab=1.3) # boxplot(Ab~GF, notch=T, data = datos, ylab="") # boxplot(Ab~GF * Lluvia, data = datos, ylab="Biomasa (Biom)", cex.lab=1.3) # boxplot(Ab~GF*Lluvia, names= c("P1/C-F","P2/C-F","P1/C-R","P2/C-R", "P1/D","P2/D","P1/R","P2/R","P1/T","P2/T"),data = datos, ylab ="") 
par(mfrow = c(1,1))
3cc0ecf2e36e6a89b99eb72cdafec015510ae586
61aa1a5d5375320b63823e5e6d1b98abaa507afd
/man/getData_champion.Rd
ff2d697d03b43d31d8ce14532206de23d5d12e4a
[ "MIT" ]
permissive
PedroVitorino-ICS/cbloldataR
ffc40d67b0df04f1b624fcfdf9f3a9fb12b05d71
8fcfa71c1bd6b8d19a052e0b751b1c8fbe478b24
refs/heads/master
2023-01-06T21:42:07.506525
2020-10-30T13:22:36
2020-10-30T13:22:36
null
0
0
null
null
null
null
UTF-8
R
false
true
1,290
rd
getData_champion.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getData_champion.R \name{getData_champion} \alias{getData_champion} \title{Harvest data of champions in games of CBLOL from Leaguepedia} \usage{ getData_champion(Role, Year, Split, Champion = NULL) } \arguments{ \item{Role}{(character) The lane where the champion was played. It should contain at least one of the five roles: "Top", "Jungle", "Mid", "AD Carry" and "Support".} \item{Year}{(numeric) The year you want to access data (2015:2020).} \item{Split}{(character) The split you want to access data: "Split 1", "Split 2", "Split 1 Playoffs" or "Split 2 Playoffs".} \item{Champion}{(character) The champion you want to access data. By default it returns data on every champion. Its very case sensitive.} } \value{ A tibble containing: champion, number of games it was played, victories, defeats, win rate, kills, deaths, assists, KDA, CS per game, CS per minute, gold per game, gold per minute, kill participation, percentage of kills/team, percentage of gold/team, lane, year, split and league. } \description{ Creates a tibble containing Leaguepedia data on champions played in CBLOL games } \examples{ champion <- getData_champion(Role = "Mid", Year = 2020, Split = c("Split 2","Split 2 Playoffs")) }
fa1117db4f76f041e2d329fbc47788cd650e4611
7c8d7c2f070a0a40bba6cdb53cb2fbfc29fab07b
/man/get.nb.gamete.Rd
a675622907237c52d7f4a36e2bc9135dc28c9f26
[]
no_license
frederic-michaud/fressa
c2227c1e5d8ca75b3294de72199da07f305d8834
107ba785b2547279d3ec9eb2a78448b9ccfbc5e6
refs/heads/master
2021-04-15T18:18:05.515976
2018-05-25T09:33:33
2018-05-25T09:33:33
126,500,290
0
0
null
null
null
null
UTF-8
R
false
true
321
rd
get.nb.gamete.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/core_function.R \name{get.nb.gamete} \alias{get.nb.gamete} \title{get the number of possible gamete that exist for a given genome} \usage{ get.nb.gamete(genome) } \description{ get the number of possible gamete that exist for a given genome }
fadcf3ee0fb743aa682666d1214f2c4c8ba46038
72c00331eef22b3e00b6baf6dfe06de81ca39337
/tests/testthat/test-numeric.R
40a98383f871b66e5fa142701ea7dba49e1b227a
[]
no_license
itsdalmo/tabulR
87e7ea011004ce1dd6139d8def60331e43f67348
6965769db8bb37d6f3857f3ad2089f683c0fee79
refs/heads/master
2020-12-24T18:50:39.728534
2016-06-13T09:01:13
2016-06-13T09:01:13
57,385,936
0
0
null
2016-10-02T13:09:35
2016-04-29T13:25:45
R
UTF-8
R
false
false
2,321
r
test-numeric.R
context("numeric columns") set.seed(1000L) df <- data.frame( group = factor(paste("Group", LETTERS[1:3]), levels = paste("Group", LETTERS[1:4])), fct = factor(c("No", "Yes", NA), levels = c("Yes", "No", "Don't know")), int = as.integer(runif(3, 1, 10)), num = runif(3, 0, 100), weight = c(1, 2, 2), stringsAsFactors = FALSE ) test_that("Error for mixed columns", { expect_error(x <- qtable_(df, vars = c("int", "fct")), "mixed variable types.") }) test_that("mean for a single numeric", { x <- qtable_(df, vars = "num") expect_identical(x$n, 3L) expect_identical(round(x$num, digits = 3), 42.497) }) test_that("means for multiple numerics", { x <- qtable_(df, vars = c("num", "int")) expect_identical(x$n, 3L) expect_identical(round(x$num, digits = 3), 42.497) expect_identical(x$int, 4) }) test_that("means for numeric by group", { x <- qtable_(df, vars = "num", groups = "group") expect_identical(as.character(x$group), c(paste("Group", LETTERS[1:4]), "Total")) expect_identical(x$n, c(1L, 1L, 1L, 0L, 3L)) expect_identical(round(x$num, 1), c(69.1, 51.6, 6.8, NA, 42.5)) }) test_that("means for multiple numerics by group", { x <- qtable_(df, vars = c("int", "num"), groups = "group") expect_identical(as.character(x$group), c(paste("Group", LETTERS[1:4]), "Total")) expect_identical(x$n, c(1L, 1L, 1L, 0L, 3L)) expect_identical(x$int, c(3, 7, 2, NA, 4)) expect_identical(round(x$num, 1), c(69.1, 51.6, 6.8, NA, 42.5)) }) test_that("means for single numeric by multiple groups", { x <- qtable_(df, vars = "num", groups = c("group", "fct")) expect_identical(as.character(x$group), c(paste("Group", LETTERS[1:4]), "Total")) expect_identical(x$n, c("0/1/0", "1/0/0", "0/0/0", "0/0/0", "1/1/0")) expect_identical(round(x$Yes, 1), c(NA, 51.6, NA, NA, 51.6)) expect_identical(round(x$No, 1), c(69.1, NA, NA, NA, 69.1)) expect_identical(x$`Don't know`, as.numeric(c(NA, NA, NA, NA, NA))) }) test_that("means for weighted numerics", { x <- qtable_(df, vars = c("int", "num"), groups = "group", weight = 
"weight") expect_identical(as.character(x$group), c(paste("Group", LETTERS[1:4]), "Total")) expect_identical(x$n, c(1L, 1L, 1L, 0L, 3L)) expect_identical(x$int, c(3, 7, 2, NA, 4.2)) expect_identical(round(x$num, 1), c(69.1, 51.6, 6.8, NA, 37.2)) })