blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
d00fe34b591160c30a5e62021b65b004b9180378
697c87e65f7c3a12fdea8141c2859f9da044841f
/R/aimerMethods.R
3fde8c248ad4e05cc8459748c60c7c2d650cdc69
[]
no_license
Lei-D/aimer
944278394d280cacba3d109205c8802c2043f1d5
b7650af34f9661e1dbfffe9e2b40ecb719d49bea
refs/heads/master
2021-07-19T11:54:29.280976
2017-10-27T15:50:59
2017-10-27T15:50:59
null
0
0
null
null
null
null
UTF-8
R
false
false
6,028
r
aimerMethods.R
#'predicts new values for an aimer model. #' #'@param object required, a model generated by the aimer function. #'@param newdata required, a new data matrix to predict with. #'@param ... additional arguments, currently ignored. #' #'@return predicted value vector of length nrow(newdata). #' #'@export predict.aimer <- function(object, newdata, ...){ newdata = sweep(as.matrix(newdata), 2, object$meanx) indeces = object$beta != 0 newdata[,indeces] %*% object$beta[indeces] + object$meany } #'predicts new values for an aimerCV model. #' #'@param object required, a model generated by the aimerCV function. #'@param newdata required, a new data matrix to predict with. #'@param ... additional arguments, currently ignored. #' #'@return predicted value vector of length nrow(newdata). #' #'@export predict.aimerCV <- function(object, newdata, ...){ predict.aimer(object, newdata) } #'returns the fitted values of an aimer model. #' #'@param object required, an aimer model. #'@param ... additional arguments, currently ignored. #' #'@return fitted values of the original data (vector of length n). #' #'@export fitted.aimer <- function(object, ...){ object$fitted } #'returns the fitted values of an aimerCV model. #' #'@param object required, an aimerCV model. #'@param ... additional arguments, currently ignored. #' #'@return fitted values of the original data (vector of length n). #' #'@export fitted.aimerCV <- function(object, ...){ object$fitted } #'returns the residuals of an aimer model. #' #'@param object required, an aimer model. #'@param ... additional arguments, currently ignored. #' #'@return residuals of the original data (vector of length n). #' #'@export residuals.aimer <- function(object, ...){ object$residuals } #'returns the residuals of an aimerCV model. #' #'@param object required, an aimerCV model. #'@param ... additional arguments, currently ignored. #' #'@return residuals of the original data (vector of length n). 
#' #'@export residuals.aimerCV <- function(object, ...){ object$residuals } #'returns the coefficients of an aimer model. #' #'@param object required, an aimer model. #'@param ... additional arguments, currently ignored. #' #'@return coefficient vector for the model #' #'@export coef.aimer <- function(object, ...){ object$beta } #'returns the coefficients of an aimerCV model. #' #'@param object required, an aimerCV model. #'@param ... additional arguments, currently ignored. #' #'@return coefficient vector for the model #' #'@export coef.aimerCV <- function(object, ...){ object$beta } #'plots the residuals in terms of the fitted values for an aimer model #' #'@param x required, an aimer model. #'@param ... additional arguments, passed to `plot` #' #'@return void. #' #'@export plot.aimer <- function(x, ...){ plot(x$residuals ~ x$fitted, xlab = "fitted values", ylab = "residuals", ...) } #'creates a heatmap of each value of ncomps tested in an aimerCV model #' #'@param x required, an aimerCV model. #'@param ... additional arguments, currently ignored #' #'@return void. 
#' #'@export #'@importFrom graphics par plot plot.aimerCV <- function(x, ...){ original = par(ask = TRUE) # if (!requireNamespace("ggplot2", quietly = TRUE)){ # stop("This function requires the installation of the ggplot2 package.") # } MyDF = data.frame(expand.grid(x$ncomps, x$nCovs)) best = match(x$nCov.select.best, x$nCovs.select) getColor <- function(coord){ x$mse[best, coord[1], coord[2]] } MyDF$col = apply(expand.grid(1:length(x$ncomps), 1:length(x$nCovs)), MARGIN = 1, FUN = getColor) p = ggplot2::ggplot(MyDF, ggplot2::aes_string(x = 'Var1', y = 'Var2', fill = 'col')) + ggplot2::geom_tile() + ggplot2::scale_fill_continuous(low = 'blue', high = 'red', guide = ggplot2::guide_legend(title = 'MSE')) + ggplot2::xlab("ncomps") + ggplot2::ylab("nCovs") + ggplot2::ggtitle("For Optimal nCov.select") print(p) # image(x = x$ncomps, y = x$nCovs, # z = x$mse[x$nCovs.select == x$nCov.select.best,,], # xlab = "ncomps", ylab = "ncovs", main = "for optimal value of nCovs.select") MyDF = data.frame(expand.grid(x$nCovs.select, x$ncomps)) best = match(x$nCov.best, x$nCovs) getColor <- function(coord){ x$mse[coord[1], coord[2], best] } MyDF$col = apply(expand.grid(1:length(x$nCovs.select), 1:length(x$ncomps)), MARGIN = 1, FUN = getColor) p = ggplot2::ggplot(MyDF, ggplot2::aes_string(x = 'Var1', y = 'Var2', fill = 'col')) + ggplot2::geom_tile() + ggplot2::scale_fill_continuous(low = 'blue', high = 'red', guide = ggplot2::guide_legend(title = 'MSE')) + ggplot2::xlab("nCovs.select") + ggplot2::ylab("ncomps") + ggplot2::ggtitle("For Optimal nCovs") print(p) # image(x = x$nCovs.select, y = x$ncomps, # z = x$mse[,,x$nCovs == x$nCov.best], # xlab = "nCovs.select", ylab = "ncomps", main = "for optimal value of nCovs") MyDF = data.frame(expand.grid(x$nCovs.select, x$nCovs)) best = match(x$ncomp.best, x$ncomps) getColor <- function(coord){ x$mse[coord[1], best, coord[2]] } MyDF$col = apply(expand.grid(1:length(x$nCovs.select), 1:length(x$nCovs)), MARGIN = 1, FUN = getColor) p = 
ggplot2::ggplot(MyDF, ggplot2::aes_string(x = 'Var1', y = 'Var2', fill = 'col')) + ggplot2::geom_tile() + ggplot2::scale_fill_continuous(low = 'blue', high = 'red', guide = ggplot2::guide_legend(title = 'MSE')) + ggplot2::xlab("nCovs.select") + ggplot2::ylab("nCovs") + ggplot2::ggtitle("For Optimal ncomps") print(p) # image(x = x$nCovs.select, y = x$nCovs, # z = x$mse[,x$ncomps == x$ncomp.best,], # xlab = "nCovs.select", ylab = "nCovs", main = "for optimal value of ncomps") par(original) }
709ccc2d97653a5ba92d9d4f299fc11c86ccf960
4a8209a0137e308f4a96ad0baede74f79803b384
/R/Project.R
7381a0471260149c1e05b942a45ff357677ab962
[ "MIT" ]
permissive
david-yunbae/Project
52e96297b6f3a0457727a6da4fcca5337a034dab
2a04bd5088c1cf149623e1cde5f445a439ab91f1
refs/heads/main
2023-08-28T18:15:50.252789
2021-10-28T23:02:20
2021-10-28T23:02:20
null
0
0
null
null
null
null
UTF-8
R
false
false
10,386
r
Project.R
packages <- c("tidyverse", "knitr", "rmarkdown","roxygen2","testthat","usethis","devtools","ggplot2","ggrepel","stats","kableExtra","bookdown","shiny","patchwork","dplyr","broom") lapply(packages, library, character.only=TRUE) #' Assumption #' #' @description This function outputs the 3 graphs of assumption for the linear regression model. #' @param x text string; This should be the data file name of the test sample. #' #' @return 3 graphs of linearity, constant variance and normal distribution of the residuals #' @export #' #' @examples assumption(x=project-csv) assumption <- function(x,testtype){ dat <- readr::read_csv(x,show_col_types = FALSE) #Prepare assumption for lm D <- lm(height ~ weight,dat) a <- ggplot2::ggplot(dat,aes(x=height,y=weight))+ geom_point() + stat_smooth(method="lm", col="red") +ggtitle("I) Y vs X") b <- ggplot2::ggplot(dat)+geom_point(mapping=aes(x=D$fitted.values ,y=D$residuals)) + geom_hline(yintercept=0,lwd=2)+ggtitle("II) Residual plot")+ylab("Residuals")+xlab("Fitted values") c <- ggplot2::ggplot(dat)+geom_histogram(mapping=aes(x=D$residuals),bins=40) +ggtitle("III) Distribution is normal")+xlab("Residuals") #Prepare assumption for ttest d <- ggplot2::ggplot(dat, aes(sample=height, group=gender, colour=gender))+geom_qq()+geom_qq_line()+xlab("theoretical")+ylab("sample") e <- dat %>% group_by(gender) %>% summarise(n=n(),mu=mean(height),sd=sd(height)) #Preapre assumption for chitest datm <- dat %>% filter(gender=="Male") %>% select(phys) datf <- dat %>% filter(gender=="Female") %>% select(phys) datmn <- datm %>% filter(phys=="None") %>% count() datmm <- datm %>% filter(phys=="Moderate") %>% count() datmi <- datm %>% filter(phys=="Intense") %>% count() datfn <- datf %>% filter(phys=="None") %>% count() datfm <- datf %>% filter(phys=="Moderate") %>% count() datfi <- datf %>% filter(phys=="Intense") %>% count() table <- dplyr::tibble(Male=c(datmn[[1]],datmm[[1]],datmi[[1]]),Female=c(datfn[[1]],datfm[[1]],datfi[[1]])) cat("STAGE 
II>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Assumptions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> \n") Sys.sleep(2) if (testtype=="lm"){ cat("Assumptions: \n \nI Linearity\nII Constant variance\nIII Residuals normally distributed\n \n") print((a+b)/c) } else if (testtype=="ttest"){ cat("Assumptions: \n \nI Normality\nII Equal variance (slarger/ssmaller < 2) - Actual value: ", max(e$sd)/min(e$sd),"\n \n") print(d) } else if (testtype=="chitest"){ cat("Assumptions: \n \nI Normal approximation: All entries must be greater than 5.\n \n") print(table) } } assumptiongraph <- function(x,testtype){ dat <- readr::read_csv(x,show_col_types = FALSE) #Prepare assumption for lm D <- lm(height ~ weight,dat) a <- ggplot2::ggplot(dat,aes(x=height,y=weight))+ geom_point() + stat_smooth(method="lm", col="red") +ggtitle("I) Y vs X") b <- ggplot2::ggplot(dat)+geom_point(mapping=aes(x=D$fitted.values ,y=D$residuals)) + geom_hline(yintercept=0,lwd=2)+ggtitle("II) Residual plot")+ylab("Residuals")+xlab("Fitted values") c <- ggplot2::ggplot(dat)+geom_histogram(mapping=aes(x=D$residuals),bins=40) +ggtitle("III) Distribution is normal")+xlab("Residuals") #Prepare assumption for ttest d <- ggplot2::ggplot(dat, aes(sample=height, group=gender, colour=gender))+geom_qq()+geom_qq_line()+xlab("theoretical")+ylab("sample") e <- dat %>% dplyr::group_by(gender) %>% dplyr::summarise(n=n(),mu=mean(height),sd=sd(height)) #Preapre assumption for chitest datm <- dat %>% dplyr::filter(gender=="Male") %>% dplyr::select(phys) datf <- dat %>% dplyr::filter(gender=="Female") %>% dplyr::select(phys) datmn <- datm %>% dplyr::filter(phys=="None") %>% dplyr::count() datmm <- datm %>% dplyr::filter(phys=="Moderate") %>% dplyr::count() datmi <- datm %>% dplyr::filter(phys=="Intense") %>% dplyr::count() datfn <- datf %>% dplyr::filter(phys=="None") %>% dplyr::count() datfm <- datf %>% dplyr::filter(phys=="Moderate") %>% dplyr::count() datfi <- datf %>% dplyr::filter(phys=="Intense") %>% 
dplyr::count() table <- dplyr::tibble(Male=c(datmn[[1]],datmm[[1]],datmi[[1]]),Female=c(datfn[[1]],datfm[[1]],datfi[[1]])) if (testtype=="lm"){ (a+b)/c } else if (testtype=="ttest"){ d } else if (testtype=="chitest"){ table } } wrapperlm <- function(x){ dat <- readr::read_csv(x,show_col_types = FALSE) B <- lm(height~weight,dat)$coefficients[[2]] df <- lm(height~weight,dat)$df.residual t <- qt(0.025,df,lower.tail=TRUE) p <- broom::glance(lm(height~weight,dat))$p.value[[1]] SE <- coef(summary(lm(height~weight,dat)))[,"Std. Error"][[2]] out <- list(type="lm",param=x,beta=B,CI=t*SE,t_value=t,degree_of_freedom=df,p_value=p) out$CI <- list(min=B-t*SE,max=B+t*SE) class(out) <- "myr" out } wrapperttest <- function(x){ dat <- readr::read_csv(x,show_col_types = FALSE) datm <- dat %>% filter(gender=="Male") %>% select(height) datf <- dat %>% filter(gender=="Female") %>% select(height) test <- t.test(datm,datf,var.equal=TRUE) out <- list(type="ttest",param=x,CI=t,t_value=test[[1]][[1]],degree_of_freedom=test[[2]][[1]],p_value=test[[3]][[1]]) out$CI <- list(min=broom::glance(test)$conf.low,max=broom::glance(test)$conf.high) class(out) <- "myr" out } wrapperchitest <- function(x){ dat <- readr::read_csv(x,show_col_types = FALSE) datm <- dat %>% filter(gender=="Male") %>% select(phys) datf <- dat %>% filter(gender=="Female") %>% select(phys) datmn <- datm %>% filter(phys=="None") %>% count() datmm <- datm %>% filter(phys=="Moderate") %>% count() datmi <- datm %>% filter(phys=="Intense") %>% count() datfn <- datf %>% filter(phys=="None") %>% count() datfm <- datf %>% filter(phys=="Moderate") %>% count() datfi <- datf %>% filter(phys=="Intense") %>% count() table <- dplyr::tibble(Male=c(datmn[[1]],datmm[[1]],datmi[[1]]),Female=c(datfn[[1]],datfm[[1]],datfi[[1]])) test <- chisq.test(table,correct=FALSE) out <- list(type="chitest",param=x,t_value=test[[1]][[1]],degree_of_freedom=test[[2]][[1]],p_value=test[[3]][[1]]) class(out) <- "myr" out } printer.myr <- function(x){ 
if(x$type=="lm"){ cat(">>>>>>>>>>>>>>>>>>>>>>>>>>>> Relevant test: Linear regression <<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n \n") Sys.sleep(4) cat("STAGE I>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Hypothesis >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \nH0 : B = 0 against H1 : B != 0\n \n") } else if(x$type=="ttest"){ cat(">>>>>>>>>>>>>>>>>>>>>>>>>>>> Relevant test: t test <<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n \n") Sys.sleep(4) cat("STAGE I>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Hypothesis >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \nH0 : mu1 = mu2 against H1 : mu1 != mu2\n \n") } else if(x$type=="chitest"){ cat(">>>>>>>>>>>>>>>>>>>>>>>>>>>> Relevant test: chi test <<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n \n") Sys.sleep(4) cat("STAGE I>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Hypothesis >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \nH0 : the two variables are independent against each other. H1 : not H0.\n \n") } Sys.sleep(2) assumption(x$param,x$type) Sys.sleep(4) cat("STAGE III>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Results >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n") if(x$type=="lm"){ cat("I beta: ", x$beta, "\nII CI: (", x$CI$min, ",", x$CI$max, ")\nIII degree of freedom: ", x$degree_of_freedom, "\nIV critical value : ", x$t_value, "\nV p value: ", x$p_value, "\n \n") } else if(x$type=="ttest"){ cat("I CI: (", x$CI$min, ",", x$CI$max, ")\nII degree of freedom: ", x$degree_of_freedom, "\nIII critical value : ", x$t_value, "\nIV p value: ", x$p_value, "\n \n") } else if(x$type=="chitest"){ cat("I degree of freedom: ", x$degree_of_freedom, "\nII critical value : ", x$t_value, "\nIII p value: ", x$p_value, "\n \n") } Sys.sleep(5) cat("STAGE IV>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Decision >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n") Sys.sleep(3) if (x$p_value < 0.05){ cat("REJECT: p-value = ", x$p_value, " < 0.05\n \n") } else { cat("DO NOT REJECT: p-value = ", x$p_value, " > 
0.05\n \n") } Sys.sleep(4) cat("STAGE V>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Conclusion >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n") if (x$type=="lm"){ if (x$p_value <0.05){ cat("There is a relationship between height and weight: As the P-value is very small, we have very strong evidence to reject H0. I.E. very strong evidence that the slope parameter is significant and there is a relationship between the height and weight of the sample population.") } else if(x$p_value >0.05){ cat("There isn't any relationship between height and weight: As the P-value is large, we have no evidence to reject H0. I.E. no evidence that the slope parameter is significant and there isn't any relationship between the height and weight of the sample population.") } } else if(x$type=="ttest"){ if (x$p_value <0.05){ cat("The mean height of male and female are NOT the same: As the P-value is very small, we have very strong evidence to reject H0. I.E. very strong evidence that the mean height of male is not the same as the mean height of female.") } else if(x$p_value >0.05){ cat("The mean height of male and female are the same: As the P-value is large, we have no evidence to reject H0. I.E. no evidence that the mean height of male is not the same as the mean height of female.") } } else if(x$type=="chitest"){ if (x$p_value <0.05){ cat("Gender affects the amount of physical activity: As the P-value is very small, we have very strong evidence to reject H0. I.E. very strong evidence that the two variables are dependent against each other. Gender affects the physical activity.") } else if(x$p_value >0.05){ cat("Gender does NOT affect the amount of physical activity: As the P-value is large, we have no evidence to reject H0. I.E. no evidence that the two variables are dependent against each other. 
The two variables are independent against each other and there is no association between gender and the amount of physical acitivity.") } } } printer <- function(x){ UseMethod("printer") }
a4c1087ed21cedafa72cbdf3cb2f34bbc217e292
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/RSDA/examples/sym.kmeans.Rd.R
9c7b4f3784107559ca6124db9dc3f65181bb39b2
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
187
r
sym.kmeans.Rd.R
library(RSDA) ### Name: sym.kmeans ### Title: Symbolic k-Means ### Aliases: sym.kmeans ### Keywords: Kmeans Symbolic ### ** Examples data(oils) sk<-sym.kmeans(oils,k=3) sk$cluster
5e5d33320b97da7c500becab519de981d051ca41
4642e25457ecfdaf564ec83133084715de71818a
/DQ Functions/summarise-changes.R
0ba1cc9a09f4da48251ff2d1671c66dc4e96038d
[]
no_license
DasOakster/wilko-data-quality
4639743b654e79c7db982f1ad19549f94a680d41
c65642bc3a4fd87055e2edeced1b7092dc5a4486
refs/heads/master
2021-09-13T03:01:59.193182
2018-04-12T14:26:08
2018-04-12T14:26:08
123,016,410
0
0
null
null
null
null
UTF-8
R
false
false
2,372
r
summarise-changes.R
count.changes <- function(psa1) { library(dplyr) psa1.dir <- paste("D:/OneDrive/Work Files/Wilko/Data Cleanse/",psa1,"/Uploads/",sep="") update.age.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Age.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.assembly.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Assembly.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.brand.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Brand.csv",sep = ""),header = TRUE) update.capacity.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Capacity.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.coverage.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Coverage.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.model.no.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Model.Number.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.size.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Size.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.pack.qty.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Pack.Qty.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.colour.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Colour.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.material.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Material.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.web.description.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Web.Description.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.washable.data <<- read.csv(paste(psa1.dir,psa1,"_Updates_Washable.csv",sep = ""),header = TRUE,stringsAsFactors = FALSE) update.brand.data <- update.brand.data[,c("PSA_1","PSA_2","Article","Web.Description","Web.Description","Brand","Title.Brand")] colnames(update.brand.data) <- c("PSA_1","PSA_2","Article","Web.Description","Old.Value","New.Value","Change") update.brand.data$Change <- "Update" a <<- 
rbind(update.age.data,update.assembly.data,update.capacity.data,update.coverage.data,update.model.no.data, update.size.data,update.pack.qty.data,update.colour.data,update.material.data,update.web.description.data, update.washable.data,update.brand.data) b <<- a[a$Change != "Keep",] change.file <<- data.frame(count(b,b$Change)) write.csv(change.file,paste(psa1.dir,"Total_Changes.csv",sep = "")) }
9bb382b36a186f9e452d22d612270717130b7121
0d3f737aa66620cd3c9cdd5d09b11340873ff099
/run_analysis.R
7726929f8956756d307d1d1403f809685e38ba6b
[]
no_license
tjdurrant/Data_Cleaning_Assignment
c6a4076f83a309d1b033e569c4c86676e5b69442
9610cb2ea4b641f2fa05750445948f297cfbbc63
refs/heads/master
2021-01-22T22:09:16.622064
2017-03-19T23:21:42
2017-03-19T23:21:42
85,513,158
0
0
null
null
null
null
UTF-8
R
false
false
2,002
r
run_analysis.R
library(dplyr) #Read in features and activites data features <- read.table("./features.txt") activity_labels <- read.table("./activity_labels.txt") #Read in training data x_train <- read.table("./x_train.txt") y_train <- read.table("./y_train.txt") subject_train <- read.table("./subject_train.txt") #Read in test data x_test <- read.table("./x_test.txt") y_test <- read.table("./y_test.txt") subject_test <- read.table("./subject_test.txt") #rename variables y_train <- rename(y_train, activityid = V1) y_test <- rename(y_test, activityid = V1) subject_train <- rename(subject_train, subjectID = V1) subject_test <- rename(subject_test, subjectID = V1) #add acivity variable y_train$activity <- factor(y_train$activityid, levels=activity_labels$V1, labels=activity_labels$V2) y_test$activity <- factor(y_test$activityid, levels=activity_labels$V1, labels=activity_labels$V2) #rename X variables names(x_train) <- features[,2] names(x_test) <- features[,2] #add Train or Test variable x_train$datatype <- "Train" x_test$datatype <- "Test" #cbind y and subject y_train <- cbind(y_train, subject_train) y_test <- cbind(y_test, subject_test) #cbind y and x tidy_data_train <- cbind(y_train, x_train) tidy_data_test <- cbind(y_test, x_test) #rbind train and test tidy_data <- rbind(tidy_data_train, tidy_data_test) head(tidy_data$activity, n = 100) #grep mean and std variables formatted_tidy_data <- grep("activity|subject|mean|std", names(tidy_data), value = TRUE) formatted_tidy_data tidy_data <- tidy_data[,formatted_tidy_data] tidy_data write.table(tidy_data, "./tidy_data.txt", row.name=FALSE) #group_by subject, activity. average remaining variables. activity_subject_data <- tidy_data %>% group_by(subjectID, activity) %>% summarise_each(funs(overallmean = mean)) write.table(activity_subject_data, "./activity_subject_data.txt", row.name=FALSE) #########################################################
c34a74d648ae975bfa23a3d6b2d9e4c7e5b553a5
78a12d66cf2c127fdd2c368b7b251972cbc0330d
/code.R
624fe0790d0e9759928b0508d8677b8908df0af1
[]
no_license
jqyangDaz/Daz
b3d3095cd0a48dcca060cef0c6b4391ef3f72206
43de7196431cdd0ae1d694d4a228c0c45ba96c24
refs/heads/master
2020-03-16T21:41:02.617593
2018-05-11T08:10:56
2018-05-11T08:10:56
133,009,907
1
0
null
null
null
null
GB18030
R
false
false
7,116
r
code.R
library(plotrix) library(grid) library(vcd) library(tseries) #导入所需的包 homi<-read.csv('e:/database.csv') #录入数据 nrow(homi) #查看数据行数 length(homi) #查看数据列数 View(homi) #预览数据 attach(homi) bar1<-table(Month) #获取Month数据的的分类频数 barplot(bar1) #绘制条形图 pie1<-table(Year) pie(pie1,main='年份和凶杀案数量的饼图') #绘制饼图 lals_pct <- paste(names(pie1), " ", round(pie1/sum(pie1), 2)*100, "%", sep="") #计算百分比 lals_pct bar2<-table(Year) barplot(bar2,main = 'bar of year',col='pink') #绘制条形图 pie2<-table(Victim.Sex) #获取频数 lals_pct1 <- paste(names(pie2), " ", round(pie2/sum(pie2), 2)*100, "%", sep="") #计算百分比 pie3D(pie2,labels=lals_pct,main = 'pie of sex',explode=0.1) #绘制3D饼图 pie3<-table(Victim.Race) #获取频数 lals_pct2 <- paste(round(pie3/sum(pie3),2)*100, "%", sep="") #计算百分比 pie(pie3,main ="pie of Vicitim's race",labels = lals_pct2, col=c("purple", "violetred1", "green3","cornsilk", "cyan")) #绘制饼图 legend(0.8,1.25,names(pie3),fill = c("purple", "violetred1", "green3","cornsilk", "cyan")) #添加标注 pie4<-table(Perpetrator.Sex) #获取频数 lals_pct3 <- paste(names(pie4), " ", round(pie4/sum(pie4),2)*100, "%", sep="") #计算百分比 pie3D(pie4,labels = lals_pct3,explode = 0.1) #绘制3D饼图 pie5<-table(Perpetrator.Race) #获取频数 lals_pct4 <- paste(names(pie5), " ", round(pie5/sum(pie5),2)*100, "%", sep="") #计算百分比 pie(pie5,labels = lals_pct4,main = "pie of Perpetrator's Race") #绘制饼图 bar2<-table(Relationship) #获取频数 barplot(bar2,main="Hist of Relationship",col='purple') #绘制条形图 summary(Victim.Age) #查看数据概要 Victimage<-c(Victim.Age) #转换为向量 V<-Victimage[Victimage<998] #排除年龄为998的数据 summary(V) #查看数据概要 boxplot(V,main = 'box of age',ylab='years') #绘制箱线图 summary(Perpetrator.Age) P<-na.omit(Perpetrator.Age) #删除缺失值 summary(P) boxplot(P,main = 'box of age',ylab='years') chisq.test(Victim.Count,Perpetrator.Count) #卡放拟合优度检验 chisq.test(Year,Weapon) pie6<-table(Weapon) lals_pct5 <- paste(names(pie6), " ", round(pie6/sum(pie6),2)*100, "%", sep="") pie3D(pie6,explode=0.1,labels=lals_pct5) barplot(pie6,col=c('blue','red','orange','purple','yellow'),main = 'Bar of 
Weapon') w1<-subset(homi,Year==1980,select=c(Year,Weapon)) #提取各年的武器数据 w2<-subset(homi,Year==1990,select=c(Year,Weapon)) w3<-subset(homi,Year==2000,select=c(Year,Weapon)) w4<-subset(homi,Year==2010,select=c(Year,Weapon)) bar3<-table(w1$Weapon) bar4<-table(w2$Weapon) bar5<-table(w3$Weapon) bar6<-table(w4$Weapon) par(mfrow=c(2,2)) #绘制图时使用2*2格式 barplot(bar3,main = '1980') barplot(bar4,main = '1990') barplot(bar5,main = '2000') barplot(bar6,main = '2010') #绘制条形图 fan1<-table(Crime.Solved) lals_pct6 <- paste(names(pie7), " ", round(pie7/sum(pie7),2)*100, "%", sep="") fan.plot(pie7,labels = lals_pct6,col=c('pink','blue'),radius=0.8) r1<-subset(homi,Year==1980,select=c(Year,Crime.Solved)) r2<-subset(homi,Year==1990,select=c(Year,Crime.Solved)) r3<-subset(homi,Year==2000,select=c(Year,Crime.Solved)) r4<-subset(homi,Year==2010,select=c(Year,Crime.Solved)) #提取案件是否结案的数据 fan2<-table(r1$Crime.Solved) fan3<-table(r2$Crime.Solved) fan4<-table(r3$Crime.Solved) fan5<-table(r4$Crime.Solved) par(mfrow=c(2,2)) lals_pct7 <- paste(names(fan2), " ", round(fan2/sum(fan2),2)*100, "%", sep="") lals_pct8 <- paste(names(fan3), " ", round(fan3/sum(fan3),2)*100, "%", sep="") lals_pct9 <- paste(names(fan4), " ", round(fan4/sum(fan4),2)*100, "%", sep="") lals_pct10 <- paste(names(fan5), " ", round(fan5/sum(fan5),2)*100, "%", sep="") pie(fan2,col=c('pink','blue'),labels=lals_pct7,radius=0.8,main='1980') pie(fan3,col=c('pink','blue'),labels=lals_pct8,radius=0.8,main='1990') pie(fan4,col=c('pink','blue'),labels=lals_pct9,radius=0.8,main='2000') pie(fan5,col=c('pink','blue'),labels=lals_pct10,radius=0.8,main = '2010') Y<-as.data.frame(table(Year)) #提取数据并强制转化为数据框类型 Y<-subset(Y,select = c(counts)) tscounts<- ts(Y,start = c(1980)) #将数据转化为时间序列 plot.ts(tscounts) #绘制时间序列图 tscountsdiff<-diff(tscounts,differences=1) #将序列进行一阶差分 plot.ts(tscountsdiff) #绘制一阶差分后的图 acf(tscountsdiff,lag.max = 20) pacf(tscountsdiff,lag.max = 20) #绘制acf和pacf图 counts<-arima(tscountsdiff,order=c(1,0,0)) #构建AR模型 
a<-subset(homi,Victim.Age<998,select = c(Year,Victim.Age,Perpetrator.Age)) detach(homi) attach(a) par(mfrow=c(1,2)) plot(Victim.Age~Year,main='Year vs Vicitim.Age') plot(Perpetrator.Age~Year,main='Year vs Perpetrator.Age') lm.1<-lm(Perpetrator.Count~Perpetrator.Age,data=homi) #回归模型的拟合 summary(lm.1) #回归方程结果的查看 sex<-subset(homi,Perpetrator.Sex!='Unknown',select=c(Perpetrator.Count,Perpetrator.Sex,Perpetrator.Age)) detach(homi) attach(sex) as.factor(Perpetrator.Sex) #将性别转化为因子变量 lm.2<-lm(Perpetrator.Count~-1+Perpetrator.Sex,data=sex) #没有截距项的回归拟合 summary(lm.2) lm.3<-lm(Perpetrator.Count~-1+Perpetrator.Sex+Perpetrator.Age,data=sex) #没有截距项的多元线性回归 summary(lm.3)
37f6cb4cde2cba89d93364960ad0366321fe2a7e
99e62a6f0161cc9de03bf066ae7d27e9cf25c179
/sc_preprocess.R
e4e3da05a63da525aaec9e249b4fa17d501f6fda
[]
no_license
ardadurmaz/trajectory-sc
09431f8e1037a3548916b1d5df8e04854422a56e
6164659b7b415115a4aacf90991ca38e8b754e84
refs/heads/master
2020-04-17T15:11:18.370858
2019-01-20T17:30:12
2019-01-20T17:30:12
null
0
0
null
null
null
null
UTF-8
R
false
false
2,503
r
sc_preprocess.R
library(Matrix) library(cellrangerRkit) library(ggplot2) library(scran) library(scater) library(edgeR) library(limma) ## Read Data ## temp.path <- '~/SingleCellData/single_cell_analysis/Marusyk_Joint3Aggr_ALK_redo' genome <- 'GRCh38' count <- load_cellranger_matrix(temp.path) aggr.data <- read.csv('~/SingleCellData/single_cell_analysis/Marusyk_Joint3Aggr_ALK_redo/outs/aggregation_csv.csv') targets <- as.character(aggr.data$library_id[as.numeric(gsub('^.+-', replacement = '', colnames(count)))]) count.mat <- as.matrix(count) ## Map Gene Symbols ## ensembl <- useMart('ENSEMBL_MART_ENSEMBL', dataset = 'hsapiens_gene_ensembl') mapping <- getBM(mart = ensembl, attributes = c('ensembl_gene_id', 'hgnc_symbol'), filters = 'ensembl_gene_id', values = rownames(count.mat)) mapping <- na.omit(mapping[mapping$ensembl_gene_id != '' & mapping$hgnc_symbol != '',]) mapping <- mapping[match(rownames(count.mat), table = mapping$ensembl_gene_id),] rownames(mapping) <- 1:nrow(mapping) idx <- is.na(mapping$ensembl_gene_id) | is.na(mapping$hgnc_symbol) count.mat <- count.mat[!idx,] mapping <- mapping[!idx,] rownames(mapping) <- 1:nrow(mapping) idx <- order(Matrix::rowMeans(count.mat), decreasing = TRUE) count.mat <- count.mat[idx,] mapping <- mapping[idx,] idx <- duplicated(mapping$hgnc_symbol) count.mat <- count.mat[!idx,] mapping <- mapping[!idx,] rownames(count.mat) <- mapping$hgnc_symbol ## Filter ## sc.data <- SingleCellExperiment(assays = list(counts = as(count.mat, 'dgCMatrix'))) sc.data <- calculateQCMetrics(sc.data, use_spikes = FALSE, detection_limit = 0) plotExprsFreqVsMean(sc.data) keep.total <- sc.data$total_counts > 6e3 ## Remove cells based on total counts keep.n <- sc.data$total_features_by_counts > 1000 ## Remove cells based on number of genes expressed sc.data <- sc.data[,keep.total & keep.n] n.exprs <- nexprs(sc.data, byrow = TRUE, detection_limit = 0) keep_feature <- n.exprs > 250 sc.data <- sc.data[keep_feature,] ## Normalize ## targets <- 
as.character(aggr.data$library_id[as.numeric(gsub('^.+-', replacement = '', colnames(sc.data)))]) sc.data <- computeSumFactors(sc.data, cluster = targets) saveRDS(sc.data, file = '~/sc_trajectory/data/processedData.rds')
6eaf6ec142fe7b5d8cac6583dacbf196c1bc0673
76f353cb3366684c4708b36662c44f86204381d2
/script/DANMPUT.R
17c8d38314255d2aa07db99449ca094521a6815e
[]
no_license
barbarian1803/saham_analisis
cdd431499348db370072eaec71490a6bbb2ff590
b5c33108faf16e8b4fcda9e0ef9f9a8a9b08c16e
refs/heads/master
2021-01-24T08:09:07.733311
2016-11-21T05:38:17
2016-11-21T05:38:17
70,199,529
0
0
null
null
null
null
UTF-8
R
false
false
184
r
DANMPUT.R
DANMPUT <- read.table("RD/DANMPUT.csv",header=FALSE,quote="",sep="\t") colnames(DANMPUT) <- c("date","price") summary(DANMPUT$price) DANMPUT[duplicated(DANMPUT$date),] rm(DANMPUT_rev)
c0453a013dbea734608437d85644e4eadf80a419
cef3b5e2588a7377281a8f627a552350059ca68b
/cran/paws.game.development/man/gamelift_start_matchmaking.Rd
2d58a972f016a45ef5bbaf276e751ebfe5e7d7e7
[ "Apache-2.0" ]
permissive
sanchezvivi/paws
b1dc786a9229e0105f0f128d5516c46673cb1cb5
2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05
refs/heads/main
2023-02-16T11:18:31.772786
2021-01-17T23:50:41
2021-01-17T23:50:41
null
0
0
null
null
null
null
UTF-8
R
false
true
3,588
rd
gamelift_start_matchmaking.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gamelift_operations.R \name{gamelift_start_matchmaking} \alias{gamelift_start_matchmaking} \title{Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules} \usage{ gamelift_start_matchmaking(TicketId, ConfigurationName, Players) } \arguments{ \item{TicketId}{A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.} \item{ConfigurationName}{[required] Name of the matchmaking configuration to use for this request. Matchmaking configurations must exist in the same Region as this request. You can use either the configuration name or ARN value.} \item{Players}{[required] Information on each player to be matched. This information must include a player ID, and may contain player attributes and latency data to be used in the matchmaking process. After a successful match, \code{Player} objects contain the name of the team the player is assigned to.} } \description{ Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. If you're also using GameLift hosting, a new game session is started for the matched players. Each matchmaking request identifies one or more players to find a match for, and specifies the type of match to build, including the team configuration and the rules for an acceptable match. When a matchmaking request identifies a group of players who want to play together, FlexMatch finds additional players to fill the match. Match type, rules, and other features are defined in a \code{MatchmakingConfiguration}. To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. 
For each player, you must also include the player attribute values that are required by the matchmaking configuration (in the rule set). If successful, a matchmaking ticket is returned with status set to \code{QUEUED}. Track the status of the ticket to respond as needed. If you're also using GameLift hosting, a successfully completed ticket contains game session connection information. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration. \strong{Learn more} \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html}{Add FlexMatch to a Game Client} \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html}{Set Up FlexMatch Event Notification} \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-tasks.html}{FlexMatch Integration Roadmap} \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html}{How GameLift FlexMatch Works} \strong{Related operations} \itemize{ \item StartMatchmaking \item DescribeMatchmaking \item StopMatchmaking \item AcceptMatch \item StartMatchBackfill } } \section{Request syntax}{ \preformatted{svc$start_matchmaking( TicketId = "string", ConfigurationName = "string", Players = list( list( PlayerId = "string", PlayerAttributes = list( list( S = "string", N = 123.0, SL = list( "string" ), SDM = list( 123.0 ) ) ), Team = "string", LatencyInMs = list( 123 ) ) ) ) } } \keyword{internal}
453571b12e34505bee05f29aa647616ba55e6ac1
7b00b082bb032398858c863965607bf47c2e6513
/artigo_pairs_def.R
427436c5972b347d847c53129c138a8173444a69
[]
no_license
lemuelemos/Pairs_trading
b9f79978caebc2891767d4b10e6924a762269b89
95b52fc80cac9c092ca2671964fa4478ad02ac3c
refs/heads/master
2021-04-27T11:26:28.688792
2019-04-01T03:01:40
2019-04-01T03:01:40
122,562,684
0
0
null
null
null
null
UTF-8
R
false
false
9,068
r
artigo_pairs_def.R
library(doParallel) library(partialCI) library(readxl) library(xts) library(stringr) library(dplyr) library(plyr) library(timeSeries) library(Rcpp) ##### Import data and cleaning NA's #source('cpp_codes.R') sourceCpp("cpp_codes.cpp") ibrx_2007_2018 <- read_excel("ibrx last price 2007 até 2018_2.xlsx") #### Reading the data ibrx_2007_2018$Dates <- as.Date(ibrx_2007_2018$Dates) ## Setting the format of the dates column ibrx_2007_2018 <- as.data.frame(ibrx_2007_2018) ## Transform in data_frame to easy handling ibrx_2007_2018[,2:ncol(ibrx_2007_2018)] <- apply(ibrx_2007_2018[,2:ncol(ibrx_2007_2018)],2,as.numeric) ## Transform the data form char type to numeric type ibrx_2007_2018 <- xts(ibrx_2007_2018[,-1],order.by = ibrx_2007_2018$Dates) ## transform in xts to easy handling with time series ibrx_2007_2018_integridade <- apply(ibrx_2007_2018,2, function(x) sum(is.na(x))/nrow(ibrx_2007_2018))*100 ## Calculate the percentage of missing data ibrx_2007_2018_70 <- ibrx_2007_2018[,names(ibrx_2007_2018_integridade[which(ibrx_2007_2018_integridade<10)])] ## Taking the coluns eith more than 90% of integrite ibrx_2008_2017_70 <- ibrx_2007_2018_70["2008/2017"] ## subsetting to eliminate more missing data. ibrx_2008_2017_70 <- na.spline(ibrx_2008_2017_70) ## remove "NA's" spline method Missing values (NAs) are replaced by linear interpolation via approx or cubic spline interpolation via spline, respectively. 
rm(list=c("ibrx_2007_2018","ibrx_2007_2018_70")) ## remove objects that will not be use Nomes <- colnames(ibrx_2008_2017_70) ## Taking the names of equity's Nomes <- str_sub(Nomes, 1,6) colnames(ibrx_2008_2017_70) <- Nomes #ibrx_2008_2017_70 <- ibrx_2008_2017_70[,1:20] ### Setting the window of estimation estimation_method <- "fixed" dir.create(file.path(getwd(), "resultados")) ### resultados_por_tr <- list(NULL) threshold <- matrix(c(1,1,0.5,0),2,2) formation_windown <- c(503,1007) names(formation_windown) <- c("2Y","4Y") trading_days <- c(90,180) rolling_window <- c(63,126) for(pp in 1:length(formation_windown)){ ret_port <- as.list(NULL) pairs_est <- list(NULL) pairs_est_tested <- list(NULL) trading_return <- as.list(NULL) select_port <- as.list(NULL) retornos <- as.list(NULL) time_window <- as.list(NULL) ret_aux <- as.list(NULL) trades <- list(NULL) returns <- list(NULL) window_test <- seq(1,nrow(ibrx_2008_2017_70),by=(rolling_window[pp])) resultados_por_tr <- list(NULL) for(kk in 1:nrow(threshold)){ tr <- threshold[kk,] for(p in seq_along(window_test)){ test_period <- window(ibrx_2008_2017_70, start=time(ibrx_2008_2017_70)[window_test[p]], end=if(is.na(time(ibrx_2008_2017_70)[window_test[p]+formation_windown[pp]])){break} else{time(ibrx_2008_2017_70)[window_test[p]+formation_windown[pp]]}) time_window[[p]] <- time(test_period) test_period_est <- as.data.frame(test_period) test_period_est <- test_period_est[,1:20] ### Estimating pairs cat("\r",paste0("Estimating the pairs from portfolio " ,p,". 
Period from ", min(time_window[[p]]), " to ",max(time_window[[p]]))) no_cores <- detectCores() cl <- makeCluster(no_cores) clusterExport(cl, "test_period_est") clusterEvalQ(cl, library(partialCI)) pares <- parLapply(cl,test_period_est, function(x) apply(test_period_est,2, function(y) if(x!=y){try(fit.pci(x,y))})) stopCluster(cl) pares <- unlist(pares, recursive = F) pares <- pares[!sapply(pares,is.null)] pares <- pares[!sapply(pares, function(x) is.na(x$rho.se))] names(pares) <- gsub("TIET11vs","TIET11 vs", paste0(str_sub(names(pares), 1,6),"vs ", str_sub(names(pares), 8,13))) pares <- pares[!sapply(pares,is.null)] ### Retirando os valores vazios pares <- pares[!sapply(pares, function(x) is.na(x$rho.se))] ### Retirando os pares com problemas de estimação #saveRDS(pares,file=paste0(getwd(),"/resultados/pair_", #min(time_window[[p]]),"_to_", #max(time_window[[p]]),"_portfolio",p,"_fmw_", #names(formation_windown)[pp],"_tr_(",tr[1],",",tr[2],")")) pairs_est[[p]] <- pares #### Taking the pairs with R square greater than 0.5 cat("\r",paste0("Taking the pais with R2>0.5. Portfolio ",p)) paresR <- pares[sapply(pares,function(x) x$pvmr > 0.5)] paresR <- paresR[sapply(paresR,function(x) x$rho > 0.5)] #rm(pares) ### Testing partial Cointegration cat("\r",paste0("Testing for partial coitegration. Portfolio ",p)) cl <- makeCluster(no_cores) clusterExport(cl, "paresR") clusterEvalQ(cl, library(partialCI)) paresRtested <- paresR[parSapply(cl,paresR, FUN = function(x) which.hypothesis.pcitest(test.pci(x))=="PCI")] stopCluster(cl) #rm(paresR) #saveRDS(paresRtested,file=paste0(getwd(),"/resultados/pairRtested_", #min(time_window[[p]]),"_to_", #max(time_window[[p]]),"_portfolio",p,"_fmw_", #names(formation_windown)[pp],"_tr_(",tr[1],",",tr[2],")")) pairs_est_tested[[p]] <- paresRtested ### Estimation of ocult states cat("\r",paste0("Estimation of ocult states. 
Portfolio ",p)) paresRtestedM <- lapply(paresRtested, function(x) statehistory.pci(x)) betas_formation <- ldply(paresRtested, function(x) x$beta) colnames(betas_formation) <- c("Pares","betas") #rm(paresRtested) ############### Normalizando O M cat("\r",paste0("Normalizing the M. Portfolio",p)) Zm_fornation <- lapply(paresRtestedM, function(x) x$M/sd(x$M)) Zm_fornation <- as.data.frame(Zm_fornation) colnames(Zm_fornation) <- gsub("\\."," ",names(Zm_fornation)) #rm(paresRtestedM) ### sign of operations cat("\r",paste0("Sign for operations - threshold[",tr[1],",",tr[2],"]. Portolio ",p)) sinal <- matrix(data = rep(0,ncol(Zm_fornation)*nrow(Zm_fornation)),ncol = ncol(Zm_fornation),nrow = nrow(Zm_fornation)) sinal[1,1:ncol(sinal)] <- "Fora" sinal <- sncalc(ncol(Zm_fornation),nrow(Zm_fornation),as.matrix(Zm_fornation), tr=tr, sinal=sinal) sinal<- as.data.frame(sinal) colnames(sinal) <- names(Zm_fornation) sinal %>% mutate_if(is.factor,as.character) -> sinal #as.xts(sinal, order.by = time(test_period)) ############# Return Calc cat("\n",paste0("Return Calc. 
Portfolio",p)) parestrade <- list(NULL) for(j in 1:length(sinal)){ parestrade[[j]] <- cbind(test_period[,str_sub(names(sinal)[j],end=6)], test_period[,str_sub(names(sinal)[j],start=-6)]) names(parestrade)[j] <- names(sinal)[j] colnames(parestrade[[j]]) <- cbind(str_sub(names(sinal)[j],end=6), str_sub(names(sinal)[j],start=-6)) } invest <- data.frame(matrix(data = rep(1,ncol(Zm_fornation)*nrow(Zm_fornation)),ncol = ncol(Zm_fornation),nrow = nrow(Zm_fornation))) retorno <- data.frame(matrix(data = rep(0,ncol(Zm_fornation)*nrow(Zm_fornation)),ncol = ncol(Zm_fornation),nrow = nrow(Zm_fornation))) ttf <- data.frame(matrix(data = rep(0,ncol(Zm_fornation)*nrow(Zm_fornation)),ncol = ncol(Zm_fornation),nrow = nrow(Zm_fornation))) results <- NULL par_est <- data.frame(NULL) for(j in 1:length(parestrade)){ par_est <- parestrade[[j]] results <- returcalc(as.matrix(sinal[,j]), as.matrix(par_est),betas = betas_formation$betas[j],invest = invest[,j]) invest[,j] <- results[[1]] retorno[,j] <- results[[2]] ttf[,j] <- results[[2]] } colnames(invest) <- names(parestrade) colnames(retorno) <- names(parestrade) colnames(ttf) <- names(parestrade) ################ Cáculo dos Retornos Totais, Desvios Padrões e Sharpe. cat("\r",paste0("Calculating return and sharpe. 
Portfolio ",p)) portret <- as.data.frame(matrix(data = rep(0,ncol(Zm_fornation)*3),ncol = ncol(Zm_fornation),nrow = 3)) for(f in 1:length(invest)){ portret[1,f] <- ((invest[nrow(invest),f]/invest[1,f])-1)*100 portret[2,f] <- sd(invest[,f]) portret[3,f] <- portret[1,f]/portret[2,f] colnames(portret)[f] <- names(parestrade)[f] } portret <- t(portret) ## Retornos Totais colnames(portret) <- c("Retorno Total","Desvio Padrão","Sharpe") ret_port[[p]] <- portret ## Retornos Totais names(ret_port)[p] <- paste0("Return Formation Period ",p) ##################################################### ############### Periodo de Trading ################## ##################################################### if(estimation_method == "fixed"){ source("trading_period_fixed_window.R") } else {source("trading_period_rolling_window.R")} } #### Salvando Dados Importantes source('res_data_est.R') saveRDS(pairs_est,file = paste0(getwd(),"/resultados/pairs_fmw_", names(formation_windown)[pp],"_tr(",tr[1],",",tr[2],")")) saveRDS(pairs_est_tested,file = paste0(getwd(),"/resultados/pairs_tested_fmw_", names(formation_windown)[pp],"_tr(",tr[1],",",tr[2],")")) } }
ee2499ee634b668899d58ee3276c10a51944c3f3
a520b1ed88904ce8090687cec3284111a871ac7a
/StatisticalRethinking/Exercise_Ch5.R
878a86601afcbfb5d29f5508f45824260b681a63
[]
no_license
EijiGorilla/R-Statistics
b59cb499cb24a383043f58f415a5b4d03bee863a
e900a786dcf80b21eeab29fefb9bed409d4907d0
refs/heads/master
2020-04-18T11:52:19.386254
2019-01-25T08:50:24
2019-01-25T08:50:24
167,515,532
0
0
null
null
null
null
UTF-8
R
false
false
22,059
r
Exercise_Ch5.R
library(rethinking) data("WaffleDivorce") d=WaffleDivorce head(d) op=par(mfrow=c(2,2)) d$MedianAgeMarriage.s=(d$MedianAgeMarriage-mean(d$MedianAgeMarriage))/sd(d$MedianAgeMarriage) plot(Divorce~MedianAgeMarriage.s,data=d,col=col.alpha(rangi2,0.5)) #fit model m5.1=map( alist( Divorce~dnorm(mu,sigma), mu<-a+b*MedianAgeMarriage.s, a~dnorm(10,10), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5.1,corr=TRUE) # MAP and 89% PI MAM.seq=seq(from=-3,to=3.5,length.out = 30) post=extract.samples(m5.1) mu=link(m5.1,data=data.frame(MedianAgeMarriage.s=MAM.seq)) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) # plot plot(Divorce~MedianAgeMarriage.s,data=d,col=rangi2) abline(m5.1) lines(MAM.seq,mu.mean) shade(mu.PI,MAM.seq) precis(m5.1) d$Marriage.s=(d$Marriage-mean(d$Marriage))/sd(d$Marriage) m5.2=map( alist( Divorce~dnorm(mu,sigma), mu<-a+b*Marriage.s, a~dnorm(10,10), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) plot(Divorce~Marriage.s,data=d,col=rangi2) MA.seq=seq(from=-3,to=3,length.out=40) post=extract.samples(m5.2) mu=sapply(MA.seq,function(Marriage) post$a+post$b*Marriage) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) abline(m5.2) shade(mu.PI,MA.seq) precis(m5.2) head(d) m5.3=map( alist( Divorce~dnorm(mu,sigma), mu<-a+bR*Marriage.s+bA*MedianAgeMarriage.s, a~dnorm(10,10), bR~dnorm(0,1), bA~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5.3,corr=TRUE) library(rethinking) plot(precis(m5.3)) op=par(mfrow=c(2,2)) # Predictor residuals m5.4=map( alist( Marriage.s~dnorm(mu,sigma), mu<-a+b*MedianAgeMarriage.s, a~dnorm(0,10), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) m5.4 mu=coef(m5.4)["a"]+coef(m5.4)["b"]*d$MedianAgeMarriage.s m.resid=d$Marriage.s-mu plot(Marriage.s~MedianAgeMarriage.s,d,col=rangi2) abline(m5.4) for(i in 1:length(m.resid)){ x=d$MedianAgeMarriage.s[i] y=d$Marriage.s[i] lines(c(x,x),c(mu[i],y),lwd=5,col=col.alpha("black",0.7)) } library(rethinking) plot(Marriage.s~MedianAgeMarriage.s,d,col=rangi2) abline(m5.4) for(i in 1:length(m.resid)){ 
x=d$MedianAgeMarriage.s[i] y=d$Marriage.s[i] lines(c(x,x),c(mu[i],y),lwd=0.5,col=col.alpha("black",0.7)) } op=par(mfrow=c(2,2)) plot(Divorce~m.resid,d,col=rangi2) abline(v=0) m5.41=map( alist( MedianAgeMarriage.s~dnorm(mu,sigma), mu<-a+b*Marriage.s, a~dnorm(0,10), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) m5.41 mu=coef(m5.41)["a"]+coef(m5.41)["b"]*d$Marriage.s m.resid=d$MedianAgeMarriage.s-mu plot(MedianAgeMarriage.s~Marriage.s,d,col=rangi2) abline(m5.41) for(i in 1:length(m.resid)){ x=d$Marriage.s[i] y=d$MedianAgeMarriage.s[i] lines(c(x,x),c(mu[i],y),col=col.alpha("black",0.4)) } plot(d$Divorce~m.resid,col=rangi2) abline(v=0,l) ?abline # Counterfactual plots m5.3=map( alist( Divorce~dnorm(mu,sigma), mu<-a+bR*Marriage.s+bA*MedianAgeMarriage.s, a~dnorm(10,10), bR~dnorm(0,1), bA~dnorm(0,1), sigma~dunif(0,10) ),data=d) A.ave=mean(d$MedianAgeMarriage.s) R.seq=seq(from=-3,to=3,length.out = 30) post=extract.samples(m5.3) head(post) pred.data=data.frame(Marriage.s=R.seq,MedianAgeMarriage.s=A.ave) # Compute counterfactual mean divorce mu=link(m5.3,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) # compute counterfactual divorce outcome R.sim=sim(m5.3,data=pred.data,n=1e4) R.sim.mean=apply(R.sim,2,mean) R.sim.PI=apply(R.sim,2,PI,prob=0.89) plot(Divorce~Marriage.s,data=d,type="n") mtext("MedianAgeMarriage.s=0") lines(R.seq,mu.mean) shade(mu.PI,R.seq) shade(R.sim.PI,R.seq) plot(Divorce~MedianAgeMarriage.s,d) R.ave=mean(d$Marriage.s) A.seq=seq(from=-3,to=3,length.out=50) pred.data=data.frame(MedianAgeMarriage.s=A.seq,Marriage.s=R.ave) # compute counterfactual mean median age at marriage mu=link(m5.3,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) # compute counterfactual actual median age at marriage A.sim=sim(m5.3,data=pred.data,n=1e4) A.sim.PI=apply(A.sim,2,PI,prob=0.89) plot(Divorce~MedianAgeMarriage.s,data=d,type="n") mtext("Marriage.s=0") lines(A.seq,mu.mean) shade(mu.PI,A.seq) shade(A.sim.PI,A.seq) # Posterior prediction 
plots # call link to compute predicted mean for each observation of Divorce mu=link(m5.3) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) # plot obs. vs pred. for Divorce plot(mu.mean~d$Divorce,col=rangi2,ylim=range(mu.PI),xlab="Observed Divorce",ylab="Predicted Divorce") abline(a=0,b=1,col=col.alpha("black",0.4)) for(i in 1:nrow(d)){ lines(rep(d$Divorce[i],2),c(mu.PI[1,i],mu.PI[2,i]),col=col.alpha("black",0.7)) } identify(x=d$Divorce,y=mu.mean,labels=d$Loc,cex=0.8) # compute residulas div.resid=d$Divorce-mu.mean # simulate for computing 89% predicted values for acutual divorce div.sim=sim(m5.3,n=1e4) sim.PI=apply(div.sim,2,PI,prob=0.89) # plot predicted residuals by states div.resid o=order(div.resid) dotchart(div.resid[o],labels=d$Loc[o],cex=0.6) abline(v=0,col=col.alpha("black",0.4)) for(i in 1:nrow(d)){ j=o[i] lines(d$Divorce[j]-c(mu.PI[1,j],mu.PI[2,j]),rep(i,2),col=col.alpha("black",0.4)) points(d$Divorce[j]-c(sim.PI[1,j],sim.PI[2,j]),rep(i,2),pch=3,col=col.alpha("black",0.4),cex=0.6) } # data("milk") d=milk dcc=d[complete.cases(d),] head(dcc) m5.5=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+b*neocortex.perc, a~dnorm(45,100), b~dnorm(0,1), sigma~dunif(0,1) ),data=dcc) precis(m5.5,digits=3) plot(kcal.per.g~neocortex.perc,dcc,col=rangi2) np.seq=0:100 pred.data=data.frame(neocortex.perc=np.seq) mu=link(m5.5,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) lines(np.seq,mu.mean) shade(mu.PI,np.seq) str(dcc) plot(kcal.per.g~log(mass),dcc,col=rangi2) dcc$log.mass=log(dcc$mass) m5.6=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+b*log.mass, a~dnorm(45,100), b~dnorm(0,1), sigma~dunif(0,1) ),data=dcc) precis(m5.6) mas.seq=seq(from=-3,to=5,length.out = 50) pred.data=data.frame(log.mass=mas.seq) mu=link(m5.6,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) lines(mas.seq,mu.mean) shade(mu.PI,mas.seq) # m5.7=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+bn*neocortex.perc+bm*log.mass, a~dnorm(45,100), bn~dnorm(0,1), 
bm~dnorm(0,1), sigma~dunif(0,1) ),data=dcc) plot(precis(m5.7,corr=TRUE)) # counterfactual plots: holding body mass constant at its mean mass.ave=mean(dcc$log.mass) neo.seq=seq(from=50,to=80,length.out=50) pred.data=data.frame(neocortex.perc=neo.seq,log.mass=mass.ave) mu=link(m5.7,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) plot(kcal.per.g~neocortex.perc,data=dcc,col=rangi2) lines(neo.seq,mu.mean) shade(mu.PI,neo.seq) # counterfactual plots: holding neocortex (brain mass) constant at its mean neo.ave=mean(dcc$neocortex.perc) plot(kcal.per.g~log.mass,dcc) log.mass.seq=seq(from=-3,to=5,length.out=50) pred.data=data.frame(neocortex.perc=neo.ave,log.mass=log.mass.seq) mu=link(m5.7,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) lines(log.mass.seq,mu.mean) shade(mu.PI,log.mass.seq) plot(neocortex.perc~log.mass,dcc,col=rangi2) # N=100 height=rnorm(N,10,2) leg_prop=runif(N,0.4,0.5) leg_left=leg_prop*height+rnorm(N,0,0.02) leg_right=leg_prop*height+rnorm(N,0,0.02) d=data.frame(height,leg_left,leg_right) m5.8=map( alist( height~dnorm(mu,sigma), mu<-a+bl*leg_left+br*leg_right, a~dnorm(0,100), bl~dnorm(2,10), br~dnorm(2,10), sigma~dunif(0,10) ),data=d) precis(m5.8) precis_plot(precis(m5.8)) post=extract.samples(m5.8) head(post) plot(bl~br,post,col=col.alpha(rangi2,0.1),pch=16) sum_blbr=post$bl+post$br dens(sum_blbr,col=rangi2,lwd=2) m5.9=map( alist( height~dnorm(mu,sigma), mu<-a+bl*leg_left, a~dnorm(10,100), bl~dnorm(2,10), sigma~dunif(0,10) ),data=d) precis(m5.9) precis_plot(precis(m5.9)) # data("milk") d=milk m5.10=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+bf*perc.fat, a~dnorm(0.6,10), bf~dnorm(0,1), sigma~dunif(0,10) ),data=d) m5.11=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+bl*perc.lactose, a~dnorm(0.6,10), bl~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5.10,digits=3) precis_plot(precis(m5.10)) precis(m5.11,digits=3) precis_plot(precis(m5.11)) head(d) plot(kcal.per.g~perc.fat,d,col=rangi2) 
p.fat.seq=seq(from=-10,to=60,length.out = 100) pred.data=data.frame(perc.fat=p.fat.seq) mu=link(m5.10,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) lines(p.fat.seq,mu.mean) shade(mu.PI,p.fat.seq) m5.12=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+bl*perc.fat+br*perc.lactose, a~dnorm(0.6,10), bl~dnorm(0,1), br~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis_plot(precis(m5.12)) pairs(~kcal.per.g+perc.fat+perc.lactose,data=d,col=rangi2) cor(d$perc.fat,d$perc.lactose) # number of plants N=100 #simulate initial heights h0=rnorm(N,10,2) treatment=rep(0:1,each=N/2) fungus=rbinom(N,size=1,prob=0.5-treatment*0.4) h1=h0+rnorm(N,5-3*fungus) d=data.frame(h0=h0,h1=h1,treatment=treatment,fungus=fungus) m5.14=map( alist( h1~dnorm(mu,sigma), mu<-a+bh*h0+bt*treatment, a~dnorm(10,100), c(bh,bt)~dnorm(0,10), sigma~dunif(0,10) ),data=d) precis(m5.14) # data("Howell1") d=Howell1 str(d) m5.15=map( alist( height~dnorm(mu,sigma), mu<-a+bm*male, a~dnorm(178,100), bm~dnorm(0,10), sigma~dunif(0,50) ),data=d) precis(m5.15) post=extract.samples(m5.15) head(post) mu.male=post$a+post$bm PI(mu.male) m5.15b=map( alist( height~dnorm(mu,sigma), mu<-af*(1-male)+am*male, af~dnorm(178,100), am~dnorm(178,100), sigma~dunif(0,150) ),data=d) precis(m5.15b) #5.4.2 library(rethinking) data("milk") d=milk head(d) unique(d$clade) # 4 categories for clade: Ape, NWM, OWM, and S # Create dummy variables for NWM, OWM and S. 
Ape=reference variable d$clade.NWM=ifelse(d$clade=="New World Monkey",1,0) d$clade.OWM=ifelse(d$clade=="Old World Monkey",1,0) d$clade.S=ifelse(d$clade=="Strepsirrhine",1,0) # model m5.16=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a+b.NWM*clade.NWM+b.OWM*clade.OWM+b.S*clade.S, a~dnorm(0.6,10), c(b.NWM,b.OWM,b.S)~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5.16) post=extract.samples(m5.16) head(post) mu.ape=post$a mu.NWM=post$a+post$b.NWM mu.OWM=post$a+post$b.OWM mu.S=post$a+post$b.S precis(data.frame(mu.ape,mu.NWM,mu.OWM,mu.S)) diff.NWM.OWM=post$b.NWM-post$b.OWM quantile(diff.NWM.OWM,prob=c(0.025,0.5,0.975)) d$clade.id=coerce_index(d$clade) unique(d$clade.id) head(d) d1=d[complete.cases(d),] m5.16_alt=map( alist( kcal.per.g~dnorm(mu,sigma), mu<-a[clade_id], a[clade_id]~dnorm(0.6,1), sigma~dunif(0,10) ),data=d) # 5M1 (Spurious association: one predictor influences or highly correlates with outcome and another predictor) N=100 x_real=rnorm(N) x_spur=rnorm(N,x_real) y=rnorm(N,x_real) d=data.frame(y,x_real,x_spur) pairs(d) m.spur=map( alist( y~dnorm(mu,sigma), mu<-a+b1*x_real+b2*x_spur, a~dnorm(0,10), c(b1,b2)~dnorm(0,1), sigma~dunif(0,10) ),data=d ) precis(m.spur) precis_plot(precis(m.spur)) op=par(mfrow=c(2,2)) plot(y~x_real,d,col=rangi2) # Counterfactual plots # holding x_spur constant at its mean x_spur.ave=mean(d$x_spur) x_real.seq=seq(from=-3,to=4,length.out = 50) pred.data=data.frame(x_real=x_real.seq,x_spur=x_spur.ave) mu=link(m.spur,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m.spur,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) lines(x_real.seq,mu.mean) shade(mu.PI,x_real.seq) shade(sim.y.PI,x_real.seq) abline(a=0,b=1) mtext("Holding x_spur constat at its mean") # holding x_real constant at its mean x_real.ave=mean(d$x_real) x_spur.seq=seq(from=-4,to=4,length.out=50) pred.data=data.frame(x_real=x_real.ave,x_spur=x_spur.seq) mu=link(m.spur,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) 
sim.y=sim(m.spur,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(y~x_spur,data=d,col=rangi2) lines(x_spur.seq,mu.mean) shade(mu.PI,x_spur.seq) shade(sim.y.PI,x_spur.seq) # 5M2 Masked relatioinship: Predictors are highly correlated with one another, and one is positivily correlated while # another negatively correlated with an outcome variable # InCorrect method: (i.e. in the masked relationship, fitting a bivariate mode (model with x1 or x2 with y separately N=100 rho=0.7 x_pos=rnorm(N) x_neg=rnorm(N,rho*x_pos,sqrt(1-rho^2)) y=rnorm(N,x_pos-x_neg) d=data.frame(y,x_pos,x_neg) m.mask=map( alist( y~dnorm(mu,sigma), mu<-a+b*x_pos, a~dnorm(0,10), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis_plot(precis(m.mask)) post=extract.samples(m.mask) x_pos.seq=seq(from=-4,to=4,length.out=50) mu=sapply(x_pos.seq,function(x_pos) post$a+post$b) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sapply(x_pos.seq,function(x_pos) rnorm( n=nrow(post), mean=post$a+post$b*x_pos, sd=post$sigma )) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(y~x_pos,d,col=rangi2) lines(x_pos.seq,mu.mean) shade(mu.PI,x_pos.seq) shade(sim.y.PI,x_pos.seq) # Correct method: (i.e. 
usnig both variables captures a good relationship between the predictors and y) N=100 rho=0.7 x_pos=rnorm(N) x_neg=rnorm(N,rho*x_pos,sqrt(1-rho^2)) y=rnorm(N,x_pos-x_neg) d=data.frame(y,x_pos,x_neg) pairs(d) m.mask=map( alist( y~dnorm(mu,sigma), mu<-a+b1*x_pos+b2*x_neg, a~dnorm(0,10), c(b1,b2)~dnorm(0,1), sigma~dunif(0,10) ),data=d ) precis_plot(precis(m.mask)) op=par(mfrow=c(2,2)) # plot using both variables] mu=link(m.mask) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m.mask) sim.y.PI=apply(mu,2,PI,prob=0.89) plot(mu.mean~d$y,ylab="Predicted",xlab="Observed",col=rangi2) abline(a=0,b=1,lty=2) # plot x_post (x) and y (y) with holding x_neg constat at its mean x_neg.ave=mean(d$x_neg) x_pos.seq=seq(from=-4,to=4,length.out = 50) pred.data=data.frame(x_pos=x_pos.seq,x_neg=x_neg.ave) mu=link(m.mask,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m.mask,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(y~x_pos,data=d,col=rangi2) lines(x_pos.seq,mu.mean) shade(mu.PI,x_pos.seq) shade(sim.y.PI,x_pos.seq) # plot x_neg with holding x_pos constant at its mean x_pos.ave=mean(d$x_pos) x_neg.seq=seq(from=-4,to=4,length.out=50) pred.data=data.frame(x_pos=x_pos.ave,x_neg=x_neg.seq) mu=link(m.mask,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m.mask,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(y~x_neg,data=d,col=rangi2) lines(x_neg.seq,mu.mean) shade(mu.PI,x_neg.seq) shade(sim.y.PI,x_neg.seq) #5M4 data("WaffleDivorce") d=WaffleDivorce head(d) unique(d$Location) str(d) d$pct_LDS <- c(0.75, 4.53, 6.18, 1, 2.01, 2.82, 0.43, 0.55, 0.38, 0.75, 0.82, 5.18, 26.35, 0.44, 0.66, 0.87, 1.25, 0.77, 0.64, 0.81, 0.72, 0.39, 0.44, 0.58, 0.72, 1.14, 4.78, 1.29, 0.61, 0.37, 3.34, 0.41, 0.82, 1.48, 0.52, 1.2, 3.85, 0.4, 0.37, 0.83, 1.27, 0.75, 1.21, 67.97, 0.74, 1.13, 3.99, 0.92, 0.44, 11.5 ) # standardize predctors d$Marriage.s=(d$Marriage-mean(d$Marriage))/sd(d$Marriage) 
d$MedianAge.s=(d$MedianAgeMarriage-mean(d$MedianAgeMarriage))/sd(d$MedianAgeMarriage) d$pct_LDS.s=(d$pct_LDS-mean(d$pct_LDS))/sd(d$pct_LDS) # fit model m5M4=map( alist( Divorce~dnorm(mu,sigma), mu<-a+b1*Marriage.s+b2*MedianAge.s+b3*pct_LDS.s, a~dnorm(0,10), c(b1,b2,b3)~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis_plot(precis(m5M4)) str(d) names(d) d1=d[,c(7,15,16,17)] head(d1) pairs(d1) #5H1 data("foxes") d=foxes head(d) str(d) pairs(d) # weight ~ area d$area.c=d$area-mean(d$area) m5h1=map( alist( weight~dnorm(mu,sigma), mu<-a+b*area, a~dnorm(4,2), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) post=extract.samples(m5h1) area.seq=seq(from=1,to=6,length.out = 50) mu=sapply(area.seq,function(area) post$a+post$b) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) plot(weight~area,d,col=rangi2) lines(area.seq,mu.mean) shade(mu.PI,area.seq) precis_plot(precis(m5h1));mtext("Weight~area") # weight ~ groupsize str(d) plot(weight~groupsize,d,col=rangi2) m5h1=map( alist( weight~dnorm(mu,sigma), mu<-a+b*groupsize, a~dnorm(4,2), b~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5h1) post=extract.samples(m5h1) gsize.seq=seq(from=2,to=8,length.out = 50) mu=sapply(gsize.seq,function(groupsize) post$a+post$b) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) lines(gsize.seq,mu.mean) shade(mu.PI,gsize.seq) precis_plot(precis(m5h1));mtext("Weight~groupsize") #5H2 pairs(d[,c(3:5)]) m5h2=map( alist( weight~dnorm(mu,sigma), mu<-a+b1*groupsize+b2*area, a~dnorm(4,2), c(b1,b2)~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis_plot(precis(m5h2)) # holding groupsize constant at its mean gsize.ave=mean(d$groupsize) area.seq=seq(from=1,to=6,length.out=50) pred.data=data.frame(groupsize=gsize.ave,area=area.seq) mu=link(m5h2,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h2,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) # plot plot(weight~area,d,col=rangi2) lines(area.seq,mu.mean) shade(mu.PI,area.seq) shade(sim.y.PI,area.seq) mtext("Holding 
groupsize constant") #holiding area constant at its mean area.ave=mean(d$area) gsize.seq=seq(from=1,to=9,length.out = 50) pred.data=data.frame(groupsize=gsize.seq,area=area.ave) mu=link(m5h2,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h2,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) # plot plot(weight~groupsize,d,col=rangi2) lines(gsize.seq,mu.mean) shade(mu.PI,gsize.seq) shade(sim.y.PI,gsize.seq) mtext("holding area constat") #5H3 pairs(d) m5h3.1=map( alist( weight~dnorm(mu,sigma), mu<-a+b1*avgfood+b2*groupsize, a~dnorm(4,2), c(b1,b2)~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5h3.1,corr=TRUE) precis_plot(precis(m5h3.1)) # holding avgfood at constant avgfood.ave=mean(d$avgfood) gsize.seq=seq(from=1,to=9,length.out = 50) pred.data=data.frame(avgfood=avgfood.ave,groupsize=gsize.seq) mu=link(m5h3.1,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h3.1,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(weight~groupsize,d,col=rangi2);mtext("Holding avgfood constant") lines(gsize.seq,mu.mean) shade(mu.PI,gsize.seq) shade(sim.y.PI,gsize.seq) # Holding groupsize constant gsize.ave=mean(d$groupsize) avgfood.seq=seq(from=0.2,to=1.4,length.out = 50) pred.data=data.frame(avgfood=avgfood.seq,groupsize=gsize.ave) mu=link(m5h3.1,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h3.1,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(weight~avgfood,d,col=rangi2);mtext("holding groupsize constant") lines(avgfood.seq,mu.mean) shade(mu.PI,avgfood.seq) shade(sim.y.PI,avgfood.seq) # posterior prediction mu=link(m5h3.1) mu.mean=apply(mu,2,mean) #predicted plot(mu.mean~d$weight,col=rangi2,xlab="Observed",ylab="Predicted");mtext("Obs vs Pred Weight") abline(a=0,b=1) ## m5h3.2=map( alist( weight~dnorm(mu,sigma), mu<-a+b1*avgfood+b2*groupsize+b3*area, a~dnorm(4,2), c(b1,b2,b3)~dnorm(0,1), sigma~dunif(0,10) ),data=d) precis(m5h3.2,corr=TRUE) 
precis_plot(precis(m5h3.2,corr=TRUE)) # Holding avgfood and groupsize constant avgfood.ave=mean(d$avgfood) gsize.ave=mean(d$groupsize) area.seq=seq(from=1,to=6,length.out=50) pred.data=data.frame(avgfood=avgfood.ave,groupsize=gsize.ave,area=area.seq) mu=link(m5h3.2,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h3.2,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(weight~area,d,col=rangi2);mtext("Holiding avgfood & groupsize constant",cex=0.8) lines(area.seq,mu.mean) shade(mu.PI,area.seq) shade(sim.y.PI,area.seq) # Holding avgfood and area constant avgfood.ave=mean(d$avgfood) area.ave=mean(d$area) gsize.seq=seq(from=1,to=9,length.out = 50) pred.data=data.frame(avgfood=avgfood.ave,groupsize=gsize.seq,area=area.ave) mu=link(m5h3.2,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h3.2,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(weight~groupsize,d,col=rangi2);mtext("holding avgfood & area constant",cex=0.8) lines(gsize.seq,mu.mean) shade(mu.PI,gsize.seq) shade(sim.y.PI,gsize.seq) # Holding groupsize and area constant gsize.ave=mean(d$groupsize) area.ave=mean(d$area) avgfood.seq=seq(from=0.2,to=1.4,length.out = 50) pred.data=data.frame(avgfood=avgfood.seq,groupsize=gsize.ave,area=area.ave) mu=link(m5h3.2,data=pred.data) mu.mean=apply(mu,2,mean) mu.PI=apply(mu,2,PI,prob=0.89) sim.y=sim(m5h3.2,data=pred.data) sim.y.PI=apply(sim.y,2,PI,prob=0.89) plot(weight~avgfood,d,col=rangi2);mtext("holding groupsize & area constant",cex=0.8) lines(avgfood.seq,mu.mean) shade(mu.PI,avgfood.seq) shade(sim.y.PI,avgfood.seq) # obs vs predicted plot mu=link(m5h3.2) mu.mean=apply(mu,2,mean) plot(mu.mean~d$weight,col=rangi2,xlab="Observed",ylab="Predicted");mtext("Obs vs Pred Weight") abline(a=0,b=1)
9e7df1607075a39ffd944df777f1f3f635649e3c
9802eec0484e38be543320d3b40cf6993ac27e47
/tests/testthat.R
a20dd2811727f84397cf0dc71ce80d4a5a094d4b
[]
no_license
ijlyttle/datadict
31e861930f5ade672ceca6501e49952a2981ba85
34abba8bd50e3db0f42e01a059dbe7dc4047b9ef
refs/heads/master
2021-01-22T16:25:19.994574
2014-08-11T13:08:22
2014-08-11T13:08:22
22,684,355
1
0
null
null
null
null
UTF-8
R
false
false
59
r
testthat.R
library(testthat) library(datadict) test_check("datadict")
eee75dcd86db61a01ca8108fb61d59e06a8d0b0b
2c058880d02be07bd9ee7a9e273752ff930075f8
/cachematrix.R
616cd8f96f26841a9cf9b959145c7f7c0adc7e94
[]
no_license
preisacher/ExData_Plotting1
9ce612087070a83193ca8276de2e417719c1cc39
a50d49f52900b5460c5a953724b709ad4eb07a4c
refs/heads/master
2020-12-25T03:00:49.840150
2016-01-26T13:45:57
2016-01-26T13:45:57
42,243,338
0
0
null
2015-09-10T12:34:22
2015-09-10T12:34:22
null
UTF-8
R
false
false
884
r
cachematrix.R
## These functions are very closely immitating the examples we ## were given using vectors. All I really did was to change from ## using Vectors to a Matrix. ## ## The makeCacheMatrix returns a list of functions that can be used to return ## its inverse ... solve. makeCacheMatrix <- function(x = matrix()) { m<-NULL set<-function(y){ x <<- y m <<- NULL } get <- function() x setmatrix <- function(solve) m <<- solve getmatrix <- function() m list(set = set, get = get, setmatrix = setmatrix, getmatrix = getmatrix) } ## This function computes the invers of the matrix that is returned by ## the makeCacheMatrix function cacheSolve <- function(x, ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) ## Return the inverse of the matrix m }
0938fb7d7514875d826e229be67b19534b5a87ea
e4a073e4785f3415e46907c302da09680cd064ff
/cutting_floor/email.R
fe81fcb40b80ca15789e19d518cff4a8515371bb
[]
no_license
sandeepgangarapu/COVID-DAILY-STATSLETTER
e15b280a1da3bea32ab18968728106bf1bbaf6b4
5cfd065e31e9309f1ed33b7f319128ab3458a2eb
refs/heads/master
2021-05-17T06:48:37.217485
2020-04-03T11:42:16
2020-04-03T11:42:16
250,682,036
0
2
null
2020-04-05T08:19:04
2020-03-28T00:39:54
Python
UTF-8
R
false
false
607
r
email.R
# Send the daily COVID stats newsletter PDF to all subscribers.
# Reads the subscriber list from a public Google Sheet, builds a MIME email
# with the generated PDF attached, and sends it via the Gmail API.
# NOTE(review): relies on local, machine-specific state — an absolute
# Windows path via setwd() and pre-existing gmailr OAuth credentials.
library(gmailr)
library(googlesheets4)

# Subscriber sheet; expects a column named email_id.
email_list = read_sheet("https://docs.google.com/spreadsheets/d/1HI0llUuuVHRTSnMacgnIVzQmYgZGUckQ2cRMhPTA7QM/")

# Working directory must contain newsletter_generator.pdf (attached below).
setwd("G:\\My Drive\\Projects\\COVID")
#gm_auth_configure(path = "G:\\My Drive\\Projects\\COVID\\credentials.json")

# Recipients go in BCC so subscribers cannot see each other's addresses.
html_msg <- gm_mime() %>%
  gm_bcc(email_list$email_id) %>%
  gm_from("sandeepgangarapu.iitkgp@gmail.com") %>%
  gm_subject("Latest COVID stats - Automatically Generated Email") %>%
  gm_text_body("If you see a bug, Please reply to this email with the issue.") %>%
  gm_attach_file("newsletter_generator.pdf")

gm_send_message(html_msg)
afadcd52356a814b133a03023f1d2b0d6878fdda
d0a7960cae8a536457425921ae8913516a24a824
/scripts/res_themes.R
04dd86d87c0465d4c34f223b075bc7dc0424b3e5
[ "MIT" ]
permissive
dampierch/herv
6ab3cb82c2b103d0335c51c4cd84eb80656b6e29
9f1ce0e676977b6c8d25fdf446c0807826b80bea
refs/heads/master
2023-03-09T02:01:52.970776
2021-02-23T19:40:00
2021-02-23T19:40:00
284,400,012
1
0
null
null
null
null
UTF-8
R
false
false
5,153
r
res_themes.R
## to be sourced in results.R
## Shared ggplot2 theme objects plus legend-key drawing helpers for the
## results figures. Each ggp_theme_* below starts from ggp_theme_default
## and overrides only what that plot type needs.

## Base theme: white background/grid, top legend with no title, black
## facet strips with white text.
ggp_theme_default <- theme(
    panel.background=element_rect(fill="white"),
    panel.grid.major=element_line(color="white"),
    panel.grid.minor=element_line(color="white"),
    plot.margin=margin(t=1, r=1, b=1, l=1, unit="lines"),
    plot.title=element_text(size=12, face="bold", hjust=0.5),
    plot.subtitle=element_text(size=10, face="plain", hjust=0.5),
    axis.title.x=element_text(size=10, face="plain"),
    axis.title.y=element_text(size=10, face="plain"),
    axis.text.x=element_text(size=9, face="plain"),
    axis.text.y=element_text(size=9, face="plain"),
    # axis.ticks.x=element_blank(),
    # axis.ticks.y=element_blank(),
    axis.line.x.bottom=element_line(),
    axis.line.y.left=element_line(),
    legend.key=element_rect(fill="white"),
    legend.position="top",
    legend.title=element_blank(),
    strip.background=element_rect(fill="black"),
    strip.text=element_text(colour="white")
)

## Column charts: angled x labels, no axis lines, no legend.
ggp_theme_col <- ggp_theme_default + theme(
    plot.margin=margin(t=0.5, r=0.5, b=0.5, l=0.5, unit="lines"),
    plot.title=element_text(size=12, face="plain", hjust=0.5),
    axis.text.x=element_text(size=10, face="plain", angle=45, hjust=1),
    axis.text.y=element_text(size=10, face="plain"),
    axis.line.x.bottom=element_blank(),
    axis.line.y.left=element_blank(),
    legend.position="none"
)

## Volcano plots: compact legend drawn inside the panel (top-left area).
ggp_theme_vol <- ggp_theme_default + theme(
    plot.margin=margin(t=0.25, r=0.25, b=0.25, l=0.25, unit="lines"),
    legend.key=element_blank(),
    legend.key.size=unit(0.001, "cm"),
    legend.key.width=unit(0.001, "cm"),
    legend.position=c(0.18, 0.9),
    legend.spacing.x=unit(0.001, "cm"),
    legend.spacing.y=unit(0.001, "cm"),
    legend.text=element_text(size=8, margin=margin(t=0.001))
)

## Box plots: angled x labels, legend suppressed (settings kept for when
## legend.position is flipped back on).
ggp_theme_box <- ggp_theme_default + theme(
    plot.margin=margin(t=0.5, r=1, b=0.5, l=1, unit="lines"),
    axis.text.x=element_text(size=9, face="plain", angle=45, hjust=1),
    axis.text.y=element_text(size=10, face="plain"),
    legend.position="none",
    legend.key=element_blank(),
    legend.key.size=unit(0.75, "cm"),
    legend.key.width=unit(0.75, "cm"),
    legend.spacing.x=unit(0.5, "cm"),
    legend.spacing.y=unit(0.5, "cm"),
    legend.text=element_text(size=9, margin=margin(t=0.1))
)

## Heatmaps: no axis titles/ticks/lines, titled legend on top.
ggp_theme_heat <- ggp_theme_default + theme(
    # axis.title.x=element_text(size=10, face="plain"),
    # axis.title.y=element_text(size=10, face="plain"),
    axis.text.x=element_text(size=9, face="plain"),
    axis.text.y=element_text(size=9, face="plain"),
    axis.title.x=element_blank(),
    axis.title.y=element_blank(),
    # axis.text.x=element_blank(),
    # axis.text.y=element_blank(),
    axis.ticks.x=element_blank(),
    axis.ticks.y=element_blank(),
    axis.line.x.bottom=element_blank(),
    axis.line.y.left=element_blank(),
    legend.position="top",
    legend.title=element_text(size=9, face="plain")
)

## Cohort-characteristics panels: full panel border instead of axis lines.
ggp_theme_charac <- ggp_theme_default + theme(
    panel.border=element_rect(colour="black", size=1.0, fill=NA),
    plot.margin=margin(t=0.5, r=0.5, b=0.5, l=0.5, unit="lines"),
    axis.text.x=element_text(size=8, face="plain"),
    axis.text.y=element_text(size=8, face="plain"),
    strip.text=element_text(colour="white"),
    axis.line.x.bottom=element_blank(),
    axis.line.y.left=element_blank(),
    legend.position="none"
)

draw_key_default <- function(data, params, size) {
    ## this function is here in case GeomBar$draw_key must be manipulated and
    ## then reset once occasion for altered parameters done
    ## https://stackoverflow.com/questions/11366964/is-there-a-way-to-change-the-spacing-between-legend-items-in-ggplot2
    ## Mirrors ggplot2's stock rectangle key: key fills the whole slot,
    ## shrunk by the line width so borders are not clipped.
    if (is.null(data$size)) {
        data$size <- 0.5
    }
    lwd <- min(data$size, min(size) / 4)
    grid::rectGrob(
        width = grid::unit(1, "npc") - grid::unit(lwd, "mm"),
        height = grid::unit(1, "npc") - grid::unit(lwd, "mm"),
        gp = grid::gpar(
            col = data$colour %||% NA,
            fill = alpha(data$fill %||% "grey20", data$alpha),
            lty = data$linetype %||% 1,
            lwd = lwd * .pt,
            linejoin = params$linejoin %||% "mitre",
            lineend = if (identical(params$linejoin, "round")) "round" else "square"
        )
    )
}

draw_key_large <- function(data, params, size) {
    ## this function is here in case GeomBar$draw_key must be reset to allow for
    ## larger spacing between legend keys; not used in this script
    ## https://stackoverflow.com/questions/11366964/is-there-a-way-to-change-the-spacing-between-legend-items-in-ggplot2
    ## Draws the key at 60% of the slot, leaving visual padding around it.
    lwd <- min(data$size, min(size) / 4)
    grid::rectGrob(
        width = grid::unit(0.6, "npc"),
        height = grid::unit(0.6, "npc"),
        gp = grid::gpar(
            col = data$colour,
            fill = alpha(data$fill, data$alpha),
            lty = data$linetype,
            lwd = lwd * .pt,
            linejoin = "mitre"
        )
    )
}
51522325be929343b492c194fccd8d7949c8dda8
3156a7a4033706a8db408fa52d8694d41cc36078
/R/multigraph.R
6f54c7a18c5cc6968c9cfc896ac338d5631b3585
[]
no_license
cran/netCoin
d38db4ec39ea0049c475749c3f3b53a88e2a8a51
b81ebd9501cf53c5ef2770a715a6e072ef265ac6
refs/heads/master
2023-04-02T22:45:46.863944
2023-03-23T21:40:02
2023-03-23T21:40:02
77,029,038
3
2
null
null
null
null
UTF-8
R
false
false
812
r
multigraph.R
# Build a multigraph object in one of three layouts ("default", "parallel"
# or "frame"), optionally writing it to `dir` and/or plotting it when
# `show = TRUE`. The result gets class "mGraph" prepended and is returned.
multigraphCreate <- function(..., mode = c("default","parallel","frame"),
                             mfrow = c(1,2), frame = 0, speed = 50,
                             loop = FALSE, lineplots = NULL, dir = NULL,
                             show = FALSE) {
  # When the result will be shown, defer writing to `dir` until plot()
  # time; otherwise hand the directory straight to the builder.
  build_dir <- if (!is.null(dir) && !identical(show, TRUE)) dir else NULL

  # Dispatch on the first letter of the selected mode; anything other than
  # "p"(arallel) or "f"(rame) falls through to the default builder.
  obj <- switch(substr(mode[1], 1, 1),
    p = rd3_multigraph(..., mfrow = mfrow, dir = build_dir),
    f = evolNetwork_rd3(..., frame, speed, loop, lineplots, build_dir),
    rd3_multigraph(..., dir = build_dir)
  )

  if (identical(show, TRUE)) {
    if (is.null(dir)) {
      plot(obj)
    } else {
      plot(obj, dir = dir)
    }
  }

  class(obj) <- c("mGraph", class(obj))
  obj
}

# Re-exported aliases for the underlying rd3 helpers.
multiPages <- rd3_multiPages
addImage <- rd3_addImage
addDescription <- rd3_addDescription
d0ab966e7a657eaf632a9cede4ece1c6f4f5fd14
dcb76ed15a952660c4546916f8ef9e111f496884
/R/machine-ls.R
0971b6226912587c637300f81ef221be52354b3a
[]
no_license
DavisVaughan/machinegun
b8bea1d56d278a6a28cc4daf37e67ddd9426c6fd
128898cead1c4dfc23785aa0a7ad45bc5817a3dd
refs/heads/master
2020-03-26T10:16:07.139337
2018-08-16T02:32:36
2018-08-16T02:32:36
144,789,195
3
0
null
null
null
null
UTF-8
R
false
false
112
r
machine-ls.R
#' List all docker-machine instances
#'
#' Thin wrapper that invokes the `docker-machine ls` subcommand through
#' `machine_command()`.
#'
#' @param ... Additional arguments forwarded unchanged to
#'   `machine_command()`.
#'
#' @return Whatever `machine_command()` returns for the `"ls"` subcommand
#'   (defined elsewhere in this package).
#'
#' @export
machine_ls <- function(...) {
  machine_command("ls", ...)
}
5950f52a372220f54d585f3d8b50b1c2ab223cd8
7c47b764ee111e5ef3f0b3e3aa4b284dd6cda770
/man/create_spp_occurence_raster_list.Rd
8bae1cdeaa96439786fd73419e086126531f4e4f
[ "MIT" ]
permissive
Vizzuality/vspt
845476a6fe1556015b59392003d4966e4d963051
c5d927ea031519733af4d60516d239a1b0c2b438
refs/heads/master
2021-07-12T13:25:19.277790
2020-10-28T15:34:19
2020-10-28T15:34:19
216,042,807
1
0
null
null
null
null
UTF-8
R
false
true
700
rd
create_spp_occurence_raster_list.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_spp_occurence_raster_list.R
\name{create_spp_occurence_raster_list}
\alias{create_spp_occurence_raster_list}
\title{Create spp presence-absence raster(s)}
\usage{
create_spp_occurence_raster_list(spp_occurence_sfc_list, r, land_mask)
}
\arguments{
\item{r}{Raster object to use as base for adding presence-absence}

\item{land_mask}{sf::sf polygon land mask}

\item{spp_occurence_sfc_list}{List of sf::sfc objects with spp occurence}
}
\value{
A list of raster::raster objects representing presence (1) and absence (0) of a species, and NA values for no data.
}
\description{
Create spp presence-absence raster(s)
}
bb289de82567ae04fc8421575ae05b9d1c8881ef
f5ad30ec784cccd03371242a63b99f265bf98dc5
/R/sprintf-ca-function.R
74c0cd96b744ee564e44c59f8bbe32eb4a69bf9f
[]
no_license
claus-e-andersen/clanTools
cf93ec1b81b38277f90eaab7901a8f82b2ffa847
dbbca750aba52b08c99d43a2a49a05810e2d0630
refs/heads/master
2021-07-06T09:22:18.006123
2020-10-17T04:20:21
2020-10-17T04:20:21
22,555,505
0
0
null
null
null
null
UTF-8
R
false
false
1,804
r
sprintf-ca-function.R
#' @title Print number to fit certain format. An alternative is used if this requires too many characters.
#' @description
#' This is a variation of sprintf, that checks if the formatted result is too
#' long. This can happen, for example, with electrometer output, where normal
#' results are 0.004 pC and then an overflow gives a charge of 6e47 C, which
#' produces an output that ruins txtplot table output.
#' @usage sprintf.ca("\%.5f", c(1.23, 2.33, 1e99), max.chars=7)
#' @name sprintf.ca
#' @author Claus E. Andersen
#' @return character vector of formatted numbers, same length as \code{number}.
#' @param format is the main format (e.g. "\%.5f") which is used if the output does not require too many characters.
#' @param number numeric vector to print (e.g. 1.334 or 1.4e99).
#' @param format.alt is the alternative format (e.g. "\%.4e") which is used for output that require too many characters.
#' @param max.chars is the maximum number of characters (e.g. 10) that the main format may produce (otherwise the alternative format will be used).
#' @export sprintf.ca
sprintf.ca <- function(format, number, format.alt = "%.4e", max.chars = 10) {
  # This is a variation of sprintf, that checks if the formatted
  # result is too long. This can happen, for example, with electrometer
  # output, where normal results are 0.004 pC and then an overflow
  # gives a charge of 6e47 C, which produces an output that ruins
  # txtplot table output.
  # Created: January 12, 2019
  # Claus E. Andersen
  #
  # Sample calls:
  #   sprintf.ca("%.5f", 1.23)                  # main format fits
  #   sprintf.ca("%.5f", 6.6e42)                # falls back to format.alt
  #   sprintf.ca("%.5f", 6.6e42, max.chars=9999) # main format forced
  #
  # Vectorized over `number`: format everything with the main format first,
  # then reformat only the entries that came out wider than max.chars.
  res <- sprintf(format, number)
  too.wide <- nchar(res) > max.chars
  if (any(too.wide)) {
    res[too.wide] <- sprintf(format.alt, number[too.wide])
  }
  res
} # sprintf.ca
312a0903d5e2ec8d659a09fe65735b2788bbb61f
28d02ce963df64a6802d003f40a5fb4a1e987b5f
/10.27_PseudotimeClassification/virtualtrend.R
02685fe1cd071c520bd8385a026eae1883ec299d
[]
no_license
Zhouzhiling/R
b31586a13a0e3e2557bb519ef73d0fc9b6e5c87b
6d46af4117b6c8f5b013d0c02b517a84cfb756d8
refs/heads/master
2021-05-16T10:19:46.904628
2017-10-27T15:57:28
2017-10-27T15:57:28
104,708,872
1
0
null
null
null
null
UTF-8
R
false
false
4,945
r
virtualtrend.R
# Classify genes by their expression trend across three pseudotime stages.
# Reads an expression table (first row = pseudotime per cell, remaining rows
# = genes), splits cells into early/middle/late pseudotime tertiles, averages
# per-gene expression per stage, and assigns each gene one of nine trend
# classes (down/up/flat between stage 1->2 crossed with stage 2->3).
# NOTE(review): script uses interactive file.choose() and hard-coded
# Windows desktop paths, so it only runs interactively on one machine.
rm(list=ls())
src <- read.table(file.choose(), head=T, sep="\t")
src
# Order columns (cells) by pseudotime, which sits in the first row.
src <- src[, order(src[1,])]
#setwd("C:/Users/37908/Desktop/")
#jpeg(file="1027.jpg",width=20000,height=10000)
#plot(src[1,])
#dev.off()
# Drop the first 26 columns; the rest become the expression matrix.
src_trim <- as.matrix(src[, -rep(1:26)])
# Column indices at 1/3 and 2/3 of the cells, and the pseudotime values
# there — these cut the ordered cells into three stages.
low_quant <- ceiling(ncol(src_trim) / 3)
high_quant <- ceiling(ncol(src_trim) / 3) * 2
low_quant_num <- src_trim[1, low_quant]
high_quant_num <- src_trim[1, high_quant]
persutime <- src_trim[1,]
src_trim <- src_trim[-1,]
gene_name <- row.names(src_trim)
# flag accumulates, per gene, the expression sum for each of the 3 stages;
# count1..3 count how many (gene, cell) additions went into each stage.
flag <- matrix(0, nrow=nrow(src_trim), ncol=3)
count1 <- 0
count2 <- 0
count3 <- 0
# Bin cells by pseudotime stage and accumulate per-gene expression sums.
# (translated from Chinese: "classify and sum each gene's expression per stage")
for(j in 1:length(persutime))
{
  if(persutime[j] < low_quant_num)
    for(i in 1:nrow(src_trim))
    {
      # NOTE(review): this inner condition repeats the outer one verbatim,
      # so it is always TRUE here.
      if(persutime[j] < low_quant_num)
      {
        flag[i,1] = flag[i,1] + src_trim[i,j]
        count1 <- count1 + 1
      }
    }
  else if(persutime[j] < high_quant_num)
  {
    for(i in 1:nrow(src_trim))
    {
      flag[i,2] = flag[i,2] + src_trim[i,j]
      count2 <- count2 + 1
    }
  }
  else
    for(i in 1:nrow(src_trim))
    {
      {
        flag[i,3] = flag[i,3] + src_trim[i,j]
        count3 <- count3 + 1
      }
    }
}
row.names(flag) <- gene_name
# Drop genes whose stage sums are all zero. (translated comment)
tmp_flag <- matrix(0, nrow=nrow(flag), ncol=3)
tmp_gene_frame <- matrix("", nrow=nrow(flag), ncol=1)
count_no_zero <- 0
#is.numeric(flag)
# NOTE(review): count_no_zero starts at 0 and is incremented AFTER the
# writes, so the first non-zero gene is written to row 0 (a silent no-op
# in R) — the first kept gene is effectively lost and rows are shifted.
for(k in 1:nrow(flag))
{
  if((flag[k,1]!=0)||(flag[k,2]!=0)||(flag[k,3]!=0))
  {
    tmp_flag[count_no_zero,] <- flag[k,]
    tmp_gene_frame[count_no_zero,1] <- gene_name[k]
    count_no_zero <- count_no_zero + 1
  }
}
# Per-gene totals for the three stages. (translated comment)
# NOTE(review): 19157 is a hard-coded gene count — presumably the number of
# non-zero genes for one particular input file; verify before reuse.
flag_no_zero <- tmp_flag[rep(1:19157),]
# Gene names. (translated comment)
tmp_gene_frame <- tmp_gene_frame[rep(1:19157), 1]
row.names(flag_no_zero) <- tmp_gene_frame
# Turn the stage sums into averages, scaled by 1e6. (translated comment)
flag_avg <- matrix(0, nrow=nrow(flag_no_zero), ncol=3)
for(i in 1:nrow(flag_no_zero))
{
  {
    flag_avg[i,1] = flag_no_zero[i,1] * 1000000 / count1
    flag_avg[i,2] = flag_no_zero[i,2] * 1000000 / count2
    flag_avg[i,3] = flag_no_zero[i,3] * 1000000 / count3
  }
}
row.names(flag_avg) <- tmp_gene_frame
# Plot the first 15 genes as example trend lines. (translated comment)
setwd("C:/Users/37908/Desktop/")
jpeg(file="output.jpg", width=2500, height=500)
par(mfrow=c(3,5))
flag_avg_mat <- data.frame(flag_avg)
for(i in 1:15)
  plot(x=c(0,4,19), y=flag_avg_mat[i,], type="l", xlab="time", ylab="amount",
       main=row.names(flag_avg_mat)[i], cex.main=2)
dev.off()
# Export to CSV for later detailed lookup. (translated comment)
write.csv(flag_avg, file="C:/Users/37908/Desktop/classify.csv", row.names=TRUE)
#?write.csv
# Nine classification buckets; each column collects the gene names of one
# trend class. NOTE(review): `class` shadows the base R function class().
class <- matrix("", nrow=nrow(flag_avg), ncol=9)
# d means decrease
# k means keep stable
# i means increase
dd_num <- 0
di_num <- 0
dk_num <- 0
id_num <- 0
ii_num <- 0
ik_num <- 0
kd_num <- 0
ki_num <- 0
kk_num <- 0
# Confirm the matrix is numeric. (translated comment)
#is.numeric(flag_avg)
# Relative tolerance: changes within +/-10% count as "stable".
error = 0.1
for(i in 1:nrow(flag_avg))
  # for(i in 1:8)
{
  # Decrease from stage 1 to stage 2. (translated comment)
  if(flag_avg[i,2] < flag_avg[i,1]*(1-error))
  {
    if(flag_avg[i,2] > flag_avg[i,3]*(1+error) )
    {
      dd_num <- dd_num + 1
      # Class 1: down-down. (translated comment)
      class[dd_num,1] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3] > flag_avg[i,2]*(1+error) )
    {
      di_num <- di_num + 1
      # Class 2: down-up. (translated comment)
      class[di_num,2] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3]<= flag_avg[i,2]*(1+error) && flag_avg[i,3]>=flag_avg[i,2]*(1-error))
    {
      dk_num <- dk_num + 1
      # Class 3: down-flat. (translated comment)
      class[dk_num,3] <- tmp_gene_frame[i]
    }
  }
  # Increase from stage 1 to stage 2. (translated comment)
  if(flag_avg[i,2] > flag_avg[i,1]*(1+error))
  {
    if(flag_avg[i,3] < flag_avg[i,2]*(1-error) )
    {
      id_num <- id_num + 1
      # Class 4: up-down. (translated comment)
      class[id_num,4] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3] > flag_avg[i,2]*(1+error) )
    {
      ii_num <- ii_num + 1
      # Class 5: up-up. (translated comment)
      class[ii_num,5] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3]<= flag_avg[i,2]*(1+error) && flag_avg[i,3]>=flag_avg[i,2]*(1-error))
    {
      ik_num <- ik_num + 1
      # Class 6: up-flat. (translated comment)
      class[ik_num,6] <- tmp_gene_frame[i]
    }
  }
  # Stable from stage 1 to stage 2. (translated comment)
  if(flag_avg[i,2] <= flag_avg[i,1]*(1+error) && flag_avg[i,2] >= flag_avg[i,1]*(1-error))
  {
    if(flag_avg[i,3] < flag_avg[i,2]*(1-error) )
    {
      kd_num <- kd_num + 1
      # Class 7: flat-down. (translated comment)
      class[kd_num,7] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3] > flag_avg[i,2]*(1+error) )
    {
      ki_num <- ki_num + 1
      # Class 8: flat-up. (translated comment)
      class[ki_num,8] <- tmp_gene_frame[i]
    }
    if(flag_avg[i,3]<= flag_avg[i,2]*(1+error) && flag_avg[i,3]>=flag_avg[i,2]*(1-error))
    {
      kk_num <- kk_num + 1
      # Class 9: flat-flat. (translated comment)
      class[kk_num,9] <- tmp_gene_frame[i]
    }
  }
}
c149b7aeabe98d312a4945a97e2c51b25813619a
1cc4e9886e3bc4614c8105d48b7e98e209948cf5
/Rscripts/functions.R
0971e222cbdadbd8e7d1077b4eb09ad3ea81c777
[]
no_license
mikaells/Find-pBGCs
55108bb349346e856f31ca424dc4d7773d75fb15
f59e7b25bdc94592735733076634ecf3b491770b
refs/heads/master
2023-07-09T07:43:39.796862
2021-08-16T18:55:02
2021-08-16T18:55:02
299,373,323
2
1
null
null
null
null
UTF-8
R
false
false
4,515
r
functions.R
# Map a numeric vector onto a color gradient: builds a `colsteps`-step ramp
# between `colors` and picks, for each x, the ramp color at its position
# within range(x).
color.gradient <- function(x, colors=c("grey90","red"), colsteps=100) {
  return( colorRampPalette(colors) (colsteps) [ findInterval(x, seq(min(x),max(x), length.out=colsteps)) ] )
}

# Lay out `graph` so that vertices sharing the same value of `wc` are pulled
# together: adds one phantom vertex per unique wc value, connects members to
# it with edges of weight `cluster.strength`, runs `layout`, then drops the
# phantom rows. Returns a coordinate matrix for the real vertices only.
layout.by.attr <- function(graph, wc, cluster.strength=1,layout=layout.auto) {
  g <- graph.edgelist(get.edgelist(graph)) # create a lightweight copy of graph w/o the attributes.
  E(g)$weight <- 1
  attr <- cbind(id=1:vcount(g), val=wc)
  g <- g + vertices(unique(attr[,2])) + igraph::edges(unlist(t(attr)), weight=cluster.strength)
  l <- layout(g, weights=E(g)$weight)[1:vcount(g),]
  return(l)
}

# Spread overlapping clusters in a 2-D layout `L`: dbscan-cluster the
# coordinates, push the (hard-coded) Klebsiella cluster away from
# Escherichia, then radially expand every sufficiently large cluster away
# from its centroid. Returns the adjusted layout; also draws points as a
# side effect.
# NOTE(review): the parameters `transTab` and `doKleb` are never used; the
# body instead reads the GLOBALS `translationTable2` (which it also mutates)
# and lowercase `l` (a layout, presumably the same as `L` — confirm).
# Cluster id 7 is assumed to be Klebsiella for one particular dataset.
spreadClus=function(L,transTab, doKleb=T, minSize=16)
{
  #find major clusters
  dbclus=dbscan::dbscan(L, eps = 1.5)
  #save them with main table
  translationTable2$dbcan=dbclus$cluster
  #have a look
  #plot(L, col=0)#translationTable2$dbcan+1)
  #make a new layout to overwrite
  l2=L
  #klebsiella is to close to escherichia
  #finding indeces of both
  klebIndx=which(translationTable2$dbcan==7)
  # NOTE(review): entIndx is computed but never used below.
  entIndx=which(translationTable2$mainNames=="Escherichia")
  #finding cluster centers
  klebCent=apply(l[klebIndx,],MARGIN = 2,mean)
  eschCent=apply(l[which(translationTable2$mainNames=="Escherichia"),], MARGIN = 2,mean)
  #estimate position of kleb relative to esch
  xx=(klebCent[1]-eschCent[1])
  yy=(klebCent[2]-eschCent[2])
  #dd=sqrt(xx^2+yy^2)
  #move klebsiella away from esch
  #if relative position is positive move more positive and vice versa
  l2[klebIndx,1]=l[klebIndx,1] +c(ifelse(xx>1,2,-2))
  l2[klebIndx,2]=l[klebIndx,2] +c(ifelse(yy>1,2,-2))
  #plot again
  plot(L, col=1)#translationTable2$dbcan+1,pch =16, cex =.1)
  j=3
  #spreading out clusters
  #for all clusters in graph, do
  for(j in unique(translationTable2$dbcan))
  {
    if(j==0) next #if cluster is just the non-clustered
    #find index of cluster members and calculate centroid
    dbIndx=which(translationTable2$dbcan==j)
    dbCent=c(mean(l[dbIndx,1]),mean(l[dbIndx,2]))
    #if cluster is too small, next
    if(length(dbIndx)<minSize) next
    #for all cluster members, do
    for(i in (dbIndx)){
      #find relative position and distance to center
      xx=(l[i,][1]-dbCent[1])
      yy=(l[i,][2]-dbCent[2])
      dd=sqrt(xx^2+yy^2)
      #to each point, add max three but less if cluster is small
      #to pull away from center
      #l2[i,]=l[i,]+(3-(3/(1*(length(dbIndx))^(1/10))))*c(xx,yy)
      l2[i,]=l[i,]+(0.2*sqrt(length(dbIndx)))*c(xx,yy)
      points(x = l2[i,1],y=l2[i,2], pch=16,cex=0.1,col=translationTable2$dbcan[i]+1)
    }
  }
  return(l2)
}

# triangle vertex shape
# Custom igraph vertex-shape renderer: draws each vertex as a 3-pointed
# star (triangle), honoring per-vertex color/size when supplied.
mytriangle <- function(coords, v=NULL, params) {
  vertex.color <- params("vertex", "color")
  if (length(vertex.color) != 1 && !is.null(v)) {
    vertex.color <- vertex.color[v]
  }
  vertex.size <- 1/200 * params("vertex", "size")
  if (length(vertex.size) != 1 && !is.null(v)) {
    vertex.size <- vertex.size[v]
  }

  symbols(x=coords[,1], y=coords[,2], bg=vertex.color,
          stars=cbind(vertex.size, vertex.size, vertex.size),
          add=TRUE, inches=FALSE)
}
# clips as a circle
add_shape("triangle", clip=shapes("circle")$clip,
          plot=mytriangle)

# generic star vertex shape, with a parameter for number of rays
mystar <- function(coords, v=NULL, params) {
  vertex.color <- params("vertex", "color")
  if (length(vertex.color) != 1 && !is.null(v)) {
    vertex.color <- vertex.color[v]
  }
  vertex.size  <- 1/200 * params("vertex", "size")
  if (length(vertex.size) != 1 && !is.null(v)) {
    vertex.size <- vertex.size[v]
  }
  norays <- params("vertex", "norays")
  if (length(norays) != 1 && !is.null(v)) {
    norays <- norays[v]
  }

  mapply(coords[,1], coords[,2], vertex.color, vertex.size, norays,
         FUN=function(x, y, bg, size, nor) {
           symbols(x=x, y=y, bg=bg,
                   stars=matrix(c(size,size/2), nrow=1, ncol=nor*2),
                   add=TRUE, inches=FALSE)
         })
}
# no clipping, edges will be below the vertices anyway
add_shape("star", clip=shape_noclip,
          plot=mystar, parameters=list(vertex.norays=5))

# plot(1:60, 4-(3/(1*((1:60))^(1/10))))
# plot(1:60, 0.3*sqrt(1:60))

# NOTE(review): duplicate definition — identical to color.gradient at the
# top of this file; harmless but one copy should probably be removed.
color.gradient <- function(x, colors=c("grey90","red"), colsteps=100) {
  return( colorRampPalette(colors) (colsteps) [ findInterval(x, seq(min(x),max(x), length.out=colsteps)) ] )
}
286b1c08937ceab6355320d2442a97456e92294e
0137840c620668a93987f291f2d527793dae28f6
/scripts/tutorial.R
003b9e91322f0b9522a5dc6cffeb75bbca49b663
[]
no_license
justinmillar/trevor-strava
9271c87246585b49d497bd55b8e2b1fedc07e53f
58dfe478d53d96f9dfa3fcf124844f0d510ee41a
refs/heads/master
2021-05-05T07:07:53.019964
2018-01-26T21:38:51
2018-01-26T21:38:51
118,854,663
0
0
null
null
null
null
UTF-8
R
false
false
4,657
r
tutorial.R
# Strava GPX visualisation walkthrough: facet grid, route map, elevation
# profiles, weekday ridgeline plot, calendar heatmap, and an unfinished
# circle-packing experiment. Each section writes a PNG under plots/.
library(strava)
library(tidyverse)

# Parse all GPX files under data/ into one long data frame of track points.
data <- process_data("data/")

# Facets ----
p1 <- plot_facets(data)
ggsave("plots/facets01.png", p1, width = 20, height = 20, units = "cm")

# Activity map ----
p2 <- plot_map(data, lon_min = -87, lon_max = -82, lat_min = 42, lat_max = 45.5)
ggsave("plots/map01.png", p2, width = 20, height = 15, units = "cm", dpi = 600)

# Elevation profile ----
p3 <- plot_elevations(data)
ggsave("plots/elevations01.png", p3, width = 20, height = 20, units = "cm")

# Ridges ----
library(ggridges)
library(lubridate)
library(tidyverse)

# Process the data
# data <- process_data(<gpx file path>)

# Summarise data
# One row per activity: its start/end, time-of-day versions of both, and
# the weekday (Monday = 1). NOTE: this overwrites the track-point `data`.
data <- data %>%
  group_by(id) %>%
  summarise(start = min(time), end = max(time)) %>%
  mutate(start_time = as.POSIXct(strftime(start, format = "%H:%M:%S"), format = "%H:%M:%S"),
         end_time = as.POSIXct(strftime(end, format = "%H:%M:%S"), format = "%H:%M:%S"),
         duration = end_time - start_time,
         wday = wday(start, week_start = 1))

# Function for processing an activity on a minute-by-minute basis; active = 1, not active = 0
# Takes a row index into the summarised `data` (read from the enclosing
# environment) and returns one row per minute of the day with an active flag.
compute_day_curve <- function(df_row) {
  start <- as.numeric(data[df_row, "start_time"])
  end <- as.numeric(data[df_row, "end_time"])
  wday <- as.character(data[df_row, "wday"])

  result <- data.frame(time = seq(as.POSIXct("00:00:00", format = "%H:%M:%S"),
                                  as.POSIXct("23:59:58", format = "%H:%M:%S"), by = 60)) %>%
    mutate(time_end = lead(time, default = as.POSIXct("23:59:59", format = "%H:%M:%S")),
           active = ifelse(time > start & time_end < end, 1, 0),
           wday = wday)
  result
}

# Process all activities
plot_data <- 1:nrow(data) %>%
  map_df(~compute_day_curve(.x), .id = "id") %>%
  filter(!is.na(active), active > 0) %>%
  mutate(wday = as.factor(wday))
# Reverse the weekday order so Monday ends up at the top of the ridgeline.
plot_data$wday <- factor(plot_data$wday, levels = rev(levels(plot_data$wday)))

# Create plot
p <- ggplot() +
  geom_density_ridges(aes(x = time, y = wday), plot_data, size = 0.5) +
  theme_ridges() +
  scale_y_discrete(expand = c(0.01, 0),
                   labels = c("Sun", "Sat", "Fri", "Thu", "Wed", "Tue", "Mon")) +
  scale_x_datetime(expand = c(0, 0), date_labels = "%I:%M %p") +
  theme(panel.grid = element_blank(), plot.margin = unit(rep(1, 4), "cm")) +
  xlab(NULL) + ylab(NULL)

# Save plot
ggsave("plots/ridges01.png", p, width = 24, height = 20, units = "cm")

# Calendar ----
library(ggart)
library(ggthemes)
library(ggTimeSeries)
library(lubridate)
library(strava)
library(tidyverse)
library(viridis)

# Re-parse the GPX files (the ridgeline section overwrote `data` above).
data <- process_data("data/")

# Summarise data
# Daily totals: distance, moving time, and derived speed/pace per date.
# NOTE(review): `summary` shadows the base R function summary().
summary <- data %>%
  mutate(time = lubridate::date(data$time),
         year = strftime(data$time, format = "%Y"),
         date_without_month = strftime(data$time, format = "%j"),
         month = strftime(data$time, format = "%m"),
         day_of_month = strftime(data$time, format = "%d"),
         year_month = strftime(data$time, format = "%Y-%m")) %>%
  group_by(time, year, date_without_month, month, day_of_month, year_month) %>%
  summarise(total_dist = sum(dist_to_prev), total_time = sum(time_diff_to_prev)) %>%
  mutate(speed = (total_dist) / (total_time /60^2)) %>%
  mutate(pace = (total_time / 60) / (total_dist)) %>%
  mutate(type = "day") %>%
  ungroup %>%
  mutate(id = as.numeric(row.names(.)))

# Generate plot data
time_min <- "2015-04-18"
time_max <- today()
# Cap daily distance at 70 km so outliers don't wash out the color scale.
max_dist <- 70

daily_data <- summary %>%
  group_by(time) %>%
  summarise(dist = sum(total_dist)) %>%
  ungroup() %>%
  mutate(time = lubridate::date(time)) %>%
  filter(complete.cases(.), time > time_min, time < time_max) %>%
  mutate(dist_scaled = ifelse(dist > max_dist, max_dist, dist))

# Create plot
p <- ggplot_calendar_heatmap(daily_data, "time", "dist_scaled",
                             dayBorderSize = 0.5, dayBorderColour = "white",
                             monthBorderSize = 0.75, monthBorderColour = "transparent",
                             monthBorderLineEnd = "round") +
  xlab(NULL) + ylab(NULL) +
  scale_fill_continuous(name = "km", low = "#DAE580", high = "#236327",
                        na.value = "#EFEDE0") +
  facet_wrap(~Year, ncol = 1) +
  theme_tufte() +
  theme(strip.text = element_text(),
        axis.ticks = element_blank(),
        legend.position = "bottom")

# Save plot
ggsave("plots/calendar01.png", p, width = 30, height = 30, units = "cm", dpi = 300)

# Circles ----
# Unfinished experiment: pack one circle per activity, area ~ distance.
library(packcircles)

cir_data <- summary %>% select(id, year, total_dist, speed)
packing <- circleProgressiveLayout(cir_data$total_dist)
cir_data <- cbind(cir_data, packing)
plot(cir_data$radius, cir_data$total_dist)

# NOTE(review): the next two statements do not parse — a stray trailing `s`
# after the circleLayoutVertices() call, and `fill = ` is left incomplete
# in aes(). This section appears to be abandoned work-in-progress; preserved
# as found.
dat.gg <- circleLayoutVertices(packing, npoints=100)s
ggplot() + geom_polygon(data = dat.gg, aes(x, y, fill = ))
258450d4384da9d9ac34e5c568fe2d3f0f00367e
6f9ab236999fff566b0ed76f6fc2146d63e3f7f1
/rotations/man/plot.Rd
861277a4d7ca2c382073a30aabf92ca5ed2f01c3
[ "MIT" ]
permissive
stanfill/rotationsC
d4733140b6e40c61b2d9312474c1a8786f1974fb
66722f095a68d81e506c29cfac7d4a8a69e664cf
refs/heads/master
2022-07-22T01:17:54.917073
2022-06-24T21:27:24
2022-06-24T21:27:24
9,582,475
0
3
null
2021-03-11T21:43:23
2013-04-21T16:51:07
C++
UTF-8
R
false
true
2,835
rd
plot.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot} \alias{plot} \alias{plot.SO3} \alias{plot.Q4} \title{Visualizing random rotations} \usage{ \method{plot}{SO3}( x, center = mean(x), col = 1, to_range = FALSE, show_estimates = NULL, label_points = NULL, mean_regions = NULL, median_regions = NULL, alp = NULL, m = 300, interactive = FALSE, ... ) \method{plot}{Q4}( x, center = mean(x), col = 1, to_range = FALSE, show_estimates = NULL, label_points = NULL, mean_regions = NULL, median_regions = NULL, alp = NULL, m = 300, interactive = FALSE, ... ) } \arguments{ \item{x}{n rotations in \code{SO3} or \code{Q4} format.} \item{center}{rotation about which to center the observations.} \item{col}{integer or vector comprised of 1, 2, 3 indicating which column(s) to display. If \code{length(col)>1} then each eyeball is labelled with the corresponding axis.} \item{to_range}{logical; if \code{TRUE} only part of the globe relevant to the data is displayed} \item{show_estimates}{character vector to specify which of the four estimates of the principal direction to show. Possibilities are "all", "proj.mean", "proj.median", "geom.mean", "geom.median".} \item{label_points}{vector of labels.} \item{mean_regions}{character vector to specify which of the three confidence regions to show for the projected mean. Possibilities are "all", "trans.theory","trans.bootstrap, "direct.theory", "direct.bootstrap".} \item{median_regions}{character vector to specify which of the three confidence regions to show for the projected median. Possibilities are "all", "theory", "bootstrap."} \item{alp}{alpha level to be used for confidence regions. 
See \code{\link{region}} for more details.} \item{m}{number of bootstrap replicates to use in bootstrap confidence regions.} \item{interactive}{deprecated; \code{sphereplot} was set to be removed from CRAN and was going to take this package down with it} \item{...}{parameters passed onto the points layer.} } \value{ A visualization of rotation data. } \description{ This function produces a static three-dimensional globe onto which one of the columns of the provided sample of rotations is projected. The data are centered around a user-specified rotation matrix. The static plot uses \code{ggplot2}. Interactive plots are no longer supported. } \examples{ r <- rvmises(200, kappa = 1.0) Rs <- genR(r) plot(Rs, center = mean(Rs), show_estimates = "proj.mean", shape = 4) \donttest{ # Z is computed internally and contains information on depth plot( Rs, center = mean(Rs), show_estimates = c("proj.mean", "geom.mean"), label_points = sample(LETTERS, 200, replace = TRUE) ) + aes(size = Z, alpha = Z) + scale_size(limits = c(-1, 1), range = c(0.5, 2.5)) } }
08f9c592f8d465d692cd625850d8818e065c5cfb
1204d0e3c990b77ae922038254354c3b660b0cab
/Graphs/2D_and.R
92c34dce36dd5f767b682b162847667892137ad8
[]
no_license
HussamHallak/Soundex_Arabic_Names
56174b370c84566245ef3397338d2a5bd6f11812
142cf7cd4672c8a84eec09c382fcaf22d1a530b6
refs/heads/main
2023-02-04T19:07:54.994422
2020-12-27T00:42:15
2020-12-27T00:42:15
324,659,651
2
0
null
null
null
null
UTF-8
R
false
false
868
r
2D_and.R
# Create a grouped barplot and add a legend
# Compares six evaluation metrics (Precision, Recall, TNR, F-score,
# Accuracy, Balanced Accuracy) across five phonetic matching algorithms
# for Arabic name matching in both directions.
# NOTE: the variable names below become the legend labels via
# rbind() -> rownames(all), so renaming them changes the rendered plot.
Libindic_Soundex <- c(P=0.961, R=0.802, TNR=0.999, F=0.874, ACC=0.996, BA=0.901)
Jellyfish_Soundex <- c(P=0.963, R=0.833, TNR=0.999, F=0.894, ACC=0.997, BA=0.917)
Jellyfish_Match_Rating <- c(P=0.988, R=0.657, TNR=0.999, F=0.789, ACC=0.995, BA=0.828)
Fuzzy_NYSIIS <- c(P=0.982, R=0.429, TNR=1.0, F=0.597, ACC=0.991, BA=0.714)
Fuzzy_DMetaphone <- c(P=0.934, R=0.811, TNR=0.999, F=0.868, ACC=0.996, BA=0.905)

# Rows = algorithms, columns = metrics; barplot groups bars by column.
all <- rbind(Libindic_Soundex, Jellyfish_Soundex, Jellyfish_Match_Rating,
             Fuzzy_NYSIIS, Fuzzy_DMetaphone)

# xlim is padded well past the bars to leave room for the legend.
barplot(all, beside = TRUE,
        main = "Metrics based on both directions names matching using phonetic algorithms",
        xlab = "Metric", ylim = c(0,1), ylab = "Value",
        col = c("dodgerblue3", "black", "orange", "skyblue1", "red"),
        legend.text = rownames(all), xlim = c(0,55),
        args.legend = list(cex=0.8,x = "topright"))
ef4c261c544b8fe70aec91ce21bb2f2a42758291
5a9d99c0266ced580e9e02bf3839acb2fb8ef49d
/man/add_starting_trees_to_xml.Rd
dbcaee9925b17e702a8dd7b3bf1571cf99ff310a
[]
no_license
emvolz-phylodynamics/sarscov2Rutils
1555fe2512057f86ec12f5d192b912121c7b4781
6747443599fb391a2f21c5de019d11058dec7e86
refs/heads/sarscov2Rutils
2023-04-18T23:54:43.415890
2020-11-05T12:02:56
2020-11-05T12:02:56
266,338,998
12
5
null
2021-02-02T19:39:22
2020-05-23T13:09:29
R
UTF-8
R
false
true
1,037
rd
add_starting_trees_to_xml.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/starttree1.R
\name{add_starting_trees_to_xml}
\alias{add_starting_trees_to_xml}
\title{Make starting trees, insert into beast xml and create ML tree plot}
\usage{
add_starting_trees_to_xml(
  xmlfn,
  fastafn,
  plotout = NULL,
  regionDemes = c("Il"),
  ntres = 1,
  ncpu = 4
)
}
\arguments{
\item{xmlfn}{File name of beast xml}

\item{fastafn}{File name of sequence data (needed for ML tree estimation)}

\item{plotout}{Output file name for ML tree plot. Set NULL to not plot}

\item{regionDemes}{regions to colour in the output ML tree}

\item{ntres}{Integer; the number of starting trees to produce. A distinct XML is made for each tree}

\item{ncpu}{Number of CPUs to use}
}
\value{
Treedater trees. New XMLs are written to disk
}
\description{
This will generate multiple starting trees by ML & treedater. Each tree is produced by a different random resolution of polytomies in the ML tree Sequence names must have sample times included (see prep_tip_labels)
}
4c7903ff5a122868df61324446e9a61649b85f47
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/hash/examples/del.Rd.R
d650b7bf4fcea782d94af6346fa264dc9889a334
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
315
r
del.Rd.R
# Machine-extracted example code from the hash package's del() help page
# (generated by R's example-extraction tooling; not hand-written).
library(hash)

### Name: del
### Title: Remove key-value pair(s) from a hash
### Aliases: del del-methods del,ANY,hash-method delete delete-methods
###   delete,ANY,hash-method
### Keywords: methods data manip

### ** Examples

h <- hash( letters, 1:26 )
h          # 26 elements
del( "a", h )
h          # 25 elements
26cf5ba907ef3adec892dbb3b624b4c324d513fc
bbb9d380e3e7af973f69661b0e7693b2fea73da6
/man/occupations.Rd
7b8a8abdcd4edd8b19e27dd875a139493c1d3597
[]
no_license
cran/skillsengineeR
55c7fa9a46e3bb588a927ddc6630bd10b039902c
a8eacbc9ab7b28f8f62ff974369d8e6ad4425b2d
refs/heads/master
2023-03-19T06:57:25.246922
2021-03-04T08:50:06
2021-03-04T08:50:06
344,525,243
0
0
null
null
null
null
UTF-8
R
false
true
655
rd
occupations.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_functions.R \name{occupations} \alias{occupations} \title{Pull the full list of occupations} \usage{ occupations(token, handle_status = "warn", response_raw = FALSE) } \arguments{ \item{token}{Authorization token obtained from \code{get_access_token}} \item{handle_status}{How to handle bad HTTP status. Set as either \code{warn} or \code{error}} \item{response_raw}{Logical value whether to return the API response as raw, unparsed text. Defaults to \code{FALSE}} } \value{ A data.frame; occupations relevant to query } \description{ Pull the full list of occupations }
3f06a9035cb1cb6aeb8377bf67a2e6cf03705754
bbc76ad786e92e25ec734ab20d6bcc0e51c1bd9d
/man/dtw.Rd
5ab1f10fd82aa2056af869104bc61c13f46152bc
[]
no_license
akinori-ito/DTW
fda888b339c4318069e3cad150545a7f908b2bff
6fcc339763fda4bb8837ecfdefda0203f165932e
refs/heads/master
2020-04-02T03:43:05.556146
2016-06-07T00:38:53
2016-06-07T00:38:53
60,339,033
0
0
null
null
null
null
UTF-8
R
false
false
585
rd
dtw.Rd
\name{dtw} \alias{dtw} \title{Dynamic Time Warping} \usage{ dtw(x,y,window=100) } \description{ dtw calculates correspondence of rows of two matrices x and y using dynamic time warping algorithm. } \details{ Variables x and y should be matrices having the same length of columns. The variable window denotes the calculation window of DTW; if window is large, the result becomes optimum and calculation becomes slower. Return value is a list. xsize: number of rows of x. ysize: number of rows of y. opt: a two-column matrix that is correspondences of rows of x and y. } \examples{ }
e71831d208ffcc3ee0936b7488c5b7506b29bad1
28f02ab412dcfed615601f747c6d28e654c444f1
/technical_analysis-master/michele/code_quantmod/04_miller01.R
84126287d0f78e8221caf2f63159fdf3e991f142
[ "MIT" ]
permissive
karansoneja/DataScience
a7b2a09cd4ca20cd8d51d7fed4f2196ce2d0de84
883343eebbb27664e55a89cc9d42bc842ed1a3f7
refs/heads/master
2022-12-27T12:25:04.192150
2020-09-30T17:19:59
2020-09-30T17:19:59
215,329,005
0
0
null
null
null
null
UTF-8
R
false
false
11,228
r
04_miller01.R
## Miller's code (https://ntguardian.wordpress.com/2017/04/03/introduction-stock-market-data-r-2/) # library(quantmod) library(IKTrading) ## ===== get data ========== start <- as.Date("2010-01-01") end <- as.Date("2016-10-01") # Let's get Apple stock data; Apple's ticker symbol is AAPL. We use the quantmod function getSymbols, and pass a string as a first argument to identify the desired ticker symbol, pass "yahoo" to src for Yahoo! Finance, and from and to specify date ranges # The default behavior for getSymbols is to load data directly into the global environment, with the object being named after the loaded ticker symbol. This feature may become deprecated in the future, but we exploit it now. getSymbols("AAPL", src="yahoo", from = start, to = end) ## ===== some charting with SMA's ========= candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/") AAPL_sma_20 <- SMA( Cl(AAPL), # The closing price of AAPL, obtained by quantmod's Cl() function n = 20 # The number of days in the moving average window ) AAPL_sma_50 <- SMA( Cl(AAPL), n = 50 ) AAPL_sma_200 <- SMA( Cl(AAPL), n = 200 ) zoomChart("2016") # Zoom into the year 2016 in the chart addTA(AAPL_sma_20, on = 1, col = "red") # on = 1 plots the SMA with price addTA(AAPL_sma_50, on = 1, col = "blue") addTA(AAPL_sma_200, on = 1, col = "green") ## ======== 1. 
create indicators by comparing SMA crossing ======== AAPL_trade <- AAPL AAPL_trade$`20d` <- AAPL_sma_20 AAPL_trade$`50d` <- AAPL_sma_50 regime_val <- sigComparison("", data = AAPL_trade, columns = c("20d", "50d"), relationship = "gt") - sigComparison("", data = AAPL_trade, columns = c("20d", "50d"), relationship = "lt") plot(regime_val["2016"], main = "Regime", ylim = c(-2, 2)) plot(regime_val, main = "Regime", ylim = c(-2, 2)) ## regime and main series in the same chart candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/") addTA(regime_val, col = "blue", yrange = c(-2, 2)) addLines(h = 0, col = "black", on = 3) addSMA(n = c(20, 50), on = 1, col = c("red", "blue")) # zoomChart("2016") ## count how many bullish and how many bearish days there were (i.e. buy vs sell) table(as.vector(regime_val)) ## generate signals out of the regimes -- actual triggering of mkt operations sig <- diff(regime_val) / 2 plot(sig, main = "Signal", ylim = c(-2, 2)) table(sig) ## ====== computing profitability ===== # The Cl function from quantmod pulls the closing price from the object # holding a stock's data # Buy prices Cl(AAPL)[which(sig == 1)] # Sell prices Cl(AAPL)[sig == -1] # Since these are of the same dimension, computing profit is easy as.vector(Cl(AAPL)[sig == 1])[-1] - Cl(AAPL)[sig == -1][-table(sig)[["1"]]] candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white") addTA(regime_val, col = "blue", yrange = c(-2, 2)) addLines(h = 0, col = "black", on = 3) addSMA(n = c(20, 50), on = 1, col = c("red", "blue")) zoomChart("2014-05/2014-07") ## ########################################################## ## ## ============ 2. 
ADJUSTING ALL DATA ======================== getSymbols(Symbols = symbols, src = "yahoo", from = start, to = end) ## Quickly define adjusted versions of each of these `%s%` <- function(x, y) {paste(x, y)} `%s0%` <- function(x, y) {paste0(x, y)} for (s in symbols) { eval(parse(text = s %s0% "_adj <- adjustOHLC(" %s0% s %s0% ")")) } symbols_adj <- paste(symbols, "adj", sep = "_") ## here uses quantstrat to manage portfolios ## =============== some trading rule indicators ============= ## Indicators are used to construct signals add.indicator(strategy = strategy_st, name = "SMA", # SMA is a function arguments = list(x = quote(Cl(mktdata)), # args of SMA n = 20), label = "fastMA") add.indicator(strategy = strategy_st, name = "SMA", arguments = list(x = quote(Cl(mktdata)), n = 50), label = "slowMA") ## ============= Next comes trading signals ======= add.signal(strategy = strategy_st, name = "sigComparison", # Remember me? arguments = list(columns = c("fastMA", "slowMA"), relationship = "gt"), label = "bull") add.signal(strategy = strategy_st, name = "sigComparison", arguments = list(columns = c("fastMA", "slowMA"), relationship = "lt"), label = "bear") ## ============== rules that generate trades ============ ## BUY add.rule(strategy = strategy_st, name = "ruleSignal", # Almost always this one arguments = list(sigcol = "bull", # Signal (see above) that triggers sigval = TRUE, ordertype = "market", orderside = "long", replace = FALSE, prefer = "Open", osFUN = osMaxDollar, # The next parameter, which is a parameter passed to # osMaxDollar, will ensure that trades are about 10% # of portfolio equity maxSize = quote(floor(getEndEq(account_st, Date = timestamp) * .1)), tradeSize = quote(floor(getEndEq(account_st, Date = timestamp) * .1))), type = "enter", path.dep = TRUE, label = "buy") ## SELL add.rule(strategy = strategy_st, name = "ruleSignal", arguments = list(sigcol = "bear", sigval = TRUE, orderqty = "all", ordertype = "market", orderside = "long", replace = FALSE, 
prefer = "Open"), type = "exit", path.dep = TRUE, label = "sell") ## ======================= Execution ======= ## Execution of the strategy applyStrategy(strategy_st, portfolios = portfolio_st) ## Update profile updatePortf(portfolio_st) dateRange <- time(getPortfolio(portfolio_st)$summary)[-1] updateAcct(portfolio_st, dateRange) updateEndEq(account_st) ## And some statistics tStats <- tradeStats(Portfolios = portfolio_st, use="trades", inclZeroDays = FALSE) tStats[, 4:ncol(tStats)] <- round(tStats[, 4:ncol(tStats)], 2) print(data.frame(t(tStats[, -c(1,2)]))) final_acct <- getAccount(account_st) plot(final_acct$summary$End.Eq["2010/2016"], main = "Portfolio Equity") ## ======================================================== ## ## ========= 3. ENLARGE PORTFOLIO ============================ start = "2018-01-01" end = "2018-07-01" # Get new symbols symbols = c("AAPL", "MSFT", "GOOG", "FB", "TWTR", "NFLX", "AMZN", "YHOO", "SNY", "NTDOY", "IBM", "HPQ") quantmod::getSymbols(Symbols = symbols, from = start, to = end) # Quickly define adjusted versions of each of these `%s%` <- function(x, y) {paste(x, y)} `%s0%` <- function(x, y) {paste0(x, y)} for (s in symbols) { eval(parse(text = s %s0% "_adj <- adjustOHLC(" %s0% s %s0% ")")) } initDate = "1990-01-01" symbols_adj <- paste(symbols, "adj", sep = "_") currency("USD") stock(symbols_adj,currency= "USD" ,multiplier = 1) init strategy_st_2 <- portfolio_st_2 <- account_st_2 <- "SMAC-20-50_v2" rm.strat(portfolio_st_2) rm.strat(strategy_st_2) initPortf(portfolio_st_2, symbols = symbols_adj, initDate = initDate, currency = "USD") initAcct(account_st_2, portfolios = portfolio_st_2, initDate = initDate, currency = "USD", initEq = 1000000) initOrders(portfolio_st_2, store = TRUE) strategy(strategy_st_2, store = TRUE) add.indicator(strategy = strategy_st_2, name = "SMA", arguments = list(x = quote(Cl(mktdata)), n = 20), label = "fastMA") add.indicator(strategy = strategy_st_2, name = "SMA", arguments = list(x = quote(Cl(mktdata)), n 
= 50), label = "slowMA") # Next comes trading signals add.signal(strategy = strategy_st_2, name = "sigComparison", # Remember me? arguments = list(columns = c("fastMA", "slowMA"), relationship = "gt"), label = "bull") add.signal(strategy = strategy_st_2, name = "sigComparison", arguments = list(columns = c("fastMA", "slowMA"), relationship = "lt"), label = "bear") # Finally, rules that generate trades add.rule(strategy = strategy_st_2, name = "ruleSignal", arguments = list(sigcol = "bull", sigval = TRUE, ordertype = "market", orderside = "long", replace = FALSE, prefer = "Open", osFUN = osMaxDollar, maxSize = quote(floor(getEndEq(account_st_2, Date = timestamp) * .1)), tradeSize = quote(floor(getEndEq(account_st_2, Date = timestamp) * .1))), type = "enter", path.dep = TRUE, label = "buy") add.rule(strategy = strategy_st_2, name = "ruleSignal", arguments = list(sigcol = "bear", sigval = TRUE, orderqty = "all", ordertype = "market", orderside = "long", replace = FALSE, prefer = "Open"), type = "exit", path.dep = TRUE, label = "sell") applyStrategy(strategy_st_2, portfolios = portfolio_st_2) # Now for analytics updatePortf(portfolio_st_2) ## ===== dateRange <- time(getPortfolio(portfolio_st_2)$summary)[-1] updateAcct(account_st_2, dateRange) updateEndEq(account_st_2) tStats2 <- tradeStats(Portfolios = portfolio_st_2, use="trades", inclZeroDays = FALSE) tStats2[, 4:ncol(tStats2)] <- round(tStats2[, 4:ncol(tStats2)], 2) print(data.frame(t(tStats2[, -c(1,2)]))) final_acct2 <- getAccount(account_st_2) plot(final_acct2$summary$End.Eq["2010/2016"], main = "Portfolio Equity") ## ============ 4. BENCHMARKING =========== # one should always buy an index fund that merely reflects the composition of the market. # SPY is an exchange-traded fund (a mutual fund that is traded on the market like a stock) # whose value effectively represents the value of the stocks in the S&P 500 stock index. 
# By buying and holding SPY, we are effectively trying to match our returns with the market # rather than beat it. getSymbols("SPY", from = start, to = end) # A crude estimate of end portfolio value from buying and holding SPY 1000000 * (SPY$SPY.Adjusted["2016-09-30"][[1]] / SPY$SPY.Adjusted[[1]]) plot(final_acct2$summary$End.Eq["2010/2016"] / 1000000, main = "Portfolio Equity", ylim = c(0.8, 2.5)) lines(SPY$SPY.Adjusted / SPY$SPY.Adjusted[[1]], col = "blue")
9ec281bd3c50c693bb9eba082b954c1aca4ff41e
d8be1dd889f673d8636c02139c4f5fbfa4200fcd
/tests/testthat/test_bind_schema_to_entity_request.R
4576a21487c6d909d1b6ae8c0207c3efe07fa229
[]
no_license
thomasyu888/synr-sdk-client
c03a23ff852a819f3dab1ee103f8caa7f7d16ec0
a090a5e062ca41ee68ee719ac30b041a0b2a9809
refs/heads/main
2023-02-26T03:10:25.802797
2021-02-02T15:10:28
2021-02-02T15:10:28
333,743,854
1
0
null
null
null
null
UTF-8
R
false
false
874
r
test_bind_schema_to_entity_request.R
# Automatically generated by openapi-generator (https://openapi-generator.tech) # Please update as you see appropriate context("Test BindSchemaToEntityRequest") model.instance <- BindSchemaToEntityRequest$new() test_that("entityId", { # tests for the property `entityId` (character) # The ID of the the entity. # uncomment below to test the property #expect_equal(model.instance$`entityId`, "EXPECTED_RESULT") }) test_that("schema$id", { # tests for the property `schema$id` (character) # The $id of the JSON schema to bind to the entity. Note: If the $id includes a semantic version then entity will be bound to that specific version. If the $id excludes the semantic version then the entity will be bound to the latest version of that schema. # uncomment below to test the property #expect_equal(model.instance$`schema$id`, "EXPECTED_RESULT") })
edf8d812654d5e4ff45af08c2c1e54c77dc5f399
43937991b4969b915f03c247d13cf00a5d81ae3e
/R/plot_calls.R
16b397d19da61a21abef103a41c3cd8d5e7edcde
[]
no_license
mjwestgate/circleplot
51f2e84acd590553e611ece0b2eb59c817bc34a0
52d705dc050f74ec75a5a528ed981a9c5a1c772f
refs/heads/master
2020-12-09T23:34:15.721674
2017-07-04T08:50:57
2017-07-04T08:50:57
24,923,479
10
0
null
2016-09-09T03:35:04
2014-10-08T03:47:28
R
UTF-8
R
false
false
5,001
r
plot_calls.R
# plot functions - called by user # function to draw a figure, if supplied with a data.frame or matrix circleplot<-function( input, # a distance matrix (class 'dist') or square matrix (class matrix) cluster=TRUE, # should points be rearranged using hclust? Defaults to TRUE reduce=FALSE, # should nodes with no connections be removed? draw=TRUE, # should the figure be drawn? add=FALSE, # should this figure be added to an existing plot? style="classic", # "pie" or "clock" are current alternatives plot.control, # a list containing plot attributes. See ?circleplot ... ) { # catch errors if(any(c("classic", "pie", "clock")==style)==FALSE){ warning(paste("style = '", style, "' not recognised: switched to style = 'classic'", sep="")) style<-"classic"} # test whether the object given was calculated by circleplot check.names<-function(x){ if(length(x)==3){ test<-names(x)==c("locations", "plot.control", "line.data") if(length(test)==0){return(FALSE) }else{all(test)} }else{FALSE}} add.existing.plot<-class(input)=="list" & check.names(input) # if input was calculated by circleplot, extract relevant information if(add.existing.plot){ plot.options<-input$plot.control circleplot.object<-input$locations line.object<-input$line.data style<-plot.options$style # if not, calculate (and plot) node and edge locations as usual }else{ dataset<-check.inputs(input, reduce) plot.options<-set.plot.attributes(dataset, plot.control, reduce, style) # set plot attributes/defaults circleplot.object<-calc.circleplot(dataset, plot.options, cluster, style) # get line and point attributes # calculate inter-point distances # allows setting of pc.scale (to calculate curvature of lines relative to origin) point.distance<-dist(circleplot.object$points[, c("x", "y")]) scale.distance<-point.distance-min(point.distance) scale.distance<-((scale.distance/max(scale.distance))* plot.options$line.curvature[2])+ plot.options$line.curvature[1] scale.distance<-as.matrix(scale.distance) # loop to calculate and draw 
lines line.object <-lapply(circleplot.object$lines, function(a, add, circleplot.object, scale.distance, plot.options){ if(nrow(a)>0){ # this may not be sufficient line.list<-split(a, c(1:nrow(a))) line.list<-lapply(line.list, function(x, plot.object, distance, options){ calc.lines(x, plot.object, distance, options)}, plot.object=circleplot.object, distance=scale.distance, options= plot.options) } }, add=add, circleplot.object= circleplot.object, scale.distance= scale.distance, plot.options= plot.options) } # set plot window attributes if(draw & class(input)=="list" & check.names(input)==FALSE){ par(mfrow=panel.dims(length(circleplot.object$lines)))} # DRAW if(draw){ # this has to run within lapply, in case lists are supplied to circleplot # if(is.null(line.object[[1]])==FALSE){ invisible(lapply(line.object, function(a, add, circleplot.object, plot.options){ if(add==FALSE){ do.call(par, circleplot.object$par) do.call(plot, circleplot.object$plot)} # draw these lines if(is.null(a)==FALSE){ invisible(lapply(a, FUN=function(z, asymmetric, arrow.attr){ draw.curves(z) if(asymmetric)draw.arrows(z, arrow.attr)}, asymmetric=attr(circleplot.object, "asymmetric"), arrow.attr=plot.options$arrows)) } # add points or polygons, depending on style switch(style, "classic"={do.call(points, as.list(circleplot.object$points[, -which(colnames(circleplot.object$points)=="labels")]))}, "pie"={invisible(lapply(circleplot.object$polygons, function(x){do.call(polygon, x)}))}, "clock"={ invisible(lapply(circleplot.object$nodes, function(x){do.call(lines, x)})) do.call(lines, circleplot.object$border)} ) # label points label.suppress.test<-is.logical(plot.options$point.labels) & length(plot.options$point.labels)==1 if(label.suppress.test==FALSE){ labels.list<-split(circleplot.object$labels, 1:nrow(circleplot.object$labels)) invisible(lapply(labels.list, FUN=function(x){do.call(text, x)}))} }, add=add, circleplot.object= circleplot.object, plot.options= plot.options)) if(class(input)=="list" & 
add.existing.plot==FALSE)par(mfrow=c(1, 1)) } # end if(draw) # return information as needed return(invisible(list(locations= circleplot.object, plot.control=plot.options, line.data= line.object))) } # simple code to get pretty point colours point.attr<-function(distance.matrix) { if(length(attr(distance.matrix, "Labels"))==0){ attr(distance.matrix, "Labels")<-paste("V", c(1:attr(distance.matrix, "Size")), sep="")} labels<-as.character(attr(distance.matrix, "Labels")) color.hex<-c(RColorBrewer::brewer.pal(8, "Dark2"), brewer.pal(9, "Set1"), brewer.pal(8, "Set2") )[1:length(labels)] point.attributes<-data.frame( labels= labels, pch=19, col=color.hex, cex=3, stringsAsFactors=FALSE) return(point.attributes) }
e8782ff1256b0c96bac3f19070a5222c371bf053
c5344271e392c529d9245ce7fdfe723678edd580
/Simulations/Fig_4_Analysis.R
6c2a6f7cdca696b4e25686bb43316f07ff0da5f2
[]
no_license
njacobs627/Pan99_IVGs_Spatial_Structure
41cfb0a61e174e7ccd2f4d166b31432ffdf756a7
a1d82fb50b0ebe4105476e4a20b328ecd0e4c6ae
refs/heads/master
2020-05-23T07:22:00.328932
2019-06-08T13:51:18
2019-06-08T13:51:18
186,676,634
2
0
null
null
null
null
UTF-8
R
false
false
22,919
r
Fig_4_Analysis.R
require(tidyverse) require(dplyr) Proj.Home = "/Users/Nate/Box Sync/Lowen_Lab/Data_Notebook/Documentation" #For Nate's Macbook Pro Data.Path = file.path(Proj.Home,"Simulation_Test_Results") setwd(Data.Path) # Analyze ---- Fig4 = read.csv(file = file.path(Data.Path,"Fig4_Data_SHORT.csv")) Fig4.Peak = Fig4 %>% group_by(Sim,Spread_Frac,Pp,Virus_D) %>% dplyr::summarise(Max_Productive = max(Productive), Max_Virions = max(Virions), Max_Semi = max(Infected - Productive)) Fig4.Peak Fig4.Hours = Fig4 %>% filter((t * 4) %in% (1:(96 * 4))) write.csv(Fig4.Peak,file=file.path(Data.Path,"Fig4_Peak_Data.csv"),row.names=FALSE) write.csv(Fig4.Hours,file=file.path(Data.Path,"Fig4_Hours_Data.csv"),row.names=FALSE) # Visualize ---- Fig4.Peak = read.csv(file = file.path(Data.Path,"Fig4_Peak_Data.csv")) %>% mutate(Virus_D = log10(Virus_D / 3600)) %>% filter(Virus_D < 5, Spread_Frac %in% c(0,0.25)) %>% mutate(Spread_Frac = recode(Spread_Frac, "0.25" = "25% Local Spread","0" = "Diffusion Only")) Fig4.Hours = read.csv(file=file.path(Data.Path,"Fig4_Hours_Data.csv")) %>% mutate(Virus_D = log10(Virus_D / 3600)) %>% filter(Virus_D < 5, Spread_Frac %in% c(0,0.25)) %>% mutate(Spread_Frac = recode(Spread_Frac, "0.25" = "25% Local Spread","0" = "Diffusion Only")) # Plot Stock ---- Plot.Stock = ggplot() + scale_color_manual(values = c("Diffusion Only" = "midnightblue","25% Local Spread" = "steelblue1"), guide = FALSE) + scale_fill_manual(values = c("Diffusion Only" = "midnightblue","25% Local Spread" = "steelblue1"), guide = FALSE) + scale_y_continuous(limits = c(0,105)) + geom_vline(xintercept = log10((5.825)), lty = 2) + coord_cartesian(xlim = c(-0.5,4)) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), 
axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) # Raw r0 ---- Early.Time = 12 df4 = Fig4.Hours %>% filter(t == Early.Time) %>% mutate(Slope = pmax(log10(Productive),0)) %>% dplyr::select(Pp,Virus_D,Spread_Frac,Sim,Slope) %>% group_by(Pp,Virus_D,Spread_Frac) %>% dplyr::summarise(r0 = mean(Slope)) %>% ungroup %>% mutate(Pp = Pp + 0.005) %>% mutate(Pp = round(Pp,2)) df4 Plot.Stock + geom_point(data = df4, aes(x = Virus_D, y = r0, color = Spread_Frac)) + geom_smooth(data = df4, aes(x = Virus_D, y = r0, linetype = factor(Pp), color = Spread_Frac, fill = Spread_Frac, group = interaction(Spread_Frac,Pp)), lwd = 1.2, alpha = 0.3) + labs(y = TeX("\\textbf{$\\log_1_0$ Cells Infected in 12 Hours}"), x = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}"), linetype = TeX("\\textbf{$\\P_P$}"), size = TeX("\\textbf{$\\P_P$}"), color = NULL, fill = NULL) + scale_linetype_manual(values = c("1" = 1,"0.58" = 1212)) + scale_y_continuous(limits = c(0,4)) + scale_color_manual(values = c("Diffusion Only" = "midnightblue","25% Local Spread" = "steelblue1")) + scale_fill_manual(values = c("Diffusion Only" = "midnightblue","25% Local Spread" = "steelblue1")) ggsave('Figures/4A_r0_Raw.pdf', width = 7, height = 5, unit = "in") #r0 Cost (log10 Productive cells in 12 hours) ---- Early.Time = 12 df1 = Fig4.Hours %>% filter(t == Early.Time, Pp == 1) %>% mutate(Slope = log10(Productive) / t) %>% group_by(Virus_D,Spread_Frac) %>% dplyr::summarise(Intact_r0 = mean(Slope)) df2 = Fig4.Hours %>% filter(t == Early.Time, Pp < 1) %>% mutate(Slope = pmax(log10(Productive) / t,0)) %>% 
dplyr::select(Pp,Virus_D,Spread_Frac,Sim,Slope) df3 = right_join(df1,df2) %>% mutate(Cost = (1 - (Slope / Intact_r0)) * 100) %>% group_by(Virus_D,Spread_Frac) %>% dplyr::summarise(Cost = mean(Cost)) Plot.Stock + geom_point(data = df3, aes(x = Virus_D, y = Cost, color = Spread_Frac, group = Spread_Frac)) + geom_smooth(data = df3, aes(x = Virus_D, y = Cost, group = Spread_Frac, color = Spread_Frac, fill = Spread_Frac), lty = 1212, alpha = 0.3) + labs(y = "% Reduction in Initial Growth Rate", x = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}")) ggsave('Figures/4B_r0_Reduction.pdf', width = 6, height = 5, unit = "in") #Time to 100 Cells ---- Target_Cells = 100 df1 = Fig4.Hours %>% filter(Pp == 1, Productive >= Target_Cells) %>% group_by(Spread_Frac,Virus_D,Sim) %>% dplyr::summarise(To_Target_Cells = min(t)) %>% ungroup() %>% group_by(Spread_Frac,Virus_D) %>% dplyr::summarise(Intact_To_Target_Cells = mean(To_Target_Cells)) df2 = Fig4.Hours %>% filter(Pp < 1, Productive >= Target_Cells) %>% group_by(Spread_Frac,Virus_D,Sim) %>% dplyr::summarise(To_Target_Cells = min(t)) %>% dplyr::select(Spread_Frac,Virus_D,Sim,To_Target_Cells) df3 = right_join(df1,df2) %>% mutate(Target_Cell_Cost = (To_Target_Cells - Intact_To_Target_Cells) / Intact_To_Target_Cells * 100) %>% group_by(Virus_D,Spread_Frac) %>% dplyr::summarise(Target_Cell_Cost = mean(Target_Cell_Cost)) Plot.Stock + geom_point(data = df3, aes(x = Virus_D, y = Target_Cell_Cost, color = Spread_Frac, group = Spread_Frac)) + geom_smooth(data = df3, aes(x = Virus_D, y = Target_Cell_Cost, group = Spread_Frac, color = Spread_Frac, fill = Spread_Frac), lty = 1212, alpha = 0.3) + scale_y_continuous(limits = c(0,600)) + labs(y = "% Increase in Time to Infect 100 Cells", x = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}")) ggsave('Figures/4C_Increase_in_Time_to_100_Cells.pdf', width = 6, height = 5, unit = "in") #Time to 1e5 Virions ---- Target_Virions = 1e5 df1 = Fig4.Hours %>% filter(Pp == 1, 
Virions >= Target_Virions) %>% group_by(Spread_Frac,Virus_D,Sim) %>% dplyr::summarise(To_Target_Virions = min(t)) %>% ungroup() %>% group_by(Spread_Frac,Virus_D) %>% dplyr::summarise(Intact_To_Target_Virions = mean(To_Target_Virions)) df2 = Fig4.Hours %>% filter(Pp < 1, Virions >= Target_Virions) %>% group_by(Spread_Frac,Virus_D,Sim) %>% dplyr::summarise(To_Target_Virions = min(t)) %>% dplyr::select(Spread_Frac,Virus_D,Sim,To_Target_Virions) df3 = right_join(df1,df2) %>% mutate(Target_Virion_Cost = (To_Target_Virions - Intact_To_Target_Virions) / Intact_To_Target_Virions * 100) %>% group_by(Virus_D,Spread_Frac) %>% dplyr::summarise(Target_Virion_Cost = mean(Target_Virion_Cost)) Plot.Stock + geom_point(data = df3, aes(x = Virus_D, y = Target_Virion_Cost, color = Spread_Frac, group = Spread_Frac)) + geom_smooth(data = df3, aes(x = Virus_D, y = Target_Virion_Cost, group = Spread_Frac, color = Spread_Frac, fill = Spread_Frac), lty = 1212, alpha = 0.3) + scale_y_continuous(limits = c(0,400)) + labs(y = TeX("\\textbf{% Increase in Time to Yield 10^5 Virions}"), x = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}")) ggsave('Figures/4D_Increase_in_Time_to_1e5_Virions.pdf', width = 6, height = 5, unit = "in") # Supplement ---- # Representative Time Course ---- ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 1,Sim == 1,Spread_Frac == "25% Local Spread"), aes(x = t, y = Productive, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_y_continuous(limits = c(0,10000),labels = comma) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{Productively Infected Cells}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), 
axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2C_Time_Course_PP1_Local_Spread.pdf', width = 5, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 0.575,Sim == 1,Spread_Frac == "25% Local Spread"), aes(x = t, y = Productive, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_y_continuous(limits = c(0,10000),labels = comma) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{Productively Infected Cells}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2G_Time_Course_PP_058_Local_Spread.pdf', width = 6, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 1,Sim == 1,Spread_Frac == 
"Diffusion Only"), aes(x = t, y = Productive, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_y_continuous(limits = c(0,10000),labels = comma) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{Productively Infected Cells}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2A_Time_Course_PP1_No_Spread.pdf', width = 5, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 0.575,Sim == 1,Spread_Frac == "Diffusion Only"), aes(x = t, y = Productive, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_y_continuous(limits = c(0,10000),labels = comma) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{Productively Infected Cells}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = 
"black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2E_Time_Course_PP_058_No_Spread.pdf', width = 6, height = 5, unit = "in") # Virion Time Course ---- ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 1,Sim == 1,Spread_Frac == "25% Local Spread"), aes(x = t, y = Virions %>% log10, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{$\\log_1_0$ Virions}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2D_Virion_Time_Course_PP1_Local_Spread.pdf', width = 5, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 0.575,Sim == 1,Spread_Frac == "25% Local Spread"), aes(x = t, y = Virions %>% log10, color = Virus_D, group = Virus_D), lwd = 1.2) + 
scale_color_viridis(guide = FALSE) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{$\\log_1_0$ Virions}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2H_Virion_Time_Course_PP_058_Local_Spread.pdf', width = 6, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 1,Sim == 1,Spread_Frac == "Diffusion Only"), aes(x = t, y = Virions %>% log10, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{$\\log_1_0$ Virions}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = 
element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2B_Virion_Time_Course_PP1_No_Spread.pdf', width = 5, height = 5, unit = "in") ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 0.575,Sim == 1,Spread_Frac == "Diffusion Only"), aes(x = t, y = Virions %>% log10, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis(guide = FALSE) + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(x = TeX("\\textbf{Time Post-Inoculation (Hours)}"), y = TeX("\\textbf{$\\log_1_0$ Virions}")) + theme(text=element_text(size = 13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ggsave('Figures/Supp2F_Virion_Time_Course_PP_058_No_Spread.pdf', width = 6, height = 5, unit = "in") D_Legend = ggplot() + geom_line(data = Fig4.Hours %>% filter(Pp == 0.575,Sim == 1,Spread_Frac == "Diffusion Only"), aes(x = t, y = Virions %>% log10, color = Virus_D, group = Virus_D), lwd = 1.2) + scale_color_viridis() + scale_x_continuous(limits = c(0,96),breaks = c(0,24,48,72)) + labs(color = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}")) + theme(text=element_text(size = 
13,face="bold"), strip.text.x=element_text(size=rel(1.5),margin=margin(0,0,3,0)), strip.text.y=element_text(size=rel(1.5),margin=margin(0,0,0,0),angle=0), strip.background = element_blank(), plot.title = element_text(size=rel(1.5)), axis.text.x=element_text(angle = 0,vjust=0.75,size=rel(1.5),color = "black"), axis.text.y=element_text(size=rel(1.5),color = "black"), axis.line.x = element_line(size=0.5), axis.line.y = element_line(size=0.5), axis.ticks.x = element_line(size=0.5), axis.ticks.y = element_line(size = 0.5), axis.title.y = element_text(size=rel(1.2),color = "black"), axis.title.x = element_text(size=rel(1.2)), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) plot(g_legend(D_Legend)) ggsave('Figures/Supp2_D_Legend.pdf', width = 4, height = 4, unit = "in") # Max # Semi-Infected Cells ---- df1 = Fig4.Peak %>% group_by(Virus_D,Spread_Frac) %>% dplyr::summarise(Max_Semi = mean(Max_Semi)) Plot.Stock + geom_point(data = df1, aes(x = Virus_D, y = Max_Semi, color = Spread_Frac)) + geom_smooth(data = df1, aes(x = Virus_D, y = Max_Semi, group = Spread_Frac, color = Spread_Frac, fill = Spread_Frac), lty = 1212, alpha = 0.3) + scale_y_continuous(limits = c(-100,3200), breaks = c(0,1000,2000,3000)) + labs(y = TeX("\\textbf{Peak # Cells with IVGs}"), x = TeX("\\textbf{Diffusion Coefficient ($\\log_1_0$ $\\mu$m^2/s)}")) ggsave('Figures/6B_Max_Semi.pdf', width = 6, height = 5, unit = "in")
72c25df60fe1fb7031a8b3670b78f97eeb918236
44692f59da65ef2a635b7c721a2f2e9a8ada38bd
/R/MS_SSM_plots.R
1d7ea2e78a780a0951ab95c2d3028b2fa319c8ea
[]
no_license
vtrijoulet/MS_SSM
d35a807a7305c8b2ea504ad9149fc2104e1050ae
672535011415f0e2fe1825b76b809501028f9642
refs/heads/master
2020-04-22T21:11:46.074173
2019-09-30T11:56:13
2019-09-30T11:56:13
170,665,801
4
0
null
null
null
null
UTF-8
R
false
false
15,513
r
MS_SSM_plots.R
years<-min(data$year1):data$lastyear Y1<-min(data$year1) sp.names<-attr(data,"sp_names") z.stat <- 1.96 name.folder<-paste(paste(sp.names,collapse ="_"),"_",Y1,"-",data$lastyear,"_rec",data$recruit_model, "_M",data$M_model,"_err-rec",data$process_rec,"_err-survi",data$process_survival, "_err-M",data$process_M,"_pred",data$predation_on,"_Type",data$functional_response+1, "_sizepref",data$gamma_pref_estim,"_consrate",data$cons_rate_estim,"_flagdiet",data$flag_nll_diet,sep="")[1] mypath<-getwd() new.path<-paste(mypath,"Results",name.folder,sep="/") dir.create(new.path,recursive=TRUE) setwd(new.path) #### Calculate CI #### names.rep.all <- names(sd.rep$value) names.rep <- unique(names.rep.all) se <- list() k=0 for (i in 1:length(names.rep)){ k=k+1 dim <- dim(rep[[names.rep[i]]]) idx <- as.vector(array(k:(k+prod(dim)-1),dim=dim)) se[[names.rep[i]]] <- array(sd.rep$sd[idx], dim=dim) k=k+prod(dim)-1 } # #### Calculate mean mortality rates and corresponding CI #### # # ## Function to estimate se for mean of adereported objects # require(numDeriv) # se.ADrep <- function (object, sp, FUN){ # phi <- function(x) FUN(x) # ix <- obj$env$ADreportIndex()[[object]][,,sp] # covx <- sd.rep$cov[ix, ix] # x <- rep[[object]][,,sp] # J <- jacobian(phi, x) ## Derivative of phi at x # covPhix <- J %*% covx %*% t(J) ## Covariance of phi(x) # return(sqrt(diag(covPhix))) # se around phi(x) # } # # ## F # mean.F<-array(dim=c(data$Y,data$sp)) # se.mean.F<-array(0,dim=c(data$Y,data$sp)) # mean.FAA<-array(dim=c(data$max_A,data$sp)) # se.mean.FAA<-array(0,dim=c(data$max_A,data$sp)) # for (i in 1:data$sp){ # mean.F[,i]<-apply(rep$F[,data$min_A_catch[i]:data$max_A_catch[i],i],1,mean) # se.mean.F[,i] <- se.ADrep("F",i,rowMeans) # mean.FAA[data$min_A_catch[i]:data$max_A_catch[i],i]<-apply(rep$F[,data$min_A_catch[i]:data$max_A_catch[i],i],2,mean) # se.mean.FAA[,i] <- se.ADrep("F",i,colMeans) # } # # ## M # mean.MAA<-array(dim=c(data$max_A,data$sp)) # se.mean.MAA<-array(0,dim=c(data$max_A,data$sp)) # 
for (i in 1:data$sp){ # mean.MAA[1:data$Aplus[i],i]<-apply(rep$MAA[,1:data$Aplus[i],i],2,mean) # se.mean.MAA[,i] <- se.ADrep("MAA",i,colMeans) # } # # ## P # if (data$predation_on==1){ # mean.PAA<-array(dim=c(data$max_A,data$sp)) # se.mean.PAA<-array(0,dim=c(data$max_A,data$sp)) # mean.PAA.y<-array(dim=c(data$Y,data$sp)) # se.mean.PAA.y<-array(dim=c(data$Y,data$sp)) # for (i in 1:data$sp){ # mean.PAA[,i]<-apply(rep$PAA[,,i],2,mean) # se.mean.PAA[,i]<-se.ADrep("PAA",i,colMeans) # mean.PAA.y[,i]<-apply(rep$PAA[,1:data$Aplus[i],i],1,mean) # se.mean.PAA.y[,i]<-se.ADrep("PAA",i,rowMeans) # } # } # # ## Z # mean.Z<-array(dim=c(data$max_A,data$sp)) # se.mean.Z<-array(dim=c(data$max_A,data$sp)) # mean.Z.y<-array(dim=c(data$Y,data$sp)) # se.mean.Z.y<-array(dim=c(data$Y,data$sp)) # for (i in 1:data$sp){ # mean.Z[,i]<-apply(rep$Z[,,i],2,mean) # se.mean.Z[,i]<-se.ADrep("Z",i,colMeans) # mean.Z.y[,i]<-apply(rep$Z[,1:data$Aplus[i],i],1,mean) # se.mean.Z.y[,i]<-se.ADrep("Z",i,rowMeans) # } #### Plot functions #### plot.time <- function(pred, obs=NULL, se=0, legend=0, xaxt="n", xlab="", ylab="", main="", ylim=c(min(na.omit(c((pred-z.stat*se),obs))),max(na.omit(c((pred+z.stat*se),obs)))) ){ plot(pred~years, ylim=ylim, ylab=ylab, type="l", main=main, xlab=xlab, xaxt=xaxt) if (!is.null(obs)) points(obs~years, pch=16, col="red") if (!is.null(se)) polygon(x=c(years,rev(years)), y=c(pred-z.stat*se,rev(pred+z.stat*se)), col=rgb(0,0,0,alpha=0.3), border = NA) if (legend==1 & i==1) legend("topleft",legend=c("Observed","Predicted"),pch=c(16,NA),lty=c(NA,1),col=c("red", "black"),bty="n", cex=0.8) } plot.age <- function(pred, age, se=0, legend=0, xaxt="n", xlab="", ylab="", main="", ylim=c(min(na.omit((pred-z.stat*se))),max(na.omit((pred+z.stat*se)))) ){ plot(y=pred, x=age, ylim=ylim, ylab=ylab, type="l", xaxt=xaxt, main=main, xlab=xlab, xlim=c(1, data$max_A)) if (!is.null(se)) polygon(x=c(age,rev(age)), y=c(pred-z.stat*se,rev(pred+z.stat*se)), col=rgb(0,0,0,alpha=0.3), border = NA) #if 
(legend==1 & i==1) legend("topright",legend=c("Observed","Predicted"),pch=c(16,NA),lty=c(NA,1),col=c("red", "black"),bty="n") } #### Start pdf output file #### pdf(file="Figures_fit.pdf") obs_aggr_Cw<-data$obs_aggr_Cw for (i in 1:data$sp) for (t in 1:data$Y) if (data$obs_aggr_Cw[t,i]==-999) obs_aggr_Cw[t,i]<-NA par(mfrow=c(data$sp,1), oma=c(3,4,1,1), mar=c(0,0,0,0),xpd=NA) for (i in 1:data$sp){ plot.time(pred=rep$aggr_Cw[,i], obs=obs_aggr_Cw[,i], se=se$aggr_Cw[,i], legend=1, ylab=paste0(sp.names[i]," total catch (tons)")) if(i==data$sp) axis(1) } obs_aggr_I<-data$obs_aggr_I for (j in 1:data$n_surv[i]) for (i in 1:data$sp) for (t in 1:data$Y) if (data$obs_aggr_I[t,i,j]==-999) obs_aggr_I[t,i,j]<-NA par(mfrow=c(data$sp,data$n_surv_max), oma=c(3,4,2,1), mar=c(0,1,0,1),xpd=NA) if (data$n_surv_max>4) {par(mfrow=c(data$sp,3), oma=c(3,4,2,1), mar=c(0,1,0,1),xpd=NA) } else {par(mfrow=c(data$sp,data$n_surv_max), oma=c(3,4,2,1), mar=c(0,1,0,1),xpd=NA)} for (i in 1:data$sp){ for (j in 1:data$n_surv[i]){ plot.time(pred=rep$aggr_I[,i,j], obs=obs_aggr_I[,i,j], se=se$aggr_I[,i,j], legend=1) if (i==1) mtext(paste0("Survey ",j),side=3, line=0.5, cex=0.7) if(i==data$sp) axis(1) if(j==1) mtext(paste0(sp.names[i]," total abundance indices"), side=2, line=3, cex=0.6) } } for (j in 1:data$sp){ if (length(data$min_A_catch[j]:data$max_A_catch[j])>4) par(mfrow=c(ceiling(length(data$min_A_catch[j]:data$max_A_catch[j])/3),3), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) else par(mfrow=c(ceiling(length(data$min_A_catch[j]:data$max_A_catch[j])/2),2), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) for (i in data$min_A_catch[j]:data$max_A_catch[j]){ plot.time(pred=rep$prop_Caa[,i,j], obs=data$obs_prop_Caa[,i,j], xaxt="s", legend=1, main=paste0("Age ",i), ylab=paste(sp.names[j],"age comp catch",sep=" ")) } #for (n in 1:(12-length(data$min_A_catch[j]:data$max_A_catch[j]))) #plot.new() } #par(mfrow=c(4,3), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) for (j in 1:data$sp){ for (k in 1:data$n_surv[j]){ if 
(length(data$min_A_surv[j,k]:data$max_A_surv[j,k])>4) par(mfrow=c(ceiling(length(data$min_A_surv[j,k]:data$max_A_surv[j,k])/3),3), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) else par(mfrow=c(ceiling(length(data$min_A_surv[j,k]:data$max_A_surv[j,k])/2),2), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) if (length(data$min_A_surv[j,k]:data$max_A_surv[j,k])==1) par(mfrow=c(1,1), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) for (i in data$min_A_surv[j,k]:data$max_A_surv[j,k]){ plot.time(pred=rep$prop_Iaa[,i,j,k], obs=data$obs_prop_Iaa[,i,j,k], xaxt="s", legend=1, main=paste0("Age ",i), ylab=paste(sp.names[j]," age comp survey",k,sep=" ")) } #for (n in 1:(12-length(data$min_A_surv[j,k]:data$max_A_surv[j,k]))) #plot.new() } } # Plot SSB par(mfrow=c(data$sp,1), oma=c(3,4,1,1), mar=c(0,0,0,0),xpd=NA) for (i in 1:data$sp){ plot.time(pred=rep$SSB[,i], se=se$SSB[,i], ylab=paste0(sp.names[i], " SSB")) if(i==data$sp) axis(1) } #plot recruits #par(mfrow=c(data$sp,1), oma=c(3,4,1,1), mar=c(0,0,0,0),xpd=NA) for (i in 1:data$sp){ plot.time(pred=rep$NAA[,1,i], se=se$recruits[,i], ylab=paste0(sp.names[i], " recruitment")) if(i==data$sp) axis(1) } par(mfrow=c(data$sp,1), oma=c(2,2,1,1), mar=c(2,2,2,2),xpd=NA) for (i in 1:data$sp){ plot(rep$NAA[2:data$Y,1,i]~rep$SSB[1:(data$Y-1),i], type="l", xlab=paste0(sp.names[i]," SSB"), ylab=paste0(sp.names[i]," recruitment")) text(y=rep$NAA[2:data$Y,1,i], x=rep$SSB[1:(data$Y-1),i], label=years[-data$Y], col="red") } # Plot s_surv par(mfrow=c(data$sp,data$n_surv_max), oma=c(3,4,2,1), mar=c(0,1,0,1),xpd=NA) for (i in 1:data$sp){ for (j in 1:data$n_surv[i]){ plot.age(pred=rep$s_surv[data$min_A_surv[i,j]:data$max_A_surv[i,j],i,j], age=data$min_A_surv[i,j]:data$max_A_surv[i,j]) if (i==1) mtext(paste0("Survey ",j),side=3, line=0.5, cex=0.7) if (j==1) mtext(paste0("Survey selectivity on ", sp.names[i]), side=2, line=3, cex=0.6) if (i==data$sp) axis(1) } } # Plot s_F par(mfrow=c(data$sp,1), oma=c(3,4,1,1), mar=c(0,0,0,0),xpd=NA) for (i in 1:data$sp){ 
plot.age(pred=rep$s_F[data$min_A_catch[i]:data$max_A_catch[i],i], age=data$min_A_catch[i]:data$max_A_catch[i], ylab=paste0("Fishing selectivity on ", sp.names[i])) if (i==data$sp) axis(1) } # Plot F #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot.time(pred=rep$mean_Fy[,i], se=se$mean_Fy[,i], ylab=paste0(sp.names[i], " mean fishing mortality"), legend=0) if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot.age(pred=rep$mean_FAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], se=se$mean_FAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], age=data$min_Fbar_idx[i]:data$max_Fbar_idx[i], ylab=paste0("Mean fishing mortality")) if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.age(pred=rep$F[1,1:data$Aplus[i],i], age=1:data$Aplus[i], ylab="Fishing mortality in all years", ylim=c(0,max(rep$F[,,i]))) for(t in 2:data$Y){ lines(rep$F[t,1:data$Aplus[i],i],col=t) } if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.time(pred=rep$F[,1,i], ylim=c(min(rep$F[,,i]),max(rep$F[,,i])), ylab=paste0("Fishing mortality at all ages on ", sp.names[i])) for(a in 2:data$Aplus[i]){ lines(rep$F[,a,i]~years,col=a) } if (i==1) legend("topleft", lty=1, col=1:data$max_A, legend=1:data$max_A, bty="n", cex=0.7, ncol=2) if (i==data$sp) axis(1) } #plot M #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.age(pred=rep$MAA[1,1:data$Aplus[i],i], age=1:data$Aplus[i], ylab="Natural mortality in all years", ylim=c(min(rep$MAA),max(rep$MAA))) for(t in 2:data$Y){ lines(rep$MAA[t,1:data$Aplus[i],i],col=t) } if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot.time(pred=rep$mean_My[,i], se=se$mean_My[,i], ylab=paste0(sp.names[i], " mean natural mortality"), legend=0) if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot.age(pred=rep$mean_MAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], se=se$mean_MAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], 
age=data$min_Fbar_idx[i]:data$max_Fbar_idx[i], ylab="Mean natural mortality") } #plot PAA #par(mfrow=c(data$sp,1)) if (data$predation_on==1){ for (i in 1:data$sp){ plot.age(pred=rep$mean_PAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], se=se$mean_PAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], age=data$min_Fbar_idx[i]:data$max_Fbar_idx[i], ylab="Mean predation mortality") if (i==data$sp) axis(1) } for (i in 1:data$sp){ plot.time(pred=rep$mean_Py[,i], se=se$mean_Py[,i], ylab=paste0("Mean predation mortality on ", sp.names[i])) if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.age(pred=rep$PAA[1,1:data$Aplus[i],i], age=1:data$Aplus[i], ylab="Predation mortality in all years", ylim=c(0,max(rep$PAA[,,i]))) for(t in 2:data$Y){ lines(rep$PAA[t,1:data$Aplus[i],i],col=t) } if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.time(pred=rep$PAA[,1,i], ylim=c(min(rep$PAA[,,i]),max(rep$PAA[,,i])), ylab=paste0("Predation mortality at all ages on ", sp.names[i])) for(a in 2:data$Aplus[i]){ lines(rep$PAA[,a,i]~years,col=a) } if (i==1) legend("topleft", lty=1, col=1:data$max_A, legend=1:data$max_A, bty="n", cex=0.7, ncol=2) if (i==data$sp) axis(1) } } #plot Z #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot.age(pred=rep$mean_ZAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], se=se$mean_ZAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], age=data$min_Fbar_idx[i]:data$max_Fbar_idx[i], ylab="Mean total mortality") if (i==data$sp) axis(1) } for (i in 1:data$sp){ plot.time(pred=rep$mean_Zy[,i], se=se$mean_Zy[,i], ylab=paste0("Mean total mortality on ", sp.names[i])) if (i==data$sp) axis(1) } #par(mfrow=c(data$sp,1)) for (i in 1:data$sp) { plot.age(pred=rep$Z[1,1:data$Aplus[i],i], age=1:data$Aplus[i], ylab="Total mortality in all years", ylim=c(min(rep$Z),max(rep$Z))) for(t in 2:data$Y){ lines(rep$Z[t,1:data$Aplus[i],i],col=t) } if (i==data$sp) axis(1) } #Area plot mortalities 
col<-c("deepskyblue3","midnightblue","darkolivegreen3") if (data$predation_on==0) { rep$mean_PAA=rep$mean_MAA rep$mean_PAA[]=0 } M=rep$mean_PAA+rep$mean_MAA #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ age=(data$min_Fbar_idx[i]:data$max_Fbar_idx[i]) plot(rep$mean_ZAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i]~age,xaxt="n", type="n", xlab="",ylab=paste0("Total mortality on ", sp.names[i]),ylim=c(0,max(rep$mean_ZAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i]))) polygon(x=c(data$min_Fbar_idx[i]:data$max_Fbar_idx[i],rev(data$min_Fbar_idx[i]:data$max_Fbar_idx[i])), c(rep$mean_PAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], rev(rep(0,length(data$min_Fbar_idx[i]:data$max_Fbar_idx[i])))),col=col[1],border=NA) polygon(x=c(data$min_Fbar_idx[i]:data$max_Fbar_idx[i],rev(data$min_Fbar_idx[i]:data$max_Fbar_idx[i])), c(M[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], rev(rep$mean_PAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i])),col=col[2],border=NA) polygon(x=c(data$min_Fbar_idx[i]:data$max_Fbar_idx[i],rev(data$min_Fbar_idx[i]:data$max_Fbar_idx[i])), c(rep$mean_ZAA[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i], rev(M[data$min_Fbar_idx[i]:data$max_Fbar_idx[i],i])),col=col[3],border=NA) if (i==1) if (data$predation==1) legend("topleft", legend=rev(c("P", "M", "F")), fill=rev(col), border=NA, bty="n") else legend("topleft", legend=rev(c("M", "F")), fill=rev(col), border=NA, bty="n") if (i==data$sp) axis(1) } if (data$predation_on==0) { rep$mean_Py=rep$mean_My rep$mean_Py[]=0 } My=rep$mean_Py+rep$mean_My #par(mfrow=c(data$sp,1)) for (i in 1:data$sp){ plot(rep$mean_Zy[,i]~years,xaxt="n", type="n", xlab="",ylab=paste0("Total mortality on ", sp.names[i]),ylim=c(0,max(rep$mean_Zy[,i]))) polygon(x=c(years,rev(years)), c(rep$mean_Py[,i], rev(rep(0,data$Y))),col=col[1],border=NA) polygon(x=c(years,rev(years)), c(My[,i], rev(rep$mean_Py[,i])),col=col[2],border=NA) polygon(x=c(years,rev(years)), c(rep$mean_Zy[,i], rev(My[,i])),col=col[3],border=NA) if (i==1) if (data$predation==1) 
legend("topleft", legend=rev(c("P", "M", "F")), fill=rev(col), border=NA, bty="n") else legend("topleft", legend=rev(c("M", "F")), fill=rev(col), border=NA, bty="n") if (i==data$sp) axis(1) } # # Plot cons_rate # if (data$cons_rate_estim==1){ # par(mfrow=c(data$sp,data$n_pred)) # for (j in 1:data$n_pred) # for (i in 1:data$sp){ # plot(rep$cons_rate[i,,j],main=sp.names[i],ylab="Consumption rate (kg/predator/year)",xlab=paste("Age",sp.names[j], "predator",sep=" "),type="l") # } # } dev.off() ## end of pdf file #### Save estimates ##### # outputs<-cbind(sd.rep$value,sd.rep$sd) # colnames(outputs)<-c("Estimate","Std.Error") # fixed.param<-cbind(sd.rep$par.fixed) # #random.param<-cbind(sd.rep$par.random) # write.csv(outputs,"sd_rep.csv") # write.csv(fixed.param,"fixed_par.csv") # write.csv(rep,"rep.csv") # sink("outtext.txt") # lapply(rep, print) # sink() save(data,file="data.RData") save(init,file="init.RData") save(rep,file="rep.RData") save(sd.rep,file="sd.rep.RData") save(map,file="map.RData") save(opt,file="opt.RData") #save.image("MS_SSM_workspace.RData")
1eb1e4be825f17e3a1aa009b6f5ca5bd7c2c4dc6
787a278be967f58fced754b31722033c127b600e
/6_three_sp_consortia.R
adada694efe21852f2bcf22a05cd51ff525b75bb
[ "MIT" ]
permissive
djbajic/structure-function-bacilli
17cd654ba4c8409a244b0bd140fcd10a8cddb701
16769943b58d20926c9cb0e8486126847cb5b4fa
refs/heads/master
2020-09-01T01:20:52.291192
2019-11-11T03:32:31
2019-11-11T03:32:31
218,840,107
0
1
null
null
null
null
UTF-8
R
false
false
891
r
6_three_sp_consortia.R
# Build the three-species consortia table: pairwise epistasis measured on
# top of every possible single-strain background (3-body interactions).

source('functions.R')

map    <- fread('data/stepwise_map_PA.csv')
strfun <- fread('data/structure_function_PA.csv')

strains <- c('C', 'E', 'M', 'P', 'S', 'T')

# .. epistasis in pairs as a function of single background strains (3-body):
# keep pairs grown either alone (bg is NA) or on a single-strain background,
# and encode "no background" as '-' so it sorts first in the ordered factor.
d <- map[bg %in% strains | is.na(bg)]
d[, bg := ifelse(is.na(bg), '-', bg)]
d[, bg := ordered(bg, c('-', sort(c('C', 'E', 'M', 'P', 'S', 'T'))))]

# .. strength of interactions in 3-body: build a canonical consortium key
# (the two pair members plus the background strain, sorted alphabetically
# and joined with '-') and look it up in strfun's V2 column.
k <- cbind(do.call(rbind, strsplit(d$pair, '-')), as.character(d$bg))
k <- t(apply(k, 1, sort))
k <- apply(k, 1, paste, collapse = '-')
k <- strfun[match(k, V2)]

# Attach the matched function columns, then compute second-order epistasis
# and its standard error (propagated in quadrature).
d <- cbind(d, k[, c('Vj.1.V2', 'Vj.1.V2.se', 'Vj.2.V2', 'Vj.2.V2.se',
                    'Vj.real.V2', 'se.real.V2')])
d[, eps.2 := (Vj.2.V2 - Vj.1.V2)]
d[, eps.2.se := sqrt(Vj.2.V2.se^2 + Vj.1.V2.se^2)]

write.csv(d, file='data/three_sp_consortia.csv', row.names=FALSE)
f227191a4c89510c8e1e01db1c646ab0017f957d
4c44593583577dc94b74f6102b23e90b11d62de9
/BIEN Scripts/ext.clade.sister.r
81155a417c6b98053ad2c0e7c0e5131f91867bd8
[]
no_license
redakc/kerkhofflab
7334bfd89de5877b37e52bc9b07a321b45156b5f
425ed6100137d48c5cbc07db75af4ca2cc8df52c
refs/heads/master
2020-04-05T22:49:01.500457
2017-03-27T18:45:49
2017-03-27T18:45:49
52,403,262
0
0
null
2017-03-27T18:45:50
2016-02-24T00:58:13
R
UTF-8
R
false
false
412
r
ext.clade.sister.r
# Extract the clade that includes all desired tips plus the clade sister
# to the MRCA of those tips.
#
# Args:
#   phy:  a phylo object (ape).
#   tips: tip labels (or tip indices) whose most recent common ancestor
#         defines the focal clade.
#
# Returns:
#   A phylo object: the subtree rooted at the common ancestor of the
#   focal clade and its sister clade.
#
# Note: the argument name `tips` shadows geiger's tips() function; the
# calls below still resolve to the function because R skips non-function
# bindings when looking up the target of a call.
ext.clade.sister <- function(phy, tips) {
  require(ape)       # getMRCA(), extract.clade()
  require(geiger)    # tips()
  require(phytools)  # getSisters()
  # MRCA of the requested tips defines the focal clade.
  mrca.focal <- getMRCA(phy, tips)
  # Node sister to that MRCA.
  mrca.sister <- getSisters(phy, mrca.focal)
  # Common ancestor of the focal tips and the sister clade's tips is the
  # root of the subtree we want to extract.
  mrca.both <- getMRCA(phy, c(tips(phy, mrca.focal), tips(phy, mrca.sister)))
  extract.clade(phy, mrca.both)
}
70231340a9a4d134fd2b7bdee4988c315aba9105
328eaeda3c51826f13d93859ce1b58e298e34f08
/Scripts/Data Organization.R
ba42075c733b9b1cc29e08d065ceb390d24c3c76
[]
no_license
s-gibson/ASA-NFL-book
237ce5ad62f9dba1774bc8e8836d81de8a55b914
d0519c816d93aa7ec4460ce4be3207692f6af8e6
refs/heads/master
2021-01-25T09:38:01.826688
2017-08-30T12:13:36
2017-08-30T12:13:36
93,863,763
8
0
null
null
null
null
UTF-8
R
false
false
9,750
r
Data Organization.R
########################
## ASA NFL DFS book   ##
## Data Organization  ##
## Stewart Gibson     ##
## 6/10/17            ##
########################

## Import data of weekly scores and over/unders (Vegas lines).
OU.2016      <- read.csv("data/2016_DonBest_VegasData_NFL_Week.csv")
OU.2012_2015 <- read.csv("data/2012_2015_Final_DonbestData_NFL.csv")

## Keep only regular season games.
OU.2016 <- OU.2016[which(OU.2016$Regular.Season == 1),]

# Create variables for year and week.
OU.2016$Year <- 2016
OU.2012_2015$Year <- substr(OU.2012_2015$Date, start = 7, stop = 10)
OU.2016$Week <- NA
OU.2012_2015$Week <- NA
OU.2016$Information      <- as.character(OU.2016$Information)
OU.2012_2015$Information <- as.character(OU.2012_2015$Information)
# Parse the week number out of the free-text Information column.  The
# trailing space in "WEEK <i> " keeps week 1 from also matching 10-17.
for (i in 1:17) {
  OU.2016$Week[grep(paste0('WEEK ', i, " "), OU.2016$Information)] <- i
  OU.2012_2015$Week[grep(paste0('WEEK ', i, " "), OU.2012_2015$Information)] <- i
}
rm(i)

# Create data frame of only 2015 data.
OU.2015 <- OU.2012_2015[which(OU.2012_2015$Year == 2015),]

# Remove Preseason/Regular Season/Postseason columns from OU.2016.
OU.2016 <- OU.2016[, c(1:11, 15:17)]

# Import players' weekly fantasy points data.
Fantasy.2016 <- read.csv("data/2016_NFL_Fantasy_Points.csv")
Fantasy.2016$First.Name <- as.character(Fantasy.2016$First.Name)
Fantasy.2016$Last.Name  <- as.character(Fantasy.2016$Last.Name)

# Remove spaces in players' first and last names.
# NOTE(review): gsub(" ", "") strips ALL spaces, not just leading ones —
# original behavior kept; confirm no player name legitimately contains a space.
Fantasy.2016$First.Name <- gsub(" ", "", Fantasy.2016$First.Name)
Fantasy.2016$Last.Name  <- gsub(" ", "", Fantasy.2016$Last.Name)

# Change class of Team Name (Home, Away) columns (OU.2015, OU.2016) to
# "character" so they can be overwritten below.
OU.2015$Home.Team <- as.character(OU.2015$Home.Team)
OU.2015$Away.Team <- as.character(OU.2015$Away.Team)
OU.2016$Home.Team <- as.character(OU.2016$Home.Team)
OU.2016$Away.Team <- as.character(OU.2016$Away.Team)

# Manually fill in missing Home Teams from OU.2016.
OU.2016$Home.Team[which(is.na(OU.2016$Home.Team))] <-
  c("Atlanta Falcons", "Arizona Cardinals", "Arizona Cardinals",
    "Atlanta Falcons", "Arizona Cardinals", "Arizona Cardinals",
    "Atlanta Falcons", "Arizona Cardinals", "Atlanta Falcons",
    "Arizona Cardinals", "Atlanta Falcons", "Atlanta Falcons",
    "Arizona Cardinals", "Arizona Cardinals", "Atlanta Falcons",
    "Atlanta Falcons")

# Replace elongated team names in OU.2015/OU.2016 with abbreviations
# (i.e. "Arizona Cardinals" -> "ari").  The index shuffle
# c(1:19, 21, 20, 22:27, 29, 28, 30:32) re-aligns the alphabetized full
# names with the order of levels(Fantasy.2016$Team).
full.names <- unique(OU.2016$Home.Team)[order(unique(OU.2016$Home.Team))][c(1:19, 21, 20, 22:27, 29, 28, 30:32)]
for (i in 1:32) {
  OU.2016$Home.Team[which(OU.2016$Home.Team == full.names[i])] <- as.character(levels(Fantasy.2016$Team)[i])
  OU.2016$Away.Team[which(OU.2016$Away.Team == full.names[i])] <- as.character(levels(Fantasy.2016$Team)[i])
  OU.2015$Home.Team[which(OU.2015$Home.Team == full.names[i])] <- as.character(levels(Fantasy.2016$Team)[i])
  OU.2015$Away.Team[which(OU.2015$Away.Team == full.names[i])] <- as.character(levels(Fantasy.2016$Team)[i])
}
rm(i, full.names)

## NOTE: Still need to do some work on "St. Louis Rams". Wait to see if
## 2015 player fantasy data notes Rams' players as "stl" or "lar".

## Change Christine Michael's information in weeks 1-10 to reflect SEA
## games. Substitute Russell Wilson's Team, H/A, Oppt info.
Fantasy.2016$Team[which(Fantasy.2016$First.Name == "Christine" &
                          Fantasy.2016$Week <= 10)] <- 'sea'
Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Christine" &
                         Fantasy.2016$Week <= 10)] <-
  Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Russell" &
                           Fantasy.2016$Last.Name == "Wilson" &
                           Fantasy.2016$Week <= 10)]
Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Christine" &
                          Fantasy.2016$Week <= 10)] <-
  Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Russell" &
                            Fantasy.2016$Last.Name == "Wilson" &
                            Fantasy.2016$Week <= 10)]

## Change Josh Huff's information in weeks 1-8 to reflect PHI games.
## Substitute Carson Wentz's Team, H/A, Oppt info.
Fantasy.2016$Team[which(Fantasy.2016$First.Name == "Josh" &
                          Fantasy.2016$Last.Name == "Huff" &
                          Fantasy.2016$Week <= 8)] <- 'phi'
Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Josh" &
                         Fantasy.2016$Last.Name == "Huff" &
                         Fantasy.2016$Week <= 8)] <-
  Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Carson" &
                           Fantasy.2016$Last.Name == "Wentz" &
                           Fantasy.2016$Week <= 8)]
Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Josh" &
                          Fantasy.2016$Last.Name == "Huff" &
                          Fantasy.2016$Week <= 8)] <-
  Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Carson" &
                            Fantasy.2016$Last.Name == "Wentz" &
                            Fantasy.2016$Week <= 8)]

## Change Griff Whalen's team to 'sdg'.
Fantasy.2016$Team[which(Fantasy.2016$Last.Name == 'Whalen')] <- 'sdg'

## Change Ronnie Hillman's information in weeks 1-11 to reflect MIN games.
## Substitute with Matt Asiata's (weeks 7-11) Team, H/A, Oppt information.
Fantasy.2016$Team[which(Fantasy.2016$First.Name == "Ronnie" &
                          Fantasy.2016$Last.Name == "Hillman" &
                          Fantasy.2016$Week <= 11)] <- 'min'
Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Ronnie" &
                         Fantasy.2016$Last.Name == "Hillman" &
                         Fantasy.2016$Week <= 11)] <-
  Fantasy.2016$h.a[which(Fantasy.2016$First.Name == "Matt" &
                           Fantasy.2016$Last.Name == "Asiata" &
                           Fantasy.2016$Week <= 11 & Fantasy.2016$Week >= 7)]
Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Ronnie" &
                          Fantasy.2016$Last.Name == "Hillman" &
                          Fantasy.2016$Week <= 11)] <-
  Fantasy.2016$Oppt[which(Fantasy.2016$First.Name == "Matt" &
                            Fantasy.2016$Last.Name == "Asiata" &
                            Fantasy.2016$Week <= 11 & Fantasy.2016$Week >= 7)]

## Remove John Phillips - Wk 5.
## NOTE(review): hard-coded row index — breaks silently if the input CSV
## changes.  Kept as in the original; prefer a name/week filter.
Fantasy.2016 <- Fantasy.2016[-1958,]

## REMOVE THIS CODE ONCE MASON PROVIDES REST OF WEEK 14 SCORES
## Remove Wk 14 Fantasy information.
Fantasy.2016 <- Fantasy.2016[which(Fantasy.2016$Week != 14),]

## Add columns to Fantasy.2016 that show players' teams' actual points and
## actual spread.  The spread in OU.2016 is stored relative to the home
## team, so it is negated for away players.
Fantasy.2016$Actual.Points <- NA
Fantasy.2016$Actual.Spread <- NA
Fantasy.2016$Team <- as.character(Fantasy.2016$Team)
for (i in seq_len(nrow(Fantasy.2016))) {
  if (Fantasy.2016$h.a[i] == 'h') {
    Fantasy.2016$Actual.Points[i] <- OU.2016$Home.Team.Score[which(
      OU.2016$Home.Team == Fantasy.2016$Team[i] &
        OU.2016$Week == Fantasy.2016$Week[i])]
    Fantasy.2016$Actual.Spread[i] <- OU.2016$Actual.Spread..Relative.to.Home.Team.[which(
      OU.2016$Home.Team == Fantasy.2016$Team[i] &
        OU.2016$Week == Fantasy.2016$Week[i])]
  } else {
    Fantasy.2016$Actual.Points[i] <- OU.2016$Away.Team.Score[which(
      OU.2016$Away.Team == Fantasy.2016$Team[i] &
        OU.2016$Week == Fantasy.2016$Week[i])]
    Fantasy.2016$Actual.Spread[i] <- -1*OU.2016$Actual.Spread..Relative.to.Home.Team.[which(
      OU.2016$Away.Team == Fantasy.2016$Team[i] &
        OU.2016$Week == Fantasy.2016$Week[i])]
  }
}
rm(i)

## Create columns that store players' names as their full name
## ("First Last") and as first initial + last name ("F. Last").
Fantasy.2016$First.Last <- paste(Fantasy.2016$First.Name, Fantasy.2016$Last.Name)
Fantasy.2016$Initial.Last <- paste0(substr(Fantasy.2016$First.Last, start = 1, stop = 1),
                                    ". ", Fantasy.2016$Last.Name)

## Import and clean injury data.
Injuries.2016 <- read.csv("data/merged_injuries_2016.csv")

## Convert Team column to character; change names of teams to match the
## team-naming scheme of the other datasets.
Injuries.2016$Team <- as.character(Injuries.2016$Team)
Injuries.2016$Team[which(Injuries.2016$Team == "clt")] <- "ind"
Injuries.2016$Team[which(Injuries.2016$Team == "crd")] <- "ari"
Injuries.2016$Team[which(Injuries.2016$Team == "htx")] <- "hou"
Injuries.2016$Team[which(Injuries.2016$Team == "jax")] <- "jac"
Injuries.2016$Team[which(Injuries.2016$Team == "oti")] <- "ten"
Injuries.2016$Team[which(Injuries.2016$Team == "rai")] <- "oak"
Injuries.2016$Team[which(Injuries.2016$Team == "ram")] <- "lar"
Injuries.2016$Team[which(Injuries.2016$Team == "rav")] <- "bal"

## Add a column for the average fantasy points scored by each player who
## was injured in 2016.  A player with no matching fantasy rows yields
## mean(numeric(0)) = NaN, so max(0, NaN) is NaN and the row is dropped by
## the threshold filter below (NaN >= 10 is NA, excluded by which()).
Injuries.2016$Avg.FP <- NA
for (i in seq_len(nrow(Injuries.2016))) {
  Injuries.2016$Avg.FP[i] <- max(0, mean(Fantasy.2016$DK.points[which(
    Fantasy.2016$First.Last == Injuries.2016$Player[i])], na.rm = TRUE))
}

## Establish a threshold of average fantasy points required for a player
## to be a significant injury; keep only players at or above it.
Inj.threshold <- 10
Injuries.2016 <- Injuries.2016[which(Injuries.2016$Avg.FP >= Inj.threshold),]

## Save data environment for future use.
rm(i, Inj.threshold)
save.image("data/clean_data.RData")
0795b5816f7caf177dcfbcd449aa34691e8a0d7d
ba65e258d288b8ccd8011313e6b4c0522f802e93
/man/logtransform.Rd
9ce618e836a47e75ef8efbbc511e8c9feea0d7c2
[]
no_license
cran/doebioresearch
3572acba107f48228ea6db867d9c8639e29829ce
fd141323443a017ae973f373c9be3e9c7f25d0ce
refs/heads/master
2022-11-18T14:03:19.248583
2020-07-08T11:20:03
2020-07-08T11:20:03
278,226,971
0
0
null
null
null
null
UTF-8
R
false
true
933
rd
logtransform.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/logtransform.R \name{logtransform} \alias{logtransform} \title{Log transformation of the numeric vector} \usage{ logtransform(numeric.vector) } \arguments{ \item{numeric.vector}{data vector to be transformed} } \value{ A list of \itemize{ \item \code{Ratio} - The ratio of the maximum and minimum values of the data \item \code{LogTransformedVector} - A vector of the transformed data \item \code{Comment} - A comment about whether zero is present in the data or not } } \description{ The function carries out a log (base 10) transformation of each value of the vector. If one of the values of the vector is 0, 1 is added to each observation. Log transformation is carried out for the data when the variance is proportional to the square of the mean and treatment effects are multiplicative in nature. } \examples{ vector<-c(100,0,120,1000,52,30,60) logtransform(vector) }
c32f5f2865dd05ab590cc974b0569f488864fc95
e2d06410af5af94eff00bffd230fb5c689daaba1
/R/users.R
8fcfd3383b75aac9712bcc34ff24564c7d56d5a3
[]
no_license
MarkEdmondson1234/gtmR
b99855895768c2b7dd3eee33881a89adcdc20770
644684285808ecf02a59fa31830802d4b1435d28
refs/heads/master
2021-01-10T16:22:00.092511
2020-12-05T21:32:24
2020-12-05T21:32:24
55,138,326
6
3
null
2020-12-05T21:32:25
2016-03-31T09:37:49
R
UTF-8
R
false
false
717
r
users.R
#' GTM users list
#'
#' Downloads all users that have access to the account.
#'
#' @param accountId The GTM account ID to list user permissions for.
#'
#' @return A data.frame of user permissions (one row per user), with the
#'   account-level access collapsed to its permission string.
#' @export
gtm_list_users <- function(accountId){
  acc_url <- "https://www.googleapis.com/tagmanager/v2/accounts"
  # paste0() replaces paste(..., sep = "") when building the endpoint URL.
  user_url <- paste0(acc_url, "/", accountId, "/user_permissions")
  f_perm <- gar_api_generator(user_url, "GET")
  permissions_list <- f_perm()
  # The accountAccess element appears to come back as a nested structure
  # with a list-valued `permission` field — TODO confirm against the GTM
  # v2 API response.  Flatten it to a plain vector, then replace the
  # nested element with that vector so as.data.frame() below yields an
  # atomic column.
  permissions_list$content$userAccess$accountAccess$permission <-
    unlist(permissions_list$content$userAccess$accountAccess$permission)
  permissions_list$content$userAccess$accountAccess <-
    permissions_list$content$userAccess$accountAccess$permission
  as.data.frame(permissions_list$content)
}
892db8af0dcfb43f65106aa161b1d11a37e6902f
e8c17cc82dd543dc61d633aacd1fde248868db99
/scripts/R/session2/simple_database_queries.R
2bd84bcc8d049f679a41766d0ba9defb9c10e956
[]
no_license
H4estu/COVID19-Data-Exploration
7d19eb30229b71c6118117769fb7ce1909f985c2
f549e629fe15f61ac0c4e83406af4ed860604f31
refs/heads/master
2022-03-08T03:42:53.456581
2022-02-23T19:06:46
2022-02-23T19:06:46
250,889,937
0
2
null
2020-06-01T22:21:42
2020-03-28T20:44:37
HTML
UTF-8
R
false
false
913
r
simple_database_queries.R
library(data.table) library(sf) library(RPostgreSQL) library(magrittr) # -------- Access the COVID-19 Database --------- # source(file.path(git.path,'Code/config_files/db_config.R')) con <- db_connect.fn() # ----------------------------------------------- # report_data <- dbGetQuery(con, 'SELECT * FROM covid_data.dummy_table') %>% data.table select_example <- dbGetQuery(con, "SELECT * FROM covid_data.dummy_table WHERE deaths > 100") %>% data.table update_example <- dbGetQuery(con, "UPDATE covid_data.dummy_table SET combined_key = 'This is an update', recovered=1 WHERE country_region = 'Mainland China'") %>% data.table insert_example <- dbExecute(con, "INSERT INTO covid_data.dummy_table (confirmed, deaths, recovered) VALUES (1000, 0, 100)") # need permission to do this delete_example <- dbGetQuery(con, "DELETE FROM covid_data.dummy_table where recovered = 100") %>% data.table dbDisconnect(con)
ddb1b14993d8748f81f64216c173630e8f476b1e
9fed66e4b6496d9f2428c8cb0054d255b3bb3c90
/plot2.R
bccc425720f30a263da3521d807160590374cde4
[]
no_license
catblue/ExData_Plotting1
6312dad8f9d9a6c9b8fc8e414c06994e5e86411a
2bac4fe1ca9c6c1616f3e8ab986d574ab4e9a356
refs/heads/master
2020-12-26T02:11:11.058585
2014-05-11T17:13:45
2014-05-11T17:13:45
null
0
0
null
null
null
null
UTF-8
R
false
false
1,542
r
plot2.R
#downloading and unziping data if(!file.exists("./data/household_power_consumption.txt")){ if(!file.exists("./data")){dir.create("./data")} fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip?accessType=DOWNLOAD" download.file(fileUrl, destfile="./exdata-data-household_power_consumption.zip", method="curl") unzip("exdata-data-household_power_consumption.zip",exdir="./data") } #Fast data loading if(!"data.table" %in% installed.packages()) install.packages("data.table") library(data.table) options(warn = -1) #fread() doesn't react properly on arguments and sends warnings energyTemp <- fread("data/household_power_consumption.txt", header=T, sep=";", na.strings = "?", stringsAsFactors=F, colClasses = c(rep("character",2), rep("numeric",7))) options(warn = 0) #subseting dataframe setkey(energyTemp,Date) eng <- data.frame(energyTemp[(Date == "1/2/2007") | (Date == "2/2/2007")]) #a bit risky but it works! #some cleaning eng$Time <- strptime(paste(eng$Date,eng$Time, sep=" "),format="%d/%m/%Y %H:%M:%S") #just better to have full time available eng$Date <- as.Date(eng$Date,format="%d/%m/%Y") energyTemp <- NULL #----------- Plot generation ------------ #Plot2 - Global Active Power - scaterplot if(!file.exists("./figure")){dir.create("./figure")} png(filename="./figure/plot2.png", width = 480, height = 480, units = "px") x <- eng$Time; y <- eng$Global_active_power plot(x,y, main="", ylab="Global Active Power (kilowatts)", xlab="", type="l" ) dev.off()
b0c7138849192921ae2ef20802f546afe70c1e0d
2d590228a8b39b5bf747825d3fe303e430124066
/man/sensitivity.Rd
89e11a8baf099f24162dc36da6b4e1a160ab31f5
[]
no_license
cran/gripp
b33cbb359f68bdeedb9dcef9476923ed68f56c7e
8fedae6b2871b53e9a61af45a107720390a7f741
refs/heads/master
2020-06-23T10:12:44.184493
2019-08-24T04:30:02
2019-08-24T04:30:02
198,593,773
0
0
null
null
null
null
UTF-8
R
false
false
3,511
rd
sensitivity.Rd
\name{sensitivity} \alias{sensitivity} \title{Sensitivity matrix calculator} \usage{sensitivity(parm_s)} \arguments{ \item{parm_s}{Set of values to be considered as parameters for the Direct Problem solution} } \value{A matrix with the derivative of the function that represents the Direct Problem for each parameter.} \description{ This sotfware will calculate the sensitivity matrix for the Direct Problem. First order derivatives are calculated using central difference approximation. {\if{html}{\figure{Inverse_Problem_R_Solver_Sens.png}{options: width="100\%" alt="Figure: Inverse_Problem_R_Solver_Sens.png"}}} } \examples{ # # Configure the name of the folder where each file with the parameter to be changed. # If the folder is 'wd' then it will not be changed. # You can change it to another location where your parameter files are. auxi <- system.file(package = "gripp") folder_name <- c(auxi,auxi) # # Configure the name of each file to be changed in the Direct Problem Solver. file_name <- c('f1.R','f1.R') # # Configure the name of each parameter to be changed. parm_name <- c('A','B') # # Configure alternative method using line number to enter each parameter # When line_number is zero, then the input method uses keywords. line_number <-c(0,0) # # Configure each parameter type, where: # 1 means a numeric variable informed as a string, such as parm <- "1.38" # 2 means a numeric vector informed as a string of numbers separated with a space character. # For example, parm <- "1.25 3.4". You must tell which one will be the variable considered. parm_type <- c(1,1) # # Informe the position of the variable to be considered in the problem in the vector. # If the parameter is a numeric variable, then its position is zero. parm_vector <- c(0,0) # # Configure the smallest value for each parameter parm_min <- c(0,0) # # Configure the larger value for each parameter parm_max <- c(2,5) # # Configure the name of the folder where the command must be called. 
# If the folder is 'wd' then it will not be changed. # You can change it to another location where your parameter files are. command_folder <- auxi # # This variable means that the Direct Problem is solved using R or outside # if this is FALSE, the results are to be read from a file. # When it is TRUE, results will be passed inside R using the variable "results" isitR <- TRUE # # Configure the command to be used to call the Direct Problem Solver. # if this is FALSE, the results are to be read from a file. # When it is TRUE, results will be passed inside R using the variable "result" command <- 'f1.R' # # Parameter positive and negative percentual difference to be used to calculate the derivative # ppdif must me a number between 0 and 100 # parameter_pos <- parm + (ppdif/100)*(parm_max-parm_min) # parameter_neg <- parm - (ppdif/100)*(parm_max-parm_min) ppdif <- 1 # # Configure a string to be used to attrib values in the Direct Problem file. attrib_str <- '<-' # # Configure the name of the file with the results obtained by the Direct Problem. # It must be a single column of values. # The results are changed at each run by the Direct Problem Solver. result <- 'result.dat' # # Configure the name of the folder where the results can be found after each run. # If the folder is 'wd' then it will not be changed. # You can change it to another location where your result file is. result_folder <- tempdir() # sensitivity(c(1,3)) }
80f4968cfe695048d1bf4e32b493250958de963b
70f7231b9f8c041c21abc4abaedfaa5b2a6bd107
/depth_split.R
323c8e1542d4a6736c696e7a3b1666ff6b7ba0e9
[]
no_license
Kikiliuz/depth_split
07f41f9a84c6f177e567b82d0b5660020bf0d275
4ae58b5335cc4709e24914cb75e30294b7212aa5
refs/heads/master
2020-03-21T05:42:34.265900
2018-06-21T13:44:50
2018-06-21T13:44:50
138,173,631
0
0
null
null
null
null
UTF-8
R
false
false
637
r
depth_split.R
library('dplyr') set.seed(1) x<-round(runif(min=100,max=10000,n=1231)) y<-runif(min=0,max=1,n=1231) data<-data.frame(cbind(x,y)) depth_split = function(data, k, variable) { n = nrow(data) # 获取data的行数 data = arrange(data, variable) # 对data进行排序 depth_list=rep(0, n) # 创建新行,初始化为0 for(i in c(1:n)) { depth_list[i] =i/k # 对每行的depth列,赋值组号 } data$depth = depth_list # 创建新列 data$split=cut(depth_list,k) #等宽分组 return(data) # 返回 } a<-depth_split(data=data,k=10,variable=y) summarise(group_by(a,split),length(x),min(y),max(y))#检查排序
c1f97beb7e622887a55a3afd7d7926ef52591500
f76dcb5082462f759a63ff46d41c4acc2dbe5a93
/man/rws_read_init.Rd
8d6ed90adf3f213229d78f078a2880e03ee79497
[ "MIT" ]
permissive
poissonconsulting/readwritesqlite
ba7ae2d6c814eb4880f94ee3e0ee77793a12436a
db961138ad98b957b70b3e4f257ab8c9c317b8e2
refs/heads/main
2022-11-02T22:30:43.073748
2022-10-16T23:09:15
2022-10-16T23:09:15
158,617,726
39
1
NOASSERTION
2022-09-28T12:29:35
2018-11-21T23:26:36
R
UTF-8
R
false
true
568
rd
rws_read_init.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/init.R \name{rws_read_init} \alias{rws_read_init} \alias{rws_read_sqlite_init} \title{Read Initialization Data table from a SQLite Database} \usage{ rws_read_init(conn) } \arguments{ \item{conn}{A \linkS4class{SQLiteConnection} to a database.} } \value{ A data frame of the init table } \description{ The table is created if it doesn't exist. } \examples{ conn <- rws_connect() rws_read_init(conn) rws_write(rws_data, exists = FALSE, conn = conn) rws_read_init(conn) rws_disconnect(conn) }
cf019fc12b77515d05074adcab187c4af9078b45
e6b97f595b589e38238287d617605753b20511cf
/workflow.R
2ffb9a74aa28acbe63161c55dc1ddbc57d104fdb
[]
no_license
peggylind/SyntheticDataSet
c3bd0f8cce17fe32f4341ac49fd5c98680c3d010
b2cdf2581d64504e9af4d5af2c2c199ea4908d57
refs/heads/master
2020-04-05T22:17:47.762325
2019-07-18T00:36:56
2019-07-18T00:36:56
157,251,390
0
0
null
2018-11-12T17:34:16
2018-11-12T17:34:15
null
UTF-8
R
false
false
1,167
r
workflow.R
# Workflow controller for SAM creation source("BaseScripts/basesam.R") #set working directory to where all the data folders can be found housingDataDirectory <- "~/University Of Houston/Price, Daniel M - Social Network Hypergraphs/HCAD/2015/" censusDataDirectory <- "~/University Of Houston/Price, Daniel M - Social Network Hypergraphs/Census/2014/" sam <- createBaseSAM(housingdir = housingDataDirectory, censusdir = censusDataDirectory, censusFromRDS = TRUE, HCAD_parcelsFromRDS = TRUE, numberOfCores = 1) # do some sanity checks and more columns if (sanityChecks(sam)) { # Add column to add level to data sam <- one_of(sam) # TODO move into HCAD preprocessing - adds cooordinates to the model based on the geometry column sam <- add_lat_long(sam) # This converts all the columns in to the model to the appropriate class (either character or numeric) sam <- convertColumnTypes(sam) } else { print("Did not pass sanity checks!") } #add extension columns - NHANES etc. #Save the final result saveRDS(sam, paste0("complete_sam", Sys.Date(), ".RDS"))
121f60b8bc82be2a14ed46868b42dd206a6fe88c
a04716b6fd12d346a07b26c85d3cb3ea995def54
/a1.R
e167e22260636ba5614089c0cddebe570b345c28
[]
no_license
mjherz/kt1
991fcfedffbda25a2788148d7aaec3a0071337f4
e6a2592aa60df06596b03ccff22e5850e1bc1632
refs/heads/master
2016-09-05T12:43:45.241676
2013-06-25T18:46:36
2013-06-25T18:46:36
null
0
0
null
null
null
null
UTF-8
R
false
false
5,441
r
a1.R
set.seed(6001) train1<-read.csv("train.csv") test1<-read.csv("test.csv") gcm<-read.csv("genderclassmodel.csv") gm<-read.csv("gendermodel.csv") names(train1) dim(train1) sapply(train1,class) train1$pclass<-as.factor(train1$pclass) train1$survfact<-as.factor(train1$survived) #develop cabin group train1$cabgr<-as.factor(substr(train1$cabin,1,1)) table(train1$cabgr) sum(train1$cabgr=="") #missing values sum(is.na(train1)) which(is.na(train1),arr.ind=TRUE) #missing age,cabgr train1$cabgr[train1$cabgr==""]<-NA train1$cabgr<-as.factor(train1$cabgr) train1$survdoub<-as.double(train1$survived) sum(is.na(train1$cabgr)) which(is.na(train1),arr.ind=TRUE) #missing two values in embarked; insert NA train1$embarked[train1$embarked==""]<-NA library(randomForest) #for first, exclude name, ticket, cabin because of number of factors #include cabgr (first digit of cabin), and age, but only for complete records train2<-train1[-which(is.na(train1),arr.ind=TRUE)[,1],] which(is.na(train2),arr.ind=TRUE) sum(is.na(train2)) dim(train2) dim(train1) form1<-as.formula("survfact~embarked+age+sex+sibsp+parch+pclass+fare+cabgr") rf1<-randomForest(form1,data=train2,prox=TRUE) importance(rf1) train3<-cbind(train2[[2]],train2[[4]],train2[[5]],train2[[6]],train2[[7]],train2[[9]],train2[[11]],train2[[13]]) rfcv1<-rfcv(train3,train2$survfact,cv.fold=10) rfcv1$error.cv #exclude cabgr (first digit of cabin), and age because of missing records rm(train2,train3,form1) train2<-data.frame(as.factor(train1[[2]])) train2<-cbind(train2,as.factor(train1[[4]]),as.integer(train1[[6]]),as.integer(train1[[7]]),as.numeric(train1[[9]]),as.factor(train1[[11]])) colnames(train2)<-c(names(train1)[[2]],names(train1)[[4]],names(train1)[[6]],names(train1)[[7]],names(train1)[[9]],names(train1)[[11]]) library(caret) library(RWeka) library(gbm) library(party) library(glmnet) library(C50) library(e1071) fitcontrol<-trainControl(method="repeatedcv",number=10,repeats=10) fitresults1<-data.frame(matrix(ncol=6)) #standardize all 
errors-- kappa or rmse cripper1<-train(train2,train1$survfact,method="JRip",trControl=fitcontrol) fitresults1[1,1]<-"jrip" fitresults1[1,2:6]<-cripper1$results form1<-as.formula("survfact~embarked+sex+sibsp+parch+pclass+fare") crf1<-train(form1,data=train1,method="rf",trControl=fitcontrol) fitresults1[2,1]<-"rf" fitresults1[2,2:6]<-crf1$results cctree1<-train(train2,train1$survfact,method="ctree",trControl=fitcontrol) fitresults1[3,1]<-"ctree" fitresults1[3,2:6]<-cctree1$results #standardize errors ccforest1<-train(train2,train1$survfact,method="cforest",trControl=fitcontrol) fitresults1[4,1]<-"cforest" fitresults1[4,2:6]<-ccforest1$results #works, but what are the errors cj48tree1<-train(train2,train1$survfact,method="J48",trControl=fitcontrol) fitresults1[5,1]<-"j48" fitresults1[5,2:6]<-cj48tree1$results #standardize errors cgbm1<-train(train2,train1$survfact,method="gbm",distribution="bernoulli",trControl=fitcontrol) fitresults1[6,1]<-"gbm" fitresults1[6,2:6]<-cgbm1$results #standardize errors form1<-as.formula("survived~embarked+sex+sibsp+parch+pclass+fare") cglmnet1<-train(form1,data=train1,method="glmnet",family="binomial",trControl=fitcontrol) fitresults1[7,1]<-"glmnet" fitresults1[7,2:6]<-cglmnet1$results #works, but not certain of teh type of errors form1<-as.formula("survfact~embarked+sex+sibsp+parch+pclass+fare") c50rules1<-train(form1,data=train1,method="C5.0Rules") fitresults1[8,1]<-"c50rules" fitresults1[8,2:6]<-c50rules1$results #standardize errors cPART1<-train(train2,train1$survfact,method="PART",trControl=fitcontrol) fitresults1[9,1]<-"part" fitresults1[9,2:6]<-cPART1$results form1<-as.formula("survived~embarked+sex+sibsp+parch+pclass+fare") cglmstepaic1<-train(form1,data=train1,method="glmStepAIC",family="binomial",trControl=fitcontrol) fitresults1[10,1]<-"glmstepaic" fitresults1[10,2:6]<-cglmstepaic1$results csvm1<-train(form1,data=train1,method="svmLinear",family="binomial",trControl=fitcontrol) fitresults1[11,1]<-"svm" 
fitresults1[11,2:6]<-csvm1$results #can use varImp on some test2<-data.frame(as.factor(test1[[1]])) test2<-cbind(test2,as.factor(test1[[3]]),as.integer(test1[[5]]),as.integer(test1[[6]]),as.numeric(test1[[8]]),as.factor(test1[[10]])) colnames(test2)<-c(names(test1)[[1]],names(test1)[[3]],names(test1)[[5]],names(test1)[[6]],names(test1)[[8]],names(test1)[[10]]) test2$embarked[test2$embarked==""]<-NA restest3<-data.frame(matrix(ncol=2,nrow=dim(test1)[1])) restest3$survpred<-as.integer(0) restest3$agreement<-as.integer(0) #153 does not have a fare; impute for now test2$fare[153]<-mean(test1$fare[-c(153)]) for (i in 1:dim(test2)[1]) { j=0 if(predict(cripper1,test2[i,])==1) {j=j+1} if(predict(crf1,test2[i,])==1) {j=j+1} # if(predict(cctree1,test2[i,])==1) {j=j+1} # if(predict(ccforest1,test2[i,])==1) {j=j+1} if(predict(cj48tree1,test2[i,])==1) {j=j+1} if(predict(cgbm1,test2[i,])==1) {j=j+1} if(predict(cglmnet1,test2[i,])==1) {j=j+1} if(predict(c50rules1,test2[i,])==1) {j=j+1} if(predict(cPART1,test2[i,])==1) {j=j+1} if(predict(cglmstepaic1,test2[i,])==1) {j=j+1} if(predict(csvm1,test2[i,])==1) {j=j+1} restest3$agreement[i]<-j if(j>=6) {restest3$survpred[i]<-1} } sum(restest3$survpred==1)/dim(restest3)[1] sum(train1$survived==1)/dim(train1)[1] survpred<-data.frame((matrix(nrow=dim(test1)[1]))) survpred[[1]]<-restest3$survpred write.csv(survpred, file = "survpred1.csv",row.names=FALSE) #confusion matrix
80cf90b140d1c5cee4a463861963a0b4e8e8fb9a
40b8c18de5170436038973ed1478e9817a4270f8
/scripts/03_linear_regression_training_data.R
dba08d10bd85e0efd31bab4ef7388945004ef0e3
[]
no_license
desval/ResazurinMIC
a97eb7e12a8407e8af27cfb7de77d3fc60c269fe
19b6aac10a24280eec77139c36e7192e280468c5
refs/heads/master
2021-09-15T16:56:18.775159
2017-07-10T10:46:04
2017-07-10T10:46:04
79,265,781
0
0
null
2017-01-17T19:54:13
2017-01-17T19:54:13
null
UTF-8
R
false
false
4,807
r
03_linear_regression_training_data.R
# Description ## ---------------------------------------------------------------------------------------------------------------------------- # In this file we fit a linear regression to the training data and plot the model # Load dependencies and set options ## ---------------------------------------------------------------------------------------------------------------------------- rm(list=ls()) packages <- c("ggplot2", "RColorBrewer") to.install <- setdiff(packages, rownames(installed.packages())) if (length(to.install) > 0) { install.packages(to.install) } lapply(packages, library, character.only = TRUE) # load and subset data (to calculate the number of evaluable strains) ## ---------------------------------------------------------------------------------------------------------------------------- alldata <- read.csv("output/tables/alldata.csv", stringsAsFactors = F) #above limit of detection (resazurin method): alldata <- subset(alldata, alldata$quality != "above limit of detection") #above or below limit of detection (Etest method): alldata$above <- grepl(">",alldata$MIC) length(alldata[alldata$above==T,]$above) alldata$below <- grepl("<",alldata$MIC) length(alldata[alldata$below==T,]$below) cleandata <- subset(alldata, alldata$above!=TRUE & alldata$below!=TRUE) # cleandata_t <- subset(cleandata, cleandata$run == "training") # cleandata_v <- subset(cleandata, cleandata$run == "validation") # cleandata_r <- subset(cleandata, cleandata$run == "reference") training <- subset(cleandata,cleandata$run == "training") validation <- subset(cleandata,cleandata$run == "validation") # estimate linear regression model ##----------------------------------------------------------------------------------------------------------------------- etest <- as.numeric(training$MIC) esti <- as.numeric(training$Estimate) print(summary(linreg <- lm(log(etest)~(esti)))) esti_fit_log <- linreg$fitted.values esti_fit <- exp(linreg$fitted.values) prediction <- data.frame(esti_fit) # save 
parameters and parameters variance~covariance matrix in a named list, we will use it to bootstrap vcov.list <- list(list("Estimates"=linreg, "Matrix"=list(vcov(linreg), "Summary"=summary(linreg)))) saveRDS(vcov.list, "output/tables/lm_parameters_variance_covariance_matrix_list.rds") # save pearson correlation pearson <- cor(log(etest),esti,method="pearson") saveRDS(pearson, "output/tables/pearson.rds") # Plot regression and save it as rds object, so that we can combine it with plots from # other scripts #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- # get fitted values df4 <- as.data.frame(cbind(esti_fit,training)) # set plot options par(mar = c(5,5,5,5)) par(oma = c(1,1,1,1)) options(scipen = 100) # function for formatting axis label decimals fmt_dcimals <- function(decimals=0){ function(x) as.character(round(x,decimals)) } # colorvector <- c( "#D95F02", "#7570B3", "#E7298A" ,"#66A61E", "#1F78B4", "#A6761D", "#666666") colorvector <- brewer.pal(9,"Set1")[c(3,2,1,4,5,9,8)] #colorvector <- brewer.pal(8,"Set2") dfplot <- df4 dfplot$antibiotic <- gsub("Penicillin","Penicillin", dfplot$antibiotic ) # cosmetic changes p1<-ggplot(dfplot, aes (x = exp(Estimate), y = as.numeric(MIC))) + #geom_text(data = data.frame(), aes(0.05, 0.0005, label = ""),cex=16, fontface="bold")+ geom_point(size=2,alpha=0.6,aes(colour = factor(antibiotic)))+ #labs(aes(colour=antibiotic))+ geom_smooth(method = "lm",color="steelblue2",show.legend = F,se = F,level=0.95)+ scale_y_continuous("Etest MIC (mg/L)",trans='log10' , limits = c(0.00001, 5000), breaks=c(0.00001,0.0001,0.001, 0.01,0.1,1,10,100,1000), labels = fmt_dcimals(5))+ scale_x_continuous(expression("EC"[50]*" (mg/L)"), trans='log10' , limits = c(0.00001, 5000), breaks=c(0.00001,0.0001,0.001, 0.01,0.1,1,10,100,1000), labels = fmt_dcimals(5)) + 
scale_color_manual(values=colorvector) + geom_abline(intercept = 0.0000001, slope = 1, color="black", lty=2)+ theme_bw(18)+theme(legend.justification=c(-0.01,1.01), legend.position=c(0,1), legend.text = element_text(colour="black"), legend.title = element_blank(), axis.line = element_line(colour = "black"), legend.key = element_rect(colour = "white")) + guides(colour = guide_legend(override.aes = list(size=8))) p1 regr_plot <- list("regression" = p1) saveRDS(regr_plot, "output/figures/regression_plot.RDS") # file end ## -----------------------------------------------------------------------------------------------------------
d97f26345dd556b3af2c247fe54e40cf00da083e
749df9b8dd733b3c63dbea598540ffc7a97d8c84
/Supplementaryscript2.r
bef7758c7c2745d94323df0e7bb0bb71db9b5964
[]
no_license
thalescherubino/RTqPCR
4845dd030d4f28c2a64e552935fd52da663c1d7c
ca205737b4b9edf56b4f6a7f040934895d014649
refs/heads/main
2023-04-08T05:40:42.966678
2021-04-23T16:29:41
2021-04-23T16:29:41
360,313,870
0
0
null
null
null
null
UTF-8
R
false
false
8,454
r
Supplementaryscript2.r
# Supplementaryscript2.r ############################ #Traditional method-DeltaCT# ############################ #first calculate the mean between both technical replicates deltaCtData <- RTqPCRdata[seq(1,nrow(RTqPCRdata),2),] deltaCtData$ct <- colMeans(matrix(RTqPCRdata$ct,nrow=2),na.rm=T) #Opt - Wat target.opt <- deltaCtData$ct[(which(deltaCtData$treat == "opt" & deltaCtData$gene == TARGET))] refa.opt <- deltaCtData$ct[(which(deltaCtData$treat == "opt" & deltaCtData$gene == REFA))] refb.opt <- deltaCtData$ct[(which(deltaCtData$treat == "opt" & deltaCtData$gene == REFB))] target.wat <- deltaCtData$ct[(which(deltaCtData$treat == "wat" & deltaCtData$gene == TARGET))] refa.wat <- deltaCtData$ct[(which(deltaCtData$treat == "wat" & deltaCtData$gene == REFA))] refb.wat <- deltaCtData$ct[(which(deltaCtData$treat == "wat" & deltaCtData$gene == REFB))] opt <- target.opt - 0.5*(refa.opt + refb.opt) wat <- target.wat - 0.5*(refa.wat + refb.wat) diff.opt.wat <- opt - wat t.test.opt.wat <- t.test(opt, wat) #aca - cat target.aca <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == TARGET))] refa.aca <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFA))] refb.aca <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFB))] target.cat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == TARGET))] refa.cat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFA))] refb.cat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFB))] aca <- target.aca - 0.5*(refa.aca + refb.aca) cat <- target.cat -0.5*(refa.cat + refb.cat) diff.aca.cat <- aca - cat t.test.aca.cat <- t.test(aca, cat) #aca.opt - aca.wat target.aca.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == TARGET & deltaCtData$treat == "opt"))] refa.aca.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFA & 
deltaCtData$treat == "opt"))] refb.aca.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFB & deltaCtData$treat == "opt"))] target.aca.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == TARGET & deltaCtData$treat == "wat"))] refa.aca.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFA & deltaCtData$treat == "wat"))] refb.aca.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "aca" & deltaCtData$gene == REFB & deltaCtData$treat == "wat"))] aca.opt <- target.aca.opt - 0.5*(refa.aca.opt + refb.aca.opt) aca.wat <- target.aca.wat -0.5*(refa.aca.wat + refb.aca.wat) diff.aca.opt.wat <- aca.opt - aca.wat t.test.aca.opt.wat <- t.test(aca.opt, aca.wat) #cat.opt - cat.wat target.cat.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == TARGET & deltaCtData$treat == "opt"))] refa.cat.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFA & deltaCtData$treat == "opt"))] refb.cat.opt <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFB & deltaCtData$treat == "opt"))] target.cat.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == TARGET & deltaCtData$treat == "wat"))] refa.cat.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFA & deltaCtData$treat == "wat"))] refb.cat.wat <- deltaCtData$ct[(which(deltaCtData$cultivar == "cat" & deltaCtData$gene == REFB & deltaCtData$treat == "wat"))] cat.opt <- target.cat.opt - 0.5*(refa.cat.opt + refb.cat.opt) cat.wat <- target.cat.wat -0.5*(refa.cat.wat + refb.cat.wat) diff.cat.opt.wat <- cat.opt - cat.wat t.test.cat.opt.wat <- t.test(cat.opt, cat.wat) #create a vector with the mean of the -diff (or delta delta Ct) diff.expression.means <- c(mean(-diff.opt.wat), mean(-diff.aca.cat), mean(-diff.aca.opt.wat), mean(-diff.cat.opt.wat)) names(diff.expression.means) <- c("opt - wat", "aca - cat", "aca.opt 
- aca.wat", "cat.opt - cat.wat ") lower.conf.int <- c(-t.test.opt.wat$conf.int[1], -t.test.aca.cat$conf.int[1], -t.test.aca.opt.wat$conf.int[1], -t.test.cat.opt.wat$conf.int[1]) upper.conf.int <- c(-t.test.opt.wat$conf.int[2], -t.test.aca.cat$conf.int[2], -t.test.aca.opt.wat$conf.int[2], -t.test.cat.opt.wat$conf.int[2]) pdf("deltaDeltaCtLog2FC.pdf", h=8,w=8) barplot <- barplot(diff.expression.means,beside=T, ylim=c(-5,5),las=1,yaxt='n',cex.names=1.1) title(ylab=expression("Log"[2]*"FC"),line=2) axis(2, at =c(seq(-15,15,1)),las =2,cex=.5) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=-1,lty=3,lwd=2,col="red") arrows(x0 = barplot, y0 = lower.conf.int, x1 = barplot,y1=upper.conf.int,code=3,angle=90,length=0.05,col="#964841",lwd=2) dev.off() FC.diff.expression.means <- 2^diff.expression.means FC.lower.conf.int <- 2^upper.conf.int FC.upper.conf.int <- 2^lower.conf.int pdf("deltaDeltaCtFC.pdf", h=8,w=8) barplot <- barplot(FC.diff.expression.means,beside=T, ylim=c(0,5),las=1,yaxt='n',cex.names=1) title(ylab=expression("FC"),line=2) axis(2, at =c(seq(0,10,1)),las =2,cex=.5) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=.5,lty=3,lwd=2,col="blue") arrows(x0 = barplot, y0 = FC.lower.conf.int, x1 = barplot,y1=FC.upper.conf.int,code=3,angle=90,length=0.05,col="#964841",lwd=2) dev.off() ############################################ #make a single plot with Delta Ct and model# ############################################ names <- c("opt - wat", "aca - cat", "aca.opt - aca.wat", "cat.opt - cat.wat ") pdf("LMM.versus.DeltaCt.pdf",h=9,w=9) par(mfrow = c(2,2)) #log2FC lMM model barplot <- barplot(confidence.intervals[,1],beside=T, ylim=c( if(min(confidence.intervals[,2])>=-5){-5}else{min(confidence.intervals[,2])*1.2}, if(max(confidence.intervals[,3])<=5){5}else{max(confidence.intervals[,3])*1.2} ),las=1,yaxt='n',names="",main="LMM model",cex.main=2) text( x= barplot,y=if(min(confidence.intervals[,2])>=-5){-6.5}else{min(confidence.intervals[,2])*1.4}, labels = 
names,srt=30,xpd=T) axis(2, at =c(seq(-100,100,1)),las =2,cex=.5) title(ylab=expression("Log"[2]*"FC"),line=2,cex.lab=1.5) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=-1,lty=3,lwd=2,col="red") arrows(x0 = barplot, y0 = confidence.intervals[,2], x1 = barplot,y1=confidence.intervals[,3],code=3,angle=90,length=0.05,col="#964841",lwd=2) #FC lMM model barplot <- barplot(FCCI[,1],beside=T, ylim=c(0, if(max(FCCI[,3]) <= 5){5}else {max(FCCI[,3])*1.20 }), las=1,yaxt='n',cex.names=1,main="LMM model",names="",cex.main=2) title(ylab=expression("FC"),line=2,cex.lab=1.5) axis(2, at =c(seq(0,100)),las =2,cex=.5) text( x= barplot,y=if(max(FCCI[,3]) <= 5 ){-.7}else if(max(FCCI[,3]) <= 15){- 1.4}else{-3.5}, labels = names,srt=30,xpd=T) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=.5,lty=3,lwd=2,col="blue") arrows(x0 = barplot, y0 = FCCI[,2], x1 = barplot,y1=FCCI[,3],code=3,angle=90,length=0.05,col="#964841",lwd=2) FC.strapolator=FALSE # Delta Delta Ct Log2FC barplot <- barplot(diff.expression.means,beside=T, ylim=c( if(min(upper.conf.int)>=-5){-5}else{min(upper.conf.int)*1.2}, if(max(lower.conf.int)<=5){5}else{max(upper.conf.int)*1.2} ),las=1,yaxt='n',cex.names=1.1,main=expression(paste(Delta,bold("Ct method"))),names="",cex.main=2) text( x= barplot,y=if(min(upper.conf.int)>=-5){-6.5}else{min(upper.conf.int)*1.4}, labels = names,srt=30,xpd=T) title(ylab=expression("Log"[2]*"FC"),line=2,cex.lab=1.5) axis(2, at =c(seq(-100,100,1)),las =2,cex=.5) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=-1,lty=3,lwd=2,col="red") arrows(x0 = barplot, y0 = lower.conf.int, x1 = barplot,y1=upper.conf.int,code=3,angle=90,length=0.05,col="#964841",lwd=2) #Ct methdo FC barplot <- barplot(FC.diff.expression.means,beside=T, ylim=c(0, if(max(FC.upper.conf.int) <= 5){5}else{max(FC.upper.conf.int)*1.2}) ,las=1,yaxt='n',cex.names=1,main=expression(paste(Delta,bold("Ct method"))),names="",cex.main=2) text( x= barplot,y=if(max(FC.upper.conf.int) <= 5 ){-.7}else 
if(max(FC.upper.conf.int) <= 15){- 1.4}else{-3.5}, labels = names,srt=30,xpd=T) title(ylab=expression("FC"),line=2,cex.lab=1.5) axis(2, at =c(seq(0,100,1)),las =2,cex=.5) abline(h=0) abline(h=1,lty=3,lwd=2,col="red") abline(h=.5,lty=3,lwd=2,col="blue") arrows(x0 = barplot, y0 = FC.lower.conf.int, x1 = barplot,y1=FC.upper.conf.int,code=3,angle=90,length=0.05,col="#964841",lwd=2) dev.off()
5c6641a06f79b0be82b10516160e6f9171238bda
09847d5e16938d783a1a8a37b576d5840dd7e5f7
/tests/predict.R
063999b0337b332bb914c32a4e5c1231642d6e69
[]
no_license
cran/aster
b6dc91ebf539df904d55f4cacd584a6ea5ff27a1
61fc3cd89e7f6776279333b534652356c7059f89
refs/heads/master
2021-07-04T00:20:22.476008
2021-06-13T03:40:32
2021-06-13T03:40:32
17,694,509
0
0
null
null
null
null
UTF-8
R
false
false
13,986
r
predict.R
# Regression tests for predict.aster / predict.aster.formula in the "aster"
# package.  The script fits the same graphical model three ways, then checks
# every combination of model.type ("unco"/"cond") and parm.type
# ("canon"/"mean"): analytic gradients returned by predict() are compared
# against finite-difference approximations, and the printed all.equal()
# results are presumably diffed against a saved .Rout.save -- TODO confirm.
# NOTE(review): relies on aster's internal C entry points
# (aster:::C_aster_theta2phi etc.); confirm against the installed version.
library(aster) # needed because of the change in R function "sample" in R-devel
suppressWarnings(RNGversion("3.5.2"))
set.seed(42)

# Simulated data: 25 individuals, 6 nodes (Bernoulli survival/flowering,
# family 3 for the head-count nodes), all-ones root.
nind <- 25
vars <- c("l2", "l3", "f2", "f3", "h2", "h3")
pred <- c(0, 1, 1, 2, 3, 4)
fam <- c(1, 1, 1, 1, 3, 3)
length(pred) == length(fam)
nnode <- length(pred)
theta <- matrix(0, nind, nnode)
root <- matrix(1, nind, nnode)
x <- raster(theta, pred, fam, root)
dimnames(x) <- list(NULL, vars)
data <- as.data.frame(x)
site <- factor(sample(LETTERS[1:4], nind, replace = TRUE))
foo <- rnorm(nind)
data <- data.frame(x, site = site, foo = foo, root = 1)
# Wide -> long, one row per (individual, node), as aster.formula expects.
redata <- reshape(data, varying = list(vars), direction = "long",
    timevar = "varb", times = as.factor(vars), v.names = "resp")
out <- aster(resp ~ foo + site + varb, pred, fam, varb, id, root,
    data = redata)
sout1 <- summary(out, show.graph = TRUE)

##### redo with aster.default and predict.aster
out2 <- aster(x, root, pred, fam, modmat = out$modmat)
sout2 <- summary(out2)
# One prediction row per site level (first individual of each site).
foo <- match(sort(unique(site)), site)
modmat.pred <- out$modmat[foo, , ]
origin.pred <- out$origin[foo, ]
pout1 <- predict(out2, modmat = modmat.pred, parm.type = "canon")

##### case 1: model = "unco", obj = "unco", parm = "cano" ####
fred <- predict(out2, modmat = modmat.pred, parm.type = "canon",
    se.fit = TRUE)
# se.fit must agree with the delta-method formula grad %*% Fisher^-1 %*% grad'.
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out2$fisher) %*%
    t(fred$gradient))))
sally <- matrix(modmat.pred, ncol = length(out2$coef))
all.equal(fred$gradient, sally)
all.equal(fred$fit, as.numeric(origin.pred) + as.numeric(sally %*% out$coef))

##### case 1a: same but with amat
# amat picks out the head-count ("h") nodes for each site.
node.names <- dimnames(out$modmat)[[2]]
site.names <- levels(site)
amat <- array(0, c(dim(modmat.pred)[1:2], length(site.names)))
for (i in seq(along = site.names)) amat[i, grep("h", node.names), i] <- 1
alfie <- predict(out2, modmat = modmat.pred, parm.type = "canon",
    se.fit = TRUE, amat = amat)
amatmat <- matrix(amat, ncol = dim(amat)[3])
all.equal(alfie$fit, as.numeric(t(amatmat) %*% fred$fit))
all.equal(alfie$gradient, t(amatmat) %*% fred$gradient)
all.equal(alfie$se.fit, sqrt(diag(alfie$gradient %*% solve(out2$fisher) %*%
    t(alfie$gradient))))

##### case 2: model = "cond", obj = "cond", parm = "cano" ####
##### no test -- same code as case 1

##### case 3: model = "unco", obj = "cond", parm = "cano" ####
out3 <- aster(x, root, pred, fam, modmat = out$modmat, type = "cond")
sout3 <- summary(out3)
fred <- predict(out3, modmat = modmat.pred, parm.type = "canon",
    se.fit = TRUE)
# From here on, nind/nnode refer to the prediction model matrix.
nind <- dim(modmat.pred)[1]
nnode <- dim(modmat.pred)[2]
ncoef <- dim(modmat.pred)[3]
aster:::setfam(fam.default())
beta.hat <- out3$coef
theta.hat <- as.numeric(sally %*% beta.hat)
phi.hat <- .C(aster:::C_aster_theta2phi, nind = as.integer(nind),
    nnode = as.integer(nnode), pred = as.integer(pred),
    fam = as.integer(fam), theta = as.double(theta.hat),
    phi = double(nind * nnode))$phi
all.equal(fred$fit, phi.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out3$fisher) %*%
    t(fred$gradient))))
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
epsilon <- 1e-9
for (k in 1:ncoef) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    theta.epsilon <- as.numeric(sally %*% beta.epsilon)
    phi.epsilon <- .C(aster:::C_aster_theta2phi, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), theta = as.double(theta.epsilon),
        phi = double(nind * nnode))$phi
    my.gradient[ , k] <- (phi.epsilon - phi.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))
alfie <- predict(out3, modmat = modmat.pred, parm.type = "canon",
    se.fit = TRUE, amat = amat)
all.equal(alfie$fit, as.numeric(t(amatmat) %*% fred$fit))
all.equal(alfie$gradient, t(amatmat) %*% fred$gradient)
all.equal(alfie$se.fit, sqrt(diag(alfie$gradient %*% solve(out3$fisher) %*%
    t(alfie$gradient))))

##### case 4: model = "cond", obj = "unco", parm = "cano" ####
fred <- predict(out2, modmat = modmat.pred, parm.type = "canon",
    model.type = "cond", se.fit = TRUE)
aster:::setfam(fam.default())
beta.hat <- out2$coef
phi.hat <- as.numeric(origin.pred) + as.numeric(sally %*% beta.hat)
theta.hat <- .C(aster:::C_aster_phi2theta, nind = as.integer(nind),
    nnode = as.integer(nnode), pred = as.integer(pred),
    fam = as.integer(fam), phi = as.double(phi.hat),
    theta = double(nind * nnode))$theta
all.equal(fred$fit, theta.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out2$fisher) %*%
    t(fred$gradient))))
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
epsilon <- 1e-9
for (k in 1:ncoef) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    phi.epsilon <- as.numeric(origin.pred) + as.numeric(sally %*% beta.epsilon)
    theta.epsilon <- .C(aster:::C_aster_phi2theta, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), phi = as.double(phi.epsilon),
        theta = double(nind * nnode))$theta
    my.gradient[ , k] <- (theta.epsilon - theta.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))
alfie <- predict(out2, modmat = modmat.pred, parm.type = "canon",
    model.type = "cond", se.fit = TRUE, amat = amat)
all.equal(alfie$fit, as.numeric(t(amatmat) %*% fred$fit))
all.equal(alfie$gradient, t(amatmat) %*% fred$gradient)
all.equal(alfie$se.fit, sqrt(diag(alfie$gradient %*% solve(out2$fisher) %*%
    t(alfie$gradient))))

##### case 5: model = "cond", obj = "cond", parm = "mean" ####
root.pred <- matrix(1, nind, nnode)
fred <- predict(out3, modmat = modmat.pred, parm.type = "mean",
    model.type = "cond", root = root.pred, x = root.pred)
aster:::setfam(fam.default())
beta.hat <- out3$coef
theta.hat <- as.numeric(sally %*% beta.hat)
xi.hat <- .C(aster:::C_aster_theta2ctau, nind = as.integer(nind),
    nnode = as.integer(nnode), pred = as.integer(pred),
    fam = as.integer(fam), theta = as.double(theta.hat),
    ctau = double(nind * nnode))$ctau
all.equal(fred, xi.hat)
fred <- predict(out3, modmat = modmat.pred, parm.type = "mean",
    model.type = "cond", root = root.pred, x = root.pred, se.fit = TRUE)
all.equal(fred$fit, xi.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out3$fisher) %*%
    t(fred$gradient))))
aster:::setfam(fam.default())
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
epsilon <- 1e-9
for (k in 1:ncoef) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    theta.epsilon <- as.numeric(sally %*% beta.epsilon)
    xi.epsilon <- .C(aster:::C_aster_theta2ctau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), theta = as.double(theta.epsilon),
        ctau = double(nind * nnode))$ctau
    my.gradient[ , k] <- (xi.epsilon - xi.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))

##### case 6: model = "unco", obj = "unco", parm = "mean" ####
fred <- predict(out2, modmat = modmat.pred, parm.type = "mean",
    root = root.pred)
beta.hat <- out2$coef
# beta -> unconditional mean-value parameter tau, via the chain
# phi -> theta -> conditional means -> unconditional means.
beta2tau <- function(beta) {
    phi <- origin.pred + matrix(sally %*% beta, nrow = nind)
    theta <- .C(aster:::C_aster_phi2theta, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), phi = as.double(phi),
        theta = matrix(as.double(0), nind, nnode))$theta
    ctau <- .C(aster:::C_aster_theta2ctau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), theta = as.double(theta),
        ctau = double(nind * nnode))$ctau
    tau <- .C(aster:::C_aster_ctau2tau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), root = as.double(root.pred),
        ctau = as.double(ctau), tau = double(nind * nnode))$tau
    return(tau)
}
aster:::setfam(fam.default())
tau.hat <- beta2tau(beta.hat)
all.equal(fred, tau.hat)
fred <- predict(out2, modmat = modmat.pred, parm.type = "mean",
    root = root.pred, se.fit = TRUE)
all.equal(fred$fit, tau.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out2$fisher) %*%
    t(fred$gradient))))
aster:::setfam(fam.default())
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
for (k in 1:length(beta.hat)) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    tau.epsilon <- beta2tau(beta.epsilon)
    my.gradient[ , k] <- (tau.epsilon - tau.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))

##### case 7: model = "cond", obj = "unco", parm = "mean" ####
fred <- predict(out2, modmat = modmat.pred, parm.type = "mean",
    model.type = "cond", root = root.pred, x = root.pred)
beta.hat <- out2$coef
# beta -> conditional mean-value parameter xi (stops at ctau).
beta2xi <- function(beta) {
    phi <- origin.pred + matrix(sally %*% beta, nrow = nind)
    theta <- .C(aster:::C_aster_phi2theta, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), phi = as.double(phi),
        theta = matrix(as.double(0), nind, nnode))$theta
    ctau <- .C(aster:::C_aster_theta2ctau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), theta = as.double(theta),
        ctau = double(nind * nnode))$ctau
    return(ctau)
}
aster:::setfam(fam.default())
xi.hat <- beta2xi(beta.hat)
all.equal(fred, xi.hat)
fred <- predict(out2, modmat = modmat.pred, parm.type = "mean",
    model.type = "cond", root = root.pred, x = root.pred, se.fit = TRUE)
all.equal(fred$fit, xi.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out2$fisher) %*%
    t(fred$gradient))))
aster:::setfam(fam.default())
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
for (k in 1:ncoef) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    xi.epsilon <- beta2xi(beta.epsilon)
    my.gradient[ , k] <- (xi.epsilon - xi.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))

##### case 8: model = "unco", obj = "cond", parm = "mean" ####
fred <- predict(out3, modmat = modmat.pred, root = root.pred)
beta.hat <- out3$coef
# Conditional model: beta -> theta directly, then on to tau.
beta2tau <- function(beta) {
    theta <- matrix(sally %*% beta, nrow = nind)
    ctau <- .C(aster:::C_aster_theta2ctau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), theta = as.double(theta),
        ctau = double(nind * nnode))$ctau
    tau <- .C(aster:::C_aster_ctau2tau, nind = as.integer(nind),
        nnode = as.integer(nnode), pred = as.integer(pred),
        fam = as.integer(fam), root = as.double(root.pred),
        ctau = as.double(ctau), tau = double(nind * nnode))$tau
    return(tau)
}
aster:::setfam(fam.default())
tau.hat <- beta2tau(beta.hat)
all.equal(fred, tau.hat)
fred <- predict(out3, modmat = modmat.pred, root = root.pred, se.fit = TRUE)
all.equal(fred$fit, tau.hat)
all.equal(fred$se.fit, sqrt(diag(fred$gradient %*% solve(out3$fisher) %*%
    t(fred$gradient))))
aster:::setfam(fam.default())
# Finite-difference check of the reported gradient.
my.gradient <- 0 * fred$gradient
for (k in 1:ncoef) {
    beta.epsilon <- beta.hat
    beta.epsilon[k] <- beta.hat[k] + epsilon
    tau.epsilon <- beta2tau(beta.epsilon)
    my.gradient[ , k] <- (tau.epsilon - tau.hat) / epsilon
}
all.equal(fred$gradient, my.gradient, tolerance = sqrt(epsilon))

##### HOORAY !!!!!
##### That's it for aster.predict #####

##### now for aster.predict.formula #####

##### case 1: newdata missing
pout2 <- predict(out)

# Build newdata equivalent to modmat.pred and check the formula method
# reproduces the default method's model matrix, fit and standard errors.
newdata <- data.frame(site = factor(LETTERS[1:4]))
for (v in vars) newdata[[v]] <- 1
newdata$root <- 1
newdata$foo <- modmat.pred[ , "l2", "foo"]
renewdata <- reshape(newdata, varying = list(vars), direction = "long",
    timevar = "varb", times = as.factor(vars), v.names = "resp")
louise <- predict(out, newdata = renewdata, varvar = varb, idvar = id,
    root = root, se.fit = TRUE)
all.equal(louise$modmat, modmat.pred)
fred <- predict(out2, modmat = modmat.pred, root = root.pred, se.fit = TRUE)
all.equal(louise$fit, fred$fit)
all.equal(louise$se.fit, fred$se.fit)

# Compare summaries/predictions against a cached predict.rda; create the
# cache on the first run.
foo <- new.env(parent = emptyenv())
bar <- suppressWarnings(try(load("predict.rda", foo), silent = TRUE))
if (inherits(bar, "try-error")) {
    save(sout1, sout2, sout3, pout1, pout2, file = "predict.rda")
} else {
    print(all.equal(sout1, foo$sout1))
    print(all.equal(sout2, foo$sout2))
    print(all.equal(sout3, foo$sout3))
    print(all.equal(pout1, foo$pout1))
    print(all.equal(pout2, foo$pout2))
}

##### test for global variables #####
# Remove everything except the named objects and re-run both predict
# calls; results must be identical, proving predict() does not depend on
# leftover globals.
saves <- c("out", "renewdata", "out2", "modmat.pred", "root.pred",
    "louise", "fred")
rm(list = setdiff(ls(), saves))
ls()
louise.too <- predict(out, newdata = renewdata, varvar = varb, idvar = id,
    root = root, se.fit = TRUE)
identical(louise, louise.too)
fred.too <- predict(out2, modmat = modmat.pred, root = root.pred,
    se.fit = TRUE)
identical(fred, fred.too)

##### test of newcoef #####
# predict(..., newcoef = b) must behave exactly like predicting from a
# copy of the object whose coefficients were replaced by b.
fake <- out2
beta.new <- fake$coefficients + rnorm(length(fake$coefficients)) * 0.1
fake$coefficients <- beta.new
fred.fake <- predict(fake, modmat = modmat.pred, root = root.pred,
    se.fit = TRUE)
fred.new <- predict(out2, modmat = modmat.pred, root = root.pred,
    se.fit = TRUE, newcoef = beta.new)
identical(fred.fake, fred.new)
6c4a97f7f670a2e4a5aa03decb6a0a8c8bdcd7d4
325902f26f8df4914f3931d6a3a5c5af12b975b1
/R scripts/Protein_examples_zoom.R
98add3d4ae4e9ab296e4dbbb99e713dcc412aef3
[]
no_license
Rappsilber-Laboratory/ProteomeHD
5e9ca415a0dac31ef46972eeff018547b9ee8aeb
2ee6d87110b9d4932af0d106927eb289dfbce321
refs/heads/master
2020-04-28T22:22:08.913323
2019-11-05T12:24:45
2019-11-05T12:24:45
175,614,695
8
0
null
2019-03-14T12:10:33
2019-03-14T12:10:32
null
UTF-8
R
false
false
7,099
r
Protein_examples_zoom.R
## In this script we analyse the distribution of four example proteins in the coregulation map: An uncharacterised microprotein,
## an uncharacterised protein without coregulation partners above threshold and two multifunctional proteins
##
## Inputs (read from the working directory):
##   coregulation_scores.csv, tSNE_coordinates_and_annotation.csv,
##   ProteomeHD_v1_1.csv, QuickGO_mito_IMM.tsv
## Outputs: TMEM256_HEATR5B_plot.pdf, DDX3X_PHB.pdf

# Load the required libraries
library(data.table); library(ggplot2); library(gridExtra); library(grid)

#### Prepare the data ####

# Load coregulation scores and annotate arbitrary threshold
tC <- fread("coregulation_scores.csv")
n_coreg <- floor( tC[,.N] * 0.005 )  # We arbitrarily define the highest scoring 0.5% of protein pairs as "co-regulated". Assign those.
score_cut_off <- tC[ order(-coregulation_score)  # What score cut-off does that correspond to?
                     ][ n_coreg , coregulation_score ]
tC[ coregulation_score >= score_cut_off , coregulated := TRUE ]  # Assign the term

# Load the tSNE map coordinates
SNE <- fread("tSNE_coordinates_and_annotation.csv")

# Add Gene names to the SNE map
ProHD <- fread("ProteomeHD_v1_1.csv")
ProHD <- ProHD[, .(Majority_protein_IDs, Gene_names)]
SNE <- merge(SNE, ProHD, by.x = "ProteinID", by.y = "Majority_protein_IDs")

# A function to "weight" the tSNE map by the treeClust score for a target protein
# x: a Majority_protein_IDs string; returns ProteinID + coregulated flag
# (NA where the pair involving x is not in tC).  Pairs are undirected, so
# x is looked up in both the Protein_1 and Protein_2 columns.
make_custom_SNE <- function(x){
  d1 <- tC[ Protein_1 == x , .(Target = Protein_1, Partner = Protein_2, coregulated) ]
  d2 <- tC[ Protein_2 == x , .(Target = Protein_2, Partner = Protein_1, coregulated) ]
  d3 <- rbind(d1, d2)
  d4 <- merge(SNE, d3, by.x = "ProteinID", by.y = "Partner", all.x = TRUE)
  return( d4[, .(ProteinID, coregulated) ] )
}

#### Get coregulation partners for the selected proteins of interest ####

# Use the custom function to assign coregulation partners
TMEM256 <- make_custom_SNE( "Q8N2U0" )
HEATR5B <- make_custom_SNE( "Q9P2D3;Q9P2D3-3" )
DDX3X   <- make_custom_SNE( "O00571;O00571-2" )
PHB     <- make_custom_SNE( "P35232" )

# Modify colnames
names(TMEM256)[ names(TMEM256) == "coregulated" ] <- "coreg_with_TMEM256"
names(HEATR5B)[ names(HEATR5B) == "coregulated" ] <- "coreg_with_HEATR5B"
names(DDX3X)[ names(DDX3X) == "coregulated" ] <- "coreg_with_DDX3X"
names(PHB)[ names(PHB) == "coregulated" ] <- "coreg_with_PHB"

# Merge into one table, together with tSNE coordinates
SNE <- merge( SNE, TMEM256 )
SNE <- merge( SNE, HEATR5B )
SNE <- merge( SNE, DDX3X )
SNE <- merge( SNE, PHB )

#### Uncharacterised proteins: TMEM256 and HEATR5B ####

# Set TMEM256 zoom region
TMEM256_zoom_x <- c( 5, 11 )
TMEM256_zoom_y <- c( -48, -42 )

# Set HEATR5B zoom region
HEATR5B_zoom_x <- c( 35, 37)
HEATR5B_zoom_y <- c( 3, 6)

# Global map with the zoomed regions annotated
p1 <- ggplot(SNE, aes( x = tSNE_x_dim, y = tSNE_y_dim ))+
      geom_point(shape = 16, size = 0.1, colour = "grey50")+
      geom_point(data = SNE[ coreg_with_TMEM256 == TRUE ], shape = 16, size = 0.2, alpha = 0.5, colour = "royalblue4")+
      geom_point(data = SNE[ coreg_with_HEATR5B == TRUE ], shape = 16, size = 0.2, alpha = 0.5, colour = "red")+
      annotate("rect", xmin = TMEM256_zoom_x[1], xmax = TMEM256_zoom_x[2],
               ymin = TMEM256_zoom_y[1], ymax = TMEM256_zoom_y[2], colour="black", fill=NA, size=0.25)+
      annotate("rect", xmin = HEATR5B_zoom_x[1], xmax = HEATR5B_zoom_x[2],
               ymin = HEATR5B_zoom_y[1], ymax = HEATR5B_zoom_y[2], colour="black", fill=NA, size=0.25)+
      theme(panel.background=element_blank(), axis.text=element_blank(), axis.ticks=element_blank(),
            panel.border=element_rect(fill=NA, colour="black", size=0.25), panel.grid.major=element_blank(),
            axis.title=element_blank(), legend.position = "none",
            plot.margin = unit( c(0.5, 0.5, 0.5, 0.5), "mm"))
p1

## Map zoom for TMEM256
# In this zoom I want to show the enrichment for GO term "mitochondrion inner membrane" (GO:0005743). Download list of UniProt IDs
# associated with this term from QuickGO and load here
mito_IMM <- fread("QuickGO_mito_IMM.tsv")

# Simplify protein IDs for assigning subcellular location
# (strip secondary IDs after ";" and isoform suffixes after "-")
SNE[, SimpleID := gsub(";.+", "", ProteinID) ][, SimpleID := gsub("-.+", "", SimpleID) ]

pTMEM256 <- ggplot(SNE, aes( x = tSNE_x_dim, y = tSNE_y_dim))+
            geom_point(size = 0.6, shape = 16, colour = "grey50" )+
            geom_point(data = SNE[ coreg_with_TMEM256 == TRUE], shape = 16, size = 0.6, colour = "royalblue4")+
            geom_point(data = SNE[ SimpleID %in% mito_IMM$`GENE PRODUCT ID` ], shape = 21, size = 3, colour = "darkorange")+
            geom_point(data = SNE[ Gene_names == "TMEM256" ], shape=16, size = 2, colour = "royalblue4")+
            geom_text( data = SNE[ Gene_names == "TMEM256" ], aes(label = Gene_names), size = 3, colour="royalblue4")+
            xlim( TMEM256_zoom_x )+
            ylim( TMEM256_zoom_y )+
            theme(panel.background=element_blank(), axis.text=element_blank(), axis.ticks=element_blank(),
                  panel.border=element_rect(fill=NA, colour="black", size=0.25), panel.grid.major=element_blank(),
                  axis.title=element_blank(), legend.position = "top",
                  plot.margin = unit( c(0.5, 0.5, 0.5, 0.5), "mm"))
pTMEM256

# Map zoom for HEATR5B
pHEATR5B <- ggplot(SNE, aes( x = tSNE_x_dim, y = tSNE_y_dim))+
            geom_point(size = 0.6, shape = 16, colour = "grey50" )+
            geom_point(data = SNE[ coreg_with_HEATR5B == TRUE], shape = 16, size = 0.6, colour = "red")+
            geom_point(data = SNE[ Gene_names == "HEATR5B" ], shape=16, size = 2, colour = "red")+
            geom_text( aes(label = Gene_names), size = 3 , hjust = -0.1)+
            xlim( HEATR5B_zoom_x )+
            ylim( HEATR5B_zoom_y )+
            theme(panel.background=element_blank(), axis.text=element_blank(), axis.ticks=element_blank(),
                  panel.border=element_rect(fill=NA, colour="black", size=0.25), panel.grid.major=element_blank(),
                  axis.title=element_blank(), legend.position = "top",
                  plot.margin = unit( c(0.5, 0.5, 0.5, 0.5), "mm"))
pHEATR5B

# Output combined plot
p1b <- arrangeGrob(pHEATR5B, pTMEM256)
pUnc <- arrangeGrob(p1, p1b, nrow = 1)
grid.draw(pUnc)
ggsave("TMEM256_HEATR5B_plot.pdf", pUnc, width=9, height=4.5, units=c("cm"))

#### Multifunctional proteins: DDX3X AND prohibitin ####

# Global map with the co-regulated proteins shown
p2 <- ggplot(SNE, aes( x = tSNE_x_dim, y = tSNE_y_dim ))+
      geom_point(shape = 16, size = 0.1, colour = "grey50")+
      geom_point(data = SNE[ coreg_with_DDX3X == TRUE ], shape = 16, size = 0.2, alpha = 0.5, colour = "orangered")+
      geom_point(data = SNE[ coreg_with_PHB == TRUE ], shape = 16, size = 0.2, alpha = 0.5, colour = "deepskyblue1")+
      theme(panel.background=element_blank(), axis.text=element_blank(), axis.ticks=element_blank(),
            panel.border=element_rect(fill=NA, colour="black", size=0.25), panel.grid.major=element_blank(),
            axis.title=element_blank(), legend.position = "none",
            plot.margin = unit( c(0.5, 0.5, 0.5, 0.5), "mm"))
p2

# Save plot
ggsave("DDX3X_PHB.pdf", p2, width=4.5, height=4.5, units=c("cm"))
32161ff44f3983f7a36ff985cefdbb4e7c1ce135
104e81350a792f38d9522857cfbad03836ef7683
/Week1/htmlDownload.R
ed292b98f6ace500fae02bd44930f705fcdd6c3f
[]
no_license
jtantongco/Coursera_GetCleanData
bcf184d13e0e245b204b784871330f616b3086b4
080ff4bd328942d0d0d68043b52cb5fc48fd653a
refs/heads/master
2021-01-01T15:36:18.433559
2014-08-24T09:22:21
2014-08-24T09:22:21
null
0
0
null
null
null
null
UTF-8
R
false
false
287
r
htmlDownload.R
# Scrape the ESPN Baltimore Ravens team page and extract scores and team
# names with XPath queries (Getting and Cleaning Data, week 1 example).
library(XML)

fileUrl <- "http://espn.go.com/nfl/team/_/name/bal/baltimore-ravens"

# Parse the fetched HTML into an internal (C-level) document so XPath
# queries can be run against it.
doc <- htmlTreeParse(fileUrl, useInternal=TRUE)

# Text content of every <li class="score"> node.
scores <- xpathSApply(doc, "//li[@class='score']", xmlValue) #seems to return empty
# Text content of every <li class="team-name"> node.
teams <- xpathSApply(doc,"//li[@class='team-name']", xmlValue) #is good
38ba394aa46c2540bd30a5189a91dfe1f69920e5
afe39a330e68856413be87018519f7119dde6508
/R/create_resource.R
48cb36254855e2251a580e1d4dbfcbeeb5f5df8b
[]
no_license
jchrom/trelloR
4a142222c34d480b25b7f3fd75614c6af1cf66eb
cca04eb70bf5060a7c7f858fa9911bd8c68e2089
refs/heads/master
2023-08-31T06:33:57.940993
2023-08-27T18:09:07
2023-08-27T18:09:07
34,352,839
40
12
null
2017-01-21T19:47:02
2015-04-21T21:20:56
R
UTF-8
R
false
false
2,484
r
create_resource.R
#' Create Resources
#'
#' Create resources via Trello API.
#'
#' See [Trello API reference](https://developer.atlassian.com/cloud/trello/rest)
#' for more info about what elements can be included in POST request body.
#'
#' @param resource Model name, eg. `"card"`.
#' @param id Model id.
#' @param path Path.
#' @param body A named list.
#' @param token An object of class `"Trello_API_token"`, a path or `NULL`.
#'
#'   * If a `Token`, it is passed as is.
#'   * If `NULL` and a cache file called `".httr-oauth"` exists, the newest
#'     token is read from it. If the file is not found, an error is thrown.
#'   * If a character vector of length 1, it will be used as an alternative
#'     path to the cache file.
#'
#' @param verbose Whether to pass [httr::verbose()] to [httr::RETRY()].
#' @param on.error Behavior when HTTP status >= 300, defaults to `"stop"`.
#' @param handle Passed to [httr::RETRY()].
#' @param encode,response Deprecated.
#'
#' @family functions to create resources
#'
#' @export
#'
#' @examples
#'
#' \dontrun{
#'
#' # Get token with write access
#' key = Sys.getenv("MY_TRELLO_KEY")
#' secret = Sys.getenv("MY_TRELLO_SECRET")
#'
#' token = get_token("my_app", key = key, secret = secret,
#'                   scope = c("read", "write"))
#'
#' # Get board ID
#' url = "Your board URL"
#' bid = get_id_board(url, token)
#'
#' # Get lists on that board, extract ID of the first one
#' lid = get_board_lists(bid, token)$id[1]
#'
#' # Content for the new card
#' payload = list(
#'   idList = lid,
#'   name = "A new card",
#'   desc = "#This card has been created by trelloR",
#'   pos = "bottom"
#' )
#'
#' # Create card and store the response (to capture the ID
#' # of the newly created resource)
#' r = create_resource("card", body = payload, token = token)
#'
#' # Get ID of the new card
#' r$id
#' }

create_resource = function(resource,
                           id = NULL,
                           path = NULL,
                           body = list(name = "New"),
                           token = NULL,
                           on.error = c("stop", "warn", "message"),
                           verbose = FALSE,
                           handle = NULL,
                           encode,
                           response) {

  # Deprecated arguments: warn the caller if either is still supplied.
  warn_for_argument(encode)
  warn_for_argument(response)

  # Build the API endpoint, e.g. /1/cards/<id>/<path>; NULL segments
  # (missing id/path) are silently dropped by c().
  endpoint_path = c(1, paste0(resource, "s"), id, path)
  target_url = httr::modify_url(url = "https://api.trello.com",
                                path = endpoint_path)

  # Issue a single POST (no retries) and delegate error handling.
  trello_api_verb("POST",
                  url = target_url,
                  times = 1L,
                  handle = handle,
                  token = token,
                  verbose = verbose,
                  body = body,
                  on.error = on.error)
}
17289ae45f927b9fc86c7967a20242ecdcb704bd
de8336641d9f4ee5295a9e42fff7b4f742281338
/tests/testthat/test-pick_UScode.R
f114cfecf5ca8f883688c165ed88b8ebbc260e00
[ "MIT" ]
permissive
sjmarks/Birdr
44b8e2abe13ccacdf32073e757121f4e3853ebe5
2183cbb28272d6b96f0635c21987fd6d06e471c6
refs/heads/master
2022-06-07T03:04:35.841992
2020-05-04T04:14:29
2020-05-04T04:14:29
260,333,483
2
2
MIT
2020-05-04T04:14:30
2020-04-30T22:40:53
R
UTF-8
R
false
false
286
r
test-pick_UScode.R
# Verifies that pick_UScode() resolves a state/county pair to the eBird
# region code and canonical county name.
# NOTE(review): this test calls the live eBird API with a hard-coded key
# ("rqksong3qcbm"); consider reading the key from an environment variable
# and skipping offline -- confirm with the package maintainers.
test_that("pickUSCode returns proper name and code", {
  # Expected region code/name for San Luis Obispo county, California.
  correct_result <- tidyr::tibble(code = "US-CA-079", name = "San Luis Obispo")
  my_result <- pick_UScode(state = "California", county = "San Luis Obispo", ebirdkey = "rqksong3qcbm")
  expect_equal(my_result, correct_result)
})
3c5a60b9d187b32c6037ebeea5ef336cb962c8b7
53e56475273a36deeb2d1f133947f531e4e3871a
/proj_1/sampleMultinomial.R
8fc3017cee09f6168b8a512ed0f3f3ff58f3b428
[]
no_license
Marvedog/Tma4300-Kode
34af409101da8471be4853f4ec07ed7d986a23bb
d8af15888f6078030a9834fecddb30fec7b3fc9d
refs/heads/master
2021-05-12T06:52:51.580650
2018-02-09T09:01:25
2018-02-09T09:01:25
117,229,525
0
1
null
2018-02-02T07:48:04
2018-01-12T10:45:57
R
UTF-8
R
false
false
641
r
sampleMultinomial.R
# -------------- Multinomial sampling -----------------#
# Draw N observations from a multinomial distribution by inverting the CDF.
#
# Input:
#   p : vector of category probabilities summing to unity (need not be
#       sorted; the inversion works for any ordering)
#   N : number of draws
# Output:
#   out : length(p) x 1 numeric matrix of counts per category; sum(out) == N
#
# Fixes over the original implementation:
#   * length(p) == 1 no longer errors (the old `for (i in 2:len_p)` counted
#     down as 2:1 and wrote out of bounds);
#   * the last cumulative value is pinned to exactly 1, so floating-point
#     rounding can never leave a draw silently unassigned;
#   * the O(N * length(p)) scalar scan is replaced by vectorized
#     findInterval()/tabulate(), preserving the same bucket boundaries
#     (a draw u falls in the first bucket j with u < cumSum[j]).
sampleMultinomial <- function(p, N) {
  len_p <- length(p)

  # Cumulative distribution; guard the upper end against rounding error.
  cumSum <- cumsum(p)
  cumSum[len_p] <- 1

  u <- runif(N)

  # findInterval(u, cumSum) counts how many cut points are <= u, so the
  # bucket index is that count + 1 -- identical to scanning for the first
  # j with u < cumSum[j] (ties at a cut point fall into the next bucket).
  bucket <- findInterval(u, cumSum) + 1L

  # Keep the original return shape: a len_p x 1 numeric matrix of counts.
  out <- matrix(as.numeric(tabulate(bucket, nbins = len_p)), len_p, 1)
  return(out)
}
623e52504b377e12623dc0325c265163f598f73b
9c4fd75b5fa36683a7f72598d5b7ffb3e155fa74
/man/dput_levels.Rd
7a9c6bdf5a40d17ca612a47c8a7919e50376a577
[]
no_license
gridl/thinkr
3c291d6ec16c83c32f596064493f4178977f7714
24f85da8c65e77b0f36cd80b7d40389e0ba7b92a
refs/heads/master
2020-03-23T03:17:16.909851
2018-06-21T04:15:31
2018-06-21T04:15:31
null
0
0
null
null
null
null
UTF-8
R
false
true
402
rd
dput_levels.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/levels_to_vec.R \name{dput_levels} \alias{dput_levels} \title{return R instruction to create levels} \usage{ dput_levels(vec) } \arguments{ \item{vec}{a factor or character vector} } \value{ a R instruction } \description{ return R instruction to create levels } \examples{ dput_levels(iris$Species) }
b001b7f7bd00b59a34c1d09f2ca8e184ee209281
50291a5d7e7652f8dcfbfb718906f4ba0b8964e7
/ui.R
d83c90417c1a28dc44a4dc38546d54532c49755a
[]
no_license
bborgesr/reactnb
e9f94f00951299519f7968d61e48a75d345d7506
0083b79c560ecc1dd6b38221fe0bbcc75ad48b72
refs/heads/master
2021-08-22T16:53:56.972462
2017-11-30T17:56:32
2017-11-30T17:56:32
119,948,057
1
0
null
2018-02-02T07:28:43
2018-02-02T07:28:43
null
UTF-8
R
false
false
682
r
ui.R
# Shiny UI for the reactive-notebook demo ("reactnb").
# A single basic page that loads the jQuery UI assets plus the app's own
# JS/CSS, then declares an output area, a command prompt and a trash div.
# The divs are presumably populated/wired up by the client-side
# reactnb.js -- confirm against that script.  Asset order matters: the
# jQuery UI files must load before the touch-punch shim and reactnb.js.
shinyUI(basicPage(
    # Third-party assets: jQuery UI theme + widgets, and touch support.
    tags$link(rel='stylesheet', type='text/css',
              href='lib/jqueryui/css/ui-lightness/jquery-ui-1.10.3.custom.css'),
    tags$script(src='lib/jqueryui/js/jquery-ui-1.10.3.custom.js'),
    tags$script(src='lib/jquery.ui.touch-punch.min.js'),
    # App-specific behaviour and styling.
    tags$script(src='reactnb.js'),
    tags$link(rel='stylesheet', type='text/css', href='reactnb.css'),
    tags$link(rel='stylesheet', type='text/css', href='reactnb-hi.css'),
    # Output container for observed expressions.
    tags$div(id='output'),
    # Prompt where the user types an R expression to observe.
    tags$div(class='command_container',
             'Type an R expression to observe:',
             tags$input(type='command', id='command', class='reactnb-command',
                        autocomplete='off', autocorrect='off')
    ),
    tags$div(id="trash")
))
63d5e1962e11107df6c82eec599dbacb6856038c
f7a0f3cbeefdc01fc0f172a47359c0c4610c95a7
/code_active/sim_analysis_looping_info_1.R
93157ed8c287ec91e07d04bb3d727e609354f40a
[]
no_license
EESI/exploring_thematic_structure
65e77efbb56fea646a9f165eaa94f955f68259ff
06f7ea096c31dbb63b09fc117ee22411e52ab60e
refs/heads/master
2020-08-25T03:30:04.253394
2019-10-23T03:02:18
2019-10-23T03:02:18
216,955,082
2
0
null
null
null
null
UTF-8
R
false
false
30,832
r
sim_analysis_looping_info_1.R
jsd <- function(p,q){ m <- .5*(p+q) sqrt(.5*(sum(p*log(p/m)) + sum(q*log(q/m)))) } entropy <- function(x){ x <- x[x!=0] f <- x/sum(x) -sum(f * log2(f)) } norm10 <- function(x) (x-min(x))/(max(x)-min(x)) bcd <- function(x,y) sum(abs(x-y)/sum(x+y)) N_OTU <- 500 N_SAMP <- 100 SC_P <- c(.1,.25,.50,.75) SC_N <- 10 SC_SAMP_P <- .75 N_SC <- 5 param_subset_idx <- with(param_grid,which(n_otu %in% N_OTU & n_samp %in% N_SAMP & sc_p %in% SC_P & sc_n %in% SC_N & sc_samp_p %in% SC_SAMP_P)) fns_subset <- paste0('data/dat_',param_subset_idx,'.rds') LIST_SUBSET <- vector(mode='list',length=length(fns_subset)) for (i in seq_along(fns_subset)) LIST_SUBSET[[i]] <- readRDS(file.path(sim_dir,fns_subset[i])) param_values <- param_grid[param_subset_idx,] param_values$idx <- as.integer(rownames(param_values)) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(15))) P_MIN <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) E_MIN <- E_MEAN <- P_MEAN <- P_MIN for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {P_MIN[p,m] <- P_MEAN[p,m] <- E_MEAN[p,m] <- E_MIN[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- 
sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,paste0('otu',OTUS[sc,])]) OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2[,paste0('otu',OTUS[sc,])] TOP_SC <- TOP_SC/rowSums(TOP_SC) TOP_SC <- TOP_SC[,names(which(!(OTU_SC==0)))] OTU_SC <- OTU_SC[names(which(!(OTU_SC==0)))] apply(TOP_SC,1,function(p) jsd(p,OTU_SC)) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) NSCORE_MIN <- matrix(NA,nrow(SCORE),ncol(SCORE),dimnames=dimnames(SCORE)) for (i in 1:NROW(NSCORE_MIN)){ for (j in 1:NCOL(NSCORE_MIN)){ NSCORE_MIN[i,j] <- SCORE[i,j]/min(SCORE[i,-j]) } } NSCORE_MEAN <- matrix(NA,nrow(SCORE),ncol(SCORE),dimnames=dimnames(SCORE)) for (i in 1:NROW(NSCORE_MEAN)){ for (j in 1:NCOL(NSCORE_MEAN)){ NSCORE_MEAN[i,j] <- SCORE[i,j]/median(SCORE[i,-j]) ### using median } } NSCORE_MIN_THRES <- 1*(NSCORE_MIN<1) nscore_min_prop <- 1-mean(colSums(NSCORE_MIN_THRES)==0) nscore_min_entr <- sum(apply(NSCORE_MIN_THRES,1,function(x) entropy(table(which(x==1))))) NSCORE_MEAN_THRES <- 1*(NSCORE_MEAN<1) nscore_mean_prop <- 1-mean(colSums(NSCORE_MEAN_THRES)==0) nscore_mean_entr <- sum(apply(NSCORE_MEAN_THRES,1,function(x) entropy(table(which(x==1))))) P_MIN[p,m] <- nscore_min_prop E_MIN[p,m] <- nscore_min_entr P_MEAN[p,m] <- nscore_mean_prop E_MEAN[p,m] <- nscore_mean_entr } } P <- param_values %>% left_join(data.frame(P_MIN,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(E_MEAN,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% mutate(model=gsub('\\.x','\\.pmin',model), model=gsub('\\.y','\\.emean',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(emean,pmin,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_x_reverse() + scale_colour_brewer(type='qual',palette=2) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% 
c('unsup') & K %in% c(25))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] #GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,paste0('otu',OTUS[sc,])]) OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2[,paste0('otu',OTUS[sc,])] TOP_SC <- TOP_SC/rowSums(TOP_SC) TOP_SC <- TOP_SC[,names(which(!(OTU_SC==0)))] OTU_SC <- OTU_SC[names(which(!(OTU_SC==0)))] apply(TOP_SC,1,function(p) jsd(p,OTU_SC)) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (all(col_thres != 0)){ break } } TH[p,m] <- thres S[p,m] <- sum((rowSums(1*(SCORE<thres))))/ncol(SCORE) } } P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% 
mutate(model=gsub('\\.x','\\.threshold',model), model=gsub('\\.y','\\.score',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(25))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- min(BETA2) OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2 TOP_SC <- TOP_SC/rowSums(TOP_SC) TOP_SC <- TOP_SC[,names(which(!(OTU_SC==0)))] OTU_SC <- OTU_SC[names(which(!(OTU_SC==0)))] apply(TOP_SC,1,function(p) jsd(p,OTU_SC)) }) dimnames(SCORE) <- 
list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (all(col_thres != 0)){ break } } TH[p,m] <- thres S[p,m] <- sum((rowSums(1*(SCORE<thres))))/ncol(SCORE) } } P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% mutate(model=gsub('\\.x','\\.threshold',model), model=gsub('\\.y','\\.score',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(15))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- 
rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- min(BETA2) OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2 #TOP_SC <- TOP_SC/rowSums(TOP_SC) TOP_SC <- TOP_SC[,names(which(!(OTU_SC==0)))] OTU_SC <- OTU_SC[names(which(!(OTU_SC==0)))] apply(TOP_SC,1,function(p) jsd(p,OTU_SC)) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) != 1)/K } } P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% mutate(model=gsub('\\.x','\\.threshold',model), model=gsub('\\.y','\\.score',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + xlim(0,1) + ylim(0,1) + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(15))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- 
t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- NA OTU_SC <- dense_rank(dplyr::desc(OTU_SC)) OTU_SC[is.na(OTU_SC)] <- length(OTU_SC) names(OTU_SC) <- colnames(OTU) OTU_SC <- OTU_SC[colnames(BETA2)] TOP_SC <- t(apply(BETA2,1,function(x) dense_rank(dplyr::desc(x)))) dimnames(TOP_SC) <- dimnames(BETA2) apply(TOP_SC,1,function(p) cor(p,OTU_SC,method='kendall')) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) != 1)/K } } P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% mutate(model=gsub('\\.x','\\.threshold',model), model=gsub('\\.y','\\.score',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + scale_x_reverse() + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(15))) S <- matrix(0.0,nrow(param_values),length(model_param_values), 
dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,paste0('otu',OTUS[sc,])]) OTU_SC <- dense_rank(dplyr::desc(OTU_SC)) names(OTU_SC) <- paste0('otu',OTUS[sc,]) TOP_SC <- t(apply(BETA2,1,function(x) dense_rank(dplyr::desc(x)))) dimnames(TOP_SC) <- dimnames(BETA2) TOP_SC <- TOP_SC[,paste0('otu',OTUS[sc,])] apply(TOP_SC,1,function(p) cor(p,OTU_SC,method='kendall')) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) != 1)/K } } P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% 
mutate(model=gsub('\\.x','\\.threshold',model), model=gsub('\\.y','\\.score',model)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + scale_x_reverse() + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(15))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- MAX50 <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) q_rank <- -qnormalize(1:N_OTU) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,paste0('otu',OTUS[sc,])]) OTU_SC <- q_rank[dense_rank(dplyr::desc(OTU_SC))] names(OTU_SC) <- paste0('otu',OTUS[sc,]) TOP_SC <- t(apply(BETA2,1,function(x) q_rank[dense_rank(dplyr::desc(x))])) dimnames(TOP_SC) <- dimnames(BETA2) TOP_SC <- TOP_SC[,paste0('otu',OTUS[sc,])] 
apply(TOP_SC,1,function(p) sqrt(sum(p - OTU_SC)^2)) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } MAX50[p,m] <- median(apply(SCORE,2,max)) TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) != 1)/K } } colnames(MAX50) <- paste0(colnames(MAX50),'.max50') colnames(TH) <- paste0(colnames(TH),'.threshold') colnames(S) <- paste0(colnames(S),'.score') P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(P)),by='idx') %>% left_join(data.frame(MAX50,idx=1:nrow(P)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_point(size=5) + facet_grid(sc_p~sc_m) + geom_vline(aes(xintercept=max50,colour=model),size=1,alpha=.7) + scale_colour_brewer(type='qual',palette=2) + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(50))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- MAX50 <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), 
sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- 0 OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2 TOP_SC[TOP_SC<1e-300] <- 1e-300 apply(TOP_SC,1,function(p) sum(ifelse(OTU_SC==0,0,OTU_SC*log(OTU_SC/p)))) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } MAX50[p,m] <- median(apply(SCORE,2,min)) TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) > 1)/K } } colnames(MAX50) <- paste0(colnames(MAX50),'.max50') colnames(TH) <- paste0(colnames(TH),'.threshold') colnames(S) <- paste0(colnames(S),'.score') P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(TH)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(S)),by='idx') %>% left_join(data.frame(MAX50,idx=1:nrow(MAX50)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_vline(aes(xintercept=max50,colour=model),size=1,alpha=.5,linetype=2) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(50))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- MAX50 <- 
S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] #OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- 0 OTU_SC <- OTU_SC/sum(OTU_SC) TOP_SC <- BETA2 TOP_SC[TOP_SC<1e-300] <- 1e-300 apply(TOP_SC,1,function(p) sum(ifelse(OTU_SC==0,0,OTU_SC*log(OTU_SC/p)))) }) dimnames(SCORE) <- list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } MAX50[p,m] <- median(apply(SCORE,2,min)) TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) > 1)/K } } colnames(MAX50) <- paste0(colnames(MAX50),'.max50') colnames(TH) <- paste0(colnames(TH),'.threshold') colnames(S) <- paste0(colnames(S),'.score') P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(TH)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(S)),by='idx') %>% 
left_join(data.frame(MAX50,idx=1:nrow(MAX50)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_vline(aes(xintercept=max50,colour=model),size=1,alpha=.5,linetype=2) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + theme(aspect.ratio=1) model_param_values <- with(LIST_SUBSET[[1]]$model_params,which(model %in% c('unsup') & K %in% c(50))) S <- matrix(0.0,nrow(param_values),length(model_param_values), dimnames=list(1:nrow(param_values),LIST_SUBSET[[1]]$model_params[model_param_values,'ps'])) TH <- MAX50 <- S for (p in seq_along(param_values$idx)){ for (m in seq_along(model_param_values)){ MM <- LIST_SUBSET[[p]] M <- MM$out[[model_param_values[m]]] if (class(M$data$fit) == 'try-error') {S[p,m] <- TH[p,m] <- NA; next} K <- M$data$fit$settings$dim$K VOCAB <- M$data$fit$vocab OTUS <- M$data$sim$sc_otus SAMPS <- M$data$sim$samples LOGBETA2 <- M$data$fit$beta$logbeta[[1]] LOGBETA2[LOGBETA2==-Inf] <- min(LOGBETA2[LOGBETA2>-Inf]) BETA2 <- exp(LOGBETA2) colnames(BETA2) <- VOCAB OTU <- t(otu_table(M$data$ps)) META <- sample_data(M$data$ps) SIG <- M$results$sig sc_df <- data.frame(sc=paste0('sc',1:(3*N_SC)), sc_idx=paste0('sc',rep(1:5,3)), sc_type=rep(c('1','0','b'),each=N_SC)) G1 <- rownames(OTU)[rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2))] G2 <- rownames(OTU)[!(rownames(OTU) %in% paste0('s',1:(nrow(OTU)/2)))] GB <- rownames(OTU) G <- list(G1=G1,G2=G2,GB=GB) SCORE <- sapply(1:nrow(OTUS), function(sc) { #GROUP <- G[[ifelse(sc <= 5, 1, ifelse(sc >= 10, 3, 2))]] GROUP <- rownames(OTU)[rownames(OTU) %in% paste0('s',SAMPS[[sc]])] OTU_SC <- colSums(OTU[GROUP,]) OTU_SC <- OTU_SC[colnames(BETA2)] OTU_SC <- OTU_SC/sum(OTU_SC) OTU_SC[!(names(OTU_SC) %in% paste0('otu',OTUS[sc,]))] <- 0 TOP_SC <- BETA2 TOP_SC[TOP_SC<1e-300] <- 1e-300 apply(TOP_SC,1,function(p) bcd(p,OTU_SC)) }) dimnames(SCORE) <- 
list(paste0('T',1:nrow(SCORE)),paste0('sc',1:ncol(SCORE))) THRES <- seq(min(SCORE),max(SCORE),by=.01) for (thres in THRES){ col_thres <- colSums(1*(SCORE<thres)) if (sum(col_thres) >= K){ thres <- thres_last break } thres_last <- thres } MAX50[p,m] <- median(apply(SCORE,2,min)) TH[p,m] <- thres S[p,m] <- sum(rowSums(1*(SCORE<thres)) > 1)/K } } colnames(MAX50) <- paste0(colnames(MAX50),'.max50') colnames(TH) <- paste0(colnames(TH),'.threshold') colnames(S) <- paste0(colnames(S),'.score') P <- param_values %>% left_join(data.frame(TH,idx=1:nrow(TH)),by='idx') %>% left_join(data.frame(S,idx=1:nrow(S)),by='idx') %>% left_join(data.frame(MAX50,idx=1:nrow(MAX50)),by='idx') %>% gather(model,score,-(n_otu:idx)) %>% separate(model,c('model','stat'),sep='\\.') %>% spread(stat,score) P %>% ggplot(aes(threshold,score,shape=model,colour=model)) + geom_vline(aes(xintercept=max50,colour=model),size=1,alpha=.5,linetype=2) + geom_point(size=5) + facet_grid(sc_p~sc_m) + scale_colour_brewer(type='qual',palette=2) + theme(aspect.ratio=1)
5f142a713c53ea4a45d0ff3b11740b18bc6cd06d
79aa26d2d18b78dce81bb799bedd74d0ac0c47d9
/man/select2.Rd
feb18f7845b33ae038d1fa18458d4ca5db1d69dc
[]
no_license
MadsQE/minidplyr
729fe7d5409959a21d7fb08cb5381d6d7ab2606f
dac9172150bccd7e796eadffaba1a2ab382e3e3f
refs/heads/main
2023-07-16T21:13:46.823292
2021-08-26T09:34:53
2021-08-26T09:34:53
null
0
0
null
null
null
null
UTF-8
R
false
true
373
rd
select2.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/select.R \name{select2} \alias{select2} \title{Select some columns} \usage{ select2(df, names_or_ind) } \arguments{ \item{df}{A data frame} \item{names_or_ind}{A vector of column names or indices} } \value{ Another data frame, with the subsetted columns. } \description{ Select some columns }
82edd76f78571c515f71a3f5f42cda08d79a104e
447418d950c976e5dd9ead3722399f927e29fb7c
/Factors.R
6c2b6d9f332a0f392b549aeb1d9f53ee509fc89b
[]
no_license
archits14/datasciencecoursera
9678c4ade7410dc7f11554ab5753aa1c57647c0a
7b08290838f390f377608170f0600c76ba96f4f8
refs/heads/master
2022-12-14T07:57:49.002406
2020-09-05T03:24:25
2020-09-05T03:24:25
291,470,543
0
0
null
null
null
null
UTF-8
R
false
false
178
r
Factors.R
# Demonstrates ordered factors: summarising a plain character vector versus
# an ordered factor with an explicit level ordering.
temps <- c("cold", "med", "hot", "hot", "hot", "cold", "med")

# A character vector only reports length/class/mode in summary().
summary(temps)

# Convert to an ordered factor so the levels carry the ordering
# cold < med < hat comparisons expect: cold < med < hot.
fact.temp <- factor(temps, ordered = TRUE, levels = c("cold", "med", "hot"))
fact.temp

# summary() on a factor tabulates counts per level, in level order.
summary(fact.temp)
20548dbb89198b87b644e2f7923604fbd009265a
94da0f2f5d7fce4f84d4a540dd0edfed09b264e4
/R/sumZero.R
ae7c1704d04f97aa87e9d8d723d64d0734bef29a
[]
no_license
karirogg/SirKR
6f02cd2f80cf29ef38b600cbfd54f79ba315a6ef
f88039ef5c7e918a08d2d20d02d64df26ffe3059
refs/heads/master
2021-05-29T15:49:41.948932
2015-04-11T00:02:46
2015-04-11T00:02:46
null
0
0
null
null
null
null
UTF-8
R
false
false
812
r
sumZero.R
#' Sum of Vector Elements
#'
#' Takes in a numeric vector and returns the sum of it. If at least one
#' entry -- but not all of them -- is NA, the function returns 0. If every
#' entry is NA, it returns NA.
#'
#' @param x A numeric vector to find the sum of
#' @return The sum of the vector x; 0 when x mixes NA and non-NA values;
#'   NA when all values are NA.
#' @examples
#' x <- c(NA, 2)
#' sumZero(x)
#' # returns: 0
#' @export
sumZero <- function(x) {
  # Accept the same types as before (numeric, integer, logical); predicate
  # checks replace the fragile class(x) %in% ... comparison.
  if (!(is.numeric(x) || is.logical(x))) {
    stop("The input value is not able to convert to a numeric vector")
  }
  if (all(is.na(x))) {
    # Every entry missing: sum() without na.rm propagates NA.
    # (An empty vector also lands here and yields sum(numeric(0)) == 0,
    # matching the original behaviour.)
    return(sum(x, na.rm = FALSE))
  }
  if (anyNA(x)) {
    # Mixed NA / non-NA entries are defined to give 0. Generalized from the
    # original, which only inspected x[1] and x[2] and therefore missed NAs
    # at positions >= 3 (e.g. c(1, 2, NA) wrongly returned 3).
    return(0)
  }
  sum(x)
}
581ba27e784b1b41178c61d555e590d6b7da6cf3
4d6a64ab3bc0813e6888d58eeff8a809739f826b
/FishBase/R/fb_ration.R
7f66b730d034167a7b103c9637f4f03c71da5f50
[ "Apache-2.0" ]
permissive
cornejotux/FishBaseR
2d54075078238c89887ed8356b878c6f1cb7b900
72fbabbda485583fbcf06fb5502593e0f6e7fe7a
refs/heads/master
2022-12-25T18:03:43.153346
2020-09-29T20:13:50
2020-09-29T20:13:50
299,727,130
0
0
null
null
null
null
UTF-8
R
false
false
1,443
r
fb_ration.R
#' Retrieve the daily-ration table for a species from a FishBase mirror.
#'
#' Scrapes TrophicEco/RationList.php for the given FishBase id. When idFB is
#' NA it is first resolved from Genus/Species via fb_ids().
#'
#' @param idFB FishBase species id (NA to resolve it from Genus/Species).
#' @param Genus,Species Binomial name, used for the id lookup and echoed in
#'   the result.
#' @param server Base URL of the FishBase mirror to query.
#' @return A table with columns idFB, Genus, Species, Weight, Ration, K1,
#'   EvacRate, Temperature, Salinity and FoodI. The measurement columns are
#'   all NA when no ration data exist or the id could not be resolved.
fb_ration <- function(idFB = NA, Genus = NA, Species = NA,
                      server = 'http://www.fishbase.tw/') {
  require("XML")
  require("stringr")
  require('RCurl')

  ration_names <- c('idFB', 'Genus', 'Species', 'Weight', 'Ration', 'K1',
                    'EvacRate', 'Temperature', 'Salinity', 'FoodI')

  if (is.na(idFB)) {
    # Resolve the FishBase id (and stock code) from the binomial name.
    ids <- fb_ids(Genus = Genus, Species = Species, server = server)
    idFB <- ids$idFB
    StockCode <- ids$StockCode
  }

  # Fallback row: identifiers plus NA measurements. This is also what gets
  # returned when the id lookup fails -- the original never defined `diet`
  # on that path, so return(diet) raised "object 'diet' not found".
  diet <- c(as.character(idFB), as.character(Genus), as.character(Species),
            t(rep(NA, 7)))
  names(diet) <- ration_names

  if (!is.na(idFB)) {
    url <- paste(server, "TrophicEco/RationList.php?ID=", idFB, sep = "")
    # readHTMLTable() returns a list of parsed HTML tables keyed by id.
    s <- readHTMLTable(url, as.data.frame = TRUE)

    if (!is.null(s$dataTable)) {
      diet <- cbind(as.character(idFB), as.character(Genus),
                    as.character(Species), s$dataTable)
      names(diet) <- ration_names
      # readHTMLTable may yield factor columns; coerce every column to
      # character (replaces ten copy-pasted as.character() assignments).
      diet[] <- lapply(diet, as.character)
    }
  }

  return(diet)
}
fdd7796b725ecd7c5fdcaa5581783410bf2f3eae
47bb25ddbf692279a80e28ecaadeffbfd28973d5
/tests/testthat/test-advent.R
8717155e334ef55f02b7034791b574fa09d79234
[]
no_license
MatMoore/adventofcode2019
d7c28cd7c9650d8ba9472506b4d71a5e82035964
95db2b43dd9b3647b93ef0e75d337a1e1df1ecae
refs/heads/master
2020-09-22T14:33:08.996773
2019-12-06T22:00:47
2019-12-06T22:00:47
225,240,509
0
0
null
null
null
null
UTF-8
R
false
false
5,574
r
test-advent.R
context("day 1") test_that("day 1 part 1 returns the correct answer", { answer <- day_01_part1() expect_equal(answer, 3456641) }) test_that("day 1 part 2 returns the correct answer", { answer <- day_01_part2() expect_equal(answer, 5182078) }) context("day 2") test_that("day 2 part 1 returns the correct answer", { answer <- day_02_part1() expect_equal(answer, 3790645) }) test_that("day 2 part 2 returns the correct answer", { answer <- day_02_part2() expect_equal(answer, 6577) }) context("day 3") test_that("can parse a wire direction", { expect_equal(parse_direction("L1"), "L") expect_equal(parse_direction("R0"), "R") expect_equal(parse_direction("U123"), "U") expect_equal(parse_direction("D4"), "D") }) test_that("can parse a wire length", { expect_equal(parse_length("L1"), 1) expect_equal(parse_length("R0"), 0) expect_equal(parse_length("U123"), 123) expect_equal(parse_length("D4"), 4) }) test_that("can parse a wire", { input <- "R8" path <- parse_wire(input) expect_equal(path$direction, "R") expect_equal(path$wire_length, 8) }) test_that("can parse a wire path", { input <- c("R8", "U5", "L5", "D3") path <- parse_wire_path(input) expect_equal(path[[1]]$direction, "R") expect_equal(path[[1]]$wire_length, 8) expect_equal(path[[2]]$direction, "U") expect_equal(path[[2]]$wire_length, 5) expect_equal(path[[3]]$direction, "L") expect_equal(path[[3]]$wire_length, 5) expect_equal(path[[4]]$direction, "D") expect_equal(path[[4]]$wire_length, 3) }) test_that("can place central node on a grid", { path <- parse_wire_path(c("R8", "U5", "L5", "D3")) dims = wire_grid_dimensions(list(path)) expect_equal(dims$cx, 1) expect_equal(dims$cy, 6) }) test_that("can calculate required grid size", { path <- parse_wire_path(c("R8", "U5", "L5", "D3")) dims = wire_grid_dimensions(list(path)) expect_equal(dims$width, 9) expect_equal(dims$height, 6) }) test_that("can put central node in the centre of a grid", { # | . . . . # | . . . . # | . o - + # | . . . 
| # + - - - + path <- parse_wire_path(c("R2", "D2", "L4", "U4")) dims = wire_grid_dimensions(list(path)) expect_equal(dims$cx, 3) expect_equal(dims$cy, 3) expect_equal(dims$width, 5) expect_equal(dims$height, 5) }) test_that("can draw a wire path", { path <- parse_wire_path(c("R2", "D2", "L4", "U4")) dims = wire_grid_dimensions(list(path)) grid <- matrix(0, nrow=dims$height, ncol=dims$width) grid <- draw_wire_path(matrix=grid, row=dims$cy, col=dims$cx, path=path) expected <- rbind( c(12,0,0,0,0), c(11,0,0,0,0), c(10,0,0,1,2), c(9, 0,0,0,3), c(8, 7,6,5,4) ) expect_equal(grid, expected) }) test_that("can identify wire crossings", { path1 <- parse_wire_path(c("R8", "U5", "L5", "D3")) path2 <- parse_wire_path(c("U7", "R6", "D4", "L4")) dims <- wire_grid_dimensions(list(path1, path2)) grid <- matrix(0, nrow=dims$height, ncol=dims$width) grid2 <- draw_wire_path(matrix=grid, row=dims$cy, col=dims$cx, path=path1) grid3 <- draw_wire_path(matrix=grid, row=dims$cy, col=dims$cx, path=path2) grid2[grid2!=0] <- 1 grid3[grid3!=0] <- 1 expected <- rbind( c(1,1,1,1,1,1,1,0,0), c(1,0,0,0,0,0,1,0,0), c(1,0,0,1,1,1,2,1,1), c(1,0,0,1,0,0,1,0,1), c(1,0,1,2,1,1,1,0,1), c(1,0,0,1,0,0,0,0,1), c(1,0,0,0,0,0,0,0,1), c(0,1,1,1,1,1,1,1,1) ) expect_equal(grid2 + grid3, expected) }) test_that("can find the manhattan distance", { expect_equal(manhattan_distance(c(1,1), c(2,2)), 2) expect_equal(manhattan_distance(c(2,2), c(1,1)), 2) expect_equal(manhattan_distance(c(2,2), c(3,1)), 2) expect_equal(manhattan_distance(c(2,2), c(2,5)), 3) }) test_that("can find intersections", { path1 <- parse_wire_path(c("R8", "U5", "L5", "D3")) path2 <- parse_wire_path(c("U7", "R6", "D4", "L4")) dims <- wire_grid_dimensions(list(path1, path2)) grid <- matrix(0, nrow=dims$height, ncol=dims$width) grid2 <- draw_wire_path(matrix=grid, row=dims$cy, col=dims$cx, path=path1) grid3 <- draw_wire_path(matrix=grid, row=dims$cy, col=dims$cx, path=path2) grid2[grid2!=0] <- 1 grid3[grid3!=0] <- 1 intersections <- 
find_intersections(grid2 + grid3) expected <- rbind( c(row=5, col=4), c(row=3, col=7) ) expect_equal(intersections, expected) }) test_that("can find the closest intersection", { path1 <- parse_wire_path(c("R8", "U5", "L5", "D3")) path2 <- parse_wire_path(c("U7", "R6", "D4", "L4")) closest <- closest_intersection(path1, path2) expect_equal(closest, 6) }) test_that("example1 has a distance of 159", { input1 <- c("R75", "D30", "R83", "U83", "L12", "D49", "R71", "U7", "L72") path1 <- parse_wire_path(input1) input2 <- c("U62", "R66", "U55", "R34", "D71", "R55", "D58", "R83") path2 <- parse_wire_path(input2) answer <- closest_intersection(path1, path2) expect_equal(answer, 159) }) test_that("example1 takes 610 steps", { input1 <- c("R75", "D30", "R83", "U83", "L12", "D49", "R71", "U7", "L72") path1 <- parse_wire_path(input1) input2 <- c("U62", "R66", "U55", "R34", "D71", "R55", "D58", "R83") path2 <- parse_wire_path(input2) answer <- shortest_steps(path1, path2) expect_equal(answer, 610) }) # These are commented because they are slow to run #test_that("day 3 part 1 returns the incorrect answer", { # answer <- day03_part1() # print(answer) # # expect_equal(answer, 273) #}) #test_that("day 3 part 1 returns the incorrect answer", { # answer <- day03_part2() # print(answer) # # expect_equal(answer, 15622) #})
4fe6f29010b516c529ecc9fca961e0a47471158b
0fc8ad550144e4e96c9aae06f83e2be7aaef5a48
/R/SmallNR.R
b631ce64079c7d0c78b8f9e55c350adc9854599e
[]
no_license
nroming/NRmisc
f3a3072bfbe257b65f00005948d68d37635624f8
41b375758997310288a99a562f5bd89a01cb844e
refs/heads/master
2021-01-22T02:39:57.560916
2015-03-05T10:55:49
2015-03-05T10:55:49
25,294,408
0
2
null
2015-02-03T15:52:10
2014-10-16T09:03:22
R
UTF-8
R
false
false
490
r
SmallNR.R
#' Return n-smallest value of a vector of numbers
#'
#' Sorts the input ascending (which drops NAs) and extracts the n-th entry.
#'
#' @author Niklas Roming
#' @param x A vector (numeric or integer)
#' @param n Which value to return (e.g. n = 2 => second-smallest)
#' @return The n-smallest value of x
#' @export
SmallNR <- function(x, n){
  # sort() orders ascending and silently removes NAs, so position n is the
  # n-th smallest remaining value; unlist() keeps the original behaviour
  # for list-like input.
  ascending <- sort(x)
  result <- unlist(ascending[n])
  return(result)
}
1af80c4abeb254be90f906eab515ac24a9046cf1
ee547df8ce469ca54c111a61edb66b3c6123b588
/man/SVMModel.Rd
08a851ba76aa33db4166b861e1dba0a2100628df
[]
no_license
brian-j-smith/MachineShop
b4d6ee3d3e900ac0733ea0a663c54ba74cc17800
3599d9af6f58faff2f423520193d3029637f7bc5
refs/heads/master
2023-04-07T00:49:41.675201
2023-03-21T14:23:23
2023-03-21T14:23:23
150,329,909
63
12
null
2018-11-18T23:55:38
2018-09-25T21:12:07
R
UTF-8
R
false
true
3,518
rd
SVMModel.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ML_SVMModel.R \name{SVMModel} \alias{SVMModel} \alias{SVMANOVAModel} \alias{SVMBesselModel} \alias{SVMLaplaceModel} \alias{SVMLinearModel} \alias{SVMPolyModel} \alias{SVMRadialModel} \alias{SVMSplineModel} \alias{SVMTanhModel} \title{Support Vector Machine Models} \usage{ SVMModel( scaled = TRUE, type = character(), kernel = c("rbfdot", "polydot", "vanilladot", "tanhdot", "laplacedot", "besseldot", "anovadot", "splinedot"), kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, cache = 40, tol = 0.001, shrinking = TRUE ) SVMANOVAModel(sigma = 1, degree = 1, ...) SVMBesselModel(sigma = 1, order = 1, degree = 1, ...) SVMLaplaceModel(sigma = numeric(), ...) SVMLinearModel(...) SVMPolyModel(degree = 1, scale = 1, offset = 1, ...) SVMRadialModel(sigma = numeric(), ...) SVMSplineModel(...) SVMTanhModel(scale = 1, offset = 1, ...) } \arguments{ \item{scaled}{logical vector indicating the variables to be scaled.} \item{type}{type of support vector machine.} \item{kernel}{kernel function used in training and predicting.} \item{kpar}{list of hyper-parameters (kernel parameters).} \item{C}{cost of constraints violation defined as the regularization term in the Lagrange formulation.} \item{nu}{parameter needed for nu-svc, one-svc, and nu-svr.} \item{epsilon}{parameter in the insensitive-loss function used for eps-svr, nu-svr and eps-bsvm.} \item{prob.model}{logical indicating whether to calculate the scaling parameter of the Laplacian distribution fitted on the residuals of numeric response variables. 
Ignored in the case of a factor response variable.} \item{cache}{cache memory in MB.} \item{tol}{tolerance of termination criterion.} \item{shrinking}{whether to use the shrinking-heuristics.} \item{sigma}{inverse kernel width used by the ANOVA, Bessel, and Laplacian kernels.} \item{degree}{degree of the ANOVA, Bessel, and polynomial kernel functions.} \item{...}{arguments passed to \code{SVMModel} from the other constructors.} \item{order}{order of the Bessel function to be used as a kernel.} \item{scale}{scaling parameter of the polynomial and hyperbolic tangent kernels as a convenient way of normalizing patterns without the need to modify the data itself.} \item{offset}{offset used in polynomial and hyperbolic tangent kernels.} } \value{ \code{MLModel} class object. } \description{ Fits the well known C-svc, nu-svc, (classification) one-class-svc (novelty) eps-svr, nu-svr (regression) formulations along with native multi-class classification formulations and the bound-constraint SVM formulations. } \details{ \describe{ \item{Response types:}{\code{factor}, \code{numeric}} \item{\link[=TunedModel]{Automatic tuning} of grid parameters:}{ \itemize{ \item SVMANOVAModel: \code{C}, \code{degree} \item SVMBesselModel: \code{C}, \code{order}, \code{degree} \item SVMLaplaceModel: \code{C}, \code{sigma} \item SVMLinearModel: \code{C} \item SVMPolyModel: \code{C}, \code{degree}, \code{scale} \item SVMRadialModel: \code{C}, \code{sigma} } } } Arguments \code{kernel} and \code{kpar} are automatically set by the kernel-specific constructor functions. Default values and further model details can be found in the source link below. } \examples{ fit(sale_amount ~ ., data = ICHomes, model = SVMRadialModel) } \seealso{ \code{\link[kernlab]{ksvm}}, \code{\link{fit}}, \code{\link{resample}} }
7eae27afc03859369c64a562ea1c95aed2e1630f
79457aaae83a0b3914a38874c10907440e0dfc61
/man/gdalcubes_options.Rd
3753ecc5b24dc7169976e380dc6125c8204054d9
[]
permissive
appelmar/gdalcubes
be9786b36fbe4e25a5c0245968634f57a40752ad
2134f769454e147660e7a73c61afa14219de20b4
refs/heads/master
2023-08-07T20:56:02.442579
2023-07-25T06:36:46
2023-07-25T06:36:46
148,130,790
74
7
MIT
2023-03-23T19:56:08
2018-09-10T09:25:01
C++
UTF-8
R
false
true
3,083
rd
gdalcubes_options.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/config.R \name{gdalcubes_options} \alias{gdalcubes_options} \title{Set or read global options of the gdalcubes package} \usage{ gdalcubes_options( ..., parallel, ncdf_compression_level, debug, cache, ncdf_write_bounds, use_overview_images, show_progress, default_chunksize, streaming_dir, log_file, threads ) } \arguments{ \item{...}{not used} \item{parallel}{number of parallel workers used to process data cubes or TRUE to use the number of available cores automatically} \item{ncdf_compression_level}{integer; compression level for created netCDF files, 0=no compression, 1=fast compression, 9=small compression} \item{debug}{logical; print debug messages} \item{cache}{logical; TRUE if temporary data cubes should be cached to support fast reprocessing of the same cubes} \item{ncdf_write_bounds}{logical; write dimension bounds as additional variables in netCDF files} \item{use_overview_images}{logical; if FALSE, all images are read on original resolution and existing overviews will be ignored} \item{show_progress}{logical; if TRUE, a progress bar will be shown for actual computations} \item{default_chunksize}{length-three vector with chunk size in t, y, x directions or a function taking a data cube size and returning a suggested chunk size} \item{streaming_dir}{directory where temporary binary files for process streaming will be written to} \item{log_file}{character, if empty string or NULL, diagnostic messages will be printed to the console, otherwise to the provided file} \item{threads}{number of threads used to process data cubes (deprecated)} } \description{ Set global package options to change the default behavior of gdalcubes. These include how many parallel processes are used to process data cubes, how created netCDF files are compressed, and whether or not debug messages should be printed. 
} \details{ Data cubes can be processed in parallel where the number of chunks in a cube is distributed among parallel worker processes. The actual number of used workers can be lower if a data cube as less chunks. If parallel is TRUE, the number of available cores is used. Setting parallel = FALSE can be used to disable parallel processing. Notice that since version 0.6.0, separate processes are being used instead of parallel threads to avoid possible R session crashes due to some multithreading issues. Caching has no effect on disk or memory consumption, it simply tries to reuse existing temporary files where possible. For example, changing only parameters to \code{plot} will void reprocessing the same data cube if cache is TRUE. The streaming directory can be used to control the performance of user-defined functions, if disk IO is a bottleneck. Ideally, this can be set to a directory on a shared memory device. Passing no arguments will return the current options as a list. } \examples{ gdalcubes_options(parallel=4) # set the number gdalcubes_options() # print current options gdalcubes_options(parallel=FALSE) # reset }
7359924195940ed7e5e7ef88fcb7e92e512daf60
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/netgsa/examples/NetGSA.Rd.R
4a55b66b2865863e633000fa57df9f37261cc1e3
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
357
r
NetGSA.Rd.R
library(netgsa) ### Name: NetGSA ### Title: Network-based Gene Set Analysis ### Aliases: NetGSA ### ** Examples set.seed(1) ## NetGSA with directed networks ## NetGSA with undirected networks data(netgsaex2) A = netgsaex2$A B = netgsaex2$B x = netgsaex2$x y = netgsaex2$y # -Not-run- # fit = NetGSA(A, x, y, B, lklMethod="REML", directed=FALSE)
026150d9880f35f653a89fae7a4750f8ebf7abda
ee2553a8d0160ce6b0410007cdda86e71fd5ad3d
/JHigh HW 3/HW 3 Problem 4/HW 4 Problem 4.R
11558cbef4784f74e35bd0ae928ee035df77a19b
[]
no_license
joehigh/EN.553.732
bd4b958acbcbc7cf91b5c0d120add6ca2994b6c9
fc277a288418c3ad577f28c6733e56b3e07fd416
refs/heads/master
2022-04-27T01:41:00.268173
2020-04-29T09:24:44
2020-04-29T09:24:44
255,237,001
0
1
null
null
null
null
UTF-8
R
false
false
2,167
r
HW 4 Problem 4.R
#Problem4 #part a glucose = read.table("glucose.dat", header = FALSE); data=as.matrix(glucose) data=as.numeric(data) hist(data,breaks=seq(50,200,5),freq=FALSE,main="Problem 4 Part a") lines(density(data)) #part c y=data set.seed(123) n=length(y) iter=10000 a=1 b=1 mu0=120 tau0.sq=200 sigma0.sq=1000 nu0=10 x=matrix(0,iter, n) p=numeric(iter) theta1=numeric(iter) theta2=numeric(iter) sigma1.sq=numeric(iter) sigma2.sq=numeric(iter) p[1]=rbeta(1, a, b) x[1,]=rbinom(n,1,p[1]) theta1[1]=rnorm(1,mu0,sqrt(tau0.sq)) theta2[1]=rnorm(1,mu0,sqrt(tau0.sq)) sigma1.sq[1]=1/rgamma(1, nu0/2, nu0*sigma0.sq/2) sigma2.sq[1]=1/rgamma(1, nu0/2, nu0*sigma0.sq/2) for (i in 2:iter) { for (j in 1:n) { y1=dnorm(y[j], theta1[i-1], sqrt(sigma1.sq[i-1])) y2=dnorm(y[j], theta2[i-1], sqrt(sigma2.sq[i-1])) x[i,j]=rbinom(1,1,(p[i-1]*y1)/(p[i-1]*y1+(1-p[i-1])*y2)) } c=sum(x[i,]) p[i]=rbeta(1,a+c,b+n-c) y_1.bar=mean(y[x[i,]==1]) mu_n=(mu0/tau0.sq+c*y_1.bar/sigma1.sq[i-1])/(1/tau0.sq+c/sigma1.sq[i-1]) tau2_n=1/(1/tau0.sq+c/sigma1.sq[i-1]) theta1[i]=rnorm(1, mu_n, sqrt(tau2_n)) nu_n=nu0+c s2_n=sum((y[x[i,] == 1]-theta1[i])^2)/c sigma2_n=(nu0*sigma0.sq+c*s2_n)/nu_n sigma1.sq[i]=1/rgamma(1,nu_n/2,nu_n*sigma2_n/2) y_2.bar=mean(y[x[i,] == 0]) mu_n=(mu0/tau0.sq+(n-c)*y_2.bar/sigma2.sq[i-1])/(1/tau0.sq+(n-c)/sigma2.sq[i-1]) tau2_n=1/(1/tau0.sq+(n-c)/sigma2.sq[i-1]) theta2[i]=rnorm(1, mu_n, sqrt(tau2_n)) nu_n=nu0+(n-c) s2_n=sum((y[x[i,] == 0]-theta2[i])^2)/(n-c) sigma2_n=(nu0*sigma0.sq+(n-c)*s2_n)/nu_n sigma2.sq[i]=1/rgamma(1,nu_n/2,nu_n*sigma2_n/2) } theta_1s=rep(0,iter) theta_2s=rep(0,iter) for (i in 1:iter) { theta_1s[i]=min(theta1[i], theta2[i]) theta_2s[i]=max(theta1[i], theta2[i]) } acf(theta_1s,main="Problem 4 Part c, theta1 ") acf(theta_2s,main="Problem 4 Part c, theta2 ") effectiveSize(theta_1s) effectiveSize(theta_2s) #part d x_1=rbinom(length(p), 1, p) y_1=numeric(iter) for (i in 1:iter) { if (x_1[i] == 1) { y_1[i]=rnorm(1, theta1[i], sqrt(sigma1.sq[i])) } else { y_1[i]=rnorm(1, theta2[i], 
sqrt(sigma2.sq[i])) } } hist(y_1,breaks=seq(0,300,5),freq=FALSE,main="Problem 4 Part d") lines(density(y))
bec35d2c7f7335148b8eb60880d63bf84884fcc7
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Devore7/examples/ex07.58.Rd.R
a0e5200c0d1e07d27b90a9fc173e1b20f8fd2014
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
160
r
ex07.58.Rd.R
library(Devore7) ### Name: ex07.58 ### Title: R Data set: ex07.58 ### Aliases: ex07.58 ### Keywords: datasets ### ** Examples data(ex07.58) str(ex07.58)
94819142fd6c9795a92bd2ddfc7542259883d733
060c6a303098ef689c43ea0feff68c68272477e9
/kinomeAnalysis/kinome_baseline_data_munging.R
12fd6dbdfd110b218cba0a1ef5b52d55405c2070
[]
no_license
Sage-Bionetworks/Synodos_NF2
e1c004191de438d6efa2d565f7d1c1e36a90efaa
1506b57c74469439e81fe8afbc6de9add681c57c
refs/heads/master
2022-12-20T16:20:09.620615
2022-12-14T22:53:52
2022-12-14T22:53:52
20,036,276
2
0
null
2014-05-28T18:09:16
2014-05-21T20:29:18
R
UTF-8
R
false
false
4,448
r
kinome_baseline_data_munging.R
library(synapseClient) library("gdata") library("tidyr") library("dplyr") library("reshape2") require("parallel") library("plyr") library("doMC") library("gdata") registerDoMC(4) synapseLogin() #schwannoma baseline data baseline_schwannoma_human_synid <- "syn4214458" baseline_schwannoma_human <- synGet(baseline_schwannoma_human_synid) baseline_schwannoma_human <- read.xls(xls=baseline_schwannoma_human@filePath,sheet="trimmed") rownames(baseline_schwannoma_human) <- baseline_schwannoma_human$Gene baseline_schwannoma_human <- baseline_schwannoma_human[,grepl('LFQ',colnames(baseline_schwannoma_human))] baseline_schwannoma_human$Gene <- toupper(rownames(baseline_schwannoma_human)) baseline_schwannoma_mouse_synid <- "syn4214457" baseline_schwannoma_mouse <- synGet(baseline_schwannoma_mouse_synid) baseline_schwannoma_mouse <- read.xls(xls=baseline_schwannoma_mouse@filePath,sheet="trimmed") rownames(baseline_schwannoma_mouse) <- baseline_schwannoma_mouse$Gene baseline_schwannoma_mouse <- baseline_schwannoma_mouse[,grepl('LFQ',colnames(baseline_schwannoma_mouse))] baseline_schwannoma_mouse$Gene <- toupper(rownames(baseline_schwannoma_mouse)) #vestibular schwannoma data baseline_vest_schwannoma_synid <- "syn4942532" baseline_vest_schwannoma <- synGet(baseline_vest_schwannoma_synid) baseline_vest_schwannoma <- read.xls(xls=baseline_vest_schwannoma@filePath,sheet="Kinase LFQ") rownames(baseline_vest_schwannoma) <- baseline_vest_schwannoma$Gene.names baseline_vest_schwannoma <- baseline_vest_schwannoma[,grepl('LFQ',colnames(baseline_vest_schwannoma))] baseline_vest_schwannoma$Gene <- toupper(rownames(baseline_vest_schwannoma)) View(baseline_vest_schwannoma) #meningioma baseline data baseline_meningioma_synid <- "syn3104723" baseline_meningioma <- synGet(baseline_meningioma_synid) baseline_meningioma <- read.xls(xls=baseline_meningioma@filePath,sheet="Kinases with LFQ in all") rownames(baseline_meningioma) <- baseline_meningioma$Gene.names baseline_meningioma <- 
baseline_meningioma[,grepl('LFQ',colnames(baseline_meningioma))] baseline_meningioma$Gene <- toupper(rownames(baseline_meningioma)) kinome_baseline_LFQ_data <- Reduce(function(x,y) merge(x,y,by="Gene", all=T), list(baseline_meningioma, baseline_schwannoma_human,baseline_schwannoma_mouse, baseline_vest_schwannoma)) colnames(kinome_baseline_LFQ_data) <- gsub('LFQ.intensity.', '',colnames(kinome_baseline_LFQ_data)) #upload to synapse outfile <- "Synodos_kinome_baseline_LFQ_data.tsv" write.table(kinome_baseline_LFQ_data, file=outfile, sep="\t", row.names=F, col.names=T, quote=F) synStore(File(outfile, parentId = "syn4259360"), used = c(baseline_meningioma_synid, baseline_schwannoma_human_synid, baseline_schwannoma_mouse_synid, baseline_vest_schwannoma_synid), executed = "https://github.com/Sage-Bionetworks/Synodos_NF2/blob/master/kinomeAnalysis/kinome_baseline_data_munging.R") unlink(outfile) ### #process new baseline data - iTRAQ based ### source("kinomeAnalysis/kinome_data_functions.R") #new baseline triplicate data - protein level meningioma_baseline_triplicates_proteins_synid <- 'syn5575201' schwannoma_baseline_triplicates_proteins_synid <- 'syn5575204' meningioma_baseline_triplicates_proteins <- get_new_kinome_proteinData(meningioma_baseline_triplicates_proteins_synid) schwannoma_baseline_triplicates_proteins <- get_new_kinome_proteinData(schwannoma_baseline_triplicates_proteins_synid) Kinome_baseline_iTRAQ_data <- rbind(meningioma_baseline_triplicates_proteins,schwannoma_baseline_triplicates_proteins) Kinome_baseline_iTRAQ_data <- Kinome_baseline_iTRAQ_data %>% mutate(condition = sample) %>% separate(sample, into=c('cellLine', 'referenceSample'), sep="/") %>% separate(cellLine, into=c('cellLine', 'replicate'), sep="_") %>% separate(referenceSample, into=c('referenceSample', 'refSampleReplicate'), sep="_") %>% mutate(log2ratio = log2(ratio)) View(Kinome_baseline_iTRAQ_data) #upload to synapse outfile <- "Synodos_kinome_baseline_iTRAQ_data.tsv" 
write.table(Kinome_baseline_iTRAQ_data, file=outfile, sep="\t", row.names=F, col.names=T, quote=F) synStore(File(outfile, parentId = "syn4259360"), used = c(meningioma_baseline_triplicates_proteins_synid, schwannoma_baseline_triplicates_proteins_synid), executed = "https://github.com/Sage-Bionetworks/Synodos_NF2/blob/master/kinomeAnalysis/kinome_baseline_data_munging.R") unlink(outfile)
7cb4806da6638c720a9dc80285c58ee74060e83f
75778fec111d3c10b1dca261dd7dddd47c3a4bc8
/script_data_analysis.R
b591f73e2c54cc36f1e1dd21413c49989a59861b
[]
no_license
veren4/Protein_Prediction_II_plots
6ed5591b13580422b340e038c852f7e48db487cd
9e59d9b77baa408c2cc490ff45098f18197faaf1
refs/heads/master
2022-11-23T06:50:37.937935
2019-11-28T11:28:32
2019-11-28T11:28:32
279,658,912
0
0
null
null
null
null
UTF-8
R
false
false
5,136
r
script_data_analysis.R
# # density plot of positions of NLSs/ NESs in the protein sequences # # in fasta file: find out absolute lengths of proteins library(tidyverse) my_protein_sequences = seqinr::read.fasta(file = "C:/Users/Verena/1_Studium/03_Aufbaustudium_Informatik/Protein Prediction II/Exercise/data/ns/nes_nls.fasta", seqtype = "AA", as.string = T) my_proteins = data.frame(protein_ID = character(), aa_sequence = character(), sequence_length = integer(), stringsAsFactors = F) for(i in 1:length(my_protein_sequences)){ my_proteins[i, "protein_ID"] = attributes(my_protein_sequences[[i]])$name my_proteins[i, "aa_sequence"] = my_protein_sequences[[i]][1] my_proteins[i, "sequence_length"] = seqinr::getLength(my_protein_sequences[[i]]) } # in excel file: extract start + end of NLS/ NES and wether its an NLS or NES locations = read_tsv(file = "C:/Users/Verena/1_Studium/03_Aufbaustudium_Informatik/Protein Prediction II/Exercise/data/ns/nes_nls.tab", col_names = F) %>% dplyr::rename(protein_ID = X1, start = X2, end = X3, kind = X4) my_proteins = dplyr::full_join(my_proteins, locations, by = c("protein_ID" = "protein_ID")) rm(my_protein_sequences, i, locations) # Theoretisch ist es nicht gründlich genug, davon auszugehen, dass die Sequenzen # in den beiden Dokumenten gleich orientiert sind. my_proteins = dplyr::mutate(my_proteins, perc_start = round((start/sequence_length)*100, digits = 0), perc_end = round((end/sequence_length)*100, digits = 0)) occupied_percentage_boxes_NLS = c() occupied_percentage_boxes_NES = c() for(i in 1:nrow(my_proteins)){ temp = my_proteins[i, "perc_start"]:my_proteins[i, "perc_end"] if(my_proteins[i, "kind"] == "NLS"){ occupied_percentage_boxes_NLS = c(occupied_percentage_boxes_NLS, temp) }else{ occupied_percentage_boxes_NES = c(occupied_percentage_boxes_NES, temp) } } # Für diesen Plot könnte ich die Prozentpunkte auch mit mehr Nachkommastellen berechnen. 
my_proteins$protein_ID = factor(my_proteins$protein_ID) my_proteins$kind = factor(my_proteins$kind, levels = c("NLS", "NES")) # backup = my_proteins # my_proteins = backup my_proteins_kinder = my_proteins %>% select(protein_ID, kind) %>% group_by(protein_ID) %>% summarise(signal_cases = length(unique(kind))) my_proteins = left_join(my_proteins, my_proteins_kinder, by = c("protein_ID" = "protein_ID")) my_proteins = my_proteins %>% mutate(final_kind_sorter = case_when(signal_cases == 2 ~ "both", kind == "NES" ~ "NES", kind == "NLS" ~ "NLS")) start_sorter = select(my_proteins, protein_ID, perc_start) %>% group_by(protein_ID) %>% summarise(first_start = min(perc_start)) my_proteins = left_join(my_proteins, start_sorter, by = c("protein_ID" = "protein_ID")) my_proteins$final_kind_sorter = factor(my_proteins$final_kind_sorter, levels = c("NLS", "both", "NES")) # sort by first_start within the 3 kind groups -------------------------------- NLS_orderer = dplyr::filter(my_proteins, final_kind_sorter == "NLS") %>% arrange(first_start) ########### here lieth the problem!!!!!!!! ################## # Du musst innerhalb der facet sortieren, und dabei berücksichtigen, dass später # NxS eines Proteins innerhalb einer Zeile sind. 
# ggplot2 --------------------------------------------------------------------- gantt = ggplot(data = my_proteins, mapping = aes(x = perc_start, y = protein_ID, xend = perc_end, yend = protein_ID)) + facet_grid(rows = vars(final_kind_sorter)) + geom_segment(mapping = aes(color = kind)) + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(), legend.key = element_blank(), panel.background = element_blank(), text = element_text(size=14)) + ylab("protein sequence") + xlab("relative protein sequence position [%]") + scale_color_manual(values = c("#37c837", "#ff7f0e"), name = element_blank()) gantt # plot separately ------------------------------------------------------------- NLS_plot_input = filter(my_proteins, final_kind_sorter == "NLS") NLS_plot_input$perc_start = factor(NLS_plot_input$perc_start, levels = order(NLS_plot_input$first_start)) NLS_gantt = ggplot(data = NLS_plot_input, mapping = aes(x = perc_start, y = protein_ID, xend = perc_end, yend = protein_ID)) + geom_segment(mapping = aes(color = kind)) + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(), legend.key = element_blank(), panel.background = element_blank(), text = element_text(size=14)) + ylab("protein sequence") + xlab("relative protein sequence position [%]") + scale_color_manual(values = c("#37c837", "#ff7f0e"), name = element_blank()) NLS_gantt
e576868601a4dd8b191f700bc3b84a42622bc402
18e8822e6cce16631058ecfd14906bbb1580aa66
/R/BITFAM_scATAC.R
536e2ce15f87955d9762087b92ab584f7a490118
[]
no_license
jaleesr/BITFAM
fe443d9b0bc23016526e483e918cfe38bd069913
b604014c40329b3f737d4a152d44114a60c518b1
refs/heads/master
2023-02-21T11:10:50.972711
2023-02-13T13:50:50
2023-02-13T13:50:50
310,433,311
27
18
null
null
null
null
UTF-8
R
false
false
340
r
BITFAM_scATAC.R
#' Extract the genes that have scATAC-seq peaks on their promoter regions #' #' @param scATAC_obj A preprocessed Seurat object of scATAC-seq data. #' @return the genes that have scATAC-seq peaks on their promoter regions #' @export #' @import rstan #' @import Seurat BITFAM_scATAC <- function(scATAC_obj){ return(rownames(scATAC_obj)) }
312d64ea0082a5d031500741fdb5457733e9d543
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/bamlss/examples/boost.Rd.R
a17b2ba42017f1f0414018c6214b03e6a653c4db
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
2,784
r
boost.Rd.R
library(bamlss) ### Name: boost ### Title: Boosting BAMLSS ### Aliases: boost boostm boost.summary boost.plot print.boost.summary ### plot.boost.summary boost.frame ### Keywords: regression ### ** Examples ## Not run: ##D ## Simulate data. ##D set.seed(123) ##D d <- GAMart() ##D ##D ## Estimate model. ##D f <- num ~ x1 + x2 + x3 + lon + lat + ##D s(x1) + s(x2) + s(x3) + s(lon) + s(lat) + te(lon,lat) ##D ##D b <- bamlss(f, data = d, optimizer = boost, ##D sampler = FALSE, scale.d = TRUE, nu = 0.01, ##D maxit = 1000, plot = FALSE) ##D ##D ## Plot estimated effects. ##D plot(b) ##D ##D ## Print and plot the boosting summary. ##D boost.summary(b, plot = FALSE) ##D boost.plot(b, which = 1) ##D boost.plot(b, which = 2) ##D boost.plot(b, which = 3, name = "mu.s.te(lon,lat).") ##D ##D ## Extract estimated parameters for certain ##D ## boosting iterations. ##D parameters(b, mstop = 1) ##D parameters(b, mstop = 100) ##D ##D ## Also works with predict(). ##D head(do.call("cbind", predict(b, mstop = 1))) ##D head(do.call("cbind", predict(b, mstop = 100))) ##D ##D ## Another example using the modified liklihood ##D ## bootsing algorithm. ##D f <- list( ##D num ~ x1 + x2 + x3 + lon + lat + ##D s(x1) + s(x2) + s(x3) + s(lon) + s(lat) + te(lon,lat), ##D sigma ~ x1 + x2 + x3 + lon + lat + ##D s(x1) + s(x2) + s(x3) + s(lon) + s(lat) + te(lon,lat) ##D ) ##D ##D b <- bamlss(f, data = d, optimizer = boostm, ##D sampler = FALSE, scale.d = TRUE, nu = 0.05, ##D maxit = 400, stop.criterion = "AIC", force.stop = FALSE) ##D ##D ## Plot estimated effects. ##D plot(b) ##D ##D ## Plot AIC and log-lik contributions. ##D boost.plot(b, "AIC") ##D boost.plot(b, "loglik.contrib") ##D ##D ## Out-of-sample selection of model terms. 
##D set.seed(123) ##D d <- GAMart(n = 5000) ##D ##D ## Split data into training and testing ##D i <- sample(1:2, size = nrow(d), replace = TRUE) ##D d.test <- subset(d, i == 1) ##D d.train <- subset(d, i == 2) ##D ##D ## Model formula ##D f <- list( ##D num ~ s(x1) + s(x2) + s(x3), ##D sigma ~ s(x1) + s(x2) + s(x3) ##D ) ##D ##D ## Create model frame for out-of-sample selection. ##D sm <- boost.frame(f, train = d.train, test = d.test, family = "gaussian") ##D ##D ## Out-of-sample selection function. ##D sfun <- function(parameters) { ##D sm$parameters <- parameters ##D p <- predict(sm, type = "parameter") ##D -1 * sum(sm$family$d(d.test$num, p, log = TRUE)) ##D } ##D ##D ## Start boosting with out-of-sample negative ##D ## log-likelihood selection of model terms. ##D b <- bamlss(f, data = d.train, sampler = FALSE, optimizer = boost, ##D selectfun = sfun, always = "best") ##D ##D ## Plot curve of negative out-of-sample log-likelihood. ##D boost.plot(b, which = "user") ## End(Not run)
a3e3b07172666b137d96f2e19211870f68d2a0cd
d69f4337b05a04eafa938a070eb824ef0cb517e3
/bin/plotAME.R
fe36766cbafec4dd41738be6e31bf27931b38bdf
[]
no_license
biomystery/atacMotif
2d7b0212c0bc4c4f6f38f49b09e4a063bf3dc384
e68d54bf00036a648ea6da62c19c3ab308e834f8
refs/heads/master
2022-02-24T01:07:02.357139
2019-10-09T06:59:25
2019-10-09T06:59:25
138,637,073
0
0
null
null
null
null
UTF-8
R
false
false
19,498
r
plotAME.R
source("./bin/aux_funs.R") tfclass <- readRDS("./db/tfclass.rds") # input parameters -------------------------------------------------------- ame_path <- "./test/ame_2kbg_all.res.txt" th <- 0.1 # ame’s output ------------------------------------------------------------ ame_res <- read.table(ame_path, header = T, stringsAsFactors = F) # load jaspar_to_TFclass dic--------------------------------------------------------- # filter adj_p.value <1e-5 ame_res.anno <- ame_res%>% filter(adj_p.value<=th) # jaspa dic dic$jasparTOensembl <- unique(ame_res.anno$motif_ID) names(dic$jasparTOensembl) <- unique(ame_res.anno$motif_ID) if(F){ # grab jaspar info from web for(i in 1:length(dic.jasparTOensembl)) { print(i); dic.jasparTOensembl[i]=getEnsemblFromJaspar(dic.jasparTOensembl[i])} # tfclass dic dic.ensemblTOsubfamily <- tfclass$merge%>% separate_rows(tf.id,sep = ";") %>% distinct(tf.id,.keep_all = T)%>% column_to_rownames("tf.id") # jaspa to tfclass dic.jasparTOtfclass <-data.frame(ensembl.id = dic.jasparTOensembl)%>% rownames_to_column("motif_ID")%>% separate_rows(ensembl.id,sep = ";") %>% right_join(dic.ensemblTOsubfamily%>%rownames_to_column("ensembl.id"))%>% filter(!is.na(motif_ID)) saveRDS(list(jasparTOensembl=dic.jasparTOensembl, ensemblTOtfclass=dic.ensemblTOsubfamily, jasparTOtfclass=dic.jasparTOtfclass), file = "./db/dic_jaspar_tfclass.rds") } dic<- readRDS("./db/dic_jaspar_tfclass.rds") names(dic) # add annotation ----------------------------------------------------------------- tmp <- sapply(unique(ame_res[ame_res$adj_p.value<=th,"motif_ID"]),function(a) (dic$jasparTOtfclass%>% filter(row_number()==grep(a,dic$jasparTOtfclass$jaspar.id)[1])),simplify = F) tmp<-do.call(rbind,tmp) # 17 out of 430 are not in the dic. 
all(unique(ame_res$motif_ID) %in% tmp$jaspar.id) sum(!unique(ame_res$motif_ID) %in% tmp$jaspar.id) length(unique(ame_res$motif_ID)) unique(ame_res$motif_ID) [!unique(ame_res$motif_ID) %in% tmp$jaspar.id] pd.ame_res <- ame_res%>% filter(adj_p.value<=th) pd.ame_res<-cbind(pd.ame_res,tmp[pd.ame_res$motif_ID,-1])%>% #unite(tf,1:2)%>% mutate(tf=motif_alt_ID)%>% mutate(subfamily.name=ifelse(is.na(subfamily.name),as.character(family.name),subfamily.name))%>% unite(family,family.id,family.name)%>% unite(subfamily,subfamily.id,subfamily.name)%>% mutate(p.value=-log10(p.value), adj_p.value=-log10(adj_p.value), E.value=-log10(E.value), log2FE = log2((X.TP+0.1)/(X.FP+0.1))) res.sum <- data.frame(pd.ame_res[,c("sample","FASTA_max")]%>%distinct(), nmotifs=sapply(unique(pd.ame_res$sample), function(x) nrow(pd.ame_res%>% filter(sample==x))), nmotifs.FE=sapply(unique(pd.ame_res$sample), function(x) nrow(pd.ame_res%>% filter(log2FE>1)%>% filter(sample==x))) ) res.sum%>% mutate(frac=round(nmotifs/FASTA_max,2)) write.table(data.frame(pd.ame_res[,c("sample","FASTA_max")]%>%distinct(), nmotifs=sapply(unique(pd.ame_res$sample), function(x) nrow(pd.ame_res%>% filter(sample==x)))), file = "n.csv",sep = ',',quote = F,row.names = F) ggplot(pd.ame_res%>%filter(log2FE>1),aes(sample,tf))+ geom_tile(aes(fill=log2FE))+ theme(axis.text.y = element_blank()) #+scale_fill_gradientn(colours = wes_palette("Zissou1", 21, type = "continuous")) # thresholding ------------------------------------------------------------ pd <- pd.ame_res%>% filter(log2FE>0)%>% filter(adj_p.value > 10)%>% dplyr::select(sample,tf,log2FE)%>% spread(key=sample,value = log2FE)%>% column_to_rownames("tf") pd.2 <- pd; pd.2[is.na(pd)]<-0 # hcluster ---------------------------------------------------------------- require(cluster) distfunc <- function(x) daisy(x,metric="gower") d <- distfunc(pd.2) dend <- as.dendrogram(hclust(d)) dend <- as.dendrogram(hclust(dist(pd.2))) plot(dend) pheatmap(pd[order.dendrogram(dend),],scale = 
"none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21), show_rownames = F) # ranking ----------------------------------------------------------------- # order by column rord <- pd %>% rownames_to_column("tf")%>% arrange(desc(alpha_1),desc(alpha_2), desc(beta_1),desc(beta_2), desc(delta_1),desc(delta_2), desc(endothelial_1),desc(endothelial_2), desc(exocrine),desc(gamma), desc(glial),desc(immune),desc(stellate)) pheatmap(pd[rord$tf,],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = F,fontsize_row = 6) require(heatmaply) heatmaply(pd[rord$tf,],scale="none", Rowv = NULL,Colv = NULL, colors = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21]) # customized ranking ------------------------------------------------------------------ custRank_all <- function(input.pd=pd.2,na.val=0){ custRank <- function(pd.2,v="alpha_1"){ y <- pd.2[,v] idx.nna <- which(y!=na.val); idx.na <- which(y==na.val) idx.idx <- order(apply(pd.2[idx.nna,],1,mean),decreasing = T) res.list <- list() res.list$ordered <- pd.2[idx.nna[idx.idx],] res.list$un_ordered <- pd.2[idx.na,] res.list } tmp.pd <- input.pd;final.pd <- input.pd[-(1:nrow(input.pd)),] for(x in colnames(input.pd)){ tmp.res <- custRank(pd.2 = tmp.pd,v=x) tmp.pd <- tmp.res$un_ordered final.pd<- rbind(final.pd,tmp.res$ordered) } final.pd[-1,] } pd.3 <- pd; pd.3[is.na(pd)]<- na.value final.pd <- custRank_all(input.pd = pd.3,na.val = na.value) pheatmap(pd[rownames(final.pd),],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = F,fontsize_row = 6) # digital customized ranking ---------------------------------------------- bks <- seq(min(pd, na.rm = T), max(pd,na.rm = T), length.out = 15 + 1) mat = as.matrix(pd) pd.3 <- matrix(as.numeric(cut(as.vector(mat), breaks = bks, include.lowest = T)), nrow(mat), ncol(mat), 
dimnames = list(rownames(mat), colnames(mat))) pd.3[is.na(pd.3)]<- 0 final.pd <- custRank_all(input.pd = pd.3) pheatmap(pd[rownames(final.pd),],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = F,fontsize_row = 6) # kmeans ---------------------------------------------- getClusts <- function(pd.2,...){ require(NbClust) nb <- NbClust(pd.2,method="complete",...) require(factoextra) fviz_nbclust(nb) + theme_minimal() ords <-(sapply(1:max(nb$Best.partition), function(x) which(nb$Best.partition==x))) brks <- cumsum(lapply(ords, length)) ords <- unlist(ords) list(ords=ords,brks=brks) } res <- getClusts(pd.2=pd.2,min.nc=10) pheatmap(pd[res$ords,],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21],gaps_row = res$brks, show_rownames = F,fontsize_row = 6) heatmaply(pd[res$ords,],scale="none", Rowv = NULL,Colv = NULL,gaps_row=brks, colors = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21]) # row-wise scale --------------------------------------------------------- pd.3 <- t(scale(t(pd.2))) pd.3[pd.3>1.96] <- 1.96; pd.3[pd.3 < -1.96] <- -1.96 res <- getClusts(pd.2=pd.3,min.nc=10) pheatmap(pd.3[res$ords,],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(21),gaps_row = res$brks, show_rownames = F,fontsize_row = 6) heatmaply(pd.3[res$ords,],scale="none", Rowv = NULL,Colv = NULL,gaps_row=pd.3$brks, colors = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(21)) pd.3[is.na(pd)]<- -1.96 # by supfamily ------------------------------------------------------ pd.sub <- pd.ame_res%>% filter(log2FE>0)%>% filter(adj_p.value > 10)%>% dplyr::select(sample,subfamily,log2FE)%>% group_by(sample,subfamily)%>% summarise(mlog2FE=max(log2FE))%>% spread(key=sample,value = mlog2FE)%>% as.data.frame()%>% column_to_rownames("subfamily") # order by level 
rord <- pd.sub %>% rownames_to_column("tf")%>% arrange(desc(alpha_1),desc(alpha_2), desc(beta_1),desc(beta_2), desc(delta_1),desc(delta_2), desc(endothelial_1),desc(endothelial_2), desc(exocrine),desc(gamma), desc(glial),desc(immune),desc(stellate)) pheatmap(pd.sub[rord$tf,],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = F,fontsize_row = 6) heatmaply(pd.sub[rord$tf,],scale="none", Rowv = NULL,Colv = NULL,gaps_row=brks, colors = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21]) # hclust pd.sub <- pd.sub[apply(pd.sub, 1, function(x) sum(is.na(x))<ncol(pd.sub)),] pd.sub.2 <- pd.sub;pd.sub.2[is.na(pd.sub.2)] <- 0 dend <- as.dendrogram(hclust(dist(pd.sub.2))) pheatmap(pd.sub[order.dendrogram(dend),],scale = "none",cluster_rows = F,cluster_cols = T,na_col = "grey", color = colorRampPalette(brewer.pal(9,"GnBu"))(11), show_rownames = T,fontsize_row = 6) # nbClust ords <- getClusts(pd.sub.2,min.nc=5) pheatmap(pd.sub[ords$ords,],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21],gaps_row = ords$brks, show_rownames = T,fontsize_row = 6) # cust final.pd <- custRank_all(input.pd = pd.sub.2) pheatmap(pd.sub[rownames(final.pd),],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = T,fontsize_row = 5,fontsize_col = 6) heatmaply(pd.sub[rownames(final.pd),],scale="none", Rowv = NULL,Colv = NULL,gaps_row=brks, colors = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21]) # by family ----------------------------------------------------------- pd.fa <- pd.ame_res%>% filter(log2FE>0)%>% filter(adj_p.value > 10)%>% dplyr::select(sample,family,log2FE)%>% group_by(sample,family)%>% summarise(mlog2FE=max(log2FE))%>% spread(key=sample,value = mlog2FE)%>% as.data.frame()%>% column_to_rownames("family") pd.fa <- pd.fa[apply(pd.fa, 1, 
function(x) sum(is.na(x))<ncol(pd.fa)),] pd.fa.2 <- pd.fa;pd.fa.2[is.na(pd.fa)] <- 0 dend <- as.dendrogram(hclust(dist(pd.fa.2))) pheatmap(pd.fa[order.dendrogram(dend),],scale = "none",cluster_rows = F,cluster_cols = T,na_col = "grey", color = colorRampPalette(brewer.pal(9,"GnBu"))(11), show_rownames = T,fontsize_row = 8) na.value <- -ncol(pd.fa) pd.fa.2 <- pd.fa;pd.fa.2[is.na(pd.fa)] <- na.value final.pd <- custRank_all(input.pd = pd.fa.2,na.val = na.value) final.pd <- final.pd[-grep("NA",rownames(final.pd)),] heatmaply(pd.fa[rownames(final.pd),],scale="none", Rowv = NULL,Colv = NULL,gaps_row=brks, colors = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21]) pheatmap(pd.fa[rownames(final.pd),],scale = "none",cluster_rows = F,cluster_cols = F,na_col = "grey", color = colorRampPalette(brewer.pal(9,"Blues"))(21)[7:21], show_rownames = T,fontsize_row = 6) # save data ------------------------------------------------------------------- # promoter reads cnt promoter <- read.table('./test/Islet_123.promoter_peak_counts.txt',header = T,row.names = 1) apply(promoter, 2, sum) calCpm <- function(x) x/sum(x)*100000 promoter.cpm <- apply(promoter[,-1], 2,calCpm) require(reshape) long = melt(promoter, id.vars= "celltypes") ggplot(long, aes (value)) + geom_density(aes(color = X2)) plot(density(promoter[,"glial"])) # add ensemble id # ensemble -> gene symbol require(biomaRt) human<- useEnsembl(biomart="ensembl", dataset="hsapiens_gene_ensembl") name.dic <- getBM(attributes =c("ensembl_gene_id", "external_gene_name"), filters = "external_gene_name", rownames(promoter.cpm), mart = human) promoter.cpm.2<- right_join(name.dic,promoter.cpm%>%as.data.frame()%>%rownames_to_column("external_gene_name")) idx <- (promoter.cpm.2$ensembl_gene_id%in% dat$tfclass$ensembl.id[!is.na(dat$tfclass$ensembl.id)] | promoter.cpm.2$external_gene_name %in% as.character(dat$tfclass$tf.symbol)[!is.na(as.character(dat$tfclass$tf.symbol))]) promoter.cpm.2 <- promoter.cpm.2[idx,] tmp <- 
right_join(promoter.cpm.2,dat$tfclass,by=c("ensembl_gene_id"="ensembl.id"),) tmp <- tmp[!is.na(tmp$external_gene_name),] dat <- list() dat$motif <- pd.ame_res dat$tfclass <- dic$merged dat$promoter.cpm <- promoter.cpm.2 saveRDS(dat,file = "appDat.rds") # export csv pd.tmp <- pd.ame_res%>% filter(log2FE>0)%>% filter(adj_p.value > 10)%>% dplyr::select(sample,motif_ID,motif_alt_ID,ensembl.id,subfamily,family,log2FE)%>% spread(key=sample,value = log2FE) write.csv(pd.tmp,"log2FE_res.csv",quote = F) write.csv(tmp,"") # TOP5 TFs ---------------------------------------------- pd.ame_res.top5<- pd.ame_res.hsap%>% as.tibble()%>% gather(key="sample",value = "log10_adj_p.value",1:15)%>% group_by(sample)%>% top_n(5,log10_adj_p.value)%>% spread(sample,log10_adj_p.value) # print pd.ame_res.top5<-pd.ame_res.top5[,c(colnames(pd.ame_res.top5)[1:7],paste0("sample_",1:15))]%>% arrange(sample_1,sample_2,sample_3,sample_4,sample_5,sample_6,sample_7,sample_8,sample_9,sample_10,sample_11,sample_12,sample_13,sample_14,sample_15)%>% print(n=28) # ensemble -> gene symbol require(biomaRt) human<- useEnsembl(biomart="ensembl", dataset="hsapiens_gene_ensembl") name.dic <- getBM(attributes =c("ensembl_gene_id", "external_gene_name"), filters = "ensembl_gene_id", pd.ame_res.top5$ensembl.id, mart = human) pd.ame_res.top5<- right_join(name.dic,pd.ame_res.top5,by=c("ensembl_gene_id"="ensembl.id")) # Gene expression connection ---------------------------------------------- require(RSQLite) sqlitePath <- "./data/zhang-rnaseq-db.sqlite" mydb <- dbConnect(SQLite(), sqlitePath) query <- sprintf("SELECT * FROM %s", "samples") metadata.samples <- dbGetQuery(mydb, query) query <- sprintf("SELECT gene_id,description FROM %s", "genes_no_mt_tpm_rescale") genes <- dbGetQuery(mydb, query) rownames(genes) <- genes$gene_id getExp <- function(gg){ data.frame(TPM= as.numeric(dbGetQuery(mydb,'SELECT * FROM genes_no_mt_tpm_rescale WHERE "gene_id" = :g', params=list(g=gg))[metadata.samples$id]), 
metadata.samples,Gene=gg)%>%dplyr::select(id,TPM)%>%spread(key = id,value = TPM) } exp.dat <- sapply(pd.ame_res.top5$external_gene_name, getExp,simplify = F) all.equal(names(exp.dat), pd.ame_res.top5$external_gene_name) exp.dat<-do.call(rbind,exp.dat) exp.dat <- exp.dat[,c(9:10,1:8,11:18)] pd.ame_res.top5<- right_join(pd.ame_res.top5,exp.dat%>% rownames_to_column("external_gene_name")) # save write.csv(pd.ame_res.top5,file = "./data/zhang_top5_enrich_motif.csv") # heatmap p1 <- ggplot(pd.ame_res.top5%>% unite(gene,external_gene_name,subfamily)%>% dplyr::select("gene",starts_with("sample"))%>% mutate(gene=factor(gene,levels = gene)) %>% gather("sample","log10_adj_p.value",2:16)%>% mutate(sample=factor(sample,levels = paste0("sample_",1:15))), aes(sample,gene,fill=log10_adj_p.value))%+% geom_tile()%+% scale_fill_gradientn(colours =rev(brewer.pal(7,"YlGnBu")),na.value="NA")+ theme(plot.background=element_blank(), axis.text=element_text(face="bold"), panel.grid = element_line(colour = "black"))+ theme_bw() ggplot(pd.ame_res.top5%>% unite(gene,external_gene_name,subfamily)%>% dplyr::select("gene", starts_with("Me"), starts_with("AEC"), starts_with("VEC"))%>% mutate(gene=factor(gene,levels = gene)) %>% gather("sample","logTPM",2:18), aes(sample,gene,fill=logTPM))+ geom_tile()+ geom_tile(colour="white",size=0.25)+ scale_fill_gradientn(colours =rev(brewer.pal(7,"YlGnBu")),na.value="NA") theme(plot.background=element_blank(), axis.text=element_text(face="bold"), panel.grid = element_line(colour = "black"))+ theme_bw() # heatmap expression pd <- pd.ame_res.top5%>% unite(gene,external_gene_name,subfamily)%>% dplyr::select("gene",starts_with("sample"))%>% column_to_rownames("gene") b <- simplot(pd) pd.2.dup <-right_join(pd.ame_res.top5[,1:2], pd.ame_res.hsap%>% filter(ensembl.id %in% pd.ame_res.top5$ensembl_gene_id), by=c('ensembl_gene_id'="ensembl.id"))%>% unite(gene,external_gene_name,subfamily)%>% filter(duplicated(gene)) pd.2.dup <- pd.2.dup[c()] pd.2 <- 
right_join(pd.ame_res.top5[,1:2], pd.ame_res.hsap%>% filter(ensembl.id %in% pd.ame_res.top5$ensembl_gene_id), by=c('ensembl_gene_id'="ensembl.id"))%>% unite(gene,external_gene_name,subfamily)%>% filter(!duplicated(gene))%>% dplyr::select("gene",starts_with("sample"))%>% column_to_rownames("gene") pd.2[pd.2.dup$gene[5],1:15] <-pd.2.dup[5,3:17] pd.2 <- pd.2[rownames(pd),paste0("sample_",1:15)] b.2 <- simplot(pd.2) b.3 <- simplot(apply(pd.2[rownames(pd),paste0("sample_",1:15)],2,normalise)) pd <- pd.ame_res.top5%>% unite(gene,external_gene_name,subfamily)%>% dplyr::select("gene", starts_with("Me"), starts_with("AEC"), starts_with("VEC"))%>% column_to_rownames("gene") a<- simplot(pd) a2<- simplot(t(apply(pd, 1,normalise))) pd.2 <- pd %>%rownames_to_column("gene")%>% gather(key=sample,value = TPM,2:(ncol(pd)+1))%>% separate(sample,into =c("Cell","Day","Rep")) %>% group_by(gene,Cell,Day)%>% summarise(avgTPM=mean(TPM))%>% unite(sample,Cell,Day)%>% spread(key = sample,value = avgTPM)%>% ungroup()%>% mutate(gene=factor(gene,levels = rownames(pd)))%>% arrange(gene)%>%as.data.frame()%>% column_to_rownames("gene") pd.2 <- pd.2[,c(5,1:4,6:9)] c<- simplot(pd.2) c2<- simplot(t(apply(pd.2, 1, normalise))) require(gridExtra) grid.arrange(b+theme(axis.text.x = element_blank()), a+theme(axis.text = element_blank()),ncol=2) grid.arrange(b+theme(axis.text.x = element_blank()), a2+theme(axis.text = element_blank()),ncol=2) grid.arrange(b+theme(axis.text.x = element_blank()), c+theme(axis.text = element_blank()),ncol=2) grid.arrange(b+theme(axis.text.x = element_blank()), b.3+theme(axis.text = element_blank()), c2+theme(axis.text = element_blank()),ncol=3) grid.arrange(b+theme(axis.text.x = element_blank()), b.2+theme(axis.text = element_blank()), c+theme(axis.text = element_blank()),ncol=3)
c7b3fb11b7d158cf807bfa1824220d8a0e786490
ba0d52a9447cc2cedcaacafd8349fc50a32363b5
/R/prepBoxplotHealthData.R
591ef4c4c3ae0449e86e21cfd8bd5626e302bff3
[ "CC0-1.0" ]
permissive
robschick/tangled
49590a754531b8e50294abb4d86fcd9cc85d037c
e4c0e49fa87802dd39fba01dc4fba5cef25e7b31
refs/heads/master
2023-04-07T19:24:43.838552
2022-05-04T19:11:30
2022-05-04T19:11:30
33,547,111
0
0
null
null
null
null
UTF-8
R
false
false
9,416
r
prepBoxplotHealthData.R
#' Prepare data for making the boxplots of health during entanglement
#'
#' Builds the data behind the box plots of whale health during the
#' entanglement window. Health of impacted animals is summarised per
#' entanglement event and combined with the unimpacted reference cases
#' returned by the \code{returnUnimpactedHealth()} helper (overall and per
#' decade: 1980s, 1990s, 2000s).
#'
#' NOTE(review): besides its arguments, this function reads objects assumed
#' to exist in the calling environment -- \code{healthmean} (or \code{anom}
#' when \code{anomFlag = TRUE}), \code{ID} (animal IDs matching the rows of
#' \code{healthmean}) and \code{myName} (month-year labels for its columns).
#' Confirm these are package data / globals.
#'
#' @param tangRepro data frame with information on the temporal extent of
#'   windows for entanglement events of reproductively active animals
#' @param tangNonRepro data frame with information on the temporal extent of
#'   windows for entanglement events of \emph{non}-reproductively active
#'   animals
#' @param anomFlag logical; passed through to
#'   \code{returnUnimpactedHealth()}, and when \code{TRUE} the health
#'   anomaly matrix \code{anom} is used in place of \code{healthmean}
#' @param thold numeric health threshold; months whose health value is below
#'   \code{thold} are counted into the \code{lthold} column
#' @return A list with two elements, \code{dfLongRepro} and
#'   \code{dfLongNonRepro}: long-format data frames of health during
#'   entanglement windows for reproductively active and
#'   \emph{non}-reproductively active animals, each with the unimpacted
#'   reference rows appended.
#' @export
#' @examples
#' \dontrun{
#' prepBoxplotHealthData(tangRepro, tangNonRepro, anomFlag = TRUE, thold = 67)
#' }
prepBoxplotHealthData <- function(tangRepro, tangNonRepro, anomFlag = FALSE, thold = 67){

  # Unimpacted reference health values, overall and by decade.
  tmp <- returnUnimpactedHealth(anomFlag = anomFlag)
  nonrepvec <- tmp$nonrep
  repvec <- tmp$rep
  nonrepvec1980 <- tmp$nonrep1980
  repvec1980 <- tmp$rep1980
  nonrepvec1990 <- tmp$nonrep1990
  repvec1990 <- tmp$rep1990
  # BUG FIX: these two assignments were swapped in the original
  # (nonrepvec2000 took tmp$rep2000 and repvec2000 took tmp$nonrep2000),
  # inconsistent with the 1980/1990 pairs above.
  nonrepvec2000 <- tmp$nonrep2000
  repvec2000 <- tmp$rep2000

  useAnom <- anomFlag

  # z == 1 processes the reproductively active females, z == 2 the others.
  # (Unused intermediates idFac/ti/evnum/sev from the original were removed.)
  for(z in 1:2){
    repro <- (z == 1)

    if (repro) {
      tSub <- tangRepro
    } else {
      tSub <- tangNonRepro
    }

    # When anomalies are requested, use the anomaly matrix instead of the
    # raw health means. NOTE(review): when useAnom is FALSE, `healthmean`
    # must already exist in the calling environment.
    if (useAnom){
      healthmean <- anom
    }

    # One summary row per entanglement event, filled in by the loop below.
    dfSum <- data.frame(egno = rep(NA, times = nrow(tSub)),
                        EntanglementId = rep(NA, times = nrow(tSub)),
                        eventNo = rep(NA, times = nrow(tSub)),
                        nMonths = rep(NA, times = nrow(tSub)),
                        hAnom = rep(NA, times = nrow(tSub)),
                        lthold = rep(NA, times = nrow(tSub)),
                        gearInj = rep(NA, times = nrow(tSub)),
                        startDate = rep(NA, times = nrow(tSub)),
                        endDate = rep(NA, times = nrow(tSub)))

    for(i in seq_along(tSub$EGNo)){

      ind <- tSub$EGNo[i]
      ent_id <- tSub$EntanglementId[i]
      # Whale 1045 is excluded; its dfSum row stays all-NA.
      # NOTE(review): these all-NA rows (also for animals missing from ID)
      # are carried through into the returned data frames.
      if(ind == 1045) next()
      eventNo <- tSub$EventNo[i]
      if(!ind %in% ID){next()}

      # Health time series for this animal.
      htest <- healthmean[which(ID == ind),]

      # Start/end of the entanglement window as column indices into the
      # health series; myName holds the month-year labels.
      s <- match(t(tSub[i, 'swindmonyr']), myName)
      sDate <- tSub[i, 'StartDateWindow', drop = TRUE]
      e <- match(t(tSub[i, 'ewindmonyr']), myName)
      eDate <- tSub[i, 'EndDateWindow', drop = TRUE]
      gstat <- tSub[i, 'gearInj']

      # Health during the window; count months below the threshold.
      hind <- htest[s:e]
      lthold <- length(which(hind < thold))

      dfSum[i, 'egno'] <- ind
      dfSum[i, 'EntanglementId'] <- ent_id
      dfSum[i, 'eventNo'] <- eventNo
      dfSum[i, 'hAnom'] <- median(hind, na.rm = TRUE)
      dfSum[i, 'lthold'] <- lthold
      dfSum[i, 'gearInj'] <- gstat
      dfSum[i, 'nMonths'] <- length(s:e)
      dfSum[i, 'startDate'] <- lubridate::year(sDate)
      dfSum[i, 'endDate'] <- lubridate::year(eDate)
    }

    # Gear/no-gear indicator: gearInj codes 1, 2 and 4 are flagged as
    # carrying gear (presumably severe/moderate/minor with gear -- confirm
    # against the coding scheme).
    dfSum$gearnogear <- 0
    sev1idx <- which(dfSum$gearInj == 1)
    mod1idx <- which(dfSum$gearInj == 2)
    min1idx <- which(dfSum$gearInj == 4)
    dfSum[sev1idx, 'gearnogear'] <- 1
    dfSum[mod1idx, 'gearnogear'] <- 1
    dfSum[min1idx, 'gearnogear'] <- 1
    dfSum$variable <- 'impacted'

    # Append the unimpacted reference animals (egno 9999) for this group:
    # overall plus the 1980s, 1990s and 2000s decade slices. They are added
    # within each branch so each returned data frame gets them exactly once.
    if (repro) {
      uvec <- repvec
      dfuvec <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                           nMonths = 0, hAnom = uvec, lthold = 9999,
                           gearInj = 0, startDate = '01-0000',
                           endDate = '01-0000', gearnogear = 0,
                           variable = 'unimpacted')
      # 1980s
      uvec1980 <- repvec1980
      dfuvec1980 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                               nMonths = 0, hAnom = uvec1980, lthold = 9999,
                               gearInj = 0, startDate = 1980, endDate = 1989,
                               gearnogear = 0, variable = 'unimpacted')
      # 1990s
      uvec1990 <- repvec1990
      dfuvec1990 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                               nMonths = 0, hAnom = uvec1990, lthold = 9999,
                               gearInj = 0, startDate = 1990, endDate = 1999,
                               gearnogear = 0, variable = 'unimpacted')
      # 2000s
      uvec2000 <- repvec2000
      dfuvec2000 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                               nMonths = 0, hAnom = uvec2000, lthold = 9999,
                               gearInj = 0, startDate = 2000, endDate = 2009,
                               gearnogear = 0, variable = 'unimpacted')
      dfLong <- rbind(dfSum, dfuvec, dfuvec1980, dfuvec1990, dfuvec2000)
    } else {
      uvec <- nonrepvec
      dfurvec <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                            nMonths = 0, hAnom = uvec, lthold = 9999,
                            gearInj = 0, startDate = '01-0000',
                            endDate = '01-0000', gearnogear = 0,
                            variable = 'unimpacted')
      # 1980s
      uvec1980 <- nonrepvec1980
      dfurvec1980 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                                nMonths = 0, hAnom = uvec1980, lthold = 9999,
                                gearInj = 0, startDate = 1980, endDate = 1989,
                                gearnogear = 0, variable = 'unimpacted')
      # 1990s
      uvec1990 <- nonrepvec1990
      dfurvec1990 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                                nMonths = 0, hAnom = uvec1990, lthold = 9999,
                                gearInj = 0, startDate = 1990, endDate = 1999,
                                gearnogear = 0, variable = 'unimpacted')
      # 2000s
      uvec2000 <- nonrepvec2000
      dfurvec2000 <- data.frame(egno = 9999, EntanglementId = 0, eventNo = 0,
                                nMonths = 0, hAnom = uvec2000, lthold = 9999,
                                gearInj = 0, startDate = 2000, endDate = 2009,
                                gearnogear = 0, variable = 'unimpacted')
      dfLong <- rbind(dfSum, dfurvec,
                      dfurvec1980, dfurvec1990, dfurvec2000)
    }

    # Group coding for plotting: 1 = severe, 2 = moderate, 3 = minor,
    # 4 = unimpacted. mod1idx/min1idx were computed on dfSum but remain
    # valid here because the dfSum rows come first in dfLong.
    dfLong$group <- 1
    mod0idx <- which(dfLong$gearInj == 5)
    min0idx <- which(dfLong$gearInj == 6)
    unimpactedidx <- which(dfLong$variable == 'unimpacted')
    dfLong[c(mod1idx, mod0idx), 'group'] <- 2
    dfLong[c(min1idx, min0idx), 'group'] <- 3
    dfLong[unimpactedidx, 'group'] <- 4
    dfLong$group <- factor(dfLong$group, levels = c(4, 3, 2, 1))

    # Tag the result with reproductive status and stash it per branch.
    if (repro) {
      dfLongRepro <- dfLong
      dfLongRepro$status <- 'RepFem'
    } else {
      dfLongNonRepro <- dfLong
      dfLongNonRepro$status <- 'NonRepFem'
    }
  } # end loop over the 'repro' variable

  list(dfLongRepro = dfLongRepro, dfLongNonRepro = dfLongNonRepro)
}
d0f182d7d28df9cf4c8b8e3d4abb480cb1ecfff3
35ae390a7e9df0e77e3a247588424efc50cc1220
/plot5.R
3f66f6e6fd2754bff4e6d38e0fc1a6889ff2b0b4
[]
no_license
tomashaber/ExData_Plotting2
b27ab507953b2bdcf839d6d091519cd8af354d37
45530013fd608609e6398f36ea2b1d1d4f0f3c7b
refs/heads/master
2021-01-10T02:39:12.454755
2015-11-22T20:47:22
2015-11-22T20:47:22
46,672,432
0
0
null
null
null
null
UTF-8
R
false
false
701
r
plot5.R
# plot5.R -- emissions from motor vehicle sources in Baltimore City
# (fips == "24510") across 1999-2008, rendered to plot5.png.
library(ggplot2)
library(dplyr)

# Remember the caller's working directory so it can be restored at the end.
wd <- getwd()
setwd("2")

# NEI: per-source PM2.5 emissions; SCC: source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Identify motor-vehicle source codes, then keep only the NEI rows for them.
is_vehicle <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vehicle_scc <- SCC[is_vehicle,]$SCC
vehicle_nei <- NEI[NEI$SCC %in% vehicle_scc,]

# Restrict to Baltimore City.
baltimore <- filter(vehicle_nei, vehicle_nei$fips=="24510")

# Bar chart of yearly emissions, written to a 480x480 PNG.
png("plot5.png", width=480, height=480, units="px")
p <- ggplot(baltimore, aes(factor(year), Emissions)) +
  geom_bar(stat="identity", fill="red", width=0.9) +
  theme_bw() +
  guides(fill=FALSE) +
  labs(x="year",
       y=expression("Total PM2,5 Emission (10^5 Tons)")) +
  labs(title=expression("PM2.5 Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(p)
dev.off()
setwd(wd)
7963efa405f6168f029bd29157d0f2db4b106237
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed/7896_0/rinput.R
37bb1153bd7289eddc308772f2c26435eaaa0ffd
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
rinput.R
# Read the rooted newick tree for alignment 7896_0, unroot it, and write the
# result out for downstream use (e.g. codeml).
library(ape)

phylo <- read.tree("7896_0.txt")  # rooted input tree
phylo_unrooted <- unroot(phylo)   # collapse the root node
write.tree(phylo_unrooted, file="7896_0_unrooted.txt")
0b277d8b7671e0d05988444ae511cc6af4e9e3eb
231e93f8115c71e8a4ef643ca51524f041f3f57d
/man/bold_stats.Rd
24c896be1678e57f1317cbe01be011ec3594f3f8
[ "MIT" ]
permissive
GhostsOfHiroshima/bold
88c7d6551f3ef978e89986f945438e0fc1c9dcfb
82de5cfc8971e0b59eb661eab76f7f96f9858f78
refs/heads/master
2020-06-11T14:00:14.434375
2019-06-26T02:56:00
2019-06-26T02:56:00
null
0
0
null
null
null
null
UTF-8
R
false
true
2,663
rd
bold_stats.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bold_stats.R \name{bold_stats} \alias{bold_stats} \title{Get BOLD stats} \usage{ bold_stats(taxon = NULL, ids = NULL, bin = NULL, container = NULL, institutions = NULL, researchers = NULL, geo = NULL, dataType = "drill_down", response = FALSE, ...) } \arguments{ \item{taxon}{(character) Returns all records containing matching taxa. Taxa includes the ranks of phylum, class, order, family, subfamily, genus, and species.} \item{ids}{(character) Returns all records containing matching IDs. IDs include Sample IDs, Process IDs, Museum IDs and Field IDs.} \item{bin}{(character) Returns all records contained in matching BINs. A BIN is defined by a Barcode Index Number URI.} \item{container}{(character) Returns all records contained in matching projects or datasets. Containers include project codes and dataset codes} \item{institutions}{(character) Returns all records stored in matching institutions. Institutions are the Specimen Storing Site.} \item{researchers}{(character) Returns all records containing matching researcher names. Researchers include collectors and specimen identifiers.} \item{geo}{(character) Returns all records collected in matching geographic sites. Geographic sites includes countries and province/states.} \item{dataType}{(character) one of "overview" or "drill_down" (default). "drill_down": a detailed summary of information which provides record counts by [BINs, Country, Storing Institution, Species]. 
"overview": the total counts of [BINs, Countries, Storing Institutions, Orders, Families, Genus, Species]} \item{response}{(logical) Note that response is the object that returns from the Curl call, useful for debugging, and getting detailed info on the API call.} \item{...}{Further args passed on to \code{\link[crul]{HttpClient}}, main purpose being curl debugging} } \description{ Get BOLD stats } \examples{ \dontrun{ x <- bold_stats(taxon='Osmia') x$total_records x$records_with_species_name x$bins x$countries x$depositories x$order x$family x$genus x$species # just get all counts lapply(Filter(is.list, x), "[[", "count") res <- bold_stats(taxon='Osmia', response=TRUE) res$url res$status_code res$response_headers # More than 1 can be given for all search parameters bold_stats(taxon=c('Coelioxys','Osmia')) ## curl debugging ### These examples below take a long time, so you can set a timeout so that ### it stops by X sec bold_stats(taxon='Osmia', verbose = TRUE) # bold_stats(geo='Costa Rica', timeout_ms = 6) } } \references{ \url{http://v4.boldsystems.org/index.php/resources/api?type=webservices} }
ea43da1a58f6e7a167c472346f0601653e222693
e2baae0f2cfe109900b67fbdfc0080eb2aa854b5
/man/khclust_euc.Rd
3d63bb058a3b4e04df4f7d909384dafa1435cca9
[]
no_license
HBPMedical/CCC
5eb3411c420e13a14529858b1b848208ca4416d7
32a4b64e07e60c229c86f33286aa43742ba6f23c
refs/heads/master
2021-09-07T19:07:00.290850
2018-02-27T14:35:46
2018-02-27T14:35:46
69,594,420
1
0
null
null
null
null
UTF-8
R
false
true
883
rd
khclust_euc.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions.R \name{khclust_euc} \alias{khclust_euc} \title{Gap statistics for hclust Euclidean} \usage{ khclust_euc(x, K.max, B, verbose, plot.num.clus) } \arguments{ \item{x}{data matrix} \item{K.max}{positive integer specifying the number of clusters, less than the number of observations.} \item{B}{integer, number of Monte Carlo (“bootstrap”) samples} \item{verbose}{integer or logical, determining if “progress” output should be printed. The default prints one bit per bootstrap sample} \item{plot.num.clus}{if TRUE (default) the gap statistic plot will be printed} } \value{ the clusGap function output } \description{ Gap statistics for hclust Euclidean } \examples{ # khclust_euc(subx,K.max=10, B=60, verbose = FALSE, plot.num.clus=TRUE ) }
865887324e48c77dd0f66476cea169a51e71366d
34dd9a3a2313d0b4484ea3bf32e65175cdd718eb
/man/GSA.read.gmt.Rd
c52078044c44448138b1a24ea5868e3405e7d927
[]
no_license
cran/GSA
6afc50cd79249bb0051596d7ef82a2e7513b97af
43634ce04a75fcc52023419eb6225524bacf71e0
refs/heads/master
2022-05-13T06:39:05.299449
2022-03-19T13:42:01
2022-03-19T13:42:01
17,679,483
0
0
null
null
null
null
UTF-8
R
false
false
1,656
rd
GSA.read.gmt.Rd
\name{GSA.read.gmt} \alias{GSA.read.gmt} \title{Read in a gene set collection from a .gmt file} \description{ Read in a gene set collection from a .gmt file } \usage{ GSA.read.gmt(filename) } \arguments{ \item{filename}{The name of a file to read data values from. Should be a tab-separated text file, with one row per gene set. Column 1 has gene set names (identifiers), column 2 has gene set descriptions, remaining columns are gene ids for genes in that geneset.} } \details{This function reads in a geneset collection from a .gmt text file, and creates an R object that can be used as input into GSA. We use UniGene symbols for our gene set names in our .gmt files and expression datasets, to match the two. However the user is free to use other identifiers, as long as the same ones are used in the gene set collections and expression datasets. } \value{ A list with components \item{genesets}{List of gene names (identifiers) in each gene set}, \item{geneset.names}{Vector of gene set names (identifiers)}, \item{geneset.descriptions}{Vector of gene set descriptions} } \references{Efron, B. and Tibshirani, R. On testing the significance of sets of genes. Stanford tech report rep 2006. http://www-stat.stanford.edu/~tibs/ftp/GSA.pdf } \author{Robert Tibshirani} \examples{ # read in functional pathways gene set file from Broad institute GSEA website # http://www.broad.mit.edu/gsea/msigdb/msigdb_index.html # You have to register first and then download the file C2.gmt from # their site #GSA.read.gmt(C2.gmt) } \keyword{univar}% at least one, from doc/KEYWORDS \keyword{survival} \keyword{ts} \keyword{nonparametric}
c7d394c6398719f97b6e6feb8fcbdef2209ab4fc
3124eae2c2cc624306b83f945f0f0730841798ce
/man/itakura.dist.Rd
57caebc88cd639902ea6d8dae5c69d8713fe69b4
[]
no_license
cran/seewave
7841703a7f1cf237ce67e84f0f5b1dba877b1dff
1f0b3d5688151141129368c17f826ccedcb4ad96
refs/heads/master
2023-08-08T11:45:55.093227
2023-07-16T05:50:02
2023-07-16T06:34:59
17,699,546
18
12
null
2023-01-27T12:57:14
2014-03-13T06:15:47
R
UTF-8
R
false
false
2,399
rd
itakura.dist.Rd
\name{itakura.dist} \alias{itakura.dist} \title{Itakura-Saito distance} \description{Compare two distributions (e.g. two frequency spectra) by computing the Itakura-Saito distance} \usage{itakura.dist(spec1, spec2, scale=FALSE)} \arguments{ \item{spec1}{any distribution, especially a spectrum obtained with \code{\link{spec}} or \code{\link{meanspec}} (not in dB). This can be either a two-column matrix (col1 = frequency, col2 = amplitude) or a vector (amplitude).} \item{spec2}{any distribution, especially a spectrum obtained with \code{\link{spec}} or \code{\link{meanspec}} (not in dB). This can be either a two-column matrix (col1 = frequency, col2 = amplitude) or a vector (amplitude).} \item{scale}{a logical, if \code{TRUE} the distance is scaled by dividing the distance by the length of \code{spec1} (or \code{spec2}).} } \details{The Itakura-Saito (I-S) distance is a non-symmetric measure of the difference between two probability distributions. It is here adapted for frequency spectra. The distance is asymmetric, ie computing the I-S distance between spec1 and spec2 is not the same as computing it between spec2 and spec1. A symmetry can be obtained by calculating the mean between the two directions.\cr The distance is obtained following:\cr \deqn{D_{I-S}(spec1 \Vert spec2) = \sum{\frac{spec1}{spec2} - log(\frac{spec1}{spec2}) - 1}}{% D(spec1 || spec2) = sum(spec1/spec2 - log(spec1/spec2) - 1)} } \value{The function returns a list of three items: \item{D1}{The I-S distance of 'spec2' with respect to 'spec1' (\emph{i.e.} D(spec1 || spec2))} \item{D2}{The I-S distance of 'spec1' with respect to 'spec2' (\emph{i.e.} D(spec2 || spec1))} \item{D}{The symmetric distance (\emph{i.e.} D = 0.5*(D1+D2))} If \code{scale = TRUE} the distance is divided by the length of \code{spec1} (or \code{spec2}).
} \note{The function works for both Hz and (htk-)mel scales.} \author{Jerome Sueur, improved by Laurent Lellouch} \seealso{\code{\link{kl.dist}}, \code{\link{ks.dist}}, \code{\link{logspec.dist}}, \code{\link{simspec}}, \code{\link{diffspec}}} \examples{ # Comparison of two spectra data(tico) tico1 <- spec(tico, at=0.65, plot=FALSE) tico2 <- spec(tico, at=1.1, plot=FALSE) itakura.dist(tico1, tico2) itakura.dist(tico1, tico2, scale=TRUE) } \keyword{distribution} \keyword{ts}
1024d7186c5da32c9916729369d608d93ebbb518
f884d7bcd31c81582960a7d8f820e3d3f4cf6953
/man/formatSettings.Rd
aa055446faf970e99612b2044615e309f75709e0
[ "MIT" ]
permissive
rtmtemp/kwb.monitoring
bc716dfb18f489dc4d1040f39359fc781d362ff2
6aefc1d076c4cfa8ffbdd1abf8f143f7deb8b9e5
refs/heads/master
2022-11-06T23:51:03.604207
2019-02-14T01:04:28
2019-02-14T01:04:28
null
0
0
null
null
null
null
UTF-8
R
false
true
552
rd
formatSettings.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/info.R \name{formatSettings} \alias{formatSettings} \title{Format Settings} \usage{ formatSettings(settings, settingNames = names(settings), do.stop = FALSE) } \arguments{ \item{settings}{list of settings} \item{settingNames}{names of the settings, by default: \code{names(settings)}} \item{do.stop}{passed to \code{kwb.monitoring:::get_H_threshold}, \code{kwb.monitoring:::get_Q_threshold}, \code{kwb.monitoring:::get_V_threshold}} } \description{ Format Settings }
e7ad0d3d60a6a427f7e38abe2b57bf2d05bae028
2407dd0c4ecba24555a73ca84640330fa3e754e4
/R/gonogo.R
0cfec4e3b84f89acce74285634d08c4d53919c3b
[]
no_license
brian-lau/Rexpneuro
c6f7cf7cc8ede7f41145b4d9b389620aa8e30103
0caf9b72c2108ce50f7b7e1ebd8d43aea3a6f949
refs/heads/master
2023-07-03T00:44:58.757667
2020-11-23T17:06:31
2020-11-23T17:06:31
308,354,517
0
0
null
null
null
null
UTF-8
R
false
false
14,956
r
gonogo.R
#' @export read_eventide <- function(fname = NULL, name = NULL, basedir = getwd(), start_date = "30012017", # daymonthyear end_date = "30012021", # daymonthyear min_trials = 1, include_tracker = FALSE, include_spike = FALSE, ... ) { library(magrittr) library(dplyr) if (is.null(names) & is.null(fname)) stop("Specify subject name or filename.") if (!is.null(fname)) { fnames <- fname name <- stringr::str_split(fnames,"_")[[1]][1] } else { fnames <- list.files(path = basedir, pattern = glob2rx(paste0(name, "_", "GNG", "*.txt")), ignore.case = TRUE) } d <- purrr::map_chr(stringr::str_split(fnames,"_"), 3) t <- purrr::map_chr(stringr::str_split(fnames,"_"), 4) t <- purrr::map_chr(stringr::str_split(t,".txt"), 1) d <- as.POSIXct(paste(d, t), "%d%m%Y %H-%M", tz = "Europe/Paris") ## Eventide trial data # Sort by ascending experiment date ind <- order(d) fnames <- fnames[ind] d <- d[ind] start_date <- as.POSIXlt(start_date, format = "%d%m%Y", tz = "Europe/Paris") end_date <- as.POSIXlt(end_date, format = "%d%m%Y", tz = "Europe/Paris") ind <- (start_date <= d) & (d <= end_date) fnames <- fnames[ind] d <- d[ind] ## Eventide tracker data if (include_tracker) { td <- parse_tracker_filenames(basedir = basedir) #dt <- difftime(d, td$date, units = "secs") # Find index for matching tracker file by time difference dt <- t(matrix(unlist(purrr::map(d, ~(abs(difftime(.x, td$date, units = "secs"))))), ncol = length(d))) mind <- which(dt==matrixStats::rowMins(dt),arr.ind=T) #map_int(d, ~which.min(abs(difftime(.x, td$date, units = "secs")))) dt <- dt[mind] td$fnames <- td$fnames[mind[,2]] td$date <- td$date[mind[,2]] td$dt <- dt } if (!any(ind)) { out <- list( call = match.call(), name = name, info = NULL, trial_data = NULL ) } else { # Read session data dat <- purrr::map(paste(basedir, fnames, sep = .Platform$file.sep), read_eventide_single, ...) 
trial_data = purrr::map_dfr(dat, "trial_data", .id = "session") info <- tibble::tibble(session = unique(trial_data$session), version = purrr::map_chr(dat, "version"), date = unlist(purrr::map(dat, "date") %>% purrr::reduce(c)), fname_eventide = fnames ) if (include_tracker) { dat_tracker <- purrr::map(paste(basedir, td$fnames, sep = .Platform$file.sep), read_eventide_tracker, ...) tracker_data <- purrr::map_dfr(dat_tracker, "tracker_data", .id = "session") info %<>% tibble::add_column(date_tracker = td$date, fname_tracker = td$fnames, dt = dt ) # Remove extra tracker data (from session terminating before trial_data written) tracker_data %<>% group_by(session, counter_total_trials) %>% nest() %>% semi_join(trial_data, by = c("session", "counter_total_trials")) # Convert to trial time if ("define_trial_onset_time_absolute" %in% names(trial_data)) { tracker_data$define_trial_onset_time_absolute = trial_data$define_trial_onset_time_absolute f <- function(df, t0) { df$t <- df$t - t0 return(df) } tracker_data %<>% mutate(data = map(data, ~f(.x, define_trial_onset_time_absolute))) } } else { tracker_data <- NULL } out <- list( call = match.call(), name = name, info = info, trial_data = trial_data, tracker_data = tracker_data ) #temp = trial_data %>% nest_join(tracker_data) } class(out) <- "GNGeventide" return(out) } #' @export summary.GNGeventide <- function(obj, skim_func = NULL, # see skimr::skim_with summarise_durations = FALSE ) { library(dplyr) library(skimr) cat("Call: ") print(obj$call) cat("Name:\t\t", obj$name, "\n") cat("# sessions:\t", nrow(obj$info), "\n") cat("# trials:\t", nrow(obj$trial_data), "\n") if (is.null(skim_func)) { skim_func <- skim_with( base = sfl(missing = n_missing), numeric = sfl(mean = mean, std = sd, med = median, mad = mad, hist = function(x) inline_hist(x, 20)), append = F ) } x <- obj$trial_data %>% group_by(condition) %>% skim_func(is_correct, is_abort) print(x, include_summary = FALSE, width = NULL) x <- obj$trial_data %>% 
group_by(block) %>% skim_func(is_correct, is_abort) print(x, include_summary = FALSE, width = NULL) cat("\nFilter by correct trials") x <- obj$trial_data %>% group_by(block) %>% filter(is_correct & (condition != "nogo")) %>% skim_func(counter_total_trials, counter_trials_in_block, rt, mt) print(x, include_summary = FALSE, width = NULL) if (summarise_durations) { ind = stringr::str_detect(names(obj$trial_data), "^measured") x <- obj$trial_data %>% skim_func(names(obj$trial_data)[stringr::str_detect(names(obj$trial_data), "^measured")]) print(x, include_summary = FALSE, width = NULL) } } #' @export read_eventide_single <- function(fname, remove_measured = FALSE, zero_trial_start_time = TRUE ) { library(magrittr) library(dplyr) library(readr) # Parse filename ## Parse header x <- stringr::str_replace_all(readLines(fname, n = 8), ";", "") hdr_txt <- stringr::str_split( stringr::str_replace_all(x, stringr::fixed(" "), ""), ":", simplify = TRUE) ind <- hdr_txt[,2] != "" hdr_txt <- hdr_txt[ind, ] out <- list( name = hdr_txt[5,2], date = as.POSIXct(paste0(hdr_txt[3,2], " ", hdr_txt[4,2], ":", hdr_txt[4,3]), "%d/%m/%Y %H:%M", tz = "Europe/Paris"), version = hdr_txt[1,2] ) ## Read data ct <- readr::cols( .default = col_double(), "Counter Total Trials" = col_integer(), "Counter Trials In Block" = col_integer(), "Blocked Mode" = col_logical(), "Condition Name" = col_character(), "Trial Result Str" = col_character(), "Is Correct Trial" = col_logical(), "Is Incorrect Trial" = col_logical(), "Is Abort Trial" = col_logical(), "Is Repeat Trial" = col_logical() ) df <- readr::read_csv2(fname, col_names = TRUE, col_types = ct, skip = 8, locale(decimal_mark = ",")) df %<>% janitor::remove_empty(which = "cols") %>% janitor::clean_names() # Movement time df$mt <- df$tt - df$rt # Fill in cue set index for sessions with only one if (!("cue_set_index" %in% colnames(df))) { df %<>% tibble::add_column(cue_set_index = 0, .after = "block_index") } cnames <- colnames(df) df %<>% rename(cueset = 
cue_set_index) %>% mutate(cueset = factor(cueset, levels = c(0,1), labels = c("old", "new"))) # convert time to seconds msec_to_sec <- function(x, na.rm = FALSE) (x/1000) tvars <- c("rt", "rt2", "tt", "mt", "reward_delay") tvars <- c(tvars, cnames[stringr::str_ends(cnames, "_duration")], cnames[stringr::str_ends(cnames, "_time")]) df %<>% mutate_at(which(cnames %in% tvars), msec_to_sec) %>% tibble::add_column(direction = contra_ipsi_tar(df$tar_x, tolower(out$name)), .after = "cueset") %>% tibble::add_column(block = as.factor(df$block_index), .after = "block_index") %>% relocate(mt, .after = rt2) # set factor levels of trial_result_str, condition_name, direction levels(df$block) <- c("con", "mix") df$condition_name[df$condition_name=="Go"] = "go" df$condition_name[df$condition_name=="Go control"] = "go_con" df$condition_name[df$condition_name=="Nogo"] = "nogo" df %<>% tibble::add_column(condition = factor(df$condition_name, levels = c("go_con", "go", "nogo")), .after = "condition_name") df$condition_name[df$condition_name=="go_con"] = "go" df %<>% rename(gng = condition_name) %>% mutate(gng = factor(gng, levels = c("nogo", "go"))) df %<>% rename(is_correct = is_correct_trial, is_incorrect = is_incorrect_trial, is_abort = is_abort_trial, is_repeat = is_repeat_trial) df$trial_result_str[df$trial_result_str=="target touch"] = "target_touch" df$trial_result_str[df$trial_result_str=="fixation holding"] = "fixation_holding" df$trial_result_str[df$trial_result_str=="Late retouch"] = "err_late_retouch" df$trial_result_str[df$trial_result_str=="cue touch aborted"] = "err_cue_touch_aborted" df$trial_result_str[df$trial_result_str=="Else-where touch"] = "err_elsewhere_touch" df$trial_result_str[df$trial_result_str=="Overall too late"] = "err_overall_too_late" df$trial_result_str[df$trial_result_str=="Anticipation"] = "err_anticipation" df$trial_result_str[df$trial_result_str=="Early target release"] = "err_early_target_release" df %<>% tibble::add_column(event = 
factor(df$trial_result_str, levels = c("fixation_holding", "target_touch", "err_anticipation", "err_cue_touch_aborted", "err_late_retouch", "err_elsewhere_touch", "err_early_target_release", "err_overall_too_late" )), .after = "trial_result_str") df$cue_duration_programmed <- df$cue_duration df$cue_duration <- df$measured_cue_duration if (remove_measured) df %<>% select(-starts_with("measured")) if (zero_trial_start_time) { df %<>% mutate(define_trial_onset_time_absolute = define_trial_onset_time, .before = define_trial_onset_time) df %<>% mutate(across(ends_with("_onset_time"), ~purrr::map2_dbl(.x, define_trial_onset_time, ~.x - .y))) } out$trial_data <- df class(out) <- "GNGeventide_single" return(out) } #' @export read_eventide_tracker <- function(fname, Fs = 100) { library(dplyr) library(tidyr) library(purrr) library(readr) # Parse filename ## Parse header x <- stringr::str_replace_all(readLines(fname, n = 3), ";", "") hdr_txt <- stringr::str_split( stringr::str_replace_all(x, stringr::fixed(" "), ""), ":", simplify = TRUE, n = 2) ind <- hdr_txt[,2] != "" hdr_txt <- hdr_txt[ind, ] out <- list( date = as.POSIXct(hdr_txt[1,2], "%Y.%d.%m%H:%M", tz = "Europe/Paris"), version = hdr_txt[2,2] ) ## Read data ct <- readr::cols_only( `User Field` = col_integer(), `Current Event` = col_character(), `EventIDE TimeStamp` = col_double(), `Gaze CVX` = col_double(), # degrees `Gaze CVY` = col_double(), #`Gaze X` = col_double(), # pixels #`Gaze Y` = col_double(), Pressure = col_double(), #`Is Touch` = col_logical(), X10 = col_logical() ) df <- readr::read_csv2(fname, col_names = TRUE, col_types = ct, skip = 3, locale(decimal_mark = ",")) df %<>% janitor::remove_empty(which = "cols") %>% rename("counter_total_trials" = "User Field", "state" = "Current Event", "t" = "EventIDE TimeStamp", "x" = "Gaze CVX", "y" = "Gaze CVY", "pressure" = "Pressure") df %<>% filter((state == "Fixation") | (state == "Cue") | (state == "Target>Holding Fixation ROI") | (state == "Target>Waiting") | 
(state == "Target>Target touch") | (state == "Eval") | (state == "Correct>Delay") | (state == "Abort")) %>% mutate(x = ifelse(pressure==0, NA, x), y = ifelse(pressure==0, NA, y)) lev <- c("Fixation", "Cue", "Target>Holding Fixation ROI", "Target>Waiting", "Target>Target touch", "Eval", "Correct>Delay", "Abort") lev <- lev[lev %in% unique(df$state)] df$state <- factor(df$state, levels = lev) ## Linearly interpolate to regular grid # Create a regular grid myseq <- function(from, to, by) tibble(t = seq(from, to, by)) df2 <- df %>% group_by(counter_total_trials) %>% summarise(start = min(t), end = max(t), .groups = "drop") %>% group_by(counter_total_trials) %>% mutate(t_r = map2(start, end, ~myseq(start, end, 1000/Fs))) %>% # times in msec select(-start,-end) # Join with original data df2 %<>% full_join(df %>% group_by(counter_total_trials) %>% nest(), by = "counter_total_trials") # Interpolate myapprox <- function(x, y, xout, method = "linear") { tibble(r = approx(x, y, xout, ties = min, na.rm = FALSE, method = method)$y) } df2 %<>% mutate(state = map2(data, t_r, ~myapprox(.x$t, as.integer(.x$state), .y$t, method = "constant")), x = map2(data, t_r, ~myapprox(.x$t, .x$x, .y$t)), y = map2(data, t_r, ~myapprox(.x$t, .x$y, .y$t)), pressure = map2(data, t_r, ~myapprox(.x$t, .x$pressure, .y$t))) %>% select(-data) %>% unnest(cols = c(t_r, state, x, y, pressure), names_sep = "_") %>% rename(t = t_r_t, state = state_r, x = x_r, y = y_r, pressure = pressure_r) %>% mutate(pressure = ifelse(is.na(x), 0, pressure), t = t/1000) df2$state <- factor(df2$state, labels = lev) out$Fs <- Fs out$tracker_data <- df2 %>% ungroup() class(out) <- "GNGeventide_tracker" return(out) } contra_ipsi_tar <- function(x, subject) { # Contra/Ipsi relative to arm used dir = x if (subject == "tess") { dir[x < 0] = "ipsi" dir[x > 0] = "contra" } else if (subject == "chanel") { dir[x < 0] = "ipsi" dir[x > 0] = "contra" } else if (subject == "flocky") { dir[x > 0] = "ipsi" dir[x < 0] = "contra" } dir = 
factor(dir, levels = c("ipsi", "contra")) return(dir) } #' @export parse_tracker_filenames <- function(basedir = getwd()) { fnames <- list.files(path = basedir, pattern = glob2rx(paste0("TrackerLog--ELOTouchTracker--", "*.txt"))) d <- purrr::map_chr(stringr::str_split(fnames,"--"), 3) t <- purrr::map_chr(stringr::str_split(fnames,"--"), 4) t <- purrr::map_chr(stringr::str_split(t,".txt"), 1) d <- as.POSIXct(paste(d, t), "%Y-%d-%m %H-%M", tz = "Europe/Paris") # Sort by ascending experiment date ind <- order(d) fnames <- fnames[ind] d <- d[ind] return(list(fnames = fnames, date = d)) }
2327910847afe871f74f18386c2304382a3b4cbb
487a34c5ace2b1a60229c5403335de734616561e
/1d2-basicstats.R
9eb57d44dfb473d21d7e2d1708b617ea0d7f9575
[]
no_license
hhenoida/dataanalytics
39d261a288f90c97effc0358d49fd2ffb8566578
c563272f7890a0731fbb9e24e5ff6309ea8586ee
refs/heads/master
2020-03-31T19:50:12.135620
2018-10-29T12:40:39
2018-10-29T12:40:39
152,513,808
259
15
null
null
null
null
UTF-8
R
false
false
620
r
1d2-basicstats.R
# Basic descriptive statistics demo on a simulated sample.
# Draws 10,000 values from N(mean = 60, sd = 20), rounded up to integers,
# then walks through location, spread, shape and frequency-table summaries.
x <- ceiling(rnorm(10000, mean = 60, sd = 20))

# Central tendency
mean(x)
median(x)

# There is no built-in mode function in base R; a sorted frequency table is
# the manual way to spot the most frequent value.
table(x)
sort(table(x), decreasing = TRUE)

# Mode via the modeest package
library(modeest)
mlv(x, method = 'shorth')

# Quantiles
quantile(x)
quantile(x, seq(.1, 1, by = .1))   # deciles
quantile(x, seq(.01, 1, by = .01)) # percentiles

library(e1071) # load e1071 for skewness()/kurtosis()
plot(density(x)) # density plot
e1071::skewness(x) # apply the skewness
kurtosis(x)
sd(x); var(x)

# Covariance and correlation on the built-in `women` dataset.
cov(women$weight, women$height)
# BUG FIX: the original correlated height with itself (always exactly 1).
# Mirroring the cov() call above, the intent is weight vs height.
cor(women$weight, women$height)
stem(x)

# Freq Table
library(fdth) # fast way of creating a frequency distribution table
ftable1 <- fdt(x)
ftable1
e0ee1e3f4b7b94112dbde3108138803580450774
7f02263a680124a9b6fed6e709013034be0dc2e8
/SciDataEpi2020/functions/plot_plus.r
996d8e1545e100e85fbcc929c1474de820715f1f
[]
no_license
Hindrance/EpiSciData2020
a8fa07e67a240a81d76391b614175593369e2810
b271bb99bd793992fea7f41fe46ef1a981e33e61
refs/heads/master
2022-11-10T14:53:32.093301
2020-06-23T11:21:55
2020-06-23T11:21:55
266,233,813
0
0
null
null
null
null
UTF-8
R
false
false
6,609
r
plot_plus.r
#############################################################
# PLOTS: POINTS PLUS DENSITIES - by Juliette (and Vince :D)
#############################################################
# plot_plus(): class-coloured scatter plot of (datax, datay) with, for each
# class, its marginal density drawn along the top (x) and right (y) edges
# of the plotting region.
#
# DENSITY PLOT ##############################################
# datax   = x values
# datay   = y values
# classes = class factors (one label per point)
# colours = ordered vector of colours (as a HEX string), one per class level
# a       = plot vertical adjust: divisor scaling the height of the marginal
#           density panels (larger a => flatter panels)
# pch/cex = usual point style / magnification parameters
# ylim/xlim = optional axis limits; data range +/- 10% padding when missing
# bw      = density bandwidths c(x-axis, y-axis); default 1/20 of each range
plot_plus <- function(datax, datay, classes, colours, a=10,pch=16,cex=1,ylim, xlim, bw=c(diff(range(datax))/20,diff(range(datay)/20)),...){
  # Coerce class labels to a factor, keeping first-appearance order.
  if(class(classes)!="factor"){
    class.vector <- factor(classes, levels=unique(classes))} else {
      class.vector <- classes
    }
  class.colours <- colours
  # Create a plot range - used for all densities (range +/- 10% padding).
  # Note: the padding amount is always computed from the DATA range, even
  # when explicit xlim/ylim are supplied.
  if(missing(xlim)){x.range <- range(datax)+c((-(range(datax)[2]-range(datax)[1])/10),(range(datax)[2]-range(datax)[1])/10)} else {
    x.range <- range(xlim)+c((-(range(datax)[2]-range(datax)[1])/10),(range(datax)[2]-range(datax)[1])/10)
  }
  if(missing(ylim)){y.range <- range(datay)+c((-(range(datay)[2]-range(datay)[1])/10),(range(datay)[2]-range(datay)[1])/10)} else {
    y.range <- range(ylim)+c((-(range(datay)[2]-range(datay)[1])/10),(range(datay)[2]-range(datay)[1])/10)
  }
  # Set up plot window; xpd=T so the density polygons may be drawn outside
  # the plotting region (on the top and right margins).
  # par(mar=c(5,5,5,5),xpd=T,cex=cex)
  par(xpd=T,cex=cex)
  plot(datax,datay,pch=NA, yaxs="i",ylim=y.range, xaxs="i",xlim=x.range,...)
  # For each class defined in class list.
  for(i in levels(class.vector)){
    # What data belongs to our class i?
    i.vector <- which(class.vector == i)
    # What colour belongs to our class i?
    i.colour <- class.colours[which(levels(class.vector) == i)]
    # Let's plot our class i data (filled symbols pch >= 21 take bg colour).
    if(pch < 21){points(datax[i.vector],datay[i.vector],col=i.colour,pch=pch)}
    if(pch >= 21){points(datax[i.vector],datay[i.vector],bg=i.colour,pch=pch, col="black")}
    # Density of class i along the x-axis, padded with zero endpoints so the
    # polygon closes, and normalised by the class size.
    x.density <- density(datax[i.vector],from = min(x.range),to = max(x.range), bw=bw[1])
    x.density$x <- c(min(x.density$x), x.density$x, max(x.density$x))
    x.density$y <- c(0, x.density$y, 0)
    x.density$y <- x.density$y/length(i.vector)
    # Density of class i along the y-axis. The x/y component names are then
    # SWAPPED below, so afterwards y.density$x holds the density values and
    # y.density$y the coordinates - it is drawn sideways on the right edge.
    y.density <- density(datay[i.vector],from = min(y.range),to = max(y.range), bw=bw[2])
    y.density$x <- c(min(y.density$x), y.density$x, max(y.density$x))
    y.density$y <- c(0, y.density$y, 0)
    y.density$y <- y.density$y/length(i.vector)
    names(y.density)[1:2] <- c("y","x")
    # Coefficients scaling the densities onto the plot; the panel height is
    # 1/a of the opposite axis span (max(y.density$x) is the max density
    # value thanks to the name swap above).
    x.coeff <- ((y.range[2]-y.range[1])/a)/max(x.density$y)
    y.coeff <- ((x.range[2]-x.range[1])/a)/max(y.density$x)
    # Density polygon on the x-axis (axis 3), semi-transparent class colour
    # ("80" appended = 50% alpha).
    polygon(x.density$x,x.density$y*x.coeff+max(y.range),col=paste(i.colour,"80",sep=""), border=i.colour)
    # Density polygon on the y-axis (axis 4).
    polygon(y.density$x*y.coeff+max(x.range),y.density$y,col=paste(i.colour,"80",sep=""), border=i.colour)
    # Plot population distribution values?? - to be continued: need to find
    # ways to machine-learn / parse populations between density distributions.
    # x.density$y[which.max(x.density$y)]
  }
  par(xpd=F)
  box()
}

# example.data.function(): simulate clustered 2-D example data.
#   class.no        = number of classes (clusters)
#   n.factors       = factor count for multi.factor.set (see NOTE below)
#   class.length.m  = mean number of points per class
#   class.length.sd = s deviation of the number of points per class
#   norm.m.range    = lower/upper limits of the normal-distribution means
#   norm.sd.range   = lower/upper limits of the normal-distribution SDs
#   linear.range    = lower/upper limits of the per-class linear trend
# Returns a data.frame: class label, x, y, and one random HEX colour per class.
example.data.function <- function(
  class.no = 6,
  n.factors=2,
  class.length.m = 50,
  class.length.sd = 10,
  norm.m.range = c(-50,50),
  norm.sd.range = c(0,30),
  linear.range = c(-50,50)
) {
  # Pool of characters used to build random class names.
  name.vector <- c(LETTERS,letters,0:9)
  # NOTE(review): both multi.factor.set assignments appear to be dead code -
  # the variable is overwritten immediately and never read afterwards.
  multi.factor.set <- t(matrix(rnorm(class.length.m*class.no*n.factors, runif(n.factors, 0, 1), runif(n.factors, 0, 0)), n.factors, class.length.m*class.no))
  multi.factor.set <- matrix(rnorm(class.length.m*n.factors, runif(n.factors, 0, 1), runif(n.factors, 0, 0)), n.factors, class.length.m)
  class.vector <- character()
  class.values.x <- numeric()
  class.values.y <- numeric()
  class.colours <- character()
  for(i in 1:class.no){
    # Random 6-character class name and a random class size.
    class.name <- paste(name.vector[round(runif(6,1,length(name.vector)))], collapse="")
    class.length <- round(rnorm(1,class.length.m, class.length.sd))
    class.vector <- c(class.vector, rep(class.name, class.length))
    # Random linear trend for this class, with normal noise added on top.
    linearx <- seq(runif(1, linear.range[1], linear.range[2]),runif(1, linear.range[1], linear.range[2]), length=class.length)
    lineary <- seq(runif(1, linear.range[1], linear.range[2]),runif(1, linear.range[1], linear.range[2]), length=class.length)
    class.values.x <- c(class.values.x, (rnorm(class.length,runif(1,norm.m.range[1], norm.m.range[2]),runif(1,norm.sd.range[1], norm.sd.range[2])))+linearx)
    class.values.y <- c(class.values.y, (rnorm(class.length,runif(1,norm.m.range[1], norm.m.range[2]),runif(1,norm.sd.range[1], norm.sd.range[2])))+lineary)
    # Random named colour, converted to a "#RRGGBB" HEX string.
    c.temp <- sample(colours(),1)
    c.temp <- sprintf("#%02X%02X%02X",col2rgb(c.temp)[1], col2rgb(c.temp)[2], col2rgb(c.temp)[3])
    class.colours <- c(class.colours, rep(c.temp,class.length))
  }
  class.vector <- as.factor(class.vector)
  output <- data.frame(class.vector, class.values.x, class.values.y, class.colours, stringsAsFactors=F)
  output
}

# Demo: runs only when the caller has defined no.example.plx as FALSE.
# NOTE(review): no.example.plx is not defined in this file - sourcing it
# standalone errors here; confirm it is set by the including script.
if(no.example.plx == F){
  #################### EXAMPLE DATA #########################
  plot.example.data <- function(){
    # NOTE(review): arguments are positional, so 50 lands on n.factors and
    # 10 on class.length.m - verify this matches the intended call.
    temp.data <- example.data.function(5, 50, 10, c(-50, 50), c(0, 30), c(-50, 50))
    plot_plus(temp.data$class.values.x, temp.data$class.values.y, classes = temp.data$class.vector,colours=unique(temp.data$class.colours), pch=21, cex=1.2)
  }
  plot.example.data()
  #################### EXAMPLE DATA #########################
}
bf4204c4eeaceb96894c8cd6933899f93d83f7d6
c981caf103a3540f7964e6c41a56ca34d67732c4
/R/mice.impute.eap.R
162326db22d762e9d3ab3ff88d0136c95af7e900
[]
no_license
alexanderrobitzsch/miceadds
8285b8c98c2563c2c04209d74af6432ce94340ee
faab4efffa36230335bfb1603078da2253d29566
refs/heads/master
2023-03-07T02:53:26.480028
2023-03-01T16:26:31
2023-03-01T16:26:31
95,305,394
17
2
null
2018-05-31T11:41:51
2017-06-24T15:16:57
R
UTF-8
R
false
false
415
r
mice.impute.eap.R
## File Name: mice.impute.eap.R
## File Version: 2.07

#' Imputation of plausible values from EAP estimates, for use as a
#' mice imputation method ("eap").
#'
#' @param y    Vector to be imputed (standard mice method signature).
#' @param ry   Logical response indicator for y (not used by this method).
#' @param x    Design matrix of predictors (not used by this method).
#' @param eap  Named list; eap[[variable]] must contain "M" (EAP point
#'             estimates) and "SE" (their standard errors), one per case.
#' @param ...  Further arguments passed by mice (ignored).
#' @return Numeric vector of imputed values, one normal draw per case.
mice.impute.eap <- function (y, ry, x, eap, ...)
{
    # Recover the name of the variable currently being imputed from the
    # calling mice environment.
    pos <- parent.frame(n=1)
    res <- mice_imputation_get_states(pos=pos)
    vname <- res$vname
    # NOTE: removed unused local `newstate` (was res$newstate, never read).
    # EAP point estimates and standard errors for this variable.
    M.scale <- eap[[ vname ]][[ "M" ]]
    SE.scale <- eap[[ vname ]][[ "SE" ]]
    N <- length(M.scale)
    # One draw per case: plausible value ~ N(M, SE).
    ximp <- stats::rnorm( N, mean=M.scale, sd=SE.scale )
    return(ximp)
}
c7878052270c641731c821593ef8bdced6675f93
0a906cf8b1b7da2aea87de958e3662870df49727
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386845-test.R
72fd159b80b0d971b048ad0e03215b200edd1218
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
597
r
1610386845-test.R
# Auto-generated libFuzzer/valgrind regression case for the internal
# grattan:::anyOutside() routine. It replays one recorded argument list;
# the test's purpose is simply that the call completes without crashing.
# Do NOT edit the literal values - they encode the exact fuzzed input.
testlist <- list(a = -774646785L, b = 385877056L,
                 x = c(-623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623247288L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -623191334L, -623191334L, -623191334L,
                       -623191334L, -636853204L, -1L, -1L, 278592L,
                       751971372L))
# Invoke the internal function with the recorded arguments and show the
# structure of whatever comes back.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
2d06eb0cf5a285db35ba08d3c9f9ba8ec5dd3719
8c4c1dd8db80357e6d0fed7adafe9a4a371c2801
/plot3.R
5c242439e09ef484895cce3691ac147703735b73
[]
no_license
tj---/ExData_Plotting1
694b50bf42721088a904e673b9296eab70695ba0
c402485d9a8c4b8343560d965af2b0b090c0b4da
refs/heads/master
2021-01-22T18:42:55.226052
2015-06-04T20:24:24
2015-06-04T20:24:24
36,820,921
0
0
null
2015-06-03T18:00:33
2015-06-03T18:00:31
null
UTF-8
R
false
false
480
r
plot3.R
# Plot 3: household energy sub-metering over time, one line per sub-meter,
# written to plot3.png (480 x 480).
source("util.R")

# Relevant subset of the household power data (loader defined in util.R).
power <- load_data()

# One colour and one legend label per sub-meter, in plotting order.
meter_cols <- c("black", "red", "blue")
meter_labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

png(file = "plot3.png", width = 480, height = 480)
with(power, {
  plot(DateTime, Sub_metering_1, type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
})
legend("topright", lty = c(1, 1, 1), col = meter_cols, legend = meter_labels)
dev.off()
7a5b8863f39d34673b1ca152d584e82619025bc7
80fbecdc50f0f580336f32ee3160a8ebf16b83ff
/run_analysis.R
d5fded9ec1ec5172fb677cd7eecc5d6c8b79a491
[]
no_license
hpirespt/Getting-and-Cleaning-Data
b22a6089fe58e30d03ac47e3308ff3ec436cd1b1
b257e40f26369916beb6bfcf27b50e38d2a3be8c
refs/heads/master
2022-04-15T00:44:33.517701
2020-04-11T14:21:09
2020-04-11T14:21:09
254,882,900
0
0
null
null
null
null
UTF-8
R
false
false
4,279
r
run_analysis.R
#' Build tidy summaries of the UCI HAR ("Human Activity Recognition") data.
#'
#' Reads the raw test and train splits from ./data/UCI HAR Dataset, merges
#' them, keeps only mean() and std() features, and writes the per-subject,
#' per-activity averages to "Tidy_dataset_2.txt".
#'
#' @return The merged, sorted tidy data set (one row per observation),
#'   invisibly printed as the function's last expression.
run_analysis<-function(){
    library(dplyr)
    # Path definitions (Windows-style separators, relative to working dir).
    # FIX: the original assigned pathtestX/pathtesty/pathsubject_test twice
    # with identical values; the duplicate block has been removed.
    pathfeatures<-".\\data\\UCI HAR Dataset\\features.txt"
    pathactivities<-".\\data\\UCI HAR Dataset\\activity_labels.txt"
    pathtestX<-".\\data\\UCI HAR Dataset\\test\\X_test.txt"
    pathtesty<-".\\data\\UCI HAR Dataset\\test\\y_test.txt"
    pathsubject_test<-".\\data\\UCI HAR Dataset\\test\\subject_test.txt"
    pathtrainX<-".\\data\\UCI HAR Dataset\\train\\X_train.txt"
    pathtrainy<-".\\data\\UCI HAR Dataset\\train\\y_train.txt"
    pathsubject_train<-".\\data\\UCI HAR Dataset\\train\\subject_train.txt"
    ######################################################################
    # Read data (whitespace-separated).
    features<-read.table(pathfeatures, sep="")
    activities<-read.table(pathactivities, sep="")
    test_X_data<- read.table(pathtestX, sep = "")
    test_y_data<- read.table(pathtesty, sep = "")
    subject_test<- read.table(pathsubject_test, sep = "")
    train_X_data<- read.table(pathtrainX, sep = "")
    train_y_data<- read.table(pathtrainy, sep = "")
    subject_train<- read.table(pathsubject_train, sep = "")
    ######################################################################
    # Name the measurement columns after the feature list.
    colnames(test_X_data)<-features[,2]
    colnames(train_X_data)<-features[,2]
    ######################################################################
    # Bind subject ids with the activity codes (numeric labels for now).
    Y_subBind_test<-data.frame(cbind(subject_test,test_y_data))
    colnames(Y_subBind_test)<-c("subject","activitieslabels")
    Y_subBind_train<-data.frame(cbind(subject_train,train_y_data))
    colnames(Y_subBind_train)<-c("subject","activitieslabels")
    ######################################################################
    # Attach the measurement columns to subject/activity.
    Comp_Test <- cbind(Y_subBind_test, test_X_data)
    Comp_Train <- cbind(Y_subBind_train, train_X_data)
    ######################################################################
    # Merge test and train, then replace numeric activity codes with the
    # descriptive names from activity_labels.txt.
    Final_dataset<-rbind(Comp_Test,Comp_Train)
    Final_dataset$activitieslabels<-factor(Final_dataset$activitieslabels)
    levels(Final_dataset$activitieslabels) <- activities$V2
    ######################################################################
    # Keep mean() columns (excluding meanFreq()) and std() columns only.
    get_means<-grep("mean()" , names(Final_dataset), value=TRUE)
    get_meanfreq<-grep("meanFreq()" , names(Final_dataset), value=TRUE)
    get_cols_mean<-setdiff(get_means, get_meanfreq)
    get_cols_std<-grep("std()" , names(Final_dataset), value=TRUE)
    ######################################################################
    # Tidy dataset: subject, activity, mean and std columns, sorted.
    Tidy_dataset<-Final_dataset[,c("subject","activitieslabels",get_cols_mean,get_cols_std)]
    Tidy_dataset<-arrange(Tidy_dataset, subject, activitieslabels)
    ######################################################################
    # Tidy dataset 2: average of each variable per subject and activity.
    # NOTE: summarise_each() is deprecated in current dplyr; kept here for
    # behavioural parity (across(everything(), mean) is the replacement).
    Tidy_dataset_2<-group_by(Tidy_dataset,subject,activitieslabels)
    Tidy_dataset_2<-summarise_each(Tidy_dataset_2, funs = mean)
    ######################################################################
    # Write the second tidy dataset (fixed partial arg name row.name ->
    # row.names; behaviour unchanged).
    write.table(Tidy_dataset_2,"Tidy_dataset_2.txt",row.names=FALSE )
    # Return the tidy dataset (last evaluated expression).
    Tidy_dataset
    ######################################################################
}
ff27a56b2bb45cdc2ed34799304a405dbe00d7a2
439a1a4a95ea2a915c20b12aa49d083d28be5e72
/visualization/phylogenetic_analysis.R
9d70dae12ca54855371dfc4dce0307b39e61d837
[]
no_license
davidgllund/ARG_analysis_scripts
611b4b1efa8976a4b5ef37b7b8d67a1c4dbacb63
7f1388ab8fa48951534813c850ae62222deebd5a
refs/heads/master
2023-05-26T23:36:37.365107
2021-06-01T12:48:36
2021-06-01T12:48:36
337,497,994
0
1
null
null
null
null
UTF-8
R
false
false
1,939
r
phylogenetic_analysis.R
# Render an annotated circular phylogenetic tree: tips coloured/shaped by
# host phylum, gene names as bold tip labels, output saved to PDF.

# Import packages
library("ggplot2")
library("ggtree")

# Input files: tip ids of the tree, the phylum of each tip, and tip names.
# Row order must correspond across the three files.
ids<-read.table('headers.txt')
phylum<-read.table("phylum.txt")
names<-read.table("names.txt")
# NOTE(review): `names` shadows base::names() for the rest of this script.

# Combine input files to a data.frame containing all metadata of each tip
tip_metadata<-data.frame(Label=ids, Phylum=phylum, Gene=names)
colnames(tip_metadata) <- c("Label", "Phylum", "Gene")

# Re-order the phylum information for the plot (fixes legend order and the
# mapping onto phylum_cols / phylum_shape below).
tip_metadata$Phylum<-factor(tip_metadata$Phylum, levels = c("Mobile", "Actinobacteria", "Bacteroidetes", "Firmicutes", "Proteobacteria", "Miscellaneous", "Metagenome"))

# Add labels to tree outgroup tips (Phylum = NA, so they take the last,
# open-circle style in the scales below).
tip_metadata<-rbind(tip_metadata, c("KsgA", NA, "KsgA"))
tip_metadata<-rbind(tip_metadata, c("APH2IIIa", NA, "APH(2'')-IIIa"))
tip_metadata<-rbind(tip_metadata, c("APH2IIa", NA, "APH(2'')-IIa"))
tip_metadata<-rbind(tip_metadata, c("APH2Ie", NA, "APH(2'')-Ie"))

# Define colors and shapes of tips, one entry per factor level (plus the
# trailing entry used for NA).
phylum_cols<-c("#000000","#8da0cb","#66c2a5","#fc8d62","#e78ac3","#C8C8C8","#FFFFFF")
phylum_shape<-c(21,21,21,21,21,21,21,1)

# Import phylogenetic tree in newick format and plot with ggtree.
# %<+% attaches tip_metadata to the tree by matching tip labels; tips with a
# Gene name get bold, aligned labels, the rest are drawn without text.
tree <- read.tree("tree.txt")
p <- ggtree(tree, layout='circular')
p <- p %<+% tip_metadata + geom_tippoint(aes(fill=Phylum, shape = Phylum), size=4.5, color="black", stroke=0.7) +
  scale_fill_manual(values=phylum_cols) +
  scale_shape_manual(values = phylum_shape) +
  theme(legend.position = c(0.9, 0.62),
        legend.title=element_text(size=24),
        legend.text=element_text(size=22)) +
  guides(shape=guide_legend(override.aes = list(size = 6))) +
  geom_tiplab(aes(subset=!is.na(Gene), label=Gene), align=T, fontface='bold', size=7) +
  geom_tiplab(aes(subset=is.na(Gene), label=Gene), align=F)

# Plot tree and save into a large square pdf
pdf("tree_annotated.pdf",height = 25, width = 25)
plot(p)
dev.off()
f40af827692e9ca06ffa8200f43d5a7e565f6f4b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/homals/examples/roskam.Rd.R
02f0d5e43c2a8cd5890554f51451c51eada5d962
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
151
r
roskam.Rd.R
### Example usage of the `roskam` dataset bundled with the homals package
### (extracted from the package's Rd examples).
library(homals)

### Load the dataset into the workspace, then display it.
data(roskam)
print(roskam)
ffc4c5049094d1363bd378f35a3c953a306cddfa
06efc31a33d3ea3cd3b645c582bacda5373badd4
/CeTrAn/other_codes/F1000master.R
40f389f19284d25c8ec8552dfab6e5b9352baaf6
[ "MIT" ]
permissive
Marlouck/CeTrAn
081b4d1d42417f67fab1c085a1d0c78068ffc95d
e4883361de3fa21ba9f6f2a33f38850953f4fe57
refs/heads/master
2020-04-17T18:09:14.563943
2019-01-10T09:23:57
2019-01-10T09:23:57
null
0
0
null
null
null
null
UTF-8
R
false
false
5,426
r
F1000master.R
### code for figure 4 in the F1000 paper
### Driver script: maintains a persistent table of analysed Buridan tracking
### data (analysed_data.csv), runs CeTrAn on any newly uploaded groups, then
### sources the PCA analysis and plotting scripts.

REBOOT <- FALSE # if TRUE: the result table is reset and all data are recalculated
only_one_additional_group <- FALSE # if TRUE, only the given group is analysed with the 5 initial groups
# Additional_group = "data entered by the user!"

## analysis variables handed to CeTrAn:
g_duration_slider <- 10 # default=10, min 0 max 20
g_bin_size <- 1         # default=1, min 0 max 20
g_supress_paints <- TRUE
g_treshold <- 8         # min 0 max 20

## which analyses are performed:
g_general <- TRUE
g_roundarena <- TRUE
g_stripes_deviation <- TRUE
g_occupancy <- TRUE
g_angledev <- TRUE
g_outputtext <- TRUE
g_log <- TRUE
g_thigmo <- TRUE
g_pca <- FALSE
g_individual <- FALSE
g_open_pdf <- FALSE

outputfile <- "temp/output"

## NEED TO BE CHANGED WHEN MOVING TO A NEW ENVIRONMENT
## folders where files are read/written:
g_inputdir <- "~/Desktop/F1000/Buridan_data_reworked/uploads"
g_outputdir <- "~/Desktop/F1000/Buridan_data_reworked/output"
rgghome <- "~/Gits/CeTrAn/CeTrAn"
##

setwd(g_inputdir)
# uploads.csv is modified after the data is uploaded via the F1000 code
importdata <- read.csv("uploads.csv")

# Reset the persistent results: write an empty data frame carrying only the
# expected column names, so every upload is re-analysed from scratch.
if (REBOOT) {
  onlycolname <- structure(list(id = logical(0), group = logical(0), date = logical(0),
    timeofday = logical(0), length_experiment = logical(0), median_speed = logical(0),
    distance_traveled_mm__permin = logical(0), turning_angle = logical(0),
    meander = logical(0), activitytime_permin_ST = logical(0), act_bouts_ST = logical(0),
    pause_duration_ST = logical(0), numb_pause_permin_ST = logical(0),
    activitytime_permin_TT = logical(0), act_bouts_TT = logical(0),
    pause_length_TT = logical(0), numb_pauses_permin_TT = logical(0),
    centrophobism_moving = logical(0), centrophobism_sitting = logical(0),
    number_of_walks_permin = logical(0), stripe_deviation = logical(0),
    UID = logical(0)), .Names = c("id", "group", "date", "timeofday",
    "length_experiment", "median_speed", "distance_traveled_mm__permin",
    "turning_angle", "meander", "activitytime_permin_ST", "act_bouts_ST",
    "pause_duration_ST", "numb_pause_permin_ST", "activitytime_permin_TT",
    "act_bouts_TT", "pause_length_TT", "numb_pauses_permin_TT",
    "centrophobism_moving", "centrophobism_sitting", "number_of_walks_permin",
    "stripe_deviation", "UID"), class = "data.frame", row.names = integer(0))
  setwd(g_outputdir)
  write.csv(onlycolname, "analysed_data.csv")
}

# import the previous analysis
analyseresults <- read.csv(paste(g_outputdir, "analysed_data.csv", sep = "/"))
analyseresults <- analyseresults[, -1] # drop the row-number column added by write.csv
analyseresults$UID <- as.factor(analyseresults$UID)
analysed <- levels(analyseresults$UID)

# check for new data to analyse, analyse it and add it to the result table
for (i in c(1:nrow(importdata))) {
  UID <- importdata[i, 1]
  if (!UID %in% analysed) {
    message("YES")
    if (exists("combf_table")) {analyseresults <- combf_table}
    groupname <- importdata[i, 8]
    g_filetablename <- paste(groupname, "/grouping.csv", sep = "")
    g_filetable <- read.csv(paste(g_inputdir, g_filetablename, sep = "/"), sep = ",", header = FALSE)
    g_filetable[, 1] <- paste(groupname, g_filetable[, 1], sep = "/")
    setwd(rgghome)
    # Runs the CeTrAn analysis; expected to define f_table in this session.
    source("CeTrAn_norgg_xml.r")
    setwd(g_outputdir)
    f_table$UID <- UID
    combf_table <- rbind(f_table, analyseresults)
    #write.csv(f_table,"analysed_data.csv")
  }
  setwd(g_outputdir)
  ### if new data: write the new analysis; if no new data: just copy the data into the combf_table variable
  if (exists("combf_table")) {
    write.csv(combf_table, "analysed_data.csv")
  } else {combf_table <- analyseresults}
}

# now the result table is used to perform the PCA:
data <- combf_table
setwd(rgghome)
if (only_one_additional_group) {
  # BUG FIX: the path was unquoted in the original
  # (source(other_codes/f1000_only_one_group.r)), which raises an
  # "object not found" error whenever this branch runs.
  source("other_codes/f1000_only_one_group.r")
}
source("other_codes/PCA_stablef1000.R") # you need to get line 7 out

## not in use anymore: old way to add colors.
## this value "abc" decides the color of the plot. It could eventually be
## chosen by the user, which is why the plotting code is kept apart: if we
## make this value changeable, we do not have to redo the PCA analysis.
#abc= c(2,rep(1,length(levels(PCA_res$group))-1)) #last group in red, all the other in black
#abc=c (1:600) # color used per default, only 6 different colors used in loop

source("other_codes/plotting_PCA_f1000.R") # you will need to change *pdf("test.pdf")* (l.9) with the appropriate output, probably *png("pca_plot.png")*

## there you should get an image.
## users should be able to see the image and download the "allrawdata.csv" file.
e8db6fb2cfe01d30355d0fcd8127286b61deb8d1
4faa70a753a7192dd0eaff0191e2c21bb2997185
/wgmeans.R
43cc11c5e241f529fbaf0cbd982d3a81647c1c97
[]
no_license
SaptarshiC98/-WG--means
7b85909c69754ef889692cb761e57086af3f3de9
0c9910947f4aa4e0804a2c65e100f60e7c8abac8
refs/heads/master
2021-09-23T23:30:50.254010
2018-09-29T04:25:28
2018-09-29T04:25:28
108,992,157
0
0
null
null
null
null
UTF-8
R
false
false
4,456
r
wgmeans.R
# (WG)-means: weighted k-means style clustering with automatic cluster
# splitting. Clusters are recursively split along their first principal
# component; a split is kept when the Anderson-Darling test rejects
# normality of the data projected onto the split direction.

# Squared Euclidean distance between two vectors.
sq.euc.dist= function(x1, x2) sum((x1 - x2) ^ 2)

# Weighted squared Euclidean distance: sum_j w_j * (x1_j - x2_j)^2.
wt.euc.dist.sq=function(x1,x2,w){
  p=(x1-x2)^2
  p=w*p
  return(sum(p))
}

# Per-coordinate weighted squared differences (vector, not summed).
vec.wt.euc.dist.sq=function(x1,x2,w){
  p=(x1-x2)^2
  p=w*p
  return(p)
}

# nortest provides ad.test() (Anderson-Darling normality test).
library(nortest)

# NOTE(review): duplicate definition - sq.euc.dist was already defined
# identically above; harmless but redundant.
sq.euc.dist= function(x1, x2) sum((x1 - x2) ^ 2)

# Weighted k-means with fixed initial centres.
#   X    = data matrix (rows = observations)
#   M    = k x d matrix of initial centres
#   w    = per-feature weights used in the distance
#   tmax = iteration budget
# Returns list(label, M): cluster assignment per row and final centres.
k.means= function(X,M,w,tmax){
  X=as.matrix(X)
  N =(dim(X))[1]
  k=dim(M)[1]
  t=0
  label=numeric(N)
  wdist=numeric(k)
  repeat{
    t=t+1
    # Assignment step: nearest centre under the weighted distance.
    for(i in 1:N){
      for(j in 1:k){
        wdist[j]=wt.euc.dist.sq(X[i,],M[j,],w)
      }
      label[i]=which.min(wdist)
    }
    # Update step: centre = mean of its assigned points.
    for(i in 1:k){
      I=which(label==i)
      M[i,]=colMeans(X[I,])
    }
    if(t>tmax){
      break
    }
  }
  return(list(label,M))
}

# Weighted projection of each row of X onto direction v:
# pro_i = <x_i, v>_w / ||v||_w^2.
proj=function(X,v,w){
  X=as.matrix(X)
  n=dim(X)[1]
  pro=numeric(dim(X)[1])
  norm.v.sq=sum(v*v*w)
  for(i in 1:n){
    pro[i]=sum(X[i,]*v*w)/norm.v.sq
  }
  # hist(pro)
  pro
}

# Propose two child centres for centre c by moving +/- m along the first
# principal component of X, with |m| = sqrt(2*lambda/pi) (the expected
# absolute deviation along that component).
# NOTE(review): this masks base::split() for the rest of the session.
split=function(c,X,w){
  X=as.matrix(X)
  p=prcomp(X)
  # A=diag(w)
  # B=A%*%B
  # B=B%*%A
  # cat(A)
  s=p$rotation[,1]
  lambda=p$sdev[1]^2
  m=sqrt(2*lambda/pi)*s
  rm(p)
  return(list(c+m,c-m))
}

# Try splitting one cluster (data X, centre c) into two.
# Runs a 2-centre weighted k-means from the PCA-proposed children, projects
# the data onto the axis joining the refined children, and keeps the split
# (flag=1, both children returned) only if ad.test rejects normality at
# level alpha; otherwise returns flag=0 and the original centre.
split.2=function(X,c,alpha,w){
  X=as.matrix(X)
  flag=0
  N=dim(X)[1]
  d=dim(X)[2]
  spl=split(c,X,w)
  c1=spl[[1]]
  c2=spl[[2]]
  c1=as.matrix(c1)
  c2=as.matrix(c2)
  M=cbind(c1,c2)
  M=t(M)
  p=k.means(X,M,w,50)
  M=p[[2]]
  c1=M[1,]
  c2=M[2,]
  v=c1-c2
  projected=proj(X,v,w)
  test=ad.test(projected)
  if(test$p.value<alpha){
    flag=1
    return(list(flag,c1,c2))
  }else{
    flag=0
    return(list(flag,c))
  }
}

# Main driver.
#   X     = data matrix; each column is standardised (z-scored) first
#   beta  = weight exponent (feature weights enter distances as weight^beta)
#   alpha = significance level for the normality-based split test
#   tmax  = maximum number of outer split-and-refine rounds
# Returns list(label, M, weight, t): assignments, centres, feature weights
# and the number of rounds performed.
wgmeans=function(X,beta,alpha,tmax){
  n=dim(X)[1]
  d=dim(X)[2]
  # Standardise every feature.
  for(i in 1:d){
    X[,i]=X[,i]-mean(X[,i])
    X[,i]=X[,i]/sd(X[,i])
  }
  # Start from a single cluster at the grand mean, uniform weights.
  M=as.matrix(colMeans(X))
  M=t(M)
  weight=rep(1/d,d)
  label=rep(1,n)
  #dist=numeric(c)
  t=0
  D=numeric(d)
  flag=0
  #plot(X)
  repeat{
    t=t+1
    flag=0
    # update centres and cluster numbers
    if(is.vector(M)==TRUE){
      M=as.matrix(M)
      M=t(M)
    }
    c=dim(M)[1]
    counter=1
    c1=c
    # Scratch matrix for up to 2 children per current cluster.
    new.mat=matrix(rep(0,2*c *d),ncol=d)
    cat(c1)
    # Attempt to split every current cluster.
    for(i in 1 : c){
      I=which(label==i)
      ## if(length(I)<8){
      ## break
      ## }
      s=split.2(X[I,],M[i,],alpha,weight^beta)
      if(s[[1]]==1){
        new.mat[counter,]=s[[2]]
        new.mat[(counter+1),]=s[[3]]
        counter=counter+2
      }else if(s[[1]]==0){
        new.mat[counter,]=s[[2]]
        counter=counter+1
      }
    }
    # Rebuild the centre matrix from the accepted centres.
    rm(M)
    M=matrix(rep(0,d * (counter-1)),ncol=d)
    for(i in 1 : (counter-1)){
      M[i,]=new.mat[i,]
    }
    rm(new.mat)
    c=dim(M)[1]
    # if(c1==c){
    # break
    # }
    ## p=k.means(X,M,weight^beta,200)
    ## M=p[[2]]
    ## points(M,col=3,pch=19)
    cat(c)
    dist=numeric(c)
    # Inner refinement: 30 rounds of assignment / centre / weight updates.
    for(iter in 1:30){
      # update membership
      for(i in 1 : n){
        for(j in 1 : c){
          dist[j]=wt.euc.dist.sq(X[i,],M[j,],weight^beta)
        }
        label[i]=which.min(dist)
      }
      # update centres
      for(i in 1:c){
        I=which(label==i)
        M[i,]=colMeans(X[I,])
      }
      # update weights: D_j = total within-cluster dispersion of feature j;
      # weights are proportional to (1/D_j)^(1/(beta-1)), normalised to 1.
      for(j in 1:d){
        D[j]=0
      }
      for(i in 1:c){
        I=which(label==i)
        for(k in I){
          D=D+vec.wt.euc.dist.sq(X[k,],M[i,],rep(1,d))
        }
      }
      for(i in 1:d){
        if(D[i]!=0){
          D[i]=1/D[i]
          D[i]=D[i]^(1/(beta-1))
        }
      }
      s=sum(D)
      weight=D/s
    }
    # cat(weight)
    # cat('\n')
    ## cat(weight)
    # check to discard features
    ## max=max(weight)
    ## max=max*.1
    ## truth=(weight<max)
    ## if(sum(truth)>0){
    ##I=which(weight<max)
    ## weight(I)=0
    ## }
    # Stop when the round budget is exhausted.
    # NOTE(review): the cat('here*') calls after break are unreachable.
    if(t>tmax){
      break
      cat('here1')
    }
    # Stop if any cluster has become too small to split reliably (< 8 pts).
    for(i in 1:c){
      I=which(label==i)
      if(length(I)<8){
        flag=1
      }
    }
    if(flag==1){
      break
      cat('here2')
    }
    # Stop when no split was accepted this round (cluster count unchanged).
    if(c1==c){
      cat('here3')
      break
    }
  }
  return(list(label,M,weight,t))
}
f2b28d086d320fe8832badefc0f85b398ceaf2ee
43e6a86a838a2f4bb21059190640551dd1fc4dc9
/R/plotMethy.R
ebd74b1319290932867a1f276eb510047e2aab97
[]
no_license
jianhong/RRBSeq
3dc6e2e0bacc0d8bbda49d4a6ea907d21b522f63
e7f648897a588b705aaef559402e0eb2e2f28a63
refs/heads/master
2021-01-25T08:42:39.259480
2017-03-30T14:16:32
2017-03-30T14:16:32
35,629,158
0
0
null
null
null
null
UTF-8
R
false
false
5,386
r
plotMethy.R
## Plot per-sample methylation levels as a lollipop plot over a genomic
## region, or over the locus of a named gene.
##
## data       a GRanges with totalC*/methyC*/ratio* metadata columns (e.g. the
##            output of compareMethylation) or an RRBSeqDataSet.
## txdb       TxDb object used to build the gene-model (features) track.
## orgdb      OrgDb object used for symbols and, when `range` is missing, to
##            look up the location of `gene`.
## range      GRanges giving the region to plot; derived from `gene` if missing.
## gene       gene symbol or alias; only used when `range` is missing.
## meanCutoff named numeric vector (totalC, methyC, ratio); only sites whose
##            row means exceed all three cutoffs are plotted.
## ...        further arguments forwarded to lolliplot (presumably
##            trackViewer::lolliplot -- confirm), e.g. type, legend, features.
plotMethy <- function(data, txdb, orgdb, range, gene,
                      meanCutoff=c(totalC=0, methyC=0, ratio=0.05), ...){
  if(missing(txdb) || missing(orgdb) || missing(data)){
    stop("data, txdb and orgdb are required")
  }
  if(missing(range) && missing(gene)){
    stop("range and gene both are missing.")
  }
  if(any(!c("totalC", "methyC", "ratio") %in% names(meanCutoff)) ||
     !inherits(meanCutoff, c("numeric", "integer"))){
    stop("meanCutoff must be a numeric vector with names totalC, methyC, ratio")
  }
  if(missing(range)){
    ## Resolve the gene symbol (or alias) to an Entrez id and take its
    ## chromosomal location, padded 2kb on one side and 1kb on the other.
    ## Anchored sub(): the previous gsub(".db", ...) treated "." as a regex
    ## wildcard.
    org <- sub("\\.db$", "", deparse(substitute(orgdb)))
    eg <- mget(gene, get(paste0(org, "SYMBOL2EG")), ifnotfound = NA)
    if(is.na(eg[[1]][1])){
      eg <- mget(gene, get(paste0(org, "ALIAS2EG")), ifnotfound = NA)
    }
    if(is.na(eg[[1]][1])){
      stop("can not retrieve location info by ", gene)
    }
    eg <- eg[[1]][1]
    start <- mget(eg, get(paste0(org, "CHRLOC")))[[1]][1]
    end <- mget(eg, get(paste0(org, "CHRLOCEND")))[[1]][1]
    chr <- mget(eg, get(paste0(org, "CHR")))[[1]][1]
    ## CHRLOC encodes the strand as the sign of the position
    strand <- ifelse(start>0, "+", "-")
    range <- GRanges(chr, IRanges(abs(start)-2000, abs(end)+1000),
                     strand=strand)
    seqlevelsStyle(range) <- "UCSC"
  }
  stopifnot(inherits(data, c("GRanges", "RRBSeqDataSet")))
  if(is(data, "GRanges")){  # is() is S4-safe (was class(data)=="GRanges")
    mc <- mcols(data)
    total.C <- grepl("^totalC", colnames(mc))
    methy.C <- grepl("^methyC", colnames(mc))
    ratio <- grepl("ratio", colnames(mc))
    if(sum(total.C)<1 || sum(methy.C)<1 || sum(ratio)<1){
      stop("Input could be output of compareMethylation which will generate",
           "a GRanges object with metadata which colnames contains",
           "totalC, methyC and ratio.")
    }
    total.C <- as.data.frame(mc[, total.C, drop=FALSE])
    methy.C <- as.data.frame(mc[, methy.C, drop=FALSE])
    ratio <- as.data.frame(mc[, ratio, drop=FALSE])
    sampleNames <- gsub("^methyC.", "", colnames(methy.C))
  }else{ ## an RRBSeqDataSet: pull the count/ratio matrices via its accessors
    raw <- data
    data <- as(raw, "GRanges")
    total.C <- totalC(raw)
    methy.C <- methyC(raw)
    ratio <- ratio(raw)
    sampleNames <- sampleNames(raw)
  }
  ## Keep only sites whose row means exceed every cutoff
  keep <- rowMeans(methy.C) > meanCutoff['methyC'] &
    rowMeans(total.C) > meanCutoff['totalC'] &
    rowMeans(ratio) > meanCutoff['ratio']
  data <- reCenterPeaks(data[keep], width=1)
  total.C <- total.C[keep, , drop=FALSE]
  methy.C <- methy.C[keep, , drop=FALSE]
  ratio <- ratio[keep, , drop=FALSE]
  if(length(unique(sampleNames))!=ncol(methy.C)){
    stop("Can not get sample names")
  }
  ## Unevaluated dots, inspected/overridden before being handed to lolliplot
  args <- as.list(match.call(expand.dots=FALSE))$`...`
  if(length(args$type)!=1){
    type <- if(length(sampleNames)==1) "circle" else "pie.stack"
  }else{
    type <- args$type
  }
  ## Honour a user-supplied legend; the original left `legend` undefined in
  ## that case, which crashed at `args$legend <- legend` below.
  legend <- args$legend
  data.gr <- rep(data, length(sampleNames))
  ## one colour per sample, paired with white for the unmethylated pie slice
  color.set <- as.list(as.data.frame(rbind(rainbow(length(sampleNames)),
                                           "#FFFFFFFF"),
                                     stringsAsFactors=FALSE))
  names(color.set) <- sampleNames
  if(type %in% c("pie", "pie.stack")){
    type <- "pie.stack"
    unMethyC <- total.C - methy.C
    if(any(unMethyC < 0)){
      stop("Some total counts are smaller than methylated counts")
    }
    colnames(unMethyC) <- colnames(methy.C)
    ## per-sample two-column (methylated, unmethylated) count matrices
    mc <- lapply(as.list(as.data.frame(rbind(methy.C, unMethyC))),
                 function(.ele) matrix(.ele, ncol=2, byrow=FALSE))
    mcols(data.gr) <- do.call(rbind, mc)
    colnames(mcols(data.gr)) <- c("methylated", "unmethylated")
    data.gr$stack.factor <- rep(sampleNames, each=length(data))
    data.gr$color <- color.set[data.gr$stack.factor]
    legend <- list(labels=sampleNames, col="gray80",
                   fill=sapply(color.set, `[`, 1))
  }else{
    data.gr <- split(data.gr, rep(sampleNames, each=length(data)))
    data.gr <- mapply(function(.ele, id){
      mcols(.ele) <- methy.C[, id, drop=FALSE]
      .ele$color <- color.set[[id]][1]
      .ele
    }, data.gr, seq_along(data.gr))
  }
  ## get transcripts from ranges, only when the caller did not supply features.
  ## The original assigned `args$features <- features` unconditionally, which
  ## failed with "object 'features' not found" whenever features WERE supplied.
  if(length(args$features)==0){
    suppressMessages(trs <- geneModelFromTxdb(txdb, orgdb, gr=range))
    ## lapply (not sapply): sapply unwraps the result for a single transcript
    features <- lapply(trs, function(.ele) .ele$dat)
    features <- unlist(GRangesList(features))
    features$featureLayerID <- paste(features$symbol, features$transcript,
                                     sep=":")
    features$fill <- as.numeric(factor(features$transcript))
    features$height <- ifelse(grepl("utr", features$feature), 0.02, 0.04)
    args$features <- features
  }
  args$SNP.gr <- data.gr
  args$ranges <- range
  args$type <- type
  args$legend <- legend
  if(length(args$ylab)==0 && is(data.gr, "GRanges")) args$ylab <- "methylation"
  args <- as.list(args)
  do.call(lolliplot, args = args)
}
ecce800734040ab9f42831e9f9e55ea96dde411a
5b430612262f3de20da50ee86c29dc71d4477edb
/IDU_Incidence_Maps/app.R
8e6c329c84e8f15a5fb5c3e89bfc45a708b98398
[]
no_license
ErichDenk/Team_AMIE_670_Project
9d9ea7c788d5467a04992e65685c4a722287a636
1b7a8a79b8c803e2ffb1c95bb688aec01d3a5938
refs/heads/master
2020-04-20T17:12:48.138619
2019-04-30T00:56:42
2019-04-30T00:56:42
168,982,286
2
2
null
null
null
null
UTF-8
R
false
false
4,045
r
app.R
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/

# library() rather than require(): require() only warns (and returns FALSE)
# when a package is missing, so a missing dependency would surface later as a
# confusing error instead of failing fast here.
library(urbnmapr)
library(tidyverse)
library(shiny)
library(viridis)
library(here)

# Read the incidence data and keep only the columns needed for mapping.
incidenceData <- read.csv(here("Active-data-sets/incidenceDat.csv")) %>%
  select(., state_name = State, Year = Year1,
         NewDiag = "New.Diagnoses.State.Cases") %>%
  mutate(state_name = as.character(state_name))

# Join incidence figures onto the urbnmapr state polygons for plotting.
mapCreateData <- left_join(incidenceData, states, by = "state_name")

# Minimal map theme: hides axes/ticks, styles legend and background.
theme_map <- function(...) {
  theme_minimal() +
    theme(
      text = element_text(family = "Arial", color = "#22211d"),
      axis.line = element_blank(),
      axis.text.x = element_blank(),
      axis.text.y = element_blank(),
      axis.ticks = element_blank(),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      legend.title = element_text(size = 10),
      legend.text = element_text(size = 8),
      legend.position = "bottom",
      # panel.grid.minor = element_line(color = "#ebebe5", size = 0.2),
      panel.grid.major = element_line(color = "#ebebe5", size = 0.2),
      panel.grid.minor = element_blank(),
      plot.background = element_rect(fill = "#f5f5f2", color = NA),
      panel.background = element_rect(fill = "#f5f5f2", color = NA),
      legend.background = element_rect(fill = "#f5f5f2", color = NA),
      panel.border = element_blank(),
      ...
    )
}

# Define UI: title, a year drop-down, and the map output
ui <- fluidPage(

  # Application title
  titlePanel("HIV Incidence due to Injection Drug Use"),

  # Sidebar with a drop down list for which year to look at
  sidebarLayout(
    sidebarPanel(
      selectInput(inputId = "year",
                  choices = unique(incidenceData$Year),
                  label = "Select Year",
                  selected = 2008)
    ),

    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("map")
    )
  )
)

# Server: redraw the choropleth for the selected year
server <- function(input, output) {

  output$map <- renderPlot({
    # Keep only the rows for the selected year. NB the original piped the
    # filtered data into ggplot() alongside `data = states`; because `data`
    # was matched by name, the piped value fell through to `...` and the year
    # filter was silently ignored (all years were plotted).
    yearData <- mapCreateData %>%
      filter(Year == input$year)

    # first layer plots states
    ggplot(data = states, mapping = aes(long, lat, group = group)) +
      # center the map view on the US
      coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
      # black boarder and grey fill for all states
      geom_polygon(color = "black", fill = NA) +
      # outline for counties, I commented it out because it looked too busy
      # geom_polygon(data = counties, fill = NA, color = "white") +
      # adding the IDU as the density fill, for the selected year only
      geom_polygon(data = yearData, aes(fill = log(NewDiag))) +
      # change gradient for scale bar -- darker color = higher IDU density
      scale_fill_viridis(option = "magma", direction = -1,
                         name = "IDU Incidence",
                         guide = guide_colorbar(
                           direction = "horizontal",
                           barheight = unit(2, units = "mm"),
                           barwidth = unit(50, units = "mm"),
                           draw.ulim = FALSE,
                           title.position = 'top',
                           title.hjust = 0.5,
                           label.hjust = 0.5)) +
      # scale_fill_gradientn( colors = c("white", "blue"),
      #                       guide = guide_colorbar(title.position = "top"))+
      # re plot the black boarder lines
      geom_polygon(color = "black", fill = NA)
  })
}

# Run the application
shinyApp(ui = ui, server = server)
297db95253655b0ae42e632bd42b54717c3e7a26
573edac85effdda60291c96f568af4bcf36833a5
/man/compute_fixed_coef.Rd
8daacbf88cae558df14169b49067fd1c29ba83f5
[]
no_license
L-Ippel/SEMA
d71835566c17df707896bcd9ef32960c71b2c43a
1d0e3a48c855df704cad18c7ab6bb73d08bd4efa
refs/heads/master
2021-06-03T18:26:07.776301
2018-08-06T11:32:00
2018-08-06T11:32:00
38,366,963
3
0
null
null
null
null
UTF-8
R
false
true
824
rd
compute_fixed_coef.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Mstep.R \name{compute_fixed_coef} \alias{compute_fixed_coef} \title{compute_fixed_coef computes the coefficients of the fixed effects, see Raudenbush and Bryk (2002), Hierarchical Linear Models, 2nd edition, equation 14.10.} \usage{ compute_fixed_coef(x_inv, xy, t1) } \arguments{ \item{x_inv}{The inverted matrix of the fixed effects variables.} \item{xy}{The product of fixed effects variables and dependent variable.} \item{t1}{A vector with Complete Data Sufficient Statistics of the fixed effects.} } \value{ A vector with fixed effects coefficients. } \description{ compute_fixed_coef computes the coefficients of the fixed effects, see Raudenbush and Bryk (2002), Hierarchical Linear Models, 2nd edition, equation 14.10. }
c36e1d4889fdda0db66fcfbbe2a0b4d6425cba71
7bb21189354bf72b2e8aeeb9f0e4340e69ed2913
/man/plot.Planes.Rd
b57a6bc745e1a17eb4f26aa02f567fde81b733fc
[]
no_license
elvanceyhan/pcds
16371849188f98138933afd2e68a46167f674923
00331843a0670e7cd9a62b7bca70df06d4629212
refs/heads/master
2023-07-02T10:03:48.702073
2023-06-16T15:50:46
2023-06-16T15:50:46
218,353,699
0
0
null
null
null
null
UTF-8
R
false
true
1,621
rd
plot.Planes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ClassFunctions.R \name{plot.Planes} \alias{plot.Planes} \title{Plot a \code{Planes} \code{object}} \usage{ \method{plot}{Planes}( x, x.grid.size = 10, y.grid.size = 10, xlab = "x", ylab = "y", zlab = "z", phi = 40, theta = 40, ... ) } \arguments{ \item{x}{Object of class \code{Planes}.} \item{x.grid.size, y.grid.size}{the size of the grids for the \eqn{x} and \eqn{y} axes, default is 10 for both} \item{xlab, ylab, zlab}{Titles for the \eqn{x}, \eqn{y}, and \eqn{z} axes, respectively (default is \code{xlab="x"}, \code{ylab="y"}, and \code{zlab="z"}).} \item{theta, phi}{The angles defining the viewing direction, default is 40 for both. \code{theta} gives the azimuthal direction and \code{phi} the colatitude. see \code{\link[graphics]{persp}}.} \item{\dots}{Additional parameters for \code{plot}.} } \value{ None } \description{ Plots the plane together with the defining 3D \code{points}. } \examples{ \dontrun{ P<-c(1,10,3); Q<-c(1,1,3); C<-c(3,9,12) pts<-rbind(P,Q,C) xr<-range(pts[,1]); yr<-range(pts[,2]) xf<-(xr[2]-xr[1])*.1 #how far to go at the lower and upper ends in the x-coordinate yf<-(yr[2]-yr[1])*.1 #how far to go at the lower and upper ends in the y-coordinate x<-seq(xr[1]-xf,xr[2]+xf,l=5) #try also l=10, 20 or 100 y<-seq(yr[1]-yf,yr[2]+yf,l=5) #try also l=10, 20 or 100 plPQC<-Plane(P,Q,C,x,y) plPQC plot(plPQC,theta = 225, phi = 30, expand = 0.7, facets = FALSE, scale = TRUE) } } \seealso{ \code{\link{print.Planes}}, \code{\link{summary.Planes}}, and \code{\link{print.summary.Planes}} }
aa88882fa026ac3669c58c0342b8e2c12e9a75d2
fd84077ffbb3f1662c0d4d1c06116a55893b6201
/tests/testthat/test-main.R
8848e2182de860ecf4a2137092be322f7d496ffb
[]
no_license
hamedbh/popstats
c9e46849e83934129b0d6b98bbb4ebb8a8da05d0
18da0cd52e7dfe710f245cca4545d430c837f3da
refs/heads/master
2020-08-12T00:47:12.069260
2019-10-19T07:34:11
2019-10-19T07:34:11
214,659,002
0
0
null
null
null
null
UTF-8
R
false
false
1,447
r
test-main.R
context("Results with non-numeric values")

test_that("Character type throw errors", {
  expect_error(pop_var(letters))
  expect_error(pop_sd(letters))
})

test_that("Factor type gives warning", {
  expect_warning(pop_var(factor(1:5)))
  expect_warning(pop_sd(factor(1:5)))
})

# Capture the (warned) factor results once, so the NA checks below do not
# re-trigger the warnings inside the expectations.
fct_var_result <- suppressWarnings(pop_var(factor(1:5)))
fct_sd_result <- suppressWarnings(pop_sd(factor(1:5)))

test_that("Factor type returns NA", {
  expect_true(is.na(fct_var_result))
  expect_true(is.na(fct_sd_result))
})

context("Confirming results on multiple data structures")

# The same 1..10 values presented as a vector, a 2-column matrix and a
# 2-column data frame.
one_to_ten <- 1:10
one_to_ten_matrix <- matrix(one_to_ten, ncol = 2)
one_to_ten_df <- data.frame(x1 = one_to_ten, x2 = one_to_ten)

test_that("Variance results", {
  expect_equal(pop_var(one_to_ten), 8.25)
  expect_equal(pop_var(one_to_ten_matrix), 8.25)
  expect_equal(sapply(one_to_ten_df, pop_var), c(x1 = 8.25, x2 = 8.25))
})

# Population standard deviation of 1..10, computed from first principles.
expected_sd <- sqrt(sum((one_to_ten - mean(one_to_ten))^2) / length(one_to_ten))

test_that("Standard deviation results", {
  expect_equal(pop_sd(one_to_ten), expected_sd)
  expect_equal(pop_sd(one_to_ten_matrix), expected_sd)
  expect_equal(sapply(one_to_ten_df, pop_sd),
               c(x1 = expected_sd, x2 = expected_sd))
})
3fbfa16c51aa88b7ab28d930049c3f79c958c666
f30cc1c33978ca5a708a7e0a493403ea88550160
/R/hxsurf.R
a7f9cc045fb90c27b31bf819036434de4abf7d17
[]
no_license
natverse/nat
044384a04a17fd0c9d895e14979ce43e43a283ba
1d161fa463086a2d03e7db3d2a55cf4d653dcc1b
refs/heads/master
2023-08-30T21:34:36.623787
2023-08-25T07:23:44
2023-08-26T19:02:50
15,578,625
35
10
null
2023-01-28T19:03:03
2014-01-02T07:54:01
R
UTF-8
R
false
false
30,998
r
hxsurf.R
#' Read Amira surface (aka HxSurface or HyperSurface) files into hxsurf object
#'
#' @details Note that when \code{RegionChoice="both"} or
#' \code{RegionChoice=c("Inner", "Outer")} both polygons in inner and outer
#' regions will be added to named regions. To understand the significance of
#' this, consider two adjacent regions, A and B, with a shared surface. For
#' the polygons in both A and B, Amira will have a patch with (say)
#' InnerRegion A and OuterRegion B. This avoids duplication in the file.
#' However, it might be convenient to add these polygons to both regions when
#' we read them into R, so that regions A and B in our R object are both
#' closed surfaces. To achieve this when \code{RegionChoice="both"},
#' \code{read.hxsurf} adds these polygons to region B (as well as region A)
#' but swaps the order of the vertices defining the polygon to ensure that the
#' surface directionality is correct.
#'
#' As a rule of thumb, stick with \code{RegionChoice="both"}. If you get more
#' regions than you wanted, then try switching to \code{RegionChoice="Inner"}
#' or \code{RegionChoice="Outer"}.
#'
#' Note that the support for reading Amira's binary mesh format (HxSurface
#' binary) is less mature and in particular only a few multi region mesh files
#' have been tested. Finally there is no support to read meshes from the newer
#' "Amira Binary Surface format" although such files can be read into a list
#' using the \code{read.amiramesh} function.
#'
#' @param filename Character vector defining path to file
#' @param RegionNames Character vector specifying which regions should be read
#'   from file. Default value of \code{NULL} => all regions.
#' @param RegionChoice Whether the \emph{Inner} or \emph{Outer} material, or
#'   \emph{both} (default), should define the material of the patch. See
#'   details.
#' @param FallbackRegionCol Colour to set regions when no colour is defined
#' @param Verbose Print status messages during parsing when \code{TRUE}
#' @return A list with S3 class hxsurf with elements \itemize{
#'
#'   \item{Vertices}{ A data.frame with columns \code{X, Y, Z, PointNo}}
#'
#'   \item{Regions}{ A list with 3 column data.frames specifying triplets of
#'   vertices for each region (with reference to \code{PointNo} column in
#'   \code{Vertices} element)}
#'
#'   \item{RegionList}{ Character vector of region names (should match names
#'   of \code{Regions} element)}
#'
#'   \item{RegionColourList}{ Character vector specifying default colour to
#'   plot each region in R's \code{\link{rgb}} format}
#'
#'   }
#' @export
#' @seealso \code{\link{plot3d.hxsurf}, \link{rgb}}
#' @aliases hxsurf
#' @family amira
#' @family hxsurf
#' @examples
#' \dontrun{
#' read.hxsurf("my.surf", RegionChoice="both")
#' }
read.hxsurf<-function(filename,RegionNames=NULL,RegionChoice="both",
                      FallbackRegionCol="grey",Verbose=FALSE){
  # Check for header confirming file type
  firstLine=readLines(filename,n=1)
  if(!any(grepl("#\\s+hypersurface\\s+[0-9.]+\\s+ascii",firstLine,
                ignore.case=TRUE,perl=TRUE))){
    if(!any(grepl("#\\s+hypersurface\\s+[0-9.]+\\s+binary",firstLine,
                  ignore.case=TRUE,perl=TRUE))){
      stop(filename," does not appear to be an Amira HyperSurface file!")
    }
    # binary variant: delegate, wrapping any failure with a pointer to the
    # tracking issue since binary support is still limited
    res = tryCatch(
      read.hxsurf.bin(
        filename = filename,
        FallbackRegionCol = FallbackRegionCol,
        Verbose = Verbose
      ),
      error = function(e)
        stop(
          "Support for reading binary Amira HyperSurface is still limited.\n",
          "See https://github.com/natverse/nat/issues/429. Detailed error message",
          as.character(e)
        )
    )
    return(res)
  }
  initialcaps<-function(x) {substr(x,1,1)=toupper(substr(x,1,1)); x}
  RegionChoice=match.arg(initialcaps(RegionChoice),
                         c("Inner", "Outer", "Both"), several.ok = TRUE)
  if(RegionChoice[1]=="Both") RegionChoice=c("Inner", "Outer")
  t=readLines(filename)
  nLines=length(t)
  if(Verbose) cat(nLines,"lines of text to parse\n")
  # Find the start of the Vertices
  dataStart=grep("^\\s*Vertices\\s*",t)[1]
  if(Verbose) cat("Data start line =",dataStart,"\n")
  headerLines=t[seq(dataStart-1)]
  # pull whitespace-separated field `pos` from the first line matching fName
  getfield=function(fName,textLines=headerLines,pos=2)
    unlist(strsplit(trimws(textLines[grep(fName,textLines)]),"\\s+",
                    perl=TRUE))[pos]
  nVertices=as.numeric(getfield("Vertices",t[dataStart],2))
  if(Verbose) cat("nVertices =",nVertices,"\n")
  d=list()
  d$Vertices=read.table(filename,skip=dataStart,nrows=nVertices,
                        col.names=c("X","Y","Z"),
                        colClasses=rep("numeric",3))
  d$Regions <- list()
  d$Vertices$PointNo=seq(nrow(d$Vertices))
  if(Verbose) cat("Finished processing Vertices\n")
  # Now read in Triangles that define patches:
  linesSkipped=dataStart+nVertices-1
  remainingLines=t[(dataStart+nVertices):nLines]
  PatchDefLine=grep("^\\s*Patches\\s*",remainingLines,perl=TRUE)
  if(Verbose) cat("PatchDefLine =",PatchDefLine,"\n")
  nPatches=as.numeric(getfield("Patches",remainingLines[PatchDefLine],2))
  if(Verbose) cat("nPatches =",nPatches,"\n")
  PatchStarts=grep("^\\s*{",remainingLines[PatchDefLine:length(remainingLines)],
                   perl=TRUE)+PatchDefLine-1
  if(length(PatchStarts)>nPatches) PatchStarts=PatchStarts[seq_len(nPatches)]
  PatchEnds=grep("^\\s*}",remainingLines[PatchDefLine:length(remainingLines)],
                 perl=TRUE)+PatchDefLine-1
  if(length(PatchEnds)>nPatches) PatchEnds=PatchEnds[seq_len(nPatches)]
  TriangleDeflines<-grep("Triangles",remainingLines)
  if(length(TriangleDeflines)!=nPatches)
    stop("Incorrect number of Triangle definition lines in",filename,"\n")
  # seq_len (not 1:nPatches) so that nPatches == 0 iterates zero times
  for(i in seq_len(nPatches)){
    if(Verbose) cat("TriangleDefline =",TriangleDeflines[i],"\n")
    PatchHeader<-remainingLines[PatchStarts[i]:TriangleDeflines[i]]
    # remove any opening braces - these would cause a problem if on same line
    PatchHeader=sub("^\\s*\\{\\s*","",PatchHeader)
    # convert all whitespace to single spaces
    PatchHeader=gsub("\\s+"," ",PatchHeader)
    if(Verbose) cat("PatchHeader is",length(PatchHeader),"lines long\n")
    # note use of RegionChoice to switch naming between inner and outer
    for(RegChoice in RegionChoice) {
      RegionName=getfield(paste(RegChoice,"Region",sep=""),PatchHeader,2)
      nTriangles=as.numeric(getfield("Triangles",PatchHeader,2))
      if(nTriangles<0 || nTriangles>100000)
        stop("Bad triangle number: ", nTriangles)
      if(Verbose) cat("nTriangles =",nTriangles,"for patch =",i,"\n")
      # Check if we want to load in this region
      if( is.null(RegionNames) || RegionName%in%RegionNames ){
        # Ensure we do not try to add no triangles, or the exterior region
        if(nTriangles == 0 || RegionName == "Exterior") next
        thispatch=read.table(filename,skip=linesSkipped+TriangleDeflines[i],
                             nrows=nTriangles,quote='',colClasses='integer',
                             blank.lines.skip=FALSE,fill=FALSE,
                             comment.char="",col.names=c("V1","V2","V3"))
        if(getfield(paste(RegChoice,"Region",sep=""),PatchHeader,1) ==
           "OuterRegion") {
          # swap winding so the face normal points out of THIS region
          thispatch <- thispatch[, c(1,3,2)]
          if(Verbose) message("Permuting vertices for ", RegionName, "...")
          colnames(thispatch) <- c("V1","V2","V3")
        }
        # scan no quicker in these circs, problem is repeated file access
        # specifying text directly also does not help dues to very slow
        # textConnection
        # check if we have already loaded a patch in this name
        if(RegionName%in%names(d$Regions)){
          # add to the old patch
          if(Verbose) cat("Adding to patch name",RegionName,"\n")
          d[['Regions']][[RegionName]]=rbind(d[['Regions']][[RegionName]],
                                             thispatch)
        } else {
          # new patch
          if(Verbose) cat("Making new patch name",RegionName,"\n")
          d[['Regions']][[RegionName]]=thispatch
        }
      }
    }
  }
  d$RegionList=names(d$Regions)
  # Handle colours for regions
  d$RegionColourList <- vector(length=length(d$RegionList))
  closeBraces <- grep("}", headerLines)
  for(regionName in d$RegionList) {
    # Find section in headerLines corresponding to this region
    headerSecStart <- grep(paste0("^\\s*", regionName, "(\\s+\\{){0,1}"),
                           headerLines)[1]
    headerSecEnd <- closeBraces[closeBraces > headerSecStart][1]
    # Extract colour information
    colorLine <- grep("Color", headerLines[headerSecStart:headerSecEnd],
                      value=TRUE)
    if(length(colorLine) > 0) {
      rgbValues <- strsplit(regmatches(colorLine,
                                       gregexpr("[0-9]$|[0-9][^\\.]|[0-9]\\.[0-9]+",
                                                colorLine, perl=TRUE))[[1]],
                            " ")
      # clean up any trailing commas
      rgbValues <- gsub("[,}]","", rgbValues)
      color <- rgb(rgbValues[[1]], rgbValues[[2]], rgbValues[[3]])
    } else {
      color <- FallbackRegionCol
    }
    d$RegionColourList[which(d$RegionList == regionName)] <- color
  }
  class(d) <- c('hxsurf',class(d))
  return(d)
}

# Internal reader for the binary HxSurface variant. Collects the raw header
# lines, per-patch info lines and the big-endian float/int data blocks; the
# assembled list is either returned raw or handed to parse.hxsurf.bin.
read.hxsurf.bin <- function(filename, return.raw=FALSE,
                            FallbackRegionCol='grey', Verbose=FALSE) {
  con=file(filename, open='rb')
  on.exit(close(con))
  vertex_regex='^Vertices \\d+$'
  # read header
  h <- character()
  line <- readLines(con, n=1)
  while(!isTRUE(grepl(vertex_regex, line))) {
    h=c(h, line)
    line <- readLines(con, n=1)
  }
  params=.ParseAmirameshParameters(h)
  materials=names(params$Parameters$Materials)
  # read data blocks
  data_regex='^\\s*(\\w+)\\s+(\\d+)$'
  # returns a named integer (label -> count) or NA when line is not a
  # "<Label> <n>" data declaration
  parse_data_line <- function(line) {
    tryCatch({
      res=stringr::str_match(line, data_regex)
      n=suppressWarnings(as.integer(res[,3]))
      checkmate::assert_int(n)
      label=checkmate::assert_character(res[,2])
      names(n)=label
      n
    }, error=function(e) {NA_integer_})
  }
  data <- list(header=h, params=params)
  curpatch=NA_integer_
  while(TRUE) {
    if(length(line)<1) break
    if(is.finite(curpatch)) {
      if(length(data[['PatchInfo']])<curpatch)
        data[['PatchInfo']][[curpatch]]=line
      else
        data[['PatchInfo']][[curpatch]]=c(data[['PatchInfo']][[curpatch]],
                                          line)
    } else {
      data[['header']]=c(data[["header"]], line)
    }
    # is this a closing bracket at the end of a section
    firstchar=substr(trimws(line), 1, 1)
    if(isTRUE(firstchar=='}') && is.finite(curpatch))
      curpatch=curpatch+1
    n=parse_data_line(line)
    if(is.na(n) || n==0) {
      line <- readLines(con, 1)
      next
    }
    label=names(n)
    if(label=='Vertices') {
      chunk=readBin(con, what='numeric', n=n*3, size=4, endian = 'big')
      data[['Vertices']]=matrix(chunk, ncol=3, byrow = TRUE)
    } else if (label=='Triangles') {
      npatches=length(data[['Patches']])
      chunk=readBin(con, what='integer', n=n*3, size=4, endian = 'big')
      data[['Patches']][[npatches+1]]=matrix(chunk, ncol=3, byrow = TRUE)
    } else if(label=='Patches') {
      curpatch=1
      if(is.null(data[['Patches']])) data[['Patches']]=list()
      if(is.null(data[['PatchInfo']])) data[['PatchInfo']]=list()
    } else {
      stop("Error parsing binary hxsurf file!")
    }
    line <- readLines(con, 1)
  }
  if(return.raw) data
  else parse.hxsurf.bin(data, FallbackRegionCol=FallbackRegionCol,
                        Verbose=Verbose)
}

# FIXME: Ideally this would be harmonised with the code for read.hxsurf to
# avoid duplication
# Assemble the raw list produced by read.hxsurf.bin into an hxsurf object.
parse.hxsurf.bin <- function(data, FallbackRegionCol, Verbose) {
  materials=data$params$Parameters$Materials
  d=list()
  d[['Vertices']]=as.data.frame(xyzmatrix(data$Vertices))
  d[['Vertices']][,'PointNo']=seq_len(nrow(d[['Vertices']]))
  d$Regions=list()
  for(p in seq_along(data$Patches)) {
    thispatch=as.data.frame(data$Patches[[p]])
    # `pinfo` (was `pi`, which shadowed base::pi)
    pinfo=.ParseAmirameshParameters(data$PatchInfo[[p]])
    RegionName <- pinfo$InnerRegion
    if(RegionName%in%names(d$Regions)){
      # add to the old patch
      if(Verbose) cat("Adding to patch name",RegionName,"\n")
      d[['Regions']][[RegionName]]=rbind(d[['Regions']][[RegionName]],
                                         thispatch)
    } else {
      # new patch
      if(Verbose) cat("Making new patch name",RegionName,"\n")
      d[['Regions']][[RegionName]]=thispatch
    }
  }
  d$RegionList=names(d$Regions)
  d$RegionColourList <- vector(length=length(d$RegionList))
  for(regionName in d$RegionList) {
    rgbValues <- materials[[regionName]][['Color']]
    if(isTRUE(length(rgbValues)==3)) {
      color <- rgb(rgbValues[[1]], rgbValues[[2]], rgbValues[[3]])
    } else {
      color <- FallbackRegionCol
    }
    d$RegionColourList[which(d$RegionList == regionName)] <- color
  }
  class(d) <- c('hxsurf',class(d))
  return(d)
}

#' Write Amira surface (aka HxSurface or HyperSurface) into .surf file.
#'
#' @param surf hxsurf object to write to file.
#' @param filename character vector defining path to file.
#' @return \code{NULL} or integer status from \code{\link{close}}.
#' @export
#' @seealso \code{\link{plot3d.hxsurf}},\code{\link{read.hxsurf}},
#'   \code{\link{rgb}}
#' @family amira
#' @family hxsurf
write.hxsurf <- function(surf, filename) {
  # NOTE(review): mode "at" appends, so writing to an existing file will
  # concatenate surfaces -- confirm this is intended ("wt" would overwrite).
  fc <- file(filename, open="at")
  cat("# HyperSurface 0.1 ASCII\n\n", file=fc)
  cat("Parameters {\n", file=fc)
  cat(" Materials {\n", file=fc)
  cat(" Exterior {\n Id 1\n }\n", file=fc)
  regionData <- cbind(surf$RegionList, surf$RegionColourList)
  # seq_len: robust when there are zero regions (1:0 would misbehave)
  for (i in seq_len(nrow(regionData))) {
    cat(" ", regionData[i, 1], " {\n", sep="", file=fc)
    cat(" Id ", i+1, ",\n", sep="", file=fc)
    cat(" Color ",
        paste(zapsmall(col2rgb(regionData[i, 2])/255), collapse=" "),
        "\n", sep="", file=fc)
    cat(" }\n", file=fc)
  }
  cat(" }\n", file=fc)
  cat(" BoundaryIds {\n Name \"BoundaryConditions\"\n }\n", file=fc)
  cat("}\n\n", file=fc)
  cat("Vertices ", nrow(surf$Vertices), "\n", sep="", file=fc)
  apply(surf$Vertices[, 1:3], 1, function(x)
    cat(" ", sprintf(x[1], fmt="%.6f"), " ", sprintf(x[2], fmt="%.6f"),
        " ", sprintf(x[3], fmt="%.6f"), "\n", sep="", file=fc))
  cat("NBranchingPoints 0\nNVerticesOnCurves 0\nBoundaryCurves 0\n", file=fc)
  cat("Patches ", length(surf$Regions), "\n", sep="", file=fc)
  for(i in seq_along(surf$Regions)) {
    region <- surf$Regions[[i]]
    cat("{\n", file=fc)
    cat("InnerRegion ", names(surf$Regions[i]), "\n", sep="", file=fc)
    cat("OuterRegion Exterior\n", file=fc)
    cat("BoundaryId 0\n", file=fc)
    cat("BranchingPoints 0\n\n", file=fc)
    cat("Triangles ", nrow(region), "\n", sep="", file=fc)
    apply(region, 1, function(x)
      cat(" ", paste(x, collapse=" "), "\n", sep="", file=fc))
    cat("}\n", file=fc)
  }
  close(fc)
}

#' Plot amira surface objects in 3D using rgl
#'
#' @param x An hxsurf surface object
#' @param materials Character vector or
#' \code{\link[base]{regex}} naming
#' materials to plot (defaults to all materials in x). See
#' \code{\link{subset.hxsurf}}.
#' @param col Character vector specifying colors for the materials, or a
#' function that will be called with the number of materials to plot. When
#' \code{NULL} (default) will use material colours defined in Amira (if
#' available), or \code{rainbow} otherwise.
#' @param ... Additional arguments passed to \code{triangles3d}
#' @inheritParams plot3d.neuronlist
#' @export
#' @seealso \code{\link{read.hxsurf}}
#' @family hxsurf
#' @examples
#' plot3d(kcs20)
#' plot3d(MBL.surf)
#'
#' \donttest{
#' # plot only vertical lobe
#' nclear3d()
#' plot3d(MBL.surf, materials="VL", alpha=0.3)
#'
#' # everything except vertical lobe
#' nclear3d()
#' plot3d(MBL.surf, alpha=0.3,
#'   materials=grep("VL", MBL.surf$RegionList, value = TRUE, invert = TRUE))
#' }
plot3d.hxsurf<-function(x, materials=NULL, col=NULL, gridlines = FALSE, ...,
                        plotengine = getOption('nat.plotengine')){
  plotengine <- check_plotengine(plotengine)
  if (plotengine == 'rgl'){
    # skip so that the scene is updated only once per hxsurf object
    skip <- par3d(skipRedraw = TRUE)
    on.exit(par3d(skip))
  }
  if (plotengine == 'plotly') {
    psh <- openplotlyscene()$plotlyscenehandle
    params=list(...)
    opacity <- if("alpha" %in% names(params)) params$alpha else 1
  }
  materials=subset(x, subset = materials, rval='names')
  if(is.null(col)) {
    if(length(x$RegionColourList)){
      col=x$RegionColourList[match(materials,x$RegionList)]
    } else col=rainbow
  }
  if(is.function(col)) col=col(length(materials))
  if(is.factor(col)) col=rainbow(nlevels(col))[as.integer(col)]
  if(length(col)==1 && length(materials)>1) col=rep(col,length(materials))
  names(col)=materials
  rlist=list()
  for(mat in materials) {
    # get order triangle vertices
    tri=as.integer(t(x$Regions[[mat]]))
    if (plotengine == 'rgl'){
      rlist[[mat]]=triangles3d(x[['Vertices']]$X[tri],
                               x[['Vertices']]$Y[tri],
                               x[['Vertices']]$Z[tri],
                               col=col[mat], ...)
    } else {
      # plotly indices are 0-based, hence the -1 on i/j/k
      tmpx <- as.mesh3d.hxsurf(x, Regions = mat)
      psh <- psh %>%
        plotly::add_trace(x = tmpx$vb[1,], y = tmpx$vb[2,],
                          z = tmpx$vb[3,],
                          i = tmpx$it[1,]-1, j = tmpx$it[2,]-1,
                          k = tmpx$it[3,]-1,
                          type = "mesh3d",
                          opacity = opacity,
                          hovertext=mat,
                          hoverinfo="x+y+z+text",
                          facecolor = rep(col[mat], length(tmpx$it[1,])))
    }
  }
  if (plotengine == 'rgl'){
    invisible(rlist)
  } else {
    psh <- psh %>%
      plotly::layout(showlegend = FALSE,
                     scene=list(camera=.plotly3d$camera))
    if(gridlines == FALSE){
      psh <- psh %>%
        plotly::layout(scene = list(xaxis=.plotly3d$xaxis,
                                    yaxis=.plotly3d$yaxis,
                                    zaxis=.plotly3d$zaxis))
    }
    assign("plotlyscenehandle", psh, envir=.plotly3d)
    psh
  }
}

#' Convert an object to an rgl mesh3d
#'
#' Note that this provides a link to the Rvcg package
#' @param x Object to convert to mesh3d
#' @param ... Additional arguments for methods
#' @param Regions Character vector or regions to select from \code{hxsurf}
#'   object
#' @param material rgl materials such as \code{color}
#' @param drop Whether to drop unused vertices (default TRUE)
#' @export
#' @rdname as.mesh3d
#' @seealso \code{\link[rgl]{as.mesh3d}}, \code{\link[rgl]{tmesh3d}},
#'   \code{\link{as.hxsurf}}, \code{\link{read.hxsurf}}
#' @family hxsurf
as.mesh3d.hxsurf<-function(x, Regions=NULL, material=NULL, drop=TRUE, ...){
  if(is.null(Regions)) {
    Regions=x$RegionList
  }
  x=subset(x, Regions, drop=drop)
  if(length(Regions)==1 && is.null(material)){
    # find colour
    material=list(color=x$RegionColourList[match(Regions,x$RegionList)])
  }
  verts=t(data.matrix(x$Vertices[,1:3]))
  inds=t(data.matrix(do.call(rbind, x$Regions)))
  tmesh3d(vertices=verts, indices=inds, homogeneous = FALSE,
          material = material, ...)
}

#' @description \code{as.mesh3d.boundingbox} converts a nat
#'   \code{\link{boundingbox}} object into an rgl compatible \code{mesh3d}
#'   object.
#' @rdname as.mesh3d
#' @export
#' @examples
#' bb=boundingbox(kcs20)
#' mbb=as.mesh3d(bb)
#' \donttest{
#' plot3d(kcs20)
#' # simple plot
#' plot3d(bb)
#' shade3d(mbb, col='red', alpha=0.3)
#' }
as.mesh3d.boundingbox <- function(x, ...) {
  # scale a unit cube to half the box extent, then translate to its centre
  centroid=colMeans(x)
  size=diff(x)/2
  mat=scaleMatrix(size[1], size[2], size[3]) %*%
    translationMatrix(centroid[1], centroid[2], centroid[3])
  cube3d(mat)
}

#' Convert an object to a nat hxsurf object
#'
#' @details \code{hxsurf} objects are based on the format of Amira's surface
#'   objects (see \code{\link{read.hxsurf}}). They have the ability to include
#'   multiple distinct regions. However, at the moment the only method that we
#'   provide converts \code{mesh3d} objects, which can only include one
#'   region.
#' @param x A surface object
#' @param ... Additional arguments passed to methods
#'
#' @return A new surface object of class \code{hxsurf} (see
#'   \code{\link{read.hxsurf}}) for details.
#' @export
#' @family hxsurf
#' @seealso \code{\link{as.mesh3d}}
#' @examples
#' tet=tetrahedron3d(col='red')
#' teth=as.hxsurf(tet)
#' \donttest{
#' plot3d(teth)
#' }
as.hxsurf <- function(x, ...) UseMethod('as.hxsurf')

#' @param region The default name for the surface region
#' @param col The surface colour (default value of NULL implies the colour
#'   specified in mesh3d object or \code{grey} when the \code{mesh3d} object
#'   has no colour.)
#' @export
#' @rdname as.hxsurf
as.hxsurf.mesh3d <- function(x, region="Interior", col=NULL, ...) {
  if (is.null(x$it))
    stop("This method only works for triangular mesh3d objects!")
  h=list()
  h$Vertices=data.frame(xyzmatrix(x))
  colnames(h$Vertices)=c("X","Y","Z")
  # seq_len (not 1:nrow) so an empty mesh yields zero PointNos, not c(1, 0)
  h$Vertices$PointNo=seq_len(nrow(h$Vertices))
  h$Regions[[region]]=data.frame(t(x$it))
  colnames(h$Regions[[region]])=c("V1","V2","V3")
  h$RegionList=names(h$Regions)
  if(is.null(col)) col=x$material$col
  h$RegionColourList <- if(!is.null(col)) col else 'grey'
  class(h)=c("hxsurf","list")
  h
}

#' Subset hxsurf object to specified regions
#'
#' @param x A dotprops object
#' @param subset Character vector specifying regions to keep. Interpreted as
#'   \code{\link[base]{regex}} if of length 1 and no fixed match.
#' @param drop Whether to drop unused vertices after subsetting (default:
#'   \code{TRUE})
#' @param rval Whether to return a new \code{hxsurf} object or just the names
#'   of the matching regions
#' @param ... Additional parameters (currently ignored)
#' @return subsetted hxsurf object
#' @export
#' @family hxsurf
#' @examples
#' # plot only vertical lobe
#' vertical_lobe=subset(MBL.surf, "VL")
#' \donttest{
#' plot3d(vertical_lobe, alpha=0.3)
#' plot3d(kcs20)
#'
#' # there is also a shortcut for this
#' nclear3d()
#' plot3d(MBL.surf, subset = "VL", alpha=0.3)
#' }
subset.hxsurf<-function(x, subset=NULL, drop=TRUE,
                        rval=c("hxsurf","names"), ...){
  rval=match.arg(rval)
  if(!is.null(subset)){
    tokeep=integer(0)
    if(is.character(subset)){
      tokeep=match(subset,x$RegionList)
      if(is.na(tokeep[1]) && length(subset)==1){
        # try as regex
        tokeep=grep(subset,x$RegionList)
      }
    }
    if(!length(tokeep) || any(is.na(tokeep)))
      stop("Invalid subset! See ?subset.hxsurf")
    if(rval=='names') return(x$RegionList[tokeep])
    x$Regions=x$Regions[tokeep]
    x$RegionList=x$RegionList[tokeep]
    x$RegionColourList=x$RegionColourList[tokeep]
  } else if(rval=='names') return(x$RegionList)
  if(drop){
    # see if we need to drop any vertices
    vertstokeep=sort(unique(unlist(x$Regions)))
    # a vector where each position is the old vertex id and the value is the
    # new one i.e. newid=vert_table[oldid]
    vert_table=match(seq_len(nrow(x$Vertices)), vertstokeep)
    # convert all vertex ids from old to new sequence
    for(r in x$RegionList){
      for(i in seq_len(ncol(x$Regions[[r]]))){
        x$Regions[[r]][[i]]=vert_table[x$Regions[[r]][[i]]]
      }
    }
    # drop unused vertices
    x$Vertices=x$Vertices[vertstokeep, ]
  }
  x
}

#' Subset methods for different nat objects
#'
#' These methods enable subsets of some nat objects including neurons and
#' neuronlists to be obtained. See the help for each individual method for
#' details.
#'
#' @name subset
#' @seealso \code{\link{subset.neuron}}, \code{\link{subset.dotprops}},
#'   \code{\link{subset.hxsurf}}, \code{\link{subset.neuronlist}}
NULL

#' Find which points of an object are inside a surface
#'
#' @details Note that \code{hxsurf} surface objects will be converted to
#' \code{mesh3d} before being passed to \code{Rvcg::vcgClostKD}, so if you are
#' testing repeatedly against the same surface, it may make sense to
#' pre-convert.
#'
#' \code{pointsinside} depends on the face normals for each face pointing out
#' of the object (see example). The face normals are defined by the order of
#' the three vertices making up a triangular face. You can flip the face
#' normal for a face by permuting the vertices (i.e. 1,2,3 -> 1,3,2). If you
#' find for a given surface that points are outside when you expect them to be
#' inside then the face normals are probably all the wrong way round. You can
#' invert them yourself or use the \code{Morpho::invertFaces} function to fix
#' this.
#'
#' The \code{rval} argument determines the return value. These options should
#' be fairly clear, but the difference between \code{logical} and
#' \code{consistent_logical} needs some explanation. The \code{logical} method
#' now does a pre-test to remove any points that are not in the 3D bounding
#' box (cuboid) enclosing the surf object.
This often results in a significant #' speed-up by rejecting distant points and has the additional benefit of #' rejecting distant points that sometimes are erroneously declared inside the #' mesh (see below). Regrettably it is not yet possible to extend this #' approach when distances are being returned, which means there will be a #' discrepancy between the results of \code{rval="logical"} and looking for #' points with distance >=0. If you want to ensure consistency between these #' approaches, use \code{rval="consistent_logical"}. #' #' If you find that some points but not all points are not behaving as you #' would expect, then it may be that some faces are not coherently oriented. #' The \code{Rvcg::\link[Rvcg]{vcgClean}} function can sometimes be used to #' correct the orientation of the faces. Fixing more problematic cases may be #' possible by generating a new surface using #' \code{alphashape3d::\link[alphashape3d]{ashape3d}} (see examples). #' #' @param x an object with 3D points. #' @param surf The reference surface - either a \code{mesh3d} object or any #' object that can be converted using \code{as.mesh3d} including \code{hxsurf} #' and \code{ashape3d} objects. #' @param ... additional arguments for methods, eventually passed to #' \code{\link{as.mesh3d}}. 
#' @export
#' @examples
#' # check if the vertices in these neurons are inside the mushroom body calyx
#' # surface object
#' inout=pointsinside(kcs20, surf=subset(MBL.surf, "MB_CA_L"))
#' table(inout)
#' # you can also check if points are inside a bounding box
#' mbcalbb=boundingbox(subset(MBL.surf, "MB_CA_L"))
#' inout2=pointsinside(kcs20, mbcalbb)
#' # compare those two
#' table(inout, inout2)
#' pts=xyzmatrix(kcs20)
#' # nb that colour expression maps combinations of two logicals onto 1:4
#' plot(pts[,1:2], col=1+inout+inout2*2)
#' # the colours are defined by
#' palette()[1:4]
#'
#' # be a bit more lenient and include points less than 5 microns from surface
#' MBCAL=subset(MBL.surf, "MB_CA_L")
#' inout5=pointsinside(kcs20, surf=MBCAL, rval='distance') > -5
#' table(inout5)
#' \donttest{
#' # show which points are in or out
#' # Hmm seems like there are a few red points in the vertical lobe
#' # that are well outside the calyx
#' points3d(xyzmatrix(kcs20), col=ifelse(inout5, 'red', 'black'))
#' plot3d(MBL.surf, alpha=.3)
#'
#' # Let's try to make an alphashape for the mesh to clean it up
#' library(alphashape3d)
#' MBCAL.as=ashape3d(xyzmatrix(MBCAL), alpha = 10)
#' # Plotting the points, we can see that is much better behaved
#' points3d(xyzmatrix(kcs20),
#'   col=ifelse(pointsinside(kcs20, MBCAL.as), 'red', 'black'))
#' }
#'
#' \dontrun{
#' # Show the face normals for a surface
#' if(require('Morpho')) {
#'   # convert to a mesh3d object used by rgl and Morpho package
#'   MBCAL.mesh=as.mesh3d(subset(MBL.surf, "MB_CA_L"))
#'   fn=facenormals(MBCAL.mesh)
#'   wire3d(MBCAL.mesh)
#'   # show that the normals point out of the object
#'   plotNormals(fn, long=5, col='red')
#'
#'   # invert the faces of the mesh and show that normals point in
#'   MBCAL.inv=invertFaces(MBCAL.mesh)
#'   plotNormals(facenormals(MBCAL.inv), long=5, col='cyan')
#' }
#' }
# S3 generic: dispatches on the class of `x` (the object carrying 3D points);
# the default method below handles the actual inside/outside computation.
pointsinside<-function(x, surf, ...) UseMethod('pointsinside')
#' @export
#' @param rval what to return.
#' @return A vector of logical values or distances (positive inside, negative
#'   outside) equal to the number of points in x or the \code{mesh3d} object
#'   returned by \code{Rvcg::vcgClostKD}.
#' @rdname pointsinside
pointsinside.default<-function(x, surf, ..., rval=c('logical','distance',
                                                    'mesh3d', 'consistent_logical')) {
  rval=match.arg(rval)
  if(rval=='logical') {
    # use optimised contains_points approach (bounding-box pre-filter)
    return(contains_points(surf, x, ...))
  }
  # A bare bounding box has no mesh to measure distances against
  if(inherits(surf, "boundingbox")) {
    stop("Only logical return values are currently possible ",
         "with boundingbox objects!")
  }
  if(!requireNamespace('Rvcg', quietly = TRUE))
    stop("Please install suggested library Rvcg to use pointsinside")
  if(!inherits(surf,'mesh3d')) {
    surf=as.mesh3d(surf, ...)
  }
  pts=xyzmatrix(x)
  # Signed closest-point query: $quality holds the signed distance per point
  # (>= 0 means inside when face normals point outwards)
  rmesh=Rvcg::vcgClostKD(pts, surf, sign = TRUE)
  switch(rval,
         consistent_logical = rmesh$quality >= 0,
         distance = rmesh$quality,
         mesh3d = rmesh
  )
}

# Internal generic: cheap logical inside/outside test, dispatched on the
# class of the *surface* object (note reversed argument order vs pointsinside).
contains_points <- function(obj, points, ...) UseMethod("contains_points")

# Axis-aligned box test: obj is a 2 x 3 matrix of (min, max) per axis.
contains_points.boundingbox <- function(obj, points, ...) {
  xyz=xyzmatrix(points)
  xyz[,1] >= obj[1,1] & xyz[,2] >= obj[1,2] & xyz[,3] >= obj[1,3] &
    xyz[,1] <= obj[2,1] & xyz[,2] <= obj[2,2] & xyz[,3] <= obj[2,3]
}

# Mesh test: reject points outside the mesh's bounding box first, then run the
# (expensive) signed-distance test only on the survivors.
contains_points.mesh3d <- function(obj, points, ...) {
  xyz=xyzmatrix(points)
  inbb=contains_points(boundingbox(obj), xyz, ...)
  # nb must call the signed-distance path ('consistent_logical') directly to
  # avoid infinite recursion back into contains_points via rval='logical'
  iosurf=pointsinside(xyz[inbb, , drop=FALSE], surf = obj, ...,
                      rval='consistent_logical')
  res=inbb
  res[inbb]=iosurf
  res
}

contains_points.hxsurf<-contains_points.mesh3d

contains_points.ashape3d<-function(obj, points, ...) {
  alphashape3d::inashape3d(obj, points=xyzmatrix(points), ...)
}

# Concatenate two HyperSurface objects
#
# @param x hxsurf
# @param y another hxsurf
# @return new hxsurf
# @examples
# h1 = as.hxsurf(icosahedron3d(), 'a')
# h2 = as.hxsurf(tetrahedron3d()+1, 'b')
# h3=c(h1, h2)
concat_hxsurfs <- function(x, y) {
  nx <- x
  nx$Vertices <- rbind(x$Vertices, y$Vertices)
  # Renumber column 4 (PointNo) so vertex ids stay 1..n after the rbind.
  # seq_len is safe even for an (unlikely) zero-row vertex table, where
  # 1:length(...) would yield c(1, 0).
  nx$Vertices[,4] <- seq_len(nrow(nx$Vertices))
  # y's triangles index into y's vertex table, so offset them past x's vertices
  for (reg in y$RegionList) {
    # NOTE(review): a region name present in both x and y is silently
    # overwritten by y's triangles here while RegionList gains a duplicate
    # entry -- confirm whether duplicate region names can occur in practice
    nx$Regions[[reg]] <- nrow(x$Vertices) + y$Regions[[reg]]
  }
  nx$RegionList <- c(x$RegionList, y$RegionList)
  nx$RegionColourList <- c(x$RegionColourList, y$RegionColourList)
  nx
}

#' Concatenate HyperSurface objects
#'
#' @param ... multiple hxsurf objects
#' @return new hxsurf
#' @export
#' @rdname hxsurf-ops
#' @examples
#' h1 = as.hxsurf(icosahedron3d(), 'a')
#' h2 = as.hxsurf(tetrahedron3d()+1, 'b')
#' h3 = as.hxsurf(icosahedron3d()+3, 'c')
#' hc = c(h1, h2, h3)
`c.hxsurf` <- function(...) {
  items <- list(...)
  if (length(items) == 1) {
    return(items[[1]])
  } else {
    # Fold pairwise concatenation over the remaining surfaces
    hxs <- items[[1]]
    for (i in seq_along(items)[-1]) {
      hxs <- concat_hxsurfs(hxs, items[[i]])
    }
    hxs
  }
}
3fb514477eb39cc62fe0edacda199229c82097fd
efeba9f5aff2e7afbf96a57e0baf62a8fb1a3b94
/Part2/Stage3-Structured data/ex.3-4/practice1.R
6462171e7e8b8e1ec01b715f0310cf45c653e86f
[]
no_license
psm9619/R_Data_Analysis
b1db04295607b5b0811eb2151ce5378a812b2aa3
b6b8186a582174533ab41a68aeab77bdcf0ea854
refs/heads/master
2020-05-29T13:27:26.350660
2019-10-10T01:07:53
2019-10-10T01:07:53
189,161,472
0
0
null
null
null
null
UTF-8
R
false
false
966
r
practice1.R
# Practice script: insurance claim counts by year and care-institution type,
# 2001-2013. Loads two CSV exports, rescales counts to units of 100,000 and
# draws one line per institution type on a shared axis.
library(reshape)
library(ggplot2)
library(dplyr)

data1 <- read.csv("연도별요양기관별보험청구건수_2001_2013.csv")
data2 <- read.csv("연도별요양기관별보험청구건수_2001_2013_세로.csv")
data2

# Promote the first column (year labels) to row names, then drop it.
row.names(data2) <- data2[, 1]
data2
data2[1] <- NULL

# Rescale every column from raw counts to hundreds of thousands.
func <- function(x) {
  x / 100000
}
data3 <- apply(data2, 2, func)

# Base plot drawn from the first column only; the remaining series are
# layered on with lines() in the loop below (so column 1 is drawn twice).
plot(data3[, 1],
     xlab = "", ylab = "", ylim = c(0, 6000), axes = FALSE,
     col = "violet", type = "o", lwd = 2,
     main = paste("연도별 요양 기관별 보험청구 건수 (단위: 십만건)", "\n",
                  "출처:건강보험심사평가원"))
axis(1, at = 1:10, label = row.names(data3), las = 2)
axis(2, las = 1)

# NOTE(review): 9 colours for a 10-iteration loop -- col[10] is NA, and the
# legend labels are the colour names themselves; kept as-is intentionally.
col <- c("red", "orange", "yellow", "green", "blue", "purple", "brown",
         "grey", "lightblue")
for (i in 1:10) {
  lines(data3[, i], col = col[i], type = "o", lwd = 2)
}

# Light dotted grid plus a legend in the top-left corner.
abline(h = seq(0, 6000, 500), v = seq(1, 100, 1), lty = 3, lwd = 0.2)
legend(1, 6000, col, cex = 0.8, col = col, lty = 1, lwd = 2, bg = 'white')
eeb17a82aa5df82b78992448ae73539c028732b9
1a6f7dc7e39fd02f390ad6f058874367a1119742
/R/occurrencesLessThan.R
b42a91d3a053a45b53954de76e827d3f496af64a
[]
no_license
cran/inverseRegex
f881d95cec7bdcd073a3779f876dec0dc78657d6
69f0cd3ec678a6babd4878ffc3e73ceb0f2e37b6
refs/heads/master
2022-11-09T02:02:21.757705
2022-10-23T15:25:07
2022-10-23T15:25:07
210,402,220
0
0
null
null
null
null
UTF-8
R
false
false
5,210
r
occurrencesLessThan.R
##' Identifies Infrequent inverseRegex Patterns in an R Object.
##'
##' Calls \code{inverseRegex} on the input object and identifies values that
##' occur infrequently.
##'
##' @param x Object to analyse for infrequent regex patterns.
##' @param fraction Fraction of the R object size; regex patterns that occur less
##' (or equal) often than this will be identified. For a vector this fraction will
##' be multiplied by the length of the object; for a matrix it will be multiplied by
##' the total number of entries; and for a data frame or tibble it will be multiplied
##' by the number of rows. Defaults to \code{0.05}.
##' @param n Alternative to the \code{fraction} argument which allows a literal
##' number of occurrences to be searched for. Defaults to NULL, in which case
##' \code{fraction} will be used.
##' @param ... Other arguments to be passed to \code{inverseRegex}.
##'
##' @return A collection of logical values with \code{TRUE} indicating entries with
##' an infrequent regex pattern. The class of the return value will depend on the
##' input object; matrices, data frames, and tibbles will be returned in kind; all
##' others are returned as vectors.
##'
##' @details This function is essentially a wrapper around calling \code{table()} on
##' the return value of \code{inverseRegex}. It can be used to identify the indices
##' of values that consist of a regex pattern different to others in the R object.
##'
##' @note NA values are not considered and will need to be identified separately.
##'
##' @examples
##' occurrencesLessThan(c(LETTERS, 1))
##'
##' x <- iris
##' x$Species <- as.character(x$Species)
##' x[27, 'Species'] <- 'set0sa'
##' apply(occurrencesLessThan(x), 2, which)
##'
##' @seealso inverseRegex, regex
##'
##' @author Jasper Watson
##'
##' @export
##'
##
occurrencesLessThan <- function(x, fraction = 0.05, n = NULL, ... ){

    UseMethod('occurrencesLessThan')

}

##' @export
occurrencesLessThan.default <- function(...)
    stop('Input class not supported')

## Shared worker for all atomic-vector-like methods: marks each element whose
## inverseRegex pattern occurs at most n times (n defaulting to
## fraction * length(x)).
vectorOption <- function(x, fraction = 0.05, n = NULL, ...){

    out <- rep(FALSE, length(x))

    if (is.null(n))
        n <- fraction * length(x)

    y <- inverseRegex(x, ...)

    tab <- table(y)

    tmp <- y %in% names(tab)[tab <= n]

    ## Guard against zero-length input, where tmp would be logical(0)
    if (length(tmp) > 0)
        out <- tmp

    out

}

##' @export
occurrencesLessThan.character <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.logical <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.integer <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.numeric <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.Date <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.POSIXct <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.factor <- function(x, fraction = 0.05, n = NULL, ...)
    vectorOption(x, fraction, n, ...)

##' @export
occurrencesLessThan.matrix <- function(x, fraction = 0.05, n = NULL, ...){

    out <- matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x))

    ## For a matrix the threshold scales with the total number of entries
    if (is.null(n))
        n <- nrow(x) * ncol(x) * fraction

    y <- inverseRegex(as.vector(x), ...)

    tab <- table(y)

    tmp <- y %in% names(tab)[tab <= n]

    if (length(tmp) > 0)
        out[] <- tmp

    out

}

##' @export
occurrencesLessThan.data.frame <- function(x, fraction = 0.05, n = NULL, ...){

    ## For data frames the threshold scales with the number of rows and each
    ## column is analysed independently
    if (is.null(n))
        n <- nrow(x) * fraction

    out <- as.data.frame(matrix(FALSE, nrow = nrow(x), ncol = ncol(x),
                                dimnames = dimnames(x)),
                         stringsAsFactors = FALSE)

    ## seq_len() rather than 1:ncol(x) so a zero-column frame skips the loop
    ## instead of iterating over c(1, 0)
    for (jj in seq_len(ncol(x)) ){

        y <- as.vector(unlist(inverseRegex(x[, jj, drop = FALSE], ...)))

        tab <- table(y)

        tmp <- y %in% names(tab)[tab <= n]

        if (length(tmp) > 0)
            out[, jj] <- tmp

    }

    out

}

##' @export
occurrencesLessThan.tbl_df <- function(x, fraction = 0.05, n = NULL, ...){

    if (!requireNamespace('tibble', quietly = TRUE))
        stop('Package tibble not available. Install or provide input as a data.frame.')

    if (is.null(n))
        n <- nrow(x) * fraction

    ## Note: as_tibble() takes no stringsAsFactors argument (and the matrix is
    ## logical anyway), so none is passed here.
    out <- tibble::as_tibble(matrix(FALSE, nrow = nrow(x), ncol = ncol(x),
                                    dimnames = dimnames(x)))

    ## seq_len() rather than 1:ncol(x) so a zero-column tibble skips the loop
    for (jj in seq_len(ncol(x)) ){

        y <- as.vector(unlist(inverseRegex(x[, jj, drop = FALSE], ...)))

        tab <- table(y)

        tmp <- y %in% names(tab)[tab <= n]

        if (length(tmp) > 0)
            out[, jj] <- tmp

    }

    out

}
1e8b1d2d9d3a37fdeb392411f0762d0adf265806
908b54c4546885f4a165606117b3f1e06d2bc1dc
/tests/testthat/test_z_transform.R
c3daa8c238d7c77dc0b3f2b0cc8006f0a822bd02
[ "MIT" ]
permissive
jeremymcrae/cifer
406de54640189e4d0135bceecedf06ed7a59915e
328114b73c76183d98e7a222bbcb281b66ad9de3
refs/heads/master
2021-01-17T07:18:46.317264
2016-07-08T12:52:45
2016-07-08T12:52:45
23,546,083
0
0
null
null
null
null
UTF-8
R
false
false
1,516
r
test_z_transform.R
# Unit tests for cifer's log2-ratio Z-transformation helper.
library(cifer)
library(testthat)

context("Z-transform data")

test_that("get_l2r_z_scores output is correct", {
    # Reference population of two samples whose Z-transformed means sit near
    # -1/sqrt(2) and +1/sqrt(2); trio members with identical probe data should
    # all land close to 2.12 standard deviations above the population.
    reference <- data.frame(a = c(1, 2, 3, 4, 5), b = c(3, 4, 5, 6, 7))
    trio_probes <- c(5, 6, 7, 8, 9)
    scores <- get_l2r_z_scores(mom_data = trio_probes, dad_data = trio_probes,
        child_data = trio_probes, reference)

    # The expected values have ~22 significant figures, so compare against
    # truncated targets with a loose tolerance rather than exact equality.
    expect_true(scores$population[1] - -0.707 < 0.01)
    expect_true(scores$population[2] - 0.707 < 0.01)
    expect_true(scores$dad - 2.12 < 0.01)
    expect_true(scores$mom - 2.12 < 0.01)
    expect_true(scores$child - 2.12 < 0.01)

    # A parent lacking probe data should come back as NA.
    scores <- get_l2r_z_scores(mom_data = NULL, dad_data = trio_probes,
        child_data = trio_probes, reference)
    expect_identical(scores$mom, NA)

    # The population may also be supplied as a plain vector; the transformed
    # values should then be evenly spaced multiples of one standardised step.
    scores <- get_l2r_z_scores(mom_data = NULL, dad_data = trio_probes,
        child_data = trio_probes, population = c(1, 2, 3, 4, 5))
    step <- 0.632455532033676
    expect_equal(scores$population, c(-2 * step, -step, 0, step, 2 * step))
})