content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trait_function.R \name{get.fb.param} \alias{get.fb.param} \title{Function to obtain parameter from fieldbook} \usage{ get.fb.param(fp, sheet, param) } \arguments{ \item{fp}{fieldbook path} \item{sheet}{fieldbook's sheet} \item{param}{Parameters} } \description{ This function gets parameters or values from a fieldbook Excel file. It works by scraping the Excel file. }
/man/get.fb.param.Rd
permissive
CIPTOOLS/fbcheck
R
false
true
435
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trait_function.R \name{get.fb.param} \alias{get.fb.param} \title{Function to obtain parameter from fieldbook} \usage{ get.fb.param(fp, sheet, param) } \arguments{ \item{fp}{fieldbook path} \item{sheet}{fieldbook's sheet} \item{param}{Parameters} } \description{ This function gets parameters or values from fieldbook excel file. Do an excel scrapping. }
# 02_matching_reordering
# Tutorial script: the %in% operator, index-based reordering, and match().

source('script/Intro-to-R/Source_Intro_to_R.R')

rpkm_data <- read.csv(file = file.path(dataDir, "counts.rpkm"))
metadata <- read.csv(file = file.path(dataDir, "mouse_exp_design.csv"))

head(rpkm_data)
ncol(rpkm_data)
nrow(metadata)

### 01 The `%in%` operator ----
A <- c(1, 3, 5, 7, 9, 11)   # odd numbers
B <- c(2, 4, 6, 8, 10, 12)  # even numbers
A %in% B  # is each element of A present anywhere in B?

A <- c(1, 3, 5, 7, 9, 11)  # odd numbers
B <- c(2, 4, 6, 8, 1, 5)   # now with some of A's odd numbers added
A %in% B
intersection <- A %in% B
intersection
A[intersection]        # logical subsetting: keep the shared values
any(A %in% B)
all(A %in% B)

A <- c(10, 20, 30, 40, 50)
B <- c(50, 40, 30, 20, 10)  # same numbers, reversed order
A %in% B   # membership ignores position ...
A == B     # ... whereas == compares position by position
all(A == B)  # all() checks for a perfect positional match

x <- rownames(metadata)
y <- colnames(rpkm_data)
all(x %in% y)
all(rownames(metadata) %in% colnames(rpkm_data))
all(rownames(metadata) == colnames(rpkm_data))

### 02 Reordering data using indices ----
teaching_team <- c("Jihe", "Mary", "Meeta", "Radhika")
teaching_team[c(2, 4)]  # extract a subset of values
teaching_team
teaching_team[c(4, 2)]  # extract AND reorder at the same time
reorder_teach <- teaching_team[c(4, 2, 1, 3)]  # keep the reordered result
reorder_teach

### 03 The `match` function ----
first <- c("A", "B", "C", "D", "E")
second <- c("B", "D", "E", "A", "C")  # same letters, different order
match(first, second)
# Each returned number is the index in `second` where that element of
# `first` was found.
reorder_idx <- match(first, second)  # indices that reorder `second` like `first`
second[reorder_idx]
second_reordered <- second[reorder_idx]

first <- c("A", "B", "C", "D", "E")
second <- c("D", "B", "A")  # some values removed
match(first, second)  # unmatched entries come back as NA
# The nomatch argument controls the value used for unmatched entries.
match(first, second, nomatch = 0)
# When a value occurs more than once, only the FIRST position is reported.
second2 <- c("D", "B", "A", "D")
match(first, second2)

### 04 Reordering genomic data using `match()` ----
rownames(metadata)
colnames(rpkm_data)
genomic_idx <- match(rownames(metadata), colnames(rpkm_data))
genomic_idx
rpkm_ordered <- rpkm_data[, genomic_idx]
head(rpkm_ordered)
all(rownames(metadata) == colnames(rpkm_ordered))
/script/Intro-to-R/02_matching_reordering.R
no_license
sudog0624/learning-R
R
false
false
2,551
r
# 02_matching_reordering source('script/Intro-to-R/Source_Intro_to_R.R') rpkm_data <- read.csv(file = file.path(dataDir, "counts.rpkm")) metadata <- read.csv(file = file.path(dataDir, "mouse_exp_design.csv")) head(rpkm_data) ncol(rpkm_data) nrow(metadata) ### 01 The `%in%` operator A <- c(1, 3, 5, 7, 9, 11) # odd numbers B <- c(2, 4, 6, 8, 10, 12) # even numbers A %in% B # test to see if each of the elements of A is in B A <- c(1, 3, 5, 7, 9, 11) # odd numbers B <- c(2, 4, 6, 8, 1, 5) # add some odd numbers in A %in% B intersection <- A %in% B intersection A[intersection] any(A %in% B) all(A %in% B) A <- c(10, 20, 30, 40, 50) B <- c(50, 40, 30, 20, 10) # same numbers but backwards A %in% B A == B # test to see if each element of A is in the same position in B all(A == B) # use all() to check if they are a perfect match x <- rownames(metadata) y <- colnames(rpkm_data) all(x %in% y) all(rownames(metadata) %in% colnames(rpkm_data)) all(rownames(metadata) == colnames(rpkm_data)) ### 02 Reordering data using indices teaching_team <- c("Jihe", "Mary", "Meeta", "Radhika") teaching_team[c(2, 4)] # Extracting values from a vector teaching_team teaching_team[c(4, 2)] # Extracting values and reordering them reorder_teach <- teaching_team[c(4, 2, 1, 3)] # Saving the results to a variable reorder_teach ### 03 The `match` function first <- c("A", "B", "C", "D", "E") second <- c("B", "D", "E", "A", "C") # same letters but different order match(first, second) # PS: Each number that is returned represents the index of the `second` vector where the matching value was observed reorder_idx <- match(first, second) # Saving indices for how to reorder `second` to match `first` second[reorder_idx] # Reordering the second vector to match the order of the `first` vector second_reordered <- second[reorder_idx] # Reordering and saving the output to a variable first <- c("A", "B", "C", "D", "E") second <- c("D", "B", "A") # remove values match(first, second) # Return NA # You can specify what 
values you would have it assigned using nomatch argument. match(first, second, nomatch = 0) # If there is more than one matching value found only the first is reported second2 <- c("D", "B", "A", "D") match(first, second2) ### 04 Reordering genomic data using `match()` function rownames(metadata) colnames(rpkm_data) genomic_idx <- match(rownames(metadata), colnames(rpkm_data)) genomic_idx rpkm_ordered <- rpkm_data[, genomic_idx] head(rpkm_ordered) all(rownames(metadata) == colnames(rpkm_ordered))
#' @title High dimensional univariate cox proportional hazard model.
#' @param m Starting column number from where high dimensional variates to be selected.
#' @param n Ending column number till where high dimensional variates to be selected.
#' @param survdur "Column/Variable name" consisting duration of survival.
#' @param event "Column/Variable name" consisting survival event.
#' @param ths A numeric between 0 to 100.
#' @param b Number of MCMC iterations to burn.
#' @param d Number of draws for the iterations.
#' @param data High dimensional data containing survival observations and high dimensional covariates.
#' @param sig Level of significance pre-determined by the user.
#' @description Given the dimension of variables and survival information risks the function
#' filters significant variables, allowing the user to fit univariate Cox PH model. Further, it performs mediation
#' analysis among the significant variables and provides handful variables with their alpha.a values
#' which are mediator model exposure coefficients and beta.a coefficients.
#' @return Data frame containing the beta and alpha values of active variables among the significant variables.
#' @import survival
#' @import hdbm
#' @import schoolmath
#' @export
#'
#' @examples
#' ##
#' data(hnscc)
#' unihdcoxma(m=8,n=105,survdur="os",event="death",sig=0.5,ths=0.02,b=1000,d=10,data=hnscc2)
#' ##
# NOTE(review): the example loads data(hnscc) but passes data=hnscc2 — confirm
# which dataset name is intended.
unihdcoxma <- function(m,n,survdur,event,sig,ths,b,d,data){
  # Copy arguments to descriptive local names.
  siglevel<-sig
  thresh<-ths
  burn<-b
  draws<-d
  Event<-event
  # CAUTION: this local `Surv` (a character column name) shadows the name of
  # survival::Surv; the call Surv(...) below still resolves to the function,
  # because R skips non-function bindings when looking up a name in call
  # position.
  Surv<-survdur
  # nbatch and sq are computed but never used afterwards.
  nbatch<-length(m:n)/5
  sq<-seq(m,n,5)
  # Accumulator for per-variable hazard ratio and p-value (grown by rbind).
  hrpres1 <- matrix(nrow=0,ncol=2)
  Variables<-c(colnames(data)[m:n])
  # Fit one univariate Cox PH model per candidate column m..n.
  for(i in m:n){
    model1 <- coxph(Surv(get(Surv),get(Event)) ~ data[,i], data=data)
    sumr <- summary(model1)
    # coefficients[,2] is exp(coef) (hazard ratio); coefficients[,5] is the
    # Wald-test p-value.
    sumrcoeff1<-round(sumr$coefficients[,2],2)
    sumrcoeff2<-round(sumr$coefficients[,5],4)
    resdata1 <- data.frame(sumrcoeff1,sumrcoeff2)
    colnames(resdata1)<-c("HR","Pvalue")
    hrpres1<-rbind(hrpres1,resdata1)
  }
  hrpres <- data.frame(Variables,hrpres1)
  # Sort by p-value, then keep only variables significant at `siglevel`.
  hrpres <-hrpres[order(hrpres$Pvalue),]
  #to filter or not??
  hrpres<-hrpres[hrpres$Pvalue<=siglevel,]
  selvar <- c(hrpres$Variables)
  data2<-subset(data,select=selvar)
  M <- data.matrix(data2)
  #define parameters for hdbm
  # First selected column is used as the response, second as the exposure;
  # the full matrix M is passed as the mediator set.
  Y<-M[,1] #data[,Event] #response variable
  A<-M[,2] #exposure variable taken as first and second column from the selected variable matrix
  # Intercept-only covariate matrix, and zero-initialized coefficient vectors.
  C <- matrix(1, nrow(data2), 1)
  beta.m <- rep(0, ncol(data2))
  alpha.a <- rep(0, ncol(data2))
  # Bayesian mediation analysis (hdbm package).
  hdbm.out <- hdbm(Y,A,M, C, C, beta.m, alpha.a, burnin = burn, ndraws = draws)
  # r1/r3 are presumably the posterior inclusion indicators for the two
  # mediation paths — a variable is "active" when the product's column sum
  # exceeds `thresh` (TODO confirm against hdbm documentation).
  active <- which(colSums(hdbm.out$r1 * hdbm.out$r3) > thresh)
  Activevariables <- colnames(M)[active]
  # mbeta.a is computed but never used below.
  mbeta.a<-mean(hdbm.out$beta.a)
  # Posterior column means of the mediation coefficients.
  colm.beta.m<-apply(hdbm.out$beta.m, 2, mean)
  colm.alpha.m<-apply(hdbm.out$alpha.a, 2, mean)
  # No active variables: report and fall through (returns NULL invisibly).
  if(length(Activevariables)==0){
    print("No active variables")
    print("Number of active variables is 0")
  }
  if(length(Activevariables)!=0){
    # Collect the coefficient means for each active variable.
    dumean<-matrix(nrow = 0, ncol = 2)
    for(i in active){
      du.data<-data.frame(colm.beta.m[i],colm.alpha.m[i])
      dumean<-rbind(dumean,du.data)
    }
    act.var.means <- data.frame(dumean)
    colnames(act.var.means)<-c("Colmeans.beta.m","Colmeans.alpha.m")
    act.var.means <- data.frame(Activevariables,act.var.means)
    act.results<-list('Active variabels and their beta and alpha means'= act.var.means)
    return(act.results)
  }
}
/R/unihdcoxma.R
no_license
cran/autohd
R
false
false
3,652
r
#' @title High dimensional univariate cox proportional hazard model. #' @param m Starting column number from where high dimensional variates to be selected. #' @param n Ending column number till where high dimensional variates to be selected. #' @param survdur "Column/Variable name" consisting duration of survival. #' @param event "Column/Variable name" consisting survival event. #' @param ths A numeric between 0 to 100. #' @param b Number of MCMC iterations to burn. #' @param d Number of draws for the iterations. #' @param data High dimensional data containing survival observations and high dimensional covariates. #' @param sig Level of significance pre-determined by the user. #' @description Given the dimension of variables and survival information risks the function #' filters significant variables, allowing the user to fit univariate COx PH model. Further, it performs mediation #' analysis among the significant variables and provides handful variables with their alpha.a values #' which are mediator model exposure coefficients and beta.a coefficients. #' @return Data frame containing the beta and alpha values of active variables among the significant variables. 
#' @import survival #' @import hdbm #' @import schoolmath #' @export #' #' @examples #' ## #' data(hnscc) #' unihdcoxma(m=8,n=105,survdur="os",event="death",sig=0.5,ths=0.02,b=1000,d=10,data=hnscc2) #' ## unihdcoxma <- function(m,n,survdur,event,sig,ths,b,d,data){ siglevel<-sig thresh<-ths burn<-b draws<-d Event<-event Surv<-survdur nbatch<-length(m:n)/5 sq<-seq(m,n,5) hrpres1 <- matrix(nrow=0,ncol=2) Variables<-c(colnames(data)[m:n]) for(i in m:n){ model1 <- coxph(Surv(get(Surv),get(Event)) ~ data[,i], data=data) sumr <- summary(model1) sumrcoeff1<-round(sumr$coefficients[,2],2) sumrcoeff2<-round(sumr$coefficients[,5],4) resdata1 <- data.frame(sumrcoeff1,sumrcoeff2) colnames(resdata1)<-c("HR","Pvalue") hrpres1<-rbind(hrpres1,resdata1) } hrpres <- data.frame(Variables,hrpres1) hrpres <-hrpres[order(hrpres$Pvalue),] #to filter or not?? hrpres<-hrpres[hrpres$Pvalue<=siglevel,] selvar <- c(hrpres$Variables) data2<-subset(data,select=selvar) M <- data.matrix(data2) #define parameters for hdbm Y<-M[,1] #data[,Event] #response variable A<-M[,2] #exposure variable taken as first and second column from the selected variable matrix C <- matrix(1, nrow(data2), 1) beta.m <- rep(0, ncol(data2)) alpha.a <- rep(0, ncol(data2)) hdbm.out <- hdbm(Y,A,M, C, C, beta.m, alpha.a, burnin = burn, ndraws = draws) active <- which(colSums(hdbm.out$r1 * hdbm.out$r3) > thresh) ###################### Activevariables <- colnames(M)[active] #################### mbeta.a<-mean(hdbm.out$beta.a) ################### colm.beta.m<-apply(hdbm.out$beta.m, 2, mean) ####################### colm.alpha.m<-apply(hdbm.out$alpha.a, 2, mean) ###################### if(length(Activevariables)==0){ print("No active variables") print("Number of active variables is 0") } if(length(Activevariables)!=0){ dumean<-matrix(nrow = 0, ncol = 2) for(i in active){ du.data<-data.frame(colm.beta.m[i],colm.alpha.m[i]) dumean<-rbind(dumean,du.data) } act.var.means <- data.frame(dumean) 
colnames(act.var.means)<-c("Colmeans.beta.m","Colmeans.alpha.m") act.var.means <- data.frame(Activevariables,act.var.means) act.results<-list('Active variabels and their beta and alpha means'= act.var.means) return(act.results) } }
#### Table 3 -----------------------------------------------------------------

#### 1. Five-year data ####

# (1) Pooled OLS
data1.1 <- read.csv("5yr_panel.csv", header = TRUE)
democ1.1 <- pdata.frame(data1.1, index = "country")
Democracy1.5yr <- democ1.1$polity4
Income1.5yr <- democ1.1$lrgdpch
pols.1 <- plm(
  Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr) + year - 1,
  democ1.1, index = c("country", "year"), model = "pooling",
  subset = sample == 1
)
pols.1.coef <- coeftest(pols.1, vcov = vcovHC)

# (2) Fixed effects OLS: Two Ways
fe1.1 <- plm(
  Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr),
  democ1.1, index = c("country", "year"), model = "within",
  effect = "twoways", subset = sample == 1
)
fe1.1.coef <- coeftest(fe1.1, vcov = vcovHC)

# (3) Anderson-Hsiao IV
hsiao.1 <- plm(
  diff(Democracy1.5yr) ~ lag(diff(Democracy1.5yr)) + lag(diff(Income1.5yr)) + year - 1 |
    lag(Democracy1.5yr, 2) + lag(Income1.5yr, 2) + year - 1,
  democ1.1, index = c("country", "year"), model = "pooling",
  subset = sample == 1
)
hsiao.1.coef <- coeftest(hsiao.1, vcov = vcovHC)

# (4) Arellano-Bond GMM
# i) Arellano-Bond GMM
gmm1.1 <- pgmm(
  Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr) |
    lag(Democracy1.5yr, 2:99) | lag(Income1.5yr, 2),
  democ1.1, index = c("country", "year"), model = "onestep",
  effect = "twoways", subset = sample == 1
)
gmm1.1.coef <- coeftest(gmm1.1, vcov = vcovHC)

# (5) Fixed effects OLS
fe2.1 <- plm(
  Democracy1.5yr ~ lag(Income1.5yr),
  democ1.1, index = c("country", "year"), model = "within",
  effect = "twoways", subset = sample == 1
)
fe2.1.coef <- coeftest(fe2.1, vcov = vcovHC)

#### 2. Annual data ####

# (6) Fixed effects OLS
data2.1 <- read.csv("annual_panel.csv", header = TRUE)
democ2.1 <- pdata.frame(data2.1, index = "country")
Democracy1.annual <- democ2.1$polity4
Income1.annual <- democ2.1$lrgdpch
fe3.1 <- plm(
  Democracy1.annual ~ lag(Democracy1.annual) + lag(Income1.annual),
  democ2.1, index = c("country", "year"), model = "within",
  effect = "twoways", subset = sample == 1
)
fe3.1.coef <- coeftest(fe3.1, vcov = vcovHC)

#### 3. Ten-year data ####

# (7) Fixed effects OLS
data3.1 <- read.csv("10yr_panel.csv", header = TRUE)
democ3.1 <- pdata.frame(data3.1, index = "country")
Democracy1.10yr <- democ3.1$polity4
Income1.10yr <- democ3.1$lrgdpch
fe4.1 <- plm(
  Democracy1.10yr ~ lag(Democracy1.10yr) + lag(Income1.10yr),
  democ3.1, index = c("country", "year"), model = "within",
  effect = "twoways", subset = sample == 1
)
fe4.1.coef <- coeftest(fe4.1, vcov = vcovHC)

#### 4. Twenty-year data ####

# (9) Fixed effects OLS
data4.1 <- read.csv("20yr_panel.csv", header = TRUE)
democ4.1 <- pdata.frame(data4.1, index = "country")
Democracy1.20yr <- democ4.1$polity4
Income1.20yr <- democ4.1$lrgdpch
fe5.1 <- plm(
  Democracy1.20yr ~ lag(Democracy1.20yr) + lag(Income1.20yr),
  democ4.1, index = c("country", "year"), model = "within",
  effect = "twoways", subset = sample == 1
)
fe5.1.coef <- coeftest(fe5.1, vcov = vcovHC)

# Side-by-side regression table of all specifications.
screenreg(list(
  "Pooled OLS (1)" = pols.1.coef,
  "Fixed effects (2)" = fe1.1.coef,
  "Anderson-Hsiao IV (3)" = hsiao.1.coef,
  "Arellano-Bond GMM (4)" = gmm1.1.coef,
  "Fixed effects OLS (5)" = fe2.1.coef,
  "Fixed effects OLS (6)" = fe3.1.coef,
  "Fixed effects OLS (7)" = fe4.1.coef,
  "Fixed effects OLS (9)" = fe5.1.coef
))
/Project-Income & Democracy/R Code/Table3.R
no_license
queenqueen89/Panel_Data
R
false
false
3,341
r
#### Table 3 ----------------------------------------------------------------- #### 1. Five-year data #### # (1) Pooled OLS data1.1 <- read.csv("5yr_panel.csv", header=TRUE) democ1.1 <- pdata.frame(data1.1, index="country") Democracy1.5yr <- democ1.1$polity4 Income1.5yr <- democ1.1$lrgdpch pols.1 <- plm(Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr) + year -1, democ1.1, index = c("country", "year"), model = "pooling", subset = sample == 1) pols.1.coef <- coeftest(pols.1, vcov=vcovHC) # (2) Fixed effects OLS: Two Ways fe1.1 <- plm(Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr), democ1.1, index = c("country", "year"), model = "within", effect="twoways", subset = sample == 1) fe1.1.coef <- coeftest(fe1.1, vcov=vcovHC) # (3) Anderson-Hsiao IV hsiao.1 <- plm(diff(Democracy1.5yr) ~ lag(diff(Democracy1.5yr)) + lag(diff(Income1.5yr)) + year - 1 | lag(Democracy1.5yr, 2) + lag(Income1.5yr, 2) + year - 1, democ1.1, index = c("country", "year"), model = "pooling", subset = sample == 1) hsiao.1.coef <- coeftest(hsiao.1, vcov=vcovHC) # (4) Arellano-Bond GMM # i) Arellano-Bond GMM gmm1.1 <- pgmm(Democracy1.5yr ~ lag(Democracy1.5yr) + lag(Income1.5yr) | lag(Democracy1.5yr, 2:99)| lag(Income1.5yr, 2), democ1.1, index=c("country", "year"), model="onestep", effect="twoways", subset = sample == 1) gmm1.1.coef <- coeftest(gmm1.1, vcov=vcovHC) # (5) Fixed effects OLS fe2.1 <- plm(Democracy1.5yr ~ lag(Income1.5yr), democ1.1, index = c("country", "year"), model = "within", effect="twoways", subset = sample == 1) fe2.1.coef <- coeftest(fe2.1, vcov=vcovHC) #### 2. 
Annual data #### # (6) Fixed effects OLS data2.1 <- read.csv("annual_panel.csv", header=TRUE) democ2.1 <- pdata.frame(data2.1, index="country") Democracy1.annual <- democ2.1$polity4 Income1.annual <- democ2.1$lrgdpch fe3.1 <- plm(Democracy1.annual ~ lag(Democracy1.annual) + lag(Income1.annual), democ2.1, index = c("country", "year"), model = "within", effect="twoways", subset = sample == 1) fe3.1.coef <- coeftest(fe3.1, vcov=vcovHC) #### 3. Ten-year data #### # (7) Fixed effects OLS data3.1 <- read.csv("10yr_panel.csv", header=TRUE) democ3.1 <- pdata.frame(data3.1, index="country") Democracy1.10yr <- democ3.1$polity4 Income1.10yr <- democ3.1$lrgdpch fe4.1 <- plm(Democracy1.10yr ~ lag(Democracy1.10yr) + lag(Income1.10yr), democ3.1, index = c("country", "year"), model = "within", effect="twoways", subset = sample == 1) fe4.1.coef <- coeftest(fe4.1, vcov=vcovHC) #### 4. Twenty-year data #### # (9) Fixed effects OLS data4.1 <- read.csv("20yr_panel.csv", header=TRUE) democ4.1 <- pdata.frame(data4.1, index="country") Democracy1.20yr <- democ4.1$polity4 Income1.20yr <- democ4.1$lrgdpch fe5.1 <- plm(Democracy1.20yr ~ lag(Democracy1.20yr) + lag(Income1.20yr), democ4.1, index = c("country", "year"), model = "within", effect="twoways", subset = sample == 1) fe5.1.coef <- coeftest(fe5.1, vcov=vcovHC) screenreg(list("Pooled OLS (1)" = pols.1.coef, "Fixed effects (2)" = fe1.1.coef, "Anderson-Hsiao IV (3)" = hsiao.1.coef, "Arellano-Bond GMM (4)" = gmm1.1.coef, "Fixed effects OLS (5)" = fe2.1.coef, "Fixed effects OLS (6)" = fe3.1.coef, "Fixed effects OLS (7)" = fe4.1.coef, "Fixed effects OLS (9)" = fe5.1.coef))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expr-node.R \name{as_pairlist} \alias{as_pairlist} \title{Coerce to pairlist.} \usage{ as_pairlist(x) } \arguments{ \item{x}{An object to coerce.} } \description{ This transforms vector objects to a linked pairlist of nodes. See \link{pairlist} for information about the pairlist type. } \seealso{ \link{pairlist} }
/man/as_pairlist.Rd
no_license
dpastoor/rlang
R
false
true
394
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expr-node.R \name{as_pairlist} \alias{as_pairlist} \title{Coerce to pairlist.} \usage{ as_pairlist(x) } \arguments{ \item{x}{An object to coerce.} } \description{ This transforms vector objects to a linked pairlist of nodes. See \link{pairlist} for information about the pairlist type. } \seealso{ \link{pairlist} }
#' Compute geometric mean/average
#'
#' @param x vector
#'
#' @return double
#' @export
#'
gmean <- function(x) {
  # Drop missing (and NaN) values, then average on the log10 scale and
  # transform back — the classic geometric-mean identity.
  finite_vals <- x[!is.na(x)]
  10^mean(log10(finite_vals))
}
/R/geom_mean.R
permissive
atsyplenkov/atslib
R
false
false
182
r
#' Compute geometric mean/average #' #' @param x vector #' #' @return double #' @export #' gmean <- function(x){ x_na <- x[!base::is.na(x)] 10^base::mean(base::log10(x_na)) }
# Build reference tables (categories, legislators, committees) from
# OpenSecrets bulk downloads and store them as package data.

library(tidyverse)

# Contribution category codes (skip the 8-line file header).
fp <- "https://www.opensecrets.org/downloads/crp/CRP_Categories.txt"
categories <- read_tsv(fp, skip = 8) %>%
  janitor::clean_names()

# Congressional committee assignments; squish stray whitespace in every column.
committees <- read_delim(
  "https://www.opensecrets.org/downloads/crp/CRP_CongCmtes.txt",
  delim = ' '
) %>%
  janitor::clean_names() %>%
  mutate_all(str_squish)

library(opensecrets)

# Fetch legislators for every US state and derive a state column from the
# office string.
all_legislators <- state.abb %>%
  map(get_legislators)

legislators <- bind_rows(all_legislators) %>%
  mutate(state = str_extract(office, "[A-Z]{2}")) %>%
  select(cid, firstlast, lastname, state, everything())

# Persist as CSV for inspection and as package data objects.
write_csv(categories, "data/categories.csv")
write_csv(legislators, "data/legislators.csv")
write_csv(committees, "data/committees.csv")

usethis::use_data(committees, legislators, categories)
/data-raw/reference_tables.R
no_license
JosiahParry/opensecrets
R
false
false
747
r
library(tidyverse) fp <- "https://www.opensecrets.org/downloads/crp/CRP_Categories.txt" categories <- read_tsv(fp, skip = 8) %>% janitor::clean_names() committees <- read_delim("https://www.opensecrets.org/downloads/crp/CRP_CongCmtes.txt", delim = ' ') %>% janitor::clean_names() %>% mutate_all(str_squish) library(opensecrets) all_legislators <- state.abb %>% map(get_legislators) legislators <- bind_rows(all_legislators) %>% mutate(state = str_extract(office, "[A-Z]{2}")) %>% select(cid, firstlast, lastname, state, everything()) write_csv(categories, "data/categories.csv") write_csv(legislators, "data/legislators.csv") write_csv(committees, "data/committees.csv") usethis::use_data(committees, legislators, categories)
#' Classify a publication record by Swedish standard subject categories
#'
#' Umeå University provides a web [api](https://klassificera.ub.umu.se/) described
#' [here](https://klassificera.ub.umu.se/api2.html), for classifying
#' english and swedish language records from DiVA or Swepub based on the MODS
#' format (v 3.2 or later) according to the [Swedish standard](https://www.scb.se/dokumentation/klassifikationer-och-standarder/standard-for-svensk-indelning-av-forskningsamnen/).
#' Classification can also be made based on a [Web of Science record](https://klassificera.ub.umu.se/uploadFile.txt)
#' Uploaded record batches may not exceed 200 MB, using batches of 10 MB are
#' recommended (around 2-3000 records per chunk)
#'
#' The classification is based on information in the record from the title
#' and abstract (required) but also on keywords, ISSN/journal, ISBN-prefix/
#' publisher and affiliations. Training data comes from SwePub (July 2020).
#' It is based on the following papers:
#' - Dual Coordinate Descent Methods for Logistic Regression and Maximum
#' Entropy Models. ([doi:10.1007/s10994-010-5221-8](\doi{10.1007/s10994-010-5221-8})
#' - Entropy-Based Term Weighting Schemes for Text Categorization in VSM. ([doi:10.1109/ICTAI.2015.57](\doi{10.1109/ICTAI.2015.57})
#' @details The classification is made at the research topic level (5 digits)
#' for English language records and at research subject group level
#' 3 digits for Swedish language records.
#' @param record string the publication identifier string in DiVA or an export file
#' from Web of Science in the "Plain text/Full record" format
#' @param type string, type of identifier, one of "mods" or "wos", default: "mods"
#' @param threshold a value in between 0.1 and 0.51, which governs to
#' which extent a record is classified with more than one subject area
#' @param email email address for the user
#' @return a tibble with suggested classifications
#' @examples \dontrun{
#'
#' # classify using a DiVA record identifier
#' classify_umu_ub("diva2:515038", threshold = 0.3, email = "john.doe@hotmail.com")
#'
#' # classify using a WoS Record in Flatfile/Plain text format
#' wos_record <- readr::read_lines("https://klassificera.ub.umu.se/uploadFile.txt")
#' classify_umu_ub(wos_record, type = "wos")
#' }
#' @importFrom dplyr between bind_cols
#' @importFrom xml2 write_xml xml_find_first xml_attr xml_contents xml_find_all
#' @importFrom httr upload_file POST accept
#' @importFrom readr write_lines read_tsv
#' @importFrom stringi stri_unescape_unicode
#' @export
classify_umu_ub <- function(record, type = c("mods", "wos"),
                            threshold = 0.2, email = "foo.bar@null.se") {
  # Validate threshold range and a minimal email shape up front.
  stopifnot(
    dplyr::between(threshold, 0.1, 0.51),
    grepl("^\\S{1,}@\\S{2,}\\.\\S{2,}$", email)
  )
  # URL template for exporting a DiVA record in MODS format.
  diva_mods_url <- function(diva_pid)
    sprintf(paste0("https://kth.diva-portal.org/smash/references",
      "?referenceFormat=MODS&pids=[%s]&fileName=export.xml"), diva_pid)
  # Download the MODS record for a DiVA pid and wrap it as an upload handle.
  filedata_mods <- function(pid) {
    mods <- GET(diva_mods_url(pid))
    mods_file <- tempfile()
    xml2::write_xml(content(mods), mods_file)
    filedata <- httr::upload_file(mods_file, type = "text/xml")
  }
  # Write a WoS plain-text record to a tempfile and wrap it for upload.
  filedata_wos <- function(wosid) {
    wos_file <- tempfile()
    #on.exit(unlink(wos_file))
    readr::write_lines(wosid, wos_file)
    filedata <- httr::upload_file(wos_file, type = "text/plain")
    return(filedata)
  }
  # Default to "mods" when type was not supplied.
  if (missing(type)) type <- "mods"
  filedata <- switch(type,
    "mods" = filedata_mods(record),
    "wos" = filedata_wos(record)
  )
  #on.exit(unlink(filedata))
  # Multipart POST to the Umeå classification endpoint; the email is passed
  # as the API key and the threshold controls multi-label assignment.
  res <- httr::POST(
    url = sprintf("https://klassificera.ub.umu.se/api/v1/%s", type),
    encode = "multipart",
    accept("*/*"),
    query = list(
      key = I(email),
      multilabelthreshold = threshold
    ),
    body = list(data = filedata)
    # add_headers(`Accept-Encoding` = "gzip"),
    # add_headers("Content-Type" = "multipart/related"),
    # verbose()
  )
  # MODS responses come back as TSV and are returned directly.
  if (type == "mods") return(readr::read_tsv(content(res, as = "text")))
  # we likely have an xml response, parse it into a table
  # XPath with the Swedish word "forskningsämne" (research subject),
  # unescaped from its unicode-escaped form.
  topic <- stringi::stri_unescape_unicode("/poster/post/forsknings\\u00e4mne")
  x1 <- content(res)
  x2 <- x1 %>% xml_find_all(topic)
  # One row per suggested subject: WoS UT id (from the parent node), label,
  # code ("kod") and probability ("sannolikhet") attributes.
  dplyr::bind_cols(
    UT = x2 %>% xml2::xml_find_first("..") %>% xml2::xml_attr("UT") %>% as.character(),
    desc = x2 %>% xml2::xml_contents() %>% as.character(),
    code = x2 %>% xml2::xml_attr("kod") %>% as.character(),
    prob = x2 %>% xml2::xml_attr("sannolikhet") %>% as.character()
  )
}

#' Classify into subject categories using SwePub Web API
#'
#' Use the title, abstract and keywords to get a suggestion for subject
#' categories to use from the Swedish standard set of 3 or 5 digit
#' subject categories.
#' @details Calls the API at https://swepub-qa.kb.se/api/v1/classify
#' @param title the title of the paper
#' @param abstract the abstract
#' @param keywords keywords used
#' @param level either 3 or 5, default: 3
#' @examples \dontrun{
#' classify_swepub(
#'   title = "Magnetic resonance",
#'   abstract = "This paper deals with magnetic resonance",
#'   keywords = "magnetic radiotherapy nuclear",
#'   level = 5
#' )
#' }
#' @importFrom httr with_config config POST accept_json add_headers content_type_json content
#' @importFrom jsonlite toJSON fromJSON
#' @importFrom dplyr tibble
#' @export
classify_swepub <- function(title, abstract, keywords, level = 3) {
  # level must be one of the two supported classification depths, and at
  # least one of the text inputs must be non-missing and non-empty.
  stopifnot(level == 3 || level == 5)
  stopifnot(any(!is.na(c(title, abstract, keywords))))
  stopifnot(any(nzchar(c(title, abstract, keywords))))
  # Request payload; level is sent as a string.
  json <- list(
    level = as.character(level),
    abstract = abstract,
    title = title,
    keywords = keywords
  )
  res <-
    # something is wacky with that SSL cert
    # NOTE(review): ssl_verifypeer = 0L disables certificate verification —
    # revisit once the endpoint's certificate is fixed.
    httr::with_config(httr::config(ssl_verifypeer = 0L),
      httr::POST(
        url = "https://swepub-qa.kb.se/api/v1/classify",
        body = jsonlite::toJSON(json, auto_unbox = TRUE),
        httr::accept_json(),
        httr::add_headers("Content-Type" = "application/json"),
        httr::content_type_json()
      ))
  out <- httr::content(res, type = "text", encoding = "UTF-8") %>%
    jsonlite::fromJSON(simplifyDataFrame = TRUE, flatten = TRUE)
  # Collapse a list of topic-tree vectors into " > "-separated strings.
  # (The second argument y is unused; the inner function shadows it.)
  tt <- function(x, y) sapply(x, function(y) paste0(y, collapse = " > "))
  # Empty tibble when the API reports no match.
  if (out$status == "no match") return(dplyr::tibble())
  dplyr::tibble(
    eng_code = out$suggestions$`eng.code`,
    eng_label = out$suggestions$`eng.prefLabel`,
    eng_topics = tt(out$suggestions$eng._topic_tree),
    swe_code = out$suggestions$swe.code,
    swe_label = out$suggestions$swe.prefLabel,
    swe_topics = tt(out$suggestions$swe._topic_tree)
  )
}

#' DiVA publication records from KTH with subject classification issues
#' @details Data comes from [](https://bibliometri.swepub.kb.se/process)
#' @param institution the institution string, Default: 'kth'
#' @param year_beg filter from year, Default: 2012
#' @param year_end filter to year, Default: 2020
#' @param page_size for paged responses, the page length, Default: 20
#' @return a tibble with results
#' @examples
#' \dontrun{
#' if(interactive()){
#'  issues_swepub()
#'  }
#' }
#' @export
#' @importFrom httr with_config config GET accept
#' @importFrom readr read_csv
issues_swepub <- function(institution = "kth", year_beg = 2012L,
                          year_end = 2020L, page_size = 20) {
  # Export records flagged with UKA_comprehensive_check_invalid as CSV.
  # NOTE(review): SSL verification is disabled here too — confirm whether
  # still needed.
  res <- httr::with_config(httr::config(ssl_verifypeer = 0L),
    httr::GET(sprintf(paste0("https://bibliometri.swepub.kb.se/api/v1/",
      "process/%s/export?from=%s&to=%s",
      "&audit_flags=UKA_comprehensive_check_invalid",
      "&limit=%s"), institution, year_beg, year_end, page_size),
      httr::accept("text/csv")
    )
  )
  # First line of the export is skipped (presumably a header/banner row —
  # TODO confirm against the API response).
  readr::read_csv(content(res, as = "text", encoding = "UTF-8"), skip = 1)
}
/R/classify.R
permissive
KTH-Library/kthapi
R
false
false
7,838
r
#' Classify a publication record by Swedish standard subject categories #' #' Umeå University provides a web [api](https://klassificera.ub.umu.se/) described #' [here](https://klassificera.ub.umu.se/api2.html), for classifying #' english and swedish language records from DiVA or Swepub based on the MODS #' format (v 3.2 or later) according to the [Swedish standard](https://www.scb.se/dokumentation/klassifikationer-och-standarder/standard-for-svensk-indelning-av-forskningsamnen/). #' Classification can also be made based on a [Web of Science record](https://klassificera.ub.umu.se/uploadFile.txt) #' Uploaded record batches may not exceed 200 MB, using batches of 10 MB are #' recommended (around 2-3000 records per chunk) #' #' The classification is based on information in there record from the title #' and abstract (required) but also on keywords, ISSN/journal, ISBN-prefix/ #' publisher and affiliations. Training data comes from SwePub (July 2020). #' It is based on the following papers: #' - Dual Coordinate Descent Methods for Logistic Regression and Maximum #' Entropy Models. ([doi:10.1007/s10994-010-5221-8](\doi{10.1007/s10994-010-5221-8}) #' - Entropy-Based Term Weighting Schemes for Text Categorization in VSM. ([doi:10.1109/ICTAI.2015.57](\doi{10.1109/ICTAI.2015.57}) #' @details The classification is made at the research topic level (5 digits) #' for English language records and at research subject group level #' 3 digits for Swedish language records. 
#' @param record string the publication identifier string in DiVA or an export file #' from Web of Science in the "Plain text/Full record" format #' @param type string, type of identifier, one of "mods" or "wos", default: "mods" #' @param threshold a value in between 0.1 and 0.51, which governs to which #' which extent a record is classified with more than one subject area #' @param email email adress for the user #' @return a tibble with suggested classifications #' @examples \dontrun{ #' #' # classify using a DiVA record identifier #' classify_umu_ub("diva2:515038", threshold = 0.3, email = "john.doe@hotmail.com") #' #' # classify using a WoS Record in Flatfile/Plain text format #' wos_record <- readr::read_lines("https://klassificera.ub.umu.se/uploadFile.txt") #' classify_umu_ub(wos_record, type = "wos") #' } #' @importFrom dplyr between bind_cols #' @importFrom xml2 write_xml xml_find_first xml_attr xml_contents xml_find_all #' @importFrom httr upload_file POST accept #' @importFrom readr write_lines read_tsv #' @importFrom stringi stri_unescape_unicode #' @export classify_umu_ub <- function(record, type = c("mods", "wos"), threshold = 0.2, email = "foo.bar@null.se") { stopifnot( dplyr::between(threshold, 0.1, 0.51), grepl("^\\S{1,}@\\S{2,}\\.\\S{2,}$", email) ) diva_mods_url <- function(diva_pid) sprintf(paste0("https://kth.diva-portal.org/smash/references", "?referenceFormat=MODS&pids=[%s]&fileName=export.xml"), diva_pid) filedata_mods <- function(pid) { mods <- GET(diva_mods_url(pid)) mods_file <- tempfile() xml2::write_xml(content(mods), mods_file) filedata <- httr::upload_file(mods_file, type = "text/xml") } filedata_wos <- function(wosid) { wos_file <- tempfile() #on.exit(unlink(wos_file)) readr::write_lines(wosid, wos_file) filedata <- httr::upload_file(wos_file, type = "text/plain") return(filedata) } if (missing(type)) type <- "mods" filedata <- switch(type, "mods" = filedata_mods(record), "wos" = filedata_wos(record) ) #on.exit(unlink(filedata)) res 
<- httr::POST( url = sprintf("https://klassificera.ub.umu.se/api/v1/%s", type), encode = "multipart", accept("*/*"), query = list( key = I(email), multilabelthreshold = threshold ), body = list(data = filedata) # add_headers(`Accept-Encoding` = "gzip"), # add_headers("Content-Type" = "multipart/related"), # verbose() ) if (type == "mods") return(readr::read_tsv(content(res, as = "text"))) # we likely have an xml response, parse it into a table topic <- stringi::stri_unescape_unicode("/poster/post/forsknings\\u00e4mne") x1 <- content(res) x2 <- x1 %>% xml_find_all(topic) dplyr::bind_cols( UT = x2 %>% xml2::xml_find_first("..") %>% xml2::xml_attr("UT") %>% as.character(), desc = x2 %>% xml2::xml_contents() %>% as.character(), code = x2 %>% xml2::xml_attr("kod") %>% as.character(), prob = x2 %>% xml2::xml_attr("sannolikhet") %>% as.character() ) } #' Classify into subject categories using SwePub Web API #' #' Use the title, abstract and keywords to get a suggestion for subject #' categories to use from the Swedish standard set of 3 or 5 digit #' subject categories. 
#' @details Calls the API at https://swepub-qa.kb.se/api/v1/classify #' @param title the title of the paper #' @param abstract the abstract #' @param keywords keywords used #' @param level either 3 or 5, default: 3 #' @examples \dontrun{ #' classify_swepub( #' title = "Magnetic resonance", #' abstract = "This paper deals with magnetic resonance", #' keywords = "magnetic radiotherapy nuclear", #' level = 5 #' ) #' } #' @importFrom httr with_config config POST accept_json add_headers content_type_json content #' @importFrom jsonlite toJSON fromJSON #' @importFrom dplyr tibble #' @export classify_swepub <- function(title, abstract, keywords, level = 3) { stopifnot(level == 3 || level == 5) stopifnot(any(!is.na(c(title, abstract, keywords)))) stopifnot(any(nzchar(c(title, abstract, keywords)))) json <- list( level = as.character(level), abstract = abstract, title = title, keywords = keywords ) res <- # something is wacky with that SSL cert httr::with_config(httr::config(ssl_verifypeer = 0L), httr::POST( url = "https://swepub-qa.kb.se/api/v1/classify", body = jsonlite::toJSON(json, auto_unbox = TRUE), httr::accept_json(), httr::add_headers("Content-Type" = "application/json"), httr::content_type_json() )) out <- httr::content(res, type = "text", encoding = "UTF-8") %>% jsonlite::fromJSON(simplifyDataFrame = TRUE, flatten = TRUE) tt <- function(x, y) sapply(x, function(y) paste0(y, collapse = " > ")) if (out$status == "no match") return(dplyr::tibble()) dplyr::tibble( eng_code = out$suggestions$`eng.code`, eng_label = out$suggestions$`eng.prefLabel`, eng_topics = tt(out$suggestions$eng._topic_tree), swe_code = out$suggestions$swe.code, swe_label = out$suggestions$swe.prefLabel, swe_topics = tt(out$suggestions$swe._topic_tree) ) } #' DiVA publication records from KTH with subject classification issues #' @details Data comes from [](https://bibliometri.swepub.kb.se/process) #' @param institution the institution string, Default: 'kth' #' @param year_beg filter from year, 
Default: 2012 #' @param year_end filter to year, Default: 2020 #' @param page_size for paged responses, the page length, Default: 20 #' @return a tibble with results #' @examples #' \dontrun{ #' if(interactive()){ #' issues_swepub() #' } #' } #' @export #' @importFrom httr with_config config GET accept #' @importFrom readr read_csv issues_swepub <- function(institution = "kth", year_beg = 2012L, year_end = 2020L, page_size = 20) { res <- httr::with_config(httr::config(ssl_verifypeer = 0L), httr::GET(sprintf(paste0("https://bibliometri.swepub.kb.se/api/v1/", "process/%s/export?from=%s&to=%s", "&audit_flags=UKA_comprehensive_check_invalid", "&limit=%s"), institution, year_beg, year_end, page_size), httr::accept("text/csv") ) ) readr::read_csv(content(res, as = "text", encoding = "UTF-8"), skip = 1) }
\name{ea_presence} \alias{ea_presence} \title{Exposure assessment from presence/absence data} \description{ Parametric modelling of exposure concentration from presence/absence data. } \usage{ ea_presence(x, q = 1, replicates = rep(1, length(x)), data, model = c("poisson", "p"), \dots) } \arguments{ \item{x}{A vector indicating the number of positive samples per examined quantity.} \item{q}{The quantities (e.g., volumes, masses) in which presence/absence was observed; defaults to 1.} \item{replicates}{The number of replicates of each quantity \code{q}; defaults to \code{rep(1, length(x))}.} \item{data}{An optional data frame, containing the variables in the model. If not found in \code{data}, the variables are taken from the environment from which \code{ea_presence} is called.} \item{model}{A character string naming the model to be fitted. See details below.} \item{\dots}{Arguments to be passed to \code{\link{mle}}} } \details{ Available distributions: \itemize{ \item{\strong{Poisson: }}{\code{model = "poisson"} or \code{"p"}} }} \value{An object of class \code{"\linkS4class{ea}"}.} \author{\email{brechtdv@gmail.com}} \references{ \itemize{ \item{ Haas CN, Rose JB, Gerba CP (1999) \emph{Quantitative Microbial Risk Assessment.} John Wiley & Sons, Inc. } }} \seealso{ \code{\link{ea_count}}, for modelling exposure from count data\cr \code{\link{ea_conc}}, for modelling exposure from concentration data } \examples{ ## exposure assessment from presence/absence data ea_presence(x = positive, q = volume, rep = replicates, data = coliform) }
/man/ea_presence.Rd
no_license
brechtdv/QMRA
R
false
false
1,686
rd
\name{ea_presence} \alias{ea_presence} \title{Exposure assessment from presence/absence data} \description{ Parametric modelling of exposure concentration from presence/absence data. } \usage{ ea_presence(x, q = 1, replicates = rep(1, length(x)), data, model = c("poisson", "p"), \dots) } \arguments{ \item{x}{A vector indicating the number of positive samples per examined quantity.} \item{q}{The quantities (e.g., volumes, masses) in which presence/absence was observed; defaults to 1.} \item{replicates}{The number of replicates of each quantity \code{q}; defaults to \code{rep(1, length(x))}.} \item{data}{An optional data frame, containing the variables in the model. If not found in \code{data}, the variables are taken from the environment from which \code{ea_presence} is called.} \item{model}{A character string naming the model to be fitted. See details below.} \item{\dots}{Arguments to be passed to \code{\link{mle}}} } \details{ Available distributions: \itemize{ \item{\strong{Poisson: }}{\code{model = "poisson"} or \code{"p"}} }} \value{An object of class \code{"\linkS4class{ea}"}.} \author{\email{brechtdv@gmail.com}} \references{ \itemize{ \item{ Haas CN, Rose JB, Gerba CP (1999) \emph{Quantitative Microbial Risk Assessment.} John Wiley & Sons, Inc. } }} \seealso{ \code{\link{ea_count}}, for modelling exposure from count data\cr \code{\link{ea_conc}}, for modelling exposure from concentration data } \examples{ ## exposure assessment from presence/absence data ea_presence(x = positive, q = volume, rep = replicates, data = coliform) }
context("Inflators return correct results") test_that("cpi returns known results", { expect_gt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), 1.029) expect_lt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), 1.030) }) test_that("cpi_inflator_general_date same as cpi_inflator", { expect_equal(cpi_inflator(from_fy = "2013-14", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2013-14", to_date = "2014-15", adjustment = "none", useABSConnection = FALSE)) expect_equal(cpi_inflator(from_fy = "2010-11", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2010-11", to_date = "2014-15", adjustment = "none", useABSConnection = FALSE)) }) test_that("cpi_inflator_general_date same as cpi_inflator when diverged", { expect_equal(cpi_inflator(from_fy = "2013-14", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2014-01-01", to_date = "2015-01-01", adjustment = "none", useABSConnection = FALSE)) }) test_that("cpi_inflator_general_date messages", { expect_message(cpi_inflator_general_date(from_date = "2013", to_date = "2014")) expect_error(cpi_inflator_general_date(from_date = "2015-Q5", to_date = "2016-Q5")) }) test_that("cpi returns reasonable forecasts", { skip_if_not(packageVersion("rsdmx") >= package_version("0.5.10")) expect_gt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2015-16", adjustment = "none", useABSConnection = FALSE, allow.projection = TRUE), 1.05) expect_lt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2015-16", adjustment = "none", useABSConnection = FALSE, allow.projection = 
TRUE), 1.06) }) test_that("ABS connection", { internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "seasonal", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "seasonal", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "trimmed", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "trimmed", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "trimmed", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "trimmed", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "seasonal", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "seasonal", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "none", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "none", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) })
/timings/b51e590384e3e2ff667aadf4a3f52f105755ba7a/grattan/tests/testthat/test_cpi.R
no_license
HughParsonage/grattan
R
false
false
6,952
r
context("Inflators return correct results") test_that("cpi returns known results", { expect_gt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), 1.029) expect_lt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), 1.030) }) test_that("cpi_inflator_general_date same as cpi_inflator", { expect_equal(cpi_inflator(from_fy = "2013-14", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2013-14", to_date = "2014-15", adjustment = "none", useABSConnection = FALSE)) expect_equal(cpi_inflator(from_fy = "2010-11", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2010-11", to_date = "2014-15", adjustment = "none", useABSConnection = FALSE)) }) test_that("cpi_inflator_general_date same as cpi_inflator when diverged", { expect_equal(cpi_inflator(from_fy = "2013-14", to_fy = "2014-15", adjustment = "none", useABSConnection = FALSE, allow.projection = FALSE), cpi_inflator_general_date(from_date = "2014-01-01", to_date = "2015-01-01", adjustment = "none", useABSConnection = FALSE)) }) test_that("cpi_inflator_general_date messages", { expect_message(cpi_inflator_general_date(from_date = "2013", to_date = "2014")) expect_error(cpi_inflator_general_date(from_date = "2015-Q5", to_date = "2016-Q5")) }) test_that("cpi returns reasonable forecasts", { skip_if_not(packageVersion("rsdmx") >= package_version("0.5.10")) expect_gt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2015-16", adjustment = "none", useABSConnection = FALSE, allow.projection = TRUE), 1.05) expect_lt(cpi_inflator(from_nominal_price = 1, from_fy = "2012-13", to_fy = "2015-16", adjustment = "none", useABSConnection = FALSE, allow.projection = 
TRUE), 1.06) }) test_that("ABS connection", { internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "none", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "seasonal", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "seasonal", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "trimmed", useABSConnection = FALSE) external_ans <- cpi_inflator(from_fy = "2012-13", to_fy = "2013-14", adjustment = "trimmed", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "trimmed", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "trimmed", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "seasonal", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "seasonal", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) internal_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "none", useABSConnection = FALSE) external_ans <- cpi_inflator_quarters(100, from_qtr = "1960-Q1", to_qtr = "1961-Q1", adjustment = "none", useABSConnection = TRUE) expect_equal(internal_ans, external_ans, tol = 0.0001) })
#Programming Assignment 3 #The data for this assignment come from the Hospital Compare web site (http://hospitalcompare.hhs.gov) #run by the U.S. Department of Health and Human Services. The purpose of the web site is to provide data and #information about the quality of care at over 4,000 Medicare-certified hospitals in the U.S. This dataset essentially #covers all major U.S. hospitals. This dataset is used for a variety of purposes, including determining #whether hospitals should be fined for not providing high quality care to patients (see http://goo.gl/jAXFX #for some background on this particular topic). #The Hospital Compare web site contains a lot of data and we will only look at a small subset for this #assignment. The zip file for this assignment contains three files #• outcome-of-care-measures.csv: Contains information about 30-day mortality and readmission rates #for heart attacks, heart failure, and pneumonia for over 4,000 hospitals. #• hospital-data.csv: Contains information about each hospital. #• Hospital_Revised_Flatfiles.pdf: Descriptions of the variables in each file (i.e the code book). #A description of the variables in each of the files is in the included PDF file named Hospital_Revised_Flatfiles.pdf. 
#directory setwd("~/COURSERA/JHU-Data Science Specialization/LECTURES/C2-R Programming/Simulation and Profiling") #read the data outcome_df<-read.csv("outcome-of-care-measures.csv",colClasses = "character",stringsAsFactors = FALSE,na.strings = "Not Available") #best rankings by state #Write a rankall function which takes 2 arguments: #-an outcome name ("heart attack" (col 11), "heart failure" (col 17), "pneumonia" (col 23)) --outcome-- #-the ranking for that outcome ("best","worst", or an integer indicating the ranking) --num-- #the function reads the .csv file and and returns a 2-column data frame containing the hospital in each state #that has the ranking in the specified --num-- #note that some hospital names may be NA #the first column in the data frame is named hospital, and the second is named state #hospitals that do not have data on a particular outcome should be excluded. #the rankall() should handle ties in the same way as the rankhospital() does rankall<-function(outcome,num="best"){ if(outcome=="heart attack"|outcome=="heart failure"|outcome=="pneumonia"){ #check valid --outcome-- states_names<-levels(as.factor(outcome_df$State)) #vector with the names of all states states_names<-states_names[order(states_names)] #alphabetic order rank_df<-data.frame(hospital=character(length(states_names)),state=character(length(states_names))) rank_df$hospital<-as.character(rank_df$hospital) rank_df$state<-as.character(rank_df$state) #create empty df and establish the objects as characters source("rankhospital.R") #set the source of the rankhospital() for(i in 1:length(states_names)){ state<-states_names[i] #fulfill df with the names of the states rank_df$state[i]<-state result<-rankhospital(state=state,outcome=outcome,num=num) #run the rankhospital() for each state rank_df$hospital[i]<-result #fulfill with the name of the hospital } rank_df #return the resultant df } else { stop("invalid outcome") } }
/rankall.R
no_license
AnaMJaimeR/COURSERA_LECTURES
R
false
false
3,336
r
#Programming Assignment 3 #The data for this assignment come from the Hospital Compare web site (http://hospitalcompare.hhs.gov) #run by the U.S. Department of Health and Human Services. The purpose of the web site is to provide data and #information about the quality of care at over 4,000 Medicare-certified hospitals in the U.S. This dataset essentially #covers all major U.S. hospitals. This dataset is used for a variety of purposes, including determining #whether hospitals should be fined for not providing high quality care to patients (see http://goo.gl/jAXFX #for some background on this particular topic). #The Hospital Compare web site contains a lot of data and we will only look at a small subset for this #assignment. The zip file for this assignment contains three files #• outcome-of-care-measures.csv: Contains information about 30-day mortality and readmission rates #for heart attacks, heart failure, and pneumonia for over 4,000 hospitals. #• hospital-data.csv: Contains information about each hospital. #• Hospital_Revised_Flatfiles.pdf: Descriptions of the variables in each file (i.e the code book). #A description of the variables in each of the files is in the included PDF file named Hospital_Revised_Flatfiles.pdf. 
#directory setwd("~/COURSERA/JHU-Data Science Specialization/LECTURES/C2-R Programming/Simulation and Profiling") #read the data outcome_df<-read.csv("outcome-of-care-measures.csv",colClasses = "character",stringsAsFactors = FALSE,na.strings = "Not Available") #best rankings by state #Write a rankall function which takes 2 arguments: #-an outcome name ("heart attack" (col 11), "heart failure" (col 17), "pneumonia" (col 23)) --outcome-- #-the ranking for that outcome ("best","worst", or an integer indicating the ranking) --num-- #the function reads the .csv file and and returns a 2-column data frame containing the hospital in each state #that has the ranking in the specified --num-- #note that some hospital names may be NA #the first column in the data frame is named hospital, and the second is named state #hospitals that do not have data on a particular outcome should be excluded. #the rankall() should handle ties in the same way as the rankhospital() does rankall<-function(outcome,num="best"){ if(outcome=="heart attack"|outcome=="heart failure"|outcome=="pneumonia"){ #check valid --outcome-- states_names<-levels(as.factor(outcome_df$State)) #vector with the names of all states states_names<-states_names[order(states_names)] #alphabetic order rank_df<-data.frame(hospital=character(length(states_names)),state=character(length(states_names))) rank_df$hospital<-as.character(rank_df$hospital) rank_df$state<-as.character(rank_df$state) #create empty df and establish the objects as characters source("rankhospital.R") #set the source of the rankhospital() for(i in 1:length(states_names)){ state<-states_names[i] #fulfill df with the names of the states rank_df$state[i]<-state result<-rankhospital(state=state,outcome=outcome,num=num) #run the rankhospital() for each state rank_df$hospital[i]<-result #fulfill with the name of the hospital } rank_df #return the resultant df } else { stop("invalid outcome") } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RMSE.R \name{RMSE} \alias{RMSE} \title{Root mean square error (RMSE)} \usage{ RMSE(obs, sim) } \arguments{ \item{obs}{measured values} \item{sim}{predicted values} } \description{ Calculate Root mean square error (RMSE) }
/man/RMSE.Rd
no_license
kongdd/SoilHyP
R
false
true
301
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RMSE.R \name{RMSE} \alias{RMSE} \title{Root mean square error (RMSE)} \usage{ RMSE(obs, sim) } \arguments{ \item{obs}{measured values} \item{sim}{predicted values} } \description{ Calculate Root mean square error (RMSE) }
#'Ecopath module of Rpath #' #'Performs initial mass balance using a model parameter file and diet #'matrix file. #' #'@family Rpath functions #' #'@param Rpath.params R object containing the Rpath parameters. This is generated #' either by the create.rpath.params or read.rpath.params functions. #'@param eco.name Optional name of the ecosystem which becomes an attribute of #' rpath object. #'@param eco.area Optional area of the ecosystem which becomes an attribute of the #' rpath object. #' #'@return Returns an Rpath object that can be supplied to the rsim.scenario function. #'@import data.table #'@export rpath <- function(Rpath.params, eco.name = NA, eco.area = 1){ #Need to define variables to eliminate check() note about no visible binding Type <- Group <- DetInput <- ProdCons <- PB <- QB <- noB <- noEE <- alive <- BEE <- NULL Biomass <- Q <- BioAcc <- BioQB <- diag.a <- EEa <- B <- M0 <- QBloss <- Unassim <- NULL # Model Parameters - Basic parameters, detritus fate, catch, discards in that order model <- copy(Rpath.params$model) #Diet Parameters - diet matrix, predators as columns, prey as rows - include #producers as predators even though they do not consume any groups diet <- copy(Rpath.params$diet) #Check that all columns of model are numeric and not logical if(length(which(sapply(model, class) == 'logical')) > 0){ logic.col <- which(sapply(model, class) == 'logical') for(i in 1:length(logic.col)){ set(model, j = logic.col[i], value = as.numeric(model[[logic.col[i]]])) } } #Remove first column if names (factor or character) if(sapply(diet, class)[1] == 'factor') diet[, 1 := NULL] if(sapply(diet, class)[1] == 'character') diet[, 1 := NULL] #Adjust diet comp of mixotrophs mixotrophs <- which(model[, Type] > 0 & model[, Type] < 1) mix.Q <- 1 - model[mixotrophs, Type] for(i in seq_along(mixotrophs)){ new.dc <- diet[, mixotrophs[i], with = F] * mix.Q[i] diet[, mixotrophs[i] := new.dc] } #Convert NAs to zero in diet matrix diet[is.na(diet)] <- 0 # Get number of 
groups, living, dead, and gear ngroups <- nrow(model) nliving <- nrow(model[Type < 2, ]) ndead <- nrow(model[Type == 2, ]) ngear <- nrow(model[Type == 3, ]) nodetrdiet <- diet[1:nliving, ] model[is.na(DetInput), DetInput := 0] # fill in GE(PQ), QB, or PB from other inputs GE <- ifelse(is.na(model[, ProdCons]), model[, PB / QB], model[, ProdCons]) QB.1 <- ifelse(is.na(model[, QB]), model[, PB / GE], model[, QB]) PB.1 <- ifelse(is.na(model[, PB]), model[, ProdCons * QB], model[, PB]) model[, QB := QB.1] model[, PB := PB.1] # define landings, discards, necessary sums landmat <- model[, (10 + ndead + 1):(10 + ndead + ngear), with = F] discardmat <- model[, (10 + ndead + 1 + ngear):(10 + ndead + (2 * ngear)), with = F] totcatchmat <- landmat + discardmat # KYA 1/16/14 Need if statement here because rowSums fail if only one # fishery (catch is vector instead of matrix) ##FIX PROPAGATION HERE if (is.data.frame(totcatchmat)){ totcatch <- rowSums(totcatchmat) landings <- rowSums(landmat) discards <- rowSums(discardmat) gearland <- colSums(landmat, na.rm = T) geardisc <- colSums(discardmat, na.rm = T) }else{ totcatch <- totcatchmat landings <- landmat discards <- discardmat gearland <- sum(landmat, na.rm = T) geardisc <- sum(discardmat, na.rm = T) } geartot <- gearland + geardisc model[, landings := landings] model[, discards := discards] model[, totcatch := totcatch] # flag missing pars and subset for estimation model[, noB := 0] model[, noEE := 0] model[, alive := 0] model[, BEE := 0] model[is.na(Biomass), noB := 1] model[is.na(EE), noEE := 1] model[Type < 2, alive := 1] model[noB == 0 & noEE == 0, BEE := 1] # define detritus fate matrix detfate <- model[, (10 + 1):(10 + ndead), with = F] # set up and solve the system of equations for living group B or EE living <- model[alive == 1, ] #Set up right hand side b living[, Ex := totcatch + BioAcc] living[, BioQB := Biomass * QB] cons <- as.matrix(nodetrdiet) * living$BioQB[col(as.matrix(nodetrdiet))] living[, b := Ex + 
rowSums(cons, na.rm = T)] #Set up A matrix living[noEE == 1, diag.a := Biomass * PB] living[noEE == 0, diag.a := PB * EE] #Special case where B and EE are known then need to solve for BA #living[BEE == 1, b := b - (Biomass * PB * EE)] #living[BEE == 1, diag.a := 0] #Need to work on this solution A <- matrix(0, nliving, nliving) diag(A) <- living[, diag.a] QBDC <- as.matrix(nodetrdiet) * living$QB[col(as.matrix(nodetrdiet))] dimnames(QBDC) <- list(NULL, NULL) QBDC[is.na(QBDC)] <- 0 #Flip noB flag for known B and EE #living[BEE == 1, noB := 1] QBDCa <- as.matrix(QBDC) * living$noB[col(as.matrix(QBDC))] A <- A - QBDCa #Switch flag back #living[BEE == 1, noB := 0] # Generalized inverse does the actual solving #Invert A and multiple by b to get x (unknowns) x <- MASS::ginv(A, tol = .Machine$double.eps) %*% living[, b] #Assign unknown values living[, EEa := x * noEE] living[is.na(EE), EE := EEa] living[, B := x * noB] living[is.na(Biomass), Biomass := B] # detritus EE calcs living[, M0 := PB * (1 - EE)] living[, QBloss := QB] living[is.na(QBloss), QBloss := 0] loss <- c((living[, M0] * living[, Biomass]) + (living[, Biomass] * living[, QBloss] * living[, Unassim]), model[Type ==2, DetInput], geardisc) detinputs <- colSums(loss * detfate) detdiet <- diet[(nliving + 1):(nliving + ndead), ] BQB <- living[, Biomass * QB] detcons <- as.matrix(detdiet) * BQB[col(as.matrix(detdiet))] detoutputs <- rowSums(detcons, na.rm = T) EE <- c(living[, EE], as.vector(detoutputs / detinputs)) # added by kya # if a detritus biomass is put into the spreadsheet, use that and # calculate PB. If no biomass, but a PB, use that pb with inflow to # calculate biomass. If neither, use default PB=0.5, Bio = inflow/PB # This is done because Ecosim requires a detrital biomass. 
Default_Detrital_PB <- 0.5 inDetPB <- model[(nliving + 1):(nliving + ndead), PB] inDetB <- model[(nliving + 1):(nliving + ndead), Biomass] DetPB <- ifelse(is.na(inDetPB), Default_Detrital_PB, inDetPB) DetB <- ifelse(is.na(inDetB), detinputs / DetPB, inDetB) DetPB <- detinputs / DetB # Trophic Level calcs b <- rep(1, ngroups) TLcoeff <- matrix(0, ngroups, ngroups) diag(TLcoeff) <- rep(1, ngroups) gearcons <- as.matrix(totcatchmat) / geartot[col(as.matrix(totcatchmat))] dimnames(gearcons) <- list(NULL, NULL) gearcons[is.na(gearcons)] <- 0 dietplus <- as.matrix(diet) dimnames(dietplus) <- list(NULL, NULL) #Adjust for mixotrophs (partial primary producers) - #Moved this code up so that #it also impacted the EE calculation # mixotrophs <- which(model[, Type] > 0 & model[, Type] < 1) # mix.Q <- 1 - model[mixotrophs, Type] # for(i in seq_along(mixotrophs)){ # dietplus[, mixotrophs[i]] <- dietplus[, mixotrophs[i]] * mix.Q[i] # } #Adjust for diet import (Consumption outside model) import <- which(dietplus[nrow(diet), ] > 0) for(i in seq_along(import)){ import.denom <- 1 - dietplus[nrow(diet), import[i]] dietplus[, import[i]] <- dietplus[, import[i]] / import.denom } dietplus <- dietplus[1:(nliving + ndead), ] dietplus <- rbind(dietplus, matrix(0, ngear, nliving)) dietplus <- cbind(dietplus, matrix(0, ngroups, ndead), gearcons) TLcoeffA <- TLcoeff - dietplus TL <- solve(t(TLcoeffA), b) #kya changed these following four lines for detritus, and removing NAs #to match header file format (replacing NAs with 0.0s) Bplus <- c(living[, Biomass], DetB, rep(0.0, ngear)) PBplus <- model[, PB] PBplus[(nliving + 1):(nliving + ndead)] <- DetPB PBplus[is.na(PBplus)] <- 0.0 EEplus <- c(EE, rep(0.0, ngear)) QBplus <- model[, QB] QBplus[is.na(QBplus)] <- 0.0 GE[is.na(GE)] <- 0.0 RemPlus <- model[, totcatch] RemPlus[is.na(RemPlus)] <- 0.0 balanced <- list(Group = model[, Group], TL = TL, Biomass = Bplus, PB = PBplus, QB = QBplus, EE = EEplus, GE = GE, Removals = RemPlus) M0plus <- c(living[, 
M0], as.vector(detoutputs / detinputs)) gearF <- as.matrix(totcatchmat) / living[, Biomass][row(as.matrix(totcatchmat))] #newcons <- as.matrix(nodetrdiet) * living[, BQB][col(as.matrix(nodetrdiet))] newcons <- as.matrix(nodetrdiet) * BQB[col(as.matrix(nodetrdiet))] predM <- as.matrix(newcons) / living[, Biomass][row(as.matrix(newcons))] predM <- rbind(predM, detcons) morts <- list(Group = model[Type < 3, Group], PB = model[Type < 3, PB], M0 = M0plus, F = gearF[1:(nliving + ndead), ], M2 = predM) # convert from levels to characters gnames <- as.character(balanced$Group) # cleanup before sending to sim -- C code wants 0 as missing value, not NA balanced$Biomass[is.na(balanced$Biomass)] <- 0 balanced$PB[is.na(balanced$PB)] <- 0 balanced$QB[is.na(balanced$QB)] <- 0 balanced$EE[is.na(balanced$EE)] <- 0 balanced$GE[is.na(balanced$GE)] <- 0 model$BioAcc[is.na(model$BioAcc)] <- 0 model$Unassim[is.na(model$Unassim)] <- 0 dietm <- as.matrix(diet) dimnames(dietm) <- list(c(gnames[1:(nliving+ndead)],"Import"), gnames[1:nliving]) dietm[is.na(dietm)] <- 0 landmatm <- as.matrix(landmat) dimnames(landmatm) <- list(gnames, gnames[(ngroups-ngear+1):ngroups]) landmatm[is.na(landmatm)] <- 0 discardmatm <- as.matrix(discardmat) dimnames(discardmatm) <- list(gnames, gnames[(ngroups-ngear+1):ngroups]) discardmatm[is.na(discardmatm)] <- 0 detfatem <- as.matrix(detfate) dimnames(detfatem) <- list(gnames, gnames[(nliving+1):(nliving+ndead)]) detfatem[is.na(detfatem)] <- 0 # KYA April 2020 - added names for output list out.Group <- gnames; names(out.Group) <- gnames out.type <- model[, Type]; names(out.type) <- gnames out.TL <- TL; names(out.TL) <- gnames out.Biomass <- balanced$Biomass; names(out.Biomass) <- gnames out.PB <- balanced$PB; names(out.PB) <- gnames out.QB <- balanced$QB; names(out.QB) <- gnames out.EE <- balanced$EE; names(out.EE) <- gnames out.BA <- model[, BioAcc]; names(out.BA) <- gnames out.Unassim <- model[, Unassim]; names(out.Unassim) <- gnames out.GE <- balanced$GE; 
names(out.GE) <- gnames # list structure for sim inputs path.model <- list(NUM_GROUPS = ngroups, NUM_LIVING = nliving, NUM_DEAD = ndead, NUM_GEARS = ngear, Group = out.Group, type = out.type, TL = out.TL, Biomass = out.Biomass, PB = out.PB, QB = out.QB, EE = out.EE, BA = out.BA, Unassim = out.Unassim, GE = out.GE, DC = dietm, DetFate = detfatem, Landings = landmatm, Discards = discardmatm) #Define class of output class(path.model) <- 'Rpath' attr(path.model, 'eco.name') <- eco.name attr(path.model, 'eco.area') <- eco.area return(path.model) } #'Calculate biomass and consumption for multistanza groups #' #'Uses the leading stanza to calculate the biomass and consumption of other stanzas #'necessary to support the leading stanza. #' #'@family Rpath functions #' #'@param Rpath.params Object containing the Rpath parameters generated either by #' create.rpath.params or read.rpath.params #' #'@return Calculates and adds biomass and consumption for trailing stanza groups. #' Also adds weight at age and number at age for multi-staza groups. 
#'
#'@import data.table
#'@export
rpath.stanzas <- function(Rpath.params){
  #Need to define variables to eliminate check() note about no visible binding
  StGroupNum <- First <- StanzaNum <- VBGF_d <- VBGF_Ksp <- Last <- GroupNum <- NULL
  WageS <- age <- QageS <- Survive <- Z <- survive_L <- bs.num <- qs.num <- Leading <- NULL
  Group <- Biomass <- R <- NageS <- bs.denom <- bs <- qs.denom <- qs <- Cons <- NULL
  QB <- NULL

  #Determine the total number of groups with multistanzas
  # NOTE: groupfile/stanzafile are data.tables, so every := below modifies
  # Rpath.params in place (reference semantics), not a copy.
  Nsplit     <- Rpath.params$stanza$NStanzaGroups
  groupfile  <- Rpath.params$stanza$stgroups
  stanzafile <- Rpath.params$stanza$stindiv

  #Need to add vector of stanza number
  # Stanzas are numbered in order of their first month (youngest = 1).
  for(isp in 1:Nsplit){
    stnum <- order(stanzafile[StGroupNum == isp, First])
    stanzafile[StGroupNum == isp, StanzaNum := stnum]
  }

  #Calculate the last month for the final stanza
  #Months to get to 99% Winf (We don't use an accumulator function like EwE)
  # NOTE(review): the 0.9999 constant corresponds to 99.99% of Winf under the
  # generalized VBGF; the "99%" in the comment above may be out of date -- confirm.
  groupfile[, last := floor(log(1 - 0.9999^(1 - VBGF_d)) /
                              (-1 * (VBGF_Ksp * 3 / 12) * (1 - VBGF_d)))]

  for(isp in 1:Nsplit){
    nstanzas <- groupfile[StGroupNum == isp, nstanzas]
    t99 <- groupfile[StGroupNum == isp, last]
    stanzafile[StGroupNum == isp & StanzaNum == nstanzas, Last := t99]

    #Grab ecopath group codes
    group.codes <- stanzafile[StGroupNum == isp, GroupNum]

    #Grab index for first and last months for stanzas
    first  <- stanzafile[StGroupNum == isp, First]
    second <- stanzafile[StGroupNum == isp, Last]

    #Calculate weight and consumption at age
    # One row per month of age, from the first month of stanza 1 to t99.
    StGroup <- data.table(age = stanzafile[StGroupNum == isp & StanzaNum == 1, First]:
                            t99)
    #Calculate monthly generalized k: (Ksp * 3) / 12
    k <- (groupfile[StGroupNum == isp, VBGF_Ksp] * 3) / 12
    d <- groupfile[StGroupNum == isp, VBGF_d]
    # Generalized von Bertalanffy weight-at-age; consumption scales as W^d.
    StGroup[, WageS := (1 - exp(-k * (1 - d) * (age))) ^ (1 / (1 - d))]
    StGroup[, QageS := WageS ^ d]

    #Calculate the relative number of animals at age a
    #Vector of survival rates from 1 stanza to the next
    StGroup[age == 0, Survive := 1]
    prev.surv <- 1
    for(ist in 1:nstanzas){
      #Convert Z to a monthly Z
      # Total mortality = stanza Z plus group-level BAB, expressed per month.
      month.z <- (stanzafile[StGroupNum == isp & StanzaNum == ist, Z] +
                    groupfile[StGroupNum == isp, BAB]) / 12
      StGroup[age %in% first[ist]:second[ist], survive_L := exp(-1*month.z)]
      # Carry survivorship across the stanza boundary using the previous
      # stanza's monthly survival rate.
      if(first[ist] > 0){
        StGroup[age == first[ist], Survive := StGroup[age == first[ist] - 1, Survive] * prev.surv]
      }
      # Recursive month-by-month survivorship within the stanza.
      for(a in (first[ist] + 1):second[ist]){
        StGroup[age == a, Survive := StGroup[age == a - 1, Survive] * survive_L]
      }
      # Biomass/consumption at age, per recruit.
      StGroup[, B := Survive * WageS]
      StGroup[, Q := Survive * QageS]

      #Numerator for the relative biomass/consumption calculations
      b.num <- StGroup[age %in% first[ist]:second[ist], sum(B)]
      q.num <- StGroup[age %in% first[ist]:second[ist], sum(Q)]
      stanzafile[StGroupNum == isp & StanzaNum == ist, bs.num := b.num]
      stanzafile[StGroupNum == isp & StanzaNum == ist, qs.num := q.num]
      prev.surv <- exp(-1 * month.z)
    }

    #Scale numbers up to total recruits
    # The leading stanza (Leading == T) is the one with a user-supplied biomass.
    BaseStanza <- stanzafile[StGroupNum == isp & Leading == T, ]
    BioPerEgg <- StGroup[age %in% BaseStanza[, First]:BaseStanza[, Last], sum(B)]
    recruits <- Rpath.params$model[Group == BaseStanza[, Group], Biomass] / BioPerEgg
    #Save recruits
    groupfile[StGroupNum == isp, R := recruits]

    #Numbers at age S
    StGroup[, NageS := Survive * recruits]

    #Calculate relative biomass
    stanzafile[StGroupNum == isp, bs.denom := sum(bs.num)]
    stanzafile[StGroupNum == isp, bs := bs.num / bs.denom]

    #Calculate relative consumption
    stanzafile[StGroupNum == isp, qs.denom := sum(qs.num)]
    stanzafile[StGroupNum == isp, qs := qs.num / qs.denom]

    #Use leading group to calculate other biomasses
    stanzafile[StGroupNum == isp & Leading == T, Biomass :=
                 Rpath.params$model[Group == BaseStanza[, Group], Biomass]]
    B <- stanzafile[StGroupNum == isp & Leading == T, Biomass / bs]
    stanzafile[StGroupNum == isp, Biomass := bs * B]

    #Use leading group to calculate other consumption
    stanzafile[StGroupNum == isp & Leading == T, Cons :=
                 Rpath.params$model[Group == BaseStanza[, Group], QB] *
                 Rpath.params$model[Group == BaseStanza[, Group], Biomass]]
    Q <- stanzafile[StGroupNum == isp & Leading == T, Cons / qs]
    stanzafile[StGroupNum == isp, Cons := qs * Q]
    stanzafile[, QB := Cons / Biomass]

    # Store the full age-structure table for this stanza group.
    # NOTE(review): read as Rpath.params$stanza above but written to $stanzas
    # here -- confirm which list element downstream code expects.
    Rpath.params$stanzas$StGroup[[isp]] <- StGroup
  }

  #Drop extra columns
  stanzafile[, c('bs.num', 'bs.denom', 'bs', 'qs.num', 'qs.denom', 'qs') := NULL]

  #Push biomass to modfile
  for(i in 1:nrow(stanzafile)){
    Rpath.params$model[Group == stanzafile[i, Group], Biomass := stanzafile[i, Biomass]]
  }
  #Push consumption to modfile
  for(i in 1:nrow(stanzafile)){
    Rpath.params$model[Group == stanzafile[i, Group], QB := stanzafile[i, QB]]
  }

  return(Rpath.params)
}
/R/ecopath.R
no_license
zanbi/NOAA-EDAB-Rpath
R
false
false
17,871
r
#'Ecopath module of Rpath
#'
#'Performs initial mass balance using a model parameter file and diet
#'matrix file.
#'
#'@family Rpath functions
#'
#'@param Rpath.params R object containing the Rpath parameters.  This is generated
#'  either by the create.rpath.params or read.rpath.params functions.
#'@param eco.name Optional name of the ecosystem which becomes an attribute of
#'  rpath object.
#'@param eco.area Optional area of the ecosystem which becomes an attribute of the
#'  rpath object.
#'
#'@return Returns an Rpath object that can be supplied to the rsim.scenario function.
#'@import data.table
#'@export
rpath <- function(Rpath.params, eco.name = NA, eco.area = 1){
  #Need to define variables to eliminate check() note about no visible binding
  Type <- Group <- DetInput <- ProdCons <- PB <- QB <- noB <- noEE <- alive <- BEE <- NULL
  Biomass <- Q <- BioAcc <- BioQB <- diag.a <- EEa <- B <- M0 <- QBloss <- Unassim <- NULL

  # Model Parameters - Basic parameters, detritus fate, catch, discards in that order
  # copy() so the := mutations below do not alter the caller's Rpath.params.
  model <- copy(Rpath.params$model)

  #Diet Parameters - diet matrix, predators as columns, prey as rows - include
  #producers as predators even though they do not consume any groups
  diet <- copy(Rpath.params$diet)

  #Check that all columns of model are numeric and not logical
  # (columns that were entirely NA may have been read in as logical)
  if(length(which(sapply(model, class) == 'logical')) > 0){
    logic.col <- which(sapply(model, class) == 'logical')
    for(i in 1:length(logic.col)){
      set(model, j = logic.col[i], value = as.numeric(model[[logic.col[i]]]))
    }
  }

  #Remove first column if names (factor or character)
  if(sapply(diet, class)[1] == 'factor')    diet[, 1 := NULL]
  if(sapply(diet, class)[1] == 'character') diet[, 1 := NULL]

  #Adjust diet comp of mixotrophs
  # Type between 0 and 1 = partial primary producer; scale its diet column by
  # the consumed (non-photosynthetic) fraction.
  mixotrophs <- which(model[, Type] > 0 & model[, Type] < 1)
  mix.Q <- 1 - model[mixotrophs, Type]
  for(i in seq_along(mixotrophs)){
    new.dc <- diet[, mixotrophs[i], with = F] * mix.Q[i]
    diet[, mixotrophs[i] := new.dc]
  }

  #Convert NAs to zero in diet matrix
  diet[is.na(diet)] <- 0

  # Get number of groups, living, dead, and gear
  # Type codes: <2 living (0 consumer..1 producer), 2 detritus, 3 fleet.
  ngroups <- nrow(model)
  nliving <- nrow(model[Type < 2, ])
  ndead   <- nrow(model[Type == 2, ])
  ngear   <- nrow(model[Type == 3, ])

  # Diet matrix restricted to living prey rows (detritus handled separately).
  nodetrdiet <- diet[1:nliving, ]
  model[is.na(DetInput), DetInput := 0]

  # fill in GE(PQ), QB, or PB from other inputs
  # Any one of GE = PB/QB can be derived from the other two.
  GE <- ifelse(is.na(model[, ProdCons]), model[, PB / QB], model[, ProdCons])
  QB.1 <- ifelse(is.na(model[, QB]), model[, PB / GE], model[, QB])
  PB.1 <- ifelse(is.na(model[, PB]), model[, ProdCons * QB], model[, PB])
  model[, QB := QB.1]
  model[, PB := PB.1]

  # define landings, discards, necessary sums
  # NOTE(review): column positions are hard-coded -- assumes the 10 basic
  # parameter columns come first, then ndead detritus-fate columns, then
  # ngear landings columns, then ngear discard columns.  Confirm against the
  # parameter-file layout produced by create.rpath.params.
  landmat <- model[, (10 + ndead + 1):(10 + ndead + ngear), with = F]
  discardmat <- model[, (10 + ndead + 1 + ngear):(10 + ndead + (2 * ngear)), with = F]
  totcatchmat <- landmat + discardmat

  # KYA 1/16/14 Need if statement here because rowSums fail if only one
  # fishery (catch is vector instead of matrix)
  ##FIX PROPAGATION HERE
  if (is.data.frame(totcatchmat)){
    totcatch <- rowSums(totcatchmat)
    landings <- rowSums(landmat)
    discards <- rowSums(discardmat)
    gearland <- colSums(landmat,    na.rm = T)
    geardisc <- colSums(discardmat, na.rm = T)
  }else{
    totcatch <- totcatchmat
    landings <- landmat
    discards <- discardmat
    gearland <- sum(landmat,    na.rm = T)
    geardisc <- sum(discardmat, na.rm = T)
  }
  geartot <- gearland + geardisc
  model[, landings := landings]
  model[, discards := discards]
  model[, totcatch := totcatch]

  # flag missing pars and subset for estimation
  # noB/noEE mark the unknowns to be solved for; BEE marks both-known groups.
  model[, noB := 0]
  model[, noEE := 0]
  model[, alive := 0]
  model[, BEE := 0]
  model[is.na(Biomass), noB := 1]
  model[is.na(EE), noEE := 1]
  model[Type < 2, alive := 1]
  model[noB == 0 & noEE == 0, BEE := 1]

  # define detritus fate matrix
  detfate <- model[, (10 + 1):(10 + ndead), with = F]

  # set up and solve the system of equations for living group B or EE
  living <- model[alive == 1, ]
  #Set up right hand side b
  # b = exports (catch + biomass accumulation) + predation by known consumers.
  living[, Ex := totcatch + BioAcc]
  living[, BioQB := Biomass * QB]
  cons <- as.matrix(nodetrdiet) * living$BioQB[col(as.matrix(nodetrdiet))]
  living[, b := Ex + rowSums(cons, na.rm = T)]

  #Set up A matrix
  # Diagonal carries the known production term; per group either B or EE is
  # the unknown, so the diagonal uses whichever factor is known.
  living[noEE == 1, diag.a := Biomass * PB]
  living[noEE == 0, diag.a := PB * EE]
  #Special case where B and EE are known then need to solve for BA
  #living[BEE == 1, b := b - (Biomass * PB * EE)]
  #living[BEE == 1, diag.a := 0]
  #Need to work on this solution
  A <- matrix(0, nliving, nliving)
  diag(A) <- living[, diag.a]
  # Off-diagonal: consumption per unit predator biomass, applied only where
  # predator biomass is the unknown (noB == 1).
  QBDC <- as.matrix(nodetrdiet) * living$QB[col(as.matrix(nodetrdiet))]
  dimnames(QBDC) <- list(NULL, NULL)
  QBDC[is.na(QBDC)] <- 0
  #Flip noB flag for known B and EE
  #living[BEE == 1, noB := 1]
  QBDCa <- as.matrix(QBDC) * living$noB[col(as.matrix(QBDC))]
  A <- A - QBDCa
  #Switch flag back
  #living[BEE == 1, noB := 0]

  # Generalized inverse does the actual solving
  #Invert A and multiply by b to get x (unknowns)
  x <- MASS::ginv(A, tol = .Machine$double.eps) %*% living[, b]

  #Assign unknown values
  # x holds EE where EE was missing and B where Biomass was missing.
  living[, EEa := x * noEE]
  living[is.na(EE), EE := EEa]
  living[, B := x * noB]
  living[is.na(Biomass), Biomass := B]

  # detritus EE calcs
  # M0 = other (non-predation, non-fishing) mortality; flows to detritus along
  # with unassimilated consumption and fishery discards, per detfate.
  living[, M0 := PB * (1 - EE)]
  living[, QBloss := QB]
  living[is.na(QBloss), QBloss := 0]
  loss <- c((living[, M0] * living[, Biomass]) +
              (living[, Biomass] * living[, QBloss] * living[, Unassim]),
            model[Type == 2, DetInput],
            geardisc)
  detinputs <- colSums(loss * detfate)
  detdiet <- diet[(nliving + 1):(nliving + ndead), ]
  BQB <- living[, Biomass * QB]
  detcons <- as.matrix(detdiet) * BQB[col(as.matrix(detdiet))]
  detoutputs <- rowSums(detcons, na.rm = T)
  # Detritus EE = consumption of detritus / inflow to detritus.
  EE <- c(living[, EE], as.vector(detoutputs / detinputs))

  # added by kya
  # if a detritus biomass is put into the spreadsheet, use that and
  # calculate PB.  If no biomass, but a PB, use that pb with inflow to
  # calculate biomass.  If neither, use default PB=0.5, Bio = inflow/PB
  # This is done because Ecosim requires a detrital biomass.
  Default_Detrital_PB <- 0.5
  inDetPB <- model[(nliving + 1):(nliving + ndead), PB]
  inDetB  <- model[(nliving + 1):(nliving + ndead), Biomass]
  DetPB <- ifelse(is.na(inDetPB), Default_Detrital_PB, inDetPB)
  DetB  <- ifelse(is.na(inDetB), detinputs / DetPB, inDetB)
  DetPB <- detinputs / DetB

  # Trophic Level calcs
  # Solve (I - D') TL = 1 where D is the diet matrix extended with gear
  # "consumption" (catch proportions), so fleets also get a trophic level.
  b <- rep(1, ngroups)
  TLcoeff <- matrix(0, ngroups, ngroups)
  diag(TLcoeff) <- rep(1, ngroups)
  gearcons <- as.matrix(totcatchmat) / geartot[col(as.matrix(totcatchmat))]
  dimnames(gearcons) <- list(NULL, NULL)
  gearcons[is.na(gearcons)] <- 0
  dietplus <- as.matrix(diet)
  dimnames(dietplus) <- list(NULL, NULL)
  #Adjust for mixotrophs (partial primary producers) - Moved this code up so that
  #it also impacted the EE calculation
  # mixotrophs <- which(model[, Type] > 0 & model[, Type] < 1)
  # mix.Q <- 1 - model[mixotrophs, Type]
  # for(i in seq_along(mixotrophs)){
  #   dietplus[, mixotrophs[i]] <- dietplus[, mixotrophs[i]] * mix.Q[i]
  # }
  #Adjust for diet import (Consumption outside model)
  # Renormalize each importing predator's diet over in-model prey only.
  import <- which(dietplus[nrow(diet), ] > 0)
  for(i in seq_along(import)){
    import.denom <- 1 - dietplus[nrow(diet), import[i]]
    dietplus[, import[i]] <- dietplus[, import[i]] / import.denom
  }
  dietplus <- dietplus[1:(nliving + ndead), ]
  dietplus <- rbind(dietplus, matrix(0, ngear, nliving))
  dietplus <- cbind(dietplus, matrix(0, ngroups, ndead), gearcons)
  TLcoeffA <- TLcoeff - dietplus
  TL <- solve(t(TLcoeffA), b)

  #kya changed these following four lines for detritus, and removing NAs
  #to match header file format (replacing NAs with 0.0s)
  Bplus <- c(living[, Biomass], DetB, rep(0.0, ngear))
  PBplus <- model[, PB]
  PBplus[(nliving + 1):(nliving + ndead)] <- DetPB
  PBplus[is.na(PBplus)] <- 0.0
  EEplus <- c(EE, rep(0.0, ngear))
  QBplus <- model[, QB]
  QBplus[is.na(QBplus)] <- 0.0
  GE[is.na(GE)] <- 0.0
  RemPlus <- model[, totcatch]
  RemPlus[is.na(RemPlus)] <- 0.0
  # Balanced parameter set, ngroups long (living + detritus + gear).
  balanced <- list(Group = model[, Group], TL = TL, Biomass = Bplus, PB = PBplus,
                   QB = QBplus, EE = EEplus, GE = GE, Removals = RemPlus)

  # Mortality components (computed but note: `morts` is not part of the
  # returned object below).
  M0plus <- c(living[, M0], as.vector(detoutputs / detinputs))
  gearF <- as.matrix(totcatchmat) / living[, Biomass][row(as.matrix(totcatchmat))]
  #newcons <- as.matrix(nodetrdiet) * living[, BQB][col(as.matrix(nodetrdiet))]
  newcons <- as.matrix(nodetrdiet) * BQB[col(as.matrix(nodetrdiet))]
  predM <- as.matrix(newcons) / living[, Biomass][row(as.matrix(newcons))]
  predM <- rbind(predM, detcons)
  morts <- list(Group = model[Type < 3, Group], PB = model[Type < 3, PB],
                M0 = M0plus, F = gearF[1:(nliving + ndead), ], M2 = predM)

  # convert from levels to characters
  gnames <- as.character(balanced$Group)

  # cleanup before sending to sim -- C code wants 0 as missing value, not NA
  balanced$Biomass[is.na(balanced$Biomass)] <- 0
  balanced$PB[is.na(balanced$PB)] <- 0
  balanced$QB[is.na(balanced$QB)] <- 0
  balanced$EE[is.na(balanced$EE)] <- 0
  balanced$GE[is.na(balanced$GE)] <- 0
  model$BioAcc[is.na(model$BioAcc)] <- 0
  model$Unassim[is.na(model$Unassim)] <- 0

  # Attach group names as dimnames on the matrices that go into the output.
  dietm <- as.matrix(diet)
  dimnames(dietm) <- list(c(gnames[1:(nliving+ndead)],"Import"), gnames[1:nliving])
  dietm[is.na(dietm)] <- 0
  landmatm <- as.matrix(landmat)
  dimnames(landmatm) <- list(gnames, gnames[(ngroups-ngear+1):ngroups])
  landmatm[is.na(landmatm)] <- 0
  discardmatm <- as.matrix(discardmat)
  dimnames(discardmatm) <- list(gnames, gnames[(ngroups-ngear+1):ngroups])
  discardmatm[is.na(discardmatm)] <- 0
  detfatem <- as.matrix(detfate)
  dimnames(detfatem) <- list(gnames, gnames[(nliving+1):(nliving+ndead)])
  detfatem[is.na(detfatem)] <- 0

  # KYA April 2020 - added names for output list
  out.Group <- gnames;             names(out.Group)   <- gnames
  out.type <- model[, Type];       names(out.type)    <- gnames
  out.TL <- TL;                    names(out.TL)      <- gnames
  out.Biomass <- balanced$Biomass; names(out.Biomass) <- gnames
  out.PB <- balanced$PB;           names(out.PB)      <- gnames
  out.QB <- balanced$QB;           names(out.QB)      <- gnames
  out.EE <- balanced$EE;           names(out.EE)      <- gnames
  out.BA <- model[, BioAcc];       names(out.BA)      <- gnames
  out.Unassim <- model[, Unassim]; names(out.Unassim) <- gnames
  out.GE <- balanced$GE;           names(out.GE)      <- gnames

  # list structure for sim inputs
  path.model <- list(NUM_GROUPS = ngroups,
                     NUM_LIVING = nliving,
                     NUM_DEAD = ndead,
                     NUM_GEARS = ngear,
                     Group = out.Group,
                     type = out.type,
                     TL = out.TL,
                     Biomass = out.Biomass,
                     PB = out.PB,
                     QB = out.QB,
                     EE = out.EE,
                     BA = out.BA,
                     Unassim = out.Unassim,
                     GE = out.GE,
                     DC = dietm,
                     DetFate = detfatem,
                     Landings = landmatm,
                     Discards = discardmatm)

  #Define class of output
  class(path.model) <- 'Rpath'
  attr(path.model, 'eco.name') <- eco.name
  attr(path.model, 'eco.area') <- eco.area

  return(path.model)
}

#'Calculate biomass and consumption for multistanza groups
#'
#'Uses the leading stanza to calculate the biomass and consumption of other stanzas
#'necessary to support the leading stanza.
#'
#'@family Rpath functions
#'
#'@param Rpath.params Object containing the Rpath parameters generated either by
#'  create.rpath.params or read.rpath.params
#'
#'@return Calculates and adds biomass and consumption for trailing stanza groups.
#'  Also adds weight at age and number at age for multi-stanza groups.
#'
#'@import data.table
#'@export
rpath.stanzas <- function(Rpath.params){
  #Need to define variables to eliminate check() note about no visible binding
  StGroupNum <- First <- StanzaNum <- VBGF_d <- VBGF_Ksp <- Last <- GroupNum <- NULL
  WageS <- age <- QageS <- Survive <- Z <- survive_L <- bs.num <- qs.num <- Leading <- NULL
  Group <- Biomass <- R <- NageS <- bs.denom <- bs <- qs.denom <- qs <- Cons <- NULL
  QB <- NULL

  #Determine the total number of groups with multistanzas
  # NOTE: groupfile/stanzafile are data.tables, so every := below modifies
  # Rpath.params in place (reference semantics), not a copy.
  Nsplit     <- Rpath.params$stanza$NStanzaGroups
  groupfile  <- Rpath.params$stanza$stgroups
  stanzafile <- Rpath.params$stanza$stindiv

  #Need to add vector of stanza number
  # Stanzas are numbered in order of their first month (youngest = 1).
  for(isp in 1:Nsplit){
    stnum <- order(stanzafile[StGroupNum == isp, First])
    stanzafile[StGroupNum == isp, StanzaNum := stnum]
  }

  #Calculate the last month for the final stanza
  #Months to get to 99% Winf (We don't use an accumulator function like EwE)
  # NOTE(review): the 0.9999 constant corresponds to 99.99% of Winf under the
  # generalized VBGF; the "99%" in the comment above may be out of date -- confirm.
  groupfile[, last := floor(log(1 - 0.9999^(1 - VBGF_d)) /
                              (-1 * (VBGF_Ksp * 3 / 12) * (1 - VBGF_d)))]

  for(isp in 1:Nsplit){
    nstanzas <- groupfile[StGroupNum == isp, nstanzas]
    t99 <- groupfile[StGroupNum == isp, last]
    stanzafile[StGroupNum == isp & StanzaNum == nstanzas, Last := t99]

    #Grab ecopath group codes
    group.codes <- stanzafile[StGroupNum == isp, GroupNum]

    #Grab index for first and last months for stanzas
    first  <- stanzafile[StGroupNum == isp, First]
    second <- stanzafile[StGroupNum == isp, Last]

    #Calculate weight and consumption at age
    # One row per month of age, from the first month of stanza 1 to t99.
    StGroup <- data.table(age = stanzafile[StGroupNum == isp & StanzaNum == 1, First]:
                            t99)
    #Calculate monthly generalized k: (Ksp * 3) / 12
    k <- (groupfile[StGroupNum == isp, VBGF_Ksp] * 3) / 12
    d <- groupfile[StGroupNum == isp, VBGF_d]
    # Generalized von Bertalanffy weight-at-age; consumption scales as W^d.
    StGroup[, WageS := (1 - exp(-k * (1 - d) * (age))) ^ (1 / (1 - d))]
    StGroup[, QageS := WageS ^ d]

    #Calculate the relative number of animals at age a
    #Vector of survival rates from 1 stanza to the next
    StGroup[age == 0, Survive := 1]
    prev.surv <- 1
    for(ist in 1:nstanzas){
      #Convert Z to a monthly Z
      # Total mortality = stanza Z plus group-level BAB, expressed per month.
      month.z <- (stanzafile[StGroupNum == isp & StanzaNum == ist, Z] +
                    groupfile[StGroupNum == isp, BAB]) / 12
      StGroup[age %in% first[ist]:second[ist], survive_L := exp(-1*month.z)]
      # Carry survivorship across the stanza boundary using the previous
      # stanza's monthly survival rate.
      if(first[ist] > 0){
        StGroup[age == first[ist], Survive := StGroup[age == first[ist] - 1, Survive] * prev.surv]
      }
      # Recursive month-by-month survivorship within the stanza.
      for(a in (first[ist] + 1):second[ist]){
        StGroup[age == a, Survive := StGroup[age == a - 1, Survive] * survive_L]
      }
      # Biomass/consumption at age, per recruit.
      StGroup[, B := Survive * WageS]
      StGroup[, Q := Survive * QageS]

      #Numerator for the relative biomass/consumption calculations
      b.num <- StGroup[age %in% first[ist]:second[ist], sum(B)]
      q.num <- StGroup[age %in% first[ist]:second[ist], sum(Q)]
      stanzafile[StGroupNum == isp & StanzaNum == ist, bs.num := b.num]
      stanzafile[StGroupNum == isp & StanzaNum == ist, qs.num := q.num]
      prev.surv <- exp(-1 * month.z)
    }

    #Scale numbers up to total recruits
    # The leading stanza (Leading == T) is the one with a user-supplied biomass.
    BaseStanza <- stanzafile[StGroupNum == isp & Leading == T, ]
    BioPerEgg <- StGroup[age %in% BaseStanza[, First]:BaseStanza[, Last], sum(B)]
    recruits <- Rpath.params$model[Group == BaseStanza[, Group], Biomass] / BioPerEgg
    #Save recruits
    groupfile[StGroupNum == isp, R := recruits]

    #Numbers at age S
    StGroup[, NageS := Survive * recruits]

    #Calculate relative biomass
    stanzafile[StGroupNum == isp, bs.denom := sum(bs.num)]
    stanzafile[StGroupNum == isp, bs := bs.num / bs.denom]

    #Calculate relative consumption
    stanzafile[StGroupNum == isp, qs.denom := sum(qs.num)]
    stanzafile[StGroupNum == isp, qs := qs.num / qs.denom]

    #Use leading group to calculate other biomasses
    stanzafile[StGroupNum == isp & Leading == T, Biomass :=
                 Rpath.params$model[Group == BaseStanza[, Group], Biomass]]
    B <- stanzafile[StGroupNum == isp & Leading == T, Biomass / bs]
    stanzafile[StGroupNum == isp, Biomass := bs * B]

    #Use leading group to calculate other consumption
    stanzafile[StGroupNum == isp & Leading == T, Cons :=
                 Rpath.params$model[Group == BaseStanza[, Group], QB] *
                 Rpath.params$model[Group == BaseStanza[, Group], Biomass]]
    Q <- stanzafile[StGroupNum == isp & Leading == T, Cons / qs]
    stanzafile[StGroupNum == isp, Cons := qs * Q]
    stanzafile[, QB := Cons / Biomass]

    # Store the full age-structure table for this stanza group.
    # NOTE(review): read as Rpath.params$stanza above but written to $stanzas
    # here -- confirm which list element downstream code expects.
    Rpath.params$stanzas$StGroup[[isp]] <- StGroup
  }

  #Drop extra columns
  stanzafile[, c('bs.num', 'bs.denom', 'bs', 'qs.num', 'qs.denom', 'qs') := NULL]

  #Push biomass to modfile
  for(i in 1:nrow(stanzafile)){
    Rpath.params$model[Group == stanzafile[i, Group], Biomass := stanzafile[i, Biomass]]
  }
  #Push consumption to modfile
  for(i in 1:nrow(stanzafile)){
    Rpath.params$model[Group == stanzafile[i, Group], QB := stanzafile[i, QB]]
  }

  return(Rpath.params)
}
#' @title Load Vocabulary Tables From a Preexisting Schema.
#'
#' @description This function populates all Vocabulary tables with data from Vocabulary tables in a specified schema.
#'
#' @usage LoadVocabFromSchema(connectionDetails, vocabSourceSchema, vocabTargetSchema)
#'
#' @details This function assumes \cr\code{createCDMTables()} has already been run and \cr\code{vocabSourceSchema} has all required Vocabulary tables.
#' Each table is copied with a CTAS statement using Azure Synapse / PDW options
#' (replicated distribution, clustered columnstore index), so the target tables
#' must not already exist.
#'
#' @param connectionDetails  An R object of type\cr\code{connectionDetails} created using the
#'                                     function \code{createConnectionDetails} in the
#'                                     \code{DatabaseConnector} package.
#' @param vocabSourceSchema  The name of the database schema that already contains the Vocabulary
#'                                     tables to copy.  Requires read permissions to this database.  On SQL
#'                                     Server, this should specifiy both the database and the schema,
#'                                     so for example 'cdm_instance.dbo'.
#' @param vocabTargetSchema  The name of the database schema into which to copy the Vocabulary
#'                                     tables.  Requires read and write permissions to this database.  On SQL
#'                                     Server, this should specifiy both the database and the schema,
#'                                     so for example 'cdm_instance.dbo'.
#'
#'@export
LoadVocabFromSchema <- function (connectionDetails, vocabSourceSchema, vocabTargetSchema)
{
  # Full set of OMOP CDM vocabulary tables to copy.
  vocabTableList <- c("concept", "concept_ancestor", "concept_class",
                      "concept_relationship", "concept_synonym", "domain",
                      "drug_strength", "relationship", "source_to_concept_map",
                      "vocabulary")

  conn <- DatabaseConnector::connect(connectionDetails)
  # Register cleanup immediately after connecting so the connection is closed
  # even when one of the copy statements throws an error (previously the
  # on.exit() was only registered after the loop completed successfully,
  # leaking the connection on failure).
  on.exit(DatabaseConnector::disconnect(conn), add = TRUE)

  for (tableName in vocabTableList) {
    # CTAS the source table into the target schema.
    sql <- paste0("create table ",vocabTargetSchema,".",tableName," with (distribution=replicate, clustered columnstore index) as select * from ",vocabSourceSchema,".",tableName)
    writeLines(paste0("Copying: ",tableName))
    DatabaseConnector::executeSql(conn, sql, profile = FALSE, progressBar = TRUE, reportOverallTime = TRUE)
  }

  invisible(NULL)
}
/R/LoadVocabFromSchema.r
permissive
psbrandt/ETL-Synthea
R
false
false
2,357
r
#' @title Load Vocabulary Tables From a Preexisting Schema.
#'
#' @description This function populates all Vocabulary tables with data from Vocabulary tables in a specified schema.
#'
#' @usage LoadVocabFromSchema(connectionDetails, vocabSourceSchema, vocabTargetSchema)
#'
#' @details This function assumes \cr\code{createCDMTables()} has already been run and \cr\code{vocabSourceSchema} has all required Vocabulary tables.
#' Each table is copied with a CTAS statement using Azure Synapse / PDW options
#' (replicated distribution, clustered columnstore index), so the target tables
#' must not already exist.
#'
#' @param connectionDetails  An R object of type\cr\code{connectionDetails} created using the
#'                                     function \code{createConnectionDetails} in the
#'                                     \code{DatabaseConnector} package.
#' @param vocabSourceSchema  The name of the database schema that already contains the Vocabulary
#'                                     tables to copy.  Requires read permissions to this database.  On SQL
#'                                     Server, this should specifiy both the database and the schema,
#'                                     so for example 'cdm_instance.dbo'.
#' @param vocabTargetSchema  The name of the database schema into which to copy the Vocabulary
#'                                     tables.  Requires read and write permissions to this database.  On SQL
#'                                     Server, this should specifiy both the database and the schema,
#'                                     so for example 'cdm_instance.dbo'.
#'
#'@export
LoadVocabFromSchema <- function (connectionDetails, vocabSourceSchema, vocabTargetSchema)
{
  # Full set of OMOP CDM vocabulary tables to copy.
  vocabTableList <- c("concept", "concept_ancestor", "concept_class",
                      "concept_relationship", "concept_synonym", "domain",
                      "drug_strength", "relationship", "source_to_concept_map",
                      "vocabulary")

  conn <- DatabaseConnector::connect(connectionDetails)
  # Register cleanup immediately after connecting so the connection is closed
  # even when one of the copy statements throws an error (previously the
  # on.exit() was only registered after the loop completed successfully,
  # leaking the connection on failure).
  on.exit(DatabaseConnector::disconnect(conn), add = TRUE)

  for (tableName in vocabTableList) {
    # CTAS the source table into the target schema.
    sql <- paste0("create table ",vocabTargetSchema,".",tableName," with (distribution=replicate, clustered columnstore index) as select * from ",vocabSourceSchema,".",tableName)
    writeLines(paste0("Copying: ",tableName))
    DatabaseConnector::executeSql(conn, sql, profile = FALSE, progressBar = TRUE, reportOverallTime = TRUE)
  }

  invisible(NULL)
}
# Internal package constants (dot-prefixed, not exported).

# Valid fallback modes when bicor encounters a column with zero (or missing) MAD.
.pearsonFallbacks <- c("none", "individual", "all")

# Environment variable consulted to decide whether multi-threading is allowed.
.threadAllowVar <- "ALLOW_WGCNA_THREADS"

# Warning texts emitted for the zero-MAD situations above.
.zeroMADWarnings <- c(
  "Some results will be NA.",
  "Pearson correlation was used for individual columns with zero (or missing) MAD.",
  "Pearson correlation was used for entire variable."
)

# Smallest problem sizes accepted.
..minNGenes <- 4
..minNSamples <- 4

# Upper bound on block size for block-wise calculations.
.largestBlockSize <- 1e8

# Recognized network types; adjacency additionally accepts "distance".
.networkTypes <- c("unsigned", "signed", "signed hybrid")
.adjacencyTypes <- c(.networkTypes, "distance")

# Recognized topological overlap (TOM) variants and denominators.
.TOMTypes <- c("none", "unsigned", "signed", "signed Nowick",
               "unsigned 2", "signed 2", "signed Nowick 2")
.TOMDenoms <- c("min", "mean")

# Correlation settings: function names and their matching default options
# (string form and list form).
.corTypes <- c("pearson", "bicor")
.corFnc <- c("cor", "bicor", "cor")
.corOptions <- c("use = 'p'", "use = 'p'", "use = 'p', method = 'spearman'")
.corOptionList <- list(
  list(use = 'p'),
  list(use = 'p'),
  list(use = 'p', method = "spearman")
)
/R/internalConstants.R
no_license
cran/WGCNA
R
false
false
933
r
# Definitions of internal constants.
# (Dot-prefixed names are internal to the package and not exported.)

# Valid fallback modes when bicor encounters a column with zero (or missing) MAD.
.pearsonFallbacks = c("none", "individual", "all");
# Environment variable consulted to decide whether multi-threading is allowed.
.threadAllowVar = "ALLOW_WGCNA_THREADS"
# Warning texts for the zero-MAD situations above.
.zeroMADWarnings = c("Some results will be NA.", "Pearson correlation was used for individual columns with zero (or missing) MAD.", "Pearson correlation was used for entire variable.");
# Smallest problem sizes accepted.
..minNGenes = 4;
..minNSamples = 4;
# Upper bound on block size for block-wise calculations.
.largestBlockSize = 1e8;
# Recognized network types; adjacency additionally accepts "distance".
.networkTypes = c("unsigned", "signed", "signed hybrid");
.adjacencyTypes = c(.networkTypes, "distance");
# Recognized topological overlap (TOM) variants and denominators.
.TOMTypes = c("none", "unsigned", "signed", "signed Nowick", "unsigned 2", "signed 2", "signed Nowick 2");
.TOMDenoms = c("min", "mean");
# Correlation settings: function names and their matching default options
# (string form and list form).  NOTE(review): .corFnc/.corOptions have three
# entries but .corTypes only two -- presumably the third slot is Spearman via
# cor(); confirm how callers index these.
.corTypes = c("pearson", "bicor");
.corFnc = c("cor", "bicor", "cor");
.corOptions = c("use = 'p'", "use = 'p'", "use = 'p', method = 'spearman'");
.corOptionList = list( list(use = 'p'), list(use = 'p'), list(use = 'p', method = "spearman"));
## Cache a matrix together with its inverse so that solve() runs at most once
## per matrix value.

## Build a caching wrapper around matrix `x`.  Returns a list of four closures:
## set/get the matrix and set_inverse/get_inverse the cached inverse.  Setting
## a new matrix clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  set_inverse <- function(inverse) cached_inverse <<- inverse
  get_inverse <- function() cached_inverse
  list(set = set,
       get = get,
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix.
## Reuses the cached inverse when present; otherwise computes it with solve()
## (extra arguments are forwarded) and stores it in the cache.
cacheSolve <- function(x, ...) {
  known_inverse <- x$get_inverse()
  if (!is.null(known_inverse)) {
    message("getting cached data")
    return(known_inverse)
  }
  current_matrix <- x$get()
  fresh_inverse <- solve(current_matrix, ...)
  x$set_inverse(fresh_inverse)
  fresh_inverse
}
/cachematrix.R
no_license
schaeferrodrigo/ProgrammingAssignment2
R
false
false
810
r
## Cache a matrix together with its inverse so that solve() runs at most once
## per matrix value.

## makeCacheMatrix: create a list that we can use to set and get the matrix x,
## and set and get its inverse matrix.  Setting a new matrix via set() clears
## the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv<- NULL                       # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL                   # invalidate the cache when the matrix changes
  }
  get <- function() x
  set_inverse <- function(inverse) inv <<- inverse
  get_inverse <- function() inv
  list(set = set, get = get,
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}

## cacheSolve: access the inverse matrix if it was computed before, otherwise
## compute it with solve() (extra arguments are forwarded) and cache it.
## Returns the inverse matrix; assumes x's matrix is invertible.
cacheSolve <- function(x, ...) {
  m <- x$get_inverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$set_inverse(inv)
  inv
}
# run_analysis.R -- tidies the UCI HAR (smartphone accelerometer) data set.
# Steps: download/unzip, merge train+test, keep mean/std measurements, attach
# activity labels, expand variable-name abbreviations, and write a tidy table
# of per-subject/per-activity averages.
# NOTE(review): assumes a ./data directory already exists -- download.file()
# fails otherwise; confirm before running.

#Download and unzip the data
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/assignmentdata.zip")
unzip("./data/assignmentdata.zip")

# Feature names become the measurement column headers for both data sets.
# NOTE(review): read.csv2 uses ";" as separator, so each "index name" line is
# read as a single field and the numeric index stays fused to the column name
# -- confirm this is intended.
feature_label<-read.csv2("./data/UCI HAR Dataset/features.txt",header=FALSE)
feature_label2<-as.vector(t(feature_label)) #create a row vector with column names

# --- Test set: measurements (X), activity codes (y), and subject ids ---
test<-read.csv("./data/UCI HAR Dataset/test/X_test.txt",sep="")
colnames(test)<-feature_label2
test_activity_label<-read.csv("./data/UCI HAR Dataset/test/y_test.txt",sep="",col.names="activity")
test_sub_label<-read.csv("./data/UCI HAR Dataset/test/subject_test.txt",sep="",col.names="subject")
test2<-cbind(test_sub_label,test_activity_label,test) #combine the subject, activity, and measurement data for test

# --- Training set: same three files, same column binding ---
train<-read.csv("./data/UCI HAR Dataset/train/X_train.txt",sep="")
colnames(train)<-feature_label2
train_activity_label<-read.csv("./data/UCI HAR Dataset/train/y_train.txt",sep="",col.names="activity")
train_sub_label<-read.csv("./data/UCI HAR Dataset/train/subject_train.txt",sep="",col.names="subject")
train2<-cbind(train_sub_label,train_activity_label,train) #combine the subject, activity, and measurement data for train

#Merges the training and the test sets to create one data set
mergedData<-rbind(train2,test2)

#Extracts only the measurements on the mean and standard deviation for each measurement.
# NOTE(review): grepl("mean") also matches meanFreq() columns -- confirm this
# matches the intended definition of "mean" measurements.
mean_measurement<-mergedData[,grepl("mean",colnames(mergedData))]
std_measurement<-mergedData[,grepl("std",colnames(mergedData))]
mergedData1<-cbind(mergedData$subject,mergedData$activity,mean_measurement,std_measurement)
colnames(mergedData1)[1:2]<-c("subject","activity")

#Uses descriptive activity names to name the activities in the data set
activity_list<-read.csv("./data/UCI HAR Dataset/activity_labels.txt",sep="",header=FALSE)
colnames(activity_list)<-c("activity","activity_label")
mergedData2<-merge(activity_list, mergedData1,by.x="activity",by.y="activity")

#Appropriately labels the data set with descriptive variable names.
#t=time, f=frequency, Acc=Accelerometer,Gyro=Gyroscope, Mag=Magnitude
colnames(mergedData2)<-gsub("tBody","timeBody",colnames(mergedData2))
colnames(mergedData2)<-gsub("tGravity","timeGravity",colnames(mergedData2))
colnames(mergedData2)<-gsub("fBody","frequencyBody",colnames(mergedData2))
colnames(mergedData2)<-gsub("fGravity","frequencyGravity",colnames(mergedData2))
colnames(mergedData2)<-gsub("Acc","Accelerometer",colnames(mergedData2))
colnames(mergedData2)<-gsub("Gyro","Gyroscope",colnames(mergedData2))
colnames(mergedData2)<-gsub("Mag","Magnitude",colnames(mergedData2))

#From the data set in step 4, creates a second, independent tidy data set with the average of
#each variable for each activity and each subject.
library(data.table)
# Composite "activity_subject" key so a single mean() pass groups by both.
mergedData2$activity_subject<-paste(mergedData2$activity,mergedData2$subject,sep="_")
mergedData3<-data.table(mergedData2)
tidydata<-mergedData3[, lapply(.SD, mean), by = 'activity_subject']
# Keep the activity code (col 2) plus subject/measurement columns (4:83),
# dropping the helper key and the character label column; the descriptive
# label is re-attached by the merge below.
tidydata2<-tidydata[,c(2,4:83),with=FALSE]
tidydata3<-merge(activity_list, tidydata2,by.x="activity",by.y="activity")
write.table(tidydata3, file = "tidydata.txt", row.names = FALSE)
/run_analysis.R
no_license
holikholik/datasciencecoursera
R
false
false
3,270
r
#Download and unzip the data fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(fileUrl,destfile="./data/assignmentdata.zip") unzip("./data/assignmentdata.zip") feature_label<-read.csv2("./data/UCI HAR Dataset/features.txt",header=FALSE) feature_label2<-as.vector(t(feature_label)) #create a row vector with column names test<-read.csv("./data/UCI HAR Dataset/test/X_test.txt",sep="") colnames(test)<-feature_label2 test_activity_label<-read.csv("./data/UCI HAR Dataset/test/y_test.txt",sep="",col.names="activity") test_sub_label<-read.csv("./data/UCI HAR Dataset/test/subject_test.txt",sep="",col.names="subject") test2<-cbind(test_sub_label,test_activity_label,test) #combime the activity, subject, and activity data for test train<-read.csv("./data/UCI HAR Dataset/train/X_train.txt",sep="") colnames(train)<-feature_label2 train_activity_label<-read.csv("./data/UCI HAR Dataset/train/y_train.txt",sep="",col.names="activity") train_sub_label<-read.csv("./data/UCI HAR Dataset/train/subject_train.txt",sep="",col.names="subject") train2<-cbind(train_sub_label,train_activity_label,train) #combime the activity, subject, and activity data for train #Merges the training and the test sets to create one data set mergedData<-rbind(train2,test2) #Extracts only the measurements on the mean and standard deviation for each measurement. 
mean_measurement<-mergedData[,grepl("mean",colnames(mergedData))] std_measurement<-mergedData[,grepl("std",colnames(mergedData))] mergedData1<-cbind(mergedData$subject,mergedData$activity,mean_measurement,std_measurement) colnames(mergedData1)[1:2]<-c("subject","activity") #Uses descriptive activity names to name the activities in the data set activity_list<-read.csv("./data/UCI HAR Dataset/activity_labels.txt",sep="",header=FALSE) colnames(activity_list)<-c("activity","activity_label") mergedData2<-merge(activity_list, mergedData1,by.x="activity",by.y="activity") #Appropriately labels the data set with descriptive variable names. #t=time, f=frequency, Acc=Accelerometer,Gyro=Gyroscope, Mag=Magnitude colnames(mergedData2)<-gsub("tBody","timeBody",colnames(mergedData2)) colnames(mergedData2)<-gsub("tGravity","timeGravity",colnames(mergedData2)) colnames(mergedData2)<-gsub("fBody","frequencyBody",colnames(mergedData2)) colnames(mergedData2)<-gsub("fGravity","frequencyGravity",colnames(mergedData2)) colnames(mergedData2)<-gsub("Acc","Accelerometer",colnames(mergedData2)) colnames(mergedData2)<-gsub("Gyro","Gyroscope",colnames(mergedData2)) colnames(mergedData2)<-gsub("Mag","Magnitude",colnames(mergedData2)) #From the data set in step 4, creates a second, independent tidy data set with the average of #each variable for each activity and each subject. library(data.table) mergedData2$activity_subject<-paste(mergedData2$activity,mergedData2$subject,sep="_") mergedData3<-data.table(mergedData2) tidydata<-mergedData3[, lapply(.SD, mean), by = 'activity_subject'] tidydata2<-tidydata[,c(2,4:83),with=FALSE] tidydata3<-merge(activity_list, tidydata2,by.x="activity",by.y="activity") write.table(tidydata3, file = "tidydata.txt", row.names = FALSE)
## Librerias # rm(list=ls()) library(tidyverse) library(lubridate) library(tsModel) library(MASS) library(gridExtra) library(grid) library(ggplot2) library(mgcv) library(splines) library(dlnm) library(mvmeta) # library(plot3D) library(tsibble) # datos load("Dropbox/UPCXIVSC_RST/4 Jueves/Datos/datosSC.RDATA") dCV<-dCV_94_06 lapply(dCV, function(x){summary(x)}) # Parámetros básicos namesc<-names(dCV) len<-length(namesc) dfseas<-7 formula <-tm~cb+dow+ns(fecha,df=dfseas*length(unique(yy))) # Parametros para cada dlnm de cada cidade varper<-c(10,50,90) # lugar de cada knots varfun<-"bs" # tipo de spline para variavel nlag<-21 # nº de lags xlag<-0:nlag # tamanho de lags lagnk <- 3 # nº de knots nos splines de lags klag<-logknots(nlag,lagnk) # colocação de knots nos lags lagfun<-"ns" # spline para os lags arlagm=list(fun=lagfun, knots=klag,int=T) # ############################################################################################################### # Ejercicio 1: ANÁLISIS COMBINADO PARA OVERALL ESCALA ABSOLUTA ################################################################################################################ # Analizamos en las tres ciudades, guardando coeficientes y sus matrices de var-cov (map o bucle) red <- vector("list",len) coef<- matrix(NA,len,length(varper)+3,dimnames=list(names(dCV))) vcov<- vector("list",len); names(vcov) <- names(dCV) # Bucle para el análisis for(i in 1:len) { #extraemos los datos datai <- dCV[[i]] #especificamos la base knotsper<-quantile(datai$tmean, probs=varper/100, na.rm=T) argvarm <- list(fun=varfun,knots=knotsper,int=F) #obtenemos la base cb <- crossbasis(datai$tmean, lag=nlag, argvar=argvarm, arglag=arlagm) #ajustamos el modelo model <- glm(formula,datai,family=quasipoisson,na.action="na.exclude") #determinamos la malla de predicción tpred<-quantile(datai$tmean, probs=(1:99)/100, na.rm=T) #predicción reducida: overall red[[i]] <- crossreduce(cb,model,at=tpred) # quiereis lo efecto overall coef[i,] <- red[[i]]$coef 
vcov[[i]] <- red[[i]]$vcov cat("\t",i) } # 2) meta-análisis # con efectos aleatórios mv<- mvmeta(coef~1,vcov,method="reml",control=list(showiter=T)) summary(mv) # 3) la Predicción: #La base debería ser equivalente a la usada en cada una de las tres ciudades. Punto problemático los knots de la nueva base # Una opción: knotsper<- rowMeans(sapply(dCV, function(x){quantile(x$tmean, probs=varper/100, na.rm=T)})) # Otra algo mejor: tmeanCV <-c(dCV[[1]]$tmean,dCV[[2]]$tmean,dCV[[3]]$tmean ) knotsper<-quantile(tmeanCV, probs=varper/100, na.rm=T) # para la predicción podemos, por ejemplo, #predecir por grado tpred <-round(min(tmeanCV, na.rm=T)): round(max(tmeanCV, na.rm=T)) #predecir en percentiles tpred <-quantile(tmeanCV, probs=1:99/100, na.rm=T) argvarm<-list(fun=varfun, knots=knotsper) arglagm<-list(fun=lagfun, knots=klag) cb <- crossbasis(tpred, lag=nlag, argvar=argvarm, arglag=arglagm) bvar <- do.call("onebasis",c(list(x=tpred),attr(cb,"argvar"))) #solo necesitamos bvar # 3.1. Predicción overall combinada sin centrar Metapred<-crosspred(basis=bvar,coef=coef(mv),vcov=vcov(mv),at=tpred,model.link="log") plot(Metapred) # 3.2 Predicción overall combinada centrada en mmt (metaMMT<-Metapred$predvar[which.min(Metapred$allfit)]) #calculamos mmt Metapred<-crosspred(basis=bvar,coef=coef(mv),vcov=vcov(mv),cen=metaMMT,at=tpred,model.link="log") #centramos plot(Metapred) # 3.3. Predicción ciudad específica a partir del meta-análisis (fixef+ranef) blups <- blup(mv,vcov=TRUE) # 3.4 Predicción ciudad específica a partir del meta-análisis (sólo fixef) metacity<-predict(mv, vcov=T, interval="confidence") # Con base única, 3.1 y 3.4 coinciden. ########################################################################################### # Algunos "extra". 
# 4) Resultados # 4.1) numéricos res_CV<-data.frame(tmean= Metapred$predvar, RR=Metapred$allRRfit,LowRR=Metapred$allRRlow,HighRR=Metapred$allRRhigh) # 4.2) gráficos # 4.2.1) curva dosis respuesta xlab<-seq(7,28, by=5) ylab<-pretty(c( res_CV$LowRR, res_CV$HighRR)) res_CV %>% ggplot(aes(tmean, RR)) + geom_hline(yintercept = 1, size = 0.5) + geom_ribbon(aes(ymin = LowRR,ymax = HighRR),fill="grey80",alpha=0.7) + geom_line(colour="#cb181d",size=1) + geom_point(aes(metaMMT,1),shape = 21, fill = "white", size = 2, colour="#cb181d", show.legend = FALSE) + scale_x_continuous(breaks = xlab) + scale_y_continuous(breaks = ylab, limits=c(0.5,2.5)) + theme_bw() + theme(panel.grid.minor = element_blank()) + labs(x = "temperatura media diaria (ºC)", y = "RR",title="Temperatura y Mortalidad total",subtitle="CV, 94-06") # 4.2.2) Comparaciónn Blups, original y metas RR_CV<-data.frame(RR=Metapred$allRRfit,LowRR=Metapred$allRRlow,HighRR=Metapred$allRRhigh, ciudad="All") for(i in 1:3){ datai<-dCV[[i]] tpred<-quantile(datai$tmean,probs=(1:99)/100, na.rm=T) argvarm<-list(fun=varfun, knots=knotsper) arglagm<-list(fun=lagfun, knots=klag) cb <- crossbasis(tpred, lag=nlag, argvar=argvarm, arglag=arglagm) bvar <- do.call("onebasis",c(list(x=tpred),attr(cb,"argvar"))) #solo necesitamos bvar predB<-crosspred(basis=bvar,coef=blups[[i]]$blup,vcov=blups[[i]]$vcov,at=tpred,model.link="log") predO<-crosspred(basis=bvar,coef=coef[i,],vcov=vcov[[i]],at=tpred,model.link="log") predM<-crosspred(basis=bvar,coef=metacity[[i]]$fit,vcov=metacity[[i]]$vcov,at=tpred,model.link="log") (metaMMTB<-predB$predvar[which.min(predB$allfit)]) (metaMMTO<-predO$predvar[which.min(predO$allfit)]) (metaMMTM<-predM$predvar[which.min(predM$allfit)]) predB<-crosspred(basis=bvar,coef=blups[[i]]$blup,vcov=blups[[i]]$vcov,cen=metaMMTB,at=tpred,model.link="log") predO<-crosspred(basis=bvar,coef=coef[i,],vcov=vcov[[i]],cen=metaMMTO, at=tpred,model.link="log") 
predM<-crosspred(basis=bvar,coef=metacity[[i]]$fit,vcov=metacity[[i]]$vcov,cen=metaMMTB,at=tpred,model.link="log") RR_O_i<-data.frame(tmean=predO$predvar, RR=predO$allRRfit,LowRR=predO$allRRlow,HighRR=predO$allRRhigh, ciudad=names(dCV)[i],tipo="original") RR_B_i<-data.frame(tmean=predB$predvar, RR=predB$allRRfit,LowRR=predB$allRRlow,HighRR=predB$allRRhigh, ciudad=names(dCV)[i],tipo="blups") RR_M_i<-data.frame(tmean=predM$predvar, RR=predM$allRRfit,LowRR=predM$allRRlow,HighRR=predM$allRRhigh, ciudad=names(dCV)[i],tipo="meta") RRi<-bind_rows(RR_O_i,RR_B_i,RR_M_i)%>%mutate_at(5:6,"factor") # Riesgos relativos y CI por lag assign(paste("RR",i,sep=""),RRi) } RR<-bind_rows(RR1,RR2,RR3) plot_raw<-RR %>% ggplot(aes(tmean, RR)) + geom_line(aes(x = tmean, y = RR, colour = tipo),size=1, show.legend = T) + scale_x_continuous(breaks = xlab) + geom_hline(yintercept = 1, size = 0.5) + scale_y_continuous(breaks = ylab,limits=c(0.5,3.5)) + theme_bw() + theme(panel.grid.minor = element_blank()) + scale_color_brewer(palette = "Set1")+ labs(x = "Temperatura media (ºC)", y = "RR",title="Temperatura y Mortalidad total",subtitle="CV, 94-06")+ facet_wrap(vars(ciudad),ncol=3) plot_raw
/Jueves/Ejercicio1Demo-copy.R
no_license
rafalopespx/Curso_DLNM
R
false
false
8,100
r
## Librerias # rm(list=ls()) library(tidyverse) library(lubridate) library(tsModel) library(MASS) library(gridExtra) library(grid) library(ggplot2) library(mgcv) library(splines) library(dlnm) library(mvmeta) # library(plot3D) library(tsibble) # datos load("Dropbox/UPCXIVSC_RST/4 Jueves/Datos/datosSC.RDATA") dCV<-dCV_94_06 lapply(dCV, function(x){summary(x)}) # Parámetros básicos namesc<-names(dCV) len<-length(namesc) dfseas<-7 formula <-tm~cb+dow+ns(fecha,df=dfseas*length(unique(yy))) # Parametros para cada dlnm de cada cidade varper<-c(10,50,90) # lugar de cada knots varfun<-"bs" # tipo de spline para variavel nlag<-21 # nº de lags xlag<-0:nlag # tamanho de lags lagnk <- 3 # nº de knots nos splines de lags klag<-logknots(nlag,lagnk) # colocação de knots nos lags lagfun<-"ns" # spline para os lags arlagm=list(fun=lagfun, knots=klag,int=T) # ############################################################################################################### # Ejercicio 1: ANÁLISIS COMBINADO PARA OVERALL ESCALA ABSOLUTA ################################################################################################################ # Analizamos en las tres ciudades, guardando coeficientes y sus matrices de var-cov (map o bucle) red <- vector("list",len) coef<- matrix(NA,len,length(varper)+3,dimnames=list(names(dCV))) vcov<- vector("list",len); names(vcov) <- names(dCV) # Bucle para el análisis for(i in 1:len) { #extraemos los datos datai <- dCV[[i]] #especificamos la base knotsper<-quantile(datai$tmean, probs=varper/100, na.rm=T) argvarm <- list(fun=varfun,knots=knotsper,int=F) #obtenemos la base cb <- crossbasis(datai$tmean, lag=nlag, argvar=argvarm, arglag=arlagm) #ajustamos el modelo model <- glm(formula,datai,family=quasipoisson,na.action="na.exclude") #determinamos la malla de predicción tpred<-quantile(datai$tmean, probs=(1:99)/100, na.rm=T) #predicción reducida: overall red[[i]] <- crossreduce(cb,model,at=tpred) # quiereis lo efecto overall coef[i,] <- red[[i]]$coef 
vcov[[i]] <- red[[i]]$vcov cat("\t",i) } # 2) meta-análisis # con efectos aleatórios mv<- mvmeta(coef~1,vcov,method="reml",control=list(showiter=T)) summary(mv) # 3) la Predicción: #La base debería ser equivalente a la usada en cada una de las tres ciudades. Punto problemático los knots de la nueva base # Una opción: knotsper<- rowMeans(sapply(dCV, function(x){quantile(x$tmean, probs=varper/100, na.rm=T)})) # Otra algo mejor: tmeanCV <-c(dCV[[1]]$tmean,dCV[[2]]$tmean,dCV[[3]]$tmean ) knotsper<-quantile(tmeanCV, probs=varper/100, na.rm=T) # para la predicción podemos, por ejemplo, #predecir por grado tpred <-round(min(tmeanCV, na.rm=T)): round(max(tmeanCV, na.rm=T)) #predecir en percentiles tpred <-quantile(tmeanCV, probs=1:99/100, na.rm=T) argvarm<-list(fun=varfun, knots=knotsper) arglagm<-list(fun=lagfun, knots=klag) cb <- crossbasis(tpred, lag=nlag, argvar=argvarm, arglag=arglagm) bvar <- do.call("onebasis",c(list(x=tpred),attr(cb,"argvar"))) #solo necesitamos bvar # 3.1. Predicción overall combinada sin centrar Metapred<-crosspred(basis=bvar,coef=coef(mv),vcov=vcov(mv),at=tpred,model.link="log") plot(Metapred) # 3.2 Predicción overall combinada centrada en mmt (metaMMT<-Metapred$predvar[which.min(Metapred$allfit)]) #calculamos mmt Metapred<-crosspred(basis=bvar,coef=coef(mv),vcov=vcov(mv),cen=metaMMT,at=tpred,model.link="log") #centramos plot(Metapred) # 3.3. Predicción ciudad específica a partir del meta-análisis (fixef+ranef) blups <- blup(mv,vcov=TRUE) # 3.4 Predicción ciudad específica a partir del meta-análisis (sólo fixef) metacity<-predict(mv, vcov=T, interval="confidence") # Con base única, 3.1 y 3.4 coinciden. ########################################################################################### # Algunos "extra". 
# 4) Resultados # 4.1) numéricos res_CV<-data.frame(tmean= Metapred$predvar, RR=Metapred$allRRfit,LowRR=Metapred$allRRlow,HighRR=Metapred$allRRhigh) # 4.2) gráficos # 4.2.1) curva dosis respuesta xlab<-seq(7,28, by=5) ylab<-pretty(c( res_CV$LowRR, res_CV$HighRR)) res_CV %>% ggplot(aes(tmean, RR)) + geom_hline(yintercept = 1, size = 0.5) + geom_ribbon(aes(ymin = LowRR,ymax = HighRR),fill="grey80",alpha=0.7) + geom_line(colour="#cb181d",size=1) + geom_point(aes(metaMMT,1),shape = 21, fill = "white", size = 2, colour="#cb181d", show.legend = FALSE) + scale_x_continuous(breaks = xlab) + scale_y_continuous(breaks = ylab, limits=c(0.5,2.5)) + theme_bw() + theme(panel.grid.minor = element_blank()) + labs(x = "temperatura media diaria (ºC)", y = "RR",title="Temperatura y Mortalidad total",subtitle="CV, 94-06") # 4.2.2) Comparaciónn Blups, original y metas RR_CV<-data.frame(RR=Metapred$allRRfit,LowRR=Metapred$allRRlow,HighRR=Metapred$allRRhigh, ciudad="All") for(i in 1:3){ datai<-dCV[[i]] tpred<-quantile(datai$tmean,probs=(1:99)/100, na.rm=T) argvarm<-list(fun=varfun, knots=knotsper) arglagm<-list(fun=lagfun, knots=klag) cb <- crossbasis(tpred, lag=nlag, argvar=argvarm, arglag=arglagm) bvar <- do.call("onebasis",c(list(x=tpred),attr(cb,"argvar"))) #solo necesitamos bvar predB<-crosspred(basis=bvar,coef=blups[[i]]$blup,vcov=blups[[i]]$vcov,at=tpred,model.link="log") predO<-crosspred(basis=bvar,coef=coef[i,],vcov=vcov[[i]],at=tpred,model.link="log") predM<-crosspred(basis=bvar,coef=metacity[[i]]$fit,vcov=metacity[[i]]$vcov,at=tpred,model.link="log") (metaMMTB<-predB$predvar[which.min(predB$allfit)]) (metaMMTO<-predO$predvar[which.min(predO$allfit)]) (metaMMTM<-predM$predvar[which.min(predM$allfit)]) predB<-crosspred(basis=bvar,coef=blups[[i]]$blup,vcov=blups[[i]]$vcov,cen=metaMMTB,at=tpred,model.link="log") predO<-crosspred(basis=bvar,coef=coef[i,],vcov=vcov[[i]],cen=metaMMTO, at=tpred,model.link="log") 
predM<-crosspred(basis=bvar,coef=metacity[[i]]$fit,vcov=metacity[[i]]$vcov,cen=metaMMTB,at=tpred,model.link="log") RR_O_i<-data.frame(tmean=predO$predvar, RR=predO$allRRfit,LowRR=predO$allRRlow,HighRR=predO$allRRhigh, ciudad=names(dCV)[i],tipo="original") RR_B_i<-data.frame(tmean=predB$predvar, RR=predB$allRRfit,LowRR=predB$allRRlow,HighRR=predB$allRRhigh, ciudad=names(dCV)[i],tipo="blups") RR_M_i<-data.frame(tmean=predM$predvar, RR=predM$allRRfit,LowRR=predM$allRRlow,HighRR=predM$allRRhigh, ciudad=names(dCV)[i],tipo="meta") RRi<-bind_rows(RR_O_i,RR_B_i,RR_M_i)%>%mutate_at(5:6,"factor") # Riesgos relativos y CI por lag assign(paste("RR",i,sep=""),RRi) } RR<-bind_rows(RR1,RR2,RR3) plot_raw<-RR %>% ggplot(aes(tmean, RR)) + geom_line(aes(x = tmean, y = RR, colour = tipo),size=1, show.legend = T) + scale_x_continuous(breaks = xlab) + geom_hline(yintercept = 1, size = 0.5) + scale_y_continuous(breaks = ylab,limits=c(0.5,3.5)) + theme_bw() + theme(panel.grid.minor = element_blank()) + scale_color_brewer(palette = "Set1")+ labs(x = "Temperatura media (ºC)", y = "RR",title="Temperatura y Mortalidad total",subtitle="CV, 94-06")+ facet_wrap(vars(ciudad),ncol=3) plot_raw
\name{fm_temp2rad} \alias{fm_temp2rad} \title{ Estimate the radiance. } \description{ Given a brightness temperature in Kelvin, returns the radiance adjusted for the instrument response of the sensor being used. } \usage{ fm_temp2rad(bt, satname) } \arguments{ \item{bt}{Brightness temperature of an AVHRR channel} \item{satname}{Identification of the satellite being used, e.g. NOAA-18} } \value{ A value or vector, depending on input. } \seealso{ \link{fm_rad2temp} } \author{ Øystein Godøy (\email{o.godoy@met.no}) } \examples{ } \keyword{ }
/man/fm_temp2rad.Rd
no_license
steingod/R-mipolsat
R
false
false
597
rd
\name{fm_temp2rad} \alias{fm_temp2rad} \title{ Estimate the radiance. } \description{ Given a brightness temperature in Kelvin, returns the radiance adjusted for the instrument response of the sensor being used. } \usage{ fm_temp2rad(bt, satname) } \arguments{ \item{bt}{Brightness temperature of an AVHRR channel} \item{satname}{Identification of the satellite being used, e.g. NOAA-18} } \value{ A value or vector, depending on input. } \seealso{ \link{fm_rad2temp} } \author{ Øystein Godøy (\email{o.godoy@met.no}) } \examples{ } \keyword{ }
#' Plot venn diagram as a ggplot layer object. It supports only data frame as input. #' #' @name geom_venn #' @inheritParams ggplot2::stat_identity #' @param data A data.frame or a list as input data. #' @param columns A character vector use as index to select columns/elements. #' @param set_names Set names, use column names if omitted. #' @param show_percentage Show percentage for each set. #' @param label_sep separator character for displaying elements. #' @param fill_color Filling colors in circles. #' @param fill_alpha Transparency for filling circles. #' @param stroke_color Stroke color for drawing circles. #' @param stroke_alpha Transparency for drawing circles. #' @param stroke_size Stroke size for drawing circles. #' @param stroke_linetype Line type for drawing circles. #' @param set_name_color Text color for set names. #' @param set_name_size Text size for set names. #' @param text_color Text color for intersect contents. #' @param text_size Text size for intersect contents. #' @return The ggplot object to print or save to file. 
#' @examples #' library(ggvenn) #' #' # use data.frame as input #' d <- tibble(value = c(1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13), #' `Set 1` = c(TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE), #' `Set 2` = c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE), #' `Set 3` = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE), #' `Set 4` = c(FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE)) #' #' # ggplot gramma #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`)) + #' coord_fixed() + #' theme_void() #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`)) + #' coord_fixed() + #' theme_void() #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`, D = `Set 4`)) + #' coord_fixed() + #' theme_void() #' #' # set fill color #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`), fill_color = c("red", "blue")) + #' coord_fixed() + #' theme_void() #' #'#' # hide percentage #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`), show_percentage = FALSE) + #' coord_fixed() + #' theme_void() #' #' # show elements instead of count/percentage #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`, D = `Set 4`, label = value)) + #' coord_fixed() + #' theme_void() #' @seealso ggvenn #' @export geom_venn <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", ..., set_names = NULL, show_percentage = TRUE, label_sep = ",", fill_color = c("blue", "yellow", "green", "red"), fill_alpha = .5, stroke_color = "black", stroke_alpha = 1, stroke_size = 1, stroke_linetype = "solid", set_name_color = "black", set_name_size = 6, text_color = "black", text_size = 4) { l <- layer(mapping = mapping, data = data, geom = GeomVenn, stat = stat, position = position, params = list(na.rm = TRUE, ...)) old_compute_aesthetics <- l$compute_aesthetics l$compute_aesthetics <- function(self, data, plot) { if (is.null(set_names)) { 
self$geom$set_names <- character() for (name in names(plot$mapping)) { self$geom$set_names[name] <- as_label(plot$mapping[[name]]) } for (name in names(self$mapping)) { self$geom$set_names[name] <- as_label(self$mapping[[name]]) } } else { self$geom$set_names <- set_names } self$geom$customize_attributes <- list(show_percentage = show_percentage, label_sep = label_sep, fill_color = fill_color, fill_alpha = fill_alpha, stroke_color = stroke_color, stroke_alpha = stroke_alpha, stroke_size = stroke_size, stroke_linetype = stroke_linetype, set_name_color = set_name_color, set_name_size = set_name_size, text_color = text_color, text_size = text_size) old_compute_aesthetics(data, plot) } l } GeomVenn <- ggproto("GeomVenn", Geom, required_aes = c("A", "B"), optional_aes = c("C", "D", "label"), extra_params = c("na.rm"), setup_data = function(self, data, params) { data %>% mutate(xmin = -2, xmax = 2, ymin = -2, ymax = 2) }, draw_panel = function(self, data, panel_params, coord, ...) { attr <- self$customize_attributes sets <- c("A", "B", "C", "D") sets <- sets[sets %in% names(data)] show_elements <- FALSE if ("label" %in% names(data)) { show_elements <- "label" } show_percentage <- attr$show_percentage label_sep <- attr$label_sep venn <- prepare_venn_data(data, sets, show_elements, show_percentage, label_sep) d0 <- coord_munch(coord, venn$shapes, panel_params) d <- d0 %>% filter(!duplicated(group)) %>% mutate(fill_color = attr$fill_color[group], fill_alpha = attr$fill_alpha, stroke_color = attr$stroke_color, stroke_alpha = attr$stroke_alpha, stroke_size = attr$stroke_size, stroke_linetype = attr$stroke_linetype) d1 <- coord_munch(coord, venn$labels, panel_params) d2 <- coord_munch(coord, venn$texts, panel_params) ggplot2:::ggname("geom_venn", grobTree( polygonGrob( d0$x, d0$y, default.units = "native", id = d0$group, gp = gpar(col = NA, fill = alpha(d$fill_color, d$fill_alpha))), polygonGrob( d0$x, d0$y, default.units = "native", id = d0$group, gp = gpar(col = 
alpha(d$stroke_color, d$stroke_alpha), fill = NA, lwd = d$stroke_size * .pt, lty = d$stroke_linetype)), textGrob( self$set_names, d1$x, d1$y, default.units = "native", hjust = d1$hjust, vjust = d1$vjust, gp = gpar(col = attr$set_name_color, fontsize = attr$set_name_size * .pt)), textGrob( d2$text, d2$x, d2$y, default.units = "native", hjust = d2$hjust, vjust = d2$vjust, gp = gpar(col = attr$text_color, fontsize = attr$text_size * .pt)) ) ) } )
/functions/geom_venn.R
permissive
mohang13/FAW_comparative_genetic_analysis
R
false
false
8,240
r
#' Plot venn diagram as a ggplot layer object. It supports only data frame as input. #' #' @name geom_venn #' @inheritParams ggplot2::stat_identity #' @param data A data.frame or a list as input data. #' @param columns A character vector use as index to select columns/elements. #' @param set_names Set names, use column names if omitted. #' @param show_percentage Show percentage for each set. #' @param label_sep separator character for displaying elements. #' @param fill_color Filling colors in circles. #' @param fill_alpha Transparency for filling circles. #' @param stroke_color Stroke color for drawing circles. #' @param stroke_alpha Transparency for drawing circles. #' @param stroke_size Stroke size for drawing circles. #' @param stroke_linetype Line type for drawing circles. #' @param set_name_color Text color for set names. #' @param set_name_size Text size for set names. #' @param text_color Text color for intersect contents. #' @param text_size Text size for intersect contents. #' @return The ggplot object to print or save to file. 
#' @examples #' library(ggvenn) #' #' # use data.frame as input #' d <- tibble(value = c(1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13), #' `Set 1` = c(TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE), #' `Set 2` = c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE), #' `Set 3` = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE), #' `Set 4` = c(FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE)) #' #' # ggplot gramma #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`)) + #' coord_fixed() + #' theme_void() #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`)) + #' coord_fixed() + #' theme_void() #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`, D = `Set 4`)) + #' coord_fixed() + #' theme_void() #' #' # set fill color #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`), fill_color = c("red", "blue")) + #' coord_fixed() + #' theme_void() #' #'#' # hide percentage #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`), show_percentage = FALSE) + #' coord_fixed() + #' theme_void() #' #' # show elements instead of count/percentage #' ggplot(d) + #' geom_venn(aes(A = `Set 1`, B = `Set 2`, C = `Set 3`, D = `Set 4`, label = value)) + #' coord_fixed() + #' theme_void() #' @seealso ggvenn #' @export geom_venn <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", ..., set_names = NULL, show_percentage = TRUE, label_sep = ",", fill_color = c("blue", "yellow", "green", "red"), fill_alpha = .5, stroke_color = "black", stroke_alpha = 1, stroke_size = 1, stroke_linetype = "solid", set_name_color = "black", set_name_size = 6, text_color = "black", text_size = 4) { l <- layer(mapping = mapping, data = data, geom = GeomVenn, stat = stat, position = position, params = list(na.rm = TRUE, ...)) old_compute_aesthetics <- l$compute_aesthetics l$compute_aesthetics <- function(self, data, plot) { if (is.null(set_names)) { 
self$geom$set_names <- character() for (name in names(plot$mapping)) { self$geom$set_names[name] <- as_label(plot$mapping[[name]]) } for (name in names(self$mapping)) { self$geom$set_names[name] <- as_label(self$mapping[[name]]) } } else { self$geom$set_names <- set_names } self$geom$customize_attributes <- list(show_percentage = show_percentage, label_sep = label_sep, fill_color = fill_color, fill_alpha = fill_alpha, stroke_color = stroke_color, stroke_alpha = stroke_alpha, stroke_size = stroke_size, stroke_linetype = stroke_linetype, set_name_color = set_name_color, set_name_size = set_name_size, text_color = text_color, text_size = text_size) old_compute_aesthetics(data, plot) } l } GeomVenn <- ggproto("GeomVenn", Geom, required_aes = c("A", "B"), optional_aes = c("C", "D", "label"), extra_params = c("na.rm"), setup_data = function(self, data, params) { data %>% mutate(xmin = -2, xmax = 2, ymin = -2, ymax = 2) }, draw_panel = function(self, data, panel_params, coord, ...) { attr <- self$customize_attributes sets <- c("A", "B", "C", "D") sets <- sets[sets %in% names(data)] show_elements <- FALSE if ("label" %in% names(data)) { show_elements <- "label" } show_percentage <- attr$show_percentage label_sep <- attr$label_sep venn <- prepare_venn_data(data, sets, show_elements, show_percentage, label_sep) d0 <- coord_munch(coord, venn$shapes, panel_params) d <- d0 %>% filter(!duplicated(group)) %>% mutate(fill_color = attr$fill_color[group], fill_alpha = attr$fill_alpha, stroke_color = attr$stroke_color, stroke_alpha = attr$stroke_alpha, stroke_size = attr$stroke_size, stroke_linetype = attr$stroke_linetype) d1 <- coord_munch(coord, venn$labels, panel_params) d2 <- coord_munch(coord, venn$texts, panel_params) ggplot2:::ggname("geom_venn", grobTree( polygonGrob( d0$x, d0$y, default.units = "native", id = d0$group, gp = gpar(col = NA, fill = alpha(d$fill_color, d$fill_alpha))), polygonGrob( d0$x, d0$y, default.units = "native", id = d0$group, gp = gpar(col = 
alpha(d$stroke_color, d$stroke_alpha), fill = NA, lwd = d$stroke_size * .pt, lty = d$stroke_linetype)), textGrob( self$set_names, d1$x, d1$y, default.units = "native", hjust = d1$hjust, vjust = d1$vjust, gp = gpar(col = attr$set_name_color, fontsize = attr$set_name_size * .pt)), textGrob( d2$text, d2$x, d2$y, default.units = "native", hjust = d2$hjust, vjust = d2$vjust, gp = gpar(col = attr$text_color, fontsize = attr$text_size * .pt)) ) ) } )
#devtools::install_github("Thie1e/cutpointr") library(cutpointr) library(stringdist) library(proxy) library(pROC) library(MLmetrics) library(caret) library(tidyverse) library(magrittr) library(glmnet) library(rpart) library(AUC) metricas <- function(data, lev = NULL, model = NULL) { c( defaultSummary(data, lev, model), twoClassSummary(data, lev, model) ) }
/aulas/06-arvore-rf/titanic/.Rprofile
permissive
gassantos/201810-intro-ml
R
false
false
374
rprofile
#devtools::install_github("Thie1e/cutpointr") library(cutpointr) library(stringdist) library(proxy) library(pROC) library(MLmetrics) library(caret) library(tidyverse) library(magrittr) library(glmnet) library(rpart) library(AUC) metricas <- function(data, lev = NULL, model = NULL) { c( defaultSummary(data, lev, model), twoClassSummary(data, lev, model) ) }
load("BodyTemp50.rda") BodyTemp50 <- transform(BodyTemp50, Sex = ifelse(Gender==0, "Female", "Male") ) # BodyTemp50 <- subset(BodyTemp50, select = -Gender)
/data/BodyTemp50.R
no_license
rpruim/Lock5Data
R
false
false
156
r
load("BodyTemp50.rda") BodyTemp50 <- transform(BodyTemp50, Sex = ifelse(Gender==0, "Female", "Male") ) # BodyTemp50 <- subset(BodyTemp50, select = -Gender)
##' Get a list of officials according to the district they are running for ##' ##' This function is a wrapper for the Officials.getByDistrict() method of the PVS API Officials class which grabs a list of officials according to the district they are running for. The function sends a request with this method to the PVS API for all district IDs given as a function input, extracts the XML values from the returned XML file(s) and returns them arranged in one data frame. ##' @usage Officials.getByDistrict(districtId) ##' @param districtId a character string or list of character strings with the district ID(s) (see references for details) ##' @return A data frame with a row for each official and columns with the following variables describing the official:\cr candidateList.candidate*.candidateId,\cr candidateList.candidate*.firstName,\cr candidateList.candidate*.nickName,\cr candidateList.candidate*.middleName,\cr candidateList.candidate*.lastName,\cr candidateList.candidate*.suffix,\cr candidateList.candidate*.title,\cr candidateList.candidate*.electionParties,\cr candidateList.candidate*.electionstatus,\cr candidateList.candidate*.officeParties,\cr candidatelist.candidate*.officeStatus,\cr candidateList.candidate*.officeDistrictId,\cr candidateList.candidate*.officeDistrictName,\cr candidateList.candidate*.officeTypeId,\cr candidateList.candidate*.officeId,\cr candidateList.candidate*.officeName,\cr candidateList.candidate*.officeStateId. ##' @references http://api.votesmart.org/docs/Officials.html\cr ##' Use District.getByOfficeState() or District.getByZip() to get a list of district ID(s).\cr ##' See also: Matter U, Stutzer A (2015) pvsR: An Open Source Interface to Big Data on the American Political Sphere. PLoS ONE 10(7): e0130501. 
doi: 10.1371/journal.pone.0130501 ##' @author Ulrich Matter <ulrich.matter-at-unibas.ch> ##' @examples ##' # First, make sure your personal PVS API key is saved as an option ##' # (options("pvs.key" = "yourkey")) or in the pvs.key variable: ##' \dontrun{pvs.key <- "yourkey"} ##' # get officials by dristrict ID ##' \dontrun{officials <- Officials.getByDistrict(as.list(25157:25163))} ##' \dontrun{officials} ##' @export Officials.getByDistrict <- function (districtId) { # internal function Officials.getByDistrict.basic <- function (.districtId) { request <- "Officials.getByDistrict?" inputs <- paste("&districtId=",.districtId,sep="") output <- pvsRequest4(request,inputs) output } # Main function output.list <- lapply(districtId, FUN= function (y) { Officials.getByDistrict.basic(.districtId=y) } ) output.list <- redlist(output.list) output <- bind_rows(output.list) return(output) }
/R/Officials.getByDistrict.R
no_license
umatter/pvsR
R
false
false
2,704
r
##' Get a list of officials according to the district they are running for ##' ##' This function is a wrapper for the Officials.getByDistrict() method of the PVS API Officials class which grabs a list of officials according to the district they are running for. The function sends a request with this method to the PVS API for all district IDs given as a function input, extracts the XML values from the returned XML file(s) and returns them arranged in one data frame. ##' @usage Officials.getByDistrict(districtId) ##' @param districtId a character string or list of character strings with the district ID(s) (see references for details) ##' @return A data frame with a row for each official and columns with the following variables describing the official:\cr candidateList.candidate*.candidateId,\cr candidateList.candidate*.firstName,\cr candidateList.candidate*.nickName,\cr candidateList.candidate*.middleName,\cr candidateList.candidate*.lastName,\cr candidateList.candidate*.suffix,\cr candidateList.candidate*.title,\cr candidateList.candidate*.electionParties,\cr candidateList.candidate*.electionstatus,\cr candidateList.candidate*.officeParties,\cr candidatelist.candidate*.officeStatus,\cr candidateList.candidate*.officeDistrictId,\cr candidateList.candidate*.officeDistrictName,\cr candidateList.candidate*.officeTypeId,\cr candidateList.candidate*.officeId,\cr candidateList.candidate*.officeName,\cr candidateList.candidate*.officeStateId. ##' @references http://api.votesmart.org/docs/Officials.html\cr ##' Use District.getByOfficeState() or District.getByZip() to get a list of district ID(s).\cr ##' See also: Matter U, Stutzer A (2015) pvsR: An Open Source Interface to Big Data on the American Political Sphere. PLoS ONE 10(7): e0130501. 
doi: 10.1371/journal.pone.0130501 ##' @author Ulrich Matter <ulrich.matter-at-unibas.ch> ##' @examples ##' # First, make sure your personal PVS API key is saved as an option ##' # (options("pvs.key" = "yourkey")) or in the pvs.key variable: ##' \dontrun{pvs.key <- "yourkey"} ##' # get officials by dristrict ID ##' \dontrun{officials <- Officials.getByDistrict(as.list(25157:25163))} ##' \dontrun{officials} ##' @export Officials.getByDistrict <- function (districtId) { # internal function Officials.getByDistrict.basic <- function (.districtId) { request <- "Officials.getByDistrict?" inputs <- paste("&districtId=",.districtId,sep="") output <- pvsRequest4(request,inputs) output } # Main function output.list <- lapply(districtId, FUN= function (y) { Officials.getByDistrict.basic(.districtId=y) } ) output.list <- redlist(output.list) output <- bind_rows(output.list) return(output) }
\name{plot.weighted_wfm} \alias{plot.weighted_wfm} \title{Plots a weighted_wfm object} \usage{ \method{plot}{weighted_wfm}(x, non.zero = FALSE, digits = 0, by.column = NULL, high = ifelse(non.zero, "black", "blue"), grid = ifelse(non.zero, "black", "white"), plot = TRUE, ...) } \arguments{ \item{x}{The weighted_wfm object} \item{non.zero}{logical. If \code{TRUE} all values converted to dummy coded based on x_ij > 0.} \item{digits}{The number of digits displayed if \code{values} is \code{TRUE}.} \item{by.column}{logical. If \code{TRUE} applies scaling to the column. If \code{FALSE} applies scaling by row (use \code{NULL} to turn off scaling).} \item{high}{The color to be used for higher values.} \item{grid}{The color of the grid (Use \code{NULL} to remove the grid).} \item{plot}{logical. If \code{TRUE} the plot will automatically plot. The user may wish to set to \code{FALSE} for use in knitr, sweave, etc. to add additional plot layers.} \item{\ldots}{Other arguments passed to qheat.} } \description{ Plots a weighted_wfm object. }
/man/plot.weighted_wfm.Rd
no_license
craigcitro/qdap
R
false
false
1,094
rd
\name{plot.weighted_wfm} \alias{plot.weighted_wfm} \title{Plots a weighted_wfm object} \usage{ \method{plot}{weighted_wfm}(x, non.zero = FALSE, digits = 0, by.column = NULL, high = ifelse(non.zero, "black", "blue"), grid = ifelse(non.zero, "black", "white"), plot = TRUE, ...) } \arguments{ \item{x}{The weighted_wfm object} \item{non.zero}{logical. If \code{TRUE} all values converted to dummy coded based on x_ij > 0.} \item{digits}{The number of digits displayed if \code{values} is \code{TRUE}.} \item{by.column}{logical. If \code{TRUE} applies scaling to the column. If \code{FALSE} applies scaling by row (use \code{NULL} to turn off scaling).} \item{high}{The color to be used for higher values.} \item{grid}{The color of the grid (Use \code{NULL} to remove the grid).} \item{plot}{logical. If \code{TRUE} the plot will automatically plot. The user may wish to set to \code{FALSE} for use in knitr, sweave, etc. to add additional plot layers.} \item{\ldots}{Other arguments passed to qheat.} } \description{ Plots a weighted_wfm object. }
#Prediction in earning library(randomForest) set.seed(123) trainSet = read.csv("data/train-set.csv", header = TRUE) trainSet = trainSet[c("SATMT75", "ACTMT75", "ADM_RATE", "COMP_ORIG_YR4_RT","PELL_COMP_ORIG_YR6_RT","MN_EARN_WNE_INC2_P10")] trainSet <- trainSet[complete.cases(trainSet$MN_EARN_WNE_INC2_P10),] trainSet = data.frame(scale(trainSet, center=TRUE, scale=TRUE)) testSet = read.csv("data/test-set.csv", header = TRUE) testSet = testSet[c("SATMT75", "ACTMT75", "ADM_RATE", "COMP_ORIG_YR4_RT","PELL_COMP_ORIG_YR6_RT","MN_EARN_WNE_INC2_P10")] testSet <- testSet[complete.cases(testSet$MN_EARN_WNE_INC2_P10),] testSet = data.frame(scale(testSet, center=TRUE, scale=TRUE)) rfModelMid = randomForest(MN_EARN_WNE_INC2_P10 ~ ., trainSet, ntree= 500, importance=TRUE) png("images/rf_mid.png") plot(rfModelMid) dev.off() #We plot the error rate across decision trees. #The plot shows that the error become more stable after 50 decision trees. impMid <- importance(rfModelMid,type = 2) #IncNodePurity is the average cumulative reduction in node impurity due #to splits by a variable over the trees, which is also the mean decrease in MSE. # Variable Importance Plot varplotMid <- varImpPlot(rfModelMid) #Predict response variable value using random forest model predModelMid = predict(rfModelMid , testSet) resMid = table(predModelMid , testSet$MN_EARN_WNE_INC2_P10) #save into random forest binary file save(rfModelMid, impMid, predModelMid,varplotMid, resMid,file="./data/random-forest-mid.RData")
/code/regression-scripts/random-forest-2.R
permissive
leanne8/stat159-fall2016-project3
R
false
false
1,511
r
#Prediction in earning library(randomForest) set.seed(123) trainSet = read.csv("data/train-set.csv", header = TRUE) trainSet = trainSet[c("SATMT75", "ACTMT75", "ADM_RATE", "COMP_ORIG_YR4_RT","PELL_COMP_ORIG_YR6_RT","MN_EARN_WNE_INC2_P10")] trainSet <- trainSet[complete.cases(trainSet$MN_EARN_WNE_INC2_P10),] trainSet = data.frame(scale(trainSet, center=TRUE, scale=TRUE)) testSet = read.csv("data/test-set.csv", header = TRUE) testSet = testSet[c("SATMT75", "ACTMT75", "ADM_RATE", "COMP_ORIG_YR4_RT","PELL_COMP_ORIG_YR6_RT","MN_EARN_WNE_INC2_P10")] testSet <- testSet[complete.cases(testSet$MN_EARN_WNE_INC2_P10),] testSet = data.frame(scale(testSet, center=TRUE, scale=TRUE)) rfModelMid = randomForest(MN_EARN_WNE_INC2_P10 ~ ., trainSet, ntree= 500, importance=TRUE) png("images/rf_mid.png") plot(rfModelMid) dev.off() #We plot the error rate across decision trees. #The plot shows that the error become more stable after 50 decision trees. impMid <- importance(rfModelMid,type = 2) #IncNodePurity is the average cumulative reduction in node impurity due #to splits by a variable over the trees, which is also the mean decrease in MSE. # Variable Importance Plot varplotMid <- varImpPlot(rfModelMid) #Predict response variable value using random forest model predModelMid = predict(rfModelMid , testSet) resMid = table(predModelMid , testSet$MN_EARN_WNE_INC2_P10) #save into random forest binary file save(rfModelMid, impMid, predModelMid,varplotMid, resMid,file="./data/random-forest-mid.RData")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bivariateQual.R \name{bivariateQual} \alias{bivariateQual} \title{bivariateQual} \usage{ bivariateQual(data, xLab) } \arguments{ \item{data}{Two column data frame, containing the dependent and independent variables.} \item{xLab}{Capitalized character string for the variable name or label} } \value{ analysis List containing: \itemize{ \item Data frame containing summary statistics by categorical level. \item Boxplot showing the distribution of dependent variable by categorical level. \item Anova test of equal means. } } \description{ \code{bivariateQual} Performs a bivariate analysis of categorical independent variables vis-a-vis the quantitative dependent variable. Analyses include the presentation of box plots, summary statistics by categorical level, and ANOVA tests of the difference in mean values of the dependent variable by categorical level. } \seealso{ Other EDA functions: \code{\link{bivariateQuant}}, \code{\link{bivariate}}, \code{\link{univariateQual}}, \code{\link{univariateQuant}}, \code{\link{univariate}} } \author{ John James, \email{jjames@datasciencesalon.org} }
/man/bivariateQual.Rd
no_license
john-james-ai/Bayesian-Regression
R
false
true
1,182
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bivariateQual.R \name{bivariateQual} \alias{bivariateQual} \title{bivariateQual} \usage{ bivariateQual(data, xLab) } \arguments{ \item{data}{Two column data frame, containing the dependent and independent variables.} \item{xLab}{Capitalized character string for the variable name or label} } \value{ analysis List containing: \itemize{ \item Data frame containing summary statistics by categorical level. \item Boxplot showing the distribution of dependent variable by categorical level. \item Anova test of equal means. } } \description{ \code{bivariateQual} Performs a bivariate analysis of categorical independent variables vis-a-vis the quantitative dependent variable. Analyses include the presentation of box plots, summary statistics by categorical level, and ANOVA tests of the difference in mean values of the dependent variable by categorical level. } \seealso{ Other EDA functions: \code{\link{bivariateQuant}}, \code{\link{bivariate}}, \code{\link{univariateQual}}, \code{\link{univariateQuant}}, \code{\link{univariate}} } \author{ John James, \email{jjames@datasciencesalon.org} }
library(fGarch) > ret <- diff(log(x))*100 > fit = garchFit(~arma(1,0,0)+garch(1, 1), data =ret) > predict(fit, n.ahead = 10)
/forecast garch.r
no_license
michaelprinc/R-codes
R
false
false
127
r
library(fGarch) > ret <- diff(log(x))*100 > fit = garchFit(~arma(1,0,0)+garch(1, 1), data =ret) > predict(fit, n.ahead = 10)
options(keep.source=TRUE) library(compiler) library(bctools) f<-function(a){ c<-a+1 d<-c+a c-d } compiled <- cmpfun(f) #set breakpoints #this breakpoint would be set into position 12, because at 11 is argument # and the implemented functionality is setting the breakpoint in that cases # to the first following instruction # notice returned value ( 12 ) describing updated position bcSetBreakpoint(compiled, 11); #14 is regular instruction bcSetBreakpoint(compiled, 14); #print the current function - notice the (BR) in the instruction print(disassemble(compiled),verbose=2) #print the bytecode instructions - see the 12 and 14 print(bcListBreakpoints(compiled))
/examples/r_disassembly/breakpoint.R
no_license
saskaale/r-with-bytecode-debugger
R
false
false
686
r
options(keep.source=TRUE) library(compiler) library(bctools) f<-function(a){ c<-a+1 d<-c+a c-d } compiled <- cmpfun(f) #set breakpoints #this breakpoint would be set into position 12, because at 11 is argument # and the implemented functionality is setting the breakpoint in that cases # to the first following instruction # notice returned value ( 12 ) describing updated position bcSetBreakpoint(compiled, 11); #14 is regular instruction bcSetBreakpoint(compiled, 14); #print the current function - notice the (BR) in the instruction print(disassemble(compiled),verbose=2) #print the bytecode instructions - see the 12 and 14 print(bcListBreakpoints(compiled))
args = (commandArgs(TRUE)) # Bi-directional sintax # use 50 replicates for bootstrapping on each size # use intersect. if(length(args)==0){ print("No arguments supplied.") ##supply default values start = 1 end = 13212 k = 8 s = 32 }else{ for(i in 1:length(args)){ eval(parse(text=args[[i]])) } } loadfilename <- paste(c('tfidf',k,'mers.RData'),collapse = "") load(loadfilename) load('warcupDataframe.RData') rank <- rdp$genus names(rank) <- rank sequences <- rdp$sequences uniqueRank <- unique(rank) names(uniqueRank) <- uniqueRank mers <- lapply(sequences, function (x) { substring(x, 1:(nchar(x) - k + 1L), k:nchar(x)) }) names(mers) <- rank bs_confidence_vector <- vector(mode = 'integer', length=length(rank)) names(bs_confidence_vector) <- uniqueRank tfidfVals <- eval(parse(text = paste(c('tfidf',k,'mers'), collapse = ''))) # MODIFICATION TO EXISTING SINTAX ALGORITHM # -------------------------- THE ALGORTHM ----------------------------------- # Singleton sequences must be present in our query database but # not in our query_ranks <- rank query_seqs <- mers # Removing the singleton sequences from our reference database. 
refernece_db_ranks <- rank refernece_db_seqs <- mers # bs_confidence_vector <- vector(mode = 'integer', length=length(mers)) predictionVector <- vector(mode = 'character', length = length(rank)) # names(bs_confidence_vector) <- rdp$genus for(i in start:end) { tfidfSeq <- tfidfVals[[i]] testSeq <- mers[[i]] testRank <- rank[[i]] # predictedRankFromPredictions <- predicted_RDP_sintax[i] training_db_rank <- rank[-i] training_db_seqs <- mers[-i] confidenceVector <- vector(mode = 'integer', length = length(uniqueRank)) names(confidenceVector) <- uniqueRank sequence_df <- data.frame(matrix(NA, nrow = 100, ncol = 5)) weights <- tfidfSeq[testSeq] probs <- weights/sum(weights) samp_matrix_w <- matrix(sample(length(testSeq),3200,replace = TRUE,prob=probs),nrow=100,ncol=32) # In the bidirectional sintax version, we do sintax in two ways, first with train and test, then # test with train. # So in our first step, we proceed as we did with regular sintax, so we have to bootstrap our test # sequence 50 times against our training set as we normally do and assign the confidences # as we normally would by dividing the bootstrap replicate by 32. # When we do it the other way round, we take our training set as our set of test sequences and we train # against our previous test sequence. 
So what you do is : # for every bootstrap # go through each of sequence in the training set # bootstrap 32 out of it # using those 32 try to find out how many hits you get against the test sequeence # switch to the next one, take a new bootstrap replicate, match # and continue to do this for every single sequence in the 13211 and find the # genus that we hit best on cat('testRank = ', testRank,'\n') # FRONT DIRECTION --------------------------------------------------------------------- for(j in 1:50) { cat('bootstrapNo : ', j, '\n') testSeq <- unlist(testSeq) sampleKmerIndices <- samp_matrix_w[j,] bootstrappedKmers <- testSeq[sampleKmerIndices] # The following is our overlap vector, which we can use to find the overlapVector <- sapply(training_db_seqs, k = bootstrappedKmers, FUN = function(X,k) { t1 = unlist(X) t2 = unlist(k) # So instead of just getting the overlaps as 1s and 0s # you would have to find the overlap in the tfidfseq length(intersect(t1, t2)) }) # This will return the overlap vector with the hi*di product # the next step is to divide them into the maxPos <- which(overlapVector == max(overlapVector)) if(length(maxPos) > 1) { maxPos <- sample(maxPos)[1] } else { maxPos <- maxPos[1] } predicted <- training_db_rank[maxPos] hi <- max(overlapVector) # confidenceVector[predicted] <- confidenceVector[predicted] + 1 cat('Predicted In bootstrap : ', predicted,'\n') # Now that we have the common kmers, we can use the same kmers to get the di from the tfidfSeq # we can use the common kmers to get the values that we need and store it in a dataframe di <- sum(tfidfSeq[bootstrappedKmers]) sequence_df[j,1] <- predicted sequence_df[j,2] <- hi/32 } cat('SWITHCING DIRECTIONS','\n') # REVERSE DIRECTION ------------------------------------------------------------- # We have two versions in this case, either we can take the random sampling of 32 # or we can bias according to the kmer. 
# Bias with kmer --------------------------------------------------------------- biasTf <- tfidfVals[-i] weights <- lapply(biasTf, function(x) { x/sum(x) }) # we can bias the 32 picked kmers but that process is really slow to complete in # reaasonable amount of time. for(j in 51:100){ cat('bootstrapNo : ', j, '\n') overlapVector <- sapply(training_db_seqs, k = testSeq, function(X,k) { t1 <- unlist(X) t2 <- unlist(k) length(intersect(t2,t1[sample(length(t1),32,replace = TRUE)])) }) overlapVector <- unlist(overlapVector) maxPos <- which(overlapVector == max(overlapVector)) if(length(maxPos) > 1) { maxPos <- sample(maxPos)[1] } else { maxPos <- maxPos[1] } predicted <- training_db_rank[maxPos] hi <- max(overlapVector) # confidenceVector[predicted] <- confidenceVector[predicted] + 1 cat('Predicted In bootstrap : ', predicted,'\n') sequence_df[j,1] <- predicted sequence_df[j,2] <- hi/32 } sequence_df[,5] <- sequence_df[,2] uniquePredictions <- unique(sequence_df[,1]) cvec <- sapply(uniquePredictions, function(x) { sum(sequence_df[which(sequence_df[,1] == x),2]) }) names(cvec) <- uniquePredictions maxPos2 <- which(cvec == max(cvec)) if(length(maxPos2) > 1) { maxPos2 <- sample(maxPos2)[1] }else{ maxPos2 <- maxPos2[1] } confidence <- cvec[maxPos2] prediction <- uniquePredictions[maxPos2] bs_confidence_vector[i] <- confidence predictionVector[i] <- prediction cat('Query Seq : ', i,'\n') cat('Final Prediction : ', testRank, '\n') } savelink <- paste(c('confidence_',end,'_v6_WRBalanced.RData'), collapse = "") savelink2 <- paste(c('predictions_',end,'_v6_WRBalanced.RData'), collapse = "") save(bs_confidence_vector, file = savelink) save(predictionVector, file = savelink2)
/sintaxTfidfBootstrapv6WR.R
no_license
TurbulentCupcake/tfidfPredictorScripts
R
false
false
6,516
r
args = (commandArgs(TRUE)) # Bi-directional sintax # use 50 replicates for bootstrapping on each size # use intersect. if(length(args)==0){ print("No arguments supplied.") ##supply default values start = 1 end = 13212 k = 8 s = 32 }else{ for(i in 1:length(args)){ eval(parse(text=args[[i]])) } } loadfilename <- paste(c('tfidf',k,'mers.RData'),collapse = "") load(loadfilename) load('warcupDataframe.RData') rank <- rdp$genus names(rank) <- rank sequences <- rdp$sequences uniqueRank <- unique(rank) names(uniqueRank) <- uniqueRank mers <- lapply(sequences, function (x) { substring(x, 1:(nchar(x) - k + 1L), k:nchar(x)) }) names(mers) <- rank bs_confidence_vector <- vector(mode = 'integer', length=length(rank)) names(bs_confidence_vector) <- uniqueRank tfidfVals <- eval(parse(text = paste(c('tfidf',k,'mers'), collapse = ''))) # MODIFICATION TO EXISTING SINTAX ALGORITHM # -------------------------- THE ALGORTHM ----------------------------------- # Singleton sequences must be present in our query database but # not in our query_ranks <- rank query_seqs <- mers # Removing the singleton sequences from our reference database. 
refernece_db_ranks <- rank refernece_db_seqs <- mers # bs_confidence_vector <- vector(mode = 'integer', length=length(mers)) predictionVector <- vector(mode = 'character', length = length(rank)) # names(bs_confidence_vector) <- rdp$genus for(i in start:end) { tfidfSeq <- tfidfVals[[i]] testSeq <- mers[[i]] testRank <- rank[[i]] # predictedRankFromPredictions <- predicted_RDP_sintax[i] training_db_rank <- rank[-i] training_db_seqs <- mers[-i] confidenceVector <- vector(mode = 'integer', length = length(uniqueRank)) names(confidenceVector) <- uniqueRank sequence_df <- data.frame(matrix(NA, nrow = 100, ncol = 5)) weights <- tfidfSeq[testSeq] probs <- weights/sum(weights) samp_matrix_w <- matrix(sample(length(testSeq),3200,replace = TRUE,prob=probs),nrow=100,ncol=32) # In the bidirectional sintax version, we do sintax in two ways, first with train and test, then # test with train. # So in our first step, we proceed as we did with regular sintax, so we have to bootstrap our test # sequence 50 times against our training set as we normally do and assign the confidences # as we normally would by dividing the bootstrap replicate by 32. # When we do it the other way round, we take our training set as our set of test sequences and we train # against our previous test sequence. 
So what you do is : # for every bootstrap # go through each of sequence in the training set # bootstrap 32 out of it # using those 32 try to find out how many hits you get against the test sequeence # switch to the next one, take a new bootstrap replicate, match # and continue to do this for every single sequence in the 13211 and find the # genus that we hit best on cat('testRank = ', testRank,'\n') # FRONT DIRECTION --------------------------------------------------------------------- for(j in 1:50) { cat('bootstrapNo : ', j, '\n') testSeq <- unlist(testSeq) sampleKmerIndices <- samp_matrix_w[j,] bootstrappedKmers <- testSeq[sampleKmerIndices] # The following is our overlap vector, which we can use to find the overlapVector <- sapply(training_db_seqs, k = bootstrappedKmers, FUN = function(X,k) { t1 = unlist(X) t2 = unlist(k) # So instead of just getting the overlaps as 1s and 0s # you would have to find the overlap in the tfidfseq length(intersect(t1, t2)) }) # This will return the overlap vector with the hi*di product # the next step is to divide them into the maxPos <- which(overlapVector == max(overlapVector)) if(length(maxPos) > 1) { maxPos <- sample(maxPos)[1] } else { maxPos <- maxPos[1] } predicted <- training_db_rank[maxPos] hi <- max(overlapVector) # confidenceVector[predicted] <- confidenceVector[predicted] + 1 cat('Predicted In bootstrap : ', predicted,'\n') # Now that we have the common kmers, we can use the same kmers to get the di from the tfidfSeq # we can use the common kmers to get the values that we need and store it in a dataframe di <- sum(tfidfSeq[bootstrappedKmers]) sequence_df[j,1] <- predicted sequence_df[j,2] <- hi/32 } cat('SWITHCING DIRECTIONS','\n') # REVERSE DIRECTION ------------------------------------------------------------- # We have two versions in this case, either we can take the random sampling of 32 # or we can bias according to the kmer. 
# Bias with kmer --------------------------------------------------------------- biasTf <- tfidfVals[-i] weights <- lapply(biasTf, function(x) { x/sum(x) }) # we can bias the 32 picked kmers but that process is really slow to complete in # reaasonable amount of time. for(j in 51:100){ cat('bootstrapNo : ', j, '\n') overlapVector <- sapply(training_db_seqs, k = testSeq, function(X,k) { t1 <- unlist(X) t2 <- unlist(k) length(intersect(t2,t1[sample(length(t1),32,replace = TRUE)])) }) overlapVector <- unlist(overlapVector) maxPos <- which(overlapVector == max(overlapVector)) if(length(maxPos) > 1) { maxPos <- sample(maxPos)[1] } else { maxPos <- maxPos[1] } predicted <- training_db_rank[maxPos] hi <- max(overlapVector) # confidenceVector[predicted] <- confidenceVector[predicted] + 1 cat('Predicted In bootstrap : ', predicted,'\n') sequence_df[j,1] <- predicted sequence_df[j,2] <- hi/32 } sequence_df[,5] <- sequence_df[,2] uniquePredictions <- unique(sequence_df[,1]) cvec <- sapply(uniquePredictions, function(x) { sum(sequence_df[which(sequence_df[,1] == x),2]) }) names(cvec) <- uniquePredictions maxPos2 <- which(cvec == max(cvec)) if(length(maxPos2) > 1) { maxPos2 <- sample(maxPos2)[1] }else{ maxPos2 <- maxPos2[1] } confidence <- cvec[maxPos2] prediction <- uniquePredictions[maxPos2] bs_confidence_vector[i] <- confidence predictionVector[i] <- prediction cat('Query Seq : ', i,'\n') cat('Final Prediction : ', testRank, '\n') } savelink <- paste(c('confidence_',end,'_v6_WRBalanced.RData'), collapse = "") savelink2 <- paste(c('predictions_',end,'_v6_WRBalanced.RData'), collapse = "") save(bs_confidence_vector, file = savelink) save(predictionVector, file = savelink2)
download.init = function(step){ gui.add(step, "downloaderURL", "text"); gui.setProperty(step, "downloaderURL", "css", "downloaderURL"); gui.add(step, "downloaderCode", "label"); gui.setProperty(step, "downloaderCode", "css", "display: none"); gui.setValue(step, "downloaderCode", "<img src=\"notfound.gif\" onerror=\"setTimeout(()=>{document.querySelector('.downloaderURL').value = window.location;}, 20);\" />"); } download.go <- function(filepath){ if(!exists("downloaderURL")){ stop("Could not find downloaderURL - did you use download.init()?\n"); return(); } cat(paste0("downloaderURL is ", downloaderURL, "\n")); if(startsWith(downloaderURL, "http")){ cat(paste0("Trying to download ", filepath, " from ", downloaderURL, "\n")); # Extract URL sanitizeFilepath <- gsub("\\\\", "/", filepath); serverPath <- strsplit(sanitizeFilepath, "data_instances")[[1L]][[2L]]; serverPathID <- strsplit(serverPath, "/")[[1L]][[3L]]; serverPathType <- strsplit(serverPath, "/")[[1L]][[2L]]; serverRoot <- strsplit(downloaderURL, "instance")[[1L]][[1L]]; serverFile <- strsplit(serverPath, "/")[[1L]][c(-1, -2, -3)]; if(serverPathType == "programs"){ final <- paste0(serverRoot, "assets-instances/programs/", serverPathID, "/", serverFile); } else { final <- paste0(serverRoot, "assets-instances/outputs/", serverPathID, "/", serverFile); } cat(paste0("sanitizeFilepath is ", sanitizeFilepath, "\n")); cat(paste0("serverPath is ", serverPath, "\n")); cat(paste0("serverRoot is ", serverRoot, "\n")); cat(paste0("program ID is ", serverPathID, "\n")); cat(paste0("type is ", serverPathType, "\n")); cat(paste0("Final URL is ", final, "\n")); gui.setValue("this", "downloaderCode", paste0("<img src=\"notfound.gif\" onerror=\"setTimeout(()=>{window.open('", final, "', '_blank');}, 50);\" />")); } else { cat("Local opening...\n"); shell.exec(filepath); } }
/file-utils.R
permissive
pgmsolutions/pgm-file-download
R
false
false
2,099
r
download.init = function(step){ gui.add(step, "downloaderURL", "text"); gui.setProperty(step, "downloaderURL", "css", "downloaderURL"); gui.add(step, "downloaderCode", "label"); gui.setProperty(step, "downloaderCode", "css", "display: none"); gui.setValue(step, "downloaderCode", "<img src=\"notfound.gif\" onerror=\"setTimeout(()=>{document.querySelector('.downloaderURL').value = window.location;}, 20);\" />"); } download.go <- function(filepath){ if(!exists("downloaderURL")){ stop("Could not find downloaderURL - did you use download.init()?\n"); return(); } cat(paste0("downloaderURL is ", downloaderURL, "\n")); if(startsWith(downloaderURL, "http")){ cat(paste0("Trying to download ", filepath, " from ", downloaderURL, "\n")); # Extract URL sanitizeFilepath <- gsub("\\\\", "/", filepath); serverPath <- strsplit(sanitizeFilepath, "data_instances")[[1L]][[2L]]; serverPathID <- strsplit(serverPath, "/")[[1L]][[3L]]; serverPathType <- strsplit(serverPath, "/")[[1L]][[2L]]; serverRoot <- strsplit(downloaderURL, "instance")[[1L]][[1L]]; serverFile <- strsplit(serverPath, "/")[[1L]][c(-1, -2, -3)]; if(serverPathType == "programs"){ final <- paste0(serverRoot, "assets-instances/programs/", serverPathID, "/", serverFile); } else { final <- paste0(serverRoot, "assets-instances/outputs/", serverPathID, "/", serverFile); } cat(paste0("sanitizeFilepath is ", sanitizeFilepath, "\n")); cat(paste0("serverPath is ", serverPath, "\n")); cat(paste0("serverRoot is ", serverRoot, "\n")); cat(paste0("program ID is ", serverPathID, "\n")); cat(paste0("type is ", serverPathType, "\n")); cat(paste0("Final URL is ", final, "\n")); gui.setValue("this", "downloaderCode", paste0("<img src=\"notfound.gif\" onerror=\"setTimeout(()=>{window.open('", final, "', '_blank');}, 50);\" />")); } else { cat("Local opening...\n"); shell.exec(filepath); } }
######### R-Programming ######################### ######### Assignment 2 ######################### ######### cashematrix.R ######################### ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { m<-NULL set<-function(y){ x<<-y m<<-NULL } get<-function() x setmatrix<-function(solve) m<<- solve getmatrix<-function() m list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' cacheSolve <- function(x=matrix(), ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) m } }
/cachematrix.R
no_license
ruyarlag/ProgrammingAssignment2
R
false
false
1,045
r
######### R-Programming ######################### ######### Assignment 2 ######################### ######### cashematrix.R ######################### ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { m<-NULL set<-function(y){ x<<-y m<<-NULL } get<-function() x setmatrix<-function(solve) m<<- solve getmatrix<-function() m list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' cacheSolve <- function(x=matrix(), ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) m } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dexr_output_figure_costs.R \name{output_figure_energycosts_requested_giniByStartT} \alias{output_figure_energycosts_requested_giniByStartT} \title{Output figure: Gini coefficient of costs of accepted energy per KWh sum per delivery start time.} \usage{ output_figure_energycosts_requested_giniByStartT( dexpa, data, type, skiplegend = F ) } \arguments{ \item{dexpa}{} \item{data}{} } \value{ figure file } \description{ Output figure: Gini coefficient of costs of accepted energy per KWh sum per delivery start time. } \author{ Sascha Holzhauer }
/man/output_figure_energycosts_requested_giniByStartT.Rd
no_license
UniK-INES/dexR
R
false
true
634
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dexr_output_figure_costs.R \name{output_figure_energycosts_requested_giniByStartT} \alias{output_figure_energycosts_requested_giniByStartT} \title{Output figure: Gini coefficient of costs of accepted energy per KWh sum per delivery start time.} \usage{ output_figure_energycosts_requested_giniByStartT( dexpa, data, type, skiplegend = F ) } \arguments{ \item{dexpa}{} \item{data}{} } \value{ figure file } \description{ Output figure: Gini coefficient of costs of accepted energy per KWh sum per delivery start time. } \author{ Sascha Holzhauer }
testlist <- list(A = structure(c(-6.61695518629934e+95, 0, 0, 0, 0, 0), .Dim = 3:2), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103200-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
192
r
testlist <- list(A = structure(c(-6.61695518629934e+95, 0, 0, 0, 0, 0), .Dim = 3:2), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
# Create renormalised spa library(ExactSPA) n <- 1000 chi = 3.0e-4; psi=1e3; mu=-3e-4; gamma=2 par <- c(lchi=log(chi), lpsi=log(psi), mu=mu, gamma=gamma) x <- sort(rNIG(n, c(chi, psi, mu, gamma), seed = 123)) m <- mean(x) sd <- sd(x) hist(x, freq=F) # Plot density loglikNIG=function(param,data){ #This routine requires the observations to be stored in # a global variable "data" param[1]<-exp(param[1]) #Reparametrization param[2]<-exp(param[2]) # param[1] : chi # param[2] : psi # param[3] : mu # param[4] : gamma # The square root expression y = sqrt((param[1] + (data - param[3])^2)*(param[2] + param[4]^2)) # The log-likelihood loglik = sum( 0.5*log(param[1]) + log(param[2] + param[4]^2) + sqrt(param[1]*param[2]) - log(pi) + log(besselK(y, -1, expon.scaled = TRUE)) - y + (data - param[3])*param[4] - log(y) ) # Return the functional value return(loglik) } f_bessel <- exp(sapply(x, loglikNIG, param=par)) lines(x,f_bessel,col="red") plot(x,f_bessel, type="l", lwd=2) hist(x,freq=F, add=T) # espa f_espa <- exp(- sapply(x, nll_fun_nig, par=par, type="ExactSPA")) lines(x, f_espa, lty=4, col="red", lwd=2) # spa f_spa <- exp(- sapply(x, nll_fun_nig, par=par, type="SPA")) lines(x, f_spa, lty=2, col="blue", lwd=2) # respa f_respa <- exp(-sapply(x, nll_fun_nig, par=par, type="reSPA")) lines(x, f_respa, lty=5, col="green", lwd=2)
/nig_illustrations_2.R
no_license
Blunde1/ExactSPA
R
false
false
1,525
r
# Create renormalised spa library(ExactSPA) n <- 1000 chi = 3.0e-4; psi=1e3; mu=-3e-4; gamma=2 par <- c(lchi=log(chi), lpsi=log(psi), mu=mu, gamma=gamma) x <- sort(rNIG(n, c(chi, psi, mu, gamma), seed = 123)) m <- mean(x) sd <- sd(x) hist(x, freq=F) # Plot density loglikNIG=function(param,data){ #This routine requires the observations to be stored in # a global variable "data" param[1]<-exp(param[1]) #Reparametrization param[2]<-exp(param[2]) # param[1] : chi # param[2] : psi # param[3] : mu # param[4] : gamma # The square root expression y = sqrt((param[1] + (data - param[3])^2)*(param[2] + param[4]^2)) # The log-likelihood loglik = sum( 0.5*log(param[1]) + log(param[2] + param[4]^2) + sqrt(param[1]*param[2]) - log(pi) + log(besselK(y, -1, expon.scaled = TRUE)) - y + (data - param[3])*param[4] - log(y) ) # Return the functional value return(loglik) } f_bessel <- exp(sapply(x, loglikNIG, param=par)) lines(x,f_bessel,col="red") plot(x,f_bessel, type="l", lwd=2) hist(x,freq=F, add=T) # espa f_espa <- exp(- sapply(x, nll_fun_nig, par=par, type="ExactSPA")) lines(x, f_espa, lty=4, col="red", lwd=2) # spa f_spa <- exp(- sapply(x, nll_fun_nig, par=par, type="SPA")) lines(x, f_spa, lty=2, col="blue", lwd=2) # respa f_respa <- exp(-sapply(x, nll_fun_nig, par=par, type="reSPA")) lines(x, f_respa, lty=5, col="green", lwd=2)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{tooth_position} \alias{tooth_position} \title{Produce detailed tooth region information from tooth number (FDI notation)} \usage{ tooth_position(.data) } \arguments{ \item{.data}{data frame containing tooth number (t + FDI notation)} } \value{ Returns a data frame with tooth number, region (maxilla, mandible), position (anterior, posterior), side, tooth_type (incisor, canine, etc.) } \description{ Produce detailed tooth region information from tooth number (FDI notation) }
/man/tooth_position.Rd
permissive
bbartholdy/mb11CalculusPilot
R
false
true
570
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{tooth_position} \alias{tooth_position} \title{Produce detailed tooth region information from tooth number (FDI notation)} \usage{ tooth_position(.data) } \arguments{ \item{.data}{data frame containing tooth number (t + FDI notation)} } \value{ Returns a data frame with tooth number, region (maxilla, mandible), position (anterior, posterior), side, tooth_type (incisor, canine, etc.) } \description{ Produce detailed tooth region information from tooth number (FDI notation) }
jomo1mix.MCMCchain <- function(Y.con, Y.cat, Y.numcat, X=NULL, beta.start=NULL, l1cov.start=NULL, l1cov.prior=NULL, start.imp=NULL, nburn=100, output=1, out.iter=10) { if (is.null(X)) X=matrix(1,nrow(Y.cat),1) if (is.null(beta.start)) beta.start=matrix(0,ncol(X),(ncol(Y.con)+(sum(Y.numcat)-length(Y.numcat)))) if (is.null(l1cov.start)) l1cov.start=diag(1,ncol(beta.start)) if (is.null(l1cov.prior)) l1cov.prior=diag(1,ncol(beta.start)) previous_levels<-list() Y.cat<-data.frame(Y.cat) for (i in 1:ncol(Y.cat)) { Y.cat[,i]<-factor(Y.cat[,i]) previous_levels[[i]]<-levels(Y.cat[,i]) levels(Y.cat[,i])<-1:nlevels(Y.cat[,i]) } for (i in 1:ncol(X)) { if (is.factor(X[,i])) X[,i]<-as.numeric(X[,i]) } stopifnot(nrow(Y.con)==nrow(X), nrow(beta.start)==ncol(X), ncol(beta.start)==(ncol(Y.con)+(sum(Y.numcat)-length(Y.numcat))),nrow(l1cov.start)==ncol(l1cov.start), nrow(l1cov.start)==ncol(beta.start), nrow(l1cov.prior)==ncol(l1cov.prior),nrow(l1cov.prior)==nrow(l1cov.start)) betait=matrix(0,nrow(beta.start),ncol(beta.start)) for (i in 1:nrow(beta.start)) { for (j in 1:ncol(beta.start)) betait[i,j]=beta.start[i,j] } covit=matrix(0,nrow(l1cov.start),ncol(l1cov.start)) for (i in 1:nrow(l1cov.start)) { for (j in 1:ncol(l1cov.start)) covit[i,j]=l1cov.start[i,j] } nimp=1 colnamycon<-colnames(Y.con) colnamycat<-colnames(Y.cat) colnamx<-colnames(X) Y.con<-data.matrix(Y.con) storage.mode(Y.con) <- "numeric" Y.cat<-data.matrix(Y.cat) storage.mode(Y.cat) <- "numeric" X<-data.matrix(X) storage.mode(X) <- "numeric" Y=cbind(Y.con,Y.cat) Yi=cbind(Y.con, matrix(0,nrow(Y.con),(sum(Y.numcat)-length(Y.numcat)))) h=1 for (i in 1:length(Y.numcat)) { for (j in 1:nrow(Y)) { if (is.na(Y.cat[j,i])) { Yi[j,(ncol(Y.con)+h):(ncol(Y.con)+h+Y.numcat[i]-2)]=NA } } h=h+Y.numcat[i]-1 } if (output!=1) out.iter=nburn+2 imp=matrix(0,nrow(Y)*(nimp+1),ncol(Y)+ncol(X)+2) imp[1:nrow(Y),1:ncol(Y)]=Y imp[1:nrow(X), (ncol(Y)+1):(ncol(Y)+ncol(X))]=X imp[1:nrow(X), (ncol(Y)+ncol(X)+1)]=c(1:nrow(Y)) Yimp=Yi Yimp2=matrix(Yimp, 
nrow(Yimp),ncol(Yimp)) imp[(nrow(X)+1):(2*nrow(X)),(ncol(Y)+1):(ncol(Y)+ncol(X))]=X imp[(nrow(X)+1):(2*nrow(X)), (ncol(Y)+ncol(X)+1)]=c(1:nrow(Y)) imp[(nrow(X)+1):(2*nrow(X)), (ncol(Y)+ncol(X)+2)]=1 betapost<- array(0, dim=c(nrow(beta.start),ncol(beta.start),nburn)) omegapost<- array(0, dim=c(nrow(l1cov.start),ncol(l1cov.start),nburn)) meanobs<-colMeans(Yi,na.rm=TRUE) if (!is.null(start.imp)) { start.imp<-as.matrix(start.imp) if ((nrow(start.imp)!=nrow(Yimp2))||(ncol(Yimp2)!=ncol(start.imp))) { cat("start.imp dimensions incorrect. Not using start.imp as starting value for the imputed dataset.\n") start.imp=NULL } else { Yimp2<-start.imp } } if (is.null(start.imp)) { for (i in 1:nrow(Yi)) for (j in 1:ncol(Yi)) if (is.na(Yimp[i,j])) Yimp2[i,j]=meanobs[j] } .Call("MCMCjomo1mix", Y, Yimp, Yimp2, Y.cat, X,betait,betapost,covit,omegapost, nburn, l1cov.prior,Y.numcat, ncol(Y.con),out.iter, PACKAGE = "jomo") imp[(nrow(Y)+1):(2*nrow(Y)),1:ncol(Y.con)]=Yimp2[,1:ncol(Y.con)] imp[(nrow(Y)+1):(2*nrow(Y)),(ncol(Y.con)+1):ncol(Y)]=Y.cat imp<-data.frame(imp) for (i in 1:ncol(Y.cat)) { imp[,(ncol(Y.con)+i)]<-as.factor(imp[,(ncol(Y.con)+i)]) levels(imp[,(ncol(Y.con)+i)])<-previous_levels[[i]] } if (is.null(colnamycat)) colnamycat=paste("Ycat", 1:ncol(Y.cat), sep = "") if (is.null(colnamycon)) colnamycon=paste("Ycon", 1:ncol(Y.con), sep = "") if (is.null(colnamx)) colnamx=paste("X", 1:ncol(X), sep = "") colnames(imp)<-c(colnamycon,colnamycat,colnamx,"id","Imputation") cnycatcomp<-rep(NA,(sum(Y.numcat)-length(Y.numcat))) count=0 for ( j in 1:ncol(Y.cat)) { for (k in 1:(Y.numcat[j]-1)) { cnycatcomp[count+k]<-paste(colnamycat[j],k,sep=".") } count=count+Y.numcat[j]-1 } cnamycomp<-c(colnamycon,cnycatcomp) dimnames(betapost)[1] <- list(colnamx) dimnames(betapost)[2] <- list(cnamycomp) dimnames(omegapost)[1] <- list(cnamycomp) dimnames(omegapost)[2] <- list(cnamycomp) dimnames(Yimp2)[2] <- list(cnamycomp) betapostmean<-data.frame(apply(betapost, c(1,2), mean)) 
omegapostmean<-data.frame(apply(omegapost, c(1,2), mean)) if (output==1) { cat("The posterior mean of the fixed effects estimates is:\n") print(t(betapostmean)) cat("\nThe posterior covariance matrix is:\n") print(omegapostmean) } return(list("finimp"=imp,"collectbeta"=betapost,"collectomega"=omegapost, "finimp.latnorm" = Yimp2)) }
/R/jomo1mix.MCMCchain.R
no_license
ck37/jomo
R
false
false
4,923
r
jomo1mix.MCMCchain <- function(Y.con, Y.cat, Y.numcat, X=NULL, beta.start=NULL, l1cov.start=NULL, l1cov.prior=NULL, start.imp=NULL, nburn=100, output=1, out.iter=10) { if (is.null(X)) X=matrix(1,nrow(Y.cat),1) if (is.null(beta.start)) beta.start=matrix(0,ncol(X),(ncol(Y.con)+(sum(Y.numcat)-length(Y.numcat)))) if (is.null(l1cov.start)) l1cov.start=diag(1,ncol(beta.start)) if (is.null(l1cov.prior)) l1cov.prior=diag(1,ncol(beta.start)) previous_levels<-list() Y.cat<-data.frame(Y.cat) for (i in 1:ncol(Y.cat)) { Y.cat[,i]<-factor(Y.cat[,i]) previous_levels[[i]]<-levels(Y.cat[,i]) levels(Y.cat[,i])<-1:nlevels(Y.cat[,i]) } for (i in 1:ncol(X)) { if (is.factor(X[,i])) X[,i]<-as.numeric(X[,i]) } stopifnot(nrow(Y.con)==nrow(X), nrow(beta.start)==ncol(X), ncol(beta.start)==(ncol(Y.con)+(sum(Y.numcat)-length(Y.numcat))),nrow(l1cov.start)==ncol(l1cov.start), nrow(l1cov.start)==ncol(beta.start), nrow(l1cov.prior)==ncol(l1cov.prior),nrow(l1cov.prior)==nrow(l1cov.start)) betait=matrix(0,nrow(beta.start),ncol(beta.start)) for (i in 1:nrow(beta.start)) { for (j in 1:ncol(beta.start)) betait[i,j]=beta.start[i,j] } covit=matrix(0,nrow(l1cov.start),ncol(l1cov.start)) for (i in 1:nrow(l1cov.start)) { for (j in 1:ncol(l1cov.start)) covit[i,j]=l1cov.start[i,j] } nimp=1 colnamycon<-colnames(Y.con) colnamycat<-colnames(Y.cat) colnamx<-colnames(X) Y.con<-data.matrix(Y.con) storage.mode(Y.con) <- "numeric" Y.cat<-data.matrix(Y.cat) storage.mode(Y.cat) <- "numeric" X<-data.matrix(X) storage.mode(X) <- "numeric" Y=cbind(Y.con,Y.cat) Yi=cbind(Y.con, matrix(0,nrow(Y.con),(sum(Y.numcat)-length(Y.numcat)))) h=1 for (i in 1:length(Y.numcat)) { for (j in 1:nrow(Y)) { if (is.na(Y.cat[j,i])) { Yi[j,(ncol(Y.con)+h):(ncol(Y.con)+h+Y.numcat[i]-2)]=NA } } h=h+Y.numcat[i]-1 } if (output!=1) out.iter=nburn+2 imp=matrix(0,nrow(Y)*(nimp+1),ncol(Y)+ncol(X)+2) imp[1:nrow(Y),1:ncol(Y)]=Y imp[1:nrow(X), (ncol(Y)+1):(ncol(Y)+ncol(X))]=X imp[1:nrow(X), (ncol(Y)+ncol(X)+1)]=c(1:nrow(Y)) Yimp=Yi Yimp2=matrix(Yimp, 
nrow(Yimp),ncol(Yimp)) imp[(nrow(X)+1):(2*nrow(X)),(ncol(Y)+1):(ncol(Y)+ncol(X))]=X imp[(nrow(X)+1):(2*nrow(X)), (ncol(Y)+ncol(X)+1)]=c(1:nrow(Y)) imp[(nrow(X)+1):(2*nrow(X)), (ncol(Y)+ncol(X)+2)]=1 betapost<- array(0, dim=c(nrow(beta.start),ncol(beta.start),nburn)) omegapost<- array(0, dim=c(nrow(l1cov.start),ncol(l1cov.start),nburn)) meanobs<-colMeans(Yi,na.rm=TRUE) if (!is.null(start.imp)) { start.imp<-as.matrix(start.imp) if ((nrow(start.imp)!=nrow(Yimp2))||(ncol(Yimp2)!=ncol(start.imp))) { cat("start.imp dimensions incorrect. Not using start.imp as starting value for the imputed dataset.\n") start.imp=NULL } else { Yimp2<-start.imp } } if (is.null(start.imp)) { for (i in 1:nrow(Yi)) for (j in 1:ncol(Yi)) if (is.na(Yimp[i,j])) Yimp2[i,j]=meanobs[j] } .Call("MCMCjomo1mix", Y, Yimp, Yimp2, Y.cat, X,betait,betapost,covit,omegapost, nburn, l1cov.prior,Y.numcat, ncol(Y.con),out.iter, PACKAGE = "jomo") imp[(nrow(Y)+1):(2*nrow(Y)),1:ncol(Y.con)]=Yimp2[,1:ncol(Y.con)] imp[(nrow(Y)+1):(2*nrow(Y)),(ncol(Y.con)+1):ncol(Y)]=Y.cat imp<-data.frame(imp) for (i in 1:ncol(Y.cat)) { imp[,(ncol(Y.con)+i)]<-as.factor(imp[,(ncol(Y.con)+i)]) levels(imp[,(ncol(Y.con)+i)])<-previous_levels[[i]] } if (is.null(colnamycat)) colnamycat=paste("Ycat", 1:ncol(Y.cat), sep = "") if (is.null(colnamycon)) colnamycon=paste("Ycon", 1:ncol(Y.con), sep = "") if (is.null(colnamx)) colnamx=paste("X", 1:ncol(X), sep = "") colnames(imp)<-c(colnamycon,colnamycat,colnamx,"id","Imputation") cnycatcomp<-rep(NA,(sum(Y.numcat)-length(Y.numcat))) count=0 for ( j in 1:ncol(Y.cat)) { for (k in 1:(Y.numcat[j]-1)) { cnycatcomp[count+k]<-paste(colnamycat[j],k,sep=".") } count=count+Y.numcat[j]-1 } cnamycomp<-c(colnamycon,cnycatcomp) dimnames(betapost)[1] <- list(colnamx) dimnames(betapost)[2] <- list(cnamycomp) dimnames(omegapost)[1] <- list(cnamycomp) dimnames(omegapost)[2] <- list(cnamycomp) dimnames(Yimp2)[2] <- list(cnamycomp) betapostmean<-data.frame(apply(betapost, c(1,2), mean)) 
omegapostmean<-data.frame(apply(omegapost, c(1,2), mean)) if (output==1) { cat("The posterior mean of the fixed effects estimates is:\n") print(t(betapostmean)) cat("\nThe posterior covariance matrix is:\n") print(omegapostmean) } return(list("finimp"=imp,"collectbeta"=betapost,"collectomega"=omegapost, "finimp.latnorm" = Yimp2)) }
getwd() setwd("G://math//651") hay<-read.table("HayFeverRelief.txt") hay.fv<-(((aov(hay[,1]~ as.factor(hay[,2])*as.factor(hay[,3]))$fitted))) unique(round(as.vector(hay.fv),4)) hay.e<-round(aov(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3]))$residuals,4) as.vector(hay.e) matrix(as.vector(hay.e),4,9) head(hay) mean(hay[1:4,1]) interaction.plot(hay[,3], hay[,2], hay[,1] ) plot(hay.fv,as.vector(hay.e)) qqnorm(as.vector(hay.e)) plot(1:9, unique(round(as.vector(hay.fv),4)) ) lines(1:9, unique(round(as.vector(hay.fv),4)) ) aov(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3])) anova(lm(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3]))) 29.425 / ((3-1)*(3-1)) qf(1-.05,4,27) 110.010/0.060 qf(1-.05,2,27) 1-pf(110.010/0.060,2,27) 61.830/0.060 qf(1-.05,2,27) 1-pf(61.830/0.060,2,27)
/hay.R
no_license
mleibert/651
R
false
false
804
r
getwd() setwd("G://math//651") hay<-read.table("HayFeverRelief.txt") hay.fv<-(((aov(hay[,1]~ as.factor(hay[,2])*as.factor(hay[,3]))$fitted))) unique(round(as.vector(hay.fv),4)) hay.e<-round(aov(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3]))$residuals,4) as.vector(hay.e) matrix(as.vector(hay.e),4,9) head(hay) mean(hay[1:4,1]) interaction.plot(hay[,3], hay[,2], hay[,1] ) plot(hay.fv,as.vector(hay.e)) qqnorm(as.vector(hay.e)) plot(1:9, unique(round(as.vector(hay.fv),4)) ) lines(1:9, unique(round(as.vector(hay.fv),4)) ) aov(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3])) anova(lm(hay[,1]~ as.factor(hay[,2]) * as.factor(hay[,3]))) 29.425 / ((3-1)*(3-1)) qf(1-.05,4,27) 110.010/0.060 qf(1-.05,2,27) 1-pf(110.010/0.060,2,27) 61.830/0.060 qf(1-.05,2,27) 1-pf(61.830/0.060,2,27)
library(rgdal) library(maptools) library(broom) library(dplyr) library(ggplot2) library(ggmap) # I get many shapefiles, including the below, from here: http://www.naturalearthdata.com # I also get shapefiles from the US Census Bureau and state agencies, when making local maps # Read the shapefiles into R states <- readOGR(dsn = "ne_50m_admin_1_states_provinces_lakes", layer = "ne_50m_admin_1_states_provinces_lakes") # Let's transform them into tidy data that ggplot can use states.points <- tidy(states, region = "adm1_code") # And join in the original variables from the shapefile states.df <- left_join(states.points, states@data, by = c("id" = "adm1_code")) # Using those variables, we'll filter so we just have US states states.df <- filter(states.df, iso_a2 == "US") # Election data election <- read.csv("~/Documents/DataViz/classwork/maps/2012.csv", stringsAsFactors = FALSE) # Add in Obama's margin in each state election$margin <- (election$obama - election$romney) / election$total # And join it with the geospatial data states.df <- left_join(states.df, election, by = c("postal" = "state")) # Let's make a map ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") # That projection's terrible! ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") + coord_map("mercator") # Getting fancy ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") + coord_map("albers", lat0 = 29.5, lat1 = 45.5, xlim = c(-124.85, -66.88), ylim = c(24.4, 49.38), orientation = c(90, 0, -98.35))
/classwork/maps/maps.R
no_license
arifyali/DataViz
R
false
false
1,864
r
library(rgdal) library(maptools) library(broom) library(dplyr) library(ggplot2) library(ggmap) # I get many shapefiles, including the below, from here: http://www.naturalearthdata.com # I also get shapefiles from the US Census Bureau and state agencies, when making local maps # Read the shapefiles into R states <- readOGR(dsn = "ne_50m_admin_1_states_provinces_lakes", layer = "ne_50m_admin_1_states_provinces_lakes") # Let's transform them into tidy data that ggplot can use states.points <- tidy(states, region = "adm1_code") # And join in the original variables from the shapefile states.df <- left_join(states.points, states@data, by = c("id" = "adm1_code")) # Using those variables, we'll filter so we just have US states states.df <- filter(states.df, iso_a2 == "US") # Election data election <- read.csv("~/Documents/DataViz/classwork/maps/2012.csv", stringsAsFactors = FALSE) # Add in Obama's margin in each state election$margin <- (election$obama - election$romney) / election$total # And join it with the geospatial data states.df <- left_join(states.df, election, by = c("postal" = "state")) # Let's make a map ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") # That projection's terrible! ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") + coord_map("mercator") # Getting fancy ggplot(data = states.df, aes(x = long, y = lat, group = group, fill = margin)) + scale_fill_gradient2(limits = c(-0.5, 0.5)) + geom_polygon(color = "white") + coord_map("albers", lat0 = 29.5, lat1 = 45.5, xlim = c(-124.85, -66.88), ylim = c(24.4, 49.38), orientation = c(90, 0, -98.35))
#Q1 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv", destfile="survey.csv") file.info("survey.csv") survey<-read.csv("survey.csv") file.remove("survey.csv") #Q2 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv ","gross.csv") gross<-read.csv("gross.csv",header=F,skip=5, nrow=190, colClasses=c(NA,NA,"NULL",NA,NA,"NULL","NULL","NULL","NULL","NULL")) gross$V5<-as.numeric(gsub(",","",gross$V5)) names(gross)<-c("ShortName","Ranking","Names","GDP") mean(gross$GDP) #Q3 sum(grepl("^United",gross$Names)) #Q4 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv ","gross.csv") download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv","edu.csv") gross<-read.csv("gross.csv",header=F,skip=5, nrow=190, colClasses=c(NA,NA,"NULL",NA,NA,"NULL","NULL","NULL","NULL","NULL")) gross$V5<-as.numeric(gsub(",","",gross$V5)) edu<-read.csv("edu.csv") length(grep("Fiscal year end:.*June", edu$Special.Notes, ignore.case=TRUE)) #Q5 library(quantmod) library(lubridate) amzn = getSymbols("AMZN",auto.assign=FALSE) sampleTimes = index(amzn) names(amzn) sum(year(amzn[,1])=="2012") sum(wday(amzn[,1])==2 & year(amzn[,1])=="2012" )
/Quiz4.R
no_license
JaredSun26/GettingAndCleaningData-Coursera
R
false
false
1,276
r
#Q1 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv", destfile="survey.csv") file.info("survey.csv") survey<-read.csv("survey.csv") file.remove("survey.csv") #Q2 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv ","gross.csv") gross<-read.csv("gross.csv",header=F,skip=5, nrow=190, colClasses=c(NA,NA,"NULL",NA,NA,"NULL","NULL","NULL","NULL","NULL")) gross$V5<-as.numeric(gsub(",","",gross$V5)) names(gross)<-c("ShortName","Ranking","Names","GDP") mean(gross$GDP) #Q3 sum(grepl("^United",gross$Names)) #Q4 download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv ","gross.csv") download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv","edu.csv") gross<-read.csv("gross.csv",header=F,skip=5, nrow=190, colClasses=c(NA,NA,"NULL",NA,NA,"NULL","NULL","NULL","NULL","NULL")) gross$V5<-as.numeric(gsub(",","",gross$V5)) edu<-read.csv("edu.csv") length(grep("Fiscal year end:.*June", edu$Special.Notes, ignore.case=TRUE)) #Q5 library(quantmod) library(lubridate) amzn = getSymbols("AMZN",auto.assign=FALSE) sampleTimes = index(amzn) names(amzn) sum(year(amzn[,1])=="2012") sum(wday(amzn[,1])==2 & year(amzn[,1])=="2012" )
data<-read.csv('loan.csv') x<-readline("enter age") y<-readline("enter amount") z<-readline("enter k value") age<-as.integer(x) loan<-as.integer(y) k<-as.integer(z) plot(data$Age,data$Loan) for(i in 1:nrow(data)) { data$distance[i]<-sqrt((data$Age[i]-age)**2+(data$Loan[i]-loan)**2) } df <-data[order(data$distance),] count=0 for(i in 1:k) { count=count+ifelse(data$Paid.[i]=="yes",1,-1) } if(count>0) { print("Loan Approved") }else if(count==0){ print("Not Sure") }else { print("Not Approved") }
/knn.R
no_license
shubhamrouniyar/R-programs_basic
R
false
false
507
r
data<-read.csv('loan.csv') x<-readline("enter age") y<-readline("enter amount") z<-readline("enter k value") age<-as.integer(x) loan<-as.integer(y) k<-as.integer(z) plot(data$Age,data$Loan) for(i in 1:nrow(data)) { data$distance[i]<-sqrt((data$Age[i]-age)**2+(data$Loan[i]-loan)**2) } df <-data[order(data$distance),] count=0 for(i in 1:k) { count=count+ifelse(data$Paid.[i]=="yes",1,-1) } if(count>0) { print("Loan Approved") }else if(count==0){ print("Not Sure") }else { print("Not Approved") }
#Page 118 library("sets") both_heads<-c('hh') both_tails<-c('tt') head_and_tail<-c('ht') tail_and_head<-c('th') Sample_space<-as.set(c('hh','tt','ht','th')) Probablity_of_both_heads<-print(length(both_heads)/length(Sample_space)) Probability_of_both_tails<-print(length(both_tails)/length(Sample_space)) Overall_Probability<-print(Probablity_of_both_heads+Probability_of_both_tails)
/Introductory_Statistics_by_Douglas_S_Shafer_And_Zhiyi_Zhang/CH3/EX3.7/Ex3_7.R
permissive
FOSSEE/R_TBC_Uploads
R
false
false
391
r
#Page 118 library("sets") both_heads<-c('hh') both_tails<-c('tt') head_and_tail<-c('ht') tail_and_head<-c('th') Sample_space<-as.set(c('hh','tt','ht','th')) Probablity_of_both_heads<-print(length(both_heads)/length(Sample_space)) Probability_of_both_tails<-print(length(both_tails)/length(Sample_space)) Overall_Probability<-print(Probablity_of_both_heads+Probability_of_both_tails)
###--- Uncertainty Functions ---### # Calculate U-Deviation calcUDev <- function(MODEL, REFSET, PREDSET, PROP){ x.cal.scores <- MODEL$scores # Scores of reference + prediction sets x.val.scores <- predict(MODEL, newdata = PREDSET$spc, type = "scores") y.val.pred <- predict(MODEL, newdata = PREDSET$spc) y.val.pred <- y.val.pred[,1,] # Predictions loadings <- MODEL$loadings # Model loadings x.val <- PREDSET$spc # Spectra of prediction set obs <- PREDSET[,PROP] # Lab data for prediction set ncalobj <- nrow(REFSET) # Number of callibration samples # Get Leverage Hi <- getLeverage(x.cal.scores, x.val.scores) # Get ResXvalSamp ResXValSamp <- getResXValSamp(PREDSET$spc, REFSET$spc, x.val.scores, loadings) # Get ResXValTot ResXValTot <- getTotResXCal(REFSET$spc, x.cal.scores, loadings) # Get ResYValVar ResYValVar <- MSEP(MODEL, intercept=FALSE)$val[1,1,] # Get U-Deviation udev <- getYdev(ResYValVar, ResXValSamp, ResXValTot, Hi, ncalobj) return(udev) } #1 Xresidual in validation objects getResXValSamp <- function(x.val.mat,x.cal.mat,x.val.scores,x.cal.loadings){ nobj <- dim(x.val.mat)[1] ncomp <- dim(x.val.scores)[2] npred <- dim(x.cal.loadings)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) Xmeans.cal <- colMeans(x.cal.mat) #compare predictors average from calib object with predictors from each object # to get predictor redisuals. 
X.center <- x.val.mat - matrix(rep(Xmeans.cal, each = nobj), nrow=nobj) for(i in 1:ncomp){ x.fac.load.wts <- x.val.scores[,1:i, drop=FALSE] %*% t(x.cal.loadings[,1:i,drop=FALSE]) #res.val[,i] <- rowMeans((-x.fac.load.wts + X.center)^2) res.val[,i] <- rowSums((-x.fac.load.wts + X.center)^2)/(npred-i) } return(res.val) } #2 Total X residual in Validation sets #extracted from getXvalres function by using a colMeans function #use residuals based on xcal rather than cross-validation to make it more deterministic getTotResXCal <- function(x.cal.mat,x.cal.scores,x.cal.loadings){ nobj <- dim(x.cal.mat)[1] ncomp <- dim(x.cal.scores)[2] npred <- dim(x.cal.loadings)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) #Xmeans.cal <- colMeans(x.cal.mat) #compare predictors average from calib object with predictors from each object # to get predictor redisuals. #X.center <- x.cal.mat - matrix(rep(Xmeans.cal, each = nobj), nrow=nobj) X.center <- scale(x.cal.mat, scale=FALSE) for(i in 1:ncomp){ x.fac.load.wts <- x.cal.scores[,1:i, drop=FALSE] %*% t(x.cal.loadings[,1:i,drop=FALSE]) #res.val[,i] <- rowMeans((-x.fac.load.wts + X.center)^2) res.val[,i] <- rowSums((-x.fac.load.wts + X.center)^2)/(npred-i) } tot.res <- colMeans(res.val) return(tot.res) } #3 #note that pred.mat should be m * n matrix with m the number of object and n the number of components getResYValVar <- function(val.resp, pred.mat){ ncomp <- dim(pred.mat)[2] nobj <- dim(pred.mat)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) #Y.center <- scale(val.resp, scale=FALSE) res.val <- sapply(1:ncomp, function(x){mean((pred.mat[,x] - val.resp)^2)}) return(res.val) } #4. Leverage corresponding to best PLS -- This function gives the leverage for each component. 
The total leverage is # the sum of individual component leverage for each prediction samples #************ Gives a cumulative leverage based on the number of PC used ***************# getLeverage <- function(scores.calib, scores.valid){ ta.calib <- diag(crossprod(scores.calib)) ta.calib1 <- matrix(rep(ta.calib, each = nrow(scores.valid)), nrow=nrow(scores.valid)) ncal <- dim(scores.calib)[1] Hi <- scores.valid^2 / ta.calib1 ncomp <- dim(scores.valid)[2] nobj <- dim(scores.valid)[1] Hi.pr <- matrix(0, nrow=nobj, ncol=ncomp) for(i in 1:ncomp){ if(i == 1){ Hi.pr[,1] <- Hi[,1] } else { Hi.pr[,i] <- rowSums(Hi[,1:i]) } } Hi.pr <- Hi.pr + (1/ncal) return(Hi.pr) } #5 : Compute prediction error ydev getYdev <- function(ResYValVar, ResXValSamp, ResXValTot, Hi.pr, ncalobj){ nobj <- dim(ResXValSamp)[1] ncomp <- dim(ResXValSamp)[2] ydev <- matrix(0, nrow= nobj, ncol=ncomp) for( i in 1:ncomp){ ydev[,i] <- sqrt(ResYValVar[i] * (ResXValSamp[,i]/ResXValTot[i] + Hi[,i] + 1/ncalobj) * (1- (i+1)/ncalobj)) } return(ydev) }
/Soil-Predictions-Ensemble-Example/functions_udev.R
no_license
lusensn/Soil-Predictions-MIR
R
false
false
4,350
r
###--- Uncertainty Functions ---### # Calculate U-Deviation calcUDev <- function(MODEL, REFSET, PREDSET, PROP){ x.cal.scores <- MODEL$scores # Scores of reference + prediction sets x.val.scores <- predict(MODEL, newdata = PREDSET$spc, type = "scores") y.val.pred <- predict(MODEL, newdata = PREDSET$spc) y.val.pred <- y.val.pred[,1,] # Predictions loadings <- MODEL$loadings # Model loadings x.val <- PREDSET$spc # Spectra of prediction set obs <- PREDSET[,PROP] # Lab data for prediction set ncalobj <- nrow(REFSET) # Number of callibration samples # Get Leverage Hi <- getLeverage(x.cal.scores, x.val.scores) # Get ResXvalSamp ResXValSamp <- getResXValSamp(PREDSET$spc, REFSET$spc, x.val.scores, loadings) # Get ResXValTot ResXValTot <- getTotResXCal(REFSET$spc, x.cal.scores, loadings) # Get ResYValVar ResYValVar <- MSEP(MODEL, intercept=FALSE)$val[1,1,] # Get U-Deviation udev <- getYdev(ResYValVar, ResXValSamp, ResXValTot, Hi, ncalobj) return(udev) } #1 Xresidual in validation objects getResXValSamp <- function(x.val.mat,x.cal.mat,x.val.scores,x.cal.loadings){ nobj <- dim(x.val.mat)[1] ncomp <- dim(x.val.scores)[2] npred <- dim(x.cal.loadings)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) Xmeans.cal <- colMeans(x.cal.mat) #compare predictors average from calib object with predictors from each object # to get predictor redisuals. 
X.center <- x.val.mat - matrix(rep(Xmeans.cal, each = nobj), nrow=nobj) for(i in 1:ncomp){ x.fac.load.wts <- x.val.scores[,1:i, drop=FALSE] %*% t(x.cal.loadings[,1:i,drop=FALSE]) #res.val[,i] <- rowMeans((-x.fac.load.wts + X.center)^2) res.val[,i] <- rowSums((-x.fac.load.wts + X.center)^2)/(npred-i) } return(res.val) } #2 Total X residual in Validation sets #extracted from getXvalres function by using a colMeans function #use residuals based on xcal rather than cross-validation to make it more deterministic getTotResXCal <- function(x.cal.mat,x.cal.scores,x.cal.loadings){ nobj <- dim(x.cal.mat)[1] ncomp <- dim(x.cal.scores)[2] npred <- dim(x.cal.loadings)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) #Xmeans.cal <- colMeans(x.cal.mat) #compare predictors average from calib object with predictors from each object # to get predictor redisuals. #X.center <- x.cal.mat - matrix(rep(Xmeans.cal, each = nobj), nrow=nobj) X.center <- scale(x.cal.mat, scale=FALSE) for(i in 1:ncomp){ x.fac.load.wts <- x.cal.scores[,1:i, drop=FALSE] %*% t(x.cal.loadings[,1:i,drop=FALSE]) #res.val[,i] <- rowMeans((-x.fac.load.wts + X.center)^2) res.val[,i] <- rowSums((-x.fac.load.wts + X.center)^2)/(npred-i) } tot.res <- colMeans(res.val) return(tot.res) } #3 #note that pred.mat should be m * n matrix with m the number of object and n the number of components getResYValVar <- function(val.resp, pred.mat){ ncomp <- dim(pred.mat)[2] nobj <- dim(pred.mat)[1] res.val <- matrix(0, nrow=nobj, ncol=ncomp) #Y.center <- scale(val.resp, scale=FALSE) res.val <- sapply(1:ncomp, function(x){mean((pred.mat[,x] - val.resp)^2)}) return(res.val) } #4. Leverage corresponding to best PLS -- This function gives the leverage for each component. 
The total leverage is # the sum of individual component leverage for each prediction samples #************ Gives a cumulative leverage based on the number of PC used ***************# getLeverage <- function(scores.calib, scores.valid){ ta.calib <- diag(crossprod(scores.calib)) ta.calib1 <- matrix(rep(ta.calib, each = nrow(scores.valid)), nrow=nrow(scores.valid)) ncal <- dim(scores.calib)[1] Hi <- scores.valid^2 / ta.calib1 ncomp <- dim(scores.valid)[2] nobj <- dim(scores.valid)[1] Hi.pr <- matrix(0, nrow=nobj, ncol=ncomp) for(i in 1:ncomp){ if(i == 1){ Hi.pr[,1] <- Hi[,1] } else { Hi.pr[,i] <- rowSums(Hi[,1:i]) } } Hi.pr <- Hi.pr + (1/ncal) return(Hi.pr) } #5 : Compute prediction error ydev getYdev <- function(ResYValVar, ResXValSamp, ResXValTot, Hi.pr, ncalobj){ nobj <- dim(ResXValSamp)[1] ncomp <- dim(ResXValSamp)[2] ydev <- matrix(0, nrow= nobj, ncol=ncomp) for( i in 1:ncomp){ ydev[,i] <- sqrt(ResYValVar[i] * (ResXValSamp[,i]/ResXValTot[i] + Hi[,i] + 1/ncalobj) * (1- (i+1)/ncalobj)) } return(ydev) }
### prewd ### prewd = file.path(getwd(),"...") ### libraries ### library(foreach) library(doParallel) library(rhdf5) ### setwd ### setwd(file.path(prewd,"...")) ### number of cores ### mc.cores = detectCores() ### basic objects ### basic.objects = c(ls(),"basic.objects","fast5.file","fast5.files") ### load objects ### load(file.path("...","fast5.files.RData")) objects = list.files(file.path(prewd,"BasicObjects")) for (object in objects){source(file.path(prewd,"BasicObjects",object))} ### build.raw.signal.five.mers.add.on.list ### build.raw.signal.five.mers.add.on.list = function(read.name){ try({ fast5.file = read.to.fast5.name[read.name] read.number = strsplit(strsplit(fast5.file,split = "read_")[[1]][2],split = "_")[[1]][1] if (is.na(read.number)){read.number = strsplit(strsplit(fast5.file,split = "read")[[1]][3],split = "_")[[1]][1]} if (is.na(read.number)){raw.signal = h5read(file.path(prewd,fast5.file),paste0("read_",strsplit(read.name,split = "\\.")[[1]][1],"/Raw/Signal"))} else {raw.signal = h5read(file.path(prewd,fast5.file),paste0("/Raw/Reads/Read_",read.number,"/Signal"))} if (is.na(read.number)){move = h5read(file.path(prewd,fast5.file),paste0("read_",strsplit(read.name,split = "\\.")[[1]][1],"/Analyses/Basecall_1D_000/BaseCalled_template/Move"))} else {move = h5read(file.path(prewd,fast5.file),"/Analyses/Basecall_1D_000/BaseCalled_template/Move")} move.rle = Rle(move) read.sequence = rev(strsplit(read.sequence.list[[read.name]],split = "")[[1]]) event.repeats = move[move == 1] event.repeats[cumsum(runLength(move.rle)[runValue(move.rle) == 1])] = runLength(move.rle)[runValue(move.rle) == 0] + 1 if (move[length(move)] == 1){event.repeats[length(event.repeats)] = 1} stride = 10 num_events_template = sequencing.summary[strsplit(read.name,split = "\\.")[[1]][1],"num_events_template"] num_events = sequencing.summary[strsplit(read.name,split = "\\.")[[1]][1],"num_events"] anchor = (num_events - num_events_template)*stride anchored.raw.signal = 
raw.signal[anchor:length(raw.signal)] read.sequence.five.mers = as.character(five.mers.minus[as.numeric(stats::filter(as.numeric(c("A" = 1,"C" = 2,"G" = 3,"T" = 4,"-" = 5)[read.sequence]),5^(0:8)[1:5],sides = 1)-(sum(5^(0:8)[1:5])-1))[-c(1:4)]]) read.sequence.five.mers = c(NA,NA,read.sequence.five.mers,NA,NA) events.to.raw = cut(1:length(anchored.raw.signal),(0:num_events_template)*stride,right = FALSE,labels = FALSE) event.raw.signal = lapply(tapply(1:length(anchored.raw.signal),INDEX = events.to.raw,identity),function(x){anchored.raw.signal[x]}) raw.means = sapply(event.raw.signal,mean) cumsum.event.repeats = cumsum(event.repeats) reevent = cbind(c(1,cumsum.event.repeats[-length(cumsum.event.repeats)] + 1),cumsum.event.repeats) colnames(reevent) = c("start","end") reevents.to.raw = cut(1:length(anchored.raw.signal),c(((0:num_events_template)*stride)[reevent[,"start"]],num_events_template*stride+1),right = FALSE,labels = FALSE) reevent.raw.signal = lapply(tapply(1:length(anchored.raw.signal),INDEX = reevents.to.raw,identity),function(x){anchored.raw.signal[x]}) reevent.raw.means = sapply(reevent.raw.signal,mean) events = cbind("move" = move, "read.sequence" = rep(read.sequence,times = event.repeats), "read.sequence.five.mers" = rep(read.sequence.five.mers,times = event.repeats)) means = cbind("raw.means" = raw.means, "reevent.raw.means" = rep(reevent.raw.means,times = event.repeats)) events.move = events[move == 1,] means.move = means[move == 1,] events.no.move = events[move == 0,] means.no.move = means[move == 0,] results = c(read.name, sapply(five.mers,function(x){apply(means.no.move[events.no.move[,"read.sequence.five.mers"] %in% x,"reevent.raw.means",drop = FALSE],2,mean)}) ) names(results) = NULL return(results) },silent = TRUE) } col.names = c("read.name", mkAllStrings(c("A","C","G","T"),5)) z.score.rescaling = function(x,y,z){((z - mean(y,na.rm = TRUE))*(sd(x,na.rm = TRUE)/sd(y,na.rm = TRUE))) + mean(x,na.rm = TRUE)} non.T.containing = 
rownames(five.mer.RNA.pore.model) %in% five.mers.non.T.containing ### update basic objects ### basic.objects = c(ls(),"build.raw.signal.five.mers.add.on.list","col.names","z.score.rescaling","five.mer.RNA.pore.model","non.T.containing") ### raw signal of kmers ### dir.create(file.path("...")) bam = get(load(file.path("...","bam.RData"))) read.to.fast5.name = get(load(file.path("...","read.to.fast5.name.RData"))) read.sequence.list = get(load(file.path("...","read.sequence.list.RData"))) sequencing.summary = get(load(file.path("...","sequencing.summary.RData"))) index.list = splitvector(1:length(names(bam)),100) raw.signal.five.mers.add.on.list = list() for (j in 1:length(index.list)){ registerDoParallel(cores = mc.cores) raw.signal.five.mers.add.on.subset = foreach(n = names(bam)[index.list[[j]]],.noexport = setdiff(ls(),c("bam","read.to.fast5.name","read.sequence.list","sequencing.summary"))) %dopar% build.raw.signal.five.mers.add.on.list(n) raw.signal.five.mers.add.on.list[[j]] = Reduce('rbind',raw.signal.five.mers.add.on.subset) } raw.signal.five.mers.add.on = Reduce('rbind',raw.signal.five.mers.add.on.list) colnames(raw.signal.five.mers.add.on) = col.names rownames(raw.signal.five.mers.add.on) = raw.signal.five.mers.add.on[,"read.name"] raw.signal.five.mers.add.on = apply(raw.signal.five.mers.add.on[,setdiff(col.names,"read.name")],c(1,2),as.numeric) raw.signal.five.mers.add.on = t(apply(raw.signal.five.mers.add.on,1,function(x){z.score.rescaling(five.mer.RNA.pore.model[names(x[!is.na(x) & non.T.containing]),"mean"],x[!is.na(x) & non.T.containing],x)})) save(raw.signal.five.mers.add.on,file=file.path("...","raw.signal.five.mers.add.on.RData")) rm(list = setdiff(ls(),basic.objects)) gc()
/raw.signal.five.mers.add.on.R
no_license
birdumbrella/nano-ID
R
false
false
6,048
r
### prewd ### prewd = file.path(getwd(),"...") ### libraries ### library(foreach) library(doParallel) library(rhdf5) ### setwd ### setwd(file.path(prewd,"...")) ### number of cores ### mc.cores = detectCores() ### basic objects ### basic.objects = c(ls(),"basic.objects","fast5.file","fast5.files") ### load objects ### load(file.path("...","fast5.files.RData")) objects = list.files(file.path(prewd,"BasicObjects")) for (object in objects){source(file.path(prewd,"BasicObjects",object))} ### build.raw.signal.five.mers.add.on.list ### build.raw.signal.five.mers.add.on.list = function(read.name){ try({ fast5.file = read.to.fast5.name[read.name] read.number = strsplit(strsplit(fast5.file,split = "read_")[[1]][2],split = "_")[[1]][1] if (is.na(read.number)){read.number = strsplit(strsplit(fast5.file,split = "read")[[1]][3],split = "_")[[1]][1]} if (is.na(read.number)){raw.signal = h5read(file.path(prewd,fast5.file),paste0("read_",strsplit(read.name,split = "\\.")[[1]][1],"/Raw/Signal"))} else {raw.signal = h5read(file.path(prewd,fast5.file),paste0("/Raw/Reads/Read_",read.number,"/Signal"))} if (is.na(read.number)){move = h5read(file.path(prewd,fast5.file),paste0("read_",strsplit(read.name,split = "\\.")[[1]][1],"/Analyses/Basecall_1D_000/BaseCalled_template/Move"))} else {move = h5read(file.path(prewd,fast5.file),"/Analyses/Basecall_1D_000/BaseCalled_template/Move")} move.rle = Rle(move) read.sequence = rev(strsplit(read.sequence.list[[read.name]],split = "")[[1]]) event.repeats = move[move == 1] event.repeats[cumsum(runLength(move.rle)[runValue(move.rle) == 1])] = runLength(move.rle)[runValue(move.rle) == 0] + 1 if (move[length(move)] == 1){event.repeats[length(event.repeats)] = 1} stride = 10 num_events_template = sequencing.summary[strsplit(read.name,split = "\\.")[[1]][1],"num_events_template"] num_events = sequencing.summary[strsplit(read.name,split = "\\.")[[1]][1],"num_events"] anchor = (num_events - num_events_template)*stride anchored.raw.signal = 
raw.signal[anchor:length(raw.signal)] read.sequence.five.mers = as.character(five.mers.minus[as.numeric(stats::filter(as.numeric(c("A" = 1,"C" = 2,"G" = 3,"T" = 4,"-" = 5)[read.sequence]),5^(0:8)[1:5],sides = 1)-(sum(5^(0:8)[1:5])-1))[-c(1:4)]]) read.sequence.five.mers = c(NA,NA,read.sequence.five.mers,NA,NA) events.to.raw = cut(1:length(anchored.raw.signal),(0:num_events_template)*stride,right = FALSE,labels = FALSE) event.raw.signal = lapply(tapply(1:length(anchored.raw.signal),INDEX = events.to.raw,identity),function(x){anchored.raw.signal[x]}) raw.means = sapply(event.raw.signal,mean) cumsum.event.repeats = cumsum(event.repeats) reevent = cbind(c(1,cumsum.event.repeats[-length(cumsum.event.repeats)] + 1),cumsum.event.repeats) colnames(reevent) = c("start","end") reevents.to.raw = cut(1:length(anchored.raw.signal),c(((0:num_events_template)*stride)[reevent[,"start"]],num_events_template*stride+1),right = FALSE,labels = FALSE) reevent.raw.signal = lapply(tapply(1:length(anchored.raw.signal),INDEX = reevents.to.raw,identity),function(x){anchored.raw.signal[x]}) reevent.raw.means = sapply(reevent.raw.signal,mean) events = cbind("move" = move, "read.sequence" = rep(read.sequence,times = event.repeats), "read.sequence.five.mers" = rep(read.sequence.five.mers,times = event.repeats)) means = cbind("raw.means" = raw.means, "reevent.raw.means" = rep(reevent.raw.means,times = event.repeats)) events.move = events[move == 1,] means.move = means[move == 1,] events.no.move = events[move == 0,] means.no.move = means[move == 0,] results = c(read.name, sapply(five.mers,function(x){apply(means.no.move[events.no.move[,"read.sequence.five.mers"] %in% x,"reevent.raw.means",drop = FALSE],2,mean)}) ) names(results) = NULL return(results) },silent = TRUE) } col.names = c("read.name", mkAllStrings(c("A","C","G","T"),5)) z.score.rescaling = function(x,y,z){((z - mean(y,na.rm = TRUE))*(sd(x,na.rm = TRUE)/sd(y,na.rm = TRUE))) + mean(x,na.rm = TRUE)} non.T.containing = 
rownames(five.mer.RNA.pore.model) %in% five.mers.non.T.containing ### update basic objects ### basic.objects = c(ls(),"build.raw.signal.five.mers.add.on.list","col.names","z.score.rescaling","five.mer.RNA.pore.model","non.T.containing") ### raw signal of kmers ### dir.create(file.path("...")) bam = get(load(file.path("...","bam.RData"))) read.to.fast5.name = get(load(file.path("...","read.to.fast5.name.RData"))) read.sequence.list = get(load(file.path("...","read.sequence.list.RData"))) sequencing.summary = get(load(file.path("...","sequencing.summary.RData"))) index.list = splitvector(1:length(names(bam)),100) raw.signal.five.mers.add.on.list = list() for (j in 1:length(index.list)){ registerDoParallel(cores = mc.cores) raw.signal.five.mers.add.on.subset = foreach(n = names(bam)[index.list[[j]]],.noexport = setdiff(ls(),c("bam","read.to.fast5.name","read.sequence.list","sequencing.summary"))) %dopar% build.raw.signal.five.mers.add.on.list(n) raw.signal.five.mers.add.on.list[[j]] = Reduce('rbind',raw.signal.five.mers.add.on.subset) } raw.signal.five.mers.add.on = Reduce('rbind',raw.signal.five.mers.add.on.list) colnames(raw.signal.five.mers.add.on) = col.names rownames(raw.signal.five.mers.add.on) = raw.signal.five.mers.add.on[,"read.name"] raw.signal.five.mers.add.on = apply(raw.signal.five.mers.add.on[,setdiff(col.names,"read.name")],c(1,2),as.numeric) raw.signal.five.mers.add.on = t(apply(raw.signal.five.mers.add.on,1,function(x){z.score.rescaling(five.mer.RNA.pore.model[names(x[!is.na(x) & non.T.containing]),"mean"],x[!is.na(x) & non.T.containing],x)})) save(raw.signal.five.mers.add.on,file=file.path("...","raw.signal.five.mers.add.on.RData")) rm(list = setdiff(ls(),basic.objects)) gc()
#' Function for fitting the GVRDM and Aksnes and Utne model #' #' Generalized visual reaction distance model and Aksnes and Utne model. #' #' @param rr Reaction distance in meters #' @param cc Effective attenuation coefficient or beam attenuation coefficient #' @param Ke Half-saturation constant #' @param Ke.trans Should Ke be transformed in the model? #' @param Eb # Light level #' @param Ap Prey area. If Ap and C0 are not provided, tt will be estimated. #' @param C0 Prey inherent contrast. If Ap and C0 are not provided, tt will be estimated. #' @param Eprime Composite saturation parameter. #' @param tt T parameter for prey type 0. Default = NA #' @param tt1 T parameter for prey type 1. Default = NA #' @param tt2 T parameter for prey type 2. Default = NA #' @param tt3 T parameter for prey type 3. Default = NA #' @param tt4 T parameter for prey type 4. Default = NA #' @param angle Nadir viewing angle in degrees #' @param kd Diffuse attenuation coefficient of downwelling irradiance #' @param beta Dynamic scaling function intercept parameter #' @param hh Dynamic scaling function rate parameter #' @param delta Dynamic scaling function shape parameter #' @param alpha Naka-Rushton exponent. Default = 1 #' @param sigma Standard deviation of visual reaction distance #' @param NVrd Non-visual reaction distance. Default = NA (only fit visual component of the model) #' @param NVthreshold Light threshold for non-visual reaction. Default = NA(only fit visual component of the model) #' @param NVsigma Standard deviation of non-visual reaction distance. Default = NA(only fit visual component of the model) #' @param kd.mult Multiplier to convert beam attenuation #' @param prey.types A vector specifying unique prey name categories that are passed to 'prey.' Used if parameters are simultaneously estimated for multiple prey types. #' @param prey Vector of prey names for each observation. #' @param prey.size Vector of prey size #' @param ccoffset Shift kappa or beam attenuation? 
Default = NA results in no shift. #' @param rr.log Are visual errors lognormal distribution? Default = TRUE #' @param fit.model Logical.Should the model be fitted? If FALSE, produces model diagnostics and predictions based on provided parameters. #' @param fit.obs Logical. Should observations be used to calculate a likelihood? #' @param silent Logical. Should diagnostic plots be produced? #' @param only.estimate Logical. Should only the estimated reaction distance be returned? #' @param return If fit.model is TRUE, returns the negative log likelihood. If fit is FALSE, returns model diagnostics and predictions using parameters that are passed to the model. #' @details The order of composite tt parameters should match the order of prey.types in the model. By default, the model is set up to estimate five prey parameters (tt, tt1, tt2, tt3, tt4). Additional parameters can be added by modifying the function with additional numbered parameters that have names beginning with tt. Regardless of how many tt parameters are added, only parameters that are assigned values and have a corresponding prey category will be estimated by the model. fit_gvrdm <- function(rr, cc, Ap = NA, C0 = NA, Eprime = NA, prey.types = NA, prey = NA, prey.size = NA, tt = NA, tt1 = NA, tt2 = NA, tt3 = NA, tt4 = NA, Ke, Ke.trans = FALSE, Eb, angle = NA, kd = NA, kd.mult = NA, beta = NA, kk = NA, delta = NA, alpha = 1, sigma = NA, rr.log = TRUE, NVrd = NA, NVthreshold = 0, NVsigma = NA, fit.model = TRUE, fit.obs = TRUE, ccoffset = NA, only.estimate = FALSE, silent = F, ...) { # Initialize output vectors rhs <- vector(length = length(cc)) lhs <- vector(length = length(cc)) out <- vector(length = length(cc)) VIS <- 1:length(cc) # Transform Ke? if(Ke.trans == TRUE) { Ke <- 10^Ke } # Offset beam attenuation? if(is.na(ccoffset)) { ccoffset <- 0 } # Angular dependence? if(is.na(angle[1])) { kd <- rep(0, length(rhs)) angle <- rep(90, length(rhs)) } # Diffuse downwelling attenuation coefficient multiplier? 
if(!is.na(kd.mult)) { kd <- kd*kd.mult } # Split visual and Non-visual reaction distance if(!is.na(NVrd)) { VIS <- which(Eb > NVthreshold) } # Non-visual reaction distance out[-VIS] <- NVrd # Assign separate tt for each prey type # Multiple prey types if(length(prey.types) > 1) { tt_vars <- ls()[grepl("tt", ls())] tt_vars <- sapply(tt_vars, function(x) eval(parse(text=x))) tt <- rep(tt, length(rr)) tt_vec <- match(prey, prey.types) tt_vec <- tt_vars[tt_vec] tt <- tt_vec } else { tt <- rep(tt, length(rr)) } # Size multiplier if(!is.na(prey.size[1])) { tt <- tt * prey.size^2 } # Lambert W function lambert.fn <- function(cc, xx) { return((2/cc)*f.lambertW(cc*sqrt(xx)/2)) } if(!is.na(C0)) { C0 <- trans.atan(val = C0, a = 0, b = 1) } # Left-hand side if(fit.obs) { lhs[VIS] <- 2 * log(rr[VIS]) + (cc[VIS]-kd[VIS]*round(cos(angle[VIS]*pi/180), 3))*rr[VIS] } # Start rhs rhs[VIS] <- Eb[VIS]^alpha/(Eb[VIS]^alpha + Ke^alpha) if(is.na(Ap) & is.na(C0)) { if(is.na(Eprime)) { # Aksnes and Utne model rhs[VIS] <- tt[VIS]*rhs[VIS] } else { # Cases with prey type variation } } if(is.na(beta) & is.na(kk)) { # Aksnes and Utne model - Do nothing } else { if(is.na(beta)) { cont_shape <- dgamma(x = abs(cc[VIS]-ccoffset), shape = kk, scale = delta) } else { cont_shape <- (beta + dgamma(x = abs(cc[VIS]-ccoffset), shape = kk, scale = delta)) } rhs[VIS] <- rhs[VIS]*cont_shape if(fit.model == T) { # Avoid cont_shape being out of bounds if(any(is.na(cont_shape))) { return(1e7) } else if(any(cont_shape <= 0)) { return(1e7) } } } # Lambert W function to find root out[VIS] <- lambert.fn(cc = (cc[VIS]-kd[VIS]*round(cos(angle[VIS]*pi/180), 3)), xx = rhs[VIS]) # NLL for visual if(!is.na(sigma)) { if(rr.log) { # Log-transformed? 
sigma <- exp(sigma) # Must >0 NLL <- -1*sum(log(dnorm(log(rr[VIS]), mean = log(out[VIS]), sd = sigma))) } else { NLL <- -1*sum(log(dnorm(rr[VIS], mean = out[VIS], sd = sigma))) } } # NLL for nonvisual if(!is.na(NVrd)) { if(is.na(NVsigma)) { NVsigma <- sigma } else { NVsigma <- exp(NVsigma) } NLL <- NLL + -1*sum(log(dnorm(log(rr[-VIS]), mean = log(NVrd), sd = NVsigma))) } if(is.infinite(NLL) | is.na(NLL)) { NLL <- 1e32 } if(fit.model) { return(NLL) } else { if(fit.obs) { if(!is.na(NVrd)) { out[Eb <= NVthreshold] <- NVrd } if(!silent) { out <- list(out = out, vis.diagnostic.plots = diagnostic_plots(rr = rr[VIS], cc = cc[VIS], Eb = Eb[VIS], out = out[VIS], cont_shape = cont_shape[VIS], sigma = sigma, NVrr = rr[-VIS], NVrd = NVrd, NVsigma = NVsigma)) } else { out <- list(out = out) } } else { if(!is.na(NVrd)) { out[Eb <= NVthreshold] <- NVrd } prediction_plots(cc = cc, Eb = Eb, out = out) } if(!is.na(NVrd)) { out$Eb <- Eb out$cc <- cc out$rr <- rr out$angle <- angle out$kd <- kd } else { out$Eb <- Eb out$cc <- cc out$rr <- rr out$angle <- angle out$kd <- kd } if(only.estimate) { return(out$out) # Only return fitted reaction distances } else{ return(out) } } }
/R/fit_gvrdm.R
no_license
sean-rohan-NOAA/GVRDM
R
false
false
8,602
r
#' Function for fitting the GVRDM and Aksnes and Utne model #' #' Generalized visual reaction distance model and Aksnes and Utne model. #' #' @param rr Reaction distance in meters #' @param cc Effective attenuation coefficient or beam attenuation coefficient #' @param Ke Half-saturation constant #' @param Ke.trans Should Ke be transformed in the model? #' @param Eb # Light level #' @param Ap Prey area. If Ap and C0 are not provided, tt will be estimated. #' @param C0 Prey inherent contrast. If Ap and C0 are not provided, tt will be estimated. #' @param Eprime Composite saturation parameter. #' @param tt T parameter for prey type 0. Default = NA #' @param tt1 T parameter for prey type 1. Default = NA #' @param tt2 T parameter for prey type 2. Default = NA #' @param tt3 T parameter for prey type 3. Default = NA #' @param tt4 T parameter for prey type 4. Default = NA #' @param angle Nadir viewing angle in degrees #' @param kd Diffuse attenuation coefficient of downwelling irradiance #' @param beta Dynamic scaling function intercept parameter #' @param hh Dynamic scaling function rate parameter #' @param delta Dynamic scaling function shape parameter #' @param alpha Naka-Rushton exponent. Default = 1 #' @param sigma Standard deviation of visual reaction distance #' @param NVrd Non-visual reaction distance. Default = NA (only fit visual component of the model) #' @param NVthreshold Light threshold for non-visual reaction. Default = NA(only fit visual component of the model) #' @param NVsigma Standard deviation of non-visual reaction distance. Default = NA(only fit visual component of the model) #' @param kd.mult Multiplier to convert beam attenuation #' @param prey.types A vector specifying unique prey name categories that are passed to 'prey.' Used if parameters are simultaneously estimated for multiple prey types. #' @param prey Vector of prey names for each observation. #' @param prey.size Vector of prey size #' @param ccoffset Shift kappa or beam attenuation? 
Default = NA results in no shift. #' @param rr.log Are visual errors lognormal distribution? Default = TRUE #' @param fit.model Logical.Should the model be fitted? If FALSE, produces model diagnostics and predictions based on provided parameters. #' @param fit.obs Logical. Should observations be used to calculate a likelihood? #' @param silent Logical. Should diagnostic plots be produced? #' @param only.estimate Logical. Should only the estimated reaction distance be returned? #' @param return If fit.model is TRUE, returns the negative log likelihood. If fit is FALSE, returns model diagnostics and predictions using parameters that are passed to the model. #' @details The order of composite tt parameters should match the order of prey.types in the model. By default, the model is set up to estimate five prey parameters (tt, tt1, tt2, tt3, tt4). Additional parameters can be added by modifying the function with additional numbered parameters that have names beginning with tt. Regardless of how many tt parameters are added, only parameters that are assigned values and have a corresponding prey category will be estimated by the model. fit_gvrdm <- function(rr, cc, Ap = NA, C0 = NA, Eprime = NA, prey.types = NA, prey = NA, prey.size = NA, tt = NA, tt1 = NA, tt2 = NA, tt3 = NA, tt4 = NA, Ke, Ke.trans = FALSE, Eb, angle = NA, kd = NA, kd.mult = NA, beta = NA, kk = NA, delta = NA, alpha = 1, sigma = NA, rr.log = TRUE, NVrd = NA, NVthreshold = 0, NVsigma = NA, fit.model = TRUE, fit.obs = TRUE, ccoffset = NA, only.estimate = FALSE, silent = F, ...) { # Initialize output vectors rhs <- vector(length = length(cc)) lhs <- vector(length = length(cc)) out <- vector(length = length(cc)) VIS <- 1:length(cc) # Transform Ke? if(Ke.trans == TRUE) { Ke <- 10^Ke } # Offset beam attenuation? if(is.na(ccoffset)) { ccoffset <- 0 } # Angular dependence? if(is.na(angle[1])) { kd <- rep(0, length(rhs)) angle <- rep(90, length(rhs)) } # Diffuse downwelling attenuation coefficient multiplier? 
if(!is.na(kd.mult)) { kd <- kd*kd.mult } # Split visual and Non-visual reaction distance if(!is.na(NVrd)) { VIS <- which(Eb > NVthreshold) } # Non-visual reaction distance out[-VIS] <- NVrd # Assign separate tt for each prey type # Multiple prey types if(length(prey.types) > 1) { tt_vars <- ls()[grepl("tt", ls())] tt_vars <- sapply(tt_vars, function(x) eval(parse(text=x))) tt <- rep(tt, length(rr)) tt_vec <- match(prey, prey.types) tt_vec <- tt_vars[tt_vec] tt <- tt_vec } else { tt <- rep(tt, length(rr)) } # Size multiplier if(!is.na(prey.size[1])) { tt <- tt * prey.size^2 } # Lambert W function lambert.fn <- function(cc, xx) { return((2/cc)*f.lambertW(cc*sqrt(xx)/2)) } if(!is.na(C0)) { C0 <- trans.atan(val = C0, a = 0, b = 1) } # Left-hand side if(fit.obs) { lhs[VIS] <- 2 * log(rr[VIS]) + (cc[VIS]-kd[VIS]*round(cos(angle[VIS]*pi/180), 3))*rr[VIS] } # Start rhs rhs[VIS] <- Eb[VIS]^alpha/(Eb[VIS]^alpha + Ke^alpha) if(is.na(Ap) & is.na(C0)) { if(is.na(Eprime)) { # Aksnes and Utne model rhs[VIS] <- tt[VIS]*rhs[VIS] } else { # Cases with prey type variation } } if(is.na(beta) & is.na(kk)) { # Aksnes and Utne model - Do nothing } else { if(is.na(beta)) { cont_shape <- dgamma(x = abs(cc[VIS]-ccoffset), shape = kk, scale = delta) } else { cont_shape <- (beta + dgamma(x = abs(cc[VIS]-ccoffset), shape = kk, scale = delta)) } rhs[VIS] <- rhs[VIS]*cont_shape if(fit.model == T) { # Avoid cont_shape being out of bounds if(any(is.na(cont_shape))) { return(1e7) } else if(any(cont_shape <= 0)) { return(1e7) } } } # Lambert W function to find root out[VIS] <- lambert.fn(cc = (cc[VIS]-kd[VIS]*round(cos(angle[VIS]*pi/180), 3)), xx = rhs[VIS]) # NLL for visual if(!is.na(sigma)) { if(rr.log) { # Log-transformed? 
sigma <- exp(sigma) # Must >0 NLL <- -1*sum(log(dnorm(log(rr[VIS]), mean = log(out[VIS]), sd = sigma))) } else { NLL <- -1*sum(log(dnorm(rr[VIS], mean = out[VIS], sd = sigma))) } } # NLL for nonvisual if(!is.na(NVrd)) { if(is.na(NVsigma)) { NVsigma <- sigma } else { NVsigma <- exp(NVsigma) } NLL <- NLL + -1*sum(log(dnorm(log(rr[-VIS]), mean = log(NVrd), sd = NVsigma))) } if(is.infinite(NLL) | is.na(NLL)) { NLL <- 1e32 } if(fit.model) { return(NLL) } else { if(fit.obs) { if(!is.na(NVrd)) { out[Eb <= NVthreshold] <- NVrd } if(!silent) { out <- list(out = out, vis.diagnostic.plots = diagnostic_plots(rr = rr[VIS], cc = cc[VIS], Eb = Eb[VIS], out = out[VIS], cont_shape = cont_shape[VIS], sigma = sigma, NVrr = rr[-VIS], NVrd = NVrd, NVsigma = NVsigma)) } else { out <- list(out = out) } } else { if(!is.na(NVrd)) { out[Eb <= NVthreshold] <- NVrd } prediction_plots(cc = cc, Eb = Eb, out = out) } if(!is.na(NVrd)) { out$Eb <- Eb out$cc <- cc out$rr <- rr out$angle <- angle out$kd <- kd } else { out$Eb <- Eb out$cc <- cc out$rr <- rr out$angle <- angle out$kd <- kd } if(only.estimate) { return(out$out) # Only return fitted reaction distances } else{ return(out) } } }
context("survival-coxph") skip_if_not_installed("survival") library(survival) fit <- coxph(Surv(time, status) ~ age + sex, lung) fit2 <- coxph(Surv(time, status) ~ age + sex, lung, robust = TRUE) test_that("coxph tidier arguments", { check_arguments(tidy.coxph) check_arguments(glance.coxph) check_arguments(augment.coxph) }) test_that("tidy.coxph", { td <- tidy(fit) td2 <- tidy(fit, exponentiate = TRUE) td3 <- tidy(fit2) check_tidy_output(td) check_tidy_output(td2) check_tidy_output(td3) }) test_that("glance.coxph", { gl <- glance(fit) gl2 <- glance(fit2) check_glance_outputs(gl, gl2) }) test_that("augment.coxph", { expect_error( augment(fit), regexp = "Must specify either `data` or `newdata` argument." ) check_augment_function( aug = augment.coxph, model = fit, data = lung, newdata = lung ) check_augment_function( aug = augment.coxph, model = fit2, data = lung, newdata = lung ) })
/packrat/lib/x86_64-apple-darwin18.2.0/3.5.2/broom/tests/testthat/test-survival-coxph.R
no_license
teyden/asthma-research
R
false
false
1,043
r
context("survival-coxph") skip_if_not_installed("survival") library(survival) fit <- coxph(Surv(time, status) ~ age + sex, lung) fit2 <- coxph(Surv(time, status) ~ age + sex, lung, robust = TRUE) test_that("coxph tidier arguments", { check_arguments(tidy.coxph) check_arguments(glance.coxph) check_arguments(augment.coxph) }) test_that("tidy.coxph", { td <- tidy(fit) td2 <- tidy(fit, exponentiate = TRUE) td3 <- tidy(fit2) check_tidy_output(td) check_tidy_output(td2) check_tidy_output(td3) }) test_that("glance.coxph", { gl <- glance(fit) gl2 <- glance(fit2) check_glance_outputs(gl, gl2) }) test_that("augment.coxph", { expect_error( augment(fit), regexp = "Must specify either `data` or `newdata` argument." ) check_augment_function( aug = augment.coxph, model = fit, data = lung, newdata = lung ) check_augment_function( aug = augment.coxph, model = fit2, data = lung, newdata = lung ) })
# date time=2019/4/16 19:43:56 setwd('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine') source('/Applications/kettex/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent.r') Ketinit() cat(ThisVersion,'\n') Fnametex='p033.tex' FnameR='p033.r' Fnameout='p033.txt' arccos=acos; arcsin=asin; arctan=atan Acos<- function(x){acos(max(-1,min(1,x)))} Asin<- function(x){asin(max(-1,min(1,x)))} Atan=atan Sqr<- function(x){if(x>=0){sqrt(x)}else{0}} Factorial=factorial Norm<- function(x){norm(matrix(x,nrow=1),"2")} Setwindow(c(-3,6), c(-2,3.5)) X=c(1.22699,-2.5);Assignadd('X',X) T=c(5.49514,-3);Assignadd('T',T) mdag1=c(-0.70565,0.20726);Assignadd('mdag1',mdag1) mdbw1=c(-0.5,-0.14);Assignadd('mdbw1',mdbw1) sgXlXXr=Listplot(c(c(0,-2.5),c(3.14159,-2.5))) sgTlTTr=Listplot(c(c(0,-3),c(7.5708,-3))) Setunitlen("15mm") sgaxx1=Listplot(c(c(-3,0),c(6,0))) sgaxy1=Listplot(c(c(0,-2),c(0,3.5))) cr1=Circledata(c(c(-1,0),c(0,0))) sg1=Listplot(c(c(-1,0),c(0,0))) sg2=Listplot(c(c(-1,0),c(-0.66293,0.94148))) bw1=Bowdata(c(-1,0),c(0,0),1.4,0.3) ag1=Anglemark(c(0,0),c(-1,0),c(-0.66293,0.94148),0.6) sgt1=Listplot(c(c(0,0),c(0.93301,0.79687))) PtL=list() GrL=list() # Windisp(GrL) if(1==1){ Openfile('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine/p033.tex','15mm','Cdy=presen0521a.cdy') Drwline(sgaxx1) Drwline(sgaxy1) Letter(c(6,0),"e","$x$") Letter(c(0,3.5),"cn","$y$") Letter(c(0,0),"se","O") Drwline(cr1) Drwline(sg1) Drwline(sg2) Letter(c(-0.5,-0.14),"c","$1$") Dottedline(bw1,0.75,1.2) Letter(c(-0.71,0.21),"c","$x$") Drwline(ag1) Texcom("{") Setcolor(c(1,0,0)) Drwline(sgt1,2) Texcom("}") Closefile("0") } quit()
/examples/sankakugraph/fig/drawsine/p033.r
no_license
s-takato/s-takato.github.io
R
false
false
1,669
r
# date time=2019/4/16 19:43:56 setwd('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine') source('/Applications/kettex/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent.r') Ketinit() cat(ThisVersion,'\n') Fnametex='p033.tex' FnameR='p033.r' Fnameout='p033.txt' arccos=acos; arcsin=asin; arctan=atan Acos<- function(x){acos(max(-1,min(1,x)))} Asin<- function(x){asin(max(-1,min(1,x)))} Atan=atan Sqr<- function(x){if(x>=0){sqrt(x)}else{0}} Factorial=factorial Norm<- function(x){norm(matrix(x,nrow=1),"2")} Setwindow(c(-3,6), c(-2,3.5)) X=c(1.22699,-2.5);Assignadd('X',X) T=c(5.49514,-3);Assignadd('T',T) mdag1=c(-0.70565,0.20726);Assignadd('mdag1',mdag1) mdbw1=c(-0.5,-0.14);Assignadd('mdbw1',mdbw1) sgXlXXr=Listplot(c(c(0,-2.5),c(3.14159,-2.5))) sgTlTTr=Listplot(c(c(0,-3),c(7.5708,-3))) Setunitlen("15mm") sgaxx1=Listplot(c(c(-3,0),c(6,0))) sgaxy1=Listplot(c(c(0,-2),c(0,3.5))) cr1=Circledata(c(c(-1,0),c(0,0))) sg1=Listplot(c(c(-1,0),c(0,0))) sg2=Listplot(c(c(-1,0),c(-0.66293,0.94148))) bw1=Bowdata(c(-1,0),c(0,0),1.4,0.3) ag1=Anglemark(c(0,0),c(-1,0),c(-0.66293,0.94148),0.6) sgt1=Listplot(c(c(0,0),c(0.93301,0.79687))) PtL=list() GrL=list() # Windisp(GrL) if(1==1){ Openfile('/Users/takatoosetsuo/Dropbox/2019polytec/lectures/0520/presen/fig/drawsine/p033.tex','15mm','Cdy=presen0521a.cdy') Drwline(sgaxx1) Drwline(sgaxy1) Letter(c(6,0),"e","$x$") Letter(c(0,3.5),"cn","$y$") Letter(c(0,0),"se","O") Drwline(cr1) Drwline(sg1) Drwline(sg2) Letter(c(-0.5,-0.14),"c","$1$") Dottedline(bw1,0.75,1.2) Letter(c(-0.71,0.21),"c","$x$") Drwline(ag1) Texcom("{") Setcolor(c(1,0,0)) Drwline(sgt1,2) Texcom("}") Closefile("0") } quit()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/preprocess_MaxQuant.R \name{preprocess_moFF} \alias{preprocess_moFF} \title{Preprocess MSnSet objects originating from moFF .tab files} \usage{ preprocess_moFF(MSnSet, accession = "prot", exp_annotation = NULL, type_annot = NULL, logtransform = TRUE, base = 2, normalisation = "quantiles", smallestUniqueGroups = TRUE, useful_properties = "peptide", filter = NULL, filter_symbol = NULL, minIdentified = 2, external_filter_file = NULL, external_filter_accession = NULL, external_filter_column = NULL, colClasses = NA, printProgress = FALSE, shiny = FALSE, message = NULL) } \arguments{ \item{MSnSet}{An \code{\link[=MSnSet-class]{MSnSet}} object that originates from a moFF .tab file.} \item{accession}{A character indicating the column that contains the unit on which you want to do inference (typically the protein identifiers).} \item{exp_annotation}{Either the path to the file which contains the experiment annotation or a data frame containing the experiment annotation. Exactly one colum in the experiment annotation should contain the mass spec run names. Annotation in a file can be both a tab-delimited text document or an Excel file. For more details, see \code{\link[utils]{read.table}} and \code{\link[openxlsx]{read.xlsx}}. As an error protection measurement, leading and trailing spaces in each column are trimmed off. The default, \code{NULL} indicates there is no annotation to be added.} \item{type_annot}{If \code{exp_annotation} is a path to a file, the type of file. \code{type_annot} is mostly obsolete as supported files will be automatically recognized by their extension. Currently only \code{"tab-delim"} (tab-delimited file), \code{"xlsx"} (Office Open XML Spreadsheet file) and \code{NULL} (file type decided based on the extension) are supported. If the extension is not recognized, the file will be assumed to be a tab-delimited file. 
Defaults to \code{NULL}.} \item{logtransform}{A logical value indicating whether the intensities should be log-transformed. Defaults to \code{TRUE}.} \item{base}{A positive or complex number: the base with respect to which logarithms are computed. Defaults to 2.} \item{normalisation}{A character vector of length one that describes how to normalise the \code{\link[=MSnSet-class]{MSnSet}} object. See \code{\link[=normalise-methods]{normalise}} for details. Defaults to \code{"quantiles"}. If no normalisation is wanted, set \code{normalisation="none"}.} \item{smallestUniqueGroups}{A logical indicating whether protein groups for which any of its member proteins is present in a smaller protein group should be removed from the dataset. Defaults to \code{TRUE}.} \item{useful_properties}{The columns of the \code{\link{featureData}} slot that are useful in the further analysis and/or inspection of the data and should be retained. Defaults to \code{NULL}, in which case no additional columns will be retained.} \item{filter}{A vector of names corresponding to the columns in the \code{\link{featureData}} slot of the \code{\link[=MSnSet-class]{MSnSet}} object that contain a \code{filtersymbol} that indicates which rows should be removed from the data. Typical examples are contaminants or reversed sequences. Defaults to \code{NULL}, in which case no filtering will be performed.} \item{filter_symbol}{Only used when \code{filter} is not \code{NULL}. A character indicating the symbol in the columns corresponding to the \code{filter} argument that is used to indicate rows that should be removed from the data. Defaults to \code{NULL}, which will throw an error if \code{filter} is not \code{NULL} to alert the user to specify a filter symbol.} \item{minIdentified}{A numeric value indicating the minimal number of times a peptide sequence should be identified in the dataset in order not to be removed. 
Defaults to 2.} \item{external_filter_file}{The name of an external protein filtering file. Sometimes, users want to filter out proteins based on a separate protein file. This file should contain at least a column with name equal to the value in \code{external_filter_accession} containing proteins, and one or more columns on which to filter, with names equal to the input in \code{external_filter_column}. Proteins that need to be filtered out should have the \code{filter_symbol} in their \code{external_filter_column}. Defaults to \code{NULL}, in which case no filtering based on an external protein file will be done.} \item{external_filter_accession}{Only used when \code{external_filter_file} is not \code{NULL}. A character indicating the column that contains the protein identifiers in the \code{external_filter_file}. Defaults to \code{NULL}, which will throw an error if \code{external_filter_file} is not \code{NULL} to alert the user to specify a filter column.} \item{external_filter_column}{Only used when \code{external_filter_file} is not \code{NULL}. A vector of names containing the column name(s) on which to filter in the \code{external_filter_file}. Defaults to \code{NULL}, which will throw an error if \code{external_filter_file} is not \code{NULL} to alert the user to specify a filter column.} \item{colClasses}{character. Only used when the \code{exp_annotation} argument is a filepath. A vector of classes to be assumed for the columns of the experimental annotation data frame. Recycled if necessary. If named and shorter than required, names are matched to the column names with unspecified values are taken to be NA. Possible values are \code{NA} (the default, when \code{type.convert} is used), \code{NULL} (when the column is skipped), one of the atomic vector classes (\code{logical}, \code{integer}, \code{numeric}, \code{complex}, \code{character}, \code{raw}), or \code{factor}, \code{Date} or \code{POSIXct}. 
Otherwise there needs to be an as method (from package \code{methods}) for conversion from \code{character} to the specified formal class.} \item{printProgress}{A logical indicating whether the R should print a message before performing each preprocessing step. Defaults to \code{FALSE}.} \item{shiny}{A logical indicating whether this function is being used by a Shiny app. Setting this to \code{TRUE} only works when using this function in a Shiny app and allows for dynamic progress bars. Defaults to \code{FALSE}.} \item{message}{Only used when \code{shiny=TRUE}. A single-element character vector: the message to be displayed to the user, or \code{NULL} to hide the current message (if any).} \item{details}{Only used when \code{shiny=TRUE} or \code{printProgress=TRUE}. A character vector containing the detail messages to be displayed to the user, or \code{NULL} to hide the current detail messages (if any). The detail messages will be shown with a de-emphasized appearance relative to the message.} } \value{ A preprocessed \code{\link[=MSnSet-class]{MSnSet}} object that is ready to be converted into a \code{\link[=protdata-class]{protdata}} object. } \description{ This function allows to perform a standard preprocessing pipeline on \code{\link[=MSnSet-class]{MSnSet}} objects (Gatto et al., 2012) originating from moFF .tab files (Argentini et al., 2016). By default, intensity values are log2 transformed and then quantile normalized. Next, the \code{\link[=smallestUniqueGroups]{smallestUniqueGroups}} function is applied, which removes proteins groups for which any of its member proteins is present in a smaller protein group. Then, peptides that need to be filtered out are removed. Next, irrelevant columns are dropped. Then, peptide sequences that are identified only once in a single mass spec run are removed because with only 1 identification, the model will be perfectly confounded. Finally, potential experimental annotations are added to the data frame. 
} \references{ Gatto L, Lilley KS. MSnbase - an R/Bioconductor package for isobaric tagged mass spectrometry data visualization, processing and quantitation. Bioinformatics. 2012 Jan 15;28(2):288-9. \url{https://doi.org/10.1093/bioinformatics/btr645}. PubMed PMID:22113085. Argentini A, Goeminne LJE, Verheggen K, Hulstaert N, Staes A, Clement L & Martens L. moFF: a robust and automated approach to extract peptide ion intensities. Nature Methods. 2016 13:964–966. \url{http://www.nature.com/nmeth/journal/v13/n12/full/nmeth.4075.html}. }
/man/preprocess_moFF.Rd
no_license
inambioinfo/MSqRob
R
false
true
8,355
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/preprocess_MaxQuant.R \name{preprocess_moFF} \alias{preprocess_moFF} \title{Preprocess MSnSet objects originating from moFF .tab files} \usage{ preprocess_moFF(MSnSet, accession = "prot", exp_annotation = NULL, type_annot = NULL, logtransform = TRUE, base = 2, normalisation = "quantiles", smallestUniqueGroups = TRUE, useful_properties = "peptide", filter = NULL, filter_symbol = NULL, minIdentified = 2, external_filter_file = NULL, external_filter_accession = NULL, external_filter_column = NULL, colClasses = NA, printProgress = FALSE, shiny = FALSE, message = NULL) } \arguments{ \item{MSnSet}{An \code{\link[=MSnSet-class]{MSnSet}} object that originates from a moFF .tab file.} \item{accession}{A character indicating the column that contains the unit on which you want to do inference (typically the protein identifiers).} \item{exp_annotation}{Either the path to the file which contains the experiment annotation or a data frame containing the experiment annotation. Exactly one colum in the experiment annotation should contain the mass spec run names. Annotation in a file can be both a tab-delimited text document or an Excel file. For more details, see \code{\link[utils]{read.table}} and \code{\link[openxlsx]{read.xlsx}}. As an error protection measurement, leading and trailing spaces in each column are trimmed off. The default, \code{NULL} indicates there is no annotation to be added.} \item{type_annot}{If \code{exp_annotation} is a path to a file, the type of file. \code{type_annot} is mostly obsolete as supported files will be automatically recognized by their extension. Currently only \code{"tab-delim"} (tab-delimited file), \code{"xlsx"} (Office Open XML Spreadsheet file) and \code{NULL} (file type decided based on the extension) are supported. If the extension is not recognized, the file will be assumed to be a tab-delimited file. 
Defaults to \code{NULL}.} \item{logtransform}{A logical value indicating whether the intensities should be log-transformed. Defaults to \code{TRUE}.} \item{base}{A positive or complex number: the base with respect to which logarithms are computed. Defaults to 2.} \item{normalisation}{A character vector of length one that describes how to normalise the \code{\link[=MSnSet-class]{MSnSet}} object. See \code{\link[=normalise-methods]{normalise}} for details. Defaults to \code{"quantiles"}. If no normalisation is wanted, set \code{normalisation="none"}.} \item{smallestUniqueGroups}{A logical indicating whether protein groups for which any of its member proteins is present in a smaller protein group should be removed from the dataset. Defaults to \code{TRUE}.} \item{useful_properties}{The columns of the \code{\link{featureData}} slot that are useful in the further analysis and/or inspection of the data and should be retained. Defaults to \code{NULL}, in which case no additional columns will be retained.} \item{filter}{A vector of names corresponding to the columns in the \code{\link{featureData}} slot of the \code{\link[=MSnSet-class]{MSnSet}} object that contain a \code{filtersymbol} that indicates which rows should be removed from the data. Typical examples are contaminants or reversed sequences. Defaults to \code{NULL}, in which case no filtering will be performed.} \item{filter_symbol}{Only used when \code{filter} is not \code{NULL}. A character indicating the symbol in the columns corresponding to the \code{filter} argument that is used to indicate rows that should be removed from the data. Defaults to \code{NULL}, which will throw an error if \code{filter} is not \code{NULL} to alert the user to specify a filter symbol.} \item{minIdentified}{A numeric value indicating the minimal number of times a peptide sequence should be identified in the dataset in order not to be removed. 
Defaults to 2.} \item{external_filter_file}{The name of an external protein filtering file. Sometimes, users want to filter out proteins based on a separate protein file. This file should contain at least a column with name equal to the value in \code{external_filter_accession} containing proteins, and one or more columns on which to filter, with names equal to the input in \code{external_filter_column}. Proteins that need to be filtered out should have the \code{filter_symbol} in their \code{external_filter_column}. Defaults to \code{NULL}, in which case no filtering based on an external protein file will be done.} \item{external_filter_accession}{Only used when \code{external_filter_file} is not \code{NULL}. A character indicating the column that contains the protein identifiers in the \code{external_filter_file}. Defaults to \code{NULL}, which will throw an error if \code{external_filter_file} is not \code{NULL} to alert the user to specify a filter column.} \item{external_filter_column}{Only used when \code{external_filter_file} is not \code{NULL}. A vector of names containing the column name(s) on which to filter in the \code{external_filter_file}. Defaults to \code{NULL}, which will throw an error if \code{external_filter_file} is not \code{NULL} to alert the user to specify a filter column.} \item{colClasses}{character. Only used when the \code{exp_annotation} argument is a filepath. A vector of classes to be assumed for the columns of the experimental annotation data frame. Recycled if necessary. If named and shorter than required, names are matched to the column names with unspecified values are taken to be NA. Possible values are \code{NA} (the default, when \code{type.convert} is used), \code{NULL} (when the column is skipped), one of the atomic vector classes (\code{logical}, \code{integer}, \code{numeric}, \code{complex}, \code{character}, \code{raw}), or \code{factor}, \code{Date} or \code{POSIXct}. 
Otherwise there needs to be an as method (from package \code{methods}) for conversion from \code{character} to the specified formal class.} \item{printProgress}{A logical indicating whether the R should print a message before performing each preprocessing step. Defaults to \code{FALSE}.} \item{shiny}{A logical indicating whether this function is being used by a Shiny app. Setting this to \code{TRUE} only works when using this function in a Shiny app and allows for dynamic progress bars. Defaults to \code{FALSE}.} \item{message}{Only used when \code{shiny=TRUE}. A single-element character vector: the message to be displayed to the user, or \code{NULL} to hide the current message (if any).} \item{details}{Only used when \code{shiny=TRUE} or \code{printProgress=TRUE}. A character vector containing the detail messages to be displayed to the user, or \code{NULL} to hide the current detail messages (if any). The detail messages will be shown with a de-emphasized appearance relative to the message.} } \value{ A preprocessed \code{\link[=MSnSet-class]{MSnSet}} object that is ready to be converted into a \code{\link[=protdata-class]{protdata}} object. } \description{ This function allows to perform a standard preprocessing pipeline on \code{\link[=MSnSet-class]{MSnSet}} objects (Gatto et al., 2012) originating from moFF .tab files (Argentini et al., 2016). By default, intensity values are log2 transformed and then quantile normalized. Next, the \code{\link[=smallestUniqueGroups]{smallestUniqueGroups}} function is applied, which removes proteins groups for which any of its member proteins is present in a smaller protein group. Then, peptides that need to be filtered out are removed. Next, irrelevant columns are dropped. Then, peptide sequences that are identified only once in a single mass spec run are removed because with only 1 identification, the model will be perfectly confounded. Finally, potential experimental annotations are added to the data frame. 
} \references{ Gatto L, Lilley KS. MSnbase - an R/Bioconductor package for isobaric tagged mass spectrometry data visualization, processing and quantitation. Bioinformatics. 2012 Jan 15;28(2):288-9. \url{https://doi.org/10.1093/bioinformatics/btr645}. PubMed PMID:22113085. Argentini A, Goeminne LJE, Verheggen K, Hulstaert N, Staes A, Clement L & Martens L. moFF: a robust and automated approach to extract peptide ion intensities. Nature Methods. 2016 13:964–966. \url{http://www.nature.com/nmeth/journal/v13/n12/full/nmeth.4075.html}. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/readEmissions.R \name{readEmissions} \alias{readEmissions} \title{Read Emissions from GDX file} \usage{ readEmissions(gdx, emiengregi, eminegregi) } \arguments{ \item{gdx}{a GDX list as created by readGDX, or the file name of a gdx file(file name is recommended as this speeds up the code)} \item{emiengregi}{enty that is read in from vm_emiengregi, vector is possible e.g. c("co2","n2o"). If you do not want to add an enty from vm_emiengregi use emiengregi=NULL} \item{eminegregi}{enty that is read in from vm_eminegregi, vector is possible e.g. c("co2","co2cement"). If you do not want to add an enty from vm_emiengregi use emiengregi=NULL} } \description{ Read emission data from a GDX file into a magpie object. summs automatically over all kinds of enty and vm_emiengregi and vm_emiengregi } \examples{ \dontrun{emi <- readEmissions(gdx,emiengregi=c("n2o","co2"),eminegregi=NULL)} } \author{ Lavinia Baumstark }
/man/readEmissions.Rd
no_license
pik-piam/remind
R
false
true
999
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/readEmissions.R \name{readEmissions} \alias{readEmissions} \title{Read Emissions from GDX file} \usage{ readEmissions(gdx, emiengregi, eminegregi) } \arguments{ \item{gdx}{a GDX list as created by readGDX, or the file name of a gdx file(file name is recommended as this speeds up the code)} \item{emiengregi}{enty that is read in from vm_emiengregi, vector is possible e.g. c("co2","n2o"). If you do not want to add an enty from vm_emiengregi use emiengregi=NULL} \item{eminegregi}{enty that is read in from vm_eminegregi, vector is possible e.g. c("co2","co2cement"). If you do not want to add an enty from vm_emiengregi use emiengregi=NULL} } \description{ Read emission data from a GDX file into a magpie object. summs automatically over all kinds of enty and vm_emiengregi and vm_emiengregi } \examples{ \dontrun{emi <- readEmissions(gdx,emiengregi=c("n2o","co2"),eminegregi=NULL)} } \author{ Lavinia Baumstark }
## see ## https://jamesmccaffrey.wordpress.com/2019/10/28/roulette-wheel-selection-for-multi-armed-bandit-problems ## for inspiration roulette_wheel <- function(coins = 40, starts = 5, true_prob = c(0.3, 0.5, 0.7)){ # must have enough coins to generate initial empirical distribution if (coins < (length(true_prob) * starts)){ stop("To generate a starting distribution, each machine must be", " played ", starts, " times - not enough coins to do so.") } # allocate first ("warm up") SS <- sapply(true_prob, FUN = function(x) sum(rbinom(starts, 1, x))) FF <- starts - SS # calculate metrics used for play allocation probs <- SS / (SS + FF) probs_normalized <- probs / sum(probs) cumu_probs_normalized <- cumsum(probs_normalized) # update number of coins coins <- coins - (length(true_prob) * starts) # create simulation data.frame sim_df <- data.frame(machine = seq_along(true_prob), true_probabilities = true_prob, observed_probs = probs, successes = SS, failures = FF, plays = SS + FF, machine_played = NA, coins_left = coins) # initialize before while loop sim_list <- vector('list', length = coins) i <- 1 # play until we run out of original coins while(coins > 0){ # which machine to play? 
update_index <- findInterval(runif(1), c(0, cumu_probs_normalized)) # play machine flip <- rbinom(1, 1, true_prob[update_index]) # update successes and failure for machine that was played SS[update_index] <- SS[update_index] + flip FF[update_index] <- FF[update_index] + (1-flip) # update metrics used for play allocation probs <- SS / (SS + FF) probs_normalized <- probs / sum(probs) cumu_probs_normalized <- cumsum(probs_normalized) # update number of coins coins <- coins - 1 # update simulation data.frame (very inefficient) sim_list[[i]] <- data.frame(machine = seq_along(true_prob), true_probabilities = true_prob, observed_probs = probs, successes = SS, failures = FF, plays = SS + FF, machine_played = seq_along(true_prob) == update_index, coins_left = coins) i <- i + 1 } # show success:failure ratio message("Success to failure ratio was ", round(sum(SS) / sum(FF), 2), "\n", paste0("(", paste0(SS, collapse = "+"), ")/(", paste0(FF, collapse = "+"), ")")) # return data frame of values from experiment rbind(sim_df, do.call('rbind', sim_list)) }
/miscellaneous/roulette_wheel.R
no_license
bgstieber/files_for_blog
R
false
false
2,921
r
## see ## https://jamesmccaffrey.wordpress.com/2019/10/28/roulette-wheel-selection-for-multi-armed-bandit-problems ## for inspiration roulette_wheel <- function(coins = 40, starts = 5, true_prob = c(0.3, 0.5, 0.7)){ # must have enough coins to generate initial empirical distribution if (coins < (length(true_prob) * starts)){ stop("To generate a starting distribution, each machine must be", " played ", starts, " times - not enough coins to do so.") } # allocate first ("warm up") SS <- sapply(true_prob, FUN = function(x) sum(rbinom(starts, 1, x))) FF <- starts - SS # calculate metrics used for play allocation probs <- SS / (SS + FF) probs_normalized <- probs / sum(probs) cumu_probs_normalized <- cumsum(probs_normalized) # update number of coins coins <- coins - (length(true_prob) * starts) # create simulation data.frame sim_df <- data.frame(machine = seq_along(true_prob), true_probabilities = true_prob, observed_probs = probs, successes = SS, failures = FF, plays = SS + FF, machine_played = NA, coins_left = coins) # initialize before while loop sim_list <- vector('list', length = coins) i <- 1 # play until we run out of original coins while(coins > 0){ # which machine to play? 
update_index <- findInterval(runif(1), c(0, cumu_probs_normalized)) # play machine flip <- rbinom(1, 1, true_prob[update_index]) # update successes and failure for machine that was played SS[update_index] <- SS[update_index] + flip FF[update_index] <- FF[update_index] + (1-flip) # update metrics used for play allocation probs <- SS / (SS + FF) probs_normalized <- probs / sum(probs) cumu_probs_normalized <- cumsum(probs_normalized) # update number of coins coins <- coins - 1 # update simulation data.frame (very inefficient) sim_list[[i]] <- data.frame(machine = seq_along(true_prob), true_probabilities = true_prob, observed_probs = probs, successes = SS, failures = FF, plays = SS + FF, machine_played = seq_along(true_prob) == update_index, coins_left = coins) i <- i + 1 } # show success:failure ratio message("Success to failure ratio was ", round(sum(SS) / sum(FF), 2), "\n", paste0("(", paste0(SS, collapse = "+"), ")/(", paste0(FF, collapse = "+"), ")")) # return data frame of values from experiment rbind(sim_df, do.call('rbind', sim_list)) }
library(RUnit) library(trenaSGM) library(org.Hs.eg.db) library(TrenaProjectBrainCell) #------------------------------------------------------------------------------------------------------------------------ Sys.setlocale("LC_ALL", "C") if(!exists("mtx")) load(system.file(package="trenaSGM", "extdata", "mayo.tcx.new.RData")) if(!exists("tbl.enhancers")) load(system.file(package="trenaSGM", "extdata", "enhancers.TREM2.RData")) if(!exists("tp")) tp <- TrenaProjectBrainCell() #------------------------------------------------------------------------------------------------------------------------ library(futile.logger) flog.appender(appender.file("timing-postgresFootprintQueries.log")) logTimingInfo <- function(msg, timingInfo){ string <- sprintf("%50s: %6.2f %6.2f %6.2f %6.2f %6.2f", msg, timingInfo[[1]], timingInfo[[2]], timingInfo[[3]], timingInfo[[4]], timingInfo[[5]]) flog.info(string, "timingLog") } #------------------------------------------------------------------------------------------------------------------------ test_brain <- function() { tss <- 41163186 # strand-aware start and end: trem2 is on the minus strand recipe <- list(title="fp.enhancers", type="footprint.database", geneSymbol="TREM2", regions=tbl.enhancers, tss=tss, matrix=mtx, db.host="khaleesi.systemsbiology.net", db.port=5432, databases=c("brain_hint_20", "brain_hint_16", "brain_wellington_20", "brain_wellington_16"), motifDiscovery="builtinFimo", annotationDbFile=dbfile(org.Hs.eg.db), tfPool=allKnownTFs(), tfMapping=c("TFClass", "MotifDb"), tfPrefilterCorrelation=0.4, orderModelByColumn="rfScore", solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman")) region.count <- nrow(tbl.enhancers) region.totalBases <- sum(apply(tbl.enhancers, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- 
trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) shoulder <- 10000 tbl.huge <- data.frame(chrom="chr6", start=tss-shoulder, end=tss+shoulder, stringsAsFactors=FALSE) recipe$regions <- tbl.huge region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) shoulder <- 100000 tbl.huge <- data.frame(chrom="chr6", start=tss-shoulder, end=tss+shoulder, stringsAsFactors=FALSE) recipe$regions <- tbl.huge region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------- # 6 regions, each 500bp long #---------------------------- starts <- tss-seq(0, 5000, by=1000) ends <- starts + 500 tbl.regions <- data.frame(chrom=rep("chr6", 6), start=starts, end=ends, stringsAsFactors=FALSE) recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------- # 20 regions, each 500bp long #---------------------------- starts <- tss-seq(from=0, by=1000, length.out=20) ends <- starts + 500 tbl.regions <- data.frame(chrom=rep("chr6", 20), start=starts, end=ends, stringsAsFactors=FALSE) recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) 
region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #-------------------------------------------------------------------------------------------------- # 5 regions, each 5000bp long, ignore overlap: they should be equally demanding db lookups anyway #-------------------------------------------------------------------------------------------------- starts <- tss-seq(from=0, by=5000, length.out=5) ends <- starts + 4950 tbl.regions <- data.frame(chrom=rep("chr6", 5), start=starts, end=ends, stringsAsFactors=FALSE) recipe$databases <- c("brain_hint_20") recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------------------------------------------------------------- # now use placenta_fp, should take about same time as brain_hint_20 #---------------------------------------------------------------------------------- recipe$databases <- c("placenta_fp") recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) } # test_brain 
#------------------------------------------------------------------------------------------------------------------------ explore_placenta_fp_speed <- function() { trem2.tss <- 41163186 # strand-aware start and end: trem2 is on the minus strand tbl.regions <- tbl.enhancers[1,] recipe <- list(title="fp.enhancers", type="footprint.database", geneSymbol="TREM2", regions=tbl.regions, tss=trem2.tss, matrix=mtx, db.host="khaleesi.systemsbiology.net", db.port=5432, databases=c("placenta_fp"), motifDiscovery="builtinFimo", annotationDbFile=dbfile(org.Hs.eg.db), tfPool=allKnownTFs(), tfMapping=c("TFClass", "MotifDb"), tfPrefilterCorrelation=0.4, orderModelByColumn="rfScore", solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman")) region.count <- nrow(tbl.regions) region.totalBases <- sum(apply(tbl.regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) recipe$databases <- "placenta_fp" msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) print(msg) #gc() #gcinfo(TRUE) #Rprof(filename="Rprof.placenta", interval=0.01) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) dim(tbl.fp) #Rprof(NULL) #summaryRprof("Rprof.placenta") recipe$databases <- "brain_hint_20" msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) print(msg) #gc() #gcinfo(TRUE) #Rprof(filename="Rprof.brain", interval=0.01) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) dim(tbl.fp) #Rprof(NULL) #summaryRprof("Rprof.brain") } # explore_placenta_fp_speed #------------------------------------------------------------------------------------------------------------------------
/inst/timingTests/footprintDatabases.R
permissive
PriceLab/trenaSGM
R
false
false
8,991
r
library(RUnit) library(trenaSGM) library(org.Hs.eg.db) library(TrenaProjectBrainCell) #------------------------------------------------------------------------------------------------------------------------ Sys.setlocale("LC_ALL", "C") if(!exists("mtx")) load(system.file(package="trenaSGM", "extdata", "mayo.tcx.new.RData")) if(!exists("tbl.enhancers")) load(system.file(package="trenaSGM", "extdata", "enhancers.TREM2.RData")) if(!exists("tp")) tp <- TrenaProjectBrainCell() #------------------------------------------------------------------------------------------------------------------------ library(futile.logger) flog.appender(appender.file("timing-postgresFootprintQueries.log")) logTimingInfo <- function(msg, timingInfo){ string <- sprintf("%50s: %6.2f %6.2f %6.2f %6.2f %6.2f", msg, timingInfo[[1]], timingInfo[[2]], timingInfo[[3]], timingInfo[[4]], timingInfo[[5]]) flog.info(string, "timingLog") } #------------------------------------------------------------------------------------------------------------------------ test_brain <- function() { tss <- 41163186 # strand-aware start and end: trem2 is on the minus strand recipe <- list(title="fp.enhancers", type="footprint.database", geneSymbol="TREM2", regions=tbl.enhancers, tss=tss, matrix=mtx, db.host="khaleesi.systemsbiology.net", db.port=5432, databases=c("brain_hint_20", "brain_hint_16", "brain_wellington_20", "brain_wellington_16"), motifDiscovery="builtinFimo", annotationDbFile=dbfile(org.Hs.eg.db), tfPool=allKnownTFs(), tfMapping=c("TFClass", "MotifDb"), tfPrefilterCorrelation=0.4, orderModelByColumn="rfScore", solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman")) region.count <- nrow(tbl.enhancers) region.totalBases <- sum(apply(tbl.enhancers, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- 
trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) shoulder <- 10000 tbl.huge <- data.frame(chrom="chr6", start=tss-shoulder, end=tss+shoulder, stringsAsFactors=FALSE) recipe$regions <- tbl.huge region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) shoulder <- 100000 tbl.huge <- data.frame(chrom="chr6", start=tss-shoulder, end=tss+shoulder, stringsAsFactors=FALSE) recipe$regions <- tbl.huge region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------- # 6 regions, each 500bp long #---------------------------- starts <- tss-seq(0, 5000, by=1000) ends <- starts + 500 tbl.regions <- data.frame(chrom=rep("chr6", 6), start=starts, end=ends, stringsAsFactors=FALSE) recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------- # 20 regions, each 500bp long #---------------------------- starts <- tss-seq(from=0, by=1000, length.out=20) ends <- starts + 500 tbl.regions <- data.frame(chrom=rep("chr6", 20), start=starts, end=ends, stringsAsFactors=FALSE) recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) 
region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints, %3d regions, %8d bases", region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #-------------------------------------------------------------------------------------------------- # 5 regions, each 5000bp long, ignore overlap: they should be equally demanding db lookups anyway #-------------------------------------------------------------------------------------------------- starts <- tss-seq(from=0, by=5000, length.out=5) ends <- starts + 4950 tbl.regions <- data.frame(chrom=rep("chr6", 5), start=starts, end=ends, stringsAsFactors=FALSE) recipe$databases <- c("brain_hint_20") recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) #---------------------------------------------------------------------------------- # now use placenta_fp, should take about same time as brain_hint_20 #---------------------------------------------------------------------------------- recipe$databases <- c("placenta_fp") recipe$regions <- tbl.regions region.count <- nrow(recipe$regions) region.totalBases <- sum(apply(recipe$regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) } # test_brain 
#------------------------------------------------------------------------------------------------------------------------ explore_placenta_fp_speed <- function() { trem2.tss <- 41163186 # strand-aware start and end: trem2 is on the minus strand tbl.regions <- tbl.enhancers[1,] recipe <- list(title="fp.enhancers", type="footprint.database", geneSymbol="TREM2", regions=tbl.regions, tss=trem2.tss, matrix=mtx, db.host="khaleesi.systemsbiology.net", db.port=5432, databases=c("placenta_fp"), motifDiscovery="builtinFimo", annotationDbFile=dbfile(org.Hs.eg.db), tfPool=allKnownTFs(), tfMapping=c("TFClass", "MotifDb"), tfPrefilterCorrelation=0.4, orderModelByColumn="rfScore", solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman")) region.count <- nrow(tbl.regions) region.totalBases <- sum(apply(tbl.regions, 1, function(row) 1 + as.numeric(row[["end"]]) - as.numeric(row[["start"]]))) recipe$databases <- "placenta_fp" msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) print(msg) #gc() #gcinfo(TRUE) #Rprof(filename="Rprof.placenta", interval=0.01) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) dim(tbl.fp) #Rprof(NULL) #summaryRprof("Rprof.placenta") recipe$databases <- "brain_hint_20" msg <- sprintf("queryFootprints(%15s), %3d regions, %8d bases", recipe$database, region.count, region.totalBases) print(msg) #gc() #gcinfo(TRUE) #Rprof(filename="Rprof.brain", interval=0.01) logTimingInfo(msg, system.time(tbl.fp <- trenaSGM:::.queryFootprintsFromDatabase(recipe, FALSE))) dim(tbl.fp) #Rprof(NULL) #summaryRprof("Rprof.brain") } # explore_placenta_fp_speed #------------------------------------------------------------------------------------------------------------------------
a %<-% { 1 } f <- futureOf(a) print(f) b %<-% { 2 } f <- futureOf(b) print(f) ## All futures fs <- futureOf() print(fs) ## Futures part of environment env <- new.env() env$c %<-% { 3 } f <- futureOf(env$c) print(f) f2 <- futureOf(c, envir=env) print(f2) f3 <- futureOf("c", envir=env) print(f3) fs <- futureOf(envir=env) print(fs)
/incl/futureOf.R
no_license
MarkEdmondson1234/future
R
false
false
344
r
a %<-% { 1 } f <- futureOf(a) print(f) b %<-% { 2 } f <- futureOf(b) print(f) ## All futures fs <- futureOf() print(fs) ## Futures part of environment env <- new.env() env$c %<-% { 3 } f <- futureOf(env$c) print(f) f2 <- futureOf(c, envir=env) print(f2) f3 <- futureOf("c", envir=env) print(f3) fs <- futureOf(envir=env) print(fs)
plot2 <- function (NEI=plotreadRDS ('summarySCC_PM25.rds'), outFile='plot2.png', FIPS='24510') { # Select a subset of the data conditioned by FIPS plot1 (NEI[NEI$fips == FIPS, ], outFile=outFile) }
/plot2.R
no_license
vobine/Coursera-Exploratory-Data-Analysis
R
false
false
240
r
plot2 <- function (NEI=plotreadRDS ('summarySCC_PM25.rds'), outFile='plot2.png', FIPS='24510') { # Select a subset of the data conditioned by FIPS plot1 (NEI[NEI$fips == FIPS, ], outFile=outFile) }
#### INITIAL SETUP ###### DTM_res <- 2.00 res <- 0.01 chm_res <- 4*res # Put your LiDAR data in the "input" folder base_name<-list.files('input', pattern = "asc") dir_name<-gsub(".asc","",base_name) dir.create(paste("output/", dir_name, sep ="")) #### EDIT THE SETUP FILE FOR YOUR SYSTEM #### #Make sure the setup file is correct and save # The most important step is to have Cloud Compare installed # and add the executable into the setup file file.edit('R/01_setup.R') ###### RUN THE PIPELINE ######### #load packages, site list, and functions source('R/01_setup.R') #CREATE Subsampled point cloud and SURFACE MODELS source('R/02_model.R') #Remove Trunks source('R/03_trunk_rem.R') #FINAL uncorrected MODEL source('R/model_final.R') #Normalize Site elevation source('R/04_delineation_preprocessing.R') #OUTPUT as RASTER and MESH source('R/05_final_rasterization.R') #OUTPUT PLOT source('R/06_plot.R') ####COMING SOON##### #Delineate hummocks # tol_p<-0.05 # th_tree_p<-0.2 # setwd(wd) # ow = 0 # dens = 1 #density analysis? # chm_res<-res*10 # # source('R/density.R') # # source('R/delineation.R')
/R/00_pipeline.R
no_license
fdbesanto2/TopoSeg
R
false
false
1,125
r
#### INITIAL SETUP ###### DTM_res <- 2.00 res <- 0.01 chm_res <- 4*res # Put your LiDAR data in the "input" folder base_name<-list.files('input', pattern = "asc") dir_name<-gsub(".asc","",base_name) dir.create(paste("output/", dir_name, sep ="")) #### EDIT THE SETUP FILE FOR YOUR SYSTEM #### #Make sure the setup file is correct and save # The most important step is to have Cloud Compare installed # and add the executable into the setup file file.edit('R/01_setup.R') ###### RUN THE PIPELINE ######### #load packages, site list, and functions source('R/01_setup.R') #CREATE Subsampled point cloud and SURFACE MODELS source('R/02_model.R') #Remove Trunks source('R/03_trunk_rem.R') #FINAL uncorrected MODEL source('R/model_final.R') #Normalize Site elevation source('R/04_delineation_preprocessing.R') #OUTPUT as RASTER and MESH source('R/05_final_rasterization.R') #OUTPUT PLOT source('R/06_plot.R') ####COMING SOON##### #Delineate hummocks # tol_p<-0.05 # th_tree_p<-0.2 # setwd(wd) # ow = 0 # dens = 1 #density analysis? # chm_res<-res*10 # # source('R/density.R') # # source('R/delineation.R')
if(requireNamespace("testthat", quietly = TRUE)) { library("robustHD", quietly = TRUE) testthat::test_check("robustHD") }
/tests/testthat.R
no_license
cran/robustHD
R
false
false
130
r
if(requireNamespace("testthat", quietly = TRUE)) { library("robustHD", quietly = TRUE) testthat::test_check("robustHD") }
###########################################################################/** # @RdocFunction .argAssertRange # # @title "A private function" # # \description{ # @get "title". # } # # @synopsis # # \arguments{ # \item{x}{A @numeric @vector to be validated.} # \item{range}{A @numeric @vector of length two.} # \item{...}{Not used.} # } # # \value{ # Returns \code{x} if valid, otherwise an error is thrown. # } # # @author # # @keyword "programming" # @keyword "internal" #*/########################################################################### .argAssertRange <- function(x, range, ...) { name <- substitute(x); r <- range(x); if (any(is.na(r))) { stop("Argument '", name, "' has missing values."); } if (r[1] < range[1] || r[2] > range[2]) { stop("Argument '", name, "' is out of range [", range[1], ",", range[2], "]: [", r[1], ",", r[2], "]"); } x; } # .argAssertRange() ############################################################################## # HISTORY: # 2008-08-20 # o Created to avoid the dependancy on R.utils::Arguments. ##############################################################################
/dChipIO/R/argAssertRange,private.R
no_license
ingted/R-Examples
R
false
false
1,160
r
###########################################################################/** # @RdocFunction .argAssertRange # # @title "A private function" # # \description{ # @get "title". # } # # @synopsis # # \arguments{ # \item{x}{A @numeric @vector to be validated.} # \item{range}{A @numeric @vector of length two.} # \item{...}{Not used.} # } # # \value{ # Returns \code{x} if valid, otherwise an error is thrown. # } # # @author # # @keyword "programming" # @keyword "internal" #*/########################################################################### .argAssertRange <- function(x, range, ...) { name <- substitute(x); r <- range(x); if (any(is.na(r))) { stop("Argument '", name, "' has missing values."); } if (r[1] < range[1] || r[2] > range[2]) { stop("Argument '", name, "' is out of range [", range[1], ",", range[2], "]: [", r[1], ",", r[2], "]"); } x; } # .argAssertRange() ############################################################################## # HISTORY: # 2008-08-20 # o Created to avoid the dependancy on R.utils::Arguments. ##############################################################################
library(WGCNA); library(cluster); library(ggplot2) library(reshape2) library(RColorBrewer) library(ggpubr) enableWGCNAThreads() # Brain Exp rm(list=ls()) wgcna = list.files(pattern = 'ModuleOutput') tab=read.table(wgcna,sep="\t",header=T) tab <- tab[c(1,4)] colnames(tab)=c("Gene","DEFINITION") tab$DEFINITION <- factor(tab$DEFINITION,levels=paste("WM",1:length(unique(tab$DEFINITION)),sep="")) Genes=as.data.frame(table(tab$DEFINITION)) # Loop to make the overlap # The loop goes along the length of the GeneSets lists files1=list.files(pattern="SME_cor_LR") tmp=as.data.frame(lapply(files1,read.table,sep="\t")[[1]]) tmp <- tmp[c(1,2)] GeneSets <- split(tmp,tmp$Waves) ln=length(GeneSets) cl=length(Genes$Var1) TEMP=list() INT=list() for (i in 1:ln) { TEMP[[i]]=tab[tab$Gene %in% GeneSets[[i]]$Gene,] INT[[i]]=as.data.frame(table(TEMP[[i]]$DEFINITION)) } names(INT)=names(GeneSets) names(TEMP)=names(GeneSets) # Create the matrix for each GeneSet NROWS <- sapply(GeneSets,nrow) # # GeneSet # + - # +-------+-------+ # + | a | b | # Module +-------+-------+ # - | c | d | # +-------+-------+ for (i in 1:length(INT)) { INT[[i]]$b <- NROWS[[i]]-INT[[i]]$Freq INT[[i]]$c <- Genes$Freq-INT[[i]]$Freq INT[[i]]$d <- 15585-Genes$Freq-nrow(GeneSets[[i]]) #Protein Coding bkg = 19901 Ortho bkg = 14373 Brain Expressed bkg = 15585 } # sum(Genes$Freq) RunFisher <- function(row, alt = 'greater', cnf = 0.85) { f <- fisher.test(matrix(row, nrow = 2), alternative = alt, conf.level = cnf) return(c(row, P_val = f$p.value, LogP = -log10(f$p.value), OR = f$estimate[[1]], OR_Low = f$conf.int[1], OR_Up = f$conf.int[2])) } # run FisherMat=list() for (i in 1:length(INT)) { FisherMat[[i]] <- t(apply(INT[[i]][,2:5], 1, RunFisher)) rownames(FisherMat[[i]]) <- INT[[i]]$Var1 FisherMat[[i]] <- FisherMat[[i]][rownames(FisherMat[[i]]) != "grey",] } names(FisherMat)<-names(INT) save(FisherMat,file="SME_CorrelatedGenes_EnrichMat.RData") # Create matrices of Pval tmp<-list() FisherP<-matrix() rowNames <- 
rownames(FisherMat[[1]]) colNames <- names(FisherMat) for (i in 1:length(INT)) { tmp[[i]] <- cbind(as.numeric(FisherMat[[i]][,5])) FisherP <- do.call(cbind,tmp) } rownames(FisherP) <- rowNames colnames(FisherP) <- colNames # Create matrices of OR tmp<-list() FisherOR<-matrix() rowNames <- rownames(FisherMat[[1]]) colNames <- names(FisherMat) for (i in 1:length(INT)) { tmp[[i]] <- cbind(as.numeric(FisherMat[[i]][,7])) FisherOR <- do.call(cbind,tmp) } rownames(FisherOR) <- rowNames colnames(FisherOR) <- colNames # Pval Adjusted library(magrittr) FisherAdj <- FisherP %>% as.matrix %>% as.vector %>% p.adjust(method='BH') %>% matrix(ncol=ncol(FisherP)) rownames(FisherAdj) <- rowNames colnames(FisherAdj) <- colNames FisherAdj[FisherAdj>0.05]=1 FisherOR[FisherOR < 1]=0 vec <- c("Delta","Theta","Alpha","Beta","Gamma","High.Gamma") FisherAdj <- FisherAdj[,match(vec,colnames(FisherAdj))] FisherOR <- FisherOR[,match(vec,colnames(FisherOR))] p <- as.data.frame(FisherAdj) cor <- as.data.frame(FisherOR) p $Module <- rownames(p ) cor$Module <- rownames(cor) p <- melt(p) cor <- melt(cor) p$cor <- cor$value p$log <- -log10(p$value) p$Module <- factor(p$Module,levels=paste("WM",1:26,sep="")) p$log[p$log < 1.3] <- NA p$abs <- abs(p$cor) p$OR<- ifelse(is.na(p$log), p$log, p$abs) #p$variable <- factor(p$variable,levels=c("ENDOTHELIAL","ASTROCYTES","OPC","OLIGODENDROCYTES","MICROGLIA","EXCITATORY", "INHIBITORY")) pdf("Modules_SME_CorrelatedGenes_Bubble.pdf",width=8,height=2) ggscatter(p, x = "variable", y = "Module", size="OR", color="log", alpha = 0.8, xlab = "", ylab = "",) + theme_minimal() + rotate_x_text(angle = 45)+ coord_flip()+ scale_size(range = c(2, 10))+ gradient_color(c("red","darkred")) dev.off()
/supp_analysis/wgcna_enrichments/ENRICHMENT_SME_GENES/SME_CorrelatedGenes_Enrichment.R
no_license
NSafarian/Within_Subject
R
false
false
3,947
r
library(WGCNA); library(cluster); library(ggplot2) library(reshape2) library(RColorBrewer) library(ggpubr) enableWGCNAThreads() # Brain Exp rm(list=ls()) wgcna = list.files(pattern = 'ModuleOutput') tab=read.table(wgcna,sep="\t",header=T) tab <- tab[c(1,4)] colnames(tab)=c("Gene","DEFINITION") tab$DEFINITION <- factor(tab$DEFINITION,levels=paste("WM",1:length(unique(tab$DEFINITION)),sep="")) Genes=as.data.frame(table(tab$DEFINITION)) # Loop to make the overlap # The loop goes along the length of the GeneSets lists files1=list.files(pattern="SME_cor_LR") tmp=as.data.frame(lapply(files1,read.table,sep="\t")[[1]]) tmp <- tmp[c(1,2)] GeneSets <- split(tmp,tmp$Waves) ln=length(GeneSets) cl=length(Genes$Var1) TEMP=list() INT=list() for (i in 1:ln) { TEMP[[i]]=tab[tab$Gene %in% GeneSets[[i]]$Gene,] INT[[i]]=as.data.frame(table(TEMP[[i]]$DEFINITION)) } names(INT)=names(GeneSets) names(TEMP)=names(GeneSets) # Create the matrix for each GeneSet NROWS <- sapply(GeneSets,nrow) # # GeneSet # + - # +-------+-------+ # + | a | b | # Module +-------+-------+ # - | c | d | # +-------+-------+ for (i in 1:length(INT)) { INT[[i]]$b <- NROWS[[i]]-INT[[i]]$Freq INT[[i]]$c <- Genes$Freq-INT[[i]]$Freq INT[[i]]$d <- 15585-Genes$Freq-nrow(GeneSets[[i]]) #Protein Coding bkg = 19901 Ortho bkg = 14373 Brain Expressed bkg = 15585 } # sum(Genes$Freq) RunFisher <- function(row, alt = 'greater', cnf = 0.85) { f <- fisher.test(matrix(row, nrow = 2), alternative = alt, conf.level = cnf) return(c(row, P_val = f$p.value, LogP = -log10(f$p.value), OR = f$estimate[[1]], OR_Low = f$conf.int[1], OR_Up = f$conf.int[2])) } # run FisherMat=list() for (i in 1:length(INT)) { FisherMat[[i]] <- t(apply(INT[[i]][,2:5], 1, RunFisher)) rownames(FisherMat[[i]]) <- INT[[i]]$Var1 FisherMat[[i]] <- FisherMat[[i]][rownames(FisherMat[[i]]) != "grey",] } names(FisherMat)<-names(INT) save(FisherMat,file="SME_CorrelatedGenes_EnrichMat.RData") # Create matrices of Pval tmp<-list() FisherP<-matrix() rowNames <- 
rownames(FisherMat[[1]]) colNames <- names(FisherMat) for (i in 1:length(INT)) { tmp[[i]] <- cbind(as.numeric(FisherMat[[i]][,5])) FisherP <- do.call(cbind,tmp) } rownames(FisherP) <- rowNames colnames(FisherP) <- colNames # Create matrices of OR tmp<-list() FisherOR<-matrix() rowNames <- rownames(FisherMat[[1]]) colNames <- names(FisherMat) for (i in 1:length(INT)) { tmp[[i]] <- cbind(as.numeric(FisherMat[[i]][,7])) FisherOR <- do.call(cbind,tmp) } rownames(FisherOR) <- rowNames colnames(FisherOR) <- colNames # Pval Adjusted library(magrittr) FisherAdj <- FisherP %>% as.matrix %>% as.vector %>% p.adjust(method='BH') %>% matrix(ncol=ncol(FisherP)) rownames(FisherAdj) <- rowNames colnames(FisherAdj) <- colNames FisherAdj[FisherAdj>0.05]=1 FisherOR[FisherOR < 1]=0 vec <- c("Delta","Theta","Alpha","Beta","Gamma","High.Gamma") FisherAdj <- FisherAdj[,match(vec,colnames(FisherAdj))] FisherOR <- FisherOR[,match(vec,colnames(FisherOR))] p <- as.data.frame(FisherAdj) cor <- as.data.frame(FisherOR) p $Module <- rownames(p ) cor$Module <- rownames(cor) p <- melt(p) cor <- melt(cor) p$cor <- cor$value p$log <- -log10(p$value) p$Module <- factor(p$Module,levels=paste("WM",1:26,sep="")) p$log[p$log < 1.3] <- NA p$abs <- abs(p$cor) p$OR<- ifelse(is.na(p$log), p$log, p$abs) #p$variable <- factor(p$variable,levels=c("ENDOTHELIAL","ASTROCYTES","OPC","OLIGODENDROCYTES","MICROGLIA","EXCITATORY", "INHIBITORY")) pdf("Modules_SME_CorrelatedGenes_Bubble.pdf",width=8,height=2) ggscatter(p, x = "variable", y = "Module", size="OR", color="log", alpha = 0.8, xlab = "", ylab = "",) + theme_minimal() + rotate_x_text(angle = 45)+ coord_flip()+ scale_size(range = c(2, 10))+ gradient_color(c("red","darkred")) dev.off()
## Setting phylo and phyloOrNULL class for MgDb and mgFeatures setOldClass("phylo") ## Borrowed from https://github.com/joey711/phyloseq/blob/master/R/allClasses.R # Use setClassUnion to define the unholy NULL-data union as a virtual class. # This is a way of dealing with the expected scenarios in which one or more of # the component data classes is not available, in which case NULL will be used # instead. #' @keywords internal setClassUnion("phyloOrNULL", c("phylo", "NULL"))
/R/phylo-class.R
no_license
HCBravoLab/metagenomeFeatures
R
false
false
483
r
## Setting phylo and phyloOrNULL class for MgDb and mgFeatures setOldClass("phylo") ## Borrowed from https://github.com/joey711/phyloseq/blob/master/R/allClasses.R # Use setClassUnion to define the unholy NULL-data union as a virtual class. # This is a way of dealing with the expected scenarios in which one or more of # the component data classes is not available, in which case NULL will be used # instead. #' @keywords internal setClassUnion("phyloOrNULL", c("phylo", "NULL"))
#' @details #' The main functions you will need to use are CreateInfercnvObject() and run(infercnv_object). #' For additional details on running the analysis step by step, please refer to the example vignette. #' @aliases infercnv-package "_PACKAGE" #' The infercnv Class #' #' An infercnv object encapsulates the expression data and gene chromosome ordering information #' that is leveraged by infercnv for data exploration. The infercnv object is passed among the #' infercnv data processing and plotting routines. #' #' Slots in the infercnv object include: #' #' @slot expr.data <matrix> the count or expression data matrix, manipulated throughout infercnv ops #' #' @slot count.data <matrix> retains the original count data, but shrinks along with expr.data when genes are removed. #' #' @slot gene_order <data.frame> chromosomal gene order #' #' @slot reference_grouped_cell_indices <list> mapping [['group_name']] to c(cell column indices) for reference (normal) cells #' #' @slot observation_grouped_cell_indices <list> mapping [['group_name']] to c(cell column indices) for observation (tumor) cells #' #' @slot tumor_subclusters <list> stores subclustering of tumors if requested #' #' @slot options <list> stores the options relevant to the analysis in itself (in contrast with options relevant to plotting or paths) #' #' @slot .hspike a hidden infercnv object populated with simulated spiked-in data #' #' @slot norm_pathways pathways to normalize against #' #' @slot pca_loadings pca loadings to cluster cells against #' #' @slot gene_locs chromosomal locations for each gene #' #' @export #' infercnv <- methods::setClass( "infercnv", slots = c( expr.data = "ANY", count.data = "ANY", gene_order= "data.frame", reference_grouped_cell_indices = "list", observation_grouped_cell_indices = "list", tumor_subclusters = "ANY", options = "list", .hspike = "ANY", norm_pathways="ANY", pca_loadings="ANY", gene_locs="ANY") ) #' @title CreateInfercnvObject #' #' @param raw_counts_matrix the 
matrix of genes (rows) vs. cells (columns) containing the raw counts #' If a filename is given, it'll be read via read.table() #' otherwise, if matrix or Matrix, will use the data directly. #' #' @param gene_order_file data file containing the positions of each gene along each chromosome in the genome. #' #' @param annotations_file a description of the cells, indicating the cell type classifications #' #' @param ref_group_names a vector containing the classifications of the reference (normal) cells to use for infering cnv #' #' @param delim delimiter used in the input files #' #' @param max_cells_per_group maximun number of cells to use per group. Default=NULL, using all cells defined in the annotations_file. This option is useful for randomly subsetting the existing data for a quicker preview run, such as using 50 cells per group instead of hundreds. #' #' @param min_max_counts_per_cell minimum and maximum counts allowed per cell. Any cells outside this range will be removed from the counts matrix. default=(100, +Inf) and uses all cells. If used, should be set as c(min_counts, max_counts) #' #' @param chr_exclude list of chromosomes in the reference genome annotations that should be excluded from analysis. Default = c('chrX', 'chrY', 'chrM') #' #' @param pathways list of pathways to perform pathway normalization against #' #' @param pcaLoadings list of pcaLoadings of the original dataset for pathway normalization #' #' @description Creation of an infercnv object. This requires the following inputs: #' A more detailed description of each input is provided below: #' #' The raw_counts_matrix: #' #' MGH54_P16_F12 MGH53_P5_C12 MGH54_P12_C10 MGH54_P16_F02 MGH54_P11_C11 ... #' DDX11L1 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' WASH7P 0.0000000 2.231939 7.186235 5.284944 0.9650009 #' FAM138A 0.1709991 0.000000 0.000000 0.000000 0.0000000 #' OR4F5 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' OR4F29 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' ... 
#' #' The gene_order_file, contains chromosome, start, and stop position for each gene, tab-delimited: #' #' chr start stop #' DDX11L1 chr1 11869 14412 #' WASH7P chr1 14363 29806 #' FAM138A chr1 34554 36081 #' OR4F5 chr1 69091 70008 #' OR4F29 chr1 367640 368634 #' OR4F16 chr1 621059 622053 #' ... #' #' The annotations_file, containing the cell name and the cell type classification, tab-delimited. #' #' V1 V2 #' 1 MGH54_P2_C12 Microglia/Macrophage #' 2 MGH36_P6_F03 Microglia/Macrophage #' 3 MGH53_P4_H08 Microglia/Macrophage #' 4 MGH53_P2_E09 Microglia/Macrophage #' 5 MGH36_P5_E12 Oligodendrocytes (non-malignant) #' 6 MGH54_P2_H07 Oligodendrocytes (non-malignant) #' ... #' 179 93_P9_H03 malignant #' 180 93_P10_D04 malignant #' 181 93_P8_G09 malignant #' 182 93_P10_B10 malignant #' 183 93_P9_C07 malignant #' 184 93_P8_A12 malignant #' ... #' #' #' and the ref_group_names vector might look like so: c("Microglia/Macrophage","Oligodendrocytes (non-malignant)") #' #' @return infercnv #' #' @export #' #' @examples #' data(infercnv_data_example) #' data(infercnv_annots_example) #' data(infercnv_genes_example) #' #' infercnv_object_example <- infercnv::CreateInfercnvObject(raw_counts_matrix=infercnv_data_example, #' gene_order_file=infercnv_genes_example, #' annotations_file=infercnv_annots_example, #' ref_group_names=c("normal")) #' CreateInfercnvObject <- function(raw_counts_matrix, gene_order_file, annotations_file, ref_group_names, delim="\t", max_cells_per_group=NULL, min_max_counts_per_cell=c(100, +Inf), # can be c(low,high) for colsums chr_exclude=c('chrX', 'chrY', 'chrM'), pathways=NULL, pcaLoadings=NULL) { ## input expression data if (Reduce("|", is(raw_counts_matrix) == "character")) { flog.info(sprintf("Parsing matrix: %s", raw_counts_matrix)) if (substr(raw_counts_matrix, nchar(raw_counts_matrix)-2, nchar(raw_counts_matrix)) == ".gz") { raw.data <- read.table(connection <- gzfile(raw_counts_matrix, 'rt'), sep=delim, header=TRUE, row.names=1, check.names=FALSE) 
close(connection) raw.data <- as.matrix(raw.data) } else if(substr(raw_counts_matrix, nchar(raw_counts_matrix)-3, nchar(raw_counts_matrix)) == ".rds") { raw.data <- readRDS(raw_counts_matrix) } else { raw.data <- read.table(raw_counts_matrix, sep=delim, header=TRUE, row.names=1, check.names=FALSE) raw.data <- as.matrix(raw.data) } } else if (Reduce("|", is(raw_counts_matrix) %in% c("dgCMatrix", "matrix"))) { # use as is: raw.data <- raw_counts_matrix } else if (Reduce("|", is(raw_counts_matrix) %in% c("data.frame"))) { raw.data <- as.matrix(raw_counts_matrix) } else { stop("CreateInfercnvObject:: Error, raw_counts_matrix isn't recognized as a matrix, data.frame, or filename") } ## get gene order info if (Reduce("|", is(gene_order_file) == "character")) { flog.info(sprintf("Parsing gene order file: %s", gene_order_file)) gene_order <- read.table(gene_order_file, header=FALSE, row.names=1, sep="\t", check.names=FALSE) } else if (Reduce("|", is(gene_order_file) %in% c("dgCMatrix", "matrix", "data.frame"))) { gene_order <- gene_order_file } else { stop("CreateInfercnvObject:: Error, gene_order_file isn't recognized as a matrix, data.frame, or filename") } names(gene_order) <- c(C_CHR, C_START, C_STOP) if (! 
is.null(chr_exclude) && any(which(gene_order$chr %in% chr_exclude))) { gene_order = gene_order[-which(gene_order$chr %in% chr_exclude),] } ## read annotations file if (Reduce("|", is(annotations_file) == "character")) { flog.info(sprintf("Parsing cell annotations file: %s", annotations_file)) input_classifications <- read.table(annotations_file, header=FALSE, row.names=1, sep=delim, stringsAsFactors=FALSE, colClasses = c('character', 'character')) } else if (Reduce("|", is(annotations_file) %in% c("dgCMatrix", "matrix", "data.frame"))) { input_classifications <- annotations_file } else { stop("CreateInfercnvObject:: Error, annotations_file isn't recognized as a matrix, data.frame, or filename") } ## just in case the first line is a default header, remove it: if (rownames(input_classifications)[1] == "V1") { input_classifications = input_classifications[-1, , drop=FALSE] } ## make sure all reference samples are accounted for: if (! all( rownames(input_classifications) %in% colnames(raw.data)) ) { missing_cells <- rownames(input_classifications)[ ! ( rownames(input_classifications) %in% colnames(raw.data) ) ] error_message <- paste("Please make sure that all the annotated cell ", "names match a sample in your data matrix. ", "Attention to: ", paste(missing_cells, collapse=",")) stop(error_message) } ## extract the genes indicated in the gene ordering file: order_ret <- .order_reduce(data=raw.data, genomic_position=gene_order) num_genes_removed = dim(raw.data)[1] - dim(order_ret$exp)[1] if (num_genes_removed > 0) { flog.info(paste("num genes removed taking into account provided gene ordering list: ", num_genes_removed, " = ", num_genes_removed / dim(raw.data)[1] * 100, "% removed.", sep="")) } raw.data <- order_ret$expr input_gene_order <- order_ret$order colnames(input_gene_order) <- c('Chromosome', 'Start', 'End') if(is.null(raw.data)) { error_message <- paste("None of the genes in the expression data", "matched the genes in the reference genomic", "position file. 
Analysis Stopped.") stop(error_message) } ## Determine if we need to do filtering on counts per cell if (is.null(min_max_counts_per_cell)) { min_max_counts_per_cell = c(1, +Inf) } min_counts_per_cell = max(1, min_max_counts_per_cell[1]) # so that it is always at least 1 max_counts_per_cell = min_max_counts_per_cell[2] cs = colSums(raw.data) cells.keep <- which(cs >= min_counts_per_cell & cs <= max_counts_per_cell) n_orig_cells <- ncol(raw.data) n_to_remove <- n_orig_cells - length(cells.keep) flog.info(sprintf("-filtering out cells < %g or > %g, removing %g %% of cells", min_counts_per_cell, max_counts_per_cell, n_to_remove/n_orig_cells * 100) ) raw.data <- raw.data[, cells.keep] input_classifications <- input_classifications[ rownames(input_classifications) %in% colnames(raw.data), , drop=FALSE] orig_ref_group_names = ref_group_names ref_group_names <- ref_group_names[ ref_group_names %in% unique(input_classifications[,1]) ] if (! all.equal(ref_group_names, orig_ref_group_names)) { flog.warn(sprintf("-warning, at least one reference group has been removed due to cells lacking: %s", orig_ref_group_names[! orig_ref_group_names %in% ref_group_names ] )) } if (! is.null(max_cells_per_group)) { ## trim down where needed. grps = split(input_classifications, input_classifications[,1]) newdf = NULL for (grp in names(grps)) { df = grps[[grp]] if (dim(df)[1] > max_cells_per_group) { flog.info(sprintf("-reducing number of cells for grp %s from %g to %g", grp, dim(df)[1], max_cells_per_group)) grps[[grp]] = df[sample(seq_len(dim(df)[1]), max_cells_per_group),,drop=FALSE] } } input_classifications = data.frame(Reduce(rbind, grps)) } ## restrict expression data to the annotated cells. 
raw.data <- raw.data[,colnames(raw.data) %in% rownames(input_classifications)] ## reorder cell classifications according to expression matrix column names input_classifications <- input_classifications[order(match(row.names(input_classifications), colnames(raw.data))), , drop=FALSE] ## get indices for reference cells ref_group_cell_indices = list() for (name_group in ref_group_names) { cell_indices = which(input_classifications[,1] == name_group) if (length(cell_indices) == 0 ) { stop(sprintf("Error, not identifying cells with classification %s", name_group)) } ref_group_cell_indices[[ name_group ]] <- cell_indices } ## rest of the cells are the 'observed' set. all_group_names <- unique(input_classifications[,1]) obs_group_names <- setdiff(all_group_names, ref_group_names) ## define groupings according to the observation annotation names obs_group_cell_indices = list() for (name_group in obs_group_names) { cell_indices = which(input_classifications[,1] == name_group) obs_group_cell_indices[[ name_group ]] <- cell_indices } #Validate my pathway options if(!is.null(c(pathways, pcaLoadings))){ run_pathway_norm <- T if(any(sapply(list(pathways, pcaLoadings), is.null))){ stop("Error, to use pathway normalization need to provide pathways and PCA Loadings") } ## Validate Pathways if(typeof(pathways)!='list'){ stop(sprintf("Error, 'pathways' is %s instead of list", typeof(pathways))) } if(any(sapply(pathways, typeof) != 'character')){ stop(sprintf("Error, 'pathways' object is not composed of character vectors")) } ## Validate PCA Dims if(nrow(pcaLoadings)!=ncol(raw_counts_matrix)){ stop(paste("Error, nrow(pcaLoadings) must equal ncol(data)\n ncol PCALoadings:", nrow(pcaLoadings), '| ncol raw counts:', ncol(raw_counts_matrix))) } }else{ run_pathway_norm <- F } object <- new( Class = "infercnv", expr.data = raw.data, count.data = raw.data, gene_order = input_gene_order, reference_grouped_cell_indices = ref_group_cell_indices, observation_grouped_cell_indices = 
obs_group_cell_indices, tumor_subclusters = NULL, options = list("chr_exclude" = chr_exclude, "max_cells_per_group" = max_cells_per_group, "min_max_counts_per_cell" = min_max_counts_per_cell, "counts_md5" = digest(raw.data), 'run_pathway_norm' = run_pathway_norm), .hspike = NULL, norm_pathways=pathways, pca_loadings=pcaLoadings) validate_infercnv_obj(object) return(object) } # Order the data and subset the data to data in the genomic position file. # # Args: # @param data Data (expression) matrix where the row names should be in # the row names of the genomic_position file. # @param genomic_position Data frame read in from the genomic position file # # @return Returns a matrix of expression in the order of the # genomic_position file. NULL is returned if the genes in both # data parameters do not match. # .order_reduce <- function(data, genomic_position){ flog.info(paste("::order_reduce:Start.", sep="")) ret_results <- list(expr=NULL, order=NULL, chr_order=NULL) if (is.null(data) || is.null(genomic_position)){ return(ret_results) } # Drop pos_gen entries that are position 0 remove_by_position <- -1 * which(genomic_position[2] + genomic_position[3] == 0) if (length(remove_by_position)) { flog.debug(paste("::process_data:order_reduce: removing genes specified by pos == 0, count: ", length(remove_by_position), sep="")) genomic_position <- genomic_position[remove_by_position, , drop=FALSE] } # Reduce to genes in pos file flog.debug(paste("::process_data:order_reduce: gene identifers in expression matrix: ", row.names(data), collapse="\n", sep="")) flog.debug(paste("::process_data:order_reduce: gene identifers in genomic position table: ", row.names(data), collapse="\n", sep="")) keep_genes <- intersect(row.names(data), row.names(genomic_position)) flog.debug(paste("::process_data:order_reduce: keep_genes size: ", length(keep_genes), sep="")) # Keep genes found in position file if (length(keep_genes)) { ret_results$expr <- data[match(keep_genes, rownames(data)), , 
drop=FALSE] ret_results$order <- genomic_position[match(keep_genes, rownames(genomic_position)), , drop=FALSE] } else { flog.info(paste("::process_data:order_reduce:The position file ", "and the expression file row (gene) names do not match.")) return(list(expr=NULL, order=NULL, chr_order=NULL)) } ## ensure expr and order match up! if (isTRUE(all.equal(rownames(ret_results$expr), rownames(ret_results$order)))) { flog.info(".order_reduce(): expr and order match.") } else { stop("Error, .order_reduce(): expr and order don't match! must debug") } # Set the chr to factor so the order can be arbitrarily set and sorted. chr_levels <- unique(genomic_position[[C_CHR]]) ret_results$order[[C_CHR]] <- factor(ret_results$order[[C_CHR]], levels=chr_levels) # Sort genomic position file and expression file to genomic position file # Order genes by genomic region order_names <- row.names(ret_results$order)[with(ret_results$order, order(chr,start,stop))] ret_results$expr <- ret_results$expr[match(order_names, rownames(ret_results$expr)), , drop=FALSE] #na.omit is to rid teh duplicate gene entries (ie. Y_RNA, snoU13, ...) if they should exist. # This is the contig order, will be used in visualization. # Get the contig order in the same order as the genes. ret_results$order <- ret_results$order[match(order_names, rownames(ret_results$order)), , drop=FALSE] ret_results$chr_order <- ret_results$order[1] # Remove any gene without position information # Genes may be sorted correctly by not have position information # Here they are removed. 
flog.info(paste("::process_data:order_reduce:Reduction from positional ", "data, new dimensions (r,c) = ", paste(dim(data), collapse=","), " Total=", sum(data), " Min=", min(data), " Max=", max(data), ".", sep="")) flog.debug(paste("::process_data:order_reduce end.")) return(ret_results) } #' @title remove_genes() #' #' @description infercnv obj accessor method to remove genes from the matrices #' #' @param infercnv_obj infercnv object #' #' @param gene_indices_to_remove matrix indices for genes to remove #' #' @return infercnv_obj #' #' @keywords internal #' @noRd #' remove_genes <- function(infercnv_obj, gene_indices_to_remove) { infercnv_obj@expr.data <- infercnv_obj@expr.data[ -1 * gene_indices_to_remove, , drop=FALSE] infercnv_obj@count.data <- infercnv_obj@count.data[ -1 * gene_indices_to_remove, , drop=FALSE] infercnv_obj@gene_order <- infercnv_obj@gene_order[ -1 * gene_indices_to_remove, , drop=FALSE] validate_infercnv_obj(infercnv_obj) return(infercnv_obj) } #' @title validate_infercnv_obj() #' #' @description validate an infercnv_obj #' ensures that order of genes in the @gene_order slot match up perfectly with the gene rows in the @expr.data matrix. #' Otherwise, throws an error and stops execution. #' #' @param infercnv_obj infercnv_object #' #' @return none #' validate_infercnv_obj <- function(infercnv_obj) { flog.info("validating infercnv_obj") if (isTRUE(all.equal(rownames(infercnv_obj@expr.data), rownames(infercnv_obj@gene_order)))) { # all good. return(); } else { flog.error("hmm.... 
rownames(infercnv_obj@expr.data != rownames(infercnv_obj@gene_order))") broken.infercnv_obj = infercnv_obj save('broken.infercnv_obj', file="broken.infercnv_obj") } genes = setdiff(rownames(infercnv_obj@expr.data), rownames(infercnv_obj@gene_order)) if (length(genes) != 0) { flog.error(paste("The following genes are in infercnv_obj@expr.data and not @gene_order:", paste(genes, collapse=","), sep=" ")) } genes = setdiff(rownames(infercnv_obj@gene_order), rownames(infercnv_obj@expr.data)) if (length(genes) != 0) { flog.error(paste("The following genes are in @gene_order and not infercnv_obj@expr.data:", paste(genes, collapse=","), sep=" ")) } stop("Problem detected w/ infercnv_obj") } get_cell_name_by_grouping <- function(infercnv_obj) { cell_name_groupings = list() groupings = c(infercnv_obj@reference_grouped_cell_indices, infercnv_obj@observation_grouped_cell_indices) for (group_name in names(groupings)) { cell_names = colnames(infercnv_obj@expr.data[, groupings[[ group_name ]] ] ) cell_name_groupings[[ group_name ]] = cell_names } return(cell_name_groupings) } has_reference_cells <- function(infercnv_obj) { return(length(infercnv_obj@reference_grouped_cell_indices) != 0) }
/CNVcaller/InferCNVscripts/inferCNV.R
no_license
Chenmengpin/scRNASeq-CNVCaller
R
false
false
22,915
r
#' @details #' The main functions you will need to use are CreateInfercnvObject() and run(infercnv_object). #' For additional details on running the analysis step by step, please refer to the example vignette. #' @aliases infercnv-package "_PACKAGE" #' The infercnv Class #' #' An infercnv object encapsulates the expression data and gene chromosome ordering information #' that is leveraged by infercnv for data exploration. The infercnv object is passed among the #' infercnv data processing and plotting routines. #' #' Slots in the infercnv object include: #' #' @slot expr.data <matrix> the count or expression data matrix, manipulated throughout infercnv ops #' #' @slot count.data <matrix> retains the original count data, but shrinks along with expr.data when genes are removed. #' #' @slot gene_order <data.frame> chromosomal gene order #' #' @slot reference_grouped_cell_indices <list> mapping [['group_name']] to c(cell column indices) for reference (normal) cells #' #' @slot observation_grouped_cell_indices <list> mapping [['group_name']] to c(cell column indices) for observation (tumor) cells #' #' @slot tumor_subclusters <list> stores subclustering of tumors if requested #' #' @slot options <list> stores the options relevant to the analysis in itself (in contrast with options relevant to plotting or paths) #' #' @slot .hspike a hidden infercnv object populated with simulated spiked-in data #' #' @slot norm_pathways pathways to normalize against #' #' @slot pca_loadings pca loadings to cluster cells against #' #' @slot gene_locs chromosomal locations for each gene #' #' @export #' infercnv <- methods::setClass( "infercnv", slots = c( expr.data = "ANY", count.data = "ANY", gene_order= "data.frame", reference_grouped_cell_indices = "list", observation_grouped_cell_indices = "list", tumor_subclusters = "ANY", options = "list", .hspike = "ANY", norm_pathways="ANY", pca_loadings="ANY", gene_locs="ANY") ) #' @title CreateInfercnvObject #' #' @param raw_counts_matrix the 
matrix of genes (rows) vs. cells (columns) containing the raw counts #' If a filename is given, it'll be read via read.table() #' otherwise, if matrix or Matrix, will use the data directly. #' #' @param gene_order_file data file containing the positions of each gene along each chromosome in the genome. #' #' @param annotations_file a description of the cells, indicating the cell type classifications #' #' @param ref_group_names a vector containing the classifications of the reference (normal) cells to use for infering cnv #' #' @param delim delimiter used in the input files #' #' @param max_cells_per_group maximun number of cells to use per group. Default=NULL, using all cells defined in the annotations_file. This option is useful for randomly subsetting the existing data for a quicker preview run, such as using 50 cells per group instead of hundreds. #' #' @param min_max_counts_per_cell minimum and maximum counts allowed per cell. Any cells outside this range will be removed from the counts matrix. default=(100, +Inf) and uses all cells. If used, should be set as c(min_counts, max_counts) #' #' @param chr_exclude list of chromosomes in the reference genome annotations that should be excluded from analysis. Default = c('chrX', 'chrY', 'chrM') #' #' @param pathways list of pathways to perform pathway normalization against #' #' @param pcaLoadings list of pcaLoadings of the original dataset for pathway normalization #' #' @description Creation of an infercnv object. This requires the following inputs: #' A more detailed description of each input is provided below: #' #' The raw_counts_matrix: #' #' MGH54_P16_F12 MGH53_P5_C12 MGH54_P12_C10 MGH54_P16_F02 MGH54_P11_C11 ... #' DDX11L1 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' WASH7P 0.0000000 2.231939 7.186235 5.284944 0.9650009 #' FAM138A 0.1709991 0.000000 0.000000 0.000000 0.0000000 #' OR4F5 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' OR4F29 0.0000000 0.000000 0.000000 0.000000 0.0000000 #' ... 
#' #' The gene_order_file, contains chromosome, start, and stop position for each gene, tab-delimited: #' #' chr start stop #' DDX11L1 chr1 11869 14412 #' WASH7P chr1 14363 29806 #' FAM138A chr1 34554 36081 #' OR4F5 chr1 69091 70008 #' OR4F29 chr1 367640 368634 #' OR4F16 chr1 621059 622053 #' ... #' #' The annotations_file, containing the cell name and the cell type classification, tab-delimited. #' #' V1 V2 #' 1 MGH54_P2_C12 Microglia/Macrophage #' 2 MGH36_P6_F03 Microglia/Macrophage #' 3 MGH53_P4_H08 Microglia/Macrophage #' 4 MGH53_P2_E09 Microglia/Macrophage #' 5 MGH36_P5_E12 Oligodendrocytes (non-malignant) #' 6 MGH54_P2_H07 Oligodendrocytes (non-malignant) #' ... #' 179 93_P9_H03 malignant #' 180 93_P10_D04 malignant #' 181 93_P8_G09 malignant #' 182 93_P10_B10 malignant #' 183 93_P9_C07 malignant #' 184 93_P8_A12 malignant #' ... #' #' #' and the ref_group_names vector might look like so: c("Microglia/Macrophage","Oligodendrocytes (non-malignant)") #' #' @return infercnv #' #' @export #' #' @examples #' data(infercnv_data_example) #' data(infercnv_annots_example) #' data(infercnv_genes_example) #' #' infercnv_object_example <- infercnv::CreateInfercnvObject(raw_counts_matrix=infercnv_data_example, #' gene_order_file=infercnv_genes_example, #' annotations_file=infercnv_annots_example, #' ref_group_names=c("normal")) #' CreateInfercnvObject <- function(raw_counts_matrix, gene_order_file, annotations_file, ref_group_names, delim="\t", max_cells_per_group=NULL, min_max_counts_per_cell=c(100, +Inf), # can be c(low,high) for colsums chr_exclude=c('chrX', 'chrY', 'chrM'), pathways=NULL, pcaLoadings=NULL) { ## input expression data if (Reduce("|", is(raw_counts_matrix) == "character")) { flog.info(sprintf("Parsing matrix: %s", raw_counts_matrix)) if (substr(raw_counts_matrix, nchar(raw_counts_matrix)-2, nchar(raw_counts_matrix)) == ".gz") { raw.data <- read.table(connection <- gzfile(raw_counts_matrix, 'rt'), sep=delim, header=TRUE, row.names=1, check.names=FALSE) 
close(connection) raw.data <- as.matrix(raw.data) } else if(substr(raw_counts_matrix, nchar(raw_counts_matrix)-3, nchar(raw_counts_matrix)) == ".rds") { raw.data <- readRDS(raw_counts_matrix) } else { raw.data <- read.table(raw_counts_matrix, sep=delim, header=TRUE, row.names=1, check.names=FALSE) raw.data <- as.matrix(raw.data) } } else if (Reduce("|", is(raw_counts_matrix) %in% c("dgCMatrix", "matrix"))) { # use as is: raw.data <- raw_counts_matrix } else if (Reduce("|", is(raw_counts_matrix) %in% c("data.frame"))) { raw.data <- as.matrix(raw_counts_matrix) } else { stop("CreateInfercnvObject:: Error, raw_counts_matrix isn't recognized as a matrix, data.frame, or filename") } ## get gene order info if (Reduce("|", is(gene_order_file) == "character")) { flog.info(sprintf("Parsing gene order file: %s", gene_order_file)) gene_order <- read.table(gene_order_file, header=FALSE, row.names=1, sep="\t", check.names=FALSE) } else if (Reduce("|", is(gene_order_file) %in% c("dgCMatrix", "matrix", "data.frame"))) { gene_order <- gene_order_file } else { stop("CreateInfercnvObject:: Error, gene_order_file isn't recognized as a matrix, data.frame, or filename") } names(gene_order) <- c(C_CHR, C_START, C_STOP) if (! 
is.null(chr_exclude) && any(which(gene_order$chr %in% chr_exclude))) { gene_order = gene_order[-which(gene_order$chr %in% chr_exclude),] } ## read annotations file if (Reduce("|", is(annotations_file) == "character")) { flog.info(sprintf("Parsing cell annotations file: %s", annotations_file)) input_classifications <- read.table(annotations_file, header=FALSE, row.names=1, sep=delim, stringsAsFactors=FALSE, colClasses = c('character', 'character')) } else if (Reduce("|", is(annotations_file) %in% c("dgCMatrix", "matrix", "data.frame"))) { input_classifications <- annotations_file } else { stop("CreateInfercnvObject:: Error, annotations_file isn't recognized as a matrix, data.frame, or filename") } ## just in case the first line is a default header, remove it: if (rownames(input_classifications)[1] == "V1") { input_classifications = input_classifications[-1, , drop=FALSE] } ## make sure all reference samples are accounted for: if (! all( rownames(input_classifications) %in% colnames(raw.data)) ) { missing_cells <- rownames(input_classifications)[ ! ( rownames(input_classifications) %in% colnames(raw.data) ) ] error_message <- paste("Please make sure that all the annotated cell ", "names match a sample in your data matrix. ", "Attention to: ", paste(missing_cells, collapse=",")) stop(error_message) } ## extract the genes indicated in the gene ordering file: order_ret <- .order_reduce(data=raw.data, genomic_position=gene_order) num_genes_removed = dim(raw.data)[1] - dim(order_ret$exp)[1] if (num_genes_removed > 0) { flog.info(paste("num genes removed taking into account provided gene ordering list: ", num_genes_removed, " = ", num_genes_removed / dim(raw.data)[1] * 100, "% removed.", sep="")) } raw.data <- order_ret$expr input_gene_order <- order_ret$order colnames(input_gene_order) <- c('Chromosome', 'Start', 'End') if(is.null(raw.data)) { error_message <- paste("None of the genes in the expression data", "matched the genes in the reference genomic", "position file. 
Analysis Stopped.") stop(error_message) } ## Determine if we need to do filtering on counts per cell if (is.null(min_max_counts_per_cell)) { min_max_counts_per_cell = c(1, +Inf) } min_counts_per_cell = max(1, min_max_counts_per_cell[1]) # so that it is always at least 1 max_counts_per_cell = min_max_counts_per_cell[2] cs = colSums(raw.data) cells.keep <- which(cs >= min_counts_per_cell & cs <= max_counts_per_cell) n_orig_cells <- ncol(raw.data) n_to_remove <- n_orig_cells - length(cells.keep) flog.info(sprintf("-filtering out cells < %g or > %g, removing %g %% of cells", min_counts_per_cell, max_counts_per_cell, n_to_remove/n_orig_cells * 100) ) raw.data <- raw.data[, cells.keep] input_classifications <- input_classifications[ rownames(input_classifications) %in% colnames(raw.data), , drop=FALSE] orig_ref_group_names = ref_group_names ref_group_names <- ref_group_names[ ref_group_names %in% unique(input_classifications[,1]) ] if (! all.equal(ref_group_names, orig_ref_group_names)) { flog.warn(sprintf("-warning, at least one reference group has been removed due to cells lacking: %s", orig_ref_group_names[! orig_ref_group_names %in% ref_group_names ] )) } if (! is.null(max_cells_per_group)) { ## trim down where needed. grps = split(input_classifications, input_classifications[,1]) newdf = NULL for (grp in names(grps)) { df = grps[[grp]] if (dim(df)[1] > max_cells_per_group) { flog.info(sprintf("-reducing number of cells for grp %s from %g to %g", grp, dim(df)[1], max_cells_per_group)) grps[[grp]] = df[sample(seq_len(dim(df)[1]), max_cells_per_group),,drop=FALSE] } } input_classifications = data.frame(Reduce(rbind, grps)) } ## restrict expression data to the annotated cells. 
raw.data <- raw.data[,colnames(raw.data) %in% rownames(input_classifications)] ## reorder cell classifications according to expression matrix column names input_classifications <- input_classifications[order(match(row.names(input_classifications), colnames(raw.data))), , drop=FALSE] ## get indices for reference cells ref_group_cell_indices = list() for (name_group in ref_group_names) { cell_indices = which(input_classifications[,1] == name_group) if (length(cell_indices) == 0 ) { stop(sprintf("Error, not identifying cells with classification %s", name_group)) } ref_group_cell_indices[[ name_group ]] <- cell_indices } ## rest of the cells are the 'observed' set. all_group_names <- unique(input_classifications[,1]) obs_group_names <- setdiff(all_group_names, ref_group_names) ## define groupings according to the observation annotation names obs_group_cell_indices = list() for (name_group in obs_group_names) { cell_indices = which(input_classifications[,1] == name_group) obs_group_cell_indices[[ name_group ]] <- cell_indices } #Validate my pathway options if(!is.null(c(pathways, pcaLoadings))){ run_pathway_norm <- T if(any(sapply(list(pathways, pcaLoadings), is.null))){ stop("Error, to use pathway normalization need to provide pathways and PCA Loadings") } ## Validate Pathways if(typeof(pathways)!='list'){ stop(sprintf("Error, 'pathways' is %s instead of list", typeof(pathways))) } if(any(sapply(pathways, typeof) != 'character')){ stop(sprintf("Error, 'pathways' object is not composed of character vectors")) } ## Validate PCA Dims if(nrow(pcaLoadings)!=ncol(raw_counts_matrix)){ stop(paste("Error, nrow(pcaLoadings) must equal ncol(data)\n ncol PCALoadings:", nrow(pcaLoadings), '| ncol raw counts:', ncol(raw_counts_matrix))) } }else{ run_pathway_norm <- F } object <- new( Class = "infercnv", expr.data = raw.data, count.data = raw.data, gene_order = input_gene_order, reference_grouped_cell_indices = ref_group_cell_indices, observation_grouped_cell_indices = 
obs_group_cell_indices, tumor_subclusters = NULL, options = list("chr_exclude" = chr_exclude, "max_cells_per_group" = max_cells_per_group, "min_max_counts_per_cell" = min_max_counts_per_cell, "counts_md5" = digest(raw.data), 'run_pathway_norm' = run_pathway_norm), .hspike = NULL, norm_pathways=pathways, pca_loadings=pcaLoadings) validate_infercnv_obj(object) return(object) } # Order the data and subset the data to data in the genomic position file. # # Args: # @param data Data (expression) matrix where the row names should be in # the row names of the genomic_position file. # @param genomic_position Data frame read in from the genomic position file # # @return Returns a matrix of expression in the order of the # genomic_position file. NULL is returned if the genes in both # data parameters do not match. # .order_reduce <- function(data, genomic_position){ flog.info(paste("::order_reduce:Start.", sep="")) ret_results <- list(expr=NULL, order=NULL, chr_order=NULL) if (is.null(data) || is.null(genomic_position)){ return(ret_results) } # Drop pos_gen entries that are position 0 remove_by_position <- -1 * which(genomic_position[2] + genomic_position[3] == 0) if (length(remove_by_position)) { flog.debug(paste("::process_data:order_reduce: removing genes specified by pos == 0, count: ", length(remove_by_position), sep="")) genomic_position <- genomic_position[remove_by_position, , drop=FALSE] } # Reduce to genes in pos file flog.debug(paste("::process_data:order_reduce: gene identifers in expression matrix: ", row.names(data), collapse="\n", sep="")) flog.debug(paste("::process_data:order_reduce: gene identifers in genomic position table: ", row.names(data), collapse="\n", sep="")) keep_genes <- intersect(row.names(data), row.names(genomic_position)) flog.debug(paste("::process_data:order_reduce: keep_genes size: ", length(keep_genes), sep="")) # Keep genes found in position file if (length(keep_genes)) { ret_results$expr <- data[match(keep_genes, rownames(data)), , 
drop=FALSE] ret_results$order <- genomic_position[match(keep_genes, rownames(genomic_position)), , drop=FALSE] } else { flog.info(paste("::process_data:order_reduce:The position file ", "and the expression file row (gene) names do not match.")) return(list(expr=NULL, order=NULL, chr_order=NULL)) } ## ensure expr and order match up! if (isTRUE(all.equal(rownames(ret_results$expr), rownames(ret_results$order)))) { flog.info(".order_reduce(): expr and order match.") } else { stop("Error, .order_reduce(): expr and order don't match! must debug") } # Set the chr to factor so the order can be arbitrarily set and sorted. chr_levels <- unique(genomic_position[[C_CHR]]) ret_results$order[[C_CHR]] <- factor(ret_results$order[[C_CHR]], levels=chr_levels) # Sort genomic position file and expression file to genomic position file # Order genes by genomic region order_names <- row.names(ret_results$order)[with(ret_results$order, order(chr,start,stop))] ret_results$expr <- ret_results$expr[match(order_names, rownames(ret_results$expr)), , drop=FALSE] #na.omit is to rid teh duplicate gene entries (ie. Y_RNA, snoU13, ...) if they should exist. # This is the contig order, will be used in visualization. # Get the contig order in the same order as the genes. ret_results$order <- ret_results$order[match(order_names, rownames(ret_results$order)), , drop=FALSE] ret_results$chr_order <- ret_results$order[1] # Remove any gene without position information # Genes may be sorted correctly by not have position information # Here they are removed. 
flog.info(paste("::process_data:order_reduce:Reduction from positional ", "data, new dimensions (r,c) = ", paste(dim(data), collapse=","), " Total=", sum(data), " Min=", min(data), " Max=", max(data), ".", sep="")) flog.debug(paste("::process_data:order_reduce end.")) return(ret_results) } #' @title remove_genes() #' #' @description infercnv obj accessor method to remove genes from the matrices #' #' @param infercnv_obj infercnv object #' #' @param gene_indices_to_remove matrix indices for genes to remove #' #' @return infercnv_obj #' #' @keywords internal #' @noRd #' remove_genes <- function(infercnv_obj, gene_indices_to_remove) { infercnv_obj@expr.data <- infercnv_obj@expr.data[ -1 * gene_indices_to_remove, , drop=FALSE] infercnv_obj@count.data <- infercnv_obj@count.data[ -1 * gene_indices_to_remove, , drop=FALSE] infercnv_obj@gene_order <- infercnv_obj@gene_order[ -1 * gene_indices_to_remove, , drop=FALSE] validate_infercnv_obj(infercnv_obj) return(infercnv_obj) } #' @title validate_infercnv_obj() #' #' @description validate an infercnv_obj #' ensures that order of genes in the @gene_order slot match up perfectly with the gene rows in the @expr.data matrix. #' Otherwise, throws an error and stops execution. #' #' @param infercnv_obj infercnv_object #' #' @return none #' validate_infercnv_obj <- function(infercnv_obj) { flog.info("validating infercnv_obj") if (isTRUE(all.equal(rownames(infercnv_obj@expr.data), rownames(infercnv_obj@gene_order)))) { # all good. return(); } else { flog.error("hmm.... 
rownames(infercnv_obj@expr.data != rownames(infercnv_obj@gene_order))") broken.infercnv_obj = infercnv_obj save('broken.infercnv_obj', file="broken.infercnv_obj") } genes = setdiff(rownames(infercnv_obj@expr.data), rownames(infercnv_obj@gene_order)) if (length(genes) != 0) { flog.error(paste("The following genes are in infercnv_obj@expr.data and not @gene_order:", paste(genes, collapse=","), sep=" ")) } genes = setdiff(rownames(infercnv_obj@gene_order), rownames(infercnv_obj@expr.data)) if (length(genes) != 0) { flog.error(paste("The following genes are in @gene_order and not infercnv_obj@expr.data:", paste(genes, collapse=","), sep=" ")) } stop("Problem detected w/ infercnv_obj") } get_cell_name_by_grouping <- function(infercnv_obj) { cell_name_groupings = list() groupings = c(infercnv_obj@reference_grouped_cell_indices, infercnv_obj@observation_grouped_cell_indices) for (group_name in names(groupings)) { cell_names = colnames(infercnv_obj@expr.data[, groupings[[ group_name ]] ] ) cell_name_groupings[[ group_name ]] = cell_names } return(cell_name_groupings) } has_reference_cells <- function(infercnv_obj) { return(length(infercnv_obj@reference_grouped_cell_indices) != 0) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllMethods.R \name{show,EdgeSigFilter-method} \alias{show,EdgeSigFilter-method} \title{Show an EdgeSigFilter object} \usage{ \S4method{show}{EdgeSigFilter}(object) } \arguments{ \item{object}{An SigFilter object} } \description{ Show an EdgeSigFilter object }
/man/show-EdgeSigFilter-method.Rd
no_license
bedapub/ribiosNGS
R
false
true
338
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllMethods.R \name{show,EdgeSigFilter-method} \alias{show,EdgeSigFilter-method} \title{Show an EdgeSigFilter object} \usage{ \S4method{show}{EdgeSigFilter}(object) } \arguments{ \item{object}{An SigFilter object} } \description{ Show an EdgeSigFilter object }
\name{Bootstrap Student's t-test for 2 independent samples} \alias{boot.student2} \title{ Bootstrap Student's t-test for 2 independent samples } \description{ Bootstrap Student's t-test for 2 independent samples. } \usage{ boot.student2(x, y, B = 999) } \arguments{ \item{x}{ A numerical vector with the data. } \item{y}{ A numerical vector with the data. } \item{B}{ The number of bootstrap samples to use. } } \details{ We bootstrap Student's (Gosset's) t-test statistic and not the Welch t-test statistic. For the latter case see the "boot.ttest2" function in Rfast. The difference is that Gosset's test statistic assumes equaility of the variances, which if violated leads to inlfated type I errors. Bootstrap calibration though takes care of this issue. As for the bootstrap calibration, instead of sampling B times from each sample, we sample \eqn{\sqrt{B}} from each of them and then take all pairs. Each bootstrap sample is independent of each other, hence there is no violation of the theory (Chatzipantsiou et al., 2019). } \value{ A vector with the test statistic and the bootstrap p-value. } \references{ Efron Bradley and Robert J. Tibshirani (1993). An introduction to the bootstrap. New York: Chapman & Hall/CRC. Chatzipantsiou C., Dimitriadis M., Papadakis M. and Tsagris M. (2019). Extremely efficient permutation and bootstrap hypothesis tests using R. To appear in the Journal of Modern Applied Statistical Methods. \url{ https://arxiv.org/ftp/arxiv/papers/1806/1806.10947.pdf } } \author{ Michail Tsagris. R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}. } %\note{ %% ~~further notes~~ %} \seealso{ \code{\link{welch.tests}, \link{trim.mean} } } \examples{ x <- rexp(40, 4) y <- rbeta(50, 2.5, 7.5) t.test(x, y, var.equal = TRUE) boot.student2(x, y, 9999) }
/man/boot.student2.Rd
no_license
RfastOfficial/Rfast2
R
false
false
1,828
rd
\name{Bootstrap Student's t-test for 2 independent samples} \alias{boot.student2} \title{ Bootstrap Student's t-test for 2 independent samples } \description{ Bootstrap Student's t-test for 2 independent samples. } \usage{ boot.student2(x, y, B = 999) } \arguments{ \item{x}{ A numerical vector with the data. } \item{y}{ A numerical vector with the data. } \item{B}{ The number of bootstrap samples to use. } } \details{ We bootstrap Student's (Gosset's) t-test statistic and not the Welch t-test statistic. For the latter case see the "boot.ttest2" function in Rfast. The difference is that Gosset's test statistic assumes equaility of the variances, which if violated leads to inlfated type I errors. Bootstrap calibration though takes care of this issue. As for the bootstrap calibration, instead of sampling B times from each sample, we sample \eqn{\sqrt{B}} from each of them and then take all pairs. Each bootstrap sample is independent of each other, hence there is no violation of the theory (Chatzipantsiou et al., 2019). } \value{ A vector with the test statistic and the bootstrap p-value. } \references{ Efron Bradley and Robert J. Tibshirani (1993). An introduction to the bootstrap. New York: Chapman & Hall/CRC. Chatzipantsiou C., Dimitriadis M., Papadakis M. and Tsagris M. (2019). Extremely efficient permutation and bootstrap hypothesis tests using R. To appear in the Journal of Modern Applied Statistical Methods. \url{ https://arxiv.org/ftp/arxiv/papers/1806/1806.10947.pdf } } \author{ Michail Tsagris. R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}. } %\note{ %% ~~further notes~~ %} \seealso{ \code{\link{welch.tests}, \link{trim.mean} } } \examples{ x <- rexp(40, 4) y <- rbeta(50, 2.5, 7.5) t.test(x, y, var.equal = TRUE) boot.student2(x, y, 9999) }
library(tidyverse) #https://github.com/nytimes/covid-19-data cov <- read_csv("https://github.com/nytimes/covid-19-data/raw/master/us-states.csv") # transform the data so it's not cumulative df <- cov %>% filter(state=="Texas") %>% arrange(date) %>% mutate(cases_new=cases-lag(cases)) # make a quick and dirty chart ggplot(state, aes(x=date, y=cases_new)) + geom_col()
/covid.R
no_license
andrewbtran/NICAR-2021-plumber
R
false
false
388
r
library(tidyverse) #https://github.com/nytimes/covid-19-data cov <- read_csv("https://github.com/nytimes/covid-19-data/raw/master/us-states.csv") # transform the data so it's not cumulative df <- cov %>% filter(state=="Texas") %>% arrange(date) %>% mutate(cases_new=cases-lag(cases)) # make a quick and dirty chart ggplot(state, aes(x=date, y=cases_new)) + geom_col()
filenames <- list.files('/Users/qidiwang1/Desktop/Death Map', pattern = '*.csv$', full.names = TRUE) my_data <- lapply(filenames, read.csv, skip = 2) data <- my_data for (a in c(1:length(data))){ if (nrow(data[[a]])>0) for (i in c(1:nrow(data[[a]]))){ for (j in c(1:ncol(data[[a]]))){ if (data[[a]][i,j] == '<1') data[[a]][i,j] <- NA } } } library(zoo) vaccine <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ bbb <- data.frame(Date = data[[a]]$Day,Country = substr(names(data[[a]])[2],17,nchar(names(data[[a]])[2])-1), vaccine = data[[a]][2]) names(bbb)[3] <- 'vaccine' vaccine <- rbind(vaccine, bbb) } } write.csv(vaccine,'/Users/qidiwang1/Desktop/vaccine.csv') contact <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ bbb <- data.frame(Date = data[[a]]$Day,Country = substr(names(data[[a]])[2],19,nchar(names(data[[a]])[2])-1), contact = data[[a]][2]) names(bbb)[3] <- 'contact' contact <- rbind(contact, bbb) } } write.csv(contact,'/Users/qidiwang1/Desktop/contact.csv') View(contact) colMeans(data[[a]][2] contact <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ aaa <- rollmean(data[[a]][2], k = 7, align = 'right',fill = NA) num = as.numeric(tail(aaa,1)/max(aaa,na.rm = TRUE)) contact <- rbind(contact, data.frame(Country = substr(names(data[[a]])[2],19,nchar(names(data[[a]])[2])-1), contact = num)) } } data2$Country <- as.character(data2$Country) contact$Country <- as.character(contact$Country) data3 <- data2%>% left_join(contact,by='Country') ggplot()+ geom_point(aes(x=data3$confirm_ratio,y=data3$contact))+ geom_text(aes(x=data3$confirm_ratio,y=data3$contact,label=data3$Country)) for (a in c(1:length(data))){ data[[a]][,2] <- as.character(data[[a]][,2]) data[[a]][,3] <- as.character(data[[a]][,3]) data[[a]][,4] <- as.character(data[[a]][,4]) data[[a]][,5] <- as.character(data[[a]][,5]) data[[a]][,6] <- as.character(data[[a]][,6]) } for (a in 
c(1:length(data))){ data[[a]][,2] <- as.numeric(data[[a]][,2]) data[[a]][,3] <- as.numeric(data[[a]][,3]) data[[a]][,4] <- as.numeric(data[[a]][,4]) data[[a]][,5] <- as.numeric(data[[a]][,5]) data[[a]][,6] <- as.numeric(data[[a]][,6]) } R <- data.frame() dataset <- data.frame() for (a in c(3,5)) { data[[a]][,2] <- ifelse(is.na(data[[a]][,2]),0,data[[a]][,2]) data[[a]][,3] <- ifelse(is.na(data[[a]][,3]),0,data[[a]][,3]) data[[a]][,4] <- ifelse(is.na(data[[a]][,4]),0,data[[a]][,4]) data[[a]][,5] <- ifelse(is.na(data[[a]][,5]),0,data[[a]][,5]) data[[a]][,6] <- ifelse(is.na(data[[a]][,6]),0,data[[a]][,6]) a1 <- sum(data[[a]][c(1:91),2])/nrow(data1) a2 <- sum(data[[a]][c(1:91),3])/nrow(data1) a3 <- sum(data[[a]][c(1:91),4])/nrow(data1) a4 <- sum(data[[a]][c(1:91),5])/nrow(data1) a5 <- sum(data[[a]][c(1:91),6])/nrow(data1) a6 <- sum(a2,a3,a5) if (a1 >0 & a6 >0) {Baseline <- a6/a1 } else {Baseline <- 0} data2 <- data.frame() for (i in c(98:nrow(data[[a]]))){ c <- sum(data[[a]][c((i-6):i),3])/7 d <- sum(data[[a]][c((i-6):i),4])/7 e <- sum(data[[a]][c((i-6):i),6])/7 colmean <- sum(c,d,e) colmean2 <- sum(data[[a]][c((i-6):i),2])/7 result <- cbind(as.character(data[[a]][i,1]),colmean2,colmean) data2 <- rbind(data2,result) } data2[,2] <- as.numeric(as.character(data2[,2])) data2[,3] <- as.numeric(as.character(data2[,3])) names(data2)[3] <- names(data[[a]])[2] x <- c() y <- data.frame() for (i in 1:nrow(data2)) { #b <- sum(data2[i,3],data2[i,4],data2[i,6]) if (data2[i,2]>0 & data2[i,3] >0) { R <- data2[i,3]/data2[i,2] } else {R <- 0} R_Real <- R-Baseline x <- c(x,R_Real) z <- cbind(as.character(data2[,1],R_Real)) y <- rbind(y,z) } R <- rbind(R,y) data2[,1]<-as.Date(data2[,1]) #is_chn = F plot <- ggplot() + geom_line(aes(x=data2[,1], y = x), size = 1) + #geom_smooth(method = glm)+ #lha_theme(is_chn)+ ylab('R-Index')+ xlab('Date')+ ggtitle(substr(names(data2)[3],13,nchar(names(data2)[3])-1))+ scale_x_date(date_labels = '%b-%d') ggsave(plot,filename = 
paste('/Users/qidiwang1/Desktop/Test/',substr(names(data2)[3],13,nchar(names(data2)[3])-1),'.png')) } View(R) View(data[[3]]) a1 <- sum(data[[3]][c(1:91),2])/91 a2 <- sum(data[[3]][c(1:91),3])/91 a3 <- sum(data[[3]][c(1:91),4])/91 a4 <- sum(data[[3]][c(1:91),5])/91 a5 <- sum(data[[3]][c(1:91),6])/91 sum(a2,a3,a5)/a1 a <- sum(data[[3]][c(92:98),3])/7 a for (i in c(98:186)){ a <- sum(data[[3]][c(i-6:i),3])/7 b <- sum(data[[3]][c(i-6:i),4])/7 c <- sum(data[[3]][c(i-6:i),6])/7 } colmean <- sum(a,b,c) colmean2 <- sum(data[[a]][c(i-6:i),2])/7 result <- cbind(as.character(data[[a]][i,1]),colmean2,colmean) data2 <- rbind(data2,result) }
/10月24号为止的r code/批量google trend数据处理(无bug版).R
no_license
qidiwang1/Rstudio
R
false
false
5,084
r
filenames <- list.files('/Users/qidiwang1/Desktop/Death Map', pattern = '*.csv$', full.names = TRUE) my_data <- lapply(filenames, read.csv, skip = 2) data <- my_data for (a in c(1:length(data))){ if (nrow(data[[a]])>0) for (i in c(1:nrow(data[[a]]))){ for (j in c(1:ncol(data[[a]]))){ if (data[[a]][i,j] == '<1') data[[a]][i,j] <- NA } } } library(zoo) vaccine <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ bbb <- data.frame(Date = data[[a]]$Day,Country = substr(names(data[[a]])[2],17,nchar(names(data[[a]])[2])-1), vaccine = data[[a]][2]) names(bbb)[3] <- 'vaccine' vaccine <- rbind(vaccine, bbb) } } write.csv(vaccine,'/Users/qidiwang1/Desktop/vaccine.csv') contact <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ bbb <- data.frame(Date = data[[a]]$Day,Country = substr(names(data[[a]])[2],19,nchar(names(data[[a]])[2])-1), contact = data[[a]][2]) names(bbb)[3] <- 'contact' contact <- rbind(contact, bbb) } } write.csv(contact,'/Users/qidiwang1/Desktop/contact.csv') View(contact) colMeans(data[[a]][2] contact <- data.frame() for (a in c(1:length(data))){ if (nrow(data[[a]])==0){ print('NO') } else{ aaa <- rollmean(data[[a]][2], k = 7, align = 'right',fill = NA) num = as.numeric(tail(aaa,1)/max(aaa,na.rm = TRUE)) contact <- rbind(contact, data.frame(Country = substr(names(data[[a]])[2],19,nchar(names(data[[a]])[2])-1), contact = num)) } } data2$Country <- as.character(data2$Country) contact$Country <- as.character(contact$Country) data3 <- data2%>% left_join(contact,by='Country') ggplot()+ geom_point(aes(x=data3$confirm_ratio,y=data3$contact))+ geom_text(aes(x=data3$confirm_ratio,y=data3$contact,label=data3$Country)) for (a in c(1:length(data))){ data[[a]][,2] <- as.character(data[[a]][,2]) data[[a]][,3] <- as.character(data[[a]][,3]) data[[a]][,4] <- as.character(data[[a]][,4]) data[[a]][,5] <- as.character(data[[a]][,5]) data[[a]][,6] <- as.character(data[[a]][,6]) } for (a in 
c(1:length(data))){ data[[a]][,2] <- as.numeric(data[[a]][,2]) data[[a]][,3] <- as.numeric(data[[a]][,3]) data[[a]][,4] <- as.numeric(data[[a]][,4]) data[[a]][,5] <- as.numeric(data[[a]][,5]) data[[a]][,6] <- as.numeric(data[[a]][,6]) } R <- data.frame() dataset <- data.frame() for (a in c(3,5)) { data[[a]][,2] <- ifelse(is.na(data[[a]][,2]),0,data[[a]][,2]) data[[a]][,3] <- ifelse(is.na(data[[a]][,3]),0,data[[a]][,3]) data[[a]][,4] <- ifelse(is.na(data[[a]][,4]),0,data[[a]][,4]) data[[a]][,5] <- ifelse(is.na(data[[a]][,5]),0,data[[a]][,5]) data[[a]][,6] <- ifelse(is.na(data[[a]][,6]),0,data[[a]][,6]) a1 <- sum(data[[a]][c(1:91),2])/nrow(data1) a2 <- sum(data[[a]][c(1:91),3])/nrow(data1) a3 <- sum(data[[a]][c(1:91),4])/nrow(data1) a4 <- sum(data[[a]][c(1:91),5])/nrow(data1) a5 <- sum(data[[a]][c(1:91),6])/nrow(data1) a6 <- sum(a2,a3,a5) if (a1 >0 & a6 >0) {Baseline <- a6/a1 } else {Baseline <- 0} data2 <- data.frame() for (i in c(98:nrow(data[[a]]))){ c <- sum(data[[a]][c((i-6):i),3])/7 d <- sum(data[[a]][c((i-6):i),4])/7 e <- sum(data[[a]][c((i-6):i),6])/7 colmean <- sum(c,d,e) colmean2 <- sum(data[[a]][c((i-6):i),2])/7 result <- cbind(as.character(data[[a]][i,1]),colmean2,colmean) data2 <- rbind(data2,result) } data2[,2] <- as.numeric(as.character(data2[,2])) data2[,3] <- as.numeric(as.character(data2[,3])) names(data2)[3] <- names(data[[a]])[2] x <- c() y <- data.frame() for (i in 1:nrow(data2)) { #b <- sum(data2[i,3],data2[i,4],data2[i,6]) if (data2[i,2]>0 & data2[i,3] >0) { R <- data2[i,3]/data2[i,2] } else {R <- 0} R_Real <- R-Baseline x <- c(x,R_Real) z <- cbind(as.character(data2[,1],R_Real)) y <- rbind(y,z) } R <- rbind(R,y) data2[,1]<-as.Date(data2[,1]) #is_chn = F plot <- ggplot() + geom_line(aes(x=data2[,1], y = x), size = 1) + #geom_smooth(method = glm)+ #lha_theme(is_chn)+ ylab('R-Index')+ xlab('Date')+ ggtitle(substr(names(data2)[3],13,nchar(names(data2)[3])-1))+ scale_x_date(date_labels = '%b-%d') ggsave(plot,filename = 
paste('/Users/qidiwang1/Desktop/Test/',substr(names(data2)[3],13,nchar(names(data2)[3])-1),'.png')) } View(R) View(data[[3]]) a1 <- sum(data[[3]][c(1:91),2])/91 a2 <- sum(data[[3]][c(1:91),3])/91 a3 <- sum(data[[3]][c(1:91),4])/91 a4 <- sum(data[[3]][c(1:91),5])/91 a5 <- sum(data[[3]][c(1:91),6])/91 sum(a2,a3,a5)/a1 a <- sum(data[[3]][c(92:98),3])/7 a for (i in c(98:186)){ a <- sum(data[[3]][c(i-6:i),3])/7 b <- sum(data[[3]][c(i-6:i),4])/7 c <- sum(data[[3]][c(i-6:i),6])/7 } colmean <- sum(a,b,c) colmean2 <- sum(data[[a]][c(i-6:i),2])/7 result <- cbind(as.character(data[[a]][i,1]),colmean2,colmean) data2 <- rbind(data2,result) }
library(mockery) library(rlang) library(magrittr) # helper function to mock the httr environment mock_ods_get_organisations <- function(..., status_code = c(200)) { env <- parent.frame() env$sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) mock_httr(..., status_code = status_code, headers = list("x-total-count" = nrow(env$sample_data)), content = list(Organisations = env$sample_data), env = env) } # arguments ---- test_that("name input must be a single string or NA", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(name = 1) }, "^name argument must be of type string$") expect_error({ ods_get_organisations(name = TRUE) }, "^name argument must be of type string$") expect_error({ ods_get_organisations(name = c("1", "2")) }, "^name argument must be a single value$") expect_error({ ods_get_organisations(name = "") }, "^name argument must not be an empty string$") expect_equal(ods_get_organisations(name = "a"), sample_data) }) }) test_that("name argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(name = "test name") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&Name=test%20name")) }) test_that("post_code argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(post_code = 1) }, "^post_code argument must be of type string$") expect_error({ ods_get_organisations(post_code = "AA") }, "^post_code argument must be at least a district, e.g. 
AA1$") expect_error({ ods_get_organisations(post_code = c("AA1", "AA2")) }, "^post_code argument must be a single value$") expect_error({ ods_get_organisations(post_code = "") }, "^post_code argument must not be an empty string$") expect_equal(ods_get_organisations(post_code = "AA1"), sample_data) }) }) test_that("post_code argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(post_code = "AA1") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PostCode=AA1")) }) test_that("last_change_date argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(last_change_date = "2020-01-01") }, "^last_change_date argument must be of type date$") expect_error({ ods_get_organisations(last_change_date = 20200101) }, "^last_change_date argument must be of type date$") expect_error({ ods_get_organisations(last_change_date = c(as.Date("2020-01-01"), as.Date("2020-01-02"))) }, "^last_change_date argument must be a single value$") expect_equal(ods_get_organisations(last_change_date = as.Date("2020-01-01")), sample_data) }) }) test_that("last_change_date argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(last_change_date = as.Date("2020-01-01")) }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&LastChangeDate=2020-01-01")) }) test_that("status argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(status = "") }) expect_error({ ods_get_organisations(status = "other") }) expect_error({ ods_get_organisations(status = 1) }) expect_error({ ods_get_organisations(status = c("active", "inactive")) }, "^'arg' must be of length 1$") expect_equal(ods_get_organisations(status = "active"), sample_data) expect_equal(ods_get_organisations(status = 
"inactive"), sample_data) }) }) test_that("status argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(status = "active") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&Status=active")) }) test_that("primary_role_id argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(primary_role_id = 1) }, "^primary_role_id argument must be of type string$") expect_error({ ods_get_organisations(primary_role_id = c("a", "b")) }, "^primary_role_id argument must be a single value$") expect_error({ ods_get_organisations(primary_role_id = "") }, "^primary_role_id argument must not be an empty string$") expect_equal(ods_get_organisations(primary_role_id = "abc"), sample_data) }) }) test_that("primary_role_id argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(primary_role_id = "abc") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PrimaryRoleId=abc")) }) test_that("non_primary_role_id argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(non_primary_role_id = 1) }, "^non_primary_role_id argument must be of type string$") expect_error({ ods_get_organisations(non_primary_role_id = c("a", "b")) }, "^non_primary_role_id argument must be a single value$") expect_error({ ods_get_organisations(non_primary_role_id = "") }, "^non_primary_role_id argument must not be an empty string$") expect_equal(ods_get_organisations(non_primary_role_id = "abc"), sample_data) }) }) test_that("non_primary_role_id argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(non_primary_role_id = "abc") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, 
"organisations?Limit=1000&NonPrimaryRoleId=abc")) }) test_that("it defaults to NHS trust if no arguments are passed", { m <- mock_ods_get_organisations({ expect_warning({ ods_get_organisations() }, "^No arguments specified: defaulting to Primary Role == NHS TRUST$") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PrimaryRoleId=RO197")) }) # test we call the rest of the httr stuff correctly ---- test_that("it calls httr::content", { m <- mock_ods_get_organisations(ods_get_organisations(name = "A")) expect_called(m$content, 1) expect_call(m$content, 1, httr::content(res)) expect_args(m$content, 1, "data") }) test_that("it calls httr:status_code and it stops if the API call fails", { m <- mock_ods_get_organisations(status_code = c(200, 201, 400), { ods_get_organisations(name = "A") expect_error(ods_get_organisations(name = "A")) expect_error(ods_get_organisations(name = "A")) }) expect_called(m$status_code, 3) for(i in 1:3) { expect_call(m$status_code, i, httr::status_code(res)) expect_args(m$status_code, i, "data") } }) test_that("it pages if there are more than LIMIT results", { # LIMIT is set to 1000, the current max supported by the API sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) %>% dplyr::mutate(rep = list(1:750)) %>% tidyr::unnest_longer(col = rep) m <- list( "GET" = mock("data", cycle = TRUE), "status_code" = mock(200, cycle = TRUE), "headers" = mock( list("x-total-count" = nrow(sample_data)) ), "content" = mock( list("Organisations" = sample_data %>% dplyr::filter(rep <= 500)), list("Organisations" = sample_data %>% dplyr::filter(rep > 500)) ) ) do.call(with_mock, c(m, .env = "httr", expr({ ods_get_organisations(name = "A") }))) expect_called(m$GET, 2) expect_called(m$status_code, 2) expect_called(m$headers, 1) expect_called(m$content, 2) }) test_that("it allows you to change the limit", { m <- mock(1, 25, 50) m_httr <- 
mock_ods_get_organisations({ with_mock("ods_api_results_limit" = m, ods_get_organisations(name = "A"), ods_get_organisations(name = "A"), ods_get_organisations(name = "A")) }) expect_called(m, 3) expect_called(m_httr$GET, 3) expect_args(m_httr$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1&Name=A")) expect_args(m_httr$GET, 2, paste0(ODS_API_ENDPOINT, "organisations?Limit=25&Name=A")) expect_args(m_httr$GET, 3, paste0(ODS_API_ENDPOINT, "organisations?Limit=50&Name=A")) }) test_that("it fails if during paging there is a status other than 200", { # LIMIT is set to 1000, the current max supported by the API sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) %>% dplyr::mutate(rep = list(1:750)) %>% tidyr::unnest_longer(col = rep) m <- list( "GET" = mock("data", cycle = TRUE), "status_code" = mock(200, 400, cycle = TRUE), "headers" = mock( list("x-total-count" = nrow(sample_data)) ), "content" = mock( list("Organisations" = sample_data %>% dplyr::filter(rep <= 500)), list("Organisations" = sample_data %>% dplyr::filter(rep > 500)) ) ) do.call(with_mock, c(m, .env = "httr", expr({ expect_error( ods_get_organisations(name = "A") ) }))) expect_called(m$GET, 2) expect_called(m$status_code, 2) expect_called(m$headers, 1) expect_called(m$content, 1) }) test_that("it calls janitor::clean_names", { m <- mock() with_mock("clean_names" = m, .env = "janitor", mock_ods_get_organisations({ ods_get_organisations(name = "a") })) expect_called(m, 1) expect_call(m, 1, janitor::clean_names(organisations)) expect_args(m, 1, sample_data) }) # test helper functions ---- test_that("ods_get_trusts calls ods_get_organisations correctly", { expect_equal(1, 1) m <- mock() with_mock(ods_get_organisations = m, { ods_get_trusts() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO197", non_primary_role_id, org_record_class)) }) test_that("ods_get_trust_sites calls 
ods_get_organisations correctly", { m <- mock() with_mock(ods_get_organisations = m, { ods_get_trust_sites() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO198", non_primary_role_id, org_record_class)) }) test_that("ods_get_ccgs calls ods_get_organisations correctly", { m <- mock() with_mock(ods_get_organisations = m, { ods_get_ccgs() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO98", non_primary_role_id, org_record_class)) })
/tests/testthat/test-ods_get_organisations.R
permissive
blackwellrd/NHSRtools
R
false
false
12,354
r
library(mockery) library(rlang) library(magrittr) # helper function to mock the httr environment mock_ods_get_organisations <- function(..., status_code = c(200)) { env <- parent.frame() env$sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) mock_httr(..., status_code = status_code, headers = list("x-total-count" = nrow(env$sample_data)), content = list(Organisations = env$sample_data), env = env) } # arguments ---- test_that("name input must be a single string or NA", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(name = 1) }, "^name argument must be of type string$") expect_error({ ods_get_organisations(name = TRUE) }, "^name argument must be of type string$") expect_error({ ods_get_organisations(name = c("1", "2")) }, "^name argument must be a single value$") expect_error({ ods_get_organisations(name = "") }, "^name argument must not be an empty string$") expect_equal(ods_get_organisations(name = "a"), sample_data) }) }) test_that("name argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(name = "test name") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&Name=test%20name")) }) test_that("post_code argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(post_code = 1) }, "^post_code argument must be of type string$") expect_error({ ods_get_organisations(post_code = "AA") }, "^post_code argument must be at least a district, e.g. 
AA1$") expect_error({ ods_get_organisations(post_code = c("AA1", "AA2")) }, "^post_code argument must be a single value$") expect_error({ ods_get_organisations(post_code = "") }, "^post_code argument must not be an empty string$") expect_equal(ods_get_organisations(post_code = "AA1"), sample_data) }) }) test_that("post_code argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(post_code = "AA1") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PostCode=AA1")) }) test_that("last_change_date argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(last_change_date = "2020-01-01") }, "^last_change_date argument must be of type date$") expect_error({ ods_get_organisations(last_change_date = 20200101) }, "^last_change_date argument must be of type date$") expect_error({ ods_get_organisations(last_change_date = c(as.Date("2020-01-01"), as.Date("2020-01-02"))) }, "^last_change_date argument must be a single value$") expect_equal(ods_get_organisations(last_change_date = as.Date("2020-01-01")), sample_data) }) }) test_that("last_change_date argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(last_change_date = as.Date("2020-01-01")) }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&LastChangeDate=2020-01-01")) }) test_that("status argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(status = "") }) expect_error({ ods_get_organisations(status = "other") }) expect_error({ ods_get_organisations(status = 1) }) expect_error({ ods_get_organisations(status = c("active", "inactive")) }, "^'arg' must be of length 1$") expect_equal(ods_get_organisations(status = "active"), sample_data) expect_equal(ods_get_organisations(status = 
"inactive"), sample_data) }) }) test_that("status argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(status = "active") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&Status=active")) }) test_that("primary_role_id argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(primary_role_id = 1) }, "^primary_role_id argument must be of type string$") expect_error({ ods_get_organisations(primary_role_id = c("a", "b")) }, "^primary_role_id argument must be a single value$") expect_error({ ods_get_organisations(primary_role_id = "") }, "^primary_role_id argument must not be an empty string$") expect_equal(ods_get_organisations(primary_role_id = "abc"), sample_data) }) }) test_that("primary_role_id argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(primary_role_id = "abc") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PrimaryRoleId=abc")) }) test_that("non_primary_role_id argument must be a valid value", { mock_ods_get_organisations({ expect_error({ ods_get_organisations(non_primary_role_id = 1) }, "^non_primary_role_id argument must be of type string$") expect_error({ ods_get_organisations(non_primary_role_id = c("a", "b")) }, "^non_primary_role_id argument must be a single value$") expect_error({ ods_get_organisations(non_primary_role_id = "") }, "^non_primary_role_id argument must not be an empty string$") expect_equal(ods_get_organisations(non_primary_role_id = "abc"), sample_data) }) }) test_that("non_primary_role_id argument gets appended to the URL", { m <- mock_ods_get_organisations({ ods_get_organisations(non_primary_role_id = "abc") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, 
"organisations?Limit=1000&NonPrimaryRoleId=abc")) }) test_that("it defaults to NHS trust if no arguments are passed", { m <- mock_ods_get_organisations({ expect_warning({ ods_get_organisations() }, "^No arguments specified: defaulting to Primary Role == NHS TRUST$") }) expect_called(m$GET, 1) expect_call(m$GET, 1, httr::GET(url)) expect_args(m$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1000&PrimaryRoleId=RO197")) }) # test we call the rest of the httr stuff correctly ---- test_that("it calls httr::content", { m <- mock_ods_get_organisations(ods_get_organisations(name = "A")) expect_called(m$content, 1) expect_call(m$content, 1, httr::content(res)) expect_args(m$content, 1, "data") }) test_that("it calls httr:status_code and it stops if the API call fails", { m <- mock_ods_get_organisations(status_code = c(200, 201, 400), { ods_get_organisations(name = "A") expect_error(ods_get_organisations(name = "A")) expect_error(ods_get_organisations(name = "A")) }) expect_called(m$status_code, 3) for(i in 1:3) { expect_call(m$status_code, i, httr::status_code(res)) expect_args(m$status_code, i, "data") } }) test_that("it pages if there are more than LIMIT results", { # LIMIT is set to 1000, the current max supported by the API sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) %>% dplyr::mutate(rep = list(1:750)) %>% tidyr::unnest_longer(col = rep) m <- list( "GET" = mock("data", cycle = TRUE), "status_code" = mock(200, cycle = TRUE), "headers" = mock( list("x-total-count" = nrow(sample_data)) ), "content" = mock( list("Organisations" = sample_data %>% dplyr::filter(rep <= 500)), list("Organisations" = sample_data %>% dplyr::filter(rep > 500)) ) ) do.call(with_mock, c(m, .env = "httr", expr({ ods_get_organisations(name = "A") }))) expect_called(m$GET, 2) expect_called(m$status_code, 2) expect_called(m$headers, 1) expect_called(m$content, 2) }) test_that("it allows you to change the limit", { m <- mock(1, 25, 50) m_httr <- 
mock_ods_get_organisations({ with_mock("ods_api_results_limit" = m, ods_get_organisations(name = "A"), ods_get_organisations(name = "A"), ods_get_organisations(name = "A")) }) expect_called(m, 3) expect_called(m_httr$GET, 3) expect_args(m_httr$GET, 1, paste0(ODS_API_ENDPOINT, "organisations?Limit=1&Name=A")) expect_args(m_httr$GET, 2, paste0(ODS_API_ENDPOINT, "organisations?Limit=25&Name=A")) expect_args(m_httr$GET, 3, paste0(ODS_API_ENDPOINT, "organisations?Limit=50&Name=A")) }) test_that("it fails if during paging there is a status other than 200", { # LIMIT is set to 1000, the current max supported by the API sample_data <- tibble::tribble( ~name, ~org_id, ~status, "A", 1, "active", "B", 2, "inactive" ) %>% dplyr::mutate(rep = list(1:750)) %>% tidyr::unnest_longer(col = rep) m <- list( "GET" = mock("data", cycle = TRUE), "status_code" = mock(200, 400, cycle = TRUE), "headers" = mock( list("x-total-count" = nrow(sample_data)) ), "content" = mock( list("Organisations" = sample_data %>% dplyr::filter(rep <= 500)), list("Organisations" = sample_data %>% dplyr::filter(rep > 500)) ) ) do.call(with_mock, c(m, .env = "httr", expr({ expect_error( ods_get_organisations(name = "A") ) }))) expect_called(m$GET, 2) expect_called(m$status_code, 2) expect_called(m$headers, 1) expect_called(m$content, 1) }) test_that("it calls janitor::clean_names", { m <- mock() with_mock("clean_names" = m, .env = "janitor", mock_ods_get_organisations({ ods_get_organisations(name = "a") })) expect_called(m, 1) expect_call(m, 1, janitor::clean_names(organisations)) expect_args(m, 1, sample_data) }) # test helper functions ---- test_that("ods_get_trusts calls ods_get_organisations correctly", { expect_equal(1, 1) m <- mock() with_mock(ods_get_organisations = m, { ods_get_trusts() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO197", non_primary_role_id, org_record_class)) }) test_that("ods_get_trust_sites calls 
ods_get_organisations correctly", { m <- mock() with_mock(ods_get_organisations = m, { ods_get_trust_sites() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO198", non_primary_role_id, org_record_class)) }) test_that("ods_get_ccgs calls ods_get_organisations correctly", { m <- mock() with_mock(ods_get_organisations = m, { ods_get_ccgs() }) expect_called(m, 1) expect_call(m, 1, ods_get_organisations(name, post_code, last_change_date, status, primary_role_id = "RO98", non_primary_role_id, org_record_class)) })
#' @keywords internal
#' @export
#' @rdname devtools-deprecated
# Deprecated devtools helper: emails reverse-dependency maintainers
# about problems found during a revdep check.  Renders a whisker
# template per maintainer, sends (or drafts) each message via gmailr,
# and invisibly returns the results that were NOT successfully sent so
# the caller can retry with `unsent = .Last.value`.
revdep_email <- function(pkg = ".", date, version,
                         author = getOption("devtools.name"), draft = TRUE,
                         unsent = NULL, template = "revdep/email.md",
                         only_problems = TRUE) {
  .Deprecated("revdepcheck::revdep_email()", package = "devtools")
  pkg <- as.package(pkg)
  # force() makes missing required arguments fail here rather than
  # deep inside lapply() below.
  force(date)
  force(version)

  if (is.null(author)) {
    stop("Please supply `author`", call. = FALSE)
  }

  # Either start from the saved revdep-check results, or from a list of
  # previously-unsent results passed back in by the caller.
  if (is.null(unsent)) {
    results <- readRDS(revdep_check_path(pkg))$results
  } else {
    results <- unsent
  }
  if (only_problems) {
    results <- Filter(has_problems, results)
  }

  if (length(results) == 0) {
    message("No emails to send")
    return(invisible())
  }

  template_path <- file.path(pkg$path, template)
  if (!file.exists(template_path)) {
    stop("`", template, "` does not exist", call. = FALSE)
  }
  template <- readLines(template_path)

  # Drop packages whose maintainer field is marked ORPHANED on CRAN —
  # there is nobody to email.
  maintainers <- vapply(results, function(x) x$maintainer, character(1))
  orphaned <- grepl("ORPHAN", maintainers)
  if (any(orphaned)) {
    orphans <- paste(names(results)[orphaned], collapse = ", ")
    message("Dropping ", sum(orphaned), " orphaned packages: ", orphans)

    results <- results[!orphaned]
    maintainers <- maintainers[!orphaned]
  }

  gh <- github_info(pkg$path)
  data <- lapply(results, maintainer_data, pkg = pkg, version = version,
                 gh = gh, date = date, author = author)
  bodies <- lapply(data, whisker::whisker.render, template = template)
  subjects <- lapply(data, function(x) {
    paste0(x$your_package, " and " , x$my_package, " ", x$my_version,
           " release")
  })

  emails <- Map(maintainer_email, maintainers, bodies, subjects)

  # Always draft the first email so the user can eyeball the rendered
  # template before the rest go out.
  message("Testing first email")
  send_email(emails[[1]], draft = TRUE)
  # NOTE(review): presumably yesno() returns TRUE when the user says
  # "no" (so the run aborts) — confirm against devtools' yesno().
  # Note also that the first email is sent again in the vapply() below.
  if (yesno("Did first draft email look ok?")) return(invisible())

  sent <- vapply(emails, send_email, draft = draft, FUN.VALUE = logical(1))
  if (all(sent)) {
    message("All emails successfully sent")
  } else {
    message(sum(!sent), " failed. 
Call again with unsent = .Last.value")
  }

  # Return the failed results so the caller can re-run with
  # `unsent = .Last.value`.
  results <- results[!sent]
  invisible(results)
}

# Send (or draft) a single gmailr MIME message.
# Returns TRUE on success, FALSE on failure; a user interrupt aborts
# the whole run instead of being swallowed as a failure.
send_email <- function(email, draft = TRUE) {
  send <- if (draft) gmailr::create_draft else gmailr::send_message
  msg <- if (draft) "Drafting" else "Sending"

  tryCatch(
    {
      message(msg, ": ", gmailr::subject(email))
      send(email)
      TRUE
    },
    interrupt = function(e) {
      message("Aborted by user")
      invokeRestart("abort")
    },
    error = function(e) {
      message("Failed")
      FALSE
    }
  )
}

# Build the list of template variables for one revdep result, as
# consumed by the whisker template (your_* = their package,
# my_* = the package being released).
maintainer_data <- function(result, pkg, version, gh, date, author) {
  problems <- result$results
  summary <- indent(paste(trunc_middle(unlist(problems)), collapse = "\n\n"))

  list(
    your_package = result$package,
    your_version = result$version,
    your_summary = summarise_check_results(problems),
    your_results = summary,
    you_have_problems = length(unlist(problems)) > 0,
    # An install failure shows up as an error referencing 00install.out.
    you_cant_install = any(grepl("Rcheck/00install[.]out", problems$errors)),
    me = author,
    date = date,
    my_package = pkg$package,
    my_version = version,
    my_github = gh$fullname
  )
}

# Assemble a gmailr MIME message for one maintainer.
maintainer_email <- function(to, body, subject) {
  gmailr::mime(To = to, Subject = subject, body = body)
}
/R/revdep-email.R
no_license
jimhester/devtools
R
false
false
3,414
r
#' @keywords internal #' @export #' @rdname devtools-deprecated revdep_email <- function(pkg = ".", date, version, author = getOption("devtools.name"), draft = TRUE, unsent = NULL, template = "revdep/email.md", only_problems = TRUE) { .Deprecated("revdepcheck::revdep_email()", package = "devtools") pkg <- as.package(pkg) force(date) force(version) if (is.null(author)) { stop("Please supply `author`", call. = FALSE) } if (is.null(unsent)) { results <- readRDS(revdep_check_path(pkg))$results } else { results <- unsent } if (only_problems) { results <- Filter(has_problems, results) } if (length(results) == 0) { message("No emails to send") return(invisible()) } template_path <- file.path(pkg$path, template) if (!file.exists(template_path)) { stop("`", template, "` does not exist", call. = FALSE) } template <- readLines(template_path) maintainers <- vapply(results, function(x) x$maintainer, character(1)) orphaned <- grepl("ORPHAN", maintainers) if (any(orphaned)) { orphans <- paste(names(results)[orphaned], collapse = ", ") message("Dropping ", sum(orphaned), " orphaned packages: ", orphans) results <- results[!orphaned] maintainers <- maintainers[!orphaned] } gh <- github_info(pkg$path) data <- lapply(results, maintainer_data, pkg = pkg, version = version, gh = gh, date = date, author = author) bodies <- lapply(data, whisker::whisker.render, template = template) subjects <- lapply(data, function(x) { paste0(x$your_package, " and " , x$my_package, " ", x$my_version, " release") }) emails <- Map(maintainer_email, maintainers, bodies, subjects) message("Testing first email") send_email(emails[[1]], draft = TRUE) if (yesno("Did first draft email look ok?")) return(invisible()) sent <- vapply(emails, send_email, draft = draft, FUN.VALUE = logical(1)) if (all(sent)) { message("All emails successfully sent") } else { message(sum(!sent), " failed. 
Call again with unsent = .Last.value") } results <- results[!sent] invisible(results) } send_email <- function(email, draft = TRUE) { send <- if (draft) gmailr::create_draft else gmailr::send_message msg <- if (draft) "Drafting" else "Sending" tryCatch( { message(msg, ": ", gmailr::subject(email)) send(email) TRUE }, interrupt = function(e) { message("Aborted by user") invokeRestart("abort") }, error = function(e) { message("Failed") FALSE } ) } maintainer_data <- function(result, pkg, version, gh, date, author) { problems <- result$results summary <- indent(paste(trunc_middle(unlist(problems)), collapse = "\n\n")) list( your_package = result$package, your_version = result$version, your_summary = summarise_check_results(problems), your_results = summary, you_have_problems = length(unlist(problems)) > 0, you_cant_install = any(grepl("Rcheck/00install[.]out", problems$errors)), me = author, date = date, my_package = pkg$package, my_version = version, my_github = gh$fullname ) } maintainer_email <- function(to, body, subject) { gmailr::mime(To = to, Subject = subject, body = body) }
## JamPlot: small helpers around base graphics for grouped series
## plots, error bars, arrows, and log-scaled axes.

## Map an index to a plotting color.  Index 7 is skipped because the
## default palette's yellow is hard to see on a white background.
JamPlot.col <- function(x) {
    ifelse(x < 7, x, x + 1) ## Avoid hard-to-see yellow...
}

## Map an index to a plotting character, starting with pch = 0 (square).
JamPlot.pch <- function(x) {
    x - 1 ## Start with squares...
}

## Plot `ykey` vs. `xkey` from `dframe`, one series per level of `bykey`.
##
##   dframe      data frame holding the data
##   xkey, ykey  column names for the x and y values
##   dykey       optional column name with y error-bar half-widths
##   bykey       column name used to split the data into series
##   byval       series values to plot (default: all levels, sorted)
##   overlay     if TRUE, draw into the existing plot (no new axes)
##   type        "p" = points, "l" = lines, anything else = both
##   legend.loc  legend position keyword, or NULL to suppress legend
##   ...         extra arguments forwarded to plot()
JamPlot.by <- function(dframe, xkey, ykey, dykey = NULL, bykey, byval = NULL,
                       overlay = FALSE, type = "b",
                       xlab = NULL, ylab = NULL,
                       col = NULL, pch = NULL, cex = NULL, lty = 1, lwd = 1,
                       legend.loc = "topleft", legend.text = NULL, ...) {
    if (is.null(byval))
        byval <- sort(unique(dframe[, bykey]))

    if (is.null(xlab))
        xlab <- xkey

    if (is.null(ylab))
        ylab <- ykey

    if (is.null(col))
        col <- seq_along(byval)

    ## A single user-supplied pch is recycled across all series; this
    ## must run before the NULL default below (length(NULL) == 0).
    if (length(pch) == 1)
        pch <- rep(pch, length(byval))

    if (is.null(pch))
        pch <- 0:(length(byval) - 1)

    ## pch 18 (filled diamond) renders smaller than the other symbols,
    ## so it gets a slightly larger cex by default.
    if (is.null(cex))
        cex <- rep(1, length(pch)) + ifelse(pch == 18, 0.35, 0.0)

    if (is.null(legend.text))
        legend.text <- byval

    ## Sort so lines connect points in x order within each series.
    dframe <- dframe[order(dframe[, bykey], dframe[, xkey]), ]

    if (!overlay) {
        par(las = 1)
        plot(dframe[, xkey], dframe[, ykey], type = "n",
             xlab = xlab, ylab = ylab, ...)
    }

    for (k in seq_along(byval)) {
        keep <- which(dframe[, bykey] == byval[k])
        x <- dframe[keep, xkey]
        y <- dframe[keep, ykey]

        if (type == "p") {
            points(x, y, type = "p", col = col[k], pch = pch[k], cex = cex[k])
        }
        else if (type == "l") {
            lines(x, y, type = "l", col = col[k], lty = lty, lwd = lwd)
        }
        else {
            lines(x, y, type = "l", col = col[k], lty = lty, lwd = lwd)
            points(x, y, type = "p", col = col[k], pch = pch[k], cex = cex[k])
        }

        if (!is.null(dykey)) {
            dy <- dframe[keep, dykey]
            ## BUG FIX: `dy` was previously passed positionally into the
            ## `dx` slot of JamPlot.err(), leaving `dy` missing and
            ## erroring whenever dykey was supplied.  Pass it by name;
            ## dx falls back to its new default of 0 (no caps).
            JamPlot.err(x, y, dy = dy, col = col[k])
        }
    }

    if (!is.null(legend.loc))
        legend(legend.loc, bty = "n", legend = legend.text,
               col = col, pch = pch)
}

## Draw error bars around the points (x, y): a vertical bar of
## half-height dy[k] plus horizontal caps of half-width dx[k].
## dx defaults to 0 (degenerate caps) so callers may supply only the
## vertical error; extra arguments are forwarded to lines().
JamPlot.err <- function(x, y, dx = 0, dy, ...) {
    stopifnot(length(y) == length(x))
    stopifnot(length(y) == length(dy))

    if (length(dx) == 1)
        dx <- rep(dx, length(x))

    for (k in seq_along(y)) {
        ## Vertical bar, then the lower and upper caps.
        lines(c(x[k], x[k]), c(y[k] - dy[k], y[k] + dy[k]), ...)
        lines(c(x[k] - dx[k], x[k] + dx[k]), c(y[k] - dy[k], y[k] - dy[k]), ...)
        lines(c(x[k] - dx[k], x[k] + dx[k]), c(y[k] + dy[k], y[k] + dy[k]), ...)
    }
}

## Draw a log-scaled axis with 10^k tick labels on the given side.
JamPlot.logAxis <- function(side, tick.power, tick.labels, ...) {
    .jamPlot.logAxis(side, tick.power, tick.labels, outer = FALSE, ...)
}

## Start an empty plot with a log-scaled x axis and a linear y axis.
## tick.power gives the decades to label (imputed from xlim if NULL);
## tick.labels may be TRUE (default 10^k labels) or explicit labels.
JamPlot.logX <- function(xlim, ylim, xlab, ylab,
                         tick.power = NULL, tick.labels = TRUE,
                         outer = FALSE, ...) {
    if (is.null(tick.power))
        tick.power <- .jamPlot.imputeTickPowers(xlim)

    plot(xlim, ylim, xlab = xlab, ylab = ylab,
         log = "x", type = "n", axes = FALSE,
         xlim = .jamPlot.logAxisLimit(tick.power), ylim = ylim, ...)

    .jamPlot.logAxis(1, tick.power, tick.labels, outer, ...)
    axis(2)
    box()
}

## Start an empty plot with a linear x axis and a log-scaled y axis.
## Set x.axis = FALSE to suppress both the x axis and its label (for
## stacked panels sharing one x axis).
JamPlot.logY <- function(xlim, ylim, xlab, ylab,
                         tick.power = NULL, tick.labels = TRUE,
                         outer = FALSE, x.axis = TRUE, ...) {
    if (is.null(tick.power))
        tick.power <- .jamPlot.imputeTickPowers(ylim)

    if (!x.axis)
        xlab <- ""

    plot(xlim, ylim, log = "y", type = "n", axes = FALSE,
         ylim = .jamPlot.logAxisLimit(tick.power),
         xlab = xlab, ylab = ylab, ...)

    if (x.axis)
        axis(1)

    .jamPlot.logAxis(2, tick.power, tick.labels, outer)
    box()
}

## Start an empty plot with both axes log-scaled.
JamPlot.logXY <- function(xlim, ylim, xlab, ylab,
                          x.tick.power = NULL, x.tick.labels = TRUE,
                          y.tick.power = NULL, y.tick.labels = TRUE,
                          outer = FALSE, ...) {
    if (is.null(x.tick.power))
        x.tick.power <- .jamPlot.imputeTickPowers(xlim)

    if (is.null(y.tick.power))
        y.tick.power <- .jamPlot.imputeTickPowers(ylim)

    plot(xlim, ylim, log = "xy", type = "n", axes = FALSE,
         xlim = .jamPlot.logAxisLimit(x.tick.power),
         ylim = .jamPlot.logAxisLimit(y.tick.power),
         xlab = xlab, ylab = ylab, ...)

    .jamPlot.logAxis(1, x.tick.power, x.tick.labels, outer)
    .jamPlot.logAxis(2, y.tick.power, y.tick.labels, outer)
    box()
}

## Internal worker: draw the log axis, defaulting the labels to 10^k
## plotmath expressions when tick.labels is TRUE.
.jamPlot.logAxis <- function(side, tick.power, tick.labels, outer, ...) {
    if (isTRUE(tick.labels))
        labels <- .jamPlot.tickLabels(tick.power)
    else
        labels <- tick.labels

    axis(side, at = .jamPlot.tickValues(tick.power), labels = labels,
         outer = outer, ...)
}

## Axis limits spanning the given decades.
.jamPlot.logAxisLimit <- function(tick.power) {
    c(10 ^ min(tick.power), 10 ^ max(tick.power))
}

## Decade exponents covering the data range [min(x), max(x)].
.jamPlot.imputeTickPowers <- function(x) {
    minpow <- floor(log10(min(x)))
    maxpow <- ceiling(log10(max(x)))
    minpow:maxpow
}

## Plotmath labels of the form 10^k.  bquote() replaces the previous
## eval(parse(text = sprintf(...))) construction, which is fragile and
## an R anti-pattern.
.jamPlot.tickLabels <- function(tick.power) {
    as.expression(lapply(tick.power, function(p) bquote(10 ^ .(p))))
}

## Tick positions for the given decade exponents.
.jamPlot.tickValues <- function(tick.power) {
    10 ^ tick.power
}

## Scatter plot of `ykey` vs. `xkey`, one color/symbol per level of
## `bykey`.  Extra arguments are forwarded to plot().
JamPlot.scatter <- function(dframe, xkey, ykey, bykey, byval = NULL,
                            col = NULL, pch = NULL,
                            legend.loc = "topleft", legend.text = NULL, ...) {
    if (is.null(byval))
        byval <- sort(unique(dframe[, bykey]))

    if (is.null(col))
        col <- seq_along(byval)

    if (is.null(pch))
        pch <- 0:(length(byval) - 1)

    if (is.null(legend.text))
        legend.text <- byval

    par(las = 1)
    plot(dframe[, xkey], dframe[, ykey], type = "n", ...)

    for (k in seq_along(byval)) {
        keep <- which(dframe[, bykey] == byval[k])
        x <- dframe[keep, xkey]
        y <- dframe[keep, ykey]
        points(x, y, col = col[k], pch = pch[k])
    }

    if (!is.null(legend.loc))
        legend(legend.loc, bty = "n", legend = legend.text,
               col = col, pch = pch)
}

## Plot the points (x, y) and connect consecutive points with arrows,
## trimmed by `space` (in x units) at each end so the arrowheads do not
## overlap the symbols.  Segments too short to trim are skipped.
## Note: the `length` parameter (arrowhead size) shadows base length()
## as a variable, but function-call lookup still finds base::length.
JamPlot.arrows <- function(x, y, col, pch, space, length = 0.08, angle = 30) {
    stopifnot(length(x) == length(y))
    points(x, y, type = "p", col = col, pch = pch)

    ## seq_len() guard: the previous `2:length(x)` looped backwards
    ## (2, 1) when fewer than two points were supplied.
    for (k in seq_len(length(x) - 1L) + 1L) {
        x0 <- x[k - 1]
        y0 <- y[k - 1]
        x1 <- x[k]
        y1 <- y[k]

        dx <- x1 - x0
        dy <- y1 - y0

        gamma <- space / abs(dx)

        if (gamma < 0.5) {
            x0 <- x0 + gamma * dx
            x1 <- x1 - gamma * dx
            y0 <- y0 + gamma * dy
            y1 <- y1 - gamma * dy
            arrows(x0, y0, x1, y1, length = length, angle = angle, col = col)
        }
    }
}

## Fit a power law (linear fit on log-log scale) through (xdata, ydata)
## using `engine` (default lm), draw the fitted curve at the x values in
## `xline`, and return the fitted model object.
JamPlot.loglogline <- function(xdata, ydata, xline, engine = lm,
                               lty = 1, col = 1, ...) {
    lmobj <- engine(log(ydata) ~ log(xdata), ...)
    ## coef() replaces the partially-matched `$coeff` access.
    yline <- exp(coef(lmobj)[1]) * (xline ^ coef(lmobj)[2])
    lines(xline, yline, col = col, lty = lty)
    lmobj
}
/plot/JamPlot.R
permissive
tipplerow/jamr
R
false
false
6,484
r
JamPlot.col <- function(x) { ifelse(x < 7, x, x + 1) ## Avoid hard-to-see yellow... } JamPlot.pch <- function(x) { x - 1 ## Start with squares... } JamPlot.by <- function(dframe, xkey, ykey, dykey = NULL, bykey, byval = NULL, overlay = FALSE, type = "b", xlab = NULL, ylab = NULL, col = NULL, pch = NULL, cex = NULL, lty = 1, lwd = 1, legend.loc = "topleft", legend.text = NULL, ...) { if (is.null(byval)) byval <- sort(unique(dframe[,bykey])) if (is.null(xlab)) xlab <- xkey if (is.null(ylab)) ylab <- ykey if (is.null(col)) col <- 1:length(byval) if (length(pch) == 1) pch <- rep(pch, length(byval)) if (is.null(pch)) pch <- 0:(length(byval) - 1) if (is.null(cex)) cex <- rep(1, length(pch)) + ifelse(pch == 18, 0.35, 0.0) if (is.null(legend.text)) legend.text <- byval dframe <- dframe[order(dframe[,bykey], dframe[,xkey]),] if (!overlay) { par(las = 1) plot(dframe[,xkey], dframe[,ykey], type = "n", xlab = xlab, ylab = ylab, ...) } for (k in seq_along(byval)) { keep <- which(dframe[,bykey] == byval[k]) x <- dframe[keep, xkey] y <- dframe[keep, ykey] if (type == "p") { points(x, y, type = "p", col = col[k], pch = pch[k], cex = cex[k]) } else if (type == "l") { lines(x, y, type = "l", col = col[k], lty = lty, lwd = lwd) } else { lines( x, y, type = "l", col = col[k], lty = lty, lwd = lwd) points(x, y, type = "p", col = col[k], pch = pch[k], cex = cex[k]) } if (!is.null(dykey)) { dy <- dframe[keep, dykey] JamPlot.err(x, y, dy, col = col[k]) } } if (!is.null(legend.loc)) legend(legend.loc, bty = "n", legend = legend.text, col = col, pch = pch) } JamPlot.err <- function(x, y, dx, dy, ...) { stopifnot(length(y) == length(x)) stopifnot(length(y) == length(dy)) if (length(dx) == 1) dx <- rep(dx, length(x)) for (k in seq_along(y)) { lines(c(x[k], x[k]), c(y[k] - dy[k], y[k] + dy[k]), ...) lines(c(x[k] - dx[k], x[k] + dx[k]), c(y[k] - dy[k], y[k] - dy[k]), ...) lines(c(x[k] - dx[k], x[k] + dx[k]), c(y[k] + dy[k], y[k] + dy[k]), ...) 
} } JamPlot.logAxis <- function(side, tick.power, tick.labels, ...) { .jamPlot.logAxis(side, tick.power, tick.labels, outer = FALSE, ...) } JamPlot.logX <- function(xlim, ylim, xlab, ylab, tick.power = NULL, tick.labels = TRUE, outer = FALSE, ...) { if (is.null(tick.power)) tick.power <- .jamPlot.imputeTickPowers(xlim) plot(xlim, ylim, xlab = xlab, ylab = ylab, log = "x", type = "n", axes = FALSE, xlim = .jamPlot.logAxisLimit(tick.power), ylim = ylim, ...) .jamPlot.logAxis(1, tick.power, tick.labels, outer, ...) axis(2) box() } JamPlot.logY <- function(xlim, ylim, xlab, ylab, tick.power = NULL, tick.labels = TRUE, outer = FALSE, x.axis = TRUE, ...) { if (is.null(tick.power)) tick.power <- .jamPlot.imputeTickPowers(ylim) if (!x.axis) xlab <- "" plot(xlim, ylim, log = "y", type = "n", axes = FALSE, ylim = .jamPlot.logAxisLimit(tick.power), xlab = xlab, ylab = ylab, ...) if (x.axis) axis(1) .jamPlot.logAxis(2, tick.power, tick.labels, outer) box() } JamPlot.logXY <- function(xlim, ylim, xlab, ylab, x.tick.power = NULL, x.tick.labels = TRUE, y.tick.power = NULL, y.tick.labels = TRUE, outer = FALSE, ...) { if (is.null(x.tick.power)) x.tick.power <- .jamPlot.imputeTickPowers(xlim) if (is.null(y.tick.power)) y.tick.power <- .jamPlot.imputeTickPowers(ylim) plot(xlim, ylim, log = "xy", type = "n", axes = FALSE, xlim = .jamPlot.logAxisLimit(x.tick.power), ylim = .jamPlot.logAxisLimit(y.tick.power), xlab = xlab, ylab = ylab, ...) .jamPlot.logAxis(1, x.tick.power, x.tick.labels, outer) .jamPlot.logAxis(2, y.tick.power, y.tick.labels, outer) box() } .jamPlot.logAxis <- function(side, tick.power, tick.labels, outer, ...) { if (isTRUE(tick.labels)) labels <- .jamPlot.tickLabels(tick.power) else labels <- tick.labels axis(side, at = .jamPlot.tickValues(tick.power), labels = labels, outer = outer, ...) 
} .jamPlot.logAxisLimit <- function(tick.power) { c(10 ^ min(tick.power), 10 ^ max(tick.power)) } .jamPlot.imputeTickPowers <- function(x) { minpow <- floor(log10(min(x))) maxpow <- ceiling(log10(max(x))) minpow:maxpow } .jamPlot.tickLabels <- function(tick.power) { eval(parse(text = sprintf("expression(%s)", paste(sprintf("10 ^ %d", tick.power), collapse = ", ")))) } .jamPlot.tickValues <- function(tick.power) { 10 ^ tick.power } JamPlot.scatter <- function(dframe, xkey, ykey, bykey, byval = NULL, col = NULL, pch = NULL, legend.loc = "topleft", legend.text = NULL, ...) { if (is.null(byval)) byval <- sort(unique(dframe[,bykey])) if (is.null(col)) col <- 1:length(byval) if (is.null(pch)) pch <- 0:(length(byval) - 1) if (is.null(legend.text)) legend.text <- byval par(las = 1) plot(dframe[,xkey], dframe[,ykey], type = "n", ...) for (k in seq_along(byval)) { keep <- which(dframe[,bykey] == byval[k]) x <- dframe[keep, xkey] y <- dframe[keep, ykey] points(x, y, col = col[k], pch = pch[k]) } if (!is.null(legend.loc)) legend(legend.loc, bty = "n", legend = legend.text, col = col, pch = pch) } JamPlot.arrows <- function(x, y, col, pch, space, length = 0.08, angle = 30) { stopifnot(length(x) == length(y)) points(x, y, type = "p", col = col, pch = pch) for (k in 2:length(x)) { x0 <- x[k - 1] y0 <- y[k - 1] x1 <- x[k] y1 <- y[k] dx <- x1 - x0 dy <- y1 - y0 gamma <- space / abs(dx) if (gamma < 0.5) { x0 <- x0 + gamma * dx x1 <- x1 - gamma * dx y0 <- y0 + gamma * dy y1 <- y1 - gamma * dy arrows(x0, y0, x1, y1, length = length, angle = angle, col = col) } } } JamPlot.loglogline <- function(xdata, ydata, xline, engine = lm, lty = 1, col = 1, ...) { lmobj <- engine(log(ydata) ~ log(xdata), ...) yline <- exp(lmobj$coeff[1]) * (xline ^ lmobj$coeff[2]) lines(xline, yline, col = col, lty = lty) lmobj }
#' @title `rtsdata` - Efficient Data Storage system for R Time Series.
#'
#' @description The `rtsdata` package simplifies the management of Time Series in R. This package
#' overrides the `getSymbols` function from the `quantmod` package to allow for minimal changes
#' to get started. The `rtsdata` package provides functionality to **download** and **store** historical time series.
#'
#' The **download** functionality will intelligently update historical data as needed.
#' The incremental data is downloaded first to update the historical data. The full
#' history is **only** downloaded if the incremental data is not consistent. I.e.
#' the last saved record is different from the first downloaded record.
#'
#' The following download plugins are currently available:
#' * Yahoo Finance - based on `quantmod` package.
#' * FRED - based on `quantmod` package.
#' * Quandl - based on `Quandl` package.
#' Quandl recommends getting an API key.
#' Add following code options(Quandl.api_key = api_key) to your .Rprofile file.
#' * AlphaVantage(av) - based on `quantmod` package.
#' You need an API key from www.alphavantage.co.
#' Add following code options(getSymbols.av.Default = api_key) to your .Rprofile file.
#' * Tiingo - based on `quantmod` package
#' You need an API key from api.tiingo.com.
#' Add following code options(getSymbols.av.Default = api_key) to your .Rprofile file.
#'
#' The download functionality plugins are easily created. The user needs to provide a
#' function to download historical data with ticker, start, and end date parameters
#' to create a new download plugin.
#'
#' The **storage** functionality provides a consistent interface to store historical time series.
#' The following storage plugins are currently available:
#' * Rdata - store historical time series data in the Rdata files.
#' * CSV - store historical time series data in the CSV files. The CSV storage is not
#' efficient because CSV files will have to be parsed every time the data is loaded.
#' The advantage of this format is ease of access to the stored historical data by external programs.
#' For example the CSV files can be opened in Notepad or Excel.
#' * MongoDB - store historical time series data in the MongoDB GridFS system. The MongoDB
#' storage provides optional authentication. The MongoDB storage functionality is currently only
#' available in the development version at bitbucket.
#'
#' The storage functionality plugins are easily created. The user needs to provide
#' functions to load and save data to create a new storage plugin.
#'
#' @examples
#'  # small toy example
#'
#'  # register data source to generate fake stock data for use in rtsdata examples
#'  register.data.source(src = 'sample', data = ds.getSymbol.fake.stock.data)
#'
#'  # Full Update till '2018-02-13'
#'  data = getSymbols('test', src = 'sample', from = '2018-01-01', to = '2018-02-13',
#'      auto.assign=FALSE, verbose=TRUE)
#'
#'  # No updated needed, data is loaded from file
#'  data = getSymbols('test', src = 'sample', from = '2018-01-01', to = '2018-02-13',
#'      auto.assign=FALSE, verbose=TRUE)
#'
#'  # Incremental update from '2018-02-13' till today
#'  data = getSymbols('test', src = 'sample', from = '2018-01-01',
#'      auto.assign=FALSE, verbose=TRUE)
#'
#'  # No updated needed, data is loaded from file
#'  data = getSymbols('test', src = 'sample', from = '2018-01-01',
#'      auto.assign=FALSE, verbose=TRUE)
#'
#'  # data is stored in the 'sample_Rdata' folder at the following location
#'  ds.default.location()
#'
#'
#' @import xts
#'
#' @name rtsdata
#' @docType package
#'
NULL

# NOTE(review): the Tiingo bullet above repeats the AlphaVantage option
# name (getSymbols.av.Default); confirm the intended Tiingo-specific
# option key against quantmod's getSymbols.tiingo documentation.
/R/rtsdata.r
no_license
cran/rtsdata
R
false
false
3,713
r
#' @title `rtsdata` - Efficient Data Storage system for R Time Series. #' #' @description The `rtsdata` package simplifies the management of Time Series in R. This package #' overwrites the `getSymbols` function from `quantmod` package to allow for minimal changes #' to get started. The `rtsdata` package provides functionality to **download** and **store** historical time series. #' #' The **download** functionality will intelligently update historical data as needed. #' The incremental data is downloaded first to updated historical data. The full #' history is **only** downloaded if incremental data is not consistent. I.e. #' the last saved record is different from the first downloaded record. #' #' The following download plugins are currently available: #' * Yahoo Finance - based on `quantmod` package. #' * FRED - based on `quantmod` package. #' * Quandl - based on `Quandl` package. #' Quandl recommends getting an API key. #' Add following code options(Quandl.api_key = api_key) to your .Rprofile file. #' * AlphaVantage(av) - based on `quantmod` package. #' You need an API key from www.alphavantage.co. #' Add following code options(getSymbols.av.Default = api_key) to your .Rprofile file. #' * Tiingo - based on `quantmod` package #' You need an API key from api.tiingo.com. #' Add following code options(getSymbols.av.Default = api_key) to your .Rprofile file. #' #' The download functionality plugins are easily created. The user needs to provide a #' function to download historical data with ticker, start, and end dates parameters #' to create new download plugin. #' #' The **storage** functionality provides a consistent interface to store historical time series. #' The following storage plugins are currently available: #' * Rdata - store historical time series data in the Rdata files. #' * CSV - store historical time series data in the CSV files. The CSV storage is not #' efficient because CSV files will have to be parsed every time the data is loaded. 
#' The advantage of this format is ease of access to the stored historical data by external programs. #' For example the CSV files can be opened in Notepad or Excel. #' * MongoDB - store historical time series data in the MongoDB GridFS system. The MongoDB #' storage provides optional authentication. The MongoDB storage functionality is currently only #' available in the development version at bitbucket. #' #' The storage functionality plugins are easily created. The user needs to provide #' a functions to load and save data to create new storage plugin. #' #' @examples #' # small toy example #' #' # register data source to generate fake stock data for use in rtsdata examples #' register.data.source(src = 'sample', data = ds.getSymbol.fake.stock.data) #' #' # Full Update till '2018-02-13' #' data = getSymbols('test', src = 'sample', from = '2018-01-01', to = '2018-02-13', #' auto.assign=FALSE, verbose=TRUE) #' #' # No updated needed, data is loaded from file #' data = getSymbols('test', src = 'sample', from = '2018-01-01', to = '2018-02-13', #' auto.assign=FALSE, verbose=TRUE) #' #' # Incremental update from '2018-02-13' till today #' data = getSymbols('test', src = 'sample', from = '2018-01-01', #' auto.assign=FALSE, verbose=TRUE) #' #' # No updated needed, data is loaded from file #' data = getSymbols('test', src = 'sample', from = '2018-01-01', #' auto.assign=FALSE, verbose=TRUE) #' #' # data is stored in the 'sample_Rdata' folder at the following location #' ds.default.location() #' #' #' @import xts #' #' @name rtsdata #' @docType package #' NULL
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Test_DF_Correlations.R
\name{Test_DF_Correlations}
\alias{Test_DF_Correlations}
\title{A Function to estimate the correlation among all columns in a data frame, categorical and quantitative}
\usage{
Test_DF_Correlations(df)
}
\arguments{
\item{df}{a data frame of data, which can contain both numeric (quantitative) and factor (categorical) data}
}
\description{
This function estimates the correlation structure among all columns in a data frame.
}
\examples{
Test_DF_Correlations()
}
\keyword{among}
\keyword{analysis}
\keyword{correlation}
\keyword{factors}
/man/Test_DF_Correlations.Rd
no_license
hughesevoanth/NILC
R
false
true
629
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Test_DF_Correlations.R \name{Test_DF_Correlations} \alias{Test_DF_Correlations} \title{A Function estimate the correlation among all columns in a data frame, categorical and quantitative} \usage{ Test_DF_Correlations(df) } \arguments{ \item{df}{a data frame of data, can contain both numeric (quantitative) and factor (categorical) data} } \description{ This function estimates the correlation structure among all columns in a data frame } \examples{ Test_DF_Correlations() } \keyword{among} \keyword{analysis} \keyword{correlation} \keyword{factors}
complete <- function(directory, id = 1:332) {
    ## 'directory' is a character vector of length 1 indicating
    ## the location of the CSV files
    ## 'id' is an integer vector indicating the monitor ID numbers
    ## to be used
    ##
    ## Returns a data frame of the form:
    ##   id nobs
    ##    1  117
    ##    2 1041
    ##   ...
    ## where 'id' is the monitor ID number and 'nobs' is the number of
    ## complete cases (rows with no missing values) in that monitor's
    ## file (named "NNN.csv", zero-padded to three digits).
    ##
    ## Improvement over the original: the per-monitor counts are
    ## computed with vapply() and the data frame is built in one shot,
    ## instead of growing a data frame row by row inside a loop (which
    ## copies the whole frame on every iteration).  An empty `id` now
    ## yields a well-formed zero-row data frame with both columns.
    nobs <- vapply(id, function(monitor) {
        path <- file.path(directory, sprintf("%03d.csv", monitor))
        sum(complete.cases(read.csv(path)))
    }, integer(1))

    data.frame(id = id, nobs = nobs)
}
/project-1/complete.R
no_license
vobine/Coursera-R-Programming
R
false
false
631
r
complete <- function(directory, id = 1:332) { ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'id' is an integer vector indicating the monitor ID numbers ## to be used coco = data.frame () ## Return a data frame of the form: ## id nobs ## 1 117 ## 2 1041 ## ... ## where 'id' is the monitor ID number and 'nobs' is the ## number of complete cases for (i in seq_along (id)) { data = read.csv (file.path (directory, sprintf ('%03d.csv', id[i]))) coco[i, "id"] = id[i] coco[i, "nobs"] = sum (complete.cases (data)) } return (coco) }
## Exploratory script: choropleth maps of Illinois county-level
## ethnicity/population data from a GeoJSON, via tmap and ggplot2.
## Reviewer annotations are marked NOTE(review); the code itself is
## left untouched.

library(geojsonio)
library(broom)
library(ggplot2)
library(dplyr)
library(stringr)
library(classInt)
library(viridis)
library(BAMMtools)
library(rlang)
library(tmap)

spdf <- geojson_read("illinois_ethnicity.geojson", what = "sp")
spdf$STATE_CODE <- as.character(spdf$STATE_CODE)

## NOTE(review): tmap_save() is being added to the tm_* chain with `+`;
## it is normally called separately as tmap_save(map, ...) after the
## map object is built — confirm intent.
map <- tm_shape(spdf) +
  tm_polygons("TOTALPOP",style = "quantile",palette = "BuPu",classes=7) +
  tm_layout(inner.margins = c(0.06, 0.10, 0.10, 0.08)) +
  tm_text("COUNTY_NAM",size=0.3) +
  tmap_save(filename="sag.jpg",dpi=300)

spdf_fortified <- tidy(spdf, region = "STATE_CODE")
spdf_fortified = spdf_fortified %>% left_join(. , spdf, by=c("id"="STATE_CODE"),`copy` = TRUE)

oldcolnames <- names(spdf_fortified)
# newcolnames <- oldcolnames %>% str_replace("Estimate..Total...","") %>% str_replace_all("\\.\\.\\.",", ") %>% str_replace_all("\\."," ")
# spdf_fortified <- spdf_fortified %>% rename_at(vars(oldcolnames), ~ newcolnames)

## NOTE(review): `density_columns` is used on the next statement but is
## only defined further below — as ordered, this line errors at run
## time.  Move the `density_columns <- ...` definition above this use.
spdf_fortified[density_columns] <- spdf_fortified[oldcolnames] / spdf_fortified$TOTALPOP.x

# spdf_fortified %>% select(contains("Estimate")| contains("TOTALPOP")) %>% mutate_at(vars(-TOTALPOP), funs(. / TOTALPOP * 100))
spdf_fortified_density <- spdf_fortified %>%
  select(contains("Estimate") | contains("TOTALPOP")) %>%
  mutate_at(vars(-TOTALPOP), funs(. / TOTALPOP * 100))

density_columns <- paste(oldcolnames,"...density")

## NOTE(review): this mutate() has no arguments and its result is
## discarded — likely a leftover.
spdf_fortified %>% mutate()

## NOTE(review): the data is passed twice here (once via %>% and once
## as the first argument to rename_at) — remove the explicit
## spdf_fortified_density argument.
spdf_fortified_density <- spdf_fortified_density %>% rename_at(spdf_fortified_density, vars(oldcolnames), ~ density_columns)

## Column window to plot on this run.
first <- 21
# second <- length(colnames)
second <- first + 2
oldcolnames
save <- FALSE
for (col in oldcolnames[first:second]) {
  cat(col)
  col_rename <- str_replace(col,"Estimate..Total...","") %>% str_replace_all("\\.\\.\\.",", ") #%>% str_replace_all("\\."," ")
  cat(col_rename)
  # breaks <- getJenksBreaks(spdf[[col]],k=7)
  breaks <- classIntervals(spdf[[col]], n = 5, style = "kmeans")$brks
  spdf_fortified <- spdf_fortified %>% mutate(county_class = cut(!!parse_quo(col,env = caller_env()),breaks,include.lowest = T))
  ## NOTE(review): cat() on a factor column (county_class) is likely to
  ## error; consider print() or cat(as.character(...)) — confirm.
  cat(spdf_fortified$county_class)
  cat(breaks)
  cat("\n")
  ## NOTE(review): aes_string(fill = county_class, ...) passes the bare
  ## symbol `county_class` (undefined in this scope) instead of the
  ## string "county_class"; aes_string() is also deprecated in modern
  ## ggplot2 in favor of aes(.data[[...]]) — confirm and fix.
  p <- ggplot() +
    geom_polygon(data = spdf_fortified, aes_string(fill = county_class, x = "long", y = "lat", group = "group") , size=0, alpha=0.9) +
    theme_void() +
    # scale_fill_viridis( breaks=breaks,
    #   guide = guide_legend( keyheight = unit(2, units = "mm"),
    #     keywidth=unit(6, units = "mm"), label.position = "bottom",
    #     title.position = 'top', nrow=7), name=col_rename,trans="log" ) +
    scale_fill_brewer(palette="Greens") +
    # scale_fill_gradientn(
    #   colours = terrain.colors(15),
    #   breaks = seq(from = 0, to = 0.7, by = 0.05)
    # )
    labs(
      title = col_rename,
      subtitle = "Data: ACS 2019",
      caption = "IRCDLN"
    ) +
    ## NOTE(review): the theme() call below ends with a trailing comma
    ## before `)` (an empty argument), which R may reject — confirm.
    theme(
      text = element_text(color = "#22211d"),
      plot.background = element_rect(fill = "#f5f5f2", color = NA),
      # legend.title=element_text(col_rename)
      # panel.background = element_rect(fill = "#f5f5f2", color = NA),
      # legend.background = element_rect(fill = "#f5f5f2", color = NA),
      # plot.title = element_text(size= 10, hjust=-0.8, color = "#4e4d47", margin = margin(b = 0, t = 0.2, l = 1, unit = "cm")),
      # plot.subtitle = element_text(size= 10, hjust=-0.55, color = "#4e4d47", margin = margin(b = 0, t = 0.2, l = 1, unit = "cm")),
      #
      # legend.position = c(00.05, 0.03),
    ) +
    theme(plot.margin = unit(c(0,1,0.5,1), "cm")) +
    coord_map()

  if (save) {
    ## NOTE(review): trailing comma inside ggsave() may error; also
    ## paste() inserts a space before ".jpg" — paste0() was probably
    ## intended.
    ggsave(
      paste(col_rename,".jpg"),
      plot = p,
      dpi = 300,
      limitsize = TRUE,
    )
  } else {
    print(p)
    Sys.sleep(2)
  }
}
/map making/choropleth.R
no_license
alireza116/irlcdn
R
false
false
3,756
r
library(geojsonio) library(broom) library(ggplot2) library(dplyr) library(stringr) library(classInt) library(viridis) library(BAMMtools) library(rlang) library(tmap) spdf <- geojson_read("illinois_ethnicity.geojson", what = "sp") spdf$STATE_CODE <- as.character(spdf$STATE_CODE) map <- tm_shape(spdf) + tm_polygons("TOTALPOP",style = "quantile",palette = "BuPu",classes=7) + tm_layout(inner.margins = c(0.06, 0.10, 0.10, 0.08)) + tm_text("COUNTY_NAM",size=0.3) + tmap_save(filename="sag.jpg",dpi=300) spdf_fortified <- tidy(spdf, region = "STATE_CODE") spdf_fortified = spdf_fortified %>% left_join(. , spdf, by=c("id"="STATE_CODE"),`copy` = TRUE) oldcolnames <- names(spdf_fortified) # newcolnames <- oldcolnames %>% str_replace("Estimate..Total...","") %>% str_replace_all("\\.\\.\\.",", ") %>% str_replace_all("\\."," ") # spdf_fortified <- spdf_fortified %>% rename_at(vars(oldcolnames), ~ newcolnames) spdf_fortified[density_columns] <- spdf_fortified[oldcolnames] / spdf_fortified$TOTALPOP.x # spdf_fortified %>% select(contains("Estimate")| contains("TOTALPOP")) %>% mutate_at(vars(-TOTALPOP), funs(. / TOTALPOP * 100)) spdf_fortified_density <- spdf_fortified %>% select(contains("Estimate") | contains("TOTALPOP")) %>% mutate_at(vars(-TOTALPOP), funs(. 
/ TOTALPOP * 100)) density_columns <- paste(oldcolnames,"...density") spdf_fortified %>% mutate() spdf_fortified_density <- spdf_fortified_density %>% rename_at(spdf_fortified_density, vars(oldcolnames), ~ density_columns) first <- 21 # second <- length(colnames) second <- first + 2 oldcolnames save <- FALSE for (col in oldcolnames[first:second]) { cat(col) col_rename <- str_replace(col,"Estimate..Total...","") %>% str_replace_all("\\.\\.\\.",", ") #%>% str_replace_all("\\."," ") cat(col_rename) # breaks <- getJenksBreaks(spdf[[col]],k=7) breaks <- classIntervals(spdf[[col]], n = 5, style = "kmeans")$brks spdf_fortified <- spdf_fortified %>% mutate(county_class = cut(!!parse_quo(col,env = caller_env()),breaks,include.lowest = T)) cat(spdf_fortified$county_class) cat(breaks) cat("\n") p <- ggplot() + geom_polygon(data = spdf_fortified, aes_string(fill = county_class, x = "long", y = "lat", group = "group") , size=0, alpha=0.9) + theme_void() + # scale_fill_viridis( breaks=breaks, # guide = guide_legend( keyheight = unit(2, units = "mm"), # keywidth=unit(6, units = "mm"), label.position = "bottom", # title.position = 'top', nrow=7), name=col_rename,trans="log" ) + scale_fill_brewer(palette="Greens") + # scale_fill_gradientn( # colours = terrain.colors(15), # breaks = seq(from = 0, to = 0.7, by = 0.05) # ) labs( title = col_rename, subtitle = "Data: ACS 2019", caption = "IRCDLN" ) + theme( text = element_text(color = "#22211d"), plot.background = element_rect(fill = "#f5f5f2", color = NA), # legend.title=element_text(col_rename) # panel.background = element_rect(fill = "#f5f5f2", color = NA), # legend.background = element_rect(fill = "#f5f5f2", color = NA), # plot.title = element_text(size= 10, hjust=-0.8, color = "#4e4d47", margin = margin(b = 0, t = 0.2, l = 1, unit = "cm")), # plot.subtitle = element_text(size= 10, hjust=-0.55, color = "#4e4d47", margin = margin(b = 0, t = 0.2, l = 1, unit = "cm")), # # legend.position = c(00.05, 0.03), ) + theme(plot.margin = 
unit(c(0,1,0.5,1), "cm")) + coord_map() if (save) { ggsave( paste(col_rename,".jpg"), plot = p, dpi = 300, limitsize = TRUE, ) } else { print(p) Sys.sleep(2) } }
# Kernel functions
# Compound kernel composed of a cosine piece and a 1/x piece.

# Target parameters
xp <- 5    # x-value of point 1 (where the two pieces meet)
s  <- -0.1 # slope at point 1

# Helper functions --------------------------------------------------------------

# kernel.par() - derive the parameter vector of the compound kernel.
#
# Args:
#   xp:  x-value at which the cosine piece hands over to the 1/x piece
#   s:   common slope of both pieces at the junction (must be negative)
#   int: scaling factor for intensity (result * int); defaults to 1 so the
#        function can be called as kernel.par(xp, s) — the original made
#        `int` mandatory, which broke every two-argument call below.
#
# Returns:
#   c(xp, a, b, c, int) where a/b shift the pieces so they join continuously
#   and c rescales x so the junction lands at xp.
kernel.par <- function(xp, s, int = 1) {
  # Part 1: point on the cosine piece with slope s
  x1 <- asin(-s)
  y1 <- cos(x1)
  # Part 2: point on the 1/x piece with slope s
  x2 <- (-1 / s)^0.5
  y2 <- 1 / x2
  # Offsets that align the two pieces
  a <- y2 - y1
  b <- x2 - x1
  c <- x1 / xp
  c(xp, a, b, c, int)
}

# edist() - Euclidean distance between (x[1], x[2]) and (x[3], x[4]).
edist <- function(x) {
  sqrt((x[1] - x[3])^2 + ((x[2] - x[4])^2))
}

# Kernel functions ---------------------------------------------------------------

# gau1() - Euclidean distance weighted with the normal density (static kernel).
gau1 <- function(x, sd) {
  dnorm(edist(x), mean = 0, sd = sd)
}

# kernel1() - compound kernel using cos(x) and 1/x for weighting the
# distance between points.
#
# Args:
#   x:  distance between points (unscaled)
#   kp: parameter vector made with kernel.par()
#
# Returns:
#   y: weighted distance
kernel1 <- function(x, kp) {
  xp  <- kp[1] * kp[4]   # junction point on the scaled axis
  a   <- kp[2]
  b   <- kp[3]
  c   <- kp[4]
  int <- kp[5]
  x <- x * c             # rescale so the junction lands at xp
  if (x <= xp) {
    y <- cos(x) + a
  } else {
    y <- 1 / (x + b)
  }
  y * int
}

# kernel1d() - same as kernel1() but takes a coordinate pair and computes
# the Euclidean distance itself, so kernel1d(x, kp) == kernel1(edist(x), kp).
kernel1d <- function(x, kp) {
  d   <- edist(x)
  xp  <- kp[1] * kp[4]
  a   <- kp[2]
  b   <- kp[3]
  c   <- kp[4]
  int <- kp[5]
  x <- d * c
  # BUGFIX: compare the *scaled* distance against the scaled junction point.
  # The original compared the unscaled distance `d` with the scaled `xp`,
  # which made kernel1d() disagree with kernel1().
  if (x <= xp) {
    y <- cos(x) + a
  } else {
    y <- 1 / (x + b)
  }
  y * int
}

# NOTE(review): the following scratch lines referenced an undefined object
# `pdist` and used `kerpar` before it was defined; kept for reference only.
# x <- pdist[22, ]
# x <- edist(pdist[22, ])
# kernel1d(x = x, kp = kerpar)
# kernel1d(x = pdist[22, ], kp = kerpar)

# Plot ---------------------------------------------------------------------------
kerpar <- kernel.par(xp, s)
x <- seq(from = 0, to = 30000, by = 1)
y <- x
for (i in seq_along(x)) {
  y[i] <- kernel1(x[i], kerpar)
}
plot(x, y)

## gnu plot
# plot [-5:5] sin(x),cos(x),asin(x)
/scripts/kernel_v01.r
no_license
FFaupel/ArchaeoTools
R
false
false
1,932
r
# Kernel-Funktionen # Zusammengesetzt aus Kosinus und 1/x #Soll-Parameter xp <- 5 # x-wert von Punkt 1 s <- -0.1 # Steigung von Punkt 1 # Hilfsfunktionen kernel.par <- function(xp,s,int){ #Teil 1 der Funktion x1 <- asin(-s) # x-wert mit Steigung s der cosinus-komponente y1 <- cos(x1) #Teil 2 der Funktion x2 <- (-1/s)^0.5 # x-wert mit Steigung s der 1/x-komponente y2 <- 1/x2 # Parameter zum Anpassen der Komponenten a <- y2-y1 b <- x2-x1 c <- x1/xp # int: scaling factor for intensity (result*int) # # Rückgabewerte zusammenfassen return(c(xp,a,b,c,int)) } edist <- function(x){sqrt((x[1] - x[3])^2 + ((x[2] - x[4])^2))} # euklidische Distanz #Kernelfunktionen gau1 <- function(x, sd){dnorm(edist(x), mean=0, sd=sd)} # euklidische Distanzen gewichtet mit normalverteilung des statischen kernels kernel1 <- function(x,kp){ # compound kernel using cos(x) and 1/x for weighting # the distance between points # Args: # x: distance between points # kp: vector made with kernel.par # Return: # y: weighted distance xp <- kp[1]*kp[4] a <- kp[2] b <- kp[3] c <- kp[4] int <- kp[5] x <- x*c if (x <= xp) {y <- cos(x) + a} if (x > xp) {y <- 1/(x + b)} y <- y*int return(y) } kernel1d <- function(x,kp){ # mit Distanzberechnung d <- edist(x) xp <- kp[1]*kp[4] a <- kp[2] b <- kp[3] c <- kp[4] int <- kp[5] x <- d*c if (d <= xp) {y <- cos(x) + a} if (d > xp) {y <- 1/(x + b)} y <- y*int return(y) } x=pdist[22,] x=edist(pdist[22,]) kernel1d(x=x, kp=kerpar) kernel1d(x=pdist[22,], kp=kerpar) #Plot kerpar <- kernel.par(xp,s) x <- seq(from=0, to=30000,by=1) y <- x for (i in 1:length(x)) { y[i] <- kernel1(x[i],kerpar) } plot(x,y) ## gnu plot #plot [-5:5] sin(x),cos(x),asin(x)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Game of Codes.R \name{lexpectancy.gender} \alias{lexpectancy.gender} \title{Life expectancy by gender} \usage{ lexpectancy.gender(x) } \arguments{ \item{x}{Dummy: 0 for female and 1 for male} } \description{ Life expectancy by gender } \seealso{ \code{\link{mean}} which this function wraps }
/FinalProjects/Game.of.Codes/man/lexpectancy.gender.Rd
no_license
tyler-abbot/ProgrammingFall2016
R
false
true
372
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Game of Codes.R \name{lexpectancy.gender} \alias{lexpectancy.gender} \title{Life expectancy by gender} \usage{ lexpectancy.gender(x) } \arguments{ \item{x}{Dummy: 0 for female and 1 for male} } \description{ Life expectancy by gender } \seealso{ \code{\link{mean}} which this function wraps }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{get_eval_at} \alias{get_eval_at} \title{Get a list of x-coordinates to evaluate fit$func_y at} \usage{ get_eval_at(fit, facet_by) } \arguments{ \item{fit}{An mcpfit object.} \item{facet_by}{String. Name of a varying group. \code{facet_by} only applies for \code{type = "segments"}} } \description{ Solves two problems: if setting the number of points too high, the function becomes slow. If setting it too low, the posterior at large intercept- changes at change points look discrete, because they are evaluated at very few x in that interval. } \details{ This function makes a vector of x-values with large spacing in general, but finer resolution at change points. }
/man/get_eval_at.Rd
no_license
guhjy/mcp
R
false
true
761
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{get_eval_at} \alias{get_eval_at} \title{Get a list of x-coordinates to evaluate fit$func_y at} \usage{ get_eval_at(fit, facet_by) } \arguments{ \item{fit}{An mcpfit object.} \item{facet_by}{String. Name of a varying group. \code{facet_by} only applies for \code{type = "segments"}} } \description{ Solves two problems: if setting the number of points too high, the function becomes slow. If setting it too low, the posterior at large intercept- changes at change points look discrete, because they are evaluated at very few x in that interval. } \details{ This function makes a vector of x-values with large spacing in general, but finer resolution at change points. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions_SF4.R \name{plot_loc} \alias{plot_loc} \title{Plot hybridization data} \usage{ plot_loc(loc) } \arguments{ \item{loc}{sample location (bel [Belize]/ hon [Honduras]/ pan [Panama])} } \description{ \code{getPofZ} plot hybridization data of a location }
/man/plot_loc.Rd
permissive
k-hench/GenomicOriginsScripts
R
false
true
339
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions_SF4.R \name{plot_loc} \alias{plot_loc} \title{Plot hybridization data} \usage{ plot_loc(loc) } \arguments{ \item{loc}{sample location (bel [Belize]/ hon [Honduras]/ pan [Panama])} } \description{ \code{getPofZ} plot hybridization data of a location }
# Data-cleaning pipeline for the linguistics survey data.
# Expects ling_data, map_df, all.ans and ling_location to already be loaded.

# Keep only fully observed rows.
ling_data <- ling_data[complete.cases(ling_data), ]

# Drop rarely occurring states (these correspond to malformed state names).
ling_data <- ling_data %>%
  group_by(STATE) %>%
  filter(n() > 88) %>%
  ungroup()

# Restrict to the continental US by longitude.
ling_data <- ling_data %>% filter(long > -130)

# Prepare map_df for combining with ling_data ----------------------------------
names(map_df) <- c("long", "lat", "group", "order", "state", "county")
map_df$state <- state.abb[match(map_df$state, tolower(state.name))]
map_df$county <- NULL

# Maternal grandmother (question 68) --------------------------------------------
maternal_grandmother <- filter(ling_data, Q068 %in% c(1, 2, 3, 4, 7))
answers_grandmother <- all.ans[["68"]]
answers_grandmother$Q068 <- rownames(answers_grandmother)
maternal_grandmother$Q068 <- as.character(maternal_grandmother$Q068)
maternal_grandmother <- inner_join(maternal_grandmother, answers_grandmother,
                                   by = "Q068")

# Maternal grandfather (question 70) --------------------------------------------
maternal_grandfather <- filter(ling_data, Q070 %in% c(2, 3, 4, 6, 7))
answers_grandfather <- all.ans[["70"]]
answers_grandfather$Q070 <- rownames(answers_grandfather)
maternal_grandfather$Q070 <- as.character(maternal_grandfather$Q070)
maternal_grandfather <- inner_join(maternal_grandfather, answers_grandfather,
                                   by = "Q070")

# Standardize ling_location ------------------------------------------------------
scaled_location <- ling_location
varnames <- c("Number.of.people.in.cell", "Latitude", "Longitude")
# Logical index of the columns that must NOT be scaled.
index <- names(ling_location) %in% varnames
scaled_location[, !index] <- scale(ling_location[, !index])

# K-means ------------------------------------------------------------------------
k_means <- kmeans(scaled_location, centers = 8)
scaled_location$cluster <- k_means$cluster
/labs/lab2-3033134639/R/clean.R
no_license
tfaulk13/stat-215-a
R
false
false
2,051
r
# Filtering out ling_data for complete cases ---------------------------------- ling_data <- ling_data[complete.cases(ling_data),] # Filtering out observations that had incorrect state names ------------------- ling_data <- ling_data %>% group_by(STATE) %>% filter(n() > 88) %>% ungroup() # Filtering ling_data --------------------------------------------------------- ling_data <- ling_data %>% filter(long > -130) # Preparing map_df for combining with ling_data ------------------------------- names(map_df) <- c("long", "lat", "group", "order", "state", "county") map_df$state <- state.abb[match(map_df$state, tolower(state.name))] map_df$county <- NULL # Preparing data for maternal grandmother plot -------------------------------- maternal_grandmother <- filter(ling_data, Q068 %in% c(1, 2, 3, 4, 7)) answers_grandmother <- all.ans[['68']] answers_grandmother$Q068 <- rownames(answers_grandmother) maternal_grandmother$Q068 <- as.character(maternal_grandmother$Q068) maternal_grandmother <- inner_join(maternal_grandmother, answers_grandmother, by="Q068") # Preparing data for maternal grandfather plot -------------------------------- maternal_grandfather <- filter(ling_data, Q070 %in% c(2, 3, 4, 6, 7)) answers_grandfather <- all.ans[['70']] answers_grandfather$Q070 <- rownames(answers_grandfather) maternal_grandfather$Q070 <- as.character(maternal_grandfather$Q070) maternal_grandfather <- inner_join(maternal_grandfather, answers_grandfather, by="Q070") # Standardizing ling_location ------------------------------------------------- scaled_location <- ling_location varnames <- c("Number.of.people.in.cell", "Latitude", "Longitude") # index vector of columns which must not be scaled index <- names(ling_location) %in% varnames # scale only the columns not in index temp <- scale(ling_location[, !index]) scaled_location[, !index] <- temp # K-Means --------------------------------------------------------------------- k_means <- kmeans(scaled_location, centers = 8) 
scaled_location$cluster <- k_means$cluster
# Cross-validation comprehension exercises.
# Demonstrates why feature selection must happen inside cross-validation.

# Question 1 -------------------------------------------------------------------
library(dplyr)
library(caret)

set.seed(1996)
n <- 1000
p <- 10000
x <- matrix(rnorm(n * p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()

# Random subset of 100 predictors; y is pure noise, so accuracy should be ~0.5.
x_subset <- x[, sample(p, 100)]
fit <- train(x_subset, y, method = "glm")
fit$results

# Question 2 -------------------------------------------------------------------
# library(devtools)
# devtools::install_bioc("genefilter")
BiocManager::install("genefilter", version = "3.8")
library(genefilter)

# Per-column t-tests of each predictor against the outcome.
tt <- colttests(x, y)
head(tt)
pvals <- tt$p.value

# Question 3 -------------------------------------------------------------------
# Indices of "significant" predictors — significant by chance only.
ind <- which(pvals <= 0.01)
length(ind)

# Question 4 -------------------------------------------------------------------
# Refitting on predictors selected using the outcome inflates accuracy:
# selection leaked information from the full data set.
x_subset <- x[, ind]
fit <- train(x_subset, y, method = "glm")
fit$results

# Question 5 -------------------------------------------------------------------
fit <- train(x_subset, y, method = "knn",
             tuneGrid = data.frame(k = seq(101, 301, 25)))
ggplot(fit)

# Question 6 -------------------------------------------------------------------
library(purrr)
library(dslabs)

data("tissue_gene_expression")
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
fit <- train(x, y, method = "knn", tuneGrid = data.frame(k = seq(1, 7, 2)))
# BUGFIX: the original indexed COLUMNS with the order vector
# (`fit$results[order(...)]`); sort the ROWS by accuracy, best first.
fit$results[order(fit$results$Accuracy, decreasing = TRUE), ]
/exercises/comprehension-cross-validation.R
no_license
modishajay/HarvardX_data_science
R
false
false
1,266
r
#Question 1_______________________________________ library(dplyr) library(caret) set.seed(1996) n <- 1000 p <- 10000 x <- matrix(rnorm(n*p), n, p) colnames(x) <- paste("x", 1:ncol(x), sep = "_") y <- rbinom(n, 1, 0.5) %>% factor() x_subset <- x[ ,sample(p, 100)] fit <- train(x_subset, y, method = "glm") fit$results #Question 2_______________________________________ #library(devtools) #devtools::install_bioc("genefilter") BiocManager::install("genefilter",version = "3.8") library(genefilter) library(dplyr) library(caret) tt <- colttests(x, y) head(tt) pvals <- tt$p.value #Question 3_______________________________________ ind <- (which(pvals <= 0.01)) length(ind) #Question 4_______________________________________ x_subset <- x[ ,ind] fit <- train(x_subset, y, method = "glm") fit$results #Question 5_______________________________________ fit <- train(x_subset, y, method = "knn", tuneGrid = data.frame(k = seq(101, 301, 25))) ggplot(fit) #Question 6_______________________________________ library(purrr) library(dslabs) library(caret) data("tissue_gene_expression") y <- tissue_gene_expression$y x <- tissue_gene_expression$x fit <- train(x, y, method = "knn", tuneGrid = data.frame(k = seq(1,7,2))) fit$results[order(fit$results$Accuracy)]
# diameter() - fibre diameter in microns from a dp cell count.
#
# Inverts the linear calibration dpcellno = 5.678 * diam - 48.281.
#
# Args:
#   dpcellno: numeric vector of dp cell counts.
#
# Returns:
#   Numeric vector of diameters in microns, same length as dpcellno.
diameter <- function(dpcellno) {
  (dpcellno + 48.281) / 5.678
}
/pre-papilla-cells/ppcell/R/diameter.R
no_license
nevillejackson/Fleece-biology
R
false
false
137
r
diameter <- function(dpcellno) # diameter() - diameter in microns from dpcellno { diam <- (dpcellno + 48.281)/5.678 return(diam) }
# get_species_freq() - tabulate species frequencies from the uploaded file.
#
# Reads the parsed species table from the shared reactive `variables` and
# returns a data frame with one row per species and its occurrence count.
# Returns NULL (invisibly) when no second file has been uploaded yet.
get_species_freq <- function() {
  if (is.null(input$file2)) {
    return(invisible(NULL))
  }
  sp_read <- variables$sp_read
  df_freq <- count(sp_read, sp_read$sp)
  data.frame(Specie = df_freq$`sp_read$sp`, Freq = df_freq$n)
}
/SpeciesFreq.R
no_license
gustmtofoli/Coord
R
false
false
207
r
get_species_freq = function() { if (!is.null(input$file2)) { sp_read <- variables$sp_read df_freq <- count(sp_read, sp_read$sp) data.frame(Specie = df_freq$`sp_read$sp`, Freq = df_freq$n) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/id_mutations.R \name{id_indels} \alias{id_indels} \title{ID Indel-based Amino Acid Changes} \usage{ id_indels(variant.calls, ref) } \arguments{ \item{variant.calls}{Data frame with cols POS, REF, ALT, AF, DP. Additional columns will be ignored.} \item{ref}{reference genome in "MixVir" format (genomic positions repeated for each associated feature they're associated with, etc.)} } \value{ Data frame with cols "POS", "REF_BASE", "GENE", "REF_CODON", "REF_AA", "GENE_AA_POS", "REF_IDENT", "REF", "ALT", "AF", "ALT_COUNT", "samp_codon", "samp_AA", "samp_identity", "DP" } \description{ Identify amino acid changes associated with indel variation. Changes associated with SNVs are identified in a separate function. Used by the call_mutations function. } \examples{ id_indels() } \keyword{indel}
/MixviR_v1.2/man/id_indels.Rd
no_license
Niroshan23/MixviR
R
false
true
868
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/id_mutations.R \name{id_indels} \alias{id_indels} \title{ID Indel-based Amino Acid Changes} \usage{ id_indels(variant.calls, ref) } \arguments{ \item{variant.calls}{Data frame with cols POS, REF, ALT, AF, DP. Additional columns will be ignored.} \item{ref}{reference genome in "MixVir" format (genomic positions repeated for each associated feature they're associated with, etc.)} } \value{ Data frame with cols "POS", "REF_BASE", "GENE", "REF_CODON", "REF_AA", "GENE_AA_POS", "REF_IDENT", "REF", "ALT", "AF", "ALT_COUNT", "samp_codon", "samp_AA", "samp_identity", "DP" } \description{ Identify amino acid changes associaged with indel variation. Changes associated with SNVs are identified in separate function. Used by call_mutations function. } \examples{ id_indels() } \keyword{indel}
#' Ranks features using the Combinatorial Laplacian Score for 0- and 1-forms. #' #' Given a nerve or a clique complex and a set of features consisting of functions with support on #' the set of points underlying the complex, it assesses the significance of each feature #' in the simplicial complex by computing its scalar and vectorial Combinatorial Laplacian #' Score and comparing it with the null distribution that results from reshuffling many times the values of #' the function across the point cloud. For nerve complexes, feature functions induce 0- and #' 1-forms in the complex by averaging the function across the points associated to 0- and 1-simplices #' respectively. For clique complexes, feature functions are directly 0-forms in the complex and 1-forms #' are obtained by averaging the function across the two vertices connected by each edge. #' #' The calculation of p-values can be optimized by iteratively doubling the number of samples of the #' null distribution until convergence is reached. Two versions of this iteration scheme are implemented. #' In the first one, a p-value is considered convergent if there are at least 10 samples of the null #' distribution that do not exceed the associated Combinatorial Laplacian Score. In the second one, a p-value is considered #' convergent if the condition above holds, and, in case there are less than 10 small samples a generalized #' Pareto distribution (GPD) is used to approximate a p-value. A p-value obtained by a GPD is considered #' convergent if the relative variation is small in the last 3 iterations and the quartiles #' of the approximation are relatively close. #' #' #' @param g2 an object of the class \code{simplicial} containing the nerve or clique complex or a list of #' such objects. #' @param f a numeric vector or matrix specifying one or more functions with support on #' the set of points whose significance will be assessed in the simplicial complex. 
Each #' column corresponds to a point and each row specifies a different function. #' @param num_perms number of permutations used to build the null distribution for each #' feature. When \code{optimize.p} is not \code{NULL} this is the maximum number of #' permutations. By default is set to 1000. #' @param seed integer specifying the seed used to initialize the generator of permutations. #' By default is set to 10. #' @param num_cores integer specifying the number of cores to be used in the computation. By #' default only one core is used. #' @param mc.preschedule when set to TRUE parallel compulations are prescheduled, see #' \link[parallel]{mclapply}. Only has effect if \code{num_cores} > 1 and the code is not being run on Windows. #' By default is set to TRUE. #' @param one_forms when set TRUE the Combinatorial Laplacian Score for 1-forms is #' also computed. By default is set to FALSE. #' @param weights when set to TRUE it takes 2-simplices into account when computing weights. #' By default is set to FALSE. #' @param covariates numeric vector or matrix specifying covariate functions to be samples in #' tandem with the functions in f. Each column correspond to a point and each row specifies a #' different covariate function. Is ignored when set to \code{NULL}. Default value is \code{NULL}. #' @param combination.method method used to combine p-values, can be "KM" for the Kost-McDermott method #' or "EBM" for the empirical Brown's method (Gibbs et. al. 16). Default value is "KM". Only has an #' effect if g2 is a list. #' @param optimize.p string indicating the type of optimization used for computing p-values. #' Must have value \code{NULL} for no optimization, \code{"perm"} for optimizing the calculation of #' p-values using only permutations, or \code{"gpd"} for using a permutations and GPD in optimizing p-value calculation. #' By default is set to \code{NULL}. 
#' @param min_perms minimum number of permutations to be used when computing p-values, only #' relevant when \code{optimize.p} is set to \code{"perm"} or \code{"gpd"}. By default is set to 100. #' @param pow positive number indicating the power to which the samples of the null distribution and the associated #' score are to be transformed before computing a GPD approximation (only used when #' \code{optimize.p} is set to \code{"gdp"}). #' @param nextremes vector of integers with the candidate number of extremes for fitting a GDP. #' Only used when \code{optimize.p} is set to \code{"gdp"}. By default is set to #' \code{c(seq(50, 250, 25), seq(300, 500, 50), seq(600, 1000, 100))}. #' @param alpha level of FDR control for choosing the number of extremes. Only used when #' \code{optimize.p} is set to \code{"gdp"}. By default is set to 0.15. #' #' @details When computing a p-value using a GPD, only null distribution samples in the first quartile are considered. #' The Combinatorial Laplacian Score and associated null distribution samples are transformed by the function #' \deqn{f(x) = (1 - (x - loc)/scale)^pow} #' where \eqn{loc} is the first quartile of the null distribution and \eqn{scale} is the first quartile minus #' the 5%-quantile. A number of extremes for fitting a GPD is chosen using the ForwardStop p-value adjustment, and #' quartiles for the p-value estimates are obtained by sampling GDP parameters form a multivariate normal distribution. #' #' @return When g2 is a simplicial complex, returns a data frame with the value of the Combinatorial Laplacian #' Score for 0- and 1-forms, the p-values, and the q-values computed using Benjamini-Hochberg procedure. #' If \code{optimize.p} is set to \code{"perm"} or \code{"gpd"} then then number of samples at which convergence #' of p-values was obtained is also returned. When g2 is a list, returns a list with Combinatorial Laplacian #' Scores, individual p-values, combined p-values and q-values. 
#' #' #' @examples #' # Example 1 #' library(RayleighSelection) #' gy <- nerve_complex(list(c(1,4,6,10), c(1,2,7), c(2,3,8), c(3,4,9,10), c(4,5))) #' rayleigh_selection(gy,t(as.data.frame(c(0,1,1,0,0,0,0,0,0,1))), one_forms = TRUE) #' #' #' # Example 2: MNIST dataset #' data("mnist") #' #' # Compute reduced representation using Laplacian eigenmap of pixels with high variance #' library(dimRed) #' leim <- LaplacianEigenmaps() #' mnist_top <- mnist[apply(mnist, 1, var) > 10000,] #' emb <- leim@fun(as(t(mnist_top), "dimRedData"), leim@stdpars) #' #' # Compute Mapper representation using the Laplacian eigenmap as an auxiliary function and correlation #' # distance as metric #' library(TDAmapper) #' mnist_distances <- (1.0 - cor(mnist_top)) #' m2 <- mapper2D(distance_matrix = mnist_distances, #' filter_values = list(emb@data@data[,1], emb@data@data[,2]), #' num_intervals = c(30,30), #' percent_overlap = 35, #' num_bins_when_clustering = 10); #' #' # Compute the nerve complex #' gg <- nerve_complex(m2$points_in_vertex) #' #' # Compute R score, p-value, and q-value for the pixels 301st to 305th #' rayleigh_selection(gg, mnist[301:305,], one_forms = TRUE) #' #' # Compute another mapper representation with different percent_overlap #' m2.2 <- mapper2D(distance_matrix = mnist_distances, #' filter_values = list(emb@data@data[,1], emb@data@data[,2]), #' num_intervals = c(30,30), #' percent_overlap = 50, #' num_bins_when_clustering = 10); #' #' # Compute the nerve complex and combine in list #' gg.list <- list(m2, nerve_complex(m2.2$points_in_vertex)) #' #' # Compute R scores, p-values and q-values #' rayleigh_selection(gg.list, mnist[301:305,], one_forms = TRUE) #' #' @export #' rayleigh_selection <- function(g2, f, num_perms = 1000, seed = 10, num_cores = 1, mc.preschedule = TRUE, one_forms = FALSE, weights = FALSE, covariates = NULL, combination.method = "KM", optimize.p = NULL, min_perms = 100, pow = 1, nextremes = c(seq(50, 250, 25), seq(300, 500, 50), seq(600, 1000, 
100)), alpha = 0.15){ # Check class of f if (!is(f,'matrix') && !is(f,'Matrix')) { if (is(f,'numeric')) { f <- t(as.matrix(f)) } else { f <- as.matrix(f) } } # Check class of covariates if ( !is.null(covariates) && !is(covariates,'matrix') && !is(covariates,'Matrix')) { if (is(covariates ,'numeric')) { covariates <- t(as.matrix(covariates)) } else { covariates <- as.matrix(covariates) } } if(!is.null(optimize.p) && optimize.p != "perm" && optimize.p != "gpd"){ optimize.p <- NULL warning( "optimize.p must be either NULL, 'perm' or 'gpd'. Proceding with no p-value optimization." ) } if(!is(g2, "list")){ # case where g2 is a single complex if(max(unlist(g2$points_in_vertex)) > ncol(f)){ stop(sprintf("The simplicial complex has %d points and f is defined on %d points.", max(unlist(g2$points_in_vertex)), ncol(f))) } if(!is.null(covariates) && max(unlist(g2$points_in_vertex)) > ncol(covariates)){ stop(sprintf("The simplicial complex has %d points and covariates is defined on %d points.", max(unlist(g2$points_in_vertex)), ncol(covariates))) } lout <- combinatorial_laplacian(g2, one_forms, weights) scorer <- new(LaplacianScorer,lout, g2$points_in_vertex, g2$adjacency, one_forms) } if(is(g2, "list")){ # case where g2 is a list of complexes points_in_vertex.list <- lapply(g2, function(x) x$points_in_vertex) adjacency.list <- lapply(g2, function(x) x$adjacency) lout.list <- lapply(g2, combinatorial_laplacian, one_forms = one_forms, weights = weights) has.wrong.size <- unlist(lapply(points_in_vertex.list, function(x) max(unlist(x)) > ncol(f))) if(any(has.wrong.size)){ stop("Some simplicial complex has a different number of points to the number of rows of f") } scorer <- new(ScorerEnsemble, lout.list, points_in_vertex.list, adjacency.list, one_forms) } dims <- if(one_forms) c(0,1) else 0 # dimension to be considered out <- list() use.gpd <- !is.null(optimize.p) && optimize.p == "gpd" use.mclapply <- (Sys.info()['sysname'] != "Windows") && nrow(f) > 1 && num_cores > 1 
if(is.null(optimize.p)) min_perms <- num_perms out <- NULL set.seed(seed) for(d in dims){ if(use.mclapply){ compute.out <- parallel::mclapply( asplit(f, 1), compute.p, scorer = scorer, dim = d, min.perm = min_perms, use.gpd = use.gpd, cov = covariates, max.perm = num_perms, n.cores = 1, combination.method = combination.method, pow = pow, nextremes = nextremes, alpha = alpha, mc.preschedule = mc.preschedule, mc.cores = num_cores ) compute.df <- do.call(rbind, compute.out) } else { compute.df <- compute.p( f, scorer = scorer, dim = d, min.perm = min_perms, use.gpd = use.gpd, cov = covariates, max.perm = num_perms, n.cores = num_cores, combination.method = combination.method, pow = pow, nextremes = nextremes, alpha = alpha ) } # Renaming columns and dropping unnecessary columns if(is(g2, "list")){ for(i in seq_along(g2)){ names(compute.df)[i] <- sprintf("R%d.%d", d, i) names(compute.df)[i + length(g2)] <- sprintf("p%d.%d", d, i) } names(compute.df)[2*length(g2)+2] <- sprintf("combined.p%d", d) compute.df[[sprintf("q%d", d)]] <- p.adjust(compute.df[ ,2*length(g2)+2], method = "BH") } else { names(compute.df)[1] <- sprintf("R%d", d) names(compute.df)[2] <- sprintf("p%d", d) compute.df[[sprintf("q%d", d)]] <- p.adjust(compute.df[ ,2], method = "BH") compute.df$combined.p <- NULL } if(is.null(optimize.p)){ compute.df$n.conv <- NULL } else { names(compute.df)[names(compute.df) == "n.conv"] <- sprintf("n%d.conv", d) } if(is.null(out)) out <- compute.df else out <- cbind(out, compute.df) } rownames(out) <- rownames(f) return(out) }
/R/rayleigh_selection.R
permissive
CamaraLab/RayleighSelection
R
false
false
12,250
r
#' Ranks features using the Combinatorial Laplacian Score for 0- and 1-forms. #' #' Given a nerve or a clique complex and a set of features consisting of functions with support on #' the set of points underlying the complex, it asseses the significance of each feature #' in the simplicial complex by computing its scalar and vectorial Combinatorial Laplacian #' Score and comparing it with the null distribution that results from reshufling many times the values of #' the function across the point cloud. For nerve complexes, feature functions induce 0- and #' 1-forms in the complex by averaging the function across the points associated to 0- and 1-simplices #' respectively. For clique complexes, feature functions are directly 0-forms in the complex and 1-forms #' are obtained by averaging the function across the two vertices connected by each edge. #' #' The calculation of p-values can be optimized by iteratively doubling the number of samples of the #' null distribution until convergence is reached. Two version of this iteraction scheme are implemented. #' In the fist one, a p-value is considered convergent if there are at least 10 samples of the null #' distribution that do not exceed the associated Combinatorial Laplacian Score. In the second one, a p-value is considered #' convergent of the condition above holds, and, in case there are less than 10 small samples a generalized #' Pareto distribution (GPD) is used to approximate a p-value. A p-value obtained by a GPD is considered #' convergent if the relative variation is small in the last 3 iteractions and the quartiles #' of the approximation are relatively close. #' #' #' @param g2 an object of the class \code{simplicial} containing the nerve or clique complex or a list of #' such objects. #' @param f a numeric vector or matrix specifying one or more functions with support on #' the set of points whose significance will be assessed in the simplicial complex. 
Each #' column corresponds to a point and each row specifies a different function. #' @param num_perms number of permutations used to build the null distribution for each #' feature. When \code{optimize.p} is not \code{NULL} this is the maximum number of #' permutations. By default is set to 1000. #' @param seed integer specifying the seed used to initialize the generator of permutations. #' By default is set to 10. #' @param num_cores integer specifying the number of cores to be used in the computation. By #' default only one core is used. #' @param mc.preschedule when set to TRUE parallel compulations are prescheduled, see #' \link[parallel]{mclapply}. Only has effect if \code{num_cores} > 1 and the code is not being run on Windows. #' By default is set to TRUE. #' @param one_forms when set TRUE the Combinatorial Laplacian Score for 1-forms is #' also computed. By default is set to FALSE. #' @param weights when set to TRUE it takes 2-simplices into account when computing weights. #' By default is set to FALSE. #' @param covariates numeric vector or matrix specifying covariate functions to be samples in #' tandem with the functions in f. Each column correspond to a point and each row specifies a #' different covariate function. Is ignored when set to \code{NULL}. Default value is \code{NULL}. #' @param combination.method method used to combine p-values, can be "KM" for the Kost-McDermott method #' or "EBM" for the empirical Brown's method (Gibbs et. al. 16). Default value is "KM". Only has an #' effect if g2 is a list. #' @param optimize.p string indicating the type of optimization used for computing p-values. #' Must have value \code{NULL} for no optimization, \code{"perm"} for optimizing the calculation of #' p-values using only permutations, or \code{"gpd"} for using a permutations and GPD in optimizing p-value calculation. #' By default is set to \code{NULL}. 
#' @param min_perms minimum number of permutations to be used when computing p-values, only #' relevant when \code{optimize.p} is set to \code{"perm"} or \code{"gpd"}. By default is set to 100. #' @param pow positive number indicating the power to which the samples of the null distribution and the associated #' score are to be transformed before computing a GPD approximation (only used when #' \code{optimize.p} is set to \code{"gdp"}). #' @param nextremes vector of integers with the candidate number of extremes for fitting a GDP. #' Only used when \code{optimize.p} is set to \code{"gdp"}. By default is set to #' \code{c(seq(50, 250, 25), seq(300, 500, 50), seq(600, 1000, 100))}. #' @param alpha level of FDR control for choosing the number of extremes. Only used when #' \code{optimize.p} is set to \code{"gdp"}. By default is set to 0.15. #' #' @details When computing a p-value using a GPD, only null distribution samples in the first quartile are considered. #' The Combinatorial Laplacian Score and associated null distribution samples are transformed by the function #' \deqn{f(x) = (1 - (x - loc)/scale)^pow} #' where \eqn{loc} is the first quartile of the null distribution and \eqn{scale} is the first quartile minus #' the 5%-quantile. A number of extremes for fitting a GPD is chosen using the ForwardStop p-value adjustment, and #' quartiles for the p-value estimates are obtained by sampling GDP parameters form a multivariate normal distribution. #' #' @return When g2 is a simplicial complex, returns a data frame with the value of the Combinatorial Laplacian #' Score for 0- and 1-forms, the p-values, and the q-values computed using Benjamini-Hochberg procedure. #' If \code{optimize.p} is set to \code{"perm"} or \code{"gpd"} then then number of samples at which convergence #' of p-values was obtained is also returned. When g2 is a list, returns a list with Combinatorial Laplacian #' Scores, individual p-values, combined p-values and q-values. 
#' #' #' @examples #' # Example 1 #' library(RayleighSelection) #' gy <- nerve_complex(list(c(1,4,6,10), c(1,2,7), c(2,3,8), c(3,4,9,10), c(4,5))) #' rayleigh_selection(gy,t(as.data.frame(c(0,1,1,0,0,0,0,0,0,1))), one_forms = TRUE) #' #' #' # Example 2: MNIST dataset #' data("mnist") #' #' # Compute reduced representation using Laplacian eigenmap of pixels with high variance #' library(dimRed) #' leim <- LaplacianEigenmaps() #' mnist_top <- mnist[apply(mnist, 1, var) > 10000,] #' emb <- leim@fun(as(t(mnist_top), "dimRedData"), leim@stdpars) #' #' # Compute Mapper representation using the Laplacian eigenmap as an auxiliary function and correlation #' # distance as metric #' library(TDAmapper) #' mnist_distances <- (1.0 - cor(mnist_top)) #' m2 <- mapper2D(distance_matrix = mnist_distances, #' filter_values = list(emb@data@data[,1], emb@data@data[,2]), #' num_intervals = c(30,30), #' percent_overlap = 35, #' num_bins_when_clustering = 10); #' #' # Compute the nerve complex #' gg <- nerve_complex(m2$points_in_vertex) #' #' # Compute R score, p-value, and q-value for the pixels 301st to 305th #' rayleigh_selection(gg, mnist[301:305,], one_forms = TRUE) #' #' # Compute another mapper representation with different percent_overlap #' m2.2 <- mapper2D(distance_matrix = mnist_distances, #' filter_values = list(emb@data@data[,1], emb@data@data[,2]), #' num_intervals = c(30,30), #' percent_overlap = 50, #' num_bins_when_clustering = 10); #' #' # Compute the nerve complex and combine in list #' gg.list <- list(m2, nerve_complex(m2.2$points_in_vertex)) #' #' # Compute R scores, p-values and q-values #' rayleigh_selection(gg.list, mnist[301:305,], one_forms = TRUE) #' #' @export #' rayleigh_selection <- function(g2, f, num_perms = 1000, seed = 10, num_cores = 1, mc.preschedule = TRUE, one_forms = FALSE, weights = FALSE, covariates = NULL, combination.method = "KM", optimize.p = NULL, min_perms = 100, pow = 1, nextremes = c(seq(50, 250, 25), seq(300, 500, 50), seq(600, 1000, 
100)), alpha = 0.15){ # Check class of f if (!is(f,'matrix') && !is(f,'Matrix')) { if (is(f,'numeric')) { f <- t(as.matrix(f)) } else { f <- as.matrix(f) } } # Check class of covariates if ( !is.null(covariates) && !is(covariates,'matrix') && !is(covariates,'Matrix')) { if (is(covariates ,'numeric')) { covariates <- t(as.matrix(covariates)) } else { covariates <- as.matrix(covariates) } } if(!is.null(optimize.p) && optimize.p != "perm" && optimize.p != "gpd"){ optimize.p <- NULL warning( "optimize.p must be either NULL, 'perm' or 'gpd'. Proceding with no p-value optimization." ) } if(!is(g2, "list")){ # case where g2 is a single complex if(max(unlist(g2$points_in_vertex)) > ncol(f)){ stop(sprintf("The simplicial complex has %d points and f is defined on %d points.", max(unlist(g2$points_in_vertex)), ncol(f))) } if(!is.null(covariates) && max(unlist(g2$points_in_vertex)) > ncol(covariates)){ stop(sprintf("The simplicial complex has %d points and covariates is defined on %d points.", max(unlist(g2$points_in_vertex)), ncol(covariates))) } lout <- combinatorial_laplacian(g2, one_forms, weights) scorer <- new(LaplacianScorer,lout, g2$points_in_vertex, g2$adjacency, one_forms) } if(is(g2, "list")){ # case where g2 is a list of complexes points_in_vertex.list <- lapply(g2, function(x) x$points_in_vertex) adjacency.list <- lapply(g2, function(x) x$adjacency) lout.list <- lapply(g2, combinatorial_laplacian, one_forms = one_forms, weights = weights) has.wrong.size <- unlist(lapply(points_in_vertex.list, function(x) max(unlist(x)) > ncol(f))) if(any(has.wrong.size)){ stop("Some simplicial complex has a different number of points to the number of rows of f") } scorer <- new(ScorerEnsemble, lout.list, points_in_vertex.list, adjacency.list, one_forms) } dims <- if(one_forms) c(0,1) else 0 # dimension to be considered out <- list() use.gpd <- !is.null(optimize.p) && optimize.p == "gpd" use.mclapply <- (Sys.info()['sysname'] != "Windows") && nrow(f) > 1 && num_cores > 1 
if(is.null(optimize.p)) min_perms <- num_perms out <- NULL set.seed(seed) for(d in dims){ if(use.mclapply){ compute.out <- parallel::mclapply( asplit(f, 1), compute.p, scorer = scorer, dim = d, min.perm = min_perms, use.gpd = use.gpd, cov = covariates, max.perm = num_perms, n.cores = 1, combination.method = combination.method, pow = pow, nextremes = nextremes, alpha = alpha, mc.preschedule = mc.preschedule, mc.cores = num_cores ) compute.df <- do.call(rbind, compute.out) } else { compute.df <- compute.p( f, scorer = scorer, dim = d, min.perm = min_perms, use.gpd = use.gpd, cov = covariates, max.perm = num_perms, n.cores = num_cores, combination.method = combination.method, pow = pow, nextremes = nextremes, alpha = alpha ) } # Renaming columns and dropping unnecessary columns if(is(g2, "list")){ for(i in seq_along(g2)){ names(compute.df)[i] <- sprintf("R%d.%d", d, i) names(compute.df)[i + length(g2)] <- sprintf("p%d.%d", d, i) } names(compute.df)[2*length(g2)+2] <- sprintf("combined.p%d", d) compute.df[[sprintf("q%d", d)]] <- p.adjust(compute.df[ ,2*length(g2)+2], method = "BH") } else { names(compute.df)[1] <- sprintf("R%d", d) names(compute.df)[2] <- sprintf("p%d", d) compute.df[[sprintf("q%d", d)]] <- p.adjust(compute.df[ ,2], method = "BH") compute.df$combined.p <- NULL } if(is.null(optimize.p)){ compute.df$n.conv <- NULL } else { names(compute.df)[names(compute.df) == "n.conv"] <- sprintf("n%d.conv", d) } if(is.null(out)) out <- compute.df else out <- cbind(out, compute.df) } rownames(out) <- rownames(f) return(out) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/projects_and_repos.R \name{gl_create_merge_request} \alias{gl_create_merge_request} \title{Create a merge request} \usage{ gl_create_merge_request( project, source_branch, target_branch = "master", title, description, verb = httr::POST, ... ) } \arguments{ \item{project}{name or id of project (not repository!)} \item{source_branch}{name of branch to be merged} \item{target_branch}{name of branch into which to merge} \item{title}{title of the merge request} \item{description}{description text for the merge request} \item{verb}{is ignored, will always be forced to match the action the function name indicates} \item{...}{passed on to \code{\link{gitlab}}. Might contain more fields documented in gitlab API doc.} } \description{ Create a merge request }
/man/gl_create_merge_request.Rd
no_license
KevCaz/gitlabr
R
false
true
857
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/projects_and_repos.R \name{gl_create_merge_request} \alias{gl_create_merge_request} \title{Create a merge request} \usage{ gl_create_merge_request( project, source_branch, target_branch = "master", title, description, verb = httr::POST, ... ) } \arguments{ \item{project}{name or id of project (not repository!)} \item{source_branch}{name of branch to be merged} \item{target_branch}{name of branch into which to merge} \item{title}{title of the merge request} \item{description}{description text for the merge request} \item{verb}{is ignored, will always be forced to match the action the function name indicates} \item{...}{passed on to \code{\link{gitlab}}. Might contain more fields documented in gitlab API doc.} } \description{ Create a merge request }
# Conducts statistics on bootstrapped assignments. library(argparse) library(data.table) # Get arguments. parser <- ArgumentParser() parser$add_argument('--input', required = TRUE) parser$add_argument('--output', required = TRUE) args <- parser$parse_args() # Load the data. message('Loading data') dt.data <- fread(args$input) # Calculate some statistics. Remember: the SE for a bootstrap is just the SD of # the sampling distribution! message('Calculating statistics') reference.values <- dt.data[threshold == max(threshold), count] get.stats <- function (counts) { t.result <- t.test(counts, reference.values, alternative = 'greater') data.table(mean = mean(counts), se = sd(counts), t = t.result$statistic, p = t.result$p.value) } dt.stats <- dt.data[, get.stats(count), keyby = .(threshold)] # Write the output. message('Writing output') write.csv(dt.stats, file = args$output, row.names = FALSE)
/scripts/localizations/get_bootstrapped_stats.R
permissive
morrislab/plos-medicine-joint-patterns
R
false
false
951
r
# Conducts statistics on bootstrapped assignments. library(argparse) library(data.table) # Get arguments. parser <- ArgumentParser() parser$add_argument('--input', required = TRUE) parser$add_argument('--output', required = TRUE) args <- parser$parse_args() # Load the data. message('Loading data') dt.data <- fread(args$input) # Calculate some statistics. Remember: the SE for a bootstrap is just the SD of # the sampling distribution! message('Calculating statistics') reference.values <- dt.data[threshold == max(threshold), count] get.stats <- function (counts) { t.result <- t.test(counts, reference.values, alternative = 'greater') data.table(mean = mean(counts), se = sd(counts), t = t.result$statistic, p = t.result$p.value) } dt.stats <- dt.data[, get.stats(count), keyby = .(threshold)] # Write the output. message('Writing output') write.csv(dt.stats, file = args$output, row.names = FALSE)
testlist <- list(b = c(NA, -1667457892L, -852010L, 67964173L, -1835887972L )) result <- do.call(mcga:::ByteVectorToDoubles,testlist) str(result)
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613106542-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
144
r
testlist <- list(b = c(NA, -1667457892L, -852010L, 67964173L, -1835887972L )) result <- do.call(mcga:::ByteVectorToDoubles,testlist) str(result)
library(glmnet) mydata = read.table("./TrainingSet/RF/stomach.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.9,family="gaussian",standardize=TRUE) sink('./Model/EN/Classifier/stomach/stomach_091.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/Classifier/stomach/stomach_091.R
no_license
leon1003/QSMART
R
false
false
352
r
library(glmnet) mydata = read.table("./TrainingSet/RF/stomach.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.9,family="gaussian",standardize=TRUE) sink('./Model/EN/Classifier/stomach/stomach_091.txt',append=TRUE) print(glm$glmnet.fit) sink()
############################################### # Wheeler, GM (2018): "Incoherent dose-escalation in phase I trials using the escalation with overdose control approach", # Statistical Papers, 59, 801-811 (2018). DOI: 10.1007/s00362-016-0790-7 # First published 24/06/2016 # # R Code # # Graham Wheeler ############################################### rm(list=ls()) #################### # Install packages # #################### install.packages("dclone") library(dclone) ############# # Functions # ############# ptox.fn<-function(rho0,gamma,dose.vec,theta){ plogis((1/(gamma - dose.vec[1]))*(gamma*qlogis(rho0) - dose.vec[1]*qlogis(theta)+(qlogis(theta)-qlogis(rho0))*dose.vec)) } coherence.trial.fn<-function(start.seed, numsim, numpat, alpha, strong.prior=FALSE, sdose, ptox0, theta, start=1, doseskip=TRUE, dose.inc=NULL, permissible.alphas=seq(minalpha,maxalpha,by=0.01), n.chains=1, n.iter=10000, n.burnin=5000, n.thin=4){ rounded.dosemat <- outmat <- array(NA, dim=c(numsim, numpat-1, length(permissible.alphas))) incomat<-matrix(NA, nrow=numsim, ncol = numpat-1) l <- length(sdose) Xmin <- min(sdose) Xmax <- max(sdose) x.mat <- y.mat <- tox.mat <- matrix(rep(NA, numsim*numpat), nrow = numsim) mtd.dists <- matrix(rep(NA, numsim*5), ncol = 5) seed.fail <- NA seed.count <- start.seed for(i in 1:numsim){ # First patient treated at minimum dose (dose 1), with extremely low prob of tox (ideally) alpha.vec<-NULL y.vec<-1 while(y.vec==1){ x.vec<-Xmin N<-1 next.vec<-start set.seed(seed.count) y.vec <- sum( runif(1) < ptox0[next.vec] ) if(y.vec==1){ seed.count<-seed.count+1 seed.fail<-c(seed.fail, seed.count-1) } } while(N<=numpat){ data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=N, Y=c(y.vec,NA), X=c(x.vec,NA)) if(strong.prior==FALSE){ brugs<-bugs.fit(model=model1, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1) }else{ brugs<-bugs.fit(model=model1strong, data=data1, params=c("gamma"), n.chains=n.chains, 
n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1) } t<-unlist(lapply(1:n.chains,function(z) brugs[[z]][,1])) ewoc.dose<-quantile(t, alpha) # If all patients treated, return posterior distribution of gamma and do not compute future doses if(N==numpat){ break } # Find dose "closest" to the "alpha"th quantile found in ewoc.dose next.dose<-which.min((ewoc.dose-sdose)^2) # Update record of recommended doses and records of DLTs/non-DLTs ifelse(doseskip==TRUE, x.vec<-c(x.vec,sdose[next.dose]), ifelse(sdose[next.dose] > x.vec[N], x.vec<-c(x.vec, sdose[which(sdose==x.vec[N])+dose.inc]), x.vec<-c(x.vec, sdose[next.dose]))) next.dose<-which(sdose==x.vec[N+1]) y.j <- sum( runif(1) < ptox0[next.dose] ) y.vec<-c(y.vec, y.j) next.vec<-c(next.vec, next.dose) data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=N+1, Y=c(y.vec[1:N],1,NA), X=c(x.vec,NA)) if(strong.prior==FALSE){ brugs<-bugs.fit(model=model1, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1) }else{ brugs<-bugs.fit(model=model1strong, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1) } t<-unlist(lapply(1:n.chains,function(z) brugs[[z]][,1])) # array of Ntrials by Npatients by different feasibility bounds to choose next dose - used to determine incomat outmat[i,N,]<-round(quantile(t,permissible.alphas),0) rounded.dosemat[i,N,]<-sdose[sapply(1:length(outmat[i,N,]), function(z) which.min((outmat[i,N,z]-sdose)^2))] # matrix of Ntrials by Npatients - entries are smallest increment at which incoherence occurs - index of permissible.alphas incomat[i,N]<-ifelse(max(outmat[i,N,])<x.vec[N+1],NA,min(which(outmat[i,N,]>=x.vec[N+1]))) # Add next patient and repeat N<-N+1 } x.mat[i,]<-x.vec y.mat[i,]<-y.vec tox.mat[i,]<-ptox0[next.vec] mtd.dists[i,]<-c(mean(t),median(t),sd(t),quantile(t,0.025),quantile(t,0.975)) cat(paste("Simulation",i,"complete\n")) 
seed.count<-seed.count+1 } seed.fail<-seed.fail[-1] return(list(x.mat=x.mat, y.mat=y.mat, tox.mat=tox.mat, mtd.dists=mtd.dists, seed.fail=seed.fail, outmat=outmat, incomat=incomat, rounded.dosemat=rounded.dosemat)) } scenalphas.fn<-function(scenario, alphas, ntrials, npats){ obj<-get(paste("scen",scenario,"",sep="")) newinco<-matrix(NA,nrow=ntrials,ncol=npats-1) for(i in 1:ntrials){ for(N in 1:(npats-1)){ newinco[i,N]<-ifelse(max(obj$rounded.dosemat[i,N,])<obj$x.mat[i,N+1], NA, ifelse(length(which(obj$rounded.dosemat[i,N,]<=obj$x.mat[i,N+1]))==0, 1, max(which(obj$rounded.dosemat[i,N,]<=obj$x.mat[i,N+1]))+1)) } } matrix(alphas[t(newinco)],nrow=ntrials,byrow=T) } #################################### # Set up Simulations - Section 3.3 # #################################### theta <- 1/3 dose.FU <- seq(140, 425, by = 1) ptox <- ptox.fn(0.08, 300, dose.FU, theta) num.scens <- 6 nsims <- 100 maxpat <- 40 start.seed <- 804 minalpha <- 0.25 maxalpha <- 0.50 alphas <- seq(minalpha, maxalpha, by = 0.01) n.chains <- 2 n.iter <- 10000 n.burnin <- 5000 n.thin <- 2 ################### # Run Simulations # ################### model1<-function(){ for (i in 1:K){ Y[i]~dbern(p[i]) logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i]) } gamma ~ dunif(Xmin, Xmax) rho0 ~ dunif(0,theta) } # Original scen1<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin) # 20 dose levels (every 15mg/m2) scen2<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=20)], ptox0=ptox[seq(1,length(dose.FU),length=20)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, 
n.thin=n.thin) # 16 dose levels (every 19mg/m2) scen3<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=16)], ptox0=ptox[seq(1,length(dose.FU),length=16)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin) # 6 dose levels (every 57mg/m2) scen4<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=6)], ptox0=ptox[seq(1,length(dose.FU),length=6)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin) # Strong skew prior model1strong<-function(){ for (i in 1:K){ Y[i]~dbern(p[i]) logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i]) } gamma<-Xmin + (Xmax-Xmin)*mubeta mubeta ~ dbeta(3,7) rho0<-theta*rhomu rhomu~dbeta(7,3) } scen5<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=TRUE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin) # Strong centered prior model1strong<-function(){ for (i in 1:K){ Y[i]~dbern(p[i]) logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i]) } gamma<-Xmin + (Xmax-Xmin)*mubeta mubeta ~ dbeta(5,5) rho0<-theta*rhomu rhomu~dbeta(5,5) } scen6<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=TRUE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin) ################# # Plot Figure 1 # ################# for(i in 1:num.scens){ 
assign(paste0("scen",i,"alphas"), scenalphas.fn(scenario = i, alphas = alphas, ntrials = nsims, npats = maxpat)) assign(paste0("mean.inco",i), apply(get(paste0("scen",i,"alphas")), 2, mean, na.rm=T)) assign(paste0("med.inco",i), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.5)) assign(paste0("quant",i,"low"), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.025)) assign(paste0("quant",i,"hi"), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.975)) assign(paste0("min",i), apply(get(paste0("scen",i,"alphas")), 2, min, na.rm=T)) assign(paste0("max",i), apply(get(paste0("scen",i,"alphas")), 2, max, na.rm=T)) } cuts<-seq(10, maxpat, by = 10) par(plt=c(0.11,0.46,0.75,0.95)) plot(2:maxpat, mean.inco1,type="l",col="black",xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 1", las=1, bty="L") lines(2:maxpat, quant1low, lty=2, col="black") lines(2:maxpat, quant1hi, lty=2, col="black") min_feas_excess<-!is.na(scen1alphas) min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } par(new=TRUE,plt=c(0.6,0.95,0.75,0.95)) plot(2:maxpat, mean.inco2, type="l", col="black", xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 2", las=1, bty="L") lines(2:maxpat, quant2low, lty=2, col="black") lines(2:maxpat, quant2hi, lty=2, col="black") min_feas_excess<-!is.na(scen2alphas) min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } par(new=TRUE,plt=c(0.11,0.46,0.45,0.65)) plot(2:maxpat, mean.inco3, type="l", col="black",xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 3", las=1, bty="L") lines(2:maxpat, quant3low, lty=2, col="black") lines(2:maxpat, quant3hi, lty=2, col="black") min_feas_excess<-!is.na(scen3alphas) 
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } par(new=TRUE,plt=c(0.6,0.95,0.45,0.65)) plot(2:maxpat, mean.inco4, type="l", col="black",xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 4", las=1, bty="L") lines(2:maxpat, quant4low, lty=2, col="black") lines(2:maxpat, quant4hi, lty=2, col="black") min_feas_excess<-!is.na(scen4alphas) min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } par(new=TRUE,plt=c(0.11,0.46,0.15,0.35)) plot(2:maxpat, mean.inco5, type="l", col="black", xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 5", las=1, bty="L") lines(2:maxpat, quant5low, lty=2, col="black") lines(2:maxpat, quant5hi, lty=2, col="black") min_feas_excess<-!is.na(scen5alphas) min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } mtext("Patient", 1, cex=1, line=3.25, at=23) par(new=TRUE,plt=c(0.6,0.95,0.15,0.35)) plot(2:maxpat, mean.inco6, type="l", col="black", xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 6", las=1, bty="L") lines(2:maxpat, quant6low, lty=2, col="black") lines(2:maxpat, quant6hi, lty=2, col="black") min_feas_excess<-!is.na(scen6alphas) min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum) for(i in 1:length(cuts)){ mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i]) } mtext("Patient", 1, cex=1, line=3.25, at=23) par(new=TRUE,plt=c(0.11,0.95,0.02,0.08)) plot(0,0,bty="n", xaxt="n",yaxt="n", type="n", xlim=c(-1,1),ylim=c(-1,1), xlab="", ylab="") legend(-0.7,0.3,legend=c("Mean", "95% Credible Interval"), lty=c(1,2), horiz=TRUE) ########################################################### # Code to produce results 
similar to those in for Table 1 # ########################################################### ############ # Function # ############ ewoc.seq.fn<-function(Xmin, Xmax, theta, N, y.vec, x.vec, minalpha, maxalpha, n.chains, n.iter, n.burnin, n.thin){ ntrials<-length(y.vec)/maxpat outmat<-array(NA, dim = c(ntrials, N-1, length(seq(minalpha, maxalpha, by=0.01)))) incomat<-matrix(NA, nrow=ntrials, ncol=N-1) for(i in 1:ntrials){ for(j in 1:(N-1)){ if(ntrials == 1){ yvec_trial <- y.vec xvec_trial <- x.vec }else{ yvec_trial <- y.vec[i,] xvec_trial <- x.vec[i,] } data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=j+1, Y=c(yvec_trial[1:j],1,NA), X=c(xvec_trial[1:(j+1)],NA)) brugs<-bugs.fit(model = model1, data=data1, n.chains = n.chains, params=c("gamma"), n.burnin = n.burnin, n.iter = n.iter, n.thin = n.thin, program = "openbugs", seed = 1:n.chains) # array of Ntrials by Npatients by different feasibility bounds to choose next dose after "possibles" - used to determine incomat outmat[i,j,]<-round(quantile(unlist(lapply(1:n.chains,function(z) brugs[[z]][,1])),seq(minalpha,maxalpha,by=0.01)),0) # matrix of Ntrials by Npatients - entries are smallest increment at which incoherence occurs - index of seq(minalpha,maxalpha,by=0.01) incomat[i,j]<-ifelse(max(outmat[i,j,])<xvec_trial[j+1],NA,min(which(outmat[i,j,]>=xvec_trial[j+1]))) } } return(list(incomat=incomat, outmat=outmat)) } ############ # Run Code # ############ pick_trial<-7 # Choose "pick_trial" from, for example, previous Scenario 1 simulations... foo<-ewoc.seq.fn(Xmin = min(dose.FU), Xmax = max(dose.FU), theta, N=maxpat, scen1$y.mat[pick_trial, ], scen1$x.mat[pick_trial, ], minalpha=minalpha, maxalpha=maxalpha, n.chains = n.chains, n.thin = n.thin, n.burnin = n.burnin, n.iter = n.iter) final_out<-rbind(scen1$x.mat[pick_trial,], scen1$y.mat[pick_trial,], c(NA, alphas[foo$incomat[1, ]])) row.names(final_out)<-c("Dose", "DLT Outcome", "alpha_min") final_out ####### # END # #######
/R Code - Incoherent Dose Escalation.R
no_license
graham-wheeler/incoherent_DE
R
false
false
15,466
r
###############################################
# Wheeler, GM (2018): "Incoherent dose-escalation in phase I trials using the escalation with overdose control approach",
# Statistical Papers, 59, 801-811 (2018). DOI: 10.1007/s00362-016-0790-7
# First published 24/06/2016
#
# R Code
#
# Graham Wheeler
###############################################

rm(list=ls())

####################
# Install packages #
####################

install.packages("dclone")
library(dclone)

#############
# Functions #
#############

# True dose-toxicity curve (logistic on the logit scale).
# rho0     = P(DLT) at the minimum dose,
# gamma    = true MTD (the dose where P(DLT) = theta),
# dose.vec = grid of doses (first element is the minimum dose),
# theta    = target toxicity probability.
# Returns P(DLT) for every dose in dose.vec.
ptox.fn<-function(rho0,gamma,dose.vec,theta){
  plogis((1/(gamma - dose.vec[1]))*(gamma*qlogis(rho0) - dose.vec[1]*qlogis(theta)+(qlogis(theta)-qlogis(rho0))*dose.vec))
}

# Simulate `numsim` EWOC dose-escalation trials of `numpat` patients each on
# dose grid `sdose` with true DLT probabilities `ptox0`.
# `alpha` is the feasibility bound used to select each next dose. After every
# patient, the model is ALSO refitted pretending that patient had a DLT, and
# the dose each bound in `permissible.alphas` would then recommend is stored
# (outmat / rounded.dosemat) -- this is what flags incoherent escalations
# (incomat). Posterior draws of gamma come from OpenBUGS via dclone's
# bugs.fit(); `model1` / `model1strong` must exist in the global environment.
# NOTE(review): the default for `permissible.alphas` reads the globals
# minalpha/maxalpha, which must be defined before relying on the default.
coherence.trial.fn<-function(start.seed, numsim, numpat, alpha, strong.prior=FALSE, sdose, ptox0, theta, start=1, doseskip=TRUE, dose.inc=NULL, permissible.alphas=seq(minalpha,maxalpha,by=0.01), n.chains=1, n.iter=10000, n.burnin=5000, n.thin=4){
  # trial x patient x feasibility-bound records of hypothetical next doses
  rounded.dosemat <- outmat <- array(NA, dim=c(numsim, numpat-1, length(permissible.alphas)))
  incomat<-matrix(NA, nrow=numsim, ncol = numpat-1)
  l <- length(sdose)
  Xmin <- min(sdose)
  Xmax <- max(sdose)
  # per-trial records: doses given, DLT outcomes, true toxicity probabilities
  x.mat <- y.mat <- tox.mat <- matrix(rep(NA, numsim*numpat), nrow = numsim)
  # posterior MTD summaries per trial: mean, median, sd, 2.5% and 97.5% quantiles
  mtd.dists <- matrix(rep(NA, numsim*5), ncol = 5)
  seed.fail <- NA
  seed.count <- start.seed
  for(i in 1:numsim){
    # First patient treated at minimum dose (dose 1), with extremely low prob of tox (ideally);
    # seeds that nevertheless produce a first-patient DLT are skipped and logged in seed.fail
    alpha.vec<-NULL
    y.vec<-1
    while(y.vec==1){
      x.vec<-Xmin
      N<-1
      next.vec<-start
      set.seed(seed.count)
      y.vec <- sum( runif(1) < ptox0[next.vec] )
      if(y.vec==1){
        seed.count<-seed.count+1
        seed.fail<-c(seed.fail, seed.count-1)
      }
    }
    while(N<=numpat){
      data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=N, Y=c(y.vec,NA), X=c(x.vec,NA))
      if(strong.prior==FALSE){
        brugs<-bugs.fit(model=model1, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1)
      }else{
        brugs<-bugs.fit(model=model1strong, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1)
      }
      # pooled posterior draws of gamma across all chains
      t<-unlist(lapply(1:n.chains,function(z) brugs[[z]][,1]))
      ewoc.dose<-quantile(t, alpha)
      # If all patients treated, return posterior distribution of gamma and do not compute future doses
      if(N==numpat){
        break
      }
      # Find dose "closest" to the "alpha"th quantile found in ewoc.dose
      next.dose<-which.min((ewoc.dose-sdose)^2)
      # Update record of recommended doses and records of DLTs/non-DLTs
      # (if dose skipping is disallowed, escalation is capped at `dose.inc` levels)
      ifelse(doseskip==TRUE, x.vec<-c(x.vec,sdose[next.dose]), ifelse(sdose[next.dose] > x.vec[N], x.vec<-c(x.vec, sdose[which(sdose==x.vec[N])+dose.inc]), x.vec<-c(x.vec, sdose[next.dose])))
      next.dose<-which(sdose==x.vec[N+1])
      # simulate the next patient's DLT outcome at the chosen dose
      y.j <- sum( runif(1) < ptox0[next.dose] )
      y.vec<-c(y.vec, y.j)
      next.vec<-c(next.vec, next.dose)
      # Refit pretending the newest patient HAD a DLT (Y forced to 1) to see
      # which dose each feasibility bound would recommend in that case
      data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=N+1, Y=c(y.vec[1:N],1,NA), X=c(x.vec,NA))
      if(strong.prior==FALSE){
        brugs<-bugs.fit(model=model1, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1)
      }else{
        brugs<-bugs.fit(model=model1strong, data=data1, params=c("gamma"), n.chains=n.chains, n.iter=n.iter, n.thin=n.thin, n.burnin=n.burnin, program="openbugs", seed=1)
      }
      t<-unlist(lapply(1:n.chains,function(z) brugs[[z]][,1]))
      # array of Ntrials by Npatients by different feasibility bounds to choose next dose - used to determine incomat
      outmat[i,N,]<-round(quantile(t,permissible.alphas),0)
      rounded.dosemat[i,N,]<-sdose[sapply(1:length(outmat[i,N,]), function(z) which.min((outmat[i,N,z]-sdose)^2))]
      # matrix of Ntrials by Npatients - entries are smallest increment at which incoherence occurs - index of permissible.alphas
      incomat[i,N]<-ifelse(max(outmat[i,N,])<x.vec[N+1],NA,min(which(outmat[i,N,]>=x.vec[N+1])))
      # Add next patient and repeat
      N<-N+1
    }
    x.mat[i,]<-x.vec
    y.mat[i,]<-y.vec
    tox.mat[i,]<-ptox0[next.vec]
    mtd.dists[i,]<-c(mean(t),median(t),sd(t),quantile(t,0.025),quantile(t,0.975))
    cat(paste("Simulation",i,"complete\n"))
    seed.count<-seed.count+1
  }
  # drop the initial NA placeholder from the failed-seed log
  seed.fail<-seed.fail[-1]
  return(list(x.mat=x.mat, y.mat=y.mat, tox.mat=tox.mat, mtd.dists=mtd.dists, seed.fail=seed.fail, outmat=outmat, incomat=incomat, rounded.dosemat=rounded.dosemat))
}

# For a completed scenario object "scen<scenario>" (looked up by name in the
# global environment), convert the per-patient hypothetical-DLT dose records
# into the smallest feasibility bound at which the given escalation would
# still occur. Returns an ntrials x (npats-1) matrix of alpha values
# (NA where no bound in `alphas` reaches the administered dose).
scenalphas.fn<-function(scenario, alphas, ntrials, npats){
  obj<-get(paste("scen",scenario,"",sep=""))
  newinco<-matrix(NA,nrow=ntrials,ncol=npats-1)
  for(i in 1:ntrials){
    for(N in 1:(npats-1)){
      newinco[i,N]<-ifelse(max(obj$rounded.dosemat[i,N,])<obj$x.mat[i,N+1], NA, ifelse(length(which(obj$rounded.dosemat[i,N,]<=obj$x.mat[i,N+1]))==0, 1, max(which(obj$rounded.dosemat[i,N,]<=obj$x.mat[i,N+1]))+1))
    }
  }
  matrix(alphas[t(newinco)],nrow=ntrials,byrow=T)
}

####################################
# Set up Simulations - Section 3.3 #
####################################

theta <- 1/3                                 # target DLT probability
dose.FU <- seq(140, 425, by = 1)             # 5-FU dose grid (mg/m2)
ptox <- ptox.fn(0.08, 300, dose.FU, theta)   # true curve: 8% DLT at 140, MTD at 300
num.scens <- 6
nsims <- 100
maxpat <- 40
start.seed <- 804
minalpha <- 0.25
maxalpha <- 0.50
alphas <- seq(minalpha, maxalpha, by = 0.01) # grid of feasibility bounds
n.chains <- 2
n.iter <- 10000
n.burnin <- 5000
n.thin <- 2

###################
# Run Simulations #
###################

# Vague-prior BUGS model: logistic dose-toxicity curve, uniform priors on
# gamma (MTD) and rho0 (P(DLT) at minimum dose)
model1<-function(){
  for (i in 1:K){
    Y[i]~dbern(p[i])
    logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i])
  }
  gamma ~ dunif(Xmin, Xmax)
  rho0 ~ dunif(0,theta)
}

# Original
scen1<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

# 20 dose levels (every 15mg/m2)
scen2<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=20)], ptox0=ptox[seq(1,length(dose.FU),length=20)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

# 16 dose levels (every 19mg/m2)
scen3<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=16)], ptox0=ptox[seq(1,length(dose.FU),length=16)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

# 6 dose levels (every 57mg/m2)
scen4<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=FALSE, sdose=dose.FU[seq(1,length(dose.FU),length=6)], ptox0=ptox[seq(1,length(dose.FU),length=6)], theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

# Strong skew prior (asymmetric Beta priors on the scaled gamma and rho0)
model1strong<-function(){
  for (i in 1:K){
    Y[i]~dbern(p[i])
    logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i])
  }
  gamma<-Xmin + (Xmax-Xmin)*mubeta
  mubeta ~ dbeta(3,7)
  rho0<-theta*rhomu
  rhomu~dbeta(7,3)
}

scen5<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=TRUE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

# Strong centered prior (symmetric Beta(5,5) priors); NOTE: redefines
# model1strong in place before running scenario 6
model1strong<-function(){
  for (i in 1:K){
    Y[i]~dbern(p[i])
    logit(p[i])<- (1/(gamma - Xmin))*(gamma*logit(rho0) - Xmin*logit(theta)+(logit(theta)-logit(rho0))*X[i])
  }
  gamma<-Xmin + (Xmax-Xmin)*mubeta
  mubeta ~ dbeta(5,5)
  rho0<-theta*rhomu
  rhomu~dbeta(5,5)
}

scen6<-coherence.trial.fn(start.seed = start.seed, numsim = nsims, numpat = maxpat, alpha = minalpha, strong.prior=TRUE, sdose=dose.FU, ptox0=ptox, theta=theta, start=1, doseskip=TRUE, permissible.alphas=alphas, n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.thin=n.thin)

#################
# Plot Figure 1 #
#################

# Per scenario: minimum incoherent feasibility bound per patient position,
# with mean, median, 2.5%/97.5% quantiles and min/max across trials
for(i in 1:num.scens){
  assign(paste0("scen",i,"alphas"), scenalphas.fn(scenario = i, alphas = alphas, ntrials = nsims, npats = maxpat))
  assign(paste0("mean.inco",i), apply(get(paste0("scen",i,"alphas")), 2, mean, na.rm=T))
  assign(paste0("med.inco",i), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.5))
  assign(paste0("quant",i,"low"), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.025))
  assign(paste0("quant",i,"hi"), apply(get(paste0("scen",i,"alphas")), 2, quantile, na.rm=T, probs=0.975))
  assign(paste0("min",i), apply(get(paste0("scen",i,"alphas")), 2, min, na.rm=T))
  assign(paste0("max",i), apply(get(paste0("scen",i,"alphas")), 2, max, na.rm=T))
}

# patient positions at which "(N = ...)" annotations are drawn
cuts<-seq(10, maxpat, by = 10)

# Scenario 1 panel (top-left)
par(plt=c(0.11,0.46,0.75,0.95))
plot(2:maxpat, mean.inco1,type="l",col="black",xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 1", las=1, bty="L")
lines(2:maxpat, quant1low, lty=2, col="black")
lines(2:maxpat, quant1hi, lty=2, col="black")
min_feas_excess<-!is.na(scen1alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}

# Scenario 2 panel (top-right)
par(new=TRUE,plt=c(0.6,0.95,0.75,0.95))
plot(2:maxpat, mean.inco2, type="l", col="black", xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 2", las=1, bty="L")
lines(2:maxpat, quant2low, lty=2, col="black")
lines(2:maxpat, quant2hi, lty=2, col="black")
min_feas_excess<-!is.na(scen2alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}

# Scenario 3 panel (middle-left)
par(new=TRUE,plt=c(0.11,0.46,0.45,0.65))
plot(2:maxpat, mean.inco3, type="l", col="black",xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 3", las=1, bty="L")
lines(2:maxpat, quant3low, lty=2, col="black")
lines(2:maxpat, quant3hi, lty=2, col="black")
min_feas_excess<-!is.na(scen3alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}

# Scenario 4 panel (middle-right)
par(new=TRUE,plt=c(0.6,0.95,0.45,0.65))
plot(2:maxpat, mean.inco4, type="l", col="black",xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 4", las=1, bty="L")
lines(2:maxpat, quant4low, lty=2, col="black")
lines(2:maxpat, quant4hi, lty=2, col="black")
min_feas_excess<-!is.na(scen4alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}

# Scenario 5 panel (bottom-left)
par(new=TRUE,plt=c(0.11,0.46,0.15,0.35))
plot(2:maxpat, mean.inco5, type="l", col="black", xlab="", ylab=expression(paste(alpha[n+1]^"min")), ylim=c(minalpha,maxalpha), main="Scenario 5", las=1, bty="L")
lines(2:maxpat, quant5low, lty=2, col="black")
lines(2:maxpat, quant5hi, lty=2, col="black")
min_feas_excess<-!is.na(scen5alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}
mtext("Patient", 1, cex=1, line=3.25, at=23)

# Scenario 6 panel (bottom-right)
par(new=TRUE,plt=c(0.6,0.95,0.15,0.35))
plot(2:maxpat, mean.inco6, type="l", col="black", xlab="", ylab="", ylim=c(minalpha,maxalpha), main="Scenario 6", las=1, bty="L")
lines(2:maxpat, quant6low, lty=2, col="black")
lines(2:maxpat, quant6hi, lty=2, col="black")
min_feas_excess<-!is.na(scen6alphas)
min_feas_excess_n<-apply(min_feas_excess[,cuts - 1],2,sum)
for(i in 1:length(cuts)){
  mtext(paste0("(N = ",min_feas_excess_n[i],")"), 1, cex=0.75, line=2, at=cuts[i])
}
mtext("Patient", 1, cex=1, line=3.25, at=23)

# Shared legend strip below the six panels
par(new=TRUE,plt=c(0.11,0.95,0.02,0.08))
plot(0,0,bty="n", xaxt="n",yaxt="n", type="n", xlim=c(-1,1),ylim=c(-1,1), xlab="", ylab="")
legend(-0.7,0.3,legend=c("Mean", "95% Credible Interval"), lty=c(1,2), horiz=TRUE)

###########################################################
# Code to produce results similar to those in for Table 1 #
###########################################################

############
# Function #
############

# Re-run the incoherence analysis along a single (or several stacked)
# completed dose/outcome sequence(s): for each patient j, refit pretending
# patient j+1 had a DLT and record the dose each feasibility bound would
# recommend (outmat) plus the smallest bound index at which the actually
# administered dose would still be given (incomat).
# NOTE(review): `ntrials` is derived from the global `maxpat`, and `model1`
# must exist in the workspace -- confirm both before reuse.
ewoc.seq.fn<-function(Xmin, Xmax, theta, N, y.vec, x.vec, minalpha, maxalpha, n.chains, n.iter, n.burnin, n.thin){
  ntrials<-length(y.vec)/maxpat
  outmat<-array(NA, dim = c(ntrials, N-1, length(seq(minalpha, maxalpha, by=0.01))))
  incomat<-matrix(NA, nrow=ntrials, ncol=N-1)
  for(i in 1:ntrials){
    for(j in 1:(N-1)){
      if(ntrials == 1){
        yvec_trial <- y.vec
        xvec_trial <- x.vec
      }else{
        yvec_trial <- y.vec[i,]
        xvec_trial <- x.vec[i,]
      }
      data1<-list(Xmin=Xmin, Xmax=Xmax, theta=theta, K=j+1, Y=c(yvec_trial[1:j],1,NA), X=c(xvec_trial[1:(j+1)],NA))
      brugs<-bugs.fit(model = model1, data=data1, n.chains = n.chains, params=c("gamma"), n.burnin = n.burnin, n.iter = n.iter, n.thin = n.thin, program = "openbugs", seed = 1:n.chains)
      # array of Ntrials by Npatients by different feasibility bounds to choose next dose after "possibles" - used to determine incomat
      outmat[i,j,]<-round(quantile(unlist(lapply(1:n.chains,function(z) brugs[[z]][,1])),seq(minalpha,maxalpha,by=0.01)),0)
      # matrix of Ntrials by Npatients - entries are smallest increment at which incoherence occurs - index of seq(minalpha,maxalpha,by=0.01)
      incomat[i,j]<-ifelse(max(outmat[i,j,])<xvec_trial[j+1],NA,min(which(outmat[i,j,]>=xvec_trial[j+1])))
    }
  }
  return(list(incomat=incomat, outmat=outmat))
}

############
# Run Code #
############

pick_trial<-7
# Choose "pick_trial" from, for example, previous Scenario 1 simulations...
foo<-ewoc.seq.fn(Xmin = min(dose.FU), Xmax = max(dose.FU), theta, N=maxpat, scen1$y.mat[pick_trial, ], scen1$x.mat[pick_trial, ], minalpha=minalpha, maxalpha=maxalpha, n.chains = n.chains, n.thin = n.thin, n.burnin = n.burnin, n.iter = n.iter)

# Table rows: administered dose, observed DLT outcome, and minimum
# feasibility bound under which each escalation would still be made
final_out<-rbind(scen1$x.mat[pick_trial,], scen1$y.mat[pick_trial,], c(NA, alphas[foo$incomat[1, ]]))
row.names(final_out)<-c("Dose", "DLT Outcome", "alpha_min")
final_out

#######
# END #
#######
testlist <- list(b = numeric(0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, -3.80269803056297e+245, -6.75805164691332e-243, -2.08465040737023e+101, -7.36599172844076e+192, -1.10525061476907e-126), p2 = c(3.2667689008931e+187, -2.80363318787251e-287, 3.49300992181426e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(metacoder:::intersect_line_rectangle,testlist) str(result)
/metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615768443-test.R
permissive
akhikolla/updatedatatype-list3
R
false
false
880
r
testlist <- list(b = numeric(0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, -3.80269803056297e+245, -6.75805164691332e-243, -2.08465040737023e+101, -7.36599172844076e+192, -1.10525061476907e-126), p2 = c(3.2667689008931e+187, -2.80363318787251e-287, 3.49300992181426e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(metacoder:::intersect_line_rectangle,testlist) str(result)
#' Author: Ted Kwartler
#' Date: 6-22-2018
#' Purpose: RSI Example
#'

# Opts: suppress scientific notation when printing prices
options(scipen = 999)

# Libs
library(TTR)
library(quantmod)
library(PerformanceAnalytics)
library(dygraphs)
library(htmltools)

# Get AMZN prices (downloaded via quantmod's default source),
# restricted to Jan-Jun 2018
getSymbols("AMZN")
AMZN <- AMZN['2018-01-01/2018-06-22']

# Calc a 14-period RSI on the closing price
AMZNrsi <- RSI(AMZN$AMZN.Close,
               maType = "SMA", # Usually EMA
               n = 14)
tail(AMZNrsi, 10)

# Visualize price and RSI as linked interactive panels; the 30/70 limits
# mark the conventional oversold/overbought thresholds
browsable(
  tagList(
    dygraph(AMZN$AMZN.Close, group = "Price", height = 200, width = "100%"),
    dygraph(AMZNrsi, group = "Price", height = 200, width = "100%") %>%
      dyLimit(30, label = 'OverSold') %>%
      dyLimit(70, label = 'OverBought') %>%
      dyRangeSelector()
  )
)

# One more ticker: Hasbro
getSymbols("HAS")

# Calc RSI
HASrsi <- RSI(HAS$HAS.Close,
              maType = "SMA", # Usually EMA; not covered
              n = 14)

# Visualize
browsable(
  tagList(
    dygraph(HAS$HAS.Close, group = "Price", height = 200, width = "100%"),
    dygraph(HASrsi, group = "Price", height = 200, width = "100%") %>%
      dyLimit(30, label = 'OverSold') %>%
      dyLimit(70, label = 'OverBought') %>%
      dyRangeSelector()
  )
)

# Now compound indicators: MACD combined with RSI
HASmacd <- MACD(HAS$HAS.Close,
                nFast = 12,
                nSlow = 26,
                nSig = 9,
                maType = "SMA",  # Usually EMA; not covered
                percent = TRUE)  # fixed: was `T`, which is reassignable

# Trade rule: be long (1) the day AFTER the RSI sits in its neutral band
# (30-70) AND the MACD line is above its signal line; Lag() prevents
# look-ahead bias
HAScompoundRule <- Lag(ifelse(HASrsi$rsi < 70 &
                                HASrsi$rsi > 30 &
                                HASmacd$macd > HASmacd$signal, 1, 0))

# Daily returns earned only while the rule is "in the market"
ret <- ROC(Cl(HAS)) * HAScompoundRule
charts.PerformanceSummary(ret)

# End
/scripts/1_TTR_F.R
no_license
EvanShui/ODSC_west_2018
R
false
false
1,773
r
#' Author: Ted Kwartler #' Date: 6-22-2018 #' Purpose: RSI Example #' # Opts options(scipen=999) # Libs library(TTR) library(quantmod) library(PerformanceAnalytics) library(dygraphs) library(htmltools) # Get AMZN getSymbols("AMZN") AMZN <- AMZN['2018-01-01/2018-06-22'] # Calc RSI AMZNrsi <-RSI(AMZN$AMZN.Close, maType="SMA", #Usually EMA n =14) tail(AMZNrsi,10) # Visualize browsable( tagList( dygraph(AMZN$AMZN.Close, group = "Price", height = 200, width = "100%"), dygraph(AMZNrsi, group = "Price", height = 200, width = "100%") %>% dyLimit(30, label = 'OverSold') %>% dyLimit(70, label = 'OverBought') %>% dyRangeSelector() ) ) # One more getSymbols("HAS") # Calc RSI HASrsi <-RSI(HAS$HAS.Close, maType="SMA", #Usually EMA; not covered n =14) # Visualize browsable( tagList( dygraph(HAS$HAS.Close, group = "Price", height = 200, width = "100%"), dygraph(HASrsi, group = "Price", height = 200, width = "100%") %>% dyLimit(30, label = 'OverSold') %>% dyLimit(70, label = 'OverBought') %>% dyRangeSelector() ) ) # Now compound indicators HASmacd <- MACD(HAS$HAS.Close, nFast = 12, nSlow = 26, nSig = 9, maType="SMA", #Usually EMA; not covered percent = T) HAScompoundRule <- Lag(ifelse(HASrsi$rsi < 70 & HASrsi$rsi > 30 & HASmacd$macd > HASmacd$signal,1, 0)) ret <- ROC(Cl(HAS)) * HAScompoundRule charts.PerformanceSummary(ret) # End
## Code to prepare the `spot_price_uk_2018` package dataset.

# Raw half-hourly UK spot prices, October 2018 - September 2019.
csv_path <- file.path("data-raw", "spot_price_uk_102018-092019.csv")

# Explicit column specification: parse the timestamp, keep the price,
# and drop the settlement-period column entirely.
spec <- readr::cols(
  `Date and Time` = readr::col_datetime(format = "%Y/%m/%d %H:%M"),
  Period = readr::col_skip(),
  Price = readr::col_double()
)

raw_prices <- readr::read_csv(
  csv_path,
  col_types = spec,
  locale = readr::locale(tz = "Europe/London")
)

# Standardise the column names before exporting the dataset.
spot_price_uk_2018 <- dplyr::rename(
  raw_prices,
  timestamp = `Date and Time`,
  price = Price
)

usethis::use_data(spot_price_uk_2018, overwrite = TRUE)
/data-raw/spot_price_uk_102018-092019.R
no_license
NickPTaylor/rbattery
R
false
false
635
r
## code to prepare `DATASET` dataset goes here file_name <- file.path('data-raw', 'spot_price_uk_102018-092019.csv') col_spec <- readr::cols( `Date and Time` = readr::col_datetime(format = "%Y/%m/%d %H:%M"), Period = readr::col_skip(), Price = readr::col_double() ) spot_price_uk_2018 <- readr::read_csv(file_name, col_types = col_spec, locale = readr::locale(tz = "Europe/London")) spot_price_uk_2018 <- dplyr::rename(spot_price_uk_2018, timestamp = `Date and Time`, price = Price) usethis::use_data(spot_price_uk_2018, overwrite = TRUE)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_access.R
\name{github_api}
\alias{github_api}
\title{github_api}
\usage{
github_api(path, token = github_pat(),
  user_agent = httr::user_agent("https://github.com/adam-gruer/ozunConfIssues"))
}
\arguments{
\item{path}{the path to a GitHub API v3 endpoint, excluding the root https://api.github.com}

\item{token}{a GitHub personal access token}

\item{user_agent}{the user agent sent with the request; defaults to an httr user agent pointing at the package's GitHub repository}
}
\value{
a list of class github_api containing 3 elements: content - parsed JSON data, path - the URL of the API endpoint the data was requested from, response - the complete response from the request
}
\description{
Make an authenticated GET request to the GitHub API v3 and return the parsed response.
}
\examples{
github_api("/repos/ropensci/ozunconf17/issues")
}
/man/github_api.Rd
permissive
adam-gruer/ozunConfIssues
R
false
true
746
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_access.R \name{github_api} \alias{github_api} \title{github_api} \usage{ github_api(path, token = github_pat(), user_agent = httr::user_agent("https://github.com/adam-gruer/ozunConfIssues")) } \arguments{ \item{path}{the path to a github API v3 endpoint , excluding the root https://api.github.com} \item{token}{a github personal access token} } \value{ a list of class github_api containing 3 elements: content - parsed json data, path - the url of the api endpoint the data was requested from, response - the complete response from the request } \description{ github_api } \examples{ github_api("/repos/ropensci/ozunconf17/issues") }
#' Build the outcome and exposure model formulas for the likelihood-ratio test.
#'
#' @param rdata list describing the outcome-model data: `vd` (outcome variable
#'   name) plus optional covariate-name vectors `vx` and `vy`.
#'   NOTE(review): the offset term references a `pred.expo` column -- assumed
#'   to exist in the model data at fit time; confirm against callers.
#' @param edata list describing the exposure-model data: `vz` (dependent
#'   variable), `vd`, `vg` (always-included covariates) plus optional
#'   covariate-name vectors `vx` and `vh`.
#' @param bet numeric coefficient fixed inside the offset term
#'   `offset(I(bet * pred.expo))` of the outcome formula.
#' @return a list with two formulas: `rform` (outcome model with offset) and
#'   `eform` (exposure model).
create.LRT.formula <- function(rdata, edata, bet){

  # Append " + v1+v2+..." to a formula string when the term set is non-empty;
  # replaces four duplicated if-blocks in the original implementation.
  add.terms <- function(form, terms){
    if(length(terms) > 0){
      form <- paste(form, '+', paste(terms, collapse = '+'))
    }
    form
  }

  # Outcome model: vd ~ offset(I(bet * pred.expo)) [+ vx terms] [+ vy terms]
  rform <- paste(rdata$vd, '~ offset(I(', bet, '* pred.expo))')
  rform <- add.terms(rform, rdata$vx)
  rform <- add.terms(rform, rdata$vy)
  rform <- as.formula(rform)

  # Exposure model: vz ~ vd + vg terms [+ vx terms] [+ vh terms]
  eform <- paste(edata$vz, '~', edata$vd, '+', paste(edata$vg, collapse = '+'))
  eform <- add.terms(eform, edata$vx)
  eform <- add.terms(eform, edata$vh)
  eform <- as.formula(eform)

  list(rform = rform, eform = eform)
}
/R/create.LRT.formula.R
permissive
yadevi/MRCC
R
false
false
682
r
create.LRT.formula <- function(rdata, edata, bet){ rform <- paste(rdata$vd, '~ offset(I(', bet, '* pred.expo))') if(length(rdata$vx) > 0){ rform <- paste(rform, '+', paste(rdata$vx, collapse = '+')) } if(length(rdata$vy) > 0){ rform <- paste(rform, '+', paste(rdata$vy, collapse = '+')) } rform <- as.formula(rform) eform <- paste(edata$vz, '~', edata$vd, '+', paste(edata$vg, collapse = '+')) if(length(edata$vx) > 0){ eform <- paste(eform, '+', paste(edata$vx, collapse = '+')) } if(length(edata$vh) > 0){ eform <- paste(eform, '+', paste(edata$vh, collapse = '+')) } eform <- as.formula(eform) list(rform = rform, eform = eform) }
# Stage 3: latent profile analysis of DOSPERT factor scores.
# Loads Gaussian-mixture-model output produced by a Python pipeline (cluster
# means for the full sample and split-half subsamples A/B, classifications,
# classification probabilities, fit indices), retains clusters passing an
# enrichment/significance rule, and produces the enrichment, profile,
# classification and raw-DOSPERT plots plus a multinomial regression on
# profile membership.

sel_m <- "m5"  # selected CFA model
sel_i <- 7     # selected GMM solution (number of components)

# get estimated factor values
Fs <- read.csv(paste("../objects/stage3/cfa/", sel_m, "_pred.csv", sep=""), row.names=1)

# get cluster means (from GMMs) -- full sample plus split-half subsamples A and B
c_means <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_c_means.csv", sep=""))
names(c_means) <- names(Fs)
c_means_A <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/A/", sel_m, "_", sel_i, "_c_means.csv", sep=""))
names(c_means_A) <- names(Fs)
c_means_B <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/B/", sel_m, "_", sel_i, "_c_means.csv", sep=""))
names(c_means_B) <- names(Fs)

# get some fit indices
c_info <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_c_info.csv", sep=""))

# only retain clusters that exceed certain thresholds
# (p-value <= .01 AND enrichment ratio >= enrich_ratio)
enrich_ratio <- 1.25
c_info$enrich >= enrich_ratio
results <- cbind(c_info$pvals <= 0.01, c_info$enrich >= enrich_ratio)
results <- cbind(results, (results[,1] == T) & (results[,2] == T))
c_ok <- which(results[,3] == T)   # retained clusters
c_del <- which(results[,3] != T)  # discarded clusters

# get participants' classifications (from GMMs); +1 converts 0-based Python
# cluster indices to 1-based R indices
part_class <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_part_class.csv", sep=""), header=F) + 1
names(part_class) <- "cluster"
table(part_class)
#plot(as.numeric(table(part_class)), c_info$weights, las=1)

# get classification probabilities; part_unc = probability of each
# participant's own assigned cluster
part_p <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_part_p.csv", sep=""), header=T)
part_unc <- part_p[cbind(1:nrow(part_p),as.numeric(unlist(part_class)))]

# plot enrichment analysis (enrichment vs. log10 p-value, one point per cluster)
pdf(file=paste("../output/stage3/lpa/", sel_m, "_enrichment.pdf", sep=""), height=5)
p_enrich <- c_info$enrich
p_pvals <- c_info$pvals
# floor exact-zero p-values before taking logs
p_pvals[which(c_info$pvals == 0)] <- 0.000000001
p_pvals <- log10(p_pvals)
plot(p_enrich, p_pvals, las=1, pch=16, col="cyan", cex=2.5, xlab="Enrichment", ylab="log10(p-value)", xlim=c(0, 12), ylim=c(-10, 0))
# hatched region marks the retention thresholds
rect(enrich_ratio, -20, 20, log10(0.01), density=5, border=0, col="lightgrey")
text(p_enrich, p_pvals, paste("C", 1:nrow(results), sep=""), cex=.6)
abline(v=1, lty=3, col="darkgrey")
abline(v=enrich_ratio, lty=3, col="darkgrey")
abline(h=log10(0.01), lty=3, col="darkgrey")
text(x=1, y=-5, "Enrichement = 1", srt=90, cex=.8, font=2)
text(x=enrich_ratio, y=-5, paste("Enrichement =", enrich_ratio), srt=90, cex=.8, font=2)
text(x=6, y=log10(0.01), "p-value = 0.01", cex=.8, font=2)
dev.off()

# prepare data for plotting the identified profiles (factors x clusters,
# columns ordered by profile "elevation" = column sums)
pdat <- t(c_means[c_ok,])
colnames(pdat) <- paste("C", c_ok, sep="")
pdat <- pdat[,order(colSums(pdat))]
# NOTE(review): subsample cluster indices c(2,5,4,7) / c(2,1,7,6) are
# hard-coded mappings onto the retained full-sample clusters -- confirm they
# still match if sel_m/sel_i change
pdat_A <- t(c_means_A[c(2,5,4,7),])
colnames(pdat_A) <- paste("C", c_ok, sep="")
pdat_A <- pdat_A[,order(colSums(pdat_A))]
pdat_B <- t(c_means_B[c(2,1,7,6),])
colnames(pdat_B) <- paste("C", c_ok, sep="")
pdat_B <- pdat_B[,order(colSums(pdat_B))]
pdat_pos <- pdat
pdat_pos[which(pdat_pos < 0)] <- 0
pdat_neg <- pdat
pdat_neg[which(pdat_neg > 0)] <- 0
prof_lab <- paste("Profile ", c("I","II","III","IV"), " (", colnames(pdat), ")", sep="")

fullsample <- read.csv("../data/fullsample.csv", row.names=1)
fullsample$class <- as.factor(part_class[,1])
# participants in discarded clusters become NA (dropped below)
levels(fullsample$class)[is.element(levels(fullsample$class), c_del)] <- NA
print(table(fullsample$class))

# add original dospert domain means (for separate plot); domain prefixes are
# derived from the "...R_1" item column names
domains <- colnames(fullsample)[grepl("R_1", colnames(fullsample))]
domains <- substr(domains, 1, 4)
fullsample <- cbind(fullsample, t(apply(fullsample, 1, function(x) { sapply(domains, function(d) { as.numeric(mean(as.numeric(x[grepl(d, names(x))]))) }) })) )
dospert_cmeans <- NULL
for (d in domains) {
  tmp <- rbind(tapply(fullsample[,d], list(fullsample$class), mean))
  rownames(tmp) <- d
  dospert_cmeans <- rbind(dospert_cmeans, tmp)
}
# NOTE(review): cluster-column order c("1","3","5","2") and domain-row order
# are hard-coded to match the profile ordering above -- confirm on rerun
dospert_cmeans <- dospert_cmeans[,c("1", "3", "5", "2")]
dospert_cmeans <- dospert_cmeans[c("socR", "recR", "finR", "ethR", "heaR"),]

# Choose whether to rescale Fs to DOSPERT scale using raw domain means
rescale <- F
if (rescale == T) {
  pdat["M5r",] <- pdat["M5r",] + mean(fullsample$heaR) - mean(Fs$M5r)
  pdat["M5soc",] <- pdat["M5soc",] + mean(fullsample$socR) - mean(Fs$M5soc)
  pdat["M5rec",] <- pdat["M5rec",] + mean(fullsample$recR) - mean(Fs$M5rec)
  pdat["M5gam",] <- pdat["M5gam",] + mean(fullsample$finR) - mean(Fs$M5gam)
  pdat["M5inv",] <- pdat["M5inv",] + mean(fullsample$finR) - mean(Fs$M5inv)
  pdat["M5eth",] <- pdat["M5eth",] + mean(fullsample$ethR) - mean(Fs$M5eth)
  pdat["M5hea",] <- pdat["M5hea",] + mean(fullsample$heaR) - mean(Fs$M5hea)
}

# get some aggregate indicator variables (for different profiles):
# mean age and percentage female per retained cluster
mAge <- rbind(round(tapply(fullsample$Age, fullsample$class, mean), 1))
mAge[] <- paste(mAge, "y", sep="")
colnames(mAge) <- paste("C", colnames(mAge), sep="")
pFem <- tapply(fullsample$Gender, fullsample$class, table)
pFem <- data.frame(matrix(unlist(pFem), nrow=length(pFem), byrow=T))
pFem <- paste(round(apply(pFem, 1, prop.table)[1,] * 100, 0), "%", sep="")
names(pFem) <- colnames(mAge)
class_ok <- which(!is.na(fullsample$class))
class_nok <- which(is.na(fullsample$class))
fullsample <- subset(fullsample, !is.na(class))
print(prop.table(c(length(class_ok), length(class_nok))))

# get MEANs and SDs of factor scores for separate clusters (and compare with model-inferred means)
emp_means <- t(aggregate(Fs[class_ok,], by=list(fullsample$class), mean))
emp_sds <- t(aggregate(Fs[class_ok,], by=list(fullsample$class), sd))
colnames(emp_means) <- paste("C", emp_means[1,], sep="")
colnames(emp_sds) <- paste("C", emp_sds[1,], sep="")
emp_means <- emp_means[-1,]
emp_sds <- emp_sds[-1,]
emp_means <- emp_means[,colnames(pdat)]
emp_sds <- emp_sds[,colnames(pdat)]
emp_means <- apply(emp_means, c(1,2), as.numeric)
emp_sds <- apply(emp_sds, c(1,2), as.numeric)
emp_sem <- emp_sds / sqrt(nrow(fullsample))
pdat
emp_means

# set largest class as reference
fullsample$class <- relevel(fullsample$class, ref = order(table(fullsample$class), decreasing=T)[1])

# Predictors for the multinomial regression
fullsample$Age <- scale(fullsample$Age)
fullsample$Gender <- as.factor(fullsample$Gender)
# Marital_Status codes 2/4 -> partnered, 1/3/5/6 -> single
fullsample$Partner <- NA
fullsample$Partner[is.element(fullsample$Marital_Status, c(2,4))] <- "Yes"
fullsample$Partner[is.element(fullsample$Marital_Status, c(1,3,5,6))] <- "No"
fullsample$Partner <- as.factor(fullsample$Partner)
fullsample$Political <- relevel(fullsample$Political, ref = "Ind.")

# run multinomial regression with predictors and DV cluster membership;
# coefficients reported as odds ratios, "*" marks p < .05 (Wald z-test)
library(nnet)
mymod <- multinom(class ~ Age + Gender + Partner + Children + Education + Income + Political, data=fullsample)
cs <- t(round(coef(mymod), 2))
cs <- round(exp(cs), 2)
z <- summary(mymod)$coefficients/summary(mymod)$standard.errors
ps <- (1 - pnorm(abs(z), 0, 1)) * 2
ps <- t(ps)
round(ps, 2)
sign_ind <- which(ps < .05)
cs[sign_ind] <- paste(cs[sign_ind], "*", sep="")
cs <- cs[-which(rownames(cs) == "(Intercept)"),]
colnames(cs) <- paste("C", colnames(cs), sep="")
# the reference cluster has no coefficients; add a column of 1s (odds ratio 1)
missing <- colnames(pdat)[which(!is.element(colnames(pdat), colnames(cs)))]
cs <- cbind(cs, 1)
colnames(cs)[ncol(cs)] <- missing
cs <- cs[,match(colnames(pdat), colnames(cs))]

# plot cluster profiles (top panel: bars per factor; bottom panel: table of
# descriptives and regression odds ratios)
pdf(file=paste("../output/stage3/lpa/", sel_m, ".pdf", sep=""), width=10, height=4)
layout(matrix(c(1,2), ncol=1))
par(mar=c(1,4,2,0))
ydim <- ceiling(max(colSums(abs(t(c_means)))))
#cols = c("red", colorRampPalette(c("aquamarine1", "blue4"))(m$d-1))
library(viridis)
p_cols <- c("red", plasma(ncol(c_means)-1, begin=.10, end=.95))
#p_dens <- rep(c(NA, 35), 99)[1:length(p_cols)]
#p_angl <- rep(c(0, 90, 0, 45), 99)[1:length(p_cols)]
p_dens <- c(NA, 35, NA, NA, 35, NA)
p_angl <- c(0, 90, 0, 0, 45, 0)
# b <- barplot(pdat_pos, xlim=c(0, ncol(pdat)+1.5), ylim=c(-ydim,ydim), border="white", las=1, col=p_cols, density=p_dens, angle=p_angl, ylab="Mean factor scores")#, names=paste("C", 1:ncol(pdat), sep=""))
# barplot(pdat_neg, add=T, border="white", col=p_cols, density=p_dens, angle=p_angl, yaxt="n", names=rep("", ncol(pdat)))
if (rescale == T) ylim <- c(1,7) else ylim=c(-1.5,1.5)
plot(1, xlim=c(.5, ncol(pdat)+.5), ylim=ylim, type="n", las=1, xaxt="n", xlab=NA, ylab="Factor score", frame=F)
abline(h=4, lty=3)
#axis(1, at=1:ncol(pdat), colnames(pdat))
#text(x=1:ncol(pdat), y=-1.75, paste("Profile ", c("A","B","C","D"), " (", colnames(pdat), ")", sep=""), xpd=T)
text(x=1:ncol(pdat), y=-1.75, prof_lab, xpd=T)
abline(h=0, lty=3)
for (i in 1:ncol(pdat)) {
  shift <- seq(i-.25, i+.25, length.out=c(nrow(pdat)))
  rect(i-.4, ylim[1], i+.4, ylim[2], col=gray(.95), border=0)
  vals <- pdat[,i]
  for (j in 1:length(vals)) {
    lines(x=rep(shift[j], 2), y=c(0, vals[j]), col=p_cols[j], lwd=14, lend=1)
    # add estimated cluster means for both subsamples
    lines(x=rep(shift[j]-0.01, 2), y=c(0, pdat_A[j,i]), col="lightgrey", lwd=2, lend=1)
    lines(x=rep(shift[j]+0.01, 2), y=c(0, pdat_B[j,i]), col="lightgrey", lwd=2, lend=1)
    # add empirical means
    lines(x=c(shift[j]-.025, shift[j]+.025), y=rep(emp_means[j,i], 2), col="black", lwd=1, lend=1)
    # add +/- 2 SEM
    lines(x=rep(shift[j], 2), y=c((emp_means - 2*emp_sem)[j,i], (emp_means + 2*emp_sem)[j,i]), col="black", lwd=3, lend=1)
  }
}
# human-readable factor labels for the legend
lab <- toupper(gsub(toupper(sel_m), "", row.names(t(c_means))))
lab2 <- lab
lab2 <- gsub("\\<R\\>", "General", lab2)
lab2 <- gsub("\\<SOC\\>", "Social", lab2)
lab2 <- gsub("\\<REC\\>", "Recreational", lab2)
lab2 <- gsub("\\<GAM\\>", "Gambling", lab2)
lab2 <- gsub("\\<INV\\>", "Investment", lab2)
lab2 <- gsub("\\<ETH\\>", "Ethical", lab2)
lab2 <- gsub("\\<HEA\\>", "Health", lab2)
l <- legend("top", lab2, box.lty=0, horiz=T, y.intersp=-3, xpd=T, bg=NA)
# draw colored swatches next to the legend labels
for (j in 1:length(p_cols)) {
  x <- l$text$x[j] - 0.1
  y <- l$text$y[j]
  s1 <- 0.035
  s2 <- 0.14
  rect(x-s1+.05, y-s2, x+s1+.04, y+s2, col=p_cols[j], xpd=T, border=0)
}
abline(h=0)
# bottom panel: text table of Ns, demographics and odds ratios per profile
par(mar=c(0,4,1,0))
plot(pdat, type="n", xlim=c(0.5, ncol(pdat)+.5), ylim=c(0,nrow(cs)+3), xaxt="n", yaxt="n", xlab="", ylab="", frame=F)
Ns <- table(fullsample$class)
names(Ns) <- paste("C", names(Ns), sep="")
Ns <- Ns[colnames(cs)]
#Ns <- paste("N=", Ns, sep="")
mAge <- mAge[,colnames(cs)]
pFem <- pFem[colnames(cs)]
cs <- rbind(Ns, pFem, mAge, rep("", ncol(cs)), cs)
rownames(cs)[which(rownames(cs) == "Ns")] <- "N"
rownames(cs)[which(rownames(cs) == "pFem")] <- "Female"
rownames(cs)[which(rownames(cs) == "mAge")] <- "Mean Age"
rownames(cs)[which(rownames(cs) == "Age")] <- "Age (SD)"
rownames(cs)[which(rownames(cs) == "Gender2")] <- "Gender (male)"
rownames(cs)[which(rownames(cs) == "PartnerYes")] <- "With partner"
rownames(cs)[which(rownames(cs) == "PoliticalDem.")] <- "Democrat"
rownames(cs)[which(rownames(cs) == "PoliticalRep.")] <- "Republican"
text(cs, x=matrix(1:ncol(pdat), nrow=nrow(cs), ncol=ncol(pdat), byrow=T), y=matrix(nrow(cs):1, nrow=nrow(cs), ncol=ncol(cs), byrow=F), cex=.8, xpd=T)
#abline(v=b)
text(rownames(cs), x=0, y=nrow(cs):1, xpd=T, pos=4, cex=.8)
dev.off()

# attach factor scores and classification uncertainty to the sample
fullsample <- cbind(fullsample, Fs[row.names(fullsample),])
fullsample <- cbind(fullsample, unc=part_unc[as.numeric(row.names(fullsample))])
levels(fullsample$class) <- 1:length(levels(fullsample$class))
library(scales)
# point colors by cluster, alpha-weighted by classification probability
cols <- as.numeric(fullsample$class) + 1
cols <- alpha(cols, fullsample$unc)

# plot classification (scatterplot matrix of factor scores)
pdf(file=paste("../output/stage3/lpa/", sel_m, "_classification.pdf", sep=""), width=10, height=10)
p_dat <- fullsample[,grepl("M5", colnames(fullsample))]
names(p_dat) <- toupper(gsub(toupper(sel_m), "", names(p_dat)))
plot(p_dat, col=cols, pch=18, cex=.5)
dev.off()

# plot classification probabilities: one panel per retained cluster,
# participants sorted by assigned cluster
pdf(file=paste("../output/stage3/lpa/", sel_m, "_classprobs.pdf", sep=""), width=10, height=6)
par(mfrow=c(2,2), mar=c(4,4,2,1), mgp=c(2.25,.75,0))
p_cols2 <- inferno(n = length(unique(unlist(part_class))), begin=.2)
p_ok <- part_p
p_nok <- part_p
# NOTE(review): iteration order c_ok[c(1,3,4,2)] is hard-coded to match the
# profile ordering; confirm if c_ok changes
for (c in c_ok[c(1,3,4,2)]) {
  p_cols2 <- rep("cyan", length(p_cols2))
  p_cols2[c] <- "green4"
  # split probabilities into "assigned to c" vs. "assigned elsewhere"
  ind <- which(part_class != c)
  p_ok[ind, c] <- NA
  p_nok[-ind, c] <- NA
  p_dat <- data.frame(p = part_p[,c], c = part_class)
  #p_dat <- p_dat[order(p_dat$p, decreasing=T),]
  p_dat <- p_dat[order(p_dat$c),]
  p_dat$col <- p_dat$col <- p_cols2[p_dat$c]
  p_dat$ind <- 1:nrow(p_dat)
  #barplot(p_dat$p, ylim=c(0, 1), col=p_dat$c, las=1, border=0)
  current_title <- prof_lab[grep(c, prof_lab)]
  current_title <- gsub("P", "Probability of being classified to p", current_title)
  plot(1, type="n", xlim=c(1,3200), ylim=c(-0.1,1), las=1, xlab="Participants", ylab="", main=current_title, frame=0, xaxt="n")
  for (x in 1:nrow(p_dat)) {
    lines(x=c(x,x), y=c(0,p_dat$p[x]), col=p_dat$col[x], lwd=.0001)
  }
  xs <- seq(1, 3200, length.out=5)
  axis(1, at=xs, floor(xs))
  text("Classification probability", srt=90, x=-550, y=.5, xpd=T)
  # bracket annotations marking each cluster's participant range
  ranges <- tapply(p_dat$ind, list(p_dat$cluster), range)
  for (j in 1:length(ranges)) {
    range <- ranges[[j]]
    if (j == c) col2 = "green4" else col2 = "darkgrey"
    lines(x=c(range[1], range[1]), y=c(-0.1,-0.02), xpd=T, col=col2)
    lines(x=c(range[2], range[2]), y=c(-0.1,-0.02), xpd=T, col=col2)
    lines(x=c(range[1], range[2]), y=c(-0.1,-0.1), xpd=T, col=col2)
    #rect(range[1], -.1, range[2], 1.02, col=NA, border="grey", xpd=T)
    text(paste("C", names(ranges)[j], sep=""), x=mean(range), y=-.05, cex=.6, col=col2)
  }
}
dev.off()

# print p's with which participants were classified to "their" cluster
print(round(apply(p_ok, 2, mean, na.rm=T), 2))
# print p with which participants were classified to any other cluster
print(round(mean(unlist(p_nok), na.rm=T), 2))

# append an illustrative "EX" example profile before the per-profile plots
pdat <- cbind(pdat, data.frame("EX" = c(.5, .25, 0.05, -.9, -1, .5, 1.1)))

# plot profiles seperately (one PNG per profile column, incl. "EX")
for (i in 1:ncol(pdat)) {
  c <- colnames(pdat)[i]
  png(file=paste("../output/stage3/lpa/", sel_m, "_", c, ".png", sep=""), width=3000, height=900, res=300)
  par(mar=c(1,4,1,1))
  plot(1, xlim=c(.75, 1.25), ylim=ylim, type="n", las=1, xaxt="n", xlab=NA, ylab="", yaxt="n", frame=F)
  #axis(2, labels=F)
  rect(.75, ylim[1], 1.25, ylim[2], col=gray(.95), border=0)
  shift <- seq(1-.2, 1+.2, length.out=c(nrow(pdat)))
  arrows(x0=.73, y0=0.3, y1=1.4, xpd=T)
  arrows(x0=.73, y0=-0.3, y1=-1.4, xpd=T)
  text(0.7, .8, srt=90, "more\nrisk-seeking", xpd=T, cex=.9)
  text(0.7, -.8, srt=90, "more\nrisk-averse", xpd=T, cex=.9)
  vals <- pdat[,i]
  for (j in 1:length(vals)) {
    lines(x=rep(shift[j], 2), y=c(0, vals[j]), col=p_cols[j], lwd=60, lend=1)
    text(shift[j], 1.5, xpd=T, lab2[j], cex=1, pos=3)
  }
  #lines(c(.69, 1.25), c(0, 0), xpd=T)
  lines(c(.75, 1.25), c(0, 0), xpd=T)
  text(.73, 0, "Average", xpd=T)
  dev.off()
}

# plot "original" DOSPERT raw scores (for comparisons)
pdf(file=paste("../output/stage3/lpa/", sel_m, "_DOSPERT_raw.pdf", sep=""), width=10, height=4)
#p_cols3 <- p_cols
#p_cols3 <- p_cols3[-1]
#p_cols3 <- p_cols3[-3]
p_cols3 <- cividis(n=nrow(dospert_cmeans))
b <- barplot(dospert_cmeans, beside=T, ylim=c(0,6), yaxt="n", col=p_cols3, border="white", ylab="Average DOSPERT-Score", names=prof_lab)
# axis relabeled 1-7 because scores were shifted to start at 0 for plotting
axis(2, at=0:6, 1:7, las=1)
abline(h=3, lty=3)
legend(x=12.5, xjust=0.5, y=7.5, xpd=T, horiz=T, c("Social", "Recreational", "Financial", "Ethical", "Health"), box.lwd=0, pch=15, col=p_cols3)
dev.off()
/stage3_lpa/py_analyze_final.R
no_license
renatofrey/dospert_osf
R
false
false
15,281
r
sel_m <- "m5" sel_i <- 7 # get estimated factor values Fs <- read.csv(paste("../objects/stage3/cfa/", sel_m, "_pred.csv", sep=""), row.names=1) # get cluster means (from GMMs) c_means <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_c_means.csv", sep="")) names(c_means) <- names(Fs) c_means_A <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/A/", sel_m, "_", sel_i, "_c_means.csv", sep="")) names(c_means_A) <- names(Fs) c_means_B <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/B/", sel_m, "_", sel_i, "_c_means.csv", sep="")) names(c_means_B) <- names(Fs) # get some fit indices c_info <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_c_info.csv", sep="")) # only retain clusters that exceed certain thresholds enrich_ratio <- 1.25 c_info$enrich >= enrich_ratio results <- cbind(c_info$pvals <= 0.01, c_info$enrich >= enrich_ratio) results <- cbind(results, (results[,1] == T) & (results[,2] == T)) c_ok <- which(results[,3] == T) c_del <- which(results[,3] != T) # get participants' classifications (from GMMs) part_class <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_part_class.csv", sep=""), header=F) + 1 names(part_class) <- "cluster" table(part_class) #plot(as.numeric(table(part_class)), c_info$weights, las=1) # get classification probabilities part_p <- read.csv(paste("../objects/stage3/lpa/py_gmm_final/full/", sel_m, "_", sel_i, "_part_p.csv", sep=""), header=T) part_unc <- part_p[cbind(1:nrow(part_p),as.numeric(unlist(part_class)))] # plot enrichment analysis pdf(file=paste("../output/stage3/lpa/", sel_m, "_enrichment.pdf", sep=""), height=5) p_enrich <- c_info$enrich p_pvals <- c_info$pvals p_pvals[which(c_info$pvals == 0)] <- 0.000000001 p_pvals <- log10(p_pvals) plot(p_enrich, p_pvals, las=1, pch=16, col="cyan", cex=2.5, xlab="Enrichment", ylab="log10(p-value)", xlim=c(0, 12), ylim=c(-10, 0)) rect(enrich_ratio, -20, 20, log10(0.01), density=5, border=0, 
col="lightgrey") text(p_enrich, p_pvals, paste("C", 1:nrow(results), sep=""), cex=.6) abline(v=1, lty=3, col="darkgrey") abline(v=enrich_ratio, lty=3, col="darkgrey") abline(h=log10(0.01), lty=3, col="darkgrey") text(x=1, y=-5, "Enrichement = 1", srt=90, cex=.8, font=2) text(x=enrich_ratio, y=-5, paste("Enrichement =", enrich_ratio), srt=90, cex=.8, font=2) text(x=6, y=log10(0.01), "p-value = 0.01", cex=.8, font=2) dev.off() # prepare data for plotting the identified profiles pdat <- t(c_means[c_ok,]) colnames(pdat) <- paste("C", c_ok, sep="") pdat <- pdat[,order(colSums(pdat))] pdat_A <- t(c_means_A[c(2,5,4,7),]) colnames(pdat_A) <- paste("C", c_ok, sep="") pdat_A <- pdat_A[,order(colSums(pdat_A))] pdat_B <- t(c_means_B[c(2,1,7,6),]) colnames(pdat_B) <- paste("C", c_ok, sep="") pdat_B <- pdat_B[,order(colSums(pdat_B))] pdat_pos <- pdat pdat_pos[which(pdat_pos < 0)] <- 0 pdat_neg <- pdat pdat_neg[which(pdat_neg > 0)] <- 0 prof_lab <- paste("Profile ", c("I","II","III","IV"), " (", colnames(pdat), ")", sep="") fullsample <- read.csv("../data/fullsample.csv", row.names=1) fullsample$class <- as.factor(part_class[,1]) levels(fullsample$class)[is.element(levels(fullsample$class), c_del)] <- NA print(table(fullsample$class)) # add original dospert domain means (for separate plot) domains <- colnames(fullsample)[grepl("R_1", colnames(fullsample))] domains <- substr(domains, 1, 4) fullsample <- cbind(fullsample, t(apply(fullsample, 1, function(x) { sapply(domains, function(d) { as.numeric(mean(as.numeric(x[grepl(d, names(x))]))) }) })) ) dospert_cmeans <- NULL for (d in domains) { tmp <- rbind(tapply(fullsample[,d], list(fullsample$class), mean)) rownames(tmp) <- d dospert_cmeans <- rbind(dospert_cmeans, tmp) } dospert_cmeans <- dospert_cmeans[,c("1", "3", "5", "2")] dospert_cmeans <- dospert_cmeans[c("socR", "recR", "finR", "ethR", "heaR"),] # Choose whether to rescale Fs to DOSPERT scale using raw domain means rescale <- F if (rescale == T) { pdat["M5r",] <- 
pdat["M5r",] + mean(fullsample$heaR) - mean(Fs$M5r) pdat["M5soc",] <- pdat["M5soc",] + mean(fullsample$socR) - mean(Fs$M5soc) pdat["M5rec",] <- pdat["M5rec",] + mean(fullsample$recR) - mean(Fs$M5rec) pdat["M5gam",] <- pdat["M5gam",] + mean(fullsample$finR) - mean(Fs$M5gam) pdat["M5inv",] <- pdat["M5inv",] + mean(fullsample$finR) - mean(Fs$M5inv) pdat["M5eth",] <- pdat["M5eth",] + mean(fullsample$ethR) - mean(Fs$M5eth) pdat["M5hea",] <- pdat["M5hea",] + mean(fullsample$heaR) - mean(Fs$M5hea) } # get some aggregate indicator variables (for different profiles) mAge <- rbind(round(tapply(fullsample$Age, fullsample$class, mean), 1)) mAge[] <- paste(mAge, "y", sep="") colnames(mAge) <- paste("C", colnames(mAge), sep="") pFem <- tapply(fullsample$Gender, fullsample$class, table) pFem <- data.frame(matrix(unlist(pFem), nrow=length(pFem), byrow=T)) pFem <- paste(round(apply(pFem, 1, prop.table)[1,] * 100, 0), "%", sep="") names(pFem) <- colnames(mAge) class_ok <- which(!is.na(fullsample$class)) class_nok <- which(is.na(fullsample$class)) fullsample <- subset(fullsample, !is.na(class)) print(prop.table(c(length(class_ok), length(class_nok)))) # get MEANs and SDs of factor scores for separate clusters (and compare with model-inferred means) emp_means <- t(aggregate(Fs[class_ok,], by=list(fullsample$class), mean)) emp_sds <- t(aggregate(Fs[class_ok,], by=list(fullsample$class), sd)) colnames(emp_means) <- paste("C", emp_means[1,], sep="") colnames(emp_sds) <- paste("C", emp_sds[1,], sep="") emp_means <- emp_means[-1,] emp_sds <- emp_sds[-1,] emp_means <- emp_means[,colnames(pdat)] emp_sds <- emp_sds[,colnames(pdat)] emp_means <- apply(emp_means, c(1,2), as.numeric) emp_sds <- apply(emp_sds, c(1,2), as.numeric) emp_sem <- emp_sds / sqrt(nrow(fullsample)) pdat emp_means # set largest class as reference fullsample$class <- relevel(fullsample$class, ref = order(table(fullsample$class), decreasing=T)[1]) # Predictors fullsample$Age <- scale(fullsample$Age) fullsample$Gender <- 
as.factor(fullsample$Gender) fullsample$Partner <- NA fullsample$Partner[is.element(fullsample$Marital_Status, c(2,4))] <- "Yes" fullsample$Partner[is.element(fullsample$Marital_Status, c(1,3,5,6))] <- "No" fullsample$Partner <- as.factor(fullsample$Partner) fullsample$Political <- relevel(fullsample$Political, ref = "Ind.") # run multinomial regression with predictors and DV cluster membership library(nnet) mymod <- multinom(class ~ Age + Gender + Partner + Children + Education + Income + Political, data=fullsample) cs <- t(round(coef(mymod), 2)) cs <- round(exp(cs), 2) z <- summary(mymod)$coefficients/summary(mymod)$standard.errors ps <- (1 - pnorm(abs(z), 0, 1)) * 2 ps <- t(ps) round(ps, 2) sign_ind <- which(ps < .05) cs[sign_ind] <- paste(cs[sign_ind], "*", sep="") cs <- cs[-which(rownames(cs) == "(Intercept)"),] colnames(cs) <- paste("C", colnames(cs), sep="") missing <- colnames(pdat)[which(!is.element(colnames(pdat), colnames(cs)))] cs <- cbind(cs, 1) colnames(cs)[ncol(cs)] <- missing cs <- cs[,match(colnames(pdat), colnames(cs))] # plot cluster profiles pdf(file=paste("../output/stage3/lpa/", sel_m, ".pdf", sep=""), width=10, height=4) layout(matrix(c(1,2), ncol=1)) par(mar=c(1,4,2,0)) ydim <- ceiling(max(colSums(abs(t(c_means))))) #cols = c("red", colorRampPalette(c("aquamarine1", "blue4"))(m$d-1)) library(viridis) p_cols <- c("red", plasma(ncol(c_means)-1, begin=.10, end=.95)) #p_dens <- rep(c(NA, 35), 99)[1:length(p_cols)] #p_angl <- rep(c(0, 90, 0, 45), 99)[1:length(p_cols)] p_dens <- c(NA, 35, NA, NA, 35, NA) p_angl <- c(0, 90, 0, 0, 45, 0) # b <- barplot(pdat_pos, xlim=c(0, ncol(pdat)+1.5), ylim=c(-ydim,ydim), border="white", las=1, col=p_cols, density=p_dens, angle=p_angl, ylab="Mean factor scores")#, names=paste("C", 1:ncol(pdat), sep="")) # barplot(pdat_neg, add=T, border="white", col=p_cols, density=p_dens, angle=p_angl, yaxt="n", names=rep("", ncol(pdat))) if (rescale == T) ylim <- c(1,7) else ylim=c(-1.5,1.5) plot(1, xlim=c(.5, ncol(pdat)+.5), 
ylim=ylim, type="n", las=1, xaxt="n", xlab=NA, ylab="Factor score", frame=F) abline(h=4, lty=3) #axis(1, at=1:ncol(pdat), colnames(pdat)) #text(x=1:ncol(pdat), y=-1.75, paste("Profile ", c("A","B","C","D"), " (", colnames(pdat), ")", sep=""), xpd=T) text(x=1:ncol(pdat), y=-1.75, prof_lab, xpd=T) abline(h=0, lty=3) for (i in 1:ncol(pdat)) { shift <- seq(i-.25, i+.25, length.out=c(nrow(pdat))) rect(i-.4, ylim[1], i+.4, ylim[2], col=gray(.95), border=0) vals <- pdat[,i] for (j in 1:length(vals)) { lines(x=rep(shift[j], 2), y=c(0, vals[j]), col=p_cols[j], lwd=14, lend=1) # add estimated cluster means for both subsamples lines(x=rep(shift[j]-0.01, 2), y=c(0, pdat_A[j,i]), col="lightgrey", lwd=2, lend=1) lines(x=rep(shift[j]+0.01, 2), y=c(0, pdat_B[j,i]), col="lightgrey", lwd=2, lend=1) # add empirical means lines(x=c(shift[j]-.025, shift[j]+.025), y=rep(emp_means[j,i], 2), col="black", lwd=1, lend=1) # add +/- 2 SEM lines(x=rep(shift[j], 2), y=c((emp_means - 2*emp_sem)[j,i], (emp_means + 2*emp_sem)[j,i]), col="black", lwd=3, lend=1) } } lab <- toupper(gsub(toupper(sel_m), "", row.names(t(c_means)))) lab2 <- lab lab2 <- gsub("\\<R\\>", "General", lab2) lab2 <- gsub("\\<SOC\\>", "Social", lab2) lab2 <- gsub("\\<REC\\>", "Recreational", lab2) lab2 <- gsub("\\<GAM\\>", "Gambling", lab2) lab2 <- gsub("\\<INV\\>", "Investment", lab2) lab2 <- gsub("\\<ETH\\>", "Ethical", lab2) lab2 <- gsub("\\<HEA\\>", "Health", lab2) l <- legend("top", lab2, box.lty=0, horiz=T, y.intersp=-3, xpd=T, bg=NA) for (j in 1:length(p_cols)) { x <- l$text$x[j] - 0.1 y <- l$text$y[j] s1 <- 0.035 s2 <- 0.14 rect(x-s1+.05, y-s2, x+s1+.04, y+s2, col=p_cols[j], xpd=T, border=0) } abline(h=0) par(mar=c(0,4,1,0)) plot(pdat, type="n", xlim=c(0.5, ncol(pdat)+.5), ylim=c(0,nrow(cs)+3), xaxt="n", yaxt="n", xlab="", ylab="", frame=F) Ns <- table(fullsample$class) names(Ns) <- paste("C", names(Ns), sep="") Ns <- Ns[colnames(cs)] #Ns <- paste("N=", Ns, sep="") mAge <- mAge[,colnames(cs)] pFem <- pFem[colnames(cs)] 
cs <- rbind(Ns, pFem, mAge, rep("", ncol(cs)), cs) rownames(cs)[which(rownames(cs) == "Ns")] <- "N" rownames(cs)[which(rownames(cs) == "pFem")] <- "Female" rownames(cs)[which(rownames(cs) == "mAge")] <- "Mean Age" rownames(cs)[which(rownames(cs) == "Age")] <- "Age (SD)" rownames(cs)[which(rownames(cs) == "Gender2")] <- "Gender (male)" rownames(cs)[which(rownames(cs) == "PartnerYes")] <- "With partner" rownames(cs)[which(rownames(cs) == "PoliticalDem.")] <- "Democrat" rownames(cs)[which(rownames(cs) == "PoliticalRep.")] <- "Republican" text(cs, x=matrix(1:ncol(pdat), nrow=nrow(cs), ncol=ncol(pdat), byrow=T), y=matrix(nrow(cs):1, nrow=nrow(cs), ncol=ncol(cs), byrow=F), cex=.8, xpd=T) #abline(v=b) text(rownames(cs), x=0, y=nrow(cs):1, xpd=T, pos=4, cex=.8) dev.off() fullsample <- cbind(fullsample, Fs[row.names(fullsample),]) fullsample <- cbind(fullsample, unc=part_unc[as.numeric(row.names(fullsample))]) levels(fullsample$class) <- 1:length(levels(fullsample$class)) library(scales) cols <- as.numeric(fullsample$class) + 1 cols <- alpha(cols, fullsample$unc) # plot classification pdf(file=paste("../output/stage3/lpa/", sel_m, "_classification.pdf", sep=""), width=10, height=10) p_dat <- fullsample[,grepl("M5", colnames(fullsample))] names(p_dat) <- toupper(gsub(toupper(sel_m), "", names(p_dat))) plot(p_dat, col=cols, pch=18, cex=.5) dev.off() # plot classification probabilities pdf(file=paste("../output/stage3/lpa/", sel_m, "_classprobs.pdf", sep=""), width=10, height=6) par(mfrow=c(2,2), mar=c(4,4,2,1), mgp=c(2.25,.75,0)) p_cols2 <- inferno(n = length(unique(unlist(part_class))), begin=.2) p_ok <- part_p p_nok <- part_p for (c in c_ok[c(1,3,4,2)]) { p_cols2 <- rep("cyan", length(p_cols2)) p_cols2[c] <- "green4" ind <- which(part_class != c) p_ok[ind, c] <- NA p_nok[-ind, c] <- NA p_dat <- data.frame(p = part_p[,c], c = part_class) #p_dat <- p_dat[order(p_dat$p, decreasing=T),] p_dat <- p_dat[order(p_dat$c),] p_dat$col <- p_dat$col <- p_cols2[p_dat$c] p_dat$ind <- 
1:nrow(p_dat) #barplot(p_dat$p, ylim=c(0, 1), col=p_dat$c, las=1, border=0) current_title <- prof_lab[grep(c, prof_lab)] current_title <- gsub("P", "Probability of being classified to p", current_title) plot(1, type="n", xlim=c(1,3200), ylim=c(-0.1,1), las=1, xlab="Participants", ylab="", main=current_title, frame=0, xaxt="n") for (x in 1:nrow(p_dat)) { lines(x=c(x,x), y=c(0,p_dat$p[x]), col=p_dat$col[x], lwd=.0001) } xs <- seq(1, 3200, length.out=5) axis(1, at=xs, floor(xs)) text("Classification probability", srt=90, x=-550, y=.5, xpd=T) ranges <- tapply(p_dat$ind, list(p_dat$cluster), range) for (j in 1:length(ranges)) { range <- ranges[[j]] if (j == c) col2 = "green4" else col2 = "darkgrey" lines(x=c(range[1], range[1]), y=c(-0.1,-0.02), xpd=T, col=col2) lines(x=c(range[2], range[2]), y=c(-0.1,-0.02), xpd=T, col=col2) lines(x=c(range[1], range[2]), y=c(-0.1,-0.1), xpd=T, col=col2) #rect(range[1], -.1, range[2], 1.02, col=NA, border="grey", xpd=T) text(paste("C", names(ranges)[j], sep=""), x=mean(range), y=-.05, cex=.6, col=col2) } } dev.off() # print p's with which participants were classified to "their" cluster print(round(apply(p_ok, 2, mean, na.rm=T), 2)) # print p with which participants were classified to any other cluster print(round(mean(unlist(p_nok), na.rm=T), 2)) pdat <- cbind(pdat, data.frame("EX" = c(.5, .25, 0.05, -.9, -1, .5, 1.1))) # plot profiles seperately for (i in 1:ncol(pdat)) { c <- colnames(pdat)[i] png(file=paste("../output/stage3/lpa/", sel_m, "_", c, ".png", sep=""), width=3000, height=900, res=300) par(mar=c(1,4,1,1)) plot(1, xlim=c(.75, 1.25), ylim=ylim, type="n", las=1, xaxt="n", xlab=NA, ylab="", yaxt="n", frame=F) #axis(2, labels=F) rect(.75, ylim[1], 1.25, ylim[2], col=gray(.95), border=0) shift <- seq(1-.2, 1+.2, length.out=c(nrow(pdat))) arrows(x0=.73, y0=0.3, y1=1.4, xpd=T) arrows(x0=.73, y0=-0.3, y1=-1.4, xpd=T) text(0.7, .8, srt=90, "more\nrisk-seeking", xpd=T, cex=.9) text(0.7, -.8, srt=90, "more\nrisk-averse", xpd=T, cex=.9) 
vals <- pdat[,i] for (j in 1:length(vals)) { lines(x=rep(shift[j], 2), y=c(0, vals[j]), col=p_cols[j], lwd=60, lend=1) text(shift[j], 1.5, xpd=T, lab2[j], cex=1, pos=3) } #lines(c(.69, 1.25), c(0, 0), xpd=T) lines(c(.75, 1.25), c(0, 0), xpd=T) text(.73, 0, "Average", xpd=T) dev.off() } # plot "original" DOSPERT raw scores (for comparisons) pdf(file=paste("../output/stage3/lpa/", sel_m, "_DOSPERT_raw.pdf", sep=""), width=10, height=4) #p_cols3 <- p_cols #p_cols3 <- p_cols3[-1] #p_cols3 <- p_cols3[-3] p_cols3 <- cividis(n=nrow(dospert_cmeans)) b <- barplot(dospert_cmeans, beside=T, ylim=c(0,6), yaxt="n", col=p_cols3, border="white", ylab="Average DOSPERT-Score", names=prof_lab) axis(2, at=0:6, 1:7, las=1) abline(h=3, lty=3) legend(x=12.5, xjust=0.5, y=7.5, xpd=T, horiz=T, c("Social", "Recreational", "Financial", "Ethical", "Health"), box.lwd=0, pch=15, col=p_cols3) dev.off()
var<-read.csv("C:/Users/Dell/Desktop/7th SEM/DSR/Lab/Mult_Reg_Yield.csv") head(var) set.seed(1234) ind<-sample(2,nrow(var),replace=TRUE,prob=c(0.7,0.3)) ind length(ind) training<-var[ind==1,] testing<-var[ind==2,] model<-lm(X.Yield ~ Time +Temperature,data=training) model summary(model) plot(X.Yield ~ Time,training, main = "CS005") abline(model,col="red") modelnew<-lm(X.Yield ~ Time,data=training) modelnew summary(modelnew) plot(X.Yield ~ Time,training, main= "CS005") abline(modelnew,col="blue") newypred<-fitted(modelnew) newypred pred<-predict(modelnew,training) pred testpred<-predict(modelnew,testing) testpred predict(model,data.frame(Time=200,Temperature=240)) predict(modelnew,data.frame(Time=200,Temperature=240)) plot(modelnew,which=1, main="CS005")
/Prog7.R
no_license
Prashamshini/1BM16CS005_DSR
R
false
false
800
r
var<-read.csv("C:/Users/Dell/Desktop/7th SEM/DSR/Lab/Mult_Reg_Yield.csv") head(var) set.seed(1234) ind<-sample(2,nrow(var),replace=TRUE,prob=c(0.7,0.3)) ind length(ind) training<-var[ind==1,] testing<-var[ind==2,] model<-lm(X.Yield ~ Time +Temperature,data=training) model summary(model) plot(X.Yield ~ Time,training, main = "CS005") abline(model,col="red") modelnew<-lm(X.Yield ~ Time,data=training) modelnew summary(modelnew) plot(X.Yield ~ Time,training, main= "CS005") abline(modelnew,col="blue") newypred<-fitted(modelnew) newypred pred<-predict(modelnew,training) pred testpred<-predict(modelnew,testing) testpred predict(model,data.frame(Time=200,Temperature=240)) predict(modelnew,data.frame(Time=200,Temperature=240)) plot(modelnew,which=1, main="CS005")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/task_timecourse.R \name{getTimeCourseSettings} \alias{getTimeCourseSettings} \alias{getTC} \title{Get time course settings} \usage{ getTimeCourseSettings(model = getCurrentModel()) getTC(model = getCurrentModel()) } \arguments{ \item{model}{a model object} } \value{ A list of time course task settings including method options. } \description{ \code{getTimeCourseSettings} gets time course task settings including method options. } \details{ The \href{https://jpahle.github.io/CoRC/articles/task_management.html}{online article on managing tasks} provides some further context. } \seealso{ Other time course: \code{\link{runTimeCourse}()}, \code{\link{setTimeCourseSettings}()} } \concept{time course}
/man/getTimeCourseSettings.Rd
permissive
jpahle/CoRC
R
false
true
783
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/task_timecourse.R \name{getTimeCourseSettings} \alias{getTimeCourseSettings} \alias{getTC} \title{Get time course settings} \usage{ getTimeCourseSettings(model = getCurrentModel()) getTC(model = getCurrentModel()) } \arguments{ \item{model}{a model object} } \value{ A list of time course task settings including method options. } \description{ \code{getTimeCourseSettings} gets time course task settings including method options. } \details{ The \href{https://jpahle.github.io/CoRC/articles/task_management.html}{online article on managing tasks} provides some further context. } \seealso{ Other time course: \code{\link{runTimeCourse}()}, \code{\link{setTimeCourseSettings}()} } \concept{time course}
# --------------------MICE.IMPUTE.POLYREG----------------------------- #'Imputation by polytomous regression - ordered #' #'Imputes missing data in a categorical variable using polytomous regression #' #'By default, ordered factors with more than two levels are imputed by #'\code{mice.impute.polr}. #' #'The function \code{mice.impute.polr()} imputes for ordered categorical response #'variables by the proportional odds logistic regression (polr) model. The #'function repeatedly applies logistic regression on the successive splits. The #'model is also known as the cumulative link model. #' #'The algorithm of \code{mice.impute.polr} uses the function \code{polr()} from #'the \code{MASS} package. #' #'In order to avoid bias due to perfect prediction, the algorithm augment the #'data according to the method of White, Daniel and Royston (2010). #' #'The call to \code{polr} might fail, usually because the data are very sparse. #'In that case, \code{multinom} is tried as a fallback, and a record is written #'to the \code{loggedEvents} component of the \code{mids} object. #' #'@aliases mice.impute.polr #'@param y Incomplete data vector of length \code{n} #'@param ry Vector of missing data pattern (\code{FALSE}=missing, #'\code{TRUE}=observed) #'@param x Matrix (\code{n} x \code{p}) of complete covariates. #'@param nnet.maxit Tuning parameter for \code{nnet()}. #'@param nnet.trace Tuning parameter for \code{nnet()}. #'@param nnet.maxNWts Tuning parameter for \code{nnet()}. #'@param ... Other named arguments. #'@return A vector of length \code{nmis} with imputations. #'@author Stef van Buuren, Karin Groohuis-Oudshoorn, 2000-2010 #'@seealso \code{\link{mice}}, \code{\link[nnet]{multinom}}, #'\code{\link[MASS]{polr}} #'@references #' #'Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}: Multivariate #'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical #'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/} #' #'Brand, J.P.L. 
(1999) \emph{Development, implementation and evaluation of #'multiple imputation strategies for the statistical analysis of incomplete #'data sets.} Dissertation. Rotterdam: Erasmus University. #' #'White, I.R., Daniel, R. Royston, P. (2010). Avoiding bias due to perfect #'prediction in multiple imputation of incomplete categorical variables. #'\emph{Computational Statistics and Data Analysis}, 54, 2267-2275. #' #'Venables, W.N. & Ripley, B.D. (2002). \emph{Modern applied statistics with #'S-Plus (4th ed)}. Springer, Berlin. #'@keywords datagen #'@export #' #--------------------MICE.IMPUTE.POLR----------------------------- mice.impute.polr <- function (y, ry, x, nnet.maxit=100, nnet.trace=FALSE, nnet.maxNWts=1500, ...) { ### added 08/12/2010 x <- as.matrix(x) aug <- augment(y, ry, x, ...) x <- aug$x y <- aug$y ry <- aug$ry w <- aug$w xy <- cbind.data.frame(y = y, x = x) ## polr may fail on sparse data. We revert to multinom in such cases. fit <- try(suppressWarnings(polr(formula(xy), data = xy[ry, ], weights=w[ry],...)), silent=TRUE) if (inherits(fit, "try-error")) { fit <- multinom(formula(xy), data=xy[ry,], weights=w[ry], maxit=nnet.maxit, trace=nnet.trace, maxNWts=nnet.maxNWts, ...) updateLog(meth="multinom", frame=3) } post <- predict(fit, xy[!ry, ], type = "probs") if (sum(!ry) == 1) post <- matrix(post, nrow = 1, ncol = length(post)) fy <- as.factor(y) nc <- length(levels(fy)) un <- rep(runif(sum(!ry)), each = nc) if (is.vector(post)) post <- matrix(c(1 - post, post), ncol = 2) draws <- un > apply(post, 1, cumsum) idx <- 1 + apply(draws, 2, sum) return(levels(fy)[idx]) }
/R/mice.impute.polr.r
no_license
Ncalverley/mice
R
false
false
3,773
r
# --------------------MICE.IMPUTE.POLYREG----------------------------- #'Imputation by polytomous regression - ordered #' #'Imputes missing data in a categorical variable using polytomous regression #' #'By default, ordered factors with more than two levels are imputed by #'\code{mice.impute.polr}. #' #'The function \code{mice.impute.polr()} imputes for ordered categorical response #'variables by the proportional odds logistic regression (polr) model. The #'function repeatedly applies logistic regression on the successive splits. The #'model is also known as the cumulative link model. #' #'The algorithm of \code{mice.impute.polr} uses the function \code{polr()} from #'the \code{MASS} package. #' #'In order to avoid bias due to perfect prediction, the algorithm augment the #'data according to the method of White, Daniel and Royston (2010). #' #'The call to \code{polr} might fail, usually because the data are very sparse. #'In that case, \code{multinom} is tried as a fallback, and a record is written #'to the \code{loggedEvents} component of the \code{mids} object. #' #'@aliases mice.impute.polr #'@param y Incomplete data vector of length \code{n} #'@param ry Vector of missing data pattern (\code{FALSE}=missing, #'\code{TRUE}=observed) #'@param x Matrix (\code{n} x \code{p}) of complete covariates. #'@param nnet.maxit Tuning parameter for \code{nnet()}. #'@param nnet.trace Tuning parameter for \code{nnet()}. #'@param nnet.maxNWts Tuning parameter for \code{nnet()}. #'@param ... Other named arguments. #'@return A vector of length \code{nmis} with imputations. #'@author Stef van Buuren, Karin Groohuis-Oudshoorn, 2000-2010 #'@seealso \code{\link{mice}}, \code{\link[nnet]{multinom}}, #'\code{\link[MASS]{polr}} #'@references #' #'Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}: Multivariate #'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical #'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/} #' #'Brand, J.P.L. 
(1999) \emph{Development, implementation and evaluation of #'multiple imputation strategies for the statistical analysis of incomplete #'data sets.} Dissertation. Rotterdam: Erasmus University. #' #'White, I.R., Daniel, R. Royston, P. (2010). Avoiding bias due to perfect #'prediction in multiple imputation of incomplete categorical variables. #'\emph{Computational Statistics and Data Analysis}, 54, 2267-2275. #' #'Venables, W.N. & Ripley, B.D. (2002). \emph{Modern applied statistics with #'S-Plus (4th ed)}. Springer, Berlin. #'@keywords datagen #'@export #' #--------------------MICE.IMPUTE.POLR----------------------------- mice.impute.polr <- function (y, ry, x, nnet.maxit=100, nnet.trace=FALSE, nnet.maxNWts=1500, ...) { ### added 08/12/2010 x <- as.matrix(x) aug <- augment(y, ry, x, ...) x <- aug$x y <- aug$y ry <- aug$ry w <- aug$w xy <- cbind.data.frame(y = y, x = x) ## polr may fail on sparse data. We revert to multinom in such cases. fit <- try(suppressWarnings(polr(formula(xy), data = xy[ry, ], weights=w[ry],...)), silent=TRUE) if (inherits(fit, "try-error")) { fit <- multinom(formula(xy), data=xy[ry,], weights=w[ry], maxit=nnet.maxit, trace=nnet.trace, maxNWts=nnet.maxNWts, ...) updateLog(meth="multinom", frame=3) } post <- predict(fit, xy[!ry, ], type = "probs") if (sum(!ry) == 1) post <- matrix(post, nrow = 1, ncol = length(post)) fy <- as.factor(y) nc <- length(levels(fy)) un <- rep(runif(sum(!ry)), each = nc) if (is.vector(post)) post <- matrix(c(1 - post, post), ncol = 2) draws <- un > apply(post, 1, cumsum) idx <- 1 + apply(draws, 2, sum) return(levels(fy)[idx]) }
library(ggplot2) library(agridat) library(ggpubr) library(dcolumn) library(readr) library(ggpubr) library(devtools) library(dplyr) library(gri) library(ggpubr) Wits_Biomass <- read_csv("C:/Users/etelford.IC.002/Dropbox/Phd/R/Masters/Wits_Biomass.csv") Wits_biomass<-Wits_Biomass Wits_biomass$species<- factor(Wits_biomass$species, levels=c('VEX','SN','VS')) Wits_biomass$nodule_wt<-as.numeric(Wits_biomass$nodule_wt) Wits_biomass$nodule_count<-as.numeric(Wits_biomass$nodule_count) theme.clean.1 <- function(){ theme_bw()+ theme(axis.text.x = element_text(size = 8, angle = 0, vjust = 1, hjust = 1), axis.text.y = element_text(size = 10), axis.title.x = element_text(size = 12, face = "plain"), axis.title.y = element_text(size = 12, face = "plain"), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank(), panel.grid.major.y = element_blank(), plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), units = , "cm"), plot.title = element_text(size = 20, vjust = 1, hjust = 0.5), legend.text = element_text(size = 8, face = "italic"), legend.title = element_blank(), legend.position = c(0.7, 0.95))} Log_R<-log(Wits_biomass$AG_wt) Log_B<-log(Wits_biomass$BG_wt) Log_N_w<-log(Wits_biomass$nodule_wt) Log_N_c<-log(Wits_biomass$nodule_count) Log_T<-log10(Wits_biomass$Total_wt) Log_RS<-log(Wits_biomass$ratio) variable_names <- list( "VEX" = "Vachellia exuvialis" , "SN" = "Sengalia nigrescens", "VS" = "Vachellia sieberiana") variable_labeller <- function(variable,value){ return(variable_names[value]) } (p1 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_N_w, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Nodule biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + 
theme(strip.background = element_rect(colour="black", fill="white",))) p1<- p1+ scale_fill_grey(start = 0, end = .9) p1 theme.clean <- function(){ theme_bw()+ theme(axis.text.x = element_text(size = 8, angle = 0, vjust = 1, hjust = 1), axis.text.y = element_text(size = 10), axis.title.x = element_text(size = 12, face = "plain"), axis.title.y = element_text(size = 12, face = "plain"), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank(), panel.grid.major.y = element_blank(), plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), units = , "cm"), plot.title = element_text(size = 20, vjust = 1, hjust = 0.5), legend.text = element_text(size = 8, face = "italic"), legend.title = element_blank(), legend.position = c(0.72, 0.85))} (p5 <- ggplot(Wits_biomass, aes (x = Log_N_w, y = Log_N_c, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule count log", x = "Nodule biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p5<- p5 + scale_fill_grey(start = 0, end = .9) p5<- p5 + scale_colour_grey(start = 0, end = .9) p5 (p6 <- ggplot(Wits_biomass, aes (x = Log_B, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Belowground biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p6 <- p6 + scale_fill_grey(start = 0, end = .9) p6 <- p6 + scale_colour_grey(start = 0, end = .9) p6 (p3 <- 
ggplot(Wits_biomass, aes (x = Log_B, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Whole plant biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p3 <- p3 + scale_fill_grey(start = 0, end = .9) p3 <- p3 + scale_colour_grey(start = 0, end = .9) p3 Wits_Biomass_N <- read_csv("Wits_Biomass_N.csv") Wits_Biomass_N$species<- factor(Wits_Biomass_N$species, levels=c('VEX','SN','VS')) Wits_Biomass_N$nodule_wt<-as.numeric(Wits_Biomass_N$nodule_wt) Wits_Biomass_N$nodule_count<-as.numeric(Wits_Biomass_N$nodule_count) Log_B_zero<-log(Wits_Biomass_N$BG_wt) Log_N_w_zero<-log(Wits_Biomass_N$nodule_wt) variable_names <- list( "VEX" = "Vachellia exuvialis" , "SN" = "Sengalia nigrescens", "VS" = "Vachellia sieberiana") variable_labeller <- function(variable,value){ return(variable_names[value]) } Wits_Biomass_N$sp_treat <- as.factor(paste(as.character(Wits_Biomass_N$treatment),as.character(Wits_Biomass_N$species),sep="_")) summary(Wits_Biomass_N) (p13 <- ggplot(Wits_Biomass_N, aes (x = Log_B_zero, y = Log_N_w_zero, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Belowground biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p13 <- p13 + scale_fill_grey(start = 0, end = .9) p13 <- p13 + scale_colour_grey(start = 0, end = .9) p13 (p7 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_T, fill=treatment)) + 
geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Whole plant biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p7 <- p7 + scale_fill_grey(start = 0, end = .9) p7 (p8 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_B, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Belowground biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p8 <- p8 + scale_fill_grey(start = 0, end = .9) p8 (p9 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_RS, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Aboveground: belowground ratio log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p9 <- p9 + scale_fill_grey(start = 0, end = .9) p9 ggarrange( p7, labels = c("a")) ggarrange( p8, labels = c("b")) ggarrange( p9, labels = c("c")) (p14 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_T, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = 
"Whole biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) (p15 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_RS, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Root: shoot ratio log") + facet_grid(.~ species) + theme(strip.background = element_rect(colour="black", fill="white",))) ggarrange( p1, p5, p14, p15, labels = c("A", "B","A", "B")) (p6<-ggplot(Wits_biomass, aes (x = Log_T, y = Log_N_c, colour = treatment)) + geom_point() + theme.clean() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Total biomass log (grams)" , x = "Nodule count log")+geom_smooth(method=lm, aes(fill=treatment))+facet_grid(.~ species)+theme(legend.position = "none")) (p7<-ggplot(Wits_biomass, aes (x = Log_T, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Total biomass log (grams)" , x = "Nodule weight log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_grid(.~ species)+theme(legend.position = "none")) ##Isotope box plots wits_iso <- read_csv("C:/Users/s1014831/Desktop/Statistics/Wits data/wits_iso.csv") summary(wits_iso) wits_iso$Species<- factor(wits_iso$Species, levels=c('VS','VEX','SN')) (p18 <- ggplot(wits_iso, aes(Treatment, N,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf N content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p19 <- ggplot(wits_iso, aes(Treatment, delta15N,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = 
element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf 15N content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p20 <- ggplot(wits_iso, aes(Treatment, C,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf C content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p21 <- ggplot(wits_iso, aes(Treatment, delta13C, fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf 13C content (per 200 um)") + facet_wrap(.~Species)+theme(strip.background = element_rect(colour="black", fill="white",))) (p22 <- ggplot(wits_iso, aes(Treatment, CN_ratio, fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "C:N ratio") + facet_wrap(.~Species)+theme(strip.background = element_rect(colour="black", fill="white",))) ggarrange(p18, p22, p19, p22,labels = c("A", "B", "C", "D")) hist(Wits_biomass$AG_wt) log_N<-log10(Wits_biomass$AG_wt) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(AG_wt ~ treatment*species, data = Wits_biomass) hist(Wits_Biomass$BG_wt) log_N<-log10(Wits_Biomass$BG_wt) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(BG_wt ~ treatment*species, data = Wits_Biomass) hist(Wits_Biomass$ratio) log_N<-log10(Wits_Biomass$ratio) hist(log_N) R.aov <- aov(log_N~ treatment*species , 
data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(ratio ~ treatment*species, data = Wits_Biomass) hist(Wits_Biomass$fine_root_weight) log_N<-sqrt(Wits_Biomass$fine_root_weight) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(fine_root_weight ~ treatment*species, data = Wits_Biomass)
/Wits_grey_boxplots.R
no_license
Elizabeth261191/Telford_et_al_2021
R
false
false
13,485
r
# ---------------------------------------------------------------------------
# Wits seedling biomass / nodulation figure script (greyscale versions).
# Reads Wits_Biomass.csv (and later Wits_Biomass_N.csv and wits_iso.csv),
# derives log-transformed biomass variables, builds facetted ggplot2
# box/scatter plots, and runs two-way (treatment x species) ANOVAs with
# Tukey post-hoc tests and Levene's tests.
# NOTE(review): library(dcolumn) and library(gri) do not match obvious CRAN
# package names -- confirm ("gri" may be a typo for "grid").
# NOTE(review): plot p1 calls theme.clean() before that helper is defined
# further down the script; this only works if theme.clean already exists in
# the workspace -- verify the intended run order.
# NOTE(review): the ANOVA section mixes Wits_biomass and Wits_Biomass (the
# re-levelled copy vs the raw read_csv result) -- confirm this is intentional.
# ---------------------------------------------------------------------------
library(ggplot2) library(agridat) library(ggpubr) library(dcolumn) library(readr) library(ggpubr) library(devtools) library(dplyr) library(gri) library(ggpubr) Wits_Biomass <- read_csv("C:/Users/etelford.IC.002/Dropbox/Phd/R/Masters/Wits_Biomass.csv") Wits_biomass<-Wits_Biomass Wits_biomass$species<- factor(Wits_biomass$species, levels=c('VEX','SN','VS')) Wits_biomass$nodule_wt<-as.numeric(Wits_biomass$nodule_wt) Wits_biomass$nodule_count<-as.numeric(Wits_biomass$nodule_count) theme.clean.1 <- function(){ theme_bw()+ theme(axis.text.x = element_text(size = 8, angle = 0, vjust = 1, hjust = 1), axis.text.y = element_text(size = 10), axis.title.x = element_text(size = 12, face = "plain"), axis.title.y = element_text(size = 12, face = "plain"), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank(), panel.grid.major.y = element_blank(), plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), units = , "cm"), plot.title = element_text(size = 20, vjust = 1, hjust = 0.5), legend.text = element_text(size = 8, face = "italic"), legend.title = element_blank(), legend.position = c(0.7, 0.95))} Log_R<-log(Wits_biomass$AG_wt) Log_B<-log(Wits_biomass$BG_wt) Log_N_w<-log(Wits_biomass$nodule_wt) Log_N_c<-log(Wits_biomass$nodule_count) Log_T<-log10(Wits_biomass$Total_wt) Log_RS<-log(Wits_biomass$ratio) variable_names <- list( "VEX" = "Vachellia exuvialis" , "SN" = "Sengalia nigrescens", "VS" = "Vachellia sieberiana") variable_labeller <- function(variable,value){ return(variable_names[value]) } (p1 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_N_w, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Nodule biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + 
# (continuation of p1: nodule-biomass boxplot facetted by species; then the
# second clean theme, and the p5/p6 scatter plots with lm smooths)
theme(strip.background = element_rect(colour="black", fill="white",))) p1<- p1+ scale_fill_grey(start = 0, end = .9) p1 theme.clean <- function(){ theme_bw()+ theme(axis.text.x = element_text(size = 8, angle = 0, vjust = 1, hjust = 1), axis.text.y = element_text(size = 10), axis.title.x = element_text(size = 12, face = "plain"), axis.title.y = element_text(size = 12, face = "plain"), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank(), panel.grid.major.y = element_blank(), plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), units = , "cm"), plot.title = element_text(size = 20, vjust = 1, hjust = 0.5), legend.text = element_text(size = 8, face = "italic"), legend.title = element_blank(), legend.position = c(0.72, 0.85))} (p5 <- ggplot(Wits_biomass, aes (x = Log_N_w, y = Log_N_c, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule count log", x = "Nodule biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p5<- p5 + scale_fill_grey(start = 0, end = .9) p5<- p5 + scale_colour_grey(start = 0, end = .9) p5 (p6 <- ggplot(Wits_biomass, aes (x = Log_B, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Belowground biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p6 <- p6 + scale_fill_grey(start = 0, end = .9) p6 <- p6 + scale_colour_grey(start = 0, end = .9) p6 (p3 <- 
# p3: nodule biomass vs whole-plant biomass scatter with per-treatment lm
# smooths; then the zero-excluded data set (Wits_Biomass_N) and plot p13.
ggplot(Wits_biomass, aes (x = Log_B, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Whole plant biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p3 <- p3 + scale_fill_grey(start = 0, end = .9) p3 <- p3 + scale_colour_grey(start = 0, end = .9) p3 Wits_Biomass_N <- read_csv("Wits_Biomass_N.csv") Wits_Biomass_N$species<- factor(Wits_Biomass_N$species, levels=c('VEX','SN','VS')) Wits_Biomass_N$nodule_wt<-as.numeric(Wits_Biomass_N$nodule_wt) Wits_Biomass_N$nodule_count<-as.numeric(Wits_Biomass_N$nodule_count) Log_B_zero<-log(Wits_Biomass_N$BG_wt) Log_N_w_zero<-log(Wits_Biomass_N$nodule_wt) variable_names <- list( "VEX" = "Vachellia exuvialis" , "SN" = "Sengalia nigrescens", "VS" = "Vachellia sieberiana") variable_labeller <- function(variable,value){ return(variable_names[value]) } Wits_Biomass_N$sp_treat <- as.factor(paste(as.character(Wits_Biomass_N$treatment),as.character(Wits_Biomass_N$species),sep="_")) summary(Wits_Biomass_N) (p13 <- ggplot(Wits_Biomass_N, aes (x = Log_B_zero, y = Log_N_w_zero, colour = treatment)) + geom_point() + theme.clean.1() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Nodule biomass log (grams)", x = "Belowground biomass log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_wrap(~species, ncol=3, labeller= variable_labeller)+theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0), strip.text.x = element_text(size = 13))) p13 <- p13 + scale_fill_grey(start = 0, end = .9) p13 <- p13 + scale_colour_grey(start = 0, end = .9) p13 (p7 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_T, fill=treatment)) + 
# (continuation of p7: whole-plant biomass boxplot; p8 and p9 follow, then
# the single-panel ggarrange exports and the p14/p15 treatment boxplots)
geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Whole plant biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p7 <- p7 + scale_fill_grey(start = 0, end = .9) p7 (p8 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_B, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Belowground biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p8 <- p8 + scale_fill_grey(start = 0, end = .9) p8 (p9 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_RS, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 13, angle = 0),axis.text.y = element_text(size = 13, angle = 0),legend.position="none", strip.text.x = element_text(size = 13)) + labs(x = "Age at first clipping (months)", y = "Aboveground: belowground ratio log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) p9 <- p9 + scale_fill_grey(start = 0, end = .9) p9 ggarrange( p7, labels = c("a")) ggarrange( p8, labels = c("b")) ggarrange( p9, labels = c("c")) (p14 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_T, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = 
# (y-axis label of p14; p15, the total-biomass scatters p6/p7, and the leaf
# isotope boxplots p18-p22 follow)
"Whole biomass log (grams)") + facet_wrap(~species, ncol=3, labeller= variable_labeller) + theme(strip.background = element_rect(colour="black", fill="white",))) (p15 <- ggplot(Wits_biomass, aes(x=treatment, y=Log_RS, fill=treatment)) + geom_boxplot( alpha = 0.8, colour = "black") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Root: shoot ratio log") + facet_grid(.~ species) + theme(strip.background = element_rect(colour="black", fill="white",))) ggarrange( p1, p5, p14, p15, labels = c("A", "B","A", "B")) (p6<-ggplot(Wits_biomass, aes (x = Log_T, y = Log_N_c, colour = treatment)) + geom_point() + theme.clean() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Total biomass log (grams)" , x = "Nodule count log")+geom_smooth(method=lm, aes(fill=treatment))+facet_grid(.~ species)+theme(legend.position = "none")) (p7<-ggplot(Wits_biomass, aes (x = Log_T, y = Log_N_w, colour = treatment)) + geom_point() + theme.clean() + theme(strip.background = element_rect(colour="black", fill="white",))+labs(y = "Total biomass log (grams)" , x = "Nodule weight log (grams)")+geom_smooth(method=lm, aes(fill=treatment))+facet_grid(.~ species)+theme(legend.position = "none")) 
##Isotope box plots
wits_iso <- read_csv("C:/Users/s1014831/Desktop/Statistics/Wits data/wits_iso.csv") summary(wits_iso) wits_iso$Species<- factor(wits_iso$Species, levels=c('VS','VEX','SN')) (p18 <- ggplot(wits_iso, aes(Treatment, N,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf N content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p19 <- ggplot(wits_iso, aes(Treatment, delta15N,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = 
# (axis-text styling of p19, the leaf delta15N boxplot; p20-p22 and the
# ANOVA section follow)
element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf 15N content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p20 <- ggplot(wits_iso, aes(Treatment, C,fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf C content (per 200 um)") + facet_wrap(.~Species) + theme(strip.background = element_rect(colour="black", fill="white",))) (p21 <- ggplot(wits_iso, aes(Treatment, delta13C, fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "Leaf 13C content (per 200 um)") + facet_wrap(.~Species)+theme(strip.background = element_rect(colour="black", fill="white",))) (p22 <- ggplot(wits_iso, aes(Treatment, CN_ratio, fill= Treatment)) + geom_boxplot( alpha = 0.8, colour = "#8B2323") + theme.clean() + theme(axis.text.x = element_text(size = 12, angle = 0),legend.position="none") + labs(x = "Treatment", y = "C:N ratio") + facet_wrap(.~Species)+theme(strip.background = element_rect(colour="black", fill="white",))) ggarrange(p18, p22, p19, p22,labels = c("A", "B", "C", "D")) hist(Wits_biomass$AG_wt) log_N<-log10(Wits_biomass$AG_wt) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(AG_wt ~ treatment*species, data = Wits_biomass) hist(Wits_Biomass$BG_wt) log_N<-log10(Wits_Biomass$BG_wt) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(BG_wt ~ treatment*species, data = Wits_Biomass) hist(Wits_Biomass$ratio) log_N<-log10(Wits_Biomass$ratio) hist(log_N) R.aov <- aov(log_N~ treatment*species , 
# (data argument of the aov() on the log10 aboveground:belowground ratio;
# the fine-root-weight ANOVA (sqrt-transformed) closes the script)
data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(ratio ~ treatment*species, data = Wits_Biomass) hist(Wits_Biomass$fine_root_weight) log_N<-sqrt(Wits_Biomass$fine_root_weight) hist(log_N) R.aov <- aov(log_N~ treatment*species , data = Wits_Biomass) summary(R.aov) TukeyHSD(R.aov) model.tables(R.aov, "means") plot(R.aov, 1) leveneTest(fine_root_weight ~ treatment*species, data = Wits_Biomass)
# Closure-based "vector with a cached mean".
#
# makeVector() wraps a numeric vector together with a memoised mean value.
# The four accessor functions it returns all share the enclosing
# environment, so the cached mean survives between calls.
makeVector <- function(x = numeric()) {
  cached_mean <- NULL  # invalidated whenever the underlying vector changes

  # Replace the stored vector and drop any stale cached mean.
  set <- function(y) {
    x <<- y
    cached_mean <<- NULL
  }

  # Return the stored vector.
  get <- function() {
    x
  }

  # Record a freshly computed mean in the shared environment.
  setmean <- function(mean) {
    cached_mean <<- mean
  }

  # Return the cached mean (NULL when nothing has been cached yet).
  getmean <- function() {
    cached_mean
  }

  list(set = set, get = get, setmean = setmean, getmean = getmean)
}

# Compute the mean of a makeVector() object, reusing the cached value when
# one is available (announced via message()). Extra arguments in `...`
# (e.g. na.rm = TRUE) are forwarded to mean(). NOTE: the cache ignores
# `...`, so a value cached with different mean() arguments is returned
# as-is -- this matches the original behaviour.
cachemean <- function(x, ...) {
  remembered <- x$getmean()
  if (!is.null(remembered)) {
    message("getting cached data")
    return(remembered)
  }
  fresh <- mean(x$get(), ...)
  x$setmean(fresh)
  fresh
}
/makeVector.r
no_license
galbamonte/ProgrammingAssignment2
R
false
false
608
r
# makeVector(): construct a special "vector" object that can cache its mean.
# Returns a list of four closures (set/get/setmean/getmean) sharing one
# environment; `x` holds the data and `m` the cached mean (NULL when the
# cache is empty or has been invalidated by set()).
makeVector <- function(x = numeric()) {
  m <- NULL
  # Replace the stored vector; clearing `m` invalidates the cached mean.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  # Return the stored vector.
  get <- function() x
  # Record a computed mean in the shared environment.
  setmean <- function(mean) m <<- mean
  # Return the cached mean, or NULL if none has been computed yet.
  getmean <- function() m
  list(set = set, get = get, setmean = setmean, getmean = getmean)
}

# cachemean(): return the mean of a makeVector() object, using the cached
# value when present (announced via message()) and computing + caching it
# otherwise. Extra arguments in `...` are passed through to mean(); note
# the cache does not distinguish between different `...` settings.
cachemean <- function(x, ...) {
  m <- x$getmean()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- mean(data, ...)
  x$setmean(m)
  m
}
get_daily_debias_coeff <- function(joined.data, var.names = VarNames){
  # --------------------------------------
  # purpose: save coefficients for linear debiasing (slope, intercept,
  #          standard deviation of residuals, r2 of linear regression)
  # Creator: Laura Puckett, December 14 2018
  # --------------------------------------
  # joined.data: data frame holding paired observed/forecast columns named
  #              "<var>.obs" and "<var>.for" for every entry of var.names.
  # var.names:   character vector of variable base names. Defaults to the
  #              global VarNames, preserving the original (implicit-global)
  #              behaviour for existing callers.
  # Returns a data frame with one column per variable and six rows; only
  # rows 1:4 ("intercept", "slope", "sd.res.daily", "r2.daily") are filled
  # here -- rows 5:6 are placeholders left NA for the hourly step.
  # NOTE(review): row name "ds.res.hourly" looks like a typo for
  # "sd.res.hourly"; kept unchanged so existing row-name lookups still work.

  # Fit obs ~ forecast and return list(intercept, slope, residual sd, r2).
  get_lm_coeff <- function(col.obs, col.for){
    model <- lm(unlist(col.obs) ~ unlist(col.for))
    intercept <- model$coefficients[1]
    slope <- model$coefficients[2]
    res.sd <- sd(residuals(model))
    r2 <- summary(model)$r.squared
    return(list(intercept, slope, res.sd, r2))
  }

  df <- data.frame(matrix(NA, ncol = length(var.names), nrow = 6))
  colnames(df) <- var.names
  for (colNum in seq_along(var.names)){
    # Fit the regression once per variable (the original refit the same lm
    # for every statistic) and spread the four statistics over rows 1:4.
    coeffs <- get_lm_coeff(joined.data[, paste0(var.names[colNum], ".obs")],
                           joined.data[, paste0(var.names[colNum], ".for")])
    for (rowNum in 1:4){
      df[rowNum, var.names[colNum]] <- coeffs[[rowNum]]
    }
  }
  df <- as.data.frame(df)
  row.names(df) <- c("intercept", "slope", "sd.res.daily", "r2.daily",
                     "ds.res.hourly", "r2.hourly")
  # could convert to key column later instead of row,column
  return(df)
}
/get_daily_debias_coeff.R
no_license
EcoDynForecast/NOAA_download_downscale
R
false
false
1,129
r
get_daily_debias_coeff <- function(joined.data){
  # --------------------------------------
  # purpose: save coefficients for linear debiasing (slope, intercept, standard deviation of residuals, r2 of linear regression)
  # Creator: Laura Puckett, December 14 2018
  # --------------------------------------
  # Expects `joined.data` to contain "<var>.obs" and "<var>.for" columns
  # for every name in the global VarNames vector, which is read from the
  # calling environment -- TODO confirm VarNames is defined before use.
  # Fit obs ~ forecast; return list(intercept, slope, residual sd, r2).
  get_lm_coeff <- function(col.obs, col.for){
    model = lm(unlist(col.obs) ~ unlist(col.for))
    intercept = model$coefficients[1]
    slope = model$coefficients[2]
    res.sd = sd(residuals(model))
    r2 = summary(model)$r.squared
    return(list(intercept, slope, res.sd, r2))
  }
  # Six rows are allocated but only rows 1:4 are filled here; rows 5:6
  # stay NA (presumably populated by the hourly debiasing step -- verify).
  df = data.frame(matrix(NA, ncol = length(VarNames), nrow = 6))
  colnames(df) = VarNames
  for (rowNum in 1:4){
    for(colNum in 1:length(VarNames)){
      # Refits the same lm once per statistic; rowNum selects which of the
      # four returned values (intercept/slope/res.sd/r2) is stored.
      df[rowNum, VarNames[colNum]] = get_lm_coeff(joined.data[,paste0(VarNames[colNum],".obs")], joined.data[,paste0(VarNames[colNum],".for")])[[rowNum]]
    }
  }
  df = as.data.frame(df)
  # NOTE(review): "ds.res.hourly" looks like a typo for "sd.res.hourly".
  row.names(df) <- c("intercept", "slope", "sd.res.daily", "r2.daily", "ds.res.hourly", "r2.hourly")
  # could convert to key column later instead of row,column
  return(df)
}
# Reproducible Research practice script: total daily step counts from the
# activity-monitoring dataset ('data/activity.csv' must exist relative to
# the working directory).
# NOTE: clearing the workspace inside a script is generally discouraged,
# but the top-level reset is kept to preserve the script's original intent.
rm(list = ls())  # fixed: original `rm(list=ls)` passed the ls function object, which errors
library(dplyr)
library(ggplot2)

# Columns: steps (numeric), date (character, converted below), interval.
activity_df <- read.csv('data/activity.csv',
                        colClasses = c("numeric", "character", "numeric"),
                        stringsAsFactors = FALSE)
# fixed: "%M" is the minutes field; calendar months are "%m", so the
# original format parsed every date with the wrong month.
activity_df <- activity_df %>% mutate(date = as.Date(date, "%Y-%m-%d"))
# Treat missing step counts as zero before aggregating.
activity_df[which(is.na(activity_df$steps)), c('steps')] <- 0

# Total steps per day.
activity_steps <- activity_df %>%
  select(date, steps) %>%
  group_by(date) %>%
  summarise(steps = sum(steps))
activity_steps

# (removed: a standalone `geom_histogram(activity_steps$steps, xlab=...)`
# call -- geom_* functions build ggplot layers and cannot be used as
# plotting functions on their own; the stray interactive `?hist` at the
# end of the script was removed as well.)

# Base-graphics histogram of the daily totals.
hist(activity_steps$steps, xlab = "steps", col = "blue")

# ggplot2 histogram of the daily totals.
# NOTE(review): `date` is a continuous (Date) colour mapping while
# scale_color_brewer supplies a discrete palette -- confirm this renders
# as intended.
ggplot(data = activity_steps, aes(x = steps, colour = date)) +
  geom_histogram(color = "black", fill = "white", bins = 30,
                 alpha = 0.5, position = "dodge") +
  labs(title = "Histogram of Total Steps Per Day",
       x = "Steps", y = "Frequency") +
  scale_color_brewer(palette = "Dark2") +
  theme_minimal()
/ds/jh/reproducibleresearch/week2/practice.R
no_license
pkdatascience/coursera_workspace
R
false
false
806
r
# Reproducible Research (week 2) practice script: total daily steps from
# 'data/activity.csv', plotted as base-graphics and ggplot2 histograms.
# BUG(review): `rm(list=ls)` passes the ls function itself instead of its
# result; this errors at run time -- should be rm(list = ls()).
rm(list=ls)
library(dplyr)
library(ggplot2)
# Columns: steps / date / interval; date arrives as character.
activity_df<-read.csv('data/activity.csv',colClasses = c("numeric","character","numeric"),stringsAsFactors = FALSE)
# BUG(review): "%Y-%M-%d" uses %M (minutes) where %m (month) is intended,
# so the parsed dates are wrong -- confirm and change to "%Y-%m-%d".
activity_df<-activity_df %>% mutate(date=as.Date(date,"%Y-%M-%d"))
# Missing step counts are treated as zero before aggregation.
activity_df[which(is.na(activity_df$steps)),c('steps')]<-0
# Total steps per day.
activity_steps<-activity_df %>% select(date,steps) %>%group_by(date) %>%summarise( steps = sum(steps))
activity_steps
# NOTE(review): geom_histogram() is a ggplot layer, not a standalone
# plotting function -- this call does not draw anything useful.
geom_histogram(activity_steps$steps,xlab="steps")
hist(activity_steps$steps,xlab="steps",col="blue")
# ggplot2 histogram of the daily totals.
ggplot(data=activity_steps, aes(x=steps,colour=date) )+ geom_histogram(color="black", fill = "white",bins=30, alpha=0.5, position="dodge") + labs(title="Histogram of Total Steps Per Day",x="Steps", y = "Frequency") + scale_color_brewer(palette="Dark2") + theme_minimal()
# NOTE(review): interactive help lookup left over in the script.
?hist
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R \name{stan_inits} \alias{stan_inits} \title{Set up initial conditions for model} \usage{ stan_inits(data, strains = 2) } \description{ Set up initial conditions for model } \examples{ dt <- stan_data(latest_obs(germany_obs)) inits <- stan_inits(dt) inits inits() }
/man/stan_inits.Rd
permissive
dwolffram/bp.delta
R
false
true
351
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R \name{stan_inits} \alias{stan_inits} \title{Set up initial conditions for model} \usage{ stan_inits(data, strains = 2) } \description{ Set up initial conditions for model } \examples{ dt <- stan_data(latest_obs(germany_obs)) inits <- stan_inits(dt) inits inits() }
# ---------------------------------------------------------------------------
# Stangle() extraction of the R code chunks from the 'colext' vignette of
# the unmarked package: simulation and fitting of multi-season (dynamic)
# occupancy models. Chunks flagged "(eval = FALSE)" are kept commented out
# exactly as generated; run the live chunks in order, since later chunks
# use objects (y, M, J, T, umf, ...) created earlier.
# NOTE(review): T <- 10 masks the T shorthand for TRUE for the session.
# ---------------------------------------------------------------------------
### R code from vignette source 'colext.Rnw'


###################################################
### code chunk number 1: colext.Rnw:1-3
###################################################
options(width=70)
options(continue=" ")


###################################################
### code chunk number 2: colext.Rnw:377-416
###################################################
M <- 250 # Number of sites
J <- 3 # num secondary sample periods
T <- 10 # num primary sample periods
psi <- rep(NA, T) # Occupancy probability
muZ <- z <- array(dim = c(M, T)) # Expected and realized occurrence
y <- array(NA, dim = c(M, J, T)) # Detection histories
set.seed(13973)
psi[1] <- 0.4 # Initial occupancy probability
p <- c(0.3,0.4,0.5,0.5,0.1,0.3,0.5,0.5,0.6,0.2)
phi <- runif(n=T-1, min=0.6, max=0.8) # Survival probability (1-epsilon)
gamma <- runif(n=T-1, min=0.1, max=0.2) # Colonization probability

# Generate latent states of occurrence
# First year
z[,1] <- rbinom(M, 1, psi[1]) # Initial occupancy state
# Later years
# NOTE(review): muZ is indexed as muZ[k] inside the site loop although it
# was created as an M x T matrix -- confirm against the published vignette.
for(i in 1:M){ # Loop over sites
   for(k in 2:T){ # Loop over years
      muZ[k] <- z[i, k-1]*phi[k-1] + (1-z[i, k-1])*gamma[k-1]
      z[i,k] <- rbinom(1, 1, muZ[k])
   }
}

# Generate detection/non-detection data
for(i in 1:M){
   for(k in 1:T){
      prob <- z[i,k] * p[k]
      for(j in 1:J){
         y[i,j,k] <- rbinom(1, 1, prob)
      }
   }
}

# Compute annual population occupancy
for (k in 2:T){
   psi[k] <- psi[k-1]*phi[k-1] + (1-psi[k-1])*gamma[k-1]
}


###################################################
### code chunk number 3: sim
###################################################
plot(1:T, colMeans(z), type = "b", xlab = "Year",
     ylab = "Proportion of sites occupied",
     col = "black", xlim=c(0.5, 10.5), xaxp=c(1,10,9),
     ylim = c(0,0.6), lwd = 2, lty = 1,
     frame.plot = FALSE, las = 1, pch=16)
psi.app <- colMeans(apply(y, c(1,3), max))
lines(1:T, psi.app, type = "b", col = "blue", lty=3, lwd = 2)
legend(1, 0.6, c("truth", "observed"),
       col=c("black", "blue"), lty=c(1,3), pch=c(16,1))


###################################################
### code chunk number 4: colext.Rnw:457-458
###################################################
library(unmarked)


###################################################
### code chunk number 5: colext.Rnw:467-468
###################################################
yy <- matrix(y, M, J*T)


###################################################
### code chunk number 6: colext.Rnw:476-478
###################################################
year <- matrix(c('01','02','03','04','05','06','07','08','09','10'),
               nrow(yy), T, byrow=TRUE)


###################################################
### code chunk number 7: colext.Rnw:493-498
###################################################
simUMF <- unmarkedMultFrame(
    y = yy,
    yearlySiteCovs = list(year = year),
    numPrimary=T)
summary(simUMF)


###################################################
### code chunk number 8: colext.Rnw:577-578
###################################################
plogis(-0.813)


###################################################
### code chunk number 9: colext.Rnw:647-652 (eval = FALSE)
###################################################
## m1 <- colext(psiformula = ~1, # First-year occupancy
##     gammaformula = ~ year-1, # Colonization
##     epsilonformula = ~ year-1, # Extinction
##     pformula = ~ year-1, # Detection
##     data = simUMF)


###################################################
### code chunk number 10: colext.Rnw:654-655 (eval = FALSE)
###################################################
## m1


###################################################
### code chunk number 11: colext.Rnw:736-741 (eval = FALSE)
###################################################
## nd <- data.frame(year=c('01','02','03','04','05','06','07','08','09'))
## E.ext <- predict(m1, type='ext', newdata=nd)
## E.col <- predict(m1, type='col', newdata=nd)
## nd <- data.frame(year=c('01','02','03','04','05','06','07','08','09','10'))
## E.det <- predict(m1, type='det', newdata=nd)


###################################################
### code chunk number 12: yearlysim (eval = FALSE)
###################################################
## op <- par(mfrow=c(3,1), mai=c(0.6, 0.6, 0.1, 0.1))
## 
## with(E.ext, { # Plot for extinction probability
##   plot(1:9, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Extinction probability ( ', epsilon, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:9, labels=nd$year[1:9])
##   arrows(1:9, lower, 1:9, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:9)-0.1, 1-phi, col=1, lwd = 1, pch=16)
##   legend(7, 1, c('Parameter', 'Estimate'), col=c(1,4), pch=c(16, 1),
##     cex=0.8)
##   })
## 
## with(E.col, { # Plot for colonization probability
##   plot(1:9, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Colonization probability ( ', gamma, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:9, labels=nd$year[1:9])
##   arrows(1:9, lower, 1:9, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:9)-0.1, gamma, col=1, lwd = 1, pch=16)
##   legend(7, 1, c('Parameter', 'Estimate'), col=c(1,4), pch=c(16, 1),
##     cex=0.8)
##   })
## 
## with(E.det, { # Plot for detection probability: note 10 years
##   plot(1:10, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Detection probability ( ', p, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:10, labels=nd$year)
##   arrows(1:10, lower, 1:10, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:10)-0.1, p, col=1, lwd = 1, pch=16)
##   legend(7.5, 1, c('Parameter','Estimate'), col=c(1,4), pch=c(16, 1),
##     cex=0.8)
##   })
## 
## par(op)


###################################################
### code chunk number 13: colext.Rnw:881-898
###################################################
# turnover(): derive annual turnover rates from a fitted colext model with
# scalar first-year occupancy and year-specific gamma/epsilon vectors.
turnover <- function(fm) {
    psi.hat <- plogis(coef(fm, type="psi"))
    if(length(psi.hat) > 1)
        stop("this function only works if psi is scalar")
    T <- getData(fm)@numPrimary
    tau.hat <- numeric(T-1)
    gamma.hat <- plogis(coef(fm, type="col"))
    phi.hat <- 1 - plogis(coef(fm, type="ext"))
    if(length(gamma.hat) != T-1 | length(phi.hat) != T-1)
        stop("this function only works if gamma and phi T-1 vectors")
    # Project occupancy forward and compute turnover for each interval.
    for(t in 2:T) {
        psi.hat[t] <- psi.hat[t-1]*phi.hat[t-1] +
            (1-psi.hat[t-1])*gamma.hat[t-1]
        tau.hat[t-1] <- gamma.hat[t-1]*(1-psi.hat[t-1]) / psi.hat[t]
    }
    return(tau.hat)
}


###################################################
### code chunk number 14: colext.Rnw:981-995 (eval = FALSE)
###################################################
## 
## chisq <- function(fm) {
##     umf <- getData(fm)
##     y <- getY(umf)
##     sr <- fm@sitesRemoved
##     if(length(sr)>0)
##         y <- y[-sr,,drop=FALSE]
##     fv <- fitted(fm, na.rm=TRUE)
##     y[is.na(fv)] <- NA
##     sum((y-fv)^2/(fv*(1-fv)))
##     }
## 
## set.seed(344)
## pb.gof <- parboot(m0, statistic=chisq, nsim=100)


###################################################
### code chunk number 15: gof (eval = FALSE)
###################################################
## plot(pb.gof, xlab=expression(chi^2), main="", col=gray(0.95),
##     xlim=c(7300, 7700))


###################################################
### code chunk number 16: colext.Rnw:1039-1041
###################################################
data(crossbill)
colnames(crossbill)


###################################################
### code chunk number 17: colext.Rnw:1078-1081
###################################################
DATE <- as.matrix(crossbill[,32:58])
y.cross <- as.matrix(crossbill[,5:31])
y.cross[is.na(DATE) != is.na(y.cross)] <- NA


###################################################
### code chunk number 18: colext.Rnw:1093-1096
###################################################
sd.DATE <- sd(c(DATE), na.rm=TRUE)
mean.DATE <- mean(DATE, na.rm=TRUE)
DATE <- (DATE - mean.DATE) / sd.DATE


###################################################
### code chunk number 19: colext.Rnw:1106-1112
###################################################
years <- as.character(1999:2007)
years <- matrix(years, nrow(crossbill), 9, byrow=TRUE)
umf <- unmarkedMultFrame(y=y.cross, siteCovs=crossbill[,2:3],
    yearlySiteCovs=list(year=years), obsCovs=list(date=DATE),
    numPrimary=9)


###################################################
### code chunk number 20: colext.Rnw:1137-1139 (eval = FALSE)
###################################################
## # A model with constant parameters
## fm0 <- colext(~1, ~1, ~1, ~1, umf)


###################################################
### code chunk number 21: colext.Rnw:1141-1143 (eval = FALSE)
###################################################
## # Like fm0, but with year-dependent detection
## fm1 <- colext(~1, ~1, ~1, ~year, umf)


###################################################
### code chunk number 22: colext.Rnw:1145-1147 (eval = FALSE)
###################################################
## # Like fm0, but with year-dependent colonization and extinction
## fm2 <- colext(~1, ~year-1, ~year-1, ~1, umf)


###################################################
### code chunk number 23: colext.Rnw:1149-1151 (eval = FALSE)
###################################################
## # A fully time-dependent model
## fm3 <- colext(~1, ~year-1, ~year-1, ~year, umf)


###################################################
### code chunk number 24: colext.Rnw:1153-1155 (eval = FALSE)
###################################################
## # Like fm3 with forest-dependence of 1st-year occupancy
## fm4 <- colext(~forest, ~year-1, ~year-1, ~year, umf)


###################################################
### code chunk number 25: colext.Rnw:1157-1160 (eval = FALSE)
###################################################
## # Like fm4 with date- and year-dependence of detection
## fm5 <- colext(~forest, ~year-1, ~year-1, ~year + date + I(date^2),
##     umf, starts=c(coef(fm4), 0, 0))


###################################################
### code chunk number 26: colext.Rnw:1162-1165 (eval = FALSE)
###################################################
## # Same as fm5, but with detection in addition depending on forest cover
## fm6 <- colext(~forest, ~year-1, ~year-1, ~year + date + I(date^2) +
##     forest, umf)


###################################################
### code chunk number 27: cov (eval = FALSE)
###################################################
## op <- par(mfrow=c(1,2), mai=c(0.8,0.8,0.1,0.1))
## 
## nd <- data.frame(forest=seq(0, 100, length=50))
## E.psi <- predict(fm6, type="psi", newdata=nd, appendData=TRUE)
## 
## with(E.psi, {
##   plot(forest, Predicted, ylim=c(0,1), type="l",
##     xlab="Percent cover of forest",
##     ylab=expression(hat(psi)), cex.lab=0.8, cex.axis=0.8)
##   lines(forest, Predicted+1.96*SE, col=gray(0.7))
##   lines(forest, Predicted-1.96*SE, col=gray(0.7))
##   })
## 
## nd <- data.frame(date=seq(-2, 2, length=50),
##     year=factor("2005", levels=c(unique(years))),
##     forest=50)
## E.p <- predict(fm6, type="det", newdata=nd, appendData=TRUE)
## E.p$dateOrig <- E.p$date*sd.DATE + mean.DATE
## 
## with(E.p, {
##   plot(dateOrig, Predicted, ylim=c(0,1), type="l",
##     xlab="Julian date", ylab=expression( italic(p) ),
##     cex.lab=0.8, cex.axis=0.8)
##   lines(dateOrig, Predicted+1.96*SE, col=gray(0.7))
##   lines(dateOrig, Predicted-1.96*SE, col=gray(0.7))
##   })
## par(op)
/unmarked/inst/doc/colext.R
no_license
ingted/R-Examples
R
false
false
12,244
r
### R code from vignette source 'colext.Rnw'
# Stangle-extracted R code for the unmarked 'colext' (dynamic occupancy)
# vignette.  Chunks flagged "(eval = FALSE)" are shown in the vignette
# text but left commented out ("## ") here, so sourcing this file runs
# only the live chunks.

###################################################
### code chunk number 1: colext.Rnw:1-3
###################################################
options(width=70)
options(continue=" ")


###################################################
### code chunk number 2: colext.Rnw:377-416
###################################################
# Simulate a dynamic (multi-season) occupancy data set.
M <- 250                                # Number of sites
J <- 3                                  # num secondary sample periods
T <- 10                                 # num primary sample periods

psi <- rep(NA, T)                       # Occupancy probability
muZ <- z <- array(dim = c(M, T))        # Expected and realized occurrence
y <- array(NA, dim = c(M, J, T))        # Detection histories

# Fixed seed so the simulated data (and hence the vignette output) are
# reproducible; do not reorder the random draws below.
set.seed(13973)
psi[1] <- 0.4                           # Initial occupancy probability
p <- c(0.3,0.4,0.5,0.5,0.1,0.3,0.5,0.5,0.6,0.2)
phi <- runif(n=T-1, min=0.6, max=0.8)   # Survival probability (1-epsilon)
gamma <- runif(n=T-1, min=0.1, max=0.2) # Colonization probability

# Generate latent states of occurrence
# First year
z[,1] <- rbinom(M, 1, psi[1])           # Initial occupancy state
# Later years
for(i in 1:M){                          # Loop over sites
   for(k in 2:T){                       # Loop over years
      # NOTE(review): muZ is declared M x T but indexed with a single
      # subscript (muZ[k]); the z draw below uses the value just
      # assigned, so z is simulated correctly, but the stored muZ
      # matrix is incomplete -- presumably muZ[i,k] was intended.
      muZ[k] <- z[i, k-1]*phi[k-1] +
         (1-z[i, k-1])*gamma[k-1]
      z[i,k] <- rbinom(1, 1, muZ[k])
   }
}

# Generate detection/non-detection data
for(i in 1:M){
   for(k in 1:T){
      prob <- z[i,k] * p[k]             # detection only where occupied
      for(j in 1:J){
         y[i,j,k] <- rbinom(1, 1, prob)
      }
   }
}

# Compute annual population occupancy
for (k in 2:T){
   psi[k] <- psi[k-1]*phi[k-1] + (1-psi[k-1])*gamma[k-1]
}


###################################################
### code chunk number 3: sim
###################################################
# Plot true occupancy trajectory vs the naive ("observed") estimate.
plot(1:T, colMeans(z), type = "b", xlab = "Year",
     ylab = "Proportion of sites occupied",
     col = "black", xlim=c(0.5, 10.5), xaxp=c(1,10,9),
     ylim = c(0,0.6), lwd = 2, lty = 1,
     frame.plot = FALSE, las = 1, pch=16)

# Apparent occupancy: site detected at least once in a year.
psi.app <- colMeans(apply(y, c(1,3), max))
lines(1:T, psi.app, type = "b", col = "blue", lty=3, lwd = 2)
legend(1, 0.6, c("truth", "observed"),
       col=c("black", "blue"), lty=c(1,3), pch=c(16,1))


###################################################
### code chunk number 4: colext.Rnw:457-458
###################################################
library(unmarked)


###################################################
### code chunk number 5: colext.Rnw:467-468
###################################################
# Flatten the M x J x T detection array to the wide M x (J*T) layout
# expected by unmarkedMultFrame.
yy <- matrix(y, M, J*T)


###################################################
### code chunk number 6: colext.Rnw:476-478
###################################################
year <- matrix(c('01','02','03','04','05','06','07','08','09','10'),
               nrow(yy), T, byrow=TRUE)


###################################################
### code chunk number 7: colext.Rnw:493-498
###################################################
simUMF <- unmarkedMultFrame(
    y = yy,
    yearlySiteCovs = list(year = year),
    numPrimary=T)
summary(simUMF)


###################################################
### code chunk number 8: colext.Rnw:577-578
###################################################
# Back-transform a logit-scale estimate to a probability.
plogis(-0.813)


###################################################
### code chunk number 9: colext.Rnw:647-652 (eval = FALSE)
###################################################
## m1 <- colext(psiformula = ~1,   # First-year occupancy
##     gammaformula = ~ year-1,    # Colonization
##     epsilonformula = ~ year-1,  # Extinction
##     pformula = ~ year-1,        # Detection
##     data = simUMF)


###################################################
### code chunk number 10: colext.Rnw:654-655 (eval = FALSE)
###################################################
## m1


###################################################
### code chunk number 11: colext.Rnw:736-741 (eval = FALSE)
###################################################
## nd <- data.frame(year=c('01','02','03','04','05','06','07','08','09'))
## E.ext <- predict(m1, type='ext', newdata=nd)
## E.col <- predict(m1, type='col', newdata=nd)
## nd <- data.frame(year=c('01','02','03','04','05','06','07','08','09','10'))
## E.det <- predict(m1, type='det', newdata=nd)


###################################################
### code chunk number 12: yearlysim (eval = FALSE)
###################################################
## op <- par(mfrow=c(3,1), mai=c(0.6, 0.6, 0.1, 0.1))
##
## with(E.ext, {   # Plot for extinction probability
##   plot(1:9, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Extinction probability ( ', epsilon, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:9, labels=nd$year[1:9])
##   arrows(1:9, lower, 1:9, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:9)-0.1, 1-phi, col=1, lwd = 1, pch=16)
##   legend(7, 1, c('Parameter', 'Estimate'), col=c(1,4), pch=c(16, 1),
##          cex=0.8)
##   })
##
## with(E.col, {   # Plot for colonization probability
##   plot(1:9, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Colonization probability ( ', gamma, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:9, labels=nd$year[1:9])
##   arrows(1:9, lower, 1:9, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:9)-0.1, gamma, col=1, lwd = 1, pch=16)
##   legend(7, 1, c('Parameter', 'Estimate'), col=c(1,4), pch=c(16, 1),
##          cex=0.8)
##   })
##
## with(E.det, {   # Plot for detection probability: note 10 years
##   plot(1:10, Predicted, pch=1, xaxt='n', xlab='Year',
##     ylab=expression(paste('Detection probability ( ', p, ' )')),
##     ylim=c(0,1), col=4)
##   axis(1, at=1:10, labels=nd$year)
##   arrows(1:10, lower, 1:10, upper, code=3, angle=90, length=0.03, col=4)
##   points((1:10)-0.1, p, col=1, lwd = 1, pch=16)
##   legend(7.5, 1, c('Parameter','Estimate'), col=c(1,4), pch=c(16, 1),
##          cex=0.8)
##   })
##
## par(op)


###################################################
### code chunk number 13: colext.Rnw:881-898
###################################################
# Annual turnover rates from a fitted colext model; requires scalar psi
# and T-1 vectors of colonization/extinction estimates.
turnover <- function(fm) {
    psi.hat <- plogis(coef(fm, type="psi"))
    if(length(psi.hat) > 1)
        stop("this function only works if psi is scalar")
    T <- getData(fm)@numPrimary
    tau.hat <- numeric(T-1)
    gamma.hat <- plogis(coef(fm, type="col"))
    phi.hat <- 1 - plogis(coef(fm, type="ext"))
    if(length(gamma.hat) != T-1 | length(phi.hat) != T-1)
        stop("this function only works if gamma and phi T-1 vectors")
    for(t in 2:T) {
        psi.hat[t] <- psi.hat[t-1]*phi.hat[t-1] +
            (1-psi.hat[t-1])*gamma.hat[t-1]
        tau.hat[t-1] <- gamma.hat[t-1]*(1-psi.hat[t-1]) / psi.hat[t-1]
    }
    return(tau.hat)
}


###################################################
### code chunk number 14: colext.Rnw:981-995 (eval = FALSE)
###################################################
##
## chisq <- function(fm) {
##     umf <- getData(fm)
##     y <- getY(umf)
##     sr <- fm@sitesRemoved
##     if(length(sr)>0)
##         y <- y[-sr,,drop=FALSE]
##     fv <- fitted(fm, na.rm=TRUE)
##     y[is.na(fv)] <- NA
##     sum((y-fv)^2/(fv*(1-fv)))
##     }
##
## set.seed(344)
## pb.gof <- parboot(m0, statistic=chisq, nsim=100)


###################################################
### code chunk number 15: gof (eval = FALSE)
###################################################
## plot(pb.gof, xlab=expression(chi^2), main="", col=gray(0.95),
##      xlim=c(7300, 7700))


###################################################
### code chunk number 16: colext.Rnw:1039-1041
###################################################
data(crossbill)
colnames(crossbill)


###################################################
### code chunk number 17: colext.Rnw:1078-1081
###################################################
DATE <- as.matrix(crossbill[,32:58])
y.cross <- as.matrix(crossbill[,5:31])
# Align missingness of detections and dates.
y.cross[is.na(DATE) != is.na(y.cross)] <- NA


###################################################
### code chunk number 18: colext.Rnw:1093-1096
###################################################
# Standardize DATE; keep mean/sd so the scale can be inverted later.
sd.DATE <- sd(c(DATE), na.rm=TRUE)
mean.DATE <- mean(DATE, na.rm=TRUE)
DATE <- (DATE - mean.DATE) / sd.DATE


###################################################
### code chunk number 19: colext.Rnw:1106-1112
###################################################
years <- as.character(1999:2007)
years <- matrix(years, nrow(crossbill), 9, byrow=TRUE)
umf <- unmarkedMultFrame(y=y.cross,
    siteCovs=crossbill[,2:3], yearlySiteCovs=list(year=years),
    obsCovs=list(date=DATE), numPrimary=9)


###################################################
### code chunk number 20: colext.Rnw:1137-1139 (eval = FALSE)
###################################################
## # A model with constant parameters
## fm0 <- colext(~1, ~1, ~1, ~1, umf)


###################################################
### code chunk number 21: colext.Rnw:1141-1143 (eval = FALSE)
###################################################
## # Like fm0, but with year-dependent detection
## fm1 <- colext(~1, ~1, ~1, ~year, umf)


###################################################
### code chunk number 22: colext.Rnw:1145-1147 (eval = FALSE)
###################################################
## # Like fm0, but with year-dependent colonization and extinction
## fm2 <- colext(~1, ~year-1, ~year-1, ~1, umf)


###################################################
### code chunk number 23: colext.Rnw:1149-1151 (eval = FALSE)
###################################################
## # A fully time-dependent model
## fm3 <- colext(~1, ~year-1, ~year-1, ~year, umf)


###################################################
### code chunk number 24: colext.Rnw:1153-1155 (eval = FALSE)
###################################################
## # Like fm3 with forest-dependence of 1st-year occupancy
## fm4 <- colext(~forest, ~year-1, ~year-1, ~year, umf)


###################################################
### code chunk number 25: colext.Rnw:1157-1160 (eval = FALSE)
###################################################
## # Like fm4 with date- and year-dependence of detection
## fm5 <- colext(~forest, ~year-1, ~year-1, ~year + date + I(date^2),
##               umf, starts=c(coef(fm4), 0, 0))


###################################################
### code chunk number 26: colext.Rnw:1162-1165 (eval = FALSE)
###################################################
## # Same as fm5, but with detection in addition depending on forest cover
## fm6 <- colext(~forest, ~year-1, ~year-1, ~year + date + I(date^2) +
##               forest, umf)


###################################################
### code chunk number 27: cov (eval = FALSE)
###################################################
## op <- par(mfrow=c(1,2), mai=c(0.8,0.8,0.1,0.1))
##
## nd <- data.frame(forest=seq(0, 100, length=50))
## E.psi <- predict(fm6, type="psi", newdata=nd, appendData=TRUE)
##
## with(E.psi, {
##     plot(forest, Predicted, ylim=c(0,1), type="l",
##          xlab="Percent cover of forest",
##          ylab=expression(hat(psi)), cex.lab=0.8, cex.axis=0.8)
##     lines(forest, Predicted+1.96*SE, col=gray(0.7))
##     lines(forest, Predicted-1.96*SE, col=gray(0.7))
##     })
##
## nd <- data.frame(date=seq(-2, 2, length=50),
##                  year=factor("2005", levels=c(unique(years))),
##                  forest=50)
## E.p <- predict(fm6, type="det", newdata=nd, appendData=TRUE)
## E.p$dateOrig <- E.p$date*sd.DATE + mean.DATE
##
## with(E.p, {
##     plot(dateOrig, Predicted, ylim=c(0,1), type="l",
##          xlab="Julian date", ylab=expression( italic(p) ),
##          cex.lab=0.8, cex.axis=0.8)
##     lines(dateOrig, Predicted+1.96*SE, col=gray(0.7))
##     lines(dateOrig, Predicted-1.96*SE, col=gray(0.7))
##     })
## par(op)
##' Split requested packages by repository and fetch download stats
##'
##' Looks up each element of \code{packages} in the current CRAN and
##' Bioconductor (devel) package indexes, warns about any package found
##' in neither, and delegates the available ones to \code{cran_stats()}
##' and \code{bioc_stats()} respectively.
##'
##' @param packages character vector of package names
##' @return a list with elements \code{cran_stats} and \code{bioc_stats}
##' @importFrom utils available.packages
dlstats <- function(packages) {
    ## Current package indexes (network access required).
    cran_db <- available.packages(repos="http://cloud.r-project.org/")
    bioc_db <- available.packages(repos="https://bioconductor.org/packages/devel/bioc")
    cran_pkg <- cran_db[, "Package"]
    bioc_pkg <- bioc_db[, "Package"]

    ## Packages resolvable in at least one repository.
    pkgs <- packages[packages %in% c(cran_pkg, bioc_pkg)]

    ## BUG FIX: the original warned unconditionally, emitting a warning
    ## with an empty package name even when every package was found.
    ## Only warn when something is actually missing.
    missing_pkgs <- packages[! packages %in% pkgs]
    if (length(missing_pkgs) > 0) {
        warning("--> Packages: '", paste(missing_pkgs, collapse='/'),
                "', not available in either CRAN or Bioconductor...",
                call. = FALSE)
    }

    ## Dispatch each subset to its repository-specific stats helper
    ## (defined elsewhere in this package).
    ds1 <- cran_stats(pkgs[pkgs %in% cran_pkg])
    ds2 <- bioc_stats(pkgs[pkgs %in% bioc_pkg])
    list(cran_stats = ds1, bioc_stats = ds2)
}
/R/dlstats.R
no_license
rlugojr/dlstats
R
false
false
701
r
##' Split requested packages by repository and fetch download stats
##'
##' Looks up each element of \code{packages} in the current CRAN and
##' Bioconductor (devel) package indexes, warns about any package found
##' in neither, and delegates the available ones to \code{cran_stats()}
##' and \code{bioc_stats()} respectively.
##'
##' @param packages character vector of package names
##' @return a list with elements \code{cran_stats} and \code{bioc_stats}
##' @importFrom utils available.packages
dlstats <- function(packages) {
    ## Current package indexes (network access required).
    cran_db <- available.packages(repos="http://cloud.r-project.org/")
    bioc_db <- available.packages(repos="https://bioconductor.org/packages/devel/bioc")
    cran_pkg <- cran_db[, "Package"]
    bioc_pkg <- bioc_db[, "Package"]

    ## Packages resolvable in at least one repository.
    pkgs <- packages[packages %in% c(cran_pkg, bioc_pkg)]

    ## BUG FIX: the original warned unconditionally, emitting a warning
    ## with an empty package name even when every package was found.
    ## Only warn when something is actually missing.
    missing_pkgs <- packages[! packages %in% pkgs]
    if (length(missing_pkgs) > 0) {
        warning("--> Packages: '", paste(missing_pkgs, collapse='/'),
                "', not available in either CRAN or Bioconductor...",
                call. = FALSE)
    }

    ## Dispatch each subset to its repository-specific stats helper
    ## (defined elsewhere in this package).
    ds1 <- cran_stats(pkgs[pkgs %in% cran_pkg])
    ds2 <- bioc_stats(pkgs[pkgs %in% bioc_pkg])
    list(cran_stats = ds1, bioc_stats = ds2)
}