content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmo_optimal_solution.R
\name{kmo_optimal_solution}
\alias{kmo_optimal_solution}
\title{Calculates the Optimal Solution for Kaiser-Meyer-Olkin (KMO) in a Dataframe}
\usage{
kmo_optimal_solution(df, squared = TRUE)
}
\arguments{
\item{df}{a dataframe with only \code{int} or \code{num} type of variables}
\item{squared}{TRUE if matrix is squared (such as adjacency matrices), FALSE otherwise}
}
\value{
A list with \enumerate{
\item \code{df} - A dataframe that has reached its optimal solution in terms of KMO values
\item \code{removed} - A list of removed variables, ordered from the first to the last removed during the procedure
\item \code{kmo_results} - Results of the final iteration of the \code{\link{kmo}} function
}
}
\description{
\code{kmo_optimal_solution()} calls upon the \code{\link[FactorAssumptions]{kmo}} function to iterate over the variables of a dataframe.
}
\details{
If it finds any individual KMO below the optimal value of 0.5, it removes the variable with the lowest KMO value, repeating until no variable has a non-optimal KMO value.
}
\examples{
set.seed(123)
df <- as.data.frame(matrix(rnorm(100*10, 1, .5), ncol=10))
kmo_optimal_solution(df, squared = FALSE)
}
\seealso{
\code{\link{kmo}} for kmo computation function
}
| /man/kmo_optimal_solution.Rd | no_license | storopoli/FactorAssumptions | R | false | true | 1,307 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmo_optimal_solution.R
\name{kmo_optimal_solution}
\alias{kmo_optimal_solution}
\title{Calculates the Optimal Solution for Kaiser-Meyer-Olkin (KMO) in a Dataframe}
\usage{
kmo_optimal_solution(df, squared = TRUE)
}
\arguments{
\item{df}{a dataframe with only \code{int} or \code{num} type of variables}
\item{squared}{TRUE if matrix is squared (such as adjacency matrices), FALSE otherwise}
}
\value{
A list with \enumerate{
\item \code{df} - A dataframe that has reached its optimal solution in terms of KMO values
\item \code{removed} - A list of removed variables, ordered from the first to the last removed during the procedure
\item \code{kmo_results} - Results of the final iteration of the \code{\link{kmo}} function
}
}
\description{
\code{kmo_optimal_solution()} calls upon the \code{\link[FactorAssumptions]{kmo}} function to iterate over the variables of a dataframe.
}
\details{
If it finds any individual KMO below the optimal value of 0.5, it removes the variable with the lowest KMO value, repeating until no variable has a non-optimal KMO value.
}
\examples{
set.seed(123)
df <- as.data.frame(matrix(rnorm(100*10, 1, .5), ncol=10))
kmo_optimal_solution(df, squared = FALSE)
}
\seealso{
\code{\link{kmo}} for kmo computation function
}
|
\name{predict.blackbox}
\alias{predict.blackbox}
\title{ Predict method of blackbox objects }
\description{
\code{predict.blackbox} reads a \code{blackbox} object and uses the estimates to generate a matrix of predicted values.
}
\usage{
\method{predict}{blackbox}(object, dims=1, ...)
}
\arguments{
\item{object}{ A \code{blackbox} output object. }
\item{dims}{ Number of dimensions used in prediction. Must be equal to or less than number of dimensions used in estimation. }
\item{...}{ Ignored. }
}
\value{
A matrix of predicted values generated from the parameters estimated from a \code{blackbox} object.
}
\author{
Keith Poole \email{ktpoole@uga.edu}
Howard Rosenthal \email{hr31@nyu.edu}
Jeffrey Lewis \email{jblewis@ucla.edu}
James Lo \email{lojames@usc.edu}
Royce Carroll \email{rcarroll@rice.edu}
}
\seealso{
'\link{blackbox}', '\link{Issues1980}'
}
\examples{
## Estimate blackbox object from example and call predict function
data(Issues1980)
Issues1980[Issues1980[,"abortion1"]==7,"abortion1"] <- 8 #missing recode
Issues1980[Issues1980[,"abortion2"]==7,"abortion2"] <- 8 #missing recode
### This command conducts estimates, which we instead load using data()
# Issues1980_bb <- blackbox(Issues1980,missing=c(0,8,9),verbose=FALSE,dims=3,minscale=8)
data(Issues1980_bb)
prediction <- predict.blackbox(Issues1980_bb,dims=3)
## Examine predicted vs. observed values for first 10 respondents
## Note that 4th and 6th respondents are NA because of missing data
Issues1980[1:10,]
prediction[1:10,]
## Check correlation across all predicted vs. observed, excluding missing values
prediction[which(Issues1980 \%in\% c(0,8,9))] <- NA
cor(as.numeric(prediction), as.numeric(Issues1980), use="pairwise.complete")
}
\keyword{ multivariate }
| /man/predict.blackbox.Rd | no_license | cran/basicspace | R | false | false | 1,847 | rd | \name{predict.blackbox}
\alias{predict.blackbox}
\title{ Predict method of blackbox objects }
\description{
\code{predict.blackbox} reads a \code{blackbox} object and uses the estimates to generate a matrix of predicted values.
}
\usage{
\method{predict}{blackbox}(object, dims=1, ...)
}
\arguments{
\item{object}{ A \code{blackbox} output object. }
\item{dims}{ Number of dimensions used in prediction. Must be equal to or less than number of dimensions used in estimation. }
\item{...}{ Ignored. }
}
\value{
A matrix of predicted values generated from the parameters estimated from a \code{blackbox} object.
}
\author{
Keith Poole \email{ktpoole@uga.edu}
Howard Rosenthal \email{hr31@nyu.edu}
Jeffrey Lewis \email{jblewis@ucla.edu}
James Lo \email{lojames@usc.edu}
Royce Carroll \email{rcarroll@rice.edu}
}
\seealso{
'\link{blackbox}', '\link{Issues1980}'
}
\examples{
## Estimate blackbox object from example and call predict function
data(Issues1980)
Issues1980[Issues1980[,"abortion1"]==7,"abortion1"] <- 8 #missing recode
Issues1980[Issues1980[,"abortion2"]==7,"abortion2"] <- 8 #missing recode
### This command conducts estimates, which we instead load using data()
# Issues1980_bb <- blackbox(Issues1980,missing=c(0,8,9),verbose=FALSE,dims=3,minscale=8)
data(Issues1980_bb)
prediction <- predict.blackbox(Issues1980_bb,dims=3)
## Examine predicted vs. observed values for first 10 respondents
## Note that 4th and 6th respondents are NA because of missing data
Issues1980[1:10,]
prediction[1:10,]
## Check correlation across all predicted vs. observed, excluding missing values
prediction[which(Issues1980 \%in\% c(0,8,9))] <- NA
cor(as.numeric(prediction), as.numeric(Issues1980), use="pairwise.complete")
}
\keyword{ multivariate }
|
## Exploratory plots of reactor temperature, conductivity and diversity
## over time, faceted by reactor cycle and phase (data: Metadata.csv).

df <- read.csv("Metadata.csv", nrows = 77)

# Which variables do we have?
str(df)

# Make Reactor.cycle a factor so the legend shows discrete levels (1, 2)
# instead of a continuous colour scale.
df$Reactor.cycle <- as.factor(df$Reactor.cycle)

library("ggplot2")

# Quick first look: temperature over time, coloured by reactor cycle.
ggplot(data = df, aes(x = Timepoint, y = temp, fill = Reactor.cycle)) +
  geom_point(shape = 21, size = 4) +
  geom_line()

####### first plot #######
# Store the ggplot object (fixed typo: "Reactor.cylce" -> "Reactor.cycle").
p1 <- ggplot(data = df, aes(x = Timepoint, y = temp, fill = Reactor.cycle))
p1 <- p1 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw() +
  geom_line()
# Facet it by reactor cycle.
p3 <- p1 + facet_grid(~Reactor.cycle)
# What levels does Reactor.phase have?
unique(df$Reactor.phase)
# Plot everything as a function of reactor phase.
p4 <- p1 + facet_grid(Reactor.phase ~ Reactor.cycle)

##### second plot ######
# Same temperature plot, but filled by reactor phase and with lines
# coloured by cycle.
p1 <- ggplot(data = df, aes(x = Timepoint, y = temp, fill = Reactor.phase))
p1 <- p1 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw() +
  geom_line(aes(color = Reactor.cycle))
p3 <- p1 + facet_grid(~Reactor.cycle)
unique(df$Reactor.phase)
p4 <- p1 + facet_grid(Reactor.phase ~ Reactor.cycle)

###################################
### right side: conductivity ######
### middle: diversity DO ##########
### left: cell density ############

############### conductivity ######
# Re-read the raw data; re-apply the factor conversion, otherwise
# Reactor.cycle reverts to numeric and the colour scale becomes continuous.
df <- read.csv("Metadata.csv", nrows = 77)
df$Reactor.cycle <- as.factor(df$Reactor.cycle)
pp1 <- ggplot(data = df, aes(x = Timepoint, y = Conductivity, fill = Reactor.phase))
pp1 <- pp1 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw() +
  geom_line(aes(color = Reactor.cycle))
pp3 <- pp1 + facet_grid(~Reactor.cycle)
unique(df$Reactor.phase)
pp4 <- pp1 + facet_grid(Reactor.phase ~ Reactor.cycle)

######### diversity DO #############
ppp1 <- ggplot(data = df, aes(x = Timepoint, y = Diversity...D0, fill = Reactor.phase))
ppp1 <- ppp1 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw()
# Fixed: build ppp2/ppp3 from their own base plots (the original
# reassigned ppp1's layers, so the D1 and D3 plots were never built).
ppp2 <- ggplot(data = df, aes(x = Timepoint, y = Diversity...D1, fill = Reactor.phase))
ppp2 <- ppp2 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw()
ppp3 <- ggplot(data = df, aes(x = Timepoint, y = Diversity...D3, fill = Reactor.phase))
ppp3 <- ppp3 + geom_point(shape = 21, size = 4, alpha = 0.5) + theme_bw()
# NOTE(review): multiplot() is not defined in this file -- it is the
# "Cookbook for R" helper; make sure it is sourced before this call.
multiplot(ppp1, ppp2, ppp3, cols = 2)
| /SCRIPT.R | no_license | FelixTaveirne/SWC_test | R | false | false | 2,265 | r | df <- read.csv("Metadata.csv",nrows=77)
## NOTE(review): duplicate copy of SCRIPT.R above; code left unchanged,
## known issues are flagged inline as comments only.
# which variables?
str(df)
df$Reactor.cycle <- as.factor(df$Reactor.cycle) #to make 1 and 2 a discrete legend, not a continuous one
# start plotting
library("ggplot2")
# Quick look: temperature over time, coloured by reactor cycle.
ggplot(data=df,aes(x= Timepoint,y=temp,fill=Reactor.cycle))+
geom_point(shape=21,size=4) +
geom_line()
####### first plot#######
# store GGplot object
# BUG(review): "Reactor.cylce" is a typo for "Reactor.cycle"; this aes()
# references a column that does not exist.
p1 <- ggplot(data=df,aes(x= Timepoint,y=temp,fill=Reactor.cylce))
p1 <- p1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw() +
geom_line()
# Facet it
p3 <- p1 + facet_grid(~Reactor.cycle)
# what levels does Reactor.phase have?
unique(df$Reactor.phase)
# plot everything as a function of reactor phase
p4 <- p1 + facet_grid(Reactor.phase~Reactor.cycle)
#####second plot######
# store GGplot object
p1 <- ggplot(data=df,aes(x= Timepoint,y=temp,fill=Reactor.phase))
p1 <- p1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw() +
geom_line(aes(color=Reactor.cycle))
# Facet it
p3 <- p1 + facet_grid(~Reactor.cycle)
# what levels does Reactor.phase have?
unique(df$Reactor.phase)
# plot everything as a function of reactor phase
p4 <- p1 + facet_grid(Reactor.phase~Reactor.cycle)
###################################
### right side: conductivty #######
### middle: diversity DO ##########
### left: cell density ############
############### conductivity ######
# NOTE(review): re-reading the CSV discards the as.factor() conversion above,
# so Reactor.cycle is numeric again and the line colour scale is continuous.
df <- read.csv("Metadata.csv",nrows=77)
pp1 <- ggplot(data=df,aes(x= Timepoint,y=Conductivity,fill=Reactor.phase))
pp1 <- pp1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw() +
geom_line(aes(color=Reactor.cycle))
# Facet it
pp3 <- pp1 + facet_grid(~Reactor.cycle)
# what levels does Reactor.phase have?
unique(df$Reactor.phase)
# plot everything as a function of reactor phase
pp4 <- pp1 + facet_grid(Reactor.phase~Reactor.cycle)
#########diversity DO #############
ppp1 <- ggplot(data=df,aes(x= Timepoint,y=Diversity...D0,fill=Reactor.phase))
ppp1 <- ppp1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw()
# BUG(review): the next two assignments add layers to ppp1 instead of
# ppp2/ppp3, so the D1 and D3 plots are never actually built.
ppp2 <- ggplot(data=df,aes(x= Timepoint,y=Diversity...D1,fill=Reactor.phase))
ppp2 <- ppp1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw()
###
ppp3 <- ggplot(data=df,aes(x= Timepoint,y=Diversity...D3,fill=Reactor.phase))
ppp3 <- ppp1 + geom_point(shape=21,size=4,alpha = 0.5) + theme_bw()
# NOTE(review): multiplot() is not defined in this file (Cookbook-for-R helper).
multiplot(ppp1,ppp2,ppp3,cols=2)
|
#' @import utils
# Register names used inside non-standard-evaluation (tidyselect/data-masking)
# expressions so that R CMD check does not flag them as undefined globals.
utils::globalVariables(c("where", "contains"))
| /R/globals.R | permissive | mstarrant/migrate | R | false | false | 64 | r | #' @import utils
utils::globalVariables(c("where", "contains"))
|
\name{data}
\docType{data}
\alias{data}
\title{Trait data}
\usage{
data
}
\description{
Simulated trait records data for the package P-SIMEX.
It contains information on each individual's trait and covariates.
}
\format{
"sex" "f_inb" "animal.id" "id" "year" "animal" "y"
\tabular{ll}{
sex:\tab Sex of the individual\cr
f_inb:\tab Inbreeding coefficient of the individual
(it can also be calculated from the pedigree)\cr
id:\tab Individual's id \cr
animal.id:\tab Individual's id (duplicate for animal model) \cr
animal:\tab Individual's id (duplicate for animal model) \cr
year:\tab Generation number of the individual in the pedigree
(it can also be extracted from the pedigree)\cr
y:\tab Individual's trait record\cr
}
}
\keyword{datasets}
\keyword{PSIMEX} | /man/data.Rd | no_license | cran/PSIMEX | R | false | false | 830 | rd | \name{data}
\docType{data}
\alias{data}
\title{Trait data}
\usage{
data
}
\description{
Simulated trait records data for the package P-SIMEX.
It contains information on each individual's trait and covariates.
}
\format{
"sex" "f_inb" "animal.id" "id" "year" "animal" "y"
\tabular{ll}{
sex:\tab Sex of the individual\cr
f_inb:\tab Inbreeding coefficient of the individual
(it can also be calculated from the pedigree)\cr
id:\tab Individual's id \cr
animal.id:\tab Individual's id (duplicate for animal model) \cr
animal:\tab Individual's id (duplicate for animal model) \cr
year:\tab Generation number of the individual in the pedigree
(it can also be extracted from the pedigree)\cr
y:\tab Individual's trait record\cr
}
}
\keyword{datasets}
\keyword{PSIMEX} |
#' Calculates the estimated likelihood for a regression model under a general sample design
#'
#' \code{L.est} computes a Monte Carlo estimate of the (log-)likelihood of a
#' normal linear regression model when the sample was selected by an
#' outcome-dependent sampling (ODS) design.
#'
#' @details
#' The non-sampled part of the population is simulated \code{R} times by
#' resampling covariate rows from the sample (with weights proportional to
#' \code{1/pi.s}) and drawing fresh normal errors. The design probability
#' returned by \code{p.s} is averaged over the replicates on the log scale,
#' using a median shift for numerical stability, and added to the face-value
#' log-likelihood of the observed responses.
#'
#' @param beta vector of regression coefficients
#' @param sd error standard deviation
#' @param ys vector of sample values of the dependent variable
#' @param xs matrix of sample covariate values of dimension n by p
#' where n is the sample size (length(ys))
#' @param N the population size
#' @param p.s A function defining the ODS sample design.
#' It must take arguments ys, yr, specs and log, and return
#' the probability (or log of the probability) that a particular
#' sample was selected.
#' @param specs An object containing detailed specifications of the design.
#' @param log If FALSE, the function returns the likelihood, otherwise the log-likelihood
#' @param pi.s the probabilities of selection of the sample units
#' @param R the number of simulations used in the monte carlo approximation
#' of the likelihood
#' @param all.resamp.x Optional. An R by N-n integer-valued matrix
#' whose rows are SRSWR resamples from 1:n
#' @param all.errors.y Optional. An R by N-n real-valued matrix
#' of standard normal realisations.
#' @param verbose If TRUE, the function outputs information to the console.
#' @return The likelihood or log-likelihood.
#' @examples
#' data(population_example)
#' L.est(beta=c(4,1),N=1000,sd=0.8,ys=sample.example$y,xs=cbind(1,sample.example$y),
#'       p.s=p.s.ushaped,specs=c(n0=10,tuner=0.1,return.what=1),log=TRUE,
#'       pi.s=sample.example$pi)
#' @export
L.est <- function(beta, sd, ys, xs, N, p.s, specs = NULL, log = FALSE, pi.s, R = 100,
                  all.resamp.x, all.errors.y, verbose = FALSE) {
  if (!is.matrix(xs)) xs <- matrix(xs, ncol = 1)
  p <- ncol(xs)
  n <- length(ys)
  # Log-likelihood of the observed responses, ignoring the sampling design.
  l.misspecified <- sum(dnorm(x = ys, mean = as.vector(xs %*% beta), sd = sd, log = TRUE))
  all.log.p.s <- rep(NA_real_, R)
  for (r in seq_len(R)) {
    # Indices and errors for the N - n non-sampled units: either supplied by
    # the caller (e.g. for common random numbers) or freshly simulated.
    if (missing(all.resamp.x)) resamp.x <- sample(x = 1:n, size = N - n, prob = 1 / pi.s, replace = TRUE)
    if (!missing(all.resamp.x)) resamp.x <- all.resamp.x[r, ]
    if (missing(all.errors.y)) errors.y <- rnorm(n = N - n)
    if (!missing(all.errors.y)) errors.y <- all.errors.y[r, ]
    # drop = FALSE keeps xr.sim a matrix when p == 1 (the original
    # subsetting collapsed it to a vector, making %*% non-conformable).
    xr.sim <- xs[resamp.x, , drop = FALSE]
    yr.sim <- as.vector(xr.sim %*% beta) + errors.y * sd
    all.log.p.s[r] <- p.s(ys = ys, yr = yr.sim, specs = specs, log = TRUE)
  }
  # Average the design probabilities on the log scale; subtracting the
  # median K before exponentiating avoids underflow/overflow.
  K <- median(all.log.p.s)
  log.sampdesign.factor <- log(mean(exp(all.log.p.s - K))) + K
  l <- l.misspecified + log.sampdesign.factor
  if (verbose) cat("l.misspecified=",l.misspecified," ; l=",l," beta=(",paste(beta,sep=""),") ; sd=",sd,"\n")
  if (log) l else exp(l)
}
| /R/L_est.R | no_license | rgcstats/ODS | R | false | false | 2,630 | r | #' Calculates the estimated likelihood for a regression model under a general sample design
#'
#' This function calculates the estimated likelihood
#'
#' @details
#' Add some details later.
#'
#' @param beta vector of regression coefficients
#' @param sd error standard deviation
#' @param ys vector of sample values of the dependent variable
#' @param xs matrix of sample covariate values of dimension n by p
#' where n is the sample size (length(ys))
#' @param N the population size
#' @param p.s A function defining the ODS sample design.
#' It must take arguments ys, yr, log and stuff, and return
#' the probability (or log of the probability) that a particular
#' sample was selected.
#' @param specs An object containing detailed specifications of the design.
#' @param log If FALSE, the function returns the likelihood, otherwise the log-likelihood
#' @param pi.s the probabilities of selection of the sample units
#' @param R the number of simulations used in the monte carlo approximation
#' of the likelihood
#' @param all.resamp.x Optional. An R by N-n integer-valued matrix
#' whose rows are SRSWR resamples from 1:n
#' @param all.errors.y Optional. An R by N-n real-valued matrix
#' of standard normal realisations.
#' @param verbose If TRUE, the function outputs information to the console.
#' @return The likelihood or log-likelihood.
#' @examples
#' data(population_example)
#' L.est(beta=c(4,1),N=1000,sd=0.8,ys=sample.example$y,xs=cbind(1,sample.example$y),
#' p.s=p.s.ushaped,specs=c(n0=10,tuner=0.1,return.what=1),log=TRUE,
#' pi.s=sample.example$pi)
#' @export
L.est <- function(beta,sd,ys,xs,N,p.s,specs=NULL,log=FALSE,pi.s,R=100,
all.resamp.x,all.errors.y,verbose=FALSE){
if(!is.matrix(xs)) xs <- matrix(xs,ncol=1)
p <- ncol(xs)
n <- length(ys)
# Log-likelihood of the observed responses, ignoring the sampling design.
l.misspecified <- sum( dnorm(x=ys,mean=as.vector(xs%*%beta),sd=sd,log=T) )
all.log.p.s <- rep(NA,R)
for(r in c(1:R)){
# Resample covariates for the N - n non-sampled units (weights 1/pi.s)
# and draw their normal errors, unless the caller supplied them.
if(missing(all.resamp.x)) resamp.x <- sample(x=1:n,size=N-n,prob=1/pi.s,replace=T)
if(!missing(all.resamp.x)) resamp.x <- all.resamp.x[r,]
if(missing(all.errors.y)) errors.y <- rnorm(n=N-n)
if(!missing(all.errors.y)) errors.y <- all.errors.y[r,]
# NOTE(review): with a single covariate (p == 1) this subsetting drops the
# matrix to a vector and %*% becomes non-conformable -- consider drop = FALSE.
xr.sim <- xs[resamp.x,]
yr.sim <- as.vector(xr.sim %*% beta) + errors.y*sd
all.log.p.s[r] <- p.s(ys=ys,yr=yr.sim,specs=specs,log=T)
}
# Average the design probabilities on the log scale; the median shift K
# guards against underflow/overflow in exp().
K <- median(all.log.p.s)
log.sampdesign.factor <- log( mean(exp(all.log.p.s-K)) ) + K
l <- l.misspecified + log.sampdesign.factor
if(verbose) cat("l.misspecified=",l.misspecified," ; l=",l," beta=(",paste(beta,sep=""),") ; sd=",sd,"\n")
if(log) return(l)
if(!log) return(exp(l))
}
|
#' Copus 1
#'
#' Build the two stylings of Copus Figure 1 (in-sample prediction): a
#' scatter of observed points with a loess confidence ribbon and a linear
#' fit line.
#' @export
#' @return a list of plots
#' @import ggplot2
#' @import dplyr
#' @import scales
#' @import Hmisc
#' @import reshape2
sfi_plot_copus_1 <- function(){
  fig_data <- all_data$copus$f1
  # Version 1: light points, subtle loess ribbon, solid black lm line.
  version_one <- ggplot(fig_data, aes(x, y)) +
    geom_point(size = 1, color = 'black', alpha = 0.6) +
    geom_smooth(method = 'loess', linetype = 0, fill = 'black', alpha = 0.4) +
    geom_smooth(method = 'lm', color = 'black', se = FALSE) +
    labs(title = 'Figure 1. In Sample Prediction') +
    theme_sfi(lp = 'none',
              x_axis_title_style = 'bold',
              y_axis_title_style = 'bold',
              title_style = 'bold')
  # Version 2: heavier points, darker ribbon, dashed grey lm line.
  version_two <- ggplot(fig_data, aes(x, y)) +
    geom_point(size = 1.5, color = 'black', alpha = 0.9) +
    geom_smooth(method = 'loess', linetype = 0, se = TRUE,
                fill = 'black', alpha = 0.7) +
    geom_smooth(method = 'lm', color = 'darkgrey', linetype = 'dashed',
                se = FALSE, alpha = 0.4) +
    labs(title = 'Figure 1. In Sample Prediction',
         subtitle = 'Version 2') +
    theme_sfi(lp = 'none',
              x_axis_title_style = 'bold',
              y_axis_title_style = 'bold',
              title_style = 'bold')
  return(list(version_one, version_two))
}
| /R/sfi_plot_copus_1.R | no_license | databrew/sfi | R | false | false | 1,679 | r | #' Copus 1
#'
#' Generate a plot for Copus Figure 1
#' @export
#' @return a list of plots
#' @import ggplot2
#' @import dplyr
#' @import scales
#' @import Hmisc
#' @import reshape2
sfi_plot_copus_1 <- function(){
# # no scientific notation
# options(scipen = '999')
#
# # get data
# all_data is expected to be a package-level data object -- TODO confirm.
data <- all_data$copus$f1
# plot point, line, smooth data (version 1: loess ribbon + solid lm line)
g1 <- ggplot(data,
aes(x, y)) +
geom_point(size = 1,
color = 'black',
alpha = 0.6) +
geom_smooth(method = 'loess',
linetype =0,
fill = 'black',
alpha = 0.4) +
geom_smooth(method = 'lm',
color = 'black',
se = FALSE) +
labs(title = 'Figure 1. In Sample Prediction') +
theme_sfi(lp = 'none',
x_axis_title_style = 'bold',
y_axis_title_style = 'bold',
title_style = 'bold')
# plot point, line, smooth data (version 2: heavier points, dashed grey lm line)
g2 <- ggplot(data,
aes(x, y)) +
geom_point(size = 1.5,
color = 'black',
alpha = 0.9) +
geom_smooth(method = 'loess',
linetype = 0,
se = TRUE,
fill = 'black',
alpha = 0.7) +
geom_smooth(method = 'lm',
color = 'darkgrey',
linetype = 'dashed',
se = FALSE,
alpha = 0.4) +
labs(title = 'Figure 1. In Sample Prediction',
subtitle = 'Version 2') +
theme_sfi(lp = 'none',
x_axis_title_style = 'bold',
y_axis_title_style = 'bold',
title_style = 'bold')
# both stylings are returned; the caller picks which to render
return(list(g1, g2))
}
|
/Rscripts/11 Importing and Exporting.R | no_license | anhnguyendepocen/RProgrammingCentralBank | R | false | false | 2,023 | r | ||
# AFL fuzzing input: NULL community matrix `m`, zero repetitions, and a
# 5 x 7 abundance matrix `in_m` filled with extreme double values.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392674e+77, 5.48616546317643e+303, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Invoke the unexported CNull sampling routine with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615777155-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392674e+77, 5.48616546317643e+303, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reasytrie.R
\name{trie_contain}
\alias{trie_contain}
\title{Search if a word is present in the trie.}
\usage{
trie_contain(trie, word)
}
\arguments{
\item{trie}{A \code{trie}.}
\item{word}{The word to be searched in the trie.}
}
\value{
a logical indicating that if the word is present or not.
Returns TRUE if the word is in the trie.
Returns FALSE if the word is not in the trie.
}
\description{
Search if a word is present in the trie.
}
\examples{
trie <- trie_create()
trie_contain(trie, "test")
}
| /man/trie_contain.Rd | permissive | mgaroub/reasytries | R | false | true | 581 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reasytrie.R
\name{trie_contain}
\alias{trie_contain}
\title{Search if a word is present in the trie.}
\usage{
trie_contain(trie, word)
}
\arguments{
\item{trie}{A \code{trie}.}
\item{word}{The word to be searched in the trie.}
}
\value{
a logical indicating that if the word is present or not.
Returns TRUE if the word is in the trie.
Returns FALSE if the word is not in the trie.
}
\description{
Search if a word is present in the trie.
}
\examples{
trie <- trie_create()
trie_contain(trie, "test")
}
|
## packages used
# rticles (development version from GitHub) provides R Markdown templates.
remotes::install_github("rstudio/rticles")
# Fixed: a bare `tidyverse` symbol is an error when evaluated; load the
# package instead.
library(tidyverse)
install.packages("ggthemes")
## citation styles
# https://www.zotero.org/styles
https://yutannihilation.github.io/allYourFigureAreBelongToUs/ | /code/test_code.R | no_license | MattBixley/R4biochem | R | false | false | 212 | r | ## packages used
# rticles (development version from GitHub) provides R Markdown templates.
remotes::install_github("rstudio/rticles")
# Fixed: a bare `tidyverse` symbol is an error when evaluated; load the
# package instead.
library(tidyverse)
install.packages("ggthemes")
## citation styles
# https://www.zotero.org/styles
## figure gallery (fixed: a raw URL is not valid R syntax; keep as comment)
# https://yutannihilation.github.io/allYourFigureAreBelongToUs/
# R's t.test() is Welch's by default; pass var.equal = TRUE for Student's.
# This script compares litter sizes between WT ("1"), heterozygous ("2")
# and KO ("3") groups for female and male candidate genes, using t-tests
# computed from summary statistics.

# load libraries
library(tidyverse)
library(dplyr)
library(magrittr)
library(readr)

# read .csv file format
# candidates_female = fertility data for female candidate genes
# candidates_male   = fertility data for male candidate genes
candidates_female <- read_csv("spreadsheet_subset2.csv")
candidates_male <- read_csv("spreadsheet_subset_male.csv")

# Column-name reference ("1" = WT, "2" = heterozygous, "3" = KO).
# NOTE(review): these assignments only store the column names as strings;
# the analyses below access the columns directly (e.g. a$m1).
# sample sizes
n1 <- c("n1")
n2 <- c("n2")
n3 <- c("n3")
# mean litter size
m1 <- c("m1")
m2 <- c("m2")
m3 <- c("m3")   # fixed typo: was c("n3")
# standard deviation
s1 <- c("s1")
s2 <- c("s2")
s3 <- c("s3")

# make each column numeric
candidates_female %<>% mutate_if(is.character, as.numeric)
candidates_male %<>% mutate_if(is.character, as.numeric)

# check the structure
candidates_female %>% str()
candidates_male %>% str()
# Two-sample t-test (WT vs KO) from summary statistics: means (m1, m3),
# SDs (s1, s3) and sample sizes (n1, n3). Returns the two-sided p-value.
# Uses Welch's unequal-variance test by default; set equal.variance = TRUE
# for Student's pooled-variance test.
t.test_WTvsKO <- function(m1, m3, s1, s3, n1, n3, equal.variance = FALSE) {
  if (equal.variance) {
    # Student: pooled variance, df = n1 + n3 - 2
    df <- n1 + n3 - 2
    pooled <- ((n1 - 1) * s1^2 + (n3 - 1) * s3^2) / df
    se <- sqrt(pooled * (1 / n1 + 1 / n3))
  } else {
    # Welch: per-group variance terms and Welch-Satterthwaite df
    v1 <- s1^2 / n1
    v3 <- s3^2 / n3
    se <- sqrt(v1 + v3)
    df <- (v1 + v3)^2 / (v1^2 / (n1 - 1) + v3^2 / (n3 - 1))
  }
  tstat <- (m1 - m3) / se
  2 * pt(-abs(tstat), df)
}
#t-test function for WT vs het, Student's if variance is equal, Welch if not
# Inputs are summary statistics: means (m1, m2), SDs (s1, s2) and sample
# sizes (n1, n2) for the WT ("1") and heterozygous ("2") groups.
# Returns the two-sided p-value.
t.test_WTvshet <- function(m1,m2,s1,s2,n1,n2, equal.variance=FALSE)
{
#Welch
if( equal.variance==FALSE ) {
se <- sqrt( (s1^2/n1) + (s2^2/n2) )
# Welch-Satterthwaite degrees of freedom
df <- ( (s1^2/n1 + s2^2/n2)^2 )/( (s1^2/n1)^2/(n1-1) + (s2^2/n2)^2/(n2-1) )
} else {
#Students
# pooled-variance standard error with df = n1 + n2 - 2
se <- sqrt( (1/n1 + 1/n2) * ((n1-1)*s1^2 + (n2-1)*s2^2)/(n1+n2-2) )
df <- n1+n2-2
}
t <- (m1-m2)/se
# two-sided p-value from the t distribution
return(2*pt(-abs(t),df))
}
# Two-sample t-test (het vs KO) from summary statistics: means (m2, m3),
# SDs (s2, s3) and sample sizes (n2, n3). Returns the two-sided p-value.
# Welch's unequal-variance test by default; Student's pooled-variance test
# when equal.variance = TRUE.
t.test_hetvsKO <- function(m2, m3, s2, s3, n2, n3, equal.variance = FALSE) {
  if (!equal.variance) {
    # Welch: per-group variance contributions and Welch-Satterthwaite df
    w2 <- s2^2 / n2
    w3 <- s3^2 / n3
    std_err <- sqrt(w2 + w3)
    deg_free <- (w2 + w3)^2 / (w2^2 / (n2 - 1) + w3^2 / (n3 - 1))
  } else {
    # Student: pooled variance with n2 + n3 - 2 degrees of freedom
    deg_free <- n2 + n3 - 2
    std_err <- sqrt((1 / n2 + 1 / n3) * ((n2 - 1) * s2^2 + (n3 - 1) * s3^2) / deg_free)
  }
  2 * pt(-abs((m2 - m3) / std_err), deg_free)
}
# Build complete-case subsets (drop rows with any NA in the columns each
# comparison needs), then run the t-tests on the summary statistics.
# Renamed the original one-letter frames (a..f): `c` in particular
# shadowed base::c().
# female WT vs KO
fem_wt_ko <- candidates_female %>%
  filter(!is.na(s1), !is.na(s3), !is.na(m1), !is.na(m3), !is.na(n1), !is.na(n3))
# female WT vs het
fem_wt_het <- candidates_female %>%
  filter(!is.na(s1), !is.na(s2), !is.na(m1), !is.na(m2), !is.na(n1), !is.na(n2))
# female het vs KO
fem_het_ko <- candidates_female %>%
  filter(!is.na(s2), !is.na(s3), !is.na(m2), !is.na(m3), !is.na(n2), !is.na(n3))
# male WT vs KO
male_wt_ko <- candidates_male %>%
  filter(!is.na(s1), !is.na(s3), !is.na(m1), !is.na(m3), !is.na(n1), !is.na(n3))
# male WT vs het
male_wt_het <- candidates_male %>%
  filter(!is.na(s1), !is.na(s2), !is.na(m1), !is.na(m2), !is.na(n1), !is.na(n2))
# male het vs KO
male_het_ko <- candidates_male %>%
  filter(!is.na(s2), !is.na(s3), !is.na(m2), !is.na(m3), !is.na(n2), !is.na(n3))
# finally, run the t-tests! Each call prints a two-sided p-value.
t.test_WTvsKO(fem_wt_ko$m1, fem_wt_ko$m3, fem_wt_ko$s1, fem_wt_ko$s3, fem_wt_ko$n1, fem_wt_ko$n3)
t.test_WTvshet(fem_wt_het$m1, fem_wt_het$m2, fem_wt_het$s1, fem_wt_het$s2, fem_wt_het$n1, fem_wt_het$n2)
t.test_hetvsKO(fem_het_ko$m2, fem_het_ko$m3, fem_het_ko$s2, fem_het_ko$s3, fem_het_ko$n2, fem_het_ko$n3)
t.test_WTvsKO(male_wt_ko$m1, male_wt_ko$m3, male_wt_ko$s1, male_wt_ko$s3, male_wt_ko$n1, male_wt_ko$n3)
t.test_WTvshet(male_wt_het$m1, male_wt_het$m2, male_wt_het$s1, male_wt_het$s2, male_wt_het$n1, male_wt_het$n2)
t.test_hetvsKO(male_het_ko$m2, male_het_ko$m3, male_het_ko$s2, male_het_ko$s3, male_het_ko$n2, male_het_ko$n3)
| /t-test.R | no_license | annabananakobana/MLR_Ttests | R | false | false | 3,726 | r | #R's t-test is Welch's by default,
#therefore call var.equal=TRUE for students t-test
#load libraries
library(tidyverse)
library(dplyr)
library(magrittr)
library(readr)
#read .csv file format
#candidates_female = fertility data for female candidate genes
#candidates_male = fertility data for male candidate genes
candidates_female <- read_csv("spreadsheet_subset2.csv")
candidates_male <- read_csv("spreadsheet_subset_male.csv")
#assign the variables, note "1" refers to WT, "2" refers to heterozygous groups
# NOTE(review): these assignments only store the column *names* as strings;
# the analyses below access the columns directly (e.g. a$m1), so they are unused.
#sample sizes
n1<-c("n1")
n2<-c("n2")
n3<-c("n3")
#mean litter size
m1<-c("m1")
m2<-c("m2")
# BUG(review): presumably should be c("m3"), not c("n3") -- confirm.
m3<-c("n3")
#standard deviation
s1<-c("s1")
s2<-c("s2")
s3<-c("s3")
#make each column numeric
candidates_female %<>% mutate_if(is.character,as.numeric)
candidates_male %<>% mutate_if(is.character,as.numeric)
#check the structure
candidates_female %>% str()
candidates_male %>% str()
#t-test function for WT vs KO, Student's if variance is equal, Welch if not
# Inputs are summary statistics: means (m1, m3), SDs (s1, s3) and sample
# sizes (n1, n3) for the WT ("1") and KO ("3") groups; returns the
# two-sided p-value.
t.test_WTvsKO <- function(m1,m3,s1,s3,n1,n3, equal.variance=FALSE)
{
#Welch
if( equal.variance==FALSE ) {
se <- sqrt( (s1^2/n1) + (s3^2/n3) )
# Welch-Satterthwaite degrees of freedom
df <- ( (s1^2/n1 + s3^2/n3)^2 )/( (s1^2/n1)^2/(n1-1) + (s3^2/n3)^2/(n3-1) )
} else {
#Students
# pooled-variance standard error with df = n1 + n3 - 2
se <- sqrt( (1/n1 + 1/n3) * ((n1-1)*s1^2 + (n3-1)*s3^2)/(n1+n3-2) )
df <- n1+n3-2
}
t <- (m1-m3)/se
# two-sided p-value from the t distribution
return(2*pt(-abs(t),df))
}
#t-test function for WT vs het, Student's if variance is equal, Welch if not
# Inputs are summary statistics: means (m1, m2), SDs (s1, s2) and sample
# sizes (n1, n2) for the WT ("1") and heterozygous ("2") groups; returns
# the two-sided p-value.
t.test_WTvshet <- function(m1,m2,s1,s2,n1,n2, equal.variance=FALSE)
{
#Welch
if( equal.variance==FALSE ) {
se <- sqrt( (s1^2/n1) + (s2^2/n2) )
# Welch-Satterthwaite degrees of freedom
df <- ( (s1^2/n1 + s2^2/n2)^2 )/( (s1^2/n1)^2/(n1-1) + (s2^2/n2)^2/(n2-1) )
} else {
#Students
# pooled-variance standard error with df = n1 + n2 - 2
se <- sqrt( (1/n1 + 1/n2) * ((n1-1)*s1^2 + (n2-1)*s2^2)/(n1+n2-2) )
df <- n1+n2-2
}
t <- (m1-m2)/se
# two-sided p-value from the t distribution
return(2*pt(-abs(t),df))
}
#t-test function for het vs KO, Student's if variance is equal, Welch if not
# Inputs are summary statistics: means (m2, m3), SDs (s2, s3) and sample
# sizes (n2, n3) for the heterozygous ("2") and KO ("3") groups; returns
# the two-sided p-value.
t.test_hetvsKO <- function(m2,m3,s2,s3,n2,n3, equal.variance=FALSE)
{
#Welch
if( equal.variance==FALSE ) {
se <- sqrt( (s2^2/n2) + (s3^2/n3) )
# Welch-Satterthwaite degrees of freedom
df <- ( (s2^2/n2 + s3^2/n3)^2 )/( (s2^2/n2)^2/(n2-1) + (s3^2/n3)^2/(n3-1) )
} else {
#Students
# pooled-variance standard error with df = n2 + n3 - 2
se <- sqrt( (1/n2 + 1/n3) * ((n2-1)*s2^2 + (n3-1)*s3^2)/(n2+n3-2) )
df <- n2+n3-2
}
t <- (m2-m3)/se
# two-sided p-value from the t distribution
return(2*pt(-abs(t),df))
}
#extract a subset of the spreadsheet so we can avoid NAs
# NOTE(review): the one-letter frames below are risky names -- `c` in
# particular masks base::c() for the rest of the script.
#female WT vs KO
a = candidates_female %>%
filter(!is.na(s1), !is.na(s3), !is.na(m1), !is.na(m3), !is.na(n1), !is.na(n3))
#female WT vs het
b = candidates_female %>%
filter(!is.na(s1), !is.na(s2), !is.na(m1), !is.na(m2), !is.na(n1), !is.na(n2))
#female het vs KO
c = candidates_female %>%
filter(!is.na(s2), !is.na(s3), !is.na(m2), !is.na(m3), !is.na(n2), !is.na(n3))
#male WT vs KO
d = candidates_male %>%
filter(!is.na(s1), !is.na(s3), !is.na(m1), !is.na(m3), !is.na(n1), !is.na(n3))
#male WT vs het
e = candidates_male %>%
filter(!is.na(s1), !is.na(s2), !is.na(m1), !is.na(m2), !is.na(n1), !is.na(n2))
#male het vs KO
f = candidates_male %>%
filter(!is.na(s2), !is.na(s3), !is.na(m2), !is.na(m3), !is.na(n2), !is.na(n3))
#finally, run the t-test!
# Each call prints a two-sided p-value for the litter-size comparison.
#female WT vs KO
t.test_WTvsKO(a$m1, a$m3, a$s1, a$s3, a$n1, a$n3)
#female WT vs het
t.test_WTvshet(b$m1, b$m2, b$s1, b$s2, b$n1, b$n2)
#female het vs KO
t.test_hetvsKO(c$m2, c$m3, c$s2, c$s3, c$n2, c$n3)
#male WT vs KO
t.test_WTvsKO(d$m1,d$m3, d$s1, d$s3, d$n1, d$n3)
#male WT vs het
t.test_WTvshet(e$m1, e$m2, e$s1, e$s2, e$n1, e$n2)
#male het vs KO
t.test_hetvsKO(f$m2, f$m3, f$s2, f$s3, f$n2, f$n3)
|
/projeto3.r | no_license | melissarib/algoritmos-usp | R | false | false | 2,956 | r | ||
# N-gram next-word prediction: load 2-gram and 3-gram frequency tables,
# deduplicate/aggregate them, split the n-grams into word columns and
# derive (noisy) relative frequencies.
library(textreadr)
library(tm)
library(tidyverse)
library(dplyr)
library(magrittr)
library(readr)
# this is my dictionary
# NOTE(review): variables are named *_twitter but the paths read *blogs*
# files -- confirm which corpus is intended.
dataset_widenet_gram_2_twitter <- read.csv("./../skims/gram_2_blogs/gram_2_blogs_01_1.txt", stringsAsFactors = FALSE)
dataset_widenet_gram_3_twitter <- read.csv("./../skims/gram_3_blogs/gram_3_blogs_01_1.txt", stringsAsFactors = FALSE)
# keep only word/freq, drop duplicate rows, then sum freq per unique n-gram
dataset_widenet_gram_2_twitter <- select(dataset_widenet_gram_2_twitter,c("word","freq"))
dataset_widenet_gram_2_twitter <- unique(dataset_widenet_gram_2_twitter)
dataset_widenet_gram_2_twitter <- aggregate(freq ~ word,
data=dataset_widenet_gram_2_twitter,FUN=sum,
na.rm = TRUE)
str(dataset_widenet_gram_2_twitter)
head(dataset_widenet_gram_2_twitter)
# same treatment for the 3-gram table
dataset_widenet_gram_3_twitter <- select(dataset_widenet_gram_3_twitter,c("word","freq"))
dataset_widenet_gram_3_twitter <- unique(dataset_widenet_gram_3_twitter)
dataset_widenet_gram_3_twitter <- aggregate(freq ~ word,
data=dataset_widenet_gram_3_twitter,FUN=sum,
na.rm = TRUE)
str(dataset_widenet_gram_3_twitter)
head(dataset_widenet_gram_3_twitter)
# split each n-gram string into one column per word
gram_2_twitter_split <- dataset_widenet_gram_2_twitter %>%
separate(word, c("word1", "word2"), sep = " ")
head(gram_2_twitter_split)
gram_3_twitter_split <- dataset_widenet_gram_3_twitter %>%
separate(word, c("word1", "word2", "word3"), sep = " ")
head(gram_3_twitter_split)
# relative term frequency plus a small random perturbation ("fake_freq")
# NOTE(review): rnorm() makes fake_freq non-deterministic; set a seed if
# reproducible rankings are required.
gram_2_twitter_freq <- gram_2_twitter_split %>% mutate(term_freq = freq/sum(freq))
gram_2_twitter_freq <- gram_2_twitter_freq %>%
mutate(fake_freq = term_freq + 0.1*dnorm(rnorm(freq))/sum(dnorm(rnorm(freq))))
head(gram_2_twitter_freq)
gram_2_twitter_freq <- gram_2_twitter_freq %>% arrange(desc(fake_freq))
gram_3_twitter_freq <- gram_3_twitter_split %>% mutate(term_freq = freq/sum(freq))
gram_3_twitter_freq <- gram_3_twitter_freq %>%
mutate(fake_freq = term_freq + 0.1*dnorm(rnorm(freq))/sum(dnorm(rnorm(freq))))
head(gram_3_twitter_freq)
gram_3_twitter_freq <- gram_3_twitter_freq %>% arrange(desc(fake_freq))
#this is a list of input words to develop the model in steps of (hopefully)
#increasing predictive accuracy
input_gram_1 <- select(gram_2_twitter_freq, word1)
head(input_gram_1)
str(input_gram_1)
input_gram_1 <- unique(input_gram_1)
head(input_gram_1)
str(input_gram_1)
#naive prediction, no smoothing
#start with an easy one
head(gram_2_twitter_freq,10)
for (i in 1:1) {
#print("i")
#print(i)
#print(input_gram_1[i,])
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
print("freq")
print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
}
}
#1:nrow(input_gram_1)
# will print all of them
# I need to print only the first one
for (i in 3:3) {
#print("i")
#print(i)
#print(input_gram_1[i,])
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
}
}
# first one
for (i in 1:3) {
#print("i")
#print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
#print("count")
#print(count)
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
count <- count + 1
}
}
}
### no we feed data 2 grams, not just a list of words
gram_2_twitter_input <- read.csv("./../barrel/gram_2/gram_2_twitter_happi_01_1.txt",
stringsAsFactors = FALSE)
gram_2_twitter_input <- select(gram_2_twitter_input,word)
head(gram_2_twitter_input)
str(gram_2_twitter_input)
gram_2_twitter_input_split <- gram_2_twitter_input %>%
separate(word, c("word1", "word2"), sep = " ")
head(gram_2_twitter_input_split)
gram_3_twitter_input <- read.csv("./../barrel/gram_3/gram_3_twitter_happi_01_1.txt",
stringsAsFactors = FALSE)
gram_3_twitter_input <- select(gram_3_twitter_input,word)
head(gram_3_twitter_input)
str(gram_3_twitter_input)
gram_3_twitter_input_split <- gram_3_twitter_input %>%
separate(word, c("word1", "word2", "word3"), sep = " ")
head(gram_3_twitter_input_split)
#########
gram_2_twitter_input_split$word1[1]
gram_2_twitter_freq$word1[1]
head(gram_2_twitter_input_split,3)
for (i in 1:3) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
print(gram_2_twitter_freq$word2[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
print(gram_2_twitter_freq$word2[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
count <- count + 1
}
}
}
#build test output data frame gram_2
output_dfwidenet_gram_2_2 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_2_2)
head(output_dfwidenet_gram_2_2)
nrow(output_dfwidenet_gram_2_2)
for (i in 1:nrow(gram_2_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 input word1")
output_dfwidenet_gram_2_2[i,1] <- gram_2_twitter_input_split$word1[i]
print("we are in gram_2 input word2")
output_dfwidenet_gram_2_2[i,2] <- gram_2_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_2_2[i,3] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_2[i,4] <- gram_2_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_2_2[i,5] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_2[i,6] <- gram_2_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_2_2,"./../model/test_01/output_dfwidenet_gram_2_2.csv")
#build test output data frame gram_3
output_dfwidenet_gram_3_3 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_3_3)
head(output_dfwidenet_gram_3_3)
nrow(output_dfwidenet_gram_3_3)
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_3_twitter_input_split$word1[i] == gram_3_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_3_3[i,1] <- gram_3_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_3_3[i,2] <- gram_3_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_3_3[i,3] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_3_3[i,5] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_3_3,"./../model/test_01/output_dfwidenet_gram_3_3.csv")
###########################################################################################
#build test output data frame gram_2 but using gram_3 as input
output_dfwidenet_gram_3_2 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_3_2)
head(output_dfwidenet_gram_3_2)
nrow(output_dfwidenet_gram_3_2)
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_3_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_3_2[i,1] <- gram_3_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_3_2[i,2] <- gram_3_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_3_2[i,3] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_2[i,4] <- gram_2_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_3_2[i,5] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_2[i,6] <- gram_2_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_3_2,"./../model/test_01/output_dfwidenet_gram_3_2.csv")
#build test output data frame gram_3 from gram_2 input
output_dfwidenet_gram_2_3 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_2_3)
head(output_dfwidenet_gram_2_3)
nrow(output_dfwidenet_gram_2_3)
for (i in 1:nrow(gram_2_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_3_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_2_3[i,1] <- gram_2_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_2_3[i,2] <- gram_2_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_2_3[i,3] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_2_3[i,5] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_2_3,"./../model/test_01/output_dfwidenet_gram_2_3.csv")
########################
##### word3
#build test output data frame gram_3
output3_dfwidenet_gram_3_3 <- data.frame(input_word2=character(), input_word3=character(),
predict_word3_first=character(),predict_word3_first_freq=numeric(),
predict_word3_second=character(), predict_word3_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output3_dfwidenet_gram_3_3)
head(output3_dfwidenet_gram_3_3)
nrow(output3_dfwidenet_gram_3_3)
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_3_twitter_input_split$word2[i] == gram_3_twitter_freq$word2[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output3_dfwidenet_gram_3_3[i,1] <- gram_3_twitter_input_split$word2[i]
print("we are in gram_3 input word2")
output3_dfwidenet_gram_3_3[i,2] <- gram_3_twitter_input_split$word3[i]
print("next word we predict")
output3_dfwidenet_gram_3_3[i,3] <- gram_3_twitter_freq$word3[j]
print("term freq")
output3_dfwidenet_gram_3_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word2[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word3[i])
print("next word we predict")
output3_dfwidenet_gram_3_3[i,5] <- gram_3_twitter_freq$word3[j]
print("term freq")
output3_dfwidenet_gram_3_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output3_dfwidenet_gram_3_3,"./../model/test_01/output3_dfwidenet_gram_3_3.csv")
| /CP06_04_input_wide_net.R | no_license | rubiera/RCapstone_Week2Writeup | R | false | false | 17,636 | r | library(textreadr)
library(tm)
library(tidyverse)
library(tidytext)
library(knitr)
library(stopwords)
# this is my dictionary
dataset_widenet_gram_2_twitter <- read.csv("./../skims/gram_2_blogs/gram_2_blogs_01_1.txt", stringsAsFactors = FALSE)
dataset_widenet_gram_3_twitter <- read.csv("./../skims/gram_3_blogs/gram_3_blogs_01_1.txt", stringsAsFactors = FALSE)
#unique eevrything
dataset_widenet_gram_2_twitter <- select(dataset_widenet_gram_2_twitter,c("word","freq"))
dataset_widenet_gram_2_twitter <- unique(dataset_widenet_gram_2_twitter)
dataset_widenet_gram_2_twitter <- aggregate(freq ~ word,
data=dataset_widenet_gram_2_twitter,FUN=sum,
na.rm = TRUE)
str(dataset_widenet_gram_2_twitter)
head(dataset_widenet_gram_2_twitter)
dataset_widenet_gram_3_twitter <- select(dataset_widenet_gram_3_twitter,c("word","freq"))
dataset_widenet_gram_3_twitter <- unique(dataset_widenet_gram_3_twitter)
dataset_widenet_gram_3_twitter <- aggregate(freq ~ word,
data=dataset_widenet_gram_3_twitter,FUN=sum,
na.rm = TRUE)
str(dataset_widenet_gram_3_twitter)
head(dataset_widenet_gram_3_twitter)
gram_2_twitter_split <- dataset_widenet_gram_2_twitter %>%
separate(word, c("word1", "word2"), sep = " ")
head(gram_2_twitter_split)
gram_3_twitter_split <- dataset_widenet_gram_3_twitter %>%
separate(word, c("word1", "word2", "word3"), sep = " ")
head(gram_3_twitter_split)
gram_2_twitter_freq <- gram_2_twitter_split %>% mutate(term_freq = freq/sum(freq))
gram_2_twitter_freq <- gram_2_twitter_freq %>%
mutate(fake_freq = term_freq + 0.1*dnorm(rnorm(freq))/sum(dnorm(rnorm(freq))))
head(gram_2_twitter_freq)
gram_2_twitter_freq <- gram_2_twitter_freq %>% arrange(desc(fake_freq))
gram_3_twitter_freq <- gram_3_twitter_split %>% mutate(term_freq = freq/sum(freq))
gram_3_twitter_freq <- gram_3_twitter_freq %>%
mutate(fake_freq = term_freq + 0.1*dnorm(rnorm(freq))/sum(dnorm(rnorm(freq))))
head(gram_3_twitter_freq)
gram_3_twitter_freq <- gram_3_twitter_freq %>% arrange(desc(fake_freq))
#this is a list of input words to develop the model in steps of (hopefully)
#increasing predictive accuracy
input_gram_1 <- select(gram_2_twitter_freq, word1)
head(input_gram_1)
str(input_gram_1)
input_gram_1 <- unique(input_gram_1)
head(input_gram_1)
str(input_gram_1)
#naive prediction, no smoothing
#start with an easy one
head(gram_2_twitter_freq,10)
for (i in 1:1) {
#print("i")
#print(i)
#print(input_gram_1[i,])
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
print("freq")
print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
}
}
#1:nrow(input_gram_1)
# will print all of them
# I need to print only the first one
for (i in 3:3) {
#print("i")
#print(i)
#print(input_gram_1[i,])
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
}
}
# first one
for (i in 1:3) {
#print("i")
#print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(input_gram_1[i,] == gram_2_twitter_freq$word1[j]) {
#print("count")
#print(count)
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 word1")
print(input_gram_1[i,])
#print(gram_2_twitter_freq$word1[j])
print("next word")
print(gram_2_twitter_freq$word2[j])
#print("freq")
#print(gram_2_twitter_freq$freq[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
count <- count + 1
}
}
}
### no we feed data 2 grams, not just a list of words
gram_2_twitter_input <- read.csv("./../barrel/gram_2/gram_2_twitter_happi_01_1.txt",
stringsAsFactors = FALSE)
gram_2_twitter_input <- select(gram_2_twitter_input,word)
head(gram_2_twitter_input)
str(gram_2_twitter_input)
gram_2_twitter_input_split <- gram_2_twitter_input %>%
separate(word, c("word1", "word2"), sep = " ")
head(gram_2_twitter_input_split)
gram_3_twitter_input <- read.csv("./../barrel/gram_3/gram_3_twitter_happi_01_1.txt",
stringsAsFactors = FALSE)
gram_3_twitter_input <- select(gram_3_twitter_input,word)
head(gram_3_twitter_input)
str(gram_3_twitter_input)
gram_3_twitter_input_split <- gram_3_twitter_input %>%
separate(word, c("word1", "word2", "word3"), sep = " ")
head(gram_3_twitter_input_split)
#########
gram_2_twitter_input_split$word1[1]
gram_2_twitter_freq$word1[1]
head(gram_2_twitter_input_split,3)
for (i in 1:3) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
print(gram_2_twitter_freq$word2[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
print(gram_2_twitter_freq$word2[j])
print("term freq")
print(gram_2_twitter_freq$fake_freq[j])
}
count <- count + 1
}
}
}
#build test output data frame gram_2
output_dfwidenet_gram_2_2 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_2_2)
head(output_dfwidenet_gram_2_2)
nrow(output_dfwidenet_gram_2_2)
for (i in 1:nrow(gram_2_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_2 input word1")
output_dfwidenet_gram_2_2[i,1] <- gram_2_twitter_input_split$word1[i]
print("we are in gram_2 input word2")
output_dfwidenet_gram_2_2[i,2] <- gram_2_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_2_2[i,3] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_2[i,4] <- gram_2_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_2 input word1")
print(gram_2_twitter_input_split$word1[i])
print("we are in gram_2 input word2")
print(gram_2_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_2_2[i,5] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_2[i,6] <- gram_2_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_2_2,"./../model/test_01/output_dfwidenet_gram_2_2.csv")
#build test output data frame gram_3
output_dfwidenet_gram_3_3 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_3_3)
head(output_dfwidenet_gram_3_3)
nrow(output_dfwidenet_gram_3_3)
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_3_twitter_input_split$word1[i] == gram_3_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_3_3[i,1] <- gram_3_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_3_3[i,2] <- gram_3_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_3_3[i,3] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_3_3[i,5] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_3_3,"./../model/test_01/output_dfwidenet_gram_3_3.csv")
###########################################################################################
#build test output data frame gram_2 but using gram_3 as input
output_dfwidenet_gram_3_2 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_3_2)
head(output_dfwidenet_gram_3_2)
nrow(output_dfwidenet_gram_3_2)
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_2_twitter_freq)){
if(gram_3_twitter_input_split$word1[i] == gram_2_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_3_2[i,1] <- gram_3_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_3_2[i,2] <- gram_3_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_3_2[i,3] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_2[i,4] <- gram_2_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_3_2[i,5] <- gram_2_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_3_2[i,6] <- gram_2_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_3_2,"./../model/test_01/output_dfwidenet_gram_3_2.csv")
#build test output data frame gram_3 from gram_2 input
output_dfwidenet_gram_2_3 <- data.frame(input_word1=character(), input_word2=character(),
predict_word2_first=character(),predict_word2_first_freq=numeric(),
predict_word2_second=character(), predict_word2_second_freq=numeric(),
stringsAsFactors=FALSE)
str(output_dfwidenet_gram_2_3)
head(output_dfwidenet_gram_2_3)
nrow(output_dfwidenet_gram_2_3)
for (i in 1:nrow(gram_2_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_2_twitter_input_split$word1[i] == gram_3_twitter_freq$word1[j]) {
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output_dfwidenet_gram_2_3[i,1] <- gram_2_twitter_input_split$word1[i]
print("we are in gram_3 input word2")
output_dfwidenet_gram_2_3[i,2] <- gram_2_twitter_input_split$word2[i]
print("next word we predict")
output_dfwidenet_gram_2_3[i,3] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word1[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word2[i])
print("next word we predict")
output_dfwidenet_gram_2_3[i,5] <- gram_3_twitter_freq$word2[j]
print("term freq")
output_dfwidenet_gram_2_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
count <- count + 1
}
}
}
write.csv(output_dfwidenet_gram_2_3,"./../model/test_01/output_dfwidenet_gram_2_3.csv")
########################
##### word3
#build test output data frame gram_3
# Accumulator for gram-3 -> word3 predictions: for each input pair
# (word2, word3), the two best predicted continuations and their frequencies.
output3_dfwidenet_gram_3_3 <- data.frame(input_word2=character(), input_word3=character(),
predict_word3_first=character(),predict_word3_first_freq=numeric(),
predict_word3_second=character(), predict_word3_second_freq=numeric(),
stringsAsFactors=FALSE)
# Sanity checks on the (still empty) accumulator.
str(output3_dfwidenet_gram_3_3)
head(output3_dfwidenet_gram_3_3)
nrow(output3_dfwidenet_gram_3_3)
# For each input row, scan the 3-gram frequency table for rows whose word2
# matches the input's word2; the first match is taken as the best prediction,
# the second match as the runner-up.
# NOTE(review): assumes gram_3_twitter_freq is sorted by descending frequency
# so the first two matches are the top two -- confirm upstream.
for (i in 1:nrow(gram_3_twitter_input_split)) {
print("i ############################################################")
print(i)
#print(input_gram_1[i,])
count <- 1
for (j in 1:nrow(gram_3_twitter_freq)){
if(gram_3_twitter_input_split$word2[i] == gram_3_twitter_freq$word2[j]) {
# count == 1: first (best) match -- record the input pair and top
# prediction in columns 1-4.
# NOTE(review): the printed labels say "word1"/"word2" but the values
# stored are word2/word3 -- the messages look like a copy-paste leftover.
if (count == 1) {
print("best word")
print("j")
print(j)
print("we are in gram_3 input word1")
output3_dfwidenet_gram_3_3[i,1] <- gram_3_twitter_input_split$word2[i]
print("we are in gram_3 input word2")
output3_dfwidenet_gram_3_3[i,2] <- gram_3_twitter_input_split$word3[i]
print("next word we predict")
output3_dfwidenet_gram_3_3[i,3] <- gram_3_twitter_freq$word3[j]
print("term freq")
output3_dfwidenet_gram_3_3[i,4] <- gram_3_twitter_freq$fake_freq[j]
}
# count == 2: second match -- record the runner-up prediction in
# columns 5-6.
if (count == 2) {
print("next best word")
print("j")
print(j)
print("we are in gram_3 input word1")
print(gram_3_twitter_input_split$word2[i])
print("we are in gram_3 input word2")
print(gram_3_twitter_input_split$word3[i])
print("next word we predict")
output3_dfwidenet_gram_3_3[i,5] <- gram_3_twitter_freq$word3[j]
print("term freq")
output3_dfwidenet_gram_3_3[i,6] <- gram_3_twitter_freq$fake_freq[j]
}
# Matches after the second are counted but otherwise ignored.
count <- count + 1
}
}
}
# Persist the prediction table for the model tests.
write.csv(output3_dfwidenet_gram_3_3,"./../model/test_01/output3_dfwidenet_gram_3_3.csv")
|
#'---
#'title: "Program Statistics from the Centers for Medicare and Medicaid Services"
#'output:
#' rmarkdown::html_vignette:
#' toc: yes
#'vignette: |
#' %\VignetteIndexEntry{MDCR Program Statistics}
#' %\VignetteEngine{knitr::rmarkdown}
#' %\VignetteEncoding{UTF-8}
#'---
#'
#+label='setup', include = FALSE, cache = FALSE
knitr::opts_chunk$set(collapse = TRUE, cache = FALSE)
options(qwraps2_markup = "markdown")
#'
#' The data sets provided in this package are built from the program statistics
#' data provided by the Centers for Medicaid and Medicare Services,
#' https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/CMSProgramStatistics/.
#'
#' If you are interested in verifying the construction of the provided data sets
#' you can view the code and verify the source data at
#' https://github.com/dewittpe/cms.codes.
#'
#' This vignette provides a summary of the data sets provided. Each of the
#' data sets are provided as pure data.tables. Many of the names for the
#' columns of the data sets are not R syntactically valid names, that is, many
#' of the names contain spaces.
#'
#' If you are going to reproduce the examples provided here you'll need to have
#' two namespaces loaded
#+ label = "namespaces", messages = FALSE
library(magrittr)
library(data.table)
#'
#' # Total Medicare Enrollment
#'
#' From the website: The Medicare Enrollment Section contains trend,
#' demographic, and state tables showing total Medicare enrollment, Original
#' Medicare enrollment, Medicare Advantage and Other Health Plan enrollment,
#' newly-enrolled beneficiaries, deaths, and Medicare-Medicaid Enrollment.
#'
#' There are several data sets provided with enrollment data. The enrollment
#' values are in person years and subject to standard disclaimers regarding
#' rounding issues.
#+ label = "list_enrollment_datasets", echo = FALSE, results = 'asis'
enrollment_data_sets <- data(package = "cms.codes")$results
enrollment_data_sets <- enrollment_data_sets[grepl("^MDCR_ENROLL_.*", enrollment_data_sets[, "Item"]), c("Item", "Title")]
enrollment_data_sets[, "Item"] %<>% paste0("[", ., "](#", tolower(gsub("_", "-", .)), ")")
knitr::kable(enrollment_data_sets)
#'
# /* MDCR ENROLL AB 01 {{{ */
#' ## MDCR ENROLL AB 01
#'
#' Total Medicare Enrollment: Total, Original Medicare, and Medicare Advantage
#' and Other Health Plan Enrollment
#'
#' Load the data set:
data(MDCR_ENROLL_AB_01, package = "cms.codes")
#'
#' This data set contains total enrollment data for the years
{{ as.character(min(MDCR_ENROLL_AB_01$Year)) }}
#' to
{{ paste0(max(MDCR_ENROLL_AB_01$Year), ".") }}
#'
#' The provided enrollment information is:
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_01)), sep = "\n")
#'
#' The overall enrollment in Medicare can be seen in the following graphic.
#+ label = "plot_MDCR_ENROLL_AB_01", echo = FALSE, results = "hide", fig.width = 7, fig.height = 7
# Build a long-format data set for a three-facet enrollment figure:
# absolute totals, year-over-year percent increase, and percent of total.
plot_enroll <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Enrollment",
                                  "Total Original Medicare Enrollment",
                                  "Total Medicare Advantage and Other Health Plan Enrollment"))
# Shorten the legend labels.  BUG FIX: label typo "Orignal" -> "Original".
levels(plot_enroll$variable)[grepl("^Total\\ Enrollment$", levels(plot_enroll$variable))] <- "Total"
levels(plot_enroll$variable)[grepl("Original", levels(plot_enroll$variable))] <- "Original Medicare"
levels(plot_enroll$variable)[grepl("Medicare Advantage", levels(plot_enroll$variable))] <- "Medicare Advantage and Other Health Plan"
plot_enroll$facet <- "Total Enrollment"
plot_percent_increase <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Enrollment Percentage Increase from Prior Year",
                                  "Total Original Medicare Enrollment Percentage Increase from Prior Year",
                                  "Total Medicare Advantage and Other Health Plan Enrollment Percentage Increase from Prior Year"))
levels(plot_percent_increase$variable)[grepl("^Total\\ Enrollment", levels(plot_percent_increase$variable))] <- "Total"
levels(plot_percent_increase$variable)[grepl("Original", levels(plot_percent_increase$variable))] <- "Original Medicare"
levels(plot_percent_increase$variable)[grepl("Medicare Advantage", levels(plot_percent_increase$variable))] <- "Medicare Advantage and Other Health Plan"
plot_percent_increase$facet <- "Percent Increase from Prior Year"
plot_percent_of_total <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Original Medicare Percent of Total Enrollment",
                                  "Total Medicare Advantage and Other Health Plan Enrollment Percent of Total Enrollment"))
levels(plot_percent_of_total$variable)[grepl("Original", levels(plot_percent_of_total$variable))] <- "Original Medicare"
levels(plot_percent_of_total$variable)[grepl("Medicare Advantage", levels(plot_percent_of_total$variable))] <- "Medicare Advantage and Other Health Plan"
plot_percent_of_total$facet <- "Percent of Total"
# Stack the three pieces and fix the facet display order.
plot_data <- rbind(plot_enroll, plot_percent_increase, plot_percent_of_total)
plot_data$facet %<>% factor(levels = c("Total Enrollment", "Percent Increase from Prior Year", "Percent of Total"))
ggplot2::ggplot(data = plot_data) +
  ggplot2::aes(x = Year, y = value, color = variable) +
  ggplot2::geom_line() +
  ggplot2::geom_point() +
  ggplot2::facet_wrap( ~ facet, scale = "free_y", ncol = 1) +
  ggplot2::scale_x_continuous(breaks = MDCR_ENROLL_AB_01$Year) +
  ggplot2::ylab("") +
  ggplot2::theme(legend.position = "bottom",
                 legend.title = ggplot2::element_blank())
#'
#' The full data set is relatively small and can be displayed in one table
#' easily.
#+ label = "table_MDCR_ENROLL_AB_01", echo = FALSE, results = "asis"
knitr::kable(MDCR_ENROLL_AB_01)
# /* End of MDCR ENROLL AB 01 }}} */
#'
# /* MDCR ENROLL AB 02 {{{ */
#' ## MDCR ENROLL AB 02
#'
#' Total Medicare Enrollment: Total, Original Medicare, Medicare Advantage and
#' Other Health Plan Enrollment, and Resident Population, by Area of Residence.
#'
# Load the data set.
data(MDCR_ENROLL_AB_02, package = "cms.codes")
MDCR_ENROLL_AB_02 %<>% as.data.table
#'
#' The information provided in this dataset is
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_02)), sep = "\n")
#'
#' There are
{{ length(unique(MDCR_ENROLL_AB_02[["Area of Residence"]])) }}
#' unique values for Area of Residence. These are each of the fifty States and
#' the totals for the United States.
#+ results = "asis"
MDCR_ENROLL_AB_02[`Area of Residence` == "United States"] %>% knitr::kable(.)
#+ results = "asis"
MDCR_ENROLL_AB_02[`Area of Residence` == "Colorado"] %>% knitr::kable(.)
# /* End of MDCR ENROLL AB 02 }}} */
#'
# /* MDCR ENROLL AB 03 {{{ */
#' ## MDCR ENROLL AB 03
#'
#' Total Medicare Enrollment: Part A and/or Part B Total, Aged, and Disabled
#' Enrollees
#'
# Load the data set.
data(MDCR_ENROLL_AB_03, package = "cms.codes")
MDCR_ENROLL_AB_03 %<>% as.data.table
#'
#' The information provided in this dataset is
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_03)), sep = "\n")
# /* End of MDCR ENROLL AB 03 }}} */
#'
# /* MDCR ENROLL AB 04 {{{ */
#'
#' ## MDCR ENROLL AB 04
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Age Group
#'
data(MDCR_ENROLL_AB_04, package = "cms.codes")
str(MDCR_ENROLL_AB_04)
# /* End of MDCR ENROLL AB 04 }}} */
#'
# /* MDCR ENROLL AB 05 {{{ */
#'
#' ## MDCR ENROLL AB 05
#'
#' Total Medicare Enrollment: Part A and/or Part B
#' Enrollment by Demographic Characteristics
#'
data(MDCR_ENROLL_AB_05, package = "cms.codes")
str(MDCR_ENROLL_AB_05)
# /* End of MDCR ENROLL AB 05 }}} */
#'
# /* MDCR ENROLL AB 06 {{{ */
#'
#' ## MDCR ENROLL AB 06
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Type of
#' Entitlement and Demographic Characteristics
#'
data(MDCR_ENROLL_AB_06, package = "cms.codes")
str(MDCR_ENROLL_AB_06)
# /* End of MDCR ENROLL AB 06 }}} */
#'
# /* MDCR ENROLL AB 07 {{{ */
#'
#' ## MDCR ENROLL AB 07
#'
#' Total Medicare Enrollment: Part A and/or Part B Total, Aged, and Disabled
#' Enrollees, by Area of Residence
#'
data(MDCR_ENROLL_AB_07, package = "cms.codes")
str(MDCR_ENROLL_AB_07)
# /* End of MDCR ENROLL AB 07 }}} */
#'
# /* MDCR ENROLL AB 08 {{{ */
#'
#' ## MDCR ENROLL AB 08
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Type of
#' Entitlement and Area of Residence
#'
data(MDCR_ENROLL_AB_08, package = "cms.codes")
str(MDCR_ENROLL_AB_08)
# /* End of MDCR ENROLL AB 08 }}} */
#'
# /* Provider Taxonomy {{{ */
#'
#' # Provider Taxonomy
#'
#' Quoting from the [CMS
#' webpage](https://www.cms.gov/Medicare/Provider-Enrollment-and-Certification/MedicareProviderSupEnroll/Taxonomy.html)
#'
#'>The Healthcare Provider Taxonomy Code Set is a hierarchical code set that consists of codes, descriptions, and definitions. Healthcare Provider Taxonomy Codes are designed to categorize the type, classification, and/or specialization of health care providers. The Code Set consists of two sections: Individuals and Groups of Individuals, and Non-Individuals. The Code Set is updated twice a year, effective April 1 and October 1. The “Crosswalk – Medicare Provider/Supplier to Healthcare Provider Taxonomy” was updated because of changes made to the Healthcare Provider Taxonomy Code Set that will be implemented October 1, 2008. That Code Set is available from the Washington Publishing Company. The Code Set is maintained by the National Uniform Claim Committee. The Code Set is a Health Insurance Portability and Accountability (HIPAA) standard code set. As such, it is the only code set that may be used in HIPAA standard transactions to report the type/classification/specialization of a health care provider when such reporting is required.
#'>
#'>When applying for a National Provider Identifier (NPI) from the National Plan and Provider Enumeration System (NPPES), a health care provider must select the Healthcare Provider Taxonomy Code or code description that the health care provider determines most closely describes the health care provider's type/classification/specialization, and report that code or code description in the NPI application. In some situations, a health care provider might need to report more than one Healthcare Provider Taxonomy Code or code description in order to adequately describe the type/classification/specialization. Therefore, a health care provider may select more than one Healthcare Provider Taxonomy Code or code description when applying for an NPI, but must indicate one of them as the primary. The NPPES does not verify with the health care providers or with trusted sources that the Healthcare Provider Taxonomy Code or code description selections made by health care providers when applying for NPIs are accurate (e.g., the NPPES does not verify that an individual who reports a Physician Code is, in fact, a physician, or a physician with the reported specialization). The NPPES does, however, validate that the Code and code description selections exist within the current version of the Healthcare Provider Taxonomy Code Set.
#'>
#'>The Healthcare Provider Taxonomy Codes and code descriptions that health care providers select when applying for NPIs may or may not be the same as the categorizations used by Medicare and other health plans in their enrollment and credentialing activities. The Healthcare Provider Taxonomy Code or code description information collected by NPPES is used to help uniquely identify health care providers in order to assign them NPIs, not to ensure that they are credentialed or qualified to render health care.
#'
data(MDCR_PROVIDER_TAXONOMY, package = "cms.codes")
#'
#' There are several footnotes provided with the data. These footnotes have
#' been summarized in the Details section of the man file for the data set.
#' Please read the man file.
#+ eval = FALSE
# /*
if (!interactive()) {
# */
help("MDCR_PROVIDER_TAXONOMY", package = "cms.codes")
# /*
}
# */
#'
str(MDCR_PROVIDER_TAXONOMY, width = 80, strict.width = "cut")
# /* End of Provider Taxonomy }}} */
#'
# /* Places of Service {{{ */
#'
#' # Places of Services
#'
#' # Place of Service Code Set
#'
#' Two digit codes associated with places of services. Per the CMS website
#' https://www.cms.gov/Medicare/Coding/place-of-service-codes/Place_of_Service_Code_Set.html
#' "These codes should be used on professional claims to specify the entity
#' where service(s) were rendered."
#'
data("PLACE_OF_SERVICE", package = "cms.codes")
str(PLACE_OF_SERVICE)
# /* }}} */
| /vignette-spinners/program-statistics.R | permissive | dewittpe/cms.codes | R | false | false | 12,760 | r | #'---
#'title: "Program Statistics from the Centers for Medicare and Medicaid Services"
#'output:
#' rmarkdown::html_vignette:
#' toc: yes
#'vignette: |
#' %\VignetteIndexEntry{MDCR Program Statistics}
#' %\VignetteEngine{knitr::rmarkdown}
#' %\VignetteEncoding{UTF-8}
#'---
#'
#+label='setup', include = FALSE, cache = FALSE
knitr::opts_chunk$set(collapse = TRUE, cache = FALSE)
options(qwraps2_markup = "markdown")
#'
#' The data sets provided in this package are built from the program statistics
#' data provided by the Centers for Medicaid and Medicare Services,
#' https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/CMSProgramStatistics/.
#'
#' If you are interested in verifying the construction of the provided data sets
#' you can view the code and verify the source data at
#' https://github.com/dewittpe/cms.codes.
#'
#' This vignette provides a summary of the data sets provided. Each of the
#' data sets are provided as pure data.tables. Many of the names for the
#' columns of the data sets are not R syntactically valid names, that is, many
#' of the names contain spaces.
#'
#' If you are going to reproduce the examples provided here you'll need to have
#' two namespaces loaded
#+ label = "namespaces", messages = FALSE
library(magrittr)
library(data.table)
#'
#' # Total Medicare Enrollment
#'
#' From the website: The Medicare Enrollment Section contains trend,
#' demographic, and state tables showing total Medicare enrollment, Original
#' Medicare enrollment, Medicare Advantage and Other Health Plan enrollment,
#' newly-enrolled beneficiaries, deaths, and Medicare-Medicaid Enrollment.
#'
#' There are several data sets provided with enrollment data. The enrollment
#' values are in person years and subject to standard disclaimers regarding
#' rounding issues.
#+ label = "list_enrollment_datasets", echo = FALSE, results = 'asis'
enrollment_data_sets <- data(package = "cms.codes")$results
enrollment_data_sets <- enrollment_data_sets[grepl("^MDCR_ENROLL_.*", enrollment_data_sets[, "Item"]), c("Item", "Title")]
enrollment_data_sets[, "Item"] %<>% paste0("[", ., "](#", tolower(gsub("_", "-", .)), ")")
knitr::kable(enrollment_data_sets)
#'
# /* MDCR ENROLL AB 01 {{{ */
#' ## MDCR ENROLL AB 01
#'
#' Total Medicare Enrollment: Total, Original Medicare, and Medicare Advantage
#' and Other Health Plan Enrollment
#'
#' Load the data set:
data(MDCR_ENROLL_AB_01, package = "cms.codes")
#'
#' This data set contains total enrollment data for the years
{{ as.character(min(MDCR_ENROLL_AB_01$Year)) }}
#' to
{{ paste0(max(MDCR_ENROLL_AB_01$Year), ".") }}
#'
#' The provided enrollment information is:
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_01)), sep = "\n")
#'
#' The overall enrollment in Medicare can be seen in the following graphic.
#+ label = "plot_MDCR_ENROLL_AB_01", echo = FALSE, results = "hide", fig.width = 7, fig.height = 7
# Build a long-format data set for a three-facet enrollment figure:
# absolute totals, year-over-year percent increase, and percent of total.
plot_enroll <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Enrollment",
                                  "Total Original Medicare Enrollment",
                                  "Total Medicare Advantage and Other Health Plan Enrollment"))
# Shorten the legend labels.  BUG FIX: label typo "Orignal" -> "Original".
levels(plot_enroll$variable)[grepl("^Total\\ Enrollment$", levels(plot_enroll$variable))] <- "Total"
levels(plot_enroll$variable)[grepl("Original", levels(plot_enroll$variable))] <- "Original Medicare"
levels(plot_enroll$variable)[grepl("Medicare Advantage", levels(plot_enroll$variable))] <- "Medicare Advantage and Other Health Plan"
plot_enroll$facet <- "Total Enrollment"
plot_percent_increase <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Enrollment Percentage Increase from Prior Year",
                                  "Total Original Medicare Enrollment Percentage Increase from Prior Year",
                                  "Total Medicare Advantage and Other Health Plan Enrollment Percentage Increase from Prior Year"))
levels(plot_percent_increase$variable)[grepl("^Total\\ Enrollment", levels(plot_percent_increase$variable))] <- "Total"
levels(plot_percent_increase$variable)[grepl("Original", levels(plot_percent_increase$variable))] <- "Original Medicare"
levels(plot_percent_increase$variable)[grepl("Medicare Advantage", levels(plot_percent_increase$variable))] <- "Medicare Advantage and Other Health Plan"
plot_percent_increase$facet <- "Percent Increase from Prior Year"
plot_percent_of_total <-
  reshape2::melt(MDCR_ENROLL_AB_01,
                 id.vars = "Year",
                 measure.vars = c("Total Original Medicare Percent of Total Enrollment",
                                  "Total Medicare Advantage and Other Health Plan Enrollment Percent of Total Enrollment"))
levels(plot_percent_of_total$variable)[grepl("Original", levels(plot_percent_of_total$variable))] <- "Original Medicare"
levels(plot_percent_of_total$variable)[grepl("Medicare Advantage", levels(plot_percent_of_total$variable))] <- "Medicare Advantage and Other Health Plan"
plot_percent_of_total$facet <- "Percent of Total"
# Stack the three pieces and fix the facet display order.
plot_data <- rbind(plot_enroll, plot_percent_increase, plot_percent_of_total)
plot_data$facet %<>% factor(levels = c("Total Enrollment", "Percent Increase from Prior Year", "Percent of Total"))
ggplot2::ggplot(data = plot_data) +
  ggplot2::aes(x = Year, y = value, color = variable) +
  ggplot2::geom_line() +
  ggplot2::geom_point() +
  ggplot2::facet_wrap( ~ facet, scale = "free_y", ncol = 1) +
  ggplot2::scale_x_continuous(breaks = MDCR_ENROLL_AB_01$Year) +
  ggplot2::ylab("") +
  ggplot2::theme(legend.position = "bottom",
                 legend.title = ggplot2::element_blank())
#'
#' The full data set is relatively small and can be displayed in one table
#' easily.
#+ label = "table_MDCR_ENROLL_AB_01", echo = FALSE, results = "asis"
knitr::kable(MDCR_ENROLL_AB_01)
# /* End of MDCR ENROLL AB 01 }}} */
#'
# /* MDCR ENROLL AB 02 {{{ */
#' ## MDCR ENROLL AB 02
#'
#' Total Medicare Enrollment: Total, Original Medicare, Medicare Advantage and
#' Other Health Plan Enrollment, and Resident Population, by Area of Residence.
#'
# Load the data set.
data(MDCR_ENROLL_AB_02, package = "cms.codes")
MDCR_ENROLL_AB_02 %<>% as.data.table
#'
#' The information provided in this dataset is
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_02)), sep = "\n")
#'
#' There are
{{ length(unique(MDCR_ENROLL_AB_02[["Area of Residence"]])) }}
#' unique values for Area of Residence. These are each of the fifty States and
#' the totals for the United States.
#+ results = "asis"
MDCR_ENROLL_AB_02[`Area of Residence` == "United States"] %>% knitr::kable(.)
#+ results = "asis"
MDCR_ENROLL_AB_02[`Area of Residence` == "Colorado"] %>% knitr::kable(.)
# /* End of MDCR ENROLL AB 02 }}} */
#'
# /* MDCR ENROLL AB 03 {{{ */
#' ## MDCR ENROLL AB 03
#'
#' Total Medicare Enrollment: Part A and/or Part B Total, Aged, and Disabled
#' Enrollees
#'
# Load the data set.
data(MDCR_ENROLL_AB_03, package = "cms.codes")
MDCR_ENROLL_AB_03 %<>% as.data.table
#'
#' The information provided in this dataset is
#+ results = "asis"
cat(paste("*", names(MDCR_ENROLL_AB_03)), sep = "\n")
# /* End of MDCR ENROLL AB 03 }}} */
#'
# /* MDCR ENROLL AB 04 {{{ */
#'
#' ## MDCR ENROLL AB 04
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Age Group
#'
data(MDCR_ENROLL_AB_04, package = "cms.codes")
str(MDCR_ENROLL_AB_04)
# /* End of MDCR ENROLL AB 04 }}} */
#'
# /* MDCR ENROLL AB 05 {{{ */
#'
#' ## MDCR ENROLL AB 05
#'
#' Total Medicare Enrollment: Part A and/or Part B
#' Enrollment by Demographic Characteristics
#'
data(MDCR_ENROLL_AB_05, package = "cms.codes")
str(MDCR_ENROLL_AB_05)
# /* End of MDCR ENROLL AB 05 }}} */
#'
# /* MDCR ENROLL AB 06 {{{ */
#'
#' ## MDCR ENROLL AB 06
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Type of
#' Entitlement and Demographic Characteristics
#'
data(MDCR_ENROLL_AB_06, package = "cms.codes")
str(MDCR_ENROLL_AB_06)
# /* End of MDCR ENROLL AB 06 }}} */
#'
# /* MDCR ENROLL AB 07 {{{ */
#'
#' ## MDCR ENROLL AB 07
#'
#' Total Medicare Enrollment: Part A and/or Part B Total, Aged, and Disabled
#' Enrollees, by Area of Residence
#'
data(MDCR_ENROLL_AB_07, package = "cms.codes")
str(MDCR_ENROLL_AB_07)
# /* End of MDCR ENROLL AB 07 }}} */
#'
# /* MDCR ENROLL AB 08 {{{ */
#'
#' ## MDCR ENROLL AB 08
#'
#' Total Medicare Enrollment: Part A and/or Part B Enrollees, by Type of
#' Entitlement and Area of Residence
#'
data(MDCR_ENROLL_AB_08, package = "cms.codes")
str(MDCR_ENROLL_AB_08)
# /* End of MDCR ENROLL AB 08 }}} */
#'
# /* Provider Taxonomy {{{ */
#'
#' # Provider Taxonomy
#'
#' Quoting from the [CMS
#' webpage](https://www.cms.gov/Medicare/Provider-Enrollment-and-Certification/MedicareProviderSupEnroll/Taxonomy.html)
#'
#'>The Healthcare Provider Taxonomy Code Set is a hierarchical code set that consists of codes, descriptions, and definitions. Healthcare Provider Taxonomy Codes are designed to categorize the type, classification, and/or specialization of health care providers. The Code Set consists of two sections: Individuals and Groups of Individuals, and Non-Individuals. The Code Set is updated twice a year, effective April 1 and October 1. The “Crosswalk – Medicare Provider/Supplier to Healthcare Provider Taxonomy” was updated because of changes made to the Healthcare Provider Taxonomy Code Set that will be implemented October 1, 2008. That Code Set is available from the Washington Publishing Company. The Code Set is maintained by the National Uniform Claim Committee. The Code Set is a Health Insurance Portability and Accountability (HIPAA) standard code set. As such, it is the only code set that may be used in HIPAA standard transactions to report the type/classification/specialization of a health care provider when such reporting is required.
#'>
#'>When applying for a National Provider Identifier (NPI) from the National Plan and Provider Enumeration System (NPPES), a health care provider must select the Healthcare Provider Taxonomy Code or code description that the health care provider determines most closely describes the health care provider's type/classification/specialization, and report that code or code description in the NPI application. In some situations, a health care provider might need to report more than one Healthcare Provider Taxonomy Code or code description in order to adequately describe the type/classification/specialization. Therefore, a health care provider may select more than one Healthcare Provider Taxonomy Code or code description when applying for an NPI, but must indicate one of them as the primary. The NPPES does not verify with the health care providers or with trusted sources that the Healthcare Provider Taxonomy Code or code description selections made by health care providers when applying for NPIs are accurate (e.g., the NPPES does not verify that an individual who reports a Physician Code is, in fact, a physician, or a physician with the reported specialization). The NPPES does, however, validate that the Code and code description selections exist within the current version of the Healthcare Provider Taxonomy Code Set.
#'>
#'>The Healthcare Provider Taxonomy Codes and code descriptions that health care providers select when applying for NPIs may or may not be the same as the categorizations used by Medicare and other health plans in their enrollment and credentialing activities. The Healthcare Provider Taxonomy Code or code description information collected by NPPES is used to help uniquely identify health care providers in order to assign them NPIs, not to ensure that they are credentialed or qualified to render health care.
#'
data(MDCR_PROVIDER_TAXONOMY, package = "cms.codes")
#'
#' There are several footnotes provided with the data. These footnotes have
#' been summarized in the Details section of the man file for the data set.
#' Please read the man file.
#+ eval = FALSE
# /*
if (!interactive()) {
# */
help("MDCR_PROVIDER_TAXONOMY", package = "cms.codes")
# /*
}
# */
#'
str(MDCR_PROVIDER_TAXONOMY, width = 80, strict.width = "cut")
# /* End of Provider Taxonomy }}} */
#'
# /* Places of Service {{{ */
#'
#' # Places of Services
#'
#' # Place of Service Code Set
#'
#' Two digit codes associated with places of services. Per the CMS website
#' https://www.cms.gov/Medicare/Coding/place-of-service-codes/Place_of_Service_Code_Set.html
#' "These codes should be used on professional claims to specify the entity
#' where service(s) were rendered."
#'
data("PLACE_OF_SERVICE", package = "cms.codes")
str(PLACE_OF_SERVICE)
# /* }}} */
|
#' Extremely randomized split
#'
#' Draws \code{ntry} random candidate splits for each predictor in \code{X}
#' and keeps the candidate minimising the impurity of the response \code{Y}.
#' Depending on \code{X$type} the candidates are built from a random factor
#' partition, random pairs of centre curves (Frechet distance), random pairs
#' of shapes (earth mover's distance), random pairs of images (mean squared
#' difference) or random pairs of scalar centres.
#'
#' @param X predictor structure: a list with elements \code{X} (the data),
#'   \code{id}, \code{time} (used for curves) and \code{type}, one of
#'   \code{"factor"}, \code{"curve"}, \code{"shape"}, \code{"image"} or
#'   \code{"scalar"}.
#' @param Y response structure, forwarded to \code{impurity_split}.
#' @param ntry number of random candidate splits drawn per variable.
#' @param timeScale time scale used by \code{kmlShape::distFrechet} for
#'   curve predictors.
#'
#' @return A list describing the best split (\code{split}, \code{impurete},
#'   \code{impur_list}, \code{variable}, \code{Pure}; plus \code{gauche} /
#'   \code{droite} centre ids for shape predictors), or \code{list(Pure =
#'   TRUE)} when no admissible split exists.
#'
#' @import kmlShape
#' @import Evomorph
#' @import emdist
#'
#' @keywords internal
ERvar_split <- function(X ,Y,ntry=3,timeScale=0.1){
  impur <- rep(0, dim(X$X)[length(dim(X$X))])
  toutes_imp <- list()
  impur_list <- list()
  split <- list()
  Pure <- FALSE
  # Best impurity / variable found so far for shape predictors (that branch
  # keeps a running minimum instead of per-variable vectors).
  Imp_shape <- Inf
  var_shape <- Inf
  # Loop over the predictor variables (last dimension of X$X).
  for (i in 1:dim(X$X)[length(dim(X$X))]){
    if (X$type=="factor"){
      if (length(unique(X$X[,i]))>1){
        L <- Fact.partitions(X$X[,i],X$id)
        split_courant <- list()
        impur_courant <- rep(NA,length(L))
        toutes_imp_courant <- list()
        # Draw one of the possible factor partitions at random.
        tirage <- sample(1:length(L), 1)
        # Ids in the drawn partition go left (1), everything else right (2).
        split[[i]] <- rep(2,length(X$id))
        for (l in L[[tirage]]){
          split[[i]][which(X$id==l)] <- 1
        }
        # Quality of the resulting split.
        impurete <- impurity_split(Y,split[[i]])
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
      else {
        # Constant factor: no admissible split for this variable.
        impur[i] <- Inf
        split[[i]] <- Inf
      }
    }
    if( X$type=="curve"){
      # Draw ntry random pairs of centre curves.
      id_centers <- matrix(NA,ntry,2)
      for (l in 1:ntry){
        id_centers[l,] <- sample(unique(X$id),2)
      }
      # One candidate split per try; default side is 2 (right).
      split_prime <- matrix(2,ntry,length(unique(X$id)))
      u <- 0
      impurete2 <- list()
      qui <- NULL
      imp <- NULL
      for (c in 1:ntry){
        w_gauche <- which(X$id==id_centers[c,1])
        w_droit <- which(X$id==id_centers[c,2])
        # Assign each trajectory to the closer of the two centre curves.
        for (l in 1:length(unique(X$id))){
          w <- which(X$id==unique(X$id)[l])
          dg <- distFrechet(X$time[w_gauche],X$X[w_gauche,i],X$time[w],X$X[w,i], timeScale = timeScale)
          dd <- distFrechet(X$time[w_droit],X$X[w_droit,i],X$time[w],X$X[w,i], timeScale = timeScale)
          if (dg<=dd) split_prime[c,l] <- 1
        }
        # Only keep candidates that actually separate the ids.
        if (length(unique(split_prime[c,]))>1){
          u <- u+1
          qui <- c(qui, c)
          impurete2[[c]] <- impurity_split(Y,split_prime[c,], timeScale)
          imp <- c(imp,impurete2[[c]]$impur)
        }
      }
      if (u>0){
        # Keep the admissible candidate with the lowest impurity.
        gagnant <- qui[which.min(imp)]
        split[[i]] <- split_prime[gagnant,]
        impurete <- impurete2[[gagnant]]
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
      else{
        impur[i] <- Inf
        split[[i]] <- Inf}
    }
    if (X$type=="shape"){
      n_elem <- dim(X$X)[3]
      if (n_elem>2){
        # Draw ntry random pairs of centre shapes.
        id_centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          id_centers[l,] <- sample(X$id,2)
        }
        split_prime <- matrix(2,ntry,length(X$id))
        # Earth mover's distances to the right/left centre for each shape.
        # NOTE(review): dd/dg are sized n_elem but indexed below over
        # length(unique(X$id)) -- assumes one shape per id; confirm.
        dd <- rep(NA,n_elem)
        dg <- rep(NA,n_elem)
        for (c in 1:ntry){
          for (k in 1:n_elem){
            dg[k] <- emd2d(X$X[,,k,i],X$X[,,which(X$id==id_centers[c,1]),i])
            dd[k] <- emd2d(X$X[,,k,i],X$X[,,which(X$id==id_centers[c,2]),i])
          }
          for (l in 1:length(unique(X$id))){
            # emd2d can return NaN; in that case assign a side at random.
            if (is.nan(dg[l]) || is.nan(dd[l])) split_prime[c,l] <- sample(c(1,2),1)
            else if (dg[l]<=dd[l]) split_prime[c,l] <- 1
          }
          # BUG FIX: was `length(split_prime[c,]) > 1`, which is always true;
          # admissibility requires BOTH sides non-empty, as tested in the
          # curve/image/scalar branches.
          if (length(unique(split_prime[c,]))>1){
            impurete2 <- impurity_split(Y,split_prime[c,], timeScale)
            # Keep a running minimum over all shape variables and tries.
            if (impurete2$impur <Imp_shape && is.na(impurete2$impur)==FALSE){
              Imp_shape <- impurete2$impur
              var_shape <- i
              gauche <- id_centers[c,1]
              droite <- id_centers[c,2]
              impur_list <- impurete2$imp_list
              # Note: `split` becomes a plain vector here; the shape-specific
              # return path below relies on that.
              split <- split_prime[c,]
              Pure <- FALSE
            }
          }
        }
      }
    }
    if (X$type=="image"){
      if (nrow(X$X)>2){
        # Draw ntry random pairs of centre images.
        id_centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          id_centers[l,] <- sample(X$id,2)
        }
        split_prime <- matrix(2,ntry,length(X$id))
        u <- 0
        qui <- NULL
        impurete2 <- list()
        imp <- NULL
        for (c in 1:ntry){
          w_g <- which(X$id==id_centers[c,1])
          w_d <- which(X$id==id_centers[c,2])
          # Mean squared pixel difference to each centre image.
          dg <- apply(apply(X$X[,,i],1,"-",X$X[w_g,,i])^2,2,"mean")
          dd <- apply(apply(X$X[,,i],1,"-",X$X[w_d,,i])^2,2,"mean")
          split_prime[c,which((dg<=dd)==TRUE)] <- 1
          if (length(unique(split_prime[c,]))>1){
            u <- u+1
            qui <- c(qui,c)
            impurete2[[c]] <- impurity_split(Y,split_prime[c,], timeScale)
            imp <- c(imp,impurete2[[c]]$impur)
          }
        }
        if (u>0){
          gagnant <- qui[which.min(imp)]
          split[[i]] <- split_prime[gagnant,]
          impurete <- impurete2[[gagnant]]
          impur[i] <- impurete$impur
          toutes_imp[[i]] <- impurete$imp_list
        }
        else{
          impur[i] <- Inf
          split[[i]] <- Inf
        }
      }
      else{
        # Two images (or fewer): the only possible split is one per side.
        # NOTE(review): assumes exactly two rows here -- confirm callers.
        split[[i]] <- c(1,2)
        impurete <- impurity_split(Y,split[[i]], timeScale)
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
    }
    if(X$type=="scalar"){
      if (length(unique(X$X[,i]))>2){
        # Draw ntry random pairs of scalar centres.
        centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          centers[l,] <- sample(X$X[,i],2)
        }
        # Assign each observation to the closer centre.
        split_prime <- matrix(2,ntry,length(X$X[,i]))
        for (l in 1:length(X$X[,i])){
          for (k in 1:ntry){
            if (abs(centers[k,1]-X$X[l,i])<= abs(centers[k,2]-X$X[l,i])) split_prime[k,l] <- 1
          }
        }
        u <- 0
        qui <- NULL
        impurete2 <- list()
        imp <- NULL
        for (k in 1:ntry){
          if (length(unique(split_prime[k,]))>1){
            u <- u+1
            qui <- c(qui,k)
            # BUG FIX: was `impurete2[[k]] <- c(impurete2, impurity_split(...))`,
            # which embedded all previously accumulated results inside each
            # element; assign the result directly, as the other branches do.
            impurete2[[k]] <- impurity_split(Y,split_prime[k,], timeScale)
            imp <- c(imp, impurete2[[k]]$impur)
          }
        }
        if (u>0){
          gagnant <- qui[which.min(imp)]
          split[[i]] <- split_prime[gagnant,]
          impurete <- impurete2[[gagnant]]
          impur[i] <- impurete$impur
          toutes_imp[[i]] <- impurete$imp_list
        }
        else{
          impur[i] <- Inf
          split[[i]] <- Inf
        }
      }
      else {
        impur[i] <- Inf
        split[[i]] <- Inf
      }
    }
  }
  # Shape predictors: return the running-minimum candidate directly.
  if (Imp_shape<Inf){
    return(list(split = split, impurete = Imp_shape, gauche = gauche, droite= droite , variable = var_shape,Pure = Pure, impur_list = impur_list))
  }
  # No variable produced an admissible split: the node is pure.
  if (length(unique(impur))==1 & is.element(Inf,impur)==TRUE){
    return(list(Pure=TRUE))
  }
  # Otherwise return the variable with the lowest impurity.
  true_split <- which.min(impur)
  split <- split[[true_split]]
  return(list(split=split, impurete=min(impur),impur_list = toutes_imp[[true_split]], variable=which.min(impur), Pure=Pure))
}
| /R/ERvar_split.R | no_license | Lcapitaine/FrechForest | R | false | false | 7,086 | r | #' Extremely randomized split
#'
#' @param X a predictor set: a list with elements \code{type} ("factor",
#'   "curve", "shape", "image" or "scalar"), \code{X} (the values),
#'   \code{id} and, for curves, \code{time}
#' @param Y the outcome, passed through to \code{impurity_split}
#' @param timeScale time scale of the Frechet distance used for curve predictors
#' @param ntry number of random candidate splits drawn per variable
#'
#' @import kmlShape
#' @import Evomorph
#' @import emdist
#'
#' @keywords internal
ERvar_split <- function(X ,Y,ntry=3,timeScale=0.1){
  # Extremely randomized split: for every predictor in X, draw ntry random
  # candidate binary splits (random pairs of centers, or a random factor
  # partition), score each candidate with impurity_split(), and keep the best
  # split over all variables.
  #
  # Returns a list with the retained split vector, its impurity, the index of
  # the split variable (plus the two centers for shape predictors), or
  # list(Pure=TRUE) when no valid split exists for any variable.
  impur <- rep(0,dim(X$X)[length(dim(X$X))])
  toutes_imp <- list()
  impur_list = list()
  split <- list()
  Pure <- FALSE
  Imp_shape <- Inf
  var_shape <- Inf
  # Loop over the predictors (last dimension of X$X indexes the variables).
  for (i in 1:dim(X$X)[length(dim(X$X))]){
    if (X$type=="factor"){
      if (length(unique(X$X[,i]))>1){
        L <- Fact.partitions(X$X[,i],X$id)
        # NOTE(review): split_courant / impur_courant / toutes_imp_courant are
        # initialised but never used below.
        split_courant <- list()
        impur_courant <- rep(NA,length(L))
        toutes_imp_courant <- list()
        # Draw one candidate partition at random
        tirage <- sample(1:length(L), 1)
        # Build the binary split (group 1 vs group 2) induced by the partition
        split[[i]] <- rep(2,length(X$id))
        for (l in L[[tirage]]){
          split[[i]][which(X$id==l)] <- 1
        }
        # Evaluate the quality of this split
        # NOTE(review): unlike the other branches, no timeScale is passed here.
        impurete <- impurity_split(Y,split[[i]])
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
      else {
        # Constant factor: no split possible on this variable
        impur[i] <- Inf
        split[[i]] <- Inf
      }
    }
    if( X$type=="curve"){
      # Start by drawing the ntry pairs of candidate centers
      id_centers <- matrix(NA,ntry,2)
      for (l in 1:ntry){
        id_centers[l,] <- sample(unique(X$id),2)
      }
      ### Then loop over the ntry candidate splits
      split_prime <- matrix(2,ntry,length(unique(X$id)))
      u <- 0
      impurete2 <- list()
      qui <- NULL
      imp <- NULL
      for (c in 1:ntry){
        w_gauche <- which(X$id==id_centers[c,1])
        w_droit <- which(X$id==id_centers[c,2])
        # Assign each subject to the closer center (Frechet distance)
        for (l in 1:length(unique(X$id))){
          w <- which(X$id==unique(X$id)[l])
          dg <- distFrechet(X$time[w_gauche],X$X[w_gauche,i],X$time[w],X$X[w,i], timeScale = timeScale)
          dd <- distFrechet(X$time[w_droit],X$X[w_droit,i],X$time[w],X$X[w,i], timeScale = timeScale)
          if (dg<=dd) split_prime[c,l] <- 1
        }
        # Keep only candidates that actually separate the subjects
        if (length(unique(split_prime[c,]))>1){
          u <- u+1
          qui <- c(qui, c)
          impurete2[[c]] <- impurity_split(Y,split_prime[c,], timeScale)
          imp <- c(imp,impurete2[[c]]$impur)
        }
      }
      if (u>0){
        # Retain the candidate with the lowest impurity
        gagnant <- qui[which.min(imp)]
        split[[i]] <- split_prime[gagnant,]
        impurete <- impurete2[[gagnant]]
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
      else{
        impur[i] <- Inf
        split[[i]] <- Inf}
    }
    if (X$type=="shape"){
      n_elem = dim(X$X)[3]
      if (n_elem>2){
        id_centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          id_centers[l,] <- sample(X$id,2)
        }
        split_prime <- matrix(2,ntry,length(X$id))
        dd = rep(NA,n_elem)
        dg = rep(NA,n_elem)
        for (c in 1:ntry){
          # Earth mover's distance of every shape to the two drawn centers
          for (k in 1:n_elem){
            dg[k] <- emd2d(X$X[,,k,i],X$X[,,which(X$id==id_centers[c,1]),i])
            dd[k] <- emd2d(X$X[,,k,i],X$X[,,which(X$id==id_centers[c,2]),i])
          }
          for (l in 1:length(unique(X$id))){
            # Undefined distances: assign the subject to a random side
            if (is.nan(dg[l]) || is.nan(dd[l])) split_prime[c,l] <- sample(c(1,2),1)
            else if (dg[l]<=dd[l]) split_prime[c,l] <- 1
          }
          # NOTE(review): the other branches test length(unique(...)) > 1 here;
          # this condition looks like it is missing unique() -- confirm.
          if (length(split_prime[c,])>1){
            impurete2 <- impurity_split(Y,split_prime[c,], timeScale)
            # Track the best shape split across all variables directly
            if (impurete2$impur <Imp_shape && is.na(impurete2$impur)==FALSE){
              Imp_shape <- impurete2$impur
              var_shape <- i
              gauche = id_centers[c,1]
              droite = id_centers[c,2]
              impur_list = impurete2$imp_list
              split = split_prime[c,]
              Pure = FALSE
            }
          }
        }
      }
    }
    if (X$type=="image"){
      if (nrow(X$X)>2){
        id_centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          id_centers[l,] <- sample(X$id,2)
        }
        split_prime <- matrix(2,ntry,length(X$id))
        u <- 0
        qui <- NULL
        impurete2 <- list()
        imp <- NULL
        for (c in 1:ntry){
          w_g <- which(X$id==id_centers[c,1])
          w_d <- which(X$id==id_centers[c,2])
          ### Compute the mean squared pixel distance to each center
          dg = apply(apply(X$X[,,i],1,"-",X$X[w_g,,i])^2,2,"mean")
          dd = apply(apply(X$X[,,i],1,"-",X$X[w_d,,i])^2,2,"mean")
          split_prime[c,which((dg<=dd)==TRUE)]=1
          if (length(unique(split_prime[c,]))>1){
            u <-u+1
            qui <- c(qui,c)
            impurete2[[c]] <- impurity_split(Y,split_prime[c,], timeScale)
            imp <- c(imp,impurete2[[c]]$impur)
          }
        }
        if (u>0){
          gagnant <- qui[which.min(imp)]
          split[[i]] <- split_prime[gagnant,]
          impurete <- impurete2[[gagnant]]
          impur[i] <- impurete$impur
          toutes_imp[[i]] <- impurete$imp_list
        }
        else{
          impur[i] <- Inf
          split[[i]] <- Inf
        }
      }
      else{
        # Two subjects or fewer: the only possible split is one per side
        split[[i]] <- c(1,2)
        impurete <- impurity_split(Y,split[[i]], timeScale)
        impur[i] <- impurete$impur
        toutes_imp[[i]] <- impurete$imp_list
      }
    }
    if(X$type=="scalar"){
      if (length(unique(X$X[,i]))>2){
        ### Draw the candidate centers
        #centers <- sample(X$X[,i],2)
        centers <- matrix(NA,ntry,2)
        for (l in 1:ntry){
          centers[l,] <- sample(X$X[,i],2)
        }
        #split[[i]] <- rep(2,length(X$X[,i]))
        split_prime <- matrix(2,ntry,length(X$X[,i]))
        # Assign each observation to the nearer of the two drawn centers
        for (l in 1:length(X$X[,i])){
          for (k in 1:ntry){
            if (abs(centers[k,1]-X$X[l,i])<= abs(centers[k,2]-X$X[l,i])) split_prime[k,l] <- 1
          }
        }
        u <- 0
        qui <- NULL
        impurete2 <- list()
        imp <- NULL
        for (k in 1:ntry){
          if (length(unique(split_prime[k,]))>1){
            u <- u+1
            qui <- c(qui,k)
            # NOTE(review): c(impurete2, impurity_split(...)) concatenates the
            # accumulated list into each element, unlike the other branches
            # which store impurity_split(...) directly -- confirm intended.
            impurete2[[k]] <- c(impurete2,impurity_split(Y,split_prime[k,], timeScale))
            imp <- c(imp, impurete2[[k]]$impur)
          }
        }
        if (u>0){
          gagnant <- qui[which.min(imp)]
          split[[i]] <- split_prime[gagnant,]
          impurete <- impurete2[[gagnant]]
          impur[i] <- impurete$impur
          toutes_imp[[i]] <- impurete$imp_list
        }
        else{
          impur[i] <- Inf
          split[[i]] <- Inf
        }
      }
      else {
        impur[i] <- Inf
        split[[i]] <- Inf
      }
    }
  }
  # Shape predictors bypass the per-variable bookkeeping: return directly
  if (Imp_shape<Inf){
    return(list(split = split, impurete = Imp_shape, gauche = gauche, droite= droite , variable = var_shape,Pure = Pure, impur_list = impur_list))
  }
  # Every variable failed to produce a valid split: node is pure
  if (length(unique(impur))==1 & is.element(Inf,impur)==TRUE){
    return(list(Pure=TRUE))
  }
  # Otherwise return the best split over all variables
  true_split <- which.min(impur)
  split <- split[[true_split]]
  return(list(split=split, impurete=min(impur),impur_list = toutes_imp[[true_split]], variable=which.min(impur), Pure=Pure))
}
|
#' @param model an object of class SaemixModel, created by a call to the
#' function \code{\link{saemixModel}}
#' @param data an object of class SaemixData, created by a call to the function
#' \code{\link{saemixData}}
#' @param control a list of options, see \code{\link{saemixControl}}
#' @return An object of class SaemixObject containing the results of the fit of
#' the data by the non-linear mixed effect model. A summary of the results is
#' printed out to the terminal, and, provided the appropriate options have not
#' been changed, numerical and graphical outputs are saved in a directory.
#' @author Emmanuelle Comets <emmanuelle.comets@@inserm.fr>, Audrey Lavenu,
#' Marc Lavielle.
#' @seealso \code{\link{SaemixData}},\code{\link{SaemixModel}},
#' \code{\link{SaemixObject}}, \code{\link{saemixControl}},
#' \code{\link{plot.saemix}}
#' @references Kuhn E, Lavielle M. Maximum likelihood estimation in nonlinear
#' mixed effects models. Computational Statistics and Data Analysis 49, 4
#' (2005), 1020-1038.
#'
#' Comets E, Lavenu A, Lavielle M. SAEMIX, an R version of the SAEM algorithm.
#' 20th meeting of the Population Approach Group in Europe, Athens, Greece
#' (2011), Abstr 2173.
#' @keywords models
#' @examples
#'
#' data(theo.saemix)
#'
#' saemix.data<-saemixData(name.data=theo.saemix,header=TRUE,sep=" ",na=NA,
#' name.group=c("Id"),name.predictors=c("Dose","Time"),
#' name.response=c("Concentration"),name.covariates=c("Weight","Sex"),
#' units=list(x="hr",y="mg/L", covariates=c("kg","-")), name.X="Time")
#'
#' model1cpt<-function(psi,id,xidep) {
#' dose<-xidep[,1]
#' tim<-xidep[,2]
#' ka<-psi[id,1]
#' V<-psi[id,2]
#' CL<-psi[id,3]
#' k<-CL/V
#' ypred<-dose*ka/(V*(ka-k))*(exp(-k*tim)-exp(-ka*tim))
#' return(ypred)
#' }
#'
#' saemix.model<-saemixModel(model=model1cpt,
#' description="One-compartment model with first-order absorption",
#' psi0=matrix(c(1.,20,0.5,0.1,0,-0.01),ncol=3, byrow=TRUE,
#' dimnames=list(NULL, c("ka","V","CL"))),transform.par=c(1,1,1),
#' covariate.model=matrix(c(0,1,0,0,0,0),ncol=3,byrow=TRUE),fixed.estim=c(1,1,1),
#' covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),
#' omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),error.model="constant")
#'
#'
#' # Not run (strict time constraints for CRAN)
#' # saemix.fit<-saemix(saemix.model,saemix.data,list(seed=632545,directory="newtheo",
#' # save=FALSE,save.graphs=FALSE))
#'
#' # Prints a summary of the results
#' # print(saemix.fit)
#'
#' # Outputs the estimates of individual parameters
#' # psi(saemix.fit)
#'
#' # Shows some diagnostic plots to evaluate the fit
#' # plot(saemix.fit)
#'
#'
#' @export saemix_post_cat
saemix_post_cat<-function(model,data,control=list()) {
  # Run the initialisation and a single E-step of the SAEM algorithm for
  # categorical data, returning the posterior samples produced by the
  # reference random-walk kernel and by the new kernel (no full estimation).
  #
  # Args:
  #   model:   a SaemixModel object (see saemixModel())
  #   data:    a SaemixData object (see saemixData())
  #   control: list of algorithm options (see saemixControl())
  # Returns:
  #   list(post_rwm=..., post_new=...) with the two sets of posterior samples,
  #   or NULL (invisibly, via return()) when the inputs are invalid.
  # is() handles S4 inheritance correctly, unlike comparing class(x) with !=
  # (which also fails when class() returns more than one element).
  if(!is(model, "SaemixModel")) {
    cat("Please provide a valid model object (see the help page for SaemixModel)\n")
    return()
  }
  if(!is(data, "SaemixData")) {
    cat("Please provide a valid data object (see the help page for SaemixData)\n")
    return()
  }
  saemixObject<-new(Class="SaemixObject",data=data,model=model,options=control)
  # saemixObject<-new(Class="SaemixObject",data=saemix.data, model=saemix.model,options=saemix.options)
  # Optionally silence warnings; restore the user's "warn" option on exit
  # (the original code saved opt.warn but never restored it).
  opt.warn<-getOption("warn")
  on.exit(options(warn=opt.warn), add=TRUE)
  if(!saemixObject["options"]$warnings) options(warn=-1)
  saemix.options<-saemixObject["options"]
  saemix.model<-saemixObject["model"]
  saemix.data<-saemixObject["data"]
  # Drop rows flagged as missing (mdv != 0) and update the observation count
  saemix.data@ocov<-saemix.data@ocov[saemix.data@data[,"mdv"]==0,,drop=FALSE]
  saemix.data@data<-saemix.data@data[saemix.data@data[,"mdv"]==0,]
  saemix.data@ntot.obs<-dim(saemix.data@data)[1]
  # showall(saemixObject)
  # Initialising random generator
  OLDRAND<-TRUE
  set.seed(saemix.options$seed)
  ############################################
  # Main Algorithm
  ############################################
  # Initialisation - creating several lists with necessary information extracted (Uargs, Dargs, opt,varList, suffStat)
  xinit<-initialiseMainAlgo_cat(saemix.data,saemix.model,saemix.options)
  saemix.model<-xinit$saemix.model
  Dargs<-xinit$Dargs
  Uargs<-xinit$Uargs
  varList<-xinit$varList
  phiM<-xinit$phiM
  mean.phi<-xinit$mean.phi
  DYF<-xinit$DYF
  opt<-xinit$opt
  betas<-betas.ini<-xinit$betas
  fixed.psi<-xinit$fixedpsi.ini
  var.eta<-varList$diag.omega
  theta0<-c(fixed.psi,var.eta[Uargs$i1.omega2],varList$pres[Uargs$ind.res])
  # Population and full parameter traces (row 1 holds the initial values)
  parpop<-matrix(data=0,nrow=(saemix.options$nbiter.tot+1),ncol=(Uargs$nb.parameters+length(Uargs$i1.omega2)))
  colnames(parpop)<-c(saemix.model["name.modpar"], saemix.model["name.random"])
  allpar<-matrix(data=0,nrow=(saemix.options$nbiter.tot+1), ncol=(Uargs$nb.betas+length(Uargs$i1.omega2)))
  colnames(allpar)<-c(saemix.model["name.fixed"],saemix.model["name.random"])
  parpop[1,]<-theta0
  allpar[1,]<-xinit$allpar0
  # using several Markov chains - only useful if passed back to main routine...
  # chdat<-new(Class="SaemixRepData",data=saemix.data, nb.chains=saemix.options$nb.chains)
  # NM<-chdat["NM"]
  # IdM<-chdat["dataM"]$IdM
  # yM<-chdat["dataM"]$yM
  # XM<-chdat["dataM"][,saemix.data["name.predictors"],drop=FALSE]
  # List of sufficient statistics - change during call to stochasticApprox
  suffStat<-list(statphi1=0,statphi2=0,statphi3=0,statrese=0)
  phi<-array(data=0,dim=c(Dargs$N, Uargs$nb.parameters, saemix.options$nb.chains))
  # structural model, check nb of parameters
  structural.model<-saemix.model["model"]
  # nb.parameters<-saemix.model["nb.parameters"]
  # Single E-step: draws the individual parameters and the posterior samples
  xmcmc<-estep_cat(1, Uargs, Dargs, opt, structural.model, mean.phi, varList, DYF, phiM, saemixObject)
  # xmcmc<-estep_newkernel(1, Uargs, Dargs, opt, structural.model, mean.phi, varList, DYF, phiM)
  # varList<-xmcmc$varList
  DYF<-xmcmc$DYF
  phiM<-xmcmc$phiM
  post_rwm<-xmcmc$post
  post_new<-xmcmc$post_new
  return(list(post_rwm = post_rwm,post_new = post_new))
}
| /PKPD - Main/warfarin_cat_incremental/post_cat.R | no_license | BelhalK/AccelerationTrainingAlgorithms | R | false | false | 5,872 | r | #' @param model an object of class SaemixModel, created by a call to the
#' function \code{\link{saemixModel}}
#' @param data an object of class SaemixData, created by a call to the function
#' \code{\link{saemixData}}
#' @param control a list of options, see \code{\link{saemixControl}}
#' @return An object of class SaemixObject containing the results of the fit of
#' the data by the non-linear mixed effect model. A summary of the results is
#' printed out to the terminal, and, provided the appropriate options have not
#' been changed, numerical and graphical outputs are saved in a directory.
#' @author Emmanuelle Comets <emmanuelle.comets@@inserm.fr>, Audrey Lavenu,
#' Marc Lavielle.
#' @seealso \code{\link{SaemixData}},\code{\link{SaemixModel}},
#' \code{\link{SaemixObject}}, \code{\link{saemixControl}},
#' \code{\link{plot.saemix}}
#' @references Kuhn E, Lavielle M. Maximum likelihood estimation in nonlinear
#' mixed effects models. Computational Statistics and Data Analysis 49, 4
#' (2005), 1020-1038.
#'
#' Comets E, Lavenu A, Lavielle M. SAEMIX, an R version of the SAEM algorithm.
#' 20th meeting of the Population Approach Group in Europe, Athens, Greece
#' (2011), Abstr 2173.
#' @keywords models
#' @examples
#'
#' data(theo.saemix)
#'
#' saemix.data<-saemixData(name.data=theo.saemix,header=TRUE,sep=" ",na=NA,
#' name.group=c("Id"),name.predictors=c("Dose","Time"),
#' name.response=c("Concentration"),name.covariates=c("Weight","Sex"),
#' units=list(x="hr",y="mg/L", covariates=c("kg","-")), name.X="Time")
#'
#' model1cpt<-function(psi,id,xidep) {
#' dose<-xidep[,1]
#' tim<-xidep[,2]
#' ka<-psi[id,1]
#' V<-psi[id,2]
#' CL<-psi[id,3]
#' k<-CL/V
#' ypred<-dose*ka/(V*(ka-k))*(exp(-k*tim)-exp(-ka*tim))
#' return(ypred)
#' }
#'
#' saemix.model<-saemixModel(model=model1cpt,
#' description="One-compartment model with first-order absorption",
#' psi0=matrix(c(1.,20,0.5,0.1,0,-0.01),ncol=3, byrow=TRUE,
#' dimnames=list(NULL, c("ka","V","CL"))),transform.par=c(1,1,1),
#' covariate.model=matrix(c(0,1,0,0,0,0),ncol=3,byrow=TRUE),fixed.estim=c(1,1,1),
#' covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),
#' omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),error.model="constant")
#'
#'
#' # Not run (strict time constraints for CRAN)
#' # saemix.fit<-saemix(saemix.model,saemix.data,list(seed=632545,directory="newtheo",
#' # save=FALSE,save.graphs=FALSE))
#'
#' # Prints a summary of the results
#' # print(saemix.fit)
#'
#' # Outputs the estimates of individual parameters
#' # psi(saemix.fit)
#'
#' # Shows some diagnostic plots to evaluate the fit
#' # plot(saemix.fit)
#'
#'
#' @export saemix
saemix_post_cat<-function(model,data,control=list()) {
if(class(model)!="SaemixModel") {
cat("Please provide a valid model object (see the help page for SaemixModel)\n")
return()
}
if(class(data)!="SaemixData") {
cat("Please provide a valid data object (see the help page for SaemixData)\n")
return()
}
saemixObject<-new(Class="SaemixObject",data=data,model=model,options=control)
# saemixObject<-new(Class="SaemixObject",data=saemix.data, model=saemix.model,options=saemix.options)
opt.warn<-getOption("warn")
if(!saemixObject["options"]$warnings) options(warn=-1)
saemix.options<-saemixObject["options"]
saemix.model<-saemixObject["model"]
saemix.data<-saemixObject["data"]
saemix.data@ocov<-saemix.data@ocov[saemix.data@data[,"mdv"]==0,,drop=FALSE]
saemix.data@data<-saemix.data@data[saemix.data@data[,"mdv"]==0,]
saemix.data@ntot.obs<-dim(saemix.data@data)[1]
# showall(saemixObject)
# Initialising random generator
OLDRAND<-TRUE
set.seed(saemix.options$seed)
############################################
# Main Algorithm
############################################
# Initialisation - creating several lists with necessary information extracted (Uargs, Dargs, opt,varList, suffStat)
xinit<-initialiseMainAlgo_cat(saemix.data,saemix.model,saemix.options)
saemix.model<-xinit$saemix.model
Dargs<-xinit$Dargs
Uargs<-xinit$Uargs
varList<-xinit$varList
phiM<-xinit$phiM
mean.phi<-xinit$mean.phi
DYF<-xinit$DYF
opt<-xinit$opt
betas<-betas.ini<-xinit$betas
fixed.psi<-xinit$fixedpsi.ini
var.eta<-varList$diag.omega
theta0<-c(fixed.psi,var.eta[Uargs$i1.omega2],varList$pres[Uargs$ind.res])
parpop<-matrix(data=0,nrow=(saemix.options$nbiter.tot+1),ncol=(Uargs$nb.parameters+length(Uargs$i1.omega2)))
colnames(parpop)<-c(saemix.model["name.modpar"], saemix.model["name.random"])
allpar<-matrix(data=0,nrow=(saemix.options$nbiter.tot+1), ncol=(Uargs$nb.betas+length(Uargs$i1.omega2)))
colnames(allpar)<-c(saemix.model["name.fixed"],saemix.model["name.random"])
parpop[1,]<-theta0
allpar[1,]<-xinit$allpar0
# using several Markov chains - only useful if passed back to main routine...
# chdat<-new(Class="SaemixRepData",data=saemix.data, nb.chains=saemix.options$nb.chains)
# NM<-chdat["NM"]
# IdM<-chdat["dataM"]$IdM
# yM<-chdat["dataM"]$yM
# XM<-chdat["dataM"][,saemix.data["name.predictors"],drop=FALSE]
# List of sufficient statistics - change during call to stochasticApprox
suffStat<-list(statphi1=0,statphi2=0,statphi3=0,statrese=0)
phi<-array(data=0,dim=c(Dargs$N, Uargs$nb.parameters, saemix.options$nb.chains))
# structural model, check nb of parameters
structural.model<-saemix.model["model"]
# nb.parameters<-saemix.model["nb.parameters"]
xmcmc<-estep_cat(1, Uargs, Dargs, opt, structural.model, mean.phi, varList, DYF, phiM, saemixObject)
# xmcmc<-estep_newkernel(1, Uargs, Dargs, opt, structural.model, mean.phi, varList, DYF, phiM)
# varList<-xmcmc$varList
DYF<-xmcmc$DYF
phiM<-xmcmc$phiM
post_rwm<-xmcmc$post
post_new<-xmcmc$post_new
return(list(post_rwm = post_rwm,post_new = post_new))
}
|
# Minimal Shiny app: concatenates two text inputs when the button is pressed.
library(shiny)

ui <- fluidPage(
  verticalLayout(
    textInput("a", "1st str", value = "Hello"),
    textInput("b", "2nd str", value = "World!"),
    actionButton("go", "paste", width = 100),
    br(),
    sidebarPanel(textOutput(outputId = "ab"))
  )
)

server <- function(input, output) {
  # Re-evaluate the concatenation only when the button is clicked.
  combined <- eventReactive(input$go, {
    paste(input$a, input$b)
  })
  output$ab <- renderText(combined())
}

shinyApp(ui, server)
| /Final_Project/Shiny/basic/app.R | no_license | pumpkinlinlin/CSX_RProject_Spring_2018 | R | false | false | 423 | r | library(shiny)
ui <- fluidPage(
verticalLayout(
textInput("a","1st str",value = "Hello"),
textInput("b","2nd str",value = "World!"),
actionButton("go", "paste", width = 100),
br(),
sidebarPanel(textOutput(outputId = 'ab'))
)
)
server <-
function(input,output){
re <- eventReactive(
input$go, paste(input$a,input$b)
)
output$ab <- renderText({re()})
}
shinyApp(ui, server)
|
# Shiny UI for the MST QC data upload tool: three checkboxes select the
# processing steps, RUN starts them, Quit exits.
library(shiny)
shinyUI(fluidPage(
  # Application title
  titlePanel("Upload MST QC DATA"),
  # Processing options -- presumably read by the server when RUN is pressed
  # (confirm against server.R)
  checkboxInput("CB", label = "Export from .Asyr", value = FALSE),
  checkboxInput("CB2", label = "Write data to csv", value = FALSE),
  checkboxInput("CB3", label = "Upload data to database", value = FALSE),
  actionButton("goButton","RUN"),
  actionButton('Quit','Quit'),
  br(),br(),br(),
  # Status message and data preview filled in by the server
  textOutput("MSG"),
  dataTableOutput('DF')
))
| /inst/MSTQC/ui.R | no_license | JARS3N/PipeFish | R | false | false | 433 | r |
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Upload MST QC DATA"),
checkboxInput("CB", label = "Export from .Asyr", value = FALSE),
checkboxInput("CB2", label = "Write data to csv", value = FALSE),
checkboxInput("CB3", label = "Upload data to database", value = FALSE),
actionButton("goButton","RUN"),
actionButton('Quit','Quit'),
br(),br(),br(),
textOutput("MSG"),
dataTableOutput('DF')
))
|
# regression model analysis
# Fits linear and probit models of the likelihood-to-recommend score (and of
# Promoter/Detractor status) on hotel satisfaction sub-scores, restricted to
# urban business hotels in California.
# read the data (one monthly extract per file; same column subset each time)
# NOTE(review): the first file is out-201501.csv while all the others are
# out-2014xx.csv -- presumably it stands in for the missing 201401 month,
# but confirm this is not a typo.
Regression_1<-fread(file="~/Desktop/intro to data/project/dataset/out-201501.csv",select=c(137,139:145,168,194,196,232))
Regression_2<-fread(file="~/Desktop/intro to data/project/dataset/out-201402.csv",select=c(137,139:145,168,194,196,232))
Regression_3<-fread(file="~/Desktop/intro to data/project/dataset/out-201403.csv",select=c(137,139:145,168,194,196,232))
Regression_4<-fread(file="~/Desktop/intro to data/project/dataset/out-201404.csv",select=c(137,139:145,168,194,196,232))
Regression_5<-fread(file="~/Desktop/intro to data/project/dataset/out-201405.csv",select=c(137,139:145,168,194,196,232))
Regression_6<-fread(file="~/Desktop/intro to data/project/dataset/out-201406.csv",select=c(137,139:145,168,194,196,232))
Regression_7<-fread(file="~/Desktop/intro to data/project/dataset/out-201407.csv",select=c(137,139:145,168,194,196,232))
Regression_8<-fread(file="~/Desktop/intro to data/project/dataset/out-201408.csv",select=c(137,139:145,168,194,196,232))
Regression_9<-fread(file="~/Desktop/intro to data/project/dataset/out-201409.csv",select=c(137,139:145,168,194,196,232))
Regression_10<-fread(file="~/Desktop/intro to data/project/dataset/out-201410.csv",select=c(137,139:145,168,194,196,232))
Regression_11<-fread(file="~/Desktop/intro to data/project/dataset/out-201411.csv",select=c(137,139:145,168,194,196,232))
Regression_12<-fread(file="~/Desktop/intro to data/project/dataset/out-201412.csv",select=c(137,139:145,168,194,196,232))
# stack the twelve monthly extracts into one table
RegressionData<-rbind(Regression_1,Regression_2,Regression_3,Regression_4,Regression_5,Regression_6,Regression_7,Regression_8,Regression_9,Regression_10,Regression_11,Regression_12)
# clean the dataset by removing NAs (empty strings are recoded to NA first)
RegressionData[RegressionData==""]<-NA
RegressionData1<-na.omit(RegressionData)
View(RegressionData1)
# keep urban business hotels in California only
RegressionRow<-which((RegressionData1$State_PL=="California") & (RegressionData1$Type_PL=="Business") & (RegressionData1$Location_PL=="Urban"))
RegressionData2<-RegressionData1[c(RegressionRow),]
View(RegressionData2)
# binary indicators derived from the NPS classification
RegressionData2$Promoter<-0
ProRow1<-which((RegressionData2$NPS_Type)=="Promoter")
RegressionData2$Promoter[c(ProRow1)]<-1
RegressionData2$Detractor<-0
DetRow1<-which((RegressionData2$NPS_Type)=="Detractor")
RegressionData2$Detractor[c(DetRow1)]<-1
# NOTE(review): install.packages() at run time re-installs on every execution;
# consider guarding with requireNamespace().
install.packages("mfx")
library(mfx)
# linear model of the likelihood-to-recommend score on the sub-scores
Lin1<-lm(formula=RegressionData2$Likelihood_Recommend_H~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, data=RegressionData2)
summary(Lin1)
# probit model of Promoter status
ProbPro<-glm(RegressionData2$Promoter~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, family=binomial(link="probit"),data=RegressionData2)
summary(ProbPro)
install.packages("pseudo")
library(pseudo)
install.packages("BaylorEdPsych")
library(BaylorEdPsych)
PseudoR2(ProbPro)
# marginal effects at the mean for the promoter probit
ProbProMfx<-probitmfx(RegressionData2$Promoter~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H,data=RegressionData2,
                      atmean=TRUE, robust=TRUE)
ProbProMfx
coefplot(ProbPro)
# probit model of Detractor status
ProbDet<-glm(RegressionData2$Detractor~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, family=binomial(link="probit"),data=RegressionData2)
PseudoR2(ProbDet)
ProbDetMfx<-probitmfx(RegressionData2$Detractor~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H,data=RegressionData2,
                      atmean=TRUE, robust=TRUE)
ProbDetMfx
# focus more on guest room condition and customer service
| /IST687/IST_687_Regression_Analysis.R | no_license | MathieuWmy/Portfolio | R | false | false | 4,160 | r | # regression model analysis
# read the data
Regression_1<-fread(file="~/Desktop/intro to data/project/dataset/out-201501.csv",select=c(137,139:145,168,194,196,232))
Regression_2<-fread(file="~/Desktop/intro to data/project/dataset/out-201402.csv",select=c(137,139:145,168,194,196,232))
Regression_3<-fread(file="~/Desktop/intro to data/project/dataset/out-201403.csv",select=c(137,139:145,168,194,196,232))
Regression_4<-fread(file="~/Desktop/intro to data/project/dataset/out-201404.csv",select=c(137,139:145,168,194,196,232))
Regression_5<-fread(file="~/Desktop/intro to data/project/dataset/out-201405.csv",select=c(137,139:145,168,194,196,232))
Regression_6<-fread(file="~/Desktop/intro to data/project/dataset/out-201406.csv",select=c(137,139:145,168,194,196,232))
Regression_7<-fread(file="~/Desktop/intro to data/project/dataset/out-201407.csv",select=c(137,139:145,168,194,196,232))
Regression_8<-fread(file="~/Desktop/intro to data/project/dataset/out-201408.csv",select=c(137,139:145,168,194,196,232))
Regression_9<-fread(file="~/Desktop/intro to data/project/dataset/out-201409.csv",select=c(137,139:145,168,194,196,232))
Regression_10<-fread(file="~/Desktop/intro to data/project/dataset/out-201410.csv",select=c(137,139:145,168,194,196,232))
Regression_11<-fread(file="~/Desktop/intro to data/project/dataset/out-201411.csv",select=c(137,139:145,168,194,196,232))
Regression_12<-fread(file="~/Desktop/intro to data/project/dataset/out-201412.csv",select=c(137,139:145,168,194,196,232))
RegressionData<-rbind(Regression_1,Regression_2,Regression_3,Regression_4,Regression_5,Regression_6,Regression_7,Regression_8,Regression_9,Regression_10,Regression_11,Regression_12)
# clean the dataset by removing NAs
RegressionData[RegressionData==""]<-NA
RegressionData1<-na.omit(RegressionData)
View(RegressionData1)
RegressionRow<-which((RegressionData1$State_PL=="California") & (RegressionData1$Type_PL=="Business") & (RegressionData1$Location_PL=="Urban"))
RegressionData2<-RegressionData1[c(RegressionRow),]
View(RegressionData2)
RegressionData2$Promoter<-0
ProRow1<-which((RegressionData2$NPS_Type)=="Promoter")
RegressionData2$Promoter[c(ProRow1)]<-1
RegressionData2$Detractor<-0
DetRow1<-which((RegressionData2$NPS_Type)=="Detractor")
RegressionData2$Detractor[c(DetRow1)]<-1
install.packages("mfx")
library(mfx)
Lin1<-lm(formula=RegressionData2$Likelihood_Recommend_H~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, data=RegressionData2)
summary(Lin1)
ProbPro<-glm(RegressionData2$Promoter~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, family=binomial(link="probit"),data=RegressionData2)
summary(ProbPro)
install.packages("pseudo")
library(pseudo)
install.packages("BaylorEdPsych")
library(BaylorEdPsych)
PseudoR2(ProbPro)
ProbProMfx<-probitmfx(RegressionData2$Promoter~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H,data=RegressionData2,
atmean=TRUE, robust=TRUE)
ProbProMfx
coefplot(ProbPro)
ProbDet<-glm(RegressionData2$Detractor~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H, family=binomial(link="probit"),data=RegressionData2)
PseudoR2(ProbDet)
ProbDetMfx<-probitmfx(RegressionData2$Detractor~RegressionData2$Guest_Room_H+RegressionData2$Tranquility_H+RegressionData2$Condition_Hotel_H+RegressionData2$Customer_SVC_H+RegressionData2$Staff_Cared_H+RegressionData2$Internet_Sat_H+RegressionData2$Check_In_H,data=RegressionData2,
atmean=TRUE, robust=TRUE)
ProbDetMfx
# focus more on guest room condition and customer service
|
/20190930_구글맵마커표시.R | no_license | i-am-chan/2019_2_BigData | R | false | false | 794 | r | ||
# Summary table: one row per candidate LCA model (1 to 5 latent classes),
# columns for the fit statistics and the estimated class proportions.
results = data.frame(matrix(, nrow=5, ncol=13))
colnames(results) = c("model", "log-likelihood", "resid. df", "BIC",
                      "ABIC", "cAIC", "likelihood-ratio", "Entropy",
                      "Class1", "Class2", "Class3", "Class4", "Class5")
# Shannon entropy of a probability vector. Terms with p == 0 contribute 0
# (the 0*log(0) limit), so probability vectors containing empty classes no
# longer produce NaN; strictly positive inputs give the same result as before.
entropy <- function(p) sum(-p[p > 0] * log(p[p > 0]))
# Subset the Argentina responses to the six indicator variables of the model.
# NOTE(review): "nonnegtive" looks like a typo of "nonnegative"; the data
# frame must be defined (with this spelling) earlier in the project, as must
# the model formula `f` used below.
Argentina = nonnegtive %>%
  filter(country == "Argentina") %>%
  dplyr::select(c("tax", "religion", "free_election", "state_aid",
                  "civil_rights", "women"))
# Fit LCA models with 1..5 latent classes and collect their fit statistics.
for (i in 1:5){
  #model = replicate(5, NA)
  model <- poLCA(f, data= Argentina, nclass= i, na.rm = FALSE, nrep=15, maxiter=3500)
  results[i,1] = paste("model", i)
  results[i,2]<- model$llik
  results[i,3]<- model$resid.df
  results[i,4]<- model$bic
  results[i,5]<- (-2* model$llik) + ((log((model$N + 2)/24)) * model$npar) #abic
  results[i,6]<- (-2* model$llik) + model$npar * (1 + log(model$N)) #caic
  results[i,7]<- model$Gsq
  # Relative entropy (classification certainty); undefined for a 1-class model
  results[i,8] <- round(((entropy(model$P) - mean(apply(model$posterior,1, entropy),na.rm = TRUE)) / entropy(model$P)),3)
  if (i == 1) {
    results[i, 8] = c("-")
  }
  # Estimated class proportions, padded with "-" up to the 5 class columns
  results[i, 9:13] = c(round(model$P,3), rep("-", 5-i))
}
write.csv(results, paste("csv_each_country/Argentina", ".csv", sep = ""), row.names = FALSE)
| /All_data_script_csv.R | no_license | DavidykZhao/Comparative_pol_measurement_project | R | false | false | 1,232 | r |
results = data.frame(matrix(, nrow=5, ncol=13))
colnames(results) = c("model", "log-likelihood", "resid. df", "BIC",
"ABIC", "cAIC", "likelihood-ratio", "Entropy",
"Class1", "Class2", "Class3", "Class4", "Class5")
entropy<-function (p) sum(-p*log(p))
Argentina = nonnegtive %>%
filter(country == "Argentina") %>%
dplyr::select(c("tax", "religion", "free_election", "state_aid",
"civil_rights", "women"))
for (i in 1:5){
#model = replicate(5, NA)
model <- poLCA(f, data= Argentina, nclass= i, na.rm = FALSE, nrep=15, maxiter=3500)
results[i,1] = paste("model", i)
results[i,2]<- model$llik
results[i,3]<- model$resid.df
results[i,4]<- model$bic
results[i,5]<- (-2* model$llik) + ((log((model$N + 2)/24)) * model$npar) #abic
results[i,6]<- (-2* model$llik) + model$npar * (1 + log(model$N)) #caic
results[i,7]<- model$Gsq
results[i,8] <- round(((entropy(model$P) - mean(apply(model$posterior,1, entropy),na.rm = TRUE)) / entropy(model$P)),3)
if (i == 1) {
results[i, 8] = c("-")
}
results[i, 9:13] = c(round(model$P,3), rep("-", 5-i))
}
write.csv(results, paste("csv_each_country/Argentina", ".csv", sep = ""), row.names = FALSE)
|
# filter cells and genes
# QC script: loads the merged UMI count matrix plus per-cell sequencing
# statistics, flags low-quality cells and lowly-detected genes, and writes
# filtered matrices and QC reports.
setwd("~/lustre/06-Human_cell_atlas/pooled_data/01_stomach/")
library("reshape2")
library("ggplot2")
library("cowplot")
# filter_tools.r provides do_plotIndex(), do_cellFiltering(),
# do_show_ftd_stat() used below
source("../../scripts/filter_tools.r")
samplingPos <- "."
OUT <- paste0("03-expression/merged/filtering/", samplingPos)
dir.create(OUT, showWarnings = F, recursive = T)
#load(file = paste0(OUT, "/filtering.RData"))
# load gene ID
# Two-column table mapping Ensembl IDs to gene symbols; ensembl_alt strips
# the trailing ".<version>" suffix from the Ensembl ID.
geneID <- read.table("~/lustre/06-Human_cell_atlas/Genomes/human/gene_ID2Name_fixed.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(geneID)
colnames(geneID) <- c("ensembl", "symbol")
geneID$ensembl_alt <- gsub("\\.[0-9]+", "", geneID$ensembl)
# load expression data (genes x cells UMI count matrix)
exprMatr <- read.table("03-expression/merged/UMIcount_allGenes.txt",header = T,row.names = 1,sep = "\t", stringsAsFactors = F, check.names = F, comment.char = "")
dim(exprMatr)
# extract data
#exprMatr <- exprMatr[, grep(paste0("^", samplingPos, "-"), colnames(exprMatr))]
# remove control/abnormal sample
# NOTE(review): if black_list matches no column, grep() returns integer(0)
# and "[, -integer(0)]" drops ALL columns -- confirm the pattern is always
# present in the merged matrix.
black_list <- "WD_C-D11_"
exprMatr <- exprMatr[, - grep(black_list, colnames(exprMatr))]
# remove 4th inner
#inner_id <- as.numeric(gsub(".*_", "", colnames(exprMatr))) %% 8; inner_id[inner_id==0] <- 8; inner_id <- factor(inner_id)
#exprMatr <- exprMatr[, inner_id!=4]
print("Before filter:")
dim(exprMatr)
## 1. cell level ----
# Per-cell QC metrics are accumulated column by column into 'cellStat'.
# 1 clean reads and A/B ratio
cleanStat_per_cell <- read.table("01-cleandata/merged/cleanFqStat.txt", header = F, sep = "\t", row.names = 5, stringsAsFactors = F, check.names = F)
dim(cleanStat_per_cell)
# merge() keeps only cells present in the expression matrix, in matrix order
cleanStat_per_cell <- merge(x = colnames(exprMatr), y = cleanStat_per_cell, by.x = 1, by.y = 0, sort = F)
rownames(cleanStat_per_cell) <- cleanStat_per_cell$x
cleanStat_per_cell <- cleanStat_per_cell[, -1]
# NOTE(review): V8/V9 presumably hold the two read-class counts of the A/B
# split and V12 the clean-read total -- confirm against the cleanFqStat.txt
# column layout.
cleanStat_per_cell$ABratio <- cleanStat_per_cell$V8/(cleanStat_per_cell$V8 + cleanStat_per_cell$V9)
hist(cleanStat_per_cell$V12/1e6, breaks = 40, xlab = "Clean reads (M)", main = NA)
hist(cleanStat_per_cell$ABratio, breaks = 40, xlab = "A/B ratio", main = NA)
cellStat <- cleanStat_per_cell[, c("ABratio", "V12")]
colnames(cellStat)[2] <- "cleanReads"
# 2 clean reads and mapping ratio
mapStat_per_cell <- read.table("02-alignment/merged/mapStat.txt",header = F, sep = "\t", row.names = 2, stringsAsFactors = F, check.names = F)
dim(mapStat_per_cell)
mapStat_per_cell <- merge(x = colnames(exprMatr), y = mapStat_per_cell, by.x = 1, by.y = 0, sort = F)
rownames(mapStat_per_cell) <- mapStat_per_cell$x
mapStat_per_cell <- mapStat_per_cell[, -1]
hist(mapStat_per_cell$V7,breaks = 40, xlab = "Mapping ratio", main = NA, xlim = c(0, 1))
cellStat$mpRatio <- mapStat_per_cell$V7
# 3 detected genes (genes with at least one UMI per cell)
expressed_genes_per_cell <- apply(exprMatr>0, 2, sum)
hist(expressed_genes_per_cell,breaks = 40, xlab = "Detected genes", main = NA)
cellStat$nGene <- expressed_genes_per_cell
# 4 UMI (total transcript count per cell)
nUMI_per_cell <- colSums(exprMatr)
hist(nUMI_per_cell/1e3,breaks = 40, xlab = "Detected transcripts (k)", main = NA)
cellStat$nUMI <- nUMI_per_cell
# 5 mito ratio (fraction of UMIs from "MT-" genes)
#mito.genes <- grep(pattern = "^MT-", x = rownames(exprMatr), value = T)
percent.mito <- colSums(exprMatr[grep(pattern = "^MT-", x = rownames(exprMatr)),])/colSums(exprMatr)
hist(percent.mito, breaks = 40, xlab = "Mito. ratio", main = NA)
cellStat$mitoRatio <- percent.mito
#save.image(file="filtering.RData")
#load("filtering.RData")
# 6 ERCC ratio
exprStat_per_cell <- read.table("03-expression/merged/exprStat.txt", header = F, sep = "\t", row.names = 2, stringsAsFactors = F)
dim(exprStat_per_cell)
exprStat_per_cell <- merge(x = colnames(exprMatr), y = exprStat_per_cell, by.x = 1, by.y = 0, sort = F)
hist(exprStat_per_cell$V6 / exprStat_per_cell$V3, breaks = 40, xlab = "ERCC ratio", main = NA)
cellStat$ERCCratio <- exprStat_per_cell$V6 / exprStat_per_cell$V3
# 7 correlation
# The pairwise-correlation metric is disabled (constant placeholder 1); the
# commented lines below show the intended computation.
# cor_cells <- cor(exprMatr[rowSums(exprMatr)>0, ], method = "spearman")
# diag(cor_cells) <- NA
# avg_cor <- rowMeans(cor_cells, na.rm = T)
# hist(avg_cor, breaks = 40, xlab = "Pairwise correlation", main = NA)
cellStat$avgCor <- 1
# 8 doublets
# Fit a smooth nUMI ~ nGene trend (GAM); cells in the top 1% of nUMI that
# also exceed twice their predicted nUMI are flagged as putative doublets.
library("mgcv")
plot(cellStat$nUMI, cellStat$nGene, xlab = "UMI number", ylab = "Gene number")
gam.reg <- gam(nUMI ~ s(nGene, bs = "cs"), data = cellStat)
gam.pre <- predict(gam.reg, newdata = list(nGene=cellStat$nGene))
gam_DF <- data.frame(cellStat, pred_value = gam.pre, obs_fc = cellStat$nUMI / gam.pre)
cellStat$doublet <- gam_DF$nUMI>quantile(gam_DF$nUMI, 0.99) & gam_DF$obs_fc>2
table(cellStat$doublet)
# merged
do_plotIndex()
plot(cellStat$nUMI, cellStat$nGene)
plot(cellStat$cleanReads/1e6, cellStat$ABratio, xlab = "Clean reads (M)", ylab = "A/B ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$mitoRatio, xlab = "Clean reads (M)", ylab = "Mito. ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$ERCCratio, xlab = "Clean reads (M)", ylab = "ERCC ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$avgCor, xlab = "Clean reads (M)", ylab = "Pairwise correlation"); abline(v = 0.5, col = "blue", lty = 2)
### setting cutoff for filtering
# One row of QC thresholds; *_l / *_h are lower / upper bounds for the
# two-sided criteria (nGene, nUMI).
cutoff_DF <- data.frame(ABratio = 0.9, cleanReads = 0.4*1e6, mpRatio = 0.6,
nGene_l = 1000, nGene_h = 7500, nUMI_l = 3*1e3, nUMI_h = 92*1e3,
mitoRatio = 0.15, ERCCratio = 0.25, avgCor = 0.15)
###
# NOTE(review): do_cellFiltering() takes no arguments, so it presumably reads
# cellStat/cutoff_DF from the global environment -- confirm in
# filter_tools.r.
filtering_out <- do_cellFiltering()
cellStat$filter <- filtering_out$res
# report: print cutoffs and pass/fail statistics both to console and to file
do_show_ftd_stat(cellStat)
sink(paste0(OUT, "/filtering_stat.txt"))
print(cutoff_DF)
cat("\n")
do_show_ftd_stat(cellStat)
sink()
# plot
# All QC figures go into one multi-page PDF; top-level ggplot calls
# auto-print, each producing a page.
pdf(paste0(OUT, "/filtering_cells.pdf"), width = 4, height = 4, useDingbats = F)
library("ggplot2")
# Tile map: one column per cell, one row per QC index; fill marks pass/fail.
p <- ggplot(filtering_out$tb, aes(x = cell, y = index, fill = as.factor(value))) + geom_tile(show.legend = F) + theme_bw() +
theme(axis.title = element_blank(), axis.text.x = element_blank(), axis.ticks.x = element_blank(), panel.border = element_rect(size = 1, color = "black")) +
scale_y_discrete(limits = rev(levels(filtering_out$tb$index)), position = "right", expand = c(0, 0))
print(p)
# stat for each bigBatch
# bigBatch = cell-name prefix before the first "-"
cellStat_DF <- cellStat
cellStat_DF$bigBatch <- gsub("-.*", "", rownames(cellStat_DF))
cellStat_DF_melted <- melt(table(cellStat_DF[, c("bigBatch", "filter")]))
ggplot(cellStat_DF_melted, aes(x = bigBatch, y = value, fill = bigBatch, alpha = filter)) +
geom_bar(stat = "identity", show.legend = F) +
scale_alpha_discrete(range = c(0.4,1)) +
theme(axis.title.x = element_blank()) +
scale_y_continuous(limits = c(0, max(aggregate(cellStat_DF_melted$value, by = list(cellStat_DF_melted$bigBatch), sum)[,2])*1.05),
expand = c(0, 0)) +
ylab("Cell number") +
geom_text(aes(label = value, y = value), size = 3, position = position_stack(vjust = 0.5), show.legend = F)
# stat for each feature
# Histograms per QC metric with the cutoff drawn as a dashed line.
ggplot(cellStat, aes(ABratio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("A/B ratio") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$ABratio, linetype = "dashed")
ggplot(cellStat, aes(cleanReads/1e6, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Clean reads number / (Million)") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed")
ggplot(cellStat, aes(mpRatio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Mapping ratio") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$mpRatio, linetype = "dashed")
ggplot(cellStat, aes(nGene, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Dected gene number") + ylab("Cell number") +
geom_vline(xintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed")
# Per-bigBatch facet of the detected-gene histogram (only if >1 batch)
tmp <- data.frame(cellStat, bigBatch = gsub("-.*", "", rownames(cellStat)), stringsAsFactors = F)
if(length(unique(tmp$bigBatch))>1) {
ggplot(tmp, aes(nGene, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black", show.legend = F) +
xlab("Dected gene number") + ylab("Cell number") + facet_grid(bigBatch ~ .) +
geom_vline(xintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed")
}
ggplot(cellStat, aes(nUMI, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("UMI count") + ylab("Cell number") +
geom_vline(xintercept = c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h), linetype = "dashed")
ggplot(cellStat, aes(log10(nUMI), fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab(expression(paste(log[10], " (UMI count)"))) + ylab("Cell number") +
geom_vline(xintercept = log10(c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h)), linetype = "dashed")
ggplot(cellStat, aes(mitoRatio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Expression raio of mitochondrial genes") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$mitoRatio, linetype = "dashed")
ggplot(cellStat, aes(avgCor, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Average of pairwise Spearman correlation") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$avgCor, linetype = "dashed")
# Scatter plots of metric pairs with the cutoffs drawn as dashed lines.
ggplot(cellStat, aes(x = cleanReads/1e6, y = ABratio, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("A/B ratio") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$ABratio, linetype = "dashed")
ggplot(cellStat, aes(x = cleanReads/1e6, y = ERCCratio, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("ERCC ratio") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$ERCCratio, linetype = "dashed")
ggplot(cellStat, aes(x = cleanReads/1e6, y = avgCor, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("Average Spearman correlation") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$avgCor, linetype = "dashed")
ggplot(cellStat, aes(x = nUMI, y = nGene, color=filter, size=cleanReads, shape=doublet==1)) + geom_point(alpha=0.6) +
theme_grey() + xlab("UMI number") + ylab("Detected gene number") +
geom_vline(xintercept = c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h), linetype = "dashed") +
geom_hline(yintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed") +
scale_shape_discrete(name="doublet") +
geom_smooth(method = 'gam', formula = y ~ s(x, bs = "cs"), color = "1", show.legend = F, alpha = 0.6)
dev.off()
## 2. gene level ----
# 1
# Number of QC-passed cells in which each gene is detected (UMI > 0)
expressed_cells_per_gene <- apply(exprMatr[, cellStat$filter]>0, 1, sum)
hist(expressed_cells_per_gene/ncol(exprMatr[, cellStat$filter]), breaks = 40, xlab = "Ratio of cells expressed", main = NA)
abline(v=0.05,lty=2,col="blue")
# 2
# Total UMI per gene (over ALL cells, not only the QC-passed ones)
nCountsPerGene <- apply(exprMatr, 1, sum)
hist(nCountsPerGene, breaks = 1000000, xlim = c(0,1000))
# merge
geneStat <- data.frame(nCell=expressed_cells_per_gene, nUMI=nCountsPerGene)
geneStat$validCell <- sum(cellStat$filter)
geneStat$cellRatio <- geneStat$nCell/geneStat$validCell
# A gene is kept when detected in at least 10 QC-passed cells
geneStat$filter <- geneStat$nCell>=10
table(geneStat$filter)
## 3. filtering both of them
exprMatr_filtered <- exprMatr[geneStat$filter, cellStat$filter]
print("After filter:")
dim(exprMatr_filtered)
# filtering only cells
exprMatr_cellFiltered <- exprMatr[, cellStat$filter]
dim(exprMatr_cellFiltered)
# Library-size normalization to counts per million (CPM); a second copy is
# re-keyed from gene symbols to version-less Ensembl IDs via geneID.
exprMatr_cellFiltered_CPM <- sweep(exprMatr_cellFiltered, MARGIN = 2, STATS = colSums(exprMatr_cellFiltered), FUN = "/") * 1e6
exprMatr_cellFiltered_CPM_ensembl <- exprMatr_cellFiltered_CPM
rownames(exprMatr_cellFiltered_CPM_ensembl) <- geneID$ensembl_alt[match(rownames(exprMatr_cellFiltered_CPM_ensembl), geneID$symbol)]
# output
# Each matrix is written as plain text and (for the count matrices) also
# gzip-compressed alongside.
data.table::fwrite(x = exprMatr, file = paste0(OUT, "/UMIcount_unfiltered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_unfiltered.txt"), " > ", OUT, "/UMIcount_unfiltered.txt.gz"))
data.table::fwrite(x = exprMatr_filtered, file = paste0(OUT, "/UMIcount_filtered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_filtered.txt"), " > ", OUT, "/UMIcount_filtered.txt.gz"))
data.table::fwrite(x = exprMatr_cellFiltered, file = paste0(OUT, "/UMIcount_cellFiltered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_cellFiltered.txt"), " > ", OUT, "/UMIcount_cellFiltered.txt.gz"))
data.table::fwrite(x = exprMatr_cellFiltered_CPM, file = paste0(OUT, "/UMIcount_cellFiltered_CPM.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
data.table::fwrite(x = exprMatr_cellFiltered_CPM_ensembl, file = paste0(OUT, "/UMIcount_cellFiltered_CPM_ensembl.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
write.table(x = cellStat,file = paste0(OUT, "/filtering_cells.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
write.table(x = geneStat,file = paste0(OUT, "/filtering_genes.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
# extract normalized data from filtered data
# exprMatr_CPM <- sweep(x = exprMatr, MARGIN = 2, STATS = colSums(exprMatr), FUN = "/") * 0.1 * 1e6 # scale by 0.1M
# exprMatr_normed <- exprMatr_CPM[rownames(exprMatr_filtered), cellStat$filter]
# write.table(x = exprMatr_normed, file = paste0(OUT, "/UMIcount_normed.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
# log2 transformation
# exprMatr_normed_log2 <- log2(exprMatr_normed + 1)
# write.table(x = exprMatr_normed_log2, file = paste0(OUT, "/UMIcount_normed_log2.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
save.image(file = paste0(OUT, "/filtering.RData"))
| /scRNA-seq/pooled_data/01_stomach/do_filter.r | permissive | shunsunsun/GeACT | R | false | false | 14,132 | r | # filter cells and genes
setwd("~/lustre/06-Human_cell_atlas/pooled_data/01_stomach/")
library("reshape2")
library("ggplot2")
library("cowplot")
source("../../scripts/filter_tools.r")
samplingPos <- "."
OUT <- paste0("03-expression/merged/filtering/", samplingPos)
dir.create(OUT, showWarnings = F, recursive = T)
#load(file = paste0(OUT, "/filtering.RData"))
# load gene ID
geneID <- read.table("~/lustre/06-Human_cell_atlas/Genomes/human/gene_ID2Name_fixed.txt", header = F, sep = "\t", stringsAsFactors = F)
dim(geneID)
colnames(geneID) <- c("ensembl", "symbol")
geneID$ensembl_alt <- gsub("\\.[0-9]+", "", geneID$ensembl)
# load expression data
exprMatr <- read.table("03-expression/merged/UMIcount_allGenes.txt",header = T,row.names = 1,sep = "\t", stringsAsFactors = F, check.names = F, comment.char = "")
dim(exprMatr)
# extract data
#exprMatr <- exprMatr[, grep(paste0("^", samplingPos, "-"), colnames(exprMatr))]
# remove control/abnormal sample
black_list <- "WD_C-D11_"
exprMatr <- exprMatr[, - grep(black_list, colnames(exprMatr))]
# remove 4th inner
#inner_id <- as.numeric(gsub(".*_", "", colnames(exprMatr))) %% 8; inner_id[inner_id==0] <- 8; inner_id <- factor(inner_id)
#exprMatr <- exprMatr[, inner_id!=4]
print("Before filter:")
dim(exprMatr)
## 1. cell level ----
# 1 clean reads and A/B ratio
cleanStat_per_cell <- read.table("01-cleandata/merged/cleanFqStat.txt", header = F, sep = "\t", row.names = 5, stringsAsFactors = F, check.names = F)
dim(cleanStat_per_cell)
cleanStat_per_cell <- merge(x = colnames(exprMatr), y = cleanStat_per_cell, by.x = 1, by.y = 0, sort = F)
rownames(cleanStat_per_cell) <- cleanStat_per_cell$x
cleanStat_per_cell <- cleanStat_per_cell[, -1]
cleanStat_per_cell$ABratio <- cleanStat_per_cell$V8/(cleanStat_per_cell$V8 + cleanStat_per_cell$V9)
hist(cleanStat_per_cell$V12/1e6, breaks = 40, xlab = "Clean reads (M)", main = NA)
hist(cleanStat_per_cell$ABratio, breaks = 40, xlab = "A/B ratio", main = NA)
cellStat <- cleanStat_per_cell[, c("ABratio", "V12")]
colnames(cellStat)[2] <- "cleanReads"
# 2 clean reads and mapping ratio
mapStat_per_cell <- read.table("02-alignment/merged/mapStat.txt",header = F, sep = "\t", row.names = 2, stringsAsFactors = F, check.names = F)
dim(mapStat_per_cell)
mapStat_per_cell <- merge(x = colnames(exprMatr), y = mapStat_per_cell, by.x = 1, by.y = 0, sort = F)
rownames(mapStat_per_cell) <- mapStat_per_cell$x
mapStat_per_cell <- mapStat_per_cell[, -1]
hist(mapStat_per_cell$V7,breaks = 40, xlab = "Mapping ratio", main = NA, xlim = c(0, 1))
cellStat$mpRatio <- mapStat_per_cell$V7
# 3 detected genes
expressed_genes_per_cell <- apply(exprMatr>0, 2, sum)
hist(expressed_genes_per_cell,breaks = 40, xlab = "Detected genes", main = NA)
cellStat$nGene <- expressed_genes_per_cell
# 4 UMI
nUMI_per_cell <- colSums(exprMatr)
hist(nUMI_per_cell/1e3,breaks = 40, xlab = "Detected transcripts (k)", main = NA)
cellStat$nUMI <- nUMI_per_cell
# 5 mito ratio
#mito.genes <- grep(pattern = "^MT-", x = rownames(exprMatr), value = T)
percent.mito <- colSums(exprMatr[grep(pattern = "^MT-", x = rownames(exprMatr)),])/colSums(exprMatr)
hist(percent.mito, breaks = 40, xlab = "Mito. ratio", main = NA)
cellStat$mitoRatio <- percent.mito
#save.image(file="filtering.RData")
#load("filtering.RData")
# 6 ERCC ratio
exprStat_per_cell <- read.table("03-expression/merged/exprStat.txt", header = F, sep = "\t", row.names = 2, stringsAsFactors = F)
dim(exprStat_per_cell)
exprStat_per_cell <- merge(x = colnames(exprMatr), y = exprStat_per_cell, by.x = 1, by.y = 0, sort = F)
hist(exprStat_per_cell$V6 / exprStat_per_cell$V3, breaks = 40, xlab = "ERCC ratio", main = NA)
cellStat$ERCCratio <- exprStat_per_cell$V6 / exprStat_per_cell$V3
# 7 correlation
# cor_cells <- cor(exprMatr[rowSums(exprMatr)>0, ], method = "spearman")
# diag(cor_cells) <- NA
# avg_cor <- rowMeans(cor_cells, na.rm = T)
# hist(avg_cor, breaks = 40, xlab = "Pairwise correlation", main = NA)
cellStat$avgCor <- 1
# 8 doublets
library("mgcv")
plot(cellStat$nUMI, cellStat$nGene, xlab = "UMI number", ylab = "Gene number")
gam.reg <- gam(nUMI ~ s(nGene, bs = "cs"), data = cellStat)
gam.pre <- predict(gam.reg, newdata = list(nGene=cellStat$nGene))
gam_DF <- data.frame(cellStat, pred_value = gam.pre, obs_fc = cellStat$nUMI / gam.pre)
cellStat$doublet <- gam_DF$nUMI>quantile(gam_DF$nUMI, 0.99) & gam_DF$obs_fc>2
table(cellStat$doublet)
# merged
do_plotIndex()
plot(cellStat$nUMI, cellStat$nGene)
plot(cellStat$cleanReads/1e6, cellStat$ABratio, xlab = "Clean reads (M)", ylab = "A/B ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$mitoRatio, xlab = "Clean reads (M)", ylab = "Mito. ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$ERCCratio, xlab = "Clean reads (M)", ylab = "ERCC ratio"); abline(v = 0.5, col = "blue", lty = 2)
plot(cellStat$cleanReads/1e6, cellStat$avgCor, xlab = "Clean reads (M)", ylab = "Pairwise correlation"); abline(v = 0.5, col = "blue", lty = 2)
### setting cutoff for filtering
cutoff_DF <- data.frame(ABratio = 0.9, cleanReads = 0.4*1e6, mpRatio = 0.6,
nGene_l = 1000, nGene_h = 7500, nUMI_l = 3*1e3, nUMI_h = 92*1e3,
mitoRatio = 0.15, ERCCratio = 0.25, avgCor = 0.15)
###
filtering_out <- do_cellFiltering()
cellStat$filter <- filtering_out$res
# report
do_show_ftd_stat(cellStat)
sink(paste0(OUT, "/filtering_stat.txt"))
print(cutoff_DF)
cat("\n")
do_show_ftd_stat(cellStat)
sink()
# plot
pdf(paste0(OUT, "/filtering_cells.pdf"), width = 4, height = 4, useDingbats = F)
library("ggplot2")
p <- ggplot(filtering_out$tb, aes(x = cell, y = index, fill = as.factor(value))) + geom_tile(show.legend = F) + theme_bw() +
theme(axis.title = element_blank(), axis.text.x = element_blank(), axis.ticks.x = element_blank(), panel.border = element_rect(size = 1, color = "black")) +
scale_y_discrete(limits = rev(levels(filtering_out$tb$index)), position = "right", expand = c(0, 0))
print(p)
# stat for each bigBatch
cellStat_DF <- cellStat
cellStat_DF$bigBatch <- gsub("-.*", "", rownames(cellStat_DF))
cellStat_DF_melted <- melt(table(cellStat_DF[, c("bigBatch", "filter")]))
ggplot(cellStat_DF_melted, aes(x = bigBatch, y = value, fill = bigBatch, alpha = filter)) +
geom_bar(stat = "identity", show.legend = F) +
scale_alpha_discrete(range = c(0.4,1)) +
theme(axis.title.x = element_blank()) +
scale_y_continuous(limits = c(0, max(aggregate(cellStat_DF_melted$value, by = list(cellStat_DF_melted$bigBatch), sum)[,2])*1.05),
expand = c(0, 0)) +
ylab("Cell number") +
geom_text(aes(label = value, y = value), size = 3, position = position_stack(vjust = 0.5), show.legend = F)
# stat for each feature
ggplot(cellStat, aes(ABratio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("A/B ratio") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$ABratio, linetype = "dashed")
ggplot(cellStat, aes(cleanReads/1e6, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Clean reads number / (Million)") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed")
ggplot(cellStat, aes(mpRatio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Mapping ratio") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$mpRatio, linetype = "dashed")
ggplot(cellStat, aes(nGene, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Dected gene number") + ylab("Cell number") +
geom_vline(xintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed")
tmp <- data.frame(cellStat, bigBatch = gsub("-.*", "", rownames(cellStat)), stringsAsFactors = F)
if(length(unique(tmp$bigBatch))>1) {
ggplot(tmp, aes(nGene, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black", show.legend = F) +
xlab("Dected gene number") + ylab("Cell number") + facet_grid(bigBatch ~ .) +
geom_vline(xintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed")
}
ggplot(cellStat, aes(nUMI, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("UMI count") + ylab("Cell number") +
geom_vline(xintercept = c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h), linetype = "dashed")
ggplot(cellStat, aes(log10(nUMI), fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab(expression(paste(log[10], " (UMI count)"))) + ylab("Cell number") +
geom_vline(xintercept = log10(c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h)), linetype = "dashed")
ggplot(cellStat, aes(mitoRatio, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Expression raio of mitochondrial genes") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$mitoRatio, linetype = "dashed")
ggplot(cellStat, aes(avgCor, fill=filter)) + geom_histogram(bins = 40, alpha=.5, position="identity", color="black") +
theme_grey() + xlab("Average of pairwise Spearman correlation") + ylab("Cell number") +
geom_vline(xintercept = cutoff_DF$avgCor, linetype = "dashed")
ggplot(cellStat, aes(x = cleanReads/1e6, y = ABratio, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("A/B ratio") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$ABratio, linetype = "dashed")
ggplot(cellStat, aes(x = cleanReads/1e6, y = ERCCratio, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("ERCC ratio") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$ERCCratio, linetype = "dashed")
ggplot(cellStat, aes(x = cleanReads/1e6, y = avgCor, color=filter, size=nUMI)) + geom_point(alpha=0.6) +
theme_grey() + xlab("Reads number / (Million)") + ylab("Average Spearman correlation") +
geom_vline(xintercept = cutoff_DF$cleanReads/1e6, linetype = "dashed") +
geom_hline(yintercept = cutoff_DF$avgCor, linetype = "dashed")
ggplot(cellStat, aes(x = nUMI, y = nGene, color=filter, size=cleanReads, shape=doublet==1)) + geom_point(alpha=0.6) +
theme_grey() + xlab("UMI number") + ylab("Detected gene number") +
geom_vline(xintercept = c(cutoff_DF$nUMI_l, cutoff_DF$nUMI_h), linetype = "dashed") +
geom_hline(yintercept = c(cutoff_DF$nGene_l, cutoff_DF$nGene_h), linetype = "dashed") +
scale_shape_discrete(name="doublet") +
geom_smooth(method = 'gam', formula = y ~ s(x, bs = "cs"), color = "1", show.legend = F, alpha = 0.6)
dev.off()
## 2. gene level ----
# 1
expressed_cells_per_gene <- apply(exprMatr[, cellStat$filter]>0, 1, sum)
hist(expressed_cells_per_gene/ncol(exprMatr[, cellStat$filter]), breaks = 40, xlab = "Ratio of cells expressed", main = NA)
abline(v=0.05,lty=2,col="blue")
# 2
nCountsPerGene <- apply(exprMatr, 1, sum)
hist(nCountsPerGene, breaks = 1000000, xlim = c(0,1000))
# merge
geneStat <- data.frame(nCell=expressed_cells_per_gene, nUMI=nCountsPerGene)
geneStat$validCell <- sum(cellStat$filter)
geneStat$cellRatio <- geneStat$nCell/geneStat$validCell
geneStat$filter <- geneStat$nCell>=10
table(geneStat$filter)
## 3. filtering both of them
exprMatr_filtered <- exprMatr[geneStat$filter, cellStat$filter]
print("After filter:")
dim(exprMatr_filtered)
# filtering only cells
exprMatr_cellFiltered <- exprMatr[, cellStat$filter]
dim(exprMatr_cellFiltered)
exprMatr_cellFiltered_CPM <- sweep(exprMatr_cellFiltered, MARGIN = 2, STATS = colSums(exprMatr_cellFiltered), FUN = "/") * 1e6
exprMatr_cellFiltered_CPM_ensembl <- exprMatr_cellFiltered_CPM
rownames(exprMatr_cellFiltered_CPM_ensembl) <- geneID$ensembl_alt[match(rownames(exprMatr_cellFiltered_CPM_ensembl), geneID$symbol)]
# output
data.table::fwrite(x = exprMatr, file = paste0(OUT, "/UMIcount_unfiltered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_unfiltered.txt"), " > ", OUT, "/UMIcount_unfiltered.txt.gz"))
data.table::fwrite(x = exprMatr_filtered, file = paste0(OUT, "/UMIcount_filtered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_filtered.txt"), " > ", OUT, "/UMIcount_filtered.txt.gz"))
data.table::fwrite(x = exprMatr_cellFiltered, file = paste0(OUT, "/UMIcount_cellFiltered.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
system(paste0("gzip -c ", paste0(OUT, "/UMIcount_cellFiltered.txt"), " > ", OUT, "/UMIcount_cellFiltered.txt.gz"))
data.table::fwrite(x = exprMatr_cellFiltered_CPM, file = paste0(OUT, "/UMIcount_cellFiltered_CPM.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
data.table::fwrite(x = exprMatr_cellFiltered_CPM_ensembl, file = paste0(OUT, "/UMIcount_cellFiltered_CPM_ensembl.txt"), row.names = T, col.names = T, quote = F, sep = "\t", nThread = 10)
write.table(x = cellStat,file = paste0(OUT, "/filtering_cells.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
write.table(x = geneStat,file = paste0(OUT, "/filtering_genes.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
# extract normalized data from filtered data
# exprMatr_CPM <- sweep(x = exprMatr, MARGIN = 2, STATS = colSums(exprMatr), FUN = "/") * 0.1 * 1e6 # scale by 0.1M
# exprMatr_normed <- exprMatr_CPM[rownames(exprMatr_filtered), cellStat$filter]
# write.table(x = exprMatr_normed, file = paste0(OUT, "/UMIcount_normed.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
# log2 transformation
# exprMatr_normed_log2 <- log2(exprMatr_normed + 1)
# write.table(x = exprMatr_normed_log2, file = paste0(OUT, "/UMIcount_normed_log2.txt"), row.names = T, col.names = NA, quote = F, sep = "\t")
save.image(file = paste0(OUT, "/filtering.RData"))
|
#' Converts messy names and ID's to tidy clean ones.
#'
#' For sorting out a vector with long and complicated identifiers or row names, where the true ID of a row is hidden in a string.\cr
#' E.g: Make "dirty" ID's like "A0006_3911_BT-F1_GTCGTCTA_run20190930N" turn into "clean" ID's like 3991_BT
#' @param vector A vector of "dirty" IDs
#' @param identifier ID's need to be formatted with a number and adjacent identifier, e.g "34_individuals2019" where "_individuals2019" is the identifier. Any entries not matching this format will be removed.
#' @param identifier_left Whether the identifier is on the left-hand (T) or right-hand (F) side of the number
#' @param numLength if you want leading zeroes, use this parameter to specify the length of the number, e.g "8" for 00000342. Use 0 to disable padding.
#' @param prefix if you want a prefix in the new cleaned ID. Ex: "individuals2019_" will give you "individuals2019_0034". If not specified, the old identifier will be used instead. Set to NA if you only want the number.
#' @param na_remove if you want to remove any entries that don't follow your pattern (otherwise, they'll turn to NA)
#' @param numeric if TRUE, the cleaned IDs are returned as a numeric vector
#' @export
clean_ID = function(vector,identifier="", identifier_left=F, numLength=4, prefix, na_remove=F,numeric=F) {
  require(tidyverse)
  require(stringr)
  # SET THE REGULAR EXPRESSION: a 1-50 digit number with the identifier glued
  # on the requested side
  if (!identifier_left) regExpr = paste("[0-9]{1,50}",identifier,sep="")
  else regExpr = paste(identifier,"[0-9]{1,50}",sep="")
  # Extract the ID's from the dirty ID's (non-matching entries become NA)
  ID_dirty = vector
  ID_clean = ID_dirty %>% str_extract(regExpr)
  # Remove the old identifier (for now)
  ID_clean = ID_clean %>% sub(identifier,"",.)
  # Remove NA values
  if (na_remove) ID_clean = ID_clean[!is.na(ID_clean)]
  # Add leading zeroes
  if (numLength!=0) ID_clean[!is.na(ID_clean)] = ID_clean[!is.na(ID_clean)] %>% as.numeric() %>% sprintf(paste("%0",numLength,"d",sep=""),.)
  # Make the ID completely numeric
  if (numeric) ID_clean = as.numeric(ID_clean)
  # Add the new prefix
  # BUG FIX: 'prefix' is a formal argument, so exists("prefix") was always
  # TRUE and evaluating is.na(prefix) with no value supplied raised
  # 'argument "prefix" is missing, with no default', breaking the documented
  # default behaviour. missing() is the correct test for an unsupplied arg.
  if (!missing(prefix)){
    if (is.na(prefix)) return(ID_clean)
    else ID_clean[!is.na(ID_clean)] = paste(prefix, ID_clean[!is.na(ID_clean)], sep="")
  }
  # With no prefix supplied, re-attach the original identifier on its side
  else if (identifier_left) ID_clean[!is.na(ID_clean)] = paste(ID_clean[!is.na(ID_clean)], identifier, sep="")
  else if (!identifier_left) ID_clean[!is.na(ID_clean)] = paste(identifier, ID_clean[!is.na(ID_clean)], sep="")
  return(ID_clean)
}
#' In a dataframe, converts messy names and ID's to tidy clean ones.
#'
#' For sorting out a column with long and complicated identifiers or row names, where the true ID of a row is hidden in a string.\cr
#' E.g: Make "dirty" ID's like "A0006_3911_BT-F1_GTCGTCTA_run20190930N" turn into "clean" ID's like 3991_BT
#' @param df The data frame
#' @param column_name The name of a column containing dirty IDs (default "ID")
#' @param identifier ID's need to be formatted with a number and adjacent identifier, e.g "34_individuals2019" where "_individuals2019" is the identifier. Any entries not matching this format will be removed.
#' @param identifier_left Whether the identifier is on the left-hand (T) or right-hand (F) side of the number
#' @param numLength if you want leading zeroes, use this parameter to specify the length of the number, e.g "8" for 00000342. Default F (no padding); note this differs from clean_ID()'s default of 4.
#' @param prefix if you want a prefix in the new cleaned ID. Ex: "individuals2019_" will give you "individuals2019_0034"
#' @param na_remove if you want to remove any rows that don't follow your pattern (otherwise, they'll turn to NA). Default is True.
#' @param keep_name F to rename the cleaned column to "ID", T to keep the old name, or a string giving the new column name
#' @param numeric if TRUE, the cleaned IDs are stored as numeric
#' @export
clean_ID_df = function(df, column_name="ID", identifier="", identifier_left=F, numLength=F, prefix="", na_remove=T, keep_name=F, numeric=F){
require(tidyverse)
require(stringr)
# Extract the dirty ID's
ID_dirty = unlist(df[column_name])
# Clean the ID
ID_clean = clean_ID(ID_dirty, identifier, identifier_left, numLength, prefix,numeric=numeric)
# Insert the cleaned ID's into the column
df[column_name] = ID_clean
# Remove NA values
# NOTE(review): remoNA() is defined elsewhere in the package; presumably it
# drops rows with NA in column_name -- confirm.
if (na_remove) df = df %>% remoNA(column_name)
# Rename the old ID column
# Check what name to use
if (keep_name == F) column_name_new = "ID"
else if (keep_name == T) column_name_new = column_name
else column_name_new = keep_name
# Rename the column to the chosen name (tidy-eval: unquote both sides)
df = df %>% rename(!! column_name_new := !! column_name)
return(df)
}
#' Converting sdy to F or M
#'
#' Used on dataframes, for determining sex based on SDY in a given column.
#' For every (ID, SEQRUN) group, the SDY signal of the column(s) matched by
#' `column` is classified by SDY_to_sex() using `cutoff`.
#' @param dataframe a data frame with ID and SEQRUN columns plus an SDY column
#' @param column regular expression matching the SDY column (passed to matches())
#' @param cutoff threshold passed to SDY_to_sex(): mean SDY <= cutoff gives "F", above gives "M"
#' @export
#'
determineSex = function(dataframe, column, cutoff) {
# NOTE(review): `dataframe$ID==ID` compares the full, ungrouped ID column
# with the current group's ID value inside a grouped mutate; this relies on
# recycling and looks fragile -- confirm it selects exactly the current
# group's rows.
dataframe = dataframe %>% group_by(ID, SEQRUN) %>% mutate(
sex = SDY_to_sex(dataframe %>% select(matches(column)) %>% filter(dataframe$ID==ID) , cutoff)
)
# %>% select(-c(column))
return(dataframe )
}
#' Set sex to "?" if many SNPs are missing.
#'
#' In a dataframe, sets the sex column to "?" when the proportion of missing
#' (NA) cells in a row exceeds `sensitivity`. Note the proportion is computed
#' over ALL columns of the row, including any non-SNP columns.
#' @param dataframe a data frame with one row per individual
#' @param column name of the column holding the existing sex calls
#' @param sensitivity maximum tolerated per-row NA proportion (default 0.35)
#' @return the data frame with its `sex` column updated
#' @export
#'
unSexBad = function(dataframe, column, sensitivity=0.35) {
  sex = unlist(dataframe[column])
  # Per-row NA proportion; apply() coerces the data frame to a matrix first.
  # (The previously computed, unused 'colNum' local has been removed.)
  na_prop <- apply(dataframe, 1, function(x) sum(is.na(x))/length(x))
  sex[na_prop > sensitivity] = "?"
  # note: always writes to dataframe$sex, regardless of `column`
  dataframe$sex = sex
  return(dataframe)
}
#' Rename genotypes based on a lookup table
#'
#' In a dataframe, recode every genotype column through renameGenotype(),
#' skipping the columns named in `not_genotypes`.
#' @param dataframe a data frame of genotype columns
#' @param LUT named character vector passed to renameGenotype() (its default
#'   is used when LUT is not supplied)
#' @param not_genotypes character vector of column names to leave untouched
#' @export
renameGenotypes = function(dataframe, LUT, not_genotypes=c()) {
  # any_of() is the supported tidyselect way to exclude by an external name
  # vector; the previous -c(not_genotypes) form used deprecated bare-vector
  # selection and misbehaved when not_genotypes was empty.
  for (i in names(dataframe %>% select(-any_of(not_genotypes)))) {
    dataframe <- dataframe %>% renameGenotype(i, LUT)
  }
  dataframe
}
#' determinesex2
#'
#' Variant of determineSex() that groups by ID only (ignoring SEQRUN).
#' @keywords internal
determineSex2 = function(dataframe, column, cutoff) {
# NOTE(review): same fragile `dataframe$ID==ID` construct as determineSex()
# -- the full ID column is compared with the group's ID inside a grouped
# mutate; confirm it selects exactly the current group's rows.
dataframe = dataframe %>% group_by(ID) %>% mutate(
sex = SDY_to_sex(dataframe %>% select(matches(column)) %>% filter(dataframe$ID==ID) , cutoff)
)
# %>% select(-c(column))
return(dataframe )
}
#' SDY_to_sex
#'
#' Classify sex from the mean of the first column/element of `vector`:
#' NA when there is no usable signal, "F" at or below `cutoff`, "M" above it.
#' @keywords internal
SDY_to_sex = function(vector, cutoff) {
  avg_sdy <- mean(unlist(vector[1]), na.rm = TRUE)
  if (is.na(avg_sdy)) {
    NA
  } else if (avg_sdy <= cutoff) {
    "F"
  } else {
    "M"
  }
}
#' safeMerge
#'
#' Collapse a vector that is expected to hold one repeated value: drop NAs
#' and, when exactly one distinct value remains, return it; otherwise return
#' NA coerced to the vector's original type via convertType().
#' @keywords internal
safeMerge = function(vector){
  # Remember the type before dropping anything, so the NA fallback matches it
  original_type = typeof(vector)
  observed = vector[!is.na(vector)]
  distinct_values = unique(observed)
  if (length(distinct_values) == 1) {
    distinct_values
  } else {
    # zero or conflicting values: give back a typed NA
    convertType(NA, original_type)
  }
}
#' renameGenotype
#'
#' Recode one genotype column through a lookup table; codes missing from the
#' table (including NA genotypes) become the no-call token "* *".
#' @keywords internal
renameGenotype = function(dataframe, column, LUT=c("1"="1 1","2"="1 2","3"="2 2")){
  observed = unlist(dataframe[column])
  # Look every observed code up in the table; unknown codes yield NA.
  recoded = LUT[observed]
  recoded[is.na(recoded)] = "* *"
  dataframe[column] = recoded
  return(dataframe)
}
# Interactively drop SNP columns with too many missing values: show a
# histogram of per-column NA counts, ask the user for a cutoff, remove
# columns at or above that many NAs, then drop constant (single-valued)
# columns. Returns the filtered data frame.
snps_clean_freqNA = function(df) {
  message(glue("dataframe starting at {ncol(df)} columns."))
  # Count missing values per column.
  nas = df %>% colnames() %>% sapply(function(x){
    sum(is.na(df[x]))
  })
  message("See histogram...")
  hist(nas, breaks=ncol(df))
  # numextract() (package helper) pulls the number out of the free-text reply.
  filter_at = numextract(readline(prompt="Enter cutoff value: "))
  # Keep only columns with fewer NAs than the chosen cutoff.
  df = df %>% select_if(~sum(is.na(.)) < filter_at)
  message(paste("Columns cut off at",filter_at,"NA-s.",sep=" "))
  message(glue("Dataframe is now at {ncol(df)} columns."))
  message(glue("Removing mono-something columns"))
  # Drop columns holding a single unique value (monomorphic SNPs).
  df = Filter(function(x){ length(unique(x))!=1 }, df)
  message(glue("Dataframe is now at {ncol(df)} columns."))
  message("Done")
  return(df)
}
| /R/script - base genotools.R | no_license | Eiriksen/fishytools | R | false | false | 7,295 | r |
#' Converts messy names and ID's to tidy clean ones.
#'
#' For sorting out a vector with long and complicated identifiers or row names, where the true ID of a row is hidden in a string.\cr
#' E.g: Make "dirty" ID's like "A0006_3911_BT-F1_GTCGTCTA_run20190930N" turn into "clean" ID's like 3991_BT
#' @param vector A vector of "dirty" IDs
#' @param identifier ID's need to be formated with a number and following identifier, e.g "34_individuals2019" where "_individuals2019" is the identifier. Any entries not matching this format will be removed.
#' @param identifier_left Wether the identifier is on the left hand (T) or right-hand (F) side of the number
#' @param numLength if you want leading zeroes, use this parameter to specify the length of the number, e.g "8" for 00000342. Use 0 to disable padding.
#' @param prefix if you want a prefix in the new cleaned ID. Ex: "individuals2019_" will give you "individuals2019_0034". If not specified, the old identifier will be used instead. Set to NA if you only want the number.
#' @param na_remove if you want to remove any entries that don't follow your pattern (otherwise, they'll turn to NA)
#' @param numeric if TRUE the cleaned IDs are returned as a numeric vector
#' @export
clean_ID = function(vector,identifier="", identifier_left=F, numLength=4, prefix, na_remove=F,numeric=F) {
  require(tidyverse)
  require(stringr)
  # SET THE REGULAR EXPRESSION: a run of digits directly adjacent to `identifier`
  if (!identifier_left) regExpr = paste("[0-9]{1,50}",identifier,sep="")
  else regExpr = paste(identifier,"[0-9]{1,50}",sep="")
  # Extract the ID's from the dirty ID's
  ID_dirty = vector
  ID_clean = ID_dirty %>% str_extract(regExpr)
  # Remove the old identifier (for now)
  ID_clean = ID_clean %>% sub(identifier,"",.)
  # Remove NA values
  if (na_remove) ID_clean = ID_clean[!is.na(ID_clean)]
  # Add leading zeroes
  if (numLength!=0) ID_clean[!is.na(ID_clean)] = ID_clean[!is.na(ID_clean)] %>% as.numeric() %>% sprintf(paste("%0",numLength,"d",sep=""),.)
  # Make the ID completely numeric
  if (numeric) ID_clean = as.numeric(ID_clean)
  # Add the new prefix.
  # BUG FIX: the original tested `exists("prefix")`, which is always TRUE for
  # a declared formal argument, so the identifier-reattaching branches below
  # were unreachable and calls without `prefix` crashed inside is.na() with
  # "argument 'prefix' is missing". `missing()` is the correct test.
  if (!missing(prefix)){
    if (is.na(prefix)) return(ID_clean)
    else ID_clean[!is.na(ID_clean)] = paste(prefix, ID_clean[!is.na(ID_clean)], sep="")
  }
  else if (identifier_left) ID_clean[!is.na(ID_clean)] = paste(ID_clean[!is.na(ID_clean)], identifier, sep="")
  else if (!identifier_left) ID_clean[!is.na(ID_clean)] = paste(identifier, ID_clean[!is.na(ID_clean)], sep="")
  return(ID_clean)
}
#' In a dataframe, converts messy names and ID's to tidy clean ones.
#'
#' For sorting out column with long and complicated identifiers or row names, where the true ID of a row is hidden in a string.\cr
#' E.g: Make "dirty" ID's like "A0006_3911_BT-F1_GTCGTCTA_run20190930N" turn into "clean" ID's like 3991_BT
#' @param df The data frame
#' @param column_name The name of a column containing dirty IDs
#' @param identifier ID's need to be formated with a number and following identifier, e.g "34_individuals2019" where "_individuals2019" is the identifier. Any entries not matching this format will be removed.
#' @param identifier_left Wether the identifier is on the left hand (T) or right-hand (F) side of the number
#' @param numLength if you want leading zeroes, use this parameter to specify the length of the number, e.g "8" for 00000342
#' @param prefix if you want a prefix in the new cleaned ID. Ex: "individuals2019_" will give you "individuals2019_0034"
#' @param na_remove if you want to remove any rows that don't follow your pattern (otherwise, they'll turn to NA). Default is True.
#' @param keep_name FALSE to rename the column to "ID", TRUE to keep the
#'   original column name, or a string giving a new name.
#' @param numeric if TRUE the cleaned IDs are converted to numeric.
#' @export
clean_ID_df = function(df, column_name="ID", identifier="", identifier_left=F, numLength=F, prefix="", na_remove=T, keep_name=F, numeric=F){
  require(tidyverse)
  require(stringr)
  # Extract the dirty ID's
  ID_dirty = unlist(df[column_name])
  # Clean the ID (all pattern arguments are forwarded to clean_ID())
  ID_clean = clean_ID(ID_dirty, identifier, identifier_left, numLength, prefix,numeric=numeric)
  # Insert the cleaned ID's into the column
  df[column_name] = ID_clean
  # Remove NA values. remoNA() is a package helper; presumably it drops rows
  # with NA in `column_name` -- confirm against its definition.
  if (na_remove) df = df %>% remoNA(column_name)
  # Rename the old ID column
  # Check what name to use.
  # NOTE(review): when keep_name is a string, `keep_name == F` relies on R
  # coercing FALSE to "FALSE" (comparison is then FALSE) -- fragile but works
  # for any string except "FALSE"/"TRUE".
  if (keep_name == F) column_name_new = "ID"
  else if (keep_name == T) column_name_new = column_name
  else column_name_new = keep_name
  # Rename the column to the chosen name using tidy-eval (!! unquoting)
  df = df %>% rename(!! column_name_new := !! column_name)
  return(df)
}
#' Converting sdy to F or M
#'
#' Used on dataframes, for determining sex based on SDY in a given column.
#' Adds a `sex` column ("F"/"M"/NA) by averaging the SDY marker values
#' (columns matched by `column`) per ID/SEQRUN group and comparing the mean
#' against `cutoff` via SDY_to_sex().
#'
#' @param dataframe a data frame with `ID` and `SEQRUN` columns plus SDY marker column(s)
#' @param column regex passed to `matches()` to pick the SDY column(s)
#' @param cutoff mean-SDY threshold; at or below => "F", above => "M"
#' @export
#'
determineSex = function(dataframe, column, cutoff) {
  # NOTE(review): `filter(dataframe$ID==ID)` mixes the full, ungrouped
  # `dataframe` with the grouped `ID` from mutate()'s evaluation context;
  # confirm this actually isolates the current group's rows as intended.
  dataframe = dataframe %>% group_by(ID, SEQRUN) %>% mutate(
    sex = SDY_to_sex(dataframe %>% select(matches(column)) %>% filter(dataframe$ID==ID) , cutoff)
  )
  # Dropping the marker column was left disabled by the author:
  # %>% select(-c(column))
  return(dataframe )
}
#' Set sex to NA if many SNPs missing.
#'
#' In a dataframe, sets the sex call to "?" when the proportion of missing
#' (NA) values in a row exceeds `sensitivity`, since a sex call based on
#' mostly-missing SNP data is unreliable.
#'
#' @param dataframe a data frame whose rows are individuals.
#' @param column name of the column holding the current sex calls.
#' @param sensitivity proportion of missing values per row (0-1) above which
#'   the sex call is discarded. Defaults to 0.35.
#' @return `dataframe` with its `sex` column overwritten; unreliable calls
#'   become "?".
#' @export
#'
unSexBad = function(dataframe, column, sensitivity=0.35) {
  sex = unlist(dataframe[column])
  # Proportion of NA cells per row, computed across ALL columns
  # (including non-SNP columns such as the sex column itself).
  na_prop <- apply(dataframe, 1, function(x) sum(is.na(x))/length(x))
  # Too much missing data to trust the call -> mark as unknown.
  sex[na_prop > sensitivity] = "?"
  # NOTE: the result is always written to `sex`, regardless of `column`.
  dataframe$sex = sex
  return(dataframe)
}
#' Rename genotypes based on a lookup table
#'
#' In a dataframe, recode every genotype column through renameGenotype(),
#' leaving the columns named in `not_genotypes` untouched.
#'
#' @param dataframe a data frame whose non-excluded columns hold genotype codes.
#' @param LUT named lookup table passed on to renameGenotype().
#' @param not_genotypes columns that are NOT genotypes (e.g. "ID") to skip.
#' @return `dataframe` with the genotype columns recoded.
#' @export
renameGenotypes = function(dataframe, LUT, not_genotypes=c()) {
  # Columns to recode: everything except the explicitly excluded ones.
  genotype_cols <- names(dataframe %>% select(-c(not_genotypes)))
  for (col in genotype_cols) {
    dataframe <- renameGenotype(dataframe, col, LUT)
  }
  dataframe
}
#' determinesex2
#'
#' Variant of determineSex() that groups by `ID` only (no `SEQRUN`),
#' otherwise identical: fills a `sex` column from the mean SDY signal via
#' SDY_to_sex().
#' @keywords internal
determineSex2 = function(dataframe, column, cutoff) {
  # NOTE(review): as in determineSex(), `filter(dataframe$ID==ID)` mixes the
  # ungrouped data frame with the grouped `ID`; confirm it isolates the
  # current group's rows.
  dataframe = dataframe %>% group_by(ID) %>% mutate(
    sex = SDY_to_sex(dataframe %>% select(matches(column)) %>% filter(dataframe$ID==ID) , cutoff)
  )
  # Dropping the marker column was left disabled by the author:
  # %>% select(-c(column))
  return(dataframe )
}
#' SDY_to_sex
#'
#' Translate an SDY marker signal into a sex call: "F" when the mean of the
#' first column/element of `vector` is at or below `cutoff`, "M" when it is
#' above, NA when no non-missing values are available.
#' @keywords internal
SDY_to_sex = function(vector, cutoff) {
  mean_sdy <- mean(unlist(vector[1]), na.rm = TRUE)
  if (is.na(mean_sdy)) {
    return(NA)
  }
  if (mean_sdy <= cutoff) "F" else "M"
}
#' safeMerge
#'
#' Collapse a vector of (possibly repeated / missing) observations into a
#' single value: if all non-NA entries agree, that value is returned;
#' otherwise an NA of the vector's original type (via convertType()).
#' @keywords internal
safeMerge = function(vector){
  # Remember the type so a conflict can be reported as a typed NA.
  original_type = typeof(vector)
  # Drop the missing values, then reduce to the distinct observations.
  distinct_values = unique(vector[!is.na(vector)])
  if (length(distinct_values) == 1) distinct_values
  else convertType(NA, original_type)
}
#' renameGenotype
#'
#' Recode one genotype column through a lookup table; codes missing from the
#' table (including NA genotypes) become the no-call token "* *".
#' @keywords internal
renameGenotype = function(dataframe, column, LUT=c("1"="1 1","2"="1 2","3"="2 2")){
  observed = unlist(dataframe[column])
  # Look every observed code up in the table; unknown codes yield NA.
  recoded = LUT[observed]
  recoded[is.na(recoded)] = "* *"
  dataframe[column] = recoded
  return(dataframe)
}
# Interactively drop SNP columns with too many missing values: show a
# histogram of per-column NA counts, ask the user for a cutoff, remove
# columns at or above that many NAs, then drop constant (single-valued)
# columns. Returns the filtered data frame.
snps_clean_freqNA = function(df) {
  message(glue("dataframe starting at {ncol(df)} columns."))
  # Count missing values per column.
  nas = df %>% colnames() %>% sapply(function(x){
    sum(is.na(df[x]))
  })
  message("See histogram...")
  hist(nas, breaks=ncol(df))
  # numextract() (package helper) pulls the number out of the free-text reply.
  filter_at = numextract(readline(prompt="Enter cutoff value: "))
  # Keep only columns with fewer NAs than the chosen cutoff.
  df = df %>% select_if(~sum(is.na(.)) < filter_at)
  message(paste("Columns cut off at",filter_at,"NA-s.",sep=" "))
  message(glue("Dataframe is now at {ncol(df)} columns."))
  message(glue("Removing mono-something columns"))
  # Drop columns holding a single unique value (monomorphic SNPs).
  df = Filter(function(x){ length(unique(x))!=1 }, df)
  message(glue("Dataframe is now at {ncol(df)} columns."))
  message("Done")
  return(df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThetaMultiplicative.R
\name{ThetaMultiplicative}
\alias{ThetaMultiplicative}
\title{Multiplicative intensity function \eqn{\theta(t)}}
\usage{
ThetaMultiplicative(t, f, w0, eta, gamma, terminal.points, ct)
}
\arguments{
\item{t}{a numeric vector of time points at which to evaluate the function.}
\item{f}{a numeric vector containing frequency values.}
\item{w0}{a numeric vector containing initial phase values.}
\item{eta}{a numeric vector containing \eqn{\eta} values (contribution of each periodic component to the intensity function).}
\item{gamma}{a numeric vector containing \eqn{\gamma} values (amplitude of each periodic component in the function).}
\item{terminal.points}{a numeric vector containing the endpoints of the dyadic partitioning.}
\item{ct}{a numeric vector containing the estimated piecewise constant intensity function \eqn{c(t)}. The length of ct should be a whole number power of 2.}
}
\value{
A numeric vector containing the values of the multiplicative intensity function calculated at given time points.
}
\description{
Calculates the multiplicative intensity function \eqn{\theta(t)} introduced in Ramezan \emph{et al.} (2014).
}
\references{
Ramezan, R., Marriott, P., and Chenouri, S. (2014), \emph{Statistics in Medicine}, \strong{33}(2), 238-256. doi: 10.1002/sim.5923.
}
| /man/ThetaMultiplicative.Rd | no_license | dpwynne/mmnst | R | false | true | 1,390 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThetaMultiplicative.R
\name{ThetaMultiplicative}
\alias{ThetaMultiplicative}
\title{Multiplicative intensity function \eqn{\theta(t)}}
\usage{
ThetaMultiplicative(t, f, w0, eta, gamma, terminal.points, ct)
}
\arguments{
\item{t}{a numeric vector of time points at which to evaluate the function.}
\item{f}{a numeric vector containing frequency values.}
\item{w0}{a numeric vector containing initial phase values.}
\item{eta}{a numeric vector containing \eqn{\eta} values (contribution of each periodic component to the intensity function).}
\item{gamma}{a numeric vector containing \eqn{\gamma} values (amplitude of each periodic component in the function).}
\item{terminal.points}{a numeric vector containing the endpoints of the dyadic partitioning.}
\item{ct}{a numeric vector containing the estimated piecewise constant intensity function \eqn{c(t)}. The length of ct should be a whole number power of 2.}
}
\value{
A numeric vector containing the values of the multiplicative intensity function calculated at given time points.
}
\description{
Calculates the multiplicative intensity function \eqn{\theta(t)} introduced in Ramezan \emph{et al.} (2014).
}
\references{
Ramezan, R., Marriott, P., and Chenouri, S. (2014), \emph{Statistics in Medicine}, \strong{33}(2), 238-256. doi: 10.1002/sim.5923.
}
|
# Batch-load ECG and EDA (skin conductance) physiological recordings.
# NOTE(review): exploratory scratch script -- part of it does not parse;
# see the notes inline. Code kept verbatim.
library(tidyverse)
library(fs)
source("script/process_phys.R")
# Raw exports carry a 13-line header and a 3-line footer around the data.
header = 13L
footer = 3L
ecg <-
  process_phys(dir = "all_data/",
               pattern = "_ECG.txt",
               sampling_rate = 1024,
               header = 13,
               footer = 3)
eda <-
  process_phys(dir = "all_data/",
               pattern = "_SCR.txt",
               sampling_rate = 32,
               header = 13,
               footer = 3)
# Manual check of one problematic file (participant 33, session B).
length_33 <- read_lines("all_data/33_B_emot_SCR.txt") %>% length()
eda_33 <- read_tsv("all_data/33_B_emot_SCR.txt",
                   skip = header,
                   n_max = length_33 - header - footer,
                   col_types = "id__",
                   col_names = c("sample","value"))
# Wrap read_tsv so warnings/messages are captured instead of printed.
quiet_read_tsv <- quietly(read_tsv)
dir = "all_data/"
pattern = "_SCR.txt"
# NOTE(review): SCR was processed at 32 Hz above but 1024 Hz is used here
# for the same "_SCR.txt" pattern -- confirm which rate is correct.
sampling_rate = 1024L
header = 13L
footer = 3L
df <-
  tibble( file = dir_ls(dir, regexp = pattern),
          # Have to read the lines first to know to skip the last 3 lines
          file_length = map_int(file, ~read_lines(.x) %>% length()),
          name = str_replace(file, str_glue(".*/(\\d+.*){pattern}$"),"\\1")) %>%
  # Read all files, skip header and footer,
  mutate(data = map2(file, file_length, ~quiet_read_tsv( .x,
                                                         skip = header,
                                                         n_max = .y - header - footer,
                                                         col_types = "id__",
                                                         col_names = c("sample","value")))) %>%
  separate(name, into = c("id","session"), extra = "merge") # %>%
# NOTE(review): the three lines below are leftovers of the pipeline that was
# commented out above -- `unnest()` starts a new top-level call and the
# trailing `)))` is unbalanced, so this region does not parse. Kept verbatim.
unnest() %>%
select(id, session, file, time, value) %>%
mutate(time = sample/sampling_rate)))
# NOTE(review): `safe_read_tsv` is not defined anywhere visible here -- the
# wrapper defined above is `quiet_read_tsv`; presumably one name is a typo.
safe_read_tsv("all_data/33_B_emot_SCR.txt") %>% str()
# Inspect the captured warnings per file.
df %>%
  mutate(warn = map(data, "warnings")) %>%
  print(n = 100)
# Inspect the parsed data of file 45.
df %>%
  slice(45) %>%
  pull(data)
| /script/ecg_process.R | no_license | nthun/nightmare_and_ans | R | false | false | 1,951 | r | library(tidyverse)
# Batch-load ECG and EDA (skin conductance) physiological recordings.
# NOTE(review): exploratory scratch script -- part of it does not parse;
# see the notes inline. Code kept verbatim.
library(fs)
source("script/process_phys.R")
# Raw exports carry a 13-line header and a 3-line footer around the data.
header = 13L
footer = 3L
ecg <-
  process_phys(dir = "all_data/",
               pattern = "_ECG.txt",
               sampling_rate = 1024,
               header = 13,
               footer = 3)
eda <-
  process_phys(dir = "all_data/",
               pattern = "_SCR.txt",
               sampling_rate = 32,
               header = 13,
               footer = 3)
# Manual check of one problematic file (participant 33, session B).
length_33 <- read_lines("all_data/33_B_emot_SCR.txt") %>% length()
eda_33 <- read_tsv("all_data/33_B_emot_SCR.txt",
                   skip = header,
                   n_max = length_33 - header - footer,
                   col_types = "id__",
                   col_names = c("sample","value"))
# Wrap read_tsv so warnings/messages are captured instead of printed.
quiet_read_tsv <- quietly(read_tsv)
dir = "all_data/"
pattern = "_SCR.txt"
# NOTE(review): SCR was processed at 32 Hz above but 1024 Hz is used here
# for the same "_SCR.txt" pattern -- confirm which rate is correct.
sampling_rate = 1024L
header = 13L
footer = 3L
df <-
  tibble( file = dir_ls(dir, regexp = pattern),
          # Have to read the lines first to know to skip the last 3 lines
          file_length = map_int(file, ~read_lines(.x) %>% length()),
          name = str_replace(file, str_glue(".*/(\\d+.*){pattern}$"),"\\1")) %>%
  # Read all files, skip header and footer,
  mutate(data = map2(file, file_length, ~quiet_read_tsv( .x,
                                                         skip = header,
                                                         n_max = .y - header - footer,
                                                         col_types = "id__",
                                                         col_names = c("sample","value")))) %>%
  separate(name, into = c("id","session"), extra = "merge") # %>%
# NOTE(review): the three lines below are leftovers of the pipeline that was
# commented out above -- `unnest()` starts a new top-level call and the
# trailing `)))` is unbalanced, so this region does not parse. Kept verbatim.
unnest() %>%
select(id, session, file, time, value) %>%
mutate(time = sample/sampling_rate)))
# NOTE(review): `safe_read_tsv` is not defined anywhere visible here -- the
# wrapper defined above is `quiet_read_tsv`; presumably one name is a typo.
safe_read_tsv("all_data/33_B_emot_SCR.txt") %>% str()
# Inspect the captured warnings per file.
df %>%
  mutate(warn = map(data, "warnings")) %>%
  print(n = 100)
# Inspect the parsed data of file 45.
df %>%
  slice(45) %>%
  pull(data)
|
# Auto-extracted example code for imputeTS::plotNA.distribution
# (visualizes where the missing values sit in a time series).
library(imputeTS)
### Name: plotNA.distribution
### Title: Visualize Distribution of Missing Values
### Aliases: plotNA.distribution
### ** Examples
#Example 1: Visualize the missing values in x
x <- ts(c(1:11, 4:9,NA,NA,NA,11:15,7:15,15:6,NA,NA,2:5,3:7))
plotNA.distribution(x)
#Example 2: Visualize the missing values in tsAirgap time series
plotNA.distribution(tsAirgap)
#Example 3: Same as example 1, just written with pipe operator
x <- ts(c(1:11, 4:9,NA,NA,NA,11:15,7:15,15:6,NA,NA,2:5,3:7))
x %>% plotNA.distribution
| /data/genthat_extracted_code/imputeTS/examples/plotNA.distribution.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 534 | r | library(imputeTS)
# Auto-extracted example code for imputeTS::plotNA.distribution
# (visualizes where the missing values sit in a time series).
### Name: plotNA.distribution
### Title: Visualize Distribution of Missing Values
### Aliases: plotNA.distribution
### ** Examples
#Example 1: Visualize the missing values in x
x <- ts(c(1:11, 4:9,NA,NA,NA,11:15,7:15,15:6,NA,NA,2:5,3:7))
plotNA.distribution(x)
#Example 2: Visualize the missing values in tsAirgap time series
plotNA.distribution(tsAirgap)
#Example 3: Same as example 1, just written with pipe operator
x <- ts(c(1:11, 4:9,NA,NA,NA,11:15,7:15,15:6,NA,NA,2:5,3:7))
x %>% plotNA.distribution
|
testlist <- list(A = structure(c(2.31639392448701e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613102440-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31639392448701e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' Generate bar plots from warnExtract data frame
#'
#' @description generates bar plots from data frames that were produced by \link[jobsec]{warnExtract}.
#' @param data a data frame of WARN data from \link[jobsec]{warnExtract}.
#' @param by a string specifying bar fill categories; one of "rollup"
#'   (default), "locality" or "reason", matched via \code{match.arg()}.
#' @return a \code{ggplot} bar-plot object.
#' @importFrom ggplot2 ggplot geom_bar xlab ylab theme labs aes element_text
#' @importFrom stringr str_to_title
#' @importFrom magrittr %>%
#' @examples
#' #extract warn data
#' df<- warnExtract(start_date = "2018-01-01", end_date = "2019-01-01")
#' #bar plots
#' warnBar(df, by = "reason")
#' @export warnBar
warnBar <- function(data,
                    by = c("rollup", "locality", "reason")){
  # Check user inputs: partial-match `by` against the allowed choices.
  by <- base::match.arg(by)
  # Name of the first column, assumed to hold the date/period axis.
  date_name <- names(data[1])
  # Convert date to character for plotting predictions.
  # NOTE(review): assumes a column literally named `date` exists whenever a
  # `type` column is present -- confirm against warnExtract()'s output.
  if("type" %in% colnames(data)){
    data$date <- as.character(data$date)
  }
  # Rollup plot; bars are filled by `type` when that column is present
  # (i.e. the data contain predictions), solid steel blue otherwise.
  if("type" %in% colnames(data)){
    if(by == "rollup")
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees, fill = type)) +
        ggplot2::geom_bar(stat = "identity")+
        ggplot2::xlab("")+
        ggplot2::ylab("Employees")
  }else{
    if(by == "rollup")
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees)) +
        ggplot2::geom_bar(stat = "identity", fill = "steelblue")+
        ggplot2::xlab("")+
        ggplot2::ylab("Employees")
  }
  # Plot by layoff reason (stacked bars).
  if(by == "reason"){
    # The reason breakdown requires the layoff_reason column.
    if("layoff_reason" %in% colnames(data)){
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees, fill = layoff_reason)) +
        ggplot2::geom_bar(stat = "identity", position="stack")+
        ggplot2::ylab("Employees")
    }
    else{
      stop("'layoff_reason' not found in data set. Use another warnBar method.")
    }
  }
  # Plot by locality (dodged bars, one per county).
  if(by == "locality"){
    # Check the number of counties and warn if there are too many to read.
    if(length(unique(data$county)) > 5){
      warning("Recommended selecting <= 5 counties for graph legibility.")
    }
    #plot
    bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees ,fill = county )) +
      ggplot2::geom_bar(stat = "identity" , position="dodge") +
      ggplot2::ylab("Layoffs")
  }
  # Shared theming, captions and titles.
  # NOTE(review): the title always says "by County", even for by = "rollup"
  # or by = "reason".
  bar_plot <- bar_plot+
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1))+
    ggplot2::labs(x = "", fill = "Region",
                  caption = base::paste("Source: CA WARN Data"),
                  title = base::paste ("CA WARN Events by County", min(data[[1]]), "-", max(data[[1]]) ))
  return(bar_plot)
}
| /R/warnBar.R | no_license | jacob-light/jobsec | R | false | false | 2,780 | r | #' Generate bar plots from warnExtract data frame
#'
#' @description generates bar plots from data frames that were produced by \link[jobsec]{warnExtract}.
#' @param data a data frame of WARN data from \link[jobsec]{warnExtract}.
#' @param by a string specifying bar fill categories; one of "rollup"
#'   (default), "locality" or "reason", matched via \code{match.arg()}.
#' @return a \code{ggplot} bar-plot object.
#' @importFrom ggplot2 ggplot geom_bar xlab ylab theme labs aes element_text
#' @importFrom stringr str_to_title
#' @importFrom magrittr %>%
#' @examples
#' #extract warn data
#' df<- warnExtract(start_date = "2018-01-01", end_date = "2019-01-01")
#' #bar plots
#' warnBar(df, by = "reason")
#' @export warnBar
warnBar <- function(data,
                    by = c("rollup", "locality", "reason")){
  # Check user inputs: partial-match `by` against the allowed choices.
  by <- base::match.arg(by)
  # Name of the first column, assumed to hold the date/period axis.
  date_name <- names(data[1])
  # Convert date to character for plotting predictions.
  # NOTE(review): assumes a column literally named `date` exists whenever a
  # `type` column is present -- confirm against warnExtract()'s output.
  if("type" %in% colnames(data)){
    data$date <- as.character(data$date)
  }
  # Rollup plot; bars are filled by `type` when that column is present
  # (i.e. the data contain predictions), solid steel blue otherwise.
  if("type" %in% colnames(data)){
    if(by == "rollup")
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees, fill = type)) +
        ggplot2::geom_bar(stat = "identity")+
        ggplot2::xlab("")+
        ggplot2::ylab("Employees")
  }else{
    if(by == "rollup")
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees)) +
        ggplot2::geom_bar(stat = "identity", fill = "steelblue")+
        ggplot2::xlab("")+
        ggplot2::ylab("Employees")
  }
  # Plot by layoff reason (stacked bars).
  if(by == "reason"){
    # The reason breakdown requires the layoff_reason column.
    if("layoff_reason" %in% colnames(data)){
      #plot
      bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees, fill = layoff_reason)) +
        ggplot2::geom_bar(stat = "identity", position="stack")+
        ggplot2::ylab("Employees")
    }
    else{
      stop("'layoff_reason' not found in data set. Use another warnBar method.")
    }
  }
  # Plot by locality (dodged bars, one per county).
  if(by == "locality"){
    # Check the number of counties and warn if there are too many to read.
    if(length(unique(data$county)) > 5){
      warning("Recommended selecting <= 5 counties for graph legibility.")
    }
    #plot
    bar_plot <- ggplot2::ggplot(data, ggplot2::aes(x=get(date_name), y=n_employees ,fill = county )) +
      ggplot2::geom_bar(stat = "identity" , position="dodge") +
      ggplot2::ylab("Layoffs")
  }
  # Shared theming, captions and titles.
  # NOTE(review): the title always says "by County", even for by = "rollup"
  # or by = "reason".
  bar_plot <- bar_plot+
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1))+
    ggplot2::labs(x = "", fill = "Region",
                  caption = base::paste("Source: CA WARN Data"),
                  title = base::paste ("CA WARN Events by County", min(data[[1]]), "-", max(data[[1]]) ))
  return(bar_plot)
}
|
# Simulation study: adequacy of birth-death models with one rate shift.
library(TreePar)
library(TreeSim)
library(NELSI)
library(doParallel)
library(foreach)
# Simulate 10 trees with one rate shift (shift time 0.5 before the present).
set.seed(1234)
nspecies <- 100
time <- c(0, 0.5)    # epoch boundaries: present and shift time
rho <- c(1, 0.5)     # sampling fraction per epoch
lambda <- c(1.5, 4)  # speciation rate per epoch
mu <- c(1.5, 0)      # extinction rate per epoch
rateshift_trees <- sim.rateshift.taxa(nspecies, 10, lambda = lambda,
                                      mu = mu, frac = rho, times = time, complete = F)
# Check that the trees have a root node age of more than 0.5 (otherwise there is no rate shift)
plot(rateshift_trees[[1]], show.tip.label = F)
nodelabels(round(intnode.times(rateshift_trees[[1]]), 2))
print( sapply(rateshift_trees, function(x) max(intnode.times(x))) )
# Check that rate shifts can be estimated for these trees by fitting the
# shift model to the first one over a 20-step time grid.
tr <- rateshift_trees[[1]]
x_times <- sort(intnode.times(tr), decreasing = T)
start <- min(x_times)
end <- max(x_times)
grid <- diff(range(x_times))/20
res <- bd.shifts.optim(x_times, sampling = c(1, 0.5), grid, start, end, posdiv = T)
res[[2]]
# Fit birth-death models with 0 and 1 rate shifts to a tree and return the
# per-model likelihoods plus back-transformed (lambda, mu) estimates.
# `rho` is the sampling fraction at present and in the past; it is held
# fixed, not estimated.
fit_rate_shifts <- function(tree, rho){ # Rho at present and in the past. This parameter needs to be fixed
  # Internal node ages define the data for bd.shifts.optim; grid = 1/20 of
  # the tree depth.
  x_times <- sort(intnode.times(tree), decreasing = T)
  start <- min(x_times)
  end <- max(x_times)
  grid <- diff(range(x_times))/20
  res <- bd.shifts.optim(x_times, rho, grid, start, end, posdiv = T)[[2]]
  # Find likelihoods, lambda, mu, and rate-shift times
  likelihoods <- sapply(res, function(x) x[1])
  # Back-transform the optimizer's parameterization; per the original note
  # these come from turnover and net speciation -- TODO confirm indices
  # against the TreePar documentation.
  lambda0 <- res[[1]][3] / (1 - res[[1]][2]) # These are the lambda and mu estimates from turover and net
  mu0 <- lambda0 * res[[1]][2] # speciation for 0 rate shifts. Please check.
  # The following are also computed, but note that some of them can be
  # negative; the original author could not simulate trees from them,
  # possibly because of the negative values.
  lambda11 <- res[[2]][3] / (1 - res[[2]][2])
  mu11 <- lambda11 * res[[2]][2]
  lambda12 <- res[[2]][5] / (1 - res[[2]][4])
  mu12 <- lambda12 * res[[2]][4]
  time1 <- res[[2]][length(res[[2]])]
  return(list(likelihoods, shifts0= c(lambda0, mu0), shifts1=c(lambda11, lambda12, mu11, mu12, time1)))
}
# Parametric-bootstrap adequacy test: for each simulated tree, fit the
# 0-shift model, simulate 100 trees under that fit, and compare the
# empirical likelihood to the bootstrap likelihood distribution.
pvals <- vector()
likelihoods_distros <- list()
likelihoods_empirical <- vector()
empirical_tree_param_estimates <- list()
cl <- makeCluster(8)
registerDoParallel(cl)
for(tr in 1:length(rateshift_trees)){
  print(paste('STARTED', tr, 'OF', length(rateshift_trees)))
  reference_estimates <- fit_rate_shifts(rateshift_trees[[tr]], rho)
  # Bootstrap replicates under the fitted 0-shift (lambda, mu).
  sim_trees0 <- sim.bd.taxa(n = nspecies, numbsim = 100, lambda = reference_estimates$shifts0[1],
                            mu = reference_estimates$shifts0[2], frac = 0.5, complete = F)
  liks_sim_trees0 <- foreach(mt = sim_trees0, .packages = c('NELSI', 'TreePar')) %dopar% fit_rate_shifts(mt, rho)[[1]][1]
  likelihoods_distros[[tr]] <- liks_sim_trees0
  likelihoods_empirical[tr] <- reference_estimates[[1]][1]
  empirical_tree_param_estimates[[tr]] <- reference_estimates$shifts0
  # NOTE(review): this is a COUNT of bootstrap likelihoods below the
  # empirical one, not a proportion -- divide by 100 for an actual p value.
  pvals[tr] <- sum(reference_estimates[[1]][1] > liks_sim_trees0)
  print(paste('COMPLETED', tr, 'OF', length(rateshift_trees)))
}
stopCluster(cl)
print("the p values are")
print(pvals)
# Bootstrap likelihood histograms with the empirical value marked in red.
pdf('histograms_100_tips.pdf')
par(mfrow = c(3, 3))
for(i in 1:9){
  hist(as.numeric(likelihoods_distros[[i]]), main = '', ylab = '', xlab = '', col = rgb(0, 0, 0.5, 0.3))
  lines(x = c(likelihoods_empirical[i], likelihoods_empirical[i]), y = c(0, 20), col = 'red', lwd = 2)
}
dev.off()
| /one_shift_adequacy_100_tips.R | no_license | sebastianduchene/phylodynamics_adequacy | R | false | false | 3,477 | r | library(TreePar)
# Simulation study: adequacy of birth-death models with one rate shift.
library(TreeSim)
library(NELSI)
library(doParallel)
library(foreach)
# Simulate 10 trees with one rate shift (shift time 0.5 before the present).
set.seed(1234)
nspecies <- 100
time <- c(0, 0.5)    # epoch boundaries: present and shift time
rho <- c(1, 0.5)     # sampling fraction per epoch
lambda <- c(1.5, 4)  # speciation rate per epoch
mu <- c(1.5, 0)      # extinction rate per epoch
rateshift_trees <- sim.rateshift.taxa(nspecies, 10, lambda = lambda,
                                      mu = mu, frac = rho, times = time, complete = F)
# Check that the trees have a root node age of more than 0.5 (otherwise there is no rate shift)
plot(rateshift_trees[[1]], show.tip.label = F)
nodelabels(round(intnode.times(rateshift_trees[[1]]), 2))
print( sapply(rateshift_trees, function(x) max(intnode.times(x))) )
# Check that rate shifts can be estimated for these trees by fitting the
# shift model to the first one over a 20-step time grid.
tr <- rateshift_trees[[1]]
x_times <- sort(intnode.times(tr), decreasing = T)
start <- min(x_times)
end <- max(x_times)
grid <- diff(range(x_times))/20
res <- bd.shifts.optim(x_times, sampling = c(1, 0.5), grid, start, end, posdiv = T)
res[[2]]
# Fit birth-death models with 0 and 1 rate shifts to a tree and return the
# per-model likelihoods plus back-transformed (lambda, mu) estimates.
# `rho` is the sampling fraction at present and in the past; it is held
# fixed, not estimated.
fit_rate_shifts <- function(tree, rho){ # Rho at present and in the past. This parameter needs to be fixed
  # Internal node ages define the data for bd.shifts.optim; grid = 1/20 of
  # the tree depth.
  x_times <- sort(intnode.times(tree), decreasing = T)
  start <- min(x_times)
  end <- max(x_times)
  grid <- diff(range(x_times))/20
  res <- bd.shifts.optim(x_times, rho, grid, start, end, posdiv = T)[[2]]
  # Find likelihoods, lambda, mu, and rate-shift times
  likelihoods <- sapply(res, function(x) x[1])
  # Back-transform the optimizer's parameterization; per the original note
  # these come from turnover and net speciation -- TODO confirm indices
  # against the TreePar documentation.
  lambda0 <- res[[1]][3] / (1 - res[[1]][2]) # These are the lambda and mu estimates from turover and net
  mu0 <- lambda0 * res[[1]][2] # speciation for 0 rate shifts. Please check.
  # The following are also computed, but note that some of them can be
  # negative; the original author could not simulate trees from them,
  # possibly because of the negative values.
  lambda11 <- res[[2]][3] / (1 - res[[2]][2])
  mu11 <- lambda11 * res[[2]][2]
  lambda12 <- res[[2]][5] / (1 - res[[2]][4])
  mu12 <- lambda12 * res[[2]][4]
  time1 <- res[[2]][length(res[[2]])]
  return(list(likelihoods, shifts0= c(lambda0, mu0), shifts1=c(lambda11, lambda12, mu11, mu12, time1)))
}
# Parametric-bootstrap adequacy test: for each simulated tree, fit the
# 0-shift model, simulate 100 trees under that fit, and compare the
# empirical likelihood to the bootstrap likelihood distribution.
pvals <- vector()
likelihoods_distros <- list()
likelihoods_empirical <- vector()
empirical_tree_param_estimates <- list()
cl <- makeCluster(8)
registerDoParallel(cl)
for(tr in 1:length(rateshift_trees)){
  print(paste('STARTED', tr, 'OF', length(rateshift_trees)))
  reference_estimates <- fit_rate_shifts(rateshift_trees[[tr]], rho)
  # Bootstrap replicates under the fitted 0-shift (lambda, mu).
  sim_trees0 <- sim.bd.taxa(n = nspecies, numbsim = 100, lambda = reference_estimates$shifts0[1],
                            mu = reference_estimates$shifts0[2], frac = 0.5, complete = F)
  liks_sim_trees0 <- foreach(mt = sim_trees0, .packages = c('NELSI', 'TreePar')) %dopar% fit_rate_shifts(mt, rho)[[1]][1]
  likelihoods_distros[[tr]] <- liks_sim_trees0
  likelihoods_empirical[tr] <- reference_estimates[[1]][1]
  empirical_tree_param_estimates[[tr]] <- reference_estimates$shifts0
  # NOTE(review): this is a COUNT of bootstrap likelihoods below the
  # empirical one, not a proportion -- divide by 100 for an actual p value.
  pvals[tr] <- sum(reference_estimates[[1]][1] > liks_sim_trees0)
  print(paste('COMPLETED', tr, 'OF', length(rateshift_trees)))
}
stopCluster(cl)
print("the p values are")
print(pvals)
# Bootstrap likelihood histograms with the empirical value marked in red.
pdf('histograms_100_tips.pdf')
par(mfrow = c(3, 3))
for(i in 1:9){
  hist(as.numeric(likelihoods_distros[[i]]), main = '', ylab = '', xlab = '', col = rgb(0, 0, 0.5, 0.3))
  lines(x = c(likelihoods_empirical[i], likelihoods_empirical[i]), y = c(0, 20), col = 'red', lwd = 2)
}
dev.off()
|
# ELECTIVE COURSE: OPEN SCIENCE AND FREE SOFTWARE.
# MASTER'S PROGRAM IN SOCIAL SCIENCES, UNIVERSIDAD DE CHILE - 2020
# Check the language-encoding issue (standardize everything to UTF-8)
# Send out the list of packages to install beforehand
# Class and slides: contents; tidyverse documentation; PSPP; CEP survey and own-data workshop
# ---- 0. PACKAGES TO INSTALL ---
install.packages(c("readxl", "tidyverse", "haven", "car"))
# ---- 1. IMPORTING DATA INTO R FROM DIFFERENT FORMATS ----
# For better reproducibility it is best to work with "R projects":
# a working location set automatically by an .Rproj file.
# Although not recommended, we can set the working folder manually.
# Buttons: Session -> Set Working Directory -> Choose directory -> pick folder
# Keyboard shortcut: Ctrl + Shift + H
# Choose the folder holding our files (syntax, the data set to use)
setwd("~/Dropbox/Felipe/Docencia/Universidad de Chile/Magister CCSS UCH/C. Electivo Metodología/clases/clase 6")
# WE WILL SEE HOW TO IMPORT DATA SETS FROM 4 FORMATS: CSV, EXCEL, SPSS, STATA
# Look at the paraguay.xlsx file and understand the workbook's structure.
# Save the sheet holding the survey data of the "paraguay" Excel file as CSV.
# We do this from the file explorer with "save as".
# Inspect the internal structure (open the CSV with a plain-text editor).
# Latin American notation (, decimals - ; separator)
# US/European notation (. decimals - , separator)
# A. Open the paraguay file from CSV (mind Linux/Windows differences)
# US NOTATION: decimals as dot, not comma
paraguay_csv <- read.csv("datos/paraguay.csv")
View(paraguay_csv) # File is not read correctly; columns are not split properly
# What happens if we use a function suited to Latin notation?
# reads commas as decimals and semicolons as variable separators
paraguay_csv2 <- read.csv2("datos/paraguay.csv")
View(paraguay_csv2)
# we can manually delete the first row so variable names are read correctly
# B. Open the paraguay file from Excel.
library(readxl)
paraguay_excel <- read_excel("datos/paraguay.xlsx")
head(paraguay_excel)
# What is the problem?
# Using the arguments of the read_excel function
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = 2) # sheet position
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = "respuestas") # sheet name
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = "respuestas", skip = 1) # skip the questionnaire-question row
# The sheet name or position, and which row to read from, must be given.
# By default the function uses the first row it reads as variable names.
# Clean the work environment
# C. Open data sets from STATA and SPSS
# Package with functions to read data sets from other formats
library(haven)
# To read from SPSS
CEP_dic19_spss <- read_sav("datos/CEP_dic2019.sav")
# To read from Stata
CEP_dic19_stata <- read_dta("datos/CEP_dic2019.dta")
# View the CEP survey in PSPP
# Remove the Stata data set manually from the environment
# ---- 2. RECODIFICACIÓN Y TRANSFORMACIÓN DE VARIABLES ----
# Análisis global de los datos
class(CEP_dic19_spss) # Tipo de objeto
dim(CEP_dic19_spss) # Dimensiones de la matriz
names(CEP_dic19_spss) # Nombres de las variables
# Selección de variables de interés
library(dplyr) #Cargar paquete'dplyr'
# Sexo: DS_P1
# Edad: DS_P2_EXACTA (intervalar)
# Apoyo o rechazo octubre 19: ESP_32
# Construimos base de datos con selección de tres variables de la original
CEP <- select(CEP_dic19_spss, DS_P1, DS_P2_EXACTA, ESP_32)
# Luego de seleccionar, podemos sobreescribir el objeto CEP para renombrar variables
CEP <- rename(CEP, sexo = DS_P1, edad = DS_P2_EXACTA, O19 = ESP_32)
# No obstante, podríamos seleccionar y al mismo tiempo renombrar las variables
CEP <- select(CEP_dic19_spss, sexo = DS_P1, edad = DS_P2_EXACTA, O19 = ESP_32)
# RECODIFICACIÓN
# Sexo (nominal)
#---------------
table(CEP$sexo) #1 = hombre, 2 = mujer
class(CEP$sexo)
CEP$sexo <- as.numeric(CEP$sexo) #convertir a vector numérico para poder transformar
CEP <- mutate(CEP, sexorec = recode(CEP$sexo, "1" = "hombre", "2" = "mujer"))
class(CEP$sexorec) #queda como objeto character
CEP$sexorec #visualizar datos concretos guardados
table(CEP$sexorec)
# Apoyo o rechazo octubre 19 (ordinal)
#------------------------------------
#1. Apoyo / 2. Apoyo parcial / 3. Neutro / 4. Rechazo parcial / 5. Rechazo
# Recodificar en apoyo, neutro y rechazo
table(CEP$O19)
class(CEP$O19)
CEP$O19 <- as.numeric(CEP$O19)
#Especificar paquete desde el cual queremos ejecutar la función 'recode'
#para poder recodificar según tramos
CEP <- mutate(CEP, O19_rec = car::recode(CEP$O19, "1:2 = 1; 3 = 2;
4:5 = 3; else = NA"))
table(CEP$O19_rec)
#Convertir a factor para poner etiquetas
CEP$O19_rec <- factor(CEP$O19_rec, labels= c("Apruebo", "Neutro", "Rechazo"))
table(CEP$O19_rec)
# edad (intervalar)
#------------------
#Recodificar en números asociados a rangos. 18-29; 30-49; 50-69; 70 o más.
summary(CEP$edad)
class(CEP$edad)
CEP$edad <- as.numeric(CEP$edad)
#Especificar paquete desde el cual queremos ejecutar la función 'recode'
#para poder recodificar según tramos
CEP <- mutate(CEP, edad_rango = car::recode(CEP$edad, "18:29 = 1;30:49 = 2;
50:69 =3; else = 4"))
table(CEP$edad_rango)
#Convertir a factor para poner etiquetas
CEP$edad_rango <- factor(CEP$edad_rango, labels= c("18-29", "30-49", "50-69", "70+"))
table(CEP$edad_rango)
# Save the data set containing the selected and recoded variables.
# saveRDS() compresses the file by default (useful for GitHub uploads and e-mail).
saveRDS(CEP, file = "datos/CEP_dic19_seleccion.rds")
# Save the file without compression.
# NOTE(review): this writes to the same path as the line above, so the
# compressed copy is overwritten — use a different filename to keep both.
saveRDS(CEP, file = "datos/CEP_dic19_seleccion.rds", compress = FALSE)
# Clear the workspace (done manually) and reload the saved data set.
CEP <- readRDS("datos/CEP_dic19_seleccion.rds")
# ---- 3. BREVE ANÁLISIS ----
table(CEP$sexorec, CEP$O19_rec)
prop.table(table(CEP$sexorec, CEP$O19_rec))*100
prop.table(table(CEP$sexorec, CEP$O19_rec),1)*100
table(CEP$edad_rango, CEP$O19_rec)
prop.table(table(CEP$edad_rango, CEP$O19_rec))*100
prop.table(table(CEP$edad_rango, CEP$O19_rec),1)*100
# ---- 4. TALLER PRÁCTICO: CARGAR DATOS PROPIOS, SELECCIONAR VARIABLES A UTILIAR ----
# Instalar paquetes necesarios
# Cargar paquetes a utilizar en sesión de trabajo
# Definir carpeta de trabajo
# Leer base de datos y guardarla como objeto R
# Crear nueva base con subconjunto de variables específico a analizar
# Guardar sintaxis y base de datos en formato R
save(datos, file = "datos/base_electivo.rds") # ¿Dónde queda guardada? | /codigo/sintaxis sesion 6.R | no_license | feliperuizbruzzone/electivo-magister-2020 | R | false | false | 6,805 | r | # ELECTIVO: CIENCIA ABIERTA Y SOFTWARE LIBRE.
# MAGÍSTER EN CCSS UNIVERSIDAD DE CHILE - 2020
# Ver asunto de codificación de idioma (estandarizar a UTF-8)
# Mandar paquetes a instalar previamente
# Clase y diapos: contenidos; documentación tidyverse; PSPP; CEP y taller datos propios
# ---- 0. PAQUETES A INSTALAR ---
install.packages(c("readxl", "tidyverse", "haven", "car"))
# ---- 1. IMPORTACIÓN DE DATOS A R, DESDE DIFERENTES FORMATOS ----
# Para ganar en reproductibilidad, es mejor trabajar con "proyectos de R".
# Es una ubicación mandatada de forma automática por un archivo .Rproj
# Aunque es poco recomendable, podemos defnir manualmente la carpeta de trabajo
#Botones: Session -> Set Working Directory -> Choose directory -> Elegir carpeta
#Abreviación de botones: Ctrl + Shift + H
#Escogemos la carpeta donde tengamos nuestros archivos (sintaxis, base de datos a usar)
setwd("~/Dropbox/Felipe/Docencia/Universidad de Chile/Magister CCSS UCH/C. Electivo Metodología/clases/clase 6")
#VEREMOS COMO IMPORTAR BASES DE DATOS DESDE 4 TIPOS DE DATOS: CSV, EXCEL, SPSS, STATA
#Ver archivo paraguay.xlsx, entender estructura de libro de datos.
#Configurar hoja correspondiente a base de datos de encuesta en archivo "paraguay" en Excel a archivo CSV
#Esto lo hacemos desde explorador de archivos en "guardar como"
#Observar estructura interna (abrir CSV con bloc de notas)
#Notación latinoamericana (, decimales - ; separador)
#Notación EEUU/EUROPA (. decimales - , separador)
# A. Abrir archivo paraguay desde CSV (ojo con diferencias Linux/Windows)
#NOTACIÓN EEUU: decimales como punto y no coma
paraguay_csv <- read.csv("datos/paraguay.csv")
View(paraguay_csv) # Base no se lee correctamente, no se separan bien columnas
#¿Qué pasa si usamos una función adecuada a notación latina?
#lee comas como decimales y punto y comas como separador de variables
paraguay_csv2 <- read.csv2("datos/paraguay.csv")
View(paraguay_csv2)
# podemos eliminar manualmente primera fila para leer correctamente nombre variable
# B. Abrir archivo paraguay desde excel.
library(readxl)
paraguay_excel <- read_excel("datos/paraguay.xlsx")
head(paraguay_excel)
#¿Cuál es el problema?
#Uso de argumentos en función read_excel
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = 2) # posición de la hoja
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = "respuestas") # nombre hoja
paraguay_excel <- read_excel("datos/paraguay.xlsx", sheet = "respuestas", skip = 1) #saltar fila de preguntas del cuestionario
#Es preciso indicar el nombre o posición de la hoja y desde qué fila leer datos.
#Por defecto la función lee como nombre de variable la primera fila que lee.
# Limpiar entorno de trabajo
# C. Abrir base de datos desde STATA y SPSS
# Paquete que incluye funciones para leer bases de datos desde otros formatos
library(haven)
# Para leer desde SPSS
CEP_dic19_spss <- read_sav("datos/CEP_dic2019.sav")
#Para leer desde Stata
CEP_dic19_stata <- read_dta("datos/CEP_dic2019.dta")
# Visualizar encuesta CEP en PSPP
# Eliminar base stata manualmente desde entorno
# ---- 2. RECODIFICACIÓN Y TRANSFORMACIÓN DE VARIABLES ----
# Análisis global de los datos
class(CEP_dic19_spss) # Tipo de objeto
dim(CEP_dic19_spss) # Dimensiones de la matriz
names(CEP_dic19_spss) # Nombres de las variables
# Selección de variables de interés
library(dplyr) #Cargar paquete'dplyr'
# Sexo: DS_P1
# Edad: DS_P2_EXACTA (intervalar)
# Apoyo o rechazo octubre 19: ESP_32
# Construimos base de datos con selección de tres variables de la original
CEP <- select(CEP_dic19_spss, DS_P1, DS_P2_EXACTA, ESP_32)
# Luego de seleccionar, podemos sobreescribir el objeto CEP para renombrar variables
CEP <- rename(CEP, sexo = DS_P1, edad = DS_P2_EXACTA, O19 = ESP_32)
# No obstante, podríamos seleccionar y al mismo tiempo renombrar las variables
CEP <- select(CEP_dic19_spss, sexo = DS_P1, edad = DS_P2_EXACTA, O19 = ESP_32)
# RECODIFICACIÓN
# Sexo (nominal)
#---------------
table(CEP$sexo) #1 = hombre, 2 = mujer
class(CEP$sexo)
CEP$sexo <- as.numeric(CEP$sexo) #convertir a vector numérico para poder transformar
CEP <- mutate(CEP, sexorec = recode(CEP$sexo, "1" = "hombre", "2" = "mujer"))
class(CEP$sexorec) #queda como objeto character
CEP$sexorec #visualizar datos concretos guardados
table(CEP$sexorec)
# Apoyo o rechazo octubre 19 (ordinal)
#------------------------------------
#1. Apoyo / 2. Apoyo parcial / 3. Neutro / 4. Rechazo parcial / 5. Rechazo
# Recodificar en apoyo, neutro y rechazo
table(CEP$O19)
class(CEP$O19)
CEP$O19 <- as.numeric(CEP$O19)
#Especificar paquete desde el cual queremos ejecutar la función 'recode'
#para poder recodificar según tramos
CEP <- mutate(CEP, O19_rec = car::recode(CEP$O19, "1:2 = 1; 3 = 2;
4:5 = 3; else = NA"))
table(CEP$O19_rec)
#Convertir a factor para poner etiquetas
CEP$O19_rec <- factor(CEP$O19_rec, labels= c("Apruebo", "Neutro", "Rechazo"))
table(CEP$O19_rec)
# edad (intervalar)
#------------------
#Recodificar en números asociados a rangos. 18-29; 30-49; 50-69; 70 o más.
summary(CEP$edad)
class(CEP$edad)
CEP$edad <- as.numeric(CEP$edad)
#Especificar paquete desde el cual queremos ejecutar la función 'recode'
#para poder recodificar según tramos
CEP <- mutate(CEP, edad_rango = car::recode(CEP$edad, "18:29 = 1;30:49 = 2;
50:69 =3; else = 4"))
table(CEP$edad_rango)
#Convertir a factor para poner etiquetas
CEP$edad_rango <- factor(CEP$edad_rango, labels= c("18-29", "30-49", "50-69", "70+"))
table(CEP$edad_rango)
# Guardar base de datos con selección de variables y variables recodificadas
# Guarda archivo compriméndolo por defecto (útil para carga en Github y envios email)
saveRDS(CEP, file = "datos/CEP_dic19_seleccion.rds")
# Guarda archivo sin comprimir
saveRDS(CEP, file = "datos/CEP_dic19_seleccion.rds", compress = F)
# Limpiar entorno de trabajo y cargar base guardada
CEP <- readRDS("datos/CEP_dic19_seleccion.rds")
# ---- 3. BREVE ANÁLISIS ----
table(CEP$sexorec, CEP$O19_rec)
prop.table(table(CEP$sexorec, CEP$O19_rec))*100
prop.table(table(CEP$sexorec, CEP$O19_rec),1)*100
table(CEP$edad_rango, CEP$O19_rec)
prop.table(table(CEP$edad_rango, CEP$O19_rec))*100
prop.table(table(CEP$edad_rango, CEP$O19_rec),1)*100
# ---- 4. TALLER PRÁCTICO: CARGAR DATOS PROPIOS, SELECCIONAR VARIABLES A UTILIAR ----
# Instalar paquetes necesarios
# Cargar paquetes a utilizar en sesión de trabajo
# Definir carpeta de trabajo
# Leer base de datos y guardarla como objeto R
# Crear nueva base con subconjunto de variables específico a analizar
# Guardar sintaxis y base de datos en formato R
save(datos, file = "datos/base_electivo.rds") # ¿Dónde queda guardada? |
#### ff_standings (MFL) ####
#' Get a dataframe of league standings
#'
#' @param conn a conn object created by `ff_connect()`
#' @param ... arguments passed to other methods
#'
#' @examples
#' \donttest{
#' try({ # try only shown here because sometimes CRAN checks are weird
#' ssb_conn <- ff_connect(platform = "mfl", league_id = 54040, season = 2020)
#' ff_standings(ssb_conn)
#' }) # end try
#' }
#'
#' @describeIn ff_standings MFL: returns H2H/points/all-play/best-ball data in a table.
#'
#' @export
ff_standings.mfl_conn <- function(conn, ...) {
  # Request the leagueStandings endpoint; ALL=1 asks MFL for every available column.
  standings_endpoint <- mfl_getendpoint(conn, "leagueStandings", ALL=1) %>%
    # Drill down to the per-franchise records in the parsed response.
    purrr::pluck("content", "leagueStandings", "franchise") %>%
    # One list element per franchise -> one row per franchise.
    tibble::tibble() %>%
    tidyr::unnest_wider(1) %>%
    # Attach human-readable franchise names, keyed on the franchise id.
    dplyr::left_join(
      ff_franchises(conn) %>% dplyr::select("franchise_id", "franchise_name"),
      by = c("id" = "franchise_id")
    ) %>%
    # Strip "$" signs (e.g. from salary/accounting fields) so they can be parsed as numbers.
    dplyr::mutate_all(~gsub(pattern = "\\$",replacement = "",x = .x)) %>%
    # Everything except the id, the name, and streak / W-L-T strings becomes numeric.
    dplyr::mutate_at(dplyr::vars(-.data$id, -.data$franchise_name, -dplyr::matches("streak|strk|wlt")), as.numeric) %>%
    # W-L-T strings appear to use a non-ASCII hyphen (U+2011) — normalise to a plain "-".
    dplyr::mutate_at(dplyr::vars(dplyr::matches("wlt")),~gsub(x = .x, pattern = "‑", replacement = "-",fixed = TRUE)) %>%
    dplyr::mutate_if(is.numeric, round, 3) %>%
    # Rename to stable snake_case column names; any_of() tolerates columns
    # that a given league's response does not include.
    dplyr::select(dplyr::any_of(c(
      "franchise_id" = "id",
      "franchise_name",
      "h2h_wins" = "h2hw",
      "h2h_losses" = "h2hl",
      "h2h_ties" = "h2ht",
      "h2h_winpct" = "h2hpct",
      "h2h_wlt" = "h2hwlt",
      "allplay_wins" = "all_play_w",
      "allplay_losses" = "all_play_l",
      "allplay_ties" = "all_play_t",
      "allplay_winpct" = "all_play_pct",
      "allplay_wlt" = "all_play_wlt",
      "points_for" = "pf",
      "points_against" = "pa",
      "avg_points_for" = "avgpf",
      "avg_points_against" = "avgpa",
      "max_points_against" = "maxpa",
      "min_points_against" = "minpa",
      "potential_points" = "pp",
      "victory_points" = "vp",
      "offensive_points" = "op",
      "defensive_points" = "dp",
      "streak" = "strk",
      "streak_type",
      "streak_len",
      "power_rank" = "pwr",
      "power_rank_alt" = "altpwr",
      "accounting_balance" = "acct",
      "faab_balance" = "bbidbalance",
      "salary"
    )))
  return(standings_endpoint)
}
| /R/mfl_standings.R | permissive | jfontestad/ffscrapr | R | false | false | 2,286 | r | #### ff_standings (MFL) ####
#' Get a dataframe of league standings
#'
#' @param conn a conn object created by `ff_connect()`
#' @param ... arguments passed to other methods
#'
#' @examples
#' \donttest{
#' try({ # try only shown here because sometimes CRAN checks are weird
#' ssb_conn <- ff_connect(platform = "mfl", league_id = 54040, season = 2020)
#' ff_standings(ssb_conn)
#' }) # end try
#' }
#'
#' @describeIn ff_standings MFL: returns H2H/points/all-play/best-ball data in a table.
#'
#' @export
ff_standings.mfl_conn <- function(conn, ...) {
standings_endpoint <- mfl_getendpoint(conn, "leagueStandings", ALL=1) %>%
purrr::pluck("content", "leagueStandings", "franchise") %>%
tibble::tibble() %>%
tidyr::unnest_wider(1) %>%
dplyr::left_join(
ff_franchises(conn) %>% dplyr::select("franchise_id", "franchise_name"),
by = c("id" = "franchise_id")
) %>%
dplyr::mutate_all(~gsub(pattern = "\\$",replacement = "",x = .x)) %>%
dplyr::mutate_at(dplyr::vars(-.data$id, -.data$franchise_name, -dplyr::matches("streak|strk|wlt")), as.numeric) %>%
dplyr::mutate_at(dplyr::vars(dplyr::matches("wlt")),~gsub(x = .x, pattern = "‑", replacement = "-",fixed = TRUE)) %>%
dplyr::mutate_if(is.numeric, round, 3) %>%
dplyr::select(dplyr::any_of(c(
"franchise_id" = "id",
"franchise_name",
"h2h_wins" = "h2hw",
"h2h_losses" = "h2hl",
"h2h_ties" = "h2ht",
"h2h_winpct" = "h2hpct",
"h2h_wlt" = "h2hwlt",
"allplay_wins" = "all_play_w",
"allplay_losses" = "all_play_l",
"allplay_ties" = "all_play_t",
"allplay_winpct" = "all_play_pct",
"allplay_wlt" = "all_play_wlt",
"points_for" = "pf",
"points_against" = "pa",
"avg_points_for" = "avgpf",
"avg_points_against" = "avgpa",
"max_points_against" = "maxpa",
"min_points_against" = "minpa",
"potential_points" = "pp",
"victory_points" = "vp",
"offensive_points" = "op",
"defensive_points" = "dp",
"streak" = "strk",
"streak_type",
"streak_len",
"power_rank" = "pwr",
"power_rank_alt" = "altpwr",
"accounting_balance" = "acct",
"faab_balance" = "bbidbalance",
"salary"
)))
return(standings_endpoint)
}
|
library(RcmdrPlugin.IPSUR)
data(RcmdrTestDrive)
attach(RcmdrTestDrive)
# shows names of variables/columns
names(RcmdrTestDrive)
# summary of all variables
summary(RcmdrTestDrive)
# make a one-way frequency table of race
race <- table(RcmdrTestDrive$race)
# make a bar chart from a table with one variable
barplot(race)
# calculate agg mean (salary) group by (gender) from table - 2 methods (tapply or by)
aggMean <- tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, mean)
by(RcmdrTestDrive$salary, RcmdrTestDrive$gender, mean, na.rm = TRUE)
# get the agg mean of a variable group by another variable
mean(RcmdrTestDrive$salary[RcmdrTestDrive$gender == 'Male'])
# get the max/min agg from a variable
aggMean[which(aggMean == max(aggMean))]
# calculate spread of two variables using standard deviation
sdSalaryVsGender <- tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, sd)
# boxplot of two variables (y~x)
boxplot(RcmdrTestDrive$salary~RcmdrTestDrive$gender, data=RcmdrTestDrive)
# sort a variable
reduction <- sort(RcmdrTestDrive$reduction)
# get value of variable by index 137th
reduction[137]
# find the Inner Quartile Range (IQR) of a variable
IQR(reduction)
# five number summary of a variable
fivenum(reduction)
# calculate (approx) IQR from five number summary
fivenum(reduction)[4] - fivenum(reduction)[2]
# check for outliers
# potential
temp <- fivenum(reduction)
1.5 * (temp[4] - temp[2]) + temp[4]
# suspected
3 * (temp[4] - temp[2]) + temp[4]
# boxplot shows outliers (variable has a ton so use median for measure of central tendency and MAD or scales IQR for spread)
boxplot(RcmdrTestDrive$after, data = RcmdrTestDrive)
# measures of center (mean vs median)
c(mean(RcmdrTestDrive$after), median(RcmdrTestDrive$after))
# measures of spread (sd vs mad vs rescaled IQR)
c(sd(RcmdrTestDrive$after), mad(RcmdrTestDrive$after), IQR(RcmdrTestDrive$after)/1.349)
# measures of shape (skewness vs 2*SQRT(6/n) and kurtosis vs 2*SQRT(24/n))
library(e1071)
# not sure why do this
2*sqrt(6/nrow(RcmdrTestDrive))
# 0 is expected value, the whale is in the tail (pos or neg)
skewness(RcmdrTestDrive$after)
# not sure why do this (rule-of-thumb threshold 2*sqrt(24/n)? TODO confirm)
2*sqrt(24/nrow(RcmdrTestDrive))
# 3 is expected value, 3 types in R (excess is type 1)
# NOTE(review): for excess kurtosis the normal expectation is 0, not 3 —
# confirm which kurtosis definition was intended here.
kurtosis(RcmdrTestDrive$after, type = 1)
# histogram
hist(RcmdrTestDrive$after)
| /IntroductionToProbabilityAndStatisticsUsingR/Chapter3_Exercises_20170902.R | no_license | wolfe-man/502 | R | false | false | 2,386 | r | library(RcmdrPlugin.IPSUR)
data(RcmdrTestDrive)
attach(RcmdrTestDrive)
# shows names of variables/columns
names(RcmdrTestDrive)
# summary of all variables
summary(RcmdrTestDrive)
# make a table with one variable
race <- table(RcmdrTestDrive$race)
# make a bar char from a table with one variable
barplot(race)
# calculate agg mean (salary) group by (gender) from table - 2 methods (tapply or by)
aggMean <- tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, mean)
by(RcmdrTestDrive$salary, RcmdrTestDrive$gender, mean, na.rm = TRUE)
# get the agg mean of a variable group by another variable
mean(RcmdrTestDrive$salary[RcmdrTestDrive$gender == 'Male'])
# get the max/min agg from a variable
aggMean[which(aggMean == max(aggMean))]
# calculate spread of two variables using standard deviation
sdSalaryVsGender <- tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, sd)
# boxplot of two variables (y~x)
boxplot(RcmdrTestDrive$salary~RcmdrTestDrive$gender, data=RcmdrTestDrive)
# sort a variable
reduction <- sort(RcmdrTestDrive$reduction)
# get value of variable by index 137th
reduction[137]
# find the Inner Quartile Range (IQR) of a variable
IQR(reduction)
# five number summary of a variable
fivenum(reduction)
# calculate (approx) IQR from five number summary
fivenum(reduction)[4] - fivenum(reduction)[2]
# check for outliers
# potential
temp <- fivenum(reduction)
1.5 * (temp[4] - temp[2]) + temp[4]
# suspected
3 * (temp[4] - temp[2]) + temp[4]
# boxplot shows outliers (variable has a ton so use median for measure of central tendency and MAD or scales IQR for spread)
boxplot(RcmdrTestDrive$after, data = RcmdrTestDrive)
# measures of center (mean vs median)
c(mean(RcmdrTestDrive$after), median(RcmdrTestDrive$after))
# measures of spread (sd vs mad vs rescaled IQR)
c(sd(RcmdrTestDrive$after), mad(RcmdrTestDrive$after), IQR(RcmdrTestDrive$after)/1.349)
# measures of shape (skewness vs 2*SQRT(6/n) and kurtosis vs 2*SQRT(24/n))
library(e1071)
# not sure why do this
2*sqrt(6/nrow(RcmdrTestDrive))
# 0 is expected value, the whale is in the tail (pos or neg)
skewness(RcmdrTestDrive$after)
# not sure why do thi
2*sqrt(24/nrow(RcmdrTestDrive))
# 3 is expected value, 3 types in R (excess is type 1)
kurtosis(RcmdrTestDrive$after, type = 1)
# histogram
hist(RcmdrTestDrive$after)
|
# Kernel-density plot of `var`, with one filled curve per dataset.
# `var` is an unquoted column name; `plot_title`, when supplied, replaces
# the default "Density of <var> values, chromosome 28" title.
plot_filter_density <-
  function(df, var, plot_title = NULL) {
    fvar <- rlang::enquo(var)
    var_label <- rlang::quo_name(fvar)
    if (is.null(plot_title)) {
      plot_title <- glue::glue("Density of {var_label} values, chromosome 28")
    }
    ggplot(df, aes(x = !!fvar, fill = dataset)) +
      geom_density(alpha = 0.3) +
      theme_bw() +
      labs(
        x = var_label,
        y = "Kernel density",
        title = plot_title
      )
  }
# Summarise one variable per dataset: min, max, and mean of its finite values.
# Output columns are named "<var>_min", "<var>_max", "<var>_mean".
# NOTE(review): this reads a data.frame named `df` from the calling/global
# environment rather than taking it as an argument — confirm `df` is always
# defined before this function is called.
var_sum <-
  function(var) {
    var <- rlang::enquo(var)  # capture the unquoted column name
    df %>%
      # drop infinite values so min/max/mean stay finite
      filter(!is.infinite(!!var)) %>%
      group_by(dataset) %>%
      summarise(
        !!glue::glue("{rlang::quo_name(var)}_min") := min(!!var, na.rm = TRUE),
        !!glue::glue("{rlang::quo_name(var)}_max") := max(!!var, na.rm = TRUE),
        !!glue::glue("{rlang::quo_name(var)}_mean") := mean(!!var, na.rm = TRUE)
      )
  }
# Count how many values of `var` in the pre-filter dataset fall below `cutoff`
# and return a one-row table: variable name, total n, n failed, % failed.
# `dataset_name` defaults to the previously hard-coded label, so existing
# calls are unchanged; pass another label to summarise a different dataset.
summarize_dropped <-
  function(df, var, cutoff, dataset_name = "bovine_demo_pre") {
    var <- rlang::enquo(var)
    # Pull the finite, non-missing values once instead of running the same
    # filter pipeline twice (the original computed `total` and `fail` with
    # two nearly identical pipelines).
    vals <-
      df %>%
      filter(dataset == dataset_name) %>%
      filter(!is.na(!!var)) %>%
      filter(!is.infinite(!!var)) %>%
      pull(!!var)
    total <- length(vals)
    fail <- sum(vals < cutoff)
    tibble::tribble(
      ~ `Variable`, ~`n total`, ~ `n failed`, ~ `% failed`,
      rlang::quo_name(var), scales::comma(total), scales::comma(fail), scales::percent(fail/total)
    )
  }
| /source_functions/filter_eval_functions.R | no_license | harlydurbin/bovine_demo | R | false | false | 1,626 | r |
plot_filter_density <-
function(df, var, plot_title = NULL) {
fvar <- rlang::enquo(var)
plot_title <-
if(is.null(plot_title)) {
glue::glue("Density of {rlang::quo_name(fvar)} values, chromosome 28")
} else {
plot_title
}
df %>%
ggplot(aes(x = !!fvar, fill = dataset)) +
geom_density(alpha = 0.3) +
theme_bw() +
labs(
x = rlang::quo_name(fvar),
y = "Kernel density",
title = plot_title)
}
var_sum <-
function(var) {
var <- rlang::enquo(var)
df %>%
filter(!is.infinite(!!var)) %>%
group_by(dataset) %>%
summarise(
!!glue::glue("{rlang::quo_name(var)}_min") := min(!!var, na.rm = TRUE),
!!glue::glue("{rlang::quo_name(var)}_max") := max(!!var, na.rm = TRUE),
!!glue::glue("{rlang::quo_name(var)}_mean") := mean(!!var, na.rm = TRUE)
)
}
summarize_dropped <-
function(df, var, cutoff) {
var <- rlang::enquo(var)
total <-
df %>%
filter(dataset == "bovine_demo_pre") %>%
filter(!is.na(!!var)) %>%
filter(!is.infinite(!!var)) %>%
pull(!!var) %>%
length(.)
fail <-
df %>%
filter(dataset == "bovine_demo_pre") %>%
filter(!is.na(!!var)) %>%
filter(!is.infinite(!!var)) %>%
filter(!!var < cutoff) %>%
pull(!!var) %>%
length(.)
tibble::tribble(
~ `Variable`, ~`n total`, ~ `n failed`, ~ `% failed`,
rlang::quo_name(var), scales::comma(total), scales::comma(fail), scales::percent(fail/total)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_compressed_sdmx.R
\name{get_compressed_sdmx}
\alias{get_compressed_sdmx}
\title{Download and extract compressed SDMX XML}
\usage{
get_compressed_sdmx(url = NULL, verbose = FALSE, format = "gz")
}
\arguments{
\item{url}{a URL from the bulk download facility to download the zipped SDMX XML file}
\item{verbose}{a logical value with default \code{FALSE}, so detailed messages (for debugging) will not be printed.
It can also be set with \code{options(restatapi_verbose=TRUE)}.}
\item{format}{the format of the compression, either "zip" or "gz" the default value}
}
\value{
an xml class object with SDMX tags extracted and read from the downloaded file.
}
\description{
Downloads and extracts the data values from the SDMX XML data file
}
\details{
It is a sub-function to use in the \code{\link{get_eurostat_raw}} and the \code{\link{get_eurostat_data}} functions.
}
\examples{
base_url<-"https://ec.europa.eu/eurostat/"
url_end<-"estat-navtree-portlet-prod/BulkDownloadListing?file=data/agr_r_milkpr.sdmx.zip"
url<-paste0(base_url,url_end)
options(timeout=2)
sdmx_xml<-get_compressed_sdmx(url,verbose=TRUE,format="zip")
options(timeout=60)
}
| /man/get_compressed_sdmx.Rd | no_license | eurostat/restatapi | R | false | true | 1,220 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_compressed_sdmx.R
\name{get_compressed_sdmx}
\alias{get_compressed_sdmx}
\title{Download and extract compressed SDMX XML}
\usage{
get_compressed_sdmx(url = NULL, verbose = FALSE, format = "gz")
}
\arguments{
\item{url}{a URL from the bulk download facility to download the zipped SDMX XML file}
\item{verbose}{a logical value with default \code{FALSE}, so detailed messages (for debugging) will not be printed.
It can also be set with \code{options(restatapi_verbose=TRUE)}.}
\item{format}{the format of the compression, either "zip" or "gz" the default value}
}
\value{
an xml class object with SDMX tags extracted and read from the downloaded file.
}
\description{
Downloads and extracts the data values from the SDMX XML data file
}
\details{
It is a sub-function to use in the \code{\link{get_eurostat_raw}} and the \code{\link{get_eurostat_data}} functions.
}
\examples{
base_url<-"https://ec.europa.eu/eurostat/"
url_end<-"estat-navtree-portlet-prod/BulkDownloadListing?file=data/agr_r_milkpr.sdmx.zip"
url<-paste0(base_url,url_end)
options(timeout=2)
sdmx_xml<-get_compressed_sdmx(url,verbose=TRUE,format="zip")
options(timeout=60)
}
|
# Fit an elastic-net model (10-fold CV, MAE loss, alpha = 0.02) predicting the
# first column of the autonomic_ganglia correlation training set from its
# feature columns, and append the fit path summary to a results file.
library(glmnet)

mydata <- read.table("../../../../TrainingSet/FullSet/Correlation/autonomic_ganglia.csv",
                     head = TRUE, sep = ",")
# Predictors start at column 4; column 1 is the response
# (columns 2-3 are skipped — presumably identifiers; confirm against the CSV).
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible cross-validation fold assignment
# Renamed from `glm` to avoid shadowing stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.02,
                    family = "gaussian", standardize = FALSE)

# Append the per-lambda fit path to the output file.
sink('./autonomic_ganglia_012.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Correlation/autonomic_ganglia/autonomic_ganglia_012.R | no_license | esbgkannan/QSMART | R | false | false | 373 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=FALSE)
sink('./autonomic_ganglia_012.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cladogeneticTraitCont.R
\name{cladogeneticTraitCont}
\alias{cladogeneticTraitCont}
\title{Simulate Cladogenetic Trait Evolution}
\usage{
cladogeneticTraitCont(taxa, rate = 1, meanChange = 0, rootTrait = 0)
}
\arguments{
\item{taxa}{A five-column matrix of taxonomic data, as output by
simFossilTaxa}
\item{rate}{rate of trait change; variance of evolutionary change
distribution per speciation event}
\item{meanChange}{Mean change per speciation event. Default is 0; change to
simulate 'active' speciational trends, where the expected change at each
speciational event is non-zero.}
\item{rootTrait}{The trait value of the first taxon in the dataset; set to 0
by default.}
}
\value{
Returns a vector of trait values for each taxon, with value names
being the taxa IDs (column 1 of the input) with a 't' pasted (as with rtree
in the ape library).
}
\description{
This function simulates trait evolution at each speciation/branching event
in a matrix output from simFossilTaxa.
}
\details{
This function simulates continuous trait evolution where change occurs under
a Brownian model, but only at events that create new distinct morphotaxa
(i.e. species as recognized in the fossil record), either branching events
or anagenesis (pseudospeciation). These are the types of morphological
differentiation which can be simulated in the function simFossilTaxa. This
is sometimes referred to as cladogenetic or speciation trait evolution and
is related to Punctuated Equilibrium theory. Anagenetic shifts aren't
cladogenetic events per se (no branching!), so perhaps the best way to think
of this function is that it allows traits to change anytime simFossilTaxa
created a new 'morphotaxon' in a simulation.
Importantly, trait changes only occur at the base of 'new' species, thus
allowing cladogenetic trait evolution to be asymmetrical at branching
points: i.e. only one branch actually changes position in trait-space, as
expected under a budding cladogenesis model. This distinction is important
as converting the taxa matrix to a phylogeny and simulating the trait
changes under a 'speciational' tree-transformation would assume that
divergence occurred on both daughter lineages at each node. (This has been
the standard approach for simulating cladogenetic trait change on trees).
Cryptic taxa generated with prop.cryptic in simFossilTaxa will not differ at
all in trait values. These species will all be identical.
See this link for additional details:
http://nemagraptus.blogspot.com/2012/03/simulating-budding-cladogenetictrait.html
}
\examples{
set.seed(444)
taxa <- simFossilTaxa(p=0.1,q=0.1,nruns=1,mintaxa=30,maxtime=1000,plot=TRUE)
trait <- cladogeneticTraitCont(taxa)
tree <- taxa2phylo(taxa)
plotTraitgram(trait,tree,conf.int=FALSE)
#with cryptic speciation
taxa <- simFossilTaxa(p=0.1,q=0.1,prop.cryptic=0.5,nruns=1,mintaxa=30,maxtime=1000,
plot=TRUE)
trait <- cladogeneticTraitCont(taxa)
tree <- taxa2phylo(taxa)
plotTraitgram(trait,tree,conf.int=FALSE)
}
\author{
David W. Bapst
}
\seealso{
\code{\link{simFossilTaxa}},
This function is similar to Brownian motion simulation functions such as
\code{rTraitCont} in ape, \code{sim.char} in geiger and \code{fastBM} in
phytools.
See also \code{\link{unitLengthTree}} in this package and
\code{speciationalTree} in the package geiger. These are tree transformation
functions; together with BM simulation functions, they would be expected to
have a similar effect as this function (when cladogenesis is 'bifurcating'
and not 'budding'; see above).
}
| /man/cladogeneticTraitCont.Rd | permissive | pnovack-gottshall/paleotree | R | false | false | 3,598 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cladogeneticTraitCont.R
\name{cladogeneticTraitCont}
\alias{cladogeneticTraitCont}
\title{Simulate Cladogenetic Trait Evolution}
\usage{
cladogeneticTraitCont(taxa, rate = 1, meanChange = 0, rootTrait = 0)
}
\arguments{
\item{taxa}{A five-column matrix of taxonomic data, as output by
simFossilTaxa}
\item{rate}{rate of trait change; variance of evolutionary change
distribution per speciation event}
\item{meanChange}{Mean change per speciation event. Default is 0; change to
simulate 'active' speciational trends, where the expected change at each
speciational event is non-zero.}
\item{rootTrait}{The trait value of the first taxon in the dataset; set to 0
by default.}
}
\value{
Returns a vector of trait values for each taxon, with value names
being the taxa IDs (column 1 of the input) with a 't' pasted (as with rtree
in the ape library).
}
\description{
This function simulates trait evolution at each speciation/branching event
in a matrix output from simFossilTaxa.
}
\details{
This function simulates continuous trait evolution where change occurs under
a Brownian model, but only at events that create new distinct morphotaxa
(i.e. species as recognized in the fossil record), either branching events
or anagenesis (pseudospeciation). These are the types of morphological
differentiation which can be simulated in the function simFossilTaxa. This
is sometimes referred to as cladogenetic or speciation trait evolution and
is related to Punctuated Equilibrium theory. Anagenetic shifts aren't
cladogenetic events per se (no branching!), so perhaps the best way to think
of this function is that it allows traits to change anytime simFossilTaxa
created a new 'morphotaxon' in a simulation.
Importantly, trait changes only occur at the base of 'new' species, thus
allowing cladogenetic trait evolution to be asymmetrical at branching
points: i.e. only one branch actually changes position in trait-space, as
expected under a budding cladogenesis model. This distinction is important
as converting the taxa matrix to a phylogeny and simulating the trait
changes under a 'speciational' tree-transformation would assume that
divergence occurred on both daughter lineages at each node. (This has been
the standard approach for simulating cladogenetic trait change on trees).
Cryptic taxa generated with prop.cryptic in simFossilTaxa will not differ at
all in trait values. These species will all be identical.
See this link for additional details:
http://nemagraptus.blogspot.com/2012/03/simulating-budding-cladogenetictrait.html
}
\examples{
set.seed(444)
taxa <- simFossilTaxa(p=0.1,q=0.1,nruns=1,mintaxa=30,maxtime=1000,plot=TRUE)
trait <- cladogeneticTraitCont(taxa)
tree <- taxa2phylo(taxa)
plotTraitgram(trait,tree,conf.int=FALSE)
#with cryptic speciation
taxa <- simFossilTaxa(p=0.1,q=0.1,prop.cryptic=0.5,nruns=1,mintaxa=30,maxtime=1000,
plot=TRUE)
trait <- cladogeneticTraitCont(taxa)
tree <- taxa2phylo(taxa)
plotTraitgram(trait,tree,conf.int=FALSE)
}
\author{
David W. Bapst
}
\seealso{
\code{\link{simFossilTaxa}},
This function is similar to Brownian motion simulation functions such as
\code{rTraitCont} in ape, \code{sim.char} in geiger and \code{fastBM} in
phytools.
See also \code{\link{unitLengthTree}} in this package and
\code{speciationalTree} in the package geiger. These are tree transformation
functions; together with BM simulation functions, they would be expected to
have a similar effect as this function (when cladogenesis is 'bifurcating'
and not 'budding'; see above).
}
|
# venn diagrams for figure 3
install.packages('VennDiagram')
library(VennDiagram)
# Draw a three-set Venn diagram of genes passing the fold-change filter in
# each of three unfiltered data.frames (two single agents plus the combo,
# per the category labels below). Set and overlap sizes are also printed.
VenDi3 <- function(DF1,DF2,DF3,FCthresh){ #accepts unfiltered data.frames (3) and FC threshold (not log2!) -> outputs vendiagram
  # gene IDs passing the threshold in each data set
  DF1_filtered <- (filter.df.data(DF1,FCthresh))[,"gene"]
  DF2_filtered <- (filter.df.data(DF2,FCthresh))[,"gene"]
  DF3_filtered <- (filter.df.data(DF3,FCthresh))[,"gene"]
  # individual set sizes (printed for the record)
  a1 <- length(DF1_filtered)
  print(a1)
  a2 <- length(DF2_filtered)
  print(a2)
  a3 <- length(DF3_filtered)
  print(a3)
  # pairwise overlap sizes
  nn12 <- length(intersect(DF1_filtered,DF2_filtered))
  print(nn12)
  nn23 <- length(intersect(DF2_filtered,DF3_filtered))
  print(nn23)
  nn13 <- length(intersect(DF1_filtered,DF3_filtered))
  print(nn13)
  # triple overlap size
  nn123 <- length(intersect(intersect(DF1_filtered,DF2_filtered),DF3_filtered))
  print(nn123)
  # draw an unscaled (non-Euler) triple Venn on a fresh grid page
  grid.newpage()
  draw.triple.venn(area1 = a1, area2 = a2, area3 = a3, n12 = nn12, n23 = nn23, n13 = nn13,
                   n123 = nn123, category = c("EPZ-6438", "Panobinostat", "Combo"),
                   fill = c("blue", "yellow", "green"),cex = 3,cat.cex = 2.5,cat.pos = c(-10,10,180),cat.dist = c(.1,.1,.1),
                   euler.d = FALSE,scaled = FALSE,alpha = .4
  )
}
VenDi3(MA5vME5,MA5vMB5,MA5vMG5,2)
VenDi3(FA5vFE5,FA5vFB5,FA5vFG5,2)
# Find the number of genes shared between the "unique to combo" sets.
# (This line was bare prose without a leading '#', which would be a
# syntax error when the script is sourced.)
# Report genes unique to the combo treatment (present in the combo set but in
# neither single-agent set) for each of two cell lines, then print how many of
# those "unique to combo" (UTC) genes are shared across the two lines.
# DF1-DF2 / DF4-DF5 are the single agents and DF3 / DF6 the combos for the
# first (Flam) and second (MMM1) cell line respectively, mirroring the
# ordering used in the VenDi3 calls.
UTC_shared <- function(DF1,DF2,DF3,DF4,DF5,DF6,FCthresh){ #accepts unfiltered data.frames (6) and FC threshold (not log2!) -> outputs # of shared unique to combo genes
  # helper: filter one data.frame at FCthresh, print how many genes pass
  # (preserves the original's printed output), and return the gene IDs.
  filter_genes <- function(df) {
    genes <- (filter.df.data(df, FCthresh))[, "gene"]
    print(length(genes))
    genes
  }
  DF1_filtered <- filter_genes(DF1)
  DF2_filtered <- filter_genes(DF2)
  DF3_filtered <- filter_genes(DF3)
  DF4_filtered <- filter_genes(DF4)
  DF5_filtered <- filter_genes(DF5)
  DF6_filtered <- filter_genes(DF6)
  # Flam: combo genes not seen with either single agent
  UTC_F <- DF3_filtered[!(DF3_filtered %in% union(DF1_filtered, DF2_filtered))]
  print(length(UTC_F))
  # MMM1: combo genes not seen with either single agent
  UTC_M <- DF6_filtered[!(DF6_filtered %in% union(DF4_filtered, DF5_filtered))]
  print(length(UTC_M))
  print("shared UTC:")
  print(length(intersect(UTC_F, UTC_M)))
}
| /script for figure 3 venn diagrams.R | no_license | tsharding/Code-samples-from-Oncotarget-paper-data | R | false | false | 2,376 | r | # venn diagrams for figure 3
install.packages('VennDiagram')
library(VennDiagram)
VenDi3 <- function(DF1,DF2,DF3,FCthresh){ #accepts unfiltered data.frames (3) and FC threshold (not log2!) -> outputs vendiagram
DF1_filtered <- (filter.df.data(DF1,FCthresh))[,"gene"]
DF2_filtered <- (filter.df.data(DF2,FCthresh))[,"gene"]
DF3_filtered <- (filter.df.data(DF3,FCthresh))[,"gene"]
a1 <- length(DF1_filtered)
print(a1)
a2 <- length(DF2_filtered)
print(a2)
a3 <- length(DF3_filtered)
print(a3)
nn12 <- length(intersect(DF1_filtered,DF2_filtered))
print(nn12)
nn23 <- length(intersect(DF2_filtered,DF3_filtered))
print(nn23)
nn13 <- length(intersect(DF1_filtered,DF3_filtered))
print(nn13)
nn123 <- length(intersect(intersect(DF1_filtered,DF2_filtered),DF3_filtered))
print(nn123)
grid.newpage()
draw.triple.venn(area1 = a1, area2 = a2, area3 = a3, n12 = nn12, n23 = nn23, n13 = nn13,
n123 = nn123, category = c("EPZ-6438", "Panobinostat", "Combo"),
fill = c("blue", "yellow", "green"),cex = 3,cat.cex = 2.5,cat.pos = c(-10,10,180),cat.dist = c(.1,.1,.1),
euler.d = FALSE,scaled = FALSE,alpha = .4
)
}
VenDi3(MA5vME5,MA5vMB5,MA5vMG5,2)
VenDi3(FA5vFE5,FA5vFB5,FA5vFG5,2)
Find # shared b/t unique to combo
UTC_shared <- function(DF1,DF2,DF3,DF4,DF5,DF6,FCthresh){ #accepts unfiltered data.frames (6) and FC threshold (not log2!) -> outputs # of shared unique to combo genes
DF1_filtered <- (filter.df.data(DF1,FCthresh))[,"gene"]
print(length(DF1_filtered))
DF2_filtered <- (filter.df.data(DF2,FCthresh))[,"gene"]
print(length(DF2_filtered))
DF3_filtered <- (filter.df.data(DF3,FCthresh))[,"gene"]
print(length(DF3_filtered))
DF4_filtered <- (filter.df.data(DF4,FCthresh))[,"gene"]
print(length(DF4_filtered))
DF5_filtered <- (filter.df.data(DF5,FCthresh))[,"gene"]
print(length(DF5_filtered))
DF6_filtered <- (filter.df.data(DF6,FCthresh))[,"gene"]
print(length(DF6_filtered))
#Flam
UTC_F <- DF3_filtered[!(DF3_filtered %in% union(DF1_filtered,DF2_filtered))]
print(length(UTC_F))
#MMM1
UTC_M <- DF6_filtered[!(DF6_filtered %in% union(DF4_filtered,DF5_filtered))]
print(length(UTC_M))
print("shared UTC:")
print(length(intersect(UTC_F,UTC_M)))
}
UTC_shared(FA5vFE5,FA5vFB5,FA5vFG5,MA5vME5,MA5vMB5,MA5vMG5,2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgcpMethods.R
\name{plot.lgcpAutocorr}
\alias{plot.lgcpAutocorr}
\title{plot.lgcpAutocorr function}
\usage{
\method{plot}{lgcpAutocorr}(x, sel = 1:dim(x)[3], ask = TRUE, crop = TRUE,
plotwin = FALSE, ...)
}
\arguments{
\item{x}{an object of class lgcpAutocorr}
\item{sel}{vector of integers between 1 and \code{dim(x)[3]}: which grids to plot. The default is \code{1:dim(x)[3]}, in which case all grids are plotted.}
\item{ask}{logical; if TRUE the user is asked before each plot}
\item{crop}{whether or not to crop to bounding box of observation window}
\item{plotwin}{logical whether to plot the window attr(x,"window"), default is FALSE}
\item{...}{other arguments passed to image.plot}
}
\value{
a plot
}
\description{
Plots \code{lgcpAutocorr} objects: output from \code{autocorr}
}
\examples{
\dontrun{ac <- autocorr(lg,qt=c(1,2,3))}
# assumes that lg has class lgcpPredict
\dontrun{plot(ac)}
}
\seealso{
\link{autocorr}
}
| /man/plot.lgcpAutocorr.Rd | no_license | goldingn/lgcp | R | false | true | 1,013 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgcpMethods.R
\name{plot.lgcpAutocorr}
\alias{plot.lgcpAutocorr}
\title{plot.lgcpAutocorr function}
\usage{
\method{plot}{lgcpAutocorr}(x, sel = 1:dim(x)[3], ask = TRUE, crop = TRUE,
plotwin = FALSE, ...)
}
\arguments{
\item{x}{an object of class lgcpAutocorr}
\item{sel}{vector of integers between 1 and grid$len: which grids to plot. Default NULL, in which case all grids are plotted.}
\item{ask}{logical; if TRUE the user is asked before each plot}
\item{crop}{whether or not to crop to bounding box of observation window}
\item{plotwin}{logical whether to plot the window attr(x,"window"), default is FALSE}
\item{...}{other arguments passed to image.plot}
}
\value{
a plot
}
\description{
Plots \code{lgcpAutocorr} objects: output from \code{autocorr}
}
\examples{
\dontrun{ac <- autocorr(lg,qt=c(1,2,3))}
# assumes that lg has class lgcpPredict
\dontrun{plot(ac)}
}
\seealso{
\link{autocorr}
}
|
# work by Ellie
# Random forest model for house-price prediction on pre-selected attributes.
# Fixes: removed duplicate library(randomForest)/library(Metrics) loads and
# the repeated recomputation of the training RMSE; RMSEs are now computed
# once and stored in named variables.
library(randomForest)
library('tidyverse')
#library(ggplot2)
#library(purrr)
library(stringr)
#install.packages('tibble')
#library(tibble)
library(forcats)
library(caret)
library(Metrics)

#### reading shortlisted attributes
train_raw <- read.csv("train_bck_Selected.csv")
test_raw <- read.csv("test_bck_Selected.csv")
data <- rbind(train_raw, test_raw)

# 50/50 train/test split, seeded for reproducibility
set.seed(1234)
test_num <- sample(1:nrow(data), floor(nrow(data)/2))
train <- data[-test_num, ]
test <- data[test_num, ]

rf_model <- randomForest(SalePrice ~ .,
                         data = train,
                         keep.forest = TRUE,
                         importance = TRUE,
                         mtry = 6,
                         ntree = 9)
rf_model

# variable importance
varImp(rf_model)
importance(rf_model)
varImpPlot(rf_model)

# score the training data (RMSE was 12410.51 on training)
y_hat <- predict(rf_model, train)
TRAIN.rf_scored <- as_tibble(cbind(train, y_hat))
#glimpse(TRAIN.rf_scored)
rmse_train <- rmse(train$SalePrice, y_hat)
rmse_train

# score the test data (RMSE was 38866.15 on test)
y_hat1 <- predict(rf_model, test)
test.rf_scored <- as_tibble(cbind(test, y_hat1))
#glimpse(test.rf_scored)
rmse_test <- rmse(test$SalePrice, y_hat1)
rmse_test

# overfitting gap: test RMSE minus training RMSE
rmse_test - rmse_train
rmse(test$SalePrice, y_hat1) -rmse(train$SalePrice, y_hat) | /Codes/step3_Random Forest_with_parameters.r | no_license | joychentw/House-Price-Prediction | R | false | false | 1,267 | r | # work by Ellie
library(randomForest)
library('tidyverse')
#library(ggplot2)
#library(purrr)
library(stringr)
#install.packages('tibble')
#library(tibble)
library(forcats)
library(caret)
library(Metrics)
#### reading shortlisted attributes
train_raw <- read.csv("train_bck_Selected.csv")
test_raw <- read.csv("test_bck_Selected.csv")
data <- rbind(train_raw,test_raw)
set.seed(1234)
test_num <- sample(1:nrow(data), floor(nrow(data)/2))
train<- data[-test_num,]
test<- data[test_num,]
rf_model <- randomForest(SalePrice ~ . ,
data = train,
keep.forest=TRUE,
importance=TRUE,
mtry = 6,
ntree = 9)
rf_model
varImp(rf_model)
importance(rf_model)
varImpPlot(rf_model)
y_hat <- predict(rf_model, train)
TRAIN.rf_scored <- as_tibble(cbind(train, y_hat))
#glimpse(TRAIN.rf_scored)
#RMSE is 12410.51 for training
library(Metrics)
rmse(train$SalePrice, y_hat)
#on test data
library(randomForest)
y_hat1 <- predict(rf_model, test)
test.rf_scored <- as_tibble(cbind(test, y_hat1))
#glimpse(test.rf_scored)
#rmse is 38866.15
rmse(train$SalePrice, y_hat)
rmse(test$SalePrice, y_hat1)
rmse(test$SalePrice, y_hat1) -rmse(train$SalePrice, y_hat) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signs.R
\name{add_graph_f4_sign}
\alias{add_graph_f4_sign}
\title{Extend a data frame with f_4 statistics predicted by a graph.}
\usage{
add_graph_f4_sign(data, graph)
}
\arguments{
\item{data}{The data frame to get the labels to compute the \eqn{f_4}
statistics from.}
\item{graph}{The admixture graph.}
}
\value{
A data frame identical to \code{data} except with an additional
column, \code{graph_f4_sign}, containing the sign of the \eqn{f_4}
statistics as determined by the graph.
}
\description{
Extracts the sign for the \eqn{f_4} statistics predicted by the graph for all
rows in a data frame and extends the data frame with the graph \eqn{f_4}.
}
\details{
The data frame, \code{data}, must contain columns \code{W}, \code{X},
\code{Y}, and \code{Z}. The function then computes the sign of the
\eqn{f4(W, X; Y, Z)} statistics for all rows and adds these as a column,
\code{graph_f4_sign}, to the data frame.
}
| /man/add_graph_f4_sign.Rd | no_license | guzhongru/admixture_graph | R | false | true | 1,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signs.R
\name{add_graph_f4_sign}
\alias{add_graph_f4_sign}
\title{Extend a data frame with f_4 statistics predicted by a graph.}
\usage{
add_graph_f4_sign(data, graph)
}
\arguments{
\item{data}{The data frame to get the labels to compute the \eqn{f_4}
statistics from.}
\item{graph}{The admixture graph.}
}
\value{
A data frame identical to \code{data} except with an additional
column, \code{graph_f4_sign}, containing the sign of the \eqn{f_4}
statistics as determined by the graph.
}
\description{
Extracts the sign for the \eqn{f_4} statistics predicted by the graph for all
rows in a data frame and extends the data frame with the graph \eqn{f_4}.
}
\details{
The data frame, \code{data}, must contain columns \code{W}, \code{X},
\code{Y}, and \code{Z}. The function then computes the sign of the
\eqn{f4(W, X; Y, Z)} statistics for all rows and adds these as a column,
\code{graph_f4_sign}, to the data frame.
}
|
#' Element-wise logit function
#'
#' Applies the logit (log-odds) transformation to every element of a
#' probability matrix. The formula is \code{logit(x) = log(x/(1-x))}.
#'
#' @param x a matrix containing probabilities strictly inside (0, 1)
#'
#' @return The logit transformation of the probability matrix
#'
#' @examples
#' \dontrun{logit(matrix(rbeta(3*4,1,1),nrow=3,ncol=4))}
#'
#' @export
logit <- function(x) {
    # values outside the open interval (0, 1) have no finite log-odds
    stopifnot(all(x > 0), all(x < 1))
    odds <- x/(1 - x)
    log(odds)
}
#' Element-wise inverse logit function
#'
#' Applies the inverse logit transformation to a matrix of real numbers,
#' mapping each element into (0, 1). The formula is
#' \code{inver_logit(x) = (1+exp(-x))^(-1)}.
#'
#' @param x a matrix containing real numbers
#'
#' @return a probability matrix
#'
#' @examples
#' \dontrun{inver_logit(matrix(rnorm(3*4),nrow=3,ncol=4))}
#'
#' @export
inver_logit <- function(x) {
    # inverse of logit(): maps any real number back into (0, 1)
    prob <- 1/(1 + exp(-x))
    prob
}
#' Index generating function
#'
#' This function generates the column indexes of the i-th data set inside
#' the concatenated data matrix, given only the number of variables of each
#' data set.
#'
#' @param i index for the ith data set.
#' @param ds a vector contains the number of variables for all
#' the data sets.
#'
#' @return A series of indexes for the variables in the ith data set
#'
#' @examples
#' \dontrun{index_Xi(2, c(400,200,100))}
index_Xi <- function(i, ds) {
    # Column offset of data set i is the total width of the preceding sets.
    # sum() over an empty vector is 0, so i == 1 needs no special case
    # (the original code branched on i == 1 unnecessarily).
    offset <- sum(ds[seq_len(i - 1)])
    (offset + 1):(offset + ds[i])
}
#' Compute the variation explained ratios when Gaussian data is used
#'
#' This function computes the variation explained ratios for a component
#' model on a quantitative data set. Details can be found
#' in the paper \url{https://arxiv.org/abs/1902.06241}.
#'
#' @param X a \eqn{n*d} quantitative data set
#' @param mu a \eqn{n*1} column offset term
#' @param A a \eqn{n*R} score matrix
#' @param B a \eqn{d*R} loading matrix
#' @param Q a \eqn{n*d} weighting matrix of the same size of X
#'
#' @return A list with the variation explained ratio of the whole model
#' (varExp_total) and of each individual component (varExp_PCs).
#'
#' @examples
#' \dontrun{ out <- varExp_Gaussian(X,mu,A,B,Q)
#' out$varExp_total
#' out$varExp_PCs
#' }
varExp_Gaussian <- function(X, mu, A, B, Q) {
    m <- dim(X)[1]
    R <- dim(B)[2]
    # center by the column offset and apply the weights
    X_centered <- X - ones(m) %*% mu
    QX <- Q * X_centered
    # weighted sum of squares of the null model (offset only)
    l_null <- norm(QX, "F")^2
    # weighted residual sum of squares of the full model
    E_hat <- X_centered - tcrossprod(A, B)
    l_model <- norm(Q * E_hat, "F")^2
    # weighted least squares after fitting each single component; the
    # expanded quadratic form avoids re-centering the data per component
    l_PCs <- vapply(seq_len(R), function(r) {
        a_r <- A[, r]
        b_r <- B[, r]
        QPCr <- Q * (tcrossprod(a_r, b_r))
        drop(l_null - 2 * (crossprod((QX %*% b_r), a_r)) + crossprod(a_r, (QPCr %*% b_r)))
    }, numeric(1))
    # convert the least squares values into explained-variation percentages
    out <- list()
    out$varExp_total <- (1 - l_model/l_null) * 100
    out$varExp_PCs <- (1 - l_PCs/l_null) * 100
    return(out)
}
#' ones function
#'
#' This function generates a column vector (n x 1 matrix) of ones.
#'
#' @param n a positive integer giving the length of the vector
#'
#' @return a n dimensional column vector of ones
#'
#' @examples
#' \dontrun{ones(10)}
#'
#' @export
ones <- function(n) {
    # a non-positive length makes no sense for a ones vector
    stopifnot(n > 0)
    matrix(1, nrow = n, ncol = 1)
}
#' Generate log-spaced sequence
#'
#' This function generates a sequence whose elements are evenly spaced on
#' the log10 scale. The inputs mirror those of base \code{seq}.
#'
#' @param from The initial value of the sequence (must be > 0)
#' @param to The last value of the sequence (must exceed \code{from})
#' @param length.out desired length of the sequence
#'
#' @return A \code{length.out} dimensional sequence
#'
#' @examples
#' \dontrun{log10_seq(from=1, to=500, length.out=30)}
#'
#' @export
log10_seq <- function(from, to, length.out) {
    # the log10 scale requires 0 < from < to
    stopifnot(to > from, from > 0)
    exponents <- seq(log10(from), log10(to), length.out = length.out)
    10^exponents
}
#' Logistic loss function
#'
#' This function computes the logistic (negative log-likelihood) loss of a
#' binary data set given its natural parameter (log-odds) matrix.
#'
#' @param X a binary matrix containing only 0 and 1
#' @param Theta a natural parameter (log-odds) matrix of the same size as X
#'
#' @return The logistic loss
#'
#' @examples
#' \dontrun{
#' Theta <- matrix(rnorm(3*4),3,4)
#' X <- matrix(data=0,3,4)
#' X[Theta>0] <- 1
#' obj_logistic(X,Theta)
#' }
obj_logistic <- function(X, Theta) {
    # X: binary data matrix; Theta: offset + Z, the log-odds.
    # When x=1 the loss is -log(1/(1+exp(-theta))); when x=0 it is
    # -log(1/(1+exp(theta))). Mapping x to 2x-1 in {-1,+1} unifies both
    # cases into -log(1/(1+exp(-x*theta))), applied element-wise.
    #
    # Validation fix: the previous check compared unique(as.vector(X))
    # against the fixed vector c(1, 0), which wrongly errored whenever 0
    # appeared before 1 in X, or when X contained only one of the two values.
    stopifnot(all(X %in% c(0, 1)))
    # logistic loss (the redundant nested sum(sum(...)) was dropped)
    X <- 2 * X - 1
    tmp <- 1/(1 + exp(-X * Theta))
    -sum(log(tmp))
}
#' Evaluating pESCA model when simulated parameters are available
#'
#' This function will evaluate the the performance of the constructed
#' pESCA model with group penalty when the simulated parameters are
#' available.
#'
#' @param mu estimated offset term in column vector form
#' @param A estimated score matrix
#' @param B estimated loading matrix
#' @param S estimated group sparse pattern on \code{B}
#' @param ds a vector contains the number of variables in multiple data sets
#' @param simulatedData the output of function \code{dataSimu_group_sparse}
#'
#' @return This function returns a list contains \itemize{
#' \item RVs_structs: a vector contains the RV coefficients in estimating
#' the global common (C123), local common (C12, C13, C23) and distinct structures
#' (D1, D2, D3);
#' \item Ranks_structs: a vector contains the ranks of estimated
#' C123, C12, C13, C23, D1, D2, D3;
#' \item RMSEs_params: the relative mean squared errors (RMSEs) in estimating
#' the simulated parameters \eqn{\Theta, \Theta_1, \Theta_2, \Theta_3, \mu}
#' }
#'
#' @examples
#' \dontrun{eval_metrics_simu_group(mu,A,B,S,ds,simulatedData)}
#'
#' @export
eval_metrics_simu_group <- function(mu, A, B, S, ds, simulatedData) {
    # unpack the ground-truth parameters produced by the simulation
    Theta_simu <- simulatedData$Theta_simu
    mu_simu <- simulatedData$mu_simu
    U_simu <- simulatedData$U_simu
    D_simu <- simulatedData$D_simu
    V_simu <- simulatedData$V_simu
    n <- dim(U_simu)[1]
    # simulated parameters Theta1 Theta2 Theta3 (one block of columns per data set)
    Theta1_simu <- Theta_simu[, index_Xi(1, ds)]
    Theta2_simu <- Theta_simu[, index_Xi(2, ds)]
    Theta3_simu <- Theta_simu[, index_Xi(3, ds)]
    # NOTE(review): the simulation appears to allocate 3 factors per structure,
    # in the fixed order C123, C12, C13, C23, D1, D2, D3 -- confirm against
    # dataSimu_group_sparse.
    # C123: global common structure, factors 1-3
    i <- 1
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    C123_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    # C12: local common structure of data sets 1 and 2, factors 4-6
    i <- 2
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    C12_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    C12_simu <- C12_simu[, c(index_Xi(1, ds), index_Xi(2, ds))]
    # C13: local common structure of data sets 1 and 3, factors 7-9
    i <- 3
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    C13_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    C13_simu <- C13_simu[, c(index_Xi(1, ds), index_Xi(3, ds))]
    # C23: local common structure of data sets 2 and 3, factors 10-12
    i <- 4
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    C23_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    C23_simu <- C23_simu[, c(index_Xi(2, ds), index_Xi(3, ds))]
    # D1: structure distinct to data set 1, factors 13-15
    i <- 5
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    D1_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    D1_simu <- D1_simu[, index_Xi(1, ds)]
    # D2: structure distinct to data set 2, factors 16-18
    i <- 6
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    D2_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    D2_simu <- D2_simu[, index_Xi(2, ds)]
    # D3: structure distinct to data set 3, factors 19-21
    i <- 7
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    D3_simu <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*% t(V_simu[, index_factors])
    D3_simu <- D3_simu[, index_Xi(3, ds)]
    # model evaluation using simulated parameters: rebuild the estimated
    # Theta and carve out the estimated counterparts of each structure from
    # the group-sparsity pattern S (rows = data sets, columns = components)
    Theta_Hat <- ones(n) %*% t(mu) + A %*% t(B)
    Theta1_Hat <- Theta_Hat[, index_Xi(1, ds)]
    Theta2_Hat <- Theta_Hat[, index_Xi(2, ds)]
    Theta3_Hat <- Theta_Hat[, index_Xi(3, ds)]
    # components active in all three data sets -> global common structure
    C123_index <- (colSums(S) == 3)
    C123_Hat <- A[, C123_index] %*% t(B[, C123_index])
    # components active in exactly data sets 1 and 2
    C12_index <- (colSums(S[1:2, ]) == 2) & (!C123_index)
    C12_Hat <- A[, C12_index] %*% t(B[, C12_index])
    C12_Hat <- C12_Hat[, c(index_Xi(1, ds), index_Xi(2, ds))]
    # components active in exactly data sets 1 and 3
    C13_index <- (colSums(S[c(1, 3), ]) == 2) & (!C123_index)
    C13_Hat <- A[, C13_index] %*% t(B[, C13_index])
    C13_Hat <- C13_Hat[, c(index_Xi(1, ds), index_Xi(3, ds))]
    # components active in exactly data sets 2 and 3
    C23_index <- (colSums(S[c(2, 3), ]) == 2) & (!C123_index)
    C23_Hat <- A[, C23_index] %*% t(B[, C23_index])
    C23_Hat <- C23_Hat[, c(index_Xi(2, ds), index_Xi(3, ds))]
    # components active in exactly one data set -> distinct structures
    D_index <- (colSums(S) == 1)
    D1_index <- D_index & (S[1, ] == 1)
    D2_index <- D_index & (S[2, ] == 1)
    D3_index <- D_index & (S[3, ] == 1)
    D1_Hat <- A[, D1_index] %*% t(B[, D1_index])
    D1_Hat <- D1_Hat[, index_Xi(1, ds)]
    D2_Hat <- A[, D2_index] %*% t(B[, D2_index])
    D2_Hat <- D2_Hat[, index_Xi(2, ds)]
    D3_Hat <- A[, D3_index] %*% t(B[, D3_index])
    D3_Hat <- D3_Hat[, index_Xi(3, ds)]
    # RV coefficients of estimated structures (similarity to the truth)
    RV_C123 <- RV_modified(C123_simu, C123_Hat)
    RV_C12 <- RV_modified(C12_simu, C12_Hat)
    RV_C13 <- RV_modified(C13_simu, C13_Hat)
    RV_C23 <- RV_modified(C23_simu, C23_Hat)
    RV_D1 <- RV_modified(D1_Hat, D1_simu)
    RV_D2 <- RV_modified(D2_Hat, D2_simu)
    RV_D3 <- RV_modified(D3_Hat, D3_simu)
    RVs_structures <- c(RV_C123, RV_C12, RV_C13, RV_C23, RV_D1, RV_D2, RV_D3)
    names(RVs_structures) <- c("C123", "C12", "C13", "C23", "D1", "D2", "D3")
    # ranks of estimated structures = number of components assigned to each
    Ranks_structures <- c(sum(C123_index), sum(C12_index), sum(C13_index), sum(C23_index), sum(D1_index), 
        sum(D2_index), sum(D3_index))
    names(Ranks_structures) <- c("C123", "C12", "C13", "C23", "D1", "D2", "D3")
    # relative mean squared errors of the estimated parameters
    RMSE_Theta1 <- norm(Theta1_Hat - Theta1_simu, "F")^2/norm(Theta1_simu, "F")^2
    RMSE_Theta2 <- norm(Theta2_Hat - Theta2_simu, "F")^2/norm(Theta2_simu, "F")^2
    RMSE_Theta3 <- norm(Theta3_Hat - Theta3_simu, "F")^2/norm(Theta3_simu, "F")^2
    RMSE_Theta <- norm(Theta_Hat - Theta_simu, "F")^2/norm(Theta_simu, "F")^2
    RMSE_mu <- norm(mu - mu_simu, "F")^2/norm(mu_simu, "F")^2
    RMSEs_parameters <- c(RMSE_Theta, RMSE_Theta1, RMSE_Theta2, RMSE_Theta3, RMSE_mu)
    names(RMSEs_parameters) <- c("Theta", "Theta_1", "Theta_2", "Theta_3", "mu")
    output <- list()
    output$RVs_structs <- RVs_structures
    output$Ranks_structs <- Ranks_structures
    output$RMSEs_params <- RMSEs_parameters
    return(output)
}
#' Modified RV coefficient of two matrices
#'
#' This function computes the modified RV coefficient, a similarity measure
#' between the row configurations of two matrices. Details of the modified
#' RV coefficient can be found in the paper
#' \url{https://academic.oup.com/bioinformatics/article/25/3/401/244239}.
#'
#' @param X a matrix
#' @param Y another matrix with the same number of rows as X
#'
#' @return The modified RV coefficient between the two matrices
#'
#' @examples
#' \dontrun{RV_modified(X,Y)}
RV_modified <- function(X, Y) {
    # cross-product (configuration) matrices of the two data sets
    XX <- tcrossprod(X)
    YY <- tcrossprod(Y)
    # the modified RV removes the diagonals before comparing configurations
    XX0 <- XX - diag(diag(XX))
    YY0 <- YY - diag(diag(YY))
    sum(diag(XX0 %*% YY0))/norm(XX0, "F")/norm(YY0, "F")
}
#' Split multiple data sets into training and test sets
#'
#' This function will split multiple data sets into training and test
#' sets. Nonmissing elements are randomly selected as the test sets.
#' Then the selected elements are taken as missing, and regarded as
#' training sets. The details can be found in \url{https://arxiv.org/abs/1902.06241}.
#'
#' @inheritParams pESCA_CV
#' @param ratio_mis how many percent of test set could be? default: 0.1
#'
#' @return This function returns a list contains \itemize{
#' \item trainSets: a list contains the training sets;
#' \item testSets: a list contains the test sets;
#' \item indexSets: a list contains the index sets.
#' }
#'
#' @examples
#' \dontrun{dataSplit(dataSets,dataTypes,ratio_mis=0.1)}
dataSplit <- function(dataSets, dataTypes, ratio_mis = 0.1) {
    # number of data sets, size of each data set
    nDataSets <- length(dataSets) # number of data sets
    n <- rep(0, nDataSets) # number of samples
    d <- rep(0, nDataSets) # numbers of variables in different data sets
    for (i in 1:nDataSets) {
        n[i] <- dim(dataSets[[i]])[1]
        d[i] <- dim(dataSets[[i]])[2]
    }
    # all data sets share the same samples, so only the first row count is kept
    n <- n[1]
    # split data sets into training set and test set
    trainSets <- as.list(1:nDataSets) # training set
    testSets <- as.list(1:nDataSets) # test set
    indexSets <- as.list(1:nDataSets) # index of the test set
    for (i in 1:nDataSets) {
        # index out the i-th data set
        Xi <- dataSets[[i]]
        dataType_Xi <- dataTypes[i]
        # generate the index of the test set; indexes run over the matrix
        # in column-major (linear) order
        full_ind_vec <- 1:(n * d[i])
        # if it is binary data, use hierarchical (stratified) sampling so the
        # test set preserves the 0/1 proportions
        if (dataType_Xi == "B") {
            ones_ind_vec <- full_ind_vec[Xi == 1]
            zeros_ind_vec <- full_ind_vec[Xi == 0]
            index_Xi_ones <- sample(ones_ind_vec, round(ratio_mis * length(ones_ind_vec)))
            index_Xi_zeros <- sample(zeros_ind_vec, round(ratio_mis * length(zeros_ind_vec)))
            # sanity check: the sampled indexes must hit the intended strata
            if (!(all(Xi[index_Xi_ones] == 1)) | !(all(Xi[index_Xi_zeros] == 0))) 
                message("the hierachical sampling does not work")
            index_Xi_test <- c(index_Xi_ones, index_Xi_zeros)
        } else {
            # quantitative data: sample uniformly among the non-missing cells
            non_NaN_mat <- 1 - is.na(Xi)
            non_NaN_ind_vec <- full_ind_vec[non_NaN_mat > 0]
            index_Xi_test <- sample(non_NaN_ind_vec, round(ratio_mis * length(non_NaN_ind_vec)))
        }
        # generate the train set: held-out cells are masked as NA
        Xi_train <- Xi
        Xi_train[index_Xi_test] <- NA
        trainSets[[i]] <- Xi_train
        # generate the test set: the held-out values themselves (a vector)
        Xi_test <- Xi[index_Xi_test]
        testSets[[i]] <- Xi_test
        indexSets[[i]] <- index_Xi_test
    }
    # return
    result <- list()
    result$trainSets <- trainSets
    result$testSets <- testSets
    result$indexSets <- indexSets
    return(result)
}
#' Compute CV errors
#'
#' This function computes the cross-validation error of a fitted model on
#' the held-out test elements of every data set.
#'
#' @param splitedData output of function \code{dataSplit}
#' @param dataTypes the data types for each data set ("G" or "B")
#' @param alphas dispersion parameters for each data set
#' @param ThetaHat estimated Theta
#' @param d a numeric vector contains the number of variables of data sets
#'
#' @return A numeric vector with one CV error per data set
#'
#' @examples
#' \dontrun{cvError_comput(splitedData,dataTypes,alphas,ThetaHat,d)}
cvError_comput <- function(splitedData, dataTypes, alphas, ThetaHat, d) {
    testSets <- splitedData$testSets
    indexSets <- splitedData$indexSets
    vapply(seq_along(d), function(i) {
        # columns of ThetaHat belonging to the i-th data set
        ThetaHat_Xi <- ThetaHat[, index_Xi(i, d)]
        idx_test <- indexSets[[i]]
        Xi_test <- testSets[[i]]
        if (dataTypes[i] == "G") {
            # Gaussian block: weighted half squared error on held-out cells
            err <- (1/alphas[i]) * 0.5 * norm(Xi_test - ThetaHat_Xi[idx_test], "2")^2
        } else if (dataTypes[i] == "B") {
            # Bernoulli block: weighted logistic loss on held-out cells
            err <- (1/alphas[i]) * obj_logistic(Xi_test, ThetaHat_Xi[idx_test])
        }
        # any other data type errors out here, as in the original code
        err
    }, numeric(1))
}
#' A function to compute the trace of two matrices product
#'
#' This function will compute the trace of two matrices
#'
#' @param X a numerical matrix
#' @param Y a numerical matrix with the same size as \code{X}
#'
#' @return This function returns a scalar contains the trace
#'
#' @examples
#' \dontrun{trace_fast(X, Y)}
trace_fast <- function(X, Y) {
    # fast trace function if n>p, trace(X,Y) = trace(X'Y); if n<p, trace(X,Y) = trace(YX');
    # both matrices must have identical dimensions for the trace to be defined
    stopifnot(all(dim(X) == dim(Y)))
    # fast_traceC is presumably a compiled (Rcpp) helper from this package
    # that picks the cheaper of the two trace formulations -- TODO confirm
    result <- fast_traceC(X, Y)
    return(result)
}
| /R/supplementary_functions.R | no_license | YipengUva/RpESCA | R | false | false | 16,904 | r | #' Element-wise logit function
#'
#' This function will do logit transformation on a probability matrix.
#' The formula is \code{logit(x) = log(x/(1-x))}
#'
#' @param x a matrix contains probabilities
#'
#' @return The logit transformation of a probability matrix
#'
#' @examples
#' \dontrun{logit(matrix(rbeta(3*4,1,1),nrow=3,ncol=4))}
#'
#' @export
logit <- function(x) {
# the domain of logit() should be in (0,1)
stopifnot(all(x > 0) & all(x < 1))
log(x/(1 - x))
}
#' Element-wise inverse logit function
#'
#' This function will do inveser logit transformation on a matrix
#' contains real numbers. The formula is \code{inver_logit(x) = (1+exp(-x))^(-1)}
#'
#' @param x a matrix contains real numbers
#'
#' @return a probability matrix
#'
#' @examples
#' \dontrun{inver_logit(matrix(rnorm(3*4),nrow=3,ncol=4))}
#'
#' @export
inver_logit <- function(x) {
1/(1 + exp(-x))
}
#' Index generating function
#'
#' This function will generate a series of indexes used to index out
#' the variables of the ith data set when we only know the number of
#' variables
#'
#' @param i index for the ith data set.
#' @param ds a vector contains the number of variables for all
#' the data sets.
#'
#' @return A series of indexes for the variables in the ith data set
#'
#' @examples
#' \dontrun{index_Xi(2, c(400,200,100))}
index_Xi <- function(i, ds) {
if (i == 1) {
columns_Xi <- 1:ds[1]
} else {
columns_Xi <- (sum(ds[1:(i - 1)]) + 1):sum(ds[1:i])
}
columns_Xi
}
#' Compute the variation expalined raitios when Gaussian data is used
#'
#' This function computes the variation expalined ratios for component
#' model on quantitative data sets. Details can be found
#' in the paper \url{https://arxiv.org/abs/1902.06241}.
#'
#' @param X a \eqn{n*d} quantitative data set
#' @param mu a \eqn{n*1} column offset term
#' @param A a \eqn{n*R} score matrix
#' @param B a \eqn{d*R} loading matrix
#' @param Q a \eqn{n*d} weighting matrix of the same size of X
#'
#' @return This function returns a list contains the variaiton expalined
#' ratios of the whole model (varExp_total) or of each component (varExp_PCs).
#'
#' @examples
#' \dontrun{ out <- varExp_Gaussian(X,mu,A,B,Q)
#' out$varExp_total
#' out$varExp_PCs
#' }
varExp_Gaussian <- function(X, mu, A, B, Q) {
# parameter used
m = dim(X)[1]
# compute the loglikelihood of mle and null model
X_centered <- X - ones(m) %*% mu
QX <- Q * X_centered
# likelihood of the null model
l_null <- norm(QX, "F")^2 # null model
# likelihood of the full model
E_hat <- X_centered - tcrossprod(A, B)
QE_hat <- Q * E_hat
l_model <- norm(QE_hat, "F")^2 # full model
# compute the least squares of an individual PC
R <- dim(B)[2]
l_PCs <- rep(0, R)
for (r in 1:R) {
Ar <- A[, r]
Br <- B[, r]
QPCr <- Q * (tcrossprod(Ar, Br))
l_PCs[r] <- l_null - 2 * (crossprod((QX %*% Br), Ar)) + crossprod(Ar, (QPCr %*% Br))
}
# compute variation explained by each PC
varExp_PCs <- (1 - l_PCs/l_null) * 100
# total variation explained
varExp_total <- (1 - l_model/l_null) * 100
# return the results
out <- list()
out$varExp_total <- varExp_total
out$varExp_PCs <- varExp_PCs
return(out)
}
#' ones function
#'
#' This function will generate a column of ones
#'
#' @param n a integer number indicates the dimension of the vector
#'
#' @return a n dimensional column vector contains ones
#'
#' @examples
#' \dontrun{ones(10)}
#'
#' @export
ones <- function(n) {
stopifnot(n > 0)
as.matrix(rep(1, n))
}
#' Generate log-spaced sequence
#'
#' This function will generate a log-spaced sequence.
#' The inputs are the same as function \code{seq} in base library.
#'
#' @param from The initial value of the sequence
#' @param to The last value of the sequence
#' @param length.out desired length of the sequence
#'
#' @return A \code{length.out} dimensional squence
#'
#' @examples
#' \dontrun{log10_seq(from=1, to=500, length.out=30)}
#'
#' @export
log10_seq <- function(from, to, length.out) {
# to must larger than from and from must be larger than 0
stopifnot((to > from) & (from > 0))
10^(seq(log10(from), log10(to), length.out = length.out))
}
#' Logistic loss function
#'
#' This function will compute the logistic loss when a
#' binary data set and its natural parameter matrix is avaliable.
#'
#' @param X a binary matrix
#' @param Theta a natual parameter (log-odds) matrix
#'
#' @return The logistic loss
#'
#' @examples
#' \dontrun{
#' Theta <- matrix(rnorm(3*4),3,4)
#' X <- matrix(data=0,3,4)
#' X[Theta>0] <- 1
#' obj_logistic(X,Theta)
#' }
obj_logistic <- function(X, Theta) {
# X: binary data matrix Theta: offset + Z, the log-odds Theta = ones(m,1)*mu + Z; when x=1, the loss
# is -log(1/(1+exp(-theta))). when x=0, the loss is -log(1/(1+exp(theta)). The following is the
# matrix form
# X must be a binary matrix and contains 1 and 0
stopifnot(all(unique(as.vector(X)) == c(1, 0)))
# logistic loss
X <- 2 * X - 1
tmp <- 1/(1 + exp(-X * Theta))
out <- -sum(sum(log(tmp)))
out
}
#' Evluating pESCA model when simulated parameters are avaliable
#'
#' This function will evaluate the the performance of the constructed
#' pESCA model with group penalty when the simulated parameters are
#' avaliable.
#'
#' @param mu estimated offset term in column vector form
#' @param A estimated score matrix
#' @param B estimated loading matrix
#' @param S estimated group sparse pattern on \code{B}
#' @param ds a vector contains the number of variables in multiple data sets
#' @param simulatedData the output of function \code{dataSimu_group_sparse}
#'
#' @return This function returns a list contains \itemize{
#' \item RVs_structs: a vector contains the RV coefficients in estimating
#' the global common (C123), local common (C12, C13, C23) and distinct structures
#' (D1, D2, D3);
#' \item Ranks_structs: a vector contains the ranks of estimated
#' C123, C12, C13, C23, D1, D2, D3;
#' \item RMSEs_params: the relative mean squared errors (RMSEs) in estimating
#' the simulated parameters \eqn{\Theta, \Theta_1, \Theta_2, \Theta_3, \mu}
#' }
#'
#' @examples
#' \dontrun{eval_metrics_simu_group(mu,A,B,S,ds,simulatedData)}
#'
#' @export
eval_metrics_simu_group <- function(mu, A, B, S, ds, simulatedData) {
  # Unpack the simulated ground truth produced by dataSimu_group_sparse().
  Theta_simu <- simulatedData$Theta_simu
  mu_simu <- simulatedData$mu_simu
  U_simu <- simulatedData$U_simu
  D_simu <- simulatedData$D_simu
  V_simu <- simulatedData$V_simu
  n <- dim(U_simu)[1]
  # Column indices of the three data sets inside the concatenated matrix.
  cols1 <- index_Xi(1, ds)
  cols2 <- index_Xi(2, ds)
  cols3 <- index_Xi(3, ds)
  # Simulated parameters Theta1, Theta2, Theta3.
  Theta1_simu <- Theta_simu[, cols1]
  Theta2_simu <- Theta_simu[, cols2]
  Theta3_simu <- Theta_simu[, cols3]
  # Simulated structure built from the i-th triplet of factors
  # (factors 3(i-1)+1 .. 3i), optionally restricted to a column subset.
  simu_struct <- function(i, columns = NULL) {
    index_factors <- (3 * (i - 1) + 1):(3 * i)
    res <- U_simu[, index_factors] %*% diag(D_simu[index_factors]) %*%
      t(V_simu[, index_factors])
    if (is.null(columns)) res else res[, columns]
  }
  # Factor triplets 1..7 encode C123, C12, C13, C23, D1, D2, D3 in this order.
  C123_simu <- simu_struct(1)
  C12_simu <- simu_struct(2, c(cols1, cols2))
  C13_simu <- simu_struct(3, c(cols1, cols3))
  C23_simu <- simu_struct(4, c(cols2, cols3))
  D1_simu <- simu_struct(5, cols1)
  D2_simu <- simu_struct(6, cols2)
  D3_simu <- simu_struct(7, cols3)
  # Model evaluation using the estimated parameters.
  Theta_Hat <- ones(n) %*% t(mu) + A %*% t(B)
  Theta1_Hat <- Theta_Hat[, cols1]
  Theta2_Hat <- Theta_Hat[, cols2]
  Theta3_Hat <- Theta_Hat[, cols3]
  # Classify the estimated factors (columns of A/B) by their sparsity
  # pattern in S: present in all three, exactly two, or exactly one data set.
  C123_index <- (colSums(S) == 3)
  C12_index <- (colSums(S[1:2, ]) == 2) & (!C123_index)
  C13_index <- (colSums(S[c(1, 3), ]) == 2) & (!C123_index)
  C23_index <- (colSums(S[c(2, 3), ]) == 2) & (!C123_index)
  D_index <- (colSums(S) == 1)
  D1_index <- D_index & (S[1, ] == 1)
  D2_index <- D_index & (S[2, ] == 1)
  D3_index <- D_index & (S[3, ] == 1)
  # Estimated structure spanned by the selected factors of A and B,
  # optionally restricted to a column subset.
  est_struct <- function(col_index, columns = NULL) {
    res <- A[, col_index] %*% t(B[, col_index])
    if (is.null(columns)) res else res[, columns]
  }
  C123_Hat <- est_struct(C123_index)
  C12_Hat <- est_struct(C12_index, c(cols1, cols2))
  C13_Hat <- est_struct(C13_index, c(cols1, cols3))
  C23_Hat <- est_struct(C23_index, c(cols2, cols3))
  D1_Hat <- est_struct(D1_index, cols1)
  D2_Hat <- est_struct(D2_index, cols2)
  D3_Hat <- est_struct(D3_index, cols3)
  # RV coefficients of the estimated structures.
  RVs_structures <- c(
    RV_modified(C123_simu, C123_Hat),
    RV_modified(C12_simu, C12_Hat),
    RV_modified(C13_simu, C13_Hat),
    RV_modified(C23_simu, C23_Hat),
    RV_modified(D1_Hat, D1_simu),
    RV_modified(D2_Hat, D2_simu),
    RV_modified(D3_Hat, D3_simu)
  )
  names(RVs_structures) <- c("C123", "C12", "C13", "C23", "D1", "D2", "D3")
  # Ranks of the estimated structures = number of factors assigned to each.
  Ranks_structures <- c(
    sum(C123_index), sum(C12_index), sum(C13_index), sum(C23_index),
    sum(D1_index), sum(D2_index), sum(D3_index)
  )
  names(Ranks_structures) <- c("C123", "C12", "C13", "C23", "D1", "D2", "D3")
  # Relative mean squared errors of the estimated parameters.
  rmse <- function(est, simu) norm(est - simu, "F")^2 / norm(simu, "F")^2
  RMSEs_parameters <- c(
    rmse(Theta_Hat, Theta_simu),
    rmse(Theta1_Hat, Theta1_simu),
    rmse(Theta2_Hat, Theta2_simu),
    rmse(Theta3_Hat, Theta3_simu),
    rmse(mu, mu_simu)
  )
  names(RMSEs_parameters) <- c("Theta", "Theta_1", "Theta_2", "Theta_3", "mu")
  output <- list()
  output$RVs_structs <- RVs_structures
  output$Ranks_structs <- Ranks_structures
  output$RMSEs_params <- RMSEs_parameters
  return(output)
}
#' Modified RV coefficient of two matrices
#'
#' This function will compute the modified RV coefficient of two
#' matrices with the same number of rows. The details of the modified RV
#' coefficient can be found
#' in the paper \url{https://academic.oup.com/bioinformatics/article/25/3/401/244239}.
#'
#' @param X a matrix
#' @param Y another matrix with the same number of rows as \code{X}
#'
#' @return This function returns the modified RV coefficient between
#' two matrices
#'
#' @examples
#' \dontrun{RV_modified(X,Y)}
RV_modified <- function(X, Y) {
  # Cross-product matrices with the diagonal removed (the "modified" part).
  AA0 <- tcrossprod(X)
  diag(AA0) <- 0
  BB0 <- tcrossprod(Y)
  diag(BB0) <- 0
  # trace(AA0 %*% BB0) == sum(AA0 * BB0) because both matrices are
  # symmetric; the elementwise form avoids an O(n^3) matrix product.
  sum(AA0 * BB0) / (norm(AA0, "F") * norm(BB0, "F"))
}
#' Split multiple data sets into training and test sets
#'
#' This function will split multiple data sets into training and test
#' sets. Nonmissing elements are randomly selected as the test sets.
#' Then the selected elements are taken as missing, and regarded as
#' training sets. The details can be found in \url{https://arxiv.org/abs/1902.06241}.
#'
#' @inheritParams pESCA_CV
#' @param ratio_mis proportion of the nonmissing elements held out as the
#' test set; default: 0.1
#'
#' @return This function returns a list contains \itemize{
#' \item trainSets: a list contains the training sets;
#' \item testSets: a list contains the test sets;
#' \item indexSets: a list contains the index sets.
#' }
#'
#' @examples
#' \dontrun{dataSplit(dataSets,dataTypes,ratio_mis=0.1)}
dataSplit <- function(dataSets, dataTypes, ratio_mis = 0.1) {
  # number of data sets, size of each data set
  nDataSets <- length(dataSets)  # number of data sets
  n <- rep(0, nDataSets)  # number of samples
  d <- rep(0, nDataSets)  # numbers of variables in different data sets
  for (i in 1:nDataSets) {
    n[i] <- dim(dataSets[[i]])[1]
    d[i] <- dim(dataSets[[i]])[2]
  }
  n <- n[1]  # all data sets share the same samples
  # split data sets into training set and test set
  trainSets <- as.list(1:nDataSets)  # training set
  testSets <- as.list(1:nDataSets)  # test set
  indexSets <- as.list(1:nDataSets)  # index of the test set
  for (i in 1:nDataSets) {
    # index out the i-th data set
    Xi <- dataSets[[i]]
    dataType_Xi <- dataTypes[i]
    # if it is binary data, use hierarchical (stratified) sampling so that
    # ones and zeros are held out in proportion
    if (dataType_Xi == "B") {
      # which() drops NA entries, so missing elements can never be
      # selected into the test set (logical subsetting would keep NAs)
      ones_ind_vec <- which(Xi == 1)
      zeros_ind_vec <- which(Xi == 0)
      index_Xi_ones <- sample(ones_ind_vec, round(ratio_mis * length(ones_ind_vec)))
      index_Xi_zeros <- sample(zeros_ind_vec, round(ratio_mis * length(zeros_ind_vec)))
      # sanity check of the stratified sampling
      if (!(all(Xi[index_Xi_ones] == 1)) | !(all(Xi[index_Xi_zeros] == 0)))
        message("the hierachical sampling does not work")
      index_Xi_test <- c(index_Xi_ones, index_Xi_zeros)
    } else {
      # quantitative data: sample uniformly from the nonmissing elements
      non_NaN_ind_vec <- which(!is.na(Xi))
      index_Xi_test <- sample(non_NaN_ind_vec, round(ratio_mis * length(non_NaN_ind_vec)))
    }
    # generate the train set: held-out elements become missing
    Xi_train <- Xi
    Xi_train[index_Xi_test] <- NA
    trainSets[[i]] <- Xi_train
    # generate the test set: the held-out values and their indices
    testSets[[i]] <- Xi[index_Xi_test]
    indexSets[[i]] <- index_Xi_test
  }
  # return
  result <- list()
  result$trainSets <- trainSets
  result$testSets <- testSets
  result$indexSets <- indexSets
  return(result)
}
#' Compute CV errors
#'
#' This function will compute CV errors for a specific model
#'
#' @param splitedData output of function \code{dataSplit}
#' @param dataTypes the data types for each data set ("G": Gaussian, "B": binary)
#' @param alphas dispersion parameters for each data set
#' @param ThetaHat estimated Theta
#' @param d a numeric vector contains the number of variables of data sets
#'
#' @return This function returns a vector contains CV errors
#'
#' @examples
#' \dontrun{cvError_comput(splitedData,dataTypes,alphas,ThetaHat,d)}
cvError_comput <- function(splitedData, dataTypes, alphas, ThetaHat, d) {
  nDataSets <- length(d)
  testSets <- splitedData$testSets
  indexSets <- splitedData$indexSets
  testError_vec <- rep(0, nDataSets)
  for (i in 1:nDataSets) {
    # index out ThetaHat_Xi, the estimated Theta of the i-th data set
    columns_Xi <- index_Xi(i, d)
    ThetaHat_Xi <- ThetaHat[, columns_Xi]
    # held-out values and their element positions within the i-th data set
    index_Xi_test <- indexSets[[i]]
    Xi_test <- testSets[[i]]
    dataType_Xi <- dataTypes[i]
    if (dataType_Xi == "G") {
      # Gaussian data: scaled half squared error on the test elements.
      # sum((...)^2) is used instead of norm(..., "2") because base::norm()
      # only accepts matrices and would error on this vector of residuals.
      residuals_test <- Xi_test - ThetaHat_Xi[index_Xi_test]
      testError_Xi <- (1/alphas[i]) * 0.5 * sum(residuals_test^2)
    } else if (dataType_Xi == "B") {
      # binary data: scaled logistic loss on the test elements
      testError_Xi <- (1/alphas[i]) * obj_logistic(Xi_test, ThetaHat_Xi[index_Xi_test])
    }
    testError_vec[i] <- testError_Xi
  }
  # return
  return(testError_vec)
}
#' A function to compute the trace of two matrices product
#'
#' This function will compute the trace of two matrices
#'
#' @param X a numerical matrix
#' @param Y a numerical matrix with the same size as \code{X}
#'
#' @return This function returns a scalar contains the trace
#'
#' @examples
#' \dontrun{trace_fast(X, Y)}
trace_fast <- function(X, Y) {
  # Fast trace: if n > p, trace(X, Y) = trace(X'Y); if n < p,
  # trace(X, Y) = trace(YX'). Both matrices must have identical dimensions.
  stopifnot(all(dim(X) == dim(Y)))
  # delegate the actual computation to the compiled helper
  fast_traceC(X, Y)
}
|
# Read a Newick tree from disk and remove its root with ape::unroot();
# the unrooted tree is written back out below.
library(ape)
testtree <- read.tree("253_0.txt")  # input tree in Newick format
unrooted_tr <- unroot(testtree)  # collapse the root into a basal multifurcation
write.tree(unrooted_tr, file="253_0_unrooted.txt") | /codeml_files/newick_trees_processed/253_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 133 | r | library(ape)
testtree <- read.tree("253_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="253_0_unrooted.txt") |
context("test-list")

# Parse a fitted model and return its tidypredict fit expression(s).
fitted_rf <- function(...) {
  tidypredict_fit(parse_model(...))
}

test_that("Supports parsed models in list objects", {
  # A linear model produces a single unevaluated call.
  expect_is(fitted_rf(lm(mpg ~ wt, data = mtcars)), "call")
  # The parsed random forest is expected to have length 500.
  rf_fit <- fitted_rf(randomForest::randomForest(Species ~ ., data = iris))
  expect_equal(length(rf_fit), 500)
})
| /data/genthat_extracted_code/tidypredict/tests/test-list.R | no_license | surayaaramli/typeRrh | R | false | false | 342 | r | context("test-list")
fitted_rf <- function(...) tidypredict_fit(parse_model(...))
test_that("Supports parsed models in list objects", {
expect_is(
fitted_rf(lm(mpg~wt, data = mtcars)),
"call"
)
expect_equal(
length(fitted_rf(
randomForest::randomForest(Species~., data = iris)
)),
500
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{carriers}
\alias{carriers}
\title{Look up airline names from their carrier codes.}
\format{Data frame with columns
\describe{
\item{carrier}{Two letter abbreviation}
\item{name}{Full name}
}}
\source{
\url{http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236}
}
\usage{
carriers
}
\description{
Look up airline names from their carrier codes.
}
\keyword{datasets}
| /man/carriers.Rd | no_license | homerhanumat/airlines | R | false | true | 480 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{carriers}
\alias{carriers}
\title{Look up airline names from their carrier codes.}
\format{Data frame with columns
\describe{
\item{carrier}{Two letter abbreviation}
\item{name}{Full name}
}}
\source{
\url{http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236}
}
\usage{
carriers
}
\description{
Look up airline names from their carrier codes.
}
\keyword{datasets}
|
library(metagen)

### Name: rY
### Title: Data generation: Gaussian-Gaussian model
### Aliases: rY

### ** Examples

# design matrix: an intercept column plus one covariate for 13 studies
x_test = cbind(1,1:13)
# scalar heterogeneity parameter h (see ?metagen::rY)
h_test = .03
# 13 non-negative draws used as study-level variances d
d_test = rchisq(13, df=0.02)
# regression coefficients b (intercept and slope)
b_test = c(0.02, 0.03)
# draw n = 10 replications from the Gaussian-Gaussian model
rY(n=10, h=h_test, d=d_test, x=x_test, b=b_test)
| /data/genthat_extracted_code/metagen/examples/rY.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 258 | r | library(metagen)
### Name: rY
### Title: Data generation: Gaussian-Gaussian model
### Aliases: rY
### ** Examples
x_test = cbind(1,1:13)
h_test = .03
d_test = rchisq(13, df=0.02)
b_test = c(0.02, 0.03)
rY(n=10, h=h_test, d=d_test, x=x_test, b=b_test)
|
## Constructs a caching wrapper around a matrix. The wrapped matrix and its
## (lazily supplied) inverse live in this function's environment and are
## exposed through getter/setter closures.
makeCacheMatrix <- function(matrix = matrix()) {
  # cached inverse; reset to NULL whenever the wrapped matrix changes
  inverse <- NULL
  set <- function(value) {
    matrix <<- value
    inverse <<- NULL
  }
  get <- function() matrix
  set_inv <- function(solved) {
    inverse <<- solved
  }
  get_inv <- function() inverse
  # the returned list of closures is the wrapper's public interface
  list(set = set, get = get,
       set_inv = set_inv,
       get_inv = get_inv)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then cacheSolve retrieves the inverse from the
## cache instead of recomputing it.
cacheSolve <- function(input, ...) {
  ## return the cached inverse when one is available
  inverse <- input$get_inv()
  if (!is.null(inverse)) {
    message("cached")
    return(inverse)
  }
  ## cache miss: compute the inverse, store it for later calls;
  ## extra arguments are now forwarded to solve() (they were silently
  ## ignored before even though the signature accepts ...)
  matrix <- input$get()
  inverse <- solve(matrix, ...)
  input$set_inv(inverse)
  message("no cached")
  return(inverse)
}
## Smoke test: solve the inverse of a 2x2 matrix twice (the second call
## should come from the cache) and return the cached inverse.
testMatrix <- function(matrix = cbind(c(3, 1), c(2, 1))) {
  cached <- makeCacheMatrix(matrix)
  cacheSolve(cached)
  cacheSolve(cached)
  cached$get_inv()
}
| /cachematrix.R | no_license | marcelobns/ProgrammingAssignment2 | R | false | false | 1,507 | r | ## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(matrix = matrix()) {
## Private variable inverse
inverse <- NULL
## Encapsulation of matrix and inverse values
set <- function(value) {
matrix <<- value
inverse <<- NULL
}
get <- function(){
return(matrix)
}
set_inv <- function(solved){
inverse <<- solved
}
get_inv <- function(){
return(inverse)
}
## Returning of encapsulated methods
list(set = set, get = get,
set_inv = set_inv,
get_inv = get_inv)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(input, ...) {
## get inverse matrix
inverse <- input$get_inv()
## if inverse is not null, return inverse
if(!is.null(inverse)) {
message("cached")
return(inverse)
}
## get matrix and solve inverse by native function of R
matrix <- input$get()
inverse <- solve(matrix)
input$set_inv(inverse)
message("no cached")
return(inverse)
}
## test functions
testMatrix <- function(matrix = cbind(c(3,1),c(2,1))){
m = makeCacheMatrix(matrix)
cacheSolve(m)
cacheSolve(m)
m$get_inv()
}
|
\name{mcmc.3pno.testlet}
\alias{mcmc.3pno.testlet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
3PNO Testlet Model
}
\description{
This function estimates the 3PNO testlet model (Wang, Bradlow & Wainer, 2002, 2007)
by Markov Chain Monte Carlo methods (Glas, 2012).
}
\usage{
mcmc.3pno.testlet(dat, testlets = rep(NA, ncol(dat)),
weights = NULL, est.slope = TRUE, est.guess = TRUE, guess.prior = NULL,
testlet.variance.prior = c(1, 0.2), burnin = 500, iter = 1000,
N.sampvalues = 1000, progress.iter = 50, save.theta = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Data frame with dichotomous item responses for \eqn{N} persons and \eqn{I} items
}
\item{testlets}{
An integer or character vector which indicates the allocation of items to
testlets. Same entries corresponds to same testlets.
If an entry is \code{NA}, then this item does not belong to any testlet.
}
\item{weights}{
An optional vector with student sample weights
}
\item{est.slope}{
Should item slopes be estimated? The default is \code{TRUE}.
}
\item{est.guess}{
Should guessing parameters be estimated? The default is \code{TRUE}.
}
\item{guess.prior}{
A vector of length two or a matrix with \eqn{I} items and two columns
which defines the beta prior distribution of guessing
parameters. The default is a non-informative prior, i.e. the Beta(1,1)
distribution.
}
\item{testlet.variance.prior}{
A vector of length two which defines the (joint) prior for testlet variances
assuming an inverse chi-squared distribution.
The first entry is the effective sample size of the prior while the second
entry defines the prior variance of the testlet. The default of \code{c(1,.2)}
means that the prior sample size is 1 and the prior testlet variance is .2.
}
\item{burnin}{
Number of burnin iterations
}
\item{iter}{
Number of iterations
}
\item{N.sampvalues}{
Maximum number of sampled values to save
}
\item{progress.iter}{
Display progress every \code{progress.iter}-th iteration. If no progress
display is wanted, then choose \code{progress.iter} larger than \code{iter}.
}
\item{save.theta}{
Should theta values be saved?
}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% DETAILS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\details{
The testlet response model for person \eqn{p} at item \eqn{i}
is defined as
\deqn{ P(X_{pi} = 1 ) = c_i + ( 1 - c_i )
\Phi ( a_i \theta_p + \gamma_{p,t(i)} + b_i ) \quad , \quad
\theta_p \sim N ( 0 ,1 ) , \gamma_{p,t(i)} \sim N( 0 , \sigma^2_t ) }
In case of \code{est.slope=FALSE}, all item slopes \eqn{a_i} are set to 1. Then
a variance \eqn{\sigma^2} of the \eqn{\theta_p} distribution is estimated
which is called the Rasch testlet model in the literature (Wang & Wilson, 2005).
In case of \code{est.guess=FALSE}, all guessing parameters \eqn{c_i} are
set to 0.
After fitting the testlet model, marginal item parameters are calculated (integrating
out testlet effects \eqn{\gamma_{p,t(i)}}) according the defining response equation
\deqn{ P(X_{pi} = 1 ) = c_i + ( 1 - c_i )
\Phi ( a_i^\ast \theta_p + b_i^\ast ) }
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% VALUES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\value{
A list of class \code{mcmc.sirt} with following entries:
\item{mcmcobj}{Object of class \code{mcmc.list} containing item parameters
(\code{b_marg} and \code{a_marg} denote marginal item parameters)
and person parameters (if requested)}
\item{summary.mcmcobj}{Summary of the \code{mcmcobj} object. In this
summary the Rhat statistic and the mode estimate MAP is included.
The variable \code{PercSEratio} indicates the proportion of the Monte Carlo
standard error in relation to the total standard deviation of the
posterior distribution.}
\item{ic}{Information criteria (DIC)}
\item{burnin}{Number of burnin iterations}
\item{iter}{Total number of iterations}
\item{theta.chain}{Sampled values of \eqn{\theta_p} parameters}
\item{deviance.chain}{Sampled values of deviance values}
\item{EAP.rel}{EAP reliability}
\item{person}{Data frame with EAP person parameter estimates for
\eqn{\theta_p} and their corresponding posterior standard
deviations and for all testlet effects}
\item{dat}{Used data frame}
\item{weights}{Used student weights}
\item{\dots}{Further values}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% REFERENCES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\references{
Glas, C. A. W. (2012). \emph{Estimating and testing the extended testlet model.}
LSAC Research Report Series, RR 12-03.
Wainer, H., Bradlow, E. T., & Wang, X. (2007).
\emph{Testlet response theory and its applications}.
Cambridge: Cambridge University Press.
Wang, W.-C., & Wilson, M. (2005). The Rasch testlet model.
\emph{Applied Psychological Measurement}, \bold{29}, 126-149.
Wang, X., Bradlow, E. T., & Wainer, H. (2002). A general Bayesian model
for testlets: Theory and applications.
\emph{Applied Psychological Measurement}, \bold{26}, 109-128.
}
\author{
Alexander Robitzsch
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
S3 methods: \code{\link{summary.mcmc.sirt}}, \code{\link{plot.mcmc.sirt}}
}
%For estimating testlet models using the \pkg{lme4} package see
%\code{\link{rasch.testlet.glmer}}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% EXAMPLES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Dataset Reading
#############################################################################
data(data.read)
dat <- data.read
I <- ncol(dat)
# set burnin and total number of iterations here (CHANGE THIS!)
burnin <- 200
iter <- 500
#***
# Model 1: 1PNO model
mod1 <- mcmc.3pno.testlet( dat , est.slope=FALSE , est.guess=FALSE ,
burnin=burnin, iter=iter )
summary(mod1)
plot(mod1,ask=TRUE) # plot MCMC chains in coda style
plot(mod1,ask=TRUE , layout=2) # plot MCMC output in different layout
#***
# Model 2: 3PNO model with Beta(5,17) prior for guessing parameters
mod2 <- mcmc.3pno.testlet( dat , guess.prior=c(5,17) ,
burnin=burnin, iter=iter )
summary(mod2)
#***
# Model 3: Rasch (1PNO) testlet model
testlets <- substring( colnames(dat) , 1 , 1 )
mod3 <- mcmc.3pno.testlet( dat , testlets=testlets , est.slope=FALSE ,
est.guess=FALSE , burnin=burnin, iter=iter )
summary(mod3)
#***
# Model 4: 3PNO testlet model with (almost) fixed guessing parameters .25
mod4 <- mcmc.3pno.testlet( dat , guess.prior=1000*c(25,75) , testlets=testlets ,
burnin=burnin, iter=iter )
summary(mod4)
plot(mod4, ask=TRUE, layout=2)
#############################################################################
# SIMULATED EXAMPLE 2: Simulated data according to the Rasch testlet model
#############################################################################
set.seed(678)
N <- 3000 # number of persons
I <- 4 # number of items per testlet
TT <- 3 # number of testlets
ITT <- I*TT
b <- round( rnorm( ITT , mean=0 , sd = 1 ) , 2 )
sd0 <- 1 # sd trait
sdt <- seq( 0 , 2 , len=TT ) # sd testlets
sdt <- sdt
# simulate theta
theta <- rnorm( N , sd = sd0 )
# simulate testlets
ut <- matrix(0,nrow=N , ncol=TT )
for (tt in 1:TT){ ut[,tt] <- rnorm( N , sd = sdt[tt] ) }
ut <- ut[ , rep(1:TT,each=I) ]
# calculate response probability
prob <- matrix( pnorm( theta + ut + matrix( b , nrow=N , ncol=ITT ,
byrow=TRUE ) ) , N, ITT)
Y <- (matrix( runif(N*ITT) , N , ITT) < prob )*1
colMeans(Y)
# define testlets
testlets <- rep(1:TT , each=I )
burnin <- 300
iter <- 1000
#***
# Model 1: 1PNO testlet model (fixed slopes, no guessing)
mod1 <- mcmc.3pno.testlet( dat=Y , est.slope=FALSE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod1)
summ1 <- mod1$summary.mcmcobj
# compare item parameters
cbind( b , summ1[ grep("b" , summ1$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ1[ grep("sigma\\.testlet" , summ1$parameter ) , "Mean" ] )
#***
# Model 2: 2PNO testlet model (estimated slopes, no guessing)
mod2 <- mcmc.3pno.testlet( dat=Y , est.slope=TRUE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod2)
summ2 <- mod2$summary.mcmcobj
# compare item parameters
cbind( b , summ2[ grep("b\\[" , summ2$parameter ) , "Mean" ] )
# item discriminations
cbind( sd0 , summ2[ grep("a\\[" , summ2$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ2[ grep("sigma\\.testlet" , summ2$parameter ) , "Mean" ] )
#############################################################################
# SIMULATED EXAMPLE 3: Simulated data according to the 2PNO testlet model
#############################################################################
set.seed(678)
N <- 3000 # number of persons
I <- 3 # number of items per testlet
TT <- 5 # number of testlets
ITT <- I*TT
b <- round( rnorm( ITT , mean=0 , sd = 1 ) , 2 )
a <- round( runif( ITT , 0.5 , 2 ) ,2)
sdt <- seq( 0 , 2 , len=TT ) # sd testlets
sdt <- sdt
# simulate theta
sd0 <- 1 # sd trait
theta <- rnorm( N , sd = sd0 )
# simulate testlets
ut <- matrix(0,nrow=N , ncol=TT )
for (tt in 1:TT){ ut[,tt] <- rnorm( N , sd = sdt[tt] ) }
ut <- ut[ , rep(1:TT,each=I) ]
# calculate response probability
bM <- matrix( b , nrow=N , ncol=ITT , byrow=TRUE )
aM <- matrix( a , nrow=N , ncol=ITT , byrow=TRUE )
prob <- matrix( pnorm( aM*theta + ut + bM ) , N, ITT)
Y <- (matrix( runif(N*ITT) , N , ITT) < prob )*1
colMeans(Y)
# define testlets
testlets <- rep(1:TT , each=I )
burnin <- 500
iter <- 1500
#***
# Model 1: 2PNO model
mod1 <- mcmc.3pno.testlet( dat=Y , est.slope=TRUE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod1)
summ1 <- mod1$summary.mcmcobj
# compare item parameters
cbind( b , summ1[ grep("b" , summ1$parameter ) , "Mean" ] )
# item discriminations
cbind( a , summ1[ grep("a\\[" , summ1$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ1[ grep("sigma\\.testlet" , summ1$parameter ) , "Mean" ] )
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Testlet model}
\keyword{Testlets}
\keyword{Markov Chain Monte Carlo (MCMC)}
% __ONLY ONE__ keyword per line
| /man/mcmc.3pno.testlet.Rd | no_license | daniloap/sirt | R | false | false | 10,808 | rd | \name{mcmc.3pno.testlet}
\alias{mcmc.3pno.testlet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
3PNO Testlet Model
}
\description{
This function estimates the 3PNO testlet model (Wang, Bradlow & Wainer, 2002, 2007)
by Markov Chain Monte Carlo methods (Glas, 2012).
}
\usage{
mcmc.3pno.testlet(dat, testlets = rep(NA, ncol(dat)),
weights = NULL, est.slope = TRUE, est.guess = TRUE, guess.prior = NULL,
testlet.variance.prior = c(1, 0.2), burnin = 500, iter = 1000,
N.sampvalues = 1000, progress.iter = 50, save.theta = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Data frame with dichotomous item responses for \eqn{N} persons and \eqn{I} items
}
\item{testlets}{
An integer or character vector which indicates the allocation of items to
testlets. Same entries corresponds to same testlets.
If an entry is \code{NA}, then this item does not belong to any testlet.
}
\item{weights}{
An optional vector with student sample weights
}
\item{est.slope}{
Should item slopes be estimated? The default is \code{TRUE}.
}
\item{est.guess}{
Should guessing parameters be estimated? The default is \code{TRUE}.
}
\item{guess.prior}{
A vector of length two or a matrix with \eqn{I} items and two columns
which defines the beta prior distribution of guessing
parameters. The default is a non-informative prior, i.e. the Beta(1,1)
distribution.
}
\item{testlet.variance.prior}{
A vector of length two which defines the (joint) prior for testlet variances
assuming an inverse chi-squared distribution.
The first entry is the effective sample size of the prior while the second
entry defines the prior variance of the testlet. The default of \code{c(1,.2)}
means that the prior sample size is 1 and the prior testlet variance is .2.
}
\item{burnin}{
Number of burnin iterations
}
\item{iter}{
Number of iterations
}
\item{N.sampvalues}{
Maximum number of sampled values to save
}
\item{progress.iter}{
Display progress every \code{progress.iter}-th iteration. If no progress
display is wanted, then choose \code{progress.iter} larger than \code{iter}.
}
\item{save.theta}{
Should theta values be saved?
}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% DETAILS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\details{
The testlet response model for person \eqn{p} at item \eqn{i}
is defined as
\deqn{ P(X_{pi} = 1 ) = c_i + ( 1 - c_i )
\Phi ( a_i \theta_p + \gamma_{p,t(i)} + b_i ) \quad , \quad
\theta_p \sim N ( 0 ,1 ) , \gamma_{p,t(i)} \sim N( 0 , \sigma^2_t ) }
In case of \code{est.slope=FALSE}, all item slopes \eqn{a_i} are set to 1. Then
a variance \eqn{\sigma^2} of the \eqn{\theta_p} distribution is estimated
which is called the Rasch testlet model in the literature (Wang & Wilson, 2005).
In case of \code{est.guess=FALSE}, all guessing parameters \eqn{c_i} are
set to 0.
After fitting the testlet model, marginal item parameters are calculated (integrating
out testlet effects \eqn{\gamma_{p,t(i)}}) according the defining response equation
\deqn{ P(X_{pi} = 1 ) = c_i + ( 1 - c_i )
\Phi ( a_i^\ast \theta_p + b_i^\ast ) }
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% VALUES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\value{
A list of class \code{mcmc.sirt} with following entries:
\item{mcmcobj}{Object of class \code{mcmc.list} containing item parameters
(\code{b_marg} and \code{a_marg} denote marginal item parameters)
and person parameters (if requested)}
\item{summary.mcmcobj}{Summary of the \code{mcmcobj} object. In this
summary the Rhat statistic and the mode estimate MAP is included.
The variable \code{PercSEratio} indicates the proportion of the Monte Carlo
standard error in relation to the total standard deviation of the
posterior distribution.}
\item{ic}{Information criteria (DIC)}
\item{burnin}{Number of burnin iterations}
\item{iter}{Total number of iterations}
\item{theta.chain}{Sampled values of \eqn{\theta_p} parameters}
\item{deviance.chain}{Sampled values of deviance values}
\item{EAP.rel}{EAP reliability}
\item{person}{Data frame with EAP person parameter estimates for
\eqn{\theta_p} and their corresponding posterior standard
deviations and for all testlet effects}
\item{dat}{Used data frame}
\item{weights}{Used student weights}
\item{\dots}{Further values}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% REFERENCES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\references{
Glas, C. A. W. (2012). \emph{Estimating and testing the extended testlet model.}
LSAC Research Report Series, RR 12-03.
Wainer, H., Bradlow, E. T., & Wang, X. (2007).
\emph{Testlet response theory and its applications}.
Cambridge: Cambridge University Press.
Wang, W.-C., & Wilson, M. (2005). The Rasch testlet model.
\emph{Applied Psychological Measurement}, \bold{29}, 126-149.
Wang, X., Bradlow, E. T., & Wainer, H. (2002). A general Bayesian model
for testlets: Theory and applications.
\emph{Applied Psychological Measurement}, \bold{26}, 109-128.
}
\author{
Alexander Robitzsch
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
S3 methods: \code{\link{summary.mcmc.sirt}}, \code{\link{plot.mcmc.sirt}}
}
%For estimating testlet models using the \pkg{lme4} package see
%\code{\link{rasch.testlet.glmer}}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% EXAMPLES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Dataset Reading
#############################################################################
data(data.read)
dat <- data.read
I <- ncol(dat)
# set burnin and total number of iterations here (CHANGE THIS!)
burnin <- 200
iter <- 500
#***
# Model 1: 1PNO model
mod1 <- mcmc.3pno.testlet( dat , est.slope=FALSE , est.guess=FALSE ,
burnin=burnin, iter=iter )
summary(mod1)
plot(mod1,ask=TRUE) # plot MCMC chains in coda style
plot(mod1,ask=TRUE , layout=2) # plot MCMC output in different layout
#***
# Model 2: 3PNO model with Beta(5,17) prior for guessing parameters
mod2 <- mcmc.3pno.testlet( dat , guess.prior=c(5,17) ,
burnin=burnin, iter=iter )
summary(mod2)
#***
# Model 3: Rasch (1PNO) testlet model
testlets <- substring( colnames(dat) , 1 , 1 )
mod3 <- mcmc.3pno.testlet( dat , testlets=testlets , est.slope=FALSE ,
est.guess=FALSE , burnin=burnin, iter=iter )
summary(mod3)
#***
# Model 4: 3PNO testlet model with (almost) fixed guessing parameters .25
mod4 <- mcmc.3pno.testlet( dat , guess.prior=1000*c(25,75) , testlets=testlets ,
burnin=burnin, iter=iter )
summary(mod4)
plot(mod4, ask=TRUE, layout=2)
#############################################################################
# SIMULATED EXAMPLE 2: Simulated data according to the Rasch testlet model
#############################################################################
set.seed(678)
N <- 3000 # number of persons
I <- 4 # number of items per testlet
TT <- 3 # number of testlets
ITT <- I*TT
b <- round( rnorm( ITT , mean=0 , sd = 1 ) , 2 )
sd0 <- 1 # sd trait
sdt <- seq( 0 , 2 , len=TT ) # sd testlets
sdt <- sdt
# simulate theta
theta <- rnorm( N , sd = sd0 )
# simulate testlets
ut <- matrix(0,nrow=N , ncol=TT )
for (tt in 1:TT){ ut[,tt] <- rnorm( N , sd = sdt[tt] ) }
ut <- ut[ , rep(1:TT,each=I) ]
# calculate response probability
prob <- matrix( pnorm( theta + ut + matrix( b , nrow=N , ncol=ITT ,
byrow=TRUE ) ) , N, ITT)
Y <- (matrix( runif(N*ITT) , N , ITT) < prob )*1
colMeans(Y)
# define testlets
testlets <- rep(1:TT , each=I )
burnin <- 300
iter <- 1000
#***
# Model 1: 1PNO model (without testlet structure)
mod1 <- mcmc.3pno.testlet( dat=Y , est.slope=FALSE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod1)
summ1 <- mod1$summary.mcmcobj
# compare item parameters
cbind( b , summ1[ grep("b" , summ1$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ1[ grep("sigma\\.testlet" , summ1$parameter ) , "Mean" ] )
#***
# Model 2: 1PNO model (without testlet structure)
mod2 <- mcmc.3pno.testlet( dat=Y , est.slope=TRUE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod2)
summ2 <- mod2$summary.mcmcobj
# compare item parameters
cbind( b , summ2[ grep("b\\[" , summ2$parameter ) , "Mean" ] )
# item discriminations
cbind( sd0 , summ2[ grep("a\\[" , summ2$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ2[ grep("sigma\\.testlet" , summ2$parameter ) , "Mean" ] )
#############################################################################
# SIMULATED EXAMPLE 3: Simulated data according to the 2PNO testlet model
#############################################################################
set.seed(678)
N <- 3000 # number of persons
I <- 3 # number of items per testlet
TT <- 5 # number of testlets
ITT <- I*TT
b <- round( rnorm( ITT , mean=0 , sd = 1 ) , 2 )
a <- round( runif( ITT , 0.5 , 2 ) ,2)
sdt <- seq( 0 , 2 , len=TT ) # sd testlets
sdt <- sdt
# simulate theta
theta <- rnorm( N , sd = sd0 )
# simulate testlets
ut <- matrix(0,nrow=N , ncol=TT )
for (tt in 1:TT){ ut[,tt] <- rnorm( N , sd = sdt[tt] ) }
ut <- ut[ , rep(1:TT,each=I) ]
# calculate response probability
bM <- matrix( b , nrow=N , ncol=ITT , byrow=TRUE )
aM <- matrix( a , nrow=N , ncol=ITT , byrow=TRUE )
prob <- matrix( pnorm( aM*theta + ut + bM ) , N, ITT)
Y <- (matrix( runif(N*ITT) , N , ITT) < prob )*1
colMeans(Y)
# define testlets
testlets <- rep(1:TT , each=I )
burnin <- 500
iter <- 1500
#***
# Model 1: 2PNO model
mod1 <- mcmc.3pno.testlet( dat=Y , est.slope=TRUE , est.guess=FALSE ,
burnin=burnin, iter=iter , testlets= testlets )
summary(mod1)
summ1 <- mod1$summary.mcmcobj
# compare item parameters
cbind( b , summ1[ grep("b" , summ1$parameter ) , "Mean" ] )
# item discriminations
cbind( a , summ1[ grep("a\\[" , summ1$parameter ) , "Mean" ] )
# Testlet standard deviations
cbind( sdt , summ1[ grep("sigma\\.testlet" , summ1$parameter ) , "Mean" ] )
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Testlet model}
\keyword{Testlets}
\keyword{Markov Chain Monte Carlo (MCMC)}
% __ONLY ONE__ keyword per line
|
library(Rmpi)
library(snow)
one_rep <- function(new_params, current_params) {
## Run a single simulation replicate: simulate an RNA-seq count data set,
## fit a battery of confounder-adjustment methods, and score each method
## on MSE, AUC, and interval coverage.
##
## Args:
##   new_params: per-replicate settings, one row of the design grid
##               (current_seed, nullpi, Nsamp, ncontrols, poisthin).
##   current_params: settings shared by all replicates (Ngene, tissue,
##               path, log2foldmean, log2foldsd, skip_gene).
## Returns: numeric vector c(mse_vec, auc_vec, cov_vec) -- one entry per
##          method for each of the three criteria.
## NOTE: helpers are sourced inside the function so every snow/MPI worker
## loads them in its own session.
source("../code/data_generators.R")
source("../code/adjustment_methods.R")
args_val <- append(current_params, new_params)
set.seed(new_params$current_seed)
d_out <- datamaker_counts_only(args_val)
which_null <- d_out$meta$null
## Start from all null genes, then thin down to the requested number of
## control genes.
control_genes <- as.logical(which_null)
nnull <- sum(control_genes)
## NOTE(review): the design grid names this column "ncontrols";
## $ncontrol works only via R's partial matching of list names --
## confirm intentional, or spell out $ncontrols.
control_genes[control_genes][sample(1:nnull, size = nnull - args_val$ncontrol)] <- FALSE
beta_true <- rep(0, length = args_val$Ngene)
beta_true[!which_null] <- d_out$meta$true_log2foldchange
X <- as.matrix(model.matrix(~d_out$input$condition))
colnames(X) <- c("Intercept", "Treatment")
## log2(counts + 1); after the transpose, samples are rows, genes columns.
Y <- t(log2(as.matrix(d_out$input$counts + 1)))
## Per-sample 75th percentile added as an extra covariate.
q75 <- apply(X = Y, MARGIN = 1, FUN = quantile, probs = 0.75)
X <- cbind(X, q75)
## Number of surrogate variables ("be" method), forced to be at least 1.
num_sv <- max(sva::num.sv(t(Y), mod = X, method = "be"), 1)
## rmax <- min(sum(control_genes), nrow(X) - ncol(X))
## num_sv <- max(cate::est.confounder.num(~ Treatment, X.data = as.data.frame(X),
## Y = Y, rmax = rmax,
## nRepeat = 100, bcv.plot = FALSE)$r, 1)
start.time <- proc.time()
## Fit each adjustment method; list names label the result vector entries.
method_list <- list()
method_list$ols <- ols(Y = Y, X = X)
method_list$ruv2 <- ruv2(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
## NOTE(review): ruv2_rsvar is fit with the same ruv2() call as ruv2 --
## looks like a copy-paste; confirm whether an rsvar variant was intended.
method_list$ruv2_rsvar <- ruv2(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruv3 <- ruv3(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes,
multiplier = FALSE)
## method_list$ruv3_rsvar <- ruv3(Y = Y, X = X, num_sv = num_sv,
## control_genes = control_genes,
## multiplier = TRUE)
method_list$ruv4 <- ruv4(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruv4_rsvar <- ruv4_rsvar_ebayes(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
## method_list$ruvem <- ruvem(Y = Y, X = X, num_sv = num_sv,
## control_genes = control_genes)
method_list$catenc <- cate_nc(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes,
calibrate = TRUE)
method_list$ruv4v <- vruv4(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruvb <- ruvb_bfa_gs_linked(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
## false_signs <- sign(beta_true[!control_genes]) != sign(method_list$ruvb$betahat)
## sorder <- order(method_list$ruvb$svalues)
## qplot(1:(args_val$Ngene - args_val$ncontrols),
## c(method_list$ruvb$svalues)[sorder], col = false_signs[sorder])
## fsr <- cumsum(false_signs[sorder]) / (1:(args_val$Ngene - args_val$ncontrols))
## plot(c(method_list$ruvb$svalues)[sorder], fsr)
## abline(0, 1)
## Mean squared error of estimated effects on the non-control genes.
## Some methods return estimates for all genes, others only non-controls;
## the length check handles both.
get_mse <- function(args, beta_true, control_genes) {
if (length(args$betahat) == length(control_genes)) {
mean((args$betahat[!control_genes] - beta_true[!control_genes]) ^ 2)
} else {
mean((args$betahat - beta_true[!control_genes]) ^ 2)
}
}
## AUC of p-values against null status on non-control genes; NA when the
## replicate has no non-null genes (nullpi == 1).
get_auc <- function(args, which_null, control_genes) {
if (sum(which_null) == length(which_null)) {
return(NA)
}
if (length(args$pvalues) == length(control_genes)) {
pROC::roc(predictor = args$pvalues[!control_genes],
response = which_null[!control_genes])$auc
} else {
pROC::roc(predictor = c(args$pvalues), response = which_null[!control_genes])$auc
}
}
## Empirical coverage of the (lower, upper) intervals on non-control genes.
get_coverage <- function(args, beta_true, control_genes) {
if(length(args$lower) == length(control_genes)) {
mean(args$lower[!control_genes] < beta_true[!control_genes] &
args$upper[!control_genes] > beta_true[!control_genes])
} else {
mean(args$lower < beta_true[!control_genes] &
args$upper > beta_true[!control_genes])
}
}
mse_vec <- sapply(method_list, get_mse, beta_true = beta_true,
control_genes = control_genes)
auc_vec <- sapply(method_list, get_auc, which_null = which_null,
control_genes = control_genes)
cov_vec <- sapply(method_list, get_coverage, beta_true = beta_true,
control_genes = control_genes)
return_vec <- c(mse_vec, auc_vec, cov_vec)
xtot.time <- proc.time() - start.time  # total fit time (currently unused)
return(return_vec)
}
## ---- Simulation design grid and parallel execution ---------------------
itermax <- 200     # replicates per parameter combination
seed_start <- 2222 # seeds used are seed_start+1, ..., seed_start+itermax
## these change
nullpi_seq <- c(0.5, 0.9, 1)   # proportion of null genes
Nsamp_seq <- c(3, 5, 10, 20)   # samples per group
ncontrol_seq <- c(10, 100)     # number of control genes
par_vals <- expand.grid(list((1 + seed_start):(itermax + seed_start),
nullpi_seq, Nsamp_seq, ncontrol_seq))
colnames(par_vals) <- c("current_seed", "nullpi", "Nsamp", "ncontrols")
## Poisson thinning is switched off when all genes are null (nullpi == 1).
par_vals$poisthin <- TRUE
par_vals$poisthin[abs(par_vals$nullpi - 1) < 10 ^ -10] <- FALSE
## Convert each design-grid row into a named list suitable for one_rep().
par_list <- list()
for (list_index in 1:nrow(par_vals)) {
par_list[[list_index]] <- list()
for (inner_list_index in 1:ncol(par_vals)) {
par_list[[list_index]][[inner_list_index]] <- par_vals[list_index, inner_list_index]
names(par_list[[list_index]])[inner_list_index] <- colnames(par_vals)[inner_list_index]
}
}
## these do not change
args_val <- list()
args_val$log2foldsd <- 0.8
args_val$tissue <- "muscle"
args_val$path <- "../../../data/gtex_tissue_gene_reads_v6p/"
args_val$Ngene <- 1000
args_val$log2foldmean <- 0
args_val$skip_gene <- 0
## one_rep(par_list[[3]], args_val)
## ## If on your own computer, use this
library(parallel)
cl <- makeCluster(detectCores() - 1)
sout <- t(snow::parSapply(cl = cl, par_list, FUN = one_rep, current_params = args_val))
stopCluster(cl)
## ## on RCC, use this
## np <- mpi.universe.size() - 1
## cluster <- makeMPIcluster(np)
## sout <- t(snow::parSapply(cl = cluster, X = par_list, FUN = one_rep, current_params = args_val))
## stopCluster(cluster)
## mpi.exit()
save(sout, file = "general_sims2.Rd")
## sout columns: 1-9 MSE, 10-18 AUC, 19-27 coverage (9 methods x 3 criteria,
## matching the order returned by one_rep()).
mse_mat <- cbind(par_vals, sout[, 1:9])
auc_mat <- cbind(par_vals, sout[, 10:18])
cov_mat <- cbind(par_vals, sout[, 19:27])
write.csv(mse_mat, file = "mse_mat2.csv", row.names = FALSE)
write.csv(auc_mat, file = "auc_mat2.csv", row.names = FALSE)
write.csv(cov_mat, file = "cov_mat2.csv", row.names = FALSE)
## ---- MCMC convergence diagnostics for RUV-B ----------------------------
## from par_list[[1070]], chosen by a random seed
## NOTE: relies on Y, X, num_sv, control_genes having been built in the
## global environment (e.g. by stepping through one_rep()'s body).
library(coda)
bout <- vicar::ruvb(Y = Y, X = X, k = num_sv, ctl = control_genes,
                    return_mcmc = TRUE)
## Posterior draws for the first coefficient as a coda mcmc object
## (chains in rows after the transpose).
mcmc_b <- mcmc(t(bout$betahat_post[1, , , drop = TRUE]))
## Geweke z-scores should look standard normal if the chains converged.
gout <- geweke.diag(mcmc_b)
qqnorm(gout$z)
abline(0, 1)
traceplot(mcmc_b)
library(ggplot2)
## Effective sample size per parameter, shown as a histogram.
eout <- effectiveSize(mcmc_b)
qplot(eout, bins = 20, fill = I("white"), color = I("black")) + theme_bw()
## BUG FIX: was `out$betahat_post`, but no object named `out` exists here;
## the posterior draws live in `bout` (assigned above).
bout$betahat_post
| /modular_sims/ruv3paper_sims_px_linked_v6p_75th/ruv3paper_sims.R | no_license | Feigeliudan01/sim_code | R | false | false | 7,314 | r | library(Rmpi)
library(snow)
one_rep <- function(new_params, current_params) {
source("../code/data_generators.R")
source("../code/adjustment_methods.R")
args_val <- append(current_params, new_params)
set.seed(new_params$current_seed)
d_out <- datamaker_counts_only(args_val)
which_null <- d_out$meta$null
control_genes <- as.logical(which_null)
nnull <- sum(control_genes)
control_genes[control_genes][sample(1:nnull, size = nnull - args_val$ncontrol)] <- FALSE
beta_true <- rep(0, length = args_val$Ngene)
beta_true[!which_null] <- d_out$meta$true_log2foldchange
X <- as.matrix(model.matrix(~d_out$input$condition))
colnames(X) <- c("Intercept", "Treatment")
Y <- t(log2(as.matrix(d_out$input$counts + 1)))
q75 <- apply(X = Y, MARGIN = 1, FUN = quantile, probs = 0.75)
X <- cbind(X, q75)
num_sv <- max(sva::num.sv(t(Y), mod = X, method = "be"), 1)
## rmax <- min(sum(control_genes), nrow(X) - ncol(X))
## num_sv <- max(cate::est.confounder.num(~ Treatment, X.data = as.data.frame(X),
## Y = Y, rmax = rmax,
## nRepeat = 100, bcv.plot = FALSE)$r, 1)
start.time <- proc.time()
method_list <- list()
method_list$ols <- ols(Y = Y, X = X)
method_list$ruv2 <- ruv2(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruv2_rsvar <- ruv2(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruv3 <- ruv3(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes,
multiplier = FALSE)
## method_list$ruv3_rsvar <- ruv3(Y = Y, X = X, num_sv = num_sv,
## control_genes = control_genes,
## multiplier = TRUE)
method_list$ruv4 <- ruv4(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruv4_rsvar <- ruv4_rsvar_ebayes(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
## method_list$ruvem <- ruvem(Y = Y, X = X, num_sv = num_sv,
## control_genes = control_genes)
method_list$catenc <- cate_nc(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes,
calibrate = TRUE)
method_list$ruv4v <- vruv4(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
method_list$ruvb <- ruvb_bfa_gs_linked(Y = Y, X = X, num_sv = num_sv,
control_genes = control_genes)
## false_signs <- sign(beta_true[!control_genes]) != sign(method_list$ruvb$betahat)
## sorder <- order(method_list$ruvb$svalues)
## qplot(1:(args_val$Ngene - args_val$ncontrols),
## c(method_list$ruvb$svalues)[sorder], col = false_signs[sorder])
## fsr <- cumsum(false_signs[sorder]) / (1:(args_val$Ngene - args_val$ncontrols))
## plot(c(method_list$ruvb$svalues)[sorder], fsr)
## abline(0, 1)
get_mse <- function(args, beta_true, control_genes) {
if (length(args$betahat) == length(control_genes)) {
mean((args$betahat[!control_genes] - beta_true[!control_genes]) ^ 2)
} else {
mean((args$betahat - beta_true[!control_genes]) ^ 2)
}
}
get_auc <- function(args, which_null, control_genes) {
if (sum(which_null) == length(which_null)) {
return(NA)
}
if (length(args$pvalues) == length(control_genes)) {
pROC::roc(predictor = args$pvalues[!control_genes],
response = which_null[!control_genes])$auc
} else {
pROC::roc(predictor = c(args$pvalues), response = which_null[!control_genes])$auc
}
}
get_coverage <- function(args, beta_true, control_genes) {
if(length(args$lower) == length(control_genes)) {
mean(args$lower[!control_genes] < beta_true[!control_genes] &
args$upper[!control_genes] > beta_true[!control_genes])
} else {
mean(args$lower < beta_true[!control_genes] &
args$upper > beta_true[!control_genes])
}
}
mse_vec <- sapply(method_list, get_mse, beta_true = beta_true,
control_genes = control_genes)
auc_vec <- sapply(method_list, get_auc, which_null = which_null,
control_genes = control_genes)
cov_vec <- sapply(method_list, get_coverage, beta_true = beta_true,
control_genes = control_genes)
return_vec <- c(mse_vec, auc_vec, cov_vec)
xtot.time <- proc.time() - start.time
return(return_vec)
}
itermax <- 200
seed_start <- 2222
## these change
nullpi_seq <- c(0.5, 0.9, 1)
Nsamp_seq <- c(3, 5, 10, 20)
ncontrol_seq <- c(10, 100)
par_vals <- expand.grid(list((1 + seed_start):(itermax + seed_start),
nullpi_seq, Nsamp_seq, ncontrol_seq))
colnames(par_vals) <- c("current_seed", "nullpi", "Nsamp", "ncontrols")
par_vals$poisthin <- TRUE
par_vals$poisthin[abs(par_vals$nullpi - 1) < 10 ^ -10] <- FALSE
par_list <- list()
for (list_index in 1:nrow(par_vals)) {
par_list[[list_index]] <- list()
for (inner_list_index in 1:ncol(par_vals)) {
par_list[[list_index]][[inner_list_index]] <- par_vals[list_index, inner_list_index]
names(par_list[[list_index]])[inner_list_index] <- colnames(par_vals)[inner_list_index]
}
}
## these do not change
args_val <- list()
args_val$log2foldsd <- 0.8
args_val$tissue <- "muscle"
args_val$path <- "../../../data/gtex_tissue_gene_reads_v6p/"
args_val$Ngene <- 1000
args_val$log2foldmean <- 0
args_val$skip_gene <- 0
## one_rep(par_list[[3]], args_val)
## ## If on your own computer, use this
library(parallel)
cl <- makeCluster(detectCores() - 1)
sout <- t(snow::parSapply(cl = cl, par_list, FUN = one_rep, current_params = args_val))
stopCluster(cl)
## ## on RCC, use this
## np <- mpi.universe.size() - 1
## cluster <- makeMPIcluster(np)
## sout <- t(snow::parSapply(cl = cluster, X = par_list, FUN = one_rep, current_params = args_val))
## stopCluster(cluster)
## mpi.exit()
save(sout, file = "general_sims2.Rd")
mse_mat <- cbind(par_vals, sout[, 1:9])
auc_mat <- cbind(par_vals, sout[, 10:18])
cov_mat <- cbind(par_vals, sout[, 19:27])
write.csv(mse_mat, file = "mse_mat2.csv", row.names = FALSE)
write.csv(auc_mat, file = "auc_mat2.csv", row.names = FALSE)
write.csv(cov_mat, file = "cov_mat2.csv", row.names = FALSE)
## from par_list[[1070]], chosen by a random seed
library(coda)
bout <- vicar::ruvb(Y = Y, X = X, k = num_sv, ctl = control_genes,
return_mcmc = TRUE)
mcmc_b <- mcmc(t(bout$betahat_post[1, , ,drop = TRUE]))
gout <- geweke.diag(mcmc_b)
qqnorm(gout$z)
abline(0, 1)
traceplot(mcmc_b)
library(ggplot2)
eout <- effectiveSize(mcmc_b)
qplot(eout, bins = 20, fill = I("white"), color = I("black")) + theme_bw()
out$betahat_post
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glview.r
\name{glVisible}
\alias{glVisible}
\title{determine visibility of mesh's vertices}
\usage{
glVisible(mesh, offset = 0.001)
}
\arguments{
\item{mesh}{triangular mesh of class "mesh3d". Must be currently rendered in
an rgl window.}
\item{offset}{initial offset to move vertex slightly away from the surface.}
}
\value{
returns logical vector, assigning TRUE/FALSE to each vertex of a
mesh.
}
\description{
Determine which vertices of a rendered mesh are visible from the current
viewpoint
}
\examples{
\dontrun{
require(rgl)
require(Morpho)
data(nose)
shade3d(shortnose.mesh,col=3)
visi <- glVisible(shortnose.mesh)
points3d(vert2points(shortnose.mesh)[which(visi),])
}
}
\author{
Stefan Schlager
}
\seealso{
\code{\link{selectVertex}}, \code{\link{cutMesh}}
}
\keyword{~kwd1}
\keyword{~kwd2}
| /man/glVisible.Rd | no_license | Celli119/mesheR | R | false | true | 880 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glview.r
\name{glVisible}
\alias{glVisible}
\title{determine visibility of mesh's vertices}
\usage{
glVisible(mesh, offset = 0.001)
}
\arguments{
\item{mesh}{triangular mesh of class "mesh3d". Must be currently rendered in
an rgl window.}
\item{offset}{initial offset to move vertex slightly away from the surface.}
}
\value{
returns logical vector, assigning TRUE/FALSE to each vertex of a
mesh.
}
\description{
Determine which vertices of a rendered mesh are visible from the current
viewpoint
}
\examples{
\dontrun{
require(rgl)
require(Morpho)
data(nose)
shade3d(shortnose.mesh,col=3)
visi <- glVisible(shortnose.mesh)
points3d(vert2points(shortnose.mesh)[which(visi),])
}
}
\author{
Stefan Schlager
}
\seealso{
\code{\link{selectVertex}}, \code{\link{cutMesh}}
}
\keyword{~kwd1}
\keyword{~kwd2}
|
# Cut the Belvedere 1894 into individual page files.
# rotated, sized and compressed for oW-Whaling
## Each photograph is cropped into a left (p1) and right (p2) page region,
## downscaled, and written out as a compressed JPEG thumbnail via
## ImageMagick's `convert` command.
base.dir<-sprintf("%s/oW4_logbooks/refill_2017_04",Sys.getenv('SCRATCH'))
photos<-Sys.glob(sprintf("%s/originals/Belvedere_1894/*",
base.dir))
uploads.dir<-sprintf("%s/thumbnails/Belvedere_1894",
base.dir)
## Pixel bounding boxes (x and y ranges) of the two page regions.
p1.coords<-list(x=c(0,1200),y=c(700,2250))
p2.coords<-list(x=c(1000,2180),y=c(700,2250))
## Scale so the largest crop dimension becomes 174 pixels.
scale.factor<-174/(max(p1.coords$x[2]-p1.coords$x[1],
p1.coords$y[2]-p1.coords$y[1],
p2.coords$x[2]-p2.coords$x[1],
p2.coords$y[2]-p2.coords$y[1]))
## ImageMagick options producing small, web-friendly JPEGs.
quality.control<-'-strip -sampling-factor 4:2:0 -quality 50'
for(i in seq_along(photos)) {
pic<-photos[i]
up.dir<-uploads.dir
if(!file.exists(up.dir)) dir.create(up.dir,recursive=TRUE)
## Page 1: output name keeps the original basename minus its extension.
p1.name<-sprintf("%s/%s.p1.jpg",up.dir,substr(basename(pic),0,nchar(basename(pic))-4))
system(sprintf("convert -crop %dx%d+%d+%d -resize %dx%d %s '%s' %s",
as.integer((p1.coords$x[2]-p1.coords$x[1])),
as.integer((p1.coords$y[2]-p1.coords$y[1])),
p1.coords$x[1],
p1.coords$y[1],
as.integer((p1.coords$x[2]-p1.coords$x[1])*scale.factor),
as.integer((p1.coords$y[2]-p1.coords$y[1])*scale.factor),
quality.control,
pic,p1.name))
## Page 2: same pipeline with the right-hand crop box.
p2.name<-sprintf("%s/%s.p2.jpg",up.dir,substr(basename(pic),0,nchar(basename(pic))-4))
system(sprintf("convert -crop %dx%d+%d+%d -resize %dx%d %s '%s' %s",
as.integer((p2.coords$x[2]-p2.coords$x[1])),
as.integer((p2.coords$y[2]-p2.coords$y[1])),
p2.coords$x[1],
p2.coords$y[1],
as.integer((p2.coords$x[2]-p2.coords$x[1])*scale.factor),
as.integer((p2.coords$y[2]-p2.coords$y[1])*scale.factor),
quality.control,
pic,p2.name))
}
| /page_uploads/refill_2017_04/Belvedere_1894/split_to_thumbnails.R | no_license | oldweather/oldWeather4 | R | false | false | 1,820 | r | # Cut the Belvedere 1894 into individual page files.
# rotated, sized and comnpressed for oW-Whaling
base.dir<-sprintf("%s/oW4_logbooks/refill_2017_04",Sys.getenv('SCRATCH'))
photos<-Sys.glob(sprintf("%s/originals/Belvedere_1894/*",
base.dir))
uploads.dir<-sprintf("%s/thumbnails/Belvedere_1894",
base.dir)
p1.coords<-list(x=c(0,1200),y=c(700,2250))
p2.coords<-list(x=c(1000,2180),y=c(700,2250))
scale.factor<-174/(max(p1.coords$x[2]-p1.coords$x[1],
p1.coords$y[2]-p1.coords$y[1],
p2.coords$x[2]-p2.coords$x[1],
p2.coords$y[2]-p2.coords$y[1]))
quality.control<-'-strip -sampling-factor 4:2:0 -quality 50'
for(i in seq_along(photos)) {
pic<-photos[i]
up.dir<-uploads.dir
if(!file.exists(up.dir)) dir.create(up.dir,recursive=TRUE)
p1.name<-sprintf("%s/%s.p1.jpg",up.dir,substr(basename(pic),0,nchar(basename(pic))-4))
system(sprintf("convert -crop %dx%d+%d+%d -resize %dx%d %s '%s' %s",
as.integer((p1.coords$x[2]-p1.coords$x[1])),
as.integer((p1.coords$y[2]-p1.coords$y[1])),
p1.coords$x[1],
p1.coords$y[1],
as.integer((p1.coords$x[2]-p1.coords$x[1])*scale.factor),
as.integer((p1.coords$y[2]-p1.coords$y[1])*scale.factor),
quality.control,
pic,p1.name))
p2.name<-sprintf("%s/%s.p2.jpg",up.dir,substr(basename(pic),0,nchar(basename(pic))-4))
system(sprintf("convert -crop %dx%d+%d+%d -resize %dx%d %s '%s' %s",
as.integer((p2.coords$x[2]-p2.coords$x[1])),
as.integer((p2.coords$y[2]-p2.coords$y[1])),
p2.coords$x[1],
p2.coords$y[1],
as.integer((p2.coords$x[2]-p2.coords$x[1])*scale.factor),
as.integer((p2.coords$y[2]-p2.coords$y[1])*scale.factor),
quality.control,
pic,p2.name))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wn_tests.R
\name{ljung_box}
\alias{ljung_box}
\title{ljung box test for white noise}
\usage{
ljung_box(
x,
p = 0,
q = 0,
k_val = c(24, 48),
model_name = "My Model",
alpha = 0.05
)
}
\arguments{
\item{x}{the time series}
\item{p}{the ar order (Default = 0)}
\item{q}{the ma order (Default = 0)}
\item{k_val}{a vector of k values}
\item{model_name}{Model name or identifier (Default: "My Model")}
\item{alpha}{Significance level to be used for ljung_box tests}
}
\value{
the results of the tests, in tidy data format
}
\description{
ljung box test for white noise
}
\examples{
library(tswge)
# Generated White Noise
wn = gen.arma.wge(n = 200, sn = 101)
ljung_box(wn)
# Not White Noise
data(hadley)
ljung_box(hadley)
}
| /man/ljung_box.Rd | no_license | mattfarrow1/tswgewrapped | R | false | true | 816 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wn_tests.R
\name{ljung_box}
\alias{ljung_box}
\title{ljung box test for white noise}
\usage{
ljung_box(
x,
p = 0,
q = 0,
k_val = c(24, 48),
model_name = "My Model",
alpha = 0.05
)
}
\arguments{
\item{x}{the time series}
\item{p}{the ar order (Default = 0)}
\item{q}{the ma order (Default = 0)}
\item{k_val}{a vector of k values}
\item{model_name}{Model name or identifier (Default: "My Model")}
\item{alpha}{Significance level to be used for ljung_box tests}
}
\value{
the results of the tests, in tidy data format
}
\description{
ljung box test for white noise
}
\examples{
library(tswge)
# Generated White Noise
wn = gen.arma.wge(n = 200, sn = 101)
ljung_box(wn)
# Not White Noise
data(hadley)
ljung_box(hadley)
}
|
context("Generally testing the workflow for synth with multiple outcomes")
library(Synth)
data(basque)
## Treated unit: region 17 (Basque Country), treated from 1975 onward.
## gdpcap_sq provides a second outcome; region 1 is dropped from the donor
## pool.
basque <- basque %>% mutate(trt = case_when(year < 1975 ~ 0,
regionno != 17 ~0,
regionno == 17 ~ 1),
gdpcap_sq = gdpcap ^ 2) %>%
filter(regionno != 1)
## Each test below fits the multi-outcome implementation and the reference
## augsynth implementation with matching settings, then checks that
## weights, predictions, and the L2 imbalance agree to within tolerance.
## Two outcomes, plain SCM (no augmentation).
test_that("augsynth and augsynth_multiout are the same without augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="None", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
## Two outcomes, ridge augmentation with a fixed lambda.
test_that("augsynth and augsynth_multiout are the same with ridge augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="Ridge", scm=T, lambda = 10)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="Ridge", scm=T, lambda = 10)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
## Two outcomes, unit fixed effects.
test_that("augsynth and augsynth_multiout are the same with fixed effects augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T, fixedeff = T)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="None", scm=T, fixedeff = T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
## Degenerate case: a single outcome should reduce to single_augsynth.
test_that("single_augsynth and augsynth_multiout are the same for one outcome", {
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="None", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
## Single outcome with ridge augmentation.
test_that("single_augsynth and augsynth_multiout are the same for one outcome with ridge augmentation",{
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="Ridge", scm=T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="Ridge", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)),
tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
## Single outcome with unit fixed effects.
test_that("single_augsynth and augsynth_multiout are the same for one outcome with fixed effect augmentation", {
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T, fixedeff = T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="None", scm=T, fixedeff = T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
| /tests/testthat/test_multiple_outcomes.R | permissive | williamlief/augsynth | R | false | false | 4,426 | r | context("Generally testing the workflow for synth with multiple outcomes")
library(Synth)
data(basque)
basque <- basque %>% mutate(trt = case_when(year < 1975 ~ 0,
regionno != 17 ~0,
regionno == 17 ~ 1),
gdpcap_sq = gdpcap ^ 2) %>%
filter(regionno != 1)
test_that("augsynth and augsynth_multiout are the same without augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="None", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
test_that("augsynth and augsynth_multiout are the same with ridge augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="Ridge", scm=T, lambda = 10)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="Ridge", scm=T, lambda = 10)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
test_that("augsynth and augsynth_multiout are the same with fixed effects augmentation", {
syn1 <- augsynth_multiout(gdpcap + gdpcap_sq ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T, fixedeff = T)
syn2 <- augsynth(gdpcap + gdpcap_sq ~ trt, regionno, year, basque,
progfunc="None", scm=T, fixedeff = T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), c(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
test_that("single_augsynth and augsynth_multiout are the same for one outcome", {
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="None", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
test_that("single_augsynth and augsynth_multiout are the same for one outcome with ridge augmentation",{
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="Ridge", scm=T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="Ridge", scm=T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)),
tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
test_that("single_augsynth and augsynth_multiout are the same for one outcome with fixed effect augmentation", {
syn1 <- augsynth_multiout(gdpcap ~ trt, regionno, year, 1975, basque,
progfunc="None", scm=T, fixedeff = T)
syn2 <- augsynth(gdpcap ~ trt, regionno, year, basque,
progfunc="None", scm=T, fixedeff = T)
# weights are the same
expect_equal(c(syn1$weights), c(syn2$weights), tolerance=3e-4)
# estimates are the same
expect_equal(c(predict(syn1, att=F)), unname(predict(syn2, att = F)), tolerance=5e-5)
## level of balance is same
expect_equal(syn1$l2_imbalance, syn2$l2_imbalance, tolerance=1e-5)
})
|
## Dispatch to the response-specific initial-value generator.
##
## Args:
##   network: network object carrying $response, one of "multinomial",
##            "binomial", or "normal".
##   n.chains: number of MCMC chains to build initial values for.
## Returns: the initial-value list produced by the matching *.inits
##          helper (NULL when the response type is not recognized,
##          matching the original if/else chain).
network.inits <- function(network, n.chains) {
  generator <- switch(network$response,
                      multinomial = multinomial.inits,
                      binomial    = binomial.inits,
                      normal      = normal.inits)
  result <- if (is.null(generator)) NULL else generator(network, n.chains)
  return(result)
}
## Build initial values for a normal-response network.
##
## Centers each arm's outcome on its study's baseline-arm outcome and
## delegates the regression-based initial-value construction to
## make.inits().
##
## Args:
##   network: network object providing (via with()) Outcomes, SE, b.id
##            (baseline-arm indicator), and na (arms per study).
##   n.chains: number of MCMC chains.
## Returns: the list of per-chain initial values from make.inits().
normal.inits <- function(network, n.chains){
with(network,{
Eta <- Outcomes[b.id]    # baseline-arm outcome per study
se.Eta <- SE[b.id]       # and its standard error
## FIX: removed the dead `delta <- Outcomes` assignment that was
## immediately overwritten below.
delta <- Outcomes - rep(Eta, times = na)
delta <- delta[!b.id,] #eliminate base-arm
inits <- make.inits(network, n.chains, delta, Eta, se.Eta)
return(inits)
})
}
## Build initial values for a binomial-response network.
##
## Applies a continuity correction so every logit is finite, centers each
## arm's logit on its study's baseline-arm logit, and delegates the
## regression-based initial-value construction to make.inits().
##
## Args:
##   network: network object providing (via with()) Outcomes, N, b.id
##            (baseline-arm indicator), and na (arms per study).
##   n.chains: number of MCMC chains.
## Returns: the list of per-chain initial values from make.inits().
binomial.inits <- function(network, n.chains) {
  with(network, {
    # Continuity correction (+0.5 events, +1 trial) ensures ratios are
    # always defined.
    events <- Outcomes + 0.5
    trials <- N + 1
    prop <- events / trials
    logit.mat <- log(prop / (1 - prop))
    se.logit.mat <- sqrt(1 / events + 1 / (trials - events))
    # Baseline-arm logit (and its SE), one per study.
    Eta <- logit.mat[b.id]
    se.Eta <- se.logit.mat[b.id]
    # Deviation of each arm from its study's baseline logit, with the
    # baseline arms themselves dropped.
    delta <- (logit.mat - rep(Eta, times = na))[!b.id, ]
    make.inits(network, n.chains, delta, Eta, se.Eta)
  })
}
## Construct per-chain initial values for the relative-effect parameters
## by regressing the baseline-centered arm effects (delta) on a treatment
## design matrix, then jittering the point estimates by their standard
## errors.
##
## Args:
##   network: network object; supplies (via with()) Treat, b.id, na,
##            nstudy, ntreat, t, type, covariate, covariate.model,
##            baseline, hy.prior, prior.data, and related fields.
##   n.chains: number of chains; one initial-value list is built per chain.
##   delta: non-baseline arm outcomes centered on each study's baseline.
##   Eta, se.Eta: baseline-arm summaries and their standard errors.
## Returns: list of length n.chains; each element is a named list of
##          initial values (Eta, d, sd/prec, delta, beta*, b_bl, as
##          applicable to the model configuration).
make.inits <- function(network, n.chains, delta, Eta, se.Eta){
with(network,{
# dependent variable for regression
y <- delta
# design matrix
base.tx <- Treat[b.id] # base treatment for N studies
end.Study <- c(0, cumsum(na)) # end row number of each trial
rows <- end.Study - seq(0, nstudy) # end number of each trial not including base treatment arms
design.mat <- matrix(0, sum(na) - nstudy, ntreat) # no. non-base arms x #txs
## Each non-baseline arm row gets -1 on its study's base treatment and
## +1 on its own treatment (difference coding).
for (i in seq(nstudy)){
studytx <- Treat[(end.Study[i]+1):end.Study[i+1]] #treatments in ith Study
nonbase.tx <- studytx[studytx!=base.tx[i]] #non-baseline treatments for ith Study
design.mat[(1+rows[i]):rows[i+1],base.tx[i]] <- -1
for (j in seq(length(nonbase.tx)))
design.mat[j+rows[i],nonbase.tx[j]] <- 1
}
## Drop the reference treatment's column; d[1] stays NA by construction.
design.mat <- design.mat[,-1,drop=F]
## Fixed-effect regression gives point estimates d and std. errors se.d.
fit <- summary(lm(y ~ design.mat - 1))
d <- se.d <- rep(NA, ntreat)
d[-1] <- coef(fit)[,1]
se.d[-1] <- coef(fit)[,2]
resid.var <- fit$sigma^2
# covariate
## Analogous regressions for covariate slopes, matching the requested
## covariate model ("common"/"exchangeable" share one slope,
## "independent" fits one per treatment contrast).
if(!is.null(covariate)) {
x.cen = matrix(0, nrow = sum(na), ncol = dim(covariate)[2])
for(i in 1:dim(covariate)[2]){
x.cen[,i] <- rep(covariate[,i], times = na)
}
x.cen <- x.cen[-seq(dim(x.cen)[1])[b.id],,drop=F]
x.cen <- scale(x.cen, scale = FALSE)
slope <- se.slope <- array(NA, c(ntreat, dim(covariate)[2]))
for(i in 1:dim(covariate)[2]){
fit2 <- if(covariate.model == "common" || covariate.model == "exchangeable"){
summary(lm(y ~ x.cen[,i] -1))
} else if(covariate.model == "independent"){
summary(lm(y ~ design.mat:x.cen[,i] - 1))
}
slope[-1,i] <- coef(fit2)[,1]
se.slope[-1,i] <- coef(fit2)[,2]
}
}
# baseline
## Same pattern for a baseline-risk slope, using the centered
## baseline-arm summaries (Eta) as the regressor.
if(baseline != "none"){
baseline.cen <- rep(Eta, na)
baseline.cen <- baseline.cen[-seq(length(baseline.cen))[b.id]]
baseline.cen <- scale(baseline.cen, scale = FALSE)
baseline.slope <- baseline.se.slope <- rep(NA, ntreat)
fit3 <- if(baseline == "common" || baseline == "exchangeable"){
summary(lm(y ~ baseline.cen -1))
} else if(baseline == "independent"){
summary(lm(y ~ design.mat:baseline.cen - 1))
}
baseline.slope[-1] <- coef(fit3)[,1]
baseline.se.slope[-1] <- coef(fit3)[,2]
}
############# Generate initial values
initial.values = list()
for(i in 1:n.chains){
initial.values[[i]] = list()
}
## Eta inits: estimate plus N(0,1)-scaled standard-error jitter.
for(i in 1:n.chains){
random.Eta <- rnorm(length(Eta))
initial.values[[i]][["Eta"]] <- Eta + se.Eta * random.Eta
}
## NOTE(review): fit$fstat partial-matches summary.lm's "fstatistic"
## component; it is NaN when the regression is degenerate, in which
## case d/delta inits are skipped and left to the sampler.
if(!is.nan(fit$fstat[1])){
for(i in 1:n.chains){
random.d = rnorm(length(d))
initial.values[[i]][["d"]] <- d + se.d * random.d
if(type == "random"){
## Draw a between-study variance from a scaled inverse-chi-square
## based on the regression's residual variance.
df <- fit$df[2]
random.ISigma <- rchisq(1, df)
sigma2 <- resid.var * df/random.ISigma
if(hy.prior[[1]] == "dunif"){
if(sqrt(sigma2) > network$prior.data$hy.prior.2){
stop("data has more variability than your prior does")
}
}
if(hy.prior[[1]] == "dgamma"){
initial.values[[i]][["prec"]] <- 1/sigma2
} else if(hy.prior[[1]] == "dunif" || hy.prior[[1]] == "dhnorm"){
initial.values[[i]][["sd"]] <- sqrt(sigma2)
}
# generate values for delta
## NOTE: this `delta` shadows the function argument from here on;
## `t` is read from the network (treatment index matrix) inside
## with(), not base::t.
delta = matrix(NA, nrow = nrow(t), ncol = ncol(t))
for(j in 2:ncol(delta)){
diff_d <- ifelse(is.na(d[t[,1]]), d[t[,j]], d[t[,j]] - d[t[,1]])
for(ii in 1:nrow(delta)){
if(!is.na(diff_d[ii])) delta[ii,j] = rnorm(1, mean = diff_d[ii], sd = sqrt(sigma2))
}
}
initial.values[[i]][["delta"]] <- delta
}
}
}
## Covariate-slope inits (beta1, beta2, ...), jittered like d above.
if (!is.null(covariate)) {
if(!is.nan(fit2$fstat[1])){
for(i in 1:n.chains){
random.slope <- matrix(rnorm(dim(slope)[1]*dim(slope)[2]),dim(slope))
for(j in 1:dim(covariate)[2]){
initial.values[[i]][[paste("beta", j, sep = "")]] = slope[,j] + se.slope[,j] * random.slope[,j]
}
}
}
}
## Baseline-risk slope inits (b_bl), same jitter scheme.
if(baseline != "none"){
if(!is.nan(fit3$fstat[1])){
for(i in 1:n.chains){
random.baseline = rnorm(length(baseline.slope))
initial.values[[i]][["b_bl"]] = baseline.slope + baseline.se.slope * random.baseline
}
}
}
return(initial.values)
})
}
############################################ multinomial inits functions
# Generate per-chain initial values for a multinomial-response network
# meta-analysis. Empirical logits (category k vs. reference category 1) are
# computed from the (possibly imputed) count matrix; per-category treatment
# contrasts d are estimated by least squares on a study/treatment design
# matrix, and chain-specific starting values are drawn around the estimates.
#
# Runs inside with(network, ...): nstudy, ncat, ntreat, na (arms per study),
# b.id (base-arm flag), Treat, Outcomes, miss.patterns, covariate,
# covariate.model, baseline, type, and t (a study-by-arm treatment matrix
# that shadows base::t -- TODO confirm against network.data()) all come
# from `network`.
#
# @param network list describing the network meta-analysis data
# @param n.chains number of MCMC chains to produce initial values for
# @return list of length n.chains; each element is a named list of
#   starting values (Eta, and when estimable: d, prec, delta, beta*, b_bl)
multinomial.inits <- function(network, n.chains)
{
with(network,{
# Impute partially observed counts unless there is only one (complete)
# missingness pattern.
if (length(miss.patterns[[1]])!= 1){
Dimputed <- multi.impute.data(network)
} else{
Dimputed <- Outcomes
}
# Continuity correction: keeps logits and their SEs finite for zero cells.
Dimputed = Dimputed + 0.5
logits <- as.matrix(log(Dimputed[, -1]) - log(Dimputed[, 1]))
se.logits <- as.matrix(sqrt(1/Dimputed[, -1] + 1/Dimputed[, 1]))
# Study baselines (Eta): logits of each study's base arm; column 1
# (reference category) is deliberately left NA.
Eta <- se.Eta <- matrix(NA, nstudy, ncat)
Eta[,2:ncat] <- logits[b.id,]
se.Eta[,2:ncat] <- se.logits[b.id,]
# Relative effects: each arm's logits minus its study's base-arm logits.
delta <- logits - apply(as.matrix(Eta[, -1]), 2, rep, times = na)
rows.of.basetreat <- seq(dim(as.matrix(delta))[1])*as.numeric(b.id)
delta <- delta[-rows.of.basetreat,,drop=F] # Eliminate base treatment arms
###################### Using delta, Eta, and se.Eta make initial values
y <- delta # dependent variable for regression (part of Delta)
d <- se.d <- matrix(NA, length(unique(Treat)), ncat - 1)
resid.var <- rep(NA, ncat -1)
base.tx <- Treat[b.id] # base treatment for N studies
end.Study <- c(0, cumsum(na)) # end row number of each trial
rows <- end.Study - seq(0, nstudy) # end number of each trial not including base treatment arms
design.mat <- matrix(0, sum(na) - nstudy, ntreat) # no. non-base arms x #txs
# Each non-base-arm row gets -1 in its study's base-treatment column and
# +1 in its own treatment column (treatment-contrast coding).
for (i in seq(nstudy)){
studytx <- Treat[(end.Study[i]+1):end.Study[i+1]] #treatments in ith Study
nonbase.tx <- studytx[studytx!=base.tx[i]] #non-baseline treatments for ith Study
design.mat[(1+rows[i]):rows[i+1],base.tx[i]] <- -1
for (j in seq(length(nonbase.tx)))
design.mat[j+rows[i],nonbase.tx[j]] <- 1
}
# Drop treatment 1 (reference); its effect is fixed at 0.
design.mat <- design.mat[,-1,drop=F]
# One least-squares fit per non-reference outcome category.
for(k in 1:(ncat - 1)){
fit <- summary(lm(y[,k] ~ design.mat - 1))
d[-1,k] <- coef(fit)[1:(ntreat-1), 1]
se.d[-1,k] <- coef(fit)[1:(ntreat-1), 2]
resid.var[k] <- fit$sigma^2
}
# covariate: rough slope estimates (one regression per covariate/category)
if(!is.null(covariate)){
x.cen <- matrix(0, nrow = sum(na), ncol = dim(covariate)[2])
for(i in 1:dim(covariate)[2]){
x.cen[,i] <- rep(covariate[,i], na)
}
# Drop base arms and center each covariate column.
x.cen <- x.cen[-seq(dim(x.cen)[1])[b.id],,drop=F]
x.cen <- scale(x.cen, scale = FALSE)
slope <- se.slope <- array(NA, c(ntreat, dim(covariate)[2], ncat - 1))
for(i in 1:dim(covariate)[2]){
for(k in 1:(ncat-1)){
fit2 <- if(covariate.model == "independent" || covariate.model == "exchangeable"){
summary(lm(y[,k] ~ design.mat:x.cen[,i] - 1))
} else if(covariate.model == "common"){
summary(lm(y[,k] ~ x.cen[,i] - 1))
}
slope[-1,i,k] <- coef(fit2)[,1]
se.slope[-1,i,k] <- coef(fit2)[,2]
}
}
}
# baseline: slopes of relative effects on (centered) study baseline risk
if(baseline != "none"){
baseline.cen <- apply(as.matrix(Eta[, -1]), 2, rep, times = na)
baseline.cen <- baseline.cen[-seq(dim(baseline.cen)[1])[b.id],]
baseline.cen <- scale(baseline.cen, scale = FALSE)
baseline.slope <- baseline.se.slope <- matrix(nrow = ntreat, ncol = ncat -1)
for(k in 1:(ncat -1)){
fit3 <- if(baseline == "common" || baseline == "exchangeable"){
summary(lm(y[,k] ~ baseline.cen[,k] -1))
} else if(baseline == "independent"){
summary(lm(y[,k] ~ design.mat:baseline.cen[,k] - 1))
}
baseline.slope[-1, k] <- coef(fit3)[,1]
baseline.se.slope[-1, k] <- coef(fit3)[,2]
}
}
################################################
# Draw chain-specific starting values around the empirical estimates.
initial.values = list()
for(i in 1:n.chains){
initial.values[[i]] = list()
}
for(i in 1:n.chains){
random.Eta <- matrix(rnorm(dim(Eta)[1]*dim(Eta)[2]),dim(Eta)[1],dim(Eta)[2])
initial.values[[i]][["Eta"]] <- Eta + se.Eta * random.Eta
}
# fit$fstat partially matches summary.lm's "fstatistic"; it is NaN when the
# regression had no residual information, in which case d/prec/delta inits
# are skipped and JAGS-generated defaults are used instead (presumably --
# verify against the sampler call).
if(!is.nan(fit$fstat[1])){
for(i in 1:n.chains){
random.d = matrix(rnorm(dim(d)[1]*dim(d)[2]),dim(d)[1],dim(d)[2])
initial.values[[i]][["d"]] = d + se.d * random.d
if(type == "random"){
# Scaled inverse chi-square draw around the residual variance.
df <- fit$df[2]
random.ISigma <- rchisq(1, df)
sigma2 <- resid.var * df/random.ISigma
initial.values[[i]][["prec"]] <- 1/sigma2 * diag(ncat - 1)
# delta starting values are only generated for two-arm networks.
if(max(na) == 2){
delta <- array(NA, dim = c(nstudy, max(na), ncat))
for(j in 2:max(na)){
for(m in 1:(ncat-1)){
diff_d <- ifelse(is.na(d[t[,1],m]), d[t[,j],m], d[t[,j],m] - d[t[,1],m])
for(ii in 1:nstudy){
if(!is.na(diff_d[ii])) delta[ii,j,m+1] <- rnorm(1, mean = diff_d[ii], sd = sqrt(sigma2))
}
}
}
initial.values[[i]][["delta"]] <- delta
}
}
}
}
if (!is.null(covariate)) {
if(!is.nan(fit2$fstat[1])){
for(i in 1:n.chains){
random.slope <- array(rnorm(dim(slope)[1]*dim(slope)[2]*dim(slope)[3]),dim(slope))
for(j in 1:dim(covariate)[2]){
initial.values[[i]][[paste("beta", j, sep = "")]] = slope[,j,] + se.slope[,j,] * random.slope[,j,]
}
}
}
}
if(baseline != "none"){
if(!is.nan(fit3$fstat[1])){
for(i in 1:n.chains){
random.baseline = matrix(rnorm(dim(baseline.slope)[1]*dim(baseline.slope)[2]),dim(baseline.slope))
initial.values[[i]][["b_bl"]] = baseline.slope + baseline.se.slope * random.baseline
}
}
}
return(initial.values)
})
}
# Impute a complete study-by-category count matrix from partially observed
# multinomial outcomes, distributing observed partial sums across the
# categories they cover.
#
# @param network list; fields used: npattern, pattern, nrow, D (raw counts),
#   N (arm totals), ncat, miss.patterns (list: per-pattern column specs and
#   a parameter/aggregation matrix)
# @return matrix with dim(D)[1] rows and ncat columns of imputed counts
multi.impute.data <- function(network)
{
#Take partial sums by study and allocate them to missing outcomes according to either defined category probabilities or to probabilities computed empirically from available data. Empirical scheme first estimates allocation probabilities based on complete data and then updates by successive missing data patterns.
#
#1. Fill in all data for all outcomes with complete information
#2. Pull off summed outcome columns and back out the known data (e.g. if one type of death count known, subtract this from total deaths)
#3. Renormalize imputation probabilities among outcomes with missing values
#4. Split summed outcome categories by imputation probabilities for each sum
#5. For each outcome category, average the imputed values gotten from each partial sum
#6. Apply correction factor to ensure that sum of imputed values add up to total to be imputed
#
with(network,{
# Row indices of D belonging to each missingness pattern.
rows.all = vector("list", length = npattern)
for(i in seq(npattern)){
rows.all[[i]] = seq(nrow)[pattern == levels(pattern)[i]]
}
Dimputed = matrix(NA,dim(D)[1],ncat)
# NOTE(review): `count` is assigned but never read in this function.
count = 0
# Start from uniform allocation probabilities; updated empirically after
# each pattern is processed (see end of loop).
imputed.prop = rep(1/ncat,ncat)
for (i in seq(length(miss.patterns[[1]]))) {
rows = rows.all[[i]] #data rows in missing data pattern
cols.data = miss.patterns[[1]][[i]][[2]] #data columns in first combo of missing data pattern
is.complete.cols = cols.data %in% seq(ncat) #which data columns are complete
if (any(is.complete.cols)) {
complete.cols = cols.data[is.complete.cols] #col no. of complete cols
incomplete.cols = cols.data[!is.complete.cols] #col nos. of incomplete cols
Dimputed[rows, complete.cols] = D[rows, complete.cols] #Put in complete data
}
else
incomplete.cols = cols.data
if (!all(is.complete.cols)) { #If some columns with missing data
pmat = miss.patterns[[2]][incomplete.cols,,drop=F] #Parameters corresponding to incomplete cols
if (any(is.complete.cols)) {
sums.to.split = D[rows, incomplete.cols, drop=F] - D[rows, complete.cols, drop=F]%*%t(pmat[, complete.cols,drop=F]) #back out known data
pmat[,complete.cols] = 0 #set backed out columns to zero
imputed.prop[complete.cols] = 0 #set imputation probabilities for complete data cols to zero
}
else
sums.to.split = D[rows, incomplete.cols, drop=F]
imputed.prop = imputed.prop/sum(imputed.prop) #renormalize
for (j in seq(length(rows))) {
# x0: each partial sum replicated across the categories it covers;
# x1: allocation weight of each category within each partial sum.
x0 = matrix(rep(sums.to.split [j,], each=ncat),ncol=length(incomplete.cols))*t(pmat)
x1 = imputed.prop*t(pmat)
# NOTE(review): `ncol=` is not an argument of rep(); it appears to be
# silently ignored here -- confirm intent.
x2 = x0*x1/rep(apply(x1,2,sum),each=ncat,ncol=dim(pmat)[1])
x2[x2==0] = NA
x3 = apply(x2, 1, mean, na.rm=T) # average across potential imputed values
x5 = (N[rows[j]]- sum(Dimputed[rows[j],], na.rm=T))/sum(x3, na.rm=T) #Factor to adjust imputations
x6 = round(x3*x5) # Apply factor to imputations
if (any(is.complete.cols))
Dimputed[rows[j],seq(ncat)[-complete.cols]] = x6[!is.na(x6)]
else
Dimputed[rows[j],seq(ncat)] = x6[!is.na(x6)]
Dimputed[rows[j],1] = Dimputed[rows[j],1] + N[rows[j]] - sum(Dimputed[rows[j],]) #Correction for rounding so totals add
}
}
running.total = apply(Dimputed,2,sum,na.rm=T)
imputed.prop = running.total/sum(running.total) # Proportion of events in each category
}
return(Dimputed)
})
}
| /R/network.inits.r | no_license | Dr-Dong/network-meta | R | false | false | 14,057 | r | network.inits <- function(network, n.chains){
# Dispatch on the outcome type recorded in the network object and delegate
# to the matching initial-value generator.
response <- network$response
# NOTE(review): if `response` is none of the three supported values, the
# if/else chain evaluates to NULL and NULL is returned silently --
# presumably `response` is validated upstream; confirm.
inits <- if(response == "multinomial"){
multinomial.inits(network, n.chains)
} else if(response == "binomial"){
binomial.inits(network, n.chains)
} else if(response == "normal"){
normal.inits(network, n.chains)
}
return(inits)
}
# Initial values for a normal-response network: outcomes are already on the
# modelling scale, so study baselines (Eta) and their standard errors are
# read directly off each study's base arm.
#
# @param network list; fields used here: Outcomes, SE, b.id (logical base-arm
#   flag), na (number of arms per study)
# @param n.chains number of MCMC chains to generate initial values for
# @return list of per-chain initial-value lists (see make.inits)
normal.inits <- function(network, n.chains){
  with(network, {
    Eta <- Outcomes[b.id]    # study-specific baseline effects
    se.Eta <- SE[b.id]       # and their standard errors
    # Relative effects vs. each study's base arm; base arms then dropped.
    # (Removed a redundant `delta <- Outcomes` assignment whose value was
    # never read before being overwritten here.)
    delta <- Outcomes - rep(Eta, times = na)
    delta <- delta[!b.id,]
    inits <- make.inits(network, n.chains, delta, Eta, se.Eta)
    return(inits)
  })
}
# Initial values for a binomial-response network: convert event counts to
# empirical logits, derive study baselines (Eta) from each study's base arm,
# and pass the arm-level logit contrasts to make.inits().
#
# @param network list; fields used here: Outcomes (event counts), N (arm
#   sizes), b.id (logical base-arm flag), na (number of arms per study)
# @param n.chains number of MCMC chains to generate initial values for
# @return list of per-chain initial-value lists (see make.inits)
binomial.inits <- function(network, n.chains){
  with(network, {
    # Continuity correction: add 0.5 events and 1 subject per arm so the
    # empirical logits and their standard errors are always finite.
    events <- Outcomes + 0.5
    trials <- N + 1
    prop <- events/trials
    logits <- log(prop/(1 - prop))
    se.logits <- sqrt(1/events + 1/(trials - events))
    # Study-specific baseline effects come from the base arm of each study.
    Eta <- logits[b.id]
    se.Eta <- se.logits[b.id]
    # Relative effects: logit differences vs. the base arm; base arms dropped.
    delta <- logits - rep(Eta, times = na)
    delta <- delta[!b.id,]
    make.inits(network, n.chains, delta, Eta, se.Eta)
  })
}
# Shared worker for normal/binomial initial values. Fits a least-squares
# regression of the arm-level relative effects on a study/treatment contrast
# design matrix to obtain treatment effects d (and, optionally, covariate and
# baseline-risk slopes), then draws chain-specific starting values around the
# estimates.
#
# Runs inside with(network, ...): Treat, b.id, na, nstudy, ntreat, covariate,
# covariate.model, baseline, type, hy.prior, and t (a study-by-arm treatment
# matrix that shadows base::t -- TODO confirm against network.data()) come
# from `network`.
#
# @param network list describing the network meta-analysis data
# @param n.chains number of MCMC chains
# @param delta arm-level relative effects (base arms already removed)
# @param Eta study baseline estimates
# @param se.Eta standard errors of Eta
# @return list of length n.chains of named initial-value lists
make.inits <- function(network, n.chains, delta, Eta, se.Eta){
with(network,{
# dependent variable for regression
y <- delta
# design matrix
base.tx <- Treat[b.id] # base treatment for N studies
end.Study <- c(0, cumsum(na)) # end row number of each trial
rows <- end.Study - seq(0, nstudy) # end number of each trial not including base treatment arms
design.mat <- matrix(0, sum(na) - nstudy, ntreat) # no. non-base arms x #txs
# Each non-base-arm row gets -1 in its study's base-treatment column and
# +1 in its own treatment column (treatment-contrast coding).
for (i in seq(nstudy)){
studytx <- Treat[(end.Study[i]+1):end.Study[i+1]] #treatments in ith Study
nonbase.tx <- studytx[studytx!=base.tx[i]] #non-baseline treatments for ith Study
design.mat[(1+rows[i]):rows[i+1],base.tx[i]] <- -1
for (j in seq(length(nonbase.tx)))
design.mat[j+rows[i],nonbase.tx[j]] <- 1
}
# Drop treatment 1 (reference); its effect is fixed at 0.
design.mat <- design.mat[,-1,drop=F]
fit <- summary(lm(y ~ design.mat - 1))
d <- se.d <- rep(NA, ntreat)
d[-1] <- coef(fit)[,1]
se.d[-1] <- coef(fit)[,2]
resid.var <- fit$sigma^2
# covariate: rough slope estimates, one regression per covariate column
if(!is.null(covariate)) {
x.cen = matrix(0, nrow = sum(na), ncol = dim(covariate)[2])
for(i in 1:dim(covariate)[2]){
x.cen[,i] <- rep(covariate[,i], times = na)
}
# Drop base arms and center each covariate column.
x.cen <- x.cen[-seq(dim(x.cen)[1])[b.id],,drop=F]
x.cen <- scale(x.cen, scale = FALSE)
slope <- se.slope <- array(NA, c(ntreat, dim(covariate)[2]))
for(i in 1:dim(covariate)[2]){
fit2 <- if(covariate.model == "common" || covariate.model == "exchangeable"){
summary(lm(y ~ x.cen[,i] -1))
} else if(covariate.model == "independent"){
summary(lm(y ~ design.mat:x.cen[,i] - 1))
}
slope[-1,i] <- coef(fit2)[,1]
se.slope[-1,i] <- coef(fit2)[,2]
}
}
# baseline: slope of relative effects on (centered) study baseline risk
if(baseline != "none"){
baseline.cen <- rep(Eta, na)
baseline.cen <- baseline.cen[-seq(length(baseline.cen))[b.id]]
baseline.cen <- scale(baseline.cen, scale = FALSE)
baseline.slope <- baseline.se.slope <- rep(NA, ntreat)
fit3 <- if(baseline == "common" || baseline == "exchangeable"){
summary(lm(y ~ baseline.cen -1))
} else if(baseline == "independent"){
summary(lm(y ~ design.mat:baseline.cen - 1))
}
baseline.slope[-1] <- coef(fit3)[,1]
baseline.se.slope[-1] <- coef(fit3)[,2]
}
############# Generate initial values
initial.values = list()
for(i in 1:n.chains){
initial.values[[i]] = list()
}
for(i in 1:n.chains){
random.Eta <- rnorm(length(Eta))
initial.values[[i]][["Eta"]] <- Eta + se.Eta * random.Eta
}
# fit$fstat partially matches summary.lm's "fstatistic"; it is NaN when the
# regression carried no residual information, in which case d/prec/sd/delta
# inits are skipped.
if(!is.nan(fit$fstat[1])){
for(i in 1:n.chains){
random.d = rnorm(length(d))
initial.values[[i]][["d"]] <- d + se.d * random.d
if(type == "random"){
# Scaled inverse chi-square draw around the residual variance.
df <- fit$df[2]
random.ISigma <- rchisq(1, df)
sigma2 <- resid.var * df/random.ISigma
# A uniform hyperprior cannot support an sd draw above its upper bound.
if(hy.prior[[1]] == "dunif"){
if(sqrt(sigma2) > network$prior.data$hy.prior.2){
stop("data has more variability than your prior does")
}
}
# Parameterize the heterogeneity init to match the prior family.
if(hy.prior[[1]] == "dgamma"){
initial.values[[i]][["prec"]] <- 1/sigma2
} else if(hy.prior[[1]] == "dunif" || hy.prior[[1]] == "dhnorm"){
initial.values[[i]][["sd"]] <- sqrt(sigma2)
}
# generate values for delta
delta = matrix(NA, nrow = nrow(t), ncol = ncol(t))
for(j in 2:ncol(delta)){
diff_d <- ifelse(is.na(d[t[,1]]), d[t[,j]], d[t[,j]] - d[t[,1]])
for(ii in 1:nrow(delta)){
if(!is.na(diff_d[ii])) delta[ii,j] = rnorm(1, mean = diff_d[ii], sd = sqrt(sigma2))
}
}
initial.values[[i]][["delta"]] <- delta
}
}
}
if (!is.null(covariate)) {
if(!is.nan(fit2$fstat[1])){
for(i in 1:n.chains){
random.slope <- matrix(rnorm(dim(slope)[1]*dim(slope)[2]),dim(slope))
for(j in 1:dim(covariate)[2]){
initial.values[[i]][[paste("beta", j, sep = "")]] = slope[,j] + se.slope[,j] * random.slope[,j]
}
}
}
}
if(baseline != "none"){
if(!is.nan(fit3$fstat[1])){
for(i in 1:n.chains){
random.baseline = rnorm(length(baseline.slope))
initial.values[[i]][["b_bl"]] = baseline.slope + baseline.se.slope * random.baseline
}
}
}
return(initial.values)
})
}
############################################ multinomial inits functions
multinomial.inits <- function(network, n.chains)
{
with(network,{
if (length(miss.patterns[[1]])!= 1){
Dimputed <- multi.impute.data(network)
} else{
Dimputed <- Outcomes
}
Dimputed = Dimputed + 0.5
logits <- as.matrix(log(Dimputed[, -1]) - log(Dimputed[, 1]))
se.logits <- as.matrix(sqrt(1/Dimputed[, -1] + 1/Dimputed[, 1]))
Eta <- se.Eta <- matrix(NA, nstudy, ncat)
Eta[,2:ncat] <- logits[b.id,]
se.Eta[,2:ncat] <- se.logits[b.id,]
delta <- logits - apply(as.matrix(Eta[, -1]), 2, rep, times = na)
rows.of.basetreat <- seq(dim(as.matrix(delta))[1])*as.numeric(b.id)
delta <- delta[-rows.of.basetreat,,drop=F] # Eliminate base treatment arms
###################### Using delta, Eta, and se.Eta make initial values
y <- delta # dependent variable for regression (part of Delta)
d <- se.d <- matrix(NA, length(unique(Treat)), ncat - 1)
resid.var <- rep(NA, ncat -1)
base.tx <- Treat[b.id] # base treatment for N studies
end.Study <- c(0, cumsum(na)) # end row number of each trial
rows <- end.Study - seq(0, nstudy) # end number of each trial not including base treatment arms
design.mat <- matrix(0, sum(na) - nstudy, ntreat) # no. non-base arms x #txs
for (i in seq(nstudy)){
studytx <- Treat[(end.Study[i]+1):end.Study[i+1]] #treatments in ith Study
nonbase.tx <- studytx[studytx!=base.tx[i]] #non-baseline treatments for ith Study
design.mat[(1+rows[i]):rows[i+1],base.tx[i]] <- -1
for (j in seq(length(nonbase.tx)))
design.mat[j+rows[i],nonbase.tx[j]] <- 1
}
design.mat <- design.mat[,-1,drop=F]
for(k in 1:(ncat - 1)){
fit <- summary(lm(y[,k] ~ design.mat - 1))
d[-1,k] <- coef(fit)[1:(ntreat-1), 1]
se.d[-1,k] <- coef(fit)[1:(ntreat-1), 2]
resid.var[k] <- fit$sigma^2
}
# covariate
if(!is.null(covariate)){
x.cen <- matrix(0, nrow = sum(na), ncol = dim(covariate)[2])
for(i in 1:dim(covariate)[2]){
x.cen[,i] <- rep(covariate[,i], na)
}
x.cen <- x.cen[-seq(dim(x.cen)[1])[b.id],,drop=F]
x.cen <- scale(x.cen, scale = FALSE)
slope <- se.slope <- array(NA, c(ntreat, dim(covariate)[2], ncat - 1))
for(i in 1:dim(covariate)[2]){
for(k in 1:(ncat-1)){
fit2 <- if(covariate.model == "independent" || covariate.model == "exchangeable"){
summary(lm(y[,k] ~ design.mat:x.cen[,i] - 1))
} else if(covariate.model == "common"){
summary(lm(y[,k] ~ x.cen[,i] - 1))
}
slope[-1,i,k] <- coef(fit2)[,1]
se.slope[-1,i,k] <- coef(fit2)[,2]
}
}
}
# baseline
if(baseline != "none"){
baseline.cen <- apply(as.matrix(Eta[, -1]), 2, rep, times = na)
baseline.cen <- baseline.cen[-seq(dim(baseline.cen)[1])[b.id],]
baseline.cen <- scale(baseline.cen, scale = FALSE)
baseline.slope <- baseline.se.slope <- matrix(nrow = ntreat, ncol = ncat -1)
for(k in 1:(ncat -1)){
fit3 <- if(baseline == "common" || baseline == "exchangeable"){
summary(lm(y[,k] ~ baseline.cen[,k] -1))
} else if(baseline == "independent"){
summary(lm(y[,k] ~ design.mat:baseline.cen[,k] - 1))
}
baseline.slope[-1, k] <- coef(fit3)[,1]
baseline.se.slope[-1, k] <- coef(fit3)[,2]
}
}
################################################
initial.values = list()
for(i in 1:n.chains){
initial.values[[i]] = list()
}
for(i in 1:n.chains){
random.Eta <- matrix(rnorm(dim(Eta)[1]*dim(Eta)[2]),dim(Eta)[1],dim(Eta)[2])
initial.values[[i]][["Eta"]] <- Eta + se.Eta * random.Eta
}
if(!is.nan(fit$fstat[1])){
for(i in 1:n.chains){
random.d = matrix(rnorm(dim(d)[1]*dim(d)[2]),dim(d)[1],dim(d)[2])
initial.values[[i]][["d"]] = d + se.d * random.d
if(type == "random"){
df <- fit$df[2]
random.ISigma <- rchisq(1, df)
sigma2 <- resid.var * df/random.ISigma
initial.values[[i]][["prec"]] <- 1/sigma2 * diag(ncat - 1)
if(max(na) == 2){
delta <- array(NA, dim = c(nstudy, max(na), ncat))
for(j in 2:max(na)){
for(m in 1:(ncat-1)){
diff_d <- ifelse(is.na(d[t[,1],m]), d[t[,j],m], d[t[,j],m] - d[t[,1],m])
for(ii in 1:nstudy){
if(!is.na(diff_d[ii])) delta[ii,j,m+1] <- rnorm(1, mean = diff_d[ii], sd = sqrt(sigma2))
}
}
}
initial.values[[i]][["delta"]] <- delta
}
}
}
}
if (!is.null(covariate)) {
if(!is.nan(fit2$fstat[1])){
for(i in 1:n.chains){
random.slope <- array(rnorm(dim(slope)[1]*dim(slope)[2]*dim(slope)[3]),dim(slope))
for(j in 1:dim(covariate)[2]){
initial.values[[i]][[paste("beta", j, sep = "")]] = slope[,j,] + se.slope[,j,] * random.slope[,j,]
}
}
}
}
if(baseline != "none"){
if(!is.nan(fit3$fstat[1])){
for(i in 1:n.chains){
random.baseline = matrix(rnorm(dim(baseline.slope)[1]*dim(baseline.slope)[2]),dim(baseline.slope))
initial.values[[i]][["b_bl"]] = baseline.slope + baseline.se.slope * random.baseline
}
}
}
return(initial.values)
})
}
# (Duplicate record of multi.impute.data from this dataset dump.)
# Impute a complete study-by-category count matrix from partially observed
# multinomial outcomes by distributing observed partial sums across the
# categories they cover. Fields used from `network`: npattern, pattern,
# nrow, D, N, ncat, miss.patterns. Returns a dim(D)[1] x ncat count matrix.
multi.impute.data <- function(network)
{
#Take partial sums by study and allocate them to missing outcomes according to either defined category probabilities or to probabilities computed empirically from available data. Empirical scheme first estimates allocation probabilities based on complete data and then updates by successive missing data patterns.
#
#1. Fill in all data for all outcomes with complete information
#2. Pull off summed outcome columns and back out the known data (e.g. if one type of death count known, subtract this from total deaths)
#3. Renormalize imputation probabilities among outcomes with missing values
#4. Split summed outcome categories by imputation probabilities for each sum
#5. For each outcome category, average the imputed values gotten from each partial sum
#6. Apply correction factor to ensure that sum of imputed values add up to total to be imputed
#
with(network,{
# Row indices of D belonging to each missingness pattern.
rows.all = vector("list", length = npattern)
for(i in seq(npattern)){
rows.all[[i]] = seq(nrow)[pattern == levels(pattern)[i]]
}
Dimputed = matrix(NA,dim(D)[1],ncat)
# NOTE(review): `count` is assigned but never read in this function.
count = 0
# Start from uniform allocation probabilities; updated empirically below.
imputed.prop = rep(1/ncat,ncat)
for (i in seq(length(miss.patterns[[1]]))) {
rows = rows.all[[i]] #data rows in missing data pattern
cols.data = miss.patterns[[1]][[i]][[2]] #data columns in first combo of missing data pattern
is.complete.cols = cols.data %in% seq(ncat) #which data columns are complete
if (any(is.complete.cols)) {
complete.cols = cols.data[is.complete.cols] #col no. of complete cols
incomplete.cols = cols.data[!is.complete.cols] #col nos. of incomplete cols
Dimputed[rows, complete.cols] = D[rows, complete.cols] #Put in complete data
}
else
incomplete.cols = cols.data
if (!all(is.complete.cols)) { #If some columns with missing data
pmat = miss.patterns[[2]][incomplete.cols,,drop=F] #Parameters corresponding to incomplete cols
if (any(is.complete.cols)) {
sums.to.split = D[rows, incomplete.cols, drop=F] - D[rows, complete.cols, drop=F]%*%t(pmat[, complete.cols,drop=F]) #back out known data
pmat[,complete.cols] = 0 #set backed out columns to zero
imputed.prop[complete.cols] = 0 #set imputation probabilities for complete data cols to zero
}
else
sums.to.split = D[rows, incomplete.cols, drop=F]
imputed.prop = imputed.prop/sum(imputed.prop) #renormalize
for (j in seq(length(rows))) {
# x0: partial sums replicated across covered categories; x1: weights.
x0 = matrix(rep(sums.to.split [j,], each=ncat),ncol=length(incomplete.cols))*t(pmat)
x1 = imputed.prop*t(pmat)
# NOTE(review): `ncol=` is not an argument of rep(); it appears to be
# silently ignored here -- confirm intent.
x2 = x0*x1/rep(apply(x1,2,sum),each=ncat,ncol=dim(pmat)[1])
x2[x2==0] = NA
x3 = apply(x2, 1, mean, na.rm=T) # average across potential imputed values
x5 = (N[rows[j]]- sum(Dimputed[rows[j],], na.rm=T))/sum(x3, na.rm=T) #Factor to adjust imputations
x6 = round(x3*x5) # Apply factor to imputations
if (any(is.complete.cols))
Dimputed[rows[j],seq(ncat)[-complete.cols]] = x6[!is.na(x6)]
else
Dimputed[rows[j],seq(ncat)] = x6[!is.na(x6)]
Dimputed[rows[j],1] = Dimputed[rows[j],1] + N[rows[j]] - sum(Dimputed[rows[j],]) #Correction for rounding so totals add
}
}
running.total = apply(Dimputed,2,sum,na.rm=T)
imputed.prop = running.total/sum(running.total) # Proportion of events in each category
}
return(Dimputed)
})
}
|
## Integration tests for coloc: simulate matched quantitative and
## case-control GWAS summary statistics from the snpStats test data, then
## exercise sdY.est, process.dataset and coloc.abf.
## NOTE(review): the statements below are order-dependent after set.seed(42);
## reordering any rnorm/rbinom call would change the simulated data.
library(snpStats)
data(testdata)
## keep 101 SNPs, then drop rare / poorly called ones
X <- Autosomes[,101:201]
cs <- col.summary(X)
X <- X[, cs[,"MAF"]>0.05 & cs[,"Call.rate"]>0.9]
maf <- col.summary(X)[,"MAF"]
set.seed(42)
## quantitative trait
## trait driven by SNP 8 plus noise; per-SNP effect estimates via snpStats
Y<-rnorm(nrow(X),mean=as(X[,8],"numeric"),sd=4) + rnorm(nrow(X),sd=2)
eff<-snp.rhs.estimates(Y ~ 1, snp.data=X, family="gaussian")
beta.q <- sapply(eff@.Data, "[[", "beta")
vbeta.q <- sapply(eff@.Data, "[[", "Var.beta")
p.q <- pchisq(beta.q^2/vbeta.q,df=1,lower.tail=FALSE)
## estimate the trait SD from summary stats (internal helper under test)
sd.est <- suppressWarnings(coloc:::sdY.est(vbeta=vbeta.q, maf=maf, n=nrow(X)))
## case-control trait
cc <- rbinom(nrow(X),1, p=(1+as(X[,8],"numeric"))/4)
eff<-snp.rhs.estimates(cc ~ 1, snp.data=X, family="binomial")
beta.cc <- sapply(eff@.Data, "[[", "beta")
vbeta.cc <- sapply(eff@.Data, "[[", "Var.beta")
p.cc <- pchisq(beta.cc^2/vbeta.cc,df=1,lower.tail=FALSE)
## general things
## sdY.est should approximately recover the empirical SD of Y
test_that("sdY.est", {
expect_true(abs(sd.est - sd(Y)) < 0.1)
})
## Datasets in the four supported input forms: DQ/DCC carry coefficients,
## PQ/PCC carry p-values; PCC.bad mis-specifies s (the case proportion)
## as a case count and must be rejected by process.dataset.
DQ <- list(beta=beta.q,
varbeta=vbeta.q,
type="quant",
snp=colnames(X),
sdY=sd.est,
N=nrow(X))
DCC <- list(beta=beta.cc,
varbeta=vbeta.cc,
type="cc",
snp=colnames(X),
MAF=maf,
s=mean(cc),
N=nrow(X))
PQ <- list(pvalues=p.q,
type="quant",
MAF=maf,
snp=colnames(X),
sdY=sd.est,
N=nrow(X))
PCC <- list(pvalues=p.cc,
type="cc",
MAF=maf,
snp=colnames(X),
s=mean(cc),
N=nrow(X))
PCC.bad <- list(pvalues=p.cc,
type="cc",
MAF=maf,
snp=colnames(X),
s=sum(cc),
N=nrow(X))
## run coloc.abf on every combination of coefficient/p-value inputs
RESULTS <- list(dd = coloc.abf(dataset1=DQ,dataset2=DCC),
dp = coloc.abf(dataset1=DQ,dataset2=PCC),
pd = coloc.abf(dataset1=PQ,dataset2=DCC),
pp = coloc.abf(dataset1=PQ,dataset2=PCC))
lapply(RESULTS,"[[","summary")
test_that("process.dataset", {
expect_that(process.dataset(list(), ""), throws_error())
expect_that(process.dataset(list(beta=1,p=2,type="blah"), ""), throws_error())
expect_error(process.dataset(DQ,suffix=".q"), NA)
expect_error(process.dataset(DCC,suffix=".q"), NA)
expect_error(process.dataset(PQ,suffix=".q"), NA)
expect_error(process.dataset(PCC,suffix=".q"), NA)
expect_error(process.dataset(PCC.bad,suffix=".q"))
pd.cc <- process.dataset(DCC,suffix=".cc")
pd.q <- process.dataset(DQ,suffix=".q")
expect_is(pd.q,"data.frame")
expect_is(pd.cc,"data.frame")
expect_equal(nrow(pd.q),ncol(X))
expect_equal(nrow(pd.cc),ncol(X))
})
## coloc.abf with coefficients
## both traits share SNP 8 as the causal variant, so the H4 posterior
## (element 5 of the summary, after nsnps) should dominate
test_that("coloc.abf", {
expect_error(coloc.abf(dataset1=DQ,dataset2=DCC), NA)
result <- coloc.abf(dataset1=DQ,dataset2=DCC)
expect_true(which.max(result$summary[-1]) == 5)
expect_true(result$summary[1] == ncol(X))
})
## alternative test data
## colocdata<- read.table("inst/tests/test.txt", sep="\t", header=T)
## N <- 18124
## result <- coloc.abf(dataset1=list(beta=colocdata$beta.dataset1,
## varbeta=colocdata$varbeta.dataset1,
## type="quant",
## snp=colocdata$SNP,
## N=N),
## dataset2=list(beta=colocdata$beta.dataset1,
## varbeta=colocdata$varbeta.dataset1,
## type="quant",
## snp=colocdata$SNP,
## N=N),
## MAF=colocdata$MAF)
| /tests/testthat/test-abf.R | no_license | cgpu/coloc | R | false | false | 3,411 | r | library(snpStats)
## (Duplicate record of test-abf.R; its `library(snpStats)` line is fused
## into the preceding dataset-metadata row.)
## Integration tests for coloc: simulate matched quantitative and
## case-control GWAS summary statistics, then exercise sdY.est,
## process.dataset and coloc.abf. Statements are order-dependent after
## set.seed(42).
data(testdata)
## keep 101 SNPs, then drop rare / poorly called ones
X <- Autosomes[,101:201]
cs <- col.summary(X)
X <- X[, cs[,"MAF"]>0.05 & cs[,"Call.rate"]>0.9]
maf <- col.summary(X)[,"MAF"]
set.seed(42)
## quantitative trait
## trait driven by SNP 8 plus noise; per-SNP effect estimates via snpStats
Y<-rnorm(nrow(X),mean=as(X[,8],"numeric"),sd=4) + rnorm(nrow(X),sd=2)
eff<-snp.rhs.estimates(Y ~ 1, snp.data=X, family="gaussian")
beta.q <- sapply(eff@.Data, "[[", "beta")
vbeta.q <- sapply(eff@.Data, "[[", "Var.beta")
p.q <- pchisq(beta.q^2/vbeta.q,df=1,lower.tail=FALSE)
## estimate the trait SD from summary stats (internal helper under test)
sd.est <- suppressWarnings(coloc:::sdY.est(vbeta=vbeta.q, maf=maf, n=nrow(X)))
## case-control trait
cc <- rbinom(nrow(X),1, p=(1+as(X[,8],"numeric"))/4)
eff<-snp.rhs.estimates(cc ~ 1, snp.data=X, family="binomial")
beta.cc <- sapply(eff@.Data, "[[", "beta")
vbeta.cc <- sapply(eff@.Data, "[[", "Var.beta")
p.cc <- pchisq(beta.cc^2/vbeta.cc,df=1,lower.tail=FALSE)
## general things
## sdY.est should approximately recover the empirical SD of Y
test_that("sdY.est", {
expect_true(abs(sd.est - sd(Y)) < 0.1)
})
## Datasets in the four supported input forms: DQ/DCC carry coefficients,
## PQ/PCC carry p-values; PCC.bad mis-specifies s (the case proportion)
## as a case count and must be rejected by process.dataset.
DQ <- list(beta=beta.q,
varbeta=vbeta.q,
type="quant",
snp=colnames(X),
sdY=sd.est,
N=nrow(X))
DCC <- list(beta=beta.cc,
varbeta=vbeta.cc,
type="cc",
snp=colnames(X),
MAF=maf,
s=mean(cc),
N=nrow(X))
PQ <- list(pvalues=p.q,
type="quant",
MAF=maf,
snp=colnames(X),
sdY=sd.est,
N=nrow(X))
PCC <- list(pvalues=p.cc,
type="cc",
MAF=maf,
snp=colnames(X),
s=mean(cc),
N=nrow(X))
PCC.bad <- list(pvalues=p.cc,
type="cc",
MAF=maf,
snp=colnames(X),
s=sum(cc),
N=nrow(X))
## run coloc.abf on every combination of coefficient/p-value inputs
RESULTS <- list(dd = coloc.abf(dataset1=DQ,dataset2=DCC),
dp = coloc.abf(dataset1=DQ,dataset2=PCC),
pd = coloc.abf(dataset1=PQ,dataset2=DCC),
pp = coloc.abf(dataset1=PQ,dataset2=PCC))
lapply(RESULTS,"[[","summary")
test_that("process.dataset", {
expect_that(process.dataset(list(), ""), throws_error())
expect_that(process.dataset(list(beta=1,p=2,type="blah"), ""), throws_error())
expect_error(process.dataset(DQ,suffix=".q"), NA)
expect_error(process.dataset(DCC,suffix=".q"), NA)
expect_error(process.dataset(PQ,suffix=".q"), NA)
expect_error(process.dataset(PCC,suffix=".q"), NA)
expect_error(process.dataset(PCC.bad,suffix=".q"))
pd.cc <- process.dataset(DCC,suffix=".cc")
pd.q <- process.dataset(DQ,suffix=".q")
expect_is(pd.q,"data.frame")
expect_is(pd.cc,"data.frame")
expect_equal(nrow(pd.q),ncol(X))
expect_equal(nrow(pd.cc),ncol(X))
})
## coloc.abf with coefficients
## both traits share SNP 8 as the causal variant, so the H4 posterior
## (element 5 of the summary, after nsnps) should dominate
test_that("coloc.abf", {
expect_error(coloc.abf(dataset1=DQ,dataset2=DCC), NA)
result <- coloc.abf(dataset1=DQ,dataset2=DCC)
expect_true(which.max(result$summary[-1]) == 5)
expect_true(result$summary[1] == ncol(X))
})
## alternative test data
## colocdata<- read.table("inst/tests/test.txt", sep="\t", header=T)
## N <- 18124
## result <- coloc.abf(dataset1=list(beta=colocdata$beta.dataset1,
## varbeta=colocdata$varbeta.dataset1,
## type="quant",
## snp=colocdata$SNP,
## N=N),
## dataset2=list(beta=colocdata$beta.dataset1,
## varbeta=colocdata$varbeta.dataset1,
## type="quant",
## snp=colocdata$SNP,
## N=N),
## MAF=colocdata$MAF)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin.R
\name{smif.admin}
\alias{smif.admin}
\alias{admin}
\alias{timeframe}
\alias{time.frame}
\alias{showUSER}
\alias{.setAdmin}
\alias{.getAdmin}
\alias{setTimeFrame}
\alias{getTimeFrame}
\alias{getTimeFrame.months}
\alias{.getFrom}
\alias{.getTo}
\alias{.showUSER}
\title{Administration functions for smif.package}
\usage{
.setAdmin(x = TRUE)
.getAdmin()
setTimeFrame(x = 5L)
getTimeFrame(x = 1L)
getTimeFrame.months(x = 12L)
.getFrom(x = 12L)
.getTo()
.showUSER(...)
}
\description{
Used by various functions to enable or disable advanced functionality.
Functions are generally internal, as are advanced variables.
}
\details{
\code{.setAdmin} also controls the values of the global \code{".advanced"} variable,
as well as the global settings for a reserved version of option("verbose").
\code{.getAdmin} should be used to retrieve the current status of the \code{"smif.smif.admin"}
global option.
TimeFrame is the number of years used by the import code.
}
\keyword{internal}
| /man/smif.admin.Rd | no_license | alecthekulak/smif.package | R | false | true | 1,104 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin.R
\name{smif.admin}
\alias{smif.admin}
\alias{admin}
\alias{timeframe}
\alias{time.frame}
\alias{showUSER}
\alias{.setAdmin}
\alias{.getAdmin}
\alias{setTimeFrame}
\alias{getTimeFrame}
\alias{getTimeFrame.months}
\alias{.getFrom}
\alias{.getTo}
\alias{.showUSER}
\title{Administration functions for smif.package}
\usage{
.setAdmin(x = TRUE)
.getAdmin()
setTimeFrame(x = 5L)
getTimeFrame(x = 1L)
getTimeFrame.months(x = 12L)
.getFrom(x = 12L)
.getTo()
.showUSER(...)
}
\description{
Used by various functions to enable or disable advanced functionality.
Functions are generally internal, as are advanced variables.
}
\details{
\code{.setAdmin} also controls the values of the global \code{".advanced"} variable,
as well as the global settings for a reserved version of option("verbose").
\code{.getAdmin} should be used to retrieve current statis of the \code{"smif.smif.admin"}
global option.
TimeFrame is number of years used for import code.
}
\keyword{internal}
|
plot_sample <- function(res, axes = c(1, 2), main = "", lab.size = 4) {
  # Scatter plot of sample coordinates from a FactoMineR result:
  # individual coordinates for PCA/MFA/MCA, row coordinates for CA,
  # with repelled labels and axis titles carrying the explained variance.
  #
  # @param res      result object from FactoMineR::PCA, MFA, CA or MCA.
  # @param axes     integer vector of length 2: dimensions to plot.
  # @param main     plot title.
  # @param lab.size text size passed to ggrepel::geom_text_repel().
  # @return a ggplot object.
  #
  # The original implementation had three branches (PCA/MFA, CA, MCA) whose
  # plotting code was byte-identical -- the MCA branch even used the same
  # $ind$coord slot as PCA/MFA. They are consolidated into a single path;
  # only the coordinate slot differs by class.
  if (!inherits(res, c("PCA", "MFA", "CA", "MCA"))) {
    # Previously an unknown class failed later with "object 'res_plot' not
    # found"; fail early with a clear message instead.
    stop("`res` must be a FactoMineR PCA, MFA, CA or MCA result")
  }
  # Axis labels: "Dim k (xx.xx%)" from the percentage-of-variance column
  # of the eigenvalue table.
  dims <- res %>%
    `[[`("eig") %>%
    as.data.frame() %>%
    select(2) %>%
    pull() %>%
    round(2) %>%
    paste0("Dim ", 1:length(.), " (", ., "%)")
  # CA stores the plotted coordinates under $row; the others under $ind.
  slot <- if (inherits(res, "CA")) "row" else "ind"
  df_main <- res %>%
    `[[`(c(slot, "coord")) %>%
    `colnames<-`(paste0("Dim", 1:ncol(.))) %>%
    as_tibble(rownames = "Label") %>%
    select(-Label, Label)
  res_plot <- df_main %>%
    ggplot(aes_string(x = names(df_main)[axes[1]], y = names(df_main)[axes[2]])) +
    geom_point() +
    geom_text_repel(aes(label = Label), size = lab.size) +
    geom_vline(xintercept = 0, lty = 2, col = "grey40") +
    geom_hline(yintercept = 0, lty = 2, col = "grey40") +
    labs(
      x = dims[axes[1]],
      y = dims[axes[2]],
      title = main
    ) +
    # coord_fixed(ratio = 3 / 4) +
    theme_minimal() +
    theme(
      panel.background = element_rect(fill = "white"),
      panel.grid = element_blank()
    )
  return(res_plot)
}
| /helpers/plot_sample.R | permissive | SensolutionID/sensehub_basic | R | false | false | 2,680 | r | plot_sample <- function(res, axes = c(1, 2), main = "", lab.size = 4) {
# (Duplicate record of plot_sample's body; its function header is fused into
# the preceding dataset-metadata row.)
# Builds axis labels "Dim k (xx.xx%)" from the eigenvalue table's
# percentage-of-variance column.
dims <- res %>%
`[[`("eig") %>%
as.data.frame() %>%
select(2) %>%
pull() %>%
round(2) %>%
paste0("Dim ", 1:length(.), " (", ., "%)")
# NOTE(review): the three branches below build byte-identical plots; the MCA
# branch even reads the same $ind$coord slot as PCA/MFA, and only CA differs
# (it reads $row$coord). Candidate for consolidation. For a class outside
# these four, res_plot is never assigned and return() errors.
if (any(class(res) %in% c("PCA", "MFA"))) {
# PCA/MFA: plot individual coordinates.
df_main <- res %>%
`[[`(c("ind", "coord")) %>%
`colnames<-`(paste0("Dim", 1:ncol(.))) %>%
as_tibble(rownames = "Label") %>%
select(-Label, Label)
res_plot <- df_main %>%
ggplot(aes_string(x = names(df_main)[axes[1]], y = names(df_main)[axes[2]])) +
geom_point() +
geom_text_repel(aes(label = Label), size = lab.size) +
geom_vline(xintercept = 0, lty = 2, col = "grey40") +
geom_hline(yintercept = 0, lty = 2, col = "grey40") +
labs(
x = dims[axes[1]],
y = dims[axes[2]],
title = main
) +
# coord_fixed(ratio = 3 / 4) +
theme_minimal() +
theme(
panel.background = element_rect(fill = "white"),
panel.grid = element_blank()
)
} else if (any(class(res) %in% c("CA"))) {
# CA: plot row coordinates.
df_main <- res %>%
`[[`(c("row", "coord")) %>%
`colnames<-`(paste0("Dim", 1:ncol(.))) %>%
as_tibble(rownames = "Label") %>%
select(-Label, Label)
res_plot <- df_main %>%
ggplot(aes_string(x = names(df_main)[axes[1]], y = names(df_main)[axes[2]])) +
geom_point() +
geom_text_repel(aes(label = Label), size = lab.size) +
geom_vline(xintercept = 0, lty = 2, col = "grey40") +
geom_hline(yintercept = 0, lty = 2, col = "grey40") +
labs(
x = dims[axes[1]],
y = dims[axes[2]],
title = main
) +
# coord_fixed(ratio = 3 / 4) +
theme_minimal() +
theme(
panel.background = element_rect(fill = "white"),
panel.grid = element_blank()
)
} else if (any(class(res) %in% c("MCA"))) {
# MCA: plot individual coordinates (identical to the PCA/MFA branch).
df_main <- res %>%
`[[`(c("ind", "coord")) %>%
`colnames<-`(paste0("Dim", 1:ncol(.))) %>%
as_tibble(rownames = "Label") %>%
select(-Label, Label)
res_plot <- df_main %>%
ggplot(aes_string(x = names(df_main)[axes[1]], y = names(df_main)[axes[2]])) +
geom_point() +
geom_text_repel(aes(label = Label), size = lab.size) +
geom_vline(xintercept = 0, lty = 2, col = "grey40") +
geom_hline(yintercept = 0, lty = 2, col = "grey40") +
labs(
x = dims[axes[1]],
y = dims[axes[2]],
title = main
) +
# coord_fixed(ratio = 3 / 4) +
theme_minimal() +
theme(
panel.background = element_rect(fill = "white"),
panel.grid = element_blank()
)
}
return(res_plot)
}
|
## File Name: R2noharm.R
## File Version: 2.24
#------------------------------------------------------------------
# NOHARM exploratory factor analysis
# Wrapper around the NOHARM 4 command line program: writes a NOHARM input
# file, runs NoharmCL in 'noharm.path', parses the produced output file with
# the .noharm.* helper functions and returns an object of class "R2noharm".
#
# Arguments (interface unchanged):
#   dat         ... data frame / matrix of item responses (NA allowed)
#   pm          ... alternatively a product-moment matrix, or its stacked
#                   lower triangle as a vector; requires 'n'
#   n           ... number of subjects (needed with 'pm' input)
#   model.type  ... "CFA" or "EFA"
#   weights     ... optional person weights (only used with 'dat' input)
#   dimensions  ... number of dimensions (EFA; for CFA taken from F.pattern)
#   guesses     ... fixed guessing parameters, default rep(0, I)
#   noharm.path ... directory containing the NoharmCL executable
#   F.pattern, F.init, P.pattern, P.init ... pattern and starting-value
#                   matrices as defined in the NOHARM manual (CFA only)
#   digits.pm   ... rounding digits for the product-moment matrix
#   writename   ... optional file name stem for copies of input/output files
#   display.fit ... rounding digits for the printed fit indices
#   dec         ... decimal separator ("." or ",")
#   display     ... print progress information?
R2noharm <- function( dat=NULL, pm=NULL, n=NULL, model.type, weights=NULL,
        dimensions=NULL, guesses=NULL, noharm.path,
        F.pattern=NULL, F.init=NULL, P.pattern=NULL, P.init=NULL,
        digits.pm=4, writename=NULL,
        display.fit=5, dec=".", display=TRUE ){
    # for CFA the dimensionality is implied by the loading pattern
    if ( model.type=="CFA" ){
        dimensions <- ncol(F.pattern)
    }
    if ( display ){
        cat("Multidimensional Normal Ogive by Harmonic Analysis (NOHARM 4) \n" )
        cat("C. Fraser & R. P. McDonald (2012)\n" )
        cat("For more informations please look at \n ")
        cat( " http://noharm.niagararesearch.ca/nh4cldl.html\n\n")
        cat( paste( "Path of NOHARMCL file: \n ** ", noharm.path, "\n" ) )
        cat( paste( "Path of NOHARM input and output files: \n ** ", getwd(), "\n\n" ) )
        if (model.type=="EFA"){
            cat(paste( "Exploratory Item Factor Analysis with", dimensions, "Dimensions" ), "\n\n" )
        } else {
            cat("Confirmatory Item Factor Analysis \n\n" )
        }
        flush.console()
    }
    #--- raw data input: compute the pairwise product-moment matrix
    if ( ! is.null(dat) ){
        dat <- as.matrix(dat)
        I <- ncol(dat)      # number of items
        n <- nrow(dat)      # number of subjects
        if ( is.null(weights) ){
            weights <- rep(1,n)
        } else {
            # normalize person weights to sum to n
            weights <- weights / sum(weights) * n
        }
        dat9 <- dat
        dat.resp <- is.na( dat )
        dat9[ dat.resp ] <- 9       # NOHARM missing-data code
        # pairwise weighted product-moment matrix and pairwise sample sizes
        BM <- crossprod( dat9 * (1- dat.resp )*weights )
        NM <- crossprod( (1- dat.resp)*weights )
        BM <- round( BM/NM, digits.pm )
    }
    inputdat <- TRUE
    #--- product-moment input: either the full matrix or its lower
    #--- triangle stacked into a vector
    if ( ! is.null(pm) ){
        if ( is.vector(pm) ){
            # reconstruct the symmetric matrix from the stacked lower triangle
            I2 <- length(pm)
            I <- sqrt( 2*I2+1/4 ) - .5
            BM <- matrix( 0, I, I )
            colnames(BM) <- paste0("I",1:I)
            vv <- 0
            for (ii in 1:I){
                BM[ ii, 1:ii ] <- pm[ seq(vv+1,vv+ii) ]
                vv <- vv + ii
            }
            BM <- BM + t(BM)
            diag(BM) <- diag(BM)/2
        } else {
            # BUG FIX: this branch previously read 'pm <- BM', referencing
            # the not-yet-defined BM, so matrix-valued pm input failed.
            BM <- pm
            I <- ncol(pm)
        }
        weights <- rep(1,n)
        NM <- matrix( n, I, I)
        # minimal dummy data slot so the output parsers receive column names
        dat <- BM[1:2,,drop=FALSE]
        inputdat <- FALSE
    }
    # NOHARM flags: exploratory analysis requested? starting values supplied?
    EX <- 1 * ( model.type=="EFA" )
    IV <- 1 * ( model.type=="CFA" )
    if ( is.null(guesses) ){
        guesses <- rep(0, I )
    }
    #--- assemble the NOHARM input file
    s1 <- Sys.time()
    noharm.input <- c( paste( "R2noharm Input file", s1 ),
            paste( I, dimensions, n, 1, EX, IV, 0, 0, sep=" ") )
    # guessing parameters
    noharm.input <- c( noharm.input, " ", guesses, " " )
    if (model.type=="CFA"){
        # pattern matrices: F row-wise, P as lower triangle
        noharm.input <- c( noharm.input,
            apply( F.pattern, 1, FUN=function(ll){ paste( ll, collapse=" " ) } ), " ",
            sapply( 1:dimensions, FUN=function(ss){ paste( P.pattern[ ss, 1:ss ], collapse=" " ) } ),
            " " )
        # default starting values
        if ( is.null(F.init) ){ F.init <- .5*F.pattern }
        if ( is.null(P.init) ){ P.init <- 0.1 + diag( .9, dimensions) }
        noharm.input <- c( noharm.input,
            apply( F.init, 1, FUN=function(ll){ paste( ll, collapse=" " ) } ), " ",
            sapply( 1:dimensions, FUN=function(ss){ paste( P.init[ ss, 1:ss ], collapse=" " ) } ) )
    }
    # stacked lower triangle of the product-moment matrix, 10 values per line
    pm <- unlist( sapply( 1:I, FUN=function( ii){ BM[ ii, 1:ii ] } ) )
    LPM <- length(pm)
    h1 <- seq( 1, LPM, 10 )
    h2 <- c( seq( 10, LPM, 10 ) )
    if ( length(h1) != length(h2) ){ h2 <- c( h2, LPM ) }
    h3 <- data.frame( h1, h2 )
    pm <- apply( h3, 1, FUN=function(ll){ paste( pm[ ll[1]:ll[2] ], collapse=" " ) } )
    noharm.input <- c( noharm.input, " ", pm )
    if ( dec=="," ){ noharm.input <- gsub( "\\.", ",", noharm.input ) }
    #--- run NOHARM inside noharm.path
    current.path <- getwd()
    setwd( noharm.path )
    # restore the working directory even if NOHARM or the parsing fails
    on.exit( setwd(current.path), add=TRUE )
    writeLines( noharm.input, "mymodel.inp" )
    writeLines( "NoharmCL mymodel.inp mymodel.out", "noharm_analysis.bat" )
    system( "noharm_analysis.bat", show.output.on.console=FALSE )
    #--- read the NOHARM output; append blank lines and an "ENDE" sentinel
    #--- used by the .noharm.* section parsers
    noharmout0 <- readLines( "MYMODEL.out" )
    if ( dec=="," ){ noharmout0 <- gsub( ",", "\\.", noharmout0 ) }
    noharmout1 <- c( noharmout0, rep("",3), "ENDE" )
    setwd( current.path )
    if ( ! is.null( writename ) ){
        writeLines( noharmout0, paste( writename, ".out", sep="") )
        writeLines( noharm.input, paste( writename, ".inp", sep="") )
    }
    #--- collect results depending on model type and dimensionality
    # modtype 1: unidimensional EFA
    if ( dimensions==1 & model.type=="EFA" ){
        modtype <- 1
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "weights"=weights,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants", I=I, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "difficulties"=.noharm.itemlevel( noharmout1, "Vector B", I=I, dat=dat),
            "discriminations"=.noharm.itemlevel( noharmout1, "Vector A", I=I, dat=dat)
            )
    }
    # modtype 2: multidimensional EFA (unrotated, varimax and promax solutions)
    if ( dimensions > 1 & model.type=="EFA" ){
        modtype <- 2
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "factor.cor"=.noharm.correlations( noharmout1, "Factor Correlations",
                "Promax Rotated", dimensions=dimensions, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "unrotated"=.noharm.loadings( noharmout1, "Factor Loadings",
                "Varimax Rotated Factor Loadings", dimensions=dimensions, I=I, dat=dat),
            "varimax"=.noharm.loadings( noharmout1, "Varimax Rotated Factor Loadings",
                "Varimax Rotated Coefficients of Theta", dimensions=dimensions, I=I, dat=dat),
            "varimax.theta"=.noharm.loadings( noharmout1, "Varimax Rotated Coefficients of Theta",
                "oblique", dimensions=dimensions, I=I, dat=dat),
            "promax"=.noharm.loadings( noharmout1, "oblique",
                "Factor Correlations", dimensions=dimensions, I=I, dat=dat),
            "promax.theta"=.noharm.loadings( noharmout1, "Promax Rotated Coefficients of Theta",
                "ENDE", dimensions=dimensions, I=I, dat=dat)
            )
    }
    # modtype 3: multidimensional CFA
    if ( dimensions > 1 & model.type=="CFA" ){
        modtype <- 3
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "factor.cor"=.noharm.correlations( noharmout1, "Final Correlations", "Residual",
                dimensions=dimensions, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "loadings"=.noharm.loadings( noharmout1, "Factor Loadings",
                "ENDE", dimensions=dimensions, I=I, dat=dat),
            "loadings.theta"=.noharm.loadings( noharmout1, "Final Coefficients of Theta",
                "Final Correlations of Theta", dimensions=dimensions, I=I, dat=dat)
            )
        # carry over user-supplied factor labels
        if( ! is.null( colnames(F.pattern) ) ){
            colnames(res$loadings) <- colnames(F.pattern)
            colnames(res$loadings.theta) <- colnames(F.pattern)
            colnames(res$factor.cor) <- rownames(res$factor.cor) <- colnames(F.pattern)
        }
    }
    # modtype 4: unidimensional CFA
    if ( ( dimensions==1 ) & ( model.type=="CFA" ) ){
        modtype <- 4
        if ( is.null(colnames(F.pattern) ) ){ colnames(F.pattern) <- "F1" }
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "loadings.theta"=.noharm.loadings( noharmout1, "Final Coefficients of Theta",
                "Residual", dimensions=dimensions, I=I, dat=dat),
            "factor.cor"=as.data.frame(matrix(1,1, 1 )),
            "difficulties"=.noharm.itemlevel( noharmout1, "Vector B", I=I, dat=dat),
            "discriminations"=.noharm.itemlevel( noharmout1, "Vector A", I=I, dat=dat),
            "loadings"=.noharm.loadings( noharmout1, "Factor Loadings",
                "LORD", dimensions=dimensions, I=I, dat=dat)
            )
        if( ! is.null( colnames(F.pattern) ) ){
            colnames(res$loadings) <- colnames(F.pattern)
            rownames(res$factor.cor) <- colnames(res$factor.cor) <- colnames(F.pattern)
            colnames(res$loadings.theta) <- colnames(F.pattern)
        }
    }
    #--- collect call arguments in the output object
    res$model.type <- model.type
    res$Nobs <- n
    res$Nitems <- I
    res$modtype <- modtype
    res$F.init <- F.init
    res$F.pattern <- F.pattern
    res$P.init <- P.init
    res$P.pattern <- P.pattern
    res$dat <- dat
    res$systime <- s1
    res$guesses <- guesses
    res$noharm.path <- noharm.path
    res$digits.pm <- digits.pm
    res$dec <- dec
    res$display.fit <- display.fit
    if ( modtype %in% c(1,4) ){ res$dimensions <- 1 }
    if ( modtype %in% c(2,3) ){ res$dimensions <- ncol(res$factor.cor) }
    #--- fit statistics (residual-based chi square, df, RMSEA)
    if ( modtype %in% 2:4 ){
        RM <- res$residuals
        PV <- diag( res$pm )
        # Fisher-z transform of the standardized residual correlations
        g1 <- sqrt( outer( PV * (1-PV), PV * ( 1-PV ) ) )
        rM <- ( RM / g1 )
        zM <- 0.5 * log( 1 + rM ) - 0.5 * log( 1 - rM )
        res$chisquare <- X2 <- ( res$Nobs - 3 ) * sum( zM^2 )
        # number of estimated parameters and degrees of freedom
        I <- res$Nitems
        if ( modtype %in% 3:4 ){
            # free loadings (coded 1) plus distinct equality-constrained
            # parameters (coded 2,3,...) in F.pattern and P.pattern
            Nestpars <- I + sum( F.pattern==1 ) + length( unique( intersect( F.pattern, 2:9999 ) ) )
            Nestpars <- Nestpars + sum( diag(P.pattern)==1 ) +
                length( unique( diag(P.pattern)[ diag(P.pattern) > 1] ) )
            g2 <- P.pattern[ lower.tri(P.pattern) ]
            Nestpars <- Nestpars + sum( g2==1 ) + length( unique( intersect( g2, 2:9999 ) ))
            res$Nestpars <- Nestpars
        }
        if ( modtype %in% 2 ){
            cs <- 1:(dimensions-1)
            res$Nestpars <- Nestpars <- I + I*dimensions - sum(cs)
        }
        res$df <- df <- 0.5*I*(I+1) - Nestpars
        res$chisquare_df <- res$chisquare / res$df
        res$rmsea <- sqrt(max(c( (X2 / res$Nobs ) / df - 1/res$Nobs, 0)))
        res$p.chisquare <- 1 - pchisq( res$chisquare, df=res$df )
    }
    #--- display fit
    if (display){
        cat( paste( "Tanaka Index=", round(res$tanaka,display.fit), sep=""), "\n" )
        cat( paste( "RMSR=", round(res$rmsr,display.fit), sep=""), "\n" )
        if ( ! inputdat ){
            cat("\n**** Note that Jackknife does not work for pm input! **** \n")
        }
    }
    if ( ! inputdat ){
        res$dat <- NULL
    }
    res$inputdat <- inputdat
    # FIX: was res$guess, which only worked via '$' partial matching
    res$upper <- 1 + 0*res$guesses
    res$lower <- res$guesses
    class(res) <- "R2noharm"
    return( res )
}
#----------------------------------------------------------
| /R/R2noharm.R | no_license | alexanderrobitzsch/sirt | R | false | false | 15,197 | r | ## File Name: R2noharm.R
## File Version: 2.24
#------------------------------------------------------------------
# NOHARM exploratory factor analysis
# Wrapper around the NOHARM 4 command line program: writes a NOHARM input
# file, runs NoharmCL in 'noharm.path', parses the produced output file with
# the .noharm.* helper functions and returns an object of class "R2noharm".
#
# Arguments (interface unchanged):
#   dat         ... data frame / matrix of item responses (NA allowed)
#   pm          ... alternatively a product-moment matrix, or its stacked
#                   lower triangle as a vector; requires 'n'
#   n           ... number of subjects (needed with 'pm' input)
#   model.type  ... "CFA" or "EFA"
#   weights     ... optional person weights (only used with 'dat' input)
#   dimensions  ... number of dimensions (EFA; for CFA taken from F.pattern)
#   guesses     ... fixed guessing parameters, default rep(0, I)
#   noharm.path ... directory containing the NoharmCL executable
#   F.pattern, F.init, P.pattern, P.init ... pattern and starting-value
#                   matrices as defined in the NOHARM manual (CFA only)
#   digits.pm   ... rounding digits for the product-moment matrix
#   writename   ... optional file name stem for copies of input/output files
#   display.fit ... rounding digits for the printed fit indices
#   dec         ... decimal separator ("." or ",")
#   display     ... print progress information?
R2noharm <- function( dat=NULL, pm=NULL, n=NULL, model.type, weights=NULL,
        dimensions=NULL, guesses=NULL, noharm.path,
        F.pattern=NULL, F.init=NULL, P.pattern=NULL, P.init=NULL,
        digits.pm=4, writename=NULL,
        display.fit=5, dec=".", display=TRUE ){
    # for CFA the dimensionality is implied by the loading pattern
    if ( model.type=="CFA" ){
        dimensions <- ncol(F.pattern)
    }
    if ( display ){
        cat("Multidimensional Normal Ogive by Harmonic Analysis (NOHARM 4) \n" )
        cat("C. Fraser & R. P. McDonald (2012)\n" )
        cat("For more informations please look at \n ")
        cat( " http://noharm.niagararesearch.ca/nh4cldl.html\n\n")
        cat( paste( "Path of NOHARMCL file: \n ** ", noharm.path, "\n" ) )
        cat( paste( "Path of NOHARM input and output files: \n ** ", getwd(), "\n\n" ) )
        if (model.type=="EFA"){
            cat(paste( "Exploratory Item Factor Analysis with", dimensions, "Dimensions" ), "\n\n" )
        } else {
            cat("Confirmatory Item Factor Analysis \n\n" )
        }
        flush.console()
    }
    #--- raw data input: compute the pairwise product-moment matrix
    if ( ! is.null(dat) ){
        dat <- as.matrix(dat)
        I <- ncol(dat)      # number of items
        n <- nrow(dat)      # number of subjects
        if ( is.null(weights) ){
            weights <- rep(1,n)
        } else {
            # normalize person weights to sum to n
            weights <- weights / sum(weights) * n
        }
        dat9 <- dat
        dat.resp <- is.na( dat )
        dat9[ dat.resp ] <- 9       # NOHARM missing-data code
        # pairwise weighted product-moment matrix and pairwise sample sizes
        BM <- crossprod( dat9 * (1- dat.resp )*weights )
        NM <- crossprod( (1- dat.resp)*weights )
        BM <- round( BM/NM, digits.pm )
    }
    inputdat <- TRUE
    #--- product-moment input: either the full matrix or its lower
    #--- triangle stacked into a vector
    if ( ! is.null(pm) ){
        if ( is.vector(pm) ){
            # reconstruct the symmetric matrix from the stacked lower triangle
            I2 <- length(pm)
            I <- sqrt( 2*I2+1/4 ) - .5
            BM <- matrix( 0, I, I )
            colnames(BM) <- paste0("I",1:I)
            vv <- 0
            for (ii in 1:I){
                BM[ ii, 1:ii ] <- pm[ seq(vv+1,vv+ii) ]
                vv <- vv + ii
            }
            BM <- BM + t(BM)
            diag(BM) <- diag(BM)/2
        } else {
            # BUG FIX: this branch previously read 'pm <- BM', referencing
            # the not-yet-defined BM, so matrix-valued pm input failed.
            BM <- pm
            I <- ncol(pm)
        }
        weights <- rep(1,n)
        NM <- matrix( n, I, I)
        # minimal dummy data slot so the output parsers receive column names
        dat <- BM[1:2,,drop=FALSE]
        inputdat <- FALSE
    }
    # NOHARM flags: exploratory analysis requested? starting values supplied?
    EX <- 1 * ( model.type=="EFA" )
    IV <- 1 * ( model.type=="CFA" )
    if ( is.null(guesses) ){
        guesses <- rep(0, I )
    }
    #--- assemble the NOHARM input file
    s1 <- Sys.time()
    noharm.input <- c( paste( "R2noharm Input file", s1 ),
            paste( I, dimensions, n, 1, EX, IV, 0, 0, sep=" ") )
    # guessing parameters
    noharm.input <- c( noharm.input, " ", guesses, " " )
    if (model.type=="CFA"){
        # pattern matrices: F row-wise, P as lower triangle
        noharm.input <- c( noharm.input,
            apply( F.pattern, 1, FUN=function(ll){ paste( ll, collapse=" " ) } ), " ",
            sapply( 1:dimensions, FUN=function(ss){ paste( P.pattern[ ss, 1:ss ], collapse=" " ) } ),
            " " )
        # default starting values
        if ( is.null(F.init) ){ F.init <- .5*F.pattern }
        if ( is.null(P.init) ){ P.init <- 0.1 + diag( .9, dimensions) }
        noharm.input <- c( noharm.input,
            apply( F.init, 1, FUN=function(ll){ paste( ll, collapse=" " ) } ), " ",
            sapply( 1:dimensions, FUN=function(ss){ paste( P.init[ ss, 1:ss ], collapse=" " ) } ) )
    }
    # stacked lower triangle of the product-moment matrix, 10 values per line
    pm <- unlist( sapply( 1:I, FUN=function( ii){ BM[ ii, 1:ii ] } ) )
    LPM <- length(pm)
    h1 <- seq( 1, LPM, 10 )
    h2 <- c( seq( 10, LPM, 10 ) )
    if ( length(h1) != length(h2) ){ h2 <- c( h2, LPM ) }
    h3 <- data.frame( h1, h2 )
    pm <- apply( h3, 1, FUN=function(ll){ paste( pm[ ll[1]:ll[2] ], collapse=" " ) } )
    noharm.input <- c( noharm.input, " ", pm )
    if ( dec=="," ){ noharm.input <- gsub( "\\.", ",", noharm.input ) }
    #--- run NOHARM inside noharm.path
    current.path <- getwd()
    setwd( noharm.path )
    # restore the working directory even if NOHARM or the parsing fails
    on.exit( setwd(current.path), add=TRUE )
    writeLines( noharm.input, "mymodel.inp" )
    writeLines( "NoharmCL mymodel.inp mymodel.out", "noharm_analysis.bat" )
    system( "noharm_analysis.bat", show.output.on.console=FALSE )
    #--- read the NOHARM output; append blank lines and an "ENDE" sentinel
    #--- used by the .noharm.* section parsers
    noharmout0 <- readLines( "MYMODEL.out" )
    if ( dec=="," ){ noharmout0 <- gsub( ",", "\\.", noharmout0 ) }
    noharmout1 <- c( noharmout0, rep("",3), "ENDE" )
    setwd( current.path )
    if ( ! is.null( writename ) ){
        writeLines( noharmout0, paste( writename, ".out", sep="") )
        writeLines( noharm.input, paste( writename, ".inp", sep="") )
    }
    #--- collect results depending on model type and dimensionality
    # modtype 1: unidimensional EFA
    if ( dimensions==1 & model.type=="EFA" ){
        modtype <- 1
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "weights"=weights,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants", I=I, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "difficulties"=.noharm.itemlevel( noharmout1, "Vector B", I=I, dat=dat),
            "discriminations"=.noharm.itemlevel( noharmout1, "Vector A", I=I, dat=dat)
            )
    }
    # modtype 2: multidimensional EFA (unrotated, varimax and promax solutions)
    if ( dimensions > 1 & model.type=="EFA" ){
        modtype <- 2
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "factor.cor"=.noharm.correlations( noharmout1, "Factor Correlations",
                "Promax Rotated", dimensions=dimensions, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "unrotated"=.noharm.loadings( noharmout1, "Factor Loadings",
                "Varimax Rotated Factor Loadings", dimensions=dimensions, I=I, dat=dat),
            "varimax"=.noharm.loadings( noharmout1, "Varimax Rotated Factor Loadings",
                "Varimax Rotated Coefficients of Theta", dimensions=dimensions, I=I, dat=dat),
            "varimax.theta"=.noharm.loadings( noharmout1, "Varimax Rotated Coefficients of Theta",
                "oblique", dimensions=dimensions, I=I, dat=dat),
            "promax"=.noharm.loadings( noharmout1, "oblique",
                "Factor Correlations", dimensions=dimensions, I=I, dat=dat),
            "promax.theta"=.noharm.loadings( noharmout1, "Promax Rotated Coefficients of Theta",
                "ENDE", dimensions=dimensions, I=I, dat=dat)
            )
    }
    # modtype 3: multidimensional CFA
    if ( dimensions > 1 & model.type=="CFA" ){
        modtype <- 3
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "factor.cor"=.noharm.correlations( noharmout1, "Final Correlations", "Residual",
                dimensions=dimensions, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "loadings"=.noharm.loadings( noharmout1, "Factor Loadings",
                "ENDE", dimensions=dimensions, I=I, dat=dat),
            "loadings.theta"=.noharm.loadings( noharmout1, "Final Coefficients of Theta",
                "Final Correlations of Theta", dimensions=dimensions, I=I, dat=dat)
            )
        # carry over user-supplied factor labels
        if( ! is.null( colnames(F.pattern) ) ){
            colnames(res$loadings) <- colnames(F.pattern)
            colnames(res$loadings.theta) <- colnames(F.pattern)
            colnames(res$factor.cor) <- rownames(res$factor.cor) <- colnames(F.pattern)
        }
    }
    # modtype 4: unidimensional CFA
    if ( ( dimensions==1 ) & ( model.type=="CFA" ) ){
        modtype <- 4
        if ( is.null(colnames(F.pattern) ) ){ colnames(F.pattern) <- "F1" }
        res <- list( "tanaka"=.noharm.tanaka( noharmout1 ),
            "rmsr"=.noharm.rmsr( noharmout1 ),
            "N.itempair"=NM,
            "pm"=BM,
            "guesses"=guesses,
            "residuals"=.noharm.residuals( noharmout1, I=I, dat=dat),
            "final.constants"=.noharm.itemlevel( noharmout1, "Final Constants",I=I, dat=dat),
            "thresholds"=.noharm.itemlevel( noharmout1, "Threshold Values", I=I, dat=dat),
            "uniquenesses"=.noharm.itemlevel( noharmout1, "Unique Variances", I=I, dat=dat),
            "loadings.theta"=.noharm.loadings( noharmout1, "Final Coefficients of Theta",
                "Residual", dimensions=dimensions, I=I, dat=dat),
            "factor.cor"=as.data.frame(matrix(1,1, 1 )),
            "difficulties"=.noharm.itemlevel( noharmout1, "Vector B", I=I, dat=dat),
            "discriminations"=.noharm.itemlevel( noharmout1, "Vector A", I=I, dat=dat),
            "loadings"=.noharm.loadings( noharmout1, "Factor Loadings",
                "LORD", dimensions=dimensions, I=I, dat=dat)
            )
        if( ! is.null( colnames(F.pattern) ) ){
            colnames(res$loadings) <- colnames(F.pattern)
            rownames(res$factor.cor) <- colnames(res$factor.cor) <- colnames(F.pattern)
            colnames(res$loadings.theta) <- colnames(F.pattern)
        }
    }
    #--- collect call arguments in the output object
    res$model.type <- model.type
    res$Nobs <- n
    res$Nitems <- I
    res$modtype <- modtype
    res$F.init <- F.init
    res$F.pattern <- F.pattern
    res$P.init <- P.init
    res$P.pattern <- P.pattern
    res$dat <- dat
    res$systime <- s1
    res$guesses <- guesses
    res$noharm.path <- noharm.path
    res$digits.pm <- digits.pm
    res$dec <- dec
    res$display.fit <- display.fit
    if ( modtype %in% c(1,4) ){ res$dimensions <- 1 }
    if ( modtype %in% c(2,3) ){ res$dimensions <- ncol(res$factor.cor) }
    #--- fit statistics (residual-based chi square, df, RMSEA)
    if ( modtype %in% 2:4 ){
        RM <- res$residuals
        PV <- diag( res$pm )
        # Fisher-z transform of the standardized residual correlations
        g1 <- sqrt( outer( PV * (1-PV), PV * ( 1-PV ) ) )
        rM <- ( RM / g1 )
        zM <- 0.5 * log( 1 + rM ) - 0.5 * log( 1 - rM )
        res$chisquare <- X2 <- ( res$Nobs - 3 ) * sum( zM^2 )
        # number of estimated parameters and degrees of freedom
        I <- res$Nitems
        if ( modtype %in% 3:4 ){
            # free loadings (coded 1) plus distinct equality-constrained
            # parameters (coded 2,3,...) in F.pattern and P.pattern
            Nestpars <- I + sum( F.pattern==1 ) + length( unique( intersect( F.pattern, 2:9999 ) ) )
            Nestpars <- Nestpars + sum( diag(P.pattern)==1 ) +
                length( unique( diag(P.pattern)[ diag(P.pattern) > 1] ) )
            g2 <- P.pattern[ lower.tri(P.pattern) ]
            Nestpars <- Nestpars + sum( g2==1 ) + length( unique( intersect( g2, 2:9999 ) ))
            res$Nestpars <- Nestpars
        }
        if ( modtype %in% 2 ){
            cs <- 1:(dimensions-1)
            res$Nestpars <- Nestpars <- I + I*dimensions - sum(cs)
        }
        res$df <- df <- 0.5*I*(I+1) - Nestpars
        res$chisquare_df <- res$chisquare / res$df
        res$rmsea <- sqrt(max(c( (X2 / res$Nobs ) / df - 1/res$Nobs, 0)))
        res$p.chisquare <- 1 - pchisq( res$chisquare, df=res$df )
    }
    #--- display fit
    if (display){
        cat( paste( "Tanaka Index=", round(res$tanaka,display.fit), sep=""), "\n" )
        cat( paste( "RMSR=", round(res$rmsr,display.fit), sep=""), "\n" )
        if ( ! inputdat ){
            cat("\n**** Note that Jackknife does not work for pm input! **** \n")
        }
    }
    if ( ! inputdat ){
        res$dat <- NULL
    }
    res$inputdat <- inputdat
    # FIX: was res$guess, which only worked via '$' partial matching
    res$upper <- 1 + 0*res$guesses
    res$lower <- res$guesses
    class(res) <- "R2noharm"
    return( res )
}
#----------------------------------------------------------
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_functions.R
\name{get_perm_parties}
\alias{get_perm_parties}
\title{extract party names}
\usage{
get_perm_parties(pv)
}
\arguments{
\item{pv}{[\code{character(1)}]\cr
the name ID of the pollyvote object, defaults to 'pollyvote'.}
}
\value{
character vector containing all party names stored in \code{pv}.
}
\description{
This function extracts party names from a pollyvote container.
}
\examples{
pv = create_pollyvote(perm_countries = "D", perm_parties = c("CSU", "SPD"))
get_perm_parties(pv)
}
| /man/get_perm_parties.Rd | no_license | pollyvote/pollyvoter | R | false | true | 579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_functions.R
\name{get_perm_parties}
\alias{get_perm_parties}
\title{extract party names}
\usage{
get_perm_parties(pv)
}
\arguments{
\item{pv}{[\code{character(1)}]\cr
the name ID of the pollyvote object, defaults to 'pollyvote'.}
}
\value{
character vector containing all party names stored in \code{pv}.
}
\description{
This function extracts party names from a pollyvote container.
}
\examples{
pv = create_pollyvote(perm_countries = "D", perm_parties = c("CSU", "SPD"))
get_perm_parties(pv)
}
|
# Power test for rqt LASSO method, dichotomous outcome, LD presented#
library(rqt)
library(SKAT)
# how to run: > source("~/Dropbox/rqt/AppNote/MajorRevision/tests/power.tests/power.ld.dich/power.ld.dich_lasso.R")
# Reproducibility seed for the whole simulation study.
set.seed(100500)
# Working directory for the simulated PLINK/cophesim files (absolute path --
# adjust to the local machine before running).
proj.dir <- "/Users/ilya/Projects/rqt.dev/tests/power.tests/power.ld.dich_lasso/" # project directory, change it if you need.
n.snp <- 50 # number of SNPs per simulated gene region
n <- 1000 # number of simulation replicates per scenario
alpha.level <- 1e-3 # significance threshold used in the power calculation
# Accumulator table: one row per proportion of causal SNPs (0.1, 0.25),
# one column per method / summary statistic collected in the loop below.
res.table <- data.frame(matrix(nrow=2, ncol=20))
colnames(res.table) <- c("rQT1", "rQT2", "rQT3",
"QT1", "QT2", "QT3",
"p.skat", "p.skat.o",
"var.pool.rqt", "var.pool.qt",
"vif.rqt", "vif.qt",
"prqt1", "prqt2", "prqt3", "pqt1", "pqt2", "pqt3", "pskat", "pskato")
rownames(res.table) <- c("0.1","0.25")
# Running count of simulated cases observed per scenario.
res.table.case <- data.frame(matrix(nrow=2, ncol=1, 0))
colnames(res.table.case) <- c("case")
rownames(res.table.case) <- c("0.1","0.25")
ncs <- 1500 # Number of cases
nct <- 1500 # Number of controls
for(s in c(0.1, 0.25)) {
res.pow <- list(rQT1=0, rQT2=0, rQT3=0,
QT1=0, QT2=0, QT3=0,
p.skat=0, p.skat.o=0,
var.pool.rqt=0, var.pool.qt=0,
vif.rqt=0, vif.qt=0,
prqt1=0, prqt2=0, prqt3=0, pqt1=0, pqt2=0, pqt3=0, pskat=0, pskato=0)
n.caus <- round(n.snp*s,digits = 0)
eff <- sapply(0:(n.caus-1), function(n) { paste(n, ':', ifelse( (runif(1) > 0.5), 0.2, -0.2 ), sep='') })
write(x = eff, paste(proj.dir, "effects.txt", sep=""))
ld <- c()
for(j in 0:(n.snp-2)) {
if(j %% 4) {
ld <- c(ld, paste( j, ',', j+1, ',', 0.95, sep=''))
}
}
write(x = ld, paste(proj.dir, "ldfile.txt", sep=""))
for (i in 1:n){
print(paste("n.caus.snp:", n.caus, "Iteration:", i))
# Data simulation #
# Prepare raw files with cophesim () and plink #
system(paste("plink --simulate-ncases ", ncs, " --simulate-ncontrols ", nct, " --simulate ", proj.dir, "wgas.sim ", " --out ", proj.dir, "sim.plink ", " --make-bed >/dev/null", sep=''))
system(paste("python /Users/ilya/Projects/cophesim_stable/cophesim.py -i ", proj.dir, "sim.plink -o ", proj.dir, "testout" , " -ce ", proj.dir, "effects.txt", " -LD ", proj.dir, "ldfile.txt", " >/dev/null", sep=''))
system(paste("plink --file ", proj.dir, "testout_pheno_bin.txt", " --recodeA --out ", proj.dir, "testout_pheno_bin.txt >/dev/null", sep=''))
# Combining Phenotype and Genotype #
## Dichotomous ##
p <- read.table(paste(proj.dir, "testout_pheno_bin.txt", sep=''), header=TRUE)
p <- p[["pheno"]]
g <- read.table(paste(proj.dir, "testout_pheno_bin.txt.raw", sep=''), header=TRUE)
g <- g[,7:dim(g)[2]]
colnames(g) <- paste("snp", 1:n.snp, sep="")
d <- cbind(pheno=p, g)
write.table(x = d, file=paste(proj.dir, "test.bin", i, ".dat", sep=''), quote = FALSE, row.names = FALSE)
res.table.case[as.character(s), 1] <- res.table.case[as.character(s), 1] + length(which(p==1))
# Tests #
## RQT ##
data <- data.matrix(read.table(paste(proj.dir, "test.bin", i, ".dat", sep=''), header=TRUE))
pheno <- data[,1]
geno <- data[, 2:dim(data)[2]]
colnames(geno) <- paste(seq(1, dim(geno)[2]))
geno.obj <- SummarizedExperiment(geno)
obj <- rqt(phenotype=pheno, genotype=geno.obj)
res <- geneTest(obj, method="lasso", out.type = "D", cumvar.threshold = 70, weight=F)
print(paste("p.rQT1", results(res)$pValue$pVal1))
print(paste("p.rQT2", results(res)$pValue$pVal2))
print(paste("p.rQT3", results(res)$pValue$pVal3))
print(paste("Var pooled rQT: ", results(res)$var.pooled))
print(paste("Mean vif rQT: ", results(res)$mean.vif))
## QT ##
res.qt <- geneTest(obj, method="none", out.type = "D", weight=F)
print(paste("p.QT1", results(res.qt)$pValue$pVal1))
print(paste("p.QT2", results(res.qt)$pValue$pVal2))
print(paste("p.QT3", results(res.qt)$pValue$pVal3))
print(paste("Var pooled QT: ", results(res.qt)$var.pooled))
print(paste("Mean vif QT: ", results(res.qt)$mean.vif))
## SKAT ##
obj.b<-SKAT_Null_Model(pheno ~ 1, out_type="D")
res.skat <- SKAT(geno, obj.b)$p.value
print(paste("skat", res.skat))
## SKAT-O ##
obj.b<-SKAT_Null_Model(pheno ~ 1, out_type="D")
res.skat.o <- SKAT(geno, obj.b, method = "optimal.adj")$p.value
print(paste("skat.o", res.skat.o))
# Calculating power #
### rQT ###
if(!is.na(results(res)$pValue$pVal1)) {
if (results(res)$pValue$pVal1 > alpha.level) (res.pow$rQT1 <- res.pow$rQT1 + 1)
res.pow$prqt1 <- res.pow$prqt1 + results(res)$pValue$pVal1
} else {
res.pow$rQT1 <- res.pow$rQT1 + 1
res.pow$prqt1 <- res.pow$prqt1 + 1
}
if(!is.na(results(res)$pValue$pVal2)) {
if (results(res)$pValue$pVal2 > alpha.level) (res.pow$rQT2 <- res.pow$rQT2 + 1)
res.pow$prqt2 <- res.pow$prqt2 + results(res)$pValue$pVal2
} else {
res.pow$rQT2 <- res.pow$rQT2 + 1
res.pow$prqt2 <- res.pow$prqt2 + 1
}
if(!is.na(results(res)$pValue$pVal3)) {
if (results(res)$pValue$pVal3 > alpha.level) (res.pow$rQT3 <- res.pow$rQT3 + 1)
res.pow$prqt3 <- res.pow$prqt3 + results(res)$pValue$pVal3
} else {
res.pow$rQT3 <- res.pow$rQT3 + 1
res.pow$prqt3 <- res.pow$prqt3 + 1
}
### QT ###
if(!is.na(results(res.qt)$pValue$pVal1)) {
if (results(res.qt)$pValue$pVal1 > alpha.level) (res.pow$QT1 <- res.pow$QT1 + 1)
res.pow$pqt1 <- res.pow$pqt1 + results(res.qt)$pValue$pVal1
} else {
res.pow$QT1 <- res.pow$QT1 + 1
res.pow$pqt1 <- res.pow$pqt1 + 1
}
if(!is.na(results(res.qt)$pValue$pVal2)) {
if (results(res.qt)$pValue$pVal2 > alpha.level) (res.pow$QT2 <- res.pow$QT2 + 1)
res.pow$pqt2 <- res.pow$pqt2 + results(res.qt)$pValue$pVal2
} else {
res.pow$QT2 <- res.pow$QT2 + 1
res.pow$pqt2 <- res.pow$pqt2 + 1
}
if(!is.na(results(res.qt)$pValue$pVal3)) {
if (results(res.qt)$pValue$pVal3 > alpha.level) (res.pow$QT3 <- res.pow$QT3 + 1)
res.pow$pqt3 <- res.pow$pqt3 + results(res.qt)$pValue$pVal3
} else {
res.pow$QT3 <- res.pow$QT3 + 1
res.pow$pqt3 <- res.pow$pqt3 + 1
}
### SKAT/SKAT-O ###
if (res.skat > alpha.level) (res.pow$p.skat <- res.pow$p.skat + 1)
res.pow$pskat <- res.pow$pskat + res.skat
if (res.skat.o > alpha.level) (res.pow$p.skat.o <- res.pow$p.skat.o + 1)
res.pow$pskato <- res.pow$pskato + res.skat.o
### Pooled variance ###
res.pow$var.pool.rqt <- res.pow$var.pool.rqt + results(res)$var.pooled
res.pow$var.pool.qt <- res.pow$var.pool.qt + results(res.qt)$var.pooled
### Mean VIF ###
res.pow$vif.rqt <- res.pow$vif.rqt + results(res)$mean.vif
res.pow$vif.qt <- res.pow$vif.qt + results(res.qt)$mean.vif
# Printing results #
print(paste("Type II error rate for p.rQT1 in percentage is", (res.pow$rQT1/i)*100,"%"))
print(paste("Type II error rate for p.rQT2 in percentage is", (res.pow$rQT2/i)*100,"%"))
print(paste("Type II error rate for p.rQT3 in percentage is", (res.pow$rQT3/i)*100,"%"))
###
print(paste("Type II error rate for p.QT1 in percentage is", (res.pow$QT1/i)*100,"%"))
print(paste("Type II error rate for p.QT2 in percentage is", (res.pow$QT2/i)*100,"%"))
print(paste("Type II error rate for p.QT3 in percentage is", (res.pow$QT3/i)*100,"%"))
###
print(paste("Type II error rate for p.skat in percentage is", (res.pow$p.skat/i)*100,"%"))
print(paste("Type II error rate for p.skat.o in percentage is", (res.pow$p.skat.o/i)*100,"%"))
###
print(paste("Avg pooled variance rQT", (res.pow$var.pool.rqt/i)))
print(paste("Avg pooled variance QT", (res.pow$var.pool.qt/i)))
###
print(paste("Avg mean vif rQT", (res.pow$vif.rqt/i)))
print(paste("Avg mean vif QT", (res.pow$vif.qt/i)))
### P-values ###
print(paste("Avg pvalue rqt1", (res.pow$prqt1/i)))
print(paste("Avg pvalue rqt2", (res.pow$prqt2/i)))
print(paste("Avg pvalue rqt3", (res.pow$prqt3/i)))
###
print(paste("Avg pvalue qt1", (res.pow$pqt1/i)))
print(paste("Avg pvalue qt2", (res.pow$pqt2/i)))
print(paste("Avg pvalue qt3", (res.pow$pqt3/i)))
###
print(paste("Avg pvalue pskat", (res.pow$pskat/i)))
print(paste("Avg pvalue pskato", (res.pow$pskato/i)))
# Cleaning up #
rm(p, d, obj.b, res.skat.o, data, pheno, geno, obj, res, res.qt, geno.obj)
system(paste("rm", paste(proj.dir, "test.bin", i, ".dat", sep='')))
system(paste("rm", paste(proj.dir, "testout_pheno_bin.txt*", sep='')))
system(paste("rm", paste(proj.dir, "sim.plink.*", sep='')))
}
# Printing results #
# Final per-scenario type II error rates, normalised by the total iteration
# count n.  (The original divided the three QT rows by the loop variable i;
# after the loop i == n so the value was the same, but n is the intended
# denominator and the /i form breaks silently if the loop body ever changes.)
print(paste("Type II error rate for p.rQT1 in percentage is", (res.pow$rQT1/n)*100,"%"))
print(paste("Type II error rate for p.rQT2 in percentage is", (res.pow$rQT2/n)*100,"%"))
print(paste("Type II error rate for p.rQT3 in percentage is", (res.pow$rQT3/n)*100,"%"))
print(paste("Type II error rate for p.QT1 in percentage is", (res.pow$QT1/n)*100,"%"))
print(paste("Type II error rate for p.QT2 in percentage is", (res.pow$QT2/n)*100,"%"))
print(paste("Type II error rate for p.QT3 in percentage is", (res.pow$QT3/n)*100,"%"))
print(paste("Type II error rate for p.skat in percentage is", (res.pow$p.skat/n)*100,"%"))
print(paste("Type II error rate for p.skat.o in percentage is", (res.pow$p.skat.o/n)*100,"%"))
###
print(paste("Avg pooled variance rQT", (res.pow$var.pool.rqt/n)))
print(paste("Avg pooled variance QT", (res.pow$var.pool.qt/n)))
###
print(paste("Avg mean vif rQT", (res.pow$vif.rqt/n)))
print(paste("Avg mean vif QT", (res.pow$vif.qt/n)))
print(paste("Avg pvalue rqt1", (res.pow$prqt1/n)))
print(paste("Avg pvalue rqt2", (res.pow$prqt2/n)))
print(paste("Avg pvalue rqt3", (res.pow$prqt3/n)))
###
print(paste("Avg pvalue qt1", (res.pow$pqt1/n)))
print(paste("Avg pvalue qt2", (res.pow$pqt2/n)))
print(paste("Avg pvalue qt3", (res.pow$pqt3/n)))
###
print(paste("Avg pvalue pskat", (res.pow$pskat/n)))
print(paste("Avg pvalue pskato", (res.pow$pskato/n)))
res.table[as.character(s),] <- unlist(res.pow)
write.table(x = res.pow, file = paste(proj.dir, "res.pow", s, ".txt", sep=""), row.names = F, quote=F)
res.table.case[as.character(s), 1] <- res.table.case[as.character(s), 1]/n
res.table[as.character(s), ] <- res.table[as.character(s), ]/n
print(res.table)
}
# Final summaries accumulated across all scenarios (one row per causal-SNP
# share, as set up above); written as plain-text tables into proj.dir.
print("!!! Final results !!!")
print(res.table)
write.table(x = res.table, file = paste(proj.dir, "res.table.pow.txt", sep=""))
write.table(x = res.table.case, file = paste(proj.dir, "res.casecont.txt", sep=""))
# Setup for the rqt LASSO power test (dichotomous outcome, LD present).
library(rqt)
library(SKAT)
# how to run: > source("~/Dropbox/rqt/AppNote/MajorRevision/tests/power.tests/power.ld.dich/power.ld.dich_lasso.R")
set.seed(100500)
proj.dir <- "/Users/ilya/Projects/rqt.dev/tests/power.tests/power.ld.dich_lasso/" # project directory, change it if you need.
n.snp <- 50
n <- 1000 # testing 1,000 times
alpha.level <- 1e-3
# One results row per causal-SNP share (0.1 and 0.25); 20 summary columns:
# type II error counts, pooled variances, VIFs and p-value sums per method.
res.table <- data.frame(matrix(nrow=2, ncol=20))
colnames(res.table) <- c("rQT1", "rQT2", "rQT3",
"QT1", "QT2", "QT3",
"p.skat", "p.skat.o",
"var.pool.rqt", "var.pool.qt",
"vif.rqt", "vif.qt",
"prqt1", "prqt2", "prqt3", "pqt1", "pqt2", "pqt3", "pskat", "pskato")
rownames(res.table) <- c("0.1","0.25")
# Running count of case subjects observed per scenario.
res.table.case <- data.frame(matrix(nrow=2, ncol=1, 0))
colnames(res.table.case) <- c("case")
rownames(res.table.case) <- c("0.1","0.25")
ncs <- 1500 # Number of cases
nct <- 1500 # Number of controls
# One scenario per causal-SNP share s; res.pow accumulates per-scenario tallies.
for(s in c(0.1, 0.25)) {
res.pow <- list(rQT1=0, rQT2=0, rQT3=0,
QT1=0, QT2=0, QT3=0,
p.skat=0, p.skat.o=0,
var.pool.rqt=0, var.pool.qt=0,
vif.rqt=0, vif.qt=0,
prqt1=0, prqt2=0, prqt3=0, pqt1=0, pqt2=0, pqt3=0, pskat=0, pskato=0)
n.caus <- round(n.snp*s,digits = 0)
# Random +/-0.2 effect per causal SNP, written as "index:effect" lines.
# NOTE(review): the lambda parameter `n` shadows the global iteration count n.
eff <- sapply(0:(n.caus-1), function(n) { paste(n, ':', ifelse( (runif(1) > 0.5), 0.2, -0.2 ), sep='') })
write(x = eff, paste(proj.dir, "effects.txt", sep=""))
# LD file: pair adjacent SNPs (r = 0.95) except when j is a multiple of 4.
ld <- c()
for(j in 0:(n.snp-2)) {
if(j %% 4) {
ld <- c(ld, paste( j, ',', j+1, ',', 0.95, sep=''))
}
}
write(x = ld, paste(proj.dir, "ldfile.txt", sep=""))
# Replicate loop: simulate one dataset, run all association tests on it.
# NOTE(review): prefer seq_len(n) over 1:n (safe when n == 0).
for (i in 1:n){
print(paste("n.caus.snp:", n.caus, "Iteration:", i))
# Data simulation #
# Prepare raw files with cophesim () and plink #
system(paste("plink --simulate-ncases ", ncs, " --simulate-ncontrols ", nct, " --simulate ", proj.dir, "wgas.sim ", " --out ", proj.dir, "sim.plink ", " --make-bed >/dev/null", sep=''))
system(paste("python /Users/ilya/Projects/cophesim_stable/cophesim.py -i ", proj.dir, "sim.plink -o ", proj.dir, "testout" , " -ce ", proj.dir, "effects.txt", " -LD ", proj.dir, "ldfile.txt", " >/dev/null", sep=''))
system(paste("plink --file ", proj.dir, "testout_pheno_bin.txt", " --recodeA --out ", proj.dir, "testout_pheno_bin.txt >/dev/null", sep=''))
# Combining Phenotype and Genotype #
## Dichotomous ##
p <- read.table(paste(proj.dir, "testout_pheno_bin.txt", sep=''), header=TRUE)
p <- p[["pheno"]]
g <- read.table(paste(proj.dir, "testout_pheno_bin.txt.raw", sep=''), header=TRUE)
# Drop the first six columns (presumably PLINK .raw metadata columns;
# TODO confirm against the .raw header) keeping only genotype dosages.
g <- g[,7:dim(g)[2]]
colnames(g) <- paste("snp", 1:n.snp, sep="")
d <- cbind(pheno=p, g)
write.table(x = d, file=paste(proj.dir, "test.bin", i, ".dat", sep=''), quote = FALSE, row.names = FALSE)
res.table.case[as.character(s), 1] <- res.table.case[as.character(s), 1] + length(which(p==1))
# Tests #
## RQT ##
data <- data.matrix(read.table(paste(proj.dir, "test.bin", i, ".dat", sep=''), header=TRUE))
pheno <- data[,1]
geno <- data[, 2:dim(data)[2]]
colnames(geno) <- paste(seq(1, dim(geno)[2]))
geno.obj <- SummarizedExperiment(geno)
obj <- rqt(phenotype=pheno, genotype=geno.obj)
# rQT: regularized test with LASSO dimension reduction.
res <- geneTest(obj, method="lasso", out.type = "D", cumvar.threshold = 70, weight=F)
print(paste("p.rQT1", results(res)$pValue$pVal1))
print(paste("p.rQT2", results(res)$pValue$pVal2))
print(paste("p.rQT3", results(res)$pValue$pVal3))
print(paste("Var pooled rQT: ", results(res)$var.pooled))
print(paste("Mean vif rQT: ", results(res)$mean.vif))
## QT ##
# QT: same test without regularization (method="none"), as the baseline.
res.qt <- geneTest(obj, method="none", out.type = "D", weight=F)
print(paste("p.QT1", results(res.qt)$pValue$pVal1))
print(paste("p.QT2", results(res.qt)$pValue$pVal2))
print(paste("p.QT3", results(res.qt)$pValue$pVal3))
print(paste("Var pooled QT: ", results(res.qt)$var.pooled))
print(paste("Mean vif QT: ", results(res.qt)$mean.vif))
## SKAT ##
obj.b<-SKAT_Null_Model(pheno ~ 1, out_type="D")
res.skat <- SKAT(geno, obj.b)$p.value
print(paste("skat", res.skat))
## SKAT-O ##
obj.b<-SKAT_Null_Model(pheno ~ 1, out_type="D")
res.skat.o <- SKAT(geno, obj.b, method = "optimal.adj")$p.value
print(paste("skat.o", res.skat.o))
# Calculating power #
# Helper: fold one p-value into the running tallies.  A test fails to reject
# (counts toward the type II error) when its p-value exceeds alpha.level; an
# NA p-value is treated as a failure to reject and contributes 1 to the
# p-value sum.  This reproduces the six copy-pasted if/else blocks it replaces.
tally <- function(pow, pval, cnt, psum) {
  if (is.na(pval)) {
    pow[[cnt]] <- pow[[cnt]] + 1
    pow[[psum]] <- pow[[psum]] + 1
  } else {
    if (pval > alpha.level) pow[[cnt]] <- pow[[cnt]] + 1
    pow[[psum]] <- pow[[psum]] + pval
  }
  pow
}
### rQT ###
res.pow <- tally(res.pow, results(res)$pValue$pVal1, "rQT1", "prqt1")
res.pow <- tally(res.pow, results(res)$pValue$pVal2, "rQT2", "prqt2")
res.pow <- tally(res.pow, results(res)$pValue$pVal3, "rQT3", "prqt3")
### QT ###
res.pow <- tally(res.pow, results(res.qt)$pValue$pVal1, "QT1", "pqt1")
res.pow <- tally(res.pow, results(res.qt)$pValue$pVal2, "QT2", "pqt2")
res.pow <- tally(res.pow, results(res.qt)$pValue$pVal3, "QT3", "pqt3")
### SKAT/SKAT-O ###
# SKAT p-values are never NA here, so only the simple tally is needed.
if (res.skat > alpha.level) (res.pow$p.skat <- res.pow$p.skat + 1)
res.pow$pskat <- res.pow$pskat + res.skat
if (res.skat.o > alpha.level) (res.pow$p.skat.o <- res.pow$p.skat.o + 1)
res.pow$pskato <- res.pow$pskato + res.skat.o
### Pooled variance ###
# Diagnostics accumulated for averaging after the loop.
res.pow$var.pool.rqt <- res.pow$var.pool.rqt + results(res)$var.pooled
res.pow$var.pool.qt <- res.pow$var.pool.qt + results(res.qt)$var.pooled
### Mean VIF ###
res.pow$vif.rqt <- res.pow$vif.rqt + results(res)$mean.vif
res.pow$vif.qt <- res.pow$vif.qt + results(res.qt)$mean.vif
# Printing results #
# Running summaries after each iteration; dividing by i (iterations so far)
# gives the current estimates, updated as the simulation progresses.
print(paste("Type II error rate for p.rQT1 in percentage is", (res.pow$rQT1/i)*100,"%"))
print(paste("Type II error rate for p.rQT2 in percentage is", (res.pow$rQT2/i)*100,"%"))
print(paste("Type II error rate for p.rQT3 in percentage is", (res.pow$rQT3/i)*100,"%"))
###
print(paste("Type II error rate for p.QT1 in percentage is", (res.pow$QT1/i)*100,"%"))
print(paste("Type II error rate for p.QT2 in percentage is", (res.pow$QT2/i)*100,"%"))
print(paste("Type II error rate for p.QT3 in percentage is", (res.pow$QT3/i)*100,"%"))
###
print(paste("Type II error rate for p.skat in percentage is", (res.pow$p.skat/i)*100,"%"))
print(paste("Type II error rate for p.skat.o in percentage is", (res.pow$p.skat.o/i)*100,"%"))
###
print(paste("Avg pooled variance rQT", (res.pow$var.pool.rqt/i)))
print(paste("Avg pooled variance QT", (res.pow$var.pool.qt/i)))
###
print(paste("Avg mean vif rQT", (res.pow$vif.rqt/i)))
print(paste("Avg mean vif QT", (res.pow$vif.qt/i)))
### P-values ###
print(paste("Avg pvalue rqt1", (res.pow$prqt1/i)))
print(paste("Avg pvalue rqt2", (res.pow$prqt2/i)))
print(paste("Avg pvalue rqt3", (res.pow$prqt3/i)))
###
print(paste("Avg pvalue qt1", (res.pow$pqt1/i)))
print(paste("Avg pvalue qt2", (res.pow$pqt2/i)))
print(paste("Avg pvalue qt3", (res.pow$pqt3/i)))
###
print(paste("Avg pvalue pskat", (res.pow$pskat/i)))
print(paste("Avg pvalue pskato", (res.pow$pskato/i)))
# Cleaning up #
# Remove per-iteration objects so leftover values cannot leak into the next
# replicate, then delete the temporary simulation files.
rm(p, d, obj.b, res.skat.o, data, pheno, geno, obj, res, res.qt, geno.obj)
# unlink() is portable and avoids shelling out; it expands * wildcards,
# so it covers the same files the original system("rm ...") calls removed.
unlink(paste(proj.dir, "test.bin", i, ".dat", sep=''))
unlink(paste(proj.dir, "testout_pheno_bin.txt*", sep=''))
unlink(paste(proj.dir, "sim.plink.*", sep=''))
}
# Printing results #
# Final per-scenario type II error rates, normalised by the total iteration
# count n.  (The original divided the three QT rows by the loop variable i;
# after the loop i == n so the value was the same, but n is the intended
# denominator and the /i form breaks silently if the loop body ever changes.)
print(paste("Type II error rate for p.rQT1 in percentage is", (res.pow$rQT1/n)*100,"%"))
print(paste("Type II error rate for p.rQT2 in percentage is", (res.pow$rQT2/n)*100,"%"))
print(paste("Type II error rate for p.rQT3 in percentage is", (res.pow$rQT3/n)*100,"%"))
print(paste("Type II error rate for p.QT1 in percentage is", (res.pow$QT1/n)*100,"%"))
print(paste("Type II error rate for p.QT2 in percentage is", (res.pow$QT2/n)*100,"%"))
print(paste("Type II error rate for p.QT3 in percentage is", (res.pow$QT3/n)*100,"%"))
print(paste("Type II error rate for p.skat in percentage is", (res.pow$p.skat/n)*100,"%"))
print(paste("Type II error rate for p.skat.o in percentage is", (res.pow$p.skat.o/n)*100,"%"))
###
# Averages over all n replicates, then store the scenario row in res.table.
print(paste("Avg pooled variance rQT", (res.pow$var.pool.rqt/n)))
print(paste("Avg pooled variance QT", (res.pow$var.pool.qt/n)))
###
print(paste("Avg mean vif rQT", (res.pow$vif.rqt/n)))
print(paste("Avg mean vif QT", (res.pow$vif.qt/n)))
print(paste("Avg pvalue rqt1", (res.pow$prqt1/n)))
print(paste("Avg pvalue rqt2", (res.pow$prqt2/n)))
print(paste("Avg pvalue rqt3", (res.pow$prqt3/n)))
###
print(paste("Avg pvalue qt1", (res.pow$pqt1/n)))
print(paste("Avg pvalue qt2", (res.pow$pqt2/n)))
print(paste("Avg pvalue qt3", (res.pow$pqt3/n)))
###
print(paste("Avg pvalue pskat", (res.pow$pskat/n)))
print(paste("Avg pvalue pskato", (res.pow$pskato/n)))
# unlist() order relies on res.pow fields matching the res.table columns.
res.table[as.character(s),] <- unlist(res.pow)
# NOTE(review): res.pow is written out as raw counts/sums here, while
# res.table below is normalised by n — presumably intentional; confirm.
write.table(x = res.pow, file = paste(proj.dir, "res.pow", s, ".txt", sep=""), row.names = F, quote=F)
res.table.case[as.character(s), 1] <- res.table.case[as.character(s), 1]/n
res.table[as.character(s), ] <- res.table[as.character(s), ]/n
print(res.table)
}
# Final summaries accumulated across both scenarios, written into proj.dir.
print("!!! Final results !!!")
print(res.table)
write.table(x = res.table, file = paste(proj.dir, "res.table.pow.txt", sep=""))
write.table(x = res.table.case, file = paste(proj.dir, "res.casecont.txt", sep=""))
# Split the data into k cross-validation folds (stratified on genotype by
# caret::createFolds) and write matching train/test CSVs for each fold.
k <- 4
# Same seed before both calls so the two partitions are identical, making
# the returned train indices the complement of the test indices per fold.
set.seed(1)
res_train <- createFolds(behavior$genotype, k, list = TRUE, returnTrain = TRUE)
set.seed(1)
res_test <- createFolds(behavior$genotype, k)
# One pass per fold instead of four copy-pasted blocks; also creates the
# same f<i>train / f<i>test variables the original code exposed, and writes
# the CSVs in the same order (test before train).
for (fold in seq_len(k)) {
  fold_name <- paste0("Fold", fold)
  train_df <- mydfb[res_train[[fold_name]], ]
  test_df <- mydfb[res_test[[fold_name]], ]
  assign(paste0("f", fold, "train"), train_df)
  assign(paste0("f", fold, "test"), test_df)
  write.csv(test_df, file = paste0("f", fold, "test.csv"))
  write.csv(train_df, file = paste0("f", fold, "train.csv"))
}
| /R/savefoldds.R | no_license | portokalh/adforesight | R | false | false | 721 | r | k<-4
# Build k train/test fold index sets stratified on genotype; the same seed
# before both createFolds() calls keeps the two partitions identical, so the
# train indices are the complement of the test indices per fold.
set.seed(1)
res_train<-createFolds(behavior$genotype,k, list = TRUE, returnTrain = TRUE)
set.seed(1)
res_test<-createFolds(behavior$genotype,k)
# Per-fold subsets of mydfb, written to f<i>train.csv / f<i>test.csv.
f1train<-mydfb[res_train$Fold1,]
f1test<-mydfb[res_test$Fold1,]
write.csv(f1test,file = "f1test.csv")
write.csv(f1train,file = "f1train.csv")
f2train<-mydfb[res_train$Fold2,]
f2test<-mydfb[res_test$Fold2,]
write.csv(f2test,file = "f2test.csv")
write.csv(f2train,file = "f2train.csv")
f3train<-mydfb[res_train$Fold3,]
f3test<-mydfb[res_test$Fold3,]
write.csv(f3test,file = "f3test.csv")
write.csv(f3train,file = "f3train.csv")
f4train<-mydfb[res_train$Fold4,]
f4test<-mydfb[res_test$Fold4,]
write.csv(f4test,file = "f4test.csv")
write.csv(f4train,file = "f4train.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\docType{methods}
\name{DE}
\alias{DE}
\alias{DE,Transcriptogram-method}
\alias{DE-method}
\title{Get DE}
\usage{
DE(object)
\S4method{DE}{Transcriptogram}(object)
}
\arguments{
\item{object}{An object of class Transcriptogram.}
}
\value{
This method returns the content of the DE slot of an object of
class Transcriptogram.
}
\description{
Gets the content of the DE slot of an object of class Transcriptogram.
}
\examples{
transcriptogram <- transcriptogramPreprocess(association, Hs900, 50)
\dontrun{
transcriptogram <- transcriptogramStep1(transcriptogram, GSE9988, GPL570)
transcriptogram <- transcriptogramStep2(transcriptogram)
levels <- c(rep(FALSE, 3), rep(TRUE, 3))
transcriptogram <- differentiallyExpressed(transcriptogram, levels, 0.01)
DE(transcriptogram)
}
}
\seealso{
\link[transcriptogramer]{Hs900},
\link[transcriptogramer]{association},
\link[transcriptogramer]{transcriptogramPreprocess}
}
\author{
Diego Morais
}
| /man/DE-method.Rd | no_license | arthurvinx/transcriptogramer | R | false | true | 1,086 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\docType{methods}
\name{DE}
\alias{DE}
\alias{DE,Transcriptogram-method}
\alias{DE-method}
\title{Get DE}
\usage{
DE(object)
\S4method{DE}{Transcriptogram}(object)
}
\arguments{
\item{object}{An object of class Transcriptogram.}
}
\value{
This method returns the content of the DE slot of an object of
class Transcriptogram.
}
\description{
Gets the content of the DE slot of an object of class Transcriptogram.
}
\examples{
transcriptogram <- transcriptogramPreprocess(association, Hs900, 50)
\dontrun{
transcriptogram <- transcriptogramStep1(transcriptogram, GSE9988, GPL570)
transcriptogram <- transcriptogramStep2(transcriptogram)
levels <- c(rep(FALSE, 3), rep(TRUE, 3))
transcriptogram <- differentiallyExpressed(transcriptogram, levels, 0.01)
DE(transcriptogram)
}
}
\seealso{
\link[transcriptogramer]{Hs900},
\link[transcriptogramer]{association},
\link[transcriptogramer]{transcriptogramPreprocess}
}
\author{
Diego Morais
}
|
# Text-mining demo on Woolworth reviews using the texer package.
library(magrittr)
library(gener)
source('~/Documents/software/R/packages/texer/R/tmtools.R')
source('~/Documents/software/R/packages/texer/R/textminer.R')
load("~/Documents/software/R/projects/tutorials/script/texer/woolworth.RData")
# Reviews arrive as a list; flatten into a one-column character data frame.
# (The object name in the RData file is misspelled; keep it as-is.)
dat = all_reveiws_woolworth %>% unlist %>% as.data.frame
colnames(dat) = 'text'
dat$text %<>% as.character
# Mining settings: text2vec backend, brand names added as stop words.
ss = genDefaultSettings() %>% list.edit(tm_package = 'text2vec')
ss$stop_words %<>% c('woolworth', 'woolworths', 'woolies')
x = TEXT.MINER(dat, settings = ss)
x$clust(5)
# Word cloud per cluster count, under both weighting schemes — replaces
# ten copy-pasted calls; same call order (tfidf then freq, cn = 5..1).
for (cn in 5:1) {
  x$plot.wordCloud(weighting = 'tfidf', cn = cn)
  x$plot.wordCloud(weighting = 'freq', cn = cn)
}
# Topic model with 4 topics.
lda = x$get.lda(4)
lda$plot()
| /script/texer/test_1.R | no_license | genpack/tutorials | R | false | false | 1,012 | r | library(magrittr)
library(gener)
source('~/Documents/software/R/packages/texer/R/tmtools.R')
source('~/Documents/software/R/packages/texer/R/textminer.R')
load("~/Documents/software/R/projects/tutorials/script/texer/woolworth.RData")
dat = all_reveiws_woolworth %>% unlist %>% as.data.frame
colnames(dat) = 'text'
dat$text %<>% as.character
ss = genDefaultSettings() %>% list.edit(tm_package = 'text2vec')
ss$stop_words %<>% c('woolworth', 'woolworths', 'woolies')
x = TEXT.MINER(dat, settings = ss)
x$clust(5)
x$plot.wordCloud(weighting = 'tfidf', cn = 5)
x$plot.wordCloud(weighting = 'freq', cn = 5)
x$plot.wordCloud(weighting = 'tfidf', cn = 4)
x$plot.wordCloud(weighting = 'freq', cn = 4)
x$plot.wordCloud(weighting = 'tfidf', cn = 3)
x$plot.wordCloud(weighting = 'freq', cn = 3)
x$plot.wordCloud(weighting = 'tfidf', cn = 2)
x$plot.wordCloud(weighting = 'freq', cn = 2)
x$plot.wordCloud(weighting = 'tfidf', cn = 1)
x$plot.wordCloud(weighting = 'freq', cn = 1)
lda = x$get.lda(4)
lda$plot()
|
#==============================================================================================#
# Script created by Lindsay Chaney 2019 - lindsay.chaney@snow.edu
# Script created in version R 3.6.1
# This script is used to LOAD data and packages needed for
# Chaney & Buacom 2019 "The soil microbial community alters patterns of
#selection on flowering time and fitness related traits in Ipomoea purpurea"
#==============================================================================================#
pal <- wes_palette("Zissou", 100, type = "continuous") # set colour palette
#inoculum
# Thin-plate spline of relative fitness over (growth rate, flowering day).
xFGi <- cbind(mgdat2i$HeightRGRC, mgdat2i$DOFF)
outFGi <- Tps(xFGi, mgdat2i$RelFit)
#surface plot
FG.surface.In <- surface(outFGi, type="p", xlab="Growth", ylab="Flowering Day", zlab="Fitness", add.legend=FALSE, col= pal, border = NA)
#contour plot
# points() must follow immediately: it overlays the most recent plot.
FG.contour.In <- surface(outFGi, type="C", xlab="Growth", ylab="Flowering Day", add.legend = TRUE, col = pal)
points(xFGi, pch = 2, col = "gray20", cex = .8)
#autoclave
# Same surfaces for the autoclaved (sterilised) soil treatment.
xFGac <- cbind(mgdat2ac$HeightRGRC, mgdat2ac$DOFF)
outFGac <- Tps(xFGac, mgdat2ac$RelFit)
#surface plot
FG.surface.Au <- surface(outFGac, type="p", xlab="Growth", ylab="Flowering Day", zlab="RelFit", add.legend = FALSE, col= pal, border = NA)
#contour plot
FG.contour.Au <- surface(outFGac, type="C", xlab="Growth", ylab="Flowering Day", zlab="RelFit", add.legend = TRUE, col = pal)
points(xFGac, pch = 2, col = "gray20", cex = .8)
| /Analysis/03_Func_selection_gradient_surfaceplot_treatment.R | permissive | lchaney/MG_Microbe | R | false | false | 1,465 | r | #==============================================================================================#
# Script created by Lindsay Chaney 2019 - lindsay.chaney@snow.edu
# Script created in version R 3.6.1
# This script is used to LOAD data and packages needed for
# Chaney & Buacom 2019 "The soil microbial community alters patterns of
#selection on flowering time and fitness related traits in Ipomoea purpurea"
#==============================================================================================#
pal <- wes_palette("Zissou", 100, type = "continuous") #set color pallete
#inoculum
xFGi <- cbind(mgdat2i$HeightRGRC, mgdat2i$DOFF)
outFGi <- Tps(xFGi, mgdat2i$RelFit)
#surface plot
FG.surface.In <- surface(outFGi, type="p", xlab="Growth", ylab="Flowering Day", zlab="Fitness", add.legend=FALSE, col= pal, border = NA)
#contour plot
FG.contour.In <- surface(outFGi, type="C", xlab="Growth", ylab="Flowering Day", add.legend = TRUE, col = pal)
points(xFGi, pch = 2, col = "gray20", cex = .8)
#autoclave
xFGac <- cbind(mgdat2ac$HeightRGRC, mgdat2ac$DOFF)
outFGac <- Tps(xFGac, mgdat2ac$RelFit)
#surface plot
FG.surface.Au <- surface(outFGac, type="p", xlab="Growth", ylab="Flowering Day", zlab="RelFit", add.legend = FALSE, col= pal, border = NA)
#contour plot
FG.contour.Au <- surface(outFGac, type="C", xlab="Growth", ylab="Flowering Day", zlab="RelFit", add.legend = TRUE, col = pal)
points(xFGac, pch = 2, col = "gray20", cex = .8)
|
### zonal extraction and summary of pressure data
### MRF: Feb 25 2015
########## NOTE: For future versions make rgn_id into sp_id for CCAMLR regions!!!
source('../ohiprep/src/R/common.R')
library(raster)
library(rgdal)
library(dplyr)
# raster must be attached before rasterOptions() is callable; the original
# set the tmpdir before library(raster), which errors in a fresh session.
tmpdir <- '~/big/R_raster_tmp'
rasterOptions(tmpdir=tmpdir)
# raster/zonal data
rast_loc <- file.path(dir_neptune_data, "git-annex/Global/NCEAS-Regions_v2014/data/sp_mol_raster_1km")
zones <- raster(file.path(rast_loc, "sp_mol_raster_1km.tif")) # raster data
rgn_data <- read.csv(file.path(rast_loc, 'regionData.csv')) # data for sp_id's used in raster
# save location
save_loc <- "globalprep/PressuresRegionExtract"
#### Acid ----
# read in acid data (should be 10 layers, with values 0 to 1)
rasts <- paste0('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/annual_oa_rescaled_1km_int_clip_', c(2005:2014), '.tif')
# raster::stack() accepts a character vector of file paths, so the layers can
# be stacked in one call instead of growing the stack file-by-file in a loop.
pressure_stack <- stack(rasts)
# extract the mean pressure score per region (zone) for every year layer:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
# attach region metadata and reshape to long form, one row per region-year
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
gather("year", "pressure_score", starts_with("annual")) %>%
mutate(year=as.numeric(gsub('annual_oa_rescaled_1km_int_clip_', '', year)))
write.csv(data, file.path(save_loc, "tmp/acid.csv"), row.names=FALSE)
## save toolbox data for different years/regions
# Writes the pressure scores for one region type and one year to
# data/acid_<regionType>_<year>.csv.  CCAMLR regions are identified by
# sp_id, every other region type by rgn_id — that id difference was the
# only thing that varied between the two duplicated pipelines this replaces.
saveData <- function(regionType, newYear){
  acid <- data %>%
    filter_(~sp_type == regionType) %>%
    filter_(~year == newYear)
  if(regionType == 'eez-ccamlr'){
    acid <- dplyr::select(acid, rgn_id=sp_id, pressure_score)
  } else{
    acid <- dplyr::select(acid, rgn_id, pressure_score)
  }
  acid <- dplyr::arrange(acid, rgn_id)
  write.csv(acid, file.path(save_loc, sprintf('data/acid_%s_%s.csv', regionType, newYear)), row.names=FALSE)
}
### extract data
# One output file per region type per year, 2011-2014; the nested loop
# reproduces the exact call order of the three duplicated loops it replaces.
for (regionType in c("eez", "eez-ccamlr", "fao")) {
  for (newYear in 2011:2014) {
    saveData(regionType, newYear)
  }
}
### try visualizing the data using googleVis plot
library(googleVis)
# EEZ pressure scores by region through time (the duplicated second
# library(googleVis) call was removed).
plotData <- data %>%
filter(sp_type == "eez") %>%
dplyr::select(rgn_name, year, pressure_score)
Motion=gvisMotionChart(plotData,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'acid.html'))
### get estimate of gap-filling
# One-time preprocessing (kept for the record): collapse the interpolated-cell
# raster to a 1/NA mask.
# rasts <- raster('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells.tif')
# reclassify(rasts, c(-Inf, Inf, 1), filename='/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells_yes_no.tif', progress="text")
interp <- raster('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells_yes_no.tif')
# extract data for each region: summing the 1/NA mask counts interpolated cells
regions_stats <- zonal(interp, zones, fun="sum", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
dplyr::select(sp_id, rgn_id, sp_type, rgn_name, interpolated=sum)
write.csv(data, file.path(save_loc, "tmp/acid_interpolated_cells.csv"), row.names=FALSE)
## Get all cell counts for each region
# rc_count <- freq(zones, progress="text")
# rc_count <- data.frame(rc_count)
# write.csv(rc_count, file.path(save_loc, "sp_id_areas.csv"), row.names=FALSE)
# Proportion gap-filled = interpolated cells / total cells per region.
rc_count2 <- read.csv(file.path(save_loc, "sp_id_areas.csv")) %>%
dplyr::select(sp_id=value, cellNum=count) %>%
left_join(data, by='sp_id') %>%
filter(!is.na(sp_id)) %>%
mutate(prop_gap_filled = interpolated/cellNum)
write.csv(rc_count2, file.path(save_loc, "tmp/acid_prop_interpolated.csv"), row.names=FALSE)
final_gap <- rc_count2 %>%
dplyr::filter(sp_type == "eez") %>%
dplyr::select(rgn_id, gap_filled = prop_gap_filled) %>%
mutate(gap_filled = round(gap_filled, 2)) %>%
arrange(rgn_id)
write.csv(final_gap, file.path(save_loc, "data/acid_gap_fill_attr.csv"), row.names=FALSE)
final_gap <- read.csv(file.path(save_loc, "data/acid_gap_fill_attr.csv"))
library(ggplot2)
ggplot(final_gap, aes(gap_filled)) +
geom_histogram() +
theme_bw() +
labs(title="Acid: Proportion gap-filled")
# count of EEZ regions that are more than 90% gap-filled
sum(final_gap$gap_filled > 0.9)
#########################################
#### SLR ----
#########################################
# https://github.com/OHI-Science/issues/issues/374
# the following raster is log transformed and then the 99.99th quantile was used to establish the standardization value.
# The outcome was that most regions had a pressure score of around 0.7 - which seemed high for this pressure. This
# suggested that we should probably avoid log transforming these particular data.
# rast <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_final.tif')
rast <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_nonlog_final.tif')
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
write.csv(eez, file.path(save_loc, 'data/slr_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/slr_eez_2015.csv'))
# fao <- data %>%   ## probably not a pressure in high seas
#   filter(sp_type=="fao") %>%
#   dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/slr_fao_2015.csv'), row.names=FALSE)
## should go through and eliminate the regions that do not have land
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
write.csv(antarctica, file.path(save_loc, 'data/slr_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for SLR")
quantile(eez$pressure_score)
## extract data to show proportion of gap-filling
# One-time preprocessing (kept for the record): collapse the interpolated-cell
# raster to a 1/NA mask.
# rasts <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells.tif')
# reclassify(rasts, c(-Inf, Inf, 1), filename='/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells_yes_no.tif', progress="text")
interp <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells_yes_no.tif')
# extract data for each region: summing the 1/NA mask counts interpolated cells
regions_stats <- zonal(interp, zones, fun="sum", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
dplyr::select(sp_id, rgn_id, sp_type, rgn_name, interpolated=sum)
write.csv(data, file.path(save_loc, "tmp/slr_interpolated_cells.csv"), row.names=FALSE)
# Proportion gap-filled = interpolated cells / total cells per region.
rc_count2 <- read.csv(file.path(save_loc, "sp_id_areas.csv")) %>%
dplyr::select(sp_id=value, cellNum=count) %>%
left_join(data, by='sp_id') %>%
filter(!is.na(sp_id)) %>%
mutate(prop_gap_filled = interpolated/cellNum)
write.csv(rc_count2, file.path(save_loc, "tmp/slr_prop_interpolated.csv"), row.names=FALSE)
final_gap <- rc_count2 %>%
dplyr::filter(sp_type == "eez") %>%
dplyr::select(rgn_id, gap_filled = prop_gap_filled) %>%
mutate(gap_filled = round(gap_filled, 2)) %>%
arrange(rgn_id)
write.csv(final_gap, file.path(save_loc, "data/slr_gap_fill_attr.csv"), row.names=FALSE)
final_gap <- read.csv(file.path(save_loc, "data/slr_gap_fill_attr.csv"))
#library(ggplot2)
ggplot(final_gap, aes(gap_filled)) +
geom_histogram() +
theme_bw() +
labs(title="SLR: Proportion area gap-filled")
# count of EEZ regions that are more than half gap-filled
sum(final_gap$gap_filled > 0.5)
#########################################
## Trash ----
#########################################
# some issues dealing with the preparation of these data:
# https://github.com/OHI-Science/issues/issues/306#issuecomment-72252954
# also want to apply ice mask so as to eliminate these regions
## creating data with ice mask
# one-time prep (commented out): multiply the rescaled trash raster by the
# ice mask so permanently ice-covered cells drop out
# trash <- raster('/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale.tif')
# ice_mask_resampled <- raster("/var/data/ohi/git-annex/Global/NCEAS-Pressures-Summaries_frazier2013/ice_mask_resampled")
# s <- stack(ice_mask_resampled, trash)
# overlay(s, fun=function(x,y) x*y,
# filename="/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale_icemask.tif",
# progress="text", overwrite=TRUE)
rast <- raster("/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale_icemask.tif")
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
# one output per region type; the zonal mean becomes pressure_score
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
#write.csv(eez, file.path(save_loc, 'data/trash_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/trash_eez_2015.csv'))
fao <- data %>% ## probably not a pressure in high seas
filter(sp_type=="fao") %>%
dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/trash_fao_2015.csv'), row.names=FALSE)
# CCAMLR regions are keyed by sp_id, renamed to rgn_id for the toolbox
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
#write.csv(antarctica, file.path(save_loc, 'data/trash_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for trash")
quantile(eez$pressure_score)
# interactive check: eez regions sorted by score
data %>%
filter(sp_type=="eez") %>%
arrange(mean)
#########################################
#### UV ----
#########################################
# https://github.com/OHI-Science/issues/issues/377
# 2 raster choices: logged and non-logged. Going with the logged version mainly out of tradition
rast <- raster('/var/data/ohi/git-annex/globalprep/Pressures_UV/output/uv_anomaly_diff_moll_1km_log_resc.tif')
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
# one output per region type; the zonal mean becomes pressure_score
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
#write.csv(eez, file.path(save_loc, 'data/uv_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/uv_eez_2015.csv'))
fao <- data %>%
filter(sp_type=="fao") %>%
dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/uv_fao_2015.csv'), row.names=FALSE)
# CCAMLR regions are keyed by sp_id, renamed to rgn_id for the toolbox
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
#write.csv(antarctica, file.path(save_loc, 'data/uv_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for UV")
quantile(eez$pressure_score)
# interactive check: eez regions sorted by score
data %>%
filter(sp_type=="eez") %>%
arrange(mean)
#########################################
#### SST ----
#########################################
# https://github.com/OHI-Science/issues/issues/499
# load and check relevant rasters
# each raster is a rescaled anomaly of a 5-year window vs the 1985-1989 baseline;
# the rast_20xx name is the scenario year it feeds
rast_2012 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2005_2009-1985_1989_rescaled_v2.tif'))
rast_2012
rast_2013 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2006_2010-1985_1989_rescaled_v2.tif'))
rast_2013
rast_2014 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2007_2011-1985_1989_rescaled_v2.tif'))
rast_2014
rast_2015 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2008_2012-1985_1989_rescaled_v2.tif'))
rast_2015
# apply ice mask
ice_mask <- raster("/var/data/ohi/git-annex/Global/NCEAS-Pressures-Summaries_frazier2013/ice_mask_resampled")
# multiply each scenario raster by the ice mask; get() resolves rast_2012..rast_2015 by name
for(i in 2012:2015){ #i=2012
rast <- get(paste0("rast_", i))
overlay(rast, ice_mask, fun=function(x,y) x*y, progress='text',
filename=file.path(dir_neptune_data,
sprintf('git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_%s_rescaled_icemask', i)),
overwrite=TRUE)
}
# extract data
# reload the masked rasters and give each layer a name that becomes the
# sst_<year> column after zonal extraction
sst_2012_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2012_rescaled_icemask'))
names(sst_2012_ice) <- "sst_2012"
sst_2013_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2013_rescaled_icemask'))
names(sst_2013_ice) <- "sst_2013"
plot(sst_2013_ice)
sst_2014_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2014_rescaled_icemask'))
names(sst_2014_ice) <- "sst_2014"
sst_2015_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2015_rescaled_icemask'))
names(sst_2015_ice) <- "sst_2015"
sst_stack <- stack(sst_2012_ice, sst_2013_ice, sst_2014_ice, sst_2015_ice)
# extract data
regions_stats <- zonal(sst_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
#write.csv(data, file.path(save_loc, "tmp/sst.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/sst.csv"))
## save data for toolbox
# One output file per region type (eez, eez-ccamlr, fao) and scenario year.
# The sst_<year> column for the scenario is renamed to the standard
# "pressure_score" column expected by the toolbox.
for(years in c(2012:2015)){ #years="2012"
scenario <- sprintf("sst_%s", years)
# eez regions: keyed by rgn_id
eez <- filter(data, sp_type == "eez")
eez <- eez[, c('rgn_id', scenario)]
names(eez)[names(eez) == scenario] <- "pressure_score"
write.csv(eez, sprintf('globalprep/PressuresRegionExtract/data/sst_eez_%s.csv', years), row.names=FALSE)
# CCAMLR (Antarctica) regions: keyed by sp_id, renamed to rgn_id for the toolbox
ant <- filter(data, sp_type == "eez-ccamlr")
ant <- ant[, c('sp_id', scenario)]
names(ant)[names(ant) == scenario] <- "pressure_score"
names(ant)[names(ant) == 'sp_id'] <- "rgn_id"
# bug fix: file name was missing the .csv extension (inconsistent with the eez file above)
write.csv(ant, sprintf('globalprep/PressuresRegionExtract/data/sst_eez-ccamlr_%s.csv', years), row.names=FALSE)
# fao (high seas) regions
fao <- filter(data, sp_type == "fao")
fao <- fao[, c('rgn_id', scenario)]
names(fao)[names(fao) == scenario] <- "pressure_score"
# bug fix: file name was missing the .csv extension
write.csv(fao, sprintf('globalprep/PressuresRegionExtract/data/sst_fao_%s.csv', years), row.names=FALSE)
}
## plot the data to make sure range of values for regions is reasonable
# 1. compare with last years data
old_sst <- read.csv(file.path(dir_neptune_data, "model/GL-NCEAS-Pressures_v2013a/data/cc_sst_2013_NEW.csv"))
# side-by-side table of last year's score vs each new scenario column
compare <- old_sst %>%
dplyr::select(rgn_id, old_pressure_score=pressure_score) %>%
left_join(data) %>%
filter(!(is.na(rgn_name))) %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, rgn_name, old_pressure_score, sst_2012, sst_2013, sst_2014, sst_2015)
#filtered out these, but wanted to make sure they didn't reflect underlying issues:
# compare[is.na(compare$sp_id), ] # ones that don't match new data are antarctica high seas regions (268, 271, 278), an NA high seas region (265), and conflict areas (255)
# compare[is.na(compare$old_pressure_score), ] # often Bosnia/Herzegovina falls out of raster analyses due to very small eez region
library(ggplot2)
# scatter vs the 1:1 line: points off the diagonal changed between assessments
ggplot(compare, aes(x=old_pressure_score, y=sst_2013)) +
geom_point(shape=19) +
theme_bw() +
geom_abline(intercept=0, slope=1) +
labs(title="SST comparison")
ggplot(compare, aes(x=sst_2013)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="SST 2013")
quantile(compare$sst_2013)
library(tidyr)
# long format for the motion chart: columns 3:7 are old score + sst_2012..sst_2015
compare_plot <- gather(compare, "year", "pressure_score", 3:7) %>%
filter(year != "old_pressure_score") %>%
mutate(year = as.numeric(gsub("sst_", "", year))) %>%
dplyr::select(rgn_name, year, pressure_score)
library(googleVis)
Motion=gvisMotionChart(compare_plot,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'sst.html'))
#### Fisheries ----
# read in fisheries pressure data (should be 8 layers, with values 0 to 1)
#check an example:
tmp <- raster('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output/catch_03_07_npp_hb_rescaled.tif')
files <- list.files('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output')
rescaled_files <- grep("_rescaled", files, value=TRUE)
# stack all rescaled catch layers for a single zonal extraction
pressure_stack <- stack()
for(rast in rescaled_files){ # rast = 'catch_03_07_npp_hb_rescaled.tif'
tmp <- raster(file.path('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output', rast))
pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each region:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/fisheries.csv"), row.names=FALSE)
# interactive check: regions with no data for the example layer
data[is.na(data$catch_03_07_npp_hb_rescaled), ]
data <- read.csv(file.path(save_loc, "tmp/fisheries.csv"))
# long format: one row per region/layer, layer names stripped of the suffix
data_long <- data %>%
gather("layer", "pressure_score", starts_with("catch")) %>%
mutate(layer = gsub('_rescaled', '', layer))
# mutate(pressure_score = ifelse(is.na(pressure_score), 0, pressure_score))
## record gap-filled regions:
# year = 2015:2012 recycles over the 8 layers, so each 5-year window maps to
# the same scenario year for both the hb and lb variants
convert_year <- data.frame(layer =c('catch_06_10_npp_hb', 'catch_05_09_npp_hb', 'catch_04_08_npp_hb', 'catch_03_07_npp_hb',
'catch_06_10_npp_lb', 'catch_05_09_npp_lb', 'catch_04_08_npp_lb', 'catch_03_07_npp_lb'),
year = 2015:2012)
gap_record <- data_long %>%
left_join(convert_year) %>%
mutate(gap_filled = ifelse(is.na(pressure_score), "gap-filled", "no"))
write.csv(gap_record, file.path(save_loc, "data/fisheries_gap_filling.csv"), row.names=FALSE)
### gap-fill some eez regions:
regions <- read.csv("src/LookupTables/rgn_georegions_wide_2013b.csv") %>%
dplyr::select(-rgn_nam)
## fill in a couple missing values:
# replace Bosnia with Croatio values
croatia <- data_long[data_long$rgn_id == 187,] %>%
dplyr::select(layer, pressure_score2=pressure_score) %>%
mutate(rgn_id = 232)
data_gapfill <- data_long %>%
left_join(croatia) %>%
mutate(pressure_score = ifelse(is.na(pressure_score), pressure_score2, pressure_score)) %>%
dplyr::select(-pressure_score2)
# replace arctic and Bouvet Island with zeros
data_gapfill$pressure_score[data_gapfill$rgn_id %in% c(105, 260)] <- 0
# regional gap-filling for remaining: Bulgaria (71), Romania (72), Georgia (74), Ukraine (75), Jordan (215)
# remaining NAs get the mean score of their UN r2 georegion, per layer
eez_gap_fill <- data_gapfill %>%
filter(sp_type == "eez") %>%
left_join(regions, by="rgn_id") %>%
group_by(layer, r2) %>%
mutate(mean_pressure_score = mean(pressure_score, na.rm=TRUE)) %>%
mutate(pressure_score = ifelse(is.na(pressure_score), mean_pressure_score, pressure_score)) %>%
ungroup() %>%
dplyr::select(sp_id, sp_type, rgn_id, rgn_name, layer, pressure_score)
## the two r2 regions that need gap-filled data:
# NOTE(review): the select() above drops the r2 column, so these two checks
# return zero rows — they likely should run before the select; verify intent
data.frame(eez_gap_fill[eez_gap_fill$r2 %in% c("151"), ])
data.frame(eez_gap_fill[eez_gap_fill$r2 %in% c(145), ])
### replacing previous eez data with gapfilled eez data:
pressure_data <- data_gapfill %>%
filter(sp_type != "eez") %>%
bind_rows(eez_gap_fill)
layerType <- unique(pressure_data$layer)
# write one toolbox file per layer and region type (eez / fao / ccamlr)
for(layer in layerType){ #layer="catch_03_07_npp_hb"
pressureData <- pressure_data[pressure_data$layer %in% layer, ]
# eez data
data <- pressureData %>%
filter(sp_type == 'eez') %>%
dplyr::select(rgn_id = rgn_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_eez.csv', layer)), row.names=FALSE)
# hs data
data <- pressureData %>%
filter(sp_type == 'fao') %>%
dplyr::select(rgn_id = rgn_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_fao.csv', layer)), row.names=FALSE)
# antarctica data
# CCAMLR regions are keyed by sp_id, renamed to rgn_id for the toolbox
data <- pressureData %>%
filter(sp_type == 'eez-ccamlr') %>%
dplyr::select(rgn_id = sp_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_ccamlr.csv', layer)), row.names=FALSE)
}
### visualizing the data using googleVis plot
# interactive motion charts of eez scores by year, one per bycatch type
library(googleVis)
# high-bycatch ("_hb") layers
high_bycatch <- pressure_data %>%
filter(sp_type == "eez") %>%
filter(layer %in% grep("_hb", layerType, value=TRUE)) %>%
left_join(convert_year) %>%
dplyr::select(rgn_name, year, pressure_score)
Motion=gvisMotionChart(high_bycatch,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'high_bycatch.html'))
# low-bycatch ("_lb") layers
low_bycatch <- pressure_data %>%
filter(sp_type == "eez") %>%
filter(layer %in% grep("_lb", layerType, value=TRUE)) %>%
left_join(convert_year) %>%
dplyr::select(rgn_name, year, pressure_score)
# bug fix: the low-bycatch chart was built from `high_bycatch`, so
# low_bycatch.html duplicated the high-bycatch chart
Motion=gvisMotionChart(low_bycatch,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'low_bycatch.html'))
#########################################
#### Land-based fertilizer and pesticide plume data prep ----
#########################################
rast_locs <- file.path(dir_halpern2008, "mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/work/land_based/before_2007/raw_global_results")
## peak at raster to see what is up:
check <- raster(file.path(rast_locs, 'global_plumes_fert_2012_raw.tif'))
## darn: different extents and such...need to make these the same
# process only the .tif files; record the 99.99th quantile of the ln(x+1)
# values for each (used elsewhere to choose rescaling coefficients)
files <- grep(".tif", list.files(rast_locs), value=TRUE, fixed=TRUE)
# bug fix: the original `data.frame(plumeData <- list.files(rast_locs), ...)`
# leaked a global `plumeData`, garbled the column name, and included non-tif
# files so rows did not align with the files actually processed below
quantiles <- data.frame(plumeData = files, quantile_9999_ln = NA)
for(plume in files){ #plume='global_plumes_fert_2007_raw.tif'
tmp <- raster(file.path(rast_locs, plume))
saveName <- gsub(".tif", "", plume, fixed=TRUE)  # fixed=TRUE: literal ".tif", not a regex
# ln(x+1) transform, written to the Frazier working directory
calc(tmp, function(x){log(x+1)}, progress="text", filename=file.path(rast_locs, sprintf("Frazier/%s_log.tif", saveName)), overwrite=TRUE)
tmp <- raster(file.path(rast_locs, sprintf("Frazier/%s_log.tif", saveName)))
quantiles$quantile_9999_ln[quantiles$plumeData == plume] <- quantile(tmp, .9999)
# extend each plume layer to the extent of the zones raster so they align
extend(tmp, zones, filename=file.path(rast_locs, sprintf("Frazier/%s_log_extend.tif", saveName)), progress="text", overwrite=TRUE)
# re-point tmp at the extended file before deleting the intermediate log raster
tmp <- raster(file.path(rast_locs, sprintf("Frazier/%s_log_extend.tif", saveName)))
unlink(file.path(rast_locs, sprintf('Frazier/%s_log.tif', saveName)))
}
#############################
## fertilizer ----
#############################
## scaling coefficient for fertlizer = 5.594088 (file with these values: ohiprep/globalprep/PressuresRegionExtract/land_based_quantiles.csv)
fert_scalar <- 5.594088
# all log/extended fertilizer plume rasters in the Frazier working directory
# (fix: the original chained `list_fert <- files <- ...`, needlessly clobbering `files`)
list_fert <- grep("_fert", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
for(fert in list_fert){ #fert="global_plumes_fert_2007_raw_log_extend.tif"
tmp <- raster(file.path(rast_locs, "Frazier", fert))
saveName <- gsub('.tif', '', fert, fixed=TRUE)  # fixed=TRUE: literal ".tif", not a regex
# rescale to [0, 1]: values above the scaling coefficient are capped at 1
calc(tmp, fun=function(x){ifelse(x>fert_scalar, 1, x/fert_scalar)},
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/%s_scaled.tif", saveName)), overwrite=TRUE)
}
# re-list so the freshly written *_scaled.tif files are picked up
list_fert <- grep("_fert", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
list_fert_scaled <- grep("_scaled", list_fert, value=TRUE)
# stack all scaled fertilizer layers for a single zonal extraction
pressure_stack <- stack()
for(rast in list_fert_scaled){
tmp <- raster(file.path(rast_locs, "Frazier", rast))
pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each eez region:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/nutrients_plume_data.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/nutrients_plume_data.csv"))
# long format: recover the data year by stripping the layer-name prefix/suffix
data <- gather(data, "year", "pressure_score", starts_with("global"))
data <- data %>%
mutate(year = gsub("global_plumes_fert_", "", year)) %>%
mutate(year = gsub("_raw_log_extend_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
filter(sp_type == "eez") %>% # this doesn't really apply to high seas regions and Antarctica is all zeros
dplyr::select(rgn_id, rgn_name, year, pressure_score)
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
# each scenario year uses the data year 3 years prior
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_fertilizers_score_%s.csv', scenario_year)), row.names=FALSE)
}
## extract at 3 nm (in addition to a pressure, this will be used for CW and the CW trend)
# (going to try using the polygon, rather than converting to raster)
offshore_3nm_poly <- readOGR(dsn="/var/data/ohi/git-annex/Global/NCEAS-Regions_v2014/data", "rgn_offshore3nm_mol")
offshore_3nm_poly <- offshore_3nm_poly[offshore_3nm_poly@data$rgn_type == "eez", ]
# this is here in case I decide to use this method instead of using the polygons to extract the data:
# tmp <- raster(file.path(rast_locs, "Frazier/global_plumes_fert_2007_raw_log_extend.tif"))
# rasterize(inland_3nm_poly, tmp, field='rgn_id',
# filename=file.path(rast_locs, "Frazier/inland_3nm.tif"), overwrite=TRUE,
# progress='text')
# mean score within each 3nm polygon for every layer in the stack
data <- raster::extract(pressure_stack, offshore_3nm_poly, na.rm=TRUE, normalizeWeights=FALSE, fun=mean, df=TRUE, progress="text")
data2 <- cbind(data, offshore_3nm_poly@data)
write.csv(data2, file.path(save_loc, "tmp/nutrients_plume_data_offshore_3nm.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/nutrients_plume_data_offshore_3nm.csv"))
data <- gather(data, "year", "pressure_score", starts_with("global"))
data <- data %>%
mutate(year = gsub("global_plumes_fert_", "", year)) %>%
mutate(year = gsub("_raw_log_extend_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
dplyr::select(rgn_id, rgn_name, year, pressure_score) %>%
filter(!is.na(pressure_score))#NA is Antarctica - this is fine
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save trend data
# per-region linear fit over the 5 data years ending 3 years before the
# scenario; annual slope * 5 = expected change over 5 years
trend_data <- data %>%
filter(year %in% (scenario_year-7):(scenario_year-3)) %>%
group_by(rgn_id) %>%
do(mdl = lm(pressure_score ~ year, data = .)) %>%
summarize(rgn_id,
trend = coef(mdl)['year'] * 5) %>%
ungroup()
write.csv(trend_data, file.path(save_loc, sprintf('data/cw_fertilizers_trend_%s.csv', scenario_year)), row.names=FALSE)
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_fertilizers_score_3nm_%s.csv', scenario_year)), row.names=FALSE)
}
#############################
## pesticides ----
#############################
## scaling coefficient for pesticides = 1.91788700716876 (file with these values: ohiprep/globalprep/PressuresRegionExtract/land_based_quantiles.csv)
pest_scalar <- 1.91788700716876
# all log/extended pesticide plume rasters in the Frazier working directory
list_pest <- grep("_pest", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
for(pest in list_pest){ #pest="global_plumes_pest_2007_raw_log_extend.tif"
tmp <- raster(file.path(rast_locs, "Frazier", pest))
saveName <- gsub('.tif', '', pest, fixed=TRUE)  # fixed=TRUE: literal ".tif", not a regex
# rescale to [0, 1]: values above the scaling coefficient are capped at 1
calc(tmp, fun=function(x){ifelse(x>pest_scalar, 1, x/pest_scalar)},
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/%s_scaled.tif", saveName)), overwrite=TRUE)
}
##################
## to get the chemical pressure: pesticides + ocean pollution + inorganic pollution
## need to make the op and ip rasters have the same extent:
pest_rast <- raster(file.path(rast_locs, "Frazier", "global_plumes_pest_2007_raw_log_extend_scaled.tif"))
# only one ocean pollution raster for both time periods (so only normalized by one time period)
library(spatial.tools)
op <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2013_final/normalized_by_one_time_period/ocean_pollution.tif')
# pad margins by one row/column, then force the extent to match the pesticide raster
op_extend <- modify_raster_margins(op, extent_delta=c(1,0,1,0))
extent(op_extend) = extent(pest_rast)
writeRaster(op_extend, file.path(rast_locs, "Frazier/ocean_pollution_extend.tif"), overwrite=TRUE)
# two rasters for inorganic pollution (2003-2006 and 2007-2010)
# I used the 2007-2010 raster (normalized by both time periods):
# ip_07_10 <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2013_final/normalized_by_two_time_periods/inorganic.tif')
# extend(ip_07_10, pest_rast, filename=file.path(rast_locs, "Frazier/inorganic_pollution_07_10_extend.tif"), progress='text')
ip_07_10_extend <- raster(file.path(rast_locs, "Frazier/inorganic_pollution_07_10_extend.tif"))
# but, it might be better to use the earlier raster for some time periods, if so, here is the link:
#ip_03_06 <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2008_final/normalized_by_two_time_periods/inorganic.tif')
# cell-wise sum of pesticide + ocean pollution + inorganic pollution per year
for(pest_year in 2007:2012){ #pest_year = 2007
pest_rast <- raster(file.path(rast_locs, "Frazier", sprintf("global_plumes_pest_%s_raw_log_extend_scaled.tif", pest_year)))
chem_stack <- stack(pest_rast, op_extend, ip_07_10_extend)
calc(chem_stack,
sum, na.rm=TRUE,
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", pest_year)), overwrite=TRUE)
}
## take a look at the distribution of scores
raster <- raster(file.path(rast_locs, "Frazier/chemical_pollution_2007.tif"))
quantile(raster, c(0.25, 0.50, 0.75, 0.9, 0.99, 0.999, 0.9999))
raster <- raster(file.path(rast_locs, "Frazier/chemical_pollution_2012.tif"))
quantile(raster, c(0.25, 0.50, 0.75, 0.9, 0.99, 0.999, 0.9999))
# cap the summed pressure at 1
for(chem_year in 2007:2012){ #chem_year=2012
tmp <- raster(file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", chem_year)))
calc(tmp, fun=function(x){ifelse(x>1, 1, x)},
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s_scaled.tif", chem_year)), overwrite=TRUE)
}
## delete intermediate files due to lack of space on neptune:
for(delete_year in 2007:2012){
unlink(file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", delete_year)))
}
# stack the capped chemical-pollution layers for zonal extraction
list_chem <- files <- grep("chemical_pollution", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
list_chem_scaled <- grep("_scaled", list_chem, value=TRUE)
pressure_stack <- stack()
for(rast in list_chem_scaled){
tmp <- raster(file.path(rast_locs, "Frazier", rast))
pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each eez region:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/chemical_data.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/chemical_data.csv"))
# long format: recover the data year by stripping the layer-name prefix/suffix
data <- gather(data, "year", "pressure_score", starts_with("chemical"))
data <- data %>%
mutate(year = gsub("chemical_pollution_", "", year)) %>%
mutate(year = gsub("_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
filter(sp_type == "eez") %>% # this doesn't really apply to high seas regions and Antarctica is all zeros
dplyr::select(rgn_id, rgn_name, year, pressure_score)
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
# each scenario year uses the data year 3 years prior
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_chemical_score_%s.csv', scenario_year)), row.names=FALSE)
}
## extract at 3 nm (in addition to a pressure, this will be used for CW and the CW trend)
# (going to try using the polygon, rather than converting to raster)
offshore_3nm_poly <- readOGR(dsn="/var/data/ohi/git-annex/Global/NCEAS-Regions_v2014/data", "rgn_offshore3nm_mol")
offshore_3nm_poly <- offshore_3nm_poly[offshore_3nm_poly@data$rgn_type == "eez", ]
# this is here in case I decide to use this method instead of using the polygons to extract the data:
# tmp <- raster(file.path(rast_locs, "Frazier/global_plumes_fert_2007_raw_log_extend.tif"))
# rasterize(inland_3nm_poly, tmp, field='rgn_id',
# filename=file.path(rast_locs, "Frazier/inland_3nm.tif"), overwrite=TRUE,
# progress='text')
# mean score within each 3nm polygon for every layer in the stack
data <- raster::extract(pressure_stack, offshore_3nm_poly, na.rm=TRUE, normalizeWeights=FALSE, fun=mean, df=TRUE, progress="text")
data2 <- cbind(data, offshore_3nm_poly@data)
write.csv(data2, file.path(save_loc, "tmp/chemical_data_offshore_3nm.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/chemical_data_offshore_3nm.csv"))
data <- gather(data, "year", "pressure_score", starts_with("chemical"))
data <- data %>%
mutate(year = gsub("chemical_pollution_", "", year)) %>%
mutate(year = gsub("_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
dplyr::select(rgn_id, rgn_name, year, pressure_score) %>%
filter(!is.na(pressure_score))#NA is Antarctica - this is fine
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save trend data
# per-region linear fit over the 5 data years ending 3 years before the
# scenario; annual slope * 5 = expected change over 5 years
trend_data <- data %>%
filter(year %in% (scenario_year-7):(scenario_year-3)) %>%
group_by(rgn_id) %>%
do(mdl = lm(pressure_score ~ year, data = .)) %>%
summarize(rgn_id,
trend = coef(mdl)['year'] * 5) %>%
ungroup()
write.csv(trend_data, file.path(save_loc, sprintf('data/cw_chemical_trend_%s.csv', scenario_year)), row.names=FALSE)
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_chemical_score_3nm_%s.csv', scenario_year)), row.names=FALSE)
}
## Visualizing the data using GoogleVis
### visualizing the data using googleVis plot
plume_files <- grep("cw_", list.files(file.path(save_loc, 'data')), value=TRUE)
plume_types <- c('cw_chemical_score',
'cw_chemical_score_3nm',
'cw_chemical_trend',
'cw_fertilizers_score',
'cw_fertilizers_score_3nm',
'cw_fertilizers_trend')
rgns <- read.csv(file.path(save_loc, "data/cw_chemical_score_2015.csv")) %>%
dplyr::select(rgn_id)
# one row per region/year; each plume type joins on as its own column
allData <- expand.grid(rgn_id = rgns$rgn_id, year=2012:2015)
for(plume in plume_types) { #plume = 'cw_chemical_score'
data <- data.frame()
# read the 4 yearly files for this plume type and stack them
# (rbind in a loop is fine here: only 4 small data frames)
for(year in 2012:2015){#year = 2012
tmp <- read.csv(file.path(save_loc, 'data', paste0(plume, sprintf("_%s.csv", year))))
tmp$year <- year
# rename the value column (pressure_score or trend) to the plume type
names(tmp)[which(names(tmp)=="pressure_score" | names(tmp)=="trend")] <- plume
data <- rbind(data, tmp)
}
allData <- left_join(allData, data, by=c('rgn_id', 'year'))
}
# attach region names and drop the id for plotting
regions <- rgn_data %>%
dplyr::select(rgn_id, rgn_name)
allData <- left_join(allData, regions, by="rgn_id") %>%
dplyr::select(-rgn_id)
library(googleVis)
Motion=gvisMotionChart(allData,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'plumes.html'))
| /globalprep/prs_uv/v2015/ZonalExtract.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 37,866 | r | ### zonal extraction and summary of pressure data
### MRF: Feb 25 2015
########## NOTE: For future versions make rgn_id into sp_id for CCAMLR regions!!!
# load packages first (bug fix: rasterOptions() was previously called before
# library(raster), which errors in a fresh session because rasterOptions is
# provided by the raster package)
source('../ohiprep/src/R/common.R')  # presumably defines dir_neptune_data / dir_halpern2008 — confirm
library(raster)
library(rgdal)
library(dplyr)
# point raster's temporary files at a large scratch directory
tmpdir <- '~/big/R_raster_tmp'
rasterOptions(tmpdir=tmpdir)
# raster/zonal data
rast_loc <- file.path(dir_neptune_data, "git-annex/Global/NCEAS-Regions_v2014/data/sp_mol_raster_1km")
zones <- raster(file.path(rast_loc, "sp_mol_raster_1km.tif")) # raster data
rgn_data <- read.csv(file.path(rast_loc, 'regionData.csv')) # data for sp_id's used in raster
# save location
save_loc <- "globalprep/PressuresRegionExtract"
#### Acid ----
# read in acid data (should be 10 layers, with values 0 to 1)
rasts <- paste0('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/annual_oa_rescaled_1km_int_clip_', c(2005:2014), '.tif')
# stack() accepts a vector of file names directly; this replaces the original
# `for(i in 1:length(rasts))` loop that grew the stack one layer at a time
pressure_stack <- stack(rasts)
# extract data for each region:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
# merge region attributes, reshape to long format, and recover the data year
# from each layer name
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
gather("year", "pressure_score", starts_with("annual")) %>%
mutate(year=as.numeric(gsub('annual_oa_rescaled_1km_int_clip_', '', year)))
write.csv(data, file.path(save_loc, "tmp/acid.csv"), row.names=FALSE)
## save toolbox data for different years/regions
# function to extract data more easily
# Writes data/acid_<regionType>_<newYear>.csv with columns rgn_id, pressure_score.
# Relies on the globals `data` (long-format acid scores) and `save_loc`.
# Rewritten with standard dplyr verbs: the original used the soft-deprecated
# filter_() with one-sided formulas.
saveData <- function(regionType, newYear){
  acid <- data %>%
    filter(sp_type == regionType) %>%
    filter(year == newYear)
  if(regionType == 'eez-ccamlr'){
    # CCAMLR regions are identified by sp_id, renamed to rgn_id for the toolbox
    acid <- dplyr::select(acid, rgn_id = sp_id, pressure_score)
  } else{
    acid <- dplyr::select(acid, rgn_id, pressure_score)
  }
  acid <- arrange(acid, rgn_id)
  write.csv(acid, file.path(save_loc, sprintf('data/acid_%s_%s.csv', regionType, newYear)), row.names=FALSE)
}
### extract data
# one file per region type and scenario year (2011-2014); the original
# repeated the same loop three times, once per region type
for(regionType in c("eez", "eez-ccamlr", "fao")){
  for(newYear in 2011:2014){
    saveData(regionType, newYear)
  }
}
### try visualizing the data using googleVis plot
library(googleVis)  # (duplicate library(googleVis) call removed)
# eez-only scores, one row per region/year, for an interactive motion chart
plotData <- data %>%
  filter(sp_type == "eez") %>%
  dplyr::select(rgn_name, year, pressure_score)
Motion <- gvisMotionChart(plotData,
                          idvar="rgn_name",
                          timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'acid.html'))
### get estimate of gap-filling
# one-time prep (commented out): reclassify every non-NA cell of the
# interpolated-cells raster to 1, producing a yes/no interpolation mask
# rasts <- raster('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells.tif')
# reclassify(rasts, c(-Inf, Inf, 1), filename='/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells_yes_no.tif', progress="text")
interp <- raster('/var/data/ohi/git-annex/globalprep/Pressures_acid/v2015/output/oa_interpolated_cells_yes_no.tif')
# extract data for each region:
# fun="sum" counts interpolated cells per zone (mask cells were reclassified to 1)
regions_stats <- zonal(interp, zones, fun="sum", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
dplyr::select(sp_id, rgn_id, sp_type, rgn_name, interpolated=sum)
write.csv(data, file.path(save_loc, "tmp/acid_interpolated_cells.csv"), row.names=FALSE)
## Get all cell counts for each region
# rc_count <- freq(zones, progress="text")
# rc_count <- data.frame(rc_count)
# write.csv(rc_count, file.path(save_loc, "sp_id_areas.csv"), row.names=FALSE)
# join total cell counts per region to compute the proportion gap-filled
rc_count2 <- read.csv(file.path(save_loc, "sp_id_areas.csv")) %>%
dplyr::select(sp_id=value, cellNum=count) %>%
left_join(data, by='sp_id') %>%
filter(!is.na(sp_id)) %>%
mutate(prop_gap_filled = interpolated/cellNum)
write.csv(rc_count2, file.path(save_loc, "tmp/acid_prop_interpolated.csv"), row.names=FALSE)
# final gap-fill attribute table: eez regions only, rounded to 2 decimals
final_gap <- rc_count2 %>%
dplyr::filter(sp_type == "eez") %>%
dplyr::select(rgn_id, gap_filled = prop_gap_filled) %>%
mutate(gap_filled = round(gap_filled, 2)) %>%
arrange(rgn_id)
write.csv(final_gap, file.path(save_loc, "data/acid_gap_fill_attr.csv"), row.names=FALSE)
final_gap <- read.csv(file.path(save_loc, "data/acid_gap_fill_attr.csv"))
library(ggplot2)
ggplot(final_gap, aes(gap_filled)) +
geom_histogram() +
theme_bw() +
labs(title="Acid: Proportion gap-filled")
# count of eez regions that are more than 90% gap-filled
sum(final_gap$gap_filled > 0.9)
#########################################
#### SLR ----
#########################################
## Sea level rise pressure: mean of the rescaled (non-log) SLR raster per
## region, saved per region type, followed by the same gap-filling estimate
## workflow used for the acid layer above.
# https://github.com/OHI-Science/issues/issues/374
# the following raster is log transformed and then the 99.99th quantile was used to establish the standardization value.
# The outcome was that most regions had a pressure score of around 0.7 - which seemed high for this pressure. This
# suggested that we should probably avoid log transforming these particular data.
# rast <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_final.tif')
rast <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_nonlog_final.tif')
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
write.csv(eez, file.path(save_loc, 'data/slr_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/slr_eez_2015.csv'))
# fao <- data %>% ## probably not a pressure in high seas
# filter(sp_type=="fao") %>%
# dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/slr_fao_2015.csv'), row.names=FALSE)
## should go through and eliminate the regions that do not have land
# Antarctica (ccamlr) regions are keyed by sp_id rather than rgn_id
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
write.csv(antarctica, file.path(save_loc, 'data/slr_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for SLR")
quantile(eez$pressure_score)
## extract data to show proportion of gap-filling
# rasts <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells.tif')
# reclassify(rasts, c(-Inf, Inf, 1), filename='/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells_yes_no.tif', progress="text")
interp <- raster('/var/data/ohi/git-annex/globalprep/AVISO-SeaLevelRise_v2015/output/slr_interpolated_cells_yes_no.tif')
# extract data for each region:
# (cells reclassified to 1 above, so the zonal sum counts interpolated cells)
regions_stats <- zonal(interp, zones, fun="sum", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone") %>%
dplyr::select(sp_id, rgn_id, sp_type, rgn_name, interpolated=sum)
write.csv(data, file.path(save_loc, "tmp/slr_interpolated_cells.csv"), row.names=FALSE)
# proportion gap-filled = interpolated cells / total cells per region
rc_count2 <- read.csv(file.path(save_loc, "sp_id_areas.csv")) %>%
dplyr::select(sp_id=value, cellNum=count) %>%
left_join(data, by='sp_id') %>%
filter(!is.na(sp_id)) %>%
mutate(prop_gap_filled = interpolated/cellNum)
write.csv(rc_count2, file.path(save_loc, "tmp/slr_prop_interpolated.csv"), row.names=FALSE)
final_gap <- rc_count2 %>%
dplyr::filter(sp_type == "eez") %>%
dplyr::select(rgn_id, gap_filled = prop_gap_filled) %>%
mutate(gap_filled = round(gap_filled, 2)) %>%
arrange(rgn_id)
write.csv(final_gap, file.path(save_loc, "data/slr_gap_fill_attr.csv"), row.names=FALSE)
final_gap <- read.csv(file.path(save_loc, "data/slr_gap_fill_attr.csv"))
#library(ggplot2)
ggplot(final_gap, aes(gap_filled)) +
geom_histogram() +
theme_bw() +
labs(title="SLR: Proportion area gap-filled")
sum(final_gap$gap_filled > 0.5) # number of regions more than 50% gap-filled
#########################################
## Trash ----
#########################################
## Marine plastics pressure: mean of the ice-masked, rescaled trash raster
## per region, split by region type, plus sanity plots.
# some issues dealing with the preparation of these data:
# https://github.com/OHI-Science/issues/issues/306#issuecomment-72252954
# also want to apply ice mask so as to eliminate these regions
## creating data with ice mask
# trash <- raster('/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale.tif')
# ice_mask_resampled <- raster("/var/data/ohi/git-annex/Global/NCEAS-Pressures-Summaries_frazier2013/ice_mask_resampled")
# s <- stack(ice_mask_resampled, trash)
# overlay(s, fun=function(x,y) x*y,
# filename="/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale_icemask.tif",
# progress="text", overwrite=TRUE)
rast <- raster("/var/data/ohi/git-annex/globalprep/FiveGyres_MarinePlastics_CW/v2015/output/weight_rescale_icemask.tif")
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
# the write.csv calls below are commented out (outputs apparently already
# written in a previous run); the eez file is re-read from disk instead
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
#write.csv(eez, file.path(save_loc, 'data/trash_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/trash_eez_2015.csv'))
fao <- data %>% ## probably not a pressure in high seas
filter(sp_type=="fao") %>%
dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/trash_fao_2015.csv'), row.names=FALSE)
# Antarctica (ccamlr) regions are keyed by sp_id rather than rgn_id
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
#write.csv(antarctica, file.path(save_loc, 'data/trash_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for trash")
quantile(eez$pressure_score)
# list eez regions from lowest to highest mean score, for manual inspection
data %>%
filter(sp_type=="eez") %>%
arrange(mean)
#########################################
#### UV ----
#########################################
## UV anomaly pressure: mean of the logged, rescaled UV anomaly raster per
## region, split by region type, plus sanity plots.
# https://github.com/OHI-Science/issues/issues/377
# 2 raster choices: logged and non-logged. Going with the logged version mainly out of tradition
rast <- raster('/var/data/ohi/git-annex/globalprep/Pressures_UV/output/uv_anomaly_diff_moll_1km_log_resc.tif')
# extract data for each region:
regions_stats <- zonal(rast, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
## save data for toolbox
# write.csv calls below are commented out (outputs apparently already
# written in a previous run); the eez file is re-read from disk instead
eez <- data %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, pressure_score=mean)
#write.csv(eez, file.path(save_loc, 'data/uv_eez_2015.csv'), row.names=FALSE)
eez <- read.csv(file.path(save_loc, 'data/uv_eez_2015.csv'))
fao <- data %>%
filter(sp_type=="fao") %>%
dplyr::select(rgn_id, pressure_score=mean)
# write.csv(fao, file.path(save_loc, 'data/uv_fao_2015.csv'), row.names=FALSE)
# Antarctica (ccamlr) regions are keyed by sp_id rather than rgn_id
antarctica <- data %>%
filter(sp_type=="eez-ccamlr") %>%
dplyr::select(rgn_id = sp_id, pressure_score=mean)
#write.csv(antarctica, file.path(save_loc, 'data/uv_ccamlr_2015.csv'), row.names=FALSE)
## plot the data to make sure range of values for regions is reasonable
library(ggplot2)
ggplot(eez, aes(pressure_score)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="Region scores for UV")
quantile(eez$pressure_score)
# list eez regions from lowest to highest mean score, for manual inspection
data %>%
filter(sp_type=="eez") %>%
arrange(mean)
#########################################
#### SST ----
#########################################
## Sea surface temperature pressure: four rescaled anomaly rasters (one per
## scenario year 2012-2015), each multiplied by an ice mask, stacked, and
## averaged per region.
# https://github.com/OHI-Science/issues/issues/499
# load and check relevant rasters
rast_2012 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2005_2009-1985_1989_rescaled_v2.tif'))
rast_2012
rast_2013 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2006_2010-1985_1989_rescaled_v2.tif'))
rast_2013
rast_2014 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2007_2011-1985_1989_rescaled_v2.tif'))
rast_2014
rast_2015 <- raster(file.path(dir_neptune_data,
'git-annex/globalprep/Pressures_SST/v2015/output/sst_2008_2012-1985_1989_rescaled_v2.tif'))
rast_2015
# apply ice mask
ice_mask <- raster("/var/data/ohi/git-annex/Global/NCEAS-Pressures-Summaries_frazier2013/ice_mask_resampled")
# multiply each year's raster by the ice mask and write the result;
# get() resolves the rast_<year> object created above
for(i in 2012:2015){ #i=2012
rast <- get(paste0("rast_", i))
overlay(rast, ice_mask, fun=function(x,y) x*y, progress='text',
filename=file.path(dir_neptune_data,
sprintf('git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_%s_rescaled_icemask', i)),
overwrite=TRUE)
}
# extract data
# layer names (sst_<year>) become the column names after zonal()
sst_2012_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2012_rescaled_icemask'))
names(sst_2012_ice) <- "sst_2012"
sst_2013_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2013_rescaled_icemask'))
names(sst_2013_ice) <- "sst_2013"
plot(sst_2013_ice)
sst_2014_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2014_rescaled_icemask'))
names(sst_2014_ice) <- "sst_2014"
sst_2015_ice <- raster(file.path(dir_neptune_data, 'git-annex/globalprep/Pressures_SST/v2015/output/sst_stack_2015_rescaled_icemask'))
names(sst_2015_ice) <- "sst_2015"
sst_stack <- stack(sst_2012_ice, sst_2013_ice, sst_2014_ice, sst_2015_ice)
# extract data
regions_stats <- zonal(sst_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
#write.csv(data, file.path(save_loc, "tmp/sst.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/sst.csv"))
## save data for toolbox
## One csv per scenario year and region type. Fix: the ccamlr and fao file
## names were missing the ".csv" extension (the eez file at the top of the
## loop had it), so those files were written without an extension.
for(years in c(2012:2015)){ #years="2012"
  scenario <- sprintf("sst_%s", years)

  # eez regions: keyed by rgn_id
  eez <- filter(data, sp_type == "eez")
  eez <- eez[, c('rgn_id', scenario)]
  names(eez)[names(eez) == scenario] <- "pressure_score"
  write.csv(eez, sprintf('globalprep/PressuresRegionExtract/data/sst_eez_%s.csv', years), row.names=FALSE)

  # Antarctica (ccamlr) regions: keyed by sp_id, renamed to rgn_id
  ant <- filter(data, sp_type == "eez-ccamlr")
  ant <- ant[, c('sp_id', scenario)]
  names(ant)[names(ant) == scenario] <- "pressure_score"
  names(ant)[names(ant) == 'sp_id'] <- "rgn_id"
  write.csv(ant, sprintf('globalprep/PressuresRegionExtract/data/sst_eez-ccamlr_%s.csv', years), row.names=FALSE)

  # high seas (fao) regions
  fao <- filter(data, sp_type == "fao")
  fao <- fao[, c('rgn_id', scenario)]
  names(fao)[names(fao) == scenario] <- "pressure_score"
  write.csv(fao, sprintf('globalprep/PressuresRegionExtract/data/sst_fao_%s.csv', years), row.names=FALSE)
}
## plot the data to make sure range of values for regions is reasonable
# 1. compare with last years data
old_sst <- read.csv(file.path(dir_neptune_data, "model/GL-NCEAS-Pressures_v2013a/data/cc_sst_2013_NEW.csv"))
# join last year's scores onto the new per-year columns for eez regions
compare <- old_sst %>%
dplyr::select(rgn_id, old_pressure_score=pressure_score) %>%
left_join(data) %>%
filter(!(is.na(rgn_name))) %>%
filter(sp_type=="eez") %>%
dplyr::select(rgn_id, rgn_name, old_pressure_score, sst_2012, sst_2013, sst_2014, sst_2015)
#filtered out these, but wanted to make sure they didn't reflect underlying issues:
# compare[is.na(compare$sp_id), ] # ones that don't match new data are antarctica high seas regions (268, 271, 278), an NA high seas region (265), and conflict areas (255)
# compare[is.na(compare$old_pressure_score), ] # often Bosnia/Herzegovina falls out of raster analyses due to very small eez region
library(ggplot2)
# old vs new scores; points on the 1:1 line are unchanged between years
ggplot(compare, aes(x=old_pressure_score, y=sst_2013)) +
geom_point(shape=19) +
theme_bw() +
geom_abline(intercept=0, slope=1) +
labs(title="SST comparison")
ggplot(compare, aes(x=sst_2013)) +
geom_histogram(fill="gray", color="black") +
theme_bw() +
labs(title="SST 2013")
quantile(compare$sst_2013)
library(tidyr)
# reshape columns 3:7 (old score + four sst years) to long form,
# drop the old score, and keep year as a number for the motion chart
compare_plot <- gather(compare, "year", "pressure_score", 3:7) %>%
filter(year != "old_pressure_score") %>%
mutate(year = as.numeric(gsub("sst_", "", year))) %>%
dplyr::select(rgn_name, year, pressure_score)
library(googleVis)
Motion=gvisMotionChart(compare_plot,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'sst.html'))
#### Fisheries ----
## Fishing pressure: stack all rescaled catch rasters, average per region,
## then reshape to long form (one row per region x layer).
# read in fisheries pressure data (should be 8 layers, with values 0 to 1)
#check an example:
tmp <- raster('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output/catch_03_07_npp_hb_rescaled.tif')
files <- list.files('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output')
rescaled_files <- grep("_rescaled", files, value=TRUE)
# build the stack one raster at a time
pressure_stack <- stack()
for(rast in rescaled_files){ # rast = 'catch_03_07_npp_hb_rescaled.tif'
tmp <- raster(file.path('/var/data/ohi/git-annex/globalprep/Pressures_fishing/v2015/output', rast))
pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each region:
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/fisheries.csv"), row.names=FALSE)
# inspect regions with missing values in one of the layers
data[is.na(data$catch_03_07_npp_hb_rescaled), ]
data <- read.csv(file.path(save_loc, "tmp/fisheries.csv"))
# long format: one row per region x catch layer; strip "_rescaled" suffix
data_long <- data %>%
gather("layer", "pressure_score", starts_with("catch")) %>%
mutate(layer = gsub('_rescaled', '', layer))
# mutate(pressure_score = ifelse(is.na(pressure_score), 0, pressure_score))
## record gap-filled regions:
## Map each catch layer to its scenario year (2015:2012 recycles across the
## hb and lb layer groups) and record which region x layer combinations had
## missing scores before gap-filling.
convert_year <- data.frame(layer =c('catch_06_10_npp_hb', 'catch_05_09_npp_hb', 'catch_04_08_npp_hb', 'catch_03_07_npp_hb',
'catch_06_10_npp_lb', 'catch_05_09_npp_lb', 'catch_04_08_npp_lb', 'catch_03_07_npp_lb'),
year = 2015:2012)
gap_record <- data_long %>%
left_join(convert_year) %>%
mutate(gap_filled = ifelse(is.na(pressure_score), "gap-filled", "no"))
write.csv(gap_record, file.path(save_loc, "data/fisheries_gap_filling.csv"), row.names=FALSE)

### gap-fill some eez regions:
regions <- read.csv("src/LookupTables/rgn_georegions_wide_2013b.csv") %>%
dplyr::select(-rgn_nam)

## fill in a couple missing values:
# replace Bosnia (232) with Croatia (187) values
croatia <- data_long[data_long$rgn_id == 187,] %>%
dplyr::select(layer, pressure_score2=pressure_score) %>%
mutate(rgn_id = 232)
data_gapfill <- data_long %>%
left_join(croatia) %>%
mutate(pressure_score = ifelse(is.na(pressure_score), pressure_score2, pressure_score)) %>%
dplyr::select(-pressure_score2)

# replace arctic and Bouvet Island with zeros
data_gapfill$pressure_score[data_gapfill$rgn_id %in% c(105, 260)] <- 0

# regional gap-filling for remaining: Bulgaria (71), Romania (72), Georgia (74), Ukraine (75), Jordan (215)
# NAs are replaced by the mean score of their r2 georegion, per layer
eez_gap_fill <- data_gapfill %>%
filter(sp_type == "eez") %>%
left_join(regions, by="rgn_id") %>%
group_by(layer, r2) %>%
mutate(mean_pressure_score = mean(pressure_score, na.rm=TRUE)) %>%
mutate(pressure_score = ifelse(is.na(pressure_score), mean_pressure_score, pressure_score)) %>%
ungroup()

## the two r2 regions that need gap-filled data:
## Fix: in the original these inspections ran AFTER dplyr::select() had
## dropped the r2 column, so they always returned zero rows. Inspect while
## r2 is still present, then drop the helper columns.
data.frame(eez_gap_fill[eez_gap_fill$r2 %in% c("151"), ])
data.frame(eez_gap_fill[eez_gap_fill$r2 %in% c(145), ])
eez_gap_fill <- eez_gap_fill %>%
dplyr::select(sp_id, sp_type, rgn_id, rgn_name, layer, pressure_score)
### replacing previous eez data with gapfilled eez data:
pressure_data <- data_gapfill %>%
filter(sp_type != "eez") %>%
bind_rows(eez_gap_fill)
layerType <- unique(pressure_data$layer)
# write one csv per catch layer and region type (eez / fao / ccamlr)
for(layer in layerType){ #layer="catch_03_07_npp_hb"
pressureData <- pressure_data[pressure_data$layer %in% layer, ]
# eez data
data <- pressureData %>%
filter(sp_type == 'eez') %>%
dplyr::select(rgn_id = rgn_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_eez.csv', layer)), row.names=FALSE)
# hs data
data <- pressureData %>%
filter(sp_type == 'fao') %>%
dplyr::select(rgn_id = rgn_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_fao.csv', layer)), row.names=FALSE)
# antarctica data (keyed by sp_id, renamed to rgn_id for the toolbox)
data <- pressureData %>%
filter(sp_type == 'eez-ccamlr') %>%
dplyr::select(rgn_id = sp_id, pressure_score) %>%
arrange(rgn_id)
write.csv(data, file.path(save_loc, sprintf('data/%s_ccamlr.csv', layer)), row.names=FALSE)
}
### visualizing the data using googleVis plot
## Motion chart of the high-bycatch ("_hb") layers across scenario years.
library(googleVis)
high_bycatch <- pressure_data %>%
filter(sp_type == "eez") %>%
filter(layer %in% grep("_hb", layerType, value=TRUE)) %>%
left_join(convert_year) %>%
dplyr::select(rgn_name, year, pressure_score)
Motion=gvisMotionChart(high_bycatch,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'high_bycatch.html'))
## Motion chart of the low-bycatch ("_lb") layers across scenario years.
low_bycatch <- pressure_data %>%
  filter(sp_type == "eez") %>%
  filter(layer %in% grep("_lb", layerType, value=TRUE)) %>%
  left_join(convert_year) %>%
  dplyr::select(rgn_name, year, pressure_score)
## Fix: the original passed `high_bycatch` to gvisMotionChart here (copy-paste
## error), so low_bycatch.html actually contained the high-bycatch data.
Motion = gvisMotionChart(low_bycatch,
  idvar = "rgn_name",
  timevar = "year")
plot(Motion)
print(Motion, file = file.path(save_loc, 'low_bycatch.html'))
#########################################
#### Land-based fertilizer and pesticide plume data prep ----
#########################################
rast_locs <- file.path(dir_halpern2008, "mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/work/land_based/before_2007/raw_global_results")
## peak at raster to see what is up:
check <- raster(file.path(rast_locs, 'global_plumes_fert_2012_raw.tif'))
## darn: different extents and such...need to make these the same

## Fix: the original built `quantiles` with data.frame(plumeData <- list.files(...)),
## which (a) used `<-` inside data.frame(), leaking a `plumeData` global that the
## loop then indexed against, and (b) used the *unfiltered* list.files() (which
## also picks up non-tif entries such as the Frazier/ output directory). Build
## the table from the grep-filtered file list and index its own column instead.
files <- grep(".tif", list.files(rast_locs), value=TRUE)
quantiles <- data.frame(plumeData = files, quantile_9999_ln = NA)
for(plume in files){ #plume='global_plumes_fert_2007_raw.tif'
  tmp <- raster(file.path(rast_locs, plume))
  saveName <- gsub(".tif", "", plume)
  # log(x + 1) transform, written to the Frazier/ working directory
  calc(tmp, function(x){log(x+1)}, progress="text", filename=file.path(rast_locs, sprintf("Frazier/%s_log.tif", saveName)), overwrite=TRUE)
  tmp <- raster(file.path(rast_locs, sprintf("Frazier/%s_log.tif", saveName)))
  # record the 99.99th quantile of the logged raster (used later for rescaling)
  quantiles$quantile_9999_ln[quantiles$plumeData == plume] <- quantile(tmp, .9999)
  # extend to the extent of `zones`, then delete the intermediate log raster
  extend(tmp, zones, filename=file.path(rast_locs, sprintf("Frazier/%s_log_extend.tif", saveName)), progress="text", overwrite=TRUE)
  tmp <- raster(file.path(rast_locs, sprintf("Frazier/%s_log_extend.tif", saveName)))
  unlink(file.path(rast_locs, sprintf('Frazier/%s_log.tif', saveName)))
}
#############################
## fertilizer ----
#############################
## scaling coefficient for fertlizer = 5.594088 (file with these values: ohiprep/globalprep/PressuresRegionExtract/land_based_quantiles.csv)
fert_scalar <- 5.594088

## Fix: the original used `list_fert <- files <- grep(...)` (twice), a double
## assignment that pointlessly clobbered the `files` global; assign once.
list_fert <- grep("_fert", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
for(fert in list_fert){ #fert="global_plumes_fert_2007_raw_log_extend.tif"
  tmp <- raster(file.path(rast_locs, "Frazier", fert))
  saveName <- gsub('.tif', '', fert)
  # rescale to [0, 1]: values above the scalar are capped at 1
  calc(tmp, fun=function(x){ifelse(x>fert_scalar, 1, x/fert_scalar)},
    progress='text',
    filename=file.path(rast_locs, sprintf("Frazier/%s_scaled.tif", saveName)), overwrite=TRUE)
}

## re-list (now includes the freshly written *_scaled rasters), keep only the
## scaled ones, and build the stack one raster at a time
list_fert <- grep("_fert", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
list_fert_scaled <- grep("_scaled", list_fert, value=TRUE)
pressure_stack <- stack()
for(rast in list_fert_scaled){
  tmp <- raster(file.path(rast_locs, "Frazier", rast))
  pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each eez region:
## Mean fertilizer plume score per region, reshaped to long form, then one
## score csv per scenario year (scores lag the scenario year by 3).
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/nutrients_plume_data.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/nutrients_plume_data.csv"))
# long format; strip the raster-name prefix/suffix so `year` is numeric
data <- gather(data, "year", "pressure_score", starts_with("global"))
data <- data %>%
mutate(year = gsub("global_plumes_fert_", "", year)) %>%
mutate(year = gsub("_raw_log_extend_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
filter(sp_type == "eez") %>% # this doesn't really apply to high seas regions and Antarctica is all zeros
dplyr::select(rgn_id, rgn_name, year, pressure_score)
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_fertilizers_score_%s.csv', scenario_year)), row.names=FALSE)
}
## extract at 3 nm (in addition to a pressure, this will be used for CW and the CW trend)
# (going to try using the polygon, rather than converting to raster)
## Same fertilizer layers, but averaged within the 3 nm offshore polygons;
## used for the Clean Waters goal score and a 5-year linear trend.
offshore_3nm_poly <- readOGR(dsn="/var/data/ohi/git-annex/Global/NCEAS-Regions_v2014/data", "rgn_offshore3nm_mol")
offshore_3nm_poly <- offshore_3nm_poly[offshore_3nm_poly@data$rgn_type == "eez", ]
# this is here in case I decide to use this method instead of using the polygons to extract the data:
# tmp <- raster(file.path(rast_locs, "Frazier/global_plumes_fert_2007_raw_log_extend.tif"))
# rasterize(inland_3nm_poly, tmp, field='rgn_id',
# filename=file.path(rast_locs, "Frazier/inland_3nm.tif"), overwrite=TRUE,
# progress='text')
data <- raster::extract(pressure_stack, offshore_3nm_poly, na.rm=TRUE, normalizeWeights=FALSE, fun=mean, df=TRUE, progress="text")
data2 <- cbind(data, offshore_3nm_poly@data)
write.csv(data2, file.path(save_loc, "tmp/nutrients_plume_data_offshore_3nm.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/nutrients_plume_data_offshore_3nm.csv"))
# long format; strip the raster-name prefix/suffix so `year` is numeric
data <- gather(data, "year", "pressure_score", starts_with("global"))
data <- data %>%
mutate(year = gsub("global_plumes_fert_", "", year)) %>%
mutate(year = gsub("_raw_log_extend_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
dplyr::select(rgn_id, rgn_name, year, pressure_score) %>%
filter(!is.na(pressure_score))#NA is Antarctica - this is fine
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save trend data
# trend = slope of a per-region lm over the 5 years ending 3 years before
# the scenario year, multiplied by 5 (i.e. predicted 5-year change)
trend_data <- data %>%
filter(year %in% (scenario_year-7):(scenario_year-3)) %>%
group_by(rgn_id) %>%
do(mdl = lm(pressure_score ~ year, data = .)) %>%
summarize(rgn_id,
trend = coef(mdl)['year'] * 5) %>%
ungroup()
write.csv(trend_data, file.path(save_loc, sprintf('data/cw_fertilizers_trend_%s.csv', scenario_year)), row.names=FALSE)
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_fertilizers_score_3nm_%s.csv', scenario_year)), row.names=FALSE)
}
#############################
## pesticides ----
#############################
## Rescale the pesticide plume rasters, then combine pesticides + ocean
## pollution + inorganic pollution into per-year chemical pollution rasters.
## scaling coefficient for pesticides = 1.91788700716876 (file with these values: ohiprep/globalprep/PressuresRegionExtract/land_based_quantiles.csv)
pest_scalar <- 1.91788700716876
list_pest <- grep("_pest", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
for(pest in list_pest){ #pest="global_plumes_pest_2007_raw_log_extend.tif"
tmp <- raster(file.path(rast_locs, "Frazier", pest))
saveName <- gsub('.tif', '', pest)
# rescale to [0, 1]: values above the scalar are capped at 1
calc(tmp, fun=function(x){ifelse(x>pest_scalar, 1, x/pest_scalar)},
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/%s_scaled.tif", saveName)), overwrite=TRUE)
}
##################
## to get the chemical pressure: pesticides + ocean pollution + inorganic pollution
## need to make the op and ip rasters have the same extent:
pest_rast <- raster(file.path(rast_locs, "Frazier", "global_plumes_pest_2007_raw_log_extend_scaled.tif"))
# only one ocean pollution raster for both time periods (so only normalized by one time period)
library(spatial.tools)
op <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2013_final/normalized_by_one_time_period/ocean_pollution.tif')
# pad margins by one row/col, then force the extent to match the pesticide raster
op_extend <- modify_raster_margins(op, extent_delta=c(1,0,1,0))
extent(op_extend) = extent(pest_rast)
writeRaster(op_extend, file.path(rast_locs, "Frazier/ocean_pollution_extend.tif"), overwrite=TRUE)
# two rasters for inorganic pollution (2003-2006 and 2007-2010)
# I used the 2007-2010 raster (normalized by both time periods):
# ip_07_10 <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2013_final/normalized_by_two_time_periods/inorganic.tif')
# extend(ip_07_10, pest_rast, filename=file.path(rast_locs, "Frazier/inorganic_pollution_07_10_extend.tif"), progress='text')
ip_07_10_extend <- raster(file.path(rast_locs, "Frazier/inorganic_pollution_07_10_extend.tif"))
# but, it might be better to use the earlier raster for some time periods, if so, here is the link:
#ip_03_06 <- raster('/var/cache/halpern-et-al/mnt/storage/marine_threats/impact_layers_2013_redo/impact_layers/final_impact_layers/threats_2008_final/normalized_by_two_time_periods/inorganic.tif')
# chemical pollution per year = cell-wise sum of the three layers
for(pest_year in 2007:2012){ #pest_year = 2007
pest_rast <- raster(file.path(rast_locs, "Frazier", sprintf("global_plumes_pest_%s_raw_log_extend_scaled.tif", pest_year)))
chem_stack <- stack(pest_rast, op_extend, ip_07_10_extend)
calc(chem_stack,
sum, na.rm=TRUE,
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", pest_year)), overwrite=TRUE)
}
## take a look at the distribution of scores
## Fix: the original assigned these rasters to a variable named `raster`,
## shadowing the raster() function's name; use descriptive names instead.
chem_2007 <- raster(file.path(rast_locs, "Frazier/chemical_pollution_2007.tif"))
quantile(chem_2007, c(0.25, 0.50, 0.75, 0.9, 0.99, 0.999, 0.9999))
chem_2012 <- raster(file.path(rast_locs, "Frazier/chemical_pollution_2012.tif"))
quantile(chem_2012, c(0.25, 0.50, 0.75, 0.9, 0.99, 0.999, 0.9999))
# cap the summed chemical pollution rasters at 1 (sum of three [0,1]-ish
# layers can exceed 1), writing *_scaled versions
for(chem_year in 2007:2012){ #chem_year=2012
tmp <- raster(file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", chem_year)))
calc(tmp, fun=function(x){ifelse(x>1, 1, x)},
progress='text',
filename=file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s_scaled.tif", chem_year)), overwrite=TRUE)
}
## delete intermediate files due to lack of space on neptune:
for(delete_year in 2007:2012){
unlink(file.path(rast_locs, sprintf("Frazier/chemical_pollution_%s.tif", delete_year)))
}
## Stack the capped chemical pollution rasters.
## Fix: the original used `list_chem <- files <- grep(...)`, a double
## assignment that pointlessly clobbered the `files` global; assign once.
list_chem <- grep("chemical_pollution", list.files(file.path(rast_locs, "Frazier")), value=TRUE)
list_chem_scaled <- grep("_scaled", list_chem, value=TRUE)
pressure_stack <- stack()
for(rast in list_chem_scaled){
  tmp <- raster(file.path(rast_locs, "Frazier", rast))
  pressure_stack <- stack(pressure_stack, tmp)
}
# extract data for each eez region:
## Mean chemical pollution per region, reshaped to long form, then one
## score csv per scenario year (scores lag the scenario year by 3).
regions_stats <- zonal(pressure_stack, zones, fun="mean", na.rm=TRUE, progress="text")
regions_stats2 <- data.frame(regions_stats)
setdiff(regions_stats2$zone, rgn_data$sp_id) #should be none
setdiff(rgn_data$sp_id, regions_stats2$zone) #should be none
data <- merge(rgn_data, regions_stats, all.y=TRUE, by.x="sp_id", by.y="zone")
write.csv(data, file.path(save_loc, "tmp/chemical_data.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/chemical_data.csv"))
# long format; strip the raster-name prefix/suffix so `year` is numeric
data <- gather(data, "year", "pressure_score", starts_with("chemical"))
data <- data %>%
mutate(year = gsub("chemical_pollution_", "", year)) %>%
mutate(year = gsub("_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
filter(sp_type == "eez") %>% # this doesn't really apply to high seas regions and Antarctica is all zeros
dplyr::select(rgn_id, rgn_name, year, pressure_score)
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_chemical_score_%s.csv', scenario_year)), row.names=FALSE)
}
## extract at 3 nm (in addition to a pressure, this will be used for CW and the CW trend)
# (going to try using the polygon, rather than converting to raster)
## Same chemical layers, but averaged within the 3 nm offshore polygons;
## used for the Clean Waters goal score and a 5-year linear trend.
offshore_3nm_poly <- readOGR(dsn="/var/data/ohi/git-annex/Global/NCEAS-Regions_v2014/data", "rgn_offshore3nm_mol")
offshore_3nm_poly <- offshore_3nm_poly[offshore_3nm_poly@data$rgn_type == "eez", ]
# this is here in case I decide to use this method instead of using the polygons to extract the data:
# tmp <- raster(file.path(rast_locs, "Frazier/global_plumes_fert_2007_raw_log_extend.tif"))
# rasterize(inland_3nm_poly, tmp, field='rgn_id',
# filename=file.path(rast_locs, "Frazier/inland_3nm.tif"), overwrite=TRUE,
# progress='text')
data <- raster::extract(pressure_stack, offshore_3nm_poly, na.rm=TRUE, normalizeWeights=FALSE, fun=mean, df=TRUE, progress="text")
data2 <- cbind(data, offshore_3nm_poly@data)
write.csv(data2, file.path(save_loc, "tmp/chemical_data_offshore_3nm.csv"), row.names=FALSE)
data <- read.csv(file.path(save_loc, "tmp/chemical_data_offshore_3nm.csv"))
# long format; strip the raster-name prefix/suffix so `year` is numeric
data <- gather(data, "year", "pressure_score", starts_with("chemical"))
data <- data %>%
mutate(year = gsub("chemical_pollution_", "", year)) %>%
mutate(year = gsub("_scaled", "", year)) %>%
mutate(year = as.numeric(as.character(year))) %>%
dplyr::select(rgn_id, rgn_name, year, pressure_score) %>%
filter(!is.na(pressure_score))#NA is Antarctica - this is fine
# calculate pressure data for each year
## trend should be calculated on 3nm (not eez)
for(scenario_year in 2012:2015){ #scenario_year=2015
#calculate/save trend data
# trend = slope of a per-region lm over the 5 years ending 3 years before
# the scenario year, multiplied by 5 (i.e. predicted 5-year change)
trend_data <- data %>%
filter(year %in% (scenario_year-7):(scenario_year-3)) %>%
group_by(rgn_id) %>%
do(mdl = lm(pressure_score ~ year, data = .)) %>%
summarize(rgn_id,
trend = coef(mdl)['year'] * 5) %>%
ungroup()
write.csv(trend_data, file.path(save_loc, sprintf('data/cw_chemical_trend_%s.csv', scenario_year)), row.names=FALSE)
#calculate/save pressure score data
score_data <- data %>%
filter(year == (scenario_year-3)) %>%
dplyr::select(rgn_id, pressure_score)
write.csv(score_data, file.path(save_loc, sprintf('data/cw_chemical_score_3nm_%s.csv', scenario_year)), row.names=FALSE)
}
## Visualizing the data using GoogleVis
### visualizing the data using googleVis plot
plume_files <- grep("cw_", list.files(file.path(save_loc, 'data')), value=TRUE)
plume_types <- c('cw_chemical_score',
'cw_chemical_score_3nm',
'cw_chemical_trend',
'cw_fertilizers_score',
'cw_fertilizers_score_3nm',
'cw_fertilizers_trend')
rgns <- read.csv(file.path(save_loc, "data/cw_chemical_score_2015.csv")) %>%
dplyr::select(rgn_id)
allData <- expand.grid(rgn_id = rgns$rgn_id, year=2012:2015)
for(plume in plume_types) { #plume = 'cw_chemical_score'
data <- data.frame()
for(year in 2012:2015){#year = 2012
tmp <- read.csv(file.path(save_loc, 'data', paste0(plume, sprintf("_%s.csv", year))))
tmp$year <- year
names(tmp)[which(names(tmp)=="pressure_score" | names(tmp)=="trend")] <- plume
data <- rbind(data, tmp)
}
allData <- left_join(allData, data, by=c('rgn_id', 'year'))
}
regions <- rgn_data %>%
dplyr::select(rgn_id, rgn_name)
allData <- left_join(allData, regions, by="rgn_id") %>%
dplyr::select(-rgn_id)
library(googleVis)
Motion=gvisMotionChart(allData,
idvar="rgn_name",
timevar="year")
plot(Motion)
print(Motion, file=file.path(save_loc, 'plumes.html'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.r
\docType{data}
\name{testset_SNPs_2Row}
\alias{testset_SNPs_2Row}
\title{Testfile in DArT format (as provided by DArT)}
\format{
csv
}
\description{
This test data set is provided to show a typical DArT file format. Can be used to create a genlight object using the read.dart function.
}
\author{
Arthur Georges (bugs? Post to \url{https://groups.google.com/d/forum/dartr})
}
\keyword{datasets}
| /man/testset_SNPs_2Row.Rd | no_license | carlopacioni/dartR | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.r
\docType{data}
\name{testset_SNPs_2Row}
\alias{testset_SNPs_2Row}
\title{Testfile in DArT format (as provided by DArT)}
\format{
csv
}
\description{
This test data set is provided to show a typical DArT file format. Can be used to create a genlight object using the read.dart function.
}
\author{
Arthur Georges (bugs? Post to \url{https://groups.google.com/d/forum/dartr}
}
\keyword{datasets}
|
# Monte Carlo valuation of a TSR (total shareholder return) award measured
# against a single peer group, using correlated geometric Brownian motion.
#
# Args:
#   T                    : time to vesting in years (numeric scalar; note it
#                          shadows base::T inside this function).
#   rf                   : annualised risk-free rate, continuous compounding.
#   price1_beg           : current share prices, subject company first, then
#                          peers (coerced to numeric vector).
#   price1_ref           : reference prices used to measure TSR.
#   Peer1_vol            : annualised volatilities, same order as prices.
#   Award1               : 2-column payout schedule; column 1 = TSR
#                          percentile in [0, 1], column 2 = award multiple
#                          (linearly interpolated between rows).
#   Peer1_Return_History : matrix of historical returns (one column per
#                          firm), used only to estimate correlations.
#   n_sims               : number of Monte Carlo paths.
#   seed                 : RNG seed for reproducibility.
#
# Returns: the fair value of the award for the subject company -- the mean
#          discounted payoff (award multiple x terminal price), a scalar.
TSR_1award_1Peer <- function(T, rf, price1_beg, price1_ref, Peer1_vol, Award1,
                             Peer1_Return_History, n_sims, seed) {
  set.seed(seed)
  price1_beg <- as.numeric(price1_beg)
  price1_ref <- as.numeric(price1_ref)
  vol1 <- as.numeric(Peer1_vol)

  # Estimate the cross-firm correlation matrix from historical returns and
  # build its matrix square root via an eigendecomposition, so independent
  # standard normals can be mapped to correlated shocks.
  corr_matrix1 <- cor(Peer1_Return_History)
  n_peer1 <- nrow(corr_matrix1)
  eigen_sys1 <- eigen(corr_matrix1)
  eigen_vec1 <- eigen_sys1$vectors
  sim_mat1 <- eigen_vec1 %*% diag(sqrt(eigen_sys1$values)) %*% solve(eigen_vec1)

  # Independent N(0,1) draws, one row per firm, one column per simulation.
  uncorr_sample1 <- matrix(rnorm(n_peer1 * n_sims), nrow = n_peer1)
  corr_sample1 <- sim_mat1 %*% uncorr_sample1

  # Risk-neutral GBM terminal prices:
  # S_T = S_0 * exp((rf - sigma^2/2) T + sigma sqrt(T) Z)
  # (price1_beg and vol1 recycle down the rows, i.e. across firms).
  price1_end <- price1_beg * exp((rf - vol1^2 / 2) * T + corr_sample1 * vol1 * sqrt(T))

  # TSR of every firm in every simulation relative to the reference price.
  TSR_return1 <- price1_end / price1_ref - 1

  # Within-simulation ranks (rank 1 = worst TSR) converted to percentiles
  # in [0, 1]; row 1 is the subject company.
  TSR1_rank_subject_reverse <- apply(X = TSR_return1, FUN = rank, MARGIN = 2)
  TSR1_percentile <- (TSR1_rank_subject_reverse - 1) / (nrow(TSR1_rank_subject_reverse) - 1)

  # Map the subject's percentile to an award multiple by linear
  # interpolation of the payout schedule.  BUG FIX: interp1() comes from
  # the 'pracma' package, but the original library(pracma) call was
  # commented out, so this call failed unless the caller happened to have
  # pracma attached; qualify the namespace explicitly instead.
  award1_peer1_vec <- pracma::interp1(x = Award1[, 1], y = Award1[, 2],
                                      xi = TSR1_percentile[1, ], method = "linear")

  # Fair value = mean discounted payoff.  (The original also computed, and
  # discarded, an unused reversed rank matrix and two diagnostic means.)
  mean(award1_peer1_vec * price1_end[1, ] * exp(-rf * T))
}
#(award1_peer1 = mean(award1_peer1_vec * price1_end[1,] * exp(-rf*T)))
### for sensivity table refer to previous code. ### | /Code/TSR BERT function_1 award.R | no_license | uhasan1/TSR | R | false | false | 2,714 | r | TSR_1award_1Peer = function(T,rf,price1_beg,price1_ref,Peer1_vol,Award1,Peer1_Return_History,n_sims,seed){
# Body of TSR_1award_1Peer (the function header sits on the preceding
# line): Monte Carlo valuation of a TSR award against one peer group using
# correlated GBM terminal prices.  Returns the mean discounted payoff for
# the subject company (element/row 1 of every price object).
set.seed(seed)
# Historical scaffolding from the original workbook-driven workflow; kept
# commented out by the author.
# library(readxl)
# library(pracma)
# # library(xlsx) #load the package
# read_excel_allsheets <- function(filename) {
# sheets <- excel_sheets(filename)
# x <- lapply(sheets, function(X) read_excel(filename, sheet = X))
# names(x) <- sheets
# list2env(x,envir=.GlobalEnv)
# }
# cleaned = function(df){
# df = as.data.frame(df)
# df = df[complete.cases(df),]
# df = as.matrix(df)
# df= apply(df,MARGIN = 2,FUN = as.numeric)
# return(df)
# }
# setwd("C:/Users/soleysi/Desktop/Channel 1/MMP_LTIP_2 February 2017/Magellan_LTIP_2 Feb 2017/E. EY Analysis/R corroboration")
# read_excel_allsheets(filename = "raw data input.xlsx")
# n_sims = 1000000
# T = Basic_Assumptions[1,2]
# rf = Basic_Assumptions[5,2]
#price1_beg = as.matrix(cleaned(price1_beg))
#price1_ref = as.matrix(cleaned(price1_ref))
# Coerce price/volatility inputs to plain numeric vectors.
price1_beg = as.numeric(price1_beg)
price1_ref = as.numeric(price1_ref)
#Award1 = cleaned(Award1)
#Peer1_Returns = cleaned(Peer1_Return_History)
Peer1_Returns = (Peer1_Return_History)
vol1 = as.numeric(Peer1_vol)
# Correlation matrix estimated from historical returns (one column per firm).
corr_matrix1 = cor(Peer1_Returns)
# corr_output1 = corr_matrix1
# corr_output1[upper.tri(x = corr_output1,diag = F)] = NA
# write.xlsx(x = corr_output1, file = "output_results.xlsx", sheetName = "Correlation1", row.names = TRUE)
n_peer1 = nrow(corr_matrix1)
# Matrix square root of the correlation matrix via eigendecomposition, used
# to turn independent N(0,1) draws into correlated shocks.
eigen_sys1 = eigen(corr_matrix1)
eigen_vec1 = eigen_sys1$vectors
eigen_vec1_inv = solve(eigen_vec1)
sim_mat1 = eigen_vec1 %*% diag(sqrt(eigen_sys1$values)) %*% eigen_vec1_inv
uncorr_sample1 = matrix(rnorm((n_peer1)*n_sims), nrow = n_peer1)
corr_sample1 = sim_mat1 %*% uncorr_sample1
# Risk-neutral GBM terminal prices; price1_beg/vol1 recycle across firms (rows).
price1_end = price1_beg *exp((rf- vol1^2/2)*T + corr_sample1*vol1*sqrt(T))
## check! these values should be near zero
(test = price1_beg - rowMeans(price1_end)*exp(-rf*T))
# TSR per firm per simulation relative to the reference price.
TSR_return1 = price1_end/price1_ref - 1
TSR1_rank_subject_reverse = apply(X = TSR_return1,FUN = rank,MARGIN = 2) # higher is better, 1 means worst!
# NOTE(review): TSR1_rank_subject is computed but never used below.
TSR1_rank_subject = nrow(TSR_return1) - TSR1_rank_subject_reverse +1
# Percentile of each firm's rank within its simulation, in [0, 1].
TSR1_percentile = (TSR1_rank_subject_reverse-1)/(nrow(TSR1_rank_subject_reverse)-1)
# hist(TSR1_rank_subject[1,],breaks = 100)
# hist(TSR1_percentile[1,],breaks = 100)
# Diagnostic values (computed and discarded inside the function).
mean(TSR1_percentile[1,])
# NOTE(review): interp1() is pracma::interp1, but library(pracma) above is
# commented out -- this body requires pracma to be attached by the caller.
interp1(x = Award1[,1],y = Award1[,2],xi = mean(TSR1_percentile[1,]),method = "linear")
# Award multiple for the subject company (row 1) from the payout schedule.
award1_peer1_vec = interp1(x = Award1[,1],y = Award1[,2],xi = TSR1_percentile[1,],method = "linear")
(mean(award1_peer1_vec))
# Fair value: mean discounted payoff (award multiple x terminal price).
return(award1_peer1 = mean(award1_peer1_vec * price1_end[1,] * exp(-rf*T)))
}
#(award1_peer1 = mean(award1_peer1_vec * price1_end[1,] * exp(-rf*T)))
### for sensitivity table refer to previous code. ### |
# ================================================
# Step 1 -
# merge nmme data and create usable files
# S. Baker, July 2017
# ================================================
#
# Reads the NMME hindcast ensemble-mean NetCDF files (one file per variable /
# model / initialisation month / hindcast year), stacks them into one long
# table per model, and writes per-model and combined .rds files to dir_out.
# Output columns: var, mdl, yr, mon, hru, then forecast leads 0-7.
#
# NOTE(review): rm(list=ls()) is kept from the original workflow, but wiping
# the global environment inside a script is generally discouraged.
rm(list=ls())
## Load libraries
library(ncdf4)
library(dplyr)
## Directories
dir_in = '/home/sabaker/s2s/nmme/files/HUC_hcst/'
#dir_out = '/home/sabaker/s2s/nmme/files/R_output/'
dir_out = '/d2/hydrofcst/s2s/nmme_processing/R_output/'
## Input data
var = c('prate', 'tmp2m')  # variables to process (note: masks stats::var here)
fcsts = c('01','02','03','04','05','06','07','08','09','10','11','12')
models = c('CFSv2', 'CMC1', 'CMC2', 'GFDL', 'GFDL_FLOR', 'NASA', 'NCAR_CCSM4')
### Read and save data (takes ~8-9 minutes)
beg_time = Sys.time()
# variable loop -- seq_along(var) instead of the hard-coded 1:2 so the loop
# tracks the length of `var` automatically
for (i in seq_along(var)) {
  print(var[i])
  df_all = NULL
  # model loop (seq_along() is also safe for an empty vector, where
  # 1:length() would iterate over c(1, 0))
  for (k in seq_along(models)) {
    print(models[k])
    df_model = NULL
    # side effect: changes the working directory to this variable's input dir
    setwd(paste0(dir_in, var[i]))
    # month loop (initialisation months 01..12)
    for (j in 1:12) {
      # year loop: m = years since 1982; hindcasts cover 1982-2016
      for (m in 0:34) {
        # read netcdf
        file = paste0(var[i],'.',fcsts[j],'0100.ensmean.',models[k],'.fcst.198201-201612.1x1.ITDIM-',m,'.nc')
        nc_temp = nc_open(file)
        ## read variables & combine
        var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
        hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
        # BUG FIX: close each NetCDF handle after reading; the original left
        # ~2900 files per variable open for the life of the session
        nc_close(nc_temp)
        yr = 1982 + m
        df = cbind(var[i], models[k], yr, j, hru_vec, var_raw)
        ## merge with previous data
        df_model = rbind(df_model, df)
        ## print errors with files - (makes script slower!)
        #r = range(df[,6])
        #if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(paste(r,file)) }
        #if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(paste(r,file)) }
      }
      ## print max and min in model/var combo (sanity check on lead-0 values)
      # BUG FIX: cbind() with strings makes df_model a character matrix, so
      # range() on the raw column compared values lexicographically (e.g.
      # "9.5" > "10.2"); convert to numeric before taking the range
      r = range(as.numeric(df_model[,6]))
      if (var[i] == 'prate' & (max(r) > 50 | min(r) < 0)) { print(r) }
      if (var[i] == 'tmp2m' & (max(r) > 400 | min(r) < 200)) { print(r) }
    }
    # side effect: move to the output directory before saving
    setwd(dir_out)
    colnames(df_model) <- c('var', 'mdl', 'yr', 'mon', 'hru', 'lead 0', 'lead 1', 'lead 2', 'lead 3', 'lead 4', 'lead 5', 'lead 6', 'lead 7')
    saveRDS(df_model, file = paste0(var[i],'_',models[k],'_ensmean.rds'))
    df_all = rbind(df_all, df_model)
  }
  saveRDS(df_all, file = paste0(var[i],'_NMME_ensmean_198201-201612.rds'))
}
Sys.time() - beg_time # total time to run
# save entire data set
#saveRDS(df_all, file = 'NMME_ensmean_198201-201612.rds') | /scripts/nmme_scripts/hcst_scripts/1_merge_nmme_hcst.R | no_license | jemsethio/S2S-Climate-Forecasts-for-Watersheds | R | false | false | 2,602 | r | # ================================================
# Step 1 -
# merge nmme data and create usable files
# S. Baker, July 2017
# ================================================
# Script: reads NMME hindcast ensemble-mean NetCDF files (one per variable /
# model / initialisation month / year), stacks them into long tables, and
# saves per-model and combined .rds files.  Output columns: var, mdl, yr,
# mon, hru, then forecast leads 0-7.
rm(list=ls())
## Load libraries
library(ncdf4)
library(dplyr)
## Directories
dir_in = '/home/sabaker/s2s/nmme/files/HUC_hcst/'
#dir_out = '/home/sabaker/s2s/nmme/files/R_output/'
dir_out = '/d2/hydrofcst/s2s/nmme_processing/R_output/'
## Input data
# NOTE(review): `var` masks stats::var for the rest of this script.
var = c('prate', 'tmp2m')
fcsts = c('01','02','03','04','05','06','07','08','09','10','11','12')
models = c('CFSv2', 'CMC1', 'CMC2', 'GFDL', 'GFDL_FLOR', 'NASA', 'NCAR_CCSM4')
### Read and save data (takes ~8-9 minutes)
beg_time = Sys.time()
# variable loop (hard-coded 1:2 must match length(var))
for (i in 1:2) {
print(var[i])
df_all = NULL
# model loop
for (k in 1:length(models)) {
print(models[k])
df_model = NULL
# side effect: working directory changes to this variable's input folder
setwd(paste0(dir_in, var[i]))
# month loop
for (j in 1:12) {
# year loop (m = years since 1982; hindcasts span 1982-2016)
for (m in 0:34) {
# read netcdf
file = paste0(var[i],'.',fcsts[j],'0100.ensmean.',models[k],'.fcst.198201-201612.1x1.ITDIM-',m,'.nc')
# NOTE(review): the handle opened here is never nc_close()d.
nc_temp = nc_open(file)
## read variables & combine
var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
yr = 1982 + m
df = cbind(var[i], models[k], yr, j, hru_vec, var_raw)
## merge with previous data
df_model = rbind(df_model, df)
## print errors with files - (makes script slower!)
#r = range(df[,6])
#if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(paste(r,file)) }
#if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(paste(r,file)) }
}
## print max and min in model/var combo
# NOTE(review): df_model is a character matrix here, so range() compares
# the raw strings lexicographically before as.numeric() is applied.
r = range(df_model[,6])
if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(r) }
if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(r) }
}
setwd(dir_out)
colnames(df_model) <- c('var', 'mdl', 'yr', 'mon', 'hru', 'lead 0', 'lead 1', 'lead 2', 'lead 3', 'lead 4', 'lead 5', 'lead 6', 'lead 7')
saveRDS(df_model, file = paste0(var[i],'_',models[k],'_ensmean.rds'))
df_all = rbind(df_all, df_model)
}
saveRDS(df_all, file = paste0(var[i],'_NMME_ensmean_198201-201612.rds'))
}
Sys.time() - beg_time # total time to run
# save entire data set
#saveRDS(df_all, file = 'NMME_ensmean_198201-201612.rds') |
#Install needed package (only when it is missing; the original re-installed
#stringi unconditionally on every run).
if (!requireNamespace("stringi", quietly = TRUE)) install.packages("stringi")
#Load needed libraries.
library(stringi)
library(ggplot2)
#Read in main data set. Subset with only the relevant columns:
#fips = county FIPS code, PST045213 = 2013 population estimate,
#EDU635212 = percent of persons high-school educated.
dataSet <- read.table("DataSet.txt", header = TRUE, sep = ",")
dataSet <- subset(dataSet, select = c("fips","PST045213","EDU635212"))
colnames(dataSet) <- c("County Code", "2013 Population", "High School Educated People")
#Cleanse dataSet of individual states' populations (FIPS ending in "000")
#and the overall US population entry (FIPS 0).  stri_sub() is vectorised,
#so the original element-by-element loop is replaced by a single masked
#assignment with identical results.
codes <- dataSet$`County Code`
dataSet$`County Code`[stri_sub(codes, -3, -1) == "000" | codes == 0] <- NA
dataSet <- na.omit(dataSet)
#Read in FIPS County Code data. Create and fill a third column with the
#two-letter state abbreviation (last two characters of the name field).
countyCodes <- read.fwf("FIPS_CountyName.txt", skip = 1, widths = (c(5, 38)))
countyCodes[, "state"] <- c(stri_sub(countyCodes[,2], -2, -1))
#Map county code in dataSet to countyCodes, and replace dataSet's county
#codes with state acronyms via a match() lookup (the original wrapped this
#in a redundant with() call).
dataSet$`County Code` <- countyCodes$state[match(dataSet$`County Code`, countyCodes$V1)]
#Plot as a scatterplot with one facet per state. Columns are referenced by
#name inside aes()/facet_wrap() rather than as dataSet$... -- the dataSet$
#form bypasses ggplot2's data masking and is unreliable with facets.
#Set X and Y axis labels, remove legend, add linear best-fit line.
graph <- ggplot(data = dataSet, aes(`High School Educated People`, `2013 Population`)) + geom_point(aes(colour = factor(`County Code`)))
graph + facet_wrap(~`County Code`) + theme(legend.position = "none") + stat_smooth(method = "lm") + labs(x = "Percent of Population High School Educated", y = "2013 County Population", title = "Population vs. Education")
| /popVsEducation.R | no_license | Stanupa/Population-vs-Education | R | false | false | 1,648 | r | #Install needed package.
# Script: plots 2013 county population against high-school education rate,
# faceted by US state, from Census QuickFacts-style input files.
# NOTE(review): installs stringi unconditionally on every run.
install.packages("stringi")
#Load needed libraries.
library(stringi)
library(ggplot2)
#Read in main data set. Subset with only the relevant columns.
# fips = county FIPS code, PST045213 = 2013 population estimate,
# EDU635212 = percent of persons high-school educated.
dataSet <- read.table("DataSet.txt", header = TRUE, sep = ",")
dataSet <- subset(dataSet, select = c("fips","PST045213","EDU635212"))
colnames(dataSet) <- c("County Code", "2013 Population", "High School Educated People")
#Cleanse dataSet of individual states' populations, and the overall US population entry.
# State-level rows have FIPS ending in "000"; the US total row is FIPS 0.
for (i in 1:length(dataSet$`County Code`)) {
if (stri_sub(dataSet$`County Code`[i], -3, -1) == "000" || dataSet$`County Code`[i] == 0) {
dataSet$`County Code`[i] = NA
}
}
dataSet <- na.omit(dataSet)
#Read in FIPS County Code data. Create and fill a third column for state.
# State abbreviation = last two characters of the fixed-width name field.
countyCodes <- read.fwf("FIPS_CountyName.txt", skip = 1, widths = (c(5, 38)))
countyCodes[, "state"] <- c(stri_sub(countyCodes[,2], -2, -1))
#Map county code in dataSet to countyCodes, and replace dataSet's county codes with state acronyms.
dataSet$`County Code` <- with(countyCodes, countyCodes$state[match(dataSet$`County Code`, countyCodes$V1)])
#Plot as a scatterplot with facets. Set X and Y axis labels, remove legend, add in linear best fit line.
# NOTE(review): referencing columns as dataSet$... inside aes()/facet_wrap()
# bypasses ggplot2's data masking and is fragile with facetting.
graph <- ggplot(data = dataSet, aes(dataSet$`High School Educated People`, dataSet$`2013 Population`)) + geom_point(aes(colour = factor(dataSet$`County Code`)))
graph + facet_wrap(~dataSet$`County Code`) + theme(legend.position = "none") + stat_smooth(method = "lm") + labs(x = "Percent of Population High School Educated", y = "2013 County Population", title = "Population vs. Education")
|
library(raster)
#Scenario 1 (two targets)
### raster results for rejection 2 targets
ras.rej2 <- rasterize(mydat_2targets_rej$beta,
mydat_2targets_rej$gamma)
# plot(ras.rej2,
# col=grey(100:1/100),
# useRaster=F,
# main="Rejection ABC")
ras.rej2.mat <- as.matrix(ras.rej2)
sum(ras.rej2.mat)
### raster results for Sequential 2 targets
ras.seq2 <- rasterize(mydat_2targets_seq$beta,
mydat_2targets_seq$gamma)
# plot(ras.seq2,
# col=grey(100:1/100),
# useRaster=F,
# main="Sequential ABC")
ras.seq2.mat <- as.matrix(ras.seq2)
sum(ras.seq2.mat)
### raster results for abcsmc 2 targets
ras.abcsmc2 <- rasterize(mydat_2targets_abcsmc$beta,
mydat_2targets_abcsmc$gamma)
# plot(ras.abcsmc2,
# col=grey(100:1/100),
# useRaster=F,
# main="abcsmcuential ABC")
ras.abcsmc2.mat <- as.matrix(ras.abcsmc2)
sum(ras.abcsmc2.mat)
### raster results for bmle 2 targets
ras.BC_SIR2 <- rasterize(mydat_2targets_BC_SIR$beta,
mydat_2targets_BC_SIR$gamma)
# plot(ras.bmle2,
# col=grey(100:1/100),
# useRaster=F,
# main="BMLE")
ras.BC_SIR2.mat <- as.matrix(ras.BC_SIR2)
sum(ras.BC_SIR2.mat)
### raster results for imis 2 targets
ras.imis2 <- rasterize(mydat_2targets_imis$beta,
mydat_2targets_imis$gamma)
# plot(ras.imis2,
# col=grey(100:1/100),
# useRaster=F,
# main="imis ABC")
ras.imis2.mat <- as.matrix(ras.imis2)
sum(ras.imis2.mat)
##############################################################
# scenario 2 , 3 targets
#par(mfrow=c(2,2))
### raster results for rejection 3 targets
ras.rej3 <- rasterize(mydat_3targets_rej$beta,
mydat_3targets_rej$gamma)
# plot(ras.rej3,
# col=grey(100:1/100),
# useRaster=F,
# main="Rejection ABC")
ras.rej3.mat <- as.matrix(ras.rej3)
sum(ras.rej3.mat)
### raster results for Sequential 3 targets
ras.seq3 <- rasterize(mydat_3targets_seq$beta,
mydat_3targets_seq$gamma)
# plot(ras.seq3,
# col=grey(100:1/100),
# useRaster=F,
# main="Sequential ABC")
ras.seq3.mat <- as.matrix(ras.seq3)
sum(ras.seq3.mat)
### raster results for abcsmc 3 targets
ras.abcsmc3 <- rasterize(mydat_3targets_abcsmc$beta,
mydat_3targets_abcsmc$gamma)
# plot(ras.abcsmc3,
# col=grey(100:1/100),
# useRaster=F,
# main="abcsmc")
ras.abcsmc3.mat <- as.matrix(ras.abcsmc3)
sum(ras.abcsmc3.mat)
### raster results for bmle 3 targets
ras.BC_SIR3 <- rasterize(mydat_3targets_BC_SIR$beta,
mydat_3targets_BC_SIR$gamma)
# plot(ras.BC_SIR3,
# col=grey(100:1/100),
# useRaster=F,
# main="BMLE")
ras.BC_SIR3.mat <- as.matrix(ras.BC_SIR3)
sum(ras.BC_SIR3.mat)
### raster results for imis 3 targets
ras.imis3 <- rasterize(mydat_3targets_imis$beta,
mydat_3targets_imis$gamma)
# plot(ras.imis3,
# col=grey(100:1/100),
# useRaster=F,
# main="imis")
ras.imis3.mat <- as.matrix(ras.imis3)
sum(ras.imis3.mat)
##############################################################################
######################################################################
# ref sample
#par(mfrow=c(1,2))
ras.ref2 <- rasterize(mydat_2targets_ref$beta,
mydat_2targets_ref$gamma)
# plot(ras.ref2,
# col=grey(100:1/100),
# useRaster=F,
# main="Reference posterior")
ras.ref2.mat <- as.matrix(ras.ref2)
sum(ras.ref2.mat)
ras.ref3 <- rasterize(mydat_3targets_ref$beta,
mydat_3targets_ref$gamma)
# plot(ras.ref3,
# col=grey(100:1/100),
# useRaster=F,
# main="Reference posterior")
ras.ref3.mat <- as.matrix(ras.ref3)
sum(ras.ref3.mat)
| /Posterior_Data_n_Analysis/apply raster on results.R | no_license | zenabu-suboi/Calibration_manuscript | R | false | false | 3,771 | r |
library(raster)
#Scenario 1 (two targets)
### raster results for rejection 2 targets
ras.rej2 <- rasterize(mydat_2targets_rej$beta,
mydat_2targets_rej$gamma)
# plot(ras.rej2,
# col=grey(100:1/100),
# useRaster=F,
# main="Rejection ABC")
ras.rej2.mat <- as.matrix(ras.rej2)
sum(ras.rej2.mat)
### raster results for Sequential 2 targets
ras.seq2 <- rasterize(mydat_2targets_seq$beta,
mydat_2targets_seq$gamma)
# plot(ras.seq2,
# col=grey(100:1/100),
# useRaster=F,
# main="Sequential ABC")
ras.seq2.mat <- as.matrix(ras.seq2)
sum(ras.seq2.mat)
### raster results for abcsmc 2 targets
ras.abcsmc2 <- rasterize(mydat_2targets_abcsmc$beta,
mydat_2targets_abcsmc$gamma)
# plot(ras.abcsmc2,
# col=grey(100:1/100),
# useRaster=F,
# main="abcsmcuential ABC")
ras.abcsmc2.mat <- as.matrix(ras.abcsmc2)
sum(ras.abcsmc2.mat)
### raster results for bmle 2 targets
ras.BC_SIR2 <- rasterize(mydat_2targets_BC_SIR$beta,
mydat_2targets_BC_SIR$gamma)
# plot(ras.bmle2,
# col=grey(100:1/100),
# useRaster=F,
# main="BMLE")
ras.BC_SIR2.mat <- as.matrix(ras.BC_SIR2)
sum(ras.BC_SIR2.mat)
### raster results for imis 2 targets
ras.imis2 <- rasterize(mydat_2targets_imis$beta,
mydat_2targets_imis$gamma)
# plot(ras.imis2,
# col=grey(100:1/100),
# useRaster=F,
# main="imis ABC")
ras.imis2.mat <- as.matrix(ras.imis2)
sum(ras.imis2.mat)
##############################################################
# scenario 2 , 3 targets
#par(mfrow=c(2,2))
### raster results for rejection 3 targets
ras.rej3 <- rasterize(mydat_3targets_rej$beta,
mydat_3targets_rej$gamma)
# plot(ras.rej3,
# col=grey(100:1/100),
# useRaster=F,
# main="Rejection ABC")
ras.rej3.mat <- as.matrix(ras.rej3)
sum(ras.rej3.mat)
### raster results for Sequential 3 targets
ras.seq3 <- rasterize(mydat_3targets_seq$beta,
mydat_3targets_seq$gamma)
# plot(ras.seq3,
# col=grey(100:1/100),
# useRaster=F,
# main="Sequential ABC")
ras.seq3.mat <- as.matrix(ras.seq3)
sum(ras.seq3.mat)
### raster results for abcsmc 3 targets
ras.abcsmc3 <- rasterize(mydat_3targets_abcsmc$beta,
mydat_3targets_abcsmc$gamma)
# plot(ras.abcsmc3,
# col=grey(100:1/100),
# useRaster=F,
# main="abcsmc")
ras.abcsmc3.mat <- as.matrix(ras.abcsmc3)
sum(ras.abcsmc3.mat)
### raster results for bmle 3 targets
ras.BC_SIR3 <- rasterize(mydat_3targets_BC_SIR$beta,
mydat_3targets_BC_SIR$gamma)
# plot(ras.BC_SIR3,
# col=grey(100:1/100),
# useRaster=F,
# main="BMLE")
ras.BC_SIR3.mat <- as.matrix(ras.BC_SIR3)
sum(ras.BC_SIR3.mat)
### raster results for imis 3 targets
ras.imis3 <- rasterize(mydat_3targets_imis$beta,
mydat_3targets_imis$gamma)
# plot(ras.imis3,
# col=grey(100:1/100),
# useRaster=F,
# main="imis")
ras.imis3.mat <- as.matrix(ras.imis3)
sum(ras.imis3.mat)
##############################################################################
######################################################################
# ref sample
#par(mfrow=c(1,2))
ras.ref2 <- rasterize(mydat_2targets_ref$beta,
mydat_2targets_ref$gamma)
# plot(ras.ref2,
# col=grey(100:1/100),
# useRaster=F,
# main="Reference posterior")
ras.ref2.mat <- as.matrix(ras.ref2)
sum(ras.ref2.mat)
ras.ref3 <- rasterize(mydat_3targets_ref$beta,
mydat_3targets_ref$gamma)
# plot(ras.ref3,
# col=grey(100:1/100),
# useRaster=F,
# main="Reference posterior")
ras.ref3.mat <- as.matrix(ras.ref3)
sum(ras.ref3.mat)
|
## ----------------------------------------------------------------------
## Setup: WPR country list and GBD disease-burden (DALY) data.
## Depends on `g_iso3` (iso3 -> WHO-region lookup) defined elsewhere in
## this workflow, and on reshape2 (melt/dcast), dplyr and ggplot2.
## ----------------------------------------------------------------------
clist <- read.csv("country_iso.csv")
wpr_iso2 <- clist$iso2
wpr_iso3 <- clist$iso3
#c_name <- "MN" # specify country
st <- 1900   # start year for the (commented-out) WDI queries below
en <- 2017   # end year
# ggplot2 default hue palette, kept for manual colour scales.
mycols <- c("#F8766D", "#7CAE00", "#00BFC4", "#C77CFF")
# # Cause of death, by communicable diseases and maternal, prenatal and nutrition conditions (% of total)
# com <- WDI(country = wpr_iso2, indicator = "SH.DTH.COMM.ZS", start = st, end = en, extra = TRUE, cache = NULL)
# com <- com %>% select(-c(capital,longitude, latitude,lending , region))
# names(com)[3] <- "com"
# head(com)
#
#
# ncd <- WDI(country = wpr_iso2, indicator = "SH.DTH.NCOM.ZS", start = st, end = en, extra = TRUE, cache = NULL)
# ncd <- ncd %>% select(-c(capital,longitude, latitude,lending , region))
# names(ncd)[3] <- "ncd"
# head(ncd)
#
# DALYs by broad cause group (ncd / com / inj), one row per country-year.
mort <- read.csv("./GBD/total_disease_burden_by_cause.csv") # data from GDB through Our World in Data
names(mort) <- c("country", "iso3", "year", "ncd", "com", "inj")
# Attach the WHO region for each iso3 code (drops countries not in g_iso3).
mort <- merge(mort, g_iso3, by="iso3")
mort_long <- melt(mort, id.vars = c("iso3", "country", "year", "who_region"))
# Aggregate DALYs to WHO-region and then to global totals per year/cause.
mort_region_long <- aggregate(value ~ who_region + year+ variable, data = mort_long, sum, na.rm = TRUE)
# NOTE(review): na.rm=T uses the reassignable shortcut T instead of TRUE.
mort_global_long <- aggregate(value ~ year + variable, data=mort_region_long, sum, na.rm=T)
# Global mortality trend: wide table of DALYs per year by cause, plus each
# cause's share (%) of the annual total, re-melted for plotting.
# BUG FIX: the original called dcast(mort_global, ...) but `mort_global`
# does not exist yet at this point -- the long-format object built just
# above is `mort_global_long` (the parallel NCD section at the end of this
# script casts ncds_global_long the same way).
mort_global <- dcast(mort_global_long, year ~ variable)
# Columns 2:4 are the three cause groups (ncd, com, inj); prop.table(.., 1)
# converts each row to shares of that year's total, scaled to percent.
mort_global.p <- data.frame(cbind(mort_global$year, prop.table(as.matrix(mort_global[, c(2:4)]), 1) * 100))
names(mort_global.p)[1] <- "year"
mort_global.p_long <- melt(mort_global.p, id = "year")
# Split the regional long table into one object per WHO region, cast each
# wide (year x cause), and compute each cause's percentage share by row.
mort_wpr_long <- mort_region_long %>% filter(who_region=="WPR")
mort_afr_long <- mort_region_long %>% filter(who_region=="AFR")
mort_amr_long <- mort_region_long %>% filter(who_region=="AMR")
mort_emr_long <- mort_region_long %>% filter(who_region=="EMR")
mort_eur_long <- mort_region_long %>% filter(who_region=="EUR")
mort_sea_long <- mort_region_long %>% filter(who_region=="SEA")
mort_wpr <- dcast(mort_wpr_long, year + who_region ~ variable)
mort_afr <- dcast(mort_afr_long, year + who_region ~ variable)
mort_amr <- dcast(mort_amr_long, year + who_region ~ variable)
mort_emr <- dcast(mort_emr_long, year + who_region ~ variable)
mort_eur <- dcast(mort_eur_long, year + who_region ~ variable)
mort_sea <- dcast(mort_sea_long, year + who_region ~ variable)
# Columns 3:5 are the cause groups; prop.table(.., 1) gives row-wise shares.
mort_wpr.p <- data.frame(cbind(mort_wpr[,c(1,2)], prop.table(as.matrix(mort_wpr[,c(3:5)]), 1)*100))
mort_afr.p <- data.frame(cbind(mort_afr[,c(1,2)], prop.table(as.matrix(mort_afr[,c(3:5)]), 1)*100))
mort_amr.p <- data.frame(cbind(mort_amr[,c(1,2)], prop.table(as.matrix(mort_amr[,c(3:5)]), 1)*100))
mort_emr.p <- data.frame(cbind(mort_emr[,c(1,2)], prop.table(as.matrix(mort_emr[,c(3:5)]), 1)*100))
mort_eur.p <- data.frame(cbind(mort_eur[,c(1,2)], prop.table(as.matrix(mort_eur[,c(3:5)]), 1)*100))
mort_sea.p <- data.frame(cbind(mort_sea[,c(1,2)], prop.table(as.matrix(mort_sea[,c(3:5)]), 1)*100))
# Recombine all regions and melt back to long form for faceted plotting.
mort_region.p <- rbind(mort_wpr.p,
mort_afr.p,
mort_amr.p,
mort_emr.p,
mort_eur.p,
mort_sea.p)
mort_region.p_long <- melt(mort_region.p,id.vars = c("year", "who_region"))
# plot global diease burden
# Relabel cause codes with readable legend labels (order: ncd, inj, com).
mort_global.p_long$variable <- factor(mort_global.p_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_global.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "% of global disease burden by cause, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=8)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_global_prop.pdf", width=8,height=6) # write PDF
p
dev.off()
# Absolute global burden, rescaled to millions of DALYs for readability.
mort_global_long$value2 <- mort_global_long$value/1000000
mort_global_long$variable <- factor(mort_global_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global disease burden by cause, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=8)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_global.pdf", width=8,height=6) # write PDF
p
dev.off()
#"Total disease burden measured as the number of DALYs (Disability-Adjusted Life Years) per year. DALYs are used tomeasure total burden of disease - both from years of life lost and years lived with a disability. One DALY equals one lostyear of healthy life"
# by region
mort_region.p_long$variable <- factor(mort_region.p_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_region.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region)
p <- p + labs(title= "% of disease burden by cause by WHO region, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_region_prop.pdf", width=9,height=8) # write PDF
p
dev.off()
mort_region_long$value2 <- mort_region_long$value/100000
mort_region_long$variable <- factor(mort_region_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "Disease burden by cause by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
mort_long_wpr <- mort_long %>% filter(who_region=="WPR")
mort_long_wpr$variable <- factor(mort_long_wpr$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
mort_long_wpr$value2 <- mort_long_wpr$value/100000
mort_long_wpr$country <- as.character(mort_long_wpr$country)
mort_long_wpr$country[mort_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
mort_long_wpr$country[mort_long_wpr$iso3=="LAO"] <- "Lao PDR"
mort_long_wpr$country[mort_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
mort_long_wpr$country[mort_long_wpr$iso3=="VNM"] <- "Viet Nam"
mort_long_wpr$country[mort_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(mort_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "Disease burden by cause by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=9),
axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=2,byrow=TRUE))
pdf(file="disease_burden_country_WPR.pdf", width=10,height=12) # write PDF
p
dev.off()
######### 2016 comparison across country
# 2016 snapshot for WPR: compute each country's percentage share of DALYs by
# cause, merge income classification, and plot NCD share vs communicable
# share (point size = total burden, colour = income group).
mort_long_wpr_16 <- mort_long_wpr %>% filter(year==2016)
mort_long_wpr_16 <- mort_long_wpr_16[,-7]  # drop the rescaled value2 column before casting
mort_wpr_16 <- dcast(mort_long_wpr_16, country + iso3 ~ variable)
# com_ncd = NCD + communicable DALYs (cast columns 3 and 5); *.p columns are
# row-wise percentages of the three cause columns.
mort_wpr_16.p <- data.frame(cbind(mort_wpr_16, rowSums(mort_wpr_16[,c(3,5)]), prop.table(as.matrix(mort_wpr_16[,c(3:5)]), 1)*100))
names(mort_wpr_16.p) <- c("country", "iso3", "ncd", "inj", "com", "com_ncd", "ncd.p", "inj.p", "com.p")
income_wpr <- gdp_wpr_17[,c(2,1, 5,6)]  # NOTE(review): assumes gdp_wpr_17 cols 2,1,5,6 are country/iso3/income info -- confirm
#write.csv(income_wpr, file = "income_wpr.csv",row.names = F)
mort_wpr_16.p <- merge(mort_wpr_16.p, income_wpr)
mort_wpr_16.p$com_ncd2 <- mort_wpr_16.p$com_ncd/1000000  # millions, for the size aesthetic
p <- ggplot(mort_wpr_16.p, aes(x = ncd.p, y = com.p, label = country, colour=income, size=com_ncd2))
p <- p + geom_point(alpha=0.6)
p <- p + geom_text_repel(show.legend = FALSE, size=3)
p <- p + geom_smooth(aes(x=ncd.p, y=com.p, fill=income), method = "lm", alpha=0.13, size=0.4, linetype=0)
# Fixed typo in title: "burdedn" -> "burden".
p <- p + labs(title= "Disease burden, NCD vs Communicable and other diseases, 2016", y = "Communicable and other diseases (% of total DALYs)", x="NCDs (% of total DALYs)", size="DALYs per year (million)", col="Income classification")
p <- p + guides(fill=FALSE)
p <- p + theme(legend.title= element_text(size=9),
               plot.title = element_text(size = 11.5),
               axis.title=element_text(size=9))
p
pdf(file="disease_burden_scatterplot_WPR.pdf", width=8,height=5) # write PDF
p
dev.off()
# p <- ggplot(mort_wpr_16.p, aes(x = ncd, y = com, label = country, colour=income, size=com_ncd2))
# p <- p + geom_point(alpha=0.6)
# p <- p + geom_text_repel(show.legend = FALSE, size=3)
# p <- p + scale_x_log10(breaks=pretty_breaks())
# p <- p + scale_y_log10(breaks=pretty_breaks())
#################################################
# NCDs
#################################################
# DALYs from individual NCD causes, by country and year.
ncds <- read.csv("./GBD/disease-burden-from-ncds.csv") # data from GBD through Our World in Data
# Cause codes, in column order (matched positionally by the factor labels
# applied before plotting): car, can, res, dm, oth, liv, men, neu, mus, diges.
names(ncds) <- c("country", "iso3", "year", "car", "can", "res", "dm", "oth", "liv", "men", "neu", "mus", "diges")
ncds <- merge(ncds, g_iso3, by="iso3")  # attach WHO region (g_iso3 defined elsewhere)
ncds_long <- melt(ncds, id.vars = c("iso3", "country", "year", "who_region"))
# Sum DALYs to region level, then to global level, per cause and year.
ncds_region_long <- aggregate(value ~ who_region + year+ variable, data = ncds_long, sum, na.rm = TRUE)
ncds_global_long <- aggregate(value ~ year + variable, data=ncds_region_long, sum, na.rm=T)
# Global burden trend
ncds_global <- dcast(ncds_global_long, year ~ variable)
# Convert cause columns (2:11) to row-wise percentages of each year's total.
ncds_global.p <- data.frame(cbind(ncds_global$year,prop.table(as.matrix(ncds_global[,c(2:11)]), 1)*100))
names(ncds_global.p)[1] <- "year"
ncds_global.p_long <- melt(ncds_global.p, id="year")
# Split the regional long table into one table per WHO region, cast each to
# wide (year x cause), and compute row-wise percentage shares (cause columns
# are 3:12 after year/who_region).
# NOTE(review): the six near-identical pipelines could be one lapply over the
# region codes; kept verbatim to preserve row order and column names exactly.
ncds_wpr_long <- ncds_region_long %>% filter(who_region=="WPR")
ncds_afr_long <- ncds_region_long %>% filter(who_region=="AFR")
ncds_amr_long <- ncds_region_long %>% filter(who_region=="AMR")
ncds_emr_long <- ncds_region_long %>% filter(who_region=="EMR")
ncds_eur_long <- ncds_region_long %>% filter(who_region=="EUR")
ncds_sea_long <- ncds_region_long %>% filter(who_region=="SEA")
ncds_wpr <- dcast(ncds_wpr_long, year + who_region ~ variable)
ncds_afr <- dcast(ncds_afr_long, year + who_region ~ variable)
ncds_amr <- dcast(ncds_amr_long, year + who_region ~ variable)
ncds_emr <- dcast(ncds_emr_long, year + who_region ~ variable)
ncds_eur <- dcast(ncds_eur_long, year + who_region ~ variable)
ncds_sea <- dcast(ncds_sea_long, year + who_region ~ variable)
ncds_wpr.p <- data.frame(cbind(ncds_wpr[,c(1,2)], prop.table(as.matrix(ncds_wpr[,c(3:12)]), 1)*100))
ncds_afr.p <- data.frame(cbind(ncds_afr[,c(1,2)], prop.table(as.matrix(ncds_afr[,c(3:12)]), 1)*100))
ncds_amr.p <- data.frame(cbind(ncds_amr[,c(1,2)], prop.table(as.matrix(ncds_amr[,c(3:12)]), 1)*100))
ncds_emr.p <- data.frame(cbind(ncds_emr[,c(1,2)], prop.table(as.matrix(ncds_emr[,c(3:12)]), 1)*100))
ncds_eur.p <- data.frame(cbind(ncds_eur[,c(1,2)], prop.table(as.matrix(ncds_eur[,c(3:12)]), 1)*100))
ncds_sea.p <- data.frame(cbind(ncds_sea[,c(1,2)], prop.table(as.matrix(ncds_sea[,c(3:12)]), 1)*100))
# Stack regional percentage tables back into one long table for faceting.
ncds_region.p <- rbind(ncds_wpr.p,
                       ncds_afr.p,
                       ncds_amr.p,
                       ncds_emr.p,
                       ncds_eur.p,
                       ncds_sea.p)
ncds_region.p_long <- melt(ncds_region.p,id.vars = c("year", "who_region"))
# plot global NCDs burden
# Label the cause codes positionally (same order as the ncds column names).
ncds_global.p_long$variable <- factor(ncds_global.p_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_global.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "% of global burden from NCDs by cause, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="right",
               legend.text = element_text(size=10)) #+ guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="disease_NCDs_global_prop.pdf", width=11,height=6) # write PDF
p
dev.off()
# Absolute global NCD burden: rescale DALYs to millions, label causes, plot.
ncds_global_long$value2 <- ncds_global_long$value/1000000
ncds_global_long$variable <- factor(ncds_global_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global burden from NCDs by cause, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="right",
               legend.text = element_text(size=10))# + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_NCDs_burden_global.pdf", width=11,height=6) # write PDF
p
dev.off()
# by region
# Percentage shares of NCD burden, one facet per WHO region.
ncds_region.p_long$variable <- factor(ncds_region.p_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_region.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region)
p <- p + labs(title= "% of burden from NCDs by cause by WHO region, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="NCDs_burden_region_prop.pdf", width=9,height=8) # write PDF
p
dev.off()
# Absolute regional NCD burden (DALYs x 100,000), free y-scales per region.
ncds_region_long$value2 <- ncds_region_long$value/100000
ncds_region_long$variable <- factor(ncds_region_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "NCDs burden by cause by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="NCDs_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
# WPR-only NCD burden by cause, one facet per country.
ncds_long_wpr <- ncds_long %>% filter(who_region=="WPR")
ncds_long_wpr$variable <- factor(ncds_long_wpr$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
ncds_long_wpr$value2 <- ncds_long_wpr$value/100000
# Official short country designations (factor -> character first).
ncds_long_wpr$country <- as.character(ncds_long_wpr$country)
ncds_long_wpr$country[ncds_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
ncds_long_wpr$country[ncds_long_wpr$iso3=="LAO"] <- "Lao PDR"
ncds_long_wpr$country[ncds_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
ncds_long_wpr$country[ncds_long_wpr$iso3=="VNM"] <- "Viet Nam"
ncds_long_wpr$country[ncds_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(ncds_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "NCDs burden by cause by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=9),
               axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=3,byrow=TRUE))
pdf(file="NCDs_burden_country_WPR.pdf", width=10,height=13) # write PDF
p
dev.off()
#################################################
# Communicable and other diseases
#################################################
# DALYs from communicable, maternal, neonatal, and nutritional causes.
coms <- read.csv("./GBD/disease-burden-from-communicable-diseases.csv") # data from GBD through Our World in Data
# Cause codes, in column order (matched positionally by later factor labels):
# mat, neo, nut, mal, dia, hiv, tb, oth.
names(coms) <- c("country", "iso3", "year", "mat", "neo", "nut", "mal", "dia", "hiv", "tb", "oth")
coms <- merge(coms, g_iso3, by="iso3")  # attach WHO region (g_iso3 defined elsewhere)
coms_long <- melt(coms, id.vars = c("iso3", "country", "year", "who_region"))
# Sum DALYs to region level, then to global level, per cause and year.
coms_region_long <- aggregate(value ~ who_region + year+ variable, data = coms_long, sum, na.rm = TRUE)
coms_global_long <- aggregate(value ~ year + variable, data=coms_region_long, sum, na.rm=T)
# Global burden trend
coms_global <- dcast(coms_global_long, year ~ variable)
# Cause columns (2:9) as row-wise percentages of each year's total.
coms_global.p <- data.frame(cbind(coms_global$year,prop.table(as.matrix(coms_global[,c(2:9)]), 1)*100))
names(coms_global.p)[1] <- "year"
coms_global.p_long <- melt(coms_global.p, id="year")
# Per-region tables: filter, cast to wide (year x cause), and compute row-wise
# percentage shares (cause columns are 3:10 after year/who_region).
# NOTE(review): same repetition pattern as the NCD section; kept verbatim.
coms_wpr_long <- coms_region_long %>% filter(who_region=="WPR")
coms_afr_long <- coms_region_long %>% filter(who_region=="AFR")
coms_amr_long <- coms_region_long %>% filter(who_region=="AMR")
coms_emr_long <- coms_region_long %>% filter(who_region=="EMR")
coms_eur_long <- coms_region_long %>% filter(who_region=="EUR")
coms_sea_long <- coms_region_long %>% filter(who_region=="SEA")
coms_wpr <- dcast(coms_wpr_long, year + who_region ~ variable)
coms_afr <- dcast(coms_afr_long, year + who_region ~ variable)
coms_amr <- dcast(coms_amr_long, year + who_region ~ variable)
coms_emr <- dcast(coms_emr_long, year + who_region ~ variable)
coms_eur <- dcast(coms_eur_long, year + who_region ~ variable)
coms_sea <- dcast(coms_sea_long, year + who_region ~ variable)
coms_wpr.p <- data.frame(cbind(coms_wpr[,c(1,2)], prop.table(as.matrix(coms_wpr[,c(3:10)]), 1)*100))
coms_afr.p <- data.frame(cbind(coms_afr[,c(1,2)], prop.table(as.matrix(coms_afr[,c(3:10)]), 1)*100))
coms_amr.p <- data.frame(cbind(coms_amr[,c(1,2)], prop.table(as.matrix(coms_amr[,c(3:10)]), 1)*100))
coms_emr.p <- data.frame(cbind(coms_emr[,c(1,2)], prop.table(as.matrix(coms_emr[,c(3:10)]), 1)*100))
coms_eur.p <- data.frame(cbind(coms_eur[,c(1,2)], prop.table(as.matrix(coms_eur[,c(3:10)]), 1)*100))
coms_sea.p <- data.frame(cbind(coms_sea[,c(1,2)], prop.table(as.matrix(coms_sea[,c(3:10)]), 1)*100))
# Stack regional percentage tables into one long table for faceting.
coms_region.p <- rbind(coms_wpr.p,
                       coms_afr.p,
                       coms_amr.p,
                       coms_emr.p,
                       coms_eur.p,
                       coms_sea.p)
coms_region.p_long <- melt(coms_region.p,id.vars = c("year", "who_region"))
# plot global Coms burden
# Label cause codes positionally (same order as the coms column names).
# Fixed typo in legend label: "respiraotry" -> "respiratory".
coms_global.p_long$variable <- factor(coms_global.p_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_global.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "% of global burden from communicable, maternal, neonatal, and nutritional diseases, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="right",
               legend.text = element_text(size=10)) #+ guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="disease_coms_global_prop.pdf", width=11,height=6) # write PDF
p
dev.off()
# Absolute global burden (DALYs in millions) from communicable causes.
# Fixed typo in legend label: "respiraotry" -> "respiratory".
coms_global_long$value2 <- coms_global_long$value/1000000
coms_global_long$variable <- factor(coms_global_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global burden from communicable, maternal, neonatal, and nutritional diseases, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="right",
               legend.text = element_text(size=10))# + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_coms_burden_global.pdf", width=11,height=6) # write PDF
p
dev.off()
# by region
# Percentage shares of communicable-disease burden, one facet per WHO region.
# Fixed typo in legend label: "respiraotry" -> "respiratory".
coms_region.p_long$variable <- factor(coms_region.p_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_region.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region)
p <- p + labs(title= "% of burden from communicable, maternal, neonatal, and nutritional diseases, by WHO region, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="coms_burden_region_prop.pdf", width=9,height=8) # write PDF
p
dev.off()
# Absolute regional burden (DALYs x 100,000), free y-scales per region.
# Fixed typo in legend label: "respiraotry" -> "respiratory".
coms_region_long$value2 <- coms_region_long$value/100000
coms_region_long$variable <- factor(coms_region_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "Burden from communicable, maternal, neonatal, and nutritional diseases, by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="coms_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
# WPR-only communicable-disease burden by cause, one facet per country.
# Fixed typo in legend label: "respiraotry" -> "respiratory".
coms_long_wpr <- coms_long %>% filter(who_region=="WPR")
coms_long_wpr$variable <- factor(coms_long_wpr$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
coms_long_wpr$value2 <- coms_long_wpr$value/100000
# Official short country designations (factor -> character first).
coms_long_wpr$country <- as.character(coms_long_wpr$country)
coms_long_wpr$country[coms_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
coms_long_wpr$country[coms_long_wpr$iso3=="LAO"] <- "Lao PDR"
coms_long_wpr$country[coms_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
coms_long_wpr$country[coms_long_wpr$iso3=="VNM"] <- "Viet Nam"
coms_long_wpr$country[coms_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(coms_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "Burden from communicable, maternal, neonatal, and nutritional diseases, by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=9),
               axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=3,byrow=TRUE))
pdf(file="coms_burden_country_WPR.pdf", width=10,height=13) # write PDF
p
dev.off()
| /scripts/5_Visualizing health transition.R | permissive | tomhiatt/wprtrend | R | false | false | 24,429 | r |
# Country list for the Western Pacific Region: ISO2/ISO3 codes used to filter
# and label WPR countries throughout the script.
clist <- read.csv("country_iso.csv")
wpr_iso2 <- clist$iso2
wpr_iso3 <- clist$iso3
#c_name <- "MN" # specify country
# Year window used by the (commented-out) WDI pulls below.
st <- 1900
en <- 2017
# NOTE(review): looks like ggplot2's default 4-colour hue palette -- confirm intended use.
mycols <- c("#F8766D", "#7CAE00", "#00BFC4", "#C77CFF")
# # Cause of death, by communicable diseases and maternal, prenatal and nutrition conditions (% of total)
# com <- WDI(country = wpr_iso2, indicator = "SH.DTH.COMM.ZS", start = st, end = en, extra = TRUE, cache = NULL)
# com <- com %>% select(-c(capital,longitude, latitude,lending , region))
# names(com)[3] <- "com"
# head(com)
#
#
# ncd <- WDI(country = wpr_iso2, indicator = "SH.DTH.NCOM.ZS", start = st, end = en, extra = TRUE, cache = NULL)
# ncd <- ncd %>% select(-c(capital,longitude, latitude,lending , region))
# names(ncd)[3] <- "ncd"
# head(ncd)
#
# Total disease burden (DALYs) by broad cause group (ncd/com/inj).
mort <- read.csv("./GBD/total_disease_burden_by_cause.csv") # data from GBD through Our World in Data
names(mort) <- c("country", "iso3", "year", "ncd", "com", "inj")
mort <- merge(mort, g_iso3, by="iso3")  # attach WHO region (g_iso3 defined elsewhere)
mort_long <- melt(mort, id.vars = c("iso3", "country", "year", "who_region"))
# Sum DALYs to region level, then to global level, per cause and year.
mort_region_long <- aggregate(value ~ who_region + year+ variable, data = mort_long, sum, na.rm = TRUE)
mort_global_long <- aggregate(value ~ year + variable, data=mort_region_long, sum, na.rm=T)
# Global mortality trend
# Bug fix: cast from mort_global_long (built just above); the original
# referenced mort_global, which is undefined at this point. This matches the
# parallel ncds/coms pipelines (e.g. dcast(ncds_global_long, ...)).
mort_global <- dcast(mort_global_long, year ~ variable)
# Cause columns (2:4) as row-wise percentages of each year's total.
mort_global.p <- data.frame(cbind(mort_global$year,prop.table(as.matrix(mort_global[,c(2:4)]), 1)*100))
names(mort_global.p)[1] <- "year"
mort_global.p_long <- melt(mort_global.p, id="year")
# Per-region tables: filter, cast to wide (year x cause), and compute row-wise
# percentage shares (cause columns are 3:5 after year/who_region).
# NOTE(review): same repeated-pipeline pattern as the NCD/coms sections.
mort_wpr_long <- mort_region_long %>% filter(who_region=="WPR")
mort_afr_long <- mort_region_long %>% filter(who_region=="AFR")
mort_amr_long <- mort_region_long %>% filter(who_region=="AMR")
mort_emr_long <- mort_region_long %>% filter(who_region=="EMR")
mort_eur_long <- mort_region_long %>% filter(who_region=="EUR")
mort_sea_long <- mort_region_long %>% filter(who_region=="SEA")
mort_wpr <- dcast(mort_wpr_long, year + who_region ~ variable)
mort_afr <- dcast(mort_afr_long, year + who_region ~ variable)
mort_amr <- dcast(mort_amr_long, year + who_region ~ variable)
mort_emr <- dcast(mort_emr_long, year + who_region ~ variable)
mort_eur <- dcast(mort_eur_long, year + who_region ~ variable)
mort_sea <- dcast(mort_sea_long, year + who_region ~ variable)
mort_wpr.p <- data.frame(cbind(mort_wpr[,c(1,2)], prop.table(as.matrix(mort_wpr[,c(3:5)]), 1)*100))
mort_afr.p <- data.frame(cbind(mort_afr[,c(1,2)], prop.table(as.matrix(mort_afr[,c(3:5)]), 1)*100))
mort_amr.p <- data.frame(cbind(mort_amr[,c(1,2)], prop.table(as.matrix(mort_amr[,c(3:5)]), 1)*100))
mort_emr.p <- data.frame(cbind(mort_emr[,c(1,2)], prop.table(as.matrix(mort_emr[,c(3:5)]), 1)*100))
mort_eur.p <- data.frame(cbind(mort_eur[,c(1,2)], prop.table(as.matrix(mort_eur[,c(3:5)]), 1)*100))
mort_sea.p <- data.frame(cbind(mort_sea[,c(1,2)], prop.table(as.matrix(mort_sea[,c(3:5)]), 1)*100))
# Stack regional percentage tables into one long table for faceting.
mort_region.p <- rbind(mort_wpr.p,
                       mort_afr.p,
                       mort_amr.p,
                       mort_emr.p,
                       mort_eur.p,
                       mort_sea.p)
mort_region.p_long <- melt(mort_region.p,id.vars = c("year", "who_region"))
# plot global disease burden (percentage shares by broad cause group)
# Relabel the cause codes for the legend, then build the chart in one chain.
mort_global.p_long$variable <- factor(
  mort_global.p_long$variable,
  levels = c("ncd", "inj", "com"),
  labels = c("Non-communicable disease (NCDs)",
             "Injuries",
             "Communicable, maternal, neonatal, and nutritional diseases")
)
p <- ggplot(mort_global.p_long, aes(x = year, y = value, colour = variable)) +
  geom_line() +
  labs(title = "% of global disease burden by cause, 1990-2016",
       y = "Percentage (%)", x = "", col = "") +
  theme(legend.position = "bottom",
        legend.text = element_text(size = 8)) +
  guides(colour = guide_legend(nrow = 2, byrow = TRUE))
p
# Render on screen above, then write the same plot to PDF.
pdf(file = "disease_burden_global_prop.pdf", width = 8, height = 6)
p
dev.off()
# Absolute global burden (DALYs in millions) by broad cause group.
mort_global_long$value2 <- mort_global_long$value/1000000
mort_global_long$variable <- factor(mort_global_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global disease burden by cause, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=8)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_global.pdf", width=8,height=6) # write PDF
p
dev.off()
# "Total disease burden measured as the number of DALYs (Disability-Adjusted Life Years) per year. DALYs are used to measure total burden of disease - both from years of life lost and years lived with a disability. One DALY equals one lost year of healthy life"
# by region
# Percentage shares of regional disease burden; one facet per WHO region.
mort_region.p_long$variable <- factor(
  mort_region.p_long$variable,
  levels = c("ncd", "inj", "com"),
  labels = c("Non-communicable disease (NCDs)",
             "Injuries",
             "Communicable, maternal, neonatal, and nutritional diseases")
)
p <- ggplot(mort_region.p_long, aes(x = year, y = value, colour = variable)) +
  geom_line() +
  facet_wrap(~who_region) +
  labs(title = "% of disease burden by cause by WHO region, 1990-2016",
       y = "Percentage (%)", x = "", col = "") +
  theme(legend.position = "bottom",
        legend.text = element_text(size = 10)) +
  guides(colour = guide_legend(nrow = 2, byrow = TRUE))
p
# Render on screen above, then write the same plot to PDF.
pdf(file = "disease_burden_region_prop.pdf", width = 9, height = 8)
p
dev.off()
# Absolute regional burden (DALYs x 100,000), free y-scales per region.
mort_region_long$value2 <- mort_region_long$value/100000
mort_region_long$variable <- factor(mort_region_long$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
p <- ggplot(mort_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "Disease burden by cause by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
# Restrict disease burden to the Western Pacific Region and relabel causes.
mort_long_wpr <- mort_long %>% filter(who_region=="WPR")
mort_long_wpr$variable <- factor(mort_long_wpr$variable, levels=c("ncd","inj","com"),labels=c("Non-communicable disease (NCDs)", "Injuries", "Communicable, maternal, neonatal, and nutritional diseases"))
# DALYs in units of 100,000 for readable per-country axes.
mort_long_wpr$value2 <- mort_long_wpr$value/100000
# Official short country designations (factor -> character first).
mort_long_wpr$country <- as.character(mort_long_wpr$country)
mort_long_wpr$country[mort_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
mort_long_wpr$country[mort_long_wpr$iso3=="LAO"] <- "Lao PDR"
mort_long_wpr$country[mort_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
mort_long_wpr$country[mort_long_wpr$iso3=="VNM"] <- "Viet Nam"
mort_long_wpr$country[mort_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(mort_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "Disease burden by cause by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
               legend.text = element_text(size=9),
               axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=2,byrow=TRUE))
pdf(file="disease_burden_country_WPR.pdf", width=10,height=12) # write PDF
p
dev.off()
######### 2016 comparison across country
# 2016 snapshot for WPR: percentage share of DALYs by cause per country,
# merged with income classification; scatter of NCD share vs communicable
# share (point size = total burden, colour = income group).
mort_long_wpr_16 <- mort_long_wpr %>% filter(year==2016)
mort_long_wpr_16 <- mort_long_wpr_16[,-7]  # drop the rescaled value2 column before casting
mort_wpr_16 <- dcast(mort_long_wpr_16, country + iso3 ~ variable)
# com_ncd = NCD + communicable DALYs (cast columns 3 and 5); *.p columns are
# row-wise percentages of the three cause columns.
mort_wpr_16.p <- data.frame(cbind(mort_wpr_16, rowSums(mort_wpr_16[,c(3,5)]), prop.table(as.matrix(mort_wpr_16[,c(3:5)]), 1)*100))
names(mort_wpr_16.p) <- c("country", "iso3", "ncd", "inj", "com", "com_ncd", "ncd.p", "inj.p", "com.p")
income_wpr <- gdp_wpr_17[,c(2,1, 5,6)]  # NOTE(review): assumes gdp_wpr_17 cols 2,1,5,6 are country/iso3/income info -- confirm
#write.csv(income_wpr, file = "income_wpr.csv",row.names = F)
mort_wpr_16.p <- merge(mort_wpr_16.p, income_wpr)
mort_wpr_16.p$com_ncd2 <- mort_wpr_16.p$com_ncd/1000000  # millions, for the size aesthetic
p <- ggplot(mort_wpr_16.p, aes(x = ncd.p, y = com.p, label = country, colour=income, size=com_ncd2))
p <- p + geom_point(alpha=0.6)
p <- p + geom_text_repel(show.legend = FALSE, size=3)
p <- p + geom_smooth(aes(x=ncd.p, y=com.p, fill=income), method = "lm", alpha=0.13, size=0.4, linetype=0)
# Fixed typo in title: "burdedn" -> "burden".
p <- p + labs(title= "Disease burden, NCD vs Communicable and other diseases, 2016", y = "Communicable and other diseases (% of total DALYs)", x="NCDs (% of total DALYs)", size="DALYs per year (million)", col="Income classification")
p <- p + guides(fill=FALSE)
p <- p + theme(legend.title= element_text(size=9),
               plot.title = element_text(size = 11.5),
               axis.title=element_text(size=9))
p
pdf(file="disease_burden_scatterplot_WPR.pdf", width=8,height=5) # write PDF
p
dev.off()
# p <- ggplot(mort_wpr_16.p, aes(x = ncd, y = com, label = country, colour=income, size=com_ncd2))
# p <- p + geom_point(alpha=0.6)
# p <- p + geom_text_repel(show.legend = FALSE, size=3)
# p <- p + scale_x_log10(breaks=pretty_breaks())
# p <- p + scale_y_log10(breaks=pretty_breaks())
#################################################
# NCDs
#################################################
# Disaggregated NCD burden (DALYs) by cause; one column per cause code.
ncds <- read.csv("./GBD/disease-burden-from-ncds.csv") # data from GBD through Our World in Data
names(ncds) <- c("country", "iso3", "year", "car", "can", "res", "dm", "oth", "liv", "men", "neu", "mus", "diges")
# attach WHO region codes via ISO3 lookup table
ncds <- merge(ncds, g_iso3, by="iso3")
ncds_long <- melt(ncds, id.vars = c("iso3", "country", "year", "who_region"))
# sum DALYs by region/year/cause, then by year/cause globally
ncds_region_long <- aggregate(value ~ who_region + year+ variable, data = ncds_long, sum, na.rm = TRUE)
ncds_global_long <- aggregate(value ~ year + variable, data=ncds_region_long, sum, na.rm=T)
# Global burden trend
ncds_global <- dcast(ncds_global_long, year ~ variable)
# row-wise percentage share of each cause (columns 2:11 are the 10 cause columns)
ncds_global.p <- data.frame(cbind(ncds_global$year,prop.table(as.matrix(ncds_global[,c(2:11)]), 1)*100))
names(ncds_global.p)[1] <- "year"
ncds_global.p_long <- melt(ncds_global.p, id="year")
# split the regional series out by WHO region
ncds_wpr_long <- ncds_region_long %>% filter(who_region=="WPR")
ncds_afr_long <- ncds_region_long %>% filter(who_region=="AFR")
ncds_amr_long <- ncds_region_long %>% filter(who_region=="AMR")
ncds_emr_long <- ncds_region_long %>% filter(who_region=="EMR")
ncds_eur_long <- ncds_region_long %>% filter(who_region=="EUR")
ncds_sea_long <- ncds_region_long %>% filter(who_region=="SEA")
ncds_wpr <- dcast(ncds_wpr_long, year + who_region ~ variable)
ncds_afr <- dcast(ncds_afr_long, year + who_region ~ variable)
ncds_amr <- dcast(ncds_amr_long, year + who_region ~ variable)
ncds_emr <- dcast(ncds_emr_long, year + who_region ~ variable)
ncds_eur <- dcast(ncds_eur_long, year + who_region ~ variable)
ncds_sea <- dcast(ncds_sea_long, year + who_region ~ variable)
# per-region percentage shares (columns 3:12 are the cause columns after year/region)
ncds_wpr.p <- data.frame(cbind(ncds_wpr[,c(1,2)], prop.table(as.matrix(ncds_wpr[,c(3:12)]), 1)*100))
ncds_afr.p <- data.frame(cbind(ncds_afr[,c(1,2)], prop.table(as.matrix(ncds_afr[,c(3:12)]), 1)*100))
ncds_amr.p <- data.frame(cbind(ncds_amr[,c(1,2)], prop.table(as.matrix(ncds_amr[,c(3:12)]), 1)*100))
ncds_emr.p <- data.frame(cbind(ncds_emr[,c(1,2)], prop.table(as.matrix(ncds_emr[,c(3:12)]), 1)*100))
ncds_eur.p <- data.frame(cbind(ncds_eur[,c(1,2)], prop.table(as.matrix(ncds_eur[,c(3:12)]), 1)*100))
ncds_sea.p <- data.frame(cbind(ncds_sea[,c(1,2)], prop.table(as.matrix(ncds_sea[,c(3:12)]), 1)*100))
# recombine all regions into one long table for faceted plotting
ncds_region.p <- rbind(ncds_wpr.p,
ncds_afr.p,
ncds_amr.p,
ncds_emr.p,
ncds_eur.p,
ncds_sea.p)
ncds_region.p_long <- melt(ncds_region.p,id.vars = c("year", "who_region"))
# plot global NCDs burden
# Label order follows the cause-column order set by names(ncds):
# car, can, res, dm, oth, liv, men, neu, mus, diges
ncds_global.p_long$variable <- factor(ncds_global.p_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_global.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "% of global burden from NCDs by cause, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="right",
legend.text = element_text(size=10)) #+ guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="disease_NCDs_global_prop.pdf", width=11,height=6) # write PDF
p
dev.off()
# absolute global burden, rescaled to millions of DALYs
ncds_global_long$value2 <- ncds_global_long$value/1000000
ncds_global_long$variable <- factor(ncds_global_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global burden from NCDs by cause, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="right",
legend.text = element_text(size=10))# + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_NCDs_burden_global.pdf", width=11,height=6) # write PDF
p
dev.off()
# by region
# Same label mapping as the global plot; applied to the per-region percentage table.
ncds_region.p_long$variable <- factor(ncds_region.p_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_region.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region)
p <- p + labs(title= "% of burden from NCDs by cause by WHO region, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="NCDs_burden_region_prop.pdf", width=9,height=8) # write PDF
p
dev.off()
# absolute regional burden, rescaled to units of 100,000 DALYs
ncds_region_long$value2 <- ncds_region_long$value/100000
ncds_region_long$variable <- factor(ncds_region_long$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
p <- ggplot(ncds_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "NCDs burden by cause by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="NCDs_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
ncds_long_wpr <- ncds_long %>% filter(who_region=="WPR")
ncds_long_wpr$variable <- factor(ncds_long_wpr$variable, labels=c("Cardiovascular diseases", "Cancers", "Respiratory diseases", "Diabetes & endocrine diseases", "Other NCDs", "Liver disease", "Mental & substance use disorders", "Neurological disorders", "Musculoskeletal disorders", "Digestive diseases"))
# rescale DALYs to units of 100,000 for readable facet axes
ncds_long_wpr$value2 <- ncds_long_wpr$value/100000
ncds_long_wpr$country <- as.character(ncds_long_wpr$country)
# shorten / standardise long country names so they fit as facet labels
ncds_long_wpr$country[ncds_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
ncds_long_wpr$country[ncds_long_wpr$iso3=="LAO"] <- "Lao PDR"
ncds_long_wpr$country[ncds_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
ncds_long_wpr$country[ncds_long_wpr$iso3=="VNM"] <- "Viet Nam"
ncds_long_wpr$country[ncds_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(ncds_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "NCDs burden by cause by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=9),
axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=3,byrow=TRUE))
pdf(file="NCDs_burden_country_WPR.pdf", width=10,height=13) # write PDF
p
dev.off()
#################################################
# Communicable and other diseases
#################################################
# Communicable, maternal, neonatal, and nutritional disease burden by cause.
coms <- read.csv("./GBD/disease-burden-from-communicable-diseases.csv") # data from GBD through Our World in Data
names(coms) <- c("country", "iso3", "year", "mat", "neo", "nut", "mal", "dia", "hiv", "tb", "oth")
# attach WHO region codes via ISO3 lookup table
coms <- merge(coms, g_iso3, by="iso3")
coms_long <- melt(coms, id.vars = c("iso3", "country", "year", "who_region"))
# sum DALYs by region/year/cause, then by year/cause globally
coms_region_long <- aggregate(value ~ who_region + year+ variable, data = coms_long, sum, na.rm = TRUE)
coms_global_long <- aggregate(value ~ year + variable, data=coms_region_long, sum, na.rm=T)
# Global burden trend
coms_global <- dcast(coms_global_long, year ~ variable)
# row-wise percentage share of each cause (columns 2:9 are the 8 cause columns)
coms_global.p <- data.frame(cbind(coms_global$year,prop.table(as.matrix(coms_global[,c(2:9)]), 1)*100))
names(coms_global.p)[1] <- "year"
coms_global.p_long <- melt(coms_global.p, id="year")
# split the regional series out by WHO region
coms_wpr_long <- coms_region_long %>% filter(who_region=="WPR")
coms_afr_long <- coms_region_long %>% filter(who_region=="AFR")
coms_amr_long <- coms_region_long %>% filter(who_region=="AMR")
coms_emr_long <- coms_region_long %>% filter(who_region=="EMR")
coms_eur_long <- coms_region_long %>% filter(who_region=="EUR")
coms_sea_long <- coms_region_long %>% filter(who_region=="SEA")
coms_wpr <- dcast(coms_wpr_long, year + who_region ~ variable)
coms_afr <- dcast(coms_afr_long, year + who_region ~ variable)
coms_amr <- dcast(coms_amr_long, year + who_region ~ variable)
coms_emr <- dcast(coms_emr_long, year + who_region ~ variable)
coms_eur <- dcast(coms_eur_long, year + who_region ~ variable)
coms_sea <- dcast(coms_sea_long, year + who_region ~ variable)
# per-region percentage shares (columns 3:10 are the cause columns after year/region)
coms_wpr.p <- data.frame(cbind(coms_wpr[,c(1,2)], prop.table(as.matrix(coms_wpr[,c(3:10)]), 1)*100))
coms_afr.p <- data.frame(cbind(coms_afr[,c(1,2)], prop.table(as.matrix(coms_afr[,c(3:10)]), 1)*100))
coms_amr.p <- data.frame(cbind(coms_amr[,c(1,2)], prop.table(as.matrix(coms_amr[,c(3:10)]), 1)*100))
coms_emr.p <- data.frame(cbind(coms_emr[,c(1,2)], prop.table(as.matrix(coms_emr[,c(3:10)]), 1)*100))
coms_eur.p <- data.frame(cbind(coms_eur[,c(1,2)], prop.table(as.matrix(coms_eur[,c(3:10)]), 1)*100))
coms_sea.p <- data.frame(cbind(coms_sea[,c(1,2)], prop.table(as.matrix(coms_sea[,c(3:10)]), 1)*100))
# recombine all regions into one long table for faceted plotting
coms_region.p <- rbind(coms_wpr.p,
coms_afr.p,
coms_amr.p,
coms_emr.p,
coms_eur.p,
coms_sea.p)
coms_region.p_long <- melt(coms_region.p,id.vars = c("year", "who_region"))
# plot global Coms burden
# Label order follows the cause-column order set by names(coms):
# mat, neo, nut, mal, dia, hiv, tb, oth.
# FIX: corrected legend-label typo "respiraotry" -> "respiratory".
coms_global.p_long$variable <- factor(coms_global.p_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_global.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "% of global burden from communicable, maternal, neonatal, and nutritional diseases, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="right",
legend.text = element_text(size=10)) #+ guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="disease_coms_global_prop.pdf", width=11,height=6) # write PDF
p
dev.off()
# absolute global burden, rescaled to millions of DALYs
coms_global_long$value2 <- coms_global_long$value/1000000
coms_global_long$variable <- factor(coms_global_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_global_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line()
# p <- p + geom_smooth(aes(x=year, y=value, fill=variable), method = "loess", alpha=0.16, size=0.4)
p <- p + labs(title= "Global burden from communicable, maternal, neonatal, and nutritional diseases, 1990-2016", y = "Number of DALYs per year (million)", x="", col="")
p <- p + theme(legend.position="right",
legend.text = element_text(size=10))# + guides(colour=guide_legend(nrow=2,byrow=TRUE))
p
pdf(file="disease_coms_burden_global.pdf", width=11,height=6) # write PDF
p
dev.off()
# by region
# FIX: corrected legend-label typo "respiraotry" -> "respiratory".
coms_region.p_long$variable <- factor(coms_region.p_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_region.p_long, aes(x=year, y=value, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region)
p <- p + labs(title= "% of burden from communicable, maternal, neonatal, and nutritional diseases, by WHO region, 1990-2016", y = "Percentage (%)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="coms_burden_region_prop.pdf", width=9,height=8) # write PDF
p
dev.off()
# absolute regional burden, rescaled to units of 100,000 DALYs
coms_region_long$value2 <- coms_region_long$value/100000
coms_region_long$variable <- factor(coms_region_long$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
p <- ggplot(coms_region_long, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~who_region,scales="free")
p <- p + labs(title= "Burden from communicable, maternal, neonatal, and nutritional diseases, by WHO region, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=10)) + guides(colour=guide_legend(nrow=4,byrow=TRUE))
p
pdf(file="coms_burden_region.pdf", width=9,height=8) # write PDF
p
dev.off()
# by country
coms_long_wpr <- coms_long %>% filter(who_region=="WPR")
# FIX: corrected legend-label typo "respiraotry" -> "respiratory".
coms_long_wpr$variable <- factor(coms_long_wpr$variable, labels=c("Maternal disorders", "Neonatal disorders", "Nutritional deficiencies", "Malaria & NTDs", "Diarrhea, lower respiratory & infectious diseases", "HIV/AIDS", "Tuberculosis", "Other communicable disease"))
# rescale DALYs to units of 100,000 for readable facet axes
coms_long_wpr$value2 <- coms_long_wpr$value/100000
coms_long_wpr$country <- as.character(coms_long_wpr$country)
# shorten / standardise long country names so they fit as facet labels
coms_long_wpr$country[coms_long_wpr$iso3=="BRN"] <- "Brunei Darussalam"
coms_long_wpr$country[coms_long_wpr$iso3=="LAO"] <- "Lao PDR"
coms_long_wpr$country[coms_long_wpr$iso3=="FSM"] <- "Micronesia (Fed. States of)"
coms_long_wpr$country[coms_long_wpr$iso3=="VNM"] <- "Viet Nam"
coms_long_wpr$country[coms_long_wpr$iso3=="KOR"] <- "Rep. of Korea"
p <- ggplot(coms_long_wpr, aes(x=year, y=value2, colour=variable))
p <- p + geom_line() + facet_wrap(~ country, scales="free")
p <- p + labs(title= "Burden from communicable, maternal, neonatal, and nutritional diseases, by country in WPR, 1990-2016", y = "Number of DALYs per year (x 100,000)", x="", col="")
p <- p + theme(legend.position="bottom",
legend.text = element_text(size=9),
axis.text = element_text(size=6))
p <- p + guides(colour=guide_legend(nrow=3,byrow=TRUE))
pdf(file="coms_burden_country_WPR.pdf", width=10,height=13) # write PDF
p
dev.off()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/album.R
\name{get_album_tracks}
\alias{get_album_tracks}
\title{Get a list of songs on the album}
\usage{
get_album_tracks(album_id, page_size = 100, simplify = TRUE, ...)
}
\arguments{
\item{album_id}{ID of album on musiXmatch}
}
\value{
a data.frame or list containing the data from the API call
}
\description{
Get a list of songs on the album
}
| /man/get_album_tracks.Rd | no_license | rweyant/musixmatch | R | false | false | 436 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/album.R
\name{get_album_tracks}
\alias{get_album_tracks}
\title{Get a list of songs on the album}
\usage{
get_album_tracks(album_id, page_size = 100, simplify = TRUE, ...)
}
\arguments{
\item{album_id}{ID of album on musiXmatch}
}
\value{
a data.frame or list containing the data from the API call
}
\description{
Get a list of songs on the album
}
|
library("doBy")
library("ggplot2")
#read data from test files. Test files are stored locally
#The general process for loading the data was taken from:
# https://github.com/wehrley/wehrley.github.io/blob/master/SOUPTONUTS.md
#
#The structure for the rest of the code was influenced by a MATLAB project from a previous class
#The project was to try and beat Netflix's recommendation algorithm
#
#This solution uses the idea that similar items are rated similarly.
#If person x lives and their vector is similar to person y, then it is likely that person y also lived
#We use cosine similarity to determine how similar persons are.
#
#NOTE: with movies, the rankings are generally between 0 and 5, but with life and death its between 0 and 1
# helper: load a CSV file with explicit per-column classes,
# treating the supplied strings as missing values
readData <- function(file.name, column.types, missing.types) {
  utils::read.csv(
    file = file.name,
    colClasses = column.types,
    na.strings = missing.types
  )
}
#Cosine Similarity function
cosineSimilarity <- function(passenger.vector, neighbor.vector, columns) {
  # Compute a similarity score between one test-set passenger and one
  # training-set passenger ("neighbor") over the selected feature columns.
  # :param passenger.vector: one-row data frame for a test-set passenger
  # :param neighbor.vector: one-row data frame for a training-set passenger
  # :param columns: names of the columns used to compute the similarity
  # :return: a scalar similarity (0 when the two rows share no observed columns)

  # restrict both rows to the feature columns of interest;
  # drop = FALSE keeps one-row data frames even when a single column remains
  # (FIX: the original dropped to a bare vector here, making ncol() return
  # NULL and the branch below error with "argument is of length zero")
  neighbor.vector <- neighbor.vector[, columns, drop = FALSE]
  passenger.vector <- passenger.vector[, columns, drop = FALSE]

  # drop columns that are NA in either row (they carry no information),
  # then keep only the columns observed for BOTH passengers
  passenger.filtered <- passenger.vector[, !apply(is.na(passenger.vector), 2, all), drop = FALSE]
  neighbor.filtered <- neighbor.vector[, !apply(is.na(neighbor.vector), 2, all), drop = FALSE]
  col_intersect <- intersect(colnames(neighbor.filtered), colnames(passenger.filtered))
  passenger.filtered <- passenger.filtered[, col_intersect, drop = FALSE]
  neighbor.filtered <- neighbor.filtered[, col_intersect, drop = FALSE]

  n_shared <- ncol(passenger.filtered)
  if (n_shared > 1) {
    # standard cosine similarity over the co-observed columns
    similarity <- sum(neighbor.filtered * passenger.filtered) /
      sqrt(sum(neighbor.filtered^2) * sum(passenger.filtered^2))
  } else if (n_shared == 1) {
    # with a single co-observed column cosine similarity is always 1,
    # so fall back to a Euclidean-distance-based similarity instead
    # (FIX: original condition was ncol(passenger.filtered == 1) --
    # the parenthesis was misplaced)
    similarity <- 1 / (1 + dist(rbind(neighbor.filtered, passenger.filtered)))
  } else {
    # no shared observed columns at all
    similarity <- 0
  }
  return(similarity)
}
#create Pearson Correlation Function (NOTE: never implemented below)
#Load data
##############################################################
train.data.file <- "train.csv" #training data
test.data.file <- "test.csv" #test data
missing.types <- c("NA","") #what to fill for missing types
#define the data types we want to use for each column
train.column.types <- c(
'integer', # PassengerId
'factor', # Survived
'numeric', # Pclass
'character', # Name
'factor', # Sex
'numeric', # Age
'integer', # SibSp
'integer', # Parch
'character', # Ticket
'numeric', # Fare
'character', # Cabin
'factor' # Embarked
)
#test data doesn't include the Survived column
test.column.types <- train.column.types[-2]
#load data
train.raw <- readData(train.data.file, train.column.types, missing.types)
test.raw <- readData(test.data.file,test.column.types,missing.types)
df.train = train.raw
df.test = test.raw
#force some columns from factor to numeric
#(as.numeric on a factor yields the underlying level codes)
df.train$Sex = as.numeric(df.train$Sex) #1 = female, 2 = male
df.train$Embarked = as.numeric(df.train$Embarked) #1 = C, 2 = Q, 3 = S
df.test$Sex = as.numeric(df.test$Sex) #1 = female, 2 = male
df.test$Embarked = as.numeric(df.test$Embarked) #1 = C, 2 = Q, 3 = S
#pick out which columns we want to use for cosine similarity
#the columns we chose are all ones that can be mapped to a value
#AND seemed to have a good deal of significance on survival
columns = c("Pclass","Sex" ,"Age" , "SibSp" ,"Parch" ,"Fare","Embarked")
##RUN ANALYSIS
###########################################################
# For each test passenger, score similarity against every training passenger,
# keep neighbors above the 0.9 threshold, and average their (similarity-weighted)
# Survived codes to predict the outcome.
test_rows = nrow(df.test)
train_rows = nrow(df.train)
predictions = data.frame(PassengerId = integer(test_rows), Survived=integer(test_rows))
for (p in 1:nrow(df.test)) {
print(paste("Passenger: ", p))
similarities = data.frame(Similar=numeric(train_rows), Survived = integer(train_rows))
for (n in 1:nrow(df.train)) {
similarities$Similar[n] = cosineSimilarity(df.test[p,], df.train[n,], columns)
# assigning a factor into a numeric column stores the level codes (1/2)
similarities$Survived[n] = df.train$Survived[n]
}
# neighbors: training passengers with similarity above 0.9
most_similar = subset(similarities, similarities$Similar > .9)
predictions$Survived[p] = round(mean(most_similar$Similar * most_similar$Survived))
predictions$PassengerId[p] = df.test$PassengerId[p]
}
#predictions$Survived has values of 1 and 2, with 2 being survived, and 1 being did not survive
#subtract 1 from this column to get the values we want
predictions$Survived = predictions$Survived - 1
write.csv(predictions, file="CosineSimilarityModel.csv", row.names= FALSE) | /HW2/hw2.r | no_license | TheF1rstPancake/CSCI183 | R | false | false | 5,214 | r | library("doBy")
library("ggplot2")
#read data from test files. Test files are stored locally
#The general process for loading the data was taken from:
# https://github.com/wehrley/wehrley.github.io/blob/master/SOUPTONUTS.md
#
#The structure for the rest of the code was influenced by a MATLAB project from a previous class
#The project was to try and beat Netflix's recommnedation algorithm
#
#This solution uses the idea that similar items are rated similarly.
#If person x lives and their vector is similar to person y, then it is likely that person y also lived
#We use cosine similarity to determine how similar persons are.
#
#NOTE: with movies, the rankings are generally between 0 and 5, but with life and death its between 0 and 1
#create function to readData in
readData <- function(file.name, column.types, missing.types) {
read.csv(file.name, colClasses= column.types, na.strings=missing.types)
}
#Cosine Similarity function
cosineSimilarity <- function(passenger.vector, neighbor.vector, columns) {
#calculate the cosine similarity between the passenger and the neighbor vectors
# :param passenger.vector: a vector describing a passenger from the test set
# :param neighbor.vector: a vector describing a passenger from the training set
# :param columns: the columns we are using to calculate cosine similarity.
#only use the columns in columns
neighbor.vector = neighbor.vector[,c(columns)]
passenger.vector = passenger.vector[,c(columns)]
#filter out NA values. They do nothing for us.
#filter the two vectors down so that each column as a value that is NOT NA
passenger.filtered = passenger.vector[,!apply(is.na(passenger.vector),2,all)]
neighbor.filtered = neighbor.vector[,!apply(is.na(neighbor.vector),2,all)]
col_intersect = intersect(colnames(neighbor.filtered), colnames(passenger.filtered))
passenger.filtered = passenger.filtered[,c(col_intersect)]
neighbor.filtered = neighbor.filtered[,c(col_intersect)]
#print(neighbor.filtered)
#print(ncol(passenger.filtered))
#print(1/(1+dist(rbind(neighbor.filtered,passenger.filtered))))
similarity = 0
#calculate cosine similarity
if (ncol(passenger.filtered) > 1) {
#print("COSINE")
similarity = sum(neighbor.filtered*passenger.filtered)/sqrt(sum(neighbor.filtered^2)*sum(passenger.filtered^2))
}
else if(ncol(passenger.filtered == 1)){
#if there is only 1 co-marked item, then cosine similarity will always return 1
#use Euclidean Distance instead
#print("EUCLIDEAN")
similarity = 1/(1+dist(rbind(neighbor.filtered,passenger.filtered)))
}
else {
similarity = 0
}
return(similarity)
}
#create Pearson Corellation Function
#Load data
##############################################################
train.data.file <- "train.csv" #training data
test.data.file <- "test.csv" #test data
missing.types <- c("NA","") #what to fill for missing types
#define the data types we want to use for each column
train.column.types <- c(
'integer', # PassengerId
'factor', # Survived
'numeric', # Pclass
'character', # Name
'factor', # Sex
'numeric', # Age
'integer', # SibSp
'integer', # Parch
'character', # Ticket
'numeric', # Fare
'character', # Cabin
'factor' # Embarked
)
#test data doesn't include the Survived column
test.column.types <- train.column.types[-2]
#load data
train.raw <- readData(train.data.file, train.column.types, missing.types)
test.raw <- readData(test.data.file,test.column.types,missing.types)
df.train = train.raw
df.test = test.raw
#force some columns from factor to numeric
df.train$Sex = as.numeric(df.train$Sex) #1 = female, 2 = male
df.train$Embarked = as.numeric(df.train$Embarked) #1 = C, 2 = Q, 3 = S
df.test$Sex = as.numeric(df.test$Sex) #1 = female, 2 = male
df.test$Embarked = as.numeric(df.test$Embarked) #1 = C, 2 = Q, 3 = S
#pick out which columns we want to use for cosine similarity
#the columns we chose are all ones that can be mapped to a value
#AND seemd to have a good deal of significance on survival
columns = c("Pclass","Sex" ,"Age" , "SibSp" ,"Parch" ,"Fare","Embarked")
##RUN ANALYSIS
###########################################################
test_rows = nrow(df.test)
train_rows = nrow(df.train)
predictions = data.frame(PassengerId = integer(test_rows), Survived=integer(test_rows))
for (p in 1:nrow(df.test)) {
print(paste("Passenger: ", p))
similarities = data.frame(Similar=numeric(train_rows), Survived = integer(train_rows))
for (n in 1:nrow(df.train)) {
similarities$Similar[n] = cosineSimilarity(df.test[p,], df.train[n,], columns)
similarities$Survived[n] = df.train$Survived[n]
}
most_similar = subset(similarities, similarities$Similar > .9)
predictions$Survived[p] = round(mean(most_similar$Similar * most_similar$Survived))
predictions$PassengerId[p] = df.test$PassengerId[p]
}
#predctions$Survived has values of 1 and 2, with 2 being survived, and 1 being did not survive
#subtract 1 from this column to get the values we want
predictions$Survived = predictions$Survived - 1
write.csv(predictions, file="CosineSimilarityModel.csv", row.names= FALSE) |
\name{ig.HPCM}
\alias{ig.HPCM}
\title{Human Phenotype Clinical Modifier (HPCM).}
\usage{
ig.HPCM <- dRDataLoader(RData='ig.HPCM')
}
\description{
An R object that contains information on Human Phenotype Clinical Modifier terms. These terms are organised as a directed acyclic graph (DAG), which is further stored as an object of the class 'igraph' (see \url{http://igraph.org/r/doc/aaa-igraph-package.html}). This data is prepared based on \url{http://purl.obolibrary.org/obo/hp.obo}.
}
\value{
an object of class "igraph". As a directed graph, it has attributes attached to its vertices/nodes and edges:
\itemize{
\item{\code{vertex attributes}: "name" (i.e. "Term ID"), "term_id" (i.e. "Term ID"), "term_name" (i.e "Term Name") and "term_distance" (i.e. Term Distance: the distance to the root; always 0 for the root itself)}
\item{\code{edge attributes}: "relation" (either 'is_a' or 'part_of')}
}
}
\references{
Robinson et al. (2012) The Human Phenotype Ontology: a tool for annotating and analyzing human hereditary disease. \emph{Am J Hum Genet}, 83:610-615.
}
\keyword{datasets}
\examples{
ig.HPCM <- dRDataLoader(RData='ig.HPCM')
ig.HPCM
}
| /dnet/1.0.6/man/ig.HPCM.Rd | no_license | jinshaw16/RDataCentre | R | false | false | 1,169 | rd | \name{ig.HPCM}
\alias{ig.HPCM}
\title{Human Phenotype Clinical Modifier (HPCM).}
\usage{
ig.HPCM <- dRDataLoader(RData='ig.HPCM')
}
\description{
An R object that contains information on Human Phenotype Clinical Modifier terms. These terms are organised as a direct acyclic graph (DAG), which is further stored as an object of the class 'igraph' (see \url{http://igraph.org/r/doc/aaa-igraph-package.html}). This data is prepared based on \url{http://purl.obolibrary.org/obo/hp.obo}.
}
\value{
an object of class "igraph". As a direct graph, it has attributes to vertices/nodes and edges:
\itemize{
\item{\code{vertex attributes}: "name" (i.e. "Term ID"), "term_id" (i.e. "Term ID"), "term_name" (i.e "Term Name") and "term_distance" (i.e. Term Distance: the distance to the root; always 0 for the root itself)}
\item{\code{edge attributes}: "relation" (either 'is_a' or 'part_of')}
}
}
\references{
Robinson et al. (2012) The Human Phenotype Ontology: a tool for annotating and analyzing human hereditary disease. \emph{Am J Hum Genet}, 83:610-615.
}
\keyword{datasets}
\examples{
ig.HPCM <- dRDataLoader(RData='ig.HPCM')
ig.HPCM
}
|
#' Get coordinates from a variety of different object classes
#'
#' @param x coordinates. \code{sf} 'POINT' or 'MULTIPOINT', \code{SpatVector}, \code{data.frame}, \code{matrix}, or numeric vector containing the location coordinates
#'
#' @author Joseph Lewis
#'
#' @return \code{matrix} matrix of coordinates
#'
#' @export
get_coordinates <- function(x) {
  if (inherits(x, "sf")) {
    # keep the first two coordinate columns (X/Y) only
    coords <- sf::st_coordinates(x)[, 1:2, drop = FALSE]
  } else if (inherits(x, "SpatVector")) {
    coords <- terra::crds(x)
  } else if (inherits(x, "data.frame")) {
    coords <- as.matrix(x)
  } else if (inherits(x, "matrix")) {
    coords <- x
  } else if (is.numeric(x)) {
    # FIX: is.numeric() instead of inherits(x, "numeric"), which is FALSE
    # for integer vectors; a bare vector is interpreted as a single point
    coords <- matrix(x, nrow = 1)
  } else {
    # FIX: previously an unsupported class fell through and produced an
    # obscure "object 'coords' not found" error; fail clearly instead
    stop("Unsupported coordinate input of class: ",
         paste(class(x), collapse = "/"), call. = FALSE)
  }
  return(coords)
}
| /R/get_coordinates.R | no_license | josephlewis/leastcostpath | R | false | false | 741 | r | #' get coordinates from a variety of different object classes
#'
#' @param x coordinates. \code{sf} 'POINT' or 'MULTIPOINT', \code{SpatVector}, \code{data.frame} or \code{matrix} containing the locations coordinates
#'
#' @author Joseph Lewis
#'
#' @return \code{matrix} matrix of coordinates
#'
#' @export
get_coordinates <- function(x) {
if(inherits(x, "sf")) {
coords <- sf::st_coordinates(x)[, 1:2, drop = FALSE]
}
else if (inherits(x, "SpatVector")) {
coords <- terra::crds(x)
}
else if (inherits(x, "data.frame")) {
coords <- as.matrix(x)
}
else if (inherits(x, "matrix")) {
coords <- x
}
else if (inherits(x, "numeric")) {
coords <- matrix(x, nrow = 1)
}
return(coords)
}
|
#!/usr/bin/Rscript
# set up workspace
rm(list=ls())
setwd('~/github/gdelt-to-mids')
source('start.R')
# tree libraries
library(rpart) # Popular decision tree algorithm
library(rattle) # Fancy tree plot
library(rpart.plot) # Enhanced tree plots
library(RColorBrewer) # Color selection for fancy tree plot
library(party) # Alternative decision tree algorithm
library(partykit) # Convert rpart object to BinaryTree
# library(RWeka) # Weka decision tree J48
# load data
setwd(pathData)
load('output4.rda')  # provides `newdata`
dim(newdata)
# keep observations up to 1998 as the training window
# FIX: the original subset used `data$year` before `data` was defined here --
# at that point `data` is the base-R function, so `data$year` errors
# ("object of type 'closure' is not subsettable"); `newdata$year` was intended.
data = newdata[which(newdata$year<=1998), ]
dim(data)
names(data)
head(data)
# todo: name variables more nicely for final trees
# todo: consider rpart.control(minsplit=x, cp=y, xval=n, maxdepth=k)
# dvs: HostlevMax, hostile1, ..., hostile5
summary(data$HostlevMax)
summary(data$hostile4)
summary(data$hostile5)
# compute lags and first-differences
quads = paste("quad", 1:4, sep="")
events = paste("event", 1:20, sep="")
vars = c(quads, events)
lag_vars = paste(vars, ".l1", sep="")
lag_vars
# replace NA in lags with 0
# (rewritten without eval(parse(text=...)): direct [[ ]] column indexing
# does the same assignment safely and readably)
for (lag in lag_vars) {
  data[[lag]][is.na(data[[lag]])] <- 0
}
length(which(is.na(data$event1.l1)))
diff_vars = paste(vars, ".d1", sep="")
diff_vars
# first difference: current value minus its one-period lag
for (i in seq_along(vars)) {
  data[[diff_vars[i]]] <- data[[vars[i]]] - data[[lag_vars[i]]]
}
############
# my model
form_mine = as.formula(hostile4 ~ actorMIL +
quad1p.l1 + quad2p.l1 + quad3p.l1 + quad4p.l1 +
event1.d1 + event2.d1 + event3.d1 + event4.d1 + event5.d1 +
event6.d1 + event7.d1 + event8.d1 + event9.d1 + event10.d1 +
event11.d1+ event12.d1+ event13.d1+ event14.d1+ event15.d1 +
event16.d1+ event17.d1+ event18.d1+ event19.d1+ event20.d1)
# specify a loss matrix or split='gini' or split='information'
# loss_matrix = matrix(c(0,1,100,0), nrow=2, ncol=2)
ctrl = rpart.control(cp=1e-4)
start = Sys.time()
tree_mine = rpart(form_mine, data=data,
method='class', control=ctrl,
parms=list(split='information'))
runtime = Sys.time() - start
runtime
yhat = predict(tree_mine, type='class')
yobs = data$hostile4
length(which(yhat==0 & yobs==1))
sum(as.numeric(yhat!=yobs))
sum(yobs)
sum(as.numeric(yhat!=yobs))/sum(yobs) # 0.865
tree_mine
summary(tree_mine)
printcp(tree_mine)
prp(tree_mine)
rsq.rpart(tree_mine)
# snip.rpart(x, toss)
# prune(x, cp=)
tree_mine_pruned = prune(tree_mine, cp=0.00019)
prp(tree_mine_pruned)
fancyRpartPlot(tree_mine_pruned)
yhat = predict(tree_mine_pruned, type='class')
sum(as.numeric(yhat!=yobs))/sum(yobs) # 0.877
save(tree_mine, file="tree_mine.rda")
save(tree_mine_pruned, file="tree_mine_pruned.rda")
# todo: cross validation
# rpart(xval=k)
# xpred.rpart(tree, xval=k) | /build-trees-final.R | no_license | mcdickenson/gdelt-to-mids | R | false | false | 2,853 | r | #!/usr/bin/Rscript
# set up workspace
rm(list=ls())
setwd('~/github/gdelt-to-mids')
source('start.R')
# tree libraries
library(rpart) # Popular decision tree algorithm
library(rattle) # Fancy tree plot
library(rpart.plot) # Enhanced tree plots
library(RColorBrewer) # Color selection for fancy tree plot
library(party) # Alternative decision tree algorithm
library(partykit) # Convert rpart object to BinaryTree
# library(RWeka) # Weka decision tree J48
# load data
setwd(pathData)
load('output4.rda')
dim(newdata)
data = newdata[which(data$year<=1998), ]
dim(data)
names(data)
head(data)
# todo: name variables more nicely for final trees
# todo: consider rpart.control(minsplit=x, cp=y, xval=n, maxdepth=k)
# dvs: HostlevMax, hostile1, ..., hostile5
summary(data$HostlevMax)
summary(data$hostile4)
summary(data$hostile5)
# compute lags and first-differences
quads = paste("quad", 1:4, sep="")
events = paste("event", 1:20, sep="")
vars = c(quads, events)
lag_vars = paste(vars, ".l1", sep="")
lag_vars
# replace NA in lags with 0
for(i in 1:length(vars)){
lag = lag_vars[i]
command = paste("data$", lag, "[which(is.na(data$", lag, "))] = 0" , sep="")
print(command)
eval(parse(text=command))
}
length(which(is.na(data$event1.l1)))
diff_vars = paste(vars, ".d1", sep="")
diff_vars
for(i in 1:length(vars)){
var = vars[i]
lag = lag_vars[i]
dif = diff_vars[i]
command = paste("data$", dif, " = data$", var, " - data$", lag, sep="")
print(command)
eval(parse(text=command))
}
############
# my model
form_mine = as.formula(hostile4 ~ actorMIL +
quad1p.l1 + quad2p.l1 + quad3p.l1 + quad4p.l1 +
event1.d1 + event2.d1 + event3.d1 + event4.d1 + event5.d1 +
event6.d1 + event7.d1 + event8.d1 + event9.d1 + event10.d1 +
event11.d1+ event12.d1+ event13.d1+ event14.d1+ event15.d1 +
event16.d1+ event17.d1+ event18.d1+ event19.d1+ event20.d1)
# specify a loss matrix or split='gini' or split='information'
# loss_matrix = matrix(c(0,1,100,0), nrow=2, ncol=2)
ctrl = rpart.control(cp=1e-4)
start = Sys.time()
tree_mine = rpart(form_mine, data=data,
method='class', control=ctrl,
parms=list(split='information'))
runtime = Sys.time() - start
runtime
yhat = predict(tree_mine, type='class')
yobs = data$hostile4
length(which(yhat==0 & yobs==1))
sum(as.numeric(yhat!=yobs))
sum(yobs)
sum(as.numeric(yhat!=yobs))/sum(yobs) # 0.865
tree_mine
summary(tree_mine)
printcp(tree_mine)
prp(tree_mine)
rsq.rpart(tree_mine)
# snip.rpart(x, toss)
# prune(x, cp=)
tree_mine_pruned = prune(tree_mine, cp=0.00019)
prp(tree_mine_pruned)
fancyRpartPlot(tree_mine_pruned)
yhat = predict(tree_mine_pruned, type='class')
sum(as.numeric(yhat!=yobs))/sum(yobs) # 0.877
save(tree_mine, file="tree_mine.rda")
save(tree_mine_pruned, file="tree_mine_pruned.rda")
# todo: cross validation
# rpart(xval=k)
# xpred.rpart(tree, xval=k) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{delete_domain}
\alias{delete_domain}
\title{Deletes the specified domain recordset and all of its domain records}
\usage{
delete_domain(domainName)
}
\arguments{
\item{domainName}{[required] The specific domain name to delete.}
}
\description{
Deletes the specified domain recordset and all of its domain records.
}
\details{
The \code{delete domain} operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
}
\section{Accepted Parameters}{
\preformatted{delete_domain(
domainName = "string"
)
}
}
| /service/paws.lightsail/man/delete_domain.Rd | permissive | CR-Mercado/paws | R | false | true | 827 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{delete_domain}
\alias{delete_domain}
\title{Deletes the specified domain recordset and all of its domain records}
\usage{
delete_domain(domainName)
}
\arguments{
\item{domainName}{[required] The specific domain name to delete.}
}
\description{
Deletes the specified domain recordset and all of its domain records.
}
\details{
The \code{delete domain} operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
}
\section{Accepted Parameters}{
\preformatted{delete_domain(
domainName = "string"
)
}
}
|
# Makeover Monday 2020 week 14: prepare US-only results as a tidy CSV for d3.
# Load library
library(dplyr)
# Get data from data.world (query URL returns the full multi-country CSV)
mom_2020w14 <- read.csv("https://query.data.world/s/f7tarmnf7pafmzypgdaagw52pjqobz")
# Filter for USA
us_results <- filter(mom_2020w14,Country == "United States of America")
# Reduce, reorder and rename columns
# NOTE(review): c(1,3,6,5,8,7) are magic positional indices tied to the source
# file's column layout -- confirm against the downloaded schema if it changes.
us_results <- us_results[,c(1,3,6,5,8,7)]
names(us_results) <- tolower(names(us_results))
names(us_results)[4] <- 'time_use'
names(us_results)[6] <- 'avg_time_hours'
# Write results to csv for d3 (no row names, so d3 sees clean headers only)
write.csv(us_results,"mom_2020w14_us_results.csv",row.names = FALSE)
write.csv(us_results,"mom_2020w14_us_results.csv",row.names = FALSE) | /2020w14/data_prep.R | permissive | wjsutton/Makeover-Monday | R | false | false | 536 | r | # Load library
library(dplyr)
# Get data from data.world
mom_2020w14 <- read.csv("https://query.data.world/s/f7tarmnf7pafmzypgdaagw52pjqobz")
# Filter for USA
us_results <- filter(mom_2020w14,Country == "United States of America")
# Reduce, reorder and rename columns
us_results <- us_results[,c(1,3,6,5,8,7)]
names(us_results) <- tolower(names(us_results))
names(us_results)[4] <- 'time_use'
names(us_results)[6] <- 'avg_time_hours'
# Write results to csv for d3
write.csv(us_results,"mom_2020w14_us_results.csv",row.names = FALSE) |
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrapper around the compiled C++ routine registered as 'MELD_cppTest'.
# invisible() suppresses auto-printing of the .Call result at the console.
# NOTE: this file is generated by Rcpp::compileAttributes -- do not hand-edit
# the code; regenerate instead.
cppTest <- function() {
    invisible(.Call('MELD_cppTest', PACKAGE = 'MELD'))
}
| /R/RcppExports.R | no_license | kath-o-reilly/MELD | R | false | false | 192 | r | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
cppTest <- function() {
invisible(.Call('MELD_cppTest', PACKAGE = 'MELD'))
}
|
# tests for sweep
# rrt_sweep() should remove installed packages and downloaded sources from an
# RRT repository while leaving the repository skeleton (code, manifest) intact.
context("sweep")
opts <- options(verbose=FALSE)
rrt_path <- "~/rrttemp"
source(system.file("tests/testthat/0-common-functions.R", package="RRT"))
cleanRRTfolder()
options(repos=c(CRAN="http://cran.revolutionanalytics.com/"))
# Set up a repo with one package (stringr) installed so sweep has work to do.
test_that("sweep works as expected", {
  checkpoint(repo=rrt_path, snapshotdate="2014-08-01", verbose = FALSE)
  cat("library(stringr)", file=file.path(rrt_path, "code.R"))
  checkpoint(repo=rrt_path)
  expect_true(is_rrt(rrt_path, FALSE))
  expect_false(is_rrt("~/", FALSE))
  expect_equal(
    list.files(rrt_path),
    c("code.R", "manifest.yml", "rrt")
  )
  # "src" holds downloaded sources, not installed packages, so exclude it
  installed <- list.files(rrtPath(rrt_path, "lib"))
  installed_pre <- installed[!installed %in% "src"]
  src_pre <- list.files(rrtPath(rrt_path, "src"))
  expect_equal(installed_pre, c("stringr"))
})
# NOTE(review): each expect_message() re-runs rrt_sweep(); the later calls are
# assumed to be idempotent no-ops once the first sweep has emptied the repo.
test_that("sweep returns messages", {
  expect_message(
    rrt_sweep(repo=rrt_path),
    "Checking to make sure repository exists"
  )
  expect_message(
    rrt_sweep(repo=rrt_path),
    "Package sources removed"
  )
  expect_message(
    rrt_sweep(repo=rrt_path),
    "Checking to make sure rrt directory exists inside your repository"
  )
  expect_equal(
    list.files(rrt_path),
    c("code.R", "manifest.yml", "rrt")
  )
})
test_that("sweep actually removes installed packages and sources", {
  options(verbose=FALSE)
  # after sweeping, lib (minus "src") and src should both be empty
  installed <- list.files(rrtPath(rrt_path, "lib"))
  installed_post <- installed[!installed %in% "src"]
  src_post <- list.files(rrtPath(rrt_path, "src"))
  expect_equal(installed_post, character(0))
  expect_equal(src_post, character(0))
})
# cleanup: restore the original verbose option and remove the temporary repo
options(opts)
cleanRRTfolder()
| /tests/testthat/test-9-rrt-sweep.R | no_license | revodavid/checkpoint | R | false | false | 1,642 | r | # tests for sweep
context("sweep")
opts <- options(verbose=FALSE)
rrt_path <- "~/rrttemp"
source(system.file("tests/testthat/0-common-functions.R", package="RRT"))
cleanRRTfolder()
options(repos=c(CRAN="http://cran.revolutionanalytics.com/"))
test_that("sweep works as expected", {
checkpoint(repo=rrt_path, snapshotdate="2014-08-01", verbose = FALSE)
cat("library(stringr)", file=file.path(rrt_path, "code.R"))
checkpoint(repo=rrt_path)
expect_true(is_rrt(rrt_path, FALSE))
expect_false(is_rrt("~/", FALSE))
expect_equal(
list.files(rrt_path),
c("code.R", "manifest.yml", "rrt")
)
installed <- list.files(rrtPath(rrt_path, "lib"))
installed_pre <- installed[!installed %in% "src"]
src_pre <- list.files(rrtPath(rrt_path, "src"))
expect_equal(installed_pre, c("stringr"))
})
test_that("sweep returns messages", {
expect_message(
rrt_sweep(repo=rrt_path),
"Checking to make sure repository exists"
)
expect_message(
rrt_sweep(repo=rrt_path),
"Package sources removed"
)
expect_message(
rrt_sweep(repo=rrt_path),
"Checking to make sure rrt directory exists inside your repository"
)
expect_equal(
list.files(rrt_path),
c("code.R", "manifest.yml", "rrt")
)
})
test_that("sweep actually removes installed packages and sources", {
options(verbose=FALSE)
installed <- list.files(rrtPath(rrt_path, "lib"))
installed_post <- installed[!installed %in% "src"]
src_post <- list.files(rrtPath(rrt_path, "src"))
expect_equal(installed_post, character(0))
expect_equal(src_post, character(0))
})
# cleanup
options(opts)
cleanRRTfolder()
|
# data_processing2: clean up memo.txt line by line and write the result out.
memo <- readLines("data/memo.txt",encoding = "UTF-8")
memo
# Line 1: strip the special characters & $ ! # @ %
# BUG FIX: the original class "[&|$|!|#|@|%]" treated "|" as alternation, but
# inside [] it is a literal, so pipe characters were silently removed as well.
memo[1] <- gsub("[&$!#@%]",replacement = "", x=memo[1])
# Line 2: replace every lowercase letter with "E"
memo[2] <- gsub("[[:lower:]]","E",x=memo[2])
# Line 3: drop all digits
memo[3] <- gsub("[[:digit:]]","",x=memo[3])
# Line 4: drop all ASCII letters
# BUG FIX: "[A-z]" also matches the punctuation [ \ ] ^ _ ` that sits between
# "Z" and "a" in ASCII; "[A-Za-z]" matches letters only.
memo[4] <- gsub("[A-Za-z]","",x=memo[4])
# Line 5: drop letters, punctuation and digits. Equivalent to the original
# "[A-z|[:punct:]|[:digit:]]" -- the stray range/pipe characters it matched
# are all punctuation and remain covered by [:punct:].
memo[5] <- gsub("[[:alpha:][:punct:][:digit:]]","",x=memo[5])
# Line 6: remove all whitespace (same as gsub("[[:space:]]","",x=memo[6]))
memo[6] <- gsub("\\s","",x=memo[6])
# Line 7: lowercase "YOU" within the first 10 chars, "OK" in the remainder.
# NOTE(review): paste() inserts a space between the two halves (default
# sep = " "); confirm that is intended before switching to paste0().
memo[7] <- paste(gsub("YOU","you",x=substr(memo[7],start = 1,stop = 10)),gsub("OK","ok",x=substring(memo[7],11)))
# Then lowercase any remaining capitals with a perl replacement (\L = lowercase)
memo[7] <- gsub(pattern= '([[:upper:]])', perl=T, replacement='\\L\\1', memo[7])
write.table(memo,file = "memo_new3.txt",sep = "\n",fileEncoding = "UTF-8",row.names = F,col.names = F )
#case2
| /실습/data_processing2.R | no_license | yummygyudon/R_Self | R | false | false | 764 | r | #data_processing2
memo <- readLines("data/memo.txt",encoding = "UTF-8")
memo
#gsub("[[:punct:]]",replacement = "", x=memo[1])
memo[1] <- gsub("[&|$|!|#|@|%]",replacement = "", x=memo[1])
memo[2] <- gsub("[[:lower:]]","E",x=memo[2])
memo[3] <- gsub("[[:digit:]]","",x=memo[3])
memo[4] <- gsub("[A-z]","",x=memo[4])
memo[5] <- gsub("[A-z|[:punct:]|[:digit:]]","",x=memo[5])
memo[6] <- gsub("\\s","",x=memo[6]) #gsub("[[:space:]]","",x=memo[6])
memo[7] <- paste(gsub("YOU","you",x=substr(memo[7],start = 1,stop = 10)),gsub("OK","ok",x=substring(memo[7],11)))
memo[7] <- gsub(pattern= '([[:upper:]])', perl=T, replacement='\\L\\1', memo[7])
write.table(memo,file = "memo_new3.txt",sep = "\n",fileEncoding = "UTF-8",row.names = F,col.names = F )
#case2
|
# Load packages
install.packages("NHANES")
library(NHANES)
library(ggplot2)
# What are the variables in the NHANES dataset?
colnames(NHANES)
# Create bar plot for Home Ownership by Gender
ggplot(NHANES, aes(x = Gender, fill = HomeOwn)) +
# Set the position to fill
geom_bar(position = "fill") +
ylab("Relative frequencies")
# Density plot of SleepHrsNight colored by SleepTrouble
ggplot(NHANES, aes(x = SleepHrsNight, color = SleepTrouble)) +
# Adjust by 2
geom_density(adjust = 2) +
# Facet by HealthGen
facet_wrap(~ HealthGen)
# As seen in the video, natural variability can be modeled from shuffling observations around to remove any relationships that might exist in the population. However, before you permute the data, you need to calculate the original observed statistic. In this exercise, you will calculate the difference in proportion of home owners who are men versus women.
install.packages("infer")
library(infer)
homes <- NHANES %>%
# Select Gender and HomeOwn
select(Gender, HomeOwn) %>%
# Filter for HomeOwn equal to "Own" or "Rent"
filter(HomeOwn %in% c("Own", "Rent"))
# Find the observed difference in proportions of men who own and women who own.
diff_orig <- homes %>%
# Group by gender
group_by(Gender) %>%
# Summarize proportion of homeowners
summarize(prop_own = mean(HomeOwn == "Own")) %>%
# Summarize difference in proportion of homeowners
summarize(obs_diff_prop = diff(prop_own)) # male - female
# The infer package will allow you to model a particular null hypothesis and then randomize the data to calculate permuted statistics. In this exercise, after specifying your null hypothesis you will permute the home ownership variable 10 times. By doing so, you will ensure that there is no relationship between home ownership and gender, so any difference in home ownership proportion for female versus male will be due only to natural variability.
# This exercise will demonstrate the first three steps from the infer package: + specify will specify the response and explanatory variables. + hypothesize will declare the null hypothesis. + generate will generate resamples, permutations, or simulations.
# Specify variables
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own")
# Print results to console
homeown_perm
# Hypothesize independence
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence")
# Print results to console
homeown_perm
# Perform 10 permutations
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence") %>%
generate(reps = 10, type = "permute")
# Print results to console
homeown_perm
# Perform 100 permutations
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence") %>%
generate(reps = 100, type = "permute") %>%
calculate("diff in props", order = c("male", "female"))
# Print results to console
homeown_perm
# Dotplot of 100 permuted differences in proportions
ggplot(homeown_perm, aes(x = stat)) +
geom_dotplot(binwidth = 0.001)
# Using 100 repetitions allows you to understand the mechanism of permuting. However, 100 is not enough to observe the full range of likely values for the null differences in proportions.
# Recall the four steps of inference. These are the same four steps that will be used in all inference exercises in this course and future statistical inference courses. Use the names of the functions to help you recall the analysis process. + specify will specify the response and explanatory variables. + hypothesize will declare the null hypothesis. + generate will generate resamples, permutations, or simulations. + calculate will calculate summary statistics.
# Perform 1000 permutations
homeown_perm <- homes %>%
# Specify HomeOwn vs. Gender, with `"Own" as success
specify(HomeOwn ~ Gender, success = "Own") %>%
# Use a null hypothesis of independence
hypothesize(null = "independence") %>%
# Generate 1000 repetitions (by permutation)
generate(reps = 1000, type = "permute") %>%
# Calculate the difference in proportions (male then female)
calculate("diff in props", order = c("male", "female"))
# Density plot of 1000 permuted differences in proportions
ggplot(homeown_perm, aes(x = stat)) +
geom_density()
# You can now see that the distribution is approximately normally distributed around -0.01, but what can we conclude from it?
# Plot the permuted differences against the observed difference.
# BUG FIX: homeown_perm (built with infer::calculate()) has no `diff_perm`
# column -- the permuted statistic lives in `stat`. `diff_orig` is a one-row
# summarize() result, so its numeric column (obs_diff_prop) must be extracted
# before it can be used in comparisons.
ggplot(homeown_perm, aes(x = stat)) +
  # Add a density layer
  geom_density() +
  # Add a vline layer at the observed difference
  geom_vline(aes(xintercept = obs_diff_prop), data = diff_orig, color = "red")
# Compare permuted differences to observed difference: how many permuted
# statistics are at least as small as the observed one?
homeown_perm %>%
  summarize(n_extreme = sum(stat <= diff_orig$obs_diff_prop))
disc <- read_csv('disc.csv')
disc %>%
# Count the rows by promote and sex
count(promote, sex)
# Find proportion of each sex who were promoted
disc %>%
# Group by sex
group_by(sex) %>%
# Calculate proportion promoted summary stat
summarize(promoted_prop = mean(promote == "promoted"))
# Replicate the entire data frame, permuting the promote variable
disc_perm <- disc %>%
specify(promote ~ sex, success = "promoted") %>%
hypothesize(null = "independence") %>%
generate(reps = 5, type = "permute")
disc_perm %>%
# Group by replicate
group_by(replicate) %>%
# Count per group
count(promote, sex)
disc_perm %>%
# Calculate difference in proportion, male then female
calculate(stat = "diff in props", order = c("male", "female"))
# Recall that we are considering a situation where the number of men and women are fixed (representing the resumes) and the number of people promoted is fixed (the managers were able to promote only 35 individuals).
#
# In this exercise, you'll create a randomization distribution of the null statistic with 1000 replicates as opposed to just 5 in the previous exercise. As a reminder, the statistic of interest is the difference in proportions promoted between genders (i.e. proportion for males minus proportion for females). From the original dataset, you can calculate how the promotion rates differ between males and females. Using the specify-hypothesis-generate-calculate workflow in infer, you can calculate the same statistic, but instead of getting a single number, you get a whole distribution. In this exercise, you'll compare that single number from the original dataset to the distribution made by the simulation.
# Calculate the observed difference in promotion rate
diff_orig <- disc %>%
# Group by sex
group_by(sex) %>%
# Summarize to calculate fraction promoted
summarize(prop_prom = mean(promote == "promoted")) %>%
# Summarize to calculate difference
summarize(stat = diff(prop_prom)) %>%
pull() #pulls out just the value
# See the result
diff_orig
# Create data frame of permuted differences in promotion rates
disc_perm <- disc %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Using permutation data, plot stat
ggplot(disc_perm, aes(x = stat)) +
# Add a histogram layer
geom_histogram(binwidth = 0.01) +
# Using original data, add a vertical line at stat
geom_vline(aes(xintercept = diff_orig), color = "red")
disc_perm %>%
summarize(
# Find the 0.9 quantile of diff_perm's stat
q.90 = quantile(stat, p = 0.9),
# ... and the 0.95 quantile
q.95 = quantile(stat, p = 0.95),
# ... and the 0.99 quantile
q.99 = quantile(stat, p = 0.99)
)
# For the discrimination data, the question at hand is whether or not women were promoted less often than men. However, there are often scenarios where the research question centers around a difference without directionality.
#
# For example, you might be interested in whether the rate of promotion for men and women is different. In that case, a difference in proportions of -0.29 is just as "extreme" as a difference of positive 0.29.
#
# If you had seen that women were promoted more often, what would the other side of the distribution of permuted differences look like? That is, what are the smallest (negative) values of the distribution of permuted differences?
# Use disc_perm
disc_perm %>%
# ... to calculate summary stats
summarize(
# Find the 0.01 quantile of stat
q.01 = quantile(stat, p = 0.01),
# ... and 0.05
q.05 = quantile(stat, p = 0.05),
# ... and 0.1
q.10 = quantile(stat, p = 0.1)
)
disc_small <- readRDS('disc_small.rds')
disc_big <- readRDS('disc_big.rds')
# Tabulate the small dataset
disc_small %>%
# Select sex and promote
select(sex, promote) %>%
count(sex, promote)
# Do the same for disc_big
disc_big %>%
# Select sex and promote
select(sex, promote) %>%
count(sex, promote)
diff_orig_small <- 0.25
disc_perm_small <- disc_small %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Using disc_perm_small, plot stat
ggplot(disc_perm_small, aes(x = stat)) +
# Add a histogram layer with binwidth 0.01
geom_histogram(binwidth = 0.01) +
# Add a vline layer, crossing x-axis at diff_orig_small
geom_vline(aes(xintercept = diff_orig_small), color = "red")
diff_orig_big <- 0.2916667
disc_perm_big <- disc_big %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Swap the dataset to disc_perm_big
ggplot(disc_perm_big, aes(x = stat)) +
geom_histogram(binwidth = 0.01) +
# Change the x-axis intercept to diff_orig_big
geom_vline(aes(xintercept = diff_orig_big), color = "red")
# Summarise the upper tail of a permutation distribution.
# `dataset` is expected to carry a `stat` column (as produced by
# infer::calculate()); returns a one-row tibble with the 0.90, 0.95 and 0.99
# quantiles of the permuted statistics.
# NOTE: quantile()'s argument is `probs`; `p =` works only via R's partial
# argument matching.
calc_upper_quantiles <- function(dataset) {
  dataset %>%
    summarize(
      q.90 = quantile(stat, p = 0.90),
      q.95 = quantile(stat, p = 0.95),
      q.99 = quantile(stat, p = 0.99)
    )
}
# Recall the quantiles associated with the original dataset
calc_upper_quantiles(disc_perm)
# Calculate the quantiles associated with the small dataset
calc_upper_quantiles(disc_perm_small)
# Calculate the quantiles associated with the big dataset
calc_upper_quantiles(disc_perm_big)
# In the video, you learned that a p-value measures the degree of disagreement between the data and the null hypothesis. Here, you will calculate the p-value for the original discrimination dataset as well as the small and big versions, disc_small and disc_big.
#
# Recall that you're only interested in the one-sided hypothesis test here. That is, you're trying to answer the question, "Are men more likely to be promoted than women?"
# Visualize and calculate the p-value for the original dataset
disc_perm %>%
visualize(obs_stat = diff_orig, direction = "greater")
disc_perm %>%
summarize(p_value = mean(diff_orig <= stat))
# Visualize and calculate the p-value for the small dataset
disc_perm_small %>%
visualize(obs_stat = diff_orig_small, direction = "greater")
disc_perm_small %>%
summarize(p_value = mean(diff_orig_small <= stat))
# Visualize and calculate the p-value for the original dataset
disc_perm_big %>%
visualize(obs_stat = diff_orig_big, direction = "greater")
disc_perm_big %>%
summarize(p_value = mean(diff_orig_big <= stat))
# can play around with "two-sided", "less" for direction
disc_new <- readRDS('disc_new.rds')
# Recall the original data
disc %>%
count(sex, promote)
# Tabulate the new data
disc_new %>%
count(sex, promote)
diff_orig_new <- 0.04166667
# Recall the distribution of the original permuted differences
ggplot(disc_perm, aes(x = stat)) +
geom_histogram() +
geom_vline(aes(xintercept = diff_orig), color = "red")
# Plot the distribution of the new permuted differences
ggplot(disc_perm_new, aes(x = stat)) +
geom_histogram() +
geom_vline(aes(xintercept = diff_orig_new), color = "red")
# Recall the p-value from the original data
disc_perm %>%
summarize(p_value = mean(diff_orig <= stat))
# Find the p-value from the new data
disc_perm_new %>%
summarize(p_value = mean(diff_orig_new <= stat))
# What if the original research hypothesis had focused on any difference in promotion rates between men and women instead of focusing on whether men are more likely to be promoted than women? In this case, a difference like the one observed would occur twice as often (by chance) because sometimes the difference would be positive and sometimes it would be negative.
# When there is no directionality to the alternative hypothesis, the hypothesis and p-value are considered to be two-sided. In a two-sided setting, the p-value is double the one-sided p-value.
# In this exercise, you'll calculate a two-sided p-value given the original randomization distribution and dataset.
# Calculate the two-sided p-value
disc_perm %>%
summarize(p_value = 2*mean(diff_orig <= stat)) | /archive_2019/r-programming/data-science-track-r/FoundationsOfInference.R | no_license | acjones27/data-science-projects | R | false | false | 13,526 | r | # Load packages
install.packages("NHANES")
library(NHANES)
library(ggplot2)
# What are the variables in the NHANES dataset?
colnames(NHANES)
# Create bar plot for Home Ownership by Gender
ggplot(NHANES, aes(x = Gender, fill = HomeOwn)) +
# Set the position to fill
geom_bar(position = "fill") +
ylab("Relative frequencies")
# Density plot of SleepHrsNight colored by SleepTrouble
ggplot(NHANES, aes(x = SleepHrsNight, color = SleepTrouble)) +
# Adjust by 2
geom_density(adjust = 2) +
# Facet by HealthGen
facet_wrap(~ HealthGen)
# As seen in the video, natural variability can be modeled from shuffling observations around to remove any relationships that might exist in the population. However, before you permute the data, you need to calculate the original observed statistic. In this exercise, you will calculate the difference in proportion of home owners who are men versus women.
install.packages("infer")
library(infer)
homes <- NHANES %>%
# Select Gender and HomeOwn
select(Gender, HomeOwn) %>%
# Filter for HomeOwn equal to "Own" or "Rent"
filter(HomeOwn %in% c("Own", "Rent"))
# Find the observed difference in proportions of men who own and women who own.
diff_orig <- homes %>%
# Group by gender
group_by(Gender) %>%
# Summarize proportion of homeowners
summarize(prop_own = mean(HomeOwn == "Own")) %>%
# Summarize difference in proportion of homeowners
summarize(obs_diff_prop = diff(prop_own)) # male - female
# The infer package will allow you to model a particular null hypothesis and then randomize the data to calculate permuted statistics. In this exercise, after specifying your null hypothesis you will permute the home ownership variable 10 times. By doing so, you will ensure that there is no relationship between home ownership and gender, so any difference in home ownership proportion for female versus male will be due only to natural variability.
# This exercise will demonstrate the first three steps from the infer package: + specify will specify the response and explanatory variables. + hypothesize will declare the null hypothesis. + generate will generate resamples, permutations, or simulations.
# Specify variables
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own")
# Print results to console
homeown_perm
# Hypothesize independence
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence")
# Print results to console
homeown_perm
# Perform 10 permutations
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence") %>%
generate(reps = 10, type = "permute")
# Print results to console
homeown_perm
# Perform 100 permutations
homeown_perm <- homes %>%
specify(HomeOwn ~ Gender, success = "Own") %>%
hypothesize(null = "independence") %>%
generate(reps = 100, type = "permute") %>%
calculate("diff in props", order = c("male", "female"))
# Print results to console
homeown_perm
# Dotplot of 100 permuted differences in proportions
ggplot(homeown_perm, aes(x = stat)) +
geom_dotplot(binwidth = 0.001)
# Using 100 repetitions allows you to understand the mechanism of permuting. However, 100 is not enough to observe the full range of likely values for the null differences in proportions.
# Recall the four steps of inference. These are the same four steps that will be used in all inference exercises in this course and future statistical inference courses. Use the names of the functions to help you recall the analysis process. + specify will specify the response and explanatory variables. + hypothesize will declare the null hypothesis. + generate will generate resamples, permutations, or simulations. + calculate will calculate summary statistics.
# Perform 1000 permutations
homeown_perm <- homes %>%
# Specify HomeOwn vs. Gender, with `"Own" as success
specify(HomeOwn ~ Gender, success = "Own") %>%
# Use a null hypothesis of independence
hypothesize(null = "independence") %>%
# Generate 1000 repetitions (by permutation)
generate(reps = 1000, type = "permute") %>%
# Calculate the difference in proportions (male then female)
calculate("diff in props", order = c("male", "female"))
# Density plot of 1000 permuted differences in proportions
ggplot(homeown_perm, aes(x = stat)) +
geom_density()
# You can now see that the distribution is approximately normally distributed around -0.01, but what can we conclude from it?
# Plot permuted differences, diff_perm
ggplot(homeown_perm, aes(x = diff_perm)) +
# Add a density layer
geom_density() +
# Add a vline layer with intercept diff_orig
geom_vline(aes(xintercept = diff_orig), color = "red")
# Compare permuted differences to observed difference
homeown_perm %>%
summarize(sum(diff_perm <= diff_orig))
disc <- read_csv('disc.csv')
disc %>%
# Count the rows by promote and sex
count(promote, sex)
# Find proportion of each sex who were promoted
disc %>%
# Group by sex
group_by(sex) %>%
# Calculate proportion promoted summary stat
summarize(promoted_prop = mean(promote == "promoted"))
# Replicate the entire data frame, permuting the promote variable
disc_perm <- disc %>%
specify(promote ~ sex, success = "promoted") %>%
hypothesize(null = "independence") %>%
generate(reps = 5, type = "permute")
disc_perm %>%
# Group by replicate
group_by(replicate) %>%
# Count per group
count(promote, sex)
disc_perm %>%
# Calculate difference in proportion, male then female
calculate(stat = "diff in props", order = c("male", "female"))
# Recall that we are considering a situation where the number of men and women are fixed (representing the resumes) and the number of people promoted is fixed (the managers were able to promote only 35 individuals).
#
# In this exercise, you'll create a randomization distribution of the null statistic with 1000 replicates as opposed to just 5 in the previous exercise. As a reminder, the statistic of interest is the difference in proportions promoted between genders (i.e. proportion for males minus proportion for females). From the original dataset, you can calculate how the promotion rates differ between males and females. Using the specify-hypothesis-generate-calculate workflow in infer, you can calculate the same statistic, but instead of getting a single number, you get a whole distribution. In this exercise, you'll compare that single number from the original dataset to the distribution made by the simulation.
# Calculate the observed difference in promotion rate
diff_orig <- disc %>%
# Group by sex
group_by(sex) %>%
# Summarize to calculate fraction promoted
summarize(prop_prom = mean(promote == "promoted")) %>%
# Summarize to calculate difference
summarize(stat = diff(prop_prom)) %>%
pull() #pulls out just the value
# See the result
diff_orig
# Create data frame of permuted differences in promotion rates
disc_perm <- disc %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Using permutation data, plot stat
ggplot(disc_perm, aes(x = stat)) +
# Add a histogram layer
geom_histogram(binwidth = 0.01) +
# Using original data, add a vertical line at stat
geom_vline(aes(xintercept = diff_orig), color = "red")
disc_perm %>%
summarize(
# Find the 0.9 quantile of diff_perm's stat
q.90 = quantile(stat, p = 0.9),
# ... and the 0.95 quantile
q.95 = quantile(stat, p = 0.95),
# ... and the 0.99 quantile
q.99 = quantile(stat, p = 0.99)
)
# For the discrimination data, the question at hand is whether or not women were promoted less often than men. However, there are often scenarios where the research question centers around a difference without directionality.
#
# For example, you might be interested in whether the rate of promotion for men and women is different. In that case, a difference in proportions of -0.29 is just as "extreme" as a difference of positive 0.29.
#
# If you had seen that women were promoted more often, what would the other side of the distribution of permuted differences look like? That is, what are the smallest (negative) values of the distribution of permuted differences?
# Use disc_perm
disc_perm %>%
# ... to calculate summary stats
summarize(
# Find the 0.01 quantile of stat
q.01 = quantile(stat, p = 0.01),
# ... and 0.05
q.05 = quantile(stat, p = 0.05),
# ... and 0.1
q.10 = quantile(stat, p = 0.1)
)
disc_small <- readRDS('disc_small.rds')
disc_big <- readRDS('disc_big.rds')
# Tabulate the small dataset
disc_small %>%
# Select sex and promote
select(sex, promote) %>%
count(sex, promote)
# Do the same for disc_big
disc_big %>%
# Select sex and promote
select(sex, promote) %>%
count(sex, promote)
diff_orig_small <- 0.25
disc_perm_small <- disc_small %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Using disc_perm_small, plot stat
ggplot(disc_perm_small, aes(x = stat)) +
# Add a histogram layer with binwidth 0.01
geom_histogram(binwidth = 0.01) +
# Add a vline layer, crossing x-axis at diff_orig_small
geom_vline(aes(xintercept = diff_orig_small), color = "red")
diff_orig_big <- 0.2916667
disc_perm_big <- disc_big %>%
# Specify promote vs. sex
specify(promote ~ sex, success = "promoted") %>%
# Set null hypothesis as independence
hypothesize(null = "independence") %>%
# Generate 1000 permutations
generate(reps = 1000, type = "permute") %>%
# Calculate difference in proportions
calculate(stat = "diff in props", order = c("male", "female"))
# Swap the dataset to disc_perm_big
ggplot(disc_perm_big, aes(x = stat)) +
geom_histogram(binwidth = 0.01) +
# Change the x-axis intercept to diff_orig_big
geom_vline(aes(xintercept = diff_orig_big), color = "red")
# Report the upper tail of a permutation distribution.
#
# @param dataset data frame with a numeric `stat` column (e.g. the output of
#   infer's specify/hypothesize/generate/calculate pipeline).
# @return a one-row data frame with the 0.90, 0.95 and 0.99 quantiles of
#   `stat`, named q.90 / q.95 / q.99.
calc_upper_quantiles <- function(dataset) {
  dataset %>%
    summarize(
      # Spell out `probs`: the original relied on partial argument matching
      # (`p =`), which is fragile and flagged by lintr.
      q.90 = quantile(stat, probs = 0.90),
      q.95 = quantile(stat, probs = 0.95),
      q.99 = quantile(stat, probs = 0.99)
    )
}
# Recall the quantiles associated with the original dataset
calc_upper_quantiles(disc_perm)
# Calculate the quantiles associated with the small dataset
calc_upper_quantiles(disc_perm_small)
# Calculate the quantiles associated with the big dataset
calc_upper_quantiles(disc_perm_big)
# In the video, you learned that a p-value measures the degree of disagreement between the data and the null hypothesis. Here, you will calculate the p-value for the original discrimination dataset as well as the small and big versions, disc_small and disc_big.
#
# Recall that you're only interested in the one-sided hypothesis test here. That is, you're trying to answer the question, "Are men more likely to be promoted than women?"
# Visualize and calculate the p-value for the original dataset
disc_perm %>%
visualize(obs_stat = diff_orig, direction = "greater")
disc_perm %>%
summarize(p_value = mean(diff_orig <= stat))
# Visualize and calculate the p-value for the small dataset
disc_perm_small %>%
visualize(obs_stat = diff_orig_small, direction = "greater")
disc_perm_small %>%
summarize(p_value = mean(diff_orig_small <= stat))
# Visualize and calculate the p-value for the original dataset
disc_perm_big %>%
visualize(obs_stat = diff_orig_big, direction = "greater")
disc_perm_big %>%
summarize(p_value = mean(diff_orig_big <= stat))
# can play around with "two-sided", "less" for direction
disc_new <- readRDS('disc_new.rds')
# Recall the original data
disc %>%
  count(sex, promote)
# Tabulate the new data
disc_new %>%
  count(sex, promote)
diff_orig_new <- 0.04166667
# Build the permutation distribution for the new data.
# Fix: `disc_perm_new` was used below without ever being created; mirror the
# specify-hypothesize-generate-calculate pipeline used for `disc_perm`.
disc_perm_new <- disc_new %>%
  specify(promote ~ sex, success = "promoted") %>%
  hypothesize(null = "independence") %>%
  generate(reps = 1000, type = "permute") %>%
  calculate(stat = "diff in props", order = c("male", "female"))
# Recall the distribution of the original permuted differences
ggplot(disc_perm, aes(x = stat)) +
  geom_histogram() +
  geom_vline(aes(xintercept = diff_orig), color = "red")
# Plot the distribution of the new permuted differences
ggplot(disc_perm_new, aes(x = stat)) +
  geom_histogram() +
  geom_vline(aes(xintercept = diff_orig_new), color = "red")
# Recall the p-value from the original data
disc_perm %>%
  summarize(p_value = mean(diff_orig <= stat))
# Find the p-value from the new data
disc_perm_new %>%
  summarize(p_value = mean(diff_orig_new <= stat))
# What if the original research hypothesis had focused on any difference in promotion rates between men and women instead of focusing on whether men are more likely to be promoted than women? In this case, a difference like the one observed would occur twice as often (by chance) because sometimes the difference would be positive and sometimes it would be negative.
# When there is no directionality to the alternative hypothesis, the hypothesis and p-value are considered to be two-sided. In a two-sided setting, the p-value is double the one-sided p-value.
# In this exercise, you'll calculate a two-sided p-value given the original randomization distribution and dataset.
# Calculate the two-sided p-value
disc_perm %>%
summarize(p_value = 2*mean(diff_orig <= stat)) |
#there are over 500 families in baseline payments data and 1114 female householders in family composition data
#compare by merging a la Stata
bp <- read_excel("W:/WU/Projekte/mincome/Mincome/Data/base_pay.data_revised_Dec 11, 2019.xlsx")
familydata <- read_excel("W:/WU/Projekte/mincome/Mincome/Data/familydata.xlsx")
# Emulate Stata's `merge`: outer-join x and y on `by` and tag each row in a
# new `merge` column as "matched" (key in both tables), "master" (key only in
# x) or "using" (key only in y).
stata.merge <- function(x, y, by = intersect(names(x), names(y))) {
  # Sentinel: recode NA to Inf so that, after merging, complete.cases()
  # flags only the NAs introduced by non-matching keys.
  x[is.na(x)] <- Inf
  y[is.na(y)] <- Inf

  # Merge with the requested join type, keep rows by completeness, and tag.
  tag_rows <- function(keep_complete, tag, ...) {
    merged <- merge(x, y, by.x = by, by.y = by, ...)
    keep <- complete.cases(merged)
    if (!keep_complete) keep <- !keep
    merged <- merged[keep, ]
    merged$merge <- tag
    merged
  }

  out <- rbind(
    tag_rows(TRUE,  "matched", all = TRUE),    # keys present in both tables
    tag_rows(FALSE, "master",  all.x = TRUE),  # keys only in x
    tag_rows(FALSE, "using",   all.y = TRUE)   # keys only in y
  )

  # Undo the sentinel substitution on the way out.
  out[sapply(out, is.infinite)] <- NA
  out
}
bp$FAMNUM <- bp$`Fam Num`
bp$FAMNUM <- as.factor(bp$FAMNUM)
familydata$FAMNUM<- familydata$FAMNUM...1
familydata$FAMNUM <- as.factor(familydata$FAMNUM)
bp <- bp[which(bp$`WPG Site = 1` == 1), ]
CHECK <- stata.merge(familydata,bp, by = "FAMNUM")
CHECK <- CHECK[which(CHECK$merge != "matched"), ]
length(which(CHECK$merge == "using"))
length(which(CHECK$merge == "master"))
length(which(CHECK$merge == "matched"))
CHECK$FAMNUM[which(CHECK$merge == "using")]
CHECK$FAMNUM[which(CHECK$merge == "master")]
| /Mincome/Codes/Data Cleaning/check_overlaps.R | no_license | DrSnowtree/MINCOME | R | false | false | 1,466 | r |
#there are over 500 families in baseline payments data and 1114 female householders in family composition data
#compare by merging a la Stata
bp <- read_excel("W:/WU/Projekte/mincome/Mincome/Data/base_pay.data_revised_Dec 11, 2019.xlsx")
familydata <- read_excel("W:/WU/Projekte/mincome/Mincome/Data/familydata.xlsx")
# Emulate Stata's `merge` command: outer-join x and y on `by`, tagging each
# row as "matched" (key in both tables), "master" (key only in x), or
# "using" (key only in y) in a new `merge` column.
#
# NOTE(review): NA values are temporarily recoded to Inf so complete.cases()
# only detects NAs created by the join itself. Consequently any genuine Inf
# in the data would come back as NA — confirm inputs are finite. On a
# character column, assigning Inf yields the string "Inf", which is.infinite
# will not detect later — confirm data columns are numeric/factor.
stata.merge <- function(x,y, by = intersect(names(x), names(y))){
# Sentinel-substitute NAs so they survive complete.cases() below.
x[is.na(x)] <- Inf
y[is.na(y)] <- Inf
# Full outer join; rows still complete after the join matched in both tables.
matched <- merge(x, y, by.x = by, by.y = by, all = TRUE)
matched <- matched[complete.cases(matched),]
matched$merge <- "matched"
# Left join; incomplete rows have keys only in x (the "master" data).
master <- merge(x, y, by.x = by, by.y = by, all.x = TRUE)
master <- master[!complete.cases(master),]
master$merge <- "master"
# Right join; incomplete rows have keys only in y (the "using" data).
using <- merge(x, y, by.x = by, by.y = by, all.y = TRUE)
using <- using[!complete.cases(using),]
using$merge <- "using"
# Stack the three tagged subsets and undo the Inf sentinel.
df <- rbind(matched, master,using)
df[sapply(df, is.infinite)] <- NA
df
}
bp$FAMNUM <- bp$`Fam Num`
bp$FAMNUM <- as.factor(bp$FAMNUM)
familydata$FAMNUM<- familydata$FAMNUM...1
familydata$FAMNUM <- as.factor(familydata$FAMNUM)
bp <- bp[which(bp$`WPG Site = 1` == 1), ]
CHECK <- stata.merge(familydata,bp, by = "FAMNUM")
CHECK <- CHECK[which(CHECK$merge != "matched"), ]
length(which(CHECK$merge == "using"))
length(which(CHECK$merge == "master"))
length(which(CHECK$merge == "matched"))
CHECK$FAMNUM[which(CHECK$merge == "using")]
CHECK$FAMNUM[which(CHECK$merge == "master")]
|
#' ---
#' output: github_document
#' ---
## remember to restart R here!
## make a barchart from the frequency table in data/add-on-packages-freqtable.csv
library(readr)
library(here)
## read that csv into a data frame
## hint: readr::read_csv() or read.csv()
## idea: try using here::here() to create the file path
df <- readr::read_csv(here::here('data/freq_table.csv'))
## quick base-graphics look at the counts
bp <- barplot(df$Freq, names.arg = df$Var1, horiz = TRUE)
library(ggplot2)
## if you use ggplot2, code like this will work:
barplot2 <- ggplot(df, aes(x = Var1, y = Freq)) +
  geom_bar(stat = "identity")
## write this barchart to figs/barplotMM.png
## Fix: ggsave()'s first argument is the *filename*; the plot is passed via
## `plot =`. The original calls passed the plot object as the filename and a
## file path to `path` (which expects a directory), so they errored.
## YES overwrite the file that is there now (ggsave overwrites by default);
## that came from me (Jenny)
ggsave(here::here('figs', 'barplotMM.png'), plot = barplot2)
| /R/03_barchart-packages-built.R | no_license | michaellynnmorris/explore-libraries | R | false | false | 1,035 | r | #' ---
#' output: github_document
#' ---
## remember to restart R here!
## make a barchart from the frequency table in data/add-on-packages-freqtable.csv
library(readr)
library(here)
df <- readr::read_csv(here::here('data/freq_table.csv'))
## read that csv into a data frame
## hint: readr::read_csv() or read.csv()
## idea: try using here::here() to create the file path
bp <- barplot(df$Freq, names.arg = df$Var1, horiz = TRUE)
# write_file(bp, path = here::here('figs/barplotMM.png'))
# png(filename='figs/barplotMM.png')
# plot(bp)
# dev.off()
library(ggplot2)
## if you use ggplot2, code like this will work:
barplot2 <- ggplot(df, aes(x = Var1, y = Freq)) +
geom_bar(stat = "identity")
ggsave(barplot2, path = here::here('figs/barplotMM.png'))
ggsave(barplot2,path = 'figs/barplotMM.png')
## write this barchart to figs/built-barchart.png
## if you use ggplot2, ggsave() will help
## idea: try using here::here() to create the file path
## YES overwrite the file that is there now
## that came from me (Jenny)
|
\name{returns}
\title{Calculations of Financial Returns}
\alias{returns}
\alias{returns.default}
\alias{returns.timeSeries}
% \alias{returns.zoo}
\alias{returnSeries}
\alias{getReturns}
\description{
Functions to calculate financial returns.
}
\usage{
returns(x, \dots)
\method{returns}{default}(x, method = c("continuous", "discrete", "compound", "simple"),
percentage = FALSE, \dots)
\method{returns}{timeSeries}(x, method = c("continuous", "discrete", "compound", "simple"),
percentage = FALSE, na.rm = TRUE, trim = TRUE, \dots)
% \method{returns}{zoo}(x, method = c("continuous", "discrete", "compound", "simple"),
% percentage = FALSE, na.rm = TRUE, trim = TRUE, \dots)
getReturns(\dots)
returnSeries(\dots)
}
\arguments{
\item{percentage}{
a logical value. By default \code{FALSE}, if \code{TRUE} the
series will be expressed in percentage changes.
}
\item{method}{
...
}
\item{na.rm}{
...
}
\item{trim}{
...
}
\item{x}{
an object of class \code{timeSeries}.
}
\item{\dots}{
arguments to be passed.
}
}
\value{
all functions return an object of class \code{timeSeries}.
}
\note{
    The functions \code{returnSeries} and \code{getReturns}
    are synonyms for \code{returns.timeSeries}.
}
\author{
Diethelm Wuertz for the Rmetrics \R-port.
}
\examples{
## data -
# Microsoft Data:
myFinCenter <<- "GMT"
MSFT = as.timeSeries(data(msft.dat))[1:10, 1:4]
head(MSFT)
## returnSeries -
# Continuous Returns:
returns(MSFT)
# Discrete Returns:
returns(MSFT, method = "discrete")
# Don't trim:
returns(MSFT, trim = FALSE)
# Use Percentage Values:
returns(MSFT, percentage = TRUE, trim = FALSE)
}
\keyword{chron}
| /man/returns.Rd | no_license | cran/fSeries | R | false | false | 1,890 | rd | \name{returns}
\title{Calculations of Financial Returns}
\alias{returns}
\alias{returns.default}
\alias{returns.timeSeries}
% \alias{returns.zoo}
\alias{returnSeries}
\alias{getReturns}
\description{
Functions to calculate financial returns.
}
\usage{
returns(x, \dots)
\method{returns}{default}(x, method = c("continuous", "discrete", "compound", "simple"),
percentage = FALSE, \dots)
\method{returns}{timeSeries}(x, method = c("continuous", "discrete", "compound", "simple"),
percentage = FALSE, na.rm = TRUE, trim = TRUE, \dots)
% \method{returns}{zoo}(x, method = c("continuous", "discrete", "compound", "simple"),
% percentage = FALSE, na.rm = TRUE, trim = TRUE, \dots)
getReturns(\dots)
returnSeries(\dots)
}
\arguments{
\item{percentage}{
a logical value. By default \code{FALSE}, if \code{TRUE} the
series will be expressed in percentage changes.
}
\item{method}{
...
}
\item{na.rm}{
...
}
\item{trim}{
...
}
\item{x}{
an object of class \code{timeSeries}.
}
\item{\dots}{
arguments to be passed.
}
}
\value{
all functions return an object of class \code{timeSeries}.
}
\note{
    The functions \code{returnSeries} and \code{getReturns}
    are synonyms for \code{returns.timeSeries}.
}
\author{
Diethelm Wuertz for the Rmetrics \R-port.
}
\examples{
## data -
# Microsoft Data:
myFinCenter <<- "GMT"
MSFT = as.timeSeries(data(msft.dat))[1:10, 1:4]
head(MSFT)
## returnSeries -
# Continuous Returns:
returns(MSFT)
# Discrete Returns:
returns(MSFT, method = "discrete")
# Don't trim:
returns(MSFT, trim = FALSE)
# Use Percentage Values:
returns(MSFT, percentage = TRUE, trim = FALSE)
}
\keyword{chron}
|
# ----------------------------------------------
# convert PalEON Phase 1 MIP annual CO2 concentrations to monthly seasonal files based on MsTMIP
# Original Script: Jaclyn Hatala Matthes
# 17 March, 2014
# Updated Script: Christine Rollinson, crollinson@gmail.com
# 30 September, 2015
#
# --------------
# CO2 Correction Proceedure
# --------------
# 1) Download and crop MsTMIP CO2 driver
# 2) Calculate annual MsTMIP annual mean,
# create monthly adjustment value
# 3) use 1700 seasonal variability for 0850-1700
# use calculated variability for 1700-2010
# --------------
#
# ----------------------------------------------
# ----------------------------------------------
# Load libaries, Set up Directories, etc
# ----------------------------------------------
# Dependencies: netCDF I/O, raster handling, spatial projections.
library(ncdf4); library(raster); library(rgdal)
# Input data and output destination paths.
paleon.mask <- "~/Desktop/Research/PalEON_CR/env_regional/env_paleon/domain_mask/paleon_domain.nc"
co2.bjorn <- "~/Desktop/Research/PalEON_CR/env_regional/env_drivers_raw/co2/bjorn/paleon_co2_mix.nc"
co2.mstmip <- "~/Desktop/Research/PalEON_CR/env_regional/env_drivers_raw/co2/NACP_MSTMIP_MODEL_DRIVER/data/mstmip_driver_global_hd_co2_v1.nc4"
outpath <- "~/Desktop/Research/PalEON_CR/env_regional/env_paleon/co2/"
# Create the driver folder if it doesn't already exist.
# Fix: the original tested `soil.out`, a variable never defined in this
# script (apparently copied from the soils driver script); the CO2 output
# folder is `outpath`.
if(!dir.exists(outpath)) dir.create(outpath, recursive = TRUE)
# dummy fill values
fillv <- 1e30
# Load the paleon mask
paleon <- raster(paleon.mask)
paleon
# ----------------------------------------------
# ----------------------------------------------
pl.nc <- nc_open(co2.bjorn)
pl.time <- ncvar_get(pl.nc,"time")
pl.co2 <- ncvar_get(pl.nc,"CO2air")
nc_close(pl.nc)
ms.nc <- nc_open(co2.mstmip)
ms.time <- ncvar_get(ms.nc,"time") #monthly, starting in 01-1700
ms.lon <- ncvar_get(ms.nc,"lon")
ms.lat <- ncvar_get(ms.nc,"lat")
ms.co2 <- ncvar_get(ms.nc,"CO2")
ms.yr <- 1700:2010
nc_close(ms.nc)
length(ms.yr)*12; length(ms.time)
#loop over mstmip time, crop to PalEON domain and get monthly variability
# Indices of the half-degree MsTMIP grid cells inside the PalEON domain's
# bounding box.
dom.lon <- which(ms.lon > xmin(paleon) & ms.lon < xmax(paleon))
dom.lat <- which(ms.lat > ymin(paleon) & ms.lat < ymax(paleon))
co2.mon.var <- vector()
# For each year of the monthly MsTMIP record, compute the per-cell annual
# mean and store each month's domain-averaged deviation from it.
for(t in 1:(length(ms.time)/12)){
  t.ind <- ((t-1)*12+1):(t*12)  # the 12 monthly slices for year t
  co2.avg <- apply(ms.co2[dom.lon,dom.lat,t.ind],c(1,2),mean) #annual mean
  for(m in 1:12){
    # Per-cell (annual mean - month m), reduced to one scalar per month.
    # NOTE(review): this is mean-minus-month, so adding it to the annual
    # value below flips the sign of the seasonal cycle relative to MsTMIP —
    # confirm this is intended.
    co2.var <- co2.avg - ms.co2[dom.lon,dom.lat,(min(t.ind)+m-1)]
    co2.mon.var[min(t.ind)+m-1] <- mean(co2.var)
  }
}
plot(seq(1700,2011-1/12,by=1/12),co2.mon.var,main="MsTMIP CO2 monthly variability",xlab="time",ylab="CO2 variability [ppm]", type="l")
#add mstmip seasonal cycle to PalEON CO2 record
# Note: here is where we get rid of the extra first 850 years of bjorn's record
co2.mon.new <- vector()
for(y in 850:2010){
  for(m in 1:12){
    if(y<1700){
      # Pre-1700: impose the 1700 seasonal cycle (first 12 entries of
      # co2.mon.var) on the annual PalEON value.
      co2.mon.new[(y-850)*12+m] <- pl.co2[y] + co2.mon.var[m]
    } else {
      # 1700 onward: use that year's own MsTMIP-derived monthly deviation.
      co2.mon.new[(y-850)*12+m] <- pl.co2[y] + co2.mon.var[(y-1700)*12+m]
    }
  }
}
# plot(seq(850,2011-1/12,by=1/12),co2.mon.new,main="PalEON CO2 monthly variability",xlab="time",ylab="CO2 [ppm]", type="l")
# plot(co2.mon.new[1:(100*12)], type="l")
# plot(co2.mon.new[(length(co2.mon.new)-100*12):length(co2.mon.new)], type="l")
# length(co2.mon.new)
#format monthly netCDF file for output
# Specify time units for this year and month
nc_time_units <- paste('months since 0850-01-01 00:00:00', sep='')
nc.time <- (seq(850,2011-1/12,by=1/12)-850)*12
time <- ncdim_def("time",nc_time_units,nc.time,unlim=TRUE)
data <- co2.mon.new
nc_variable_long_name <- 'Average monthly atmospheric CO2 concentration [ppm]'
nc_variable_units='ppm'
fillv <- 1e+30
nc_var <- ncvar_def('co2',nc_variable_units,
list(time), fillv, longname=nc_variable_long_name,prec="double")
ofname <- paste(outpath,"paleon_monthly_co2.nc",sep="")
newfile <- nc_create( ofname, nc_var ) # Initialize file
ncatt_put( newfile, nc_var, time, 'monthly')
ncatt_put( newfile, 0, 'description',"PalEON annual CO2 with MsTMIP seasonal CO2 variability imposed")
ncvar_put(newfile, nc_var, data) # Write netCDF file
nc_close(newfile)
#format netCDF file for output
# Specify time units for this year and month
nc_time_units <- paste('years since 0850-01-01 00:00:00', sep='')
nc.time <- 850:2010-850
time <- ncdim_def("time",nc_time_units,nc.time,unlim=TRUE)
data <- pl.co2[850:2010]
nc_variable_long_name <- 'Average annual atmospheric CO2 concentration [ppm]'
nc_variable_units='ppm'
fillv <- 1e+30
nc_var <- ncvar_def('co2',nc_variable_units,
list(time), fillv, longname=nc_variable_long_name,prec="double")
ofname <- paste(outpath,"paleon_annual_co2.nc",sep="")
newfile <- nc_create( ofname, nc_var ) # Initialize file
ncatt_put( newfile, nc_var, time, 'yearly')
ncatt_put( newfile, 0, 'description',"PalEON annual CO2 concentrations")
ncvar_put(newfile, nc_var, data) # Write netCDF file
nc_close(newfile)
| /2_seasonal_co2.R | no_license | PalEON-Project/env_regional | R | false | false | 4,955 | r | # ----------------------------------------------
# convert PalEON Phase 1 MIP annual CO2 concentrations to monthly seasonal files based on MsTMIP
# Original Script: Jaclyn Hatala Matthes
# 17 March, 2014
# Updated Script: Christine Rollinson, crollinson@gmail.com
# 30 September, 2015
#
# --------------
# CO2 Correction Proceedure
# --------------
# 1) Download and crop MsTMIP CO2 driver
# 2) Calculate annual MsTMIP annual mean,
# create monthly adjustment value
# 3) use 1700 seasonal variability for 0850-1700
# use calculated variability for 1700-2010
# --------------
#
# ----------------------------------------------
# ----------------------------------------------
# Load libaries, Set up Directories, etc
# ----------------------------------------------
# Dependencies: netCDF I/O, raster handling, spatial projections.
library(ncdf4); library(raster); library(rgdal)
# Input data and output destination paths.
paleon.mask <- "~/Desktop/Research/PalEON_CR/env_regional/env_paleon/domain_mask/paleon_domain.nc"
co2.bjorn <- "~/Desktop/Research/PalEON_CR/env_regional/env_drivers_raw/co2/bjorn/paleon_co2_mix.nc"
co2.mstmip <- "~/Desktop/Research/PalEON_CR/env_regional/env_drivers_raw/co2/NACP_MSTMIP_MODEL_DRIVER/data/mstmip_driver_global_hd_co2_v1.nc4"
outpath <- "~/Desktop/Research/PalEON_CR/env_regional/env_paleon/co2/"
# Create the driver folder if it doesn't already exist.
# Fix: the original tested `soil.out`, a variable never defined in this
# script (apparently copied from the soils driver script); the CO2 output
# folder is `outpath`.
if(!dir.exists(outpath)) dir.create(outpath, recursive = TRUE)
# dummy fill values
fillv <- 1e30
# Load the paleon mask
paleon <- raster(paleon.mask)
paleon
# ----------------------------------------------
# ----------------------------------------------
pl.nc <- nc_open(co2.bjorn)
pl.time <- ncvar_get(pl.nc,"time")
pl.co2 <- ncvar_get(pl.nc,"CO2air")
nc_close(pl.nc)
ms.nc <- nc_open(co2.mstmip)
ms.time <- ncvar_get(ms.nc,"time") #monthly, starting in 01-1700
ms.lon <- ncvar_get(ms.nc,"lon")
ms.lat <- ncvar_get(ms.nc,"lat")
ms.co2 <- ncvar_get(ms.nc,"CO2")
ms.yr <- 1700:2010
nc_close(ms.nc)
length(ms.yr)*12; length(ms.time)
#loop over mstmip time, crop to PalEON domain and get monthly variability
dom.lon <- which(ms.lon > xmin(paleon) & ms.lon < xmax(paleon))
dom.lat <- which(ms.lat > ymin(paleon) & ms.lat < ymax(paleon))
co2.mon.var <- vector()
for(t in 1:(length(ms.time)/12)){
t.ind <- ((t-1)*12+1):(t*12)
co2.avg <- apply(ms.co2[dom.lon,dom.lat,t.ind],c(1,2),mean) #annual mean
for(m in 1:12){
co2.var <- co2.avg - ms.co2[dom.lon,dom.lat,(min(t.ind)+m-1)]
co2.mon.var[min(t.ind)+m-1] <- mean(co2.var)
}
}
plot(seq(1700,2011-1/12,by=1/12),co2.mon.var,main="MsTMIP CO2 monthly variability",xlab="time",ylab="CO2 variability [ppm]", type="l")
#add mstmip seasonal cycle to PalEON CO2 record
# Note: here is where we get rid of the extra first 850 years of bjorn's record
co2.mon.new <- vector()
for(y in 850:2010){
for(m in 1:12){
if(y<1700){
co2.mon.new[(y-850)*12+m] <- pl.co2[y] + co2.mon.var[m]
} else {
co2.mon.new[(y-850)*12+m] <- pl.co2[y] + co2.mon.var[(y-1700)*12+m]
}
}
}
# plot(seq(850,2011-1/12,by=1/12),co2.mon.new,main="PalEON CO2 monthly variability",xlab="time",ylab="CO2 [ppm]", type="l")
# plot(co2.mon.new[1:(100*12)], type="l")
# plot(co2.mon.new[(length(co2.mon.new)-100*12):length(co2.mon.new)], type="l")
# length(co2.mon.new)
#format monthly netCDF file for output
# Specify time units for this year and month
nc_time_units <- paste('months since 0850-01-01 00:00:00', sep='')
nc.time <- (seq(850,2011-1/12,by=1/12)-850)*12
time <- ncdim_def("time",nc_time_units,nc.time,unlim=TRUE)
data <- co2.mon.new
nc_variable_long_name <- 'Average monthly atmospheric CO2 concentration [ppm]'
nc_variable_units='ppm'
fillv <- 1e+30
nc_var <- ncvar_def('co2',nc_variable_units,
list(time), fillv, longname=nc_variable_long_name,prec="double")
ofname <- paste(outpath,"paleon_monthly_co2.nc",sep="")
newfile <- nc_create( ofname, nc_var ) # Initialize file
ncatt_put( newfile, nc_var, time, 'monthly')
ncatt_put( newfile, 0, 'description',"PalEON annual CO2 with MsTMIP seasonal CO2 variability imposed")
ncvar_put(newfile, nc_var, data) # Write netCDF file
nc_close(newfile)
#format netCDF file for output
# Specify time units for this year and month
nc_time_units <- paste('years since 0850-01-01 00:00:00', sep='')
nc.time <- 850:2010-850
time <- ncdim_def("time",nc_time_units,nc.time,unlim=TRUE)
data <- pl.co2[850:2010]
nc_variable_long_name <- 'Average annual atmospheric CO2 concentration [ppm]'
nc_variable_units='ppm'
fillv <- 1e+30
nc_var <- ncvar_def('co2',nc_variable_units,
list(time), fillv, longname=nc_variable_long_name,prec="double")
ofname <- paste(outpath,"paleon_annual_co2.nc",sep="")
newfile <- nc_create( ofname, nc_var ) # Initialize file
ncatt_put( newfile, nc_var, time, 'yearly')
ncatt_put( newfile, 0, 'description',"PalEON annual CO2 concentrations")
ncvar_put(newfile, nc_var, data) # Write netCDF file
nc_close(newfile)
|
#' Payoff Matrix
#'
#' This function helps you plot a payoff matrix and identify pure strategy Nash equilibria.
#' Used for two player normal form games and finite, though possibly different, strategy sets.
#' Credit for base code: Neelanjan Sircar
#' @param X Payoff matrix for player 1. Defaults to coordination.
#' @param Y Payoff matrix for player 2. Defaults to coordination.
#' @param P1 name of player 1 (row)
#' @param P2 Name of player 2 (column)
#' @param labels1 Names of strategies for player 1
#' @param labels2 Names of strategies for player 2
#' @param labelsize Size of labels
#' @param arrow1 draw response arrows for player 1
#' @param arrow2 draw response arrows for player 2
#' @param arrow1col color of best response arrows; same for arrow2col
#' @param width1 Width of arrows; same for width2
#' @param nash mark the Nash equilibrium, if there are any
#' @param alength length of arrow head, defaults to .25
#' @keywords Payoff matrix, Nash
#' @export
#' @examples
#' M1 <- matrix(1:12, 3, 4)
#' gt_bimatrix(M1, M1, labels1 =paste(1:3), labels2 = paste(1:4), main = "Asymmetric", mainline = -1)
#' # Here is a more conservative style:
#' gt_bimatrix(matrix(c(1,0,0,0), 2, 2),
#' labels1 = c("U","D"), labels2 = c("L","R"),
#' pty = "m", matrixfill=NULL, nash = FALSE, arrow1= FALSE,
#' asp = .45, tfont = "serif", tscale = .8)
gt_bimatrix <- function(
  # Payoff matrix
  X = matrix(c(1,0,0,1),2),
  Y=t(X),
  P1="Player 1",
  P2="Player 2",
  labels1 = NULL,
  labels2 = NULL,
  labelsize=NULL,
  # Arrows
  arrow1=TRUE,
  arrow2=arrow1,
  arrow1col=gray(.4),
  arrow2col=arrow1col,
  width1=3,
  width2=width1,
  arrowdist=.2,
  alength = .25,
  space=max(arrowdist, radius+.075),
  # Nash
  nash=TRUE,
  radius=.2,
  starfill="red",
  starborderlwd = 1.5, # Thickness of border of Nash star
  tips=8,
  nashborder="black",
  # NOTE(review): `nashborder` is never used in the body — confirm whether
  # gt_star() should receive it.
  # Formatting
  tfont=NULL,
  pty="s",
  asp = NULL,
  srt1=90,
  srt2=0,
  mar=c(.7,.7,.7,.7),
  # Colors
  matrixfill=gray(.7),
  pcol1="black",
  pcol2=pcol1, # player colors
  numcol1=pcol1,
  numcol2=pcol2, # payoff colors
  scol1=pcol1,
  scol2=pcol2, #strategy colors
  col.main="red",
  bordercol="white",
  # Lines
  gameborder=TRUE,
  borderdim = NULL,
  lwd=1.5,
  offset1=c(.8,.8),
  offset2=c(.2,.2),
  # Scales
  u=NULL,
  playersize=NULL,
  labelsdim = NULL,
  pdim =NULL,
  tscale=1, # scale text
  maincex=1,
  # Arguments for plot title
  main="",
  mainline= -.5
){
  # Default strategy labels: "X1..Xn" for rows, "Y1..Ym" for columns; if only
  # labels1 is supplied it is reused for the columns too.
  if(!is.null(labels1)) labels2sub <- labels1
  if(is.null(labels1)) labels2sub <-mapply(paste, "Y",1:ncol(Y), sep="")
  if(is.null(labels2)) labels2 <- labels2sub
  if(is.null(labels1)) labels1 <- mapply(paste, "X",1:nrow(X), sep="")
  # Checks
  if ((nrow(X)!=nrow(Y))|(ncol(X)!=ncol(Y))) stop("Dimensions of X and Y are not equal")
  if (length(labels1)!=nrow(X)) stop ("Row labels do not match options")
  if (length(labels2)!=ncol(Y)) stop ("Col labels do not match options")
  if (is.character(labels1)==FALSE | is.character(labels2)==FALSE) stop ("Labels are not character strings")
  # Prepare scaling (may be modified with tscale): text shrinks as the
  # strategy sets grow and as payoff strings get longer.
  k <- tscale*2.4*(.85)^(max(length(labels1), length(labels2))-2)
  if (is.null(u)) u <- tscale*(3 - ((max(nchar(as.character(c(X,Y)))))- 1)*.5 - .25*(max(length(labels1), length(labels2)) - 1))
  if (is.null(labelsize)) labelsize <- k; if (is.null(playersize)) playersize<- k
  if (is.null(borderdim)) borderdim <- c(-.3*max(length(labels1), length(labels2)) - .1*labelsize, .25*max(length(labels1), length(labels2)) + .1*labelsize)
  # Start Graph: empty plot region, then the matrix rectangle and grid lines.
  par(pty=pty, mar=mar)
  if(is.null(asp)) asp <- par("pin")[1]/par("pin")[2]
  plot(1,1, xlim=c(borderdim[1],ncol(X)+.1), ylim=c(-0.1,nrow(X)+borderdim[2]), ann=F, axes=F, asp=asp, type="n")
  title(main=main, cex.main = maincex, col.main=col.main, line = mainline)
  if(gameborder) polygon(c(borderdim[1],borderdim[1], ncol(X)+.1, ncol(X)+.1, borderdim[1]), c(-.1, nrow(X)+ borderdim[2], nrow(X)+borderdim[2], -.1, -.1), col=bordercol, border=NA)
  polygon(c(0,ncol(X),ncol(X),0), c(0,0,nrow(X),nrow(X)), lwd=lwd, col=matrixfill)
  segments(1:(ncol(X)-1), rep(0,(ncol(X)-1)), 1:(ncol(X)-1), rep(nrow(X),(ncol(X)-1)), lwd=lwd)
  segments(rep(0,(ncol(X)-1)), 1:(nrow(X)-1), rep(ncol(X),(ncol(X)-1)), 1:(nrow(X)-1), lwd=lwd)
  # Coordinates for payoff text: (a1,b1) place player 1's payoffs within each
  # cell (using offset1), (a2,b2) place player 2's payoffs (using offset2),
  # both in column-major order to match as.vector(X)/as.vector(Y).
  a1 <- rep((1-offset1[1]),nrow(X)); for(i in 2:ncol(X)) a1<-c(a1,rep((i-offset1[1]),nrow(X)))
  b1 <- rep(seq((nrow(X)-offset1[2]), (1-offset1[2]),-1), ncol(X))
  a2 <- rep((1-offset2[1]),nrow(X)); for(i in 2:ncol(X)) a2<-c(a2,rep((i-offset2[1]),nrow(X)))
  b2 <- rep(seq((nrow(X)-offset2[2]),(1-offset2[2]),-1), ncol(X))
  text(a1,b1, as.character(as.vector(X)), cex=u, col=numcol1)
  text(a2,b2, as.character(as.vector(Y)), cex=u, col=numcol2)
  if (is.null(labelsdim)) labelsdim <- c(-.10*max(length(labels1), length(labels2)), .08*max(length(labels1), length(labels2)))
  # if (is.null(pdim)) pdim <-c(-.3*max(length(labels1), length(labels2)), .25*max(length(labels1), length(labels2)))
  if (is.null(pdim)) pdim <-c(-.2*max(length(labels1), length(labels2)), .15*max(length(labels1), length(labels2)))
  # Action Labels
  text(rep(labelsdim[1],nrow(X)), (rep(nrow(X)+.5,nrow(X)) - 1:nrow(X)), labels1, cex=labelsize, family=tfont, font=2, srt=srt1, col=scol1)
  text(1:ncol(X)-.5, rep(nrow(X)+ labelsdim[2],ncol(X)), labels2, cex=labelsize, family=tfont, font=2, srt=srt2, col=scol2)
  # Player Labels
  text(pdim[1], nrow(X)/2, P1, cex=playersize, srt=90, family=tfont, font=2, col=pcol1)
  text(ncol(X)/2, nrow(X)+ pdim[2], P2, cex=playersize, family=tfont, font=2, col=pcol2)
  # Best-response arrows are delegated to gt_BRarrow() (defined elsewhere in
  # this package); vert=TRUE draws player 2's (vertical-axis) responses.
  if (arrow1) gt_BRarrow(X,Y,space=space, nash=nash, color=arrow1col, width=width1, arrowdist=arrowdist, alength = alength)
  if (arrow2) gt_BRarrow(X,Y,space=space, nash=nash, color=arrow2col, width=width2, arrowdist=arrowdist, vert=TRUE, alength = alength)
  # Pure-strategy Nash detection: cell (i,j) is an equilibrium when X[i,j]
  # is a maximum of its column and Y[i,j] a maximum of its row; store that
  # cell's plot coordinates, NA otherwise.
  pureNEx <- array(NA, c(nrow(X), ncol(X)))
  pureNEy = pureNEx
  for (i in 1:nrow(X)){
    for (j in 1:ncol(X)){
      pureNEx[i,j] <- ifelse((X[i,j]==max(X[,j]) & (Y[i,j]==max(Y[i,]))),j-.5, NA)
      pureNEy[i,j] <- ifelse((X[i,j]==max(X[,j]) & (Y[i,j]==max(Y[i,]))),nrow(X)-i+.5, NA)
    }}
  # gt_star() (defined elsewhere in this package) draws a star on each
  # detected equilibrium cell; NA coordinates are presumably skipped by it —
  # confirm in gt_star's implementation.
  if (nash) gt_star(as.vector(pureNEx), as.vector(pureNEy),
                    rad=radius, phi=0, starfill=starfill, tips=tips,
                    starborderlwd=starborderlwd)
}
| /R/gt_bimatrix.R | no_license | macartan/hop | R | false | false | 6,432 | r | #' Payoff Matrix
#'
#' This function helps you plot a payoff matrix and identify pure strategy Nash equilibria.
#' Used for two player normal form games and finite, though possibly different, strategy sets.
#' Credit for base code: Neelanjan Sircar
#' @param X Payoff matrix for player 1. Defaults to coordination.
#' @param Y Payoff matrix for player 2. Defaults to coordination.
#' @param P1 name of player 1 (row)
#' @param P2 Name of player 2 (column)
#' @param labels1 Names of strategies for player 1
#' @param labels2 Names of strategies for player 2
#' @param labelsize Size of labels
#' @param arrow1 draw response arrows for player 1
#' @param arrow2 draw response arrows for player 2
#' @param arrow1col color of best response arrows; same for arrow2col
#' @param width1 Width of arrows; same for width2
#' @param nash mark the Nash equilibrium, if there are any
#' @param alength length of arrow head, defaults to .25
#' @keywords Payoff matrix, Nash
#' @export
#' @examples
#' M1 <- matrix(1:12, 3, 4)
#' gt_bimatrix(M1, M1, labels1 =paste(1:3), labels2 = paste(1:4), main = "Asymmetric", mainline = -1)
#' # Here is a more conservative style:
#' gt_bimatrix(matrix(c(1,0,0,0), 2, 2),
#' labels1 = c("U","D"), labels2 = c("L","R"),
#' pty = "m", matrixfill=NULL, nash = FALSE, arrow1= FALSE,
#' asp = .45, tfont = "serif", tscale = .8)
gt_bimatrix <- function(
# Payoff matrix
X = matrix(c(1,0,0,1),2),
Y=t(X),
P1="Player 1",
P2="Player 2",
labels1 = NULL,
labels2 = NULL,
labelsize=NULL,
# Arrows
arrow1=TRUE,
arrow2=arrow1,
arrow1col=gray(.4),
arrow2col=arrow1col,
width1=3,
width2=width1,
arrowdist=.2,
alength = .25,
space=max(arrowdist, radius+.075),
# Nash
nash=TRUE,
radius=.2,
starfill="red",
starborderlwd = 1.5, # Thickness of border of Nash star
tips=8,
nashborder="black",
# Formating
tfont=NULL,
pty="s",
asp = NULL,
srt1=90,
srt2=0,
mar=c(.7,.7,.7,.7),
# Colors
matrixfill=gray(.7),
pcol1="black",
pcol2=pcol1, # player colors
numcol1=pcol1,
numcol2=pcol2, # payoff colors
scol1=pcol1,
scol2=pcol2, #strategy colors
col.main="red",
bordercol="white",
# Lines
gameborder=TRUE,
borderdim = NULL,
lwd=1.5,
offset1=c(.8,.8),
offset2=c(.2,.2),
# Scales
u=NULL,
playersize=NULL,
labelsdim = NULL,
pdim =NULL,
tscale=1, # scale text
maincex=1,
# Arguments for plot title
main="",
mainline= -.5
){
if(!is.null(labels1)) labels2sub <- labels1
if(is.null(labels1)) labels2sub <-mapply(paste, "Y",1:ncol(Y), sep="")
if(is.null(labels2)) labels2 <- labels2sub
if(is.null(labels1)) labels1 <- mapply(paste, "X",1:nrow(X), sep="")
# Checks
if ((nrow(X)!=nrow(Y))|(ncol(X)!=ncol(Y))) stop("Dimensions of X and Y are not equal")
if (length(labels1)!=nrow(X)) stop ("Row labels do not match options")
if (length(labels2)!=ncol(Y)) stop ("Col labels do not match options")
if (is.character(labels1)==FALSE | is.character(labels2)==FALSE) stop ("Labels are not character strings")
# Prepare Scaling (May be modified wth kscale)
k <- tscale*2.4*(.85)^(max(length(labels1), length(labels2))-2)
if (is.null(u)) u <- tscale*(3 - ((max(nchar(as.character(c(X,Y)))))- 1)*.5 - .25*(max(length(labels1), length(labels2)) - 1))
if (is.null(labelsize)) labelsize <- k; if (is.null(playersize)) playersize<- k
if (is.null(borderdim)) borderdim <- c(-.3*max(length(labels1), length(labels2)) - .1*labelsize, .25*max(length(labels1), length(labels2)) + .1*labelsize)
# Start Graph
par(pty=pty, mar=mar)
if(is.null(asp)) asp <- par("pin")[1]/par("pin")[2]
plot(1,1, xlim=c(borderdim[1],ncol(X)+.1), ylim=c(-0.1,nrow(X)+borderdim[2]), ann=F, axes=F, asp=asp, type="n")
title(main=main, cex.main = maincex, col.main=col.main, line = mainline)
if(gameborder) polygon(c(borderdim[1],borderdim[1], ncol(X)+.1, ncol(X)+.1, borderdim[1]), c(-.1, nrow(X)+ borderdim[2], nrow(X)+borderdim[2], -.1, -.1), col=bordercol, border=NA)
polygon(c(0,ncol(X),ncol(X),0), c(0,0,nrow(X),nrow(X)), lwd=lwd, col=matrixfill)
segments(1:(ncol(X)-1), rep(0,(ncol(X)-1)), 1:(ncol(X)-1), rep(nrow(X),(ncol(X)-1)), lwd=lwd)
segments(rep(0,(ncol(X)-1)), 1:(nrow(X)-1), rep(ncol(X),(ncol(X)-1)), 1:(nrow(X)-1), lwd=lwd)
a1 <- rep((1-offset1[1]),nrow(X)); for(i in 2:ncol(X)) a1<-c(a1,rep((i-offset1[1]),nrow(X)))
b1 <- rep(seq((nrow(X)-offset1[2]), (1-offset1[2]),-1), ncol(X))
a2 <- rep((1-offset2[1]),nrow(X)); for(i in 2:ncol(X)) a2<-c(a2,rep((i-offset2[1]),nrow(X)))
b2 <- rep(seq((nrow(X)-offset2[2]),(1-offset2[2]),-1), ncol(X))
text(a1,b1, as.character(as.vector(X)), cex=u, col=numcol1)
text(a2,b2, as.character(as.vector(Y)), cex=u, col=numcol2)
if (is.null(labelsdim)) labelsdim <- c(-.10*max(length(labels1), length(labels2)), .08*max(length(labels1), length(labels2)))
# if (is.null(pdim)) pdim <-c(-.3*max(length(labels1), length(labels2)), .25*max(length(labels1), length(labels2)))
if (is.null(pdim)) pdim <-c(-.2*max(length(labels1), length(labels2)), .15*max(length(labels1), length(labels2)))
# Action Labels
text(rep(labelsdim[1],nrow(X)), (rep(nrow(X)+.5,nrow(X)) - 1:nrow(X)), labels1, cex=labelsize, family=tfont, font=2, srt=srt1, col=scol1)
text(1:ncol(X)-.5, rep(nrow(X)+ labelsdim[2],ncol(X)), labels2, cex=labelsize, family=tfont, font=2, srt=srt2, col=scol2)
# Player Labels
text(pdim[1], nrow(X)/2, P1, cex=playersize, srt=90, family=tfont, font=2, col=pcol1)
text(ncol(X)/2, nrow(X)+ pdim[2], P2, cex=playersize, family=tfont, font=2, col=pcol2)
if (arrow1) gt_BRarrow(X,Y,space=space, nash=nash, color=arrow1col, width=width1, arrowdist=arrowdist, alength = alength)
if (arrow2) gt_BRarrow(X,Y,space=space, nash=nash, color=arrow2col, width=width2, arrowdist=arrowdist, vert=TRUE, alength = alength)
pureNEx <- array(NA, c(nrow(X), ncol(X)))
pureNEy = pureNEx
for (i in 1:nrow(X)){
for (j in 1:ncol(X)){
pureNEx[i,j] <- ifelse((X[i,j]==max(X[,j]) & (Y[i,j]==max(Y[i,]))),j-.5, NA)
pureNEy[i,j] <- ifelse((X[i,j]==max(X[,j]) & (Y[i,j]==max(Y[i,]))),nrow(X)-i+.5, NA)
}}
if (nash) gt_star(as.vector(pureNEx), as.vector(pureNEy),
rad=radius, phi=0, starfill=starfill, tips=tips,
starborderlwd=starborderlwd)
}
|
#' Construct a time-series query object
#'
#' Builds a "TsQuery" object bound to one EMS system, resolving the system id
#' and its analytics handle, then initializes an empty query state.
#' @export
tseries_query <-
function(conn, ems_name, new_data = FALSE)
{
  # Classed list that carries the connection plus per-system handles.
  qry <- structure(list(), class = "TsQuery")
  qry$connection <- conn
  qry$ems <- ems(conn)
  qry$ems_id <- get_id(qry$ems, ems_name)
  qry$analytic <- analytic(conn, qry$ems_id, new_data)
  # Query state starts empty; reset() finalizes the blank state.
  qry$queryset <- list()
  qry$columns <- list()
  reset(qry)
}
#' Reset a TsQuery object
#'
#' Clears the accumulated query definition and the selected parameter list,
#' leaving every other field untouched.
#' @export
reset.TsQuery <-
function(qry)
{
  # Drop the query body and the column selection back to empty lists.
  qry[["queryset"]] <- list()
  qry[["columns"]] <- list()
  qry
}
#' Select parameters for a TsQuery
#'
#' Looks up each keyword in the cached parameter table, falling back to an
#' EMS API search when a keyword is unknown. Newly discovered parameters are
#' appended to the cache, which is persisted once at the end if it changed.
#'
#' @param qry A TsQuery object.
#' @param ... One or more parameter keywords (character strings).
#' @return The TsQuery object with each matched parameter appended to
#'   \code{qry$columns}.
#' @export
select.TsQuery <-
function(qry, ...)
{
  keywords <- list(...)
  save_table <- FALSE  # TRUE/FALSE instead of the reassignable T/F
  for (kw in keywords) {
    # Try the locally cached param table first.
    prm <- get_param(qry$analytic, kw)
    if (prm$id == "") {
      # Cache miss: search the EMS API instead.
      res <- search_param(qry$analytic, kw)
      # Stack the search results into the param table so later lookups can
      # reuse them without another API round trip.
      df <- data.frame(matrix(NA, nrow = length(res), ncol = length(res[[1]])),
                       stringsAsFactors = FALSE)
      names(df) <- names(res[[1]])
      for (i in seq_along(res)) {
        df[i, ] <- res[[i]]
      }
      qry$analytic$param_table <- rbind(qry$analytic$param_table, df)
      # The first search hit becomes the selected parameter.
      prm <- res[[1]]
      save_table <- TRUE
    }
    # Append the selected param to the query's column list.
    qry$columns[[length(qry$columns) + 1]] <- prm
  }
  if (save_table) {
    # Persist the cache only once, and only if something was added.
    save_paramtable(qry$analytic)
  }
  qry
}
#' Set the time range of a TsQuery
#'
#' Records the start and end offsets (in seconds) that the query should cover.
#' NOTE(review): this plain function masks base::range() once attached.
#' @export
range <-
function(qry, tstart, tend)
{
  # Both endpoints must be numeric; reject anything else up front.
  if (!is.numeric(c(tstart, tend))) {
    stop(sprintf("The values for time range should be numeric. Given values are from %s to %s.", tstart, tend))
  }
  qry$queryset$start <- tstart
  qry$queryset$end <- tend
  qry
}
#' Run a time-series query for a single flight
#'
#' Issues one API request per selected parameter and binds the results into
#' a data frame whose first column is the shared time offset in seconds.
#'
#' @param qry A TsQuery object with parameters already select()-ed.
#' @param flight A flight record number.
#' @param start,end Optional numeric time range (seconds) restricting the query.
#' @param timepoint Not yet supported; a warning is raised if supplied.
#' @return A data.frame with a "Time (sec)" column followed by one column per
#'   selected parameter.
#' @export
run.TsQuery <-
function(qry, flight, start = NULL, end = NULL, timepoint = NULL)
{
  # Guard: with no selected columns the original fell through to an obscure
  # "object 'df' not found" error.
  if (length(qry$columns) == 0) {
    stop("run.TsQuery: no parameters selected; call select() first.")
  }
  # The time range and the timepoint warning are loop-invariant, so handle
  # them once up front instead of on every iteration (the warning previously
  # fired once per selected column).
  if (!(is.null(start) || is.null(end))) {
    qry <- range(qry, start, end)
  }
  if (!is.null(timepoint)) {
    warning("run.TsQuery: Defining time points is not yet supported.")
  }
  for (i in seq_along(qry$columns)) {
    p <- qry$columns[[i]]
    q <- qry$queryset
    q$select <- list(list(analyticId = p$id))
    r <- request(qry$connection, rtype = "POST",
                 uri_keys = c("analytic", "query"),
                 uri_args = c(qry$ems_id, flight),
                 jsondata = q)
    res <- content(r)
    if (!is.null(res$message)) {
      # FIX(review): corrected "massage" -> "message" in the error text.
      stop(sprintf('API query for flight = %s, parameter = "%s" was unsuccessful.\nHere is the message from API: %s',
                   flight, p$name, res$message))
    }
    if (i == 1) {
      # Seed the result with the shared time axis from the first response.
      df <- data.frame(unlist(res$offsets))
      names(df) <- "Time (sec)"
    }
    df <- cbind(df, unlist(res$results[[1]]$values))
    names(df)[i + 1] <- p$name
  }
  df
}
#' Run time-series queries for multiple flights
#'
#' Wraps run.TsQuery over a vector of flight records, or over a data frame
#' containing a "Flight Record" column whose other columns are carried along
#' as flight-level attributes.
#'
#' @param qry A TsQuery object with parameters already selected.
#' @param flight A vector of flight record numbers, or a data.frame with a
#'   "Flight Record" column.
#' @param start,end,timepoint Optional per-flight vectors forwarded to
#'   run.TsQuery element-wise.
#' @return A list with one element per flight, each holding \code{$flt_data}
#'   (flight attributes) and \code{$ts_data} (the time-series data.frame).
#' @export
run_multiflts <-
function(qry, flight, start = NULL, end = NULL, timepoint = NULL)
{
  # inherits() instead of class(x) == "data.frame": works for objects with
  # multiple classes (e.g. tibbles) where the == comparison is not scalar.
  attr_flag <- inherits(flight, "data.frame")
  if (attr_flag) {
    FR <- flight[, "Flight Record"]
  } else {
    FR <- flight
  }
  res <- vector("list", length(FR))  # preallocate instead of growing
  cat(sprintf("=== Start running time series data querying for %d flights ===\n", length(FR)))
  # seq_along() instead of 1:length(FR), which misbehaves for zero flights.
  for (i in seq_along(FR)) {
    cat(sprintf("%d / %d: FR %d\n", i, length(FR), FR[i]))
    res[[i]] <- list()
    if (attr_flag) {
      # Carry all flight attributes along with the record number.
      res[[i]]$flt_data <- as.list(flight[i, ])
    } else {
      res[[i]]$flt_data <- list("Flight Record" = FR[i])
    }
    res[[i]]$ts_data <- run.TsQuery(qry, FR[i], start = start[i], end = end[i], timepoint = timepoint[i])
  }
  res
}
| /r/R/ts_query.R | permissive | c-owens/ems-api-sdk | R | false | false | 3,757 | r | #' @export
tseries_query <-
function(conn, ems_name, new_data = FALSE)
{
  # Build a "TsQuery" object bound to one EMS system: resolve the system id
  # and analytics handle, then initialize an empty query state.
  qry <- structure(list(), class = "TsQuery")
  qry$connection <- conn
  qry$ems <- ems(conn)
  qry$ems_id <- get_id(qry$ems, ems_name)
  qry$analytic <- analytic(conn, qry$ems_id, new_data)
  # Query state starts empty; reset() finalizes the blank state.
  qry$queryset <- list()
  qry$columns <- list()
  reset(qry)
}
#' Reset a TsQuery object
#'
#' Clears the accumulated query definition and the selected parameter list,
#' leaving every other field untouched.
#' @export
reset.TsQuery <-
function(qry)
{
  # Drop the query body and the column selection back to empty lists.
  qry[["queryset"]] <- list()
  qry[["columns"]] <- list()
  qry
}
#' Select parameters for a TsQuery
#'
#' Looks up each keyword in the cached parameter table, falling back to an
#' EMS API search when a keyword is unknown. Newly discovered parameters are
#' appended to the cache, which is persisted once at the end if it changed.
#'
#' @param qry A TsQuery object.
#' @param ... One or more parameter keywords (character strings).
#' @return The TsQuery object with each matched parameter appended to
#'   \code{qry$columns}.
#' @export
select.TsQuery <-
function(qry, ...)
{
  keywords <- list(...)
  save_table <- FALSE  # TRUE/FALSE instead of the reassignable T/F
  for (kw in keywords) {
    # Try the locally cached param table first.
    prm <- get_param(qry$analytic, kw)
    if (prm$id == "") {
      # Cache miss: search the EMS API instead.
      res <- search_param(qry$analytic, kw)
      # Stack the search results into the param table so later lookups can
      # reuse them without another API round trip.
      df <- data.frame(matrix(NA, nrow = length(res), ncol = length(res[[1]])),
                       stringsAsFactors = FALSE)
      names(df) <- names(res[[1]])
      for (i in seq_along(res)) {
        df[i, ] <- res[[i]]
      }
      qry$analytic$param_table <- rbind(qry$analytic$param_table, df)
      # The first search hit becomes the selected parameter.
      prm <- res[[1]]
      save_table <- TRUE
    }
    # Append the selected param to the query's column list.
    qry$columns[[length(qry$columns) + 1]] <- prm
  }
  if (save_table) {
    # Persist the cache only once, and only if something was added.
    save_paramtable(qry$analytic)
  }
  qry
}
#' Set the time range of a TsQuery
#'
#' Records the start and end offsets (in seconds) that the query should cover.
#' NOTE(review): this plain function masks base::range() once attached.
#' @export
range <-
function(qry, tstart, tend)
{
  # Both endpoints must be numeric; reject anything else up front.
  if (!is.numeric(c(tstart, tend))) {
    stop(sprintf("The values for time range should be numeric. Given values are from %s to %s.", tstart, tend))
  }
  qry$queryset$start <- tstart
  qry$queryset$end <- tend
  qry
}
#' Run a time-series query for a single flight
#'
#' Issues one API request per selected parameter and binds the results into
#' a data frame whose first column is the shared time offset in seconds.
#'
#' @param qry A TsQuery object with parameters already select()-ed.
#' @param flight A flight record number.
#' @param start,end Optional numeric time range (seconds) restricting the query.
#' @param timepoint Not yet supported; a warning is raised if supplied.
#' @return A data.frame with a "Time (sec)" column followed by one column per
#'   selected parameter.
#' @export
run.TsQuery <-
function(qry, flight, start = NULL, end = NULL, timepoint = NULL)
{
  # Guard: with no selected columns the original fell through to an obscure
  # "object 'df' not found" error.
  if (length(qry$columns) == 0) {
    stop("run.TsQuery: no parameters selected; call select() first.")
  }
  # The time range and the timepoint warning are loop-invariant, so handle
  # them once up front instead of on every iteration (the warning previously
  # fired once per selected column).
  if (!(is.null(start) || is.null(end))) {
    qry <- range(qry, start, end)
  }
  if (!is.null(timepoint)) {
    warning("run.TsQuery: Defining time points is not yet supported.")
  }
  for (i in seq_along(qry$columns)) {
    p <- qry$columns[[i]]
    q <- qry$queryset
    q$select <- list(list(analyticId = p$id))
    r <- request(qry$connection, rtype = "POST",
                 uri_keys = c("analytic", "query"),
                 uri_args = c(qry$ems_id, flight),
                 jsondata = q)
    res <- content(r)
    if (!is.null(res$message)) {
      # FIX(review): corrected "massage" -> "message" in the error text.
      stop(sprintf('API query for flight = %s, parameter = "%s" was unsuccessful.\nHere is the message from API: %s',
                   flight, p$name, res$message))
    }
    if (i == 1) {
      # Seed the result with the shared time axis from the first response.
      df <- data.frame(unlist(res$offsets))
      names(df) <- "Time (sec)"
    }
    df <- cbind(df, unlist(res$results[[1]]$values))
    names(df)[i + 1] <- p$name
  }
  df
}
#' Run time-series queries for multiple flights
#'
#' Wraps run.TsQuery over a vector of flight records, or over a data frame
#' containing a "Flight Record" column whose other columns are carried along
#' as flight-level attributes.
#'
#' @param qry A TsQuery object with parameters already selected.
#' @param flight A vector of flight record numbers, or a data.frame with a
#'   "Flight Record" column.
#' @param start,end,timepoint Optional per-flight vectors forwarded to
#'   run.TsQuery element-wise.
#' @return A list with one element per flight, each holding \code{$flt_data}
#'   (flight attributes) and \code{$ts_data} (the time-series data.frame).
#' @export
run_multiflts <-
function(qry, flight, start = NULL, end = NULL, timepoint = NULL)
{
  # inherits() instead of class(x) == "data.frame": works for objects with
  # multiple classes (e.g. tibbles) where the == comparison is not scalar.
  attr_flag <- inherits(flight, "data.frame")
  if (attr_flag) {
    FR <- flight[, "Flight Record"]
  } else {
    FR <- flight
  }
  res <- vector("list", length(FR))  # preallocate instead of growing
  cat(sprintf("=== Start running time series data querying for %d flights ===\n", length(FR)))
  # seq_along() instead of 1:length(FR), which misbehaves for zero flights.
  for (i in seq_along(FR)) {
    cat(sprintf("%d / %d: FR %d\n", i, length(FR), FR[i]))
    res[[i]] <- list()
    if (attr_flag) {
      # Carry all flight attributes along with the record number.
      res[[i]]$flt_data <- as.list(flight[i, ])
    } else {
      res[[i]]$flt_data <- list("Flight Record" = FR[i])
    }
    res[[i]]$ts_data <- run.TsQuery(qry, FR[i], start = start[i], end = end[i], timepoint = timepoint[i])
  }
  res
}
|
# top ---------------------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment; prefer
# restarting R for a genuinely clean session.
rm(list = ls())
# install.packages("dplyr")
# install.packages("plyr")
# install.packages("e1071")
# install.packages("pastecs")
# install.packages("ggplot2")
# install.packages("lubridate")
# install.packages("arules")
# install.packages("MASS")
# install.packages("vcd")
# install.packages("prettyR")
# install.packages("data.table")
# install.packages("descr")
# install.packages("caret")
# install.packages("aod")
# install.packages("ROCR")
# FIX(review): load plyr BEFORE dplyr. Attaching plyr after dplyr masks
# dplyr::mutate / dplyr::summarise etc., which this script relies on below.
library(plyr)
library(dplyr)
# library(e1071)
# library(pastecs)
# library(ggplot2)
# library(lubridate)
# library(arules)
# library(MASS)
# library(vcd)
# library(prettyR)
# library(data.table)
# library(descr)
library(caret)
library(aod)
library(ROCR)
#' Get the selected data, saved in 02-selection.R
data <- readRDS("data.rds")
#' Set output path
path.output <- "/Users/jameshedges/Documents/Projects/terrorism/output"
# 04-001-01-outcome -------------------------------------------------------
#' Make outcome variables based on nkill
#' (1) kills greater than 0
#' 1=yes, >0 kills
#' 2-no, 0 kills
#' (2) kills, log of
# ind.nkill == 1 appears to encode "zero kills"; TRUE means nkill > 0 -- TODO
# confirm against 02-selection.R.
ind.nkill.gt0 <-
data$ind.nkill!=1
# Binary outcome as 0/1 numeric.
y1.nkill.gt0 <-
as.numeric(ind.nkill.gt0)
# Log kill counts, computed only for incidents with at least one kill.
nkill.log <-
log(as.numeric(unlist(data[which(ind.nkill.gt0), "nkill"])))
# Second outcome: NA where nkill == 0, log(nkill) otherwise.
y2.nkill.log <-
rep(NA, length(ind.nkill.gt0))
y2.nkill.log <-
replace(y2.nkill.log, which(ind.nkill.gt0), nkill.log)
# Attach both outcomes to the analysis data frame.
data <-
mutate(data, y1.nkill.gt0=y1.nkill.gt0, y2.nkill.log=y2.nkill.log)
# 04-001-02-plots ---------------------------------------------------------
#' Mosaic plots to y1 (greater than 0 kills)
# NOTE(review): crosstab() comes from the descr package, but library(descr)
# is commented out in the setup block -- confirm it is attached elsewhere.
fig.name.pre <- "04-001-02"
# (predictor, outcome) pairs to cross-tabulate against the binary outcome.
vars.crosstab <- list(
c("iyear", "y1.nkill.gt0"),
c("extended", "y1.nkill.gt0"),
c("region", "y1.nkill.gt0"),
c("multiple", "y1.nkill.gt0"),
c("suicide", "y1.nkill.gt0"),
c("attacktype1", "y1.nkill.gt0"),
c("claimed", "y1.nkill.gt0"),
c("weaptype1", "y1.nkill.gt0")
)
# One PDF per pair, named <path.output>/<prefix>-<VAR1>-<VAR2>.pdf
for (i in vars.crosstab) {
fig.name <- paste(path.output,
paste(
paste(fig.name.pre,
paste(toupper(i), collapse="-"), sep="-"),
"pdf", sep="."),
sep="/")
pdf(file=fig.name)
crosstab(data[[i[[1]]]], data[[i[[2]]]],
xlab=i[[1]], ylab=i[[2]])
dev.off()
}
#' (1) XXX ADD SUMMARY
#' Box plots to y2 (for gt0 kills, log(kills))
fig.name.pre <- "04-001-02"
vars.crosstab <- list(
c("iyear", "y2.nkill.log"),
c("extended", "y2.nkill.log"),
c("region", "y2.nkill.log"),
c("multiple", "y2.nkill.log"),
c("suicide", "y2.nkill.log"),
c("attacktype1", "y2.nkill.log"),
c("claimed", "y2.nkill.log"),
c("weaptype1", "y2.nkill.log")
)
for (i in vars.crosstab) {
fig.name <- paste(path.output,
paste(
paste(fig.name.pre,
paste(toupper(i), collapse="-"), sep="-"),
"pdf", sep="."),
sep="/")
pdf(file=fig.name)
# NOTE(review): get() inside a formula is fragile -- it is not evaluated in
# `data`'s column scope; verify it resolves the intended columns.
boxplot(get(i[[2]])~get(i[[1]]), data=data, xlab=i[[1]], ylab=i[[2]])
dev.off()
}
#' (1) XXX ADD SUMMARY
# 04-002-01-partition -----------------------------------------------------
# Reproducible 70/30 train/test split, stratified on the binary outcome.
set.seed(3456)
ind.train <- createDataPartition(data$y1.nkill.gt0, p=0.7, list=FALSE, times=1)
head(ind.train)
data.train <- data[ind.train,]
data.test <- data[-ind.train,]
# 04-003-01-logistic ------------------------------------------------------
# Logistic regression of "any kills" on incident characteristics.
# FIX(review): use <- for assignment instead of =.
mod.logit <- glm(
y1.nkill.gt0 ~
extended +
region +
suicide +
attacktype1 +
claimed +
weaptype1,
family=binomial(logit),
data=data.train)
mod.logit.summary <- summary(mod.logit)
# Call:
# glm(formula = y1.nkill.gt0 ~ extended + region + suicide + attacktype1 +
# claimed + weaptype1, family = binomial(logit), data = data.train)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -3.645 -0.891 0.051 0.786 3.259
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 2.989335 1.289577 2.32 0.02045 *
# extended1 -0.396945 0.136897 -2.90 0.00374 **
# region2 -0.238416 0.742793 -0.32 0.74823
# region3 0.424098 0.407504 1.04 0.29801
# region4 2.267816 0.600009 3.78 0.00016 ***
# region5 0.660312 0.392815 1.68 0.09277 .
# region6 1.030795 0.389407 2.65 0.00812 **
# region7 -1.272810 1.087317 -1.17 0.24176
# region8 -1.655852 0.466763 -3.55 0.00039 ***
# region9 -0.759227 0.620892 -1.22 0.22141
# region10 1.554028 0.389987 3.98 0.000067530254832 ***
# region11 1.610873 0.393586 4.09 0.000042618149524 ***
# region12 0.430974 0.402491 1.07 0.28427
# region13 -0.000891 1.350251 0.00 0.99947
# suicide1 6.835571 0.722795 9.46 < 0.0000000000000002 ***
# attacktype12 -4.383903 0.583412 -7.51 0.000000000000057 ***
# attacktype13 -5.356724 0.589194 -9.09 < 0.0000000000000002 ***
# attacktype14 -8.180578 0.854953 -9.57 < 0.0000000000000002 ***
# attacktype15 -6.518312 0.776560 -8.39 < 0.0000000000000002 ***
# attacktype16 -5.590092 0.596050 -9.38 < 0.0000000000000002 ***
# attacktype17 -7.192484 0.604338 -11.90 < 0.0000000000000002 ***
# attacktype18 -7.308572 0.674997 -10.83 < 0.0000000000000002 ***
# attacktype19 7.282968 137.910031 0.05 0.95788
# claimed1 0.559636 0.054851 10.20 < 0.0000000000000002 ***
# weaptype15 1.751979 1.080778 1.62 0.10501
# weaptype16 0.618216 1.088424 0.57 0.57004
# weaptype17 -6.805785 196.971634 -0.03 0.97244
# weaptype18 -0.005351 1.090670 0.00 0.99609
# weaptype19 2.352771 1.068585 2.20 0.02768 *
# weaptype110 1.503176 1.448941 1.04 0.29954
# weaptype111 -0.413271 1.509714 -0.27 0.78428
# weaptype112 1.846236 1.314407 1.40 0.16014
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 28088 on 20339 degrees of freedom
# Residual deviance: 20683 on 20308 degrees of freedom
# AIC: 20747
#
# Number of Fisher Scoring iterations: 10
#' Sorted coefficients
# Rank coefficients by estimated effect on the log-odds, largest first.
mod.logit.summary.coef <- mod.logit.summary$coefficients
mod.logit.summary.coef[order(mod.logit.summary.coef[,"Estimate"],
decreasing=TRUE),]
# Estimate Std. Error z value Pr(>|z|)
# attacktype19 7.2829683579 137.91003072 5.280956e-02 9.578836e-01
# suicide1 6.8355714668 0.72279536 9.457132e+00 3.165037e-21
# (Intercept) 2.9893354706 1.28957684 2.318075e+00 2.044526e-02
# weaptype19 2.3527708671 1.06858462 2.201764e+00 2.768200e-02
# region4 2.2678163198 0.60000932 3.779635e+00 1.570583e-04
# weaptype112 1.8462363900 1.31440694 1.404616e+00 1.601356e-01
# weaptype15 1.7519792154 1.08077825 1.621035e+00 1.050102e-01
# region11 1.6108734512 0.39358642 4.092808e+00 4.261815e-05
# region10 1.5540284847 0.38998676 3.984824e+00 6.753025e-05
# weaptype110 1.5031762247 1.44894133 1.037431e+00 2.995352e-01
# region6 1.0307951504 0.38940660 2.647092e+00 8.118723e-03
# region5 0.6603120692 0.39281497 1.680975e+00 9.276781e-02
# weaptype16 0.6182160868 1.08842401 5.679920e-01 5.700404e-01
# claimed1 0.5596358775 0.05485053 1.020293e+01 1.923888e-24
# region12 0.4309740356 0.40249142 1.070766e+00 2.842748e-01
# region3 0.4240979631 0.40750440 1.040720e+00 2.980055e-01
# region13 -0.0008905702 1.35025127 -6.595589e-04 9.994737e-01
# weaptype18 -0.0053510358 1.09067044 -4.906189e-03 9.960854e-01
# region2 -0.2384155769 0.74279312 -3.209717e-01 7.482318e-01
# extended1 -0.3969452342 0.13689705 -2.899589e+00 3.736518e-03
# weaptype111 -0.4132707776 1.50971353 -2.737412e-01 7.842835e-01
# region9 -0.7592268393 0.62089229 -1.222800e+00 2.214054e-01
# region7 -1.2728096269 1.08731700 -1.170597e+00 2.417609e-01
# region8 -1.6558524480 0.46676272 -3.547525e+00 3.888688e-04
# attacktype12 -4.3839029682 0.58341248 -7.514243e+00 5.724131e-14
# attacktype13 -5.3567242918 0.58919352 -9.091621e+00 9.757317e-20
# attacktype16 -5.5900922418 0.59605001 -9.378562e+00 6.687830e-21
# attacktype15 -6.5183116674 0.77656025 -8.393826e+00 4.705722e-17
# weaptype17 -6.8057854851 196.97163353 -3.455211e-02 9.724369e-01
# attacktype17 -7.1924838135 0.60433826 -1.190142e+01 1.163494e-32
# attacktype18 -7.3085720943 0.67499665 -1.082757e+01 2.548338e-27
# attacktype14 -8.1805775435 0.85495250 -9.568458e+00 1.085115e-21
#' (1) suicide has a very big impact on log odds, with 6.82 value
#' (2) attacktype1 and weaptype19 seem suspect and the prior esp
#' consider removing them and redoing, or removing some levels or something
#' (3) regions 4,10,11,6 all showing significant positive effects
#' (4) claimed has a small, but positive, sig. B
#' (5) region8 has significant negative effect of -1.6
#' (6) if they are real attack types 2,3,6,5,7,8,4 all have negative effects
#' (7) weapon type 7 also has sig. neg. B; this effect has very large error
# 04-004-01-conf ints -----------------------------------------------------
# Profile-likelihood confidence intervals (slower than Wald but preferred
# for GLMs).
confint(mod.logit)
# 2.5 % 97.5 %
# (Intercept) -0.171 5.34 *
# extended1 -0.665 -0.13 **
# region2 -1.762 1.18
# region3 -0.350 1.25
# region4 1.124 3.49 *** East Asia
# region5 -0.083 1.46 .
# region6 0.295 1.83 ** South Asia
# region7 -3.535 0.76
# region8 -2.563 -0.73 *** Western Europe
# region9 -2.021 0.44
# region10 0.817 2.35 *** Middle East & North Africa
# region11 0.866 2.42 *** Sub-Saharan Africa
# region12 -0.333 1.25
# region13 -3.243 2.40
# suicide1 5.674 8.65 ***
# attacktype12 -5.784 -3.42 *** Armed Assault
# attacktype13 -6.764 -4.37 *** Bombing/Explosion
# attacktype14 -10.084 -6.67 *** Hijacking
# attacktype15 -8.218 -5.11 *** Hostage Taking (Barricade Incident)
# attacktype16 -7.007 -4.59 *** Hostage Taking (Kidnapping)
# attacktype17 -8.621 -6.17 *** Facility/Infrastructure Attack
# attacktype18 -8.838 -6.12 *** Unarmed Assault
# attacktype19 -14.185 NA
# claimed1 0.452 0.67 ***
# weaptype15 0.018 4.69
# weaptype16 -1.137 3.57
# weaptype17 NA 27.00
# weaptype18 -1.767 2.95
# weaptype19 0.654 5.28 * Melee
# weaptype110 -1.391 4.82
# weaptype111 -3.772 2.94
# weaptype112 -0.554 5.04
# Wald confidence intervals, for comparison with the profile-likelihood ones.
confint.default(mod.logit)
# Odds ratios with profile-likelihood CIs (note: confint() is recomputed here,
# which repeats the slow profiling step).
exp(cbind(OR=coef(mod.logit), confint(mod.logit)))
# OR 2.5 % 97.5 %
# (Intercept) 19.87247 0.84260327 207.7527
# extended1 0.67237 0.51401828 0.8793
# region2 0.78788 0.17161847 3.2589
# region3 1.52821 0.70445279 3.5042
# region4 9.65829 3.07570210 32.6959
# region5 1.93540 0.92013960 4.3226
# region6 2.80329 1.34245578 6.2234
# region7 0.28004 0.02916424 2.1486
# region8 0.19093 0.07703580 0.4843
# region9 0.46803 0.13251168 1.5464
# region10 4.73049 2.26274417 10.5135
# region11 5.00718 2.37709366 11.2003
# region12 1.53876 0.71684169 3.4968
# region13 0.99911 0.03905851 10.9689
# suicide1 930.35987 291.05424516 5718.5431
# attacktype12 0.01248 0.00307649 0.0328
# attacktype13 0.00472 0.00115421 0.0126
# attacktype14 0.00028 0.00004176 0.0013
# attacktype15 0.00148 0.00026964 0.0060
# attacktype16 0.00373 0.00090558 0.0102
# attacktype17 0.00075 0.00018034 0.0021
# attacktype18 0.00067 0.00014508 0.0022
# attacktype19 1455.30148 0.00000069 NA -seems error; low data
# claimed1 1.75004 1.57200131 1.9491
# weaptype15 5.76600 1.01835220 109.3549
# weaptype16 1.85561 0.32074356 35.4727
# weaptype17 0.00111 NA 529705868740.7859 -seems error; low data
# weaptype18 0.99466 0.17083183 19.0583
# weaptype19 10.51466 1.92276007 196.9670
# weaptype110 4.49595 0.24871265 123.7160
# weaptype111 0.66148 0.02299465 18.9764
# weaptype112 6.33593 0.57488838 154.1015
#' Test overall significance of region (model coefficients 3:14)
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=3:14)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 568.7, df = 12, P(> X2) = 0.0
#' Test overall significance of attacktype1 (model coefficients 16:23)
# FIX(review): previously reused Terms=3:14 (the region terms) by copy-paste;
# the recorded output below (df = 8) matches the attacktype1 coefficients,
# which sit at positions 16:23 in the coefficient vector.
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=16:23)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 568.0, df = 8, P(> X2) = 0.0
#' Test overall significance of weapontype1 (model coefficients 25:32)
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=25:32)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 201.3, df = 8, P(> X2) = 0.0
# Overall model fit: deviance reduction vs the null model, and its p-value
# (likelihood-ratio test). A duplicated pchisq line was removed here.
with(mod.logit, null.deviance - deviance)
with(mod.logit, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail=FALSE))
anova(mod.logit, test="Chi")
# In-sample confusion counts at a 0.5 probability threshold.
table(data.train$y1.nkill.gt0>0, fitted(mod.logit)>0.5)
# TEST --------------------------------------------------------------------
# Out-of-sample predicted probabilities on the held-out 30%.
probs.test <- predict(mod.logit, data.test, type="response")
#' Version 1: manual 0/1 thresholding at 0.5 (>= 0.5 maps to 1).
y1.nkill.gt0.est <- rep(0, length(probs.test))
y1.nkill.gt0.est[probs.test >= 0.5] <- 1
table(data.test$y1.nkill.gt0, y1.nkill.gt0.est)
#' Version 2: same thresholding via cut().
# NOTE(review): cut() puts a probability of exactly 0.5 in the "0" bucket,
# whereas Version 1's >= 0.5 maps it to 1 -- confirm which rule is intended.
y1.nkill.gt0.est.v2 <- cut(probs.test, breaks=c(-Inf, 0.5, Inf), labels=c(0,1))
table(data.test$y1.nkill.gt0, y1.nkill.gt0.est.v2)
#' Confusion matrix
confusionMatrix(data.test$y1.nkill.gt0, y1.nkill.gt0.est.v2)
#' ROC curve
# FIX(review): use <- for assignment instead of =.
pred.fit <- prediction(probs.test, data.test$y1.nkill.gt0)
perf.fit <- performance(pred.fit, "tpr", "fpr")
plot(perf.fit,
col="blue",
lwd=2,
main="ROC curve: logit model on y1 (nkill > 0)")
abline(a=0, b=1, lwd=2, lty=2, col="gray")  # chance diagonal
# FIT WITH CARET ----------------------------------------------------------
# Same specification refit through caret::train as a cross-check.
# NOTE(review): y1.nkill.gt0 is numeric 0/1, so train() will treat this as a
# regression problem -- convert to factor if classification metrics are wanted.
# FIX(review): use <- for assignment instead of =.
mod.logit.v2 <- train(y1.nkill.gt0 ~ extended + region + suicide + attacktype1 + claimed + weaptype1,
method="glm",
data=data.train,
family=binomial(link='logit'))
mod.logit.v2.summary <- summary(mod.logit.v2)
# Call:
# NULL
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -3.6446 -0.8912 0.0511 0.7855 3.2587
#
# Coefficients: (3 not defined because of singularities)
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 5.395e+00 1.071e+00 5.039 4.68e-07 ***
# extended1 -3.969e-01 1.369e-01 -2.900 0.003737 **
# region2 -2.384e-01 7.428e-01 -0.321 0.748232
# region3 4.241e-01 4.075e-01 1.041 0.298006
# region4 2.268e+00 6.000e-01 3.780 0.000157 ***
# region5 6.603e-01 3.928e-01 1.681 0.092768 .
# region6 1.031e+00 3.894e-01 2.647 0.008119 **
# region7 -1.273e+00 1.087e+00 -1.171 0.241761
# region8 -1.656e+00 4.668e-01 -3.548 0.000389 ***
# region9 -7.592e-01 6.209e-01 -1.223 0.221405
# region10 1.554e+00 3.900e-01 3.985 6.75e-05 ***
# region11 1.611e+00 3.936e-01 4.093 4.26e-05 ***
# region12 4.310e-01 4.025e-01 1.071 0.284275
# region13 -8.906e-04 1.350e+00 -0.001 0.999474
# suicide1 6.836e+00 7.228e-01 9.457 < 2e-16 ***
# attacktype12 -4.384e+00 5.834e-01 -7.514 5.72e-14 ***
# attacktype13 -5.357e+00 5.892e-01 -9.092 < 2e-16 ***
# attacktype14 -8.181e+00 8.550e-01 -9.568 < 2e-16 ***
# attacktype15 -6.518e+00 7.766e-01 -8.394 < 2e-16 ***
# attacktype16 -5.590e+00 5.960e-01 -9.379 < 2e-16 ***
# attacktype17 -7.192e+00 6.043e-01 -11.901 < 2e-16 ***
# attacktype18 -7.309e+00 6.750e-01 -10.828 < 2e-16 ***
# attacktype19 7.283e+00 1.379e+02 0.053 0.957884
# claimed0 -5.596e-01 5.485e-02 -10.203 < 2e-16 ***
# claimed1 NA NA NA NA
# weaptype12 -1.846e+00 1.314e+00 -1.405 0.160136
# weaptype15 -9.426e-02 8.076e-01 -0.117 0.907084
# weaptype16 -1.228e+00 8.184e-01 -1.501 0.133466
# weaptype17 -8.652e+00 1.970e+02 -0.044 0.964964
# weaptype18 -1.852e+00 8.193e-01 -2.260 0.023821 *
# weaptype19 5.065e-01 8.100e-01 0.625 0.531755
# weaptype110 -3.431e-01 1.280e+00 -0.268 0.788681
# weaptype111 -2.260e+00 1.326e+00 -1.703 0.088487 .
# weaptype112 NA NA NA NA
# weaptype113 NA NA NA NA
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 28088 on 20339 degrees of freedom
# Residual deviance: 20683 on 20308 degrees of freedom
# AIC: 20747
#
# Number of Fisher Scoring iterations: 10
# probs.test.v2 <- predict(mod.logit.v2, data.test)
# y1.nkill.gt0.est.vX <- cut(probs.test.v2, breaks=c(-Inf, 0.5, Inf), labels=c(0,1))
# confusionMatrix(data.test$y1.nkill.gt0, y1.nkill.gt0.est.vX)
# bottom ------------------------------------------------------------------
#' Save the data
# saveRDS(data, "data.rds") | /04.model1.R | no_license | jhedges3/terrorism | R | false | false | 17,947 | r | # top ---------------------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment; prefer
# restarting R for a genuinely clean session.
rm(list = ls())
# install.packages("dplyr")
# install.packages("plyr")
# install.packages("e1071")
# install.packages("pastecs")
# install.packages("ggplot2")
# install.packages("lubridate")
# install.packages("arules")
# install.packages("MASS")
# install.packages("vcd")
# install.packages("prettyR")
# install.packages("data.table")
# install.packages("descr")
# install.packages("caret")
# install.packages("aod")
# install.packages("ROCR")
# FIX(review): load plyr BEFORE dplyr. Attaching plyr after dplyr masks
# dplyr::mutate / dplyr::summarise etc., which this script relies on below.
library(plyr)
library(dplyr)
# library(e1071)
# library(pastecs)
# library(ggplot2)
# library(lubridate)
# library(arules)
# library(MASS)
# library(vcd)
# library(prettyR)
# library(data.table)
# library(descr)
library(caret)
library(aod)
library(ROCR)
#' Get the selected data, saved in 02-selection.R
data <- readRDS("data.rds")
#' Set output path
path.output <- "/Users/jameshedges/Documents/Projects/terrorism/output"
# 04-001-01-outcome -------------------------------------------------------
#' Make outcome variables based on nkill
#' (1) kills greater than 0
#' 1=yes, >0 kills
#' 2-no, 0 kills
#' (2) kills, log of
# ind.nkill == 1 appears to encode "zero kills"; TRUE means nkill > 0 -- TODO
# confirm against 02-selection.R.
ind.nkill.gt0 <-
data$ind.nkill!=1
# Binary outcome as 0/1 numeric.
y1.nkill.gt0 <-
as.numeric(ind.nkill.gt0)
# Log kill counts, computed only for incidents with at least one kill.
nkill.log <-
log(as.numeric(unlist(data[which(ind.nkill.gt0), "nkill"])))
# Second outcome: NA where nkill == 0, log(nkill) otherwise.
y2.nkill.log <-
rep(NA, length(ind.nkill.gt0))
y2.nkill.log <-
replace(y2.nkill.log, which(ind.nkill.gt0), nkill.log)
# Attach both outcomes to the analysis data frame.
data <-
mutate(data, y1.nkill.gt0=y1.nkill.gt0, y2.nkill.log=y2.nkill.log)
# 04-001-02-plots ---------------------------------------------------------
#' Mosaic plots to y1 (greater than 0 kills)
# NOTE(review): crosstab() comes from the descr package, but library(descr)
# is commented out in the setup block -- confirm it is attached elsewhere.
fig.name.pre <- "04-001-02"
# (predictor, outcome) pairs to cross-tabulate against the binary outcome.
vars.crosstab <- list(
c("iyear", "y1.nkill.gt0"),
c("extended", "y1.nkill.gt0"),
c("region", "y1.nkill.gt0"),
c("multiple", "y1.nkill.gt0"),
c("suicide", "y1.nkill.gt0"),
c("attacktype1", "y1.nkill.gt0"),
c("claimed", "y1.nkill.gt0"),
c("weaptype1", "y1.nkill.gt0")
)
# One PDF per pair, named <path.output>/<prefix>-<VAR1>-<VAR2>.pdf
for (i in vars.crosstab) {
fig.name <- paste(path.output,
paste(
paste(fig.name.pre,
paste(toupper(i), collapse="-"), sep="-"),
"pdf", sep="."),
sep="/")
pdf(file=fig.name)
crosstab(data[[i[[1]]]], data[[i[[2]]]],
xlab=i[[1]], ylab=i[[2]])
dev.off()
}
#' (1) XXX ADD SUMMARY
#' Box plots to y2 (for gt0 kills, log(kills))
fig.name.pre <- "04-001-02"
vars.crosstab <- list(
c("iyear", "y2.nkill.log"),
c("extended", "y2.nkill.log"),
c("region", "y2.nkill.log"),
c("multiple", "y2.nkill.log"),
c("suicide", "y2.nkill.log"),
c("attacktype1", "y2.nkill.log"),
c("claimed", "y2.nkill.log"),
c("weaptype1", "y2.nkill.log")
)
for (i in vars.crosstab) {
fig.name <- paste(path.output,
paste(
paste(fig.name.pre,
paste(toupper(i), collapse="-"), sep="-"),
"pdf", sep="."),
sep="/")
pdf(file=fig.name)
# NOTE(review): get() inside a formula is fragile -- it is not evaluated in
# `data`'s column scope; verify it resolves the intended columns.
boxplot(get(i[[2]])~get(i[[1]]), data=data, xlab=i[[1]], ylab=i[[2]])
dev.off()
}
#' (1) XXX ADD SUMMARY
# 04-002-01-partition -----------------------------------------------------
# Reproducible 70/30 train/test split, stratified on the binary outcome.
set.seed(3456)
ind.train <- createDataPartition(data$y1.nkill.gt0, p=0.7, list=FALSE, times=1)
head(ind.train)
data.train <- data[ind.train,]
data.test <- data[-ind.train,]
# 04-003-01-logistic ------------------------------------------------------
# Logistic regression of "any kills" on incident characteristics.
# FIX(review): use <- for assignment instead of =.
mod.logit <- glm(
y1.nkill.gt0 ~
extended +
region +
suicide +
attacktype1 +
claimed +
weaptype1,
family=binomial(logit),
data=data.train)
mod.logit.summary <- summary(mod.logit)
# Call:
# glm(formula = y1.nkill.gt0 ~ extended + region + suicide + attacktype1 +
# claimed + weaptype1, family = binomial(logit), data = data.train)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -3.645 -0.891 0.051 0.786 3.259
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 2.989335 1.289577 2.32 0.02045 *
# extended1 -0.396945 0.136897 -2.90 0.00374 **
# region2 -0.238416 0.742793 -0.32 0.74823
# region3 0.424098 0.407504 1.04 0.29801
# region4 2.267816 0.600009 3.78 0.00016 ***
# region5 0.660312 0.392815 1.68 0.09277 .
# region6 1.030795 0.389407 2.65 0.00812 **
# region7 -1.272810 1.087317 -1.17 0.24176
# region8 -1.655852 0.466763 -3.55 0.00039 ***
# region9 -0.759227 0.620892 -1.22 0.22141
# region10 1.554028 0.389987 3.98 0.000067530254832 ***
# region11 1.610873 0.393586 4.09 0.000042618149524 ***
# region12 0.430974 0.402491 1.07 0.28427
# region13 -0.000891 1.350251 0.00 0.99947
# suicide1 6.835571 0.722795 9.46 < 0.0000000000000002 ***
# attacktype12 -4.383903 0.583412 -7.51 0.000000000000057 ***
# attacktype13 -5.356724 0.589194 -9.09 < 0.0000000000000002 ***
# attacktype14 -8.180578 0.854953 -9.57 < 0.0000000000000002 ***
# attacktype15 -6.518312 0.776560 -8.39 < 0.0000000000000002 ***
# attacktype16 -5.590092 0.596050 -9.38 < 0.0000000000000002 ***
# attacktype17 -7.192484 0.604338 -11.90 < 0.0000000000000002 ***
# attacktype18 -7.308572 0.674997 -10.83 < 0.0000000000000002 ***
# attacktype19 7.282968 137.910031 0.05 0.95788
# claimed1 0.559636 0.054851 10.20 < 0.0000000000000002 ***
# weaptype15 1.751979 1.080778 1.62 0.10501
# weaptype16 0.618216 1.088424 0.57 0.57004
# weaptype17 -6.805785 196.971634 -0.03 0.97244
# weaptype18 -0.005351 1.090670 0.00 0.99609
# weaptype19 2.352771 1.068585 2.20 0.02768 *
# weaptype110 1.503176 1.448941 1.04 0.29954
# weaptype111 -0.413271 1.509714 -0.27 0.78428
# weaptype112 1.846236 1.314407 1.40 0.16014
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 28088 on 20339 degrees of freedom
# Residual deviance: 20683 on 20308 degrees of freedom
# AIC: 20747
#
# Number of Fisher Scoring iterations: 10
#' Sorted coefficients
# Rank coefficients by estimated effect on the log-odds, largest first.
mod.logit.summary.coef <- mod.logit.summary$coefficients
mod.logit.summary.coef[order(mod.logit.summary.coef[,"Estimate"],
decreasing=TRUE),]
# Estimate Std. Error z value Pr(>|z|)
# attacktype19 7.2829683579 137.91003072 5.280956e-02 9.578836e-01
# suicide1 6.8355714668 0.72279536 9.457132e+00 3.165037e-21
# (Intercept) 2.9893354706 1.28957684 2.318075e+00 2.044526e-02
# weaptype19 2.3527708671 1.06858462 2.201764e+00 2.768200e-02
# region4 2.2678163198 0.60000932 3.779635e+00 1.570583e-04
# weaptype112 1.8462363900 1.31440694 1.404616e+00 1.601356e-01
# weaptype15 1.7519792154 1.08077825 1.621035e+00 1.050102e-01
# region11 1.6108734512 0.39358642 4.092808e+00 4.261815e-05
# region10 1.5540284847 0.38998676 3.984824e+00 6.753025e-05
# weaptype110 1.5031762247 1.44894133 1.037431e+00 2.995352e-01
# region6 1.0307951504 0.38940660 2.647092e+00 8.118723e-03
# region5 0.6603120692 0.39281497 1.680975e+00 9.276781e-02
# weaptype16 0.6182160868 1.08842401 5.679920e-01 5.700404e-01
# claimed1 0.5596358775 0.05485053 1.020293e+01 1.923888e-24
# region12 0.4309740356 0.40249142 1.070766e+00 2.842748e-01
# region3 0.4240979631 0.40750440 1.040720e+00 2.980055e-01
# region13 -0.0008905702 1.35025127 -6.595589e-04 9.994737e-01
# weaptype18 -0.0053510358 1.09067044 -4.906189e-03 9.960854e-01
# region2 -0.2384155769 0.74279312 -3.209717e-01 7.482318e-01
# extended1 -0.3969452342 0.13689705 -2.899589e+00 3.736518e-03
# weaptype111 -0.4132707776 1.50971353 -2.737412e-01 7.842835e-01
# region9 -0.7592268393 0.62089229 -1.222800e+00 2.214054e-01
# region7 -1.2728096269 1.08731700 -1.170597e+00 2.417609e-01
# region8 -1.6558524480 0.46676272 -3.547525e+00 3.888688e-04
# attacktype12 -4.3839029682 0.58341248 -7.514243e+00 5.724131e-14
# attacktype13 -5.3567242918 0.58919352 -9.091621e+00 9.757317e-20
# attacktype16 -5.5900922418 0.59605001 -9.378562e+00 6.687830e-21
# attacktype15 -6.5183116674 0.77656025 -8.393826e+00 4.705722e-17
# weaptype17 -6.8057854851 196.97163353 -3.455211e-02 9.724369e-01
# attacktype17 -7.1924838135 0.60433826 -1.190142e+01 1.163494e-32
# attacktype18 -7.3085720943 0.67499665 -1.082757e+01 2.548338e-27
# attacktype14 -8.1805775435 0.85495250 -9.568458e+00 1.085115e-21
#' (1) suicide has a very big impact on log odds, with 6.82 value
#' (2) attacktype1 and weaptype19 seem suspect, the prior especially;
#' consider removing them and redoing, or removing some levels or something
#' (3) regions 4,10,11,6 all showing significant positive effects
#' (4) claimed has a small, but positive, sig. B
#' (5) region8 has significant negative effect of -1.6
#' (6) if they are real attack types 2,3,6,5,7,8,4 all have negative effects
#' (7) weapon type 7 also has sig. neg. B; this effect has very large error
# 04-004-01-conf ints -----------------------------------------------------
confint(mod.logit)
# 2.5 % 97.5 %
# (Intercept) -0.171 5.34 *
# extended1 -0.665 -0.13 **
# region2 -1.762 1.18
# region3 -0.350 1.25
# region4 1.124 3.49 *** East Asia
# region5 -0.083 1.46 .
# region6 0.295 1.83 ** South Asia
# region7 -3.535 0.76
# region8 -2.563 -0.73 *** Western Europe
# region9 -2.021 0.44
# region10 0.817 2.35 *** Middle East & North Africa
# region11 0.866 2.42 *** Sub-Saharan Africa
# region12 -0.333 1.25
# region13 -3.243 2.40
# suicide1 5.674 8.65 ***
# attacktype12 -5.784 -3.42 *** Armed Assault
# attacktype13 -6.764 -4.37 *** Bombing/Explosion
# attacktype14 -10.084 -6.67 *** Hijacking
# attacktype15 -8.218 -5.11 *** Hostage Taking (Barricade Incident)
# attacktype16 -7.007 -4.59 *** Hostage Taking (Kidnapping)
# attacktype17 -8.621 -6.17 *** Facility/Infrastructure Attack
# attacktype18 -8.838 -6.12 *** Unarmed Assault
# attacktype19 -14.185 NA
# claimed1 0.452 0.67 ***
# weaptype15 0.018 4.69
# weaptype16 -1.137 3.57
# weaptype17 NA 27.00
# weaptype18 -1.767 2.95
# weaptype19 0.654 5.28 * Melee
# weaptype110 -1.391 4.82
# weaptype111 -3.772 2.94
# weaptype112 -0.554 5.04
confint.default(mod.logit)
exp(cbind(OR=coef(mod.logit), confint(mod.logit)))
# OR 2.5 % 97.5 %
# (Intercept) 19.87247 0.84260327 207.7527
# extended1 0.67237 0.51401828 0.8793
# region2 0.78788 0.17161847 3.2589
# region3 1.52821 0.70445279 3.5042
# region4 9.65829 3.07570210 32.6959
# region5 1.93540 0.92013960 4.3226
# region6 2.80329 1.34245578 6.2234
# region7 0.28004 0.02916424 2.1486
# region8 0.19093 0.07703580 0.4843
# region9 0.46803 0.13251168 1.5464
# region10 4.73049 2.26274417 10.5135
# region11 5.00718 2.37709366 11.2003
# region12 1.53876 0.71684169 3.4968
# region13 0.99911 0.03905851 10.9689
# suicide1 930.35987 291.05424516 5718.5431
# attacktype12 0.01248 0.00307649 0.0328
# attacktype13 0.00472 0.00115421 0.0126
# attacktype14 0.00028 0.00004176 0.0013
# attacktype15 0.00148 0.00026964 0.0060
# attacktype16 0.00373 0.00090558 0.0102
# attacktype17 0.00075 0.00018034 0.0021
# attacktype18 0.00067 0.00014508 0.0022
# attacktype19 1455.30148 0.00000069 NA -seems error; low data
# claimed1 1.75004 1.57200131 1.9491
# weaptype15 5.76600 1.01835220 109.3549
# weaptype16 1.85561 0.32074356 35.4727
# weaptype17 0.00111 NA 529705868740.7859 -seems error; low data
# weaptype18 0.99466 0.17083183 19.0583
# weaptype19 10.51466 1.92276007 196.9670
# weaptype110 4.49595 0.24871265 123.7160
# weaptype111 0.66148 0.02299465 18.9764
# weaptype112 6.33593 0.57488838 154.1015
#' Test overall significance of region
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=3:14)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 568.7, df = 12, P(> X2) = 0.0
#' Test overall significance of attacktype1
# Fix: Terms=3:14 was copy-pasted from the region test above and re-tested
# region. The attacktype1 dummies (attacktype12..attacktype19) are
# coefficients 16:23 in the model — consistent with df = 8 in the recorded
# output below.
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=16:23)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 568.0, df = 8, P(> X2) = 0.0
#' Test overall significance of weaptype1
wald.test(b=coef(mod.logit), Sigma=vcov(mod.logit), Terms=25:32)
# Wald test:
# ----------
#
# Chi-squared test:
# X2 = 201.3, df = 8, P(> X2) = 0.0
# Model chi-squared: reduction in deviance relative to the null model
with(mod.logit, null.deviance - deviance)
# p-value of the overall likelihood-ratio test
# (an accidental duplicate of this line was removed)
with(mod.logit, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail=FALSE))
# Sequential analysis-of-deviance table for each predictor
anova(mod.logit, test="Chi")
# Training-set confusion table at a 0.5 probability threshold.
# NOTE(review): if y1.nkill.gt0 is a factor, `> 0` is not meaningful and
# yields NA — confirm it is numeric 0/1 here (the TEST section below
# tabulates the same column without `> 0`).
table(data.train$y1.nkill.gt0>0, fitted(mod.logit)>0.5)
# TEST --------------------------------------------------------------------
# Out-of-sample predicted probabilities on the held-out set
probs.test <- predict(mod.logit, data.test, type="response")
#' Version 1
# Hard 0/1 classification at a 0.5 threshold (0.5 itself maps to 1 here)
y1.nkill.gt0.est <- rep(0, length(probs.test))
y1.nkill.gt0.est[probs.test >= 0.5] <- 1
table(data.test$y1.nkill.gt0, y1.nkill.gt0.est)
#' Version 2
# Same classification via cut(). NOTE(review): cut() uses right-closed
# intervals, so a probability of exactly 0.5 lands in (-Inf, 0.5] and is
# labeled 0 — Versions 1 and 2 disagree only at exactly 0.5.
y1.nkill.gt0.est.v2 <- cut(probs.test, breaks=c(-Inf, 0.5, Inf), labels=c(0,1))
table(data.test$y1.nkill.gt0, y1.nkill.gt0.est.v2)
#' Confusion matrix
# caret::confusionMatrix — both arguments must be factors with matching levels
confusionMatrix(data.test$y1.nkill.gt0, y1.nkill.gt0.est.v2)
#' ROC curve
# ROCR prediction object pairs predicted probabilities with the true labels
pred.fit = prediction(probs.test, data.test$y1.nkill.gt0)
# True-positive rate vs. false-positive rate across all thresholds
perf.fit = performance(pred.fit, "tpr", "fpr")
plot(perf.fit,
     col="blue",
     lwd=2,
     main="ROC curve: logit model on y1 (nkill > 0)")
# 45-degree reference line: performance of a random classifier
abline(a=0, b=1, lwd=2, lty=2, col="gray")
# FIT WITH CARET ----------------------------------------------------------
# Refit the same logistic specification through caret::train (method="glm")
# as a cross-check on the direct glm() fit above.
mod.logit.v2 = train(y1.nkill.gt0 ~ extended + region + suicide + attacktype1 + claimed + weaptype1,
                     method="glm",
                     data=data.train,
                     family=binomial(link='logit'))
# NOTE(review): caret's dummy coding differs from glm's (see claimed0 vs
# claimed1 and the weaptype singularities in the recorded output below).
mod.logit.v2.summary <- summary(mod.logit.v2)
# Call:
# NULL
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -3.6446 -0.8912 0.0511 0.7855 3.2587
#
# Coefficients: (3 not defined because of singularities)
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 5.395e+00 1.071e+00 5.039 4.68e-07 ***
# extended1 -3.969e-01 1.369e-01 -2.900 0.003737 **
# region2 -2.384e-01 7.428e-01 -0.321 0.748232
# region3 4.241e-01 4.075e-01 1.041 0.298006
# region4 2.268e+00 6.000e-01 3.780 0.000157 ***
# region5 6.603e-01 3.928e-01 1.681 0.092768 .
# region6 1.031e+00 3.894e-01 2.647 0.008119 **
# region7 -1.273e+00 1.087e+00 -1.171 0.241761
# region8 -1.656e+00 4.668e-01 -3.548 0.000389 ***
# region9 -7.592e-01 6.209e-01 -1.223 0.221405
# region10 1.554e+00 3.900e-01 3.985 6.75e-05 ***
# region11 1.611e+00 3.936e-01 4.093 4.26e-05 ***
# region12 4.310e-01 4.025e-01 1.071 0.284275
# region13 -8.906e-04 1.350e+00 -0.001 0.999474
# suicide1 6.836e+00 7.228e-01 9.457 < 2e-16 ***
# attacktype12 -4.384e+00 5.834e-01 -7.514 5.72e-14 ***
# attacktype13 -5.357e+00 5.892e-01 -9.092 < 2e-16 ***
# attacktype14 -8.181e+00 8.550e-01 -9.568 < 2e-16 ***
# attacktype15 -6.518e+00 7.766e-01 -8.394 < 2e-16 ***
# attacktype16 -5.590e+00 5.960e-01 -9.379 < 2e-16 ***
# attacktype17 -7.192e+00 6.043e-01 -11.901 < 2e-16 ***
# attacktype18 -7.309e+00 6.750e-01 -10.828 < 2e-16 ***
# attacktype19 7.283e+00 1.379e+02 0.053 0.957884
# claimed0 -5.596e-01 5.485e-02 -10.203 < 2e-16 ***
# claimed1 NA NA NA NA
# weaptype12 -1.846e+00 1.314e+00 -1.405 0.160136
# weaptype15 -9.426e-02 8.076e-01 -0.117 0.907084
# weaptype16 -1.228e+00 8.184e-01 -1.501 0.133466
# weaptype17 -8.652e+00 1.970e+02 -0.044 0.964964
# weaptype18 -1.852e+00 8.193e-01 -2.260 0.023821 *
# weaptype19 5.065e-01 8.100e-01 0.625 0.531755
# weaptype110 -3.431e-01 1.280e+00 -0.268 0.788681
# weaptype111 -2.260e+00 1.326e+00 -1.703 0.088487 .
# weaptype112 NA NA NA NA
# weaptype113 NA NA NA NA
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 28088 on 20339 degrees of freedom
# Residual deviance: 20683 on 20308 degrees of freedom
# AIC: 20747
#
# Number of Fisher Scoring iterations: 10
# probs.test.v2 <- predict(mod.logit.v2, data.test)
# y1.nkill.gt0.est.vX <- cut(probs.test.v2, breaks=c(-Inf, 0.5, Inf), labels=c(0,1))
# confusionMatrix(data.test$y1.nkill.gt0, y1.nkill.gt0.est.vX)
# bottom ------------------------------------------------------------------
#' Save the data
# saveRDS(data, "data.rds") |
% --- Source file: plotadditivePenal.Rd ---
\name{plot.additivePenal}
\Rdversion{1.1}
\alias{plot.additivePenal}
\alias{lines.additivePenal}
\title{Plot Method for an Additive frailty model.}
\description{
Plots estimated baseline survival and hazard functions of an additive frailty model, more generally of a class `additivePenal' object. Confidence bands are allowed.
}
\usage{
\method{plot}{additivePenal}(x, type.plot="Hazard", conf.bands=TRUE, pos.legend="topright",
cex.legend=0.7, main, color=2, Xlab = "Time",
Ylab = "Hazard function", ...)
}
\arguments{
\item{x}{ A fitted additive frailty model (output from calling \code{additivePenal})}
\item{type.plot}{ a character string specifying the type of curve. Possible
values are "Hazard" or "Survival". The default
is "Hazard". Only the first words are required, e.g. "Haz", "Su"}
\item{conf.bands}{ logical value. Determines whether confidence bands will be plotted. The default is to do so.}
\item{pos.legend}{The location of the legend can be specified by setting this argument to a single keyword from the list '"bottomright"', '"bottom"', '"bottomleft"', '"left"', '"topleft"', '"top"', '"topright"', '"right"' and
'"center"'. The default is '"topright"'}
\item{cex.legend}{character expansion factor *relative* to current
'par("cex")'. Default is 0.7}
\item{main}{plot title}
\item{color}{curve color (integer)}
\item{Xlab}{Label of x-axis. Default is '"Time"'}
\item{Ylab}{Label of y-axis. Default is '"Hazard function"'}
\item{\dots}{ Other graphical parameters like those in \code{\link{plot.frailtyPenal}}}
}
\value{
Print a plot of HR and survival function of a class \code{additivePenal} object
}
\seealso{
\code{\link{additivePenal}}
}
\examples{
\dontrun{
data(dataAdditive)
modAdd <- additivePenal(Surv(t1,t2,event)~cluster(group)+var1+slope(var1),
correlation=TRUE,data=dataAdditive,n.knots=8,kappa=862,hazard="Splines")
#-- 'var1' is boolean as a treatment variable
plot(modAdd)
}
}
\keyword{methods}
| /man/plotAdditive.Rd | no_license | aminKMT/frailtypack | R | false | false | 2,151 | rd | % --- Source file: plotadditivePenal.Rd ---
\name{plot.additivePenal}
\Rdversion{1.1}
\alias{plot.additivePenal}
\alias{lines.additivePenal}
\title{Plot Method for an Additive frailty model.}
\description{
Plots estimated baseline survival and hazard functions of an additive frailty model, more generally of a class `additivePenal' object. Confidence bands are allowed.
}
\usage{
\method{plot}{additivePenal}(x, type.plot="Hazard", conf.bands=TRUE, pos.legend="topright",
cex.legend=0.7, main, color=2, Xlab = "Time",
Ylab = "Hazard function", ...)
}
\arguments{
\item{x}{ A fitted additive frailty model (output from calling \code{additivePenal})}
\item{type.plot}{ a character string specifying the type of curve. Possible
values are "Hazard" or "Survival". The default
is "Hazard". Only the first words are required, e.g. "Haz", "Su"}
\item{conf.bands}{ logical value. Determines whether confidence bands will be plotted. The default is to do so.}
\item{pos.legend}{The location of the legend can be specified by setting this argument to a single keyword from the list '"bottomright"', '"bottom"', '"bottomleft"', '"left"', '"topleft"', '"top"', '"topright"', '"right"' and
'"center"'. The default is '"topright"'}
\item{cex.legend}{character expansion factor *relative* to current
'par("cex")'. Default is 0.7}
\item{main}{plot title}
\item{color}{curve color (integer)}
\item{Xlab}{Label of x-axis. Default is '"Time"'}
\item{Ylab}{Label of y-axis. Default is '"Hazard function"'}
\item{\dots}{ Other graphical parameters like those in \code{\link{plot.frailtyPenal}}}
}
\value{
Print a plot of HR and survival function of a class \code{additivePenal} object
}
\seealso{
\code{\link{additivePenal}}
}
\examples{
\dontrun{
data(dataAdditive)
modAdd <- additivePenal(Surv(t1,t2,event)~cluster(group)+var1+slope(var1),
correlation=TRUE,data=dataAdditive,n.knots=8,kappa=862,hazard="Splines")
#-- 'var1' is boolean as a treatment variable
plot(modAdd)
}
}
\keyword{methods}
|
# library() errors immediately if ROCR is missing; require() only returns
# FALSE, deferring the failure to the first prediction() call.
library(ROCR)
# Compute the area under the ROC curve for predicted class probabilities.
#
# predict: matrix/data.frame of class probabilities; column 2 holds the
#          probability of the positive class
# target:  vector of true class labels
#
# Returns the AUC as reported by ROCR (a list containing one numeric value).
auc <- function(predict, target) {
  rocr <- prediction(predict[, 2], target)
  # (removed an unused tpr/fpr performance object that was only needed for
  # the commented-out plot below)
  # roc <- performance(rocr, "tpr", "fpr")
  # plot(roc, colorize = TRUE)
  performance(rocr, "auc")@y.values
}
# Apply the Anscombe variance-stabilizing transform, sqrt(x + 3/8), to every
# column of a data.frame. Columns are coerced to numeric first.
#
# x: data.frame (or list-like) of numeric-coercible columns
# Returns x with each column transformed.
Anscombe_Transform <- function(x){
  # seq_len() is safe for zero-column input, unlike 1:ncol(x) which would
  # iterate over c(1, 0) and fail on the out-of-bounds subscript
  for(i in seq_len(ncol(x))){
    # single vectorized expression replaces the two-step coerce-then-sqrt
    x[,i] <- sqrt(as.numeric(x[,i]) + (3/8))
  }
  return(x)
}
# Return the rows of a data.frame in uniformly random order.
#
# The original implementation sampled an id column with replace=T and
# sorted on it; duplicated ids left tied rows in their original relative
# order, so the shuffle was not a uniform permutation (and briefly added a
# temporary 'id2' column). sample(n) draws a true permutation directly.
shuffle <- function(sf){
  sf[sample(nrow(sf)), , drop = FALSE]
}
# Linearly rescale a numeric vector onto [0, 1].
# (Constant input yields NaN, and NA values propagate, as before.)
rangeScale <- function(x){
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
center_scale <- function(x) {
scale(x, scale = FALSE)
} | /Main/0_function.R | no_license | Sandy4321/KDD2015-4 | R | false | false | 638 | r | require(ROCR)
# Compute the area under the ROC curve (ROCR); column 2 of `predict` holds
# the positive-class probability. Returns a list with one numeric value.
auc <- function(predict, target) {
  rocr <- prediction(predict[, 2], target)
  # `roc` is only needed by the commented-out plot below
  roc <- performance(rocr, "tpr", "fpr")
  # plot(roc, colorize = TRUE)
  performance(rocr, "auc")@y.values
}
# Apply the Anscombe variance-stabilizing transform, sqrt(x + 3/8), to
# every column of x after coercing it to numeric.
# NOTE(review): 1:ncol(x) iterates over c(1, 0) for a zero-column input
# and errors; seq_len(ncol(x)) would be safe.
Anscombe_Transform <- function(x){
  for(i in 1:ncol(x)){
    x[,i] <- as.numeric(x[,i])
    x[,i] <- sqrt(x[,i]+(3/8))
  }
  return(x)
}
# Shuffle the rows of a data.frame by sorting on a random id column.
# NOTE(review): replace=T draws ids with replacement, so tied rows keep
# their original relative order — this is not a uniform permutation.
shuffle <- function(sf){
  sf[,'id2'] <- sample(1:nrow(sf), nrow(sf), replace=T)
  sf <- sf[order(sf$id2),]
  # drop the temporary id column before returning
  sf[,'id2'] <- NULL
  return (sf)
}
# Linearly rescale a numeric vector onto [0, 1]; constant input yields NaN.
rangeScale <- function(x){
  (x-min(x))/(max(x)-min(x))
}
# Mean-center the columns of x without scaling to unit variance.
center_scale <- function(x) {
  scale(x, center = TRUE, scale = FALSE)
}
#' Import a Python module
#'
#' Import the specified Python module for calling from R.
#'
#' @param module Module name
#' @param as Alias for module name (affects names of R classes)
#' @param path Path to import from
#' @param convert `TRUE` to automatically convert Python objects to their R
#' equivalent. If you pass `FALSE` you can do manual conversion using the
#' [py_to_r()] function.
#' @param delay_load `TRUE` to delay loading the module until it is first used.
#' `FALSE` to load the module immediately. If a function is provided then it
#' will be called once the module is loaded. If a list containing `on_load()`
#' and `on_error(e)` elements is provided then `on_load()` will be called on
#' successful load and `on_error(e)` if an error occurs.
#'
#' @details The `import_from_path` function imports a Python module from an
#' arbitrary filesystem path (the directory of the specified python script is
#' automatically added to the `sys.path`).
#'
#' @return A Python module
#'
#' @examples
#' \dontrun{
#' main <- import_main()
#' sys <- import("sys")
#' }
#'
#' @export
import <- function(module, as = NULL, convert = TRUE, delay_load = FALSE) {
  # if there is an as argument then register a filter for it
  # (rewrites R class names so `module.Foo` appears as `as.Foo`)
  if (!is.null(as)) {
    register_class_filter(function(classes) {
      sub(paste0("^", module), as, classes)
    })
  }
  # resolve delay load: a function is shorthand for list(on_load = f); a
  # list may also carry environment/priority/get_module/on_error hooks
  delay_load_environment <- NULL
  delay_load_priority <- 0
  delay_load_functions <- NULL
  if (is.function(delay_load)) {
    delay_load_functions <- list(on_load = delay_load)
    delay_load <- TRUE
  } else if (is.list(delay_load)) {
    delay_load_environment <- delay_load$environment
    delay_load_functions <- delay_load
    if (!is.null(delay_load$priority))
      delay_load_priority <- delay_load$priority
    delay_load <- TRUE
  }
  # normal case (load immediately); also taken when the interpreter is
  # already running, since delaying would gain nothing
  if (!delay_load || is_python_initialized()) {
    # ensure that python is initialized (pass top level module as
    # a hint as to which version of python to choose)
    ensure_python_initialized(required_module = module)
    # import the module
    py_module_import(module, convert = convert)
  }
  # delay load case (wait until first access)
  else {
    # record the highest-priority pending module so initialization can use
    # it as a hint when locating a Python installation
    if (is.null(.globals$delay_load_module) || (delay_load_priority > .globals$delay_load_priority)) {
      .globals$delay_load_module <- module
      .globals$delay_load_environment <- delay_load_environment
      .globals$delay_load_priority <- delay_load_priority
    }
    # return a proxy environment classed as a module; attribute access on it
    # triggers the actual import (see py_resolve_module_proxy)
    module_proxy <- new.env(parent = emptyenv())
    module_proxy$module <- module
    module_proxy$convert <- convert
    if (!is.null(delay_load_functions)) {
      module_proxy$get_module <- delay_load_functions$get_module
      module_proxy$on_load <- delay_load_functions$on_load
      module_proxy$on_error <- delay_load_functions$on_error
    }
    attr(module_proxy, "class") <- c("python.builtin.module",
                                     "python.builtin.object")
    module_proxy
  }
}
#' @rdname import
#' @export
import_main <- function(convert = TRUE) {
  ensure_python_initialized()
  # __main__ holds the interpreter's top-level namespace
  main_name <- "__main__"
  import(main_name, convert = convert)
}
#' @rdname import
#' @export
import_builtins <- function(convert = TRUE) {
  ensure_python_initialized()
  # the builtins module was renamed between Python 2 and Python 3
  builtins_name <- if (is_python3()) "builtins" else "__builtin__"
  import(builtins_name, convert = convert)
}
#' @rdname import
#' @export
import_from_path <- function(module, path = ".", convert = TRUE) {
  # normalize path
  path <- normalizePath(path)
  # prepend the path to sys.path so the module can be located
  sys <- import("sys", convert = FALSE)
  sys$path$insert(as.integer(0), path)
  # remove the inserted entry again even if the import fails
  on.exit(sys$path$pop(as.integer(0)), add = TRUE)
  # return the imported module. Previously sys$path$pop() was the last
  # expression, so the function returned the removed path string instead
  # of the module.
  import(module, convert = convert)
}
#' @export
# Printing a Python object delegates to its str() method below.
print.python.builtin.object <- function(x, ...) {
  str(x, ...)
}
#' @importFrom utils str
#' @export
# Compact display: Python's string representation, or a null-pointer
# placeholder when the interpreter is unavailable or the xptr is NULL.
str.python.builtin.object <- function(object, ...) {
  if (!py_available() || py_is_null_xptr(object))
    cat("<pointer: 0x0>\n")
  else
    cat(py_str(object), "\n", sep="")
}
#' @export
# An unresolved module proxy prints its name without forcing the import.
str.python.builtin.module <- function(object, ...) {
  if (py_is_module_proxy(object)) {
    cat("Module(", get("module", envir = object), ")\n", sep = "")
  } else {
    cat(py_str(object), "\n", sep = "")
  }
}
#' @export
# Character conversion uses Python's string representation.
as.character.python.builtin.object <- function(x, ...) {
  py_str(x)
}
# Rich-comparison operators: each delegates to py_compare(), which forwards
# the operator to the Python runtime (see py_compare below).
#' @export
"==.python.builtin.object" <- function(a, b) {
  py_compare(a, b, "==")
}
#' @export
"!=.python.builtin.object" <- function(a, b) {
  py_compare(a, b, "!=")
}
#' @export
"<.python.builtin.object" <- function(a, b) {
  py_compare(a, b, "<")
}
#' @export
">.python.builtin.object" <- function(a, b) {
  py_compare(a, b, ">")
}
#' @export
">=.python.builtin.object" <- function(a, b) {
  py_compare(a, b, ">=")
}
#' @export
"<=.python.builtin.object" <- function(a, b) {
  py_compare(a, b, "<=")
}
# Compare a Python object against another value with the given operator.
# A non-Python right-hand side is first converted via r_to_py().
py_compare <- function(a, b, op) {
  ensure_python_initialized()
  py_validate_xptr(a)
  rhs <- if (inherits(b, "python.builtin.object")) b else r_to_py(b)
  py_validate_xptr(rhs)
  py_compare_impl(a, rhs, op)
}
#' @export
# summary() on a Python object is the same as its compact str() display.
summary.python.builtin.object <- function(object, ...) {
  str(object)
}
#' @export
`$.python.builtin.module` <- function(x, name) {
  # resolve module proxies (forces the delayed import on first access)
  if (py_is_module_proxy(x))
    py_resolve_module_proxy(x)
  `$.python.builtin.object`(x, name)
}
# Report whether a Python object's backing environment carries an explicit
# `convert` flag; objects without one convert by default (TRUE).
py_has_convert <- function(x) {
  # resolve the environment wrapped by the object
  env <- as.environment(x)
  if (!exists("convert", envir = env, inherits = FALSE))
    return(TRUE)
  get("convert", envir = env, inherits = FALSE)
}
#' @export
`$.python.builtin.object` <- function(x, name) {
  # resolve module proxies (forces the delayed import on first access)
  if (py_is_module_proxy(x))
    py_resolve_module_proxy(x)
  # skip if this is a NULL xptr
  if (py_is_null_xptr(x) || !py_available())
    return(NULL)
  # determine whether this object auto-converts Python values to R
  convert <- py_has_convert(x)
  # special handling for embedded modules (which don't always show
  # up as "attributes")
  if (py_is_module(x) && !py_has_attr(x, name)) {
    module <- py_get_submodule(x, name, convert)
    if (!is.null(module))
      return(module)
  }
  # get the attrib: numeric names index via __getitem__, dict keys go
  # through the dict item API, everything else is plain attribute access
  if (is.numeric(name) && (length(name) == 1) && py_has_attr(x, "__getitem__"))
    attrib <- x$`__getitem__`(as.integer(name))
  else if (inherits(x, "python.builtin.dict"))
    attrib <- py_dict_get_item(x, name)
  else
    attrib <- py_get_attr(x, name)
  # convert (callables are always passed through py_to_r so they become
  # callable R functions)
  if (convert || py_is_callable(attrib)) {
    # capture previous convert for attr
    attrib_convert <- py_has_convert(attrib)
    # temporarily change convert so we can call py_to_r and get S3 dispatch
    envir <- as.environment(attrib)
    assign("convert", convert, envir = envir)
    on.exit(assign("convert", attrib_convert, envir = envir), add = TRUE)
    # call py_to_r
    py_to_r(attrib)
  }
  else
    attrib
}
# the as.environment generic enables python objects that manifest
# as R functions (e.g. for functions, classes, callables, etc.) to
# be automatically converted to environments during the construction
# of PyObjectRef. This makes them a seamless drop-in for standard
# python objects represented as environments
#' @export
as.environment.python.builtin.object <- function(x) {
  # function wrappers stash the backing object in the py_object attribute
  if (is.function(x))
    attr(x, "py_object")
  else
    x
}
#' @export
# [[ indexing shares the implementation of $ above
`[[.python.builtin.object` <- `$.python.builtin.object`
#' @export
# Attribute assignment: delegates to py_set_attr, erroring on a NULL
# reference rather than silently doing nothing.
`$<-.python.builtin.object` <- function(x, name, value) {
  if (!py_is_null_xptr(x) && py_available())
    py_set_attr(x, name, value)
  else
    stop("Unable to assign value (object reference is NULL)")
  x
}
#' @export
`[[<-.python.builtin.object` <- `$<-.python.builtin.object`
#' @export
# Dict item assignment uses the dict item API rather than attributes.
`$<-.python.builtin.dict` <- function(x, name, value) {
  if (!py_is_null_xptr(x) && py_available())
    py_dict_set_item(x, name, value)
  else
    stop("Unable to assign value (dict reference is NULL)")
  x
}
#' @export
`[[<-.python.builtin.dict` <- `$<-.python.builtin.dict`
#' @export
# A NULL/unavailable dict reports length 0 instead of erroring.
length.python.builtin.dict <- function(x) {
  if (py_is_null_xptr(x) || !py_available())
    0L
  else
    py_dict_length(x)
}
#' @export
# Completion support for modules: force the delayed import first, then
# fall back to the generic object completion below.
.DollarNames.python.builtin.module <- function(x, pattern = "") {
  # resolve module proxies (ignore errors since this is occurring during completion)
  result <- tryCatch({
    if (py_is_module_proxy(x))
      py_resolve_module_proxy(x)
    TRUE
  }, error = clear_error_handler(FALSE))
  if (!result)
    return(character())
  # delegate
  .DollarNames.python.builtin.object(x, pattern)
}
#' @importFrom utils .DollarNames
#' @export
# Completion support: enumerate an object's public attributes (dict keys for
# dictionaries), annotate each with a type code, and attach a help handler.
.DollarNames.python.builtin.object <- function(x, pattern = "") {
  # skip if this is a NULL xptr
  if (py_is_null_xptr(x) || !py_available())
    return(character())
  # check for dictionary
  if (inherits(x, "python.builtin.dict")) {
    names <- py_dict_get_keys_as_str(x)
    names <- names[substr(names, 1, 1) != '_']
    Encoding(names) <- "UTF-8"
    # dict keys all share type code 0
    types <- rep_len(0L, length(names))
  } else {
    # get the names and filter out internal attributes (_*)
    names <- py_suppress_warnings(py_list_attributes(x))
    names <- names[substr(names, 1, 1) != '_']
    # replace function with `function` (reserved word in R)
    names <- sub("^function$", "`function`", names)
    names <- sort(names, decreasing = FALSE)
    # get the types
    types <- py_suppress_warnings(py_get_attribute_types(x, names))
  }
  # if this is a module then add submodules (type code 5)
  if (inherits(x, "python.builtin.module")) {
    name <- py_get_name(x)
    if (!is.null(name)) {
      submodules <- sort(py_list_submodules(name), decreasing = FALSE)
      Encoding(submodules) <- "UTF-8"
      names <- c(names, submodules)
      types <- c(types, rep_len(5L, length(submodules)))
    }
  }
  if (length(names) > 0) {
    # set types
    attr(names, "types") <- types
    # specify a help_handler
    attr(names, "helpHandler") <- "reticulate:::help_handler"
  }
  # return
  names
}
#' @export
# names() reuses the completion machinery, stripping its attributes.
names.python.builtin.object <- function(x) {
  as.character(.DollarNames(x))
}
#' @export
names.python.builtin.module <- function(x) {
  as.character(.DollarNames(x))
}
# Coercion methods for numpy arrays: py_to_r() performs the actual
# conversion; the vector/double variants go through as.array() first.
#' @export
as.array.numpy.ndarray <- function(x, ...) {
  py_to_r(x)
}
#' @export
as.matrix.numpy.ndarray <- function(x, ...) {
  py_to_r(x)
}
#' @export
as.vector.numpy.ndarray <- function(x, mode = "any") {
  a <- as.array(x)
  as.vector(a, mode = mode)
}
#' @export
as.double.numpy.ndarray <- function(x, ...) {
  a <- as.array(x)
  as.double(a)
}
#' @importFrom graphics plot
#' @export
# Plot the converted array (y is required by the generic but unused).
plot.numpy.ndarray <- function(x, y, ...) {
  plot(as.array(x))
}
#' Create Python dictionary
#'
#' Create a Python dictionary object, including a dictionary whose keys are
#' other Python objects rather than character vectors.
#'
#' @param ... Name/value pairs for dictionary (or a single named list to be
#'   converted to a dictionary).
#' @param keys Keys to dictionary (can be Python objects)
#' @param values Values for dictionary
#' @param convert `TRUE` to automatically convert Python objects to their R
#'   equivalent. If you pass `FALSE` you can do manual conversion using the
#'   [py_to_r()] function.
#'
#' @return A Python dictionary
#'
#' @note The returned dictionary will not automatically convert its elements
#'   from Python to R. You can do manual conversion with the [py_to_r()]
#'   function or pass `convert = TRUE` to request automatic conversion.
#'
#' @export
dict <- function(..., convert = FALSE) {
  ensure_python_initialized()
  # get the args
  values <- list(...)
  # flag indicating whether we should scan the parent frame for python
  # objects that should serve as the key (e.g. a Tensor)
  scan_parent_frame <- TRUE
  # if there is a single element and it's a list then use that
  # (and skip the parent-frame scan: the list's names are the keys)
  if (length(values) == 1 && is.null(names(values)) && is.list(values[[1]])) {
    values <- values[[1]]
    scan_parent_frame <- FALSE
  }
  # get names
  names <- names(values)
  # evaluate names in parent env to get keys
  frame <- parent.frame()
  keys <- lapply(names, function(name) {
    # allow python objects to serve as keys: if the bare name resolves to a
    # Python object in the caller's frame, use that object as the key
    if (scan_parent_frame && exists(name, envir = frame, inherits = TRUE)) {
      key <- get(name, envir = frame, inherits = TRUE)
      if (inherits(key, "python.builtin.object"))
        key
      else
        name
    } else {
      # all-digit names become integer keys
      if (grepl("^[0-9]+$", name))
        name <- as.integer(name)
      else
        name
    }
  })
  # construct dict
  py_dict_impl(keys, values, convert = convert)
}
#' @rdname dict
#' @export
py_dict <- function(keys, values, convert = FALSE) {
  ensure_python_initialized()
  py_dict_impl(keys, values, convert = convert)
}
#' Create Python tuple
#'
#' Create a Python tuple object
#'
#' @inheritParams dict
#' @param ... Values for tuple (or a single list to be converted to a tuple).
#'
#' @return A Python tuple
#' @note The returned tuple will not automatically convert its elements from
#'   Python to R. You can do manual conversion with the [py_to_r()] function
#'   or pass `convert = TRUE` to request automatic conversion.
#'
#' @export
tuple <- function(..., convert = FALSE) {
  ensure_python_initialized()
  args <- list(...)
  # a single argument receives special treatment
  if (length(args) == 1) {
    first <- args[[1]]
    # an existing tuple is passed through unchanged
    if (inherits(first, "python.builtin.tuple"))
      return(first)
    # a bare list supplies the tuple's elements directly
    if (is.list(first))
      args <- first
  }
  # construct the tuple
  py_tuple(args, convert = convert)
}
#' @export
# A NULL/unavailable tuple reports length 0 instead of erroring.
length.python.builtin.tuple <- function(x) {
  if (py_is_null_xptr(x) || !py_available())
    0L
  else
    py_tuple_length(x)
}
#' Length of Python object
#'
#' Get the length of a Python object (equivalent to the Python `len()`
#' built in function).
#'
#' @param x Python object
#'
#' @return Length as integer
#'
#' @export
py_len <- function(x) {
  if (py_is_null_xptr(x) || !py_available())
    0L
  else
    # invoke the object's __len__ protocol method
    as_r_value(x$`__len__`())
}
#' @export
length.python.builtin.list <- function(x) {
  py_len(x)
}
#' Convert to Python Unicode Object
#'
#' @param str Single element character vector to convert
#'
#' @details By default R character vectors are converted to Python strings.
#'   In Python 3 these values are unicode objects however in Python 2
#'   they are 8-bit string objects. This function enables you to
#'   obtain a Python unicode object from an R character vector
#'   when running under Python 2 (under Python 3 a standard Python
#'   string object is returned).
#'
#' @export
py_unicode <- function(str) {
  ensure_python_initialized()
  if (is_python3()) {
    # Python 3 strings are already unicode
    r_to_py(str)
  } else {
    # Python 2: call the builtin unicode() constructor
    py <- import_builtins()
    py_call(py_get_attr(py, "unicode"), str)
  }
}
#' Evaluate an expression within a context.
#'
#' The \code{with} method for objects of type \code{python.builtin.object}
#' implements the context manager protocol used by the Python \code{with}
#' statement. The passed object must implement the
#' \href{https://docs.python.org/2/reference/datamodel.html#context-managers}{context
#' manager} (\code{__enter__} and \code{__exit__} methods).
#'
#' @param data Context to enter and exit
#' @param expr Expression to evaluate within the context
#' @param as Name of variable to assign context to for the duration of the
#'   expression's evaluation (optional).
#' @param ... Unused
#'
#' @export
with.python.builtin.object <- function(data, expr, as = NULL, ...) {
  ensure_python_initialized()
  # enter the context
  context <- data$`__enter__`()
  # check for as and as_envir (either passed directly or attached earlier
  # by the %as% operator)
  if (!missing(as)) {
    as <- deparse(substitute(as))
    as <- gsub("\"", "", as)
  } else {
    as <- attr(data, "as")
  }
  envir <- attr(data, "as_envir")
  if (is.null(envir))
    envir <- parent.frame()
  # assign the context if we have an as parameter, remembering any value
  # the alias previously held so it can be restored afterwards
  # NOTE(review): exists() defaults to inherits = TRUE, so a binding from an
  # enclosing frame could be captured as asRestore — confirm intended
  asRestore <- NULL
  if (!is.null(as)) {
    if (exists(as, envir = envir))
      asRestore <- get(as, envir = envir)
    assign(as, context, envir = envir)
  }
  # evaluate the expression and exit the context (the finally handler runs
  # __exit__ and restores the alias even when expr throws)
  tryCatch(force(expr),
           finally = {
             data$`__exit__`(NULL, NULL, NULL)
             if (!is.null(as)) {
               remove(list = as, envir = envir)
               if (!is.null(asRestore))
                 assign(as, asRestore, envir = envir)
             }
           }
  )
}
#' Create local alias for objects in \code{with} statements.
#'
#' @param object Object to alias
#' @param name Alias name
#'
#' @name with-as-operator
#'
#' @keywords internal
#' @export
"%as%" <- function(object, name) {
  # capture the unevaluated alias name and strip surrounding quotes
  alias <- gsub("\"", "", deparse(substitute(name)))
  attr(object, "as") <- alias
  # record the caller's frame so with() knows where to assign the alias
  attr(object, "as_envir") <- parent.frame()
  object
}
#' Traverse a Python iterator or generator
#'
#' @param it Python iterator or generator
#' @param f Function to apply to each item. By default applies the
#'   \code{identity} function which just reflects back the value of the item.
#' @param simplify Should the result be simplified to a vector if possible?
#' @param completed Sentinel value to return from `iter_next()` if the iteration
#'   completes (defaults to `NULL` but can be any R value you specify).
#'
#' @return For `iterate()`, A list or vector containing the results of calling
#'   \code{f} on each item in \code{it} (invisibly); For `iter_next()`, the next
#'   value in the iteration (or the sentinel `completed` value if the iteration
#'   is complete).
#'
#' @details Simplification is only attempted if all elements are length 1
#'   vectors of type "character", "complex", "double", "integer", or "logical".
#'
#' @export
iterate <- function(it, f = base::identity, simplify = TRUE) {
  ensure_python_initialized()
  # validate
  if (!inherits(it, "python.builtin.iterator"))
    stop("iterate function called with non-iterator argument")
  # perform iteration
  result <- py_iterate(it, f)
  # simplify if requested and appropriate
  if (simplify) {
    # attempt to simplify only if all elements are length 1.
    # lengths() replaces sapply(result, length): it always returns an
    # integer vector (sapply returns a list for empty input) and avoids
    # shadowing base::lengths with a local variable of the same name
    element_lengths <- lengths(result)
    unique_length <- unique(element_lengths)
    if (length(unique_length) == 1 && unique_length == 1) {
      # then only simplify if we have a common primitive type
      classes <- sapply(result, class)
      unique_class <- unique(classes)
      if (length(unique_class) == 1 &&
          unique_class %in% c("character", "complex", "double", "integer", "logical")) {
        result <- unlist(result)
      }
    }
  }
  # return invisibly
  invisible(result)
}
#' @rdname iterate
#' @export
iter_next <- function(it, completed = NULL) {
  # Guard: only Python iterators/generators can be advanced.
  if (!inherits(it, "python.builtin.iterator"))
    stop("iter_next function called with non-iterator argument")
  # Advance the iterator, returning `completed` once exhausted.
  py_iter_next(it, completed)
}
#' Call a Python callable object
#'
#' @param ... Arguments to function (named and/or unnamed)
#'
#' @return Return value of call as a Python object.
#'
#' @keywords internal
#'
#' @export
py_call <- function(x, ...) {
  ensure_python_initialized()
  # split `...` into positional and keyword arguments for the Python call
  resolved <- py_resolve_dots(list(...))
  py_call_impl(x, resolved$args, resolved$keywords)
}
#' Check if a Python object has an attribute
#'
#' Check whether a Python object \code{x} has an attribute
#' \code{name}.
#'
#' @param x A python object.
#' @param name The attribute to be accessed.
#'
#' @return \code{TRUE} if the object has the attribute \code{name}, and
#' \code{FALSE} otherwise.
#' @export
py_has_attr <- function(x, name) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute lookup
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_has_attr_impl(x, name)
}
#' Get an attribute of a Python object
#'
#' @param x Python object
#' @param name Attribute name
#' @param silent \code{TRUE} to return \code{NULL} if the attribute
#' doesn't exist (default is \code{FALSE} which will raise an error)
#'
#' @return Attribute of Python object
#' @export
py_get_attr <- function(x, name, silent = FALSE) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute access
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_get_attr_impl(x, name, silent)
}
#' Set an attribute of a Python object
#'
#' @param x Python object
#' @param name Attribute name
#' @param value Attribute value
#'
#' @export
py_set_attr <- function(x, name, value) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute assignment
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_set_attr_impl(x, name, value)
}
#' List all attributes of a Python object
#'
#' @param x Python object
#'
#' @return Character vector of attributes
#' @export
py_list_attributes <- function(x) {
  ensure_python_initialized()
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  # attribute names arrive from Python encoded as UTF-8; mark them as such
  result <- py_list_attributes_impl(x)
  Encoding(result) <- "UTF-8"
  result
}
#' Unique identifier for Python object
#'
#' Get a globally unique identifier for a Python object.
#'
#' @note In the current implementation of CPython this is the
#' memory address of the object.
#'
#' @param object Python object
#'
#' @return Unique identifier (as integer) or `NULL`
#'
#' @export
py_id <- function(object) {
  # stale pointers (or an unavailable Python) have no identity
  if (py_is_null_xptr(object) || !py_available())
    return(NULL)
  builtins <- import_builtins()
  builtins$id(object)
}
#' An S3 method for getting the string representation of a Python object
#'
#' @param object Python object
#' @param ... Unused
#'
#' @return Character vector
#'
#' @details The default implementation will call `PyObject_Str` on the object.
#'
#' @export
py_str <- function(object, ...) {
  # non-Python objects get the default placeholder representation
  if (!inherits(object, "python.builtin.object"))
    return(py_str.default(object))
  # stale pointers render as a null pointer
  if (py_is_null_xptr(object) || !py_available())
    return("<pointer: 0x0>")
  UseMethod("py_str")
}
#' @export
py_str.default <- function(object, ...) "<not a python object>"

#' @export
py_str.python.builtin.object <- function(object, ...) {
  # default Python repr, with the volatile 'object at 0x...' address removed
  gsub(" object at 0x\\w{4,}", "", py_str_impl(object))
}

#' @export
py_str.python.builtin.module <- function(object, ...)
  paste0("Module(", py_get_name(object), ")")

#' @export
py_str.python.builtin.list <- function(object, ...)
  py_collection_str("List", object)

#' @export
py_str.python.builtin.dict <- function(object, ...)
  py_collection_str("Dict", object)

#' @export
py_str.python.builtin.tuple <- function(object, ...)
  py_collection_str("Tuple", object)
# Summarize a Python collection for printing: large collections (more than
# 10 items) get a compact "<Name> (<n> items)" form; small ones use the
# full default repr.
py_collection_str <- function(name, object) {
  n <- py_collection_len(object)
  if (n > 10)
    paste0(name, " (", n, " items)")
  else
    py_str.python.builtin.object(object)
}

# Item count for a Python collection. __len__ is fetched via py_get_attr
# because the `$` operator is overloaded for dicts (keys shadow methods).
py_collection_len <- function(object) {
  len_fn <- py_get_attr(object, "__len__")
  py_to_r(py_call(len_fn))
}
#' Suppress Python warnings for an expression
#'
#' @param expr Expression to suppress warnings for
#'
#' @return Result of evaluating expression
#'
#' @export
py_suppress_warnings <- function(expr) {
  ensure_python_initialized()
  # ignore any registered warning output types (e.g. tf warnings)
  contexts <- lapply(.globals$suppress_warnings_handlers, function(handler) {
    handler$suppress()
  })
  # restore all handlers on exit; seq_along() is safe for empty lists
  # (unlike the 1:length() idiom, which needed an explicit length guard)
  on.exit({
    for (i in seq_along(contexts)) {
      handler <- .globals$suppress_warnings_handlers[[i]]
      handler$restore(contexts[[i]])
    }
  }, add = TRUE)
  # evaluate while ignoring python warnings
  warnings <- import("warnings")
  with(warnings$catch_warnings(), expr)
}
#' Register a handler for calls to py_suppress_warnings
#'
#' @param handler Handler
#'
#' @details Enables packages to register a pair of functions
#' to be called to suppress and then re-enable warnings
#'
#' @keywords internal
#' @export
register_suppress_warnings_handler <- function(handler) {
  # append to the registered handlers (c() also handles the initial NULL case)
  handlers <- .globals$suppress_warnings_handlers
  .globals$suppress_warnings_handlers <- c(handlers, list(handler))
}

#' Register a filter for class names
#'
#' @param filter Function which takes a class name and maps it to an alternate
#' name
#'
#' @keywords internal
#' @export
register_class_filter <- function(filter) {
  filters <- .globals$class_filters
  .globals$class_filters <- c(filters, list(filter))
}
#' Capture and return Python output
#'
#' @param expr Expression to capture stdout for
#' @param type Streams to capture (defaults to both stdout and stderr)
#'
#' @return Character vector with output
#'
#' @export
py_capture_output <- function(expr, type = c("stdout", "stderr")) {
  # initialize python if necessary
  ensure_python_initialized()
  # resolve type argument (partial matching; both streams allowed at once)
  type <- match.arg(type, several.ok = TRUE)
  # get output tools helper functions
  output_tools <- import("rpytools.output")
  # NOTE: the on.exit handlers registered below are failsafes that end capture
  # if `expr` throws. On the normal path, capture is ended explicitly further
  # down and restore_stdout/restore_stderr are reset to NULL so the handlers
  # become no-ops (avoiding a double end-capture).
  # handle stdout
  restore_stdout <- NULL
  if ("stdout" %in% type) {
    restore_stdout <- output_tools$start_stdout_capture()
    on.exit({
      if (!is.null(restore_stdout))
        output_tools$end_stdout_capture(restore_stdout)
    }, add = TRUE)
  }
  # handle stderr
  restore_stderr <- NULL
  if ("stderr" %in% type) {
    restore_stderr <- output_tools$start_stderr_capture()
    on.exit({
      if (!is.null(restore_stderr))
        output_tools$end_stderr_capture(restore_stderr)
    }, add = TRUE)
  }
  # evaluate the expression
  force(expr)
  # collect the output (ending a capture returns the captured text)
  output <- ""
  if (!is.null(restore_stdout)) {
    std_out <- output_tools$end_stdout_capture(restore_stdout)
    output <- paste0(output, std_out)
    if (nzchar(std_out))
      output <- paste0(output, "\n")
    restore_stdout <- NULL
  }
  if (!is.null(restore_stderr)) {
    std_err <- output_tools$end_stderr_capture(restore_stderr)
    output <- paste0(output, std_err)
    if (nzchar(std_err))
      output <- paste0(output, "\n")
    restore_stderr <- NULL
  }
  # return the output
  output
}
#' Run Python code
#'
#' Execute code within the \code{__main__} Python module.
#'
#' @inheritParams import
#' @param code Code to execute
#' @param file Source file
#' @param local Whether to create objects in a local/private namespace (if
#'   `FALSE`, objects are created within the main module).
#'
#' @return For `py_eval()`, the result of evaluating the expression; For
#'   `py_run_string()` and `py_run_file()`, the dictionary associated with
#'   the code execution.
#'
#' @name py_run
#'
#' @export
py_run_string <- function(code, local = FALSE, convert = TRUE) {
  ensure_python_initialized()
  result <- py_run_string_impl(code, local, convert)
  invisible(result)
}

#' @rdname py_run
#' @export
py_run_file <- function(file, local = FALSE, convert = TRUE) {
  ensure_python_initialized()
  result <- py_run_file_impl(file, local, convert)
  invisible(result)
}

#' @rdname py_run
#' @export
py_eval <- function(code, convert = TRUE) {
  ensure_python_initialized()
  # unlike py_run_*, the evaluated result is returned visibly
  py_eval_impl(code, convert)
}
# Wrap a Python callable in an R function. When `convert` is TRUE, results
# are converted to their R equivalent (NULL results returned invisibly).
py_callable_as_function <- function(callable, convert) {
  function(...) {
    dots <- py_resolve_dots(list(...))
    result <- py_call_impl(callable, dots$args, dots$keywords)
    if (!convert)
      return(result)
    result <- py_to_r(result)
    if (is.null(result)) invisible(result) else result
  }
}
# Split a list of `...` arguments into positional args and keyword args for
# a Python call. NULL values are preserved via `x[i] <- list(NULL)` (plain
# `x[[i]] <- NULL` would delete the element instead of storing NULL).
py_resolve_dots <- function(dots) {
  args <- list()
  keywords <- list()
  names <- names(dots)
  # no names at all: everything is positional
  if (is.null(names))
    return(list(args = dots, keywords = keywords))
  # seq_along() is safe for empty input (unlike 1:length())
  for (i in seq_along(dots)) {
    name <- names[[i]]
    value <- dots[[i]]
    if (nzchar(name)) {
      if (is.null(value))
        keywords[name] <- list(NULL)
      else
        keywords[[name]] <- value
    } else {
      if (is.null(value))
        args[length(args) + 1] <- list(NULL)
      else
        args[[length(args) + 1]] <- value
    }
  }
  list(args = args, keywords = keywords)
}
# Is `x` a Python module (or a module proxy)?
py_is_module <- function(x) inherits(x, "python.builtin.module")

# A module proxy is a delay-loaded module: an environment carrying the
# pending module name in a "module" variable until the real import happens.
py_is_module_proxy <- function(x) {
  py_is_module(x) && exists("module", envir = x)
}
# Resolve a delay-loaded module proxy in place: perform the real import and
# convert the proxy environment into the actual module object.
py_resolve_module_proxy <- function(proxy) {
  # collect module proxy hooks (read the value then remove it from the proxy)
  collect_value <- function(name) {
    if (exists(name, envir = proxy, inherits = FALSE)) {
      value <- get(name, envir = proxy, inherits = FALSE)
      remove(list = name, envir = proxy)
      value
    } else {
      NULL
    }
  }
  # name of module to import (allow just in time customization via hook)
  get_module <- collect_value("get_module")
  if (!is.null(get_module))
    assign("module", get_module(), envir = proxy)
  # get module name
  module <- get("module", envir = proxy)
  # load and error handlers
  on_load <- collect_value("on_load")
  on_error <- collect_value("on_error")
  # perform the import -- capture error and amend it with
  # python configuration information if we have it
  result <- tryCatch(import(module), error = clear_error_handler())
  if (inherits(result, "error")) {
    if (!is.null(on_error)) {
      # call custom error handler
      on_error(result)
      # error handler can and should call `stop`, this is just a failsafe
      stop("Error loading Python module ", module, call. = FALSE)
    } else {
      # default error message/handler
      message <- py_config_error_message(paste("Python module", module, "was not found."))
      stop(message, call. = FALSE)
    }
  }
  # fixup the proxy (replace the proxy's contents with the imported module)
  py_module_proxy_import(proxy)
  # clear the global tracking of delay load modules
  .globals$delay_load_module <- NULL
  .globals$delay_load_environment <- NULL
  .globals$delay_load_priority <- 0
  # call on_load if specified
  if (!is.null(on_load))
    on_load()
}
# Python __name__ of a module (or other object), as an R string.
py_get_name <- function(x) py_to_r(py_get_attr(x, "__name__"))

# Attempt to import submodule `name` of module `x`; returns NULL (rather
# than signalling an error) when the submodule cannot be imported.
py_get_submodule <- function(x, name, convert = TRUE) {
  qualified <- paste0(py_get_name(x), ".", name)
  result <- tryCatch(import(qualified, convert = convert),
                     error = clear_error_handler())
  if (inherits(result, "error")) NULL else result
}
# Apply each registered class filter (see register_class_filter) to the
# class vector, in registration order.
py_filter_classes <- function(classes) {
  Reduce(function(current, filter) filter(current),
         .globals$class_filters, init = classes)
}
| /R/python.R | permissive | bcipolli/reticulate | R | false | false | 29,963 | r |
#' Import a Python module
#'
#' Import the specified Python module for calling from R.
#'
#' @param module Module name
#' @param as Alias for module name (affects names of R classes)
#' @param path Path to import from
#' @param convert `TRUE` to automatically convert Python objects to their R
#' equivalent. If you pass `FALSE` you can do manual conversion using the
#' [py_to_r()] function.
#' @param delay_load `TRUE` to delay loading the module until it is first used.
#' `FALSE` to load the module immediately. If a function is provided then it
#' will be called once the module is loaded. If a list containing `on_load()`
#' and `on_error(e)` elements is provided then `on_load()` will be called on
#' successful load and `on_error(e)` if an error occurs.
#'
#' @details The `import_from_path` function imports a Python module from an
#' arbitrary filesystem path (the directory of the specified python script is
#' automatically added to the `sys.path`).
#'
#' @return A Python module
#'
#' @examples
#' \dontrun{
#' main <- import_main()
#' sys <- import("sys")
#' }
#'
#' @export
import <- function(module, as = NULL, convert = TRUE, delay_load = FALSE) {
  # if there is an as argument then register a filter for it
  # (rewrites R class names derived from the module name to use the alias)
  if (!is.null(as)) {
    register_class_filter(function(classes) {
      sub(paste0("^", module), as, classes)
    })
  }
  # resolve delay load (a bare function is shorthand for list(on_load = fn))
  delay_load_environment <- NULL
  delay_load_priority <- 0
  delay_load_functions <- NULL
  if (is.function(delay_load)) {
    delay_load_functions <- list(on_load = delay_load)
    delay_load <- TRUE
  } else if (is.list(delay_load)) {
    delay_load_environment <- delay_load$environment
    delay_load_functions <- delay_load
    if (!is.null(delay_load$priority))
      delay_load_priority <- delay_load$priority
    delay_load <- TRUE
  }
  # normal case (load immediately)
  if (!delay_load || is_python_initialized()) {
    # ensure that python is initialized (pass top level module as
    # a hint as to which version of python to choose)
    ensure_python_initialized(required_module = module)
    # import the module
    py_module_import(module, convert = convert)
  }
  # delay load case (wait until first access)
  else {
    # record only the highest-priority pending module in the globals;
    # presumably consulted later when choosing which Python to initialize --
    # confirm against ensure_python_initialized()
    if (is.null(.globals$delay_load_module) || (delay_load_priority > .globals$delay_load_priority)) {
      .globals$delay_load_module <- module
      .globals$delay_load_environment <- delay_load_environment
      .globals$delay_load_priority <- delay_load_priority
    }
    # the proxy is an environment classed as a module; the real import is
    # performed by py_resolve_module_proxy() on first attribute access
    module_proxy <- new.env(parent = emptyenv())
    module_proxy$module <- module
    module_proxy$convert <- convert
    if (!is.null(delay_load_functions)) {
      module_proxy$get_module <- delay_load_functions$get_module
      module_proxy$on_load <- delay_load_functions$on_load
      module_proxy$on_error <- delay_load_functions$on_error
    }
    attr(module_proxy, "class") <- c("python.builtin.module",
                                     "python.builtin.object")
    module_proxy
  }
}
#' @rdname import
#' @export
import_main <- function(convert = TRUE) {
  ensure_python_initialized()
  import("__main__", convert = convert)
}

#' @rdname import
#' @export
import_builtins <- function(convert = TRUE) {
  ensure_python_initialized()
  # the builtins module was renamed from __builtin__ in Python 3
  name <- if (is_python3()) "builtins" else "__builtin__"
  import(name, convert = convert)
}
#' @rdname import
#' @export
import_from_path <- function(module, path = ".", convert = TRUE) {
  # normalize path
  path <- normalizePath(path)
  # temporarily prepend the path to sys.path; on.exit guarantees it is
  # removed again even if the import fails (the original leaked the path
  # on error, and -- worse -- returned the result of sys$path$pop(), i.e.
  # the removed path string, instead of the imported module)
  sys <- import("sys", convert = FALSE)
  sys$path$insert(as.integer(0), path)
  on.exit(sys$path$remove(path), add = TRUE)
  # import (the module is the return value)
  import(module, convert = convert)
}
#' @export
print.python.builtin.object <- function(x, ...) {
  str(x, ...)
}

#' @importFrom utils str
#' @export
str.python.builtin.object <- function(object, ...) {
  # stale pointers (e.g. objects restored from a saved workspace) render
  # as a null pointer rather than erroring
  if (py_available() && !py_is_null_xptr(object))
    cat(py_str(object), "\n", sep = "")
  else
    cat("<pointer: 0x0>\n")
}

#' @export
str.python.builtin.module <- function(object, ...) {
  # unresolved proxies haven't been imported; report the pending module name
  if (py_is_module_proxy(object))
    cat("Module(", get("module", envir = object), ")\n", sep = "")
  else
    cat(py_str(object), "\n", sep = "")
}

#' @export
as.character.python.builtin.object <- function(x, ...) {
  py_str(x)
}
#' @export
"==.python.builtin.object" <- function(a, b) py_compare(a, b, "==")

#' @export
"!=.python.builtin.object" <- function(a, b) py_compare(a, b, "!=")

#' @export
"<.python.builtin.object" <- function(a, b) py_compare(a, b, "<")

#' @export
">.python.builtin.object" <- function(a, b) py_compare(a, b, ">")

#' @export
">=.python.builtin.object" <- function(a, b) py_compare(a, b, ">=")

#' @export
"<=.python.builtin.object" <- function(a, b) py_compare(a, b, "<=")

# Compare two objects via a Python rich-comparison operator, converting
# the right-hand side to Python first if necessary.
py_compare <- function(a, b, op) {
  ensure_python_initialized()
  py_validate_xptr(a)
  b <- if (inherits(b, "python.builtin.object")) b else r_to_py(b)
  py_validate_xptr(b)
  py_compare_impl(a, b, op)
}
#' @export
summary.python.builtin.object <- function(object, ...) {
  # summary is just the structure display for Python objects
  str(object)
}

#' @export
`$.python.builtin.module` <- function(x, name) {
  # materialize delay-loaded modules, then delegate to the object method
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  `$.python.builtin.object`(x, name)
}
# Does this Python object have R conversion enabled? Objects created with
# convert = FALSE carry a `convert` flag in their backing environment;
# absence of the flag means conversion is on (TRUE).
py_has_convert <- function(x) {
  env <- as.environment(x)
  if (exists("convert", env, inherits = FALSE))
    get("convert", env, inherits = FALSE)
  else
    TRUE
}
#' @export
`$.python.builtin.object` <- function(x, name) {
  # resolve module proxies
  if (py_is_module_proxy(x))
    py_resolve_module_proxy(x)
  # skip if this is a NULL xptr
  if (py_is_null_xptr(x) || !py_available())
    return(NULL)
  # determine whether this object converts to python
  convert <- py_has_convert(x)
  # special handling for embedded modules (which don't always show
  # up as "attributes")
  if (py_is_module(x) && !py_has_attr(x, name)) {
    module <- py_get_submodule(x, name, convert)
    if (!is.null(module))
      return(module)
  }
  # get the attrib: a length-1 numeric name indexes via __getitem__ when
  # available; dict keys are fetched with py_dict_get_item (dict `$` is
  # overloaded); everything else is a plain attribute lookup
  if (is.numeric(name) && (length(name) == 1) && py_has_attr(x, "__getitem__"))
    attrib <- x$`__getitem__`(as.integer(name))
  else if (inherits(x, "python.builtin.dict"))
    attrib <- py_dict_get_item(x, name)
  else
    attrib <- py_get_attr(x, name)
  # convert (callables are always passed through py_to_r so they become
  # callable R functions, regardless of the convert flag)
  if (convert || py_is_callable(attrib)) {
    # capture previous convert for attr
    attrib_convert <- py_has_convert(attrib)
    # temporarily change convert so we can call py_to_r and get S3 dispatch,
    # restoring the attribute's original flag afterwards
    envir <- as.environment(attrib)
    assign("convert", convert, envir = envir)
    on.exit(assign("convert", attrib_convert, envir = envir), add = TRUE)
    # call py_to_r
    py_to_r(attrib)
  }
  else
    attrib
}
# The as.environment generic enables Python objects that manifest
# as R functions (e.g. for functions, classes, callables, etc.) to
# be automatically converted to environments during the construction
# of PyObjectRef. This makes them a seamless drop-in for standard
# Python objects represented as environments.
#' @export
as.environment.python.builtin.object <- function(x) {
  if (is.function(x))
    attr(x, "py_object")
  else
    x
}

# `[[` indexing is an exact alias for `$` (the same function object)
#' @export
`[[.python.builtin.object` <- `$.python.builtin.object`

#' @export
`$<-.python.builtin.object` <- function(x, name, value) {
  if (!py_is_null_xptr(x) && py_available())
    py_set_attr(x, name, value)
  else
    stop("Unable to assign value (object reference is NULL)")
  # return x so R's replacement-function protocol rebinds the same object
  x
}

#' @export
`[[<-.python.builtin.object` <- `$<-.python.builtin.object`

# dicts assign items (not attributes), hence the separate implementation
#' @export
`$<-.python.builtin.dict` <- function(x, name, value) {
  if (!py_is_null_xptr(x) && py_available())
    py_dict_set_item(x, name, value)
  else
    stop("Unable to assign value (dict reference is NULL)")
  x
}

#' @export
`[[<-.python.builtin.dict` <- `$<-.python.builtin.dict`
#' @export
length.python.builtin.dict <- function(x) {
  # stale pointers report length 0 rather than erroring
  if (py_is_null_xptr(x) || !py_available())
    return(0L)
  py_dict_length(x)
}
#' @export
.DollarNames.python.builtin.module <- function(x, pattern = "") {
  # resolve module proxies; errors are swallowed because this runs
  # during interactive completion
  ok <- tryCatch({
    if (py_is_module_proxy(x))
      py_resolve_module_proxy(x)
    TRUE
  }, error = clear_error_handler(FALSE))
  if (!ok)
    return(character())
  # delegate to the object method
  .DollarNames.python.builtin.object(x, pattern)
}
#' @importFrom utils .DollarNames
#' @export
.DollarNames.python.builtin.object <- function(x, pattern = "") {
  # skip if this is a NULL xptr
  if (py_is_null_xptr(x) || !py_available())
    return(character())
  # check for dictionary (keys, not attributes, are completed for dicts)
  if (inherits(x, "python.builtin.dict")) {
    names <- py_dict_get_keys_as_str(x)
    names <- names[substr(names, 1, 1) != '_']
    Encoding(names) <- "UTF-8"
    # type code 0L -- presumably "no type info" for the completion UI; confirm
    types <- rep_len(0L, length(names))
  } else {
    # get the names and filter out internal attributes (_*)
    names <- py_suppress_warnings(py_list_attributes(x))
    names <- names[substr(names, 1, 1) != '_']
    # replace function with `function` (reserved word in R)
    names <- sub("^function$", "`function`", names)
    names <- sort(names, decreasing = FALSE)
    # get the types
    types <- py_suppress_warnings(py_get_attribute_types(x, names))
  }
  # if this is a module then add submodules
  if (inherits(x, "python.builtin.module")) {
    name <- py_get_name(x)
    if (!is.null(name)) {
      submodules <- sort(py_list_submodules(name), decreasing = FALSE)
      Encoding(submodules) <- "UTF-8"
      names <- c(names, submodules)
      # type code 5L appears to mark submodules -- confirm against the IDE
      types <- c(types, rep_len(5L, length(submodules)))
    }
  }
  if (length(names) > 0) {
    # set types
    attr(names, "types") <- types
    # specify a help_handler (presumably consumed by the IDE for completion
    # help -- confirm)
    attr(names, "helpHandler") <- "reticulate:::help_handler"
  }
  # return
  names
}
# names() on Python objects surfaces the same set of names offered for `$`
# completion: non-underscore attributes (and, for modules, submodules).
#' @export
names.python.builtin.object <- function(x) {
  as.character(.DollarNames(x))
}

#' @export
names.python.builtin.module <- function(x) {
  as.character(.DollarNames(x))
}
#' @export
as.array.numpy.ndarray <- function(x, ...) {
  py_to_r(x)
}

#' @export
as.matrix.numpy.ndarray <- function(x, ...) {
  py_to_r(x)
}

#' @export
as.vector.numpy.ndarray <- function(x, mode = "any") {
  a <- as.array(x)
  as.vector(a, mode = mode)
}

#' @export
as.double.numpy.ndarray <- function(x, ...) {
  a <- as.array(x)
  as.double(a)
}

#' @importFrom graphics plot
#' @export
plot.numpy.ndarray <- function(x, y, ...) {
  # forward graphical parameters to plot() (the original dropped `...`,
  # silently discarding options like col=, type=, main=)
  plot(as.array(x), ...)
}
#' Create Python dictionary
#'
#' Create a Python dictionary object, including a dictionary whose keys are
#' other Python objects rather than character vectors.
#'
#' @param ... Name/value pairs for dictionary (or a single named list to be
#'   converted to a dictionary).
#' @param keys Keys to dictionary (can be Python objects)
#' @param values Values for dictionary
#' @param convert `TRUE` to automatically convert Python objects to their R
#'   equivalent. If you pass `FALSE` you can do manual conversion using the
#'   [py_to_r()] function.
#'
#' @return A Python dictionary
#'
#' @note The returned dictionary will not automatically convert its elements
#'   from Python to R. You can do manual conversion with the [py_to_r()]
#'   function or pass `convert = TRUE` to request automatic conversion.
#'
#' @export
dict <- function(..., convert = FALSE) {
  ensure_python_initialized()
  # get the args
  values <- list(...)
  # flag indicating whether we should scan the parent frame for python
  # objects that should serve as the key (e.g. a Tensor)
  scan_parent_frame <- TRUE
  # if there is a single element and it's a list then use that
  # (no frame scanning then: the names came from the list itself)
  if (length(values) == 1 && is.null(names(values)) && is.list(values[[1]])) {
    values <- values[[1]]
    scan_parent_frame <- FALSE
  }
  # get names
  names <- names(values)
  # evaluate names in parent env to get keys
  frame <- parent.frame()
  keys <- lapply(names, function(name) {
    # allow python objects to serve as keys: if the argument *name* matches
    # a variable in the caller's frame bound to a Python object, that object
    # becomes the key instead of the literal name
    if (scan_parent_frame && exists(name, envir = frame, inherits = TRUE)) {
      key <- get(name, envir = frame, inherits = TRUE)
      if (inherits(key, "python.builtin.object"))
        key
      else
        name
    } else {
      # all-digit names become integer keys; everything else stays a string
      if (grepl("^[0-9]+$", name))
        name <- as.integer(name)
      else
        name
    }
  })
  # construct dict
  py_dict_impl(keys, values, convert = convert)
}
#' @rdname dict
#' @export
py_dict <- function(keys, values, convert = FALSE) {
  # unlike dict(), keys are supplied explicitly (and may be Python objects)
  ensure_python_initialized()
  py_dict_impl(keys, values, convert = convert)
}
#' Create Python tuple
#'
#' Create a Python tuple object
#'
#' @inheritParams dict
#' @param ... Values for tuple (or a single list to be converted to a tuple).
#'
#' @return A Python tuple
#' @note The returned tuple will not automatically convert its elements from
#' Python to R. You can do manual conversion with the [py_to_r()] function or
#' pass `convert = TRUE` to request automatic conversion.
#'
#' @export
tuple <- function(..., convert = FALSE) {
  ensure_python_initialized()
  values <- list(...)
  # single-argument calls get special treatment
  if (length(values) == 1) {
    first <- values[[1]]
    # existing tuples are reflected back unchanged
    if (inherits(first, "python.builtin.tuple"))
      return(first)
    # a single list supplies the tuple's elements directly
    if (is.list(first))
      values <- first
  }
  py_tuple(values, convert = convert)
}

#' @export
length.python.builtin.tuple <- function(x) {
  # stale pointers report length 0 rather than erroring
  if (py_is_null_xptr(x) || !py_available())
    return(0L)
  py_tuple_length(x)
}
#' Length of Python object
#'
#' Get the length of a Python object (equivalent to the Python `len()`
#' built in function).
#'
#' @param x Python object
#'
#' @return Length as integer
#'
#' @export
py_len <- function(x) {
  # stale pointers report length 0 rather than erroring
  if (py_is_null_xptr(x) || !py_available())
    return(0L)
  as_r_value(x$`__len__`())
}

#' @export
length.python.builtin.list <- function(x) {
  py_len(x)
}
#' Convert to Python Unicode Object
#'
#' @param str Single element character vector to convert
#'
#' @details By default R character vectors are converted to Python strings.
#' In Python 3 these values are unicode objects however in Python 2
#' they are 8-bit string objects. This function enables you to
#' obtain a Python unicode object from an R character vector
#' when running under Python 2 (under Python 3 a standard Python
#' string object is returned).
#'
#' @export
py_unicode <- function(str) {
  ensure_python_initialized()
  # Python 3 strings are already unicode
  if (is_python3())
    return(r_to_py(str))
  # Python 2: construct a unicode object via the unicode() builtin
  builtins <- import_builtins()
  py_call(py_get_attr(builtins, "unicode"), str)
}
#' Evaluate an expression within a context.
#'
#' The \code{with} method for objects of type \code{python.builtin.object}
#' implements the context manager protocol used by the Python \code{with}
#' statement. The passed object must implement the
#' \href{https://docs.python.org/2/reference/datamodel.html#context-managers}{context
#' manager} (\code{__enter__} and \code{__exit__} methods).
#'
#' @param data Context to enter and exit
#' @param expr Expression to evaluate within the context
#' @param as Name of variable to assign context to for the duration of the
#'   expression's evaluation (optional).
#' @param ... Unused
#'
#' @export
with.python.builtin.object <- function(data, expr, as = NULL, ...) {
  ensure_python_initialized()
  # enter the context
  context <- data$`__enter__`()
  # check for as and as_envir (either passed directly, or previously
  # attached to the object by the %as% operator)
  if (!missing(as)) {
    as <- deparse(substitute(as))
    as <- gsub("\"", "", as)
  } else {
    as <- attr(data, "as")
  }
  envir <- attr(data, "as_envir")
  if (is.null(envir))
    envir <- parent.frame()
  # assign the context if we have an as parameter (remembering any existing
  # binding we shadow so it can be restored afterwards)
  asRestore <- NULL
  if (!is.null(as)) {
    if (exists(as, envir = envir))
      asRestore <- get(as, envir = envir)
    assign(as, context, envir = envir)
  }
  # evaluate the expression and exit the context; finally guarantees
  # __exit__ runs and the alias is cleaned up even if expr throws
  tryCatch(force(expr),
    finally = {
      data$`__exit__`(NULL, NULL, NULL)
      if (!is.null(as)) {
        remove(list = as, envir = envir)
        if (!is.null(asRestore))
          assign(as, asRestore, envir = envir)
      }
    }
  )
}
#' Create local alias for objects in \code{with} statements.
#'
#' @param object Object to alias
#' @param name Alias name
#'
#' @name with-as-operator
#'
#' @keywords internal
#' @export
"%as%" <- function(object, name) {
  # Capture the alias as a string, stripping any surrounding quotes so that
  # both `x %as% foo` and `x %as% "foo"` produce the alias "foo".
  alias <- gsub("\"", "", deparse(substitute(name)), fixed = TRUE)
  attr(object, "as") <- alias
  # Remember where the alias should be bound when with() later assigns it.
  attr(object, "as_envir") <- parent.frame()
  object
}
#' Traverse a Python iterator or generator
#'
#' @param it Python iterator or generator
#' @param f Function to apply to each item. By default applies the
#' \code{identity} function which just reflects back the value of the item.
#' @param simplify Should the result be simplified to a vector if possible?
#' @param completed Sentinel value to return from `iter_next()` if the iteration
#' completes (defaults to `NULL` but can be any R value you specify).
#'
#' @return For `iterate()`, A list or vector containing the results of calling
#' \code{f} on each item in \code{x} (invisibly); For `iter_next()`, the next
#' value in the iteration (or the sentinel `completed` value if the iteration
#' is complete).
#'
#' @details Simplification is only attempted when all elements are length 1
#' vectors of type "character", "complex", "double", "integer", or "logical".
#'
#' @export
iterate <- function(it, f = base::identity, simplify = TRUE) {
  ensure_python_initialized()
  # validate
  if (!inherits(it, "python.builtin.iterator"))
    stop("iterate function called with non-iterator argument")
  # perform iteration
  result <- py_iterate(it, f)
  # simplify if requested and appropriate
  if (simplify) {
    # only attempt simplification when every element has length 1
    # (lengths() is type-stable, unlike sapply(result, length))
    if (length(result) > 0 && all(lengths(result) == 1L)) {
      # then only simplify if we have a common primitive type
      classes <- sapply(result, class)
      unique_class <- unique(classes)
      if (length(unique_class) == 1 &&
          unique_class %in% c("character", "complex", "double", "integer", "logical")) {
        result <- unlist(result)
      }
    }
  }
  # return invisibly
  invisible(result)
}
#' @rdname iterate
#' @export
iter_next <- function(it, completed = NULL) {
  # Guard: only Python iterators/generators can be advanced.
  if (!inherits(it, "python.builtin.iterator"))
    stop("iter_next function called with non-iterator argument")
  # Advance the iterator, returning `completed` once exhausted.
  py_iter_next(it, completed)
}
#' Call a Python callable object
#'
#' @param ... Arguments to function (named and/or unnamed)
#'
#' @return Return value of call as a Python object.
#'
#' @keywords internal
#'
#' @export
py_call <- function(x, ...) {
  ensure_python_initialized()
  # split `...` into positional and keyword arguments for the Python call
  resolved <- py_resolve_dots(list(...))
  py_call_impl(x, resolved$args, resolved$keywords)
}
#' Check if a Python object has an attribute
#'
#' Check whether a Python object \code{x} has an attribute
#' \code{name}.
#'
#' @param x A python object.
#' @param name The attribute to be accessed.
#'
#' @return \code{TRUE} if the object has the attribute \code{name}, and
#' \code{FALSE} otherwise.
#' @export
py_has_attr <- function(x, name) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute lookup
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_has_attr_impl(x, name)
}
#' Get an attribute of a Python object
#'
#' @param x Python object
#' @param name Attribute name
#' @param silent \code{TRUE} to return \code{NULL} if the attribute
#' doesn't exist (default is \code{FALSE} which will raise an error)
#'
#' @return Attribute of Python object
#' @export
py_get_attr <- function(x, name, silent = FALSE) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute access
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_get_attr_impl(x, name, silent)
}
#' Set an attribute of a Python object
#'
#' @param x Python object
#' @param name Attribute name
#' @param value Attribute value
#'
#' @export
py_set_attr <- function(x, name, value) {
  ensure_python_initialized()
  # delay-loaded modules must be materialized before attribute assignment
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  py_set_attr_impl(x, name, value)
}
#' List all attributes of a Python object
#'
#' @param x Python object
#'
#' @return Character vector of attributes
#' @export
py_list_attributes <- function(x) {
  ensure_python_initialized()
  if (py_is_module_proxy(x)) py_resolve_module_proxy(x)
  # attribute names arrive from Python encoded as UTF-8; mark them as such
  result <- py_list_attributes_impl(x)
  Encoding(result) <- "UTF-8"
  result
}
#' Unique identifier for Python object
#'
#' Get a globally unique identifier for a Python object.
#'
#' @note In the current implementation of CPython this is the
#' memory address of the object.
#'
#' @param object Python object
#'
#' @return Unique identifier (as integer) or `NULL`
#'
#' @export
py_id <- function(object) {
  # stale pointers (or an unavailable Python) have no identity
  if (py_is_null_xptr(object) || !py_available())
    return(NULL)
  builtins <- import_builtins()
  builtins$id(object)
}
#' An S3 method for getting the string representation of a Python object
#'
#' @param object Python object
#' @param ... Unused
#'
#' @return Character vector
#'
#' @details The default implementation will call `PyObject_Str` on the object.
#'
#' @export
py_str <- function(object, ...) {
  # non-Python objects get the default placeholder representation
  if (!inherits(object, "python.builtin.object"))
    return(py_str.default(object))
  # stale pointers render as a null pointer
  if (py_is_null_xptr(object) || !py_available())
    return("<pointer: 0x0>")
  UseMethod("py_str")
}
#' @export
py_str.default <- function(object, ...) "<not a python object>"

#' @export
py_str.python.builtin.object <- function(object, ...) {
  # default Python repr, with the volatile 'object at 0x...' address removed
  gsub(" object at 0x\\w{4,}", "", py_str_impl(object))
}

#' @export
py_str.python.builtin.module <- function(object, ...)
  paste0("Module(", py_get_name(object), ")")

#' @export
py_str.python.builtin.list <- function(object, ...)
  py_collection_str("List", object)

#' @export
py_str.python.builtin.dict <- function(object, ...)
  py_collection_str("Dict", object)

#' @export
py_str.python.builtin.tuple <- function(object, ...)
  py_collection_str("Tuple", object)
# Summarize a Python collection for printing: large collections (more than
# 10 items) get a compact "<Name> (<n> items)" form; small ones use the
# full default repr.
py_collection_str <- function(name, object) {
  n <- py_collection_len(object)
  if (n > 10)
    paste0(name, " (", n, " items)")
  else
    py_str.python.builtin.object(object)
}

# Item count for a Python collection. __len__ is fetched via py_get_attr
# because the `$` operator is overloaded for dicts (keys shadow methods).
py_collection_len <- function(object) {
  len_fn <- py_get_attr(object, "__len__")
  py_to_r(py_call(len_fn))
}
#' Suppress Python warnings for an expression
#'
#' @param expr Expression to suppress warnings for
#'
#' @return Result of evaluating expression
#'
#' @export
py_suppress_warnings <- function(expr) {
  ensure_python_initialized()
  # ignore any registered warning output types (e.g. tf warnings)
  contexts <- lapply(.globals$suppress_warnings_handlers, function(handler) {
    handler$suppress()
  })
  # restore all handlers on exit; seq_along() is safe for empty lists
  # (unlike the 1:length() idiom, which needed an explicit length guard)
  on.exit({
    for (i in seq_along(contexts)) {
      handler <- .globals$suppress_warnings_handlers[[i]]
      handler$restore(contexts[[i]])
    }
  }, add = TRUE)
  # evaluate while ignoring python warnings
  warnings <- import("warnings")
  with(warnings$catch_warnings(), expr)
}
#' Register a handler for calls to py_suppress_warnings
#'
#' @param handler Handler
#'
#' @details Enables packages to register a pair of functions
#' to be called to suppress and then re-enable warnings
#'
#' @keywords internal
#' @export
register_suppress_warnings_handler <- function(handler) {
  # append to the registered handlers (c() also handles the initial NULL case)
  handlers <- .globals$suppress_warnings_handlers
  .globals$suppress_warnings_handlers <- c(handlers, list(handler))
}

#' Register a filter for class names
#'
#' @param filter Function which takes a class name and maps it to an alternate
#' name
#'
#' @keywords internal
#' @export
register_class_filter <- function(filter) {
  filters <- .globals$class_filters
  .globals$class_filters <- c(filters, list(filter))
}
#' Capture and return Python output
#'
#' @param expr Expression to capture stdout for
#' @param type Streams to capture (defaults to both stdout and stderr)
#'
#' @return Character vector with output
#'
#' @export
py_capture_output <- function(expr, type = c("stdout", "stderr")) {
  # initialize python if necessary
  ensure_python_initialized()
  # resolve type argument (partial matching; both streams allowed at once)
  type <- match.arg(type, several.ok = TRUE)
  # get output tools helper functions
  output_tools <- import("rpytools.output")
  # NOTE: the on.exit handlers registered below are failsafes that end capture
  # if `expr` throws. On the normal path, capture is ended explicitly further
  # down and restore_stdout/restore_stderr are reset to NULL so the handlers
  # become no-ops (avoiding a double end-capture).
  # handle stdout
  restore_stdout <- NULL
  if ("stdout" %in% type) {
    restore_stdout <- output_tools$start_stdout_capture()
    on.exit({
      if (!is.null(restore_stdout))
        output_tools$end_stdout_capture(restore_stdout)
    }, add = TRUE)
  }
  # handle stderr
  restore_stderr <- NULL
  if ("stderr" %in% type) {
    restore_stderr <- output_tools$start_stderr_capture()
    on.exit({
      if (!is.null(restore_stderr))
        output_tools$end_stderr_capture(restore_stderr)
    }, add = TRUE)
  }
  # evaluate the expression
  force(expr)
  # collect the output (ending a capture returns the captured text)
  output <- ""
  if (!is.null(restore_stdout)) {
    std_out <- output_tools$end_stdout_capture(restore_stdout)
    output <- paste0(output, std_out)
    if (nzchar(std_out))
      output <- paste0(output, "\n")
    restore_stdout <- NULL
  }
  if (!is.null(restore_stderr)) {
    std_err <- output_tools$end_stderr_capture(restore_stderr)
    output <- paste0(output, std_err)
    if (nzchar(std_err))
      output <- paste0(output, "\n")
    restore_stderr <- NULL
  }
  # return the output
  output
}
#' Run Python code
#'
#' Execute code within the \code{__main__} Python module.
#'
#' @inheritParams import
#' @param code Code to execute
#' @param file Source file
#' @param local Whether to create objects in a local/private namespace (if
#'   `FALSE`, objects are created within the main module).
#' @param convert Whether to automatically convert Python objects in the
#'   result to their R equivalents.
#'
#' @return For `py_eval()`, the result of evaluating the expression; For
#'   `py_run_string()` and `py_run_file()`, the dictionary associated with
#'   the code execution.
#'
#' @name py_run
#'
#' @export
py_run_string <- function(code, local = FALSE, convert = TRUE) {
  ensure_python_initialized()
  # return the execution dictionary invisibly (callers usually run for effect)
  invisible(py_run_string_impl(code, local, convert))
}
#' @rdname py_run
#' @export
py_run_file <- function(file, local = FALSE, convert = TRUE) {
  ensure_python_initialized()
  # execution dictionary is returned invisibly, as with py_run_string()
  result <- py_run_file_impl(file, local, convert)
  invisible(result)
}
#' @rdname py_run
#' @export
py_eval <- function(code, convert = TRUE) {
  ensure_python_initialized()
  # unlike py_run_string(), the evaluated value is returned visibly
  value <- py_eval_impl(code, convert)
  value
}
# Wrap a Python callable as an R function.
#
# The returned closure forwards positional and keyword arguments to the
# callable; when `convert` is TRUE the Python result is converted to R,
# with NULL results returned invisibly.
py_callable_as_function <- function(callable, convert) {
  function(...) {
    dots <- py_resolve_dots(list(...))
    result <- py_call_impl(callable, dots$args, dots$keywords)
    if (!convert)
      return(result)
    converted <- py_to_r(result)
    if (is.null(converted))
      invisible(converted)
    else
      converted
  }
}
# Split a list of R `...` arguments into positional args and keywords.
#
# Elements with a non-empty name become keyword arguments; the remainder
# stay positional. NULL values are preserved as actual NULL elements
# (single-bracket list assignment) rather than deleting the slot.
#
# Fixes: the original iterated `1:length(dots)`, which evaluates to
# c(1, 0) for a zero-length named list (e.g. a subsetted named list) and
# would error; seq_along() is safe for all lengths.
py_resolve_dots <- function(dots) {
  args <- list()
  keywords <- list()

  names <- names(dots)
  if (is.null(names)) {
    # no names at all: everything is positional
    args <- dots
  } else {
    for (i in seq_along(dots)) {
      name <- names[[i]]
      # dots[i] is a one-element list, so `x[key] <- dots[i]` stores the
      # element even when it is NULL
      value <- dots[i]
      if (nzchar(name)) {
        keywords[name] <- value
      } else {
        args[length(args) + 1] <- value
      }
    }
  }

  list(
    args = args,
    keywords = keywords
  )
}
# Is `x` a reticulate Python module object?
py_is_module <- function(x) {
  "python.builtin.module" %in% class(x)
}
# A module proxy is a module object whose environment still carries the
# not-yet-resolved "module" binding (removed once the import happens).
py_is_module_proxy <- function(x) {
  if (!inherits(x, "python.builtin.module"))
    return(FALSE)
  exists("module", envir = x)
}
# Resolve a delay-loaded module proxy in place: run its registered hooks,
# perform the actual import, and convert the proxy into a real module.
py_resolve_module_proxy <- function(proxy) {

  # collect module proxy hooks: read a binding from the proxy environment
  # (if present), removing it so each hook fires at most once
  collect_value <- function(name) {
    if (exists(name, envir = proxy, inherits = FALSE)) {
      value <- get(name, envir = proxy, inherits = FALSE)
      remove(list = name, envir = proxy)
      value
    } else {
      NULL
    }
  }

  # name of module to import (allow just in time customization via hook)
  get_module <- collect_value("get_module")
  if (!is.null(get_module))
    assign("module", get_module(), envir = proxy)

  # get module name
  module <- get("module", envir = proxy)

  # load and error handlers
  on_load <- collect_value("on_load")
  on_error <- collect_value("on_error")

  # perform the import -- capture error and amend it with
  # python configuration information if we have it
  result <- tryCatch(import(module), error = clear_error_handler())
  if (inherits(result, "error")) {
    if (!is.null(on_error)) {
      # call custom error handler
      on_error(result)
      # error handler can and should call `stop`, this is just a failsafe
      stop("Error loading Python module ", module, call. = FALSE)
    } else {
      # default error message/handler
      message <- py_config_error_message(paste("Python module", module, "was not found."))
      stop(message, call. = FALSE)
    }
  }

  # fixup the proxy
  py_module_proxy_import(proxy)

  # clear the global tracking of delay load modules
  .globals$delay_load_module <- NULL
  .globals$delay_load_environment <- NULL
  .globals$delay_load_priority <- 0

  # call on_load if specified
  if (!is.null(on_load))
    on_load()
}
# Retrieve the Python "__name__" attribute of `x` as an R string.
py_get_name <- function(x) {
  name_attr <- py_get_attr(x, "__name__")
  py_to_r(name_attr)
}
# Import submodule `name` of module `x` (e.g. "os" -> "os.path"),
# returning NULL rather than signalling when the import fails.
py_get_submodule <- function(x, name, convert = TRUE) {
  qualified_name <- paste0(py_get_name(x), ".", name)
  result <- tryCatch(
    import(qualified_name, convert = convert),
    error = clear_error_handler()
  )
  if (inherits(result, "error"))
    return(NULL)
  result
}
# Run the vector of class names through every registered class filter,
# feeding each filter the output of the previous one.
py_filter_classes <- function(classes) {
  Reduce(
    function(current, filter) filter(current),
    .globals$class_filters,
    classes
  )
}
|
# Shiny app startup data preparation for the kwb.pilot "Basel" site:
# either re-import and re-aggregate the raw operational data (slow path)
# or load previously cached .Rds files, then prepare thresholds and the
# month selection used for reporting.

# TRUE: re-import + aggregate + cache; FALSE: read cached .Rds files
use_live_data <- FALSE
if (use_live_data) {
  library(kwb.pilot)
  library(dplyr)
  siteData_raw_list <- kwb.pilot::import_data_basel()
  print("### Step 4: Performing temporal aggregation ##########################")
  # system.time() wrappers report how long each aggregation level takes
  system.time(
    siteData_10min_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                     by = 10*60))
  system.time(
    siteData_hour_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                    by = 60*60))
  system.time(
    siteData_day_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                   by = "day"))
  # cache all aggregation levels for the fast (non-live) startup path
  saveRDS(siteData_raw_list, file = "data/siteData_raw_list.Rds")
  saveRDS(siteData_10min_list, file = "data/siteData_10min_list.Rds")
  saveRDS(siteData_hour_list, file = "data/siteData_hour_list.Rds")
  saveRDS(siteData_day_list, file = "data/siteData_day_list.Rds")
} else {
  # only the 10-minute aggregation is currently loaded by the app
  #siteData_raw_list <- readRDS("data/siteData_raw_list.Rds")
  siteData_10min_list <- readRDS("data/siteData_10min_list.Rds")
  #siteData_hour_list <- readRDS("data/siteData_hour_list.Rds")
  #siteData_day_list <- readRDS("data/siteData_day_list.Rds")
}
print("### Step 5: Importing threshold information ##########################")
# NOTE(review): ::: reaches a non-exported function; relies on package internals
threshold_file <- kwb.pilot:::package_file("shiny/basel/data/thresholds.csv")
thresholds <- kwb.pilot::get_thresholds(threshold_file)
print("### Step 6: Specify available months for reporting ##########################")
report_months <- kwb.pilot::create_monthly_selection(startDate = "2017-05-01")
#print("### Step 7: Add default calculated operational parameters ##########################")
report_calc_paras <- "NOT_IMPLEMENTED_YET"
| /inst/shiny/basel/global.R | permissive | KWB-R/kwb.pilot | R | false | false | 1,716 | r | use_live_data <- FALSE
# Shiny app startup data preparation (continued; `use_live_data` toggle is
# defined immediately above): live re-import vs. cached-.Rds startup path.
if (use_live_data) {
  library(kwb.pilot)
  library(dplyr)
  siteData_raw_list <- kwb.pilot::import_data_basel()
  print("### Step 4: Performing temporal aggregation ##########################")
  # system.time() wrappers report how long each aggregation level takes
  system.time(
    siteData_10min_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                     by = 10*60))
  system.time(
    siteData_hour_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                    by = 60*60))
  system.time(
    siteData_day_list <- kwb.pilot::group_datetime(siteData_raw_list,
                                                   by = "day"))
  # cache all aggregation levels for the fast (non-live) startup path
  saveRDS(siteData_raw_list, file = "data/siteData_raw_list.Rds")
  saveRDS(siteData_10min_list, file = "data/siteData_10min_list.Rds")
  saveRDS(siteData_hour_list, file = "data/siteData_hour_list.Rds")
  saveRDS(siteData_day_list, file = "data/siteData_day_list.Rds")
} else {
  # only the 10-minute aggregation is currently loaded by the app
  #siteData_raw_list <- readRDS("data/siteData_raw_list.Rds")
  siteData_10min_list <- readRDS("data/siteData_10min_list.Rds")
  #siteData_hour_list <- readRDS("data/siteData_hour_list.Rds")
  #siteData_day_list <- readRDS("data/siteData_day_list.Rds")
}
print("### Step 5: Importing threshold information ##########################")
# NOTE(review): ::: reaches a non-exported function; relies on package internals
threshold_file <- kwb.pilot:::package_file("shiny/basel/data/thresholds.csv")
thresholds <- kwb.pilot::get_thresholds(threshold_file)
print("### Step 6: Specify available months for reporting ##########################")
report_months <- kwb.pilot::create_monthly_selection(startDate = "2017-05-01")
#print("### Step 7: Add default calculated operational parameters ##########################")
report_calc_paras <- "NOT_IMPLEMENTED_YET"
|
# Worked examples for R lists and data frames (exploratory teaching script).
# Every top-level expression is meant to be run interactively and its
# printed value inspected; wrapping an assignment in ( ) auto-prints it.

# --- Creating and naming lists ------------------------------------------
(a_list <- list(
  c(1,1,2,5,35,67),
  month.abb,
  matrix(c(3,-8,1,-3),nrow=2),
  asin
))
names(a_list) <- c("catalan","months","involutary","arcsin")
a_list
# lists nest arbitrarily deep
(main_list <- list(
  middle_list = list(
    element_in_middle_list = diag(3),
    inner_list = list(
      element_in_inner_list = pi^(1:4),
      another_element_in_inner_list = "a"
    )
  ),
  element_in_main_list = log10(1:10)
))

# --- Atomic vs recursive; list "dimensions" -----------------------------
is.atomic(list())
is.recursive(list())
is.atomic(numeric())
is.recursive(numeric())
length(a_list)
length(main_list)
# lists have a length but no dim/nrow/ncol; NROW/NCOL treat them as 1 column
dim(a_list)
nrow(a_list)
ncol(a_list)
NROW(a_list)
NCOL(a_list)
# arithmetic works on extracted elements, not on lists themselves
l1 <- list(1:5)
l2 <- list(6:10)
l1[[1]]+l2[[1]]

# --- Indexing: [ ] vs [[ ]] vs $ ----------------------------------------
l <- list(
  first = 1,
  second = 2,
  third = list(
    alpha = 3.1,
    beta = 3.2
  )
)
l[1:2]
l[-3]
l[c(TRUE,TRUE,FALSE)]
l[[1]]
l[["first"]]
# [ ] keeps the list wrapper, [[ ]] extracts the element itself
is.list(l[1])
is.list(l[[1]])
l$first
# $ does partial matching, so l$f finds l$first
l$f
l[["third"]]["beta"]
# NOTE: "thrid" is a deliberate-looking typo here -- it indexes a missing
# element, demonstrating what happens for absent names
is.list(l[["thrid"]]["beta"])
is.list(l[["thrid"]][["beta"]])
# out-of-range / missing selections
l[c(4,3,5)]
l[["fourth"]]
l$fourth

# --- Converting between vectors and lists -------------------------------
busy_beaver <- c(1,6,21,107)
as.list(busy_beaver)
# a flat list of length-1 numerics coerces directly
as.numeric(list(1,2,33,444))
(prime_factors <- list(
  two = 2,
  three = 3,
  four = c(2, 2),
  five = 5,
  six = c(2, 3),
  seven = 7,
  eight = c(2, 2, 2),
  nine = c(3, 3),
  ten = c(2, 5)
))
# unlist() flattens to an atomic vector with composed names
new_factors <- unlist(prime_factors)
new_factors
new_factors[1]
new_factors[[1]]
is.list(prime_factors)
is.list(new_factors)
is.list(new_factors[1])
is.list(new_factors[[1]])

# --- Combining lists -----------------------------------------------------
c(list(a=1,b=2),list(3))
c(list(a=1,b=2),3)
matrix_list_hybrid <- cbind(list(a=1,b=2),list(c=3,list(d=4)))
matrix_list_hybrid
str(matrix_list_hybrid)

# --- NULL in lists -------------------------------------------------------
china_holiday <- list(
  Jan = "Near Year's Day",
  Feb = "Spring Festival",
  Mar = NULL,
  Apr = "Qingming Festival",
  May = "May Day",
  Jun = "Dragon Boat Festival",
  Jul = NULL,
  Aug = NULL,
  Sep = "Moon Festival",
  Oct = "National Day",
  Nov = NULL,
  Dec = NULL
)
china_holiday
length(NULL)
length(NA)
is.null(NULL)
is.null(NA)
# assigning NULL removes an element ...
china_holiday$Sep <- NULL
china_holiday
# ... while assigning list(NULL) stores a list holding NULL
china_holiday$Jun <- list(NULL)
china_holiday

# --- Pairlists -----------------------------------------------------------
(arguments_of_sd <- formals(sd))
class(arguments_of_sd)
# an empty pairlist prints as NULL, unlike an empty list
pairlist()
list()

# --- Creating data frames ------------------------------------------------
(a_data_frame <- data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5
))
class(a_data_frame)
# a named column vector can supply row names
y <- rnorm(5)
names(y) <- month.name[1:5]
data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5
)
data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5,
  row.names = NULL
)
data.frame(
  x = letters[1:5],
  y = y,
  z = runif(5)>0.5,
  row.names = c("jackie","Tito","Jermaine","Marlon","Michael")
)

# --- Data frame dimensions and names -------------------------------------
rownames(a_data_frame)
colnames(a_data_frame)
dimnames(a_data_frame)
nrow(a_data_frame)
ncol(a_data_frame)
dim(a_data_frame)
length(a_data_frame)
names(a_data_frame)
# shorter columns are recycled when lengths divide evenly
data.frame(
  x = 1,
  y = 2:3,
  z = 4:7
)
# check.names controls whether column names are made syntactically valid
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = TRUE
)
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = FALSE
)

# --- Indexing data frames ------------------------------------------------
a_data_frame[2:3,-3]
a_data_frame[c(FALSE,TRUE,TRUE,FALSE,FALSE),c("x","y")]
a_data_frame[2:3,1]
class(a_data_frame[2:3,1])
class(a_data_frame[2:3,-3])
a_data_frame$x[2:3]
a_data_frame[[1]][2:3]
a_data_frame[["x"]][2:3]
a_data_frame[a_data_frame$y>0|a_data_frame$z,"x"]
subset(a_data_frame,y>0|z,x)
t(a_data_frame)
class(t(a_data_frame))

# --- Combining and summarizing data frames -------------------------------
# NOTE: "anoher_data_frame" is a (consistently used) typo for "another..."
anoher_data_frame <- data.frame(
  z = rlnorm(5),
  y = sample(5),
  x = letters[3:7]
)
rbind(a_data_frame,anoher_data_frame)
cbind(a_data_frame,anoher_data_frame)
merge(a_data_frame,anoher_data_frame,by = "x")
merge(a_data_frame,anoher_data_frame,by = "x",all = TRUE)
colSums(a_data_frame[,2:3])
colMeans(a_data_frame[,2:3])
| /05_R列表和数据框.R | no_license | 2003LinJiaFei/R- | R | false | false | 3,651 | r | (a_list <- list(
c(1,1,2,5,35,67),
month.abb,
matrix(c(3,-8,1,-3),nrow=2),
asin
))
# Worked examples for R lists and data frames (continuation of the a_list
# example created just above; each expression is run interactively).
names(a_list) <- c("catalan","months","involutary","arcsin")
a_list
# lists nest arbitrarily deep
(main_list <- list(
  middle_list = list(
    element_in_middle_list = diag(3),
    inner_list = list(
      element_in_inner_list = pi^(1:4),
      another_element_in_inner_list = "a"
    )
  ),
  element_in_main_list = log10(1:10)
))
# atomic vs recursive; lists have length but no dim/nrow/ncol
is.atomic(list())
is.recursive(list())
is.atomic(numeric())
is.recursive(numeric())
length(a_list)
length(main_list)
dim(a_list)
nrow(a_list)
ncol(a_list)
NROW(a_list)
NCOL(a_list)
# arithmetic works on extracted elements, not on lists themselves
l1 <- list(1:5)
l2 <- list(6:10)
l1[[1]]+l2[[1]]
# indexing: [ ] keeps the list wrapper, [[ ]] extracts, $ partial-matches
l <- list(
  first = 1,
  second = 2,
  third = list(
    alpha = 3.1,
    beta = 3.2
  )
)
l[1:2]
l[-3]
l[c(TRUE,TRUE,FALSE)]
l[[1]]
l[["first"]]
is.list(l[1])
is.list(l[[1]])
l$first
l$f
l[["third"]]["beta"]
# NOTE: "thrid" is a typo that indexes a missing element
is.list(l[["thrid"]]["beta"])
is.list(l[["thrid"]][["beta"]])
l[c(4,3,5)]
l[["fourth"]]
l$fourth
# converting between vectors and lists
busy_beaver <- c(1,6,21,107)
as.list(busy_beaver)
as.numeric(list(1,2,33,444))
(prime_factors <- list(
  two = 2,
  three = 3,
  four = c(2, 2),
  five = 5,
  six = c(2, 3),
  seven = 7,
  eight = c(2, 2, 2),
  nine = c(3, 3),
  ten = c(2, 5)
))
# unlist() flattens to an atomic vector with composed names
new_factors <- unlist(prime_factors)
new_factors
new_factors[1]
new_factors[[1]]
is.list(prime_factors)
is.list(new_factors)
is.list(new_factors[1])
is.list(new_factors[[1]])
# combining lists
c(list(a=1,b=2),list(3))
c(list(a=1,b=2),3)
matrix_list_hybrid <- cbind(list(a=1,b=2),list(c=3,list(d=4)))
matrix_list_hybrid
str(matrix_list_hybrid)
# NULL in lists: assigning NULL removes, list(NULL) stores
china_holiday <- list(
  Jan = "Near Year's Day",
  Feb = "Spring Festival",
  Mar = NULL,
  Apr = "Qingming Festival",
  May = "May Day",
  Jun = "Dragon Boat Festival",
  Jul = NULL,
  Aug = NULL,
  Sep = "Moon Festival",
  Oct = "National Day",
  Nov = NULL,
  Dec = NULL
)
china_holiday
length(NULL)
length(NA)
is.null(NULL)
is.null(NA)
china_holiday$Sep <- NULL
china_holiday
china_holiday$Jun <- list(NULL)
china_holiday
# pairlists (formals of a function is one)
(arguments_of_sd <- formals(sd))
class(arguments_of_sd)
pairlist()
list()
# creating data frames
(a_data_frame <- data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5
))
class(a_data_frame)
y <- rnorm(5)
names(y) <- month.name[1:5]
data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5
)
data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5,
  row.names = NULL
)
data.frame(
  x = letters[1:5],
  y = y,
  z = runif(5)>0.5,
  row.names = c("jackie","Tito","Jermaine","Marlon","Michael")
)
# data frame dimensions and names
rownames(a_data_frame)
colnames(a_data_frame)
dimnames(a_data_frame)
nrow(a_data_frame)
ncol(a_data_frame)
dim(a_data_frame)
length(a_data_frame)
names(a_data_frame)
# recycling of shorter columns when lengths divide evenly
data.frame(
  x = 1,
  y = 2:3,
  z = 4:7
)
# check.names controls whether column names are made syntactically valid
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = TRUE
)
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = FALSE
)
# indexing data frames
a_data_frame[2:3,-3]
a_data_frame[c(FALSE,TRUE,TRUE,FALSE,FALSE),c("x","y")]
a_data_frame[2:3,1]
class(a_data_frame[2:3,1])
class(a_data_frame[2:3,-3])
a_data_frame$x[2:3]
a_data_frame[[1]][2:3]
a_data_frame[["x"]][2:3]
a_data_frame[a_data_frame$y>0|a_data_frame$z,"x"]
subset(a_data_frame,y>0|z,x)
t(a_data_frame)
class(t(a_data_frame))
# combining and merging data frames ("anoher" is a consistently-used typo)
anoher_data_frame <- data.frame(
  z = rlnorm(5),
  y = sample(5),
  x = letters[3:7]
)
rbind(a_data_frame,anoher_data_frame)
cbind(a_data_frame,anoher_data_frame)
merge(a_data_frame,anoher_data_frame,by = "x")
merge(a_data_frame,anoher_data_frame,by = "x",all = TRUE)
colSums(a_data_frame[,2:3])
colMeans(a_data_frame[,2:3])
# Exploratory Data Analysis course project 2, plot 2: total PM2.5
# emissions in Baltimore City (fips 24510) by measurement year.

# NOTE(review): setwd() with an absolute path only works on the author's
# machine; prefer project-relative paths.
setwd("D:/docs/studying/Coursera/ExData_proj2")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",destfile="proj2.zip")
unzip("proj2.zip",list=FALSE)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# keep only Baltimore City records
NEI_Baltimore <- NEI[NEI$fips=="24510",]
# total emissions per measurement year
yearly_emissions <- tapply(NEI_Baltimore$Emissions,NEI_Baltimore$year,sum)
yearly_emissions <- as.data.frame(yearly_emissions)
years <-c("1999","2002","2005","2008")
yearly_emissions <- cbind(years,yearly_emissions)
names(yearly_emissions)[] <- c("Year","total emissions from PM2.5")
# Year was bound as character/factor; coerce back to numeric via character
yearly_emissions$Year <- as.numeric(as.character(yearly_emissions$Year))
# points plus connecting line, with a custom axis showing the survey years
png(filename = "plot2.png")
plot(yearly_emissions,type="p",pch=20,xaxt="n")
lines(yearly_emissions)
axis(1,at=yearly_emissions$Year,labels=years)
dev.off()
| /plot2.R | no_license | giladsa/ExData_proj2 | R | false | false | 828 | r | setwd("D:/docs/studying/Coursera/ExData_proj2")
# Plot 2 (continued; working directory set just above): download/extract
# the NEI data, aggregate Baltimore City PM2.5 emissions by year, plot.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",destfile="proj2.zip")
unzip("proj2.zip",list=FALSE)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# keep only Baltimore City records
NEI_Baltimore <- NEI[NEI$fips=="24510",]
# total emissions per measurement year
yearly_emissions <- tapply(NEI_Baltimore$Emissions,NEI_Baltimore$year,sum)
yearly_emissions <- as.data.frame(yearly_emissions)
years <-c("1999","2002","2005","2008")
yearly_emissions <- cbind(years,yearly_emissions)
names(yearly_emissions)[] <- c("Year","total emissions from PM2.5")
# Year was bound as character/factor; coerce back to numeric via character
yearly_emissions$Year <- as.numeric(as.character(yearly_emissions$Year))
# points plus connecting line, with a custom axis showing the survey years
png(filename = "plot2.png")
plot(yearly_emissions,type="p",pch=20,xaxt="n")
lines(yearly_emissions)
axis(1,at=yearly_emissions$Year,labels=years)
dev.off()
|
# Model-utility dependencies (design-matrix construction and helpers).
source("utils/models.R")
source("utils/create_X.R")
source("utils/R_jm_utils.R")

# Multinomial logit model interface: a list of functions consumed by the
# shared estimation driver (per-observation likelihood, argument
# preparation, starting values, and auxiliary output).
logit = list(
  # delta is MANDATORY
  # Per-observation likelihood vector P(Y_i | X_i, b).
  LLVec = function(b, args){
    n = length(args$Y)
    # number of alternatives, assuming choices cover a contiguous range
    nalt = max(args$Y) - min(args$Y) + 1
    expU = exp(args$X %*% b)
    # exp-utility of the chosen alternative for each observation; rows of X
    # are stacked per observation in consecutive groups of `nalt`, and Y is
    # zero-based (see computeArgs), so this offsets into each group
    num = expU[seq(from = 1, length = n, by = nalt) + args$Y]
    denum = rowSums(matrix(expU, ncol = nalt, byrow = T))
    num / denum
  },
  # Build the likelihood arguments: design matrix, zero-based choices, delta.
  computeArgs = function(spec, D){
    X = getDiscreteMatrix(spec, D)
    Y = D[[spec$Y]]
    # shift choices so the smallest observed alternative is coded 0
    Y = Y - min(Y)
    delta = getElem(spec, name = "delta", default = 0.001)
    list(X = X, Y = Y, delta = delta)
  },
  # Starting values: all coefficients at zero.
  computeStart = function(spec, D){
    rep(0, getNumVar(spec, D))
  },
  # Auxiliary output: human-readable coefficient names.
  computeOther = function(spec, D){
    list(names = getNames(spec, D))
  }
) # logit function list
# Multinomial-logit choice probabilities.
#
# b:    coefficient vector
# X:    stacked design matrix (n * nalt rows, alternatives contiguous)
# nalt: number of alternatives
# n:    number of observations
#
# Returns each column of the exp-utility matrix scaled by the per-row
# totals (a row-wise softmax over alternatives).
logitApply <- function(b, X, nalt, n) {
  exp_utility <- matrix(exp(X %*% b), n, nalt, byrow = TRUE)
  row_totals <- rowSums(exp_utility)
  apply(exp_utility, 2, function(column) column / row_totals)
}
#genChoice = function(specLogit, cx, D){
# n = nrow(D)
# nalt = length(specLogit$specific)
# X = getDiscreteMatrix(specLogit, D)
# U = matrix(X %*% cx + rgumbel(n*nalt), nrow = n, ncol = nalt, byrow = T)
# apply(U,1,which.max)
# }
| /models/logit.R | no_license | YanL89/B_DiscreteContinuousChoiceModelV4_singleRegression | R | false | false | 1,174 | r | source("utils/models.R")
# Model-utility dependencies (design-matrix construction and helpers;
# utils/models.R is sourced immediately above).
source("utils/create_X.R")
source("utils/R_jm_utils.R")

# Multinomial logit model interface: a list of functions consumed by the
# shared estimation driver (per-observation likelihood, argument
# preparation, starting values, and auxiliary output).
logit = list(
  # delta is MANDATORY
  # Per-observation likelihood vector P(Y_i | X_i, b).
  LLVec = function(b, args){
    n = length(args$Y)
    # number of alternatives, assuming choices cover a contiguous range
    nalt = max(args$Y) - min(args$Y) + 1
    expU = exp(args$X %*% b)
    # exp-utility of the chosen alternative for each observation; rows of X
    # are stacked per observation in consecutive groups of `nalt`, and Y is
    # zero-based (see computeArgs), so this offsets into each group
    num = expU[seq(from = 1, length = n, by = nalt) + args$Y]
    denum = rowSums(matrix(expU, ncol = nalt, byrow = T))
    num / denum
  },
  # Build the likelihood arguments: design matrix, zero-based choices, delta.
  computeArgs = function(spec, D){
    X = getDiscreteMatrix(spec, D)
    Y = D[[spec$Y]]
    # shift choices so the smallest observed alternative is coded 0
    Y = Y - min(Y)
    delta = getElem(spec, name = "delta", default = 0.001)
    list(X = X, Y = Y, delta = delta)
  },
  # Starting values: all coefficients at zero.
  computeStart = function(spec, D){
    rep(0, getNumVar(spec, D))
  },
  # Auxiliary output: human-readable coefficient names.
  computeOther = function(spec, D){
    list(names = getNames(spec, D))
  }
) # logit function list
# Multinomial-logit choice probabilities.
#
# b:    coefficient vector
# X:    stacked design matrix (n * nalt rows, alternatives contiguous)
# nalt: number of alternatives
# n:    number of observations
#
# Returns each column of the exp-utility matrix scaled by the per-row
# totals (a row-wise softmax over alternatives).
logitApply <- function(b, X, nalt, n) {
  exp_utility <- matrix(exp(X %*% b), n, nalt, byrow = TRUE)
  row_totals <- rowSums(exp_utility)
  apply(exp_utility, 2, function(column) column / row_totals)
}
#genChoice = function(specLogit, cx, D){
# n = nrow(D)
# nalt = length(specLogit$specific)
# X = getDiscreteMatrix(specLogit, D)
# U = matrix(X %*% cx + rgumbel(n*nalt), nrow = n, ncol = nalt, byrow = T)
# apply(U,1,which.max)
# }
|
# Script for Ingesting Cristian Estop-Aragones Circumpolar 14C Database
# Gavin McNicol

# setup
require(dplyr)
library(openxlsx)
library(tidyverse)
library(devtools)
library(rcrossref)
library(lubridate)
# NOTE(review): installing a package at the top of an ingest script makes the
# run network-dependent; consider guarding with requireNamespace() instead.
devtools::install_github("International-Soil-Radiocarbon-Database/ISRaD", ref="master")
library(ISRaD)

## clear workspace
# NOTE(review): rm(list=ls()) wipes the caller's workspace; avoid in scripts.
rm(list=ls())

# read in template file from ISRaD Package: one data frame per sheet,
# with all columns coerced to character for uniform filling later
template_file <- system.file("extdata", "ISRaD_Master_Template.xlsx", package = "ISRaD")
template <- lapply(getSheetNames(template_file), function(s) read.xlsx(template_file, sheet=s))
names(template) <- getSheetNames(template_file)
template <- lapply(template, function(x) x %>% mutate_all(as.character))

# take a look at template structure
glimpse(template)
head(template$metadata)

# load dataset (absolute local path -- only resolvable on the author's machine)
Aragones_dataset <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/14C_Dataset_Final_Cristian.csv", na.strings = c("","NA"),
                             stringsAsFactors = FALSE)
glimpse(Aragones_dataset)
length(Aragones_dataset)
# Clean column types: categorical columns become factors, with spaces in
# Study/Flux_type/Autotrophic_type replaced by underscores first.
Aragones_tidy <- Aragones_dataset %>%
  mutate(Dataset = as.factor(Dataset),
         Study = as.factor(str_replace_all(Study, fixed(" "), "_")),
         Yedoma = as.factor(Yedoma),
         LAR = as.factor(LAR),
         PF = as.factor(PF),
         Thermokarst = as.factor(Thermokarst),
         Yukon_Kolyma_origin = as.factor(Yukon_Kolyma_origin),
         Flux_type = as.factor(str_replace_all(Flux_type, fixed(" "), "_")),
         Depth_cm = as.numeric(Depth_cm),
         Aerob_anaerob_incub = as.factor(Aerob_anaerob_incub),
         Org_Min_Incub = as.factor(Org_Min_Incub),
         Autotrophic_type = as.factor(str_replace_all(Autotrophic_type, fixed(" "), "_")),
         Manipulation_study = as.factor(Manipulation_study),
         # NOTE(review): this `if` applies a scalar condition to a whole
         # column inside mutate(), and length() counts elements rather than
         # characters -- presumably nchar() with a vectorized if_else() was
         # intended; also paste() with the default sep inserts a space after
         # the leading "0". Verify against the raw Sampling_date format.
         Sampling_date = if(length(str_replace_all(Sampling_date,fixed("/"),"")) == 5) {
           mdy(paste("0",str_replace_all(Sampling_date,fixed("/"),"")))} else {
           mdy(str_replace_all(Sampling_date,fixed("/"),""))
         }) %>%
  select(1:67)
# str(Aragones_tidy)

# many conventions different between water and gas data. split to treat differently
Aragones_gas <- Aragones_tidy %>%
  filter(!is.na(Latitude_decimal_degrees)) %>%
  filter(Dataset == "Gas")
# work with gas data first
# str(Aragones_gas)
# Quick inspection of the gas-flux columns that will be mapped to ISRaD
# (printed via glimpse; the result is not assigned).
Aragones_gas %>% arrange(ID_merged) %>%
  select(Study,
         Full_class,
         General_ecosystem,
         PF,
         Yedoma,
         Grouped_Data_source,
         Specific_ecosystem,
         Flux_type,
         Depth_cm,
         Aerob_anaerob_incub,
         Org_Min_Incub,
         Autotrophic_type,
         Manipulation_study,
         Sampling_year_fraction,
         Sampling_date,
         DOY,
         Gral_description,
         WT_cm,
         Latitude_decimal_degrees,
         Longitude_decimal_degrees,
         d18O_permil,
         d13C_Soil,
         d13C_Atm_CO2,
         d13C_CO2,
         d13C_CH4,
         H_CH4,
         H_H2O,
         d13C_DOC,
         d13C_POC,
         DOC_mgC_L,
         POC_mgC_L,
         TOC_mgC_L,
         Fm_Soil,
         Fm_Atm_CO2,
         Fm_CO2,
         Fm_CH4,
         Fm_DOC,
         Fm_POC,
         Detailed_ecosystem_classification,
         Basin_Area_Drainage_Area_km2,
         MainRiver_name,
         Instantaneous_discharge_m3_per_s) %>%
  glimpse()
## fill entry_name ## I use Aragones_water here because it makes the two sets match (there was one fewer DOIs in the Aragones_gas data)
str(template$metadata)
# one entry per unique study, using the underscore-joined study name
entry_name <- Aragones_gas %>%
  select("Study") %>%
  distinct() %>%
  mutate(entry_name = Study) %>%
  select("entry_name") %>%
  arrange(as.factor(entry_name))

# read in doi csv (absolute local path -- author's machine only)
Aragones_doi <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Estop Aragones DOI List.csv", na.strings = c("","NA"),
                         stringsAsFactors = FALSE)

# format study names and join to dois
doi <- Aragones_doi %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "), "_")) %>%
  arrange(as.factor(entry_name)) %>%
  select("entry_name", doi = "DOI")

## be careful here, I think a couple of dois are lost/doubled up based on Alison's excel file
metadata <- full_join(entry_name, doi)

# fill metadata (55 unique studies in gas data); curator/contact fields are
# constants for this compilation
metadata <- metadata %>%
  mutate(compilation_doi = NA,
         curator_name = "Gavin McNicol",
         curator_organization = "Stanford University",
         curator_email = "gmcnicol@stanford.edu",
         modification_date_y = "2019",
         modification_date_m = "08",
         modification_date_d = "12",
         contact_name = "Cristian Estop-Aragones",
         contact_email = "estopara@ualberta.ca",
         contact_orcid_id = NA,
         bibliographical_reference = "Estop-Aragones 2018",
         metadata_note = NA,
         associated_datasets = NA,
         template_version = 20190812
  ) %>%
  arrange(entry_name)
# start by defining sites as unique lat longs
site <- as_tibble(cbind(
  Aragones_gas %>%
    # NOTE(review): select(entry_name = str_replace_all("Study", ...)) runs
    # the function on the literal string "Study" and uses the result as a
    # column name -- it works, but select(entry_name = Study) is the intent.
    select(entry_name = str_replace_all("Study", fixed(" "), "_"),
           site_latitude = "Latitude_decimal_degrees",
           site_longitude = "Longitude_decimal_degrees",
           Data_source_comments, MainRiver_name, More_description) %>%
    mutate(site_name = paste(site_latitude, site_longitude, sep = "  ")) %>% # note double space sep
    select(entry_name, site_name) %>%
    # filter(site_name != "NA  NA") %>%
    distinct(entry_name, site_name) %>%
    arrange(entry_name,site_name)
)
)

## get site_note variables to paste()
# collapse the Yedoma coding into three readable labels, then keep one
# site_note per unique lat/long site name
site_note_df <- as_tibble(Aragones_gas) %>%
  mutate(Yedoma = as.character(Yedoma),
         Thermokarst = as.character(Thermokarst)) %>%
  mutate(Yedoma = replace(Yedoma, Yedoma %in% c("No","No?"), "Not Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("Yes","Probably"), "Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("?","Unknown"), "Yedoma Unknown")) %>%
  mutate(site_note = paste(Full_class, Yedoma)) %>%
  select(site_note,entry_name = str_replace_all("Study", fixed(" "), "_"),
         site_latitude = "Latitude_decimal_degrees",
         site_longitude = "Longitude_decimal_degrees",
         Data_source_comments, More_description) %>%
  mutate(site_name = paste(site_latitude, site_longitude, sep = "  ")) %>%
  select(site_name, site_note) %>%
  # filter(site_name != "NA  NA") %>%
  group_by(site_name) %>%
  summarize(site_note = site_note[1])
#100 unique lat long and site note combinations

# now fill in other site variables: split the "lat  long" site name back
# into its components; datum/elevation are unknown for this dataset
site <- site %>%
  group_by(site_name) %>%
  mutate(site_latlong = strsplit(site_name[[1]], "  ", fixed = TRUE),
         site_datum = NA,
         site_elevation = NA) %>%
  mutate(site_lat = site_latlong[[1]][1],
         site_long = site_latlong[[1]][2]) %>%
  select(entry_name,
         site_name, site_lat, site_long, site_datum,
         site_elevation) %>%
  arrange(entry_name,site_name)

# join site_note to site tab
site <- site %>% left_join(site_note_df)
## Fill profile tab
# get number of individual rows per site in Aragones database
# (one summary row per entry/site pair, keeping the first value of each
# descriptive field as representative for that site)
num.aragones.rows <- as_tibble(Aragones_gas) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = "  "),
         pro_name_chr = paste(Specific_location, Specific_ecosystem, Manipulation, sep = "_")) %>%
  select(entry_name, site_name,pro_name_chr,
         Gral_description, Detailed_ecosystem_classification, General_ecosystem,
         Thermokarst, PF, Basin_Area_Drainage_Area_km2, MainRiver_name,
         Manipulation_study, Manipulation, AL_cm,
         Year, Month, Day,
         Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2, Instantaneous_discharge_m3_per_s) %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n(),
            gral = Gral_description[1],
            detailed_class = Detailed_ecosystem_classification[1],
            treatment = Manipulation_study[1],
            treatment_note = Manipulation[1],
            thaw_depth = AL_cm[1],
            pro_name_chr = pro_name_chr[1],
            pro_catchment_area = Basin_Area_Drainage_Area_km2[1],
            pro_water_body = General_ecosystem[1],
            pro_water_body_name = MainRiver_name[1],
            pro_permafrost = as.character(PF[1]),
            pro_thermokarst = as.character(Thermokarst[1])
  )
# how many measurements of 13c and Fm in total?
sum(num.aragones.rows$aragones.rows) #1163
## replicate reference columns and columns for pro_note field 1163 times
# Per-site metadata is expanded to one entry per raw measurement row.
# NOTE(review): the accumulate-into-lists-then-unlist pattern plus
# `1:length(...)` would be simpler and safer as rep(..., times = ...) on the
# summarized tibble (1:length also misbehaves on zero-length input).
aragones.rows.vector <- list()
sitenames.vector <- list()
entrynames.vector <- list()
gral.vector <- list()
detail.vector <- list()
t.vector <- list()
t_note.vector <- list()
thaw.vector <- list()
name_chr.v <- list()
pc_area <- list()
pw_body <- list()
pw_body_name <- list()
pp <- list()
pt <- list()
for (i in 1:length(num.aragones.rows$site_name)){
  aragones.rows.vector[[i]] <- c(seq(1,num.aragones.rows$aragones.rows[i],1))
  sitenames.vector[[i]] <- c(rep(num.aragones.rows$site_name[i],num.aragones.rows$aragones.rows[i]))
  entrynames.vector[[i]] <- c(rep(as.character(num.aragones.rows$entry_name[i]),num.aragones.rows$aragones.rows[i]))
  gral.vector[[i]] <- c(rep(num.aragones.rows$gral[i], num.aragones.rows$aragones.rows[i]))
  detail.vector[[i]] <- c(rep(num.aragones.rows$detailed_class[i], num.aragones.rows$aragones.rows[i]))
  t.vector[[i]] <- c(rep(num.aragones.rows$treatment[i], num.aragones.rows$aragones.rows[i]))
  t_note.vector[[i]] <- c(rep(num.aragones.rows$treatment_note[i], num.aragones.rows$aragones.rows[i]))
  thaw.vector[[i]] <- c(rep(num.aragones.rows$thaw_depth[i], num.aragones.rows$aragones.rows[i]))
  name_chr.v[[i]] <- c(rep(num.aragones.rows$pro_name_chr[i], num.aragones.rows$aragones.rows[i]))
  pc_area[[i]] <- c(rep(num.aragones.rows$pro_catchment_area[i], num.aragones.rows$aragones.rows[i]))
  pw_body[[i]] <- c(rep(num.aragones.rows$pro_water_body[i], num.aragones.rows$aragones.rows[i]))
  pw_body_name[[i]] <- c(rep(num.aragones.rows$pro_water_body_name[i], num.aragones.rows$aragones.rows[i]))
  pp[[i]] <- c(rep(num.aragones.rows$pro_permafrost[i], num.aragones.rows$aragones.rows[i]))
  pt[[i]] <- c(rep(num.aragones.rows$pro_thermokarst[i], num.aragones.rows$aragones.rows[i]))
}
# unlist all vectors
aragones.rows.vector <- unlist(aragones.rows.vector)
sitenames.vector <- unlist(sitenames.vector)
entrynames.vector <- unlist(entrynames.vector)
gral.vector <- unlist(gral.vector)
detail.vector <- unlist(detail.vector)
t.vector <- unlist(t.vector)
t_note.vector <- unlist(t_note.vector)
thaw.vector <- unlist(thaw.vector)
name_chr.v <- unlist(name_chr.v)
pc_area <- unlist(pc_area)
pw_body <- unlist(pw_body)
pw_body_name <- unlist(pw_body_name)
pp <- unlist(pp)
pt <- unlist(pt)
# create a tibble to do a left join
profiles <- as_tibble(cbind(sitenames.vector,aragones.rows.vector, entrynames.vector,gral.vector,
                            detail.vector, t.vector, t_note.vector, thaw.vector, name_chr.v,
                            pc_area, pw_body, pw_body_name, pp, pt))
# rename/derive the ISRaD-style column names from the replicated vectors
profiles <- profiles %>% mutate(site_name = sitenames.vector,
                                aragones_rows = aragones.rows.vector,
                                entry_name = entrynames.vector,
                                plot_name = paste(gral.vector, detail.vector, sep = " "),
                                pro_treatment = t.vector,
                                pro_treatment_note = t_note.vector,
                                pro_thaw_note = thaw.vector,
                                pro_name_chr = name_chr.v,
                                pro_catchment_area = pc_area,
                                pro_water_body = pw_body,
                                pro_water_body_name = pw_body_name,
                                pro_permafrost = pp,
                                pro_thermokarst = pt) %>%
  select(entry_name, site_name, pro_name_chr, aragones_rows, plot_name,
         pro_treatment, pro_treatment_note, pro_thaw_note,
         pro_name_chr, pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst)
# temporary profile tab, still need to add in pro_note from flux (below)
# Recode treatment/permafrost/thermokarst values into the controlled
# vocabulary expected by the ISRaD profile tab.
profile <- profiles %>%
  mutate(entry_name = entry_name,
         site_name = site_name,
         plot_name = plot_name,
         pro_name = replace_na(pro_name_chr, 1),
         aragones_rows = aragones_rows,
         pro_lat = NA,
         pro_long = NA,
         pro_elevation = NA,
         pro_treatment = recode_factor(factor(pro_treatment), `1` = "control", `2` = "treatment"),
         pro_treatment_note = pro_treatment_note,
         pro_thaw_note = pro_thaw_note,
         pro_catchment_area = pro_catchment_area,
         pro_water_body = pro_water_body,
         pro_water_body_name = pro_water_body_name,
         pro_permafrost = recode_factor(factor(pro_permafrost), `Not PF` = "", `PF` = "yes"),
         pro_thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "No", ""),
         pro_thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "Yes", "yes"),
         pro_thermokarst = replace(pro_thermokarst, pro_thermokarst %in% c("Probably","Probably "), "yes")) %>%
  mutate(pro_treatment = replace_na(pro_treatment, 'control')) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         aragones_rows,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_note,
         pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst) %>%
  arrange(entry_name,site_name)
# interactive check of the assembled profile tab
View(profile)
##################
## fill out a 'measurements' tab, from which we split fluxes, then layer, interstitial and incubation data
# take a look at the tab structures
# get the actual values again
measurements <- as_tibble(Aragones_gas) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = "  "),
         pro_name = replace_na(Specific_location, 1)) %>%
  select(entry_name, site_name, pro_name,
         Year, Month, Day, Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2,
         Data_source_comments, MainRiver_name, More_description,
         Depth_cm, Aerob_anaerob_incub, Org_Min_Incub,Autotrophic_type,
         Instantaneous_discharge_m3_per_s,
         H_CH4, H_H2O, d18O_permil,
         DOC_mgC_L, POC_mgC_L, TOC_mgC_L,
         d13C_Soil, d13C_Atm_CO2, d13C_CO2, d13C_CH4, d13C_DOC, d13C_POC,
         Fm_Soil, Fm_Atm_CO2, Fm_CO2, Fm_CH4, Fm_DOC, Fm_POC) %>%
  arrange(entry_name,site_name, pro_name)

## bind first 3 fields of profile to the measurements
# NOTE(review): this cbind relies on profile and measurements having the
# same row order -- a keyed join would be safer.
measurements <- as_tibble(cbind(
  profile$pro_name,profile$aragones_rows,
  measurements
))
# make a dummy tab called template$measurements
# select all fields plus a concatenated column for pro_note (needs to be moved to profile tab )
# Reshape the wide analyte columns into long form, tag each value with its
# analyte (CO2/CH4/DOC/...) and with the destination field it feeds
# (flx_2h / flx_analyte_conc / d13c / Fm), then spread back so each row
# carries one value per destination field.
# NOTE: gather()/spread() are superseded by pivot_longer()/pivot_wider().
measurements <- measurements %>%
mutate(pro_note = paste(Data_source_comments, More_description, sep = " ")) %>%
gather(key = "measurement_name",
value = "measurement_value",
c("H_CH4", "H_H2O",
"DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L",
"d13C_Soil","d13C_Atm_CO2","d13C_CO2",
"d13C_CH4","d13C_DOC","d13C_POC",
"Fm_Soil","Fm_Atm_CO2","Fm_CO2",
"Fm_CH4","Fm_DOC","Fm_POC")) %>%
mutate(measurement_analyte = measurement_name,
measurement_index = measurement_name) %>%
# Map the original column names onto analyte labels ...
mutate(measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Atm_CO2","Fm_Atm_CO2"), "Atm_CO2"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CO2","Fm_CO2"), "CO2"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CH4","Fm_CH4","H_CH4"), "CH4"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_DOC","Fm_DOC","DOC_mgC_L"), "DOC"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Soil","Fm_Soil"), "Soil"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_POC","Fm_POC", "POC_mgC_L"), "POC"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("TOC_mgC_L"), "TOC"),
measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("H_H2O"), "H2O"),
# ... and onto the destination-field index used as the spread() key.
measurement_index = replace(measurement_index, measurement_index %in% c("H_CH4", "H_H2O"), "flx_2h"),
measurement_index = replace(measurement_index, measurement_index %in% c("DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L"), "flx_analyte_conc"),
measurement_index = replace(measurement_index, measurement_index %in% c("d13C_Soil","d13C_Atm_CO2","d13C_CO2",
"d13C_CH4","d13C_DOC","d13C_POC"), "d13c"),
measurement_index = replace(measurement_index, measurement_index %in% c("Fm_Soil","Fm_Atm_CO2","Fm_CO2",
"Fm_CH4","Fm_DOC","Fm_POC"), "Fm"),
Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Aerobic` = ""),
Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Anaerobic` = "yes")) %>%
spread(key = measurement_index, value = measurement_value) %>%
# The cbind above produced literal column names "profile$pro_name" /
# "profile$aragones_rows"; rename them here.
select(entry_name,site_name, pro_name = "profile$pro_name",
aragones_rows = "profile$aragones_rows",
measurement_obs_date_y = Year,
measurement_obs_date_m = Month,
measurement_obs_date_d = Day,
measurement_pathway = Grouped_Data_source, # for splitting flux, incub, inter
measurement_pathway_note = Data_source,
# measurement_analyte = Flux_type,
measurement_ecosystem_component = Flux_type,
measurement_method_note = Sampling_method,
measurement_method_note2 =Sample_treatment,
measurement_rate = Specific_discharge_m3_per_s_per_km2,
measurement_depth = Depth_cm,
measurement_incubation_headspace = Aerob_anaerob_incub,
measurement_incubation_soil = Org_Min_Incub,
measurement_incubation_auto_type = Autotrophic_type,
measurement_analyte,
Instantaneous_discharge_m3_per_s,
flx_2h, d18O_permil,
flx_analyte_conc,
d13c,Fm, pro_note)
View(measurements)
# select only rows with data (either d13c or Fm or flx_2h)
# Drop rows that carry none of the three isotope/value fields of interest.
measurements <- measurements %>%
filter(!is.na(flx_2h) | !is.na(d13c) | !is.na(Fm))
# arrange
measurements <- measurements %>% arrange(entry_name, site_name, pro_name)
# gather, remove NAs and spread again to match 13c and Fm values for each original aragones_row entry
# (collapses rows so each original record carries its 13C, Fm and 2H on one row)
measurements <- measurements %>% gather(key = "13c or Fm or 2h", value = "value",
c("flx_2h","d13c","Fm")) %>% arrange(entry_name, site_name, pro_name) %>% filter(!is.na(value)) %>%
spread(key = "13c or Fm or 2h", value = "value")
# One pro_note per profile: keep the first note observed for each pro_name.
pro_note_summary <- summarize(
  group_by(measurements, pro_name),
  pro_note = first(pro_note)
)
# Finalize the gas-data profile tab: join in pro_note from the measurements
# tab and derive a numeric thaw depth (cm) from the free-text pro_thaw_note.
profile <- as_tibble(left_join(profile, pro_note_summary, by = "pro_name")) %>%
  mutate(pro_thaw = as.factor(pro_thaw_note)) %>%
  # map range-style thaw notes onto midpoint depths (cm)
  mutate(pro_thaw_depth = recode_factor(pro_thaw,
                                        `40 to 60 in the site, not in the aquatic system` = "50",
                                        `46 to 55` = '50',
                                        `60 to 120` = '90')) %>%
  # BUG FIX: convert the *recoded* pro_thaw_depth, not the raw pro_thaw --
  # converting pro_thaw discarded the midpoint recoding above and coerced
  # every range-style note to NA.
  mutate(pro_thaw_depth = as.numeric(as.character(pro_thaw_depth))) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         pro_note,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_depth,
         pro_catchment_area,
         pro_permafrost,
         pro_thermokarst,
         pro_water_body,
         pro_water_body_name) %>%
  distinct() %>%
  arrange(entry_name,site_name,pro_name)
# remove pro_note from the measurements tab
# (pro_note has been moved to the profile tab; keep only measurement fields)
measurements <- measurements %>%
select(entry_name,
site_name,
pro_name,
measurement_obs_date_y,
measurement_obs_date_m,
measurement_obs_date_d,
measurement_pathway, # for splitting flux, incub, inter
measurement_pathway_note,
# measurement_analyte,
measurement_ecosystem_component,
measurement_method_note,
measurement_method_note2,
measurement_rate,
measurement_depth,
measurement_incubation_headspace,
measurement_incubation_soil,
measurement_incubation_auto_type,
flx_discharge_rate = Instantaneous_discharge_m3_per_s,
measurement_analyte,
flx_2h, d18O_permil,
flx_analyte_conc,
d13c, Fm,
# pro_note is not in the positive selection above, so this -pro_note is
# effectively a no-op kept to document intent
-pro_note)
# split the data into flux, interstitial and incubation
# NOTE: i include all bubble data in flux even tho not all were emitted naturally (some by stirring sediment)
# because there is no specific layer that these observations can be attributed to
flux <- measurements %>%
filter(measurement_pathway %in% c("Flux","Bubbles","Water"))
# assign final names and correct controlled vocab
#flx_method
flux$measurement_method_note <- as.factor(flux$measurement_method_note)
# NOTE(review): this recode depends on the exact count and alphabetical order
# of the factor levels in the current dataset (7 + 1 + 6 + 1 + 11 = 26
# levels); it will silently mislabel methods if the input data change.
levels(flux$measurement_method_note) <- c(rep("chamber",7),"grab sample",rep("chamber",6),"grab sample",rep("chamber",11))
# create index vector to make the flx_name unique
flx_x <- flux %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n())
# Concatenated within-group indices 1..n_i for each (entry, site) group.
# Base sequence() replaces the previous grow-a-list-then-unlist() loop and
# avoids the 1:length() zero-length trap.
x <- sequence(flx_x$aragones.rows)
#finalize
# Build the final flux tab: translate pathway / ecosystem-component labels to
# the controlled vocabulary, mint unique flx_name keys with the per-site
# index vector x, and rename columns to the template schema.
flux <- flux %>%
mutate(flx_pathway = as.character(measurement_pathway),
measurement_ecosystem_component = as.character(measurement_ecosystem_component),
index = x) %>%
mutate(flx_pathway = replace(flx_pathway, measurement_pathway == "Bubbles", "bubble ebullition"),
flx_pathway = replace(flx_pathway, measurement_pathway == "Flux", "soil emission"),
flx_pathway = replace(flx_pathway, measurement_pathway == "Water", "dissolved"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component %in%
c("CH4","Heterotrophic_respiration","Soil"), "heterotrophic"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
"Autotrophic_respiration", "autotrophic"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
"Ecosystem_respiration", "ecosystem"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
"Water_export", "aquatic"),
flx_name = paste(pro_name, measurement_analyte,index, sep = " ")) %>%
select(entry_name,
site_name,
pro_name,
flx_name,
flx_obs_date_y = measurement_obs_date_y,
flx_obs_date_m = measurement_obs_date_m,
flx_obs_date_d = measurement_obs_date_d,
flx_pathway = flx_pathway,
flx_pathway_note = measurement_pathway_note,
flx_ecosystem_component = measurement_ecosystem_component,
flx_method = measurement_method_note,
flx_method_note = measurement_method_note2,
flx_rate = measurement_rate,
flx_analyte = measurement_analyte,
flx_analyte_conc,
flx_discharge_rate,
flx_18o = d18O_permil,
flx_2h,
flx_13c = d13c,
flx_fraction_modern = Fm) %>%
mutate(flx_obs_date_y = as.character(flx_obs_date_y),
flx_obs_date_m = as.character(flx_obs_date_m),
flx_obs_date_d = as.character(flx_obs_date_d))
# replace Atm_CO2 with CO2 and associated ecosystem component with atmosphere
# NOTE(review): "Ecosystem_Respiration" was already recoded to "ecosystem"
# above (with lower-case r), so the second recode_factor below is likely a
# no-op -- confirm which spelling actually occurs in the data.
flux <- flux %>%
mutate(flx_analyte = as.factor(flx_analyte)) %>%
mutate(flx_analyte = recode_factor(flx_analyte, Atm_CO2 = "CO2"),
flx_ecosystem_component = recode_factor(flx_ecosystem_component, Ecosystem_Respiration = "atmosphere")) %>%
arrange(entry_name, site_name, pro_name)
View(flux)
## correct two Arargones data issues (row 107 and 123 are "Soil" but should be "Soil porespace", based on c13 which looks like methane)
# NOTE(review): hard-coded row indices -- only valid for this exact version of
# the source CSV and the sort order established above.
measurements$measurement_pathway[c(107,123)] <- "Soil porespace"
# filter measurements tab for Soil (layer measurements) and corresponding Soil porespace (interstitial) and Incub (incubation) data
# replicate the profiles pasted with the depth for dummy layer names then specify top and bottom depth with depth +/- 5 cm
layer <- measurements %>%
filter(measurement_pathway %in% c("Soil","Soil porespace","Incub"))
## all depths reported from surface so set lyr_all_org_neg = 'yes'
# Mint dummy layer names from site/profile/depth and assume a 10 cm layer
# centred on the reported depth.
# NOTE(review): lyr_top = depth - 5 can go negative for depths < 5 cm --
# confirm whether negative tops are acceptable downstream.
layer <- layer %>%
mutate(lyr_name = paste(site_name, pro_name, measurement_depth, sep = "_"),
lyr_top = measurement_depth - 5, lyr_bot = measurement_depth + 5,
lyr_all_org_neg = 'yes') %>%
select(entry_name,
site_name,
pro_name,
lyr_name,
lyr_obs_date_y = measurement_obs_date_y,
lyr_obs_date_m = measurement_obs_date_m,
lyr_obs_date_d = measurement_obs_date_d,
lyr_all_org_neg,
lyr_top,
lyr_bot,
measurement_pathway,
lyr_13c = d13c,
lyr_fraction_modern = Fm) %>%
mutate(lyr_obs_date_y = as.character(lyr_obs_date_y),
lyr_obs_date_m = as.character(lyr_obs_date_m),
lyr_obs_date_d = as.character(lyr_obs_date_d))
# subset non-Soil (layer) data and assign the 13c and Fm values to NA
# (these rows exist only to give interstitial/incubation data a parent layer;
# one representative row is kept per lyr_name)
layer.red1 <- layer %>%
filter(measurement_pathway != "Soil") %>%
mutate(lyr_13c = NA,
lyr_fraction_modern = NA) %>%
group_by(entry_name, site_name, pro_name, lyr_name) %>%
summarize(lyr_obs_date_y = lyr_obs_date_y[1],
lyr_obs_date_m = lyr_obs_date_m[1],
lyr_obs_date_d = lyr_obs_date_d[1],
lyr_all_org_neg = lyr_all_org_neg[1],
lyr_top = lyr_top[1],
lyr_bot = lyr_bot[1],
measurement_pathway = measurement_pathway[1],
lyr_13c = lyr_13c[1],
lyr_fraction_modern = lyr_fraction_modern[1]) %>%
filter(!is.na(lyr_top) & !is.na(lyr_bot)) %>%
select(-measurement_pathway) %>%
distinct() %>%
arrange(entry_name, site_name)
# Keep only true soil-layer rows that carry both a 13C and a Fm observation,
# then drop the pathway column and de-duplicate.
layer.soil <- layer %>%
  filter(
    measurement_pathway == "Soil",
    !is.na(lyr_13c),
    !is.na(lyr_fraction_modern)
  ) %>%
  select(-measurement_pathway) %>%
  distinct() %>%
  arrange(entry_name, site_name)
# Join the placeholder layers (layer.red1) and the observed soil layers
# (layer.soil) into the final layer tab, retaining only distinct rows.
layer.red.final <- bind_rows(layer.red1, layer.soil) %>% distinct() %>% arrange(entry_name, site_name)
#
# # get layer names
# site.names <- layer.red2 %>%
# select(site_name) %>%
# distinct() %>% pull()
# # create a list of vectors for number of fluxes per site
# x <- list()
# for (i in 1:length(site.names)){
# x[[i]] <- layer.red2 %>%
# filter(site_name == site.names[i]) %>%
# mutate(index = 1:n()) %>%
# select(index)
# }
# x <- bind_rows(x)
#
# # finalize layer names
# layer.red.final <- layer.red2 %>%
# mutate(index = x$index) %>%
# mutate(lyr_name = paste(site_name, lyr_name, index, sep = "_")) %>%
# arrange(entry_name,site_name) %>%
# select(-index)
# extract the interstitial data
interstitial <- measurements %>%
filter(measurement_pathway %in% c("Soil porespace"))
# get interstitial names
site.names <- interstitial %>%
select(site_name) %>%
distinct() %>% pull()
# create a list of vectors for number of fluxes per site
# Builds a per-site running index 1..n; the bound rows follow site
# first-appearance order.
# NOTE(review): x$index is later attached positionally (mutate(index = ...)),
# which assumes `interstitial` rows are already grouped contiguously by site
# in the same order -- verify this holds after the earlier arrange().
x <- list()
for (i in 1:length(site.names)){
x[[i]] <- interstitial %>%
filter(site_name == site.names[i]) %>%
mutate(index = 1:n()) %>%
select(index)
}
x <- bind_rows(x)
## assign final field names and correct controlled vocab
interstitial <- interstitial %>%
mutate(index = x$index) %>%
mutate(ist_depth = measurement_depth,
ist_analyte = measurement_analyte,
ist_notes = paste("3 notes sep by &: atmosphere", measurement_method_note, measurement_method_note2, sep = " & "),
ist_13c = d13c,
ist_fraction_modern = Fm) %>%
mutate(ist_name = paste(ist_depth, ist_analyte, index, sep = "_")) %>%
arrange(entry_name,site_name) %>%
select(entry_name,
site_name,
pro_name,
ist_name,
ist_obs_date_y = measurement_obs_date_y,
ist_obs_date_m = measurement_obs_date_m,
ist_obs_date_d = measurement_obs_date_d,
ist_depth,
ist_analyte,
ist_notes,
ist_2h = flx_2h,
ist_18o = d18O_permil,
ist_13c,
ist_fraction_modern) %>%
mutate(ist_obs_date_y = as.character(ist_obs_date_y),
ist_obs_date_m = as.character(ist_obs_date_m),
ist_obs_date_d = as.character(ist_obs_date_d))
# attach entry/site/profile keys (profile columns 1, 2 and 4) and keep only
# interstitial rows that match an existing profile (inner_join)
interstitial <- profile[,c(1,2,4)] %>% inner_join(interstitial) %>% distinct()
# replace Atm_CO2 with CO2 and associated ecosystem component with atmosphere
# (also relabels the "Soil" analyte as POC for the interstitial tab)
interstitial <- interstitial %>%
mutate(ist_analyte = as.factor(ist_analyte)) %>%
mutate(ist_analyte = recode_factor(ist_analyte, Atm_CO2 = "CO2", `Soil` = "POC"))
# extract the incubation data
incubation <- measurements %>%
filter(measurement_pathway %in% c("Incub"))
# get incubation names
site.names <- incubation %>%
select(site_name) %>%
distinct() %>% pull()
# create a list of vectors for number of fluxes per site
# NOTE(review): same positional-alignment assumption as the interstitial
# index loop above -- rows must already be contiguous by site.
x <- list()
for (i in 1:length(site.names)){
x[[i]] <- incubation %>%
filter(site_name == site.names[i]) %>%
mutate(index = 1:n()) %>%
select(index)
}
x <- bind_rows(x)
# assign final field names and correct controlled vocab
# Build the incubation tab: reconstruct the parent lyr_name the same way the
# layer tab did (site_pro_depth), mint unique inc_name keys, and translate
# labels to the controlled vocabulary.
incubation <- incubation %>%
mutate(measurement_ecosystem_component = as.character(measurement_ecosystem_component),
measurement_incubation_soil = as.character(measurement_incubation_soil),
index = x$index) %>%
mutate(lyr_name = paste(site_name, pro_name, measurement_depth, sep = "_"),
inc_name = paste(lyr_name, measurement_ecosystem_component,index, sep = "_"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
"Heterotrophic_respiration", "heterotrophic"),
measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
"Autotrophic_respiration", "autotrophic"),
inc_headspace = measurement_incubation_headspace,
measurement_incubation_soil = replace(measurement_incubation_soil, measurement_incubation_soil %in% c("Autotrophic","Roots"),
"live roots"),
measurement_incubation_soil = replace(measurement_incubation_soil, measurement_incubation_soil == "Mineral",
"root-picked soil"),
inc_note = paste(measurement_ecosystem_component, inc_headspace, sep = " "),
inc_depth = measurement_depth) %>%
arrange(entry_name,site_name,inc_depth) %>%
select(entry_name,
site_name,
pro_name,
lyr_name,
inc_name,
inc_type = measurement_incubation_soil,
inc_note,
inc_anaerobic = inc_headspace,
inc_obs_date_y = measurement_obs_date_y,
inc_obs_date_m = measurement_obs_date_m,
inc_obs_date_d = measurement_obs_date_d,
inc_analyte = measurement_analyte,
inc_13c = d13c,
inc_fraction_modern = Fm) %>%
mutate(inc_obs_date_y = as.character(inc_obs_date_y),
inc_obs_date_m = as.character(inc_obs_date_m),
inc_obs_date_d = as.character(inc_obs_date_d),
# CO2 is the template default analyte for incubations, so it is blanked here
inc_analyte = replace(inc_analyte, inc_analyte == "CO2", ""))
# normalize the remaining inc_type labels ("Organic", incl. a trailing-space
# variant) to the controlled-vocabulary term
incubation <- incubation %>%
mutate(inc_type = recode_factor(factor(inc_type), `Organic` = "root-picked soil", `Organic ` = "root-picked soil"))
# get first 4 columns of layer and left_join incubation based upon lyr_name
incubation <- layer.red.final[,1:4] %>% inner_join(incubation) %>% distinct()
# set all columns as character
# (template tabs are written out as text; mutate_all() is superseded --
# modern dplyr would use mutate(across(everything(), as.character)))
metadata <- metadata %>%
mutate_all(as.character)
site <- site %>%
mutate_all(as.character)
profile <- profile %>%
mutate_all(as.character)
flux <- flux %>%
mutate_all(as.character)
layer <- layer.red.final %>%
mutate_all(as.character)
interstitial <- interstitial %>%
mutate_all(as.character)
incubation <- incubation %>%
mutate_all(as.character)
# for each entry_name, pull in the corresponding rows for each tab into a
# list, where elements are different entry_names
names <- metadata %>% select(entry_name) %>% pull()
# split_by_entry(): one filtered copy of `tab` per entry name, in `entry_names`
# order. Replaces seven duplicated for-loops; lapply() also behaves correctly
# for a length-0 `names` vector, which 1:length(names) did not.
split_by_entry <- function(tab, entry_names) {
  lapply(entry_names, function(nm) filter(tab, entry_name == nm))
}
# metadata
metadata.entries <- split_by_entry(metadata, names)
# site
site.entries <- split_by_entry(site, names)
# profile
profile.entries <- split_by_entry(profile, names)
# flux
flux.entries <- split_by_entry(flux, names)
# layer
layer.entries <- split_by_entry(layer, names)
# interstitial
interstitial.entries <- split_by_entry(interstitial, names)
# incubation
incubation.entries <- split_by_entry(incubation, names)
# template$metadata
#
# toutput.metadata <- list()
# toutput.site <- list()
# toutput.profile <- list()
# toutput.flux <- list()
# toutput.layer <- list()
# toutput.interstitial <- list()
# toutput.incubation <- list()
# toutput.fraction <- list()
# toutput.cvocab <- list()
#
# for (i in 1:length(names)) {
# # merge with template
# toutput.metadata[[i]] <- bind_rows(template$metadata, metadata.entries[[i]])
# toutput.metadata[[i]] <- toutput.metadata[[i]][-3,] # not sure why an extra row of NaN is added
# toutput.site[[i]] <- bind_rows(template$site, site.entries[[i]])
# toutput.profile[[i]] <- bind_rows(template$profile, profile.entries[[i]])
# toutput.flux[[i]] <- bind_rows(template$flux, flux.entries[[i]])
# toutput.layer[[i]] <- bind_rows(template$layer, layer.entries[[i]])
# toutput.interstitial[[i]] <- bind_rows(template$interstitial, interstitial.entries[[i]])
# toutput.fraction[[i]] <- template$fraction
# toutput.incubation[[i]] <- bind_rows(template$incubation, incubation.entries[[i]])
# toutput.cvocab[[i]] <- template$`controlled vocabulary`
# }
#
#
# toutput.byentry <- list()
# for (i in 1:length(names)){
# toutput.byentry[[i]] <- list(toutput.metadata[[i]], toutput.site[[i]], toutput.profile[[i]], toutput.flux[[i]],
# toutput.layer[[i]], toutput.interstitial[[i]], toutput.fraction[[i]] , toutput.incubation[[i]],
# toutput.cvocab[[i]])
# }
#
# # save gas template
# for (i in 1:length(names)){
# names(toutput.byentry[[i]]) <- c("metadata","site","profile","flux","layer","interstitial","fraction","incubation",'controlled vocabulary')
# write.xlsx(toutput.byentry[[i]], paste("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Aragones Ingest Files/By Entry/",names[i],".xlsx", sep = ""),
# keepNA = FALSE)
# }
#
template$profile
############################################ WATER
# read in template file from ISRaD Package
# (re-loads the master template: one data frame per sheet, all columns
# coerced to character so bind_rows() against the data tabs is type-safe)
template_file <- system.file("extdata", "ISRaD_Master_Template.xlsx", package = "ISRaD")
template <- lapply(getSheetNames(template_file), function(s) read.xlsx(template_file, sheet=s))
names(template) <- getSheetNames(template_file)
template <- lapply(template, function(x) x %>% mutate_all(as.character))
# take a look at template structure
glimpse(template)
# load dataset
Aragones_dataset <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/14C_Dataset_Final_Cristian.csv", na.strings = c("","NA"),
stringsAsFactors = FALSE)
# Tidy the master table: sanitize Study names (spaces -> underscores, so all
# later entry_name derivations agree), coerce categorical columns to factor,
# and parse Sampling_date.
Aragones_tidy <- Aragones_dataset %>%
mutate(Dataset = as.factor(Dataset),
Study = as.factor(str_replace_all(Study, fixed(" "), "_")),
Yedoma = as.factor(Yedoma),
LAR = as.factor(LAR),
PF = as.factor(PF),
Thermokarst = as.factor(Thermokarst),
Yukon_Kolyma_origin = as.factor(Yukon_Kolyma_origin),
Flux_type = as.factor(str_replace_all(Flux_type, fixed(" "), "_")),
Depth_cm = as.numeric(Depth_cm),
Aerob_anaerob_incub = as.factor(Aerob_anaerob_incub),
Org_Min_Incub = as.factor(Org_Min_Incub),
Autotrophic_type = as.factor(str_replace_all(Autotrophic_type, fixed(" "), "_")),
Manipulation_study = as.factor(Manipulation_study),
# NOTE(review): suspect date logic -- length() here is the length of the
# whole column (a scalar), not per-row nchar(), so the if() branch is
# effectively constant; and paste("0", ...) inserts a space (paste0 was
# probably intended). Left unchanged pending confirmation of intent.
Sampling_date = if(length(str_replace_all(Sampling_date,fixed("/"),"")) == 5) {
mdy(paste("0",str_replace_all(Sampling_date,fixed("/"),"")))} else {
mdy(str_replace_all(Sampling_date,fixed("/"),""))
}) %>%
select(1:67)
# Water and gas records follow different conventions, so they are handled in
# separate passes. Subset the water observations from the tidied master table.
Aragones_water <- filter(Aragones_tidy, Dataset == "Water")
# inspect the water subset (this section mirrors the earlier gas workflow,
# but operates on the water data)
str(Aragones_water)
# Interactive inspection of the water columns of interest (display only:
# the result of this pipeline is not assigned).
Aragones_water %>% arrange(ID_merged) %>%
select(Study,
Full_class,
General_ecosystem,
PF,
Yedoma,
Grouped_Data_source,
Specific_ecosystem,
Flux_type,
Depth_cm,
Aerob_anaerob_incub,
Org_Min_Incub,
Autotrophic_type,
Manipulation_study,
Sampling_year_fraction,
Sampling_date,
DOY,
Gral_description,
WT_cm,
Latitude_decimal_degrees,
Longitude_decimal_degrees,
d18O_permil,
d13C_Soil,
d13C_Atm_CO2,
d13C_CO2,
d13C_CH4,
H_CH4,
H_H2O,
d13C_DOC,
d13C_POC,
DOC_mgC_L,
POC_mgC_L,
TOC_mgC_L,
Fm_Soil,
Fm_Atm_CO2,
Fm_CO2,
Fm_CH4,
Fm_DOC,
Fm_POC,
Detailed_ecosystem_classification,
Basin_Area_Drainage_Area_km2,
MainRiver_name,
Instantaneous_discharge_m3_per_s) %>%
glimpse()
## fill entry_name
str(template$metadata)
# One row per study; Study was already sanitized (spaces -> underscores) in
# the Aragones_tidy step, so no further replacement is needed here.
entry_name <- Aragones_water %>%
select("Study") %>%
distinct() %>%
mutate(entry_name = Study) %>%
select("entry_name") %>%
arrange(as.factor(entry_name))
# read in doi csv
Aragones_doi <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Estop Aragones DOI List.csv", na.strings = c("","NA"),
stringsAsFactors = FALSE)
# format study names and join to dois
# (underscore-sanitize Study so it matches the entry_name convention)
doi <- Aragones_doi %>%
mutate(entry_name = str_replace_all(Study, fixed(" "), "_")) %>%
arrange(as.factor(entry_name)) %>%
select("entry_name","DOI")
## be careful here, I think a couple of dois are lost/doubled up based on Alison's excel file
metadata <- full_join(entry_name, doi)
# fill metadata (56 unique studies in water data)
# Constant curator/contact fields for every water-data entry; column order in
# the resulting tab follows the mutate() argument order below.
metadata <- metadata %>%
mutate(compilation_doi = NA,
curator_name = "Gavin McNicol",
curator_organization = "Stanford University",
curator_email = "gmcnicol@stanford.edu",
modification_date_y = "2019",
modification_date_m = "08",
modification_date_d = "12",
contact_name = "Cristian Estop-Aragones",
contact_email = "estopara@ualberta.ca",
contact_orcid_id = NA,
bibliographical_reference = "Estop-Aragones 2018",
metadata_note = NA,
associated_datasets = NA,
template_version = 20190812
) %>%
arrange(entry_name)
# start by defining sites as unique lat longs
# NOTE(review): inside select(), str_replace_all("Study", ...) operates on the
# *literal string* "Study" (which has no spaces), so it simply evaluates to
# "Study" and selects that column by name -- it works, but only by accident;
# Study was already underscore-sanitized upstream.
site <- as_tibble(cbind(
Aragones_water %>%
select(entry_name = str_replace_all("Study", fixed(" "), "_"),
site_latitude = "Latitude_decimal_degrees",
site_longitude = "Longitude_decimal_degrees",
Data_source_comments, MainRiver_name, More_description) %>%
mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>% # note double space sep
select(entry_name, site_name) %>%
# filter(site_name != "NA NA") %>%
distinct(entry_name, site_name) %>%
arrange(entry_name, site_name)
)
)
## get site_note variables to paste()
# Build one site_note per site_name: normalized Yedoma status pasted onto the
# full ecosystem classification, keeping the first note seen per site.
site_note_df <- as_tibble(Aragones_water) %>%
mutate(Yedoma = as.character(Yedoma),
Thermokarst = as.character(Thermokarst)) %>%
mutate(Yedoma = replace(Yedoma, Yedoma %in% c("No","No?"), "Not Yedoma"),
Yedoma = replace(Yedoma, Yedoma %in% c("Yes","Probably"), "Yedoma"),
Yedoma = replace(Yedoma, Yedoma %in% c("?","Unknown"), "Yedoma Unknown")) %>%
mutate(site_note = paste(Full_class, Yedoma)) %>%
select(site_note,entry_name = str_replace_all("Study", fixed(" "), "_"),
site_latitude = "Latitude_decimal_degrees",
site_longitude = "Longitude_decimal_degrees",
Data_source_comments, More_description) %>%
mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>%
select(site_name, site_note) %>%
# filter(site_name != "NA NA") %>%
group_by(site_name) %>%
summarize(site_note = site_note[1])
#136 unique lat long and site note combinations
# now fill in other site variables
# Split the "lat long" site_name back into separate lat/long fields.
# NOTE(review): strsplit() inside the grouped mutate yields a length-1
# list-column recycled over the group; site_latlong[[1]][1]/[2] then pick lat
# and long -- works because every group shares one site_name.
site <- site %>%
group_by(site_name) %>%
mutate(site_latlong = strsplit(site_name[[1]], " ", fixed = TRUE),
site_datum = NA,
site_elevation = NA) %>%
mutate(site_lat = site_latlong[[1]][1],
site_long = site_latlong[[1]][2]) %>%
select(entry_name,
site_name, site_lat, site_long, site_datum,
site_elevation) %>%
arrange(entry_name, site_name)
site <- site %>% left_join(site_note_df)
## Fill profile tab
# get number of individual rows per site in Aragones database
# Per (entry, site) summary: row count plus the first value of each
# descriptive column; used below to replicate values back to row level.
num.aragones.rows <- as_tibble(Aragones_water) %>%
mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
site_latitude = Latitude_decimal_degrees,
site_longitude = Longitude_decimal_degrees,
site_name = paste(site_latitude, site_longitude, sep = " "),
pro_name_chr = paste(Specific_location, Specific_ecosystem, Manipulation, sep = "_")) %>%
select(entry_name, site_name,pro_name_chr,
Gral_description, Detailed_ecosystem_classification, General_ecosystem,
Thermokarst, PF, Basin_Area_Drainage_Area_km2, MainRiver_name,
Manipulation_study, Manipulation, AL_cm,
Year, Month, Day,
Grouped_Data_source, Data_source,
Flux_type, Sampling_method, Sample_treatment,
Specific_discharge_m3_per_s_per_km2, Instantaneous_discharge_m3_per_s) %>%
group_by(entry_name, site_name) %>%
summarize(aragones.rows = n(),
gral = Gral_description[1],
detailed_class = Detailed_ecosystem_classification[1],
treatment = Manipulation_study[1],
treatment_note = Manipulation[1],
thaw_depth = AL_cm[1],
pro_name_chr = pro_name_chr[1],
pro_catchment_area = Basin_Area_Drainage_Area_km2[1],
pro_water_body = General_ecosystem[1],
pro_water_body_name = MainRiver_name[1],
pro_permafrost = PF[1],
pro_thermokarst = Thermokarst[1]
)
## replicate reference columns and columns for pro_note field 1163 times
## Vectorized replication: rep(x, times) with a per-site repeat count replaces
## the previous fifteen grow-a-list-then-unlist() loops, and sequence() yields
## the concatenated 1..n_i within-site index runs. rep() preserves factor
## columns with their levels intact, matching what unlist() on an all-factor
## list produced. Vector names are unchanged so the cbind()/mutate() below
## works as before.
rep.times <- num.aragones.rows$aragones.rows
# within-site row index 1..n_i (as.numeric keeps the old seq() double type)
aragones.rows.vector <- as.numeric(sequence(rep.times))
sitenames.vector <- rep(num.aragones.rows$site_name, rep.times)
entrynames.vector <- rep(as.character(num.aragones.rows$entry_name), rep.times)
gral.vector <- rep(num.aragones.rows$gral, rep.times)
detail.vector <- rep(num.aragones.rows$detailed_class, rep.times)
t.vector <- rep(num.aragones.rows$treatment, rep.times)
t_note.vector <- rep(num.aragones.rows$treatment_note, rep.times)
thaw.vector <- rep(num.aragones.rows$thaw_depth, rep.times)
name_chr.v <- rep(num.aragones.rows$pro_name_chr, rep.times)
pc_area <- rep(num.aragones.rows$pro_catchment_area, rep.times)
pw_body <- rep(num.aragones.rows$pro_water_body, rep.times)
pw_body_name <- rep(num.aragones.rows$pro_water_body_name, rep.times)
pp <- rep(num.aragones.rows$pro_permafrost, rep.times)
pt <- rep(num.aragones.rows$pro_thermokarst, rep.times)
# create a tibble to do a left join
# NOTE(review): cbind() of mixed vectors coerces everything to a character
# matrix (factors to their codes); that is only scaffolding -- the mutate()
# below overwrites every column from the original typed vectors, and select()
# drops the raw cbind columns.
profiles <- as_tibble(cbind(sitenames.vector,aragones.rows.vector, entrynames.vector,gral.vector,
detail.vector, t.vector, t_note.vector, thaw.vector, name_chr.v,
pc_area, pw_body, pw_body_name, pp, pt))
profiles <- profiles %>% mutate(site_name = sitenames.vector,
aragones_rows = aragones.rows.vector,
entry_name = entrynames.vector,
plot_name = paste(gral.vector, detail.vector, sep = " "),
pro_treatment = t.vector,
pro_treatment_note = t_note.vector,
pro_thaw_note = thaw.vector,
pro_name_chr = name_chr.v,
pro_catchment_area = pc_area,
pro_water_body = pw_body,
pro_water_body_name = pw_body_name,
pro_permafrost = pp,
pro_thermokarst = pt) %>%
select(entry_name, site_name, pro_name_chr, aragones_rows, plot_name,
pro_treatment, pro_treatment_note, pro_thaw_note,
pro_name_chr, pro_catchment_area, pro_water_body, pro_water_body_name,
pro_permafrost, pro_thermokarst)
# temporary profile tab, still need to add in pro_note from flux (below)
# Mirrors the gas-data profile pipeline above.
profile <- profiles %>%
  mutate(entry_name = entry_name,
         site_name = site_name,
         plot_name = plot_name,
         pro_name = replace_na(pro_name_chr, 1),
         aragones_rows = aragones_rows,
         pro_lat = NA,
         pro_long = NA,
         pro_elevation = NA,
         pro_treatment = recode_factor(factor(pro_treatment), `1` = "control", `2` = "treatment"),
         pro_treatment_note = pro_treatment_note,
         pro_thaw_note = pro_thaw_note,
         pro_catchment_area = pro_catchment_area,
         pro_water_body = pro_water_body,
         pro_water_body_name = pro_water_body_name,
         pro_permafrost = recode_factor(factor(pro_permafrost), `Not PF` = "", `PF` = "yes"),
         # BUG FIX: the "Yes"/"Probably" recodes below previously assigned to a
         # stray new column `Thermokarst`, so they were silently dropped by the
         # select() that follows; all three recodes must target pro_thermokarst
         # (as in the gas-data version of this pipeline).
         pro_thermokarst = replace(pro_thermokarst, pro_thermokarst %in% "No", "Not Thermokarst"),
         pro_thermokarst = replace(pro_thermokarst, pro_thermokarst %in% "Yes", "yes"),
         pro_thermokarst = replace(pro_thermokarst, pro_thermokarst %in% c("Probably","Probably "), "yes")) %>%
  mutate(pro_treatment = replace_na(pro_treatment, 'control')) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         aragones_rows,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_note,
         pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst) %>%
  arrange(entry_name,site_name)
##################
## fill out a 'measurements' tab, from which we split fluxes, then layer, interstitial and incubation data
# take a look at the tab structures
# get the actual values again
# Water-data version of the measurements build.
# NOTE(review): pro_name here comes from Specific_location, while the water
# profile tab derives pro_name from pro_name_chr (Specific_location +
# Specific_ecosystem + Manipulation) -- confirm the intended join key.
measurements <- as_tibble(Aragones_water) %>%
mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
site_latitude = Latitude_decimal_degrees,
site_longitude = Longitude_decimal_degrees,
site_name = paste(site_latitude, site_longitude, sep = " "),
pro_name = replace_na(Specific_location, 1)) %>%
select(entry_name, site_name, pro_name,
Year, Month, Day, Grouped_Data_source, Data_source,
Flux_type, Sampling_method, Sample_treatment,
Specific_discharge_m3_per_s_per_km2,
Data_source_comments, MainRiver_name, More_description,
Depth_cm, Aerob_anaerob_incub, Org_Min_Incub,Autotrophic_type,
Instantaneous_discharge_m3_per_s,
H_CH4, H_H2O, d18O_permil,
DOC_mgC_L, POC_mgC_L, TOC_mgC_L,
d13C_Soil, d13C_Atm_CO2, d13C_CO2, d13C_CH4, d13C_DOC, d13C_POC,
Fm_Soil, Fm_Atm_CO2, Fm_CO2, Fm_CH4, Fm_DOC, Fm_POC) %>%
arrange(entry_name,site_name, pro_name)
## bind first 3 fields of profile to the measurements
# NOTE(review): positional cbind -- assumes the water profile rows align 1:1
# and in the same order as the arranged measurements rows; verify (profile is
# arranged by entry/site only, measurements by entry/site/pro_name).
measurements <- as_tibble(cbind(
profile$pro_name, profile$aragones_rows,
measurements
))
# make a dummy tab called template$measurements
# select all fields plus a concatenated column for pro_note (needs to be moved to profile tab )
# Reshape the wide analyte columns to long form, classify every measurement by
# the analyte it describes (CH4, CO2, DOC, ...) and by the ISRaD value-type
# column it will become (flx_2h, flx_analyte_conc, d13c, Fm), then spread the
# value types back out as columns. Statement order inside mutate() matters:
# each replace() consumes the labels left by the previous one.
measurements <- measurements %>%
  mutate(pro_note = paste(Data_source_comments, More_description, sep = " ")) %>%
  gather(key = "measurement_name",
         value = "measurement_value",
         c("H_CH4", "H_H2O",
           "DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L",
           "d13C_Soil","d13C_Atm_CO2","d13C_CO2",
           "d13C_CH4","d13C_DOC","d13C_POC",
           "Fm_Soil","Fm_Atm_CO2","Fm_CO2",
           "Fm_CH4","Fm_DOC","Fm_POC")) %>%
  mutate(measurement_analyte = measurement_name,
         measurement_index = measurement_name) %>%
  # Collapse source column names into the analyte they measure ...
  mutate(measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Atm_CO2","Fm_Atm_CO2"), "Atm_CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CO2","Fm_CO2"), "CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CH4","Fm_CH4","H_CH4"), "CH4"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_DOC","Fm_DOC","DOC_mgC_L"), "DOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Soil","Fm_Soil"), "Soil"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_POC","Fm_POC", "POC_mgC_L"), "POC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("TOC_mgC_L"), "TOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("H_H2O"), "H2O"),
         # ... and into the wide value-type column each row will land in.
         measurement_index = replace(measurement_index, measurement_index %in% c("H_CH4", "H_H2O"), "flx_2h"),
         measurement_index = replace(measurement_index, measurement_index %in% c("DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L"), "flx_analyte_conc"),
         measurement_index = replace(measurement_index, measurement_index %in% c("d13C_Soil","d13C_Atm_CO2","d13C_CO2",
                                                                                 "d13C_CH4","d13C_DOC","d13C_POC"), "d13c"),
         measurement_index = replace(measurement_index, measurement_index %in% c("Fm_Soil","Fm_Atm_CO2","Fm_CO2",
                                                                                 "Fm_CH4","Fm_DOC","Fm_POC"), "Fm"),
         # Controlled vocabulary: aerobic -> "" (blank), anaerobic -> "yes".
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Aerobic` = ""),
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Anaerobic` = "yes")) %>%
  spread(key = measurement_index, value = measurement_value) %>%
  # "profile$pro_name"/"profile$aragones_rows" are the cbind-generated names
  # from the binding step above.
  select(entry_name,site_name, pro_name = "profile$pro_name",
         aragones_rows = "profile$aragones_rows",
         measurement_obs_date_y = Year,
         measurement_obs_date_m = Month,
         measurement_obs_date_d = Day,
         measurement_pathway = Grouped_Data_source, # for splitting flux, incub, inter
         measurement_pathway_note = Data_source,
         # measurement_analyte = Flux_type,
         measurement_ecosystem_component = Flux_type,
         measurement_method_note = Sampling_method,
         measurement_method_note2 =Sample_treatment,
         measurement_rate = Specific_discharge_m3_per_s_per_km2,
         measurement_depth = Depth_cm,
         measurement_incubation_headspace = Aerob_anaerob_incub,
         measurement_incubation_soil = Org_Min_Incub,
         measurement_incubation_auto_type = Autotrophic_type,
         measurement_analyte,
         Instantaneous_discharge_m3_per_s,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c,Fm, pro_note)
# Keep only rows that carry at least one measurement value
# (flx_2h, d13c, Fm, d18O_permil or flx_analyte_conc), then sort.
measurements <- measurements %>%
  filter(!(is.na(flx_2h) & is.na(d13c) & is.na(Fm) &
             is.na(d18O_permil) & is.na(flx_analyte_conc))) %>%
  arrange(entry_name, site_name)
# Long-form by measurement type, drop the empty cells, then widen again so the
# matched d13c/Fm/flx_2h/d18O/conc values for each original aragones_row share
# one row. pivot_longer()/pivot_wider() supersede gather()/spread();
# values_drop_na = TRUE replaces the separate !is.na(value) filter.
measurements <- measurements %>%
  pivot_longer(cols = c("d13c", "Fm", "flx_2h", "d18O_permil", "flx_analyte_conc"),
               names_to = "measurement",
               values_to = "value",
               values_drop_na = TRUE) %>%
  arrange(entry_name, site_name) %>%
  pivot_wider(names_from = "measurement", values_from = "value")
# One representative pro_note per profile (first occurrence wins).
pro_note_summary <- measurements %>%
  group_by(pro_name) %>%
  summarize(pro_note = first(pro_note))
# Finalize the profile tab: attach pro_note (summarized from the measurements
# tab) and derive a numeric thaw depth from the pro_thaw labels.
##### check for other unique pro_thaw labels
profile <- as_tibble(left_join(profile, pro_note_summary, by = "pro_name")) %>%
  mutate(pro_thaw = as.factor(pro_thaw_note)) %>%
  # Map range-style labels onto midpoint depths before the numeric conversion.
  mutate(pro_thaw_depth = recode_factor(pro_thaw,
                                        `40 to 60 in the site, not in the aquatic system` = "50",
                                        `46 to 55` = '50',
                                        `60 to 120` = '90')) %>%
  # BUG FIX: convert the *recoded* pro_thaw_depth, not the raw pro_thaw label.
  # The original re-read pro_thaw here, so the range labels above coerced to NA
  # and the recode_factor() step was silently discarded.
  mutate(pro_thaw_depth = as.numeric(as.character(pro_thaw_depth))) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         pro_note,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_depth,
         pro_catchment_area,
         pro_permafrost,
         pro_thermokarst,
         pro_water_body,
         pro_water_body_name) %>%
  distinct() %>%
  arrange(entry_name,site_name,pro_name)
# Final column order for the measurements tab. pro_note now lives on the
# profile tab and is dropped simply by omitting it from this selection (the
# original's trailing `-pro_note` was a no-op because the column was never
# positively selected in the first place).
measurements <- measurements %>%
  select(entry_name,
         site_name,
         pro_name,
         measurement_obs_date_y,
         measurement_obs_date_m,
         measurement_obs_date_d,
         measurement_pathway, # for splitting flux, incub, inter
         measurement_pathway_note,
         # measurement_analyte,
         measurement_ecosystem_component,
         measurement_method_note,
         measurement_method_note2,
         measurement_rate,
         measurement_depth,
         measurement_incubation_headspace,
         measurement_incubation_soil,
         measurement_incubation_auto_type,
         flx_discharge_rate = Instantaneous_discharge_m3_per_s,
         measurement_analyte,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c, Fm)
# split the data into flux, interstitial and incubation
# NOTE: i include all bubble data in flux even tho not all were emitted naturally (some by stirring sediment)
# because there is no specific layer that these observations can be attributed to
flux <- measurements %>%
  filter(!measurement_pathway %in% c("Soil", "SoilDOC"))
### placeholder for correcting controlled vocab
# Within-group row counter used to make flx_name unique. flux is ordered by
# entry/site, and summarize() returns the groups in that same sorted order, so
# sequence(n) == c(1:n[1], 1:n[2], ...) lines up positionally with flux rows
# (this replaces the original grow-a-list for loop + unlist()).
flx_x <- flux %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n())
x <- sequence(flx_x$aragones.rows)
#finalize
# Map Aragones pathway / flux-type labels onto ISRaD controlled vocabulary and
# build a unique flx_name from profile + analyte + the within-group counter x.
# The replace() calls are order-sensitive: they rewrite the character copies
# made in the first mutate().
flux <- flux %>%
  mutate(flx_pathway = as.character(measurement_pathway),
         measurement_ecosystem_component = as.character(measurement_ecosystem_component),
         index = x) %>%
  mutate(flx_pathway = replace(flx_pathway, measurement_pathway == "Bubbles", "bubble ebullition"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Flux", "soil emission"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Water", "dissolved"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component %in%
                                                     c("CH4","Heterotrophic_respiration","Soil"), "heterotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Autotrophic_respiration", "autotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Ecosystem_respiration", "ecosystem"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Water_export", "aquatic"),
         flx_name = paste(pro_name, measurement_analyte,index, sep = " ")) %>%
  select(entry_name,
         site_name,
         pro_name,
         flx_name,
         flx_obs_date_y = measurement_obs_date_y,
         flx_obs_date_m = measurement_obs_date_m,
         flx_obs_date_d = measurement_obs_date_d,
         flx_pathway = flx_pathway,
         flx_pathway_note = measurement_pathway_note,
         flx_ecosystem_component = measurement_ecosystem_component,
         flx_method = measurement_method_note,
         flx_method_note = measurement_method_note2,
         flx_rate = measurement_rate,
         flx_analyte = measurement_analyte,
         flx_analyte_conc,
         flx_discharge_rate,
         flx_18o = d18O_permil,
         flx_2h,
         flx_13c = d13c,
         flx_fraction_modern = Fm) %>%
  # ISRaD stores dates as text.
  mutate(flx_obs_date_y = as.character(flx_obs_date_y),
         flx_obs_date_m = as.character(flx_obs_date_m),
         flx_obs_date_d = as.character(flx_obs_date_d))
# Dummy layers for soil-pathway rows: one layer per profile + depth, with top
# and bottom bounds set to depth +/- 5 cm.
layer <- measurements %>%
  filter(measurement_pathway %in% c("Soil")) %>%
  mutate(lyr_name = paste(pro_name, measurement_depth, sep = "_"),
         lyr_top = measurement_depth - 5, lyr_bot = measurement_depth + 5) %>%
  # (the original also created lyr_all_org_neg = 'yes' here but never selected
  #  it below, so that dead column is dropped)
  select(entry_name,
         site_name,
         pro_name,
         lyr_name,
         lyr_obs_date_y = measurement_obs_date_y,
         lyr_obs_date_m = measurement_obs_date_m,
         lyr_obs_date_d = measurement_obs_date_d,
         lyr_top,
         lyr_bot,
         measurement_pathway,
         lyr_13c = d13c,
         lyr_fraction_modern = Fm) %>%
  # ISRaD stores dates as text.
  mutate(across(c(lyr_obs_date_y, lyr_obs_date_m, lyr_obs_date_d), as.character))
# # extract the interstitial data --- there are zero interstitial records in water group
## finalize water data
# Coerce every column of every tab to character so they can be row-bound onto
# the (all-character) ISRaD template. across() supersedes mutate_all().
metadata <- metadata %>%
  mutate(across(everything(), as.character))
site <- site %>%
  mutate(across(everything(), as.character))
profile <- profile %>%
  mutate(across(everything(), as.character))
flux <- flux %>%
  mutate(across(everything(), as.character))
# BUG FIX: the original read `layer <- layer.red.final %>% ...`, a leftover
# from the gas section, which clobbered the water-section `layer` built above
# (and would have duplicated gas layers into the water output). Every other
# tab self-references, so layer does too.
layer <- layer %>%
  mutate(across(everything(), as.character))
interstitial <- interstitial %>%
  mutate(across(everything(), as.character))
incubation <- incubation %>%
  mutate(across(everything(), as.character))
# For each entry_name, pull the corresponding rows of every tab into a list
# with one element per entry. A single helper replaces seven identical
# copy-pasted loops.
names <- metadata %>% select(entry_name) %>% pull()  # NB: shadows base::names()
# Return an unnamed list of per-entry subsets of `tbl`, in `names` order
# (same result the original index loops produced).
split_by_entry <- function(tbl) {
  lapply(names, function(nm) filter(tbl, entry_name == nm))
}
# metadata
metadata.entries.water <- split_by_entry(metadata)
# site
site.entries.water <- split_by_entry(site)
# profile
profile.entries.water <- split_by_entry(profile)
# flux
flux.entries.water <- split_by_entry(flux)
# layer
layer.entries.water <- split_by_entry(layer)
# interstitial
interstitial.entries.water <- split_by_entry(interstitial)
# incubation
incubation.entries.water <- split_by_entry(incubation)
## write out final files (by entry)
# Pre-allocate one slot per entry for each output tab.
n_entries <- length(names)
toutput.metadata <- vector("list", n_entries)
toutput.site <- vector("list", n_entries)
toutput.profile <- vector("list", n_entries)
toutput.flux <- vector("list", n_entries)
toutput.layer <- vector("list", n_entries)
toutput.interstitial <- vector("list", n_entries)
toutput.incubation <- vector("list", n_entries)
toutput.fraction <- vector("list", n_entries)
toutput.cvocab <- vector("list", n_entries)
#merge with template
# Stack each entry's rows (gas entries.* and water entries.water.* lists are
# built elsewhere in this script) under the empty template tab so every output
# sheet carries the template header rows.
for (i in 1:length(names)) {
  toutput.metadata[[i]] <- bind_rows(template$metadata, metadata.entries[[i]])
  # NOTE(review): dropping row 3 by position is fragile — presumably it removes
  # a spurious NaN row from bind_rows, but confirm against the template layout.
  toutput.metadata[[i]] <- toutput.metadata[[i]][-3,] %>% distinct() # not sure why an extra row of NaN is added
  toutput.site[[i]] <- bind_rows(template$site, site.entries[[i]],site.entries.water[[i]]) %>% distinct()
  toutput.profile[[i]] <- bind_rows(template$profile, profile.entries[[i]],profile.entries.water[[i]]) %>% distinct()
  toutput.flux[[i]] <- bind_rows(template$flux, flux.entries[[i]],flux.entries.water[[i]]) %>% distinct()
  toutput.layer[[i]] <- bind_rows(template$layer, layer.entries[[i]],layer.entries.water[[i]]) %>% distinct()
  toutput.interstitial[[i]] <- bind_rows(template$interstitial, interstitial.entries[[i]],interstitial.entries.water[[i]]) %>% distinct()
  toutput.fraction[[i]] <- template$fraction
  toutput.incubation[[i]] <- bind_rows(template$incubation, incubation.entries[[i]],incubation.entries.water[[i]]) %>% distinct()
  toutput.cvocab[[i]] <- template$`controlled vocabulary`
}
# Bundle the nine tabs for each entry into a single list element.
toutput.byentry <- lapply(seq_along(names), function(i) {
  list(toutput.metadata[[i]], toutput.site[[i]], toutput.profile[[i]],
       toutput.flux[[i]], toutput.layer[[i]], toutput.interstitial[[i]],
       toutput.fraction[[i]], toutput.incubation[[i]], toutput.cvocab[[i]])
})
# save water and gas template
# One workbook per entry; sheet names must match the ISRaD tab names.
tab_names <- c("metadata", "site", "profile", "flux", "layer", "interstitial",
               "fraction", "incubation", "controlled vocabulary")
out_dir <- "/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Aragones Ingest Files/By Entry v3/"
for (i in seq_along(names)) {
  names(toutput.byentry[[i]]) <- tab_names
  write.xlsx(toutput.byentry[[i]], paste0(out_dir, names[i], ".xlsx"), keepNA = FALSE)
}
| /devScripts/read_EstopAragones_database.R | no_license | xiajz/ISRaD | R | false | false | 67,711 | r | # Script for Ingesting Cristian Estop-Aragones Circumpolar 14C Database
# Gavin McNicol
# setup
library(dplyr)  # was require(): library() fails loudly if the package is missing
library(openxlsx)
library(tidyverse)
library(devtools)
library(rcrossref)
library(lubridate)
# NOTE(review): installing on every run is slow and needs network access —
# consider running this once interactively rather than inside the script.
devtools::install_github("International-Soil-Radiocarbon-Database/ISRaD", ref="master")
library(ISRaD)
## clear workspace
# NOTE(review): rm(list = ls()) in scripts is discouraged (it only clears the
# global environment, not packages/options); kept for compatibility.
rm(list=ls())
# Read the ISRaD master template: one data frame per sheet, all columns coerced
# to character so data tabs can be row-bound onto them later.
template_file <- system.file("extdata", "ISRaD_Master_Template.xlsx", package = "ISRaD")
sheet_names <- getSheetNames(template_file)  # read the workbook index once, not twice
template <- lapply(sheet_names, function(s) read.xlsx(template_file, sheet = s))
names(template) <- sheet_names
template <- lapply(template, function(x) x %>% mutate(across(everything(), as.character)))
# take a look at template structure
glimpse(template)
head(template$metadata)
# Load the raw Estop-Aragones 14C dataset; blank cells and "NA" become NA.
aragones_csv <- "/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/14C_Dataset_Final_Cristian.csv"
Aragones_dataset <- read.csv(aragones_csv,
                             na.strings = c("", "NA"),
                             stringsAsFactors = FALSE)
glimpse(Aragones_dataset)
length(Aragones_dataset)  # column count
# Tidy the raw dataset: cast categorical fields to factors, underscore-join
# multiword labels, and parse Sampling_date (M/D/YY with slashes removed).
Aragones_tidy <- Aragones_dataset %>%
  mutate(Dataset = as.factor(Dataset),
         Study = as.factor(str_replace_all(Study, fixed(" "), "_")),
         Yedoma = as.factor(Yedoma),
         LAR = as.factor(LAR),
         PF = as.factor(PF),
         Thermokarst = as.factor(Thermokarst),
         Yukon_Kolyma_origin = as.factor(Yukon_Kolyma_origin),
         Flux_type = as.factor(str_replace_all(Flux_type, fixed(" "), "_")),
         Depth_cm = as.numeric(Depth_cm),
         Aerob_anaerob_incub = as.factor(Aerob_anaerob_incub),
         Org_Min_Incub = as.factor(Org_Min_Incub),
         Autotrophic_type = as.factor(str_replace_all(Autotrophic_type, fixed(" "), "_")),
         Manipulation_study = as.factor(Manipulation_study),
         # BUG FIX: the original tested length() (element count, not string
         # width) in a scalar if() applied once to the whole column, and padded
         # with paste() whose default space separator yields "0 41518", which
         # mdy() cannot parse. Use nchar() + vectorized if_else() + paste0().
         Sampling_date = {
           date_digits <- str_replace_all(Sampling_date, fixed("/"), "")
           if_else(nchar(date_digits) == 5,
                   mdy(paste0("0", date_digits)),  # 5 digits => single-digit month, pad it
                   mdy(date_digits))
         }) %>%
  select(1:67)
# str(Aragones_tidy)
# many conventions different between water and gas data. split to treat differently
# Gas records: geolocated rows flagged Dataset == "Gas".
Aragones_gas <- Aragones_tidy %>%
  filter(!is.na(Latitude_decimal_degrees), Dataset == "Gas")
# work with gas data first
# str(Aragones_gas)
# Unassigned inspection pipeline: prints a glimpse of the gas-data columns of
# interest (sorted by ID_merged); it has no side effects on the data.
Aragones_gas %>% arrange(ID_merged) %>%
  select(Study,
         Full_class,
         General_ecosystem,
         PF,
         Yedoma,
         Grouped_Data_source,
         Specific_ecosystem,
         Flux_type,
         Depth_cm,
         Aerob_anaerob_incub,
         Org_Min_Incub,
         Autotrophic_type,
         Manipulation_study,
         Sampling_year_fraction,
         Sampling_date,
         DOY,
         Gral_description,
         WT_cm,
         Latitude_decimal_degrees,
         Longitude_decimal_degrees,
         d18O_permil,
         d13C_Soil,
         d13C_Atm_CO2,
         d13C_CO2,
         d13C_CH4,
         H_CH4,
         H_H2O,
         d13C_DOC,
         d13C_POC,
         DOC_mgC_L,
         POC_mgC_L,
         TOC_mgC_L,
         Fm_Soil,
         Fm_Atm_CO2,
         Fm_CO2,
         Fm_CH4,
         Fm_DOC,
         Fm_POC,
         Detailed_ecosystem_classification,
         Basin_Area_Drainage_Area_km2,
         MainRiver_name,
         Instantaneous_discharge_m3_per_s) %>%
  glimpse()
## fill entry_name ## I use Aragones_water here because it makes the two sets match (there was one fewer DOIs in the Aragones_gas data)
str(template$metadata)
# One row per study, renamed to the ISRaD entry_name field.
entry_name <- Aragones_gas %>%
  select(entry_name = Study) %>%
  distinct() %>%
  arrange(as.factor(entry_name))
# read in doi csv
Aragones_doi <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Estop Aragones DOI List.csv", na.strings = c("","NA"),
                         stringsAsFactors = FALSE)
# format study names and join to dois
doi <- Aragones_doi %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "), "_")) %>%
  arrange(as.factor(entry_name)) %>%
  select("entry_name", doi = "DOI")
## be careful here, I think a couple of dois are lost/doubled up based on Alison's excel file
# Join explicitly on entry_name; a duplicated entry_name in `doi` fans out rows
# here, which matches the "lost/doubled up" worry above.
metadata <- full_join(entry_name, doi, by = "entry_name")
# fill metadata (55 unique studies in gas data)
# The curator/contact fields are identical for every entry, so build them once
# as a named list and splice the whole set into mutate().
metadata_constants <- list(
  compilation_doi = NA,
  curator_name = "Gavin McNicol",
  curator_organization = "Stanford University",
  curator_email = "gmcnicol@stanford.edu",
  modification_date_y = "2019",
  modification_date_m = "08",
  modification_date_d = "12",
  contact_name = "Cristian Estop-Aragones",
  contact_email = "estopara@ualberta.ca",
  contact_orcid_id = NA,
  bibliographical_reference = "Estop-Aragones 2018",
  metadata_note = NA,
  associated_datasets = NA,
  template_version = 20190812
)
metadata <- metadata %>%
  mutate(!!!metadata_constants) %>%
  arrange(entry_name)
# start by defining sites as unique lat longs
# NOTE: the original wrapped this in as_tibble(cbind(...)) — cbind() of a
# single data frame is a no-op — and wrote
# `entry_name = str_replace_all("Study", ...)`, which evaluated the *literal*
# string "Study" and only worked by accident as a rename. A plain rename is
# the intent (Study already has underscores from the tidying step).
site <- Aragones_gas %>%
  select(entry_name = Study,
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         Data_source_comments, MainRiver_name, More_description) %>%
  mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>% # note double space sep
  select(entry_name, site_name) %>%
  # filter(site_name != "NA NA") %>%
  distinct(entry_name, site_name) %>%
  arrange(entry_name, site_name) %>%
  as_tibble()
## get site_note variables to paste()
# Normalize the Yedoma labels, build a combined site_note, and keep one note
# per site_name (first occurrence wins).
site_note_df <- as_tibble(Aragones_gas) %>%
  mutate(Yedoma = as.character(Yedoma),
         Thermokarst = as.character(Thermokarst)) %>%
  mutate(Yedoma = replace(Yedoma, Yedoma %in% c("No","No?"), "Not Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("Yes","Probably"), "Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("?","Unknown"), "Yedoma Unknown")) %>%
  mutate(site_note = paste(Full_class, Yedoma)) %>%
  # NOTE: the original used `entry_name = str_replace_all("Study", ...)`, which
  # evaluated the literal string "Study" and only worked by accident as a
  # rename; plain renames express the intent.
  select(site_note,
         entry_name = Study,
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         Data_source_comments, More_description) %>%
  mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>%
  select(site_name, site_note) %>%
  # filter(site_name != "NA NA") %>%
  group_by(site_name) %>%
  summarize(site_note = site_note[1])
#100 unique lat long and site note combinations
# now fill in other site variables
# Split the "lat long" site_name key back into its numeric pieces. The
# group_by makes strsplit run once per site; site_latlong is a temporary
# list-column dropped by the final select.
# NOTE(review): `site` remains grouped by site_name after this pipeline.
site <- site %>%
  group_by(site_name) %>%
  mutate(site_latlong = strsplit(site_name[[1]], " ", fixed = TRUE),
         site_datum = NA,
         site_elevation = NA) %>%
  mutate(site_lat = site_latlong[[1]][1],
         site_long = site_latlong[[1]][2]) %>%
  select(entry_name,
         site_name, site_lat, site_long, site_datum,
         site_elevation) %>%
  arrange(entry_name,site_name)
# join site_note to site tab
# (implicit join key: site_name, the only shared column)
site <- site %>% left_join(site_note_df)
## Fill profile tab
# Count the raw data rows per (entry, site) and keep one representative value
# of each profile-level attribute ([1] == first row of each group); these
# counts drive the replication vectors built below.
num.aragones.rows <- as_tibble(Aragones_gas) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = " "),
         pro_name_chr = paste(Specific_location, Specific_ecosystem, Manipulation, sep = "_")) %>%
  select(entry_name, site_name,pro_name_chr,
         Gral_description, Detailed_ecosystem_classification, General_ecosystem,
         Thermokarst, PF, Basin_Area_Drainage_Area_km2, MainRiver_name,
         Manipulation_study, Manipulation, AL_cm,
         Year, Month, Day,
         Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2, Instantaneous_discharge_m3_per_s) %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n(),
            gral = Gral_description[1],
            detailed_class = Detailed_ecosystem_classification[1],
            treatment = Manipulation_study[1],
            treatment_note = Manipulation[1],
            thaw_depth = AL_cm[1],
            pro_name_chr = pro_name_chr[1],
            pro_catchment_area = Basin_Area_Drainage_Area_km2[1],
            pro_water_body = General_ecosystem[1],
            pro_water_body_name = MainRiver_name[1],
            pro_permafrost = as.character(PF[1]),
            pro_thermokarst = as.character(Thermokarst[1])
  )
# how many measurements of 13c and Fm in total?
sum(num.aragones.rows$aragones.rows) #1163
## Replicate each site-level reference value once per original data row (1163
## rows total) and build a within-site row counter. rep(x, times = n) repeats
## each element of x n[i] times and sequence(n) == c(1:n[1], 1:n[2], ...), so
## these vectorized calls replace the original fourteen copy loops + unlist().
n_rows <- num.aragones.rows$aragones.rows
aragones.rows.vector <- sequence(n_rows)  # 1..n within each site
sitenames.vector <- rep(num.aragones.rows$site_name, times = n_rows)
entrynames.vector <- rep(as.character(num.aragones.rows$entry_name), times = n_rows)
gral.vector <- rep(num.aragones.rows$gral, times = n_rows)
detail.vector <- rep(num.aragones.rows$detailed_class, times = n_rows)
t.vector <- rep(num.aragones.rows$treatment, times = n_rows)
t_note.vector <- rep(num.aragones.rows$treatment_note, times = n_rows)
thaw.vector <- rep(num.aragones.rows$thaw_depth, times = n_rows)
name_chr.v <- rep(num.aragones.rows$pro_name_chr, times = n_rows)
pc_area <- rep(num.aragones.rows$pro_catchment_area, times = n_rows)
pw_body <- rep(num.aragones.rows$pro_water_body, times = n_rows)
pw_body_name <- rep(num.aragones.rows$pro_water_body_name, times = n_rows)
pp <- rep(num.aragones.rows$pro_permafrost, times = n_rows)
pt <- rep(num.aragones.rows$pro_thermokarst, times = n_rows)
# Assemble the replicated vectors into the profiles tibble used for the join.
# (The original went through as_tibble(cbind(...)), which coerced everything to
# character and then overwrote every column in mutate(); constructing the
# tibble directly is equivalent and skips the throwaway coercion. The original
# select() also listed pro_name_chr twice, which select() ignores.)
profiles <- tibble(entry_name = entrynames.vector,
                   site_name = sitenames.vector,
                   pro_name_chr = name_chr.v,
                   aragones_rows = aragones.rows.vector,
                   plot_name = paste(gral.vector, detail.vector, sep = " "),
                   pro_treatment = t.vector,
                   pro_treatment_note = t_note.vector,
                   pro_thaw_note = thaw.vector,
                   pro_catchment_area = pc_area,
                   pro_water_body = pw_body,
                   pro_water_body_name = pw_body_name,
                   pro_permafrost = pp,
                   pro_thermokarst = pt)
# temporary profile tab, still need to add in pro_note from flux (below)
# Normalize controlled-vocabulary fields: treatment codes 1/2 -> control/
# treatment (NA -> control), PF/thermokarst labels -> ""/"yes".
# NOTE(review): replace_na(pro_name_chr, 1) replaces a character NA with
# numeric 1 — newer tidyr versions reject this type mismatch; verify the
# tidyr version this script is pinned to.
profile <- profiles %>%
  mutate(entry_name = entry_name,
         site_name = site_name,
         plot_name = plot_name,
         pro_name = replace_na(pro_name_chr, 1),
         aragones_rows = aragones_rows,
         pro_lat = NA,
         pro_long = NA,
         pro_elevation = NA,
         pro_treatment = recode_factor(factor(pro_treatment), `1` = "control", `2` = "treatment"),
         pro_treatment_note = pro_treatment_note,
         pro_thaw_note = pro_thaw_note,
         pro_catchment_area = pro_catchment_area,
         pro_water_body = pro_water_body,
         pro_water_body_name = pro_water_body_name,
         pro_permafrost = recode_factor(factor(pro_permafrost), `Not PF` = "", `PF` = "yes"),
         pro_thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "No", ""),
         pro_thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "Yes", "yes"),
         pro_thermokarst = replace(pro_thermokarst, pro_thermokarst %in% c("Probably","Probably "), "yes")) %>%
  mutate(pro_treatment = replace_na(pro_treatment, 'control')) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         aragones_rows,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_note,
         pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst) %>%
  arrange(entry_name,site_name)
# Interactive inspection only; harmless when sourced non-interactively.
View(profile)
##################
## fill out a 'measurements' tab, from which we split fluxes, then layer, interstitial and incubation data
# take a look at the tab structures
# get the actual values again
# Rebuild the per-row measurements table with ISRaD keys (entry/site/profile).
measurements <- as_tibble(Aragones_gas) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = " "),
         pro_name = replace_na(Specific_location, 1)) %>%
  select(entry_name, site_name, pro_name,
         Year, Month, Day, Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2,
         Data_source_comments, MainRiver_name, More_description,
         Depth_cm, Aerob_anaerob_incub, Org_Min_Incub,Autotrophic_type,
         Instantaneous_discharge_m3_per_s,
         H_CH4, H_H2O, d18O_permil,
         DOC_mgC_L, POC_mgC_L, TOC_mgC_L,
         d13C_Soil, d13C_Atm_CO2, d13C_CO2, d13C_CH4, d13C_DOC, d13C_POC,
         Fm_Soil, Fm_Atm_CO2, Fm_CO2, Fm_CH4, Fm_DOC, Fm_POC) %>%
  arrange(entry_name,site_name, pro_name)
## bind first 3 fields of profile to the measurements
# cbind() auto-names these columns "profile$pro_name"/"profile$aragones_rows";
# the select() at the end of this pipeline relies on those exact names.
measurements <- as_tibble(cbind(
  profile$pro_name,profile$aragones_rows,
  measurements
))
# make a dummy tab called template$measurements
# select all fields plus a concatenated column for pro_note (needs to be moved to profile tab )
# Long-form the analyte columns, classify by analyte and ISRaD value type, and
# spread the value types back out. replace() order matters within the mutate().
measurements <- measurements %>%
  mutate(pro_note = paste(Data_source_comments, More_description, sep = " ")) %>%
  gather(key = "measurement_name",
         value = "measurement_value",
         c("H_CH4", "H_H2O",
           "DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L",
           "d13C_Soil","d13C_Atm_CO2","d13C_CO2",
           "d13C_CH4","d13C_DOC","d13C_POC",
           "Fm_Soil","Fm_Atm_CO2","Fm_CO2",
           "Fm_CH4","Fm_DOC","Fm_POC")) %>%
  mutate(measurement_analyte = measurement_name,
         measurement_index = measurement_name) %>%
  mutate(measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Atm_CO2","Fm_Atm_CO2"), "Atm_CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CO2","Fm_CO2"), "CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CH4","Fm_CH4","H_CH4"), "CH4"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_DOC","Fm_DOC","DOC_mgC_L"), "DOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Soil","Fm_Soil"), "Soil"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_POC","Fm_POC", "POC_mgC_L"), "POC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("TOC_mgC_L"), "TOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("H_H2O"), "H2O"),
         measurement_index = replace(measurement_index, measurement_index %in% c("H_CH4", "H_H2O"), "flx_2h"),
         measurement_index = replace(measurement_index, measurement_index %in% c("DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L"), "flx_analyte_conc"),
         measurement_index = replace(measurement_index, measurement_index %in% c("d13C_Soil","d13C_Atm_CO2","d13C_CO2",
                                                                                 "d13C_CH4","d13C_DOC","d13C_POC"), "d13c"),
         measurement_index = replace(measurement_index, measurement_index %in% c("Fm_Soil","Fm_Atm_CO2","Fm_CO2",
                                                                                 "Fm_CH4","Fm_DOC","Fm_POC"), "Fm"),
         # Controlled vocabulary: aerobic -> "" (blank), anaerobic -> "yes".
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Aerobic` = ""),
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Anaerobic` = "yes")) %>%
  spread(key = measurement_index, value = measurement_value) %>%
  select(entry_name,site_name, pro_name = "profile$pro_name",
         aragones_rows = "profile$aragones_rows",
         measurement_obs_date_y = Year,
         measurement_obs_date_m = Month,
         measurement_obs_date_d = Day,
         measurement_pathway = Grouped_Data_source, # for splitting flux, incub, inter
         measurement_pathway_note = Data_source,
         # measurement_analyte = Flux_type,
         measurement_ecosystem_component = Flux_type,
         measurement_method_note = Sampling_method,
         measurement_method_note2 =Sample_treatment,
         measurement_rate = Specific_discharge_m3_per_s_per_km2,
         measurement_depth = Depth_cm,
         measurement_incubation_headspace = Aerob_anaerob_incub,
         measurement_incubation_soil = Org_Min_Incub,
         measurement_incubation_auto_type = Autotrophic_type,
         measurement_analyte,
         Instantaneous_discharge_m3_per_s,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c,Fm, pro_note)
# Interactive inspection only; harmless when sourced non-interactively.
View(measurements)
# Keep only rows with at least one gas measurement (flx_2h, d13c or Fm).
measurements <- measurements %>%
  filter(!(is.na(flx_2h) & is.na(d13c) & is.na(Fm)))
# arrange
measurements <- measurements %>% arrange(entry_name, site_name, pro_name)
# Long-form by measurement type, drop empty cells, then widen again so each
# original aragones_row's matched flx_2h/d13c/Fm values share one row.
# pivot_longer()/pivot_wider() supersede gather()/spread();
# values_drop_na = TRUE replaces the separate !is.na(value) filter.
measurements <- measurements %>%
  pivot_longer(cols = c("flx_2h", "d13c", "Fm"),
               names_to = "13c or Fm or 2h",
               values_to = "value",
               values_drop_na = TRUE) %>%
  arrange(entry_name, site_name, pro_name) %>%
  pivot_wider(names_from = "13c or Fm or 2h", values_from = "value")
# One representative pro_note per profile (first occurrence wins).
pro_note_summary <- measurements %>%
  group_by(pro_name) %>%
  summarize(pro_note = first(pro_note))
# Finalize profile: add pro_note from the measurements tab and derive a numeric
# thaw depth from the pro_thaw labels.
profile <- as_tibble(left_join(profile, pro_note_summary, by = "pro_name")) %>%
  mutate(pro_thaw = as.factor(pro_thaw_note)) %>%
  # Map range-style labels onto midpoint depths before the numeric conversion.
  mutate(pro_thaw_depth = recode_factor(pro_thaw,
                                        `40 to 60 in the site, not in the aquatic system` = "50",
                                        `46 to 55` = '50',
                                        `60 to 120` = '90')) %>%
  # BUG FIX: convert the *recoded* pro_thaw_depth, not the raw pro_thaw label.
  # The original re-read pro_thaw here, so the range labels above coerced to NA
  # and the recode_factor() step was silently discarded.
  mutate(pro_thaw_depth = as.numeric(as.character(pro_thaw_depth))) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         pro_note,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_depth,
         pro_catchment_area,
         pro_permafrost,
         pro_thermokarst,
         pro_water_body,
         pro_water_body_name) %>%
  distinct() %>%
  arrange(entry_name,site_name,pro_name)
# Final column order for the measurements tab. pro_note now lives on the
# profile tab and is dropped simply by omitting it from this selection (the
# original's trailing `-pro_note` was a no-op because the column was never
# positively selected in the first place).
measurements <- measurements %>%
  select(entry_name,
         site_name,
         pro_name,
         measurement_obs_date_y,
         measurement_obs_date_m,
         measurement_obs_date_d,
         measurement_pathway, # for splitting flux, incub, inter
         measurement_pathway_note,
         # measurement_analyte,
         measurement_ecosystem_component,
         measurement_method_note,
         measurement_method_note2,
         measurement_rate,
         measurement_depth,
         measurement_incubation_headspace,
         measurement_incubation_soil,
         measurement_incubation_auto_type,
         flx_discharge_rate = Instantaneous_discharge_m3_per_s,
         measurement_analyte,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c, Fm)
# split the data into flux, interstitial and incubation
# NOTE: i include all bubble data in flux even tho not all were emitted naturally (some by stirring sediment)
# because there is no specific layer that these observations can be attributed to
flux <- measurements %>%
  filter(measurement_pathway %in% c("Flux","Bubbles","Water"))
# assign final names and correct controlled vocab
#flx_method
flux$measurement_method_note <- as.factor(flux$measurement_method_note)
# WARNING(review): this relabels levels purely by *position* in the factor's
# (alphabetical) level order — it assumes exactly 26 levels with the two
# grab-sample methods at positions 8 and 15. Any change to the underlying
# Sampling_method labels silently mislabels methods; confirm against the data.
levels(flux$measurement_method_note) <- c(rep("chamber",7),"grab sample",rep("chamber",6),"grab sample",rep("chamber",11))
# create index vector to make the flx_name unique
# flux rows are grouped contiguously by entry/site and summarize() returns the
# groups in the same sorted order, so sequence(n) == c(1:n[1], 1:n[2], ...)
# lines up positionally with flux rows (replaces the grow-a-list loop).
flx_x <- flux %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n())
x <- sequence(flx_x$aragones.rows)
#finalize
# Build the final flux tab: translate pathway / ecosystem-component codes to
# the controlled vocabulary, build flx_name from profile + analyte +
# within-site index, then rename to the template field names.
flux <- flux %>%
  mutate(flx_pathway = as.character(measurement_pathway),
         measurement_ecosystem_component = as.character(measurement_ecosystem_component),
         # x is the within-site running index built above; assumes flux row
         # order still matches the flx_x group order
         index = x) %>%
  mutate(flx_pathway = replace(flx_pathway, measurement_pathway == "Bubbles", "bubble ebullition"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Flux", "soil emission"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Water", "dissolved"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component %in%
                                                     c("CH4","Heterotrophic_respiration","Soil"), "heterotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Autotrophic_respiration", "autotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Ecosystem_respiration", "ecosystem"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Water_export", "aquatic"),
         flx_name = paste(pro_name, measurement_analyte,index, sep = " ")) %>%
  select(entry_name,
         site_name,
         pro_name,
         flx_name,
         flx_obs_date_y = measurement_obs_date_y,
         flx_obs_date_m = measurement_obs_date_m,
         flx_obs_date_d = measurement_obs_date_d,
         flx_pathway = flx_pathway,
         flx_pathway_note = measurement_pathway_note,
         flx_ecosystem_component = measurement_ecosystem_component,
         flx_method = measurement_method_note,
         flx_method_note = measurement_method_note2,
         flx_rate = measurement_rate,
         flx_analyte = measurement_analyte,
         flx_analyte_conc,
         flx_discharge_rate,
         flx_18o = d18O_permil,
         flx_2h,
         flx_13c = d13c,
         flx_fraction_modern = Fm) %>%
  # dates to character, matching the all-character template sheets
  mutate(flx_obs_date_y = as.character(flx_obs_date_y),
         flx_obs_date_m = as.character(flx_obs_date_m),
         flx_obs_date_d = as.character(flx_obs_date_d))
# replace Atm_CO2 with CO2 and associated ecosystem component with atmosphere
flux <- flux %>%
  mutate(flx_analyte = as.factor(flx_analyte)) %>%
  mutate(flx_analyte = recode_factor(flx_analyte, Atm_CO2 = "CO2"),
         # NOTE(review): flx_ecosystem_component was already lower-cased to
         # "ecosystem" upstream, so the `Ecosystem_Respiration` level below
         # may never match — confirm whether "ecosystem" was intended here.
         flx_ecosystem_component = recode_factor(flx_ecosystem_component, Ecosystem_Respiration = "atmosphere")) %>%
  arrange(entry_name, site_name, pro_name)
# interactive inspection only; NOTE(review): View() can error under
# non-interactive Rscript — remove for batch runs
View(flux)
## correct two Aragones data issues (rows 107 and 123 are "Soil" but should be
## "Soil porespace", based on c13 which looks like methane)
# NOTE(review): hard-coded row positions — fragile if upstream filtering or
# ordering of `measurements` changes; confirm before rerunning on revised data.
measurements$measurement_pathway[c(107,123)] <- "Soil porespace"
# Keep only the depth-resolved records: Soil (layer measurements),
# Soil porespace (interstitial) and Incub (incubation). Dummy layer names are
# built below from profile + depth, with top/bottom = depth -/+ 5 cm.
depth_pathways <- c("Incub", "Soil porespace", "Soil")
layer <- filter(measurements, measurement_pathway %in% depth_pathways)
## all depths reported from surface so set lyr_all_org_neg = 'yes'
# Build dummy layers: name = site_profile_depth, boundaries = depth +/- 5 cm.
layer <- layer %>%
  mutate(lyr_name = paste(site_name, pro_name, measurement_depth, sep = "_"),
         lyr_top = measurement_depth - 5, lyr_bot = measurement_depth + 5,
         lyr_all_org_neg = 'yes') %>%
  select(entry_name,
         site_name,
         pro_name,
         lyr_name,
         lyr_obs_date_y = measurement_obs_date_y,
         lyr_obs_date_m = measurement_obs_date_m,
         lyr_obs_date_d = measurement_obs_date_d,
         lyr_all_org_neg,
         lyr_top,
         lyr_bot,
         measurement_pathway, # kept temporarily to split Soil vs others below
         lyr_13c = d13c,
         lyr_fraction_modern = Fm) %>%
  mutate(lyr_obs_date_y = as.character(lyr_obs_date_y),
         lyr_obs_date_m = as.character(lyr_obs_date_m),
         lyr_obs_date_d = as.character(lyr_obs_date_d))
# subset non-Soil (layer) data and assign the 13c and Fm values to NA
# (these dummy layers only anchor porespace/incubation records; one row per
# unique lyr_name, first observation wins)
layer.red1 <- layer %>%
  filter(measurement_pathway != "Soil") %>%
  mutate(lyr_13c = NA,
         lyr_fraction_modern = NA) %>%
  group_by(entry_name, site_name, pro_name, lyr_name) %>%
  summarize(lyr_obs_date_y = lyr_obs_date_y[1],
            lyr_obs_date_m = lyr_obs_date_m[1],
            lyr_obs_date_d = lyr_obs_date_d[1],
            lyr_all_org_neg = lyr_all_org_neg[1],
            lyr_top = lyr_top[1],
            lyr_bot = lyr_bot[1],
            measurement_pathway = measurement_pathway[1],
            lyr_13c = lyr_13c[1],
            lyr_fraction_modern = lyr_fraction_modern[1]) %>%
  # drop records with no usable depth (NA top/bottom)
  filter(!is.na(lyr_top) & !is.na(lyr_bot)) %>%
  select(-measurement_pathway) %>%
  distinct() %>%
  arrange(entry_name, site_name)
# Subset the true layer ("Soil") records that carry both a 13C and a
# fraction-modern observation, dropping the pathway helper column.
layer.soil <- layer %>%
  filter(measurement_pathway == "Soil",
         !is.na(lyr_13c),
         !is.na(lyr_fraction_modern)) %>%
  select(-measurement_pathway) %>%
  distinct() %>%
  arrange(entry_name, site_name)
# Stack the dummy layers with the true soil layers, keeping unique rows only.
layer.red.final <- arrange(distinct(bind_rows(layer.red1, layer.soil)),
                           entry_name, site_name)
#
# # get layer names
# site.names <- layer.red2 %>%
# select(site_name) %>%
# distinct() %>% pull()
# # create a list of vectors for number of fluxes per site
# x <- list()
# for (i in 1:length(site.names)){
# x[[i]] <- layer.red2 %>%
# filter(site_name == site.names[i]) %>%
# mutate(index = 1:n()) %>%
# select(index)
# }
# x <- bind_rows(x)
#
# # finalize layer names
# layer.red.final <- layer.red2 %>%
# mutate(index = x$index) %>%
# mutate(lyr_name = paste(site_name, lyr_name, index, sep = "_")) %>%
# arrange(entry_name,site_name) %>%
# select(-index)
# Extract the interstitial (soil porespace) records.
interstitial <- filter(measurements, measurement_pathway == "Soil porespace")
# get interstitial names: build a per-site running index (1..n per site)
site.names <- interstitial %>%
  select(site_name) %>%
  distinct() %>% pull()
# create a list of vectors for number of fluxes per site
# NOTE(review): the indices are concatenated in site.names order (order of
# first appearance) and later assigned positionally via mutate(index = ...);
# this assumes interstitial rows are contiguous per site in that same order —
# confirm if upstream ordering changes.
x <- list()
for (i in 1:length(site.names)){
  x[[i]] <- interstitial %>%
    filter(site_name == site.names[i]) %>%
    mutate(index = 1:n()) %>%
    select(index)
}
x <- bind_rows(x)
## assign final field names and correct controlled vocab
# ist_name = depth_analyte_index; three &-separated notes packed into ist_notes
interstitial <- interstitial %>%
  mutate(index = x$index) %>%  # positional join with the per-site index built above
  mutate(ist_depth = measurement_depth,
         ist_analyte = measurement_analyte,
         ist_notes = paste("3 notes sep by &: atmosphere", measurement_method_note, measurement_method_note2, sep = " & "),
         ist_13c = d13c,
         ist_fraction_modern = Fm) %>%
  mutate(ist_name = paste(ist_depth, ist_analyte, index, sep = "_")) %>%
  arrange(entry_name,site_name) %>%
  select(entry_name,
         site_name,
         pro_name,
         ist_name,
         ist_obs_date_y = measurement_obs_date_y,
         ist_obs_date_m = measurement_obs_date_m,
         ist_obs_date_d = measurement_obs_date_d,
         ist_depth,
         ist_analyte,
         ist_notes,
         ist_2h = flx_2h,
         ist_18o = d18O_permil,
         ist_13c,
         ist_fraction_modern) %>%
  mutate(ist_obs_date_y = as.character(ist_obs_date_y),
         ist_obs_date_m = as.character(ist_obs_date_m),
         ist_obs_date_d = as.character(ist_obs_date_d))
# attach entry/site/profile context from the profile tab (columns 1, 2 and 4:
# entry_name, site_name, pro_name) via inner_join on the shared column names
# NOTE(review): inner_join drops interstitial rows without a matching profile —
# confirm that is intended.
interstitial <- profile[,c(1,2,4)] %>% inner_join(interstitial) %>% distinct()
# replace Atm_CO2 with CO2; map the "Soil" analyte to POC for interstitial data
interstitial <- interstitial %>%
  mutate(ist_analyte = as.factor(ist_analyte)) %>%
  mutate(ist_analyte = recode_factor(ist_analyte, Atm_CO2 = "CO2", `Soil` = "POC"))
# Extract the incubation records.
incubation <- filter(measurements, measurement_pathway == "Incub")
# get incubation names: per-site running index (1..n per site), same pattern
# as the interstitial index above
site.names <- incubation %>%
  select(site_name) %>%
  distinct() %>% pull()
# create a list of vectors for number of fluxes per site
# NOTE(review): assumes incubation rows are contiguous per site in site.names
# (first-appearance) order when the index is assigned positionally below.
x <- list()
for (i in 1:length(site.names)){
  x[[i]] <- incubation %>%
    filter(site_name == site.names[i]) %>%
    mutate(index = 1:n()) %>%
    select(index)
}
x <- bind_rows(x)
# assign final field names and correct controlled vocab
# inc_name = site_profile_depth_component_index; soil-fraction codes mapped to
# the "live roots" / "root-picked soil" vocabulary
incubation <- incubation %>%
  mutate(measurement_ecosystem_component = as.character(measurement_ecosystem_component),
         measurement_incubation_soil = as.character(measurement_incubation_soil),
         index = x$index) %>%  # positional join with the per-site index above
  mutate(lyr_name = paste(site_name, pro_name, measurement_depth, sep = "_"),
         inc_name = paste(lyr_name, measurement_ecosystem_component,index, sep = "_"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Heterotrophic_respiration", "heterotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Autotrophic_respiration", "autotrophic"),
         inc_headspace = measurement_incubation_headspace,
         measurement_incubation_soil = replace(measurement_incubation_soil, measurement_incubation_soil %in% c("Autotrophic","Roots"),
                                               "live roots"),
         measurement_incubation_soil = replace(measurement_incubation_soil, measurement_incubation_soil == "Mineral",
                                               "root-picked soil"),
         inc_note = paste(measurement_ecosystem_component, inc_headspace, sep = " "),
         inc_depth = measurement_depth) %>%
  arrange(entry_name,site_name,inc_depth) %>%
  select(entry_name,
         site_name,
         pro_name,
         lyr_name,
         inc_name,
         inc_type = measurement_incubation_soil,
         inc_note,
         inc_anaerobic = inc_headspace,
         inc_obs_date_y = measurement_obs_date_y,
         inc_obs_date_m = measurement_obs_date_m,
         inc_obs_date_d = measurement_obs_date_d,
         inc_analyte = measurement_analyte,
         inc_13c = d13c,
         inc_fraction_modern = Fm) %>%
  mutate(inc_obs_date_y = as.character(inc_obs_date_y),
         inc_obs_date_m = as.character(inc_obs_date_m),
         inc_obs_date_d = as.character(inc_obs_date_d),
         # NOTE(review): blanking "CO2" analytes looks deliberate (CO2 is the
         # template default?) — confirm against the ISRaD template docs
         inc_analyte = replace(inc_analyte, inc_analyte == "CO2", ""))
# normalise the two Organic spellings (with/without trailing space) to the
# controlled "root-picked soil" vocabulary
incubation <- incubation %>%
  mutate(inc_type = recode_factor(factor(inc_type), `Organic` = "root-picked soil", `Organic ` = "root-picked soil"))
# attach layer context (first 4 columns of layer.red.final: entry_name,
# site_name, pro_name, lyr_name) via inner_join on the shared column names;
# NOTE(review): drops incubations without a matching layer — confirm intended.
incubation <- layer.red.final[,1:4] %>% inner_join(incubation) %>% distinct()
# Coerce every column of each output tab to character so the tabs merge
# cleanly into the (all-character) template sheets.
metadata <- mutate_all(metadata, as.character)
site <- mutate_all(site, as.character)
profile <- mutate_all(profile, as.character)
flux <- mutate_all(flux, as.character)
layer <- mutate_all(layer.red.final, as.character)
interstitial <- mutate_all(interstitial, as.character)
incubation <- mutate_all(incubation, as.character)
# for each entry_name, pull the corresponding rows of every tab into a list,
# one list element per entry_name (same order as `names`)
# NOTE(review): `names` shadows base::names() for the rest of the script.
names <- metadata %>% select(entry_name) %>% pull()
# helper: split one tab into a list of per-entry data frames ordered like
# `names` — replaces seven identical copy-paste for-loops with one function
split_by_entry <- function(df) {
  lapply(names, function(nm) df %>% filter(entry_name == nm))
}
metadata.entries <- split_by_entry(metadata)
site.entries <- split_by_entry(site)
profile.entries <- split_by_entry(profile)
flux.entries <- split_by_entry(flux)
layer.entries <- split_by_entry(layer)
interstitial.entries <- split_by_entry(interstitial)
incubation.entries <- split_by_entry(incubation)
# template$metadata
#
# toutput.metadata <- list()
# toutput.site <- list()
# toutput.profile <- list()
# toutput.flux <- list()
# toutput.layer <- list()
# toutput.interstitial <- list()
# toutput.incubation <- list()
# toutput.fraction <- list()
# toutput.cvocab <- list()
#
# for (i in 1:length(names)) {
# # merge with template
# toutput.metadata[[i]] <- bind_rows(template$metadata, metadata.entries[[i]])
# toutput.metadata[[i]] <- toutput.metadata[[i]][-3,] # not sure why an extra row of NaN is added
# toutput.site[[i]] <- bind_rows(template$site, site.entries[[i]])
# toutput.profile[[i]] <- bind_rows(template$profile, profile.entries[[i]])
# toutput.flux[[i]] <- bind_rows(template$flux, flux.entries[[i]])
# toutput.layer[[i]] <- bind_rows(template$layer, layer.entries[[i]])
# toutput.interstitial[[i]] <- bind_rows(template$interstitial, interstitial.entries[[i]])
# toutput.fraction[[i]] <- template$fraction
# toutput.incubation[[i]] <- bind_rows(template$incubation, incubation.entries[[i]])
# toutput.cvocab[[i]] <- template$`controlled vocabulary`
# }
#
#
# toutput.byentry <- list()
# for (i in 1:length(names)){
# toutput.byentry[[i]] <- list(toutput.metadata[[i]], toutput.site[[i]], toutput.profile[[i]], toutput.flux[[i]],
# toutput.layer[[i]], toutput.interstitial[[i]], toutput.fraction[[i]] , toutput.incubation[[i]],
# toutput.cvocab[[i]])
# }
#
# # save gas template
# for (i in 1:length(names)){
# names(toutput.byentry[[i]]) <- c("metadata","site","profile","flux","layer","interstitial","fraction","incubation",'controlled vocabulary')
# write.xlsx(toutput.byentry[[i]], paste("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Aragones Ingest Files/By Entry/",names[i],".xlsx", sep = ""),
# keepNA = FALSE)
# }
#
# interactive inspection of the (empty) template profile sheet
template$profile
############################################ WATER
# read in template file from ISRaD Package: one data frame per sheet, all
# columns coerced to character so later bind_rows never hits type conflicts
template_file <- system.file("extdata", "ISRaD_Master_Template.xlsx", package = "ISRaD")
template <- lapply(getSheetNames(template_file), function(s) read.xlsx(template_file, sheet=s))
names(template) <- getSheetNames(template_file)
template <- lapply(template, function(x) x %>% mutate_all(as.character))
# take a look at template structure
glimpse(template)
# load dataset
# NOTE(review): absolute local path — not portable; consider here::here() or a
# configurable data directory
Aragones_dataset <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/14C_Dataset_Final_Cristian.csv", na.strings = c("","NA"),
                             stringsAsFactors = FALSE)
# Tidy the raw dataset: cast grouping columns to factor, normalise spaces to
# underscores in Study / Flux_type / Autotrophic_type, and parse Sampling_date
# (m/d/y written with slashes).
Aragones_tidy <- Aragones_dataset %>%
  mutate(Dataset = as.factor(Dataset),
         Study = as.factor(str_replace_all(Study, fixed(" "), "_")),
         Yedoma = as.factor(Yedoma),
         LAR = as.factor(LAR),
         PF = as.factor(PF),
         Thermokarst = as.factor(Thermokarst),
         Yukon_Kolyma_origin = as.factor(Yukon_Kolyma_origin),
         Flux_type = as.factor(str_replace_all(Flux_type, fixed(" "), "_")),
         Depth_cm = as.numeric(Depth_cm),
         Aerob_anaerob_incub = as.factor(Aerob_anaerob_incub),
         Org_Min_Incub = as.factor(Org_Min_Incub),
         Autotrophic_type = as.factor(str_replace_all(Autotrophic_type, fixed(" "), "_")),
         Manipulation_study = as.factor(Manipulation_study),
         # BUG FIX: the original used a scalar `if (length(...) == 5)`, which
         # tested the NUMBER OF ROWS (essentially never 5), so the zero-pad
         # branch for 5-digit m/d/y strings (e.g. "51517" -> "051517") never
         # ran. Use vectorised nchar() per element and paste0() (no space)
         # so each date is padded only when it needs a leading month zero.
         Sampling_date = mdy(ifelse(nchar(str_replace_all(Sampling_date, fixed("/"), "")) == 5,
                                    paste0("0", str_replace_all(Sampling_date, fixed("/"), "")),
                                    str_replace_all(Sampling_date, fixed("/"), "")))) %>%
  select(1:67)
# many conventions different between water and gas data. split to treat differently
Aragones_water <- Aragones_tidy %>%
  filter(Dataset == "Water")
# inspect the water subset before processing
str(Aragones_water)
# interactive inspection: glimpse the subset of columns that feed the water
# ingest below (no assignment — purely exploratory)
Aragones_water %>% arrange(ID_merged) %>%
  select(Study,
         Full_class,
         General_ecosystem,
         PF,
         Yedoma,
         Grouped_Data_source,
         Specific_ecosystem,
         Flux_type,
         Depth_cm,
         Aerob_anaerob_incub,
         Org_Min_Incub,
         Autotrophic_type,
         Manipulation_study,
         Sampling_year_fraction,
         Sampling_date,
         DOY,
         Gral_description,
         WT_cm,
         Latitude_decimal_degrees,
         Longitude_decimal_degrees,
         d18O_permil,
         d13C_Soil,
         d13C_Atm_CO2,
         d13C_CO2,
         d13C_CH4,
         H_CH4,
         H_H2O,
         d13C_DOC,
         d13C_POC,
         DOC_mgC_L,
         POC_mgC_L,
         TOC_mgC_L,
         Fm_Soil,
         Fm_Atm_CO2,
         Fm_CO2,
         Fm_CH4,
         Fm_DOC,
         Fm_POC,
         Detailed_ecosystem_classification,
         Basin_Area_Drainage_Area_km2,
         MainRiver_name,
         Instantaneous_discharge_m3_per_s) %>%
  glimpse()
## fill entry_name: one row per unique study
str(template$metadata)
entry_name <- Aragones_water %>%
  select("Study") %>%
  distinct() %>%
  # Study was already underscore-normalised when Aragones_tidy was built, so a
  # plain copy suffices here
  mutate(entry_name = Study) %>%
  select("entry_name") %>%
  arrange(as.factor(entry_name))
# read in doi csv
# NOTE(review): absolute local path — not portable
Aragones_doi <- read.csv("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Estop Aragones DOI List.csv", na.strings = c("","NA"),
                         stringsAsFactors = FALSE)
# format study names (spaces -> underscores, matching entry_name) and join to dois
doi <- Aragones_doi %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "), "_")) %>%
  arrange(as.factor(entry_name)) %>%
  select("entry_name","DOI")
## be careful here, I think a couple of dois are lost/doubled up based on Alison's excel file
metadata <- full_join(entry_name, doi)
# fill metadata (56 unique studies in water data): constant curator/contact
# fields plus the template version stamp
metadata <- metadata %>%
  mutate(compilation_doi = NA,
         curator_name = "Gavin McNicol",
         curator_organization = "Stanford University",
         curator_email = "gmcnicol@stanford.edu",
         modification_date_y = "2019",
         modification_date_m = "08",
         modification_date_d = "12",
         contact_name = "Cristian Estop-Aragones",
         contact_email = "estopara@ualberta.ca",
         contact_orcid_id = NA,
         bibliographical_reference = "Estop-Aragones 2018",
         metadata_note = NA,
         associated_datasets = NA,
         template_version = 20190812
  ) %>%
  arrange(entry_name)
# start by defining sites as unique lat longs
# NOTE(review): inside select(), str_replace_all("Study", ...) evaluates to the
# literal string "Study" (nothing to replace), so this is just a rename of the
# Study column; the *values* were already underscore-normalised upstream.
# The as_tibble(cbind(...)) wrapper only converts the result to a tibble.
site <- as_tibble(cbind(
  Aragones_water %>%
    select(entry_name = str_replace_all("Study", fixed(" "), "_"),
           site_latitude = "Latitude_decimal_degrees",
           site_longitude = "Longitude_decimal_degrees",
           Data_source_comments, MainRiver_name, More_description) %>%
    mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>% # note double space sep
    select(entry_name, site_name) %>%
    # filter(site_name != "NA NA") %>%
    distinct(entry_name, site_name) %>%
    arrange(entry_name, site_name)
)
)
## get site_note variables to paste(): normalise the Yedoma labels, build
## site_note = "Full_class Yedoma", and keep one note per lat-long site
site_note_df <- as_tibble(Aragones_water) %>%
  mutate(Yedoma = as.character(Yedoma),
         Thermokarst = as.character(Thermokarst)) %>%
  mutate(Yedoma = replace(Yedoma, Yedoma %in% c("No","No?"), "Not Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("Yes","Probably"), "Yedoma"),
         Yedoma = replace(Yedoma, Yedoma %in% c("?","Unknown"), "Yedoma Unknown")) %>%
  mutate(site_note = paste(Full_class, Yedoma)) %>%
  # the str_replace_all("Study", ...) call evaluates to the string "Study" and
  # simply renames that column (values already underscore-normalised upstream)
  select(site_note,entry_name = str_replace_all("Study", fixed(" "), "_"),
         site_latitude = "Latitude_decimal_degrees",
         site_longitude = "Longitude_decimal_degrees",
         Data_source_comments, More_description) %>%
  mutate(site_name = paste(site_latitude, site_longitude, sep = " ")) %>%
  select(site_name, site_note) %>%
  # filter(site_name != "NA NA") %>%
  group_by(site_name) %>%
  summarize(site_note = site_note[1])  # first note per site wins
#136 unique lat long and site note combinations
# now fill in other site variables: split "lat long" back into its two parts
# (grouped so strsplit runs once per site) and join the per-site note
site <- site %>%
  group_by(site_name) %>%
  mutate(site_latlong = strsplit(site_name[[1]], " ", fixed = TRUE),
         site_datum = NA,
         site_elevation = NA) %>%
  mutate(site_lat = site_latlong[[1]][1],
         site_long = site_latlong[[1]][2]) %>%
  select(entry_name,
         site_name, site_lat, site_long, site_datum,
         site_elevation) %>%
  arrange(entry_name, site_name)
site <- site %>% left_join(site_note_df)
## Fill profile tab
# get number of individual rows per site in Aragones database, plus one
# representative value ([1], first occurrence) of each site-level attribute
num.aragones.rows <- as_tibble(Aragones_water) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = " "),
         pro_name_chr = paste(Specific_location, Specific_ecosystem, Manipulation, sep = "_")) %>%
  select(entry_name, site_name,pro_name_chr,
         Gral_description, Detailed_ecosystem_classification, General_ecosystem,
         Thermokarst, PF, Basin_Area_Drainage_Area_km2, MainRiver_name,
         Manipulation_study, Manipulation, AL_cm,
         Year, Month, Day,
         Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2, Instantaneous_discharge_m3_per_s) %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n(),
            gral = Gral_description[1],
            detailed_class = Detailed_ecosystem_classification[1],
            treatment = Manipulation_study[1],
            treatment_note = Manipulation[1],
            thaw_depth = AL_cm[1],
            pro_name_chr = pro_name_chr[1],
            pro_catchment_area = Basin_Area_Drainage_Area_km2[1],
            pro_water_body = General_ecosystem[1],
            pro_water_body_name = MainRiver_name[1],
            pro_permafrost = PF[1],
            pro_thermokarst = Thermokarst[1]
  )
## replicate reference columns and columns for pro_note field 1163 times:
## expand each site-level value to one element per original data row.
## Vectorised with rep(x, times = n) / sequence(n), replacing the previous
## fourteen grow-a-list loops + unlist calls — same values, same order
## (assumes, like the original, that data rows are contiguous per site in the
## entry_name/site_name order of num.aragones.rows).
n.rep <- num.aragones.rows$aragones.rows
aragones.rows.vector <- sequence(n.rep)  # within-site running index 1..n
sitenames.vector <- rep(num.aragones.rows$site_name, n.rep)
entrynames.vector <- rep(as.character(num.aragones.rows$entry_name), n.rep)
gral.vector <- rep(num.aragones.rows$gral, n.rep)
detail.vector <- rep(num.aragones.rows$detailed_class, n.rep)
t.vector <- rep(num.aragones.rows$treatment, n.rep)
t_note.vector <- rep(num.aragones.rows$treatment_note, n.rep)
thaw.vector <- rep(num.aragones.rows$thaw_depth, n.rep)
name_chr.v <- rep(num.aragones.rows$pro_name_chr, n.rep)
pc_area <- rep(num.aragones.rows$pro_catchment_area, n.rep)
pw_body <- rep(num.aragones.rows$pro_water_body, n.rep)
pw_body_name <- rep(num.aragones.rows$pro_water_body_name, n.rep)
pp <- rep(num.aragones.rows$pro_permafrost, n.rep)
pt <- rep(num.aragones.rows$pro_thermokarst, n.rep)
# create a tibble to do a left join
# NOTE(review): cbind() of mixed vectors builds a character matrix; factor
# columns are coerced to their integer codes as text (e.g. treatment becomes
# "1"/"2" — the recode downstream matches those codes). Confirm every factor
# column downstream is recoded by code, not by label.
profiles <- as_tibble(cbind(sitenames.vector,aragones.rows.vector, entrynames.vector,gral.vector,
                            detail.vector, t.vector, t_note.vector, thaw.vector, name_chr.v,
                            pc_area, pw_body, pw_body_name, pp, pt))
profiles <- profiles %>% mutate(site_name = sitenames.vector,
                                aragones_rows = aragones.rows.vector,
                                entry_name = entrynames.vector,
                                plot_name = paste(gral.vector, detail.vector, sep = " "),
                                pro_treatment = t.vector,
                                pro_treatment_note = t_note.vector,
                                pro_thaw_note = thaw.vector,
                                pro_name_chr = name_chr.v,
                                pro_catchment_area = pc_area,
                                pro_water_body = pw_body,
                                pro_water_body_name = pw_body_name,
                                pro_permafrost = pp,
                                pro_thermokarst = pt) %>%
  select(entry_name, site_name, pro_name_chr, aragones_rows, plot_name,
         pro_treatment, pro_treatment_note, pro_thaw_note,
         pro_name_chr, pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst)
# temporary profile tab, still need to add in pro_note from flux (below)
profile <- profiles %>%
  mutate(entry_name = entry_name,
         site_name = site_name,
         plot_name = plot_name,
         pro_name = replace_na(pro_name_chr, 1),
         aragones_rows = aragones_rows,
         pro_lat = NA,
         pro_long = NA,
         pro_elevation = NA,
         # treatment arrives as factor codes "1"/"2" (see cbind note above)
         pro_treatment = recode_factor(factor(pro_treatment), `1` = "control", `2` = "treatment"),
         pro_treatment_note = pro_treatment_note,
         pro_thaw_note = pro_thaw_note,
         pro_catchment_area = pro_catchment_area,
         pro_water_body = pro_water_body,
         pro_water_body_name = pro_water_body_name,
         # NOTE(review): if pro_permafrost also arrives as codes "1"/"2", the
         # label-based recode below never matches — verify the actual values
         pro_permafrost = recode_factor(factor(pro_permafrost), `Not PF` = "", `PF` = "yes"),
         pro_thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "No", "Not Thermokarst"),
         # NOTE(review): the two `Thermokarst` assignments below each start
         # from pro_thermokarst (the second overwrites the first), and the
         # Thermokarst column is dropped by the select() anyway — dead code?
         Thermokarst = replace(pro_thermokarst,pro_thermokarst %in% "Yes", "yes"),
         Thermokarst = replace(pro_thermokarst, pro_thermokarst %in% c("Probably","Probably "), "yes")) %>%
  mutate(pro_treatment = replace_na(pro_treatment, 'control')) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         aragones_rows,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_note,
         pro_catchment_area, pro_water_body, pro_water_body_name,
         pro_permafrost, pro_thermokarst) %>%
  arrange(entry_name,site_name)
##################
## fill out a 'measurements' tab, from which we split fluxes, then layer,
## interstitial and incubation data
# get the actual per-row values again from the raw water subset
measurements <- as_tibble(Aragones_water) %>%
  mutate(entry_name = str_replace_all(Study, fixed(" "),("_")),
         site_latitude = Latitude_decimal_degrees,
         site_longitude = Longitude_decimal_degrees,
         site_name = paste(site_latitude, site_longitude, sep = " "),
         pro_name = replace_na(Specific_location, 1)) %>%
  select(entry_name, site_name, pro_name,
         Year, Month, Day, Grouped_Data_source, Data_source,
         Flux_type, Sampling_method, Sample_treatment,
         Specific_discharge_m3_per_s_per_km2,
         Data_source_comments, MainRiver_name, More_description,
         Depth_cm, Aerob_anaerob_incub, Org_Min_Incub,Autotrophic_type,
         Instantaneous_discharge_m3_per_s,
         H_CH4, H_H2O, d18O_permil,
         DOC_mgC_L, POC_mgC_L, TOC_mgC_L,
         d13C_Soil, d13C_Atm_CO2, d13C_CO2, d13C_CH4, d13C_DOC, d13C_POC,
         Fm_Soil, Fm_Atm_CO2, Fm_CO2, Fm_CH4, Fm_DOC, Fm_POC) %>%
  arrange(entry_name,site_name, pro_name)
## bind the profile tab's pro_name and aragones_rows columns onto measurements
# NOTE(review): this is a positional cbind — it assumes profile and
# measurements have identical row counts and identical row order; confirm both
# were sorted with the same arrange() keys before rerunning.
measurements <- as_tibble(cbind(
  profile$pro_name, profile$aragones_rows,
  measurements
))
# make a dummy tab called template$measurements
# select all fields plus a concatenated column for pro_note (needs to be moved
# to profile tab). Reshape: gather all isotope/concentration columns long,
# derive an analyte and a target-field index per measurement, then spread back
# so each row carries d13c / Fm / flx_2h / flx_analyte_conc side by side.
measurements <- measurements %>%
  mutate(pro_note = paste(Data_source_comments, More_description, sep = " ")) %>%
  gather(key = "measurement_name",
         value = "measurement_value",
         c("H_CH4", "H_H2O",
           "DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L",
           "d13C_Soil","d13C_Atm_CO2","d13C_CO2",
           "d13C_CH4","d13C_DOC","d13C_POC",
           "Fm_Soil","Fm_Atm_CO2","Fm_CO2",
           "Fm_CH4","Fm_DOC","Fm_POC")) %>%
  mutate(measurement_analyte = measurement_name,
         measurement_index = measurement_name) %>%
  mutate(measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Atm_CO2","Fm_Atm_CO2"), "Atm_CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CO2","Fm_CO2"), "CO2"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_CH4","Fm_CH4","H_CH4"), "CH4"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_DOC","Fm_DOC","DOC_mgC_L"), "DOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_Soil","Fm_Soil"), "Soil"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("d13C_POC","Fm_POC", "POC_mgC_L"), "POC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("TOC_mgC_L"), "TOC"),
         measurement_analyte = replace(measurement_analyte, measurement_analyte %in% c("H_H2O"), "H2O"),
         measurement_index = replace(measurement_index, measurement_index %in% c("H_CH4", "H_H2O"), "flx_2h"),
         measurement_index = replace(measurement_index, measurement_index %in% c("DOC_mgC_L", "POC_mgC_L", "TOC_mgC_L"), "flx_analyte_conc"),
         measurement_index = replace(measurement_index, measurement_index %in% c("d13C_Soil","d13C_Atm_CO2","d13C_CO2",
                                                                                 "d13C_CH4","d13C_DOC","d13C_POC"), "d13c"),
         measurement_index = replace(measurement_index, measurement_index %in% c("Fm_Soil","Fm_Atm_CO2","Fm_CO2",
                                                                                 "Fm_CH4","Fm_DOC","Fm_POC"), "Fm"),
         # anaerobic flag: Aerobic -> "" (template default), Anaerobic -> "yes"
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Aerobic` = ""),
         Aerob_anaerob_incub = recode_factor(Aerob_anaerob_incub, `Anaerobic` = "yes")) %>%
  spread(key = measurement_index, value = measurement_value) %>%
  select(entry_name,site_name, pro_name = "profile$pro_name",
         aragones_rows = "profile$aragones_rows",
         measurement_obs_date_y = Year,
         measurement_obs_date_m = Month,
         measurement_obs_date_d = Day,
         measurement_pathway = Grouped_Data_source, # for splitting flux, incub, inter
         measurement_pathway_note = Data_source,
         # measurement_analyte = Flux_type,
         measurement_ecosystem_component = Flux_type,
         measurement_method_note = Sampling_method,
         measurement_method_note2 =Sample_treatment,
         measurement_rate = Specific_discharge_m3_per_s_per_km2,
         measurement_depth = Depth_cm,
         measurement_incubation_headspace = Aerob_anaerob_incub,
         measurement_incubation_soil = Org_Min_Incub,
         measurement_incubation_auto_type = Autotrophic_type,
         measurement_analyte,
         Instantaneous_discharge_m3_per_s,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c,Fm, pro_note)
# Keep only rows that carry at least one measurement value (13C, Fm, 2H, 18O
# or an analyte concentration); rows where all five are NA are dropped.
measurements <- measurements %>%
  filter(!(is.na(flx_2h) & is.na(d13c) & is.na(Fm) & is.na(d18O_permil) & is.na(flx_analyte_conc)))
# Order rows by entry then site.
measurements <- arrange(measurements, entry_name, site_name)
# gather, remove NAs and spread again so the 13c / Fm / 2H / 18O / conc values
# that belong to the same original aragones_row collapse onto one row
measurements <- measurements %>% gather(key = "measurement", value = "value",
                                        c("d13c","Fm", "flx_2h", "d18O_permil", "flx_analyte_conc")) %>%
  arrange(entry_name, site_name) %>% filter(!is.na(value)) %>%
  spread(key = "measurement", value = "value")
# One representative pro_note per profile (first occurrence wins).
pro_note_summary <- measurements %>%
  group_by(pro_name) %>%
  summarize(pro_note = first(pro_note))
# finalize profile by adding in pro_note from the measurements tab
# ##### check for other unique pro_thaw labels
profile <- as_tibble(left_join(profile, pro_note_summary, by = "pro_name")) %>%
  mutate(pro_thaw = as.factor(pro_thaw_note)) %>%
  # collapse the free-text thaw-depth ranges to representative midpoints (cm)
  mutate(pro_thaw_depth = recode_factor(pro_thaw,
                                        `40 to 60 in the site, not in the aquatic system` = "50",
                                        `46 to 55` = '50',
                                        `60 to 120` = '90')) %>%
  # BUG FIX: the original converted `pro_thaw` (the UNRECODED factor) to
  # numeric, which discarded the recoding above and turned every text label
  # into NA. Convert the recoded `pro_thaw_depth` instead.
  mutate(pro_thaw_depth = as.numeric(as.character(pro_thaw_depth))) %>%
  select(entry_name,
         site_name,
         plot_name,
         pro_name,
         pro_note,
         pro_lat,
         pro_long,
         pro_elevation,
         pro_treatment,
         pro_treatment_note,
         pro_thaw_depth,
         pro_catchment_area,
         pro_permafrost,
         pro_thermokarst,
         pro_water_body,
         pro_water_body_name) %>%
  distinct() %>%
  arrange(entry_name,site_name,pro_name)
# remove pro_note from the measurements tab
# (pro_note now lives on the profile tab; keep only per-measurement fields,
# renaming the instantaneous discharge to its final flx_ name)
measurements <- measurements %>%
  select(entry_name,
         site_name,
         pro_name,
         measurement_obs_date_y,
         measurement_obs_date_m,
         measurement_obs_date_d,
         measurement_pathway, # for splitting flux, incub, inter
         measurement_pathway_note,
         # measurement_analyte,
         measurement_ecosystem_component,
         measurement_method_note,
         measurement_method_note2,
         measurement_rate,
         measurement_depth,
         measurement_incubation_headspace,
         measurement_incubation_soil,
         measurement_incubation_auto_type,
         flx_discharge_rate = Instantaneous_discharge_m3_per_s,
         measurement_analyte,
         flx_2h, d18O_permil,
         flx_analyte_conc,
         d13c, Fm,
         # NOTE(review): -pro_note is redundant — the positive selection above
         # already excludes pro_note; harmless but confusing.
         -pro_note)
# Split into flux vs depth-resolved data: everything except the soil-bound
# pathways ("Soil", "SoilDOC") is treated as flux. All bubble data stays in
# flux (even bubbles released by stirring sediment) because those observations
# cannot be attributed to a specific layer.
soil_pathways <- c("SoilDOC", "Soil")
flux <- filter(measurements, !(measurement_pathway %in% soil_pathways))
### placeholder for correcting controlled vocab
# create index vector to make the flx_name unique:
# a within-group running index 1..n per entry_name/site_name group
flx_x <- flux %>%
  group_by(entry_name, site_name) %>%
  summarize(aragones.rows = n())
# base::sequence() builds c(1:n1, 1:n2, ...) in one vectorised call, replacing
# the previous grow-a-list-then-unlist loop. Same values, same order; like the
# original, this assumes flux rows are grouped contiguously in the
# entry_name/site_name order produced by summarize().
x <- sequence(flx_x$aragones.rows)
#finalize
# Map the raw spreadsheet vocabulary onto controlled-vocabulary terms and
# build a unique per-observation name (pro_name + analyte + running index).
flux <- flux %>%
  # work on character copies so replace() can assign new labels, and attach
  # the per-(entry, site) running index x computed above
  mutate(flx_pathway = as.character(measurement_pathway),
         measurement_ecosystem_component = as.character(measurement_ecosystem_component),
         index = x) %>%
  # translate pathway and ecosystem-component codes to controlled vocabulary
  mutate(flx_pathway = replace(flx_pathway, measurement_pathway == "Bubbles", "bubble ebullition"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Flux", "soil emission"),
         flx_pathway = replace(flx_pathway, measurement_pathway == "Water", "dissolved"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component %in%
                                                     c("CH4","Heterotrophic_respiration","Soil"), "heterotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Autotrophic_respiration", "autotrophic"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Ecosystem_respiration", "ecosystem"),
         measurement_ecosystem_component = replace(measurement_ecosystem_component, measurement_ecosystem_component ==
                                                     "Water_export", "aquatic"),
         flx_name = paste(pro_name, measurement_analyte,index, sep = " ")) %>%
  # rename measurement_* columns to the flx_* template names
  select(entry_name,
         site_name,
         pro_name,
         flx_name,
         flx_obs_date_y = measurement_obs_date_y,
         flx_obs_date_m = measurement_obs_date_m,
         flx_obs_date_d = measurement_obs_date_d,
         flx_pathway = flx_pathway,
         flx_pathway_note = measurement_pathway_note,
         flx_ecosystem_component = measurement_ecosystem_component,
         flx_method = measurement_method_note,
         flx_method_note = measurement_method_note2,
         flx_rate = measurement_rate,
         flx_analyte = measurement_analyte,
         flx_analyte_conc,
         flx_discharge_rate,
         flx_18o = d18O_permil,
         flx_2h,
         flx_13c = d13c,
         flx_fraction_modern = Fm) %>%
  # dates as character so they bind cleanly with the (character) template tabs
  mutate(flx_obs_date_y = as.character(flx_obs_date_y),
         flx_obs_date_m = as.character(flx_obs_date_m),
         flx_obs_date_d = as.character(flx_obs_date_d))
# replicate the profiles pasted with the depth for dummy layer names then specify top and bottom depth with depth +/- 5 cm
# Soil-pathway measurements become layer records: each gets a synthetic
# lyr_name (profile + depth) and a 10 cm layer centered on the depth.
layer <- measurements %>%
  filter(measurement_pathway %in% c("Soil")) %>%
  mutate(lyr_name = paste(pro_name, measurement_depth, sep = "_"),
         lyr_top = measurement_depth - 5, lyr_bot = measurement_depth + 5,
         # NOTE(review): lyr_all_org_neg is created here but not kept by the
         # select() below -- confirm whether it should be carried forward
         lyr_all_org_neg = 'yes') %>%
  select(entry_name,
         site_name,
         pro_name,
         lyr_name,
         lyr_obs_date_y = measurement_obs_date_y,
         lyr_obs_date_m = measurement_obs_date_m,
         lyr_obs_date_d = measurement_obs_date_d,
         lyr_top,
         lyr_bot,
         measurement_pathway,
         lyr_13c = d13c,
         lyr_fraction_modern = Fm) %>%
  # dates as character so they bind cleanly with the (character) template tabs
  mutate(lyr_obs_date_y = as.character(lyr_obs_date_y),
         lyr_obs_date_m = as.character(lyr_obs_date_m),
         lyr_obs_date_d = as.character(lyr_obs_date_d))
# # extract the interstitial data --- there are zero interstitial records in water group
## finalize water data
# set all columns as character
# Coerce every column of every tab to character so bind_rows() against the
# (character) template tabs below cannot fail on type mismatches.
# NOTE(review): mutate_all() is superseded in current dplyr; the equivalent
# is mutate(across(everything(), as.character)).
metadata <- metadata %>%
  mutate_all(as.character)
site <- site %>%
  mutate_all(as.character)
profile <- profile %>%
  mutate_all(as.character)
flux <- flux %>%
  mutate_all(as.character)
# NOTE(review): this reads layer.red.final rather than the `layer` built
# above, silently discarding that work -- presumably layer.red.final is a
# later-reduced version defined elsewhere in the file; confirm this is intended.
layer <- layer.red.final %>%
  mutate_all(as.character)
interstitial <- interstitial %>%
  mutate_all(as.character)
incubation <- incubation %>%
  mutate_all(as.character)
# for each entry_name, pull in the corresponding rows for each tab into a list, where elemnts are different entry_names
# NOTE(review): `names` shadows base::names(); kept because later code
# (the write-out loop) refers to this variable by that name.
names <- metadata %>% select(entry_name) %>% pull()
# Subset one tab into a per-entry list, in the same order as `names`.
# Replaces seven copy-pasted for-loops; lapply() also behaves correctly
# when `names` is empty, unlike the original 1:length() loops which would
# have iterated over c(1, 0).
subset_by_entry <- function(tab) {
  lapply(names, function(nm) filter(tab, entry_name == nm))
}
# metadata
metadata.entries.water <- subset_by_entry(metadata)
# site
site.entries.water <- subset_by_entry(site)
# profile
profile.entries.water <- subset_by_entry(profile)
# flux
flux.entries.water <- subset_by_entry(flux)
# layer
layer.entries.water <- subset_by_entry(layer)
# interstitial
interstitial.entries.water <- subset_by_entry(interstitial)
# incubation
incubation.entries.water <- subset_by_entry(incubation)
## write out final files (by entry)
# One accumulator list per template tab; element i corresponds to names[i].
toutput.metadata <- list()
toutput.site <- list()
toutput.profile <- list()
toutput.flux <- list()
toutput.layer <- list()
toutput.interstitial <- list()
toutput.incubation <- list()
toutput.fraction <- list()
toutput.cvocab <- list()
#merge with template
# Stack each entry's rows (both the original and the water-group subsets)
# under the template header rows. seq_along() replaces 1:length(), which
# misbehaves when `names` is empty.
for (i in seq_along(names)) {
  toutput.metadata[[i]] <- bind_rows(template$metadata, metadata.entries[[i]])
  # NOTE(review): metadata binds only metadata.entries (not the .water list,
  # unlike every other tab) -- confirm that is intentional.
  # Row 3 is dropped because bind_rows adds an unexplained row of NaN here.
  toutput.metadata[[i]] <- toutput.metadata[[i]][-3,] %>% distinct() # not sure why an extra row of NaN is added
  toutput.site[[i]] <- bind_rows(template$site, site.entries[[i]],site.entries.water[[i]]) %>% distinct()
  toutput.profile[[i]] <- bind_rows(template$profile, profile.entries[[i]],profile.entries.water[[i]]) %>% distinct()
  toutput.flux[[i]] <- bind_rows(template$flux, flux.entries[[i]],flux.entries.water[[i]]) %>% distinct()
  toutput.layer[[i]] <- bind_rows(template$layer, layer.entries[[i]],layer.entries.water[[i]]) %>% distinct()
  toutput.interstitial[[i]] <- bind_rows(template$interstitial, interstitial.entries[[i]],interstitial.entries.water[[i]]) %>% distinct()
  toutput.fraction[[i]] <- template$fraction
  toutput.incubation[[i]] <- bind_rows(template$incubation, incubation.entries[[i]],incubation.entries.water[[i]]) %>% distinct()
  toutput.cvocab[[i]] <- template$`controlled vocabulary`
}
# Assemble the per-entry workbook: one list of tabs per entry_name.
toutput.byentry <- list()
for (i in seq_along(names)){
  toutput.byentry[[i]] <- list(toutput.metadata[[i]], toutput.site[[i]], toutput.profile[[i]], toutput.flux[[i]],
                               toutput.layer[[i]], toutput.interstitial[[i]], toutput.fraction[[i]] , toutput.incubation[[i]],
                               toutput.cvocab[[i]])
}
# save water and gas template
# NOTE(review): the output directory is a hard-coded user-specific path;
# consider making it a variable at the top of the script.
for (i in seq_along(names)){
  names(toutput.byentry[[i]]) <- c("metadata","site","profile","flux","layer","interstitial","fraction","incubation",'controlled vocabulary')
  write.xlsx(toutput.byentry[[i]], paste("/Users/macbook/Desktop/Dropbox Temp/ISRaD/ISCN Collaboration/Aragones Ingest Files/By Entry v3/",names[i],".xlsx", sep = ""),
             keepNA = FALSE)
}
|
### test
# Read a Stata (.dta) file whose path is supplied as an *unquoted* symbol.
# deparse(substitute(x)) captures the caller's expression text and uses it
# as the file name (non-standard evaluation); the result is re-coerced to
# a plain data.frame. Relies on read.dta() (foreign) being on the search path.
usecda <- function(x){
  path <- deparse(substitute(x))
  data.frame(read.dta(path))
}
| /data/icprcda17-test.R | no_license | shawnana79/shawnana79.github.io | R | false | false | 111 | r | ### test
# Read a Stata (.dta) file whose path is supplied as an *unquoted* symbol:
# deparse(substitute(x)) captures the caller's expression text and uses it
# as the file name (non-standard evaluation).
# NOTE(review): requires read.dta() from the foreign package; no
# library(foreign) is visible in this chunk -- confirm it is loaded earlier.
usecda <- function(x){
  # data.frame() re-coerces read.dta()'s result to a plain data.frame
  data <- data.frame(read.dta(deparse(substitute(x))))
  return(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{fit.2g}
\alias{fit.2g}
\title{fit.2g}
\usage{
fit.2g(P, pars = c(0.5, 1.5), weights = rep(1, min(length(Z), dim(Z)[1])),
sigma_range = c(1, 100), rho = 0, ...)
}
\arguments{
\item{P}{numeric vector of observed data, either p-values or z-scores. If rho=0, should be one-dimensional vector; if rho is set, should be bivariate observations (P,Q)}
\item{pars}{initial values for parameters}
\item{weights}{optional weights for parameters}
\item{sigma_range}{range of possible values for sigma (closed interval). Default [1,100]}
}
\value{
a list containing parameters pars, likelihoods under h1 (Z distributed as above), likelihood under h0 (Z~N(0,1)) and likelihood ratio lr.
}
\description{
Fit a specific two-Gaussian mixture distribution to a set of P or Z values.
}
\details{
Assumes
Z ~ N(0,1) with probability pi0, Z ~ N(0,sigma^2) with probability 1-pi0
Returns MLE for pi0 and sigma. Uses R's optim function. Can weight observations.
}
\examples{
sigma=2; pi0 <- 0.8
n=10000; n0=round(pi0*n); n1=n-n0
Z = c(rnorm(n0,0,1),rnorm(n1,0,sqrt(1+ (sigma^2))))
fit=fit.2g(Z)
fit$pars
}
\author{
James Liley
}
| /man/fit.2g.Rd | permissive | biostatpzeng/cfdr | R | false | true | 1,210 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{fit.2g}
\alias{fit.2g}
\title{fit.2g}
\usage{
fit.2g(P, pars = c(0.5, 1.5), weights = rep(1, min(length(Z), dim(Z)[1])),
sigma_range = c(1, 100), rho = 0, ...)
}
\arguments{
\item{P}{numeric vector of observed data, either p-values or z-scores. If rho=0, should be one-dimensional vector; if rho is set, should be bivariate observations (P,Q)}
\item{pars}{initial values for parameters}
\item{weights}{optional weights for parameters}
\item{sigma_range}{range of possible values for sigma (closed interval). Default [1,100]}
}
\value{
a list containing parameters pars, likelihoods under h1 (Z distributed as above), likelihood under h0 (Z~N(0,1)) and likelihood ratio lr.
}
\description{
Fit a specific two-Gaussian mixture distribution to a set of P or Z values.
}
\details{
Assumes
Z ~ N(0,1) with probability pi0, Z ~ N(0,sigma^2) with probability 1-pi0
Returns MLE for pi0 and sigma. Uses R's optim function. Can weight observations.
}
\examples{
sigma=2; pi0 <- 0.8
n=10000; n0=round(pi0*n); n1=n-n0
Z = c(rnorm(n0,0,1),rnorm(n1,0,sqrt(1+ (sigma^2))))
fit=fit.2g(Z)
fit$pars
}
\author{
James Liley
}
|
#' Checks the java version on your computer and downloads MALLET jar files for use with this package.
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   downloading \code{mallet.jar} and \code{mallet-deps.jar} into the
#'   package's \code{extdata} directory.
#' @export
download_mallet <- function(){
    # Show the installed Java version so the user can verify it is 1.8+.
    # `java -version` writes to stderr, so capture both streams; the original
    # system(..., intern = TRUE) captured stdout only and silently discarded it.
    java_version <- tryCatch(
        suppressWarnings(system2("java", "-version", stdout = TRUE, stderr = TRUE)),
        error = function(e) "Could not run 'java' -- is Java installed and on your PATH?"
    )
    cat(java_version, sep = "\n")
    cat("You must have java version 1.8 or higher installed on your computer. You may update your java version by visiting the following website and then retry download. Website: http://www.oracle.com/technetwork/java/javase/downloads/index.html -- Make sure to select the JDK option from this page and then download the newest version.")
    # build destination paths inside the installed package's extdata directory
    directory <- system.file("extdata", package = "SpeedReader")[1]
    f1 <- paste(directory,"/mallet.jar",sep = "")
    f2 <- paste(directory,"/mallet-deps.jar",sep = "")
    url <- "http://mjdenny.com/SpeedReader/JAR_Files/"
    web1 <- paste(url,"mallet.jar",sep = "")
    web2 <- paste(url,"mallet-deps.jar",sep = "")
    # download the two jar files associated with the selected version
    cat("Downloading JAR files...\n" )
    cat("File 1 of 2...\n")
    download.file(url = web1, destfile = f1, method = "auto")
    cat("File 2 of 2...\n")
    download.file(url = web2, destfile = f2, method = "auto")
    cat("Downloads complete!\n")
    # check to see that the download worked: system.file() returns "" when
    # the requested file does not exist in the installed package
    test1 <- system.file("extdata","/mallet.jar", package = "SpeedReader")[1]
    test2 <- system.file("extdata","/mallet-deps.jar", package = "SpeedReader")[1]
    # scalar condition: use short-circuit && (the original used elementwise &)
    if(test1 != "" && test2 != ""){
        cat("JAR file downloads appear to have been successful!\n")
    }else{
        stop("It appears that one or more of the files did not download successfully...\n")
    }
    invisible(NULL)
}
| /R/download_mallet.R | no_license | bethanyleap/SpeedReader | R | false | false | 1,743 | r | #' Checks the java version on your computer and downloads MALLET jar files for use with this package.
#'
#' @return Does not return anything.
#' @export
download_mallet <- function(){
# determine if the user has a new enough version of Java (1.8 or higher)
system("java -version", intern = TRUE)
cat("You must have java version 1.8 or higher installed on your computer. You may update your java version by visiting the following website and then retry download. Website: http://www.oracle.com/technetwork/java/javase/downloads/index.html -- Make sure to select the JDK option from this page and then download the newest version.")
# get the right file names
directory <- system.file("extdata", package = "SpeedReader")[1]
f1 <- paste(directory,"/mallet.jar",sep = "")
f2 <- paste(directory,"/mallet-deps.jar",sep = "")
url <- "http://mjdenny.com/SpeedReader/JAR_Files/"
web1 <- paste(url,"mallet.jar",sep = "")
web2 <- paste(url,"mallet-deps.jar",sep = "")
# download the two jar files associated with the selected version
cat("Downloading JAR files...\n" )
cat("File 1 of 2...\n")
download.file(url = web1, destfile = f1, method = "auto")
cat("File 2 of 2...\n")
download.file(url = web2, destfile = f2, method = "auto")
cat("Downloads complete!\n")
#check to see that the download worked
test1 <- system.file("extdata","/mallet.jar", package = "SpeedReader")[1]
test2 <- system.file("extdata","/mallet-deps.jar", package = "SpeedReader")[1]
if(test1 != "" & test2 != ""){
cat("JAR file downloads appear to have been successful!\n")
}else{
stop("It appears that one or more of the files did not download successfully...\n")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.