content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env Rscript
# boxPlotFactors: draw median-ordered boxplots of classifier performance
# (the "roc" column of the input table) broken down by several experimental
# factors, one plot per page of the output PDF.
#
# Usage: boxPlotFactors title factors.tab outfile.pdf
library(lattice)   # NOTE(review): lattice does not appear to be used below
library(ggplot2)
args = commandArgs(TRUE)
if (length(args)==0){
stop("\n\nboxPlotFactors title factors.tab outfile.pdf\n\n")
}
titleRoot = args[1]    # prefix prepended to every plot title
fileName = args[2]     # tab-separated input with a header row
plotOutFile = args[3]  # destination PDF path
#titleRoot = "BLCA"
#fileName = "factors.tab"
#plotOutFile = "output.pdf"
classifiers = read.table(fileName,sep="\t",header=TRUE)
#classifiers
# Make median ordered factors...
# reorder() re-levels each factor by descending median roc, so boxplots
# appear best-to-worst from left to right.
bymedianFilter = with(classifiers, reorder(filter, -roc, median))
bymedianclassAttr = with(classifiers, reorder(classAttribute, -roc, median))
bymedianAttrSel = with(classifiers, reorder(attributeSelection, -roc, median))
bymedianNumAttributes = with(classifiers, reorder(factor(numAttributes), -roc, median))
bymedianClassifier = with(classifiers, reorder(classifier, -roc, median))
pdf(file=plotOutFile)
# Each bare "q+..." expression below is auto-printed at top level, which is
# what actually renders one page into the PDF device.
q = qplot(bymedianFilter,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Filter"))
q = qplot(bymedianclassAttr,roc,data=classifiers,geom="boxplot")
# NOTE(review): the title suffix is empty here -- possibly meant to read
# "Performance by Class Attribute"; confirm before changing.
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,""))
q = qplot(bymedianAttrSel,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Attribute Selection"))
q = qplot(bymedianNumAttributes,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Number of Attributes"))
q = qplot(bymedianClassifier,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Classifier Type"))
garbage = dev.off()  # assign to suppress dev.off()'s printed return value
| /boxPlotFactors.R | permissive | jdurbin/wekaMine | R | false | false | 1,721 | r | #!/usr/bin/env Rscript
# boxPlotFactors: draw median-ordered boxplots of classifier performance
# (the "roc" column of the input table) broken down by several experimental
# factors, one plot per page of the output PDF.
#
# Usage: boxPlotFactors title factors.tab outfile.pdf
library(lattice)   # NOTE(review): lattice does not appear to be used below
library(ggplot2)
args = commandArgs(TRUE)
if (length(args)==0){
stop("\n\nboxPlotFactors title factors.tab outfile.pdf\n\n")
}
titleRoot = args[1]    # prefix prepended to every plot title
fileName = args[2]     # tab-separated input with a header row
plotOutFile = args[3]  # destination PDF path
#titleRoot = "BLCA"
#fileName = "factors.tab"
#plotOutFile = "output.pdf"
classifiers = read.table(fileName,sep="\t",header=TRUE)
#classifiers
# Make median ordered factors...
# reorder() re-levels each factor by descending median roc, so boxplots
# appear best-to-worst from left to right.
bymedianFilter = with(classifiers, reorder(filter, -roc, median))
bymedianclassAttr = with(classifiers, reorder(classAttribute, -roc, median))
bymedianAttrSel = with(classifiers, reorder(attributeSelection, -roc, median))
bymedianNumAttributes = with(classifiers, reorder(factor(numAttributes), -roc, median))
bymedianClassifier = with(classifiers, reorder(classifier, -roc, median))
pdf(file=plotOutFile)
# Each bare "q+..." expression below is auto-printed at top level, which is
# what actually renders one page into the PDF device.
q = qplot(bymedianFilter,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Filter"))
q = qplot(bymedianclassAttr,roc,data=classifiers,geom="boxplot")
# NOTE(review): the title suffix is empty here -- possibly meant to read
# "Performance by Class Attribute"; confirm before changing.
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,""))
q = qplot(bymedianAttrSel,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Attribute Selection"))
q = qplot(bymedianNumAttributes,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Number of Attributes"))
q = qplot(bymedianClassifier,roc,data=classifiers,geom="boxplot")
q+theme(axis.text.x = element_text(angle=45,hjust=1))+labs(title=paste(titleRoot,"Performance by Classifier Type"))
garbage = dev.off()  # assign to suppress dev.off()'s printed return value
|
#' Haplotype Diversity
#' @description This function calculates haplotype diversity from a DNAbin
#'   sequence object (one aligned sequence per row).
#' @param x a DNAbin object
#' @return A numeric vector of length two: the number of haplotypes and the
#'   haplotype diversity of x.
#' @author Marcelo Gehara
#' @references Nei, M., & Tajima, F. (1981). DNA polymorphism detectable by restriction endonucleases. Genetics, 97, 145–163.
#' @note requires Pegas package
#' @export
H.div <- function(x) {
  n <- nrow(x)
  # Diversity is undefined for fewer than two sequences: the n/(n-1)
  # bias correction below would divide by zero.
  if (is.null(n) || n < 2) stop("x must contain at least two sequences")
  h <- pegas::haplotype(x)
  # attr(h, "index") is a list holding, per haplotype, the indices of the
  # sequences that carry it.
  hap <- attr(h, "index")
  n.hap <- length(hap)
  # Relative frequency of each haplotype; replaces the original
  # grow-in-a-loop construction (h.freqs <- c(h.freqs, freq)) with one
  # vectorised step.
  h.freqs <- lengths(hap) / n
  # Nei & Tajima (1981) unbiased haplotype diversity.
  H.d <- (n / (n - 1)) * (1 - sum(h.freqs^2))
  c(n.hap, H.d)
}
| /R/hap.div.R | no_license | gehara/PipeMaster | R | false | false | 676 | r | #' Haplotype Diversity
#' @description This function calculates haplotype diversity from a DNAbin
#'   sequence object (one aligned sequence per row).
#' @param x a DNAbin object
#' @return A numeric vector of length two: the number of haplotypes and the
#'   haplotype diversity of x.
#' @author Marcelo Gehara
#' @references Nei, M., & Tajima, F. (1981). DNA polymorphism detectable by restriction endonucleases. Genetics, 97, 145–163.
#' @note requires Pegas package
#' @export
H.div <- function(x) {
  n <- nrow(x)
  # Diversity is undefined for fewer than two sequences: the n/(n-1)
  # bias correction below would divide by zero.
  if (is.null(n) || n < 2) stop("x must contain at least two sequences")
  h <- pegas::haplotype(x)
  # attr(h, "index") is a list holding, per haplotype, the indices of the
  # sequences that carry it.
  hap <- attr(h, "index")
  n.hap <- length(hap)
  # Relative frequency of each haplotype; replaces the original
  # grow-in-a-loop construction (h.freqs <- c(h.freqs, freq)) with one
  # vectorised step.
  h.freqs <- lengths(hap) / n
  # Nei & Tajima (1981) unbiased haplotype diversity.
  H.d <- (n / (n - 1)) * (1 - sum(h.freqs^2))
  c(n.hap, H.d)
}
|
# Auto-extracted example script for the DoE.base "design" class and its
# accessor functions (factor.names, response.names, desnum, run.order,
# design.info, undesign/redesign).
library(DoE.base)
### Name: Class design and accessors
### Title: Class design and its accessor functions
### Aliases: design undesign redesign desnum desnum<- run.order run.order<-
### design.info design.info<- factor.names factor.names<- response.names
### response.names<- col.remove ord
### Keywords: array design
### ** Examples
# 12-run orthogonal array: two 2-level factors and one 6-level factor.
oa12 <- oa.design(nlevels=c(2,2,6))
#### Examples for factor.names and response.names
factor.names(oa12)
## rename factors
factor.names(oa12) <- c("First.Factor", "Second.Factor", "Third.Factor")
## rename factors and relabel levels of first two factors
# namen: named list of level labels -- "current"/"new" for the first two
# factors, "" keeps the third factor's default levels.
namen <- c(rep(list(c("current","new")),2),list(""))
names(namen) <- c("First.Factor", "Second.Factor", "Third.Factor")
factor.names(oa12) <- namen
oa12
## add a few variables to oa12
responses <- cbind(temp=sample(23:34),y1=rexp(12),y2=runif(12))
oa12 <- add.response(oa12, responses)
response.names(oa12)
## temp (for temperature) is not meant to be a response
## --> drop it from responselist but not from data
response.names(oa12) <- c("y1","y2")
## looking at attributes of the design
desnum(oa12)
run.order(oa12)
design.info(oa12)
## undesign and redesign
# undesign() strips the design attributes down to a plain data frame;
# redesign() reattaches them after the data frame has been modified.
u.oa12 <- undesign(oa12)
str(u.oa12)
u.oa12$new <- rnorm(12)
r.oa12 <- redesign(oa12, u.oa12)
## make known that new is also a response
response.names(r.oa12) <- c(response.names(r.oa12), "new")
## look at design-specific summary
summary(r.oa12)
## look at data frame style summary instead
summary.data.frame(r.oa12)
| /data/genthat_extracted_code/DoE.base/examples/class-design.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,538 | r | library(DoE.base)
# Auto-extracted example script for the DoE.base "design" class and its
# accessor functions (factor.names, response.names, desnum, run.order,
# design.info, undesign/redesign).
### Name: Class design and accessors
### Title: Class design and its accessor functions
### Aliases: design undesign redesign desnum desnum<- run.order run.order<-
### design.info design.info<- factor.names factor.names<- response.names
### response.names<- col.remove ord
### Keywords: array design
### ** Examples
# 12-run orthogonal array: two 2-level factors and one 6-level factor.
oa12 <- oa.design(nlevels=c(2,2,6))
#### Examples for factor.names and response.names
factor.names(oa12)
## rename factors
factor.names(oa12) <- c("First.Factor", "Second.Factor", "Third.Factor")
## rename factors and relabel levels of first two factors
# namen: named list of level labels -- "current"/"new" for the first two
# factors, "" keeps the third factor's default levels.
namen <- c(rep(list(c("current","new")),2),list(""))
names(namen) <- c("First.Factor", "Second.Factor", "Third.Factor")
factor.names(oa12) <- namen
oa12
## add a few variables to oa12
responses <- cbind(temp=sample(23:34),y1=rexp(12),y2=runif(12))
oa12 <- add.response(oa12, responses)
response.names(oa12)
## temp (for temperature) is not meant to be a response
## --> drop it from responselist but not from data
response.names(oa12) <- c("y1","y2")
## looking at attributes of the design
desnum(oa12)
run.order(oa12)
design.info(oa12)
## undesign and redesign
# undesign() strips the design attributes down to a plain data frame;
# redesign() reattaches them after the data frame has been modified.
u.oa12 <- undesign(oa12)
str(u.oa12)
u.oa12$new <- rnorm(12)
r.oa12 <- redesign(oa12, u.oa12)
## make known that new is also a response
response.names(r.oa12) <- c(response.names(r.oa12), "new")
## look at design-specific summary
summary(r.oa12)
## look at data frame style summary instead
summary.data.frame(r.oa12)
|
trains <- 6     # train ids run 0..trains (7 trains in total)
stations <- 27  # columns per .dat matrix, one per station

Args <- commandArgs()  # retrieve args
# NOTE(review): position 8 assumes one fixed Rscript invocation form;
# commandArgs(trailingOnly = TRUE) would be more robust.
folder <- Args[8]

# Go through all train files: for each train, read the run-by-station
# matrices and append one "station mean ci95" line per station for plotting.
for (t in 0:trains) {
  # Output files
  reg_outfile <- paste(folder, "/train_", t, "_reg_plot.txt", sep = "")
  hand_outfile <- paste(folder, "/train_", t, "_hand_plot.txt", sep = "")
  # Input files (rows = runs, columns = stations)
  reg_table <- read.table(paste(folder, "/train_", t, "_reg.dat", sep = ""), header = FALSE, sep = "\t")
  hand_table <- read.table(paste(folder, "/train_", t, "_hand.dat", sep = ""), header = FALSE, sep = "\t")
  # Hoist the run counts out of the station loop.
  n_reg <- nrow(reg_table)
  n_hand <- nrow(hand_table)
  for (s in seq_len(stations)) {
    # 95% confidence half-width = 1.96 * standard error of the mean.
    # (Replaces the original scalar-init-then-grow vectors, 1:stations
    # indexing, and redundant tbl[1:nrow(tbl), s] row subscripts.)
    reg_ci <- sd(reg_table[, s]) / sqrt(n_reg) * 1.96
    hand_ci <- sd(hand_table[, s]) / sqrt(n_hand) * 1.96
    write(c(s, mean(reg_table[, s]), reg_ci), file = reg_outfile, append = TRUE, sep = " ")
    write(c(s, mean(hand_table[, s]), hand_ci), file = hand_outfile, append = TRUE, sep = " ")
  }
}
q()
| /scripts/train_results.R | no_license | DevanR/RailwaySimulator | R | false | false | 1,262 | r |
trains <- 6     # train ids run 0..trains (7 trains in total)
stations <- 27  # columns per .dat matrix, one per station

Args <- commandArgs()  # retrieve args
# NOTE(review): position 8 assumes one fixed Rscript invocation form;
# commandArgs(trailingOnly = TRUE) would be more robust.
folder <- Args[8]

# Go through all train files: for each train, read the run-by-station
# matrices and append one "station mean ci95" line per station for plotting.
for (t in 0:trains) {
  # Output files
  reg_outfile <- paste(folder, "/train_", t, "_reg_plot.txt", sep = "")
  hand_outfile <- paste(folder, "/train_", t, "_hand_plot.txt", sep = "")
  # Input files (rows = runs, columns = stations)
  reg_table <- read.table(paste(folder, "/train_", t, "_reg.dat", sep = ""), header = FALSE, sep = "\t")
  hand_table <- read.table(paste(folder, "/train_", t, "_hand.dat", sep = ""), header = FALSE, sep = "\t")
  # Hoist the run counts out of the station loop.
  n_reg <- nrow(reg_table)
  n_hand <- nrow(hand_table)
  for (s in seq_len(stations)) {
    # 95% confidence half-width = 1.96 * standard error of the mean.
    # (Replaces the original scalar-init-then-grow vectors, 1:stations
    # indexing, and redundant tbl[1:nrow(tbl), s] row subscripts.)
    reg_ci <- sd(reg_table[, s]) / sqrt(n_reg) * 1.96
    hand_ci <- sd(hand_table[, s]) / sqrt(n_hand) * 1.96
    write(c(s, mean(reg_table[, s]), reg_ci), file = reg_outfile, append = TRUE, sep = " ")
    write(c(s, mean(hand_table[, s]), hand_ci), file = hand_outfile, append = TRUE, sep = " ")
  }
}
q()
|
# Plot 3: energy sub-metering over 2007-02-01/02, rendered to plot3.png.
# NOTE(review): an absolute setwd() is machine-specific; prefer running the
# script from the data directory.
setwd("/Users/ruchirpatel/Documents/R/Assignment3/ExData_Plotting1/Assignment")
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# NOTE(review): the raw UCI file names this column "Date"; "headerDate"
# assumes a preprocessed input -- confirm against the actual file header.
data$headerDate <- as.Date(data$headerDate, "%d/%m/%Y")
start <- as.Date("2007-02-01")
finish <- as.Date("2007-02-02")
data1 <- subset(data, data$headerDate == start | data$headerDate == finish)
# Missing readings are encoded as "?" in the raw file; treat them as 0.
data1$Global_active_power[data1$Global_active_power == "?"] <- "0"
data1$Global_active_power <- as.numeric(data1$Global_active_power)
# Combine date and time into a single POSIXct timestamp for the x axis.
data1$date <- as.POSIXct(paste(data1$headerDate, data1$Time))
data1$Sub_metering_1 <- as.numeric(data1$Sub_metering_1)
data1$Sub_metering_2 <- as.numeric(data1$Sub_metering_2)
data1$Sub_metering_3 <- as.numeric(data1$Sub_metering_3)
data1$Voltage <- as.numeric(data1$Voltage)  # was misspelled "Votage" (column unused below)
# Zero out NAs in the sub-metering columns. The original
# `data1[is.na(data1$Sub_metering_2)] <- 0` used a row-length logical as a
# *column* index, which errors whenever any NA is actually present.
data1$Sub_metering_2[is.na(data1$Sub_metering_2)] <- 0
data1$Sub_metering_3[is.na(data1$Sub_metering_3)] <- 0
png("plot3.png", width = 480, height = 480)
# na.rm guards the y-limit against NAs remaining in Sub_metering_1.
plot(data1$Sub_metering_1 ~ data1$date, type = "l", ylab = "Energy sub metering",
     ylim = c(0, max(data1$Sub_metering_1, data1$Sub_metering_2, data1$Sub_metering_3, na.rm = TRUE)))
# Pass x coordinates explicitly: lines(y) alone plots against the row index,
# which is far off-scale on a POSIXct axis.
lines(data1$date, data1$Sub_metering_2, type = "l", col = "red")
lines(data1$date, data1$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off() | /Plot3.R | no_license | ruchirpatel22/ExData_Plotting1 | R | false | false | 1,364 | r | setwd("/Users/ruchirpatel/Documents/R/Assignment3/ExData_Plotting1/Assignment")
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# NOTE(review): the raw UCI file names this column "Date"; "headerDate"
# assumes a preprocessed input -- confirm against the actual file header.
data$headerDate <- as.Date(data$headerDate, "%d/%m/%Y")
start <- as.Date("2007-02-01")
finish <- as.Date("2007-02-02")
data1 <- subset(data, data$headerDate == start | data$headerDate == finish)
# Missing readings are encoded as "?" in the raw file; treat them as 0.
data1$Global_active_power[data1$Global_active_power == "?"] <- "0"
data1$Global_active_power <- as.numeric(data1$Global_active_power)
# Combine date and time into a single POSIXct timestamp for the x axis.
data1$date <- as.POSIXct(paste(data1$headerDate, data1$Time))
data1$Sub_metering_1 <- as.numeric(data1$Sub_metering_1)
data1$Sub_metering_2 <- as.numeric(data1$Sub_metering_2)
data1$Sub_metering_3 <- as.numeric(data1$Sub_metering_3)
data1$Voltage <- as.numeric(data1$Voltage)  # was misspelled "Votage" (column unused below)
# Zero out NAs in the sub-metering columns. The original
# `data1[is.na(data1$Sub_metering_2)] <- 0` used a row-length logical as a
# *column* index, which errors whenever any NA is actually present.
data1$Sub_metering_2[is.na(data1$Sub_metering_2)] <- 0
data1$Sub_metering_3[is.na(data1$Sub_metering_3)] <- 0
png("plot3.png", width = 480, height = 480)
# na.rm guards the y-limit against NAs remaining in Sub_metering_1.
plot(data1$Sub_metering_1 ~ data1$date, type = "l", ylab = "Energy sub metering",
     ylim = c(0, max(data1$Sub_metering_1, data1$Sub_metering_2, data1$Sub_metering_3, na.rm = TRUE)))
# Pass x coordinates explicitly: lines(y) alone plots against the row index,
# which is far off-scale on a POSIXct axis.
lines(data1$date, data1$Sub_metering_2, type = "l", col = "red")
lines(data1$date, data1$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compareFit.R
\name{compareFit}
\alias{compareFit}
\title{Build an object summarizing fit indices across multiple models}
\usage{
compareFit(..., nested = TRUE, argsLRT = list(), indices = TRUE,
moreIndices = FALSE, baseline.model = NULL, nPrior = 1)
}
\arguments{
\item{...}{fitted \code{lavaan} models or list(s) of \code{lavaan} objects.
\code{\linkS4class{lavaan.mi}} objects are also accepted, but all models
must belong to the same class.}
\item{nested}{\code{logical} indicating whether the models in \code{...} are
nested. See \code{\link{net}} for an empirical test of nesting.}
\item{argsLRT}{\code{list} of arguments to pass to
\code{\link[lavaan]{lavTestLRT}}, as well as to
\code{\link{lavTestLRT.mi}} and \code{\link{fitMeasures}} when
comparing \code{\linkS4class{lavaan.mi}} models.}
\item{indices}{\code{logical} indicating whether to return fit indices from
the \code{\link[lavaan]{fitMeasures}} function. Selecting particular
indices is controlled in the \code{summary} method; see
\code{\linkS4class{FitDiff}}.}
\item{moreIndices}{\code{logical} indicating whether to return fit indices
from the \code{\link{moreFitIndices}} function. Selecting particular
indices is controlled in the \code{summary} method; see
\code{\linkS4class{FitDiff}}.}
\item{baseline.model}{optional fitted \code{\linkS4class{lavaan}} model
passed to \code{\link[lavaan]{fitMeasures}} to calculate incremental fit
indices.}
\item{nPrior}{passed to \code{\link{moreFitIndices}}, if relevant}
}
\value{
A \code{\linkS4class{FitDiff}} object that saves model fit
comparisons across multiple models. If the models are not nested, only
fit indices for each model are returned. If the models are nested, the
differences in fit indices are additionally returned, as well as test
statistics comparing each sequential pair of models (ordered by their
degrees of freedom).
}
\description{
This function will create the template to compare fit indices across
multiple fitted lavaan objects. The results can be exported to a clipboard
or a file later.
}
\examples{
HS.model <- ' visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 '
## non-nested models
fit1 <- cfa(HS.model, data = HolzingerSwineford1939)
m2 <- ' f1 =~ x1 + x2 + x3 + x4
f2 =~ x5 + x6 + x7 + x8 + x9 '
fit2 <- cfa(m2, data = HolzingerSwineford1939)
(out1 <- compareFit(fit1, fit2, nested = FALSE))
summary(out1)
## nested model comparisons: measurement equivalence/invariance
fit.config <- cfa(HS.model, data = HolzingerSwineford1939, group = "school")
fit.metric <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = "loadings")
fit.scalar <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = c("loadings","intercepts"))
fit.strict <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = c("loadings","intercepts","residuals"))
measEqOut <- compareFit(fit.config, fit.metric, fit.scalar, fit.strict,
moreIndices = TRUE) # include moreFitIndices()
summary(measEqOut)
summary(measEqOut, fit.measures = "all")
summary(measEqOut, fit.measures = c("aic", "bic", "sic", "ibic"))
\dontrun{
## also applies to lavaan.mi objects (fit model to multiple imputations)
set.seed(12345)
HSMiss <- HolzingerSwineford1939[ , paste("x", 1:9, sep = "")]
HSMiss$x5 <- ifelse(HSMiss$x1 <= quantile(HSMiss$x1, .3), NA, HSMiss$x5)
HSMiss$x9 <- ifelse(is.na(HSMiss$x5), NA, HSMiss$x9)
HSMiss$school <- HolzingerSwineford1939$school
library(Amelia)
HS.amelia <- amelia(HSMiss, m = 20, noms = "school")
imps <- HS.amelia$imputations
## request robust test statistics
mgfit2 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm")
mgfit1 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
group.equal = "loadings")
mgfit0 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
group.equal = c("loadings","intercepts"))
## request the strictly-positive robust test statistics
out2 <- compareFit(scalar = mgfit0, metric = mgfit1, config = mgfit2,
argsLRT = list(asymptotic = TRUE,
method = "satorra.bentler.2010"))
## note that moreFitIndices() does not work for lavaan.mi objects, but the
## fitMeasures() method for lavaan.mi objects already returns gammaHat(s)
summary(out2, fit.measures = c("ariv","fmi","df","crmr","srmr",
"cfi.robust","tli.robust",
"adjGammaHat.scaled","rmsea.ci.lower.robust",
"rmsea.robust","rmsea.ci.upper.robust"))
}
}
\seealso{
\code{\linkS4class{FitDiff}}, \code{\link{clipboard}}
}
\author{
Terrence D. Jorgensen (University of Amsterdam;
\email{TJorgensen314@gmail.com})
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
| /semTools/man/compareFit.Rd | no_license | simsem/semTools | R | false | true | 5,030 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compareFit.R
\name{compareFit}
\alias{compareFit}
\title{Build an object summarizing fit indices across multiple models}
\usage{
compareFit(..., nested = TRUE, argsLRT = list(), indices = TRUE,
moreIndices = FALSE, baseline.model = NULL, nPrior = 1)
}
\arguments{
\item{...}{fitted \code{lavaan} models or list(s) of \code{lavaan} objects.
\code{\linkS4class{lavaan.mi}} objects are also accepted, but all models
must belong to the same class.}
\item{nested}{\code{logical} indicating whether the models in \code{...} are
nested. See \code{\link{net}} for an empirical test of nesting.}
\item{argsLRT}{\code{list} of arguments to pass to
\code{\link[lavaan]{lavTestLRT}}, as well as to
\code{\link{lavTestLRT.mi}} and \code{\link{fitMeasures}} when
comparing \code{\linkS4class{lavaan.mi}} models.}
\item{indices}{\code{logical} indicating whether to return fit indices from
the \code{\link[lavaan]{fitMeasures}} function. Selecting particular
indices is controlled in the \code{summary} method; see
\code{\linkS4class{FitDiff}}.}
\item{moreIndices}{\code{logical} indicating whether to return fit indices
from the \code{\link{moreFitIndices}} function. Selecting particular
indices is controlled in the \code{summary} method; see
\code{\linkS4class{FitDiff}}.}
\item{baseline.model}{optional fitted \code{\linkS4class{lavaan}} model
passed to \code{\link[lavaan]{fitMeasures}} to calculate incremental fit
indices.}
\item{nPrior}{passed to \code{\link{moreFitIndices}}, if relevant}
}
\value{
A \code{\linkS4class{FitDiff}} object that saves model fit
comparisons across multiple models. If the models are not nested, only
fit indices for each model are returned. If the models are nested, the
differences in fit indices are additionally returned, as well as test
statistics comparing each sequential pair of models (ordered by their
degrees of freedom).
}
\description{
This function will create the template to compare fit indices across
multiple fitted lavaan objects. The results can be exported to a clipboard
or a file later.
}
\examples{
HS.model <- ' visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 '
## non-nested models
fit1 <- cfa(HS.model, data = HolzingerSwineford1939)
m2 <- ' f1 =~ x1 + x2 + x3 + x4
f2 =~ x5 + x6 + x7 + x8 + x9 '
fit2 <- cfa(m2, data = HolzingerSwineford1939)
(out1 <- compareFit(fit1, fit2, nested = FALSE))
summary(out1)
## nested model comparisons: measurement equivalence/invariance
fit.config <- cfa(HS.model, data = HolzingerSwineford1939, group = "school")
fit.metric <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = "loadings")
fit.scalar <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = c("loadings","intercepts"))
fit.strict <- cfa(HS.model, data = HolzingerSwineford1939, group = "school",
group.equal = c("loadings","intercepts","residuals"))
measEqOut <- compareFit(fit.config, fit.metric, fit.scalar, fit.strict,
moreIndices = TRUE) # include moreFitIndices()
summary(measEqOut)
summary(measEqOut, fit.measures = "all")
summary(measEqOut, fit.measures = c("aic", "bic", "sic", "ibic"))
\dontrun{
## also applies to lavaan.mi objects (fit model to multiple imputations)
set.seed(12345)
HSMiss <- HolzingerSwineford1939[ , paste("x", 1:9, sep = "")]
HSMiss$x5 <- ifelse(HSMiss$x1 <= quantile(HSMiss$x1, .3), NA, HSMiss$x5)
HSMiss$x9 <- ifelse(is.na(HSMiss$x5), NA, HSMiss$x9)
HSMiss$school <- HolzingerSwineford1939$school
library(Amelia)
HS.amelia <- amelia(HSMiss, m = 20, noms = "school")
imps <- HS.amelia$imputations
## request robust test statistics
mgfit2 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm")
mgfit1 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
group.equal = "loadings")
mgfit0 <- cfa.mi(HS.model, data = imps, group = "school", estimator = "mlm",
group.equal = c("loadings","intercepts"))
## request the strictly-positive robust test statistics
out2 <- compareFit(scalar = mgfit0, metric = mgfit1, config = mgfit2,
argsLRT = list(asymptotic = TRUE,
method = "satorra.bentler.2010"))
## note that moreFitIndices() does not work for lavaan.mi objects, but the
## fitMeasures() method for lavaan.mi objects already returns gammaHat(s)
summary(out2, fit.measures = c("ariv","fmi","df","crmr","srmr",
"cfi.robust","tli.robust",
"adjGammaHat.scaled","rmsea.ci.lower.robust",
"rmsea.robust","rmsea.ci.upper.robust"))
}
}
\seealso{
\code{\linkS4class{FitDiff}}, \code{\link{clipboard}}
}
\author{
Terrence D. Jorgensen (University of Amsterdam;
\email{TJorgensen314@gmail.com})
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
|
# File: 06_samtools_array_job.R
# Auth: umar.niazi@kcl.as.uk
# DESC: create a parameter file and shell script to run array job on hpc
# Date: 18/08/2017
## set variables and source libraries
source('header.R')
## connect to mysql database to get sample information
library('RMySQL')
##### connect to mysql database to get samples
# NOTE(review): credentials are hard-coded; consider moving them to a
# config file or environment variables.
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)
# check how many files each sample has
# g_did (data set id) is presumably defined in header.R; the SQL below
# hard-codes idData = 15 instead of using it -- TODO confirm the two agree.
g_did
q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample
where (Sample.idData = 15 and File.idSample = Sample.id) group by File.idSample')
dfQuery = dbGetQuery(db, q)
dfQuery$title = gsub(" ", "", dfQuery$title, fixed = T)
dfQuery
# get the count of files
q = paste0('select File.*, Sample.idData from File, Sample
where (Sample.idData = 15) and (File.idSample = Sample.id) and (File.type = "fastq")')
dfCounts = dbGetQuery(db, q)
head(dfCounts)
nrow(dfCounts)
# for each sample id, get the corresponding files
# one query per sample id in dfQuery$SampleID (paste0 vectorises over it)
cvQueries = paste0('select File.*, Sample.title from File, Sample
where (Sample.idData = 15 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id) and
(File.type = "fastq")')
# set header variables
# SGE/qsub directives written verbatim at the top of the generated script.
cvShell = '#!/bin/bash'
cvShell.2 = '#$ -S /bin/bash'
cvProcessors = '#$ -pe smp 3'
cvWorkingDir = '#$ -cwd'
cvJobName = '#$ -N samtools-array'
cvStdout = '#$ -j y'
cvMemoryReserve = '#$ -l h_vmem=19G'
# one array task per paired-end sample (two fastq files per sample)
cvArrayJob = paste0('#$ -t 1-', nrow(dfCounts)/2)
# using high memory queue with one slot and 19 Gigs of memory
# set the directory names
cvInput = 'input/'
# NOTE(review): cvSam is never used below; the generated script relies on
# "module load" putting samtools on the PATH.
cvSam = '/opt/apps/bioinformatics/samtools/1.3.1/bin/samtools'
cvPicard = '/opt/apps/bioinformatics/picard-tools/2.2.1/picard.jar'
# create a parameter file and shell script
dir.create('AutoScripts')
oFile.param = file('AutoScripts/samtools_param.txt', 'wt')
# For each sample: look up its fastq files, derive the trimmed bismark bam
# name from the R1 fastq name, write one whitespace-separated line of six
# file paths to the parameter file (read per-task by the array job), and
# return a data frame describing the new files for the database.
temp = lapply(cvQueries, function(x){
  # get the file names
  dfFiles = dbGetQuery(db, x)
  # check for null return
  if (nrow(dfFiles) == 0) return();
  # remove white space from title
  dfFiles$title = gsub(" ", "", dfFiles$title, fixed=T)
  # split the file names into paired end 1 and 2, identified by R1 and R2 in the file name
  f = dfFiles$name
  d = grepl('_R1_', f)
  d = as.character(d)
  d[d == 'TRUE'] = 'R1'
  d[d == 'FALSE'] = 'R2'
  lf = split(f, d)
  ## no sam files made this time by bismark so skip this
  ## write samtools command variables
  # in.s1 = paste0(cvInput, lf[[1]], '.sam')
  # output file in bam format
  ## bismark creates its own name so add those changes
  # NOTE(review): the gsub pattern '.fastq.gz' is an unescaped regex; it
  # works here because '.' also matches the literal dots, but fixed=T
  # would be safer.
  lf[[1]] = gsub('.fastq.gz', '_bismark_bt2_pe.bam', lf[[1]])
  lf[[1]] = paste('trim_', lf[[1]], sep='')
  s2 = paste0(cvInput, lf[[1]])
  # remove low quality reads below 10
  s3 = paste0(cvInput, lf[[1]], '_q10.bam')
  # sort the file
  s4 = paste0(cvInput, lf[[1]], '_q10_sort.bam')
  # remove duplicates
  s5 = paste0(cvInput, lf[[1]], '_q10_sort_rd.bam')
  s6 = paste0(cvInput, lf[[1]], '_q10_sort_rd.report.txt')
  s7 = paste0(cvInput, lf[[1]], '_q10_sort_rd_sort2.bam')
  p1 = paste(s2, s3, s4, s5, s6, s7, sep=' ')
  writeLines(p1, oFile.param)
  return(data.frame(idSample=dfFiles$idSample[1], name=c(s2, s4, s5), type=c('original bam', 'quality 10 sorted bam',
                                                                             'quality 10 sorted bam duplicates removed'),
                    group1=dfFiles$group1[1]))
})
close(oFile.param)
# drop samples that returned NULL (no fastq files found)
temp[sapply(temp, is.null)] = NULL
dfNewData = do.call(rbind, temp)
rownames(dfNewData) = NULL
# remove the word input/ from file name
dfNewData$name = gsub('input/(\\w+)', '\\1', dfNewData$name, perl=T)
oFile = file('AutoScripts/samtools.sh', 'wt')
writeLines(c('# Autogenerated script from write_samtools_script.R', paste('# date', date())), oFile)
writeLines(c('# make sure directory paths exist before running script'), oFile)
writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile)
writeLines('\n\n', oFile)
# module load
writeLines(c('module load bioinformatics/samtools/1.3.1'), oFile)
writeLines(c('module load bioinformatics/picard-tools/2.2.1'), oFile)
writeLines('\n\n', oFile)
## write array job lines
# The embedded shell fragment below selects line $SGE_TASK_ID of the
# parameter file and splits its six fields into shell variables.
writeLines("# Parse parameter file to get variables.
number=$SGE_TASK_ID
paramfile=samtools_param.txt
bamfile=`sed -n ${number}p $paramfile | awk '{print $1}'`
bamq10=`sed -n ${number}p $paramfile | awk '{print $2}'`
bamq10sort=`sed -n ${number}p $paramfile | awk '{print $3}'`
bamrd=`sed -n ${number}p $paramfile | awk '{print $4}'`
rdreport=`sed -n ${number}p $paramfile | awk '{print $5}'`
bamrdsort2=`sed -n ${number}p $paramfile | awk '{print $6}'`
# 9. Run the program. NOTE: using Picard tools for coordinate sorting for bismark compatibility", oFile)
# remove low quality reads
p1 = paste('samtools view -b -q 10', '$bamfile', '>', '$bamq10', sep=' ')
com2 = paste(p1)
# sort the file
p1 = paste('java -Xmx30G -jar', cvPicard, 'SortSam OUTPUT=$bamq10sort',
           'INPUT=$bamq10 SORT_ORDER=coordinate VALIDATION_STRINGENCY=SILENT',
           sep=' ')
com3 = paste(p1)
# remove duplicates, for paired end reads
p1 = paste('java -Xmx30G -jar', cvPicard, 'MarkDuplicates I=$bamq10sort', 'O=$bamrd', 'M=$rdreport',
           'REMOVE_DUPLICATES=true VALIDATION_STRINGENCY=SILENT', sep=' ')
com4 = paste(p1)
# sort the file second time for using with bismark methylation extractor
p1 = paste('java -Xmx30G -jar', cvPicard, 'SortSam OUTPUT=$bamrdsort2',
           'INPUT=$bamrd SORT_ORDER=queryname VALIDATION_STRINGENCY=SILENT',
           sep=' ')
com5 = paste(p1)
# create index
## this step is done only on the coordinate sorted bam files
p1 = paste('samtools index', '$bamrd', sep=' ')
com6 = paste(p1)
writeLines(c(com2, com3, com4, com5, com6), oFile)
writeLines('\n\n', oFile)
close(oFile)
dbDisconnect(db)
### update database with the file names
# dfNewData$group1 = 'Generated from Trimmomatic standard input for bisulphite seq data S126'
# dbWriteTable(db, name='File', value = dfNewData, append=T, row.names=F)
# dbDisconnect(db) | /S126/06_samtools_array_job.R | permissive | uhkniazi/BRC_NeuralTube_Miho | R | false | false | 6,162 | r | # File: 06_samtools_array_job.R
# Auth: umar.niazi@kcl.as.uk
# DESC: create a parameter file and shell script to run array job on hpc
# Date: 18/08/2017
## set variables and source libraries
source('header.R')
## connect to mysql database to get sample information
library('RMySQL')
##### connect to mysql database to get samples
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)
# check how many files each sample has
g_did
q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample
where (Sample.idData = 15 and File.idSample = Sample.id) group by File.idSample')
dfQuery = dbGetQuery(db, q)
dfQuery$title = gsub(" ", "", dfQuery$title, fixed = T)
dfQuery
# get the count of files
q = paste0('select File.*, Sample.idData from File, Sample
where (Sample.idData = 15) and (File.idSample = Sample.id) and (File.type = "fastq")')
dfCounts = dbGetQuery(db, q)
head(dfCounts)
nrow(dfCounts)
# for each sample id, get the corresponding files
cvQueries = paste0('select File.*, Sample.title from File, Sample
where (Sample.idData = 15 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id) and
(File.type = "fastq")')
# set header variables
cvShell = '#!/bin/bash'
cvShell.2 = '#$ -S /bin/bash'
cvProcessors = '#$ -pe smp 3'
cvWorkingDir = '#$ -cwd'
cvJobName = '#$ -N samtools-array'
cvStdout = '#$ -j y'
cvMemoryReserve = '#$ -l h_vmem=19G'
cvArrayJob = paste0('#$ -t 1-', nrow(dfCounts)/2)
# using high memory queue with one slot and 19 Gigs of memory
# set the directory names
cvInput = 'input/'
cvSam = '/opt/apps/bioinformatics/samtools/1.3.1/bin/samtools'
cvPicard = '/opt/apps/bioinformatics/picard-tools/2.2.1/picard.jar'
# create a parameter file and shell script
dir.create('AutoScripts')
oFile.param = file('AutoScripts/samtools_param.txt', 'wt')
# For each per-sample query: fetch the sample's fastq file rows, derive the
# bam file names that trimming + bismark will have produced, and append one
# whitespace-separated 6-field line to the SGE parameter file.  Returns a
# small data.frame describing the generated files, or NULL (bare return())
# when the sample has no files; NULL entries are dropped by the caller.
temp = lapply(cvQueries, function(x){
# get the file names
dfFiles = dbGetQuery(db, x)
# check for null return
if (nrow(dfFiles) == 0) return();
# remove white space from title
dfFiles$title = gsub(" ", "", dfFiles$title, fixed=T)
# split the file names into paired end 1 and 2, identified by R1 and R2 in the file name
f = dfFiles$name
d = grepl('_R1_', f)
d = as.character(d)
d[d == 'TRUE'] = 'R1'
d[d == 'FALSE'] = 'R2'
# split() orders groups alphabetically, so lf[[1]] is the R1 file set.
lf = split(f, d)
## no sam files made this time by bismark so skip this
## write samtools command variables
# in.s1 = paste0(cvInput, lf[[1]], '.sam')
# output file in bam format
## bismark creates its own name so add those changes
# NOTE(review): pattern is an unanchored regex where '.' matches any
# character (fixed=TRUE not set); works here but is fragile.
lf[[1]] = gsub('.fastq.gz', '_bismark_bt2_pe.bam', lf[[1]])
lf[[1]] = paste('trim_', lf[[1]], sep='')
s2 = paste0(cvInput, lf[[1]])
# remove low quality reads below 10
s3 = paste0(cvInput, lf[[1]], '_q10.bam')
# sort the file
s4 = paste0(cvInput, lf[[1]], '_q10_sort.bam')
# remove duplicates
s5 = paste0(cvInput, lf[[1]], '_q10_sort_rd.bam')
s6 = paste0(cvInput, lf[[1]], '_q10_sort_rd.report.txt')
s7 = paste0(cvInput, lf[[1]], '_q10_sort_rd_sort2.bam')
# One parameter line: input bam, q10 bam, sorted bam, dedup bam, report, resort.
p1 = paste(s2, s3, s4, s5, s6, s7, sep=' ')
writeLines(p1, oFile.param)
# NOTE(review): assumes dfFiles has a group1 column (selected via File.*);
# confirm the File table schema.
return(data.frame(idSample=dfFiles$idSample[1], name=c(s2, s4, s5), type=c('original bam', 'quality 10 sorted bam',
'quality 10 sorted bam duplicates removed'),
group1=dfFiles$group1[1]))
})
close(oFile.param)
# Drop samples that produced no files, then stack the per-sample summaries.
temp[sapply(temp, is.null)] = NULL
dfNewData = do.call(rbind, temp)
rownames(dfNewData) = NULL
# remove the word input/ from file name
dfNewData$name = gsub('input/(\\w+)', '\\1', dfNewData$name, perl=T)
# Write the autogenerated SGE array-job shell script.
oFile = file('AutoScripts/samtools.sh', 'wt')
writeLines(c('# Autogenerated script from write_samtools_script.R', paste('# date', date())), oFile)
writeLines(c('# make sure directory paths exist before running script'), oFile)
writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile)
writeLines('\n\n', oFile)
# module load
writeLines(c('module load bioinformatics/samtools/1.3.1'), oFile)
writeLines(c('module load bioinformatics/picard-tools/2.2.1'), oFile)
writeLines('\n\n', oFile)
## write array job lines
# Emit the bash snippet that reads the SGE_TASK_ID-th line of the parameter
# file and splits it into the six shell variables used by the commands below.
writeLines("# Parse parameter file to get variables.
number=$SGE_TASK_ID
paramfile=samtools_param.txt
bamfile=`sed -n ${number}p $paramfile | awk '{print $1}'`
bamq10=`sed -n ${number}p $paramfile | awk '{print $2}'`
bamq10sort=`sed -n ${number}p $paramfile | awk '{print $3}'`
bamrd=`sed -n ${number}p $paramfile | awk '{print $4}'`
rdreport=`sed -n ${number}p $paramfile | awk '{print $5}'`
bamrdsort2=`sed -n ${number}p $paramfile | awk '{print $6}'`
# 9. Run the program. NOTE: using Picard tools for coordinate sorting for bismark compatibility", oFile)
# remove low quality reads
p1 = paste('samtools view -b -q 10', '$bamfile', '>', '$bamq10', sep=' ')
# NOTE(review): paste(p1) on a single string is a no-op; com2..com6 are
# just aliases of the corresponding p1 values.
com2 = paste(p1)
# sort the file
p1 = paste('java -Xmx30G -jar', cvPicard, 'SortSam OUTPUT=$bamq10sort',
'INPUT=$bamq10 SORT_ORDER=coordinate VALIDATION_STRINGENCY=SILENT',
sep=' ')
com3 = paste(p1)
# remove duplicates, for paired end reads
p1 = paste('java -Xmx30G -jar', cvPicard, 'MarkDuplicates I=$bamq10sort', 'O=$bamrd', 'M=$rdreport',
'REMOVE_DUPLICATES=true VALIDATION_STRINGENCY=SILENT', sep=' ')
com4 = paste(p1)
# sort the file second time for using with bismark methylation extractor
p1 = paste('java -Xmx30G -jar', cvPicard, 'SortSam OUTPUT=$bamrdsort2',
'INPUT=$bamrd SORT_ORDER=queryname VALIDATION_STRINGENCY=SILENT',
sep=' ')
com5 = paste(p1)
# create index
## this step is done only on the coordinate sorted bam files
p1 = paste('samtools index', '$bamrd', sep=' ')
com6 = paste(p1)
writeLines(c(com2, com3, com4, com5, com6), oFile)
writeLines('\n\n', oFile)
close(oFile)
dbDisconnect(db)
### update database with the file names
# dfNewData$group1 = 'Generated from Trimmomatic standard input for bisulphite seq data S126'
# dbWriteTable(db, name='File', value = dfNewData, append=T, row.names=F)
# dbDisconnect(db) |
twin.cells <- function(training_data) {
  # Build positive ("twin") training examples.  For every row of
  # training_data, scan the "ground" barcode string for pairs of
  # 10-character cell barcodes whose leading "_" separators sit 15, 16 or
  # 17 positions apart, and tally per-position genotype agreement:
  #   not.mut  - both characters are "0"
  #   same     - both "1" or both "2"
  #   one.mut  - exactly one of the pair is "0"
  #   both.mut - one "1" and one "2"
  # Each detected pair yields one row with rf = 1.
  rows <- list()
  for (ii in seq_len(nrow(training_data))) {
    chars <- strsplit(as.character(training_data[ii, ]$ground), "")[[1]]
    # 1 where the character is the separator "_", 0 elsewhere.
    is_sep <- match(chars, c("_"))
    is_sep[is.na(is_sep)] <- 0
    # 1:(len - 18) kept from the original scan window (assumes len >= 19).
    for (i in 1:(length(is_sep) - 18)) {
      # The three admissible separator offsets (15/16/17) correspond to the
      # three explicit branches of the original implementation, in order.
      for (off in 0:2) {
        if (is_sep[i] == 1 & is_sep[i + 15 + off] == 1) {
          a <- chars[(i + 1):(i + 10)]
          b <- chars[(i + 16 + off):(i + 25 + off)]
          # Vectorized version of the original 9-way pairwise tally; any
          # character outside {"0","1","2"} contributes to no bucket.
          cnt <- as.numeric(c(
            sum(a == "0" & b == "0"),
            sum((a == "1" & b == "1") | (a == "2" & b == "2")),
            sum((a == "0" & (b == "1" | b == "2")) |
                  (b == "0" & (a == "1" | a == "2"))),
            sum((a == "1" & b == "2") | (a == "2" & b == "1"))
          ))
          rows[[length(rows) + 1]] <- data.frame(
            not.mut = cnt[1], same = cnt[2],
            one.mut = cnt[3], both.mut = cnt[4], rf = 1
          )
        }
      }
    }
  }
  # Preserve the original empty result (0-row, 0-col data.frame).
  if (length(rows) == 0) {
    return(data.frame(not.mut = c(), same = c(), one.mut = c(),
                      both.mut = c(), rf = c()))
  }
  do.call(rbind, rows)
}
not.twin.cells <- function(train.tw, train) {
  # Build negative ("not twin") training examples: score every within-element
  # pair of barcodes from `train` (column 2 of each element holds the
  # 10-character barcode string) exactly as twin.cells() does, and keep only
  # the pairs whose count signature does NOT occur anywhere in the twin
  # table train.tw.  Kept rows are labelled rf = 0.
  uniq.train.tw <- unique(train.tw)
  rows <- list()
  for (ii in seq_along(train)) {
    xx <- train[[ii]]
    n_bc <- nrow(xx)
    for (j1 in seq_len(n_bc)) {
      if (j1 < n_bc) {
        for (j2 in (j1 + 1):n_bc) {
          # Only the first 10 characters of each barcode are compared.
          a <- strsplit(xx[j1, 2], "")[[1]][1:10]
          b <- strsplit(xx[j2, 2], "")[[1]][1:10]
          cnt <- as.numeric(c(
            sum(a == "0" & b == "0"),
            sum((a == "1" & b == "1") | (a == "2" & b == "2")),
            sum((a == "0" & (b == "1" | b == "2")) |
                  (b == "0" & (a == "1" | a == "2"))),
            sum((a == "1" & b == "2") | (a == "2" & b == "1"))
          ))
          # Keep the pair only when its signature is absent from the twin set.
          seen <- any(uniq.train.tw[, 1] == cnt[1] &
                        uniq.train.tw[, 2] == cnt[2] &
                        uniq.train.tw[, 3] == cnt[3] &
                        uniq.train.tw[, 4] == cnt[4])
          if (!seen) {
            rows[[length(rows) + 1]] <- data.frame(
              not.mut = cnt[1], same = cnt[2],
              one.mut = cnt[3], both.mut = cnt[4], rf = 0
            )
          }
        }
      }
    }
  }
  # Preserve the original empty result (0-row, 0-col data.frame).
  if (length(rows) == 0) {
    return(data.frame(not.mut = c(), same = c(), one.mut = c(),
                      both.mut = c(), rf = c()))
  }
  do.call(rbind, rows)
}
| /train.R | no_license | rretkute/ml_cell_lineage_reconstruction | R | false | false | 3,877 | r |
# Build positive ("twin", rf = 1) training examples: scan each row's
# "ground" string for "_"-separated 10-character barcodes lying 15, 16 or
# 17 positions apart and count per-position genotype agreement.
# (Duplicate of the twin.cells definition appearing earlier in this file.)
twin.cells<- function(training_data){
train.tw<-data.frame(not.mut=c(), same=c(), one.mut=c(), both.mut=c(), rf=c())
for(ii in 1:nrow(training_data)){
xx = as.character(training_data[ii,]$ground)
x1<-strsplit(xx,"")[[1]]
# x2[k] is 1 where x1[k] is the separator "_", 0 otherwise.
x2<-match(x1,c("_"))
x2[is.na(x2)]<-0
for(i in 1:(length(x2)-18)){
# Branch 1: separators 15 apart; barcodes at i+1..i+10 and i+16..i+25
# (the t2 slice grabs 11 chars but only the first 10 are compared).
if(x2[i]==1 & x2[i+15]==1) {
t1<-x1[(i+1):(i+10)]
t2<-x1[(i+16):(i+26)]
# cnt: [1] both "0", [2] same non-zero, [3] exactly one "0", [4] "1"/"2" mix.
cnt<-c(0,0,0,0)
for(j in 1:10){
if(t1[j]==0 & t2[j]==0) cnt[1]<-cnt[1]+1
if(t1[j]==1 & t2[j]==1) cnt[2]<-cnt[2]+1
if(t1[j]==2 & t2[j]==2) cnt[2]<-cnt[2]+1
if(t1[j]==0 & t2[j]==1) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==0 & t2[j]==2) cnt[3]<-cnt[3]+1
if(t1[j]==2 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==2) cnt[4]<-cnt[4]+1
if(t1[j]==2 & t2[j]==1) cnt[4]<-cnt[4]+1
}
train.tw<-rbind(train.tw, data.frame(not.mut=cnt[1], same=cnt[2], one.mut=cnt[3], both.mut=cnt[4], rf=1))
}
# Branch 2: separators 16 apart.
if(x2[i]==1 & x2[i+16]==1) {
t1<-x1[(i+1):(i+10)]
t2<-x1[(i+17):(i+27)]
cnt<-c(0,0,0,0)
for(j in 1:10){
if(t1[j]==0 & t2[j]==0) cnt[1]<-cnt[1]+1
if(t1[j]==1 & t2[j]==1) cnt[2]<-cnt[2]+1
if(t1[j]==2 & t2[j]==2) cnt[2]<-cnt[2]+1
if(t1[j]==0 & t2[j]==1) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==0 & t2[j]==2) cnt[3]<-cnt[3]+1
if(t1[j]==2 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==2) cnt[4]<-cnt[4]+1
if(t1[j]==2 & t2[j]==1) cnt[4]<-cnt[4]+1
}
train.tw<-rbind(train.tw, data.frame(not.mut=cnt[1], same=cnt[2], one.mut=cnt[3], both.mut=cnt[4], rf=1))
}
# Branch 3: separators 17 apart.
if(x2[i]==1 & x2[i+17]==1) {
t1<-x1[(i+1):(i+10)]
t2<-x1[(i+18):(i+28)]
cnt<-c(0,0,0,0)
for(j in 1:10){
if(t1[j]==0 & t2[j]==0) cnt[1]<-cnt[1]+1
if(t1[j]==1 & t2[j]==1) cnt[2]<-cnt[2]+1
if(t1[j]==2 & t2[j]==2) cnt[2]<-cnt[2]+1
if(t1[j]==0 & t2[j]==1) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==0 & t2[j]==2) cnt[3]<-cnt[3]+1
if(t1[j]==2 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==2) cnt[4]<-cnt[4]+1
if(t1[j]==2 & t2[j]==1) cnt[4]<-cnt[4]+1
}
train.tw<-rbind(train.tw, data.frame(not.mut=cnt[1], same=cnt[2], one.mut=cnt[3], both.mut=cnt[4], rf=1))
}
}
}
return(train.tw)
}
# Build negative ("not twin", rf = 0) training examples: every within-element
# barcode pair from `train` (column 2 holds the barcode string) is scored
# like twin.cells(); pairs whose count signature appears in train.tw are
# discarded.  (Duplicate of the definition appearing earlier in this file.)
not.twin.cells<-function(train.tw, train){
uniq.train.tw<-unique(train.tw)
train.ntw<-data.frame(not.mut=c(), same=c(), one.mut=c(), both.mut=c(), rf=c())
for(ii in 1:length(train)){
xx<-train[[ii]]
# All unordered pairs (j1, j2) with j1 < j2.
for(j1 in 1:nrow(xx)){
if(j1<nrow(xx)){
for(j2 in (j1+1):nrow(xx)){
# cnt: [1] both "0", [2] same non-zero, [3] exactly one "0", [4] "1"/"2" mix.
cnt<-c(0,0,0,0)
t1<-strsplit(xx[j1,2],"")[[1]]
t2<-strsplit(xx[j2,2],"")[[1]]
for(j in 1:10){
if(t1[j]==0 & t2[j]==0) cnt[1]<-cnt[1]+1
if(t1[j]==1 & t2[j]==1) cnt[2]<-cnt[2]+1
if(t1[j]==2 & t2[j]==2) cnt[2]<-cnt[2]+1
if(t1[j]==0 & t2[j]==1) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==0 & t2[j]==2) cnt[3]<-cnt[3]+1
if(t1[j]==2 & t2[j]==0) cnt[3]<-cnt[3]+1
if(t1[j]==1 & t2[j]==2) cnt[4]<-cnt[4]+1
if(t1[j]==2 & t2[j]==1) cnt[4]<-cnt[4]+1
}
# ac.tw stays TRUE only if the signature is absent from the twin table.
ac.tw<-T
for(j in 1:nrow(uniq.train.tw)){
if(uniq.train.tw[j,1]==cnt[1] & uniq.train.tw[j,2]==cnt[2] & uniq.train.tw[j,3]==cnt[3] & uniq.train.tw[j,4]==cnt[4]) ac.tw<-F
}
if(ac.tw) {
train.ntw<-rbind(train.ntw, data.frame(not.mut=cnt[1], same=cnt[2], one.mut=cnt[3], both.mut=cnt[4], rf=0))
}
}
}
}
}
return(train.ntw)
}
|
# Quarterly waste time-series script: loads quarterly measurements, draws
# per-variable time-series plots, one regression scatter with fit stats,
# and a batch of per-variable jpeg plots.
# install.packages("zoo")
# install.packages("ggplot2")
# library(ggplot2)
# library(zoo)
## Set working directory
# setwd("C:/Users/cj5/Desktop/Waste_TS/R")
setwd("C:/RWEM_processed_calvin/R")
#### STEP 1: IMPORT DATA - Set Zeros as Null Values
# NOTE(review): "na.string" only works via partial matching to read.table's
# "na.strings" argument; spell it out to be safe.
mydata <- read.csv("Quarterly_Data_v2.csv", na.string = 0, stringsAsFactors = FALSE)
# Set Index Class for Quarterly Data
library(zoo)
mydata$YYYY.QQ <- as.yearqtr(mydata$YYYY.QQ)
# Add date field for time-series
mydata$ts_date <- as.POSIXct(mydata$YYYY.QQ, tz = "Etc/GMT-1")
#### STEP 2: Produce time-series and regression plots
require(gridExtra)
require(ggplot2)
# Return the names of df columns (Remove first and last fields)
variables <- colnames(mydata)
variables <- variables[2:(length(variables)-1)]
# Time-Series Plots
p1_name <- paste(variables[1]) ### Site stopped
p2_name <- paste(variables[2]) ### Site identified
p3_name <- paste(variables[3]) ### Illegal exported waste
### Site stopped
p1 <- ggplot(mydata, aes_string("ts_date", p1_name)) +
geom_line(colour="black", size=1.5) + # Black lines
geom_point(size=3.5, colour="red") + # Red dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p1_name) +
ggtitle(p1_name)
### Site identified
p2 <- ggplot(mydata, aes_string("ts_date", p2_name)) +
geom_line(colour="steelblue", size=1.5) + # Blue lines
geom_point(size=3.5, colour="black") + # Black dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p2_name) +
ggtitle(p2_name)
### Illegal exported waste
p3 <- ggplot(mydata, aes_string("ts_date", p3_name)) +
geom_line(colour="red", size=1.5) + # Blue lines
geom_point(size=3.5, colour="black") + # Black dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p3_name) +
ggtitle(p3_name)
Site_Stopped <- p1
Site_Identified <- p2
Ill_Exp_Waste <- p3
## Linear regression plot
# NOTE(review): eq is a character string ("mydata$x ~ mydata$y"); lm()
# coerces it to a formula, but an explicit as.formula() would be clearer.
eq <- paste(paste("mydata$",p1_name,sep=""),"~", paste("mydata$",p2_name,sep=""))
reg <- lm(eq) # R basic regression to obtain performance information
fit_lm <- ggplot(mydata, aes_string(p1_name, p2_name)) +
geom_point(shape=1, size=5) + # Use hollow circles
theme_bw() + # Change background theme to white with grey grid
geom_smooth(method=lm, colour="red", size=1) + # Add linear regression line (includes 95% confidence)
xlab(p1_name) + ylab(p2_name) +
# NOTE(review): this ggtitle() is overridden by labs(title = ...) below.
ggtitle(expression(bold("Waste Measurements"))) +
# Add regression performance information ot title
labs(title = paste("Adj R2 = ",signif(summary(reg)$adj.r.squared, 5),
" Intercept =",signif(reg$coef[[1]],5 ),
" Slope =",signif(reg$coef[[2]], 5),
" P =",signif(summary(reg)$coef[2,4], 5)))
## Export Plots
pdf(paste(p2_name,".pdf",sep=""), width = 23.39, height = 16.53) # Output set to A2 sheet dimensions
print(grid.arrange(arrangeGrob(p1, p2), fit_lm, ncol=2)) # require(gridExtra)
dev.off()
#### new Stuff from Federico ######
# NOTE(review): 22 is hard-coded; seq_along(variables) would avoid NA plots
# if the data has fewer usable columns.
index <- c(1:22)
# LIST_ALL <- list(variables)
for (i in 1:22) {
p_name <- paste(variables[index[i]]) ### Site stopped
mypath <- file.path("C:","RWEM_processed_calvin","R","plots",
paste(p_name, index[i], ".jpg", sep = ""))
p <- ggplot(mydata, aes_string("ts_date", p_name)) +
geom_line(colour="black", size=1.5) + # Black lines
geom_point(size=3.5, colour="red") + # Red dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p_name) +
ggtitle(p_name)
ggsave(mypath, p)
}
| /Quarterly_Script.r | no_license | karaframe/waste | R | false | false | 3,869 | r | # install.packages("zoo")
# Quarterly waste time-series script (duplicate copy in this file): loads
# quarterly measurements, draws time-series plots, a regression scatter
# with fit stats, and per-variable jpeg plots.
# install.packages("ggplot2")
# library(ggplot2)
# library(zoo)
## Set working directory
# setwd("C:/Users/cj5/Desktop/Waste_TS/R")
setwd("C:/RWEM_processed_calvin/R")
#### STEP 1: IMPORT DATA - Set Zeros as Null Values
# NOTE(review): "na.string" only works via partial matching to read.table's
# "na.strings" argument; spell it out to be safe.
mydata <- read.csv("Quarterly_Data_v2.csv", na.string = 0, stringsAsFactors = FALSE)
# Set Index Class for Quarterly Data
library(zoo)
mydata$YYYY.QQ <- as.yearqtr(mydata$YYYY.QQ)
# Add date field for time-series
mydata$ts_date <- as.POSIXct(mydata$YYYY.QQ, tz = "Etc/GMT-1")
#### STEP 2: Produce time-series and regression plots
require(gridExtra)
require(ggplot2)
# Return the names of df columns (Remove first and last fields)
variables <- colnames(mydata)
variables <- variables[2:(length(variables)-1)]
# Time-Series Plots
p1_name <- paste(variables[1]) ### Site stopped
p2_name <- paste(variables[2]) ### Site identified
p3_name <- paste(variables[3]) ### Illegal exported waste
### Site stopped
p1 <- ggplot(mydata, aes_string("ts_date", p1_name)) +
geom_line(colour="black", size=1.5) + # Black lines
geom_point(size=3.5, colour="red") + # Red dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p1_name) +
ggtitle(p1_name)
### Site identified
p2 <- ggplot(mydata, aes_string("ts_date", p2_name)) +
geom_line(colour="steelblue", size=1.5) + # Blue lines
geom_point(size=3.5, colour="black") + # Black dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p2_name) +
ggtitle(p2_name)
### Illegal exported waste
p3 <- ggplot(mydata, aes_string("ts_date", p3_name)) +
geom_line(colour="red", size=1.5) + # Blue lines
geom_point(size=3.5, colour="black") + # Black dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p3_name) +
ggtitle(p3_name)
Site_Stopped <- p1
Site_Identified <- p2
Ill_Exp_Waste <- p3
## Linear regression plot
# NOTE(review): eq is a character string; lm() coerces it to a formula.
eq <- paste(paste("mydata$",p1_name,sep=""),"~", paste("mydata$",p2_name,sep=""))
reg <- lm(eq) # R basic regression to obtain performance information
fit_lm <- ggplot(mydata, aes_string(p1_name, p2_name)) +
geom_point(shape=1, size=5) + # Use hollow circles
theme_bw() + # Change background theme to white with grey grid
geom_smooth(method=lm, colour="red", size=1) + # Add linear regression line (includes 95% confidence)
xlab(p1_name) + ylab(p2_name) +
# NOTE(review): this ggtitle() is overridden by labs(title = ...) below.
ggtitle(expression(bold("Waste Measurements"))) +
# Add regression performance information ot title
labs(title = paste("Adj R2 = ",signif(summary(reg)$adj.r.squared, 5),
" Intercept =",signif(reg$coef[[1]],5 ),
" Slope =",signif(reg$coef[[2]], 5),
" P =",signif(summary(reg)$coef[2,4], 5)))
## Export Plots
pdf(paste(p2_name,".pdf",sep=""), width = 23.39, height = 16.53) # Output set to A2 sheet dimensions
print(grid.arrange(arrangeGrob(p1, p2), fit_lm, ncol=2)) # require(gridExtra)
dev.off()
#### new Stuff from Federico ######
# NOTE(review): 22 is hard-coded; seq_along(variables) would be safer.
index <- c(1:22)
# LIST_ALL <- list(variables)
for (i in 1:22) {
p_name <- paste(variables[index[i]]) ### Site stopped
mypath <- file.path("C:","RWEM_processed_calvin","R","plots",
paste(p_name, index[i], ".jpg", sep = ""))
p <- ggplot(mydata, aes_string("ts_date", p_name)) +
geom_line(colour="black", size=1.5) + # Black lines
geom_point(size=3.5, colour="red") + # Red dots
theme_bw() + # Change background theme to white with grey grids
xlab("") + ylab(p_name) +
ggtitle(p_name)
ggsave(mypath, p)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importBreakpointBed.R
\name{importBreakpointBed}
\alias{importBreakpointBed}
\title{Import a breakpoint BED file.}
\arguments{
\item{breakpoint_fn}{the filename of the breakpoint bed file}
}
\value{
a Genomic Interactions Object
}
\description{
Imports a BED file with breakpoints or other interactions, in a dual position format.
}
\examples{
importBreakpointBed(breakpoint_fn = system.file("extdata",
"sample_breakpoints.bed",package = "CNVScope"))
closeAllConnections()
}
\keyword{bed}
| /man/importBreakpointBed.Rd | no_license | masoodzaka/CNVScope | R | false | true | 567 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importBreakpointBed.R
\name{importBreakpointBed}
\alias{importBreakpointBed}
\title{Import a breakpoint BED file.}
\arguments{
\item{breakpoint_fn}{the filename of the breakpoint bed file}
}
\value{
a Genomic Interactions Object
}
\description{
Imports a BED file with breakpoints or other interactions, in a dual position format.
}
\examples{
importBreakpointBed(breakpoint_fn = system.file("extdata",
"sample_breakpoints.bed",package = "CNVScope"))
closeAllConnections()
}
\keyword{bed}
|
# Initial example ---------------------------------------------------------
# First, let's simulate.
# Probability vector for the X's (claim amounts 0, 1, 2).
f<-c(0.9,0.1*0.8,0.1*0.2)
# Draw one realisation of the aggregate claim S = X_1 + ... + X_N, where
# N is 0 or 3 with probabilities 0.75/0.25 and each X_i takes values 0:2
# with probabilities f.  Returns a single number in 0..6.
regresa.una.S<-function(){
# Generate an N.
N<-sample(x = c(0,3), # from this vector
size = 1, # take a sample of size 1
replace = T,# with replacement (irrelevant here)
prob = c(0.75,0.25))# with the corresponding probabilities
# Check whether there were any claims.
if(N>0){
# Generate the claims that occurred.
Xi <- sample(x = 0:2, # from this vector
size = N, # take a sample of size N
replace = T,# with replacement (the same amount can recur)
prob = f)# with the corresponding probabilities
}else{
Xi <- 0 # if there were none, the total is zero
}
# Return one S.
return(sum(Xi))
}
#
n = 1000000
set.seed(9)
S = replicate(n = n, # number of repetitions
expr = regresa.una.S()) # expression to evaluate
##Real (exact) probabilities
PrilII <- function(x, n, f, todo = FALSE) {
  # De Pril's second recursion for the aggregate-claims distribution of a
  # portfolio of n i.i.d. policies whose individual claim X has density f.
  #
  # Args:
  #   x:    claim total whose probability P(S = x) is requested (integer >= 0).
  #   n:    number of policies in the portfolio.
  #   f:    vector of probabilities of X, ordered from P(X = 0) upwards;
  #         f[1] = P(X = 0) must be positive.
  #   todo: if TRUE, return the whole named vector P(S = 0), ..., P(S = x);
  #         if FALSE (default), return only P(S = x).
  #
  # Returns: a named numeric vector (length x + 1 when todo = TRUE,
  #          length 1 otherwise).
  stopifnot(is.numeric(f), length(f) >= 1, f[1] > 0, n >= 1, x >= 0)
  # Accumulator for the probabilities of S, named by support point.
  g <- numeric(x + 1)
  names(g) <- 0:x
  # Name the probability vector of f by support point as well.
  names(f) <- 0:(length(f) - 1)
  # De Pril II recursion.
  for (s in 0:x) {
    if (s == 0) {
      # Base case: P(S = 0) = P(X = 0)^n.
      g["0"] <- f["0"]^n
    } else {
      # seq_len() (rather than the original 1:min(...)) keeps the sum empty
      # when the severity is degenerate at 0 (length(f) == 1), where
      # 1:min(s, 0) would wrongly iterate over c(1, 0) and yield NA.
      aux <- 0
      for (j in seq_len(min(s, length(f) - 1))) {
        aux <- aux + ((j * (n + 1)) / s - 1) * f[as.character(j)] * g[as.character(s - j)]
      }
      g[as.character(s)] <- aux / f["0"]
    }
  }
  if (todo) {
    g
  } else {
    g[as.character(x)]
  }
}
# Number of policies.
n<-3
# Probability vector for X.
f<-c(0.9,0.1*0.8,0.1*0.2)
# Exact probabilities of the conditional sum (given N = 3) via De Pril II.
Psd3<-PrilII(x = 6,n = n,f = f,todo = T) ; Psd3
sum(Psd3)
# Density function of the random sum S, mixing P(N=0)=0.75 and P(N=3)=0.25;
# relies on the global Psd3 computed just above.
fS <- function(s){
if(s==0){
return(0.75+Psd3[as.character(s)]*0.25)
}else if(s %in% 1:6){
return(0.25*Psd3[as.character(s)])
}else{
return(0)
}
}
# Probabilities over the support 0..6.
pS <- sapply(0:6, fS) ; pS
# Simulated proportions (S comes from the simulation block above).
table(S)/length(S)
# Does the theoretical distribution sum to one?
sum(pS)
## Expectation of the random sum S
# Theoretical
0.25*sum(1:6*Psd3[as.character(1:6)])
mu <- sum(0:6*pS); mu
# Sample
mean(S)
## Second moment
# Theoretical
0.25*sum((1:6)^2*Psd3[as.character(1:6)])
mu2 <- sum((0:6)^2*pS) ; mu2
# Sample
mean(S^2)
## Variance
# Theoretical
varianza <- mu2-mu^2 ; varianza
# Sample
var(S)
## Standard deviation
# Theoretical
sqrt(varianza)
# Sample
sd(S)
# Via iterated expectation: E[S] = E[X] * E[N].
EspX <- sum(0:2*f)
EspN <- (3*0.25)
mu ; EspX * EspN
# Collective model --------------------------------------------------------
# The following examples take Yj~Exp(100).
rate<-100
# Compound binomial model -------------------------------------------------
# We must generate random variables drawn from S.
n <- 10000 # number of simulations of S
p <- 0.8 # binomial parameter (p)
size <- 50 # binomial parameter (n)
# Draw one realisation of the compound-binomial aggregate claim:
# N~Bin(size, p) claims, each Yj~Exp(rate).
regresa.una.S<-function(){
# Generate an N.
N<-rbinom(n = 1,size = size,prob = p)
# Check whether there were any claims.
if(N>0){
Yj <- rexp(n = N,rate = rate) # generate the claims that occurred
}else{
Yj <- 0 # if there were none, the total is zero
}
# Return one S.
return(sum(Yj))
}
#
set.seed(27)
S = replicate(n = n, # number of repetitions
expr = regresa.una.S()) # expression to evaluate
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; size*p/rate
## Second moment
mean(S^2) ; size*p*(2/rate^2)+size*(size-1)*p^2/rate^2
## Variance
var(S) ; size*p*(2/rate^2 - p/rate^2)
# Compound negative binomial model ----------------------------------------
library(actuar)
?rcompound
# Negative binomial parameters.
k <- 10 ; p <- 0.8
S <- rcompound(n = n, # generate n
model.freq = rnbinom(size = k,prob = p), #N~BinNeg(k,p)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; k*(1/p-1)/rate
## Second moment
mean(S^2) ; k*(1/p-1)*(1/p)/rate^2+k*(1/p-1)*(2/rate^2-1/rate^2)+(k*(1/p-1)/rate)^2
## Variance
var(S) ; k*(1/p-1)*(1/p)/rate^2+k*(1/p-1)*(2/rate^2-1/rate^2)
# Compound Poisson model --------------------------------------------------
# Poisson parameter.
lambda <- 10
rate=20
S <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda), # N~Poi(lambda)
model.sev = rexp(rate = rate)) # Y~Exp(rate)
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; lambda/rate
## Second moment
mean(S^2) ; lambda*(2/rate^2) + lambda^2*(1/rate^2)
## Variance
var(S) ; lambda * 2/rate^2
# Distribution of the convolution of compound Poissons --------------------
n <- 1000000 ; library(actuar)
set.seed(9)
# Continuous Y's ----------------------------------------------------------
# Poisson parameters.
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda <- lambda1 + lambda2 + lambda3
# Exponential severity.
rate <- 5
S1 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
# Chi-squared severity.
dfredom <- 20
S2 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~ChiSq(dfredom)
# Pareto severity.
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~Pareto(shape,min)
# The sum is again compound Poisson with rate lambda and mixed severity.
S <- S1 + S2 + S3
## Expectation
# Sample
mean(S)
# Theoretical: lambda * E[Y], with E[Y] the lambda-weighted severity mean.
mu1<-lambda*(lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) # mean of the Pareto
)/lambda ; mu1
## Second moment
# Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
# 2nd moment of the Exponential
lambda1*(2/rate^2) +
# 2nd moment of the Chi-squared
lambda2*(2*dfredom+dfredom^2) +
# 2nd moment of the Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2)
)/lambda
# Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*((lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) # mean of the Pareto
)/lambda)^2
# Theoretical:
mu2 <- Suma1 + Suma2 ; mu2
# Sample
mean(S^2)
## Variance
# Sample
var(S)
# Theoretical
mu2-mu1^2
Suma1
# Discrete Y's ------------------------------------------------------------
set.seed(2)
# Poisson parameters.
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda <- lambda1 + lambda2 + lambda3
# Binomial severity.
size = 10 ; prob = 0.3
S1 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
# Poisson severity.
bawr = 7
S2 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
# Negative binomial severity.
k = 5 ; p = 0.9
S3 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
S <- S1 + S2 + S3
## Expectation
# Sample
mean(S)
# Theoretical
mu1<-lambda*(lambda1*(size*prob) + # mean of the Binomial
lambda2*(bawr) + # mean of the Poisson
lambda3*(k*(1-p)/p) # mean of the Negative Binomial
)/lambda ; mu1
## Second moment
# Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
# 2nd moment of the Binomial
lambda1*(size*prob*(1-prob)+(size*prob)^2) +
# 2nd moment of the Poisson
lambda2*(bawr+bawr^2) +
# 2nd moment of the Negative Binomial
lambda3*(k*(1-p)/p^2+(k*(1-p)/p)^2)
)/lambda
# Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*((lambda1*(size*prob) + # mean of the Binomial
lambda2*(bawr) + # mean of the Poisson
lambda3*(k*(1-p)/p) # mean of the Negative Binomial
)/lambda)^2
# Theoretical:
mu2 <- Suma1 + Suma2 ; mu2
# Sample
mean(S^2)
## Variance
# Sample
var(S)
# Theoretical
mu2-mu1^2
Suma1
# Curiosity
barplot(table(S))
# Continuous & discrete Y's ------------------------------------------------
n <- 1234567
set.seed(9)
# Poisson parameters.
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda4 <- 10 ; lambda5 <- 9 ; lambda6 <- 6
lambda <- lambda1 + lambda2 + lambda3 + lambda4 + lambda5 + lambda6
## Continuous severities
# Exponential
rate <- 5
S1 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
# Chi-squared
dfredom <- 20
S2 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~ChiSq(dfredom)
# Pareto
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~Pareto(shape,min)
## Discrete severities
# Binomial
size = 10 ; prob = 0.3
S4 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda4), #N~Poi(lambda4)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
# Poisson
bawr = 7
S5 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda5), #N~Poi(lambda5)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
# Negative Binomial
k = 5 ; p = 0.9
S6 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda6), #N~Poi(lambda6)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
## Let's begin: the sum is compound Poisson with rate lambda.
S <- S1 + S2 + S3 + S4 + S5 + S6
## Expectation
# Theoretical
mu1<-lambda*(lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) + # mean of the Pareto
lambda4*(size*prob) + # mean of the Binomial
lambda5*(bawr) + # mean of the Poisson
lambda6*(k*(1-p)/p) # mean of the Negative Binomial
)/lambda ; mu1
# Sample
mean(S)
## Second moment
# Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
# 2nd moment of the Exponential
lambda1*(2/rate^2) +
# 2nd moment of the Chi-squared
lambda2*(2*dfredom+dfredom^2) +
# 2nd moment of the Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2) +
# 2nd moment of the Binomial
lambda4*(size*prob*(1-prob)+(size*prob)^2) +
# 2nd moment of the Poisson
lambda5*(bawr+bawr^2) +
# 2nd moment of the Negative Binomial
lambda6*(k*(1-p)/p^2+(k*(1-p)/p)^2)
)/lambda
# Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*(( lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) + # mean of the Pareto
lambda4*(size*prob) + # mean of the Binomial
lambda5*(bawr) + # mean of the Poisson
lambda6*(k*(1-p)/p) # mean of the Negative Binomial
)/lambda)^2
# Theoretical:
mu2 <- Suma1 + Suma2 ; mu2
# Sample
mean(S^2)
## Variance
# Sample
var(S)
# Theoretical
mu2-mu1^2
Suma1
## Standard deviation
# Sample
sd(S)
# Theoretical
sqrt(mu2-mu1^2)
# Curiosity
hist(S,col="red",probability = T)
abline(v=mu1,col="blue",lwd=2)
# Is it normal?!
goftest::ad.test(S,pnorm,mean=mu1,sd=sqrt(mu2-mu1^2))
# Whew... no, that would be VERY strange... could it be something else..?
# Discrete and continuous severities over the reals ------------------------
n <- 1234567
set.seed(21)
# Poisson parameters.
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda4 <- 10 ; lambda5 <- 9 ; lambda6 <- 6
lambda7 <- 3.2
lambda <- lambda1 + lambda2 + lambda3 + lambda4 + lambda5 + lambda6 + lambda7
## Continuous severities
# Exponential
rate <- 5
S1 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
# Chi-squared
dfredom <- 20
S2 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~ChiSq(dfredom)
# Pareto
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~Pareto(shape,min)
## Discrete severities
# Binomial
size = 10 ; prob = 0.3
S4 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda4), #N~Poi(lambda4)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
# Poisson
bawr = 7
S5 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda5), #N~Poi(lambda5)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
# Negative Binomial
k = 5 ; p = 0.9
S6 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda6), #N~Poi(lambda6)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
## Continuous severities over the whole real line
# Normal (note the negative mean)
media = -40 ; desv = 2
S7 <- rcompound(n = n, # generate n
model.freq = rpois(lambda = lambda7), #N~Poi(lambda6)
model.sev = rnorm(mean = media,sd = desv)) #Y~N(media,desv)
## Let's begin:
S <- S1 + S2 + S3 + S4 + S5 + S6 + S7
## Expectation
# Theoretical
mu1<-lambda*(lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) + # mean of the Pareto
lambda4*(size*prob) + # mean of the Binomial
lambda5*(bawr) + # mean of the Poisson
lambda6*(k*(1-p)/p) + # mean of the Negative Binomial
lambda7*media # mean of the Normal
)/lambda ; mu1
# Sample
mean(S)
## Second moment
# Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
# 2nd moment of the Exponential
lambda1*(2/rate^2) +
# 2nd moment of the Chi-squared
lambda2*(2*dfredom+dfredom^2) +
# 2nd moment of the Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2) +
# 2nd moment of the Binomial
lambda4*(size*prob*(1-prob)+(size*prob)^2) +
# 2nd moment of the Poisson
lambda5*(bawr+bawr^2) +
# 2nd moment of the Negative Binomial
lambda6*(k*(1-p)/p^2+(k*(1-p)/p)^2) +
# 2nd moment of the Normal
lambda7*(desv^2+media^2)
)/lambda
# Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*(( lambda1*(1/rate) + # mean of the Exponential
lambda2*(dfredom) + # mean of the Chi-squared
lambda3*(shape*min/(shape-1)) + # mean of the Pareto
lambda4*(size*prob) + # mean of the Binomial
lambda5*(bawr) + # mean of the Poisson
lambda6*(k*(1-p)/p) + # mean of the Negative Binomial
lambda7*media # mean of the Normal
)/lambda)^2
# Theoretical:
mu2 <- Suma1 + Suma2 ; mu2
# Sample
mean(S^2)
## Variance
# Sample
var(S)
# Theoretical
mu2-mu1^2
Suma1
## Standard deviation
# Sample
sd(S)
# Theoretical
sqrt(mu2-mu1^2)
# Curiosity
hist(S,col="red",probability = T)
abline(v=mu1,col="blue",lwd=2)
# Is it normal?!
goftest::ad.test(S,pnorm,mean=mu1,sd=sqrt(mu2-mu1^2))
# Whew... no, that would be VERY strange... could it be something else..?
# Modelo Colectivo con variables aleatorias de pérdida de una cia. --------
#Consideremos N~Binomial(n,p) & X~Exp(rate)
#Y La pérdida de una cia. con un contrato de seguros
#con inflación, deducible, monto máximo y deducible
#Debemos generar variables aleatorias provenientes de S
#Parámetros de la N
n <- 10000 #Número de simulaciones de S
p <- 0.8 #parámetro de la binomial (p).
size <- 50 #parámetro de la binomial (n).
#Parámetros de la X
rate<-1/100
#Parámetros de la Y
#Fijamos deducible y límite máximo
D<-25 ; U <- 175
#Tomemos un coeficiente de coaseguro
alpha<-0.25
#Fijemos una tasa de inflación
r<-0.15
regresa.una.S<-function(){
#Genera una N
N<-rbinom(n = 1,size = size,prob = p)
#Verifica si hubo reclamaciones.
if(N>0){
X <- rexp(n = N,rate = rate) #Genera las que hubo.
#Calculemos los pagos
Yj<-pmax(alpha*(pmin(X*(1+r),U)-D),0)
}else{
Yj <- 0 #Si no hubo, el total es cero.
}
#Regresa una S
return(sum(Yj))
}
#
set.seed(21)
S = replicate(n = n, # number of replications
expr = regresa.una.S()) # expression to evaluate each time
mean(S)
# Moments (sample vs. theoretical)
# Expectation of Y
library(actuar)
# Per-loss payment density derived from the Exp severity and the
# contract modifications (limit, inflation, deductible, coinsurance)
fyL<-coverage(pdf = dexp,cdf = pexp,
limit=U,inflation=r,deductible=D,coinsurance=alpha,
per.loss=TRUE)
f<-function(x,lambda=1/100){fyL(x,lambda)}
# Theoretical expectation: integrand y * f(y)
yfYL<-function(y){
y*f(y)
}
# Integrate (expectation) over the continuous part of the support
integral<-integrate(f = yfYL,lower = 0,upper = alpha*(U-D))
integral<-integral$value
# Continuous part + discrete part (atoms at 0 and at the capped payment)
mu1y<-0*pexp(D/(1+r),rate=rate)+integral+(alpha*(U-D))*(1-pexp(U/(1+r),rate = rate))
# Second moment of Y
# Theoretical expectation: integrand y^2 * f(y)
yfYL<-function(y){
y^2*f(y)
}
# Integrate (second moment) over the continuous part
integral<-integrate(f = yfYL,lower = 0,upper = alpha*(U-D))
integral<-integral$value
# Continuous part + discrete part
mu2y<-integral+(alpha*(U-D))^2*(1-pexp(U/(1+r),rate = rate))
## Expectation
mean(S) ; size*p*mu1y
## Second moment
mean(S^2) ; size*p*mu2y+size*(size-1)*p^2*(mu1y^2)
## Variance
var(S) ; size*p*(mu2y - p*mu1y^2)
## Standard deviation
sd(S) ; sqrt(size*p*(mu2y - p*mu1y^2))
## Consider N~Negative Binomial(k,p) & X~Exp(rate) ---
# Parameters of the Negative Binomial
k <- 10 ; p <- 0.8
# How do I simulate a sample of payments Y?
# Simulate n per-claim company payments under the contract:
# losses X ~ Exp(rate) are inflated by r, capped at U, reduced by the
# deductible D, floored at 0 and scaled by the coinsurance alpha.
# Reads rate, r, U, D, alpha from the enclosing environment.
rPagoCia <- function(n) {
  gross.loss <- rexp(n, rate = rate)
  inflated <- gross.loss * (1 + r)
  pmax(alpha * (pmin(inflated, U) - D), 0)
}
# NOTE: I am assuming the ground-up losses are X~Exp(rate)
set.seed(21) ; n = 1000000
S <- rcompound(n = n, # Generate n draws of S
model.freq = rnbinom(size = k,prob = p), # N~NegBin(k,p)
model.sev = rPagoCia()) # Y ~ company payment under the contract
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; k*(1/p-1)*mu1y
## Second moment
mean(S^2)
k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2)+(k*(1/p-1)*mu1y)^2
## Variance
var(S) ; k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2)
## Standard deviation
sd(S) ; sqrt(k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2))
## Consider N~Poi(lambda) & X~Exp(rate) ---
# Poisson parameter
lambda <- 10
set.seed(21)
S <- rcompound(n = n, # Generate n draws of S
model.freq = rpois(lambda = lambda), # N~Poi(lambda)
model.sev = rPagoCia()) # Y ~ company payment under the contract
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; lambda*mu1y
## Second moment
mean(S^2) ; lambda*(mu2y) + lambda^2*(mu1y^2)
## Variance
var(S) ; lambda * mu2y
## Standard deviation
sd(S) ; sqrt(lambda * mu2y)
| /Teoría del Riesgo (UNAM)/_Scripts_/6.1 Modelo Colectivo.R | no_license | hejurado/R_Actuarial | R | false | false | 19,505 | r |
# Initial example ---------------------------------------------------------
# First, simulate
# Vector of probabilities for the individual claims X (values 0, 1, 2)
f<-c(0.9,0.1*0.8,0.1*0.2)
# Draw one realization of the aggregate claim S:
# the number of claims N is 0 with probability 0.75 or 3 with
# probability 0.25; each claim takes values 0, 1 or 2 with the
# probabilities in the global vector f.
regresa.una.S <- function() {
  # Number of claims this period
  n.claims <- sample(c(0, 3),       # support of N
                     size = 1,       # one draw
                     replace = TRUE,
                     prob = c(0.75, 0.25))
  if (n.claims == 0) {
    # No claims: the aggregate is zero
    return(0)
  }
  # Individual claim amounts
  claims <- sample(0:2, size = n.claims, replace = TRUE, prob = f)
  sum(claims)
}
#
n = 1000000
set.seed(9)
S = replicate(n = n, # number of replications
expr = regresa.una.S()) # expression to evaluate each time
## Exact probabilities
# De Pril's second formula for the individual risk model.
#
# Computes P(S = s) for s = 0, ..., x, where S is the sum of the claims of
# n i.i.d. policies whose per-policy claim distribution is given by `f`.
#
# Arguments:
#   x    : largest aggregate claim value to evaluate (x >= 0).
#   n    : number of policies (n >= 1).
#   f    : vector of claim probabilities for the values 0, 1, ..., length(f)-1;
#          the support must start at 0 and f[1] must be positive (the
#          recursion divides by P(X = 0)).
#   todo : if TRUE, return the whole named vector P(S = 0), ..., P(S = x);
#          otherwise return only the named element P(S = x).
PrilII <- function(x, n, f, todo = FALSE){
  stopifnot(x >= 0, n >= 1, length(f) >= 1, f[1] > 0)
  # g[s + 1] will hold P(S = s); index entries by their support value.
  # (Preallocate with zeros rather than the misleading sequence 0:x.)
  g <- numeric(x + 1)
  names(g) <- 0:x
  # Name the claim probabilities by their support values too.
  names(f) <- 0:(length(f) - 1)
  # De Pril II recursion
  for (s in 0:x) {
    if (s == 0) {
      # Base case: all n policies claim nothing.
      g["0"] <- f["0"]^n
    } else {
      acc <- 0
      # Only j up to min(s, max claim size) can contribute.
      for (j in seq_len(min(s, length(f) - 1))) {
        acc <- acc + ((j * (n + 1)) / s - 1) * f[as.character(j)] * g[as.character(s - j)]
      }
      g[as.character(s)] <- acc / f["0"]
    }
  }
  if (todo) g else g[as.character(x)]
}
# Number of policies
n<-3
# Vector of per-policy claim probabilities (values 0, 1, 2)
f<-c(0.9,0.1*0.8,0.1*0.2)
# Probabilities of the sum of 3 claims via De Pril II
Psd3<-PrilII(x = 6,n = n,f = f,todo = T) ; Psd3
sum(Psd3)
# Density function of the random sum S
# Density of the aggregate claim S: a mixture of the atom at 0 coming
# from P(N = 0) = 0.75 and, with weight P(N = 3) = 0.25, the
# distribution Psd3 of the sum of 3 claims (support 0:6).
# Reads Psd3 from the enclosing environment.
fS <- function(s) {
  if (s == 0) {
    # Atom at zero plus the conditional mass at zero
    0.75 + 0.25 * Psd3[as.character(s)]
  } else if (s %in% 1:6) {
    0.25 * Psd3[as.character(s)]
  } else {
    # Outside the support of S
    0
  }
}
# Probabilities
pS <- sapply(0:6, fS) ; pS
# Simulated proportions
table(S)/length(S)
# Does the theoretical distribution sum to one?
sum(pS)
## Expectation of the random sum S
# Theoretical
0.25*sum(1:6*Psd3[as.character(1:6)])
mu <- sum(0:6*pS); mu
# Sample
mean(S)
## Second moment
# Theoretical
0.25*sum((1:6)^2*Psd3[as.character(1:6)])
mu2 <- sum((0:6)^2*pS) ; mu2
# Sample
mean(S^2)
## Variance
# Theoretical
varianza <- mu2-mu^2 ; varianza
# Sample
var(S)
## Standard deviation
# Theoretical
sqrt(varianza)
# Sample
sd(S)
# Via iterated expectation: E[S] = E[X] * E[N]
EspX <- sum(0:2*f)
EspN <- (3*0.25)
mu ; EspX * EspN
# Collective model --------------------------------------------------------
# The following examples assume Yj~Exp(100)
rate<-100
# Compound Binomial model -----------------------------------------------
# We must generate random draws of S
n <- 10000 # Number of simulations of S
p <- 0.8 # binomial parameter (p).
size <- 50 # binomial parameter (n).
# Draw one aggregate claim S in the compound binomial model:
# N ~ Binomial(size, p) claims, each claim Y ~ Exp(rate).
# Reads size, p, rate from the enclosing environment.
regresa.una.S <- function() {
  num.claims <- rbinom(n = 1, size = size, prob = p)
  if (num.claims == 0) {
    # No claims this period
    return(0)
  }
  # Sum of the exponential claim amounts
  sum(rexp(n = num.claims, rate = rate))
}
#
set.seed(27)
S = replicate(n = n, # number of replications
expr = regresa.una.S()) # expression to evaluate each time
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; size*p/rate
## Second moment
mean(S^2) ; size*p*(2/rate^2)+size*(size-1)*p^2/rate^2
## Variance
var(S) ; size*p*(2/rate^2 - p/rate^2)
# Compound Negative Binomial model --------------------------------------
library(actuar)
?rcompound
# Parameters of the Negative Binomial
k <- 10 ; p <- 0.8
S <- rcompound(n = n, # Generate n draws of S
model.freq = rnbinom(size = k,prob = p), # N~NegBin(k,p)
model.sev = rexp(rate = rate)) # Y~Exp(rate)
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; k*(1/p-1)/rate
## Second moment
mean(S^2) ; k*(1/p-1)*(1/p)/rate^2+k*(1/p-1)*(2/rate^2-1/rate^2)+(k*(1/p-1)/rate)^2
## Variance
var(S) ; k*(1/p-1)*(1/p)/rate^2+k*(1/p-1)*(2/rate^2-1/rate^2)
# Compound Poisson model ------------------------------------------------
# Poisson parameter
lambda <- 10
rate=20
S <- rcompound(n = n, # Generate n draws of S
model.freq = rpois(lambda = lambda), # N~Poi(lambda)
model.sev = rexp(rate = rate)) # Y~Exp(rate)
# Moments (sample vs. theoretical)
## Expectation
mean(S) ; lambda/rate
## Second moment
mean(S^2) ; lambda*(2/rate^2) + lambda^2*(1/rate^2)
## Variance
var(S) ; lambda * 2/rate^2
# Distribución de la convolución de Poisson Compuesta ---------------------
n <- 1000000 ; library(actuar)
set.seed(9)
# Y's continuas -----------------------------------------------------------
#Parámetro de la Poisson
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda <- lambda1 + lambda2 + lambda3
#Exponencial
rate <- 5
S1 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
#Ji cuadrada
dfredom <- 20
S2 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~JiCuadrada(dfredom)
#Pareto
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~pareto(shape,scale)
S <- S1 + S2 + S3
##Esperanza
#Muestral
mean(S)
#Teórica
mu1<-lambda*(lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) #Esperanza de la Pareto
)/lambda ; mu1
##Segundo momento
#Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
#2do momento de la Exponencial
lambda1*(2/rate^2) +
#2do momento de la Ji cuadrada
lambda2*(2*dfredom+dfredom^2) +
#2do momento de la Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2)
)/lambda
#Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*((lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) #Esperanza de la Pareto
)/lambda)^2
#Teórica:
mu2 <- Suma1 + Suma2 ; mu2
#Muestral
mean(S^2)
##Varianza
#Muestral
var(S)
#Teórica
mu2-mu1^2
Suma1
# Y's discretas -----------------------------------------------------------
set.seed(2)
#Parámetro de la Poisson
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda <- lambda1 + lambda2 + lambda3
#Binomial
size = 10 ; prob = 0.3
S1 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
#Poisson
bawr = 7
S2 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
#Binomial Negativa
k = 5 ; p = 0.9
S3 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
S <- S1 + S2 + S3
##Esperanza
#Muestral
mean(S)
#Teórica
mu1<-lambda*(lambda1*(size*prob) + #Esperanza de la Binomial
lambda2*(bawr) + #Esperanza de la Poisson
lambda3*(k*(1-p)/p) #Esperanza de la Binomial Negativa
)/lambda ; mu1
##Segundo momento
#Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
#2do momento de la Binomial
lambda1*(size*prob*(1-prob)+(size*prob)^2) +
#2do momento de la Poisson
lambda2*(bawr+bawr^2) +
#2do momento de la Binomial Negativa
lambda3*(k*(1-p)/p^2+(k*(1-p)/p)^2)
)/lambda
#Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*((lambda1*(size*prob) + #Esperanza de la Binomial
lambda2*(bawr) + #Esperanza de la Poisson
lambda3*(k*(1-p)/p) #Esperanza de la Binomial Negativa
)/lambda)^2
#Teórica:
mu2 <- Suma1 + Suma2 ; mu2
#Muestral
mean(S^2)
##Varianza
#Muestral
var(S)
#Teórica
mu2-mu1^2
Suma1
#Curiosidad
barplot(table(S))
# Y's Continuas & Discretas -----------------------------------------------
n <- 1234567
set.seed(9)
#Parámetro de la Poisson
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda4 <- 10 ; lambda5 <- 9 ; lambda6 <- 6
lambda <- lambda1 + lambda2 + lambda3 + lambda4 + lambda5 + lambda6
##Continuas
#Exponencial
rate <- 5
S1 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
#Ji cuadrada
dfredom <- 20
S2 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~JiCuadrada(dfredom)
#Pareto
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~pareto(shape,scale)
##Discretas
#Binomial
size = 10 ; prob = 0.3
S4 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda4), #N~Poi(lambda4)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
#Poisson
bawr = 7
S5 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda5), #N~Poi(lambda5)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
#Binomial Negativa
k = 5 ; p = 0.9
S6 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda6), #N~Poi(lambda6)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
##Comenzamos:
S <- S1 + S2 + S3 + S4 + S5 + S6
##Esperanza
#Teórica
mu1<-lambda*(lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) + #Esperanza de la Pareto
lambda4*(size*prob) + #Esperanza de la Binomial
lambda5*(bawr) + #Esperanza de la Poisson
lambda6*(k*(1-p)/p) #Esperanza de la Binomial Negativa
)/lambda ; mu1
#Muestral
mean(S)
##Segundo momento
#Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
#2do momento de la Exponencial
lambda1*(2/rate^2) +
#2do momento de la Ji cuadrada
lambda2*(2*dfredom+dfredom^2) +
#2do momento de la Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2) +
#2do momento de la Binomial
lambda4*(size*prob*(1-prob)+(size*prob)^2) +
#2do momento de la Poisson
lambda5*(bawr+bawr^2) +
#2do momento de la Binomial Negativa
lambda6*(k*(1-p)/p^2+(k*(1-p)/p)^2)
)/lambda
#Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*(( lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) + #Esperanza de la Pareto
lambda4*(size*prob) + #Esperanza de la Binomial
lambda5*(bawr) + #Esperanza de la Poisson
lambda6*(k*(1-p)/p) #Esperanza de la Binomial Negativa
)/lambda)^2
#Teórica:
mu2 <- Suma1 + Suma2 ; mu2
#Muestral
mean(S^2)
##Varianza
#Muestral
var(S)
#Teórica
mu2-mu1^2
Suma1
##Desviación
#Muestral
sd(S)
#Teórica
sqrt(mu2-mu1^2)
#Curiosidad
hist(S,col="red",probability = T)
abline(v=mu1,col="blue",lwd=2)
#¡¿Es normal!?
goftest::ad.test(S,pnorm,mean=mu1,sd=sqrt(mu2-mu1^2))
#Uffff... no, eso sería MUY RARO... ¿Será algo..?
# Discretas y continuas sobre los reales ----------------------------------
n <- 1234567
set.seed(21)
#Parámetro de la Poisson
lambda1 <- 7 ; lambda2 <- 4 ; lambda3 <- 21
lambda4 <- 10 ; lambda5 <- 9 ; lambda6 <- 6
lambda7 <- 3.2
lambda <- lambda1 + lambda2 + lambda3 + lambda4 + lambda5 + lambda6 + lambda7
##Continuas
#Exponencial
rate <- 5
S1 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda1), #N~Poi(lambda1)
model.sev = rexp(rate = rate)) #Y~Exp(rate)
#Ji cuadrada
dfredom <- 20
S2 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda2), #N~Poi(lambda2)
model.sev = rchisq(df=dfredom)) #Y~JiCuadrada(dfredom)
#Pareto
shape <- 6 ; min <- 7
S3 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda3), #N~Poi(lambda3)
model.sev = rpareto1(shape = 6,min = 7)) #Y~pareto(shape,scale)
##Discretas
#Binomial
size = 10 ; prob = 0.3
S4 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda4), #N~Poi(lambda4)
model.sev = rbinom(size = size,prob = prob)) #Y~Bin(size,prob)
#Poisson
bawr = 7
S5 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda5), #N~Poi(lambda5)
model.sev = rpois(lambda = bawr)) #Y~Poi(bawr)
#Binomial Negativa
k = 5 ; p = 0.9
S6 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda6), #N~Poi(lambda6)
model.sev = rnbinom(size = k,prob = p)) #Y~BinNeg(k,p)
##Continuas sobre los reales
#Normal
media = -40 ; desv = 2
S7 <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda7), #N~Poi(lambda6)
model.sev = rnorm(mean = media,sd = desv)) #Y~N(media,desv)
##Comenzamos:
S <- S1 + S2 + S3 + S4 + S5 + S6 + S7
##Esperanza
#Teórica
mu1<-lambda*(lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) + #Esperanza de la Pareto
lambda4*(size*prob) + #Esperanza de la Binomial
lambda5*(bawr) + #Esperanza de la Poisson
lambda6*(k*(1-p)/p) + #Esperanza de la Binomial Negativa
lambda7*media #Esperanza de la Normal
)/lambda ; mu1
#Muestral
mean(S)
##Segundo momento
#Suma1 = lambda*E[Y^2]
Suma1<-lambda*(
#2do momento de la Exponencial
lambda1*(2/rate^2) +
#2do momento de la Ji cuadrada
lambda2*(2*dfredom+dfredom^2) +
#2do momento de la Pareto
lambda3*((shape*min^2)/((shape-1)^2*(shape-2))+(shape*min/(shape-1))^2) +
#2do momento de la Binomial
lambda4*(size*prob*(1-prob)+(size*prob)^2) +
#2do momento de la Poisson
lambda5*(bawr+bawr^2) +
#2do momento de la Binomial Negativa
lambda6*(k*(1-p)/p^2+(k*(1-p)/p)^2) +
#2do momento de la Normal
lambda7*(desv^2+media^2)
)/lambda
#Suma2 = lambda^2*(E[Y])^2
Suma2<- lambda^2*(( lambda1*(1/rate) + #Esperanza de la Exponencial
lambda2*(dfredom) + #Esperanza de la Ji cuadrada
lambda3*(shape*min/(shape-1)) + #Esperanza de la Pareto
lambda4*(size*prob) + #Esperanza de la Binomial
lambda5*(bawr) + #Esperanza de la Poisson
lambda6*(k*(1-p)/p) + #Esperanza de la Binomial Negativa
lambda7*media #Esperanza de la Normal
)/lambda)^2
#Teórica:
mu2 <- Suma1 + Suma2 ; mu2
#Muestral
mean(S^2)
##Varianza
#Muestral
var(S)
#Teórica
mu2-mu1^2
Suma1
##Desviación
#Muestral
sd(S)
#Teórica
sqrt(mu2-mu1^2)
#Curiosidad
hist(S,col="red",probability = T)
abline(v=mu1,col="blue",lwd=2)
#¡¿Es normal!?
goftest::ad.test(S,pnorm,mean=mu1,sd=sqrt(mu2-mu1^2))
#Uffff... no, eso sería MUY RARO... ¿Será algo..?
# Modelo Colectivo con variables aleatorias de pérdida de una cia. --------
#Consideremos N~Binomial(n,p) & X~Exp(rate)
#Y La pérdida de una cia. con un contrato de seguros
#con inflación, deducible, monto máximo y deducible
#Debemos generar variables aleatorias provenientes de S
#Parámetros de la N
n <- 10000 #Número de simulaciones de S
p <- 0.8 #parámetro de la binomial (p).
size <- 50 #parámetro de la binomial (n).
#Parámetros de la X
rate<-1/100
#Parámetros de la Y
#Fijamos deducible y límite máximo
D<-25 ; U <- 175
#Tomemos un coeficiente de coaseguro
alpha<-0.25
#Fijemos una tasa de inflación
r<-0.15
# Draw one realization of the aggregate payment S:
# N ~ Binomial(size, p) claims; each ground-up loss X ~ Exp(rate) is
# converted into a payment through inflation r, policy limit U,
# deductible D and coinsurance alpha. Reads size, p, rate, r, U, D,
# alpha from the enclosing environment.
regresa.una.S <- function() {
  # How many claims occurred
  n.claims <- rbinom(n = 1, size = size, prob = p)
  if (n.claims == 0) {
    # No claims: zero aggregate payment
    return(0)
  }
  # Ground-up losses for the claims that occurred
  x <- rexp(n = n.claims, rate = rate)
  # Contract transformation: inflate, cap, deduct, floor, coinsure
  sum(pmax(alpha * (pmin(x * (1 + r), U) - D), 0))
}
#
set.seed(21)
S = replicate(n = n, #Número de veces
expr = regresa.una.S()) #Expresión
mean(S)
#Momentos (Muestral Vs. Teórico)
#Esperanza de Y
library(actuar)
fyL<-coverage(pdf = dexp,cdf = pexp,
limit=U,inflation=r,deductible=D,coinsurance=alpha,
per.loss=TRUE)
f<-function(x,lambda=1/100){fyL(x,lambda)}
#Esperanza teórica
yfYL<-function(y){
y*f(y)
}
#Integrando (Esperanza)
integral<-integrate(f = yfYL,lower = 0,upper = alpha*(U-D))
integral<-integral$value
#Parte continua + parte discreta
mu1y<-0*pexp(D/(1+r),rate=rate)+integral+(alpha*(U-D))*(1-pexp(U/(1+r),rate = rate))
#Segundo momento de Y
#Esperanza teórica
yfYL<-function(y){
y^2*f(y)
}
#Integrando (Esperanza)
integral<-integrate(f = yfYL,lower = 0,upper = alpha*(U-D))
integral<-integral$value
#Parte continua + parte discreta
mu2y<-integral+(alpha*(U-D))^2*(1-pexp(U/(1+r),rate = rate))
##Esperanza
mean(S) ; size*p*mu1y
##Segundo momento
mean(S^2) ; size*p*mu2y+size*(size-1)*p^2*(mu1y^2)
##Varianza
var(S) ; size*p*(mu2y - p*mu1y^2)
##Desviación
sd(S) ; sqrt(size*p*(mu2y - p*mu1y^2))
##Consideremos N~Binomial Negativa(n,p) & X~Exp(rate) ---
#Parámetros de la Binomial Negativa
k <- 10 ; p <- 0.8
#¿Cómo simulo una muestra de pagos Y's?
# Simulate n per-claim company payments: ground-up losses X ~ Exp(rate)
# pass through the contract modifications (inflation r, limit U,
# deductible D, coinsurance alpha). Reads rate, r, U, D, alpha from the
# enclosing environment.
rPagoCia <- function(n) {
  raw <- rexp(n, rate = rate) * (1 + r)   # inflated losses
  pmax(alpha * (pmin(raw, U) - D), 0)     # capped, deducted, coinsured
}
#OJO: Estoy asumiendo que los siniestros son X~Exp(rate)
set.seed(21) ; n = 1000000
S <- rcompound(n = n, #Genera n
model.freq = rnbinom(size = k,prob = p), #N~BinNeg(k,p)
model.sev = rPagoCia()) #Y~Pago de una cia con el contrato dado
#Momentos (Muestral Vs. Teórico)
##Esperanza
mean(S) ; k*(1/p-1)*mu1y
##Segundo momento
mean(S^2)
k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2)+(k*(1/p-1)*mu1y)^2
##Varianza
var(S) ; k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2)
##Desviación
sd(S) ; sqrt(k*(1/p-1)*(1/p)*mu1y^2+k*(1/p-1)*(mu2y-mu1y^2))
##Consideremos N~Poi(lambda) & X~Exp(rate) ---
#Parámetro de la Poisson
lambda <- 10
set.seed(21)
S <- rcompound(n = n, #Genera n
model.freq = rpois(lambda = lambda), #N~Poi(lambda)
model.sev = rPagoCia()) #Y~Exp(rate)
#Momentos (Muestral Vs. Teórico)
##Esperanza
mean(S) ; lambda*mu1y
##Segundo momento
mean(S^2) ; lambda*(mu2y) + lambda^2*(mu1y^2)
##Varianza
var(S) ; lambda * mu2y
##Desviación
sd(S) ; sqrt(lambda * mu2y)
|
# Name : plotfit.R0.R
# Desc : A set of tweaked "plot" functions designed to easily plot R objects
# from any of the supported estimation methods.
# Date : 2011/11/09
# Update : 2023/03/03
# Author : Boelle, Obadia
###############################################################################
#' @title
#' Plot a model fit for `R0.R` objects
#'
#' @description
#' Plot the fit of a single model output to epidemic data.
#'
#' @details
#' For internal use. This function is called by the [plotfit()] S3 method.
#' Depending on the estimation method, either [plotfitRxx()], [plotfitRAR()] or
#' [plotfitRSB()] will be called.
#'
#' @param x An output of `est.R0.xx()` (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fortnight), `m` (month).
#' @param SB.dist Boolean. Should the R distribution throughout the epidemic be plotted for the SB method (defaults to `TRUE`) ?
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @importFrom grDevices dev.new
#' @importFrom graphics abline axis close.screen split.screen screen lines points
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# Function declaration
plotfit.R0.R <- function(
  x,
  xscale  = "w",
  SB.dist = TRUE,
  ...
)
# Code
{
  # Make sure x is of class "R0.R"
  if (!inherits(x, "R0.R")) stop("'x' must be of class R0.R")
  if (x$method.code %in% c("EG", "ML", "TD")) {
    # EG, ML and TD estimates share a common plotting routine
    do.call(plotfitRxx, args = list(x = x, xscale = xscale, ...))
  } else {
    # Dispatch to the method-specific routine (plotfitRAR or plotfitRSB)
    do.call(paste0("plotfitR", x$method.code),
            args = list(x = x, xscale = xscale, SB.dist = SB.dist, ...))
  }
}
#' @title
#' Internal plotfit method for EG, ML and TD estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is
#' EG, ML or TD.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.EG()], [est.R0.ML()] or [est.R0.TD()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fortnight), `m` (month).
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# Generic EG, ML and TD plotfit
plotfitRxx <- function(
x,
xscale,
...
)
# Code
{
epid <- x$epid
#Get the begin/end indices of the data window used for the fit
begin <- x$begin
begin.nb <- x$begin.nb
end <- x$end
end.nb <- x$end.nb
epid.used.for.fit <- list(incid=epid$incid[begin.nb:end.nb], t=epid$t[begin.nb:end.nb])
#Plot the whole original epidemic data (step curve)
plot(epid$t,epid$incid, xlab="Time",ylab="Incidence",t='s', xaxt="n", main=paste("Epidemic curve & model (", x$method,")"), ...)
#Add a line showing the model-predicted incidence over the fit window
lines(epid.used.for.fit$t,x$pred,col='red')
#Highlight the observed points used for the fit
points(epid.used.for.fit$t,epid.used.for.fit$incid,pch=19)
#Finally, X-Axis labels (scaled by the requested time unit)
div <- get.scale(xscale)
#Where should labels be on axis
atLab <- pretty(epid$t, n=length(epid$t)/div)
#What should labels say
lab <- format(pretty(epid$t, n=length(epid$t)/div))
axis(1, at=atLab, labels=lab)
}
#' @title
#' Internal plotfit method for AR estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is AR.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.AR()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fortnight), `m` (month).
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# AR plotfit
plotfitRAR <- function(
x,
xscale,
...
)
# Code
{
epid <- x$epid
epid.orig <- x$epid.orig
epid.used.for.fit <- list(incid=epid.orig$incid, t=epid.orig$t)
#Plot the whole original epidemic data (step curve; AR has no predicted series)
plot(epid$t,epid$incid, xlab="Time",ylab="Incidence",t='s', xaxt="n", main="Epidemic curve (Attack Rate)", ...)
#Highlight the original observed points
points(epid.used.for.fit$t,epid.used.for.fit$incid,pch=19)
#Finally, X-Axis labels (scaled by the requested time unit)
div <- get.scale(xscale)
#Where should labels be on axis
atLab <- pretty(epid$t, n=length(epid$t)/div)
#What should labels say
lab <- format(pretty(epid$t, n=length(epid$t)/div))
axis(1, at=atLab, labels=lab)
}
#' @title
#' Internal plotfit method for SB estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is SB.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.SB()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fortnight), `m` (month).
#' @param SB.dist Boolean. Should the R distribution throughout the epidemic be plotted for the SB method (defaults to `TRUE`) ?
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# SB plotfit
plotfitRSB <- function(
x,
xscale,
SB.dist,
...
)
# Code
{
epid <- x$epid
#Get data used for the fit
begin <- x$begin
begin.nb <- x$begin.nb
end <- x$end
end.nb <- x$end.nb
epid.used.for.fit <- list(incid=epid$incid[begin.nb:end.nb], t=epid$t[begin.nb:end.nb])
#Plot the whole original epidemic data
plot(epid$t,epid$incid, xlab="Time",ylab="Incidence",t='s', xaxt="n", main=paste("Epidemic curve & model (", x$method,")"), ...)
#Add a line showing predicted simulation
lines(epid.used.for.fit$t,x$pred,col='red')
#Highlight the original points
points(epid.used.for.fit$t,epid.used.for.fit$incid,pch=19)
#Finally, X-Axis labels
div <- get.scale(xscale)
#Where should labels be on axis
atLab <- pretty(epid$t, n=length(epid$t)/div)
#What should labels say
lab <- format(pretty(epid$t, n=length(epid$t)/div))
axis(1, at=atLab, labels=lab)
#When plotting Bayesian, if SB.dist is enabled, plot some R distributions throughout the epidemic
if (SB.dist == TRUE) {
#x11()
dev.new()
split.screen(c(3,3))
if (end.nb-begin.nb>8) {
num.to.plot <- c(1, rep(NA, 8))
}
else {
num.to.plot <- c(begin.nb:end.nb)
}
for (i in 1:length(num.to.plot)) {
if (i == 1) {
screen(1)
plot(y=x$proba.Rt[[num.to.plot[i]]], x=seq(from=0, to=(length(x$proba.Rt[[num.to.plot[i]]])/100-0.01), by=0.01), xlab="R value", ylab="PDF", type="l", main=paste("t=",num.to.plot[i]), ...)
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.025)-1)/100, col="red", lty="dotted")
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.975)-1)/100, col="red", lty="dotted")
next
}
if (is.na(num.to.plot[i])) {
num.to.plot[i] <- num.to.plot[i-1] + floor(length(x$epid$incid[begin.nb:end.nb])/9)
}
screen(i)
plot(x$proba.Rt[[num.to.plot[i]]], x=seq(from=0, to=(length(x$proba.Rt[[num.to.plot[i]]])/100-0.01), by=0.01), xlim=c(0,((length(x$proba.Rt[[num.to.plot[i]]]) - which.max(rev(x$proba.Rt[[num.to.plot[i]]])>0) + 1))/100 - 0.01), xlab="R value", ylab="PDF", pch=NA_integer_, type="l", main=paste("t=",num.to.plot[i]), ...)
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.025)-1)/100, col="red", lty="dotted")
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.975)-1)/100, col="red", lty="dotted")
}
#Closing devices
close.screen(all.screens=TRUE)
}
}
| /R/plotfit.R0.R.R | no_license | tobadia/R0 | R | false | false | 8,235 | r | # Name : plotfit.R0.R
# Desc : A set of tweaked "plot" functions designed to easily plot R objects
# from any of the supported estimation methods.
# Date : 2011/11/09
# Update : 2023/03/03
# Author : Boelle, Obadia
###############################################################################
#' @title
#' Plot a model fit for `R0.R` objects
#'
#' @description
#' Plot the fit of a single model output to epidemic data.
#'
#' @details
#' For internal use. This function is called by the [plotfit()] S3 method.
#' Depending on the estimation method, either [plotfitRxx()], [plotfitRAR()] or
#' [plotfitRSB()] will be called.
#'
#' @param x An output of `est.R0.xx()` (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fornight), `m` (month).
#' @param SB.dist Boolean. Should the R distirbution throughout the epidemic be plotted for the SB method (defaults to `TRUE`) ?
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @importFrom grDevices dev.new
#' @importFrom graphics abline axis close.screen split.screen screen lines points
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# Function declaration
plotfit.R0.R <- function(
x,
xscale = "w",
SB.dist = TRUE,
...
)
# Code
{
#Make sure x is of class "R0.R"
if (!inherits(x, "R0.R")) stop("'x' must be of class R0.R")
if (x$method.code %in% c("EG","ML","TD")) {
do.call(plotfitRxx, args=list(x=x, xscale=xscale, ...) )
}
else {
do.call(paste("plotfitR",x$method.code,sep=""), args=list(x=x, xscale=xscale, SB.dist=SB.dist, ...) )
}
}
#' @title
#' Internal plotfit method for EG, ML and TD estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is
#' EG, ML or TD.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.EG()], [est.R0.ML()] or [est.R0.TD()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fornight), `m` (month).
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# Generic EG, ML and TD plotfit
plotfitRxx <- function(
x,
xscale,
...
)
# Code
{
epid <- x$epid
#Get data used for the fit
begin <- x$begin
begin.nb <- x$begin.nb
end <- x$end
end.nb <- x$end.nb
epid.used.for.fit <- list(incid=epid$incid[begin.nb:end.nb], t=epid$t[begin.nb:end.nb])
#Plot the whole original epidemic data
plot(epid$t,epid$incid, xlab="Time",ylab="Incidence",t='s', xaxt="n", main=paste("Epidemic curve & model (", x$method,")"), ...)
#Add a line showing predicted simulation
lines(epid.used.for.fit$t,x$pred,col='red')
#Highlight the original points
points(epid.used.for.fit$t,epid.used.for.fit$incid,pch=19)
#Finally, X-Axis labels
div <- get.scale(xscale)
#Where should labels be on axis
atLab <- pretty(epid$t, n=length(epid$t)/div)
#What should labels say
lab <- format(pretty(epid$t, n=length(epid$t)/div))
axis(1, at=atLab, labels=lab)
}
#' @title
#' Internal plotfit method for AR estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is AR.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.AR()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fornight), `m` (month).
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# AR plotfit
plotfitRAR <- function(x, xscale, ...)
{
  # Full epidemic curve and the original observations stored by est.R0.AR
  epid <- x$epid
  original.epid <- x$epid.orig

  # Step curve of the complete incidence series; x-axis is drawn manually below
  plot(epid$t, epid$incid,
       xlab = "Time", ylab = "Incidence", t = 's', xaxt = "n",
       main = "Epidemic curve (Attack Rate)", ...)

  # Overlay the original data points as filled circles
  points(original.epid$t, original.epid$incid, pch = 19)

  # Build the x-axis: tick spacing is derived from the requested time scale
  div <- get.scale(xscale)
  tick.pos <- pretty(epid$t, n = length(epid$t) / div)
  axis(1, at = tick.pos, labels = format(tick.pos))
}
#' @title
#' Internal plotfit method for AR estimates
#'
#' @description
#' Plot the fit of a single model output to epidemic data when the method is SB.
#'
#' @details
#' For internal use. This function is called by the [plotfit.R0.R()].
#'
#' @param x An output of [est.R0.SB()] (class `R0.R`).
#' @param xscale Scale to be adjusted on x-axis. Can be `d` (day), `w` (week (default)), `f` (fortnight), `m` (month).
#' @param SB.dist Boolean. Should the R distribution throughout the epidemic be plotted for the SB method (defaults to `TRUE`) ?
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effect. Draws the fit of one estimation method to the data.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# SB plotfit
plotfitRSB <- function(
x,
xscale,
SB.dist,
...
)
# Code
{
# x is an est.R0.SB output. proba.Rt holds, per time step, the distribution of
# R on a 0.01-wide grid: index j of proba.Rt[[t]] corresponds to R = (j-1)/100.
epid <- x$epid
#Get data used for the fit
begin <- x$begin
begin.nb <- x$begin.nb
end <- x$end
end.nb <- x$end.nb
epid.used.for.fit <- list(incid=epid$incid[begin.nb:end.nb], t=epid$t[begin.nb:end.nb])
#Plot the whole original epidemic data
plot(epid$t,epid$incid, xlab="Time",ylab="Incidence",t='s', xaxt="n", main=paste("Epidemic curve & model (", x$method,")"), ...)
#Add a line showing predicted simulation
lines(epid.used.for.fit$t,x$pred,col='red')
#Highlight the original points
points(epid.used.for.fit$t,epid.used.for.fit$incid,pch=19)
#Finally, X-Axis labels
div <- get.scale(xscale)
#Where should labels be on axis
atLab <- pretty(epid$t, n=length(epid$t)/div)
#What should labels say
lab <- format(pretty(epid$t, n=length(epid$t)/div))
axis(1, at=atLab, labels=lab)
#When plotting Bayesian, if SB.dist is enabled, plot some R distributions throughout the epidemic
if (SB.dist == TRUE) {
#x11()
# Open a second graphics device and lay out a 3x3 grid of small panels.
dev.new()
split.screen(c(3,3))
# If more than 9 time steps were fitted, only the first panel is fixed (t = 1);
# the remaining 8 slots are NA placeholders filled at evenly spaced times below.
if (end.nb-begin.nb>8) {
num.to.plot <- c(1, rep(NA, 8))
}
else {
num.to.plot <- c(begin.nb:end.nb)
}
for (i in 1:length(num.to.plot)) {
if (i == 1) {
screen(1)
# First panel: full distribution of R at the first plotted time step.
plot(y=x$proba.Rt[[num.to.plot[i]]], x=seq(from=0, to=(length(x$proba.Rt[[num.to.plot[i]]])/100-0.01), by=0.01), xlab="R value", ylab="PDF", type="l", main=paste("t=",num.to.plot[i]), ...)
# Dotted red lines mark the 2.5% and 97.5% quantiles of the distribution.
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.025)-1)/100, col="red", lty="dotted")
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.975)-1)/100, col="red", lty="dotted")
next
}
# Fill NA placeholders: advance by ~1/9 of the fitted window for each panel.
if (is.na(num.to.plot[i])) {
num.to.plot[i] <- num.to.plot[i-1] + floor(length(x$epid$incid[begin.nb:end.nb])/9)
}
screen(i)
# xlim is trimmed at the last non-zero probability so the curve fills the panel.
plot(x$proba.Rt[[num.to.plot[i]]], x=seq(from=0, to=(length(x$proba.Rt[[num.to.plot[i]]])/100-0.01), by=0.01), xlim=c(0,((length(x$proba.Rt[[num.to.plot[i]]]) - which.max(rev(x$proba.Rt[[num.to.plot[i]]])>0) + 1))/100 - 0.01), xlab="R value", ylab="PDF", pch=NA_integer_, type="l", main=paste("t=",num.to.plot[i]), ...)
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.025)-1)/100, col="red", lty="dotted")
abline(v=(which.max((cumsum(x$proba.Rt[[num.to.plot[i]]])) >= 0.975)-1)/100, col="red", lty="dotted")
}
#Closing devices
close.screen(all.screens=TRUE)
}
}
|
#######################################################
# apply(): applies a function over the rows (MARGIN = 1)
# or the columns (MARGIN = 2) of a matrix-like object.
#######################################################
test <- matrix(1:12, ncol = 3)
apply(test, 1, sum)  # second argument is the margin: 1 = rows, 2 = columns

# apply() also works on a data.frame (it is coerced to a matrix first)
df_test <- as.data.frame(test)
df_test
apply(df_test, 1, sum)  # same result as with the matrix

#######################################################
# lapply(): applies a function over a vector or list and
# always returns a list.
#######################################################
test2 <- c(5, 23, 3, 1, 5, 2)
test2 <- lapply(test2, function(x) { x * 10 })
unlist(test2)
lapply(test2, nchar)

#######################################################
# sapply(): like lapply(), but simplifies the result to a
# vector or matrix when possible.
#######################################################
x <- sapply(iris[, 1:4], mean)
as.data.frame(x)
as.data.frame(t(x))
sapply(iris, class)  # check the data type of each column

# Bug fix: the anonymous function previously took `X` but compared the global
# `x` (the vector of column means computed above), so every column produced
# the same length-4 result. Compare each column's own values instead.
y <- sapply(iris[, 1:4], function(x) { x > 3 })
y
| /Day003/day3.R | no_license | woons/project_woons | R | false | false | 1,154 | r | #######################################################
#apply함수
#apply함수는 행렬의 행 또는 열 방향으로 특정 함수 적용
#######################################################
test <- matrix(1:12, ncol = 3)
apply(test, 1, sum) #두번째 인자는 행과 열의 방향을 의미 1은 행, 2는 열
#data.frame도 적용 가능한지 테스트
df_test <- as.data.frame(test)
df_test
apply(df_test, 1, sum) #matrix와 동일한 결과
#######################################################
#lpply함수
#lpply함수는 벡터, 리스트 함수 적용한 뒤에 리스트로 반환
#######################################################
test2 <- c(5,23,3,1,5,2)
test2 <- lapply(test2, function(x){x*10})
unlist(test2)
lapply(test2, nchar)
#######################################################
#spply함수
#lappy와 비슷하지만 행렬 혹은 벡터 등의 데이터 타입으로 반환
#######################################################
x <- sapply(iris[,1:4], mean)
as.data.frame(x)
as.data.frame(t(x))
sapply(iris, class) #각 칼럼의 데이터 타입 확인
y <- sapply(iris[,1:4], function(X){x > 3})
y
|
# Exploratory Data Analysis course, plot 1: histogram of Global Active Power
# for 2007-02-01 and 2007-02-02.
setwd("E:/EDA with R/coursera")  # NOTE(review): machine-specific path; adjust as needed

# Missing values are coded as "?" in the raw file; declaring them via
# na.strings keeps Global_active_power numeric instead of a factor.
data <- read.csv("household_power_consumption.txt", sep = ";",
                 na.strings = "?", stringsAsFactors = FALSE)
dat <- data
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
dat <- subset(dat, Date == "2007-02-01" | Date == "2007-02-02")
dat <- subset(dat, !is.na(dat$Date))

x <- as.POSIXct(paste(dat$Date, dat$Time))

png(file = "plot1.png", width = 480, height = 480)
# Bug fix: the original computed as.numeric(factor)/500, which plots factor
# level codes scaled by an arbitrary constant, not kilowatts. With the column
# read as numeric, the raw values can be histogrammed directly.
hist(as.numeric(dat$Global_active_power),
     xlab = "Global Active power(kilowatts)", col = "Red",
     main = "Global Active Power")
dev.off()
| /plot1.r | no_license | harsha-yel/ExData_Plotting1 | R | false | false | 422 | r | setwd("E:/EDA with R/coursera")
data<-read.csv("household_power_consumption.txt",sep=';')
dat<-data
dat$Date<-as.Date(dat$Date,"%d/%m/%Y")
dat<-subset(dat,Date=="2007-02-01" | Date=="2007-02-02")
dat<-subset(dat,!is.na(dat$Date))
x<-as.POSIXct(paste(dat$Date,dat$Time))
png(file="plot1.png",width=480,height=480)
hist(as.numeric(dat$Global_active_power)/500,xlab="Global Active power(kilowatts)",col="Red")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlayByPlayBoxScore.R
\name{season_play_by_play}
\alias{season_play_by_play}
\title{Parsed Descriptive Play-by-Play Function for a Full Season}
\usage{
season_play_by_play(Season)
}
\arguments{
\item{Season}{(numeric) A 4-digit year corresponding to an NFL season of
interest}
}
\value{
A dataframe that contains all the play-by-play information for a single
season. This includes all the 62 variables collected in our
game_play_by_play function (see documentation for game_play_by_play for
details)
}
\description{
This function outputs all plays of an entire season in one dataframe.
It calls the game_play_by_play function and applies it over every
game in the season by extracting each game ID and url in the specified season.
}
\details{
This function calls the extracting_gameids,
proper_jsonurl_formatting, and game_play_by_play to aggregate all the plays
from a given season. This dataframe is prime for use with the dplyr and
plyr packages.
}
\examples{
# Play-by-Play Data from All games in 2010
pbp.data.2010 <- season_play_by_play(2010)
# Looking at all Baltimore Ravens Offensive Plays
subset(pbp.data.2010, posteam == "BAL")
}
| /man/season_play_by_play.Rd | no_license | bensoltoff/nflscrapR | R | false | true | 1,240 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlayByPlayBoxScore.R
\name{season_play_by_play}
\alias{season_play_by_play}
\title{Parsed Descriptive Play-by-Play Function for a Full Season}
\usage{
season_play_by_play(Season)
}
\arguments{
\item{Season}{(numeric) A 4-digit year corresponding to an NFL season of
interest}
}
\value{
A dataframe contains all the play-by-play information for a single
season. This includes all the 62 variables collected in our
game_play_by_play function (see documentation for game_play_by_play for
details)
}
\description{
This function outputs all plays of an entire season in one dataframe.
It calls the game_play_by_play function and applies it over every
game in the season by extracting each game ID and url in the specified season.
}
\details{
This function calls the extracting_gameids,
proper_jsonurl_formatting, and game_play_by_play to aggregate all the plays
from a given season. This dataframe is prime for use with the dplyr and
plyr packages.
}
\examples{
# Play-by-Play Data from All games in 2010
pbp.data.2010 <- season_play_by_play(2010)
# Looking at all Baltimore Ravens Offensive Plays
subset(pbp.data.2010, posteam = "BAL")
}
|
# Hsiang-Leng Wang
# 2019/11/07
# RStudio Exercise 2
# ------------------------
# Data wrangling
# read the data into memory
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
# Look at the dimensions of the data
dim(lrn14)
# Look at the structure of the data
str(lrn14)
# This data sheet is a 183 rows*60 columns(variables) table.
# Columns from 1 to 59 consist of integer numbers, and columns from 57 to 59 display the information about age, attitude and points as shown on the header of the table.
# Column 60 display the factor information about gender as "F" and "M".
# ------------------------
# The column Attitude in lrn14 is a sum of 10 questions related to students attitude towards statistics,
# each measured on the Likert scale (1-5). Here we'll scale the combination variable back to the 1-5 scale.
# create column 'attitude' by scaling the column "Attitude"
lrn14$attitude <- lrn14$Attitude / 10
# Access the dplyr library
library(dplyr)
# questions related to deep, surface and strategic learning
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30","D06", "D15", "D23", "D31")
surface_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
strategic_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")
# select the columns related to deep learning and create column 'deep' by averaging
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
# select the columns related to surface learning and create column 'surf' by averaging
surface_columns <- select(lrn14, one_of(surface_questions))
lrn14$surf <- rowMeans(surface_columns)
# select the columns related to strategic learning and create column 'stra' by averaging
strategic_columns <- select(lrn14, one_of(strategic_questions))
lrn14$stra <- rowMeans(strategic_columns)
# choose a handful of columns to keep
keep_columns <- c("gender","Age","attitude", "deep", "stra", "surf", "Points")
# select the 'keep_columns' to create a new dataset
learning2014 <- select(lrn14,one_of(keep_columns))
# select rows where points is greater than zero
learning2014 <- filter(learning2014, Points > 0)
# see the stucture of the new dataset
str(learning2014)
# ------------------------
# set working directory
setwd("/Users/mac/IODS-project/data")  # NOTE(review): machine-specific path
# Bug fix: write.table() defaults to sep = " ", but the file was read back
# below with sep = "\t", which collapsed each row into a single column.
# Write tab-separated (and without row names) so the round trip is lossless.
write.table(learning2014, file = "learning2014.txt", sep = "\t", row.names = FALSE)
# read the table again to verify the saved file
lrn14_2 <- read.table("learning2014.txt", sep = "\t", header=TRUE)
lrn14_2
| /data/create_learning2014.R | no_license | hlengw/IODS-project | R | false | false | 2,594 | r | # Hsiang-Leng Wang
# 2019/11/07
# RStudio Exercise 2
# ------------------------
# Data wrangling
# read the data into memory
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
# Look at the dimensions of the data
dim(lrn14)
# Look at the structure of the data
str(lrn14)
# This data sheet is a 183 rows*60 columns(variables) table.
# Columns from 1 to 59 consist of integer numbers, and columns from 57 to 59 display the information about age, attitude and points as shown on the header of the table.
# Column 60 display the factor information about gender as "F" and "M".
# ------------------------
# The column Attitude in lrn14 is a sum of 10 questions related to students attitude towards statistics,
# each measured on the Likert scale (1-5). Here we'll scale the combination variable back to the 1-5 scale.
# create column 'attitude' by scaling the column "Attitude"
lrn14$attitude <- lrn14$Attitude / 10
# Access the dplyr library
library(dplyr)
# questions related to deep, surface and strategic learning
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30","D06", "D15", "D23", "D31")
surface_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
strategic_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")
# select the columns related to deep learning and create column 'deep' by averaging
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
# select the columns related to surface learning and create column 'surf' by averaging
surface_columns <- select(lrn14, one_of(surface_questions))
lrn14$surf <- rowMeans(surface_columns)
# select the columns related to strategic learning and create column 'stra' by averaging
strategic_columns <- select(lrn14, one_of(strategic_questions))
lrn14$stra <- rowMeans(strategic_columns)
# choose a handful of columns to keep
keep_columns <- c("gender","Age","attitude", "deep", "stra", "surf", "Points")
# select the 'keep_columns' to create a new dataset
learning2014 <- select(lrn14,one_of(keep_columns))
# select rows where points is greater than zero
learning2014 <- filter(learning2014, Points > 0)
# see the stucture of the new dataset
str(learning2014)
# ------------------------
# set working directory
setwd("/Users/mac/IODS-project/data")
# write and save table to working directory
write.table(learning2014, file = "learning2014.txt")
# read table again
lrn14_2 <- read.table("learning2014.txt", sep = "\t", header=TRUE)
lrn14_2
|
# NOTE(review): this script calls read_csv() and uses %>% but loads no
# packages; it presumably relies on library(readr)/library(dplyr) (or
# tidyverse) being attached by the session -- confirm before running alone.
sea <- read_csv("data-processed/sea_processed2.csv")

# Mean cell volume at 5 degrees, restricted to > 30 days after inoculation
sea %>%
filter(temperature == 5) %>%
filter(time_since_innoc_days > 30) %>%
summarise(mean_size = mean(cell_volume))

# Per-replicate mean cell volume (below 32 degrees, > 25 days), then a linear
# regression of mean size on temperature (plotting code left commented out)
sea %>%
filter(temperature < 32) %>%
filter(time_since_innoc_days > 25) %>%
group_by(rep, temperature) %>%
summarise(mean_size = mean(cell_volume)) %>%
# ungroup() %>%
# ggplot(aes(x = temperature, y = mean_size)) +geom_point() +
# geom_smooth(method = "lm")
lm(mean_size ~ temperature, data = .) %>% summary()

sea %>%
filter(temperature < 32)

# NOTE(review): presumably slope/intercept from the lm above expressed as a
# percentage change -- verify these hard-coded numbers against the model output
(-15.991 /1173)*100
| /Rscripts/21_messing_around.R | no_license | OConnor-Lab-UBC/J-TEMP | R | false | false | 569 | r |
sea <- read_csv("data-processed/sea_processed2.csv")
sea %>%
filter(temperature == 5) %>%
filter(time_since_innoc_days > 30) %>%
summarise(mean_size = mean(cell_volume))
sea %>%
filter(temperature < 32) %>%
filter(time_since_innoc_days > 25) %>%
group_by(rep, temperature) %>%
summarise(mean_size = mean(cell_volume)) %>%
# ungroup() %>%
# ggplot(aes(x = temperature, y = mean_size)) +geom_point() +
# geom_smooth(method = "lm")
lm(mean_size ~ temperature, data = .) %>% summary()
sea %>%
filter(temperature < 32)
(-15.991 /1173)*100
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_runner_internal.R
\name{update_temporal_params}
\alias{update_temporal_params}
\title{Updates parameters on a certain date}
\usage{
update_temporal_params(pars, pars_to_update, sd_update_metrics = NULL)
}
\arguments{
\item{pars}{existing parameters}
\item{pars_to_update}{one item from pars_temporal which are the parameters to update}
\item{sd_update_metrics}{optionally the current metrics for determining sd dynamically}
}
\value{
updated parameters
}
\description{
Updates parameters on a certain date
}
| /HutchCOVID/man/update_temporal_params.Rd | no_license | FredHutch/COVID_modeling_schools | R | false | true | 594 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_runner_internal.R
\name{update_temporal_params}
\alias{update_temporal_params}
\title{Updates parameters on a certain date}
\usage{
update_temporal_params(pars, pars_to_update, sd_update_metrics = NULL)
}
\arguments{
\item{pars}{existing parameters}
\item{pars_to_update}{one item from pars_temporal which are the parameters to update}
\item{sd_update_metrics}{optionally the current metrics for determining sd dynamically}
}
\value{
updated parameters
}
\description{
Updates parameters on a certain date
}
|
# Compute, for every monitor whose count of completely observed cases exceeds
# `threshold`, the correlation between its sulfate and nitrate measurements.
#
# directory: folder containing one CSV file per monitor.
# threshold: minimum number of complete cases required (default 0).
# Returns a numeric vector of correlations (possibly empty).
# Relies on complete() (defined elsewhere) for per-monitor complete-case counts.
corr <- function(directory, threshold = 0) {
    fileList <- list.files(directory, pattern = ".csv", full.names = TRUE)
    df <- complete(directory)
    # Monitors with more complete observations than the threshold
    ids <- df[df["nobs"] > threshold,]$id
    # NOTE(review): fileList[id] assumes the alphabetical file order matches
    # the monitor id -- true for the course data (001.csv ... 332.csv).
    # Preallocate instead of growing the result vector inside the loop.
    corrr <- numeric(length(ids))
    for (k in seq_along(ids)) {
        data <- read.csv(fileList[ids[k]])
        complete_data <- data[complete.cases(data),]
        corrr[k] <- cor(complete_data$sulfate, complete_data$nitrate)
    }
    corrr
}
cr <- corr("specdata" , 150)
cr
# summary(cr)
| /corr_week_2.R | no_license | yeasinopu17/r_coursera | R | false | false | 589 | r | corr <- function(directory, threshold = 0) {
fileList <- list.files(directory, pattern = ".csv", full.names = TRUE);
df <- complete(directory)
# print(str(df)) # 332 obs
ids <- df[df["nobs"] > threshold,]$id # select dataframe rows , > threshold
# print(ids)
# str(ids) # 323 obs
corrr <- numeric()
for (i in ids) {
data <- read.csv(fileList[i])
complete_data <- data[complete.cases(data),]
corrr <- c(corrr , cor(complete_data$sulfate, complete_data$nitrate))
}
# print(complete_data)
return(corrr)
}
cr <- corr("specdata" , 150)
cr
# summary(cr)
|
########################################################################################################
# Process to generate a NetCDF file with raw data (L0 product) using location data
# Project: SOCIB Seaturtle
# Author: Chloé Dalleau
# Co-author: David March
# Date: 08/08/2016
#
# Description:
# This script is the first step of a global processing about the analysis of the turtles' trajectory.
# The following steps create a NetCDF file using the raw data of the turtle:
# - (1) Data import from CSV files or from Wildlife Computers (WC) data portal: time, latitude (Argos/GPS),
# longitude (Argos/GPS), location quality, number of satellites available from GPS, residual from GPS, temperature, depth, battery.
# Please note that a variable cannot be chosen several times from different csv files (except time and tagid).
# - (2) CSV file (generated by DAP software from Wildlife Computers) merging in a NetCDF file
#
# The process uses two additional CSV files :
# * meta_data: a file with all the information about the turtles such as: name, id, date deployment ...
# * allparameter: list of the variables or global attributes which will be created in the NetCDF file. This file allows an
# automation of the creation of the NetCDF file. It contains:
# - the name of the variable
# - the long name
# - the unit
# - the details
# - the NetCDF variable type (NC_DOUBLE,NC_CHAR,NC_INT ...)
# - specify if the variable is used in an another process (here : L0product, L1product and L2product)
# - if the type is NC_CHAR, specify the dimension
# - the dimension used in the NetCDF file in the different processes (here : dimL0product, dimL1product and dimL2product).
# Such as :
# var_name,long_name,units,details,value_type,L0product,L1product,L2product,dimCHAR,dimL0product,dimL1product,dimL2product
# time,time,seconds since 1970-01-01 00:00:00,,NC_INT,x,,,,time,,
# source_loc,source of the location,,,NC_CHAR,x,x,,max_string_32,time,time,
# NC_GLOBAL,otherTypeLoc,,,NC_CHAR,x,,,,,,
#
#
# WARNING:
# - There are differences in date formant between CSV files (eg. Locations vs. Series) and tutles (eg. processed by WC Data portal vs DAP).
# Main problem is the name of the month (eg. 'ago' or 'aug' for Spanish or English, respectively). Check this in Step 2. Suggestion: process all tracks in English (check manual for DAP)
# - We remove duplicates in time for each CSV file processed. Some duplications in time may have different data. We delete one of them. In future versions, check if we can use any quality control criteria.
# - Current script works for tags with temperature and pressure sensors (ie. Series.csv) and with battery information (Status.csv).
#
#
# Sources:
# - Michna, P. & Milton Woods. RNetCDF - A Package for Reading and Writing NetCDF Datasets. (2013).
# - Michna, P. & Milton Woods. Package 'RNetCDF'. (2016).
# - units: http://www.unidata.ucar.edu/software/udunits/udunits-2.2.20/udunits/udunits2-derived.xml
# - standard names: http://cfconventions.org/Data/cf-standard-names/34/build/cf-standard-name-table.html
########################################################################################################
### Remove the previous data
rm(list=ls())
### Import libraries
library(RNetCDF)
library(lubridate)
library(curl)
#library(wcUtils) # used in step 1, taken from https://github.com/jmlondon/wcUtils
### Set the turtle ID (Ptt) and if the data should be downloaded from the WC data portal
tag_id <- 00000 # modify the tag_id according to your turtle
download_from_WCDataPortal <- "no" # if "yes", step 1 will be processed
lan.setlocale = "English" # see parse_date_time
############################### Step 1: creation of the CSV file from WC Data Portal ##################
if (download_from_WCDataPortal == "yes") {
# Define location and file with auth keys
keyfile = "keyfile.json"
# User ID
owner = "xxxxxxxxxxxxxxxxxxxxxx" # this ID was obtained after inspecting the Data Portal with Chrome developer tools
# Folder to download data
path = paste("data/rawinput/",tag_id,"/.", sep= "")
# Argos Platform ID
ptt = tag_id
## Define function to dowload data
# Download and extract a Wildlife Computers deployment for one Argos PTT.
# keyfile: path to the JSON auth-key file; owner: WC portal owner id;
# path: folder where the CSV files are extracted; ptt: Argos platform id.
# Relies on wcPOST / wcGetPttID / wcGetZip from the wcUtils package.
wcGetPttData <- function (keyfile, owner, path, ptt){
# Get deployment information
params = paste("action=get_deployments&owner_id=", owner, sep="")
wcpost <- wcPOST(keyfile= keyfile, params = params)
# Get PTT ID
id <- wcGetPttID(wcpost, ptt = ptt)$ids
# Download and extract ZIP files to obtain CSV files
zipPath <- wcGetZip(id, keyfile = keyfile) # download zip file in a temporal folder
unzip(zipPath, exdir = path) # extract all .CSV files from ZIP
# file.copy(from = file,to = newfile,overwrite = TRUE) # copy the zip in rawarchive
}
wcGetPttData(keyfile, owner, path, ptt)
}
###################################### Step 2: Import data ###########################################
### Meta data import
meta_data <- "data/turtles_metadata.csv"
meta_data <- read.csv(meta_data, sep=",", dec=".", header=TRUE, fill=TRUE)
colnames(meta_data)<-c("argosid", "name", "dateDeployment","refMaxSeaTemp","refMinSeaTemp","refMaxDepth","refMinDepth", "title", "author", "publisher","fileVersion","otherTypeLoc")
meta_data$argosid<-as.character(meta_data$argosid)
meta_data$name<-as.character(meta_data$name)
meta_data$dateDeployment <- as.POSIXct(meta_data$dateDeployment, "%Y-%m-%d %H:%M:%S", tz="GMT")
meta_data$refMaxSeaTemp<-as.numeric(as.character(meta_data$refMaxSeaTemp))
meta_data$refMinSeaTemp<-as.numeric(as.character(meta_data$refMinSeaTemp))
meta_data$refMaxDepth<-as.numeric(as.character(meta_data$refMaxDepth))
meta_data$refMinDepth<-as.numeric(as.character(meta_data$refMinDepth))
meta_data$title<-as.character(meta_data$title)
meta_data$author<-as.character(meta_data$author)
meta_data$publisher<-as.character(meta_data$publisher)
meta_data$fileVersion<-as.character(meta_data$fileVersion)
meta_data$otherTypeLoc<-as.character(meta_data$otherTypeLoc)
### Meta data selection using the turtle ID
select_data <- meta_data[which((meta_data$argosid == tag_id) == "TRUE"),]
### Select correct CSV file
## Locations : location of the turtle and quality location from Argos
## Series : temperature and depth and their errors
## Status : battery voltage just prior to transmission
## 1-FastGPS : location of the turtle and quality location from GPS
if (select_data$otherTypeLoc == "GPS") {
name_file <- c("Locations","Status","Series", "1-FastGPS") # Warning : the order is important
name_data <- c("loc_data","status_data","series_data","gps_data") # Warning : the order is important
} else {
name_file <- c("Locations","Status","Series")
name_data <- c("loc_data","status_data","series_data")
}
level <- "L0"
### Data import from CSV files using either WC data portal or the file version.
dirfile <- dir(path=paste("data/rawinput/",tag_id,sep=""), pattern="*.csv$") # select all the names of csv files contained in the path chosen
file <- c()
# Warning locations != Locations
# Warning let ".csv", if no select Series and SeriesRange
for (i in 1:length(name_file)) file[i] <- dirfile[grep(paste( tag_id,"-",name_file[i],".csv", sep = ""),dirfile)] # select the names of the csv files chosen in "name_file"
for (i in 1:length(file)){
if (name_data[i] == "gps_data"){
ff <- paste("data/rawinput/",tag_id,"/",file[i],sep="")
data <- read.csv(ff, sep=",", dec=".", header=TRUE, skip = 3)
assign(name_data[i],data)
rm("data")
} else {
ff <- paste("data/rawinput/",tag_id,"/",file[i],sep="")
data <- read.csv(ff, sep=",", dec=".", header=TRUE)
assign(name_data[i],data)
rm("data")
}
}
### Select interesting variables
## Locations.csv
loc_data <- loc_data[,c("DeployID","Date","Quality","Latitude","Longitude")]
colnames(loc_data)<- c("tagid", "time", "lc", "lat", "lon")
loc_data$tagid<-as.numeric(as.character(loc_data$tagid))
loc_data$time <- parse_date_time(loc_data$time, c("HMS dbY", "Ymd HMS"), locale=Sys.setlocale("LC_TIME", "English"), tz="GMT")
loc_data$lc<-as.character(loc_data$lc)
loc_data$lat<-as.numeric(as.character(loc_data$lat))
loc_data$lon<-as.numeric(as.character(loc_data$lon))
## Series.csv
series_data$Date <- parse_date_time(paste(series_data$Time, series_data$Day, sep=" "), c("HMS dbY", "Ymd HMS"), locale=Sys.setlocale("LC_TIME", lan.setlocale), tz="GMT")
series_data <- series_data[,c("DeployID","Date","Temperature","TRange","Depth","DRange")]
colnames(series_data) <- c("tagid", "time", "temp", "errorT", "depth", "errorD")
series_data$tagid <- as.numeric(as.character(series_data$tagid))
series_data$temp <- as.numeric(as.character(series_data$temp))
series_data$errorT <- as.numeric(as.character(series_data$errorT))
series_data$depth <- as.numeric(as.character(series_data$depth))
series_data$errorD <- as.numeric(as.character(series_data$errorD))
## Status.csv
status_data$Date <- status_data$Received
status_data <- status_data[,c("DeployID","Date","BattVoltage")]
colnames(status_data) <- c("tagid", "time", "batt")
status_data$tagid <- as.numeric(as.character(status_data$tagid))
status_data$time <- parse_date_time(status_data$time, c("HMS dbY", "Ymd HMS"), locale=Sys.setlocale("LC_TIME", "English"), tz="GMT")
status_data$batt <- as.numeric(as.character(status_data$batt))
## GPS.csv
if (select_data$otherTypeLoc == "GPS") {
gps_data$Date <- parse_date_time(paste(gps_data$Time, gps_data$Day, sep=" "), c("HMS dbY", "Ymd HMS"), locale=Sys.setlocale("LC_TIME", "English"), tz="GMT")
gps_data <- gps_data[,c("Name","Date","Satellites","Residual","Latitude","Longitude")]
colnames(gps_data) <- c("tagid", "time", "satellites","residual", "lat_gps", "lon_gps")
gps_data$tagid <- as.numeric(as.character(gps_data$tagid))
gps_data$time <- parse_date_time(gps_data$time, c("HMS dbY", "Ymd HMS"), locale=Sys.setlocale("LC_TIME", "English"), tz="GMT")
gps_data$satellites <- as.numeric(as.character(gps_data$satellites))
gps_data$residual <- as.numeric(as.character(gps_data$residual))
gps_data$lat_gps <- as.numeric(as.character(gps_data$lat_gps))
gps_data$lon_gps <- as.numeric(as.character(gps_data$lon_gps))
}
### Removing locations from testing environments before releasing
loc_data <- loc_data[which((loc_data$time > select_data$dateDeployment) == "TRUE"),]
series_data <- series_data[which((series_data$time > select_data$dateDeployment) == "TRUE"),]
status_data <- status_data[which((status_data$time > select_data$dateDeployment) == "TRUE"),]
if (select_data$otherTypeLoc == "GPS") gps_data <- gps_data[which((gps_data$time > select_data$dateDeployment) == "TRUE"),]
### Remove duplicate time measurements
if (length(which(duplicated(loc_data$time)))!=0) loc_data <- loc_data[-which(duplicated(loc_data$time)),]
if (length(which(duplicated(series_data$time)))!=0) series_data <- series_data[-which(duplicated(series_data$time)),]
if (length(which(duplicated(status_data$time)))!=0) status_data <- status_data[-which(duplicated(status_data$time)),]
if (select_data$otherTypeLoc == "GPS") {
if (length(which(duplicated(gps_data$time)))!=0) gps_data <- gps_data[-which(duplicated(gps_data$time)),]
}
### Merge the latitude and longitude observations from Argos and GPS
## If time measurements exist at the Argo and the GPS source, the latitude and the longitude from the GPS source are selected
if (select_data$otherTypeLoc == "GPS") {
mergeloc <- merge(loc_data,gps_data, all = T)
colnames(mergeloc) <- c("tagid","time","lc","lat_argos","lon_argos","satellites","residual","lat_gps","lon_gps")
mergeloc$lat <- NA
mergeloc$lon <- NA
mergeloc$source_loc <- name_file[1]
dimmerge <- dim(mergeloc)[1]
for (i in 1:dimmerge){
if (length(which(mergeloc$time[i] == gps_data$time)) != 0) {
mergeloc[i,c("lat","lon")] <- mergeloc[i,c("lat_gps","lon_gps")]
mergeloc[i,c("lc")] <- "gps"
mergeloc[i,c("source_loc")] <- name_file[4] # Warning : be careful with the order
} else {
mergeloc[i,c("lat","lon")] <- mergeloc[i,c("lat_argos","lon_argos")]
}
}
mergeloc <- mergeloc[,c("tagid","time","lc","satellites","residual","lat","lon","source_loc")]
} else {
mergeloc <- loc_data
mergeloc$source_loc <- rep(name_file[1]) # Warning : be careful with the order
}
### Add a column to describe the source data type for Series and Status
series_data$source_series <- rep(name_file[3]) # Warning : be careful with the order
status_data$source_status <- rep(name_file[2]) # Warning : be careful with the order
#######################################################################################################
###################################### Step 3: Merge existing data ####################################
### Merge the observations with the same time - Works only if a variable is chosen only once from csv
## files (except time and tagid)
fusion <- merge(mergeloc,series_data, all=TRUE)
fusion <- merge(fusion,status_data, all=TRUE)
#######################################################################################################
###################################### Step 4: Data export in a csv file ####################################
output.file <- paste("data/output/",tag_id,"/",level,"/",tag_id,"-",level,".csv", sep= "")
write.table(fusion, output.file, row.names=FALSE, sep=",", dec=".")
#######################################################################################################
###################################### Step 5: Prepare the data for the NetCDF document ###############
## To add a new variable with attributes (long_name,units,details) in the NetCDF: modify the previous step and the CSV
## file parameter_netcdf
## To add other attributes such as maximum, minimum, add them manualy (see also maximum and minimum for the temperature)
## The dimensions and the details for the NC_Global are inserted manually
### Import parameters from parameter_netcdf.csv
## WARNING the name of the data created in the previous step have to be the same as the var_name in
## allparameter (or long_name for NC_GLOBAL).
allparameter <- "data/parameter_netcdf.csv"
allparameter <- read.csv(allparameter, sep = ",", dec = ".", header = TRUE, fill = TRUE)
if (select_data$otherTypeLoc != "GPS") {
  ## Drop the GPS-only variables for Argos tags.  Logical subsetting is used
  ## instead of `-which(...)`: when no row matches, `-which()` evaluates to
  ## `-integer(0)` and would silently drop EVERY row of the table.
  allparameter <- allparameter[!allparameter$var_name %in% c("satellites", "residual"), ]
}
## Keep only the rows flagged with an "x" for the current product level.
product <- paste0(level, "product")
variables <- allparameter[which(allparameter[[product]] == "x"), ]
## Coerce every column to character; seq_along() is empty-safe, unlike
## 1:length(x), which evaluates to c(1, 0) when x has length zero.
for (i in seq_along(variables)) variables[, i] <- as.character(variables[, i])
dimvar <- nrow(variables)
## Same preparation for the global-attribute (NC_GLOBAL) table.
glob_att <- "data/nc_global_att.csv"
glob_att <- read.csv(glob_att, sep = ",", dec = ".", header = TRUE, fill = TRUE)
glob_att <- glob_att[which(glob_att[[product]] == "x"), ]
for (i in seq_along(glob_att)) glob_att[, i] <- as.character(glob_att[, i])
dimglob <- nrow(glob_att)
#######################################################################################################
###################################### Step 6: Creation of NetCDF ####################################
### Creation of new NetCDF file
data_type <- "netcdf"
## Output path: data/output/<tag>/<level>/<tag>-<level>.nc
filename <- file.path("data/output", tag_id, level, paste0(tag_id, "-", level, ".nc"))
dataset <- create.nc(filename)
### Dimensions.  A NetCDF file may only contain one UNLIMITED dimension; it is
### assigned to "time", the axis with the most observations.
dim.def.nc(dataset, "time", unlim = TRUE)
## Character variables need a second, fixed dimension holding the maximum
## string length; both sizes used by the parameter table are declared here.
dim.def.nc(dataset, "max_string_32", 32)
dim.def.nc(dataset, "max_string_4", 4)
### Variables.  Each row of `variables` describes one NetCDF variable.  For
### NC_CHAR variables the string-length dimension comes first and the
### UNLIMITED dimension must come last, e.g. c("max_string_32", "time").
### seq_len() is empty-safe, unlike 1:dimvar which iterates over c(1, 0).
for (i in seq_len(dimvar)) {
  if (variables$value_type[i] == "NC_CHAR") {
    var.def.nc(dataset, variables$var_name[i], variables$value_type[i],
               c(variables$dimCHAR[i], variables$dimL0product[i]))
  } else {
    var.def.nc(dataset, variables$var_name[i], variables$value_type[i],
               variables$dimL0product[i])
  }
}
## To view the NetCDF file
# print.nc(dataset) # at this step the dimension of the time is NULL, because the observations are not added
### Put attributes in the variables or in the NC_GLOBAL
## The attributes are either the meta data of the variables or the global meta data of the NetCDF (NC_GLOBAL)
## the format is : att.put.nc(netcdf_file, "variables_name-or-NC_GLOBAL", "meta_data_name", "value_type", data), such as:
## - att.put.nc(dataset, "NC_GLOBAL", "title", "NC_CHAR", title)
## - att.put.nc(dataset, "time", "long_name" , "NC_CHAR", name_time)
## - att.put.nc(dataset, "temp", "_FillValue", "NC_DOUBLE", -99999.9), _FillValue has to be added for the creation of the figures
#
## For NC_GLOBAL
## WARNING the names of the data in select_data have to be the same as the colomn "att_name" in glob_att
## Copy every global attribute whose name matches a column of the turtle's
## metadata row (select_data).  seq_len() skips the loop entirely when the
## attribute table is empty (1:dimglob would iterate over c(1, 0)).
for (i in seq_len(dimglob)) {
  if (glob_att$att_name[i] %in% colnames(select_data)) {
    id.glob_att <- which(colnames(select_data) == glob_att$att_name[i])
    # format() keeps the printed representation of the metadata value.
    att.put.nc(dataset, "NC_GLOBAL", glob_att$att_name[i], glob_att$value_type[i],
               format(select_data[, id.glob_att]))
  }
}
## Other attributes for NC_GLOBAL
detail_1 <- "L0 product : raw data."
att.put.nc(dataset, "NC_GLOBAL", "detail_1", "NC_CHAR", detail_1)
if (download_from_WCDataPortal == "yes") {
  ## Record when the raw data were pulled from the WC Data Portal.
  DATE <- format(Sys.time(), "%d-%b-%Y %X ")
  detail_2 <- paste0("Data from WC Data Portal (", DATE, ").")
  att.put.nc(dataset, "NC_GLOBAL", "detail_2", "NC_CHAR", detail_2)
}
#
## For variables
## Attach the per-variable metadata (only the fields filled in the table).
for (i in seq_len(dimvar)) {
  if (variables$standard_name[i] != "") {
    # CF conventions define the attribute key as "standard_name" (underscore);
    # the previous "standard name" (with a space) is invisible to CF-aware tools.
    att.put.nc(dataset, variables$var_name[i], "standard_name", "NC_CHAR", variables$standard_name[i])
  }
  if (variables$long_name[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "long_name", "NC_CHAR", variables$long_name[i])
  }
  if (variables$units[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "units", "NC_CHAR", variables$units[i])
  }
  if (variables$value_type[i] == "NC_DOUBLE") {
    # _FillValue is required downstream for the figures; -99999.9 follows
    # Michna & Woods, RNetCDF - A Package for Reading and Writing NetCDF Datasets (2013).
    att.put.nc(dataset, variables$var_name[i], "_FillValue", "NC_DOUBLE", -99999.9)
  }
  if (variables$details[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "details", "NC_CHAR", variables$details[i])
  }
}
## Other attributes
## Record the observed sensor ranges as "max"/"min" variable attributes.
# NOTE(review): if a sensor column is entirely NA, max()/min() with
# na.rm = TRUE return -Inf/Inf (with a warning) and those sentinels would be
# written to the file -- confirm upstream data always hold at least one reading.
max_temp <- max(fusion$temp, na.rm = TRUE)
min_temp <- min(fusion$temp, na.rm = TRUE)
max_depth <- max(fusion$depth, na.rm = TRUE)
min_depth <- min(fusion$depth, na.rm = TRUE)
## The "max"/"min" attributes document the data envelope for downstream plots.
att.put.nc(dataset, "temp", "max" , "NC_DOUBLE", max_temp )
att.put.nc(dataset, "temp", "min" , "NC_DOUBLE", min_temp )
att.put.nc(dataset, "depth", "max" , "NC_DOUBLE", max_depth )
att.put.nc(dataset, "depth", "min" , "NC_DOUBLE", min_depth )
### Write the contents of a NetCDF variable.
## format: var.put.nc(netcdf_file, varialable_name, data), such as: var.put.nc(dataset, "lon", fusion$lon)
## the time variable data must be temporarily converted to a UTC referenced date, format of the convertion: dataconvert <- utinvcal.nc(units, data)
## for CHAR the NA must be replaced by ""
## Warning: the var.put.nc will not work if the format of the data is different from the format given in var.def.nc
## Write the data of every defined variable.  seq_len() is empty-safe.
for (i in seq_len(dimvar)) {
  if (variables$var_name[i] == "time") {
    ## Time is stored as "<units> since <epoch>" (the variable's units string),
    ## so convert the POSIXct values with utinvcal.nc() before writing.
    mytime <- utinvcal.nc(variables$units[which(variables$var_name == "time")], fusion$time)
    var.put.nc(dataset, "time", mytime)
  } else if (variables$value_type[i] == "NC_CHAR") {
    id.char <- which(colnames(fusion) == variables$var_name[i])
    ## NC_CHAR cannot hold NA: coerce to character FIRST, then blank the NAs
    ## (the replacement must follow the coercion, per the original warning).
    mydata <- as.character(fusion[, id.char])
    mydata[is.na(mydata)] <- ""
    var.put.nc(dataset, variables$var_name[i], mydata)
  } else {
    id.var <- which(colnames(fusion) == variables$var_name[i])
    var.put.nc(dataset, variables$var_name[i], fusion[, id.var])
  }
}
### View the NetCDF
# print.nc(dataset)
# var.get.nc(dataset, "source_loc")
### Close the NetCDF file so buffered data are flushed to disk.
close.nc(dataset)
########################################## END ##############################################
| /process_netcdf_L0.R | no_license | cynsky/Gliding-turtles | R | false | false | 21,317 | r | ########################################################################################################
# Process to generate a NetCDF file with raw data (L0 product) using location data
# Project: SOCIB Seaturtle
# Author: Chloé Dalleau
# Co-author: David March
# Date: 08/08/2016
#
# Description:
# This script is the first step of a global processing about the analysis of the turtles' trajectory.
# The following steps create a NetCDF file using the raw data of the turtle:
# - (1) Data import from CSV files or from Wildlife Computers (WC) data portal: time, latitude (Argos/GPS),
# longitude (Argos/GPS), location quality, number of satellites available from GPS, residual from GPS, temperature, depth, battery.
# Please note that a variable cannot be chosen several times from different csv files (except time and tagid).
# - (2) CSV file (generated by DAP software from Wildlife Computers) merging in a NetCDF file
#
# The process uses two additional CSV files :
# * meta_data: a file with all the information about the turtles such as: name, id, date deployment ...
# * allparameter: list of the variables or global attributes which will be created in the NetCDF file. This file allows an
# automation of the creation of the NetCDF file. It contains:
# - the name of the variable
# - the long name
# - the unit
# - the details
# - the NetCDF variable type (NC_DOUBLE,NC_CHAR,NC_INT ...)
# - specify if the variable is used in an another process (here : L0product, L1product and L2product)
# - if the type is NC_CHAR, specify the dimension
# - the dimension used in the NetCDF file in the different processes (here : dimL0product, dimL1product and dimL2product).
# Such as :
# var_name,long_name,units,details,value_type,L0product,L1product,L2product,dimCHAR,dimL0product,dimL1product,dimL2product
# time,time,seconds since 1970-01-01 00:00:00,,NC_INT,x,,,,time,,
# source_loc,source of the location,,,NC_CHAR,x,x,,max_string_32,time,time,
# NC_GLOBAL,otherTypeLoc,,,NC_CHAR,x,,,,,,
#
#
# WARNING:
# - There are differences in date format between CSV files (eg. Locations vs. Series) and turtles (eg. processed by WC Data portal vs DAP).
# Main problem is the name of the month (eg. 'ago' or 'aug' for Spanish or English, respectively). Check this in Step 2. Suggestion: process all tracks in English (check manual for DAP)
# - We remove duplicates in time for each CSV file processed. Some duplications in time may have different data. We delete one of them. In future versions, check if we can use any quality control criteria.
# - Current script works for tags with temperature and pressure sensors (ie. Series.csv) and with battery information (Status.csv).
#
#
# Sources:
# - Michna, P. & Milton Woods. RNetCDF - A Package for Reading and Writing NetCDF Datasets. (2013).
# - Michna, P. & Milton Woods. Package 'RNetCDF'. (2016).
# - units: http://www.unidata.ucar.edu/software/udunits/udunits-2.2.20/udunits/udunits2-derived.xml
# - standard names: http://cfconventions.org/Data/cf-standard-names/34/build/cf-standard-name-table.html
########################################################################################################
### Remove the previous data
# NOTE(review): rm(list = ls()) wipes the current workspace.  Acceptable for a
# stand-alone batch script, but remove it if this file is ever source()d from
# an interactive session.
rm(list=ls())
### Import libraries
library(RNetCDF)   # NetCDF creation and writing (create.nc, var.def.nc, ...)
library(lubridate) # date parsing (parse_date_time)
library(curl)      # HTTP access for the Wildlife Computers data portal
#library(wcUtils) # used in step 1, taken from https://github.com/jmlondon/wcUtils
### Set the turtle ID (Ptt) and if the data should be downloaded from the WC data portal
tag_id <- 00000 # modify the tag_id according to your turtle
download_from_WCDataPortal <- "no" # if "yes", step 1 will be processed
lan.setlocale = "English" # LC_TIME locale used to parse month names, see parse_date_time
############################### Step 1: creation of the CSV file from WC Data Portal ##################
if (download_from_WCDataPortal == "yes") {
# Define location and file with auth keys
keyfile = "keyfile.json"
# User ID
owner = "xxxxxxxxxxxxxxxxxxxxxx" # this ID was obtained after inspecting the Data Portal with Chrome developer tools
# Folder to download data
path = paste("data/rawinput/",tag_id,"/.", sep= "")
# Argos Platform ID
ptt = tag_id
## Download and extract all CSV exports for one tag from the WC portal.
## keyfile : path to the JSON file holding the portal auth keys
## owner   : portal owner/account id
## path    : directory where the extracted CSV files are written
## ptt     : Argos platform (tag) id
## The wc* helpers come from the wcUtils package (see the commented-out
## library() call above); presumably wcPOST issues the authenticated request
## and wcGetZip fetches the tag's archive -- confirm against wcUtils docs.
wcGetPttData <- function (keyfile, owner, path, ptt){
# Get deployment information
params = paste("action=get_deployments&owner_id=", owner, sep="")
wcpost <- wcPOST(keyfile= keyfile, params = params)
# Get PPT ID
id <- wcGetPttID(wcpost, ptt = ptt)$ids
# Download and extract ZIP files to obtain CSV files
zipPath <- wcGetZip(id, keyfile = keyfile) # download zip file in a temporary folder
unzip(zipPath, exdir = path) # extract all .CSV files from ZIP
# file.copy(from = file,to = newfile,overwrite = TRUE) # copy the zip in rawarchive
}
wcGetPttData(keyfile, owner, path, ptt)
}
###################################### Step 2: Import data ###########################################
### Meta data import
## Metadata import: one row per tagged turtle.
meta_data <- "data/turtles_metadata.csv"
meta_data <- read.csv(meta_data, sep = ",", dec = ".", header = TRUE, fill = TRUE)
colnames(meta_data) <- c("argosid", "name", "dateDeployment", "refMaxSeaTemp", "refMinSeaTemp",
                         "refMaxDepth", "refMinDepth", "title", "author", "publisher",
                         "fileVersion", "otherTypeLoc")
## Coerce the columns by role instead of one assignment per column.
char_cols <- c("argosid", "name", "title", "author", "publisher", "fileVersion", "otherTypeLoc")
num_cols <- c("refMaxSeaTemp", "refMinSeaTemp", "refMaxDepth", "refMinDepth")
meta_data[char_cols] <- lapply(meta_data[char_cols], as.character)
# as.character() first: as.numeric() applied directly to a factor column would
# return the factor's integer codes, not the printed values.
meta_data[num_cols] <- lapply(meta_data[num_cols], function(col) as.numeric(as.character(col)))
meta_data$dateDeployment <- as.POSIXct(meta_data$dateDeployment, "%Y-%m-%d %H:%M:%S", tz = "GMT")
### Meta data selection using the turtle ID
## which() drops rows whose argosid is NA, matching the previous behaviour.
select_data <- meta_data[which(meta_data$argosid == tag_id), ]
### Select correct CSV file
## Locations : location of the turtle and quality location from Argos
## Series : temperature and depth and their errors
## Status : battery voltage just prior to transmission
## 1-FastGPS : location of the turtle and quality location from GPS
## Pick the CSV exports to load; "1-FastGPS" only exists for GPS-enabled tags.
## The positional pairing of name_file and name_data is relied on below,
## so keep both vectors in the same order.
if (select_data$otherTypeLoc == "GPS") {
  name_file <- c("Locations", "Status", "Series", "1-FastGPS") # Warning : the order is important
  name_data <- c("loc_data", "status_data", "series_data", "gps_data")
} else {
  name_file <- c("Locations", "Status", "Series")
  name_data <- c("loc_data", "status_data", "series_data")
}
level <- "L0"
### Data import from CSV files using either WC data portal or the file version.
dirfile <- dir(path = paste0("data/rawinput/", tag_id), pattern = "*.csv$")
## Match each requested file name exactly (keep the ".csv" suffix in the
## pattern so e.g. "Series" does not also match "SeriesRange"); the match is
## case sensitive ("locations" != "Locations").
file <- character(length(name_file)) # preallocated instead of grown with c()
for (i in seq_along(name_file)) {
  file[i] <- dirfile[grep(paste0(tag_id, "-", name_file[i], ".csv"), dirfile)]
}
for (i in seq_along(file)) {
  ff <- paste0("data/rawinput/", tag_id, "/", file[i])
  # The FastGPS export carries three header lines before the column names.
  n_skip <- if (name_data[i] == "gps_data") 3 else 0
  assign(name_data[i], read.csv(ff, sep = ",", dec = ".", header = TRUE, skip = n_skip))
}
### Select interesting variables
## Locations.csv
## Locations.csv: keep the Argos position, its quality class and the timestamp.
loc_data <- loc_data[, c("DeployID", "Date", "Quality", "Latitude", "Longitude")]
colnames(loc_data) <- c("tagid", "time", "lc", "lat", "lon")
loc_data$tagid <- as.numeric(as.character(loc_data$tagid))
# Dates come in two flavours depending on the export tool (WC portal vs DAP):
# "HMS dbY" or "Ymd HMS".  Month names are parsed with the locale configured
# at the top of the script (lan.setlocale) so every file is handled the same
# way (previously some calls hard-coded "English").
loc_data$time <- parse_date_time(loc_data$time, c("HMS dbY", "Ymd HMS"),
                                 locale = Sys.setlocale("LC_TIME", lan.setlocale), tz = "GMT")
loc_data$lc <- as.character(loc_data$lc)
loc_data$lat <- as.numeric(as.character(loc_data$lat))
loc_data$lon <- as.numeric(as.character(loc_data$lon))
## Series.csv: temperature and depth time series with their error ranges.
series_data$Date <- parse_date_time(paste(series_data$Time, series_data$Day, sep = " "),
                                    c("HMS dbY", "Ymd HMS"),
                                    locale = Sys.setlocale("LC_TIME", lan.setlocale), tz = "GMT")
series_data <- series_data[, c("DeployID", "Date", "Temperature", "TRange", "Depth", "DRange")]
colnames(series_data) <- c("tagid", "time", "temp", "errorT", "depth", "errorD")
num_series <- c("tagid", "temp", "errorT", "depth", "errorD")
series_data[num_series] <- lapply(series_data[num_series], function(col) as.numeric(as.character(col)))
## Status.csv: battery voltage just prior to transmission.
status_data$Date <- status_data$Received
status_data <- status_data[, c("DeployID", "Date", "BattVoltage")]
colnames(status_data) <- c("tagid", "time", "batt")
status_data$tagid <- as.numeric(as.character(status_data$tagid))
status_data$time <- parse_date_time(status_data$time, c("HMS dbY", "Ymd HMS"),
                                    locale = Sys.setlocale("LC_TIME", lan.setlocale), tz = "GMT")
status_data$batt <- as.numeric(as.character(status_data$batt))
## 1-FastGPS.csv: GPS positions with quality indicators (GPS tags only).
if (select_data$otherTypeLoc == "GPS") {
  gps_data$Date <- parse_date_time(paste(gps_data$Time, gps_data$Day, sep = " "),
                                   c("HMS dbY", "Ymd HMS"),
                                   locale = Sys.setlocale("LC_TIME", lan.setlocale), tz = "GMT")
  gps_data <- gps_data[, c("Name", "Date", "Satellites", "Residual", "Latitude", "Longitude")]
  colnames(gps_data) <- c("tagid", "time", "satellites", "residual", "lat_gps", "lon_gps")
  # The time column is already POSIXct here; the former second parse_date_time()
  # on it was redundant (and would NA-out midnight timestamps, whose default
  # character form carries no "HMS" part), so it is dropped.
  num_gps <- c("tagid", "satellites", "residual", "lat_gps", "lon_gps")
  gps_data[num_gps] <- lapply(gps_data[num_gps], function(col) as.numeric(as.character(col)))
}
### Removing locations from testing environments before releasing
## Keep only observations recorded after the deployment date, discarding
## locations produced in testing environments before release.  which() drops
## NA comparisons, matching the previous `== "TRUE"` behaviour.
loc_data <- loc_data[which(loc_data$time > select_data$dateDeployment), ]
series_data <- series_data[which(series_data$time > select_data$dateDeployment), ]
status_data <- status_data[which(status_data$time > select_data$dateDeployment), ]
if (select_data$otherTypeLoc == "GPS") {
  gps_data <- gps_data[which(gps_data$time > select_data$dateDeployment), ]
}
## Remove duplicate time measurements, keeping the first occurrence.
## !duplicated() needs no emptiness guard, unlike -which(duplicated(...))
## which would drop every row when there is no duplicate at all.
loc_data <- loc_data[!duplicated(loc_data$time), ]
series_data <- series_data[!duplicated(series_data$time), ]
status_data <- status_data[!duplicated(status_data$time), ]
if (select_data$otherTypeLoc == "GPS") {
  gps_data <- gps_data[!duplicated(gps_data$time), ]
}
### Merge the latitude and longitude observations from Argos and GPS
## If time measurements exist at the Argo and the GPS source, the latitude and the longitude from the GPS source are selected
## Merge the Argos and GPS positions: when a timestamp exists in both sources,
## the GPS position wins.
if (select_data$otherTypeLoc == "GPS") {
  mergeloc <- merge(loc_data, gps_data, all = TRUE)
  colnames(mergeloc) <- c("tagid", "time", "lc", "lat_argos", "lon_argos",
                          "satellites", "residual", "lat_gps", "lon_gps")
  ## Default to the Argos position and source ...
  mergeloc$lat <- mergeloc$lat_argos
  mergeloc$lon <- mergeloc$lon_argos
  mergeloc$source_loc <- name_file[1]         # "Locations"
  ## ... then overwrite, in one vectorised pass, the rows whose timestamp also
  ## appears in the GPS file (replaces the former row-by-row loop).  NA
  ## timestamps are excluded from the lookup set so they never match, exactly
  ## as the old `which(time[i] == gps_data$time)` test behaved.
  is_gps <- mergeloc$time %in% gps_data$time[!is.na(gps_data$time)]
  mergeloc$lat[is_gps] <- mergeloc$lat_gps[is_gps]
  mergeloc$lon[is_gps] <- mergeloc$lon_gps[is_gps]
  mergeloc$lc[is_gps] <- "gps"
  mergeloc$source_loc[is_gps] <- name_file[4] # "1-FastGPS"
  mergeloc <- mergeloc[, c("tagid", "time", "lc", "satellites", "residual",
                           "lat", "lon", "source_loc")]
} else {
  mergeloc <- loc_data
  mergeloc$source_loc <- name_file[1]         # "Locations"
}
## Tag each observation with the CSV file it came from.
series_data$source_series <- name_file[3]     # "Series"
status_data$source_status <- name_file[2]     # "Status"
#######################################################################################################
###################################### Step 3: Merge existing data ####################################
### Merge the observations with the same time - Works only if a variable is chosen only once from csv
## files (except time and tagid)
## Outer-join (all = TRUE keeps unmatched rows) the location, sensor-series
## and status tables on their shared columns (tagid, time).
fusion <- merge(mergeloc,series_data, all=TRUE)
fusion <- merge(fusion,status_data, all=TRUE)
#######################################################################################################
###################################### Step 4: Data export in a csv file ####################################
## One CSV per tag and product level: data/output/<tag>/<level>/<tag>-<level>.csv
output.file <- paste("data/output/",tag_id,"/",level,"/",tag_id,"-",level,".csv", sep= "")
write.table(fusion, output.file, row.names=FALSE, sep=",", dec=".")
#######################################################################################################
###################################### Step 5: Prepare the data for the NetCDF document ###############
## To add a new variable with attributes (long_name,units,details) in the NetCDF: modify the previous step and the CSV
## file parameter_netcdf
## To add other attributes such as maximum and minimum, add them manually (see also maximum and minimum for the temperature)
## The dimensions and the details for the NC_Global are inserted manually
### Import parameters from parameter_netcdf.csv
## WARNING the name of the data created in the previous step have to be the same as the var_name in
## allparameter (or long_name for NC_GLOBAL).
allparameter <- "data/parameter_netcdf.csv"
allparameter <- read.csv(allparameter, sep = ",", dec = ".", header = TRUE, fill = TRUE)
if (select_data$otherTypeLoc != "GPS") {
  ## Drop the GPS-only variables for Argos tags.  Logical subsetting is used
  ## instead of `-which(...)`: when no row matches, `-which()` evaluates to
  ## `-integer(0)` and would silently drop EVERY row of the table.
  allparameter <- allparameter[!allparameter$var_name %in% c("satellites", "residual"), ]
}
## Keep only the rows flagged with an "x" for the current product level.
product <- paste0(level, "product")
variables <- allparameter[which(allparameter[[product]] == "x"), ]
## Coerce every column to character; seq_along() is empty-safe, unlike
## 1:length(x), which evaluates to c(1, 0) when x has length zero.
for (i in seq_along(variables)) variables[, i] <- as.character(variables[, i])
dimvar <- nrow(variables)
## Same preparation for the global-attribute (NC_GLOBAL) table.
glob_att <- "data/nc_global_att.csv"
glob_att <- read.csv(glob_att, sep = ",", dec = ".", header = TRUE, fill = TRUE)
glob_att <- glob_att[which(glob_att[[product]] == "x"), ]
for (i in seq_along(glob_att)) glob_att[, i] <- as.character(glob_att[, i])
dimglob <- nrow(glob_att)
#######################################################################################################
###################################### Step 6: Creation of NetCDF ####################################
### Creation of new NetCDF file
data_type <- "netcdf"
## Output path: data/output/<tag>/<level>/<tag>-<level>.nc
filename <- file.path("data/output", tag_id, level, paste0(tag_id, "-", level, ".nc"))
dataset <- create.nc(filename)
### Dimensions.  A NetCDF file may only contain one UNLIMITED dimension; it is
### assigned to "time", the axis with the most observations.
dim.def.nc(dataset, "time", unlim = TRUE)
## Character variables need a second, fixed dimension holding the maximum
## string length; both sizes used by the parameter table are declared here.
dim.def.nc(dataset, "max_string_32", 32)
dim.def.nc(dataset, "max_string_4", 4)
### Variables.  Each row of `variables` describes one NetCDF variable.  For
### NC_CHAR variables the string-length dimension comes first and the
### UNLIMITED dimension must come last, e.g. c("max_string_32", "time").
### seq_len() is empty-safe, unlike 1:dimvar which iterates over c(1, 0).
for (i in seq_len(dimvar)) {
  if (variables$value_type[i] == "NC_CHAR") {
    var.def.nc(dataset, variables$var_name[i], variables$value_type[i],
               c(variables$dimCHAR[i], variables$dimL0product[i]))
  } else {
    var.def.nc(dataset, variables$var_name[i], variables$value_type[i],
               variables$dimL0product[i])
  }
}
## To view the NetCDF file
# print.nc(dataset) # at this step the dimension of the time is NULL, because the observations are not added
### Put attributes in the variables or in the NC_GLOBAL
## The attributes are either the meta data of the variables or the global meta data of the NetCDF (NC_GLOBAL)
## the format is : att.put.nc(netcdf_file, "variables_name-or-NC_GLOBAL", "meta_data_name", "value_type", data), such as:
## - att.put.nc(dataset, "NC_GLOBAL", "title", "NC_CHAR", title)
## - att.put.nc(dataset, "time", "long_name" , "NC_CHAR", name_time)
## - att.put.nc(dataset, "temp", "_FillValue", "NC_DOUBLE", -99999.9), _FillValue has to be added for the creation of the figures
#
## For NC_GLOBAL
## WARNING the names of the data in select_data have to be the same as the colomn "att_name" in glob_att
## Copy every global attribute whose name matches a column of the turtle's
## metadata row (select_data).  seq_len() skips the loop entirely when the
## attribute table is empty (1:dimglob would iterate over c(1, 0)).
for (i in seq_len(dimglob)) {
  if (glob_att$att_name[i] %in% colnames(select_data)) {
    id.glob_att <- which(colnames(select_data) == glob_att$att_name[i])
    # format() keeps the printed representation of the metadata value.
    att.put.nc(dataset, "NC_GLOBAL", glob_att$att_name[i], glob_att$value_type[i],
               format(select_data[, id.glob_att]))
  }
}
## Other attributes for NC_GLOBAL
detail_1 <- "L0 product : raw data."
att.put.nc(dataset, "NC_GLOBAL", "detail_1", "NC_CHAR", detail_1)
if (download_from_WCDataPortal == "yes") {
  ## Record when the raw data were pulled from the WC Data Portal.
  DATE <- format(Sys.time(), "%d-%b-%Y %X ")
  detail_2 <- paste0("Data from WC Data Portal (", DATE, ").")
  att.put.nc(dataset, "NC_GLOBAL", "detail_2", "NC_CHAR", detail_2)
}
#
## For variables
## Attach the per-variable metadata (only the fields filled in the table).
for (i in seq_len(dimvar)) {
  if (variables$standard_name[i] != "") {
    # CF conventions define the attribute key as "standard_name" (underscore);
    # the previous "standard name" (with a space) is invisible to CF-aware tools.
    att.put.nc(dataset, variables$var_name[i], "standard_name", "NC_CHAR", variables$standard_name[i])
  }
  if (variables$long_name[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "long_name", "NC_CHAR", variables$long_name[i])
  }
  if (variables$units[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "units", "NC_CHAR", variables$units[i])
  }
  if (variables$value_type[i] == "NC_DOUBLE") {
    # _FillValue is required downstream for the figures; -99999.9 follows
    # Michna & Woods, RNetCDF - A Package for Reading and Writing NetCDF Datasets (2013).
    att.put.nc(dataset, variables$var_name[i], "_FillValue", "NC_DOUBLE", -99999.9)
  }
  if (variables$details[i] != "") {
    att.put.nc(dataset, variables$var_name[i], "details", "NC_CHAR", variables$details[i])
  }
}
## Other attributes
## Record the observed sensor ranges as "max"/"min" variable attributes.
# NOTE(review): if a sensor column is entirely NA, max()/min() with
# na.rm = TRUE return -Inf/Inf (with a warning) and those sentinels would be
# written to the file -- confirm upstream data always hold at least one reading.
max_temp <- max(fusion$temp, na.rm = TRUE)
min_temp <- min(fusion$temp, na.rm = TRUE)
max_depth <- max(fusion$depth, na.rm = TRUE)
min_depth <- min(fusion$depth, na.rm = TRUE)
## The "max"/"min" attributes document the data envelope for downstream plots.
att.put.nc(dataset, "temp", "max" , "NC_DOUBLE", max_temp )
att.put.nc(dataset, "temp", "min" , "NC_DOUBLE", min_temp )
att.put.nc(dataset, "depth", "max" , "NC_DOUBLE", max_depth )
att.put.nc(dataset, "depth", "min" , "NC_DOUBLE", min_depth )
### Write the contents of a NetCDF variable.
## format: var.put.nc(netcdf_file, varialable_name, data), such as: var.put.nc(dataset, "lon", fusion$lon)
## the time variable data must be temporarily converted to a UTC referenced date, format of the convertion: dataconvert <- utinvcal.nc(units, data)
## for CHAR the NA must be replaced by ""
## Warning: the var.put.nc will not work if the format of the data is different from the format given in var.def.nc
## Write the data of every defined variable.  seq_len() is empty-safe.
for (i in seq_len(dimvar)) {
  if (variables$var_name[i] == "time") {
    ## Time is stored as "<units> since <epoch>" (the variable's units string),
    ## so convert the POSIXct values with utinvcal.nc() before writing.
    mytime <- utinvcal.nc(variables$units[which(variables$var_name == "time")], fusion$time)
    var.put.nc(dataset, "time", mytime)
  } else if (variables$value_type[i] == "NC_CHAR") {
    id.char <- which(colnames(fusion) == variables$var_name[i])
    ## NC_CHAR cannot hold NA: coerce to character FIRST, then blank the NAs
    ## (the replacement must follow the coercion, per the original warning).
    mydata <- as.character(fusion[, id.char])
    mydata[is.na(mydata)] <- ""
    var.put.nc(dataset, variables$var_name[i], mydata)
  } else {
    id.var <- which(colnames(fusion) == variables$var_name[i])
    var.put.nc(dataset, variables$var_name[i], fusion[, id.var])
  }
}
### View the NetCDF
# print.nc(dataset)
# var.get.nc(dataset, "source_loc")
### Close the NetCDF file so buffered data are flushed to disk.
close.nc(dataset)
########################################## END ##############################################
|
context("sp germplasm_details_study")
con <- ba_db()$sweetpotatobase
test_that("Germplasm_details study results are present", {
  res <- ba_germplasm_details_study(con = con, studyDbId = "1207")
  # Study 1207 on sweetpotatobase is expected to hold at least 8 entries.
  # expect_that(x, is_true()) is deprecated since testthat 2.0; use expect_true().
  expect_true(nrow(res) >= 8)
})
test_that("Germplasm_details out formats work", {
  # Each supported rclass must be reflected in the returned object's class.
  res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "json")
  expect_true("json" %in% class(res))
  res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "list")
  expect_true("list" %in% class(res))
  res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "data.frame")
  expect_true("data.frame" %in% class(res))
})
| /tests/sweetpotatobase/test_sp_germplasm_details_study.R | no_license | ClayBirkett/brapi | R | false | false | 713 | r | context("sp germplasm_details_study")
con <- ba_db()$sweetpotatobase
test_that("Germplasm_details study results are present", {
res <- ba_germplasm_details_study(con = con, studyDbId = "1207")
expect_that(nrow(res) >= 8, is_true())
})
test_that("Germplasm_details out formats work", {
res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "json")
expect_that("json" %in% class(res), is_true())
res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "list")
expect_that("list" %in% class(res), is_true())
res <- ba_germplasm_details_study(con = con, studyDbId = "1207", rclass = "data.frame")
expect_that("data.frame" %in% class(res), is_true())
})
|
# Exercise 8: Pulitzer Prizes
# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)
# Install and load the needed libraries
# Be sure to comment out the install.packages function so it won't install it every time it runs
# Remember you only need to install a package once
#install.packages("dplyr")
# library(dplyr)
# View the data set. Start to understand what the data columns contain
# Be sure to comment out the function so it won't view every time you run the code.
# View(pulitzer)
# Use 'colnames' to print out the names of the columns
colnames(pulitzer)
# Use 'str' to print what types of values are contained in each column
# Did any value type surprise you? Why do you think they are that type?
str(pulitzer)
# Add a column in a dataframe called 'Pulitzer.Prize.Change' that contains the difference between
# Pulitzer Prize Winners from 2004 to 2013 and Pulitzer Prize Winners from 1990 to 2003.
# What publication gained the most Pulitzer prizes from 2004-2014?
# Be sure to use the pipe operator!
# Which publication with at least 5 Pulitzers won from 2004-2014 had the biggest decrease (negative) in Daily circulation numbers?
# This publication should have won a minimum of 5 Pulitzers, as well as having the biggest decrease in circulation
# Your turn! An important part about being a data scientist is asking questions.
# Create a question and use dplyr to figure out the answer.
| /exercise-8/exercise.R | permissive | brendanjacobsen/m11-dplyr | R | false | false | 1,483 | r | # Exercise 8: Pulitzer Prizes
# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)
# Install and load the needed libraries
# Be sure to comment out the install.packages function so it won't install it every time it runs
# Remeber you only need to install a package once
#install.packages(dplyr)
# library(dplyr)
# View in the data set. Start to understand what the data columns contains
# Be sure to comment out the function so it won't view everytime you run the code.
# View(pulitzer)
# Use 'colnames' to print out the names of the columns
colnames(pulitzer)
# Use 'str' to print what types of values are contained in each column
# Did any value type surprise you? Why do you think they are that type?
str(pulitzer)
# Add a column in a dataframe called 'Pulitzer.Prize.Change` that contains the diffrence in changes
# in Pulitzer Prize Winners from 2004 to 2013 and Pultizer Prize Winners from 1990 to 2003.
# What publication gained the most pulitzer prizes from 2004-2014?
# Be sure to use the pipe operator!
# Which publication with at least 5 Pulitzers won from 2004-2014 had the biggest decrease(negative) in Daily circulation numbers?
# This publication should have Pulitzer prizes won a minimum of 5 Pulitzers, as well as the biggest decrease in circulation
# Your turn! An important part about being a data scientist is asking questions.
# Create a question and use dplyr to figure out the answer.
|
#' Iteratively query a database for matches to a query vector.
#'
#' `iterative_select()` returns a tibble with all entries of the database that
#' match the query vector in any of the selected columns.
#'
#' @param query A character vector.
#' @param database A data.frame or tibble. A database to be queried.
#'   See [databases()] for a list of included databases.
#' @param match_cols A character vector. The columns in the data to look for matches
#'   with the query. In order of preference, if matches to a column are
#'   found, matches to subsequent columns are not reported by default, unless
#'   return_all is `TRUE`.
#' @param return_all A logical indicating whether matches to subsequent columns,
#'   after a match has already been found, should also be returned.
#' @return A tibble containing all rows of the dataframe that matched a query.
#'   The new column `match__` contains the name of the column that matched
#'   the query for this row.
#' @examples
#' iterative_select(c("FLG", "SGK2"),
#'   hgnc,
#'   c("symbol", "alias_symbol", "prev_symbol"))
#'
#' @export
iterative_select <- function(query, database, match_cols, return_all = FALSE) {
  if (rlang::is_empty(query))
    stop("'query' can not be empty.", call. = FALSE)
  # Attach a unique row id so flattened matches can be joined back to the
  # full records (including any list columns) at the end
  database <- database %>%
    dplyr::mutate(uid__ = dplyr::row_number())
  query <- unique(query)
  remaining_query <- query
  out_ids <- list()
  for (i in seq_along(match_cols)) {
    if (rlang::is_empty(remaining_query))
      break
    # Unless all matches are requested, only look up queries not yet matched
    # by a higher-priority column
    query_cur <- if (return_all) query else remaining_query
    col <- match_cols[i]
    if (!(col %in% names(database)))
      stop("match_col '", col, "' not in database", call. = FALSE)
    # For using dplyr programmatically the column name has to be turned into
    # a symbol that can be unquoted with `!!`
    col_sym <- rlang::sym(col)
    # Some columns in the datasets are list columns which can have multiple
    # entries per row. Flatten the list for merging; the original list column
    # is restored by the uid__ join below.
    # Should find better strategy, because this is very slow
    d <- database %>%
      dplyr::select(uid__, !!col_sym) %>%
      tidyr::drop_na(!!col_sym)
    if (is.list(database[[col]])) {
      d <- d %>%
        tidyr::unnest(!!col_sym)
    }
    out <- tibble::tibble(
      query = query_cur,
      match__ = col
    ) %>%
      dplyr::inner_join(d, by = c("query" = col))
    remaining_query <- base::setdiff(remaining_query, out$query)
    out_ids[[i]] <- out
  }
  if (!rlang::is_empty(remaining_query))
    out_ids[["leftover"]] <- tibble::tibble(query = remaining_query, match__ = "none")
  out_ids_df <- dplyr::bind_rows(out_ids)
  # checking if query matched more than one entry in the database
  multimatch <- out_ids_df %>%
    dplyr::group_by(.data$match__) %>%
    dplyr::count(.data$query) %>%
    dplyr::ungroup() %>%
    dplyr::filter(.data$n > 1) %>%
    dplyr::mutate(message = paste0(.data$query, ": ", .data$n))
  if (nrow(multimatch) > 0) {
    warning(
      "Multiple matches of same priority found for some queries. All matches are
      reported.\n",
      paste0(multimatch$message, "\n", collapse = " ")
    )
  }
  # Warning user when any queries didn't match
  if (length(remaining_query) > 0) {
    warning(
      "No matches found for some queries. Reporting NA for these queries.\n",
      paste0(remaining_query, collapse = "\n")
    )
  }
  out_df <- out_ids_df %>%
    dplyr::left_join(database, by = "uid__") %>%
    dplyr::select(-uid__)
  out_df
}
#' Join results from a database into an existing dataframe.
#'
#' `join_results()` queries a database for matches to a column in the supplied
#' dataset and returns it together with the matches found.
#'
#' @param df A data.frame or tibble. The data used for querying the database.
#' @param query_col A character vector of length one. Name of the column in `df`
#'   that will be used to query the database.
#' @param select_cols A character vector of column names in the database that
#'   will be merged in the ouput.
#' @inheritParams iterative_select
#' @return The input dataframe merged with the selected matching columns from
#'   the database.
#' @examples
#' d <- data.frame(a = 1:3, b = c("FLG", "SGK2", "CDK1"))
#' join_results(d, "b", hgnc,
#'   match_cols = c("symbol", "alias_symbol", "prev_symbol"),
#'   select_cols = c("entrez_id", "symbol", "refseq_accession"))
#'
#' @export
join_results <- function (df, query_col, database, match_cols, select_cols = NULL) {
  # Look up each value of the query column in the database
  matches <- iterative_select(df[[query_col]], database, match_cols)
  # Optionally restrict the merged columns; "query" is always kept because
  # it is the join key
  if (!rlang::is_null(select_cols)) {
    keep_cols <- unique(c(select_cols, "query"))
    matches <- dplyr::select_at(matches, keep_cols)
  }
  join_spec <- rlang::set_names(nm = query_col, x = "query")
  dplyr::left_join(df, matches, by = join_spec)
}
| /R/select.R | no_license | datarail/genebabel | R | false | false | 4,779 | r |
#' Iteratively query a database for matches to a query vector.
#'
#' `iterative_select()` returns a tibble with all entries of the database that
#' match the query vector in any of the selected columns.
#'
#' @param query A character vector.
#' @param database A data.frame or tibble. A database to be queried.
#'   See [databases()] for a list of included databases.
#' @param match_cols A character vector. The columns in the data to look for matches
#'   with the query. In order of preference, if matches to a column are
#'   found, matches to subsequent columns are not reported by default, unless
#'   return_all is `TRUE`.
#' @param return_all A logical indicating whether matches to subsequent columns,
#'   after a match has already been found, should also be returned.
#' @return A tibble containing all rows of the dataframe that matched a query.
#'   The new column `match__` contains the name of the column that matched
#'   the query for this row.
#' @examples
#' iterative_select(c("FLG", "SGK2"),
#'   hgnc,
#'   c("symbol", "alias_symbol", "prev_symbol"))
#'
#' @export
iterative_select <- function(query, database, match_cols, return_all = FALSE) {
  if (rlang::is_empty(query))
    stop("'query' can not be empty.", call. = FALSE)
  # Attach a unique row id so flattened matches can be joined back to the
  # full records (including any list columns) at the end
  database <- database %>%
    dplyr::mutate(uid__ = dplyr::row_number())
  query <- unique(query)
  remaining_query <- query
  out_ids <- list()
  for (i in seq_along(match_cols)) {
    if (rlang::is_empty(remaining_query))
      break
    # Unless all matches are requested, only look up queries not yet matched
    # by a higher-priority column
    query_cur <- if (return_all) query else remaining_query
    col <- match_cols[i]
    if (!(col %in% names(database)))
      stop("match_col '", col, "' not in database", call. = FALSE)
    # For using dplyr programmatically the column name has to be turned into
    # a symbol that can be unquoted with `!!`
    col_sym <- rlang::sym(col)
    # Some columns in the datasets are list columns which can have multiple
    # entries per row. Flatten the list for merging; the original list column
    # is restored by the uid__ join below.
    # Should find better strategy, because this is very slow
    d <- database %>%
      dplyr::select(uid__, !!col_sym) %>%
      tidyr::drop_na(!!col_sym)
    if (is.list(database[[col]])) {
      d <- d %>%
        tidyr::unnest(!!col_sym)
    }
    out <- tibble::tibble(
      query = query_cur,
      match__ = col
    ) %>%
      dplyr::inner_join(d, by = c("query" = col))
    remaining_query <- base::setdiff(remaining_query, out$query)
    out_ids[[i]] <- out
  }
  if (!rlang::is_empty(remaining_query))
    out_ids[["leftover"]] <- tibble::tibble(query = remaining_query, match__ = "none")
  out_ids_df <- dplyr::bind_rows(out_ids)
  # checking if query matched more than one entry in the database
  multimatch <- out_ids_df %>%
    dplyr::group_by(.data$match__) %>%
    dplyr::count(.data$query) %>%
    dplyr::ungroup() %>%
    dplyr::filter(.data$n > 1) %>%
    dplyr::mutate(message = paste0(.data$query, ": ", .data$n))
  if (nrow(multimatch) > 0) {
    warning(
      "Multiple matches of same priority found for some queries. All matches are
      reported.\n",
      paste0(multimatch$message, "\n", collapse = " ")
    )
  }
  # Warning user when any queries didn't match
  if (length(remaining_query) > 0) {
    warning(
      "No matches found for some queries. Reporting NA for these queries.\n",
      paste0(remaining_query, collapse = "\n")
    )
  }
  out_df <- out_ids_df %>%
    dplyr::left_join(database, by = "uid__") %>%
    dplyr::select(-uid__)
  out_df
}
#' Join results from a database into an existing dataframe.
#'
#' `join_results()` queries a database for matches to a column in the supplied
#' dataset and returns it together with the matches found.
#'
#' @param df A data.frame or tibble. The data used for querying the database.
#' @param query_col A character vector of length one. Name of the column in `df`
#' that will be used to query the database.
#' @param select_cols A character vector of column names in the database that
#' will be merged in the ouput.
#' @inheritParams iterative_select
#' @return The input dataframe merged with the selected matching columns from
#' the database.
#' @examples
#' d <- data.frame(a = 1:3, b = c("FLG", "SGK2", "CDK1"))
#' join_results(d, "b", hgnc,
#' match_cols = c("symbol", "alias_symbol", "prev_symbol"),
#' select_cols = c("entrez_id", "symbol", "refseq_accession"))
#'
#' @export
join_results <- function (df, query_col, database, match_cols, select_cols = NULL) {
hits <- iterative_select(df[[query_col]], database, match_cols)
if (!rlang::is_null(select_cols)) {
hits <- hits %>%
dplyr::select_at(unique(c(select_cols, "query")))
}
dplyr::left_join(df, hits, by = rlang::set_names(nm = query_col, x = "query"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_general.R
\name{pFtest}
\alias{pFtest}
\alias{pFtest.formula}
\alias{pFtest.plm}
\title{F Test for Individual and/or Time Effects}
\usage{
pFtest(x, ...)
\method{pFtest}{formula}(x, data, ...)
\method{pFtest}{plm}(x, z, ...)
}
\arguments{
\item{x}{an object of class \code{"plm"} or of class \code{"formula"},}
\item{\dots}{further arguments.}
\item{data}{a \code{data.frame},}
\item{z}{an object of class \code{"plm"},}
}
\value{
An object of class \code{"htest"}.
}
\description{
Test of individual and/or time effects based on the comparison of the
\code{within} and the \code{pooling} model.
}
\details{
For the \code{plm} method, the argument of this function is two \code{plm}
objects, the first being a within model, the second a pooling
model. The effects tested are either individual, time or twoways,
depending on the effects introduced in the within model.
}
\examples{
data("Grunfeld", package="plm")
gp <- plm(inv ~ value + capital, data = Grunfeld, model = "pooling")
gi <- plm(inv ~ value + capital, data = Grunfeld,
effect = "individual", model = "within")
gt <- plm(inv ~ value + capital, data = Grunfeld,
effect = "time", model = "within")
gd <- plm(inv ~ value + capital, data = Grunfeld,
effect = "twoways", model = "within")
pFtest(gi, gp)
pFtest(gt, gp)
pFtest(gd, gp)
pFtest(inv ~ value + capital, data = Grunfeld, effect = "twoways")
}
\seealso{
\code{\link[=plmtest]{plmtest()}} for Lagrange multiplier tests of individuals
and/or time effects.
}
\author{
Yves Croissant
}
\keyword{htest}
| /man/pFtest.Rd | no_license | cran/plm | R | false | true | 1,634 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_general.R
\name{pFtest}
\alias{pFtest}
\alias{pFtest.formula}
\alias{pFtest.plm}
\title{F Test for Individual and/or Time Effects}
\usage{
pFtest(x, ...)
\method{pFtest}{formula}(x, data, ...)
\method{pFtest}{plm}(x, z, ...)
}
\arguments{
\item{x}{an object of class \code{"plm"} or of class \code{"formula"},}
\item{\dots}{further arguments.}
\item{data}{a \code{data.frame},}
\item{z}{an object of class \code{"plm"},}
}
\value{
An object of class \code{"htest"}.
}
\description{
Test of individual and/or time effects based on the comparison of the
\code{within} and the \code{pooling} model.
}
\details{
For the \code{plm} method, the argument of this function is two \code{plm}
objects, the first being a within model, the second a pooling
model. The effects tested are either individual, time or twoways,
depending on the effects introduced in the within model.
}
\examples{
data("Grunfeld", package="plm")
gp <- plm(inv ~ value + capital, data = Grunfeld, model = "pooling")
gi <- plm(inv ~ value + capital, data = Grunfeld,
effect = "individual", model = "within")
gt <- plm(inv ~ value + capital, data = Grunfeld,
effect = "time", model = "within")
gd <- plm(inv ~ value + capital, data = Grunfeld,
effect = "twoways", model = "within")
pFtest(gi, gp)
pFtest(gt, gp)
pFtest(gd, gp)
pFtest(inv ~ value + capital, data = Grunfeld, effect = "twoways")
}
\seealso{
\code{\link[=plmtest]{plmtest()}} for Lagrange multiplier tests of individuals
and/or time effects.
}
\author{
Yves Croissant
}
\keyword{htest}
|
# DataScience01c
# Data Preparation
# Start fresh
rm(list=ls())
# Outlier Removal
# This is a vector
c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# assign the vector
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Guestimate: -19 is the outlier
# Anything less than -6 can be removed
# The following indicates whether we want to keep the values:
Vector > -6
Vector <- Vector[Vector > -6]
Vector
# Start fresh
rm(list=ls())
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Goal: Remove anything beyond 2 standard deviations from the mean
VectorMean <- mean(Vector)
VectorSd <- sd(Vector)
lowBoundary <- VectorMean - 2*VectorSd
HighBoundary <- VectorMean + 2*VectorSd
goodFlag <- (Vector > lowBoundary) & (Vector < HighBoundary)
Vector <- Vector[goodFlag]
Vector
# Outlier Removal (categorical)
Vector <- c('a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'd', 'a', 'a', 'a', 'a')
# Category Outlier is 'd' because it occurs less than 5% of the time
Vector
Vector.modified <- Vector[Vector != 'd']
# Show which categories were removed
# (fixed: previously referenced the undefined name `Vector_mod`)
setdiff(Vector, Vector.modified)
# Relabel
c('BS', 'MS', 'PhD', 'HS', 'BSc', 'Masters', 'High School', 'Masters', 'Masters', 'BA', 'Bachelors', 'MS', 'MS')
Vector <- c('BS', 'MS', 'PhD', 'HS', 'BSc', 'Masters', 'High School', 'Masters', 'Masters', 'BA', 'Bachelors', 'MS', 'MS')
unique(Vector)
length(unique(Vector))
Vector[Vector == 'Bachelors'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'BSc'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'BA'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'Masters'] <- 'MS'
length(unique(Vector))
Vector[Vector == 'High School'] <- 'HS'
length(unique(Vector))
Vector
# Turn codes into years of college
# Exercise
#Normalization
# Start fresh
rm(list=ls())
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Linear normalization maps data in a linear fashion: normalizedVector <- Vector*slope + offset
# Min Max normalization maps from 0 to 1.
# y = a + bx
# OR:
# y = (x - c)/d; Where: a = -c/d; b = 1/d
# "c" adjusts min value to zero:
minValue <- min(Vector)
# range adjusts max value to 1
# range is the min subtracted from the max
range <- max(Vector) - minValue
Vector<- (Vector - minValue)/range
min(Vector)
max(Vector)
Vector
rm(list=ls())
# Relabel and cast this vector into a number:
c('one', 'two', 3, 4, 5, 6, 7, 8, 9, 0, 1)
rm(list=ls())
# Binarization:
# Binarization turns columns of categories into a columns of binaries:
# You start out with a vector called vehicle that can contain three categories: car, truck, bicycle
# Vehicle vector looks like the following:
# c(car, bicycle, bicycle, bicycle, car, car, truck, bicycle, truck, bicycle)
# You create three columns called car, truck, and bicycle:
# car <- c(1,0,0,0,1,1,0,0,0,0)
# truck <- c(0,0,0,0,0,0,1,0,1,0)
# bicycle <- c(0,1,1,1,0,0,0,1,0,1)
# Binning
Vector<- c(1, 1:5, 1:10, 1:20, 1:40, 100) # Vector<- c(runif(30))
Vector
hist(Vector)
numberOfBins <- 7
# Discretization into 7 equal-width bins
range <- max(Vector) - min(Vector)
binWidth <- range / numberOfBins
bin1Min <- -Inf
bin1Max <- min(Vector) + 1*binWidth
bin2Min <- bin1Max
bin2Max <- min(Vector) + 2*binWidth
# fixed: bin3Min was set to bin1Max, which made bin 3 overlap bin 2 and
# overwrite every "L2" label with "L3" in the assignments below
bin3Min <- bin2Max
bin3Max <- min(Vector) + 3*binWidth
bin4Min <- bin3Max
bin4Max <- min(Vector) + 4*binWidth
bin5Min <- bin4Max
bin5Max <- min(Vector) + 5*binWidth
bin6Min <- bin5Max
bin6Max <- min(Vector) + 6*binWidth
bin7Min <- bin6Max
bin7Max <- Inf
xDiscretized <- rep(NA, length(Vector))
xDiscretized
xDiscretized[bin1Min < Vector & Vector <= bin1Max] <- "L1"
xDiscretized
xDiscretized[bin2Min < Vector & Vector <= bin2Max] <- "L2"
xDiscretized
xDiscretized[bin3Min < Vector & Vector <= bin3Max] <- "L3"
xDiscretized[bin4Min < Vector & Vector <= bin4Max] <- "L4"
xDiscretized[bin5Min < Vector & Vector <= bin5Max] <- "L5"
xDiscretized[bin6Min < Vector & Vector <= bin6Max] <- "L6"
xDiscretized[bin7Min < Vector & Vector <= bin7Max] <- "L7"
xDiscretized
# Compute bin limits for equal-frequency (quantile) binning.
#
# The inner limits are the 1/n, 2/n, ..., (n-1)/n quantiles of `Vector`,
# bracketed by -Inf and +Inf so the result can be passed directly to cut().
# Returns an unnamed numeric vector of length numberOfBins + 1.
quantileBinMax <- function(Vector=c(1,1,1,2,2,2,10), numberOfBins=2)
{
  # seq_len() handles numberOfBins == 1 correctly (the original 1:(n-1)
  # produced c(1, 0) and evaluated a spurious quantile); vapply avoids
  # growing the result one element at a time
  binMax <- vapply(
    seq_len(numberOfBins - 1),
    function(i) quantile(Vector, i/numberOfBins, names = FALSE),
    numeric(1)
  )
  c(-Inf, binMax, +Inf)
}
binLimits <- quantileBinMax(Vector, numberOfBins)
cut(Vector, binLimits, right=T)
?hist
hist(Vector, c(min(Vector), binLimits[2:(length(binLimits)-1)], max(Vector)))
quantile(1:11, .49)
| /Lesson 01/R files/DataScience01c.R | no_license | samir72/What-is-Data-Science | R | false | false | 4,281 | r | # DataScience01c
# Data Preparation
# Start fresh
rm(list=ls())
# Outlier Removal
# This is a vector
c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# assign the vector
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Guestimate: -19 is the outlier
# Anything less than -6 can be removed
# The following indicates whether we want to keep the values:
Vector > -6
Vector <- Vector[Vector > -6]
Vector
# Start fresh
rm(list=ls())
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Goal: Remove anything beyond 2 standard deviations from the mean
VectorMean <- mean(Vector)
VectorSd <- sd(Vector)
lowBoundary <- VectorMean - 2*VectorSd
HighBoundary <- VectorMean + 2*VectorSd
goodFlag <- (Vector > lowBoundary) & (Vector < HighBoundary)
Vector <- Vector[goodFlag]
Vector
# Outlier Removal (categorical)
Vector <- c('a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'd', 'a', 'a', 'a', 'a')
# Category Outlier is 'd' because it occurs less than 5% of the time
Vector
Vector.modified <- Vector[Vector != 'd']
# Show which categories were removed
# (fixed: previously referenced the undefined name `Vector_mod`)
setdiff(Vector, Vector.modified)
# Relabel
c('BS', 'MS', 'PhD', 'HS', 'BSc', 'Masters', 'High School', 'Masters', 'Masters', 'BA', 'Bachelors', 'MS', 'MS')
Vector <- c('BS', 'MS', 'PhD', 'HS', 'BSc', 'Masters', 'High School', 'Masters', 'Masters', 'BA', 'Bachelors', 'MS', 'MS')
unique(Vector)
length(unique(Vector))
Vector[Vector == 'Bachelors'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'BSc'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'BA'] <- 'BS'
length(unique(Vector))
Vector[Vector == 'Masters'] <- 'MS'
length(unique(Vector))
Vector[Vector == 'High School'] <- 'HS'
length(unique(Vector))
Vector
# Turn codes into years of college
# Exercise
#Normalization
# Start fresh
rm(list=ls())
Vector <- c(1, -1, -5, -1, -1, -19, 3, -1, -1, -5)
# Linear normalization maps data in a linear fashion: normalizedVector <- Vector*slope + offset
# Min Max normalization maps from 0 to 1.
# y = a + bx
# OR:
# y = (x - c)/d; Where: a = -c/d; b = 1/d
# "c" adjusts min value to zero:
minValue <- min(Vector)
# range adjusts max value to 1
# range is the min subtracted from the max
range <- max(Vector) - minValue
Vector<- (Vector - minValue)/range
min(Vector)
max(Vector)
Vector
rm(list=ls())
# Relabel and cast this vector into a number:
c('one', 'two', 3, 4, 5, 6, 7, 8, 9, 0, 1)
rm(list=ls())
# Binarization:
# Binarization turns columns of categories into a columns of binaries:
# You start out with a vector called vehicle that can contain three categories: car, truck, bicycle
# Vehicle vector looks like the following:
# c(car, bicycle, bicycle, bicycle, car, car, truck, bicycle, truck, bicycle)
# You create three columns called car, truck, and bicycle:
# car <- c(1,0,0,0,1,1,0,0,0,0)
# truck <- c(0,0,0,0,0,0,1,0,1,0)
# bicycle <- c(0,1,1,1,0,0,0,1,0,1)
# Binning
Vector<- c(1, 1:5, 1:10, 1:20, 1:40, 100) # Vector<- c(runif(30))
Vector
hist(Vector)
numberOfBins <- 7
# Discretization into 7 equal-width bins
range <- max(Vector) - min(Vector)
binWidth <- range / numberOfBins
bin1Min <- -Inf
bin1Max <- min(Vector) + 1*binWidth
bin2Min <- bin1Max
bin2Max <- min(Vector) + 2*binWidth
# fixed: bin3Min was set to bin1Max, which made bin 3 overlap bin 2 and
# overwrite every "L2" label with "L3" in the assignments below
bin3Min <- bin2Max
bin3Max <- min(Vector) + 3*binWidth
bin4Min <- bin3Max
bin4Max <- min(Vector) + 4*binWidth
bin5Min <- bin4Max
bin5Max <- min(Vector) + 5*binWidth
bin6Min <- bin5Max
bin6Max <- min(Vector) + 6*binWidth
bin7Min <- bin6Max
bin7Max <- Inf
xDiscretized <- rep(NA, length(Vector))
xDiscretized
xDiscretized[bin1Min < Vector & Vector <= bin1Max] <- "L1"
xDiscretized
xDiscretized[bin2Min < Vector & Vector <= bin2Max] <- "L2"
xDiscretized
xDiscretized[bin3Min < Vector & Vector <= bin3Max] <- "L3"
xDiscretized[bin4Min < Vector & Vector <= bin4Max] <- "L4"
xDiscretized[bin5Min < Vector & Vector <= bin5Max] <- "L5"
xDiscretized[bin6Min < Vector & Vector <= bin6Max] <- "L6"
xDiscretized[bin7Min < Vector & Vector <= bin7Max] <- "L7"
xDiscretized
# Compute bin limits for equal-frequency (quantile) binning.
#
# The inner limits are the 1/n, 2/n, ..., (n-1)/n quantiles of `Vector`,
# bracketed by -Inf and +Inf so the result can be passed directly to cut().
# Returns an unnamed numeric vector of length numberOfBins + 1.
quantileBinMax <- function(Vector=c(1,1,1,2,2,2,10), numberOfBins=2)
{
  # seq_len() handles numberOfBins == 1 correctly (the original 1:(n-1)
  # produced c(1, 0) and evaluated a spurious quantile); vapply avoids
  # growing the result one element at a time
  binMax <- vapply(
    seq_len(numberOfBins - 1),
    function(i) quantile(Vector, i/numberOfBins, names = FALSE),
    numeric(1)
  )
  c(-Inf, binMax, +Inf)
}
binLimits <- quantileBinMax(Vector, numberOfBins)
cut(Vector, binLimits, right=T)
?hist
hist(Vector, c(min(Vector), binLimits[2:(length(binLimits)-1)], max(Vector)))
quantile(1:11, .49)
|
# Define a MATLAB function ("dice") on the remote MATLAB server that
# simulates B die rolls against the payoff vector `gains` and returns the
# total winnings. Assumes `matlab` is an open R.matlab server connection
# created earlier in the example.
code <- c(
  "function [win, aver] = dice(B)",
  "%Play the dice game B times",
  "gains = [-1, 2, -3, 4, -5, 6];",
  "plays = unidrnd(6, B, 1);",
  "win = sum(gains(plays));",
  "aver = win;"
)
setFunction(matlab, code)
# Play 1000 games on the server, then fetch both outputs back into R
evaluate(matlab, "[w, a] = dice(1000);")
res <- getVariable(matlab, c("w", "a"))
print(res)
| /incl/Matlab.setFunction.R | no_license | HenrikBengtsson/R.matlab | R | false | false | 314 | r | code <- c(
"function [win, aver] = dice(B)",
"%Play the dice game B times",
"gains = [-1, 2, -3, 4, -5, 6];",
"plays = unidrnd(6, B, 1);",
"win = sum(gains(plays));",
"aver = win;"
)
setFunction(matlab, code)
evaluate(matlab, "[w, a] = dice(1000);")
res <- getVariable(matlab, c("w", "a"))
print(res)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' Create a (virtual) DuckDB table from an Arrow object
#'
#' This will do the necessary configuration to create a (virtual) table in DuckDB
#' that is backed by the Arrow object given. No data is copied or modified until
#' `collect()` or `compute()` are called or a query is run against the table.
#'
#' The result is a dbplyr-compatible object that can be used in d(b)plyr pipelines.
#'
#' If `auto_disconnect = TRUE`, the DuckDB table that is created will be configured
#' to be unregistered when the `tbl` object is garbage collected. This is helpful
#' if you don't want to have extra table objects in DuckDB after you've finished
#' using them.
#'
#' @param .data the Arrow object (e.g. Dataset, Table) to use for the DuckDB table
#' @param con a DuckDB connection to use (default will create one and store it
#' in `options("arrow_duck_con")`)
#' @param table_name a name to use in DuckDB for this object. The default is a
#' unique string `"arrow_"` followed by numbers.
#' @param auto_disconnect should the table be automatically cleaned up when the
#' resulting object is removed (and garbage collected)? Default: `TRUE`
#'
#' @return A `tbl` of the new table in DuckDB
#'
#' @name to_duckdb
#' @export
#' @examplesIf getFromNamespace("run_duckdb_examples", "arrow")()
#' library(dplyr)
#'
#' ds <- InMemoryDataset$create(mtcars)
#'
#' ds %>%
#'   filter(mpg < 30) %>%
#'   group_by(cyl) %>%
#'   to_duckdb() %>%
#'   slice_min(disp)
to_duckdb <- function(.data,
                      con = arrow_duck_connection(),
                      table_name = unique_arrow_tablename(),
                      auto_disconnect = TRUE) {
  .data <- as_adq(.data)
  if (!requireNamespace("duckdb", quietly = TRUE)) {
    abort("Please install the `duckdb` package to pass data with `to_duckdb()`.")
  }
  duckdb::duckdb_register_arrow(con, table_name, .data)
  tbl <- dplyr::tbl(con, table_name)
  groups <- dplyr::groups(.data)
  if (length(groups) > 0) {
    # `groups` is a list of symbols, so it must be spliced with `!!!` to
    # group by each variable; passing the list as a single argument errors
    tbl <- dplyr::group_by(tbl, !!!groups)
  }
  if (auto_disconnect) {
    # this will add the correct connection disconnection when the tbl is gced.
    # this is similar to what dbplyr does, though it calls it tbl$src$disco
    tbl$src$.arrow_finalizer_environment <- duckdb_disconnector(con, table_name)
  }
  tbl
}
# Return a process-wide DuckDB connection, cached in options("arrow_duck_con").
# A fresh connection is created if none exists yet or the cached one is no
# longer valid (e.g. it has been disconnected).
arrow_duck_connection <- function() {
  con <- getOption("arrow_duck_con")
  if (is.null(con) || !DBI::dbIsValid(con)) {
    con <- DBI::dbConnect(duckdb::duckdb())
    # Use the same CPU count that the arrow library is set to
    DBI::dbExecute(con, paste0("PRAGMA threads=", cpu_count()))
    options(arrow_duck_con = con)
  }
  con
}
# helper function to determine if duckdb examples should run
# see: https://github.com/r-lib/roxygen2/issues/1242
# TRUE only when the Arrow dataset feature is compiled in and sufficiently
# recent versions of duckdb, dplyr, dbplyr, and R itself are available.
run_duckdb_examples <- function() {
  arrow_with_dataset() &&
    requireNamespace("duckdb", quietly = TRUE) &&
    packageVersion("duckdb") > "0.2.7" &&
    requireNamespace("dplyr", quietly = TRUE) &&
    requireNamespace("dbplyr", quietly = TRUE) &&
    getRversion() >= 4
}
# Adapted from dbplyr
# Generate a new table name of the form "arrow_NNN" on each call, keeping
# the running counter in the "arrow_table_name" option.
unique_arrow_tablename <- function() {
  counter <- getOption("arrow_table_name", 0) + 1
  options(arrow_table_name = counter)
  paste0("arrow_", formatC(counter, width = 3, flag = "0"))
}
# Creates an environment that disconnects the database when it's GC'd
# The returned environment is stashed on the tbl (see to_duckdb()) so that
# its finalizer runs when the tbl itself is garbage collected.
duckdb_disconnector <- function(con, tbl_name) {
  # force the promise now so the finalizer closes over the evaluated name
  force(tbl_name)
  reg.finalizer(environment(), function(...) {
    # remove the table we ephemerally created (though only if the connection is
    # still valid)
    duckdb::duckdb_unregister_arrow(con, tbl_name)
  })
  environment()
}
#' Create an Arrow object from others
#'
#' This can be used in pipelines that pass data back and forth between Arrow and
#' other processes (like DuckDB).
#'
#' @param .data the object to be converted
#' @return A `RecordBatchReader`.
#' @export
#'
#' @examplesIf getFromNamespace("run_duckdb_examples", "arrow")()
#' library(dplyr)
#'
#' ds <- InMemoryDataset$create(mtcars)
#'
#' ds %>%
#'   filter(mpg < 30) %>%
#'   to_duckdb() %>%
#'   group_by(cyl) %>%
#'   summarize(mean_mpg = mean(mpg, na.rm = TRUE)) %>%
#'   to_arrow() %>%
#'   collect()
to_arrow <- function(.data) {
  # Already an Arrow object? Return it unchanged.
  if (inherits(.data, c("arrow_dplyr_query", "ArrowObject"))) {
    return(.data)
  }
  # For now, only dbplyr tbls backed by a duckdb connection are supported
  con <- dbplyr::remote_con(.data)
  if (!inherits(con, "duckdb_connection")) {
    stop(
      "to_arrow() currently only supports Arrow tables, Arrow datasets, ",
      "Arrow queries, or dbplyr tbls from duckdb connections",
      call. = FALSE
    )
  }
  # Execute the remote query and stream the result back as Arrow batches
  query_result <- DBI::dbSendQuery(con, dbplyr::remote_query(.data), arrow = TRUE)
  duckdb::duckdb_fetch_record_batch(query_result)
}
| /r/R/duckdb.R | permissive | lidavidm/arrow | R | false | false | 5,567 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' Create a (virtual) DuckDB table from an Arrow object
#'
#' This will do the necessary configuration to create a (virtual) table in DuckDB
#' that is backed by the Arrow object given. No data is copied or modified until
#' `collect()` or `compute()` are called or a query is run against the table.
#'
#' The result is a dbplyr-compatible object that can be used in d(b)plyr pipelines.
#'
#' If `auto_disconnect = TRUE`, the DuckDB table that is created will be configured
#' to be unregistered when the `tbl` object is garbage collected. This is helpful
#' if you don't want to have extra table objects in DuckDB after you've finished
#' using them.
#'
#' @param .data the Arrow object (e.g. Dataset, Table) to use for the DuckDB table
#' @param con a DuckDB connection to use (default will create one and store it
#' in `options("arrow_duck_con")`)
#' @param table_name a name to use in DuckDB for this object. The default is a
#' unique string `"arrow_"` followed by numbers.
#' @param auto_disconnect should the table be automatically cleaned up when the
#' resulting object is removed (and garbage collected)? Default: `TRUE`
#'
#' @return A `tbl` of the new table in DuckDB
#'
#' @name to_duckdb
#' @export
#' @examplesIf getFromNamespace("run_duckdb_examples", "arrow")()
#' library(dplyr)
#'
#' ds <- InMemoryDataset$create(mtcars)
#'
#' ds %>%
#'   filter(mpg < 30) %>%
#'   group_by(cyl) %>%
#'   to_duckdb() %>%
#'   slice_min(disp)
to_duckdb <- function(.data,
                      con = arrow_duck_connection(),
                      table_name = unique_arrow_tablename(),
                      auto_disconnect = TRUE) {
  .data <- as_adq(.data)
  if (!requireNamespace("duckdb", quietly = TRUE)) {
    abort("Please install the `duckdb` package to pass data with `to_duckdb()`.")
  }
  duckdb::duckdb_register_arrow(con, table_name, .data)
  tbl <- dplyr::tbl(con, table_name)
  groups <- dplyr::groups(.data)
  if (length(groups) > 0) {
    # `groups` is a list of symbols, so it must be spliced with `!!!` to
    # group by each variable; passing the list as a single argument errors
    tbl <- dplyr::group_by(tbl, !!!groups)
  }
  if (auto_disconnect) {
    # this will add the correct connection disconnection when the tbl is gced.
    # this is similar to what dbplyr does, though it calls it tbl$src$disco
    tbl$src$.arrow_finalizer_environment <- duckdb_disconnector(con, table_name)
  }
  tbl
}
arrow_duck_connection <- function() {
con <- getOption("arrow_duck_con")
if (is.null(con) || !DBI::dbIsValid(con)) {
con <- DBI::dbConnect(duckdb::duckdb())
# Use the same CPU count that the arrow library is set to
DBI::dbExecute(con, paste0("PRAGMA threads=", cpu_count()))
options(arrow_duck_con = con)
}
con
}
# helper function to determine if duckdb examples should run
# see: https://github.com/r-lib/roxygen2/issues/1242
run_duckdb_examples <- function() {
arrow_with_dataset() &&
requireNamespace("duckdb", quietly = TRUE) &&
packageVersion("duckdb") > "0.2.7" &&
requireNamespace("dplyr", quietly = TRUE) &&
requireNamespace("dbplyr", quietly = TRUE) &&
getRversion() >= 4
}
# Adapted from dbplyr
unique_arrow_tablename <- function() {
i <- getOption("arrow_table_name", 0) + 1
options(arrow_table_name = i)
sprintf("arrow_%03i", i)
}
# Creates an environment that unregisters the table when it's GC'd
duckdb_disconnector <- function(con, tbl_name) {
  # Force *both* promises now: previously only tbl_name was forced, leaving
  # `con` an unevaluated promise until the finalizer ran at GC time.
  force(con)
  force(tbl_name)
  reg.finalizer(environment(), function(...) {
    # remove the table we ephemerally created (though only if the connection
    # is still valid)
    duckdb::duckdb_unregister_arrow(con, tbl_name)
  })
  environment()
}
#' Create an Arrow object from others
#'
#' This can be used in pipelines that pass data back and forth between Arrow and
#' other processes (like DuckDB).
#'
#' @param .data the object to be converted
#' @return A `RecordBatchReader`.
#' @export
#'
#' @examplesIf getFromNamespace("run_duckdb_examples", "arrow")()
#' library(dplyr)
#'
#' ds <- InMemoryDataset$create(mtcars)
#'
#' ds %>%
#' filter(mpg < 30) %>%
#' to_duckdb() %>%
#' group_by(cyl) %>%
#' summarize(mean_mpg = mean(mpg, na.rm = TRUE)) %>%
#' to_arrow() %>%
#' collect()
to_arrow <- function(.data) {
  # Arrow-native inputs pass straight through untouched.
  if (inherits(.data, c("arrow_dplyr_query", "ArrowObject"))) {
    return(.data)
  }
  # Anything else must be a dbplyr tbl backed by a duckdb connection; that is
  # the only non-Arrow source currently supported.
  con <- dbplyr::remote_con(.data)
  if (!inherits(con, "duckdb_connection")) {
    stop(
      "to_arrow() currently only supports Arrow tables, Arrow datasets, ",
      "Arrow queries, or dbplyr tbls from duckdb connections",
      call. = FALSE
    )
  }
  # Execute the pending query, asking duckdb for Arrow-formatted results.
  res <- DBI::dbSendQuery(con, dbplyr::remote_query(.data), arrow = TRUE)
  duckdb::duckdb_fetch_record_batch(res)
}
|
# Betting functions

# Bet on "even": pays 1:1 on any nonzero even spin, loses the stake otherwise
# (zero is neither even nor odd on the wheel).
even = function(x)
{
    hit = (x != 0) & (x %% 2 == 0)
    ifelse(hit, 1, -1)
}
# Bet on "high" (19-36): pays 1:1; zero and the low half lose.
high = function(x)
{
    hit = (x > 18) & (x != 0)
    ifelse(hit, 1, -1)
}
# Bet on the first column (1, 4, ..., 34): pays 2:1, loses the stake otherwise.
column1 = function(x)
{
    in_first_column = (x %% 3 == 1)
    ifelse(in_first_column, 2, -1)
}
# Straight-up bet on the single number n: pays 35:1, loses the stake otherwise.
single = function(x, n = 1)
{
    hit = (x == n)
    ifelse(hit, 35, -1)
}
# Construct a simple betting strategy: returns a closure that maps a vector of
# spins to the cumulative winnings from placing `bet` on every spin.
simple_strategy = function(bet = even)
{
    function(x){
        outcomes = bet(x)
        cumsum(outcomes)
    }
}
#' Simulate plays from a betting strategy
#'
#' @param strategy a function mapping a vector of roulette spins to cumulative winnings, e.g. as returned by simple_strategy()
#' @param nplayers number of players to use this strategy
#' @param ntimes number of times each player should play
play = function(strategy = simple_strategy()
    , nplayers = 100L
    , ntimes = 1000L
    , ballvalues = 0:36
    ){
    # One independent session per player: draw ntimes spins uniformly (with
    # replacement) from ballvalues and apply the strategy to the whole series.
    sessions = lapply(seq_len(nplayers), function(p){
        spins = sample(ballvalues, size = ntimes, replace = TRUE)
        strategy(spins)
    })
    # Long format: one row per (player, time) with the cumulative winnings.
    data.frame(winnings = unlist(sessions, use.names = FALSE)
        , player = rep(seq(nplayers), each = ntimes)
        , time = rep(seq(ntimes), times = nplayers)
    )
}
# Martingale betting strategy: double the stake after every loss, reset to the
# initial stake after every win. Returns the cumulative winnings after each
# spin in x.
doublebet = function(x, initialbet = 1, bet = even)
{
    # Preallocate as numeric (NA_real_) rather than logical NA so the vector
    # is type-stable (empty input returns numeric(0), not logical(0)) and no
    # coercion happens inside the loop.
    winnings = rep(NA_real_, length(x))
    betsize = initialbet
    current_winnings = 0
    for(i in seq_along(x)){
        if(bet(x[i]) == 1){
            # win: collect the current stake, then fall back to the base bet
            current_winnings = current_winnings + betsize
            betsize = initialbet
        } else {
            # loss: forfeit the stake and double it for the next spin
            current_winnings = current_winnings - betsize
            betsize = 2 * betsize
        }
        winnings[i] = current_winnings
    }
    winnings
}
| /roulette.R | no_license | clarkfitzg/stat128-fall20 | R | false | false | 1,484 | r | # Betting functions
# Bet on "even": +1 on any nonzero even spin, -1 otherwise (zero always loses).
even = function(x)
{
    win = (x %% 2 == 0) & (x != 0)
    ifelse(win, 1, -1)
}
# Bet on "high" (19-36): +1 when the spin lands high, -1 otherwise.
high = function(x)
{
    win = (18 < x) & (x != 0)
    ifelse(win, 1, -1)
}
# Bet on the first column (1, 4, ..., 34): pays 2:1, loses the stake otherwise.
column1 = function(x)
{
    ifelse(x %% 3 == 1, 2, -1)
}
# Straight-up bet on the single number n: pays 35:1, loses the stake otherwise.
single = function(x, n = 1)
{
    ifelse(x == n, 35, -1)
}
# Construct a simple betting strategy
# Returns a closure: the cumulative winnings from placing `bet` on every spin.
simple_strategy = function(bet = even)
{
    function(x) cumsum(bet(x))
}
#' Simulate plays from a betting strategy
#'
#' @param strategy a function mapping a vector of roulette spins to cumulative winnings, e.g. as returned by simple_strategy()
#' @param nplayers number of players to use this strategy
#' @param ntimes number of times each player should play
play = function(strategy = simple_strategy()
    , nplayers = 100L
    , ntimes = 1000L
    , ballvalues = 0:36
    ){
    # One independent session per player: draw ntimes spins uniformly (with
    # replacement) from ballvalues and apply the strategy to the whole series.
    out = replicate(nplayers
        , strategy(sample(ballvalues, size = ntimes, replace = TRUE))
        , simplify = FALSE
    )
    # Long format: one row per (player, time) with the cumulative winnings.
    data.frame(winnings = do.call(base::c, out)
        , player = rep(seq(nplayers), each = ntimes)
        , time = rep(seq(ntimes), times = nplayers)
    )
}
# Martingale betting strategy: double the stake after each loss, reset to the
# initial stake after each win. Returns cumulative winnings after each spin.
doublebet = function(x, initialbet = 1, bet = even)
{
    winnings = rep(NA, length(x))
    betsize = initialbet
    current_winnings = 0
    for(i in seq_along(x)){
        if(bet(x[i]) == 1){
            # win: collect the current stake, then fall back to the base bet
            current_winnings = current_winnings + betsize
            betsize = initialbet
        } else {
            # loss: forfeit the stake and double it for the next spin
            current_winnings = current_winnings - betsize
            betsize = 2 * betsize
        }
        winnings[i] = current_winnings
    }
    winnings
}
|
#' Data for \pkg{brms} Models
#'
#' Generate data for \pkg{brms} models to be passed to \pkg{Stan}
#'
#' @inheritParams brm
#' @param control A named list currently for internal usage only
#' @param ... Other potential arguments
#'
#' @aliases brmdata
#'
#' @return A named list of objects containing the required data
#' to fit a \pkg{brms} model with \pkg{Stan}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' data1 <- make_standata(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative")
#' names(data1)
#'
#' data2 <- make_standata(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = "poisson")
#' names(data2)
#'
#' @export
make_standata <- function(formula, data = NULL, family = "gaussian",
                          prior = NULL, autocor = NULL, nonlinear = NULL,
                          partial = NULL, cov_ranef = NULL,
                          sample_prior = FALSE, control = NULL, ...) {
  # internal control arguments:
  #   is_newdata: is make_standata called with new data?
  #   not4stan: is make_standata called for use in S3 methods?
  #   save_order: should the initial order of the data be saved?
  #   omit_response: omit checking of the response?
  #   ntrials, ncat, Jm: standata based on the original data
  dots <- list(...)
  # use deprecated arguments if specified
  cov_ranef <- use_alias(cov_ranef, dots$cov.ranef, warn = FALSE)
  # some input checks
  if (!(is.null(data) || is.list(data)))
    stop("argument 'data' must be a data.frame or list", call. = FALSE)
  family <- check_family(family)
  nonlinear <- nonlinear2list(nonlinear)
  formula <- update_formula(formula, data = data, family = family,
                            partial = partial, nonlinear = nonlinear)
  autocor <- check_autocor(autocor)
  is_linear <- is.linear(family)
  is_ordinal <- is.ordinal(family)
  is_count <- is.count(family)
  is_forked <- is.forked(family)
  is_categorical <- is.categorical(family)
  et <- extract_time(autocor$formula)
  ee <- extract_effects(formula, family = family, et$all,
                        nonlinear = nonlinear)
  prior <- as.prior_frame(prior)
  check_prior_content(prior, family = family)
  na_action <- if (isTRUE(control$is_newdata)) na.pass else na.omit
  data <- update_data(data, family = family, effects = ee, et$group,
                      drop.unused.levels = !isTRUE(control$is_newdata),
                      na.action = na_action)
  # sort data in case of autocorrelation models
  if (has_arma(autocor)) {
    # amend if zero-inflated and hurdle models ever get
    # autocorrelation structures as they are also using 'trait'
    if (is_forked) {
      stop("no autocorrelation allowed for this model", call. = FALSE)
    }
    if (is_linear && length(ee$response) > 1L) {
      if (!grepl("^trait$|:trait$|^trait:|:trait:", et$group)) {
        stop(paste("autocorrelation structures for multiple responses must",
                   "contain 'trait' as grouping variable"), call. = FALSE)
      } else {
        to_order <- rmNULL(list(data[["trait"]], data[[et$group]],
                                data[[et$time]]))
      }
    } else {
      to_order <- rmNULL(list(data[[et$group]], data[[et$time]]))
    }
    if (length(to_order)) {
      new_order <- do.call(order, to_order)
      data <- data[new_order, ]
      # old_order will allow to retrieve the initial order of the data
      attr(data, "old_order") <- order(new_order)
    }
  }
  # response variable
  standata <- list(N = nrow(data), Y = unname(model.response(data)))
  check_response <- !isTRUE(control$omit_response)
  if (check_response) {
    if (!(is_ordinal || family$family %in% c("bernoulli", "categorical")) &&
        !is.numeric(standata$Y)) {
      stop(paste("family", family$family, "expects numeric response variable"),
           call. = FALSE)
    }
    # transform and check response variable for different families
    regex_pos_int <- "(^|_)(binomial|poisson|negbinomial|geometric)$"
    if (grepl(regex_pos_int, family$family)) {
      if (!all(is.wholenumber(standata$Y)) || min(standata$Y) < 0) {
        stop(paste("family", family$family, "expects response variable",
                   "of non-negative integers"), call. = FALSE)
      }
    } else if (family$family == "bernoulli") {
      standata$Y <- as.numeric(as.factor(standata$Y)) - 1
      if (any(!standata$Y %in% c(0,1))) {
        stop(paste("family", family$family, "expects response variable",
                   "to contain only two different values"), call. = FALSE)
      }
    } else if (family$family %in% c("beta", "zero_inflated_beta")) {
      lower <- if (family$family == "beta") any(standata$Y <= 0)
        else any(standata$Y < 0)
      upper <- any(standata$Y >= 1)
      if (lower || upper) {
        stop("beta regression requires responses between 0 and 1",
             call. = FALSE)
      }
    } else if (is_categorical) {
      standata$Y <- as.numeric(as.factor(standata$Y))
      if (length(unique(standata$Y)) < 2L) {
        stop("At least two response categories are required.", call. = FALSE)
      }
    } else if (is_ordinal) {
      if (is.ordered(standata$Y)) {
        standata$Y <- as.numeric(standata$Y)
      } else if (all(is.wholenumber(standata$Y))) {
        standata$Y <- standata$Y - min(standata$Y) + 1
      } else {
        stop(paste("family", family$family, "expects either integers or",
                   "ordered factors as response variables"), call. = FALSE)
      }
      if (length(unique(standata$Y)) < 2L) {
        stop("At least two response categories are required.", call. = FALSE)
      }
    } else if (is.skewed(family)) {
      if (min(standata$Y) <= 0) {
        stop(paste("family", family$family, "requires response variable",
                   "to be positive"), call. = FALSE)
      }
    } else if (is.zero_inflated(family) || is.hurdle(family)) {
      if (min(standata$Y) < 0) {
        stop(paste("family", family$family, "requires response variable",
                   "to be non-negative"), call. = FALSE)
      }
    }
  }
  # data for various kinds of effects
  if (length(nonlinear)) {
    nlpars <- names(ee$nonlinear)
    # matrix of covariates appearing in the non-linear formula
    C <- get_model_matrix(ee$covars, data = data)
    if (length(all.vars(ee$covars)) != ncol(C)) {
      stop("Factors with more than two levels are not allowed as covariates",
           call. = FALSE)
    }
    standata <- c(standata, list(KC = ncol(C), C = C))
    for (i in seq_along(nlpars)) {
      data_fixef <- data_fixef(ee$nonlinear[[i]], data = data,
                               family = family, nlpar = nlpars[i],
                               not4stan = isTRUE(control$not4stan))
      data_monef <- data_monef(ee$nonlinear[[i]], data = data, prior = prior,
                               Jm = control[[paste0("Jm_", nlpars[i])]],
                               nlpar = nlpars[i])
      data_ranef <- data_ranef(ee$nonlinear[[i]], data = data,
                               family = family, cov_ranef = cov_ranef,
                               is_newdata = isTRUE(control$is_newdata),
                               not4stan = isTRUE(control$not4stan),
                               nlpar = nlpars[i])
      standata <- c(standata, data_fixef, data_monef, data_ranef)
    }
  } else {
    data_fixef <- data_fixef(ee, data = data, family = family,
                             not4stan = isTRUE(control$not4stan))
    data_monef <- data_monef(ee, data = data, prior = prior, Jm = control$Jm)
    data_csef <- data_csef(ee, data = data)
    data_ranef <- data_ranef(ee, data = data, family = family,
                             cov_ranef = cov_ranef,
                             is_newdata = isTRUE(control$is_newdata),
                             not4stan = isTRUE(control$not4stan))
    standata <- c(standata, data_fixef, data_monef, data_csef, data_ranef)
    # offsets are not yet implemented for non-linear models
    standata$offset <- model.offset(data)
  }
  # data for specific families
  if (has_trials(family)) {
    if (!length(ee$trials)) {
      if (!is.null(control$trials)) {
        standata$trials <- control$trials
      } else {
        standata$trials <- max(standata$Y)
      }
    } else if (is.wholenumber(ee$trials)) {
      standata$trials <- ee$trials
    } else if (is.formula(ee$trials)) {
      standata$trials <- .addition(formula = ee$trials, data = data)
    } else stop("Response part of formula is invalid.")
    standata$max_obs <- standata$trials  # for backwards compatibility
    if (max(standata$trials) == 1L && family$family == "binomial")
      message(paste("Only 2 levels detected so that family bernoulli",
                    "might be a more efficient choice."))
    if (check_response && any(standata$Y > standata$trials))
      stop(paste("Number of trials is smaller than the response",
                 "variable would suggest."), call. = FALSE)
  }
  if (has_cat(family)) {
    if (!length(ee$cat)) {
      if (!is.null(control$ncat)) {
        standata$ncat <- control$ncat
      } else {
        standata$ncat <- max(standata$Y)
      }
    } else if (is.wholenumber(ee$cat)) {
      standata$ncat <- ee$cat
    } else stop("Addition argument 'cat' is misspecified.", call. = FALSE)
    standata$max_obs <- standata$ncat  # for backwards compatibility
    if (max(standata$ncat) == 2L) {
      message(paste("Only 2 levels detected so that family bernoulli",
                    "might be a more efficient choice."))
    }
    if (check_response && any(standata$Y > standata$ncat)) {
      # trailing space needed: paste0 concatenates the pieces verbatim
      stop(paste0("Number of categories is smaller than the response ",
                  "variable would suggest."), call. = FALSE)
    }
  }
  if (family$family == "inverse.gaussian" && check_response) {
    # save as data to reduce computation time in Stan
    if (is.formula(ee[c("weights", "cens")])) {
      standata$log_Y <- log(standata$Y)
    } else {
      standata$log_Y <- sum(log(standata$Y))
    }
    standata$sqrt_Y <- sqrt(standata$Y)
  }
  # evaluate even if check_response is FALSE to ensure that N_trait is defined
  if (is_linear && length(ee$response) > 1L) {
    standata$Y <- matrix(standata$Y, ncol = length(ee$response))
    NC_trait <- ncol(standata$Y) * (ncol(standata$Y) - 1L) / 2L
    standata <- c(standata, list(N_trait = nrow(standata$Y),
                                 K_trait = ncol(standata$Y),
                                 NC_trait = NC_trait))
  }
  if (is_forked) {
    # the second half of Y is only dummy data
    # that was put into data to make melt_data work correctly
    standata$N_trait <- nrow(data) / 2L
    standata$Y <- standata$Y[1L:standata$N_trait]
  }
  if (is_categorical && !isTRUE(control$old_cat)) {
    ncat1m <- standata$ncat - 1L
    standata$N_trait <- nrow(data) / ncat1m
    standata$Y <- standata$Y[1L:standata$N_trait]
    standata$J_trait <- matrix(1L:standata$N, ncol = ncat1m)
  }
  # data for addition arguments
  if (is.formula(ee$se)) {
    standata <- c(standata, list(se = .addition(formula = ee$se, data = data)))
  }
  if (is.formula(ee$weights)) {
    standata <- c(standata, list(weights = .addition(ee$weights, data = data)))
    if (is.linear(family) && length(ee$response) > 1 || is_forked)
      standata$weights <- standata$weights[1:standata$N_trait]
  }
  if (is.formula(ee$disp)) {
    standata <- c(standata, list(disp = .addition(ee$disp, data = data)))
  }
  if (is.formula(ee$cens) && check_response) {
    standata <- c(standata, list(cens = .addition(ee$cens, data = data)))
    if (is.linear(family) && length(ee$response) > 1 || is_forked)
      standata$cens <- standata$cens[1:standata$N_trait]
  }
  if (is.formula(ee$trunc)) {
    standata <- c(standata, .addition(ee$trunc))
    if (check_response && (min(standata$Y) < standata$lb ||
                           max(standata$Y) > standata$ub)) {
      stop("Some responses are outside of the truncation boundaries.",
           call. = FALSE)
    }
  }
  # autocorrelation variables
  if (has_arma(autocor)) {
    tgroup <- data[[et$group]]
    if (is.null(tgroup)) {
      tgroup <- rep(1, standata$N)
    }
    Kar <- get_ar(autocor)
    Kma <- get_ma(autocor)
    Karr <- get_arr(autocor)
    if (Kar || Kma) {
      # ARMA effects (of residuals)
      standata$tg <- as.numeric(as.factor(tgroup))
      standata$Kar <- Kar
      standata$Kma <- Kma
      standata$Karma <- max(Kar, Kma)
      if (use_cov(autocor)) {
        # Modeling ARMA effects using a special covariance matrix
        # requires additional data
        standata$N_tg <- length(unique(standata$tg))
        standata$begin_tg <- as.array(with(standata,
          ulapply(unique(tgroup), match, tgroup)))
        standata$nobs_tg <- as.array(with(standata,
          c(if (N_tg > 1L) begin_tg[2:N_tg], N + 1) - begin_tg))
        standata$end_tg <- with(standata, begin_tg + nobs_tg - 1)
        if (!is.null(standata$se)) {
          standata$se2 <- standata$se^2
        } else {
          standata$se2 <- rep(0, standata$N)
        }
      }
    }
    if (Karr) {
      # ARR effects (autoregressive effects of the response)
      standata$Yarr <- arr_design_matrix(Y = standata$Y, r = Karr,
                                         group = tgroup)
      standata$Karr <- Karr
    }
  }
  if (is(autocor, "cor_fixed")) {
    V <- autocor$V
    rmd_rows <- attr(data, "na.action")
    if (!is.null(rmd_rows)) {
      V <- V[-rmd_rows, -rmd_rows, drop = FALSE]
    }
    if (nrow(V) != nrow(data)) {
      stop("'V' must have the same number of rows as 'data'", call. = FALSE)
    }
    # positive definiteness check: compare the *smallest eigenvalue* to zero.
    # The comparison must be outside min(); min(values <= 0) would only be
    # TRUE when all eigenvalues are non-positive, missing indefinite V.
    if (min(eigen(V)$values) <= 0) {
      stop("'V' must be positive definite", call. = FALSE)
    }
    standata$V <- V
  }
  standata$prior_only <- ifelse(identical(sample_prior, "only"), 1L, 0L)
  if (isTRUE(control$save_order)) {
    attr(standata, "old_order") <- attr(data, "old_order")
  }
  standata
}
#' @export
brmdata <- function(formula, data = NULL, family = "gaussian",
                    autocor = NULL, partial = NULL,
                    cov_ranef = NULL, ...) {
  # deprecated alias of make_standata; kept for backwards compatibility
  # and simply forwards all arguments
  make_standata(formula = formula, data = data,
                family = family, autocor = autocor,
                partial = partial, cov_ranef = cov_ranef, ...)
}
| /brms/R/make_standata.R | no_license | ingted/R-Examples | R | false | false | 14,873 | r | #' Data for \pkg{brms} Models
#'
#' Generate data for \pkg{brms} models to be passed to \pkg{Stan}
#'
#' @inheritParams brm
#' @param control A named list currently for internal usage only
#' @param ... Other potential arguments
#'
#' @aliases brmdata
#'
#' @return A named list of objects containing the required data
#' to fit a \pkg{brms} model with \pkg{Stan}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' data1 <- make_standata(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative")
#' names(data1)
#'
#' data2 <- make_standata(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = "poisson")
#' names(data2)
#'
#' @export
make_standata <- function(formula, data = NULL, family = "gaussian",
                          prior = NULL, autocor = NULL, nonlinear = NULL,
                          partial = NULL, cov_ranef = NULL,
                          sample_prior = FALSE, control = NULL, ...) {
  # internal control arguments:
  #   is_newdata: is make_standata called with new data?
  #   not4stan: is make_standata called for use in S3 methods?
  #   save_order: should the initial order of the data be saved?
  #   omit_response: omit checking of the response?
  #   ntrials, ncat, Jm: standata based on the original data
  dots <- list(...)
  # use deprecated arguments if specified
  cov_ranef <- use_alias(cov_ranef, dots$cov.ranef, warn = FALSE)
  # some input checks
  if (!(is.null(data) || is.list(data)))
    stop("argument 'data' must be a data.frame or list", call. = FALSE)
  family <- check_family(family)
  nonlinear <- nonlinear2list(nonlinear)
  formula <- update_formula(formula, data = data, family = family,
                            partial = partial, nonlinear = nonlinear)
  autocor <- check_autocor(autocor)
  is_linear <- is.linear(family)
  is_ordinal <- is.ordinal(family)
  is_count <- is.count(family)
  is_forked <- is.forked(family)
  is_categorical <- is.categorical(family)
  et <- extract_time(autocor$formula)
  ee <- extract_effects(formula, family = family, et$all,
                        nonlinear = nonlinear)
  prior <- as.prior_frame(prior)
  check_prior_content(prior, family = family)
  na_action <- if (isTRUE(control$is_newdata)) na.pass else na.omit
  data <- update_data(data, family = family, effects = ee, et$group,
                      drop.unused.levels = !isTRUE(control$is_newdata),
                      na.action = na_action)
  # sort data in case of autocorrelation models
  if (has_arma(autocor)) {
    # amend if zero-inflated and hurdle models ever get
    # autocorrelation structures as they are also using 'trait'
    if (is_forked) {
      stop("no autocorrelation allowed for this model", call. = FALSE)
    }
    if (is_linear && length(ee$response) > 1L) {
      if (!grepl("^trait$|:trait$|^trait:|:trait:", et$group)) {
        stop(paste("autocorrelation structures for multiple responses must",
                   "contain 'trait' as grouping variable"), call. = FALSE)
      } else {
        to_order <- rmNULL(list(data[["trait"]], data[[et$group]],
                                data[[et$time]]))
      }
    } else {
      to_order <- rmNULL(list(data[[et$group]], data[[et$time]]))
    }
    if (length(to_order)) {
      new_order <- do.call(order, to_order)
      data <- data[new_order, ]
      # old_order will allow to retrieve the initial order of the data
      attr(data, "old_order") <- order(new_order)
    }
  }
  # response variable
  standata <- list(N = nrow(data), Y = unname(model.response(data)))
  check_response <- !isTRUE(control$omit_response)
  if (check_response) {
    if (!(is_ordinal || family$family %in% c("bernoulli", "categorical")) &&
        !is.numeric(standata$Y)) {
      stop(paste("family", family$family, "expects numeric response variable"),
           call. = FALSE)
    }
    # transform and check response variable for different families
    regex_pos_int <- "(^|_)(binomial|poisson|negbinomial|geometric)$"
    if (grepl(regex_pos_int, family$family)) {
      if (!all(is.wholenumber(standata$Y)) || min(standata$Y) < 0) {
        stop(paste("family", family$family, "expects response variable",
                   "of non-negative integers"), call. = FALSE)
      }
    } else if (family$family == "bernoulli") {
      standata$Y <- as.numeric(as.factor(standata$Y)) - 1
      if (any(!standata$Y %in% c(0,1))) {
        stop(paste("family", family$family, "expects response variable",
                   "to contain only two different values"), call. = FALSE)
      }
    } else if (family$family %in% c("beta", "zero_inflated_beta")) {
      lower <- if (family$family == "beta") any(standata$Y <= 0)
        else any(standata$Y < 0)
      upper <- any(standata$Y >= 1)
      if (lower || upper) {
        stop("beta regression requires responses between 0 and 1",
             call. = FALSE)
      }
    } else if (is_categorical) {
      standata$Y <- as.numeric(as.factor(standata$Y))
      if (length(unique(standata$Y)) < 2L) {
        stop("At least two response categories are required.", call. = FALSE)
      }
    } else if (is_ordinal) {
      if (is.ordered(standata$Y)) {
        standata$Y <- as.numeric(standata$Y)
      } else if (all(is.wholenumber(standata$Y))) {
        standata$Y <- standata$Y - min(standata$Y) + 1
      } else {
        stop(paste("family", family$family, "expects either integers or",
                   "ordered factors as response variables"), call. = FALSE)
      }
      if (length(unique(standata$Y)) < 2L) {
        stop("At least two response categories are required.", call. = FALSE)
      }
    } else if (is.skewed(family)) {
      if (min(standata$Y) <= 0) {
        stop(paste("family", family$family, "requires response variable",
                   "to be positive"), call. = FALSE)
      }
    } else if (is.zero_inflated(family) || is.hurdle(family)) {
      if (min(standata$Y) < 0) {
        stop(paste("family", family$family, "requires response variable",
                   "to be non-negative"), call. = FALSE)
      }
    }
  }
  # data for various kinds of effects
  if (length(nonlinear)) {
    nlpars <- names(ee$nonlinear)
    # matrix of covariates appearing in the non-linear formula
    C <- get_model_matrix(ee$covars, data = data)
    if (length(all.vars(ee$covars)) != ncol(C)) {
      stop("Factors with more than two levels are not allowed as covariates",
           call. = FALSE)
    }
    standata <- c(standata, list(KC = ncol(C), C = C))
    for (i in seq_along(nlpars)) {
      data_fixef <- data_fixef(ee$nonlinear[[i]], data = data,
                               family = family, nlpar = nlpars[i],
                               not4stan = isTRUE(control$not4stan))
      data_monef <- data_monef(ee$nonlinear[[i]], data = data, prior = prior,
                               Jm = control[[paste0("Jm_", nlpars[i])]],
                               nlpar = nlpars[i])
      data_ranef <- data_ranef(ee$nonlinear[[i]], data = data,
                               family = family, cov_ranef = cov_ranef,
                               is_newdata = isTRUE(control$is_newdata),
                               not4stan = isTRUE(control$not4stan),
                               nlpar = nlpars[i])
      standata <- c(standata, data_fixef, data_monef, data_ranef)
    }
  } else {
    data_fixef <- data_fixef(ee, data = data, family = family,
                             not4stan = isTRUE(control$not4stan))
    data_monef <- data_monef(ee, data = data, prior = prior, Jm = control$Jm)
    data_csef <- data_csef(ee, data = data)
    data_ranef <- data_ranef(ee, data = data, family = family,
                             cov_ranef = cov_ranef,
                             is_newdata = isTRUE(control$is_newdata),
                             not4stan = isTRUE(control$not4stan))
    standata <- c(standata, data_fixef, data_monef, data_csef, data_ranef)
    # offsets are not yet implemented for non-linear models
    standata$offset <- model.offset(data)
  }
  # data for specific families
  if (has_trials(family)) {
    if (!length(ee$trials)) {
      if (!is.null(control$trials)) {
        standata$trials <- control$trials
      } else {
        standata$trials <- max(standata$Y)
      }
    } else if (is.wholenumber(ee$trials)) {
      standata$trials <- ee$trials
    } else if (is.formula(ee$trials)) {
      standata$trials <- .addition(formula = ee$trials, data = data)
    } else stop("Response part of formula is invalid.")
    standata$max_obs <- standata$trials  # for backwards compatibility
    if (max(standata$trials) == 1L && family$family == "binomial")
      message(paste("Only 2 levels detected so that family bernoulli",
                    "might be a more efficient choice."))
    if (check_response && any(standata$Y > standata$trials))
      stop(paste("Number of trials is smaller than the response",
                 "variable would suggest."), call. = FALSE)
  }
  if (has_cat(family)) {
    if (!length(ee$cat)) {
      if (!is.null(control$ncat)) {
        standata$ncat <- control$ncat
      } else {
        standata$ncat <- max(standata$Y)
      }
    } else if (is.wholenumber(ee$cat)) {
      standata$ncat <- ee$cat
    } else stop("Addition argument 'cat' is misspecified.", call. = FALSE)
    standata$max_obs <- standata$ncat  # for backwards compatibility
    if (max(standata$ncat) == 2L) {
      message(paste("Only 2 levels detected so that family bernoulli",
                    "might be a more efficient choice."))
    }
    if (check_response && any(standata$Y > standata$ncat)) {
      # trailing space needed: paste0 concatenates the pieces verbatim
      stop(paste0("Number of categories is smaller than the response ",
                  "variable would suggest."), call. = FALSE)
    }
  }
  if (family$family == "inverse.gaussian" && check_response) {
    # save as data to reduce computation time in Stan
    if (is.formula(ee[c("weights", "cens")])) {
      standata$log_Y <- log(standata$Y)
    } else {
      standata$log_Y <- sum(log(standata$Y))
    }
    standata$sqrt_Y <- sqrt(standata$Y)
  }
  # evaluate even if check_response is FALSE to ensure that N_trait is defined
  if (is_linear && length(ee$response) > 1L) {
    standata$Y <- matrix(standata$Y, ncol = length(ee$response))
    NC_trait <- ncol(standata$Y) * (ncol(standata$Y) - 1L) / 2L
    standata <- c(standata, list(N_trait = nrow(standata$Y),
                                 K_trait = ncol(standata$Y),
                                 NC_trait = NC_trait))
  }
  if (is_forked) {
    # the second half of Y is only dummy data
    # that was put into data to make melt_data work correctly
    standata$N_trait <- nrow(data) / 2L
    standata$Y <- standata$Y[1L:standata$N_trait]
  }
  if (is_categorical && !isTRUE(control$old_cat)) {
    ncat1m <- standata$ncat - 1L
    standata$N_trait <- nrow(data) / ncat1m
    standata$Y <- standata$Y[1L:standata$N_trait]
    standata$J_trait <- matrix(1L:standata$N, ncol = ncat1m)
  }
  # data for addition arguments
  if (is.formula(ee$se)) {
    standata <- c(standata, list(se = .addition(formula = ee$se, data = data)))
  }
  if (is.formula(ee$weights)) {
    standata <- c(standata, list(weights = .addition(ee$weights, data = data)))
    if (is.linear(family) && length(ee$response) > 1 || is_forked)
      standata$weights <- standata$weights[1:standata$N_trait]
  }
  if (is.formula(ee$disp)) {
    standata <- c(standata, list(disp = .addition(ee$disp, data = data)))
  }
  if (is.formula(ee$cens) && check_response) {
    standata <- c(standata, list(cens = .addition(ee$cens, data = data)))
    if (is.linear(family) && length(ee$response) > 1 || is_forked)
      standata$cens <- standata$cens[1:standata$N_trait]
  }
  if (is.formula(ee$trunc)) {
    standata <- c(standata, .addition(ee$trunc))
    if (check_response && (min(standata$Y) < standata$lb ||
                           max(standata$Y) > standata$ub)) {
      stop("Some responses are outside of the truncation boundaries.",
           call. = FALSE)
    }
  }
  # autocorrelation variables
  if (has_arma(autocor)) {
    tgroup <- data[[et$group]]
    if (is.null(tgroup)) {
      tgroup <- rep(1, standata$N)
    }
    Kar <- get_ar(autocor)
    Kma <- get_ma(autocor)
    Karr <- get_arr(autocor)
    if (Kar || Kma) {
      # ARMA effects (of residuals)
      standata$tg <- as.numeric(as.factor(tgroup))
      standata$Kar <- Kar
      standata$Kma <- Kma
      standata$Karma <- max(Kar, Kma)
      if (use_cov(autocor)) {
        # Modeling ARMA effects using a special covariance matrix
        # requires additional data
        standata$N_tg <- length(unique(standata$tg))
        standata$begin_tg <- as.array(with(standata,
          ulapply(unique(tgroup), match, tgroup)))
        standata$nobs_tg <- as.array(with(standata,
          c(if (N_tg > 1L) begin_tg[2:N_tg], N + 1) - begin_tg))
        standata$end_tg <- with(standata, begin_tg + nobs_tg - 1)
        if (!is.null(standata$se)) {
          standata$se2 <- standata$se^2
        } else {
          standata$se2 <- rep(0, standata$N)
        }
      }
    }
    if (Karr) {
      # ARR effects (autoregressive effects of the response)
      standata$Yarr <- arr_design_matrix(Y = standata$Y, r = Karr,
                                         group = tgroup)
      standata$Karr <- Karr
    }
  }
  if (is(autocor, "cor_fixed")) {
    V <- autocor$V
    rmd_rows <- attr(data, "na.action")
    if (!is.null(rmd_rows)) {
      V <- V[-rmd_rows, -rmd_rows, drop = FALSE]
    }
    if (nrow(V) != nrow(data)) {
      stop("'V' must have the same number of rows as 'data'", call. = FALSE)
    }
    # positive definiteness check: compare the *smallest eigenvalue* to zero.
    # The comparison must be outside min(); min(values <= 0) would only be
    # TRUE when all eigenvalues are non-positive, missing indefinite V.
    if (min(eigen(V)$values) <= 0) {
      stop("'V' must be positive definite", call. = FALSE)
    }
    standata$V <- V
  }
  standata$prior_only <- ifelse(identical(sample_prior, "only"), 1L, 0L)
  if (isTRUE(control$save_order)) {
    attr(standata, "old_order") <- attr(data, "old_order")
  }
  standata
}
#' @export
brmdata <- function(formula, data = NULL, family = "gaussian",
                    autocor = NULL, partial = NULL,
                    cov_ranef = NULL, ...) {
  # deprecated alias of make_standata; kept for backwards compatibility
  # and simply forwards all arguments
  make_standata(formula = formula, data = data,
                family = family, autocor = autocor,
                partial = partial, cov_ranef = cov_ranef, ...)
}
|
#'
#'@title Complete spatiotemporal normalization
#'@description The script imports time series, calculates normalized series, estimates input uncertainty propagation in normalized values and plots normalized series with their uncertainty.
#'@param central time series of the central pixel
#'@param data frame pixel series used for normalization
#'@param uncertainties per-pixel input uncertainties
#'@param un_central uncertainty series of the central pixel
#'@examples
#
#
#'@author Effie Pavlidou
#'@export
#'
#import data (adjust names and formats depending on available data formats, naming and location)
#central<-read.table("central.txt")
#un_central<-read.table("un_central.txt")
#instead of reading an existing dataframe, possible to call function import: data<-import(n)
#data<-read.table("data.txt")
#uncertainties<-read.table("uncertainties.txt")
normbase<-function(central, data, uncertainties, un_central){
  # Normalize the central-pixel series by the frame average, propagate the
  # input uncertainties into the normalized values, and plot the series with
  # a shaded uncertainty band.
  #
  # central:       time series of the central pixel
  # data:          frame pixel series used for normalization
  # uncertainties: per-pixel input uncertainties
  # un_central:    uncertainty series of the central pixel
  #
  # NOTE(review): relies on normalize() and uncert_norm() defined elsewhere
  # in this package.
  #normalize central pixel with the average of the frame
  nr<-as.numeric(length(ts(central)))
  normalized<-normalize(nr, central, data)
  #uncertainty of normalized series
  un<-uncert_norm(data, uncertainties, nr, central, un_central)
  #upper and lower bounds
  u<-normalized+un
  l<-normalized-un
  #plot detail. Just an example for the given dataset, adjust to dataset at-hand
  # NOTE(review): indices 4008:4217, the 210-point x axis, the y limits, and
  # the date tick labels are hard-coded for one specific dataset (June 16-23);
  # confirm before reusing on other data.
  display.c<-normalized[4008:4217]
  display.u<-u[4008:4217]
  display.l<-l[4008:4217]
  y<-as.numeric(1:210)
  y2<-c(y,210,rev(y),1)
  # Layering is order-dependent: two invisible (white) plots set up the
  # coordinate system, then the shaded polygon, then the series on top.
  plot(y,display.u,type="l",bty="L",xlab="time",ylab="Normalized values", ylim=c(0.995, 1.008), col="white", axes=FALSE)
  par(new=T)
  plot(y,display.l,type="l",bty="L",xlab="time",ylab="Normalized values", ylim=c(0.995, 1.008), col="white", axes=FALSE)
  polygon(y2,c(display.u, display.u[210], rev(display.l), display.l[1]),col="skyblue", border="skyblue")
  par(new=T)
  plot.ts(display.c, lwd=2, col="black", ylim=c(0.995, 1.008), ylab="", xlab="", axes=FALSE)
  axis(2)
  axis(1, at=c(1, 72, 144, 192), labels=c("June 16", "June 19", "June 21", "June 23"))
  box()
}
| /R/normbase.R | no_license | effie-pav/detex | R | false | false | 1,768 | r | #'
#'@title Complete spatiotemporal normalization
#'@description The script imports time series, calculates normalized series, estimates input uncertainty propagation in normalized values and plots normalized series with their uncertainty.
#'@param central time series of the central pixel
#'@param data frame pixel series used for normalization
#'@param uncertainties per-pixel input uncertainties
#'@param un_central uncertainty series of the central pixel
#'@examples
#
#
#'@author Effie Pavlidou
#'@export
#'
#import data (adjust names and formats depending on available data formats, naming and location)
#central<-read.table("central.txt")
#un_central<-read.table("un_central.txt")
#instead of reading an existing dataframe, possible to call function import: data<-import(n)
#data<-read.table("data.txt")
#uncertainties<-read.table("uncertainties.txt")
normbase<-function(central, data, uncertainties, un_central){
  # Normalize the central-pixel series by the frame average, propagate the
  # input uncertainties into the normalized values, and plot the series with
  # a shaded uncertainty band.
  #
  # central:       time series of the central pixel
  # data:          frame pixel series used for normalization
  # uncertainties: per-pixel input uncertainties
  # un_central:    uncertainty series of the central pixel
  #
  # NOTE(review): relies on normalize() and uncert_norm() defined elsewhere
  # in this package.
  #normalize central pixel with the average of the frame
  nr<-as.numeric(length(ts(central)))
  normalized<-normalize(nr, central, data)
  #uncertainty of normalized series
  un<-uncert_norm(data, uncertainties, nr, central, un_central)
  #upper and lower bounds
  u<-normalized+un
  l<-normalized-un
  #plot detail. Just an example for the given dataset, adjust to dataset at-hand
  # NOTE(review): indices 4008:4217, the 210-point x axis, the y limits, and
  # the date tick labels are hard-coded for one specific dataset (June 16-23);
  # confirm before reusing on other data.
  display.c<-normalized[4008:4217]
  display.u<-u[4008:4217]
  display.l<-l[4008:4217]
  y<-as.numeric(1:210)
  y2<-c(y,210,rev(y),1)
  # Layering is order-dependent: two invisible (white) plots set up the
  # coordinate system, then the shaded polygon, then the series on top.
  plot(y,display.u,type="l",bty="L",xlab="time",ylab="Normalized values", ylim=c(0.995, 1.008), col="white", axes=FALSE)
  par(new=T)
  plot(y,display.l,type="l",bty="L",xlab="time",ylab="Normalized values", ylim=c(0.995, 1.008), col="white", axes=FALSE)
  polygon(y2,c(display.u, display.u[210], rev(display.l), display.l[1]),col="skyblue", border="skyblue")
  par(new=T)
  plot.ts(display.c, lwd=2, col="black", ylim=c(0.995, 1.008), ylab="", xlab="", axes=FALSE)
  axis(2)
  axis(1, at=c(1, 72, 144, 192), labels=c("June 16", "June 19", "June 21", "June 23"))
  box()
}
|
#' Title: Hult International Business School Student Spotlight
#' Purpose: Is there any bias in the showcased ambassadors' bio and interest?
#' Author: Stefania Menini
#' E-mail: smenini2019@student.hult.edu
#' Date: Mar 1 2021
################################################################################
# Analysis Objective ###########################################################
# The objective of the analysis is to ensure that the bio and the interest of #
# the showcased Hult students ambassadors do not present bias towards campuses,#
# topics, or other observed information. Achieving this goal will guarantee a #
# welcoming, diverse, and inclusive learning atmosphere (Kwartler, 2020). #
################################################################################
################################################################################
# Initial Set Up ###############################################################
################################################################################
# Setting the working directory
setwd("~/R/Git_hult_NLP_student/hult_NLP_student/cases/session II/student ambassadors")
# Loading basic packages required for the analysis
library(ggplot2)
library(ggthemes)
library(stringi)
library(stringr)
library(tm)
# Loading additional packages
library(lubridate)
library(wordcloud)
library(RColorBrewer)
library(plotrix)
library(ggalt)
library(tibble)
library(dplyr)
library(lexicon)
library(tidytext)
library(radarchart)
library(textdata)
library(magrittr)
library(corpus)
# library(hrbrthemes)
library(qdap)
library(igraph)
library(wordcloud2)
library(pbapply)
# Avoiding strings being counted as factors (default since R 4.0; kept for
# reproducibility on older R versions)
options(stringsAsFactors = FALSE)
# Using the "C" locale to limit multi-byte character errors in string handling
Sys.setlocale('LC_ALL','C')
# Loading "final_student_data" dataset and collecting dataset general info
student_text <- read.csv('final_student_data.csv', header = TRUE)
head(student_text,5) # Checking the first five rows of "final_student_data"
names(student_text) # Checking columns names of "final_student_data"
dim(student_text) # Checking "final_student_data" dimension
# Custom stopwords: the 'SMART' list plus school-specific terms
stops <- c(stopwords('SMART'), 'hult', 'hult international business school')
################################################################################
# Customized functions #########################################################
################################################################################
# Defining "tryTolower"
tryTolower <- function(x) {
  # Lower-case `x`; if tolower() errors (e.g. on invalid multi-byte
  # characters), return NA instead of stopping the pipeline.
  # Improvements over the original: tolower() is computed once via
  # tryCatch (not twice), and `<-` is used for assignment instead of `=`.
  tryCatch(tolower(x), error = function(e) NA)
}
# Defining "cleanCorpus"
cleanCorpus <- function(corpus, customStopwords) {
  # Standard cleaning pipeline, applied in the same order as before:
  # strip URLs, lower-case, remove stopwords, then punctuation, numbers,
  # and redundant whitespace.
  corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
  corpus <- tm_map(corpus, content_transformer(tryTolower))
  corpus <- tm_map(corpus, removeWords, customStopwords)
  # The remaining transformations take no extra arguments, so run them
  # sequentially from a list.
  for (cleaner in list(removePunctuation, removeNumbers, stripWhitespace)) {
    corpus <- tm_map(corpus, cleaner)
  }
  corpus
}
################################################################################
# Ambassadors General Info #####################################################
################################################################################
# Ambassadors by "campus": counts per campus, dodged by the API-inferred
# gender column "namSorGender.likelyGender"
ggplot(data = student_text,
       mapping = aes(x = campus, fill = namSorGender.likelyGender)) +
  geom_bar(position = "dodge", alpha = .9) +
  labs(title = "Hult Ambassadors", x = "Hult Campuses", y = " ",
       fill = "Gender") +
  scale_fill_manual(values = c("hotpink1", "lightgrey")) +
  theme_tufte()
###
# Ambassadors by "programTitle": horizontal bar chart of counts per program
ggplot(data = student_text, mapping = aes(x = programTitle)) +
  geom_bar(fill = "hotpink1", position = "dodge", alpha = .9) +
  labs(title = "Ambassadors per Program Title", x = "", y = " ") +
  coord_flip() +
  theme_tufte()
###
# Counting Ambassadors per Hult campus
table(student_text$campus)
# Counting Ambassadors per inferred gender
table(student_text$namSorGender.likelyGender)
# Counting Ambassadors per program title
table(student_text$programTitle)
################################################################################
# Creating and Organize Dataset Subsets ########################################
################################################################################
# Concatenating "bio" and "interests" into a single text field per student
student_text$student_allText <- paste(student_text$bio, student_text$interests)
# Renaming the first column of "student_text" to "doc_id"
names(student_text)[1] <- 'doc_id'
###
# Creating a data subset for the American Hult campuses (Boston & San Francisco).
# %in% replaces the original `campus == c('Boston') | campus == c('San Francisco')`:
# it is the idiomatic set-membership test, never recycles, and never yields NA.
america_campuses <- subset(student_text, campus %in% c('Boston', 'San Francisco'))
# Creating a data subset for the Eurasian Hult campuses (Dubai & London)
eurasia_campuses <- subset(student_text, campus %in% c('Dubai', 'London'))
################################################################################
# Searching for Word Patterns in "america_campuses" ###########################
################################################################################
# Keyword regexes are built from keyword vectors collapsed with "|".
# The original code wrapped quoted regexes across source lines, which embeds
# a literal newline (plus indentation) inside the pattern and silently
# disables every alternative that followed a line break ("multicultural",
# "listening", "connection", ...).

# Diversity keywords scanning in "america_campuses"
diversity_keywordsOR <- paste(c("diverse", "diversity", "variety", "mix",
                                "multi-cultural", "multicultural", "global",
                                "world", "cultures", "international"),
                              collapse = "|")
america_diversity <- grepl(diversity_keywordsOR,
                           america_campuses$student_allText,
                           ignore.case = TRUE)
# Calculating the % of times diversity keywords have been mentioned.
# NOTE(review): the denominator is nrow(student_text) (all students), not
# nrow(america_campuses) -- confirm this is the intended base rate.
america_diversity_score <- sum(america_diversity) / nrow(student_text)
america_diversity_score # 0.3647059 before the pattern fix
###
# Thinking (Hult DNA) keywords scanning in "america_campuses".
# BUG FIX: the original pattern ended with a trailing "|", i.e. an empty
# alternative that matches EVERY string and inflates the score.
thinking_keywordsOR <- paste(c("awareness", "self", "challenge",
                               "growth mindset"), collapse = "|")
america_thinking <- grepl(thinking_keywordsOR,
                          america_campuses$student_allText,
                          ignore.case = TRUE)
# Calculating the % of times thinking keywords have been mentioned
america_thinking_score <- sum(america_thinking) / nrow(student_text)
america_thinking_score # 0.4235294 before the pattern fix
###
# Communicating (Hult DNA) keywords scanning in "america_campuses"
communicating_keywordsOR <- paste(c("communication", "communicate",
                                    "confident", "sharing", "listening",
                                    "listen", "influence"), collapse = "|")
america_communicating <- grepl(communicating_keywordsOR,
                               america_campuses$student_allText,
                               ignore.case = TRUE)
# Calculating the % of times communicating keywords have been mentioned
america_communicating_score <- sum(america_communicating) / nrow(student_text)
america_communicating_score # 0.05882353 before the pattern fix
###
# Team-Building (Hult DNA) keywords scanning in "america_campuses"
teambuilding_keywordsOR <- paste(c("team", "peers", "clubs", "community",
                                   "engage", "engagement", "network",
                                   "connection", "connecting", "cooperation"),
                                 collapse = "|")
america_teambuilding <- grepl(teambuilding_keywordsOR,
                              america_campuses$student_allText,
                              ignore.case = TRUE)
# Calculating the % of times team-building keywords have been mentioned
america_teambuilding_score <- sum(america_teambuilding) / nrow(student_text)
america_teambuilding_score # 0.3058824 before the pattern fix
################################################################################
# Searching for Word Patterns in "eurasia_campuses" ###########################
################################################################################
# Diversity keywords scanning in "eurasia_campuses" (reuses the regex
# patterns defined in the America section above)
eurasia_diversity <- grepl(diversity_keywordsOR, eurasia_campuses$student_allText,
                           ignore.case=TRUE)
# Calculating the % of times diversity keywords have been mentioned.
# NOTE(review): the denominator is nrow(student_text) (all students), not
# nrow(eurasia_campuses) -- confirm this is the intended base rate.
eurasia_diversity_score <- sum(eurasia_diversity) / nrow(student_text)
eurasia_diversity_score # 0.5411765
###
# Thinking (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_thinking <- grepl(thinking_keywordsOR, eurasia_campuses$student_allText,
                          ignore.case=TRUE)
# Calculating the % of times thinking keywords have been mentioned
eurasia_thinking_score <- sum(eurasia_thinking) / nrow(student_text)
eurasia_thinking_score # 0.5764706
###
# Communicating (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_communicating <- grepl(communicating_keywordsOR,
                               eurasia_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times communicating keywords have been mentioned
eurasia_communicating_score <- sum(eurasia_communicating) / nrow(student_text)
eurasia_communicating_score # 0.1176471
###
# Team-Building (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_teambuilding <- grepl(teambuilding_keywordsOR,
                              eurasia_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times team-building keywords have been mentioned
eurasia_teambuilding_score <- sum(eurasia_teambuilding) / nrow(student_text)
eurasia_teambuilding_score # 0.3176471
################################################################################
# Comparing Results: "america_campuses" vs. "eurasia_campuses" #################
################################################################################
# Summarizing the campus-group scores per category as a 4x2 matrix
# (rows = categories, columns = campus groups), built row by row so each
# category and its two scores stay on one line.
scores_comparison <- rbind(
  "Diversity"     = c(america_diversity_score,     eurasia_diversity_score),
  "Thinking"      = c(america_thinking_score,      eurasia_thinking_score),
  "Communicating" = c(america_communicating_score, eurasia_communicating_score),
  "Team Building" = c(america_teambuilding_score,  eurasia_teambuilding_score)
)
# Defining the column names (rbind already set the row names)
colnames(scores_comparison) <- c("America Campuses", "Eurasia Campuses")
# Displaying the "scores_comparison" matrix
scores_comparison
################################################################################
# String_count() for Hult DNA categories and Diversity (all dataset) ##########
################################################################################
# Counting keyword OCCURRENCES (not just matching documents) per category.
# As in the scanning section, each regex is collapsed from a keyword vector
# so no alternative contains a line break (the original wrapped patterns
# embedded literal newlines that silently broke the wrapped alternatives).
diversity <- sum(stri_count(student_text$student_allText,
                            regex = paste(c("diverse", "diversity", "variety",
                                            "mix", "multi-cultural",
                                            "multicultural", "global", "world",
                                            "cultures", "international"),
                                          collapse = "|")))
thinking <- sum(stri_count(student_text$student_allText,
                           regex = paste(c("awareness", "self", "challenge",
                                           "growth mindset"),
                                         collapse = "|")))
communicating <- sum(stri_count(student_text$student_allText,
                                regex = paste(c("communication", "communicate",
                                                "confident", "sharing",
                                                "listening", "listen",
                                                "influence"),
                                              collapse = "|")))
teambuilding <- sum(stri_count(student_text$student_allText,
                               regex = paste(c("team", "peers", "clubs",
                                               "community", "engage",
                                               "engagement", "network",
                                               "connection", "connecting",
                                               "cooperation"),
                                             collapse = "|")))
# Organizing term objects into a data frame
all_termFreq <- data.frame(Terms = c('diversity', 'thinking', 'communicating',
                                     'teambuilding'),
                           Freq = c(diversity, thinking, communicating,
                                    teambuilding))
# Checking the object frequencies
all_termFreq
# Plotting a geom_bar() for "all_termFreq"; reorder() sorts bars by frequency
ggplot(data = all_termFreq, aes(x = reorder(Terms, Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "hotpink1") +
  labs(title = "Hult DNA and Diversity Words' Categories ", y = "Count", x = " ") +
  coord_flip() +
  theme_tufte()
################################################################################
# Volatile Corpus ##############################################################
################################################################################
# Making and cleaning a volatile (in-memory) corpus for "student_text"
student_corp <- VCorpus(VectorSource(student_text$student_allText))
student_corp <- cleanCorpus(student_corp, stops)
content(student_corp[[1]]) # Inspecting the first cleaned document
###
# Making and cleaning a volatile corpus for "america_campuses"
america_corp <- VCorpus(VectorSource(america_campuses$student_allText))
america_corp <- cleanCorpus(america_corp, stops)
content(america_corp[[1]]) # Inspecting the first cleaned document
###
# Making and cleaning a volatile corpus for "eurasia_campuses"
eurasia_corp <- VCorpus(VectorSource(eurasia_campuses$student_allText))
eurasia_corp <- cleanCorpus(eurasia_corp, stops)
content(eurasia_corp[[1]]) # Inspecting the first cleaned document
################################################################################
# Term Document Matrix #########################################################
################################################################################
# Term Document Matrix (raw term-frequency weighting) for "america_campuses"
america_Tdm <- TermDocumentMatrix(america_corp, control = list(weighting = weightTf))
america_TdmM <- as.matrix(america_Tdm)
dim(america_TdmM) # Checking matrix dimensions (terms x documents)
###
# Term Document Matrix for "eurasia_campuses"
eurasia_Tdm <- TermDocumentMatrix(eurasia_corp, control = list(weighting = weightTf))
eurasia_TdmM <- as.matrix(eurasia_Tdm)
dim(eurasia_TdmM) # Checking matrix dimensions
###
# Term Document Matrix for the FULL dataset
# (the original comment wrongly said "eurasia_campuses")
student_Tdm <- TermDocumentMatrix(student_corp, control = list(weighting = weightTf))
student_TdmM <- as.matrix(student_Tdm)
dim(student_TdmM) # Checking matrix dimensions
################################################################################
# Most Frequent Terms ##########################################################
################################################################################
# Getting the most frequent terms for "america_campuses"
america_TopTerms <- rowSums(america_TdmM)
america_TopTerms <- data.frame(terms = rownames(america_TdmM),
                               freq = america_TopTerms)
rownames(america_TopTerms) <- NULL
head(america_TopTerms)
# Getting the most frequent term
america_idx <- which.max(america_TopTerms$freq)
america_TopTerms[america_idx, ] # business
###
# Getting the most frequent terms for "eurasia_campuses"
eurasia_TopTerms <- rowSums(eurasia_TdmM)
eurasia_TopTerms <- data.frame(terms = rownames(eurasia_TdmM),
                               freq = eurasia_TopTerms)
rownames(eurasia_TopTerms) <- NULL
head(eurasia_TopTerms)
# Getting the most frequent term.
# BUG FIX: the original indexed america_TopTerms with eurasia_idx, printing
# a row from the wrong table; index eurasia_TopTerms instead.
eurasia_idx <- which.max(eurasia_TopTerms$freq)
eurasia_TopTerms[eurasia_idx, ]
###
# Getting the most frequent terms for the full dataset
student_all_TopTerms <- rowSums(student_TdmM)
student_all_TopTerms <- data.frame(terms = rownames(student_TdmM),
                                   freq = student_all_TopTerms)
rownames(student_all_TopTerms) <- NULL
head(student_all_TopTerms)
# Getting the most frequent term
student_all_idx <- which.max(student_all_TopTerms$freq)
student_all_TopTerms[student_all_idx, ] # business
################################################################################
# Plotting the most Frequent Terms #############################################
################################################################################
# Creating an "america_TopTerms" subset with terms appearing more than 12 times
america_Top_subset <- subset(america_TopTerms, america_TopTerms$freq > 12)
# Sort ascending; FALSE rather than the reassignable shorthand F
america_Top_subset <- america_Top_subset[order(america_Top_subset$freq,
                                               decreasing = FALSE), ]
america_Top_subset[10:30, ]
# Converting top terms into factors so ggplot preserves the sorted bar order
america_Top_subset$terms <- factor(america_Top_subset$terms,
                                   levels = unique(as.character(america_Top_subset$terms)))
# Plotting top terms for "america_campuses"
ggplot(data = america_Top_subset, mapping = aes(x = terms, y = freq)) +
  geom_bar(stat = "identity", fill = "hotpink1") +
  labs(title = "Top Terms among America Campuses", x = "", y = "Frequency") +
  coord_flip() +
  theme_tufte()
###
# Creating an "eurasia_TopTerms" subset with terms appearing more than 16 times
eurasia_Top_subset <- subset(eurasia_TopTerms, eurasia_TopTerms$freq > 16)
eurasia_Top_subset <- eurasia_Top_subset[order(eurasia_Top_subset$freq,
                                               decreasing = FALSE), ]
eurasia_Top_subset[9:31, ]
# Converting top terms into factors
eurasia_Top_subset$terms <- factor(eurasia_Top_subset$terms,
                                   levels = unique(as.character(eurasia_Top_subset$terms)))
# Plotting top terms for "eurasia_campuses"
ggplot(data = eurasia_Top_subset, mapping = aes(x = terms, y = freq)) +
  geom_bar(stat = "identity", fill = "hotpink1") +
  labs(title = "Top Terms among Eurasia Campuses", x = "", y = "Frequency") +
  coord_flip() +
  theme_tufte()
################################################################################
# Association Analysis #########################################################
################################################################################
# Inspecting word associations (correlation >= 0.37) with 'business'
# in "america_campuses"
america_associations <- findAssocs(america_Tdm, 'business', 0.37)
america_associations # Checking results
# Organizing "america_associations" into a plottable data frame
america_assocDF <- data.frame(terms=names(america_associations[[1]]),
                              value=unlist(america_associations))
# Fixing factor levels so ggplot keeps the correlation order
america_assocDF$terms <- factor(america_assocDF$terms, levels=america_assocDF$terms)
rownames(america_assocDF) <- NULL
america_assocDF
# Displaying associations as a labelled dot plot
ggplot(america_assocDF, aes(y=terms)) +
  geom_point(aes(x=value), data=america_assocDF, col='hotpink1') +
  labs(title = "Association Analysis for Business in America Campuses", x = "Value",
       y = " ") +
  theme_tufte() +
  geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
            vjust ="inward" , size=3)
###
# Inspecting word associations for "eurasia_campuses" (same 0.37 cutoff)
eurasia_associations <- findAssocs(eurasia_Tdm, 'business', 0.37)
eurasia_associations # Checking results
# Organizing "eurasia_associations" into a plottable data frame
eurasia_assocDF <- data.frame(terms=names(eurasia_associations[[1]]),
                              value=unlist(eurasia_associations))
eurasia_assocDF$terms <- factor(eurasia_assocDF$terms, levels=eurasia_assocDF$terms)
rownames(eurasia_assocDF) <- NULL
eurasia_assocDF
# Displaying associations
ggplot(eurasia_assocDF, aes(y=terms)) +
  geom_point(aes(x=value), data=eurasia_assocDF, col='hotpink1') +
  labs(title = "Association Analysis for Business in Eurasia Campuses", x = "Value",
       y = " ") +
  theme_tufte() +
  geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
            vjust ="inward" , size=3)
###
# Inspecting word associations for the full dataset (lower 0.30 cutoff)
student_associations <- findAssocs(student_Tdm, 'business', 0.30)
student_associations # Checking results
# Organizing the full-dataset associations into a plottable data frame
student_assocDF <- data.frame(terms=names(student_associations[[1]]),
                              value=unlist(student_associations))
student_assocDF$terms <- factor(student_assocDF$terms, levels=student_assocDF$terms)
rownames(student_assocDF) <- NULL
student_assocDF
# Displaying associations
ggplot(student_assocDF, aes(y=terms)) +
  geom_point(aes(x=value), data=student_assocDF, col='hotpink1') +
  labs(title = "Association Analysis for Business (all dataset)", x = "Value",
       y = " ") +
  theme_tufte() +
  geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
            vjust ="inward" , size=3)
################################################################################
# WorldCloud ###################################################################
################################################################################
# Setting the wordcloud palette: greys with the two lightest shades dropped
pal <- brewer.pal(8, "Greys")
pal <- pal[-(1:2)]
###
# Plotting a wordcloud for "america_campuses" (fixed seed = reproducible layout)
set.seed(1234)
wordcloud(america_TopTerms$terms,
          america_TopTerms$freq,
          max.words = 50,
          random.order = FALSE,
          colors = pal,
          scale = c(2,1))
###
# Plotting a wordcloud for "eurasia_campuses"
set.seed(1234)
wordcloud(eurasia_TopTerms$terms,
          eurasia_TopTerms$freq,
          max.words = 50,
          random.order = FALSE,
          colors = pal,
          scale = c(2,1))
###
# Plotting a wordcloud for the full dataset
set.seed(1234)
wordcloud(student_all_TopTerms$terms,
          student_all_TopTerms$freq,
          max.words = 50,
          random.order = FALSE,
          colors = pal,
          scale = c(2,1))
################################################################################
# Other WordCloud Type #########################################################
################################################################################
# Palette for wordcloud2. NOTE(review): unlike `pal` above, the light shades
# are NOT dropped here -- confirm whether pal2[-(1:2)] was intended.
pal2 <- brewer.pal(8, "Greys")
wordcloud2(student_all_TopTerms[1:50,],
           color = pal2,
           backgroundColor = "pink")
################################################################################
# Comparison Cloud: "bio" vs "interest" ########################################
################################################################################
# Defining a vector corpus for each text field ("bio" vs "interests").
# NOTE(review): the column created earlier is "interests";
# student_text$interest relies on data.frame $ partial matching -- confirm.
all_bio <- VCorpus(VectorSource(student_text$bio))
all_interest <- VCorpus(VectorSource(student_text$interest))
# Cleaning up the data with the shared pipeline
all_bio <- cleanCorpus(all_bio, stops)
all_interest <- cleanCorpus(all_interest, stops)
# Checking the results
length(all_bio)
length(all_interest)
# Collapsing each corpus into a single "subject" string.
# NOTE(review): paste() is applied directly to a VCorpus object, which
# deparses the corpus elements rather than extracting only their text;
# verify the output vs. paste(unlist(lapply(corpus, content)), collapse=' ').
all_bio <- paste(all_bio, collapse = ' ')
all_interest <- paste(all_interest, collapse = ' ')
# Combining "all_bio" and "all_interest" into a two-document corpus
bio_interest <- c(all_bio, all_interest)
bio_interest <- VCorpus((VectorSource(bio_interest)))
# Defining the TDM with TF-IDF weighting to highlight distinctive terms
ctrl <- list(weighting = weightTfIdf)
bio_interest_TDM <- TermDocumentMatrix(bio_interest, control = ctrl)
bio_interest_TDMm <- as.matrix(bio_interest_TDM)
# Naming the two document columns
colnames(bio_interest_TDMm) <- c('Bio', 'Interests')
# Examining the TDM
head(bio_interest_TDMm)
# Plotting a comparison cloud of Bio vs Interests terms
comparison.cloud(bio_interest_TDMm,
                 max.words= 30,
                 random.order=FALSE,
                 title.size=0.8,
                 colors=brewer.pal(ncol(bio_interest_TDMm),"Paired"),
                 title.colors=FALSE, match.colors=FALSE,
                 scale=c(3,0.2))
# End ##########################################################################
| /Stefania_Menini_Student_Spotlight_Case.R | no_license | StefaniaMenini/hult-ambassadors-text-analysis | R | false | false | 22,879 | r | #' Title: Hult International Business School Student Spotlight
#' Purpose: Is there any bias in the showcased ambassadors' bio and interest?
#' Author: Stefania Menini
#' E-mail: smenini2019@student.hult.edu
#' Date: Mar 1 2021
################################################################################
# Analysis Objective ###########################################################
# The objective of the analysis is to ensure that the bio and the interest of #
# the showcased Hult students ambassadors do not present bias towards campuses,#
# topics, or other observed information. Achieving this goal will guarantee a #
# welcoming, diverse, and inclusive learning atmosphere (Kwartler, 2020). #
################################################################################
################################################################################
# Initial Set Up ###############################################################
################################################################################
# Setting the working directory
setwd("~/R/Git_hult_NLP_student/hult_NLP_student/cases/session II/student ambassadors")
# Loading basic packages required for the analysis
library(ggplot2)
library(ggthemes)
library(stringi)
library(stringr)
library(tm)
# Loading additional packages
library(lubridate)
library(wordcloud)
library(RColorBrewer)
library(plotrix)
library(ggalt)
library(tibble)
library(dplyr)
library(lexicon)
library(tidytext)
library(radarchart)
library(textdata)
library(magrittr)
library(corpus)
# library(hrbrthemes)
library(qdap)
library(igraph)
library(wordcloud2)
library(pbapply)
# Avoiding strings being counted as factors
options(stringsAsFactors = FALSE)
# Limiting errors by expanding the accepted number of location in character types
Sys.setlocale('LC_ALL','C')
# Loading "final_student_data" dataset and collecting dataset general info
student_text <- read.csv('final_student_data.csv', header = TRUE)
head(student_text,5) # Checking the first five rows of "final_student_data"
names(student_text) # Checking columns names of "final_student_data"
dim(student_text) # Checking "final_student_data" dimension
# Creating stopwords using the 'SMART'
stops <- c(stopwords('SMART'), 'hult', 'hult international business school')
################################################################################
# Customized functions #########################################################
################################################################################
# Defining "tryTolower"
tryTolower <- function(x){
# return NA when there is an error
y = NA
# tryCatch error
try_error = tryCatch(tolower(x), error = function(e) e)
# if not an error
if (!inherits(try_error, 'error'))
y = tolower(x)
return(y)}
# Defining "cleanCorpus"
cleanCorpus<-function(corpus, customStopwords){
corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
corpus <- tm_map(corpus, content_transformer(tryTolower))
corpus <- tm_map(corpus, removeWords, customStopwords)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, stripWhitespace)
return(corpus)}
################################################################################
# Ambassadors General Info #####################################################
################################################################################
# Ambassadors by "campus"
ggplot(data = student_text,
mapping = aes(x = campus, fill = namSorGender.likelyGender)) +
geom_bar(position = "dodge", alpha = .9) +
labs(title = "Hult Ambassadors", x = "Hult Campuses", y = " ",
fill = "Gender") +
scale_fill_manual(values = c("hotpink1", "lightgrey")) +
theme_tufte()
###
# Ambassadors by "programTitle"
ggplot(data = student_text, mapping = aes(x = programTitle)) +
geom_bar(fill = "hotpink1", position = "dodge", alpha = .9) +
labs(title = "Ambassadors per Program Title", x = "", y = " ") +
coord_flip() +
theme_tufte()
###
# Counting Ambassadors per Hult campus
table(student_text$campus)
# Counting Ambassadors' per gender
table(student_text$namSorGender.likelyGender)
# Counting Ambassadors per program title
table(student_text$programTitle)
################################################################################
# Creating and Organize Dataset Subsets ########################################
################################################################################
# Concatenating "bio" and "interest"
student_text$student_allText <- paste(student_text$bio,student_text$interests)
# Renaming "student_text: first column
names(student_text)[1] <- 'doc_id'
###
# Creating a data subset for the American Hult campuses (Boston & San Francisco)
america_campuses <- subset(student_text, student_text$campus == c('Boston')
| student_text$campus == c('San Francisco'))
# Creating a data subset for the Eurasian Hult campuses (Dubai & London)
eurasia_campuses <- subset(student_text, student_text$campus == c('Dubai')
| student_text$campus == c('London'))
################################################################################
# Searching for Word Patterns in "america_campuses" ###########################
################################################################################
# Diversity keywords scanning in "america_campuses"
diversity_keywordsOR <-"diverse|diversity|variety|mix|multi-cultural|
multicultural|global|world|cultures|international"
america_diversity <- grepl(diversity_keywordsOR, america_campuses$student_allText,
ignore.case=TRUE)
# Calculating the % of times diversity keywords have been metioned
america_diversity_score <- sum(america_diversity) / nrow(student_text)
america_diversity_score # 0.3647059
###
# Thinking (Hult DNA) keywords scanning in "america_campuses"
thinking_keywordsOR <-"awareness|self|challenge|growth mindset|"
america_thinking <- grepl(thinking_keywordsOR, america_campuses$student_allText,
ignore.case=TRUE)
# Calculating the % of times thinking keywords have been metioned
america_thinking_score <- sum(america_thinking) / nrow(student_text)
america_thinking_score # 0.4235294
###
# Communicating (Hult DNA) keywords scanning in "america_campuses"
communicating_keywordsOR <-"communication|communicate|confident|sharing|
listening|listen|influence"
america_communicating <- grepl(communicating_keywordsOR,
america_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times communicating keywords have been mentioned
america_communicating_score <- sum(america_communicating) / nrow(student_text)
america_communicating_score # 0.05882353
###
# Team-Building (Hult DNA) keywords scanning in "america_campuses"
teambuilding_keywordsOR <-"team|peers|clubs|community|engage|engagement|network|
connection|connecting|cooperation"
america_teambuilding <- grepl(teambuilding_keywordsOR,
america_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times team-building keywords have been mentioned
america_teambuilding_score <- sum(america_teambuilding) / nrow(student_text)
america_teambuilding_score # 0.3058824
################################################################################
# Searching for Word Patterns in "eurasia_campuses" ###########################
################################################################################
# Diversity keywords scanning in "eurasia_campuses"
eurasia_diversity <- grepl(diversity_keywordsOR, eurasia_campuses$student_allText,
ignore.case=TRUE)
# Calculating the % of times diversity keywords have been mentioned
eurasia_diversity_score <- sum(eurasia_diversity) / nrow(student_text)
eurasia_diversity_score # 0.5411765
###
# Thinking (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_thinking <- grepl(thinking_keywordsOR, eurasia_campuses$student_allText,
ignore.case=TRUE)
# Calculating the % of times thinking keywords have been mentioned
eurasia_thinking_score <- sum(eurasia_thinking) / nrow(student_text)
eurasia_thinking_score # 0.5764706
###
# Communicating (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_communicating <- grepl(communicating_keywordsOR,
eurasia_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times communicating keywords have been mentioned
eurasia_communicating_score <- sum(eurasia_communicating) / nrow(student_text)
eurasia_communicating_score # 0.1176471
###
# Team-Building (Hult DNA) keywords scanning in "eurasia_campuses"
eurasia_teambuilding <- grepl(teambuilding_keywordsOR,
eurasia_campuses$student_allText,ignore.case=TRUE)
# Calculating the % of times team-building keywords have been mentioned
eurasia_teambuilding_score <- sum(eurasia_teambuilding) / nrow(student_text)
eurasia_teambuilding_score # 0.3176471
################################################################################
# Comparing Results: "america_campuses" vs. "eurasia_campuses" #################
################################################################################
# Creating a matrix to summarize campuses' scores per category
scores_comparison <- matrix(c(america_diversity_score, eurasia_diversity_score,
america_thinking_score, eurasia_thinking_score,
america_communicating_score, eurasia_communicating_score,
america_teambuilding_score, eurasia_teambuilding_score),
ncol = 2, byrow = TRUE )
# Defining "scores_comparison" columns' names
colnames(scores_comparison ) <- c("America Campuses", "Eurasia Campuses")
# Defining "scores_comparison" rows' names
rownames(scores_comparison) <- c("Diversity", "Thinking", "Communicating",
"Team Building")
# Displaying the "scores_comparison" matrix
scores_comparison
################################################################################
# String_count() for Hult DNA categories and Diversity (all dataset) ##########
################################################################################
# Counting words based on the Hult DNA categories and diversity.
# Each regex is kept on a single line: the original string literals wrapped
# across source lines, which embedded literal newline characters into the
# patterns, so alternatives after a wrap (e.g. "variety", "clubs") could
# only ever match text preceded by a newline.
diversity <- sum(stri_count(student_text$student_allText,
  regex = 'diverse|diversity|variety|mix|multi-cultural|multicultural|global|world|cultures|international'))
thinking <- sum(stri_count(student_text$student_allText,
  regex = 'awareness|self|challenge|growth mindset'))
communicating <- sum(stri_count(student_text$student_allText,
  regex = 'communication|communicate|confident|sharing|listening|listen|influence'))
teambuilding <- sum(stri_count(student_text$student_allText,
  regex = 'team|peers|clubs|community|engage|engagement|network|connection|connecting|cooperation'))
# Organizing term objects into a data frame
all_termFreq <- data.frame(Terms = c('diversity','thinking','communicating',
'teambuilding'),
Freq = c(diversity, thinking, communicating, teambuilding))
# Checking the object frequencies
all_termFreq
# Plotting a geom_bar() for "all_termFreq"
ggplot(data = all_termFreq, aes(x = reorder(Terms, Freq), y = Freq)) +
geom_bar(stat = "identity", fill = "hotpink1") +
labs(title = "Hult DNA and Diversity Words' Categories ", y = "Count", x = " ") +
coord_flip() +
theme_tufte()
################################################################################
# Volatile Corpus ##############################################################
################################################################################
# Making and cleaning a volatile corpus for "student_text"
student_corp <- VCorpus(VectorSource(student_text$student_allText))
student_corp <- cleanCorpus(student_corp, stops)
content(student_corp[[1]]) # Checking student_corp
###
# Making and cleaning a volatile corpus for "america_campuses"
america_corp <- VCorpus(VectorSource(america_campuses$student_allText))
america_corp <- cleanCorpus(america_corp, stops)
content(america_corp[[1]]) # Checking america_corp
###
# Making and cleaning a volatile corpus for "eurasia_campuses"
eurasia_corp <- VCorpus(VectorSource(eurasia_campuses$student_allText))
eurasia_corp <- cleanCorpus(eurasia_corp, stops)
content(eurasia_corp[[1]]) # Checking eurasia_corp
################################################################################
# Term Document Matrix #########################################################
################################################################################
# Making a Term Document Matrix for "america_campuses"
america_Tdm <- TermDocumentMatrix(america_corp, control = list(weighting = weightTf))
america_TdmM <- as.matrix(america_Tdm)
dim(america_TdmM) # Checking matrix dimensions
###
# Making a Term Document Matrix for "eurasia_campuses"
eurasia_Tdm <- TermDocumentMatrix(eurasia_corp, control = list(weighting = weightTf))
eurasia_TdmM <- as.matrix(eurasia_Tdm)
dim(eurasia_TdmM) # Checking matrix dimensions
###
# Making a Term Document Matrix for the full "student_text" dataset
student_Tdm <- TermDocumentMatrix(student_corp, control = list(weighting = weightTf))
student_TdmM <- as.matrix(student_Tdm)
dim(student_TdmM) # Checking matrix dimensions
################################################################################
# Most Frequent Terms ##########################################################
################################################################################
# Getting the most frequent terms for "america_campuses"
america_TopTerms <- rowSums(america_TdmM)
america_TopTerms <- data.frame(terms = rownames(america_TdmM), freq = america_TopTerms)
rownames(america_TopTerms) <- NULL
head(america_TopTerms)
# Getting the most frequent term
america_idx <- which.max(america_TopTerms$freq)
america_TopTerms[america_idx, ] # business
###
# Getting the most frequent terms for "eurasia_campuses"
eurasia_TopTerms <- rowSums(eurasia_TdmM)
eurasia_TopTerms <- data.frame(terms = rownames(eurasia_TdmM), freq = eurasia_TopTerms)
rownames(eurasia_TopTerms) <- NULL
head(eurasia_TopTerms)
# Getting the most frequent term
# (fixed: the original displayed america_TopTerms[eurasia_idx, ] here,
# a copy-paste slip that showed the wrong campus's term)
eurasia_idx <- which.max(eurasia_TopTerms$freq)
eurasia_TopTerms[eurasia_idx, ] # coming
###
# Getting the most frequent terms for all dataset
student_all_TopTerms <- rowSums(student_TdmM)
student_all_TopTerms <- data.frame(terms = rownames(student_TdmM), freq = student_all_TopTerms)
rownames(student_all_TopTerms) <- NULL
head(student_all_TopTerms)
# Getting the most frequent term
student_all_idx <- which.max(student_all_TopTerms$freq)
student_all_TopTerms[student_all_idx, ] # business
################################################################################
# Plotting the most Frequent Terms #############################################
################################################################################
# Creating an "america_TopTerms" subset
america_Top_subset <- subset(america_TopTerms, america_TopTerms$freq > 12)
america_Top_subset <- america_Top_subset[order(america_Top_subset$freq, decreasing=F),]
america_Top_subset[10:30,]
# Converting top terms into factors
america_Top_subset$terms <- factor(america_Top_subset$terms,
levels=unique(as.character(america_Top_subset$terms)))
# Plotting top terms for "america_campuses"
ggplot(data = america_Top_subset, mapping = aes(x=terms, y=freq)) +
geom_bar(stat="identity", fill = "hotpink1") +
labs(title = "Top Terms among America Campuses", x = "", y = "Frequency") +
coord_flip() +
theme_tufte()
###
# Creating an "eurasia_TopTerms" subset
eurasia_Top_subset <- subset(eurasia_TopTerms, eurasia_TopTerms$freq > 16)
eurasia_Top_subset <- eurasia_Top_subset[order(eurasia_Top_subset$freq, decreasing=F),]
eurasia_Top_subset[9:31,]
# Converting top terms into factors
eurasia_Top_subset$terms <- factor(eurasia_Top_subset$terms,
levels=unique(as.character(eurasia_Top_subset$terms)))
# Plotting top terms for "eurasia_campuses"
ggplot(data = eurasia_Top_subset, mapping = aes(x=terms, y=freq)) +
geom_bar(stat="identity", fill = "hotpink1") +
labs(title = "Top Terms among Eurasia Campuses", x = "", y = "Frequency") +
coord_flip() +
theme_tufte()
################################################################################
# Association Analysis #########################################################
################################################################################
# Inspecting word associations for "america_campuses"
america_associations <- findAssocs(america_Tdm, 'business', 0.37)
america_associations # Checking results
# Organizing words for "america_associations"
america_assocDF <- data.frame(terms=names(america_associations[[1]]),
value=unlist(america_associations))
america_assocDF$terms <- factor(america_assocDF$terms, levels=america_assocDF$terms)
rownames(america_assocDF) <- NULL
america_assocDF
# Displaying associations
ggplot(america_assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=america_assocDF, col='hotpink1') +
labs(title = "Association Analysis for Business in America Campuses", x = "Value",
y = " ") +
theme_tufte() +
geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
vjust ="inward" , size=3)
###
# Inspecting word associations for "eurasia_campuses"
eurasia_associations <- findAssocs(eurasia_Tdm, 'business', 0.37)
eurasia_associations # Checking results
# Organizing words for "eurasia_associations"
eurasia_assocDF <- data.frame(terms=names(eurasia_associations[[1]]),
value=unlist(eurasia_associations))
eurasia_assocDF$terms <- factor(eurasia_assocDF$terms, levels=eurasia_assocDF$terms)
rownames(eurasia_assocDF) <- NULL
eurasia_assocDF
# Displaying associations
ggplot(eurasia_assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=eurasia_assocDF, col='hotpink1') +
labs(title = "Association Analysis for Business in Eurasia Campuses", x = "Value",
y = " ") +
theme_tufte() +
geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
vjust ="inward" , size=3)
###
# Inspecting word associations for all dataset
student_associations <- findAssocs(student_Tdm, 'business', 0.30)
student_associations # Checking results
# Organizing words for all dataset
student_assocDF <- data.frame(terms=names(student_associations[[1]]),
value=unlist(student_associations))
student_assocDF$terms <- factor(student_assocDF$terms, levels=student_assocDF$terms)
rownames(student_assocDF) <- NULL
student_assocDF
# Displaying associations
ggplot(student_assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=student_assocDF, col='hotpink1') +
labs(title = "Association Analysis for Business (all dataset)", x = "Value",
y = " ") +
theme_tufte() +
geom_text(aes(x=value,label=value), colour="grey",hjust="inward",
vjust ="inward" , size=3)
################################################################################
# WordCloud ####################################################################
################################################################################
# Setting worldcloud palette
pal <- brewer.pal(8, "Greys")
pal <- pal[-(1:2)]
###
# Plotting a worldcloud for "america_campuses"
set.seed(1234)
wordcloud(america_TopTerms$terms,
america_TopTerms$freq,
max.words = 50,
random.order = FALSE,
colors = pal,
scale = c(2,1))
###
# Plotting a worldcloud for "eurasia_campuses"
set.seed(1234)
wordcloud(eurasia_TopTerms$terms,
eurasia_TopTerms$freq,
max.words = 50,
random.order = FALSE,
colors = pal,
scale = c(2,1))
###
# Plotting a world cloud for all dataset
set.seed(1234)
wordcloud(student_all_TopTerms$terms,
student_all_TopTerms$freq,
max.words = 50,
random.order = FALSE,
colors = pal,
scale = c(2,1))
################################################################################
# Other WorldCloud Type ########################################################
################################################################################
# Choose a color & drop light ones
pal2 <- brewer.pal(8, "Greys")
wordcloud2(student_all_TopTerms[1:50,],
color = pal2,
backgroundColor = "pink")
################################################################################
# Comparison Cloud: "bio" vs "interest" ########################################
################################################################################
# Defining a vector corpus
all_bio <- VCorpus(VectorSource(student_text$bio))
all_interest <- VCorpus(VectorSource(student_text$interest))
# Cleaning up the data
all_bio <- cleanCorpus(all_bio, stops)
all_interest <- cleanCorpus(all_interest, stops)
# Checking the results
length(all_bio)
length(all_interest)
# Collapsing each document into a single "subject"
all_bio <- paste(all_bio, collapse = ' ')
all_interest <- paste(all_interest, collapse = ' ')
# Combining the "all_bio" and "all_interest"
bio_interest <- c(all_bio, all_interest)
bio_interest <- VCorpus((VectorSource(bio_interest)))
# Defining TDM
ctrl <- list(weighting = weightTfIdf)
bio_interest_TDM <- TermDocumentMatrix(bio_interest, control = ctrl)
bio_interest_TDMm <- as.matrix(bio_interest_TDM)
# Defining columns order
colnames(bio_interest_TDMm) <- c('Bio', 'Interests')
# Examining TDM
head(bio_interest_TDMm)
# Plotting a comparison cloud
comparison.cloud(bio_interest_TDMm,
max.words= 30,
random.order=FALSE,
title.size=0.8,
colors=brewer.pal(ncol(bio_interest_TDMm),"Paired"),
title.colors=FALSE, match.colors=FALSE,
scale=c(3,0.2))
# End ##########################################################################
|
#################################
### Homework ideas
#################################
rm(list=ls()) # remove all
options(max.print=80)
options(digits=3)
par(new=TRUE) # allow new plot on same chart
par(las=1) # set text printing to "horizontal"
library(zoo)
# good package loading script inside functions
stopifnot("package:xts" %in% search() || require("xts", quietly=TRUE))
#####################
### temp stuff ###
zoomChart("2010")
zoomChart("2010-04/2010-06")
########
reg_model <- lm(range~volume, data=range_volume["2008/2009"])
plot(reg_model)
reg_model <- lm(range~volume, data=diff(range_volume))
reg_model <- lm(range~volume, data=diff(range_volume["2010/"], lag=11))
reg_model <- lm(range~volume, data=diff(range_volume["2008/2009"]))
summary(reg_model)
plot(range~volume, data=diff(range_volume["2010/"]))
adf.test(range_volume[, "range"])
adf.test(cumsum(rnorm(nrow(range_volume))))
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="pearson")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="pearson")
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="kendall")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="kendall")
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="spearman")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="spearman")
#####################
### end temp stuff ###
########################
### functions
###############
# 3. (20pts even without legend) Plot the probability density of DAX returns together with t-distribution returns with four degrees of freedom on a single plot,
# plot t-distribution
x_var <- seq(-5, 5, length=100)
x_var <- seq(-6, -3, length=100)
plot(x=x_var, y=dt(x_var, df=4), type="l", lwd=2, xlab="", ylab="", ylim=c(0, 0.03))
# add line for density of DAX returns
ts_rets <- 100*diff(log(EuStockMarkets[, 1]))
lines(density(ts_rets), col="red", lwd=2)
# add legend
legend("topright", title="DAX vs t-distr", legend=c("t-distr", "DAX"),
inset=0.05, cex=0.8, lwd=2, lty=c(1, 1), col=c("black", "red"))
########################
### expressions
### while loops
########################
### dates and times
########################
### time series
########################
### stochastic processes
### below are scratch or incorrect - doesn't work properly:
######
# Create a series lagged by one period from "ts_arima", and call it "ts_arima_lag",
# The value of "ts_arima_lag" in a given period should be equal to
# the value of "ts_arima" in the previous period,
# Create a series lagged by two periods from "ts_arima", and call it "ts_arima_lag2",
# use function lag() with the proper argument "k",
# create ARIMA time series of class "ts"
zoo_arima <- arima.sim(n=1000, model=list(ar=c(0.2, 0.3)))
# verify that "ts_arima_lag" and "ts_arima_lag2" are correctly lagged by inspecting them,
# use functions head() and cbind(),
head(cbind(ts_arima, ts_arima_lag, ts_arima_lag2))
tail(cbind(ts_arima, ts_arima_lag, ts_arima_lag2))
######
# Create a linear combination of "ts_arima_1" and its lag=2 series, and call it "ts_arima_2",
# such that the lag=2 autocorrelation of "ts_arima_2" is equal to zero, or is very close to zero,
ts_arima_2 <- ts_arima_1 - vec_acf_1[3]*lag(ts_arima_1, k=-2)
vec_acf_2 <- drop(acf(ts_arima_2, lag=5, plot=FALSE)$acf)
# plot
acf_plus(ts_arima_2, lag=5)
######
# Create a linear combination of "zoo_arima" and "zoo_arima_lag", and call it "zoo_arima_1" (decorrelated),
# such that its lag=1 autocorrelation coefficient is equal to zero, or is very close to zero,
# Extract the lag=5 autocorrelation vector of "zoo_arima_1", and call it "vec_acf_1",
# verify that the lag=1 autocorrelation is very close to zero,
zoo_arima_1 <- zoo_arima - vec_acf[2]*sd(zoo_arima)*zoo_arima_lag/sd(zoo_arima_lag)
vec_acf_1 <- drop(acf(zoo_arima_1, lag=5, plot=FALSE)$acf)
# plot
acf_plus(zoo_arima_1, lag=5)
###########
# classes and inheritance
###############
# create new generic function and method for "string" class, based on: "reverse", "trim", "pad", "scramble",
# create "stringy" class, derived from "string" class
# create new methods for "stringy" class, based on existing generic functions: "length", "+", "print"
# create new methods for "stringy" class, based on "string" generic functions: "", "", ""
# show that "stringy" inherits methods from "string" class
# derive (not create!) new "string" class from "character" object
# simply add "string" to class vector
as.string <- function(x) {
  # Prepend "string" to the class vector, unless x is already a "string"
  if (!inherits(x, "string")) {
    class(x) <- c("string", class(x))
  }
  x
}
# derive (not create!) new "string" class from "character" object
# define generic "string" class converter; S3-dispatches on class(x)
as.string <- function(x, ...) {
  UseMethod("as.string")
}
# default "string" class converter: adds "string" in front of the existing
# class vector; extra attributes are passed through to structure() via "..."
as.string.default <- function(x, ...) {
  if (inherits(x, "string")) {
    return(x)  # already a "string"; nothing to do
  }
  structure(x, class = c("string", class(x)), ...)
} # end as.string.default
# numeric "string" class converter: stores the number as text while keeping
# "numeric" in the class vector so the original type remains visible
as.string.numeric <- function(x, ...) {
  if (inherits(x, "string")) {
    return(x)  # already converted
  }
  structure(as.character(x), class = c("string", class(x)), ...)
} # end as.string.numeric
# TRUE when the object carries the "string" class
is.string <- function(x) {
  inherits(x, "string")
}
# define "string" object
obj_string <- as.string("how are you today?")
obj_string
class(obj_string)
is.string(obj_string)
is.string("hello")
as.string(123)
is.string(as.string(123))
# overload "+" function for "string" class:
# concatenates the two operands around the literal word "plus"
"+.string" <- function(a, b, ...) {
  paste(a, "plus", b)
} # end +.string
# adds character indices and returns character with index equal to the sum:
# alphabet positions of each operand's first letter, summed modulo 26
"+.string" <- function(a, b, ...) {
  pos_a <- which(letters == substring(a, 1, 1))
  pos_b <- which(letters == substring(b, 1, 1))
  letters[(pos_a + pos_b) %% length(letters)]
} # end +.string
methods("+") # view methods for "+" operator
string1 <- structure("hello", class="string")
string2 <- structure("there", class="string")
class(string1)
string1 + string2 # add two "string" objects
# borrow from "stringr": "check_string", "str_length"
# overload "print" function for "string" class:
# splits the value on spaces and prints the words rejoined with " + "
print.string <- function(str_ing) {
  words <- strsplit(str_ing, split = " ")[[1]]
  print(paste(words, collapse = " + "))
} # end print.string
print(my_string)
# define generic "first" function (if not defined by "xts");
# dispatches on class(x)
first <- function(x, ...) {
  UseMethod("first")
}
# define "first" method for "string" class:
# returns the first character, with any class attribute stripped
first.string <- function(str_ing, ...) {
  unclass(substring(str_ing, 1, 1))
} # end first.string
first(string1)
# "last" method for "string" class: returns the final character of the
# input, with any class attribute stripped
last.string <- function(str_ing, ...) {
  n_char <- nchar(str_ing)
  unclass(substring(str_ing, n_char, n_char))
} # end last.string
last(string1)
### function that adds "character" class objects
# Sums the alphabet positions of each argument's first letter, modulo 26,
# and returns the corresponding letter. Note: if a first character is not
# a lowercase letter, which() yields integer(0) and character(0) is returned.
add_char <- function (char1, char2) {
  # test for "character" class and throw error
  # (fixed: the original checked char1 twice and never validated char2)
  stopifnot(is.character(char1) && is.character(char2))
  in_dex <- (which(letters==substr(char1, 1, 1)) + which(letters==substr(char2, 1, 1))) %% length(letters)
  letters[in_dex]
} # end add_char
add_char("c", "b")
add_char("1", "b")
add_char(1, "b")
a <- "efg"
b <- "opq"
add_char(a, b)
class(my_stringy) <- c("stringy", "string")
"+.stringy" <- function (a, b, ...) {
paste(a, "plus", b)
} # end +.stringy
# create "base5" arithmetic class, derived from "numeric" class
# create new methods for "base5" class, based on existing generic functions: "+", "-", "*", "/"
# generic baz() dispatches on class(x); the method chosen is the first
# element of the class vector that has a matching baz.<class> method
baz <- function(x) {
  UseMethod("baz", x)
}
baz.A <- function(x) {
  "A"
}
baz.B <- function(x) {
  "B"
}
ab <- 1
class(ab) <- c("A", "B")
ba <- 2
class(ba) <- c("B", "A")
ab <- structure(1, class = c("A", "B"))
ba <- structure(1, class = c("B", "A"))
baz(ab)
baz(ba)
"+.character" <- function(a, b, ...){
NextMethod()
}
#################################
### HW #6 Solution
#################################
# Max score 25pts
# comment:
# Half of the credit for the first part (max 15pts) is from properly calculating
# the length (nrow) of the list object, because nrow() returns NULL for one-dimensional objects.
# Homework assignment:
# 1. (15pts) Create a function called str_ts(), which summarizes time series objects,
# The function input is a time series object,
# The function should return a named list object with the following information: length (nrow), dimensions, number of rows with bad data, colnames, the object's class, data type, and the first and last rows of data,
# The function should validate its argument, and throw an error if it's not a time series object,
# Summarize a time series object ("ts" or "zoo").
# Returns a named list with: length (nrow, falling back to length() for
# one-dimensional objects, since nrow() returns NULL for them), dim,
# number of rows with bad data, colnames, class, storage type, and the
# first and last rows of data. Errors if the argument is not a time series.
str_ts <- function(ts_series=NULL) {
  # check if argument is a time series object
  stopifnot(is.ts(ts_series) || is.zoo(ts_series))
  # nrow() is NULL for one-dimensional series, so fall back to length();
  # plain if/else instead of ifelse(), which is meant for vectors
  n_rows <- if (is.null(nrow(ts_series))) length(ts_series) else nrow(ts_series)
  # create list and return it
  list(
    length=n_rows,
    dim=dim(ts_series),
    bad_data=sum(!complete.cases(ts_series)),
    col_names=colnames(ts_series),
    ts_class=class(ts_series),
    ts_type=typeof(ts_series),
    first_row=head(ts_series, 1),
    last_row=tail(ts_series, 1)
  ) # end list
} # end str_ts
# 2. (10pts) Create a synthetic zoo time series of prices with two named columns, based on random returns equal to "rnorm",
# Introduce a few NA values into the time series, and call str_ts() on this time series,
library(zoo) # load package zoo
ts_var <- zoo(matrix(rnorm(20), ncol=2), order.by=(Sys.Date() - 1:10))
colnames(ts_var) <- paste0("col", 1:2)
ts_var[3, 1] <- NA
ts_var[6, 2] <- NA
str_ts(ts_var)
| /FRE_homework_ideas.R | no_license | githubfun/FRE6871 | R | false | false | 9,441 | r | #################################
### Homework ideas
#################################
rm(list=ls()) # remove all
options(max.print=80)
options(digits=3)
par(new=TRUE) # allow new plot on same chart
par(las=1) # set text printing to "horizontal"
library(zoo)
# good package loading script inside functions
stopifnot("package:xts" %in% search() || require("xts", quietly=TRUE))
#####################
### temp stuff ###
zoomChart("2010")
zoomChart("2010-04/2010-06")
########
reg_model <- lm(range~volume, data=range_volume["2008/2009"])
plot(reg_model)
reg_model <- lm(range~volume, data=diff(range_volume))
reg_model <- lm(range~volume, data=diff(range_volume["2010/"], lag=11))
reg_model <- lm(range~volume, data=diff(range_volume["2008/2009"]))
summary(reg_model)
plot(range~volume, data=diff(range_volume["2010/"]))
adf.test(range_volume[, "range"])
adf.test(cumsum(rnorm(nrow(range_volume))))
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="pearson")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="pearson")
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="kendall")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="kendall")
cor(x=range_volume[, "range"], y=range_volume[, "volume"], method="spearman")
cor.test(x=range_volume[, "range"], y=range_volume[, "volume"], method="spearman")
#####################
### end temp stuff ###
########################
### functions
###############
# 3. (20pts even without legend) Plot the probability density of DAX returns together with t-distribution returns with four degrees of freedom on a single plot,
# plot t-distribution
x_var <- seq(-5, 5, length=100)
x_var <- seq(-6, -3, length=100)
plot(x=x_var, y=dt(x_var, df=4), type="l", lwd=2, xlab="", ylab="", ylim=c(0, 0.03))
# add line for density of DAX returns
ts_rets <- 100*diff(log(EuStockMarkets[, 1]))
lines(density(ts_rets), col="red", lwd=2)
# add legend
legend("topright", title="DAX vs t-distr", legend=c("t-distr", "DAX"),
inset=0.05, cex=0.8, lwd=2, lty=c(1, 1), col=c("black", "red"))
########################
### expressions
### while loops
########################
### dates and times
########################
### time series
########################
### stochastic processes
### below are scratch or incorrect - doesn't work properly:
######
# Create a series lagged by one period from "ts_arima", and call it "ts_arima_lag",
# The value of "ts_arima_lag" in a given period should be equal to
# the value of "ts_arima" in the previous period,
# Create a series lagged by two periods from "ts_arima", and call it "ts_arima_lag2",
# use function lag() with the proper argument "k",
# create ARIMA time series of class "ts"
zoo_arima <- arima.sim(n=1000, model=list(ar=c(0.2, 0.3)))
# verify that "ts_arima_lag" and "ts_arima_lag2" are correctly lagged by inspecting them,
# use functions head() and cbind(),
head(cbind(ts_arima, ts_arima_lag, ts_arima_lag2))
tail(cbind(ts_arima, ts_arima_lag, ts_arima_lag2))
######
# Create a linear combination of "ts_arima_1" and its lag=2 series, and call it "ts_arima_2",
# such that the lag=2 autocorrelation of "ts_arima_2" is equal to zero, or is very close to zero,
ts_arima_2 <- ts_arima_1 - vec_acf_1[3]*lag(ts_arima_1, k=-2)
vec_acf_2 <- drop(acf(ts_arima_2, lag=5, plot=FALSE)$acf)
# plot
acf_plus(ts_arima_2, lag=5)
######
# Create a linear combination of "zoo_arima" and "zoo_arima_lag", and call it "zoo_arima_1" (decorrelated),
# such that its lag=1 autocorrelation coefficient is equal to zero, or is very close to zero,
# Extract the lag=5 autocorrelation vector of "zoo_arima_1", and call it "vec_acf_1",
# verify that the lag=1 autocorrelation is very close to zero,
zoo_arima_1 <- zoo_arima - vec_acf[2]*sd(zoo_arima)*zoo_arima_lag/sd(zoo_arima_lag)
vec_acf_1 <- drop(acf(zoo_arima_1, lag=5, plot=FALSE)$acf)
# plot
acf_plus(zoo_arima_1, lag=5)
###########
# classes and inheritance
###############
# create new generic function and method for "string" class, based on: "reverse", "trim", "pad", "scramble",
# create "stringy" class, derived from "string" class
# create new methods for "stringy" class, based on existing generic functions: "length", "+", "print"
# create new methods for "stringy" class, based on "string" generic functions: "", "", ""
# show that "stringy" inherits methods from "string" class
# derive (not create!) new "string" class from "character" object
# simply add "string" to class vector
as.string <- function(x) {
if(!inherits(x, "string"))
class(x) <- c("string", class(x))
x # return "x"
}
# derive (not create!) new "string" class from "character" object
# define generic "string" class converter
as.string <- function (x, ...)
UseMethod("as.string")
# default "string" class converter
as.string.default <- function (x, ...) {
if(!inherits(x, "string"))
x <- structure(x, class=c("string", class(x)), ...)
x # return "x"
} # end as.string.default
# numeric "string" class converter
as.string.numeric <- function (x, ...) {
if(!inherits(x, "string"))
x <- structure(as.character(x), class=c("string", class(x)), ...)
x # return "x"
} # end as.string.numeric
is.string <- function (x)
inherits(x=x, what="string")
# define "string" object
obj_string <- as.string("how are you today?")
obj_string
class(obj_string)
is.string(obj_string)
is.string("hello")
as.string(123)
is.string(as.string(123))
# overload "+" function for "string" class
"+.string" <- function (a, b, ...) {
paste(a, "plus", b)
} # end +.string
# adds character indices and returns character with index equal to the sum
"+.string" <- function (a, b, ...) {
in_dex <- (which(letters==substring(a, 1, 1)) + which(letters==substring(b, 1, 1))) %% length(letters)
letters[in_dex]
} # end +.string
methods("+") # view methods for "+" operator
string1 <- structure("hello", class="string")
string2 <- structure("there", class="string")
class(string1)
string1 + string2 # add two "string" objects
# borrow from "stringr": "check_string", "str_length"
# overload "print" function for "string" class
print.string <- function (str_ing) {
print(
paste(strsplit(str_ing, split=" ")[[1]],
collapse=" + "))
} # end print.string
print(my_string)
# define generic "first" function (if not defined by "xts")
first <- function (x, ...)
UseMethod("first")
# define "first" method for "string" class
first.string <- function (str_ing, ...) {
unclass(substring(str_ing, 1, 1))
} # end first.string
first(string1)
last.string <- function (str_ing, ...) {
unclass(substring(str_ing, nchar(str_ing), nchar(str_ing)))
} # end last.string
last(string1)
### function that adds "character" class objects
# Sums the alphabet positions of each argument's first letter, modulo 26,
# and returns the corresponding letter. Note: if a first character is not
# a lowercase letter, which() yields integer(0) and character(0) is returned.
add_char <- function (char1, char2) {
  # test for "character" class and throw error
  # (fixed: the original checked char1 twice and never validated char2)
  stopifnot(is.character(char1) && is.character(char2))
  in_dex <- (which(letters==substr(char1, 1, 1)) + which(letters==substr(char2, 1, 1))) %% length(letters)
  letters[in_dex]
} # end add_char
add_char("c", "b")
add_char("1", "b")
add_char(1, "b")
a <- "efg"
b <- "opq"
add_char(a, b)
class(my_stringy) <- c("stringy", "string")
"+.stringy" <- function (a, b, ...) {
paste(a, "plus", b)
} # end +.stringy
# create "base5" arithmetic class, derived from "numeric" class
# create new methods for "base5" class, based on existing generic functions: "+", "-", "*", "/"
baz <- function(x) UseMethod("baz", x)
baz.A <- function(x) "A"
baz.B <- function(x) "B"
ab <- 1
class(ab) <- c("A", "B")
ba <- 2
class(ba) <- c("B", "A")
ab <- structure(1, class = c("A", "B"))
ba <- structure(1, class = c("B", "A"))
baz(ab)
baz(ba)
"+.character" <- function(a, b, ...){
NextMethod()
}
#################################
### HW #6 Solution
#################################
# Max score 25pts
# comment:
# Half of the credit for the first part (max 15pts) is from properly calculating
# the length (nrow) of the list object, because nrow() returns NULL for one-dimensional objects.
# Homework assignment:
# 1. (15pts) Create a function called str_ts(), which summarizes time series objects,
# The function input is a time series object,
# The function should return a named list object with the following information: length (nrow), dimensions, number of rows with bad data, colnames, the object's class, data type, and the first and last rows of data,
# The function should validate its argument, and throw an error if it's not a time series object,
# Summarize a time series object ("ts" or "zoo").
# Returns a named list with: length (nrow, falling back to length() for
# one-dimensional objects, since nrow() returns NULL for them), dim,
# number of rows with bad data, colnames, class, storage type, and the
# first and last rows of data. Errors if the argument is not a time series.
str_ts <- function(ts_series=NULL) {
  # check if argument is a time series object
  stopifnot(is.ts(ts_series) || is.zoo(ts_series))
  # nrow() is NULL for one-dimensional series, so fall back to length();
  # plain if/else instead of ifelse(), which is meant for vectors
  n_rows <- if (is.null(nrow(ts_series))) length(ts_series) else nrow(ts_series)
  # create list and return it
  list(
    length=n_rows,
    dim=dim(ts_series),
    bad_data=sum(!complete.cases(ts_series)),
    col_names=colnames(ts_series),
    ts_class=class(ts_series),
    ts_type=typeof(ts_series),
    first_row=head(ts_series, 1),
    last_row=tail(ts_series, 1)
  ) # end list
} # end str_ts
# 2. (10pts) Create a synthetic zoo time series of prices with two named columns, based on random returns equal to "rnorm",
# Introduce a few NA values into the time series, and call str_ts() on this time series,
library(zoo) # load package zoo
ts_var <- zoo(matrix(rnorm(20), ncol=2), order.by=(Sys.Date() - 1:10))
colnames(ts_var) <- paste0("col", 1:2)
ts_var[3, 1] <- NA
ts_var[6, 2] <- NA
str_ts(ts_var)
|
snp_data <- fread("snp_data.csv", verbose = FALSE) %>%
tbl_df()
my_portfolio <- data_frame(
stock_name = c("AAPL", "GOOG", "SBUX", "NKE"),
volume = c(1000, 1000, 1000, 1000)
)
# Shiny server function: holds a stock portfolio in a reactive store and
# renders its current market value as a DT table. Assumes snp_data (price
# history with Date/Ticker/Close/Name columns), dplyr, DT, and shiny are
# loaded by the surrounding app.
function(input, output) {
# reactive store holding the portfolio composition (ticker + share volume);
# NOTE(review): data_frame() is deprecated dplyr - tibble() is the successor
my_portfolio <- reactiveValues(data = data_frame(
ticker = c("AAPL", "GOOG", "SBUX", "NKE"),
volume = c(1000, 1000, 1000, 1000)
))
# Price a portfolio at the most recent date in snp_data: latest Close times
# volume per holding. NOTE(review): right_join keeps every portfolio row
# even when a ticker has no price on that date - confirm that is intended.
value_portfolio <- function(portfolio_data) {
snp_data %>%
filter(
Date == max(Date)
) %>%
right_join(portfolio_data, by = c("Ticker" = "ticker")) %>%
mutate(
value = Close * volume
) %>%
select(
Name, volume, value
)
}
# render the valued portfolio as a non-searchable DT table with the
# "value" column formatted as currency
output$portfolio_tbl <- DT::renderDataTable({
my_portfolio$data %>%
value_portfolio() %>%
DT::datatable(rownames = FALSE, options = list(searching = FALSE),
colnames = c("Stock", "Volume", "Value")
) %>%
formatCurrency("value")
})
}
} | /server.r | no_license | DrRoad/portfolio_optimization_app | R | false | false | 1,078 | r | snp_data <- fread("snp_data.csv", verbose = FALSE) %>%
tbl_df()
my_portfolio <- data_frame(
stock_name = c("AAPL", "GOOG", "SBUX", "NKE"),
volume = c(1000, 1000, 1000, 1000)
)
function(input, output) {
my_portfolio <- reactiveValues(data = data_frame(
ticker = c("AAPL", "GOOG", "SBUX", "NKE"),
volume = c(1000, 1000, 1000, 1000)
))
value_portfolio <- function(portfolio_data) {
snp_data %>%
filter(
Date == max(Date)
) %>%
right_join(portfolio_data, by = c("Ticker" = "ticker")) %>%
mutate(
value = Close * volume
) %>%
select(
Name, volume, value
)
}
output$portfolio_tbl <- DT::renderDataTable({
my_portfolio$data %>%
value_portfolio() %>%
DT::datatable(rownames = FALSE, options = list(searching = FALSE),
colnames = c("Stock", "Volume", "Value")
) %>%
formatCurrency("value")
})
} |
#' Title: Read TXT files
#' Purpose: Read .txt
#' Author: Ted Kwartler
#' email: edwardkwartler@fas.harvard.edu
#' License: GPL>=3
#' Date: Dec 29 2020
#'
# Read in a txt file (change this to your file)
# NOTE(review): readLines over a URL requires network access at run time.
txt1 <- readLines('https://raw.githubusercontent.com/kwartler/GSERM_TextMining/cacd1d9131fef31309d673b24e744a6fee54269d/E_Friday/data/clinton/C05758905.txt')
# Examine
txt1
# Collapse all lines to make it like a single giant document
txt2 <- paste(txt1, collapse = ' ')
# split on a string "Doc No." to demonstrate getting a single document to
# individual documents
# NOTE(review): strsplit treats the pattern as a regular expression, so the
# "." here matches ANY character; pass fixed = TRUE to split on the literal
# string "Doc No." if that is what is intended.
indDocs <- strsplit(txt2, "Doc No.")
# The result is a list object so can be worked with this way
indDocs[[1]][1] # first doc
indDocs[[1]][2] # second doc
indDocs[[1]][3] # third doc
# or "unlist" the object, but this can be challenging if the list is complex
indDocs <- unlist(indDocs)
indDocs[1] # first Doc
indDocs[2] #second doc
indDocs[3] #third doc
# End
| /lessons/oct22/scripts/E_readTXT_LIVE.R | no_license | Fstips/LUX_NLP_student | R | false | false | 940 | r | #' Title: Read TXT files
#' Purpose: Read .txt
#' Author: Ted Kwartler
#' email: edwardkwartler@fas.harvard.edu
#' License: GPL>=3
#' Date: Dec 29 2020
#'
# Read in a txt file (change this to your file)
txt1 <- readLines('https://raw.githubusercontent.com/kwartler/GSERM_TextMining/cacd1d9131fef31309d673b24e744a6fee54269d/E_Friday/data/clinton/C05758905.txt')
# Examine
txt1
# Collapse all lines to make it like a single giant document
txt2 <- paste(txt1, collapse = ' ')
# split on a string "Doc No." to demonstrate getting a single document to
# individual documents
indDocs <- strsplit(txt2, "Doc No.")
# The result is a list object so can be worked with this way
indDocs[[1]][1] # first doc
indDocs[[1]][2] # second doc
indDocs[[1]][3] # third doc
# or "unlist" the object, but this can be challenging if the list is complex
indDocs <- unlist(indDocs)
indDocs[1] # first Doc
indDocs[2] #second doc
indDocs[3] #third doc
# End
|
# see https://stats.stackexchange.com/a/67450
# for more on this
calc_score <- function(odds,
                       base_odds = 50,
                       base_score = 650,
                       pdo = 15,
                       upper_limit = 1000,
                       lower_limit = 0) {
  ## Map odds to a credit-style score: `pdo` points are added each time the
  ## odds double, anchored so that `base_odds` maps to `base_score`.
  ## The score is clipped to [lower_limit, upper_limit] and truncated to an
  ## integer. Vectorised over `odds`.
  points_per_ln_odds <- pdo / log(2)
  anchor <- base_score - points_per_ln_odds * log(base_odds)
  score <- anchor + points_per_ln_odds * log(odds)
  clipped <- pmax(pmin(score, upper_limit), lower_limit)
  as.integer(clipped)
}
| /src/score_function_example.R | no_license | Johnwood118/eds_modelling | R | false | false | 594 | r | # see https://stats.stackexchange.com/a/67450
# for more on this
calc_score <- function(odds,
base_odds = 50,
base_score = 650,
pdo = 15,
upper_limit = 1000,
lower_limit = 0) {
# scale:
scale_factor <- pdo / log(2)
# intercept:
intercept <- base_score - log(base_odds) * scale_factor
# score:
raw_score <- intercept + scale_factor * log(odds)
# clip and integer the score:
as.integer(pmax(pmin(raw_score, upper_limit), lower_limit))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudfront_operations.R
\name{create_streaming_distribution_with_tags}
\alias{create_streaming_distribution_with_tags}
\title{Create a new streaming distribution with tags}
\usage{
create_streaming_distribution_with_tags(StreamingDistributionConfigWithTags)
}
\arguments{
\item{StreamingDistributionConfigWithTags}{[required] The streaming distribution's configuration information.}
}
\description{
Create a new streaming distribution with tags.
}
\section{Accepted Parameters}{
\preformatted{create_streaming_distribution_with_tags(
StreamingDistributionConfigWithTags = list(
StreamingDistributionConfig = list(
CallerReference = "string",
S3Origin = list(
DomainName = "string",
OriginAccessIdentity = "string"
),
Aliases = list(
Quantity = 123,
Items = list(
"string"
)
),
Comment = "string",
Logging = list(
Enabled = TRUE|FALSE,
Bucket = "string",
Prefix = "string"
),
TrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
"string"
)
),
PriceClass = "PriceClass_100"|"PriceClass_200"|"PriceClass_All",
Enabled = TRUE|FALSE
),
Tags = list(
Items = list(
list(
Key = "string",
Value = "string"
)
)
)
)
)
}
}
| /service/paws.cloudfront/man/create_streaming_distribution_with_tags.Rd | permissive | CR-Mercado/paws | R | false | true | 1,476 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudfront_operations.R
\name{create_streaming_distribution_with_tags}
\alias{create_streaming_distribution_with_tags}
\title{Create a new streaming distribution with tags}
\usage{
create_streaming_distribution_with_tags(StreamingDistributionConfigWithTags)
}
\arguments{
\item{StreamingDistributionConfigWithTags}{[required] The streaming distribution's configuration information.}
}
\description{
Create a new streaming distribution with tags.
}
\section{Accepted Parameters}{
\preformatted{create_streaming_distribution_with_tags(
StreamingDistributionConfigWithTags = list(
StreamingDistributionConfig = list(
CallerReference = "string",
S3Origin = list(
DomainName = "string",
OriginAccessIdentity = "string"
),
Aliases = list(
Quantity = 123,
Items = list(
"string"
)
),
Comment = "string",
Logging = list(
Enabled = TRUE|FALSE,
Bucket = "string",
Prefix = "string"
),
TrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
"string"
)
),
PriceClass = "PriceClass_100"|"PriceClass_200"|"PriceClass_All",
Enabled = TRUE|FALSE
),
Tags = list(
Items = list(
list(
Key = "string",
Value = "string"
)
)
)
)
)
}
}
|
# These functions create a special "matrix" from a user supplied matrix, that
# can be cached, and then computes the inverse. If the inverse is already
# available in cache data, it returs the cached inverse and lets the user know
cacheSolve <- function(x, ...) {
  ## Return the matrix inverse of the special "matrix" object `x`.
  ## On a cache hit, emit a message and serve the stored inverse;
  ## otherwise compute it with solve(), store it via set_inv(), and return it.
  cached <- x$get_inv()
  if (is.null(cached)) {
    inverse <- solve(x$get())
    x$set_inv(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
# can be cached, and then computes the inverse. If the inverse is already
# available in cache data, it returs the cached inverse and lets the user know
cacheSolve <- function(x,...){
## Return a matrix that is the inverse of 'x'
m <- x$get_inv()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <-x$get()
m <-solve(data)
x$set_inv(m)
m
} |
#############################################
#Packae is liscnesed by #
# KrewnSolotions /< /? [- \/\/ |\| #
#############################################
#http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3258155/
# qPCR analysis, method "5PSM": estimates the initial template fluorescence
# (F0) of each reaction curve in `data1` using the second-derivative-maximum
# cycle (Cq) and the per-cycle amplification efficiency at that cycle.
# data1: list/data.frame of fluorescence curves; element 1 is skipped
# (presumably the cycle index column — TODO confirm).
# Side effects: writes "5PSM_FULL.ddv" and "5PSM.ddv" to the working directory.
# NOTE(review): depends on globals `CT` and `gd` and on helpers `printSHIT`
# and `LOLprint` defined elsewhere in the package — confirm they are loaded.
m8 <- function(data1){
   Cq <- list()
   Eff <- list()
   F0 <- list()
   for(k in 2:length(data1)) {
      tryCatch({
      Cq[[k-1]] <- which.max(diff(diff(data1[[k]]))) #second derviative maximum
      # efficiency = fluorescence ratio between cycle Cq and the cycle before it
      Eff[[k-1]] <- data1[[k]][[Cq[[k-1]]]]/data1[[k]][[Cq[[k-1]]-1]]
      # back-extrapolate to cycle 0: F0 = F(Cq) / Eff^Cq
      F0[[k-1]] <- (data1[[k]][[Cq[[k-1]]]])/((Eff[[k-1]])^Cq[[k-1]])
      # debug dump for the 10th curve only
      if(k == 10) {
         print(data1[[k]])
         print(Cq[[k-1]])
         print(F0[[k-1]])
      }
      }, error = function(err) {
         # NOTE(review): `<-` inside the handler assigns to a LOCAL copy of F0
         # in the handler's environment; the outer F0 is left unset for this k.
         # `<<-` was probably intended — confirm before relying on the fallback.
         F0[[k-1]] <- 1
      })
   }
   # full report: every chamber's F0
   printSHIT(F0,"Method: 5PSM", paste("Chamber ID", "Initial Template Fluorescence", sep=","), "5PSM_FULL.ddv")
   # filtered report: only chambers with a positive CT value
   tempCont <- list()
   tempCounter <- 1
   smallFluo <- list()
   tempCont[[1]] <- "Method: 5PSM"
   tempCont[[2]] <- paste("Chamber ID", "Initial Template Fluorescence")
   for(k in 1:length(F0)) {
      tryCatch({
      if(CT[[k]] > 0) {
         # chamber ID built from two fields of the global `gd` structure
         temp223 <- paste(gd[[1]][[2]][[k+2]][1], gd[[1]][[2]][[k+2]][2], sep="-")
         tempCont[[tempCounter+2]] <- paste(temp223, F0[[k]], sep=",")
         smallFluo[[tempCounter]] <- F0[[k]]
         tempCounter <- tempCounter + 1
      }
      }, error = function(err) {
      })
   }
   writeLines(LOLprint(tempCont), "5PSM.ddv")
   # returns the list of F0 values for chambers that passed the CT filter
   return(smallFluo)
}
| /m8.R | permissive | requiem3/Rqpcr | R | false | false | 1,733 | r | #############################################
#Packae is liscnesed by #
# KrewnSolotions /< /? [- \/\/ |\| #
#############################################
#http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3258155/
m8 <- function(data1){
Cq <- list()
Eff <- list()
F0 <- list()
for(k in 2:length(data1)) {
tryCatch({
Cq[[k-1]] <- which.max(diff(diff(data1[[k]]))) #second derviative maximum
Eff[[k-1]] <- data1[[k]][[Cq[[k-1]]]]/data1[[k]][[Cq[[k-1]]-1]]
F0[[k-1]] <- (data1[[k]][[Cq[[k-1]]]])/((Eff[[k-1]])^Cq[[k-1]])
if(k == 10) {
print(data1[[k]])
print(Cq[[k-1]])
print(F0[[k-1]])
}
}, error = function(err) {
F0[[k-1]] <- 1
})
}
printSHIT(F0,"Method: 5PSM", paste("Chamber ID", "Initial Template Fluorescence", sep=","), "5PSM_FULL.ddv")
tempCont <- list()
tempCounter <- 1
smallFluo <- list()
tempCont[[1]] <- "Method: 5PSM"
tempCont[[2]] <- paste("Chamber ID", "Initial Template Fluorescence")
for(k in 1:length(F0)) {
tryCatch({
if(CT[[k]] > 0) {
temp223 <- paste(gd[[1]][[2]][[k+2]][1], gd[[1]][[2]][[k+2]][2], sep="-")
tempCont[[tempCounter+2]] <- paste(temp223, F0[[k]], sep=",")
smallFluo[[tempCounter]] <- F0[[k]]
tempCounter <- tempCounter + 1
}
}, error = function(err) {
})
}
writeLines(LOLprint(tempCont), "5PSM.ddv")
return(smallFluo)
}
|
# Overlaid kernel-density plot ("density histogram") of `x`, one translucent
# polygon plus rug per level of the factor `fac`.
#   x           : numeric vector of values to plot
#   fac         : factor splitting x into groups
#   col         : either a vector of colours, or palette() indices (numeric);
#                 numeric colours get a 40%-alpha fill and an opaque rug
#   legend      : logical, draw a legend?
#   pos.legend  : legend position (passed to legend())
#   title.legend/lab.legend : optional legend title and labels
#   drawextaxes : draw external axes; when FALSE, margins are shrunk
#   drawintaxes : draw a grey vertical line at x = 0
#   xlim        : optional x range; defaults to the histogram break range
# Side effects: draws on the current graphics device; par("mar") is restored
# on normal exit (NOTE(review): not via on.exit, so an error mid-plot leaves
# the margins changed).
dhist <- function(x,fac,col,legend,pos.legend,title.legend=NULL,lab.legend=NULL,xlab,ylab=NULL,
  drawextaxes=TRUE,drawintaxes=FALSE,xlim=NULL,...) {
  # tallest density among the groups -> common y-axis upper limit
  ymax <- integer(nlevels(fac))
  for (i in 1:nlevels(fac)) {
    ymax[i] <- max(density(x[as.numeric(fac)==i])$y)
  }
  # histogram computed only for its break positions (default xlim)
  h <- suppressWarnings(hist(x,freq=FALSE,plot=FALSE))
  oldmar <- par()$mar
  if (is.null(ylab)) {ylab="Probability density"}
  if (!drawextaxes) {par(mar=c(3.1,2.1,2.1,0.1))}
  xlim <- if(!is.null(xlim)) {xlim} else {range(h$breaks)}
  # empty canvas; all content is added below
  plot(0,xlim=xlim,ylim=c(0,max(ymax)),xlab="",ylab="",cex=0,axes=FALSE,...)
  if(drawextaxes) {
    axis(1)
    axis(2)
  }
  if (drawintaxes) {abline(v=0,col="grey")}
  # axis labels placed closer to the plot when external axes are hidden
  lab.line <- c(ifelse(drawextaxes,3,1.2),ifelse(drawextaxes,3,0.6))
  mtext(c(xlab,ylab),side=c(1,2),line=lab.line,at=c(mean(range(x)),mean(c(0,max(ymax)))))
  # per-group kernel density estimates
  dens <- tapply(x,fac,function(x) density(x))
  if (!is.numeric(col)) {
    col3 <- col4 <- col
  } else {
    # numeric col = palette indices: derive translucent fill and opaque rug colours
    col2 <- col2rgb(palette()[col])
    col3 <- apply(col2,2,function(x) rgb(x[1],x[2],x[3],alpha=0.4*255,maxColorValue=255))
    col4 <- apply(col2,2,function(x) rgb(x[1],x[2],x[3],alpha=255,maxColorValue=255))
  }
  # draw one filled density polygon and one rug per group
  for (i in 1:nlevels(fac)) {
    d <- dens[[i]]
    polygon(d$x,d$y,col=col3[i],border=NA)
    rug(x[as.numeric(fac)==i],col=col4[i])
  }
  box()
  if (legend) {
    if (is.null(lab.legend)) {lab.legend <- levels(fac)}
    if (!is.null(title.legend) && nchar(title.legend)>0) {
      legend(pos.legend,lab.legend,fill=col3,title=title.legend)
    } else {
      legend(pos.legend,lab.legend,fill=col3)
    }
  }
  # restore the caller's margins
  par(mar=oldmar)
}
| /R/dhist.R | no_license | SeptiawanAjiP/RVAideMemoire | R | false | false | 1,625 | r | dhist <- function(x,fac,col,legend,pos.legend,title.legend=NULL,lab.legend=NULL,xlab,ylab=NULL,
drawextaxes=TRUE,drawintaxes=FALSE,xlim=NULL,...) {
ymax <- integer(nlevels(fac))
for (i in 1:nlevels(fac)) {
ymax[i] <- max(density(x[as.numeric(fac)==i])$y)
}
h <- suppressWarnings(hist(x,freq=FALSE,plot=FALSE))
oldmar <- par()$mar
if (is.null(ylab)) {ylab="Probability density"}
if (!drawextaxes) {par(mar=c(3.1,2.1,2.1,0.1))}
xlim <- if(!is.null(xlim)) {xlim} else {range(h$breaks)}
plot(0,xlim=xlim,ylim=c(0,max(ymax)),xlab="",ylab="",cex=0,axes=FALSE,...)
if(drawextaxes) {
axis(1)
axis(2)
}
if (drawintaxes) {abline(v=0,col="grey")}
lab.line <- c(ifelse(drawextaxes,3,1.2),ifelse(drawextaxes,3,0.6))
mtext(c(xlab,ylab),side=c(1,2),line=lab.line,at=c(mean(range(x)),mean(c(0,max(ymax)))))
dens <- tapply(x,fac,function(x) density(x))
if (!is.numeric(col)) {
col3 <- col4 <- col
} else {
col2 <- col2rgb(palette()[col])
col3 <- apply(col2,2,function(x) rgb(x[1],x[2],x[3],alpha=0.4*255,maxColorValue=255))
col4 <- apply(col2,2,function(x) rgb(x[1],x[2],x[3],alpha=255,maxColorValue=255))
}
for (i in 1:nlevels(fac)) {
d <- dens[[i]]
polygon(d$x,d$y,col=col3[i],border=NA)
rug(x[as.numeric(fac)==i],col=col4[i])
}
box()
if (legend) {
if (is.null(lab.legend)) {lab.legend <- levels(fac)}
if (!is.null(title.legend) && nchar(title.legend)>0) {
legend(pos.legend,lab.legend,fill=col3,title=title.legend)
} else {
legend(pos.legend,lab.legend,fill=col3)
}
}
par(mar=oldmar)
}
|
### MIDTERM ###
# Titanic survival visualisation: faceted bar chart of passenger share by
# embarkation port, sex, passenger class, and survival.
#1
# NOTE(review): absolute, machine-specific path — setwd() in a script is
# fragile; prefer running from the project directory or using relative paths.
setwd("C:/Users/Marie/Documents/Schools/The New School/Data Visualization/Assignment 3")
titanic <- read.csv("titanic.txt")
#2. Embarkation
# drop rows with a blank embarkation port (see footnote in the final plot)
ForGraph <- titanic[(titanic$Embarked == '') == FALSE , ]
ForGraph$Embarked.f <- factor(ForGraph$Embarked)
# NOTE(review): relabelling by position assumes the levels sort as C, Q, S —
# verify against the data before trusting the port names.
levels(ForGraph$Embarked.f) <- c("Cherbourg FR", "Queenstown IRE", "Southampton UK")
# reorder so the busiest port (Southampton) is the first facet column
ForGraph$Embarked.f2 <- factor(ForGraph$Embarked.f, levels = c("Southampton UK", "Cherbourg FR", "Queenstown IRE"))
#3. Survived
ForGraph$Survived.f <- factor(ForGraph$Survived, labels = c("Non-Survivors", "Survivors"))
ForGraph$Survived.f2 <- factor(ForGraph$Survived.f, levels = c("Survivors", "Non-Survivors"))
#4. Sex
# NOTE(review): positional relabel assumes levels sort as female, male.
levels(ForGraph$Sex) <- c("Female", "Male")
#5. Pclass
ForGraph$Pclass.f <- factor(ForGraph$Pclass, labels = c("1st Class", "2nd Class", "3rd Class"))
#6. Plot Setup
library(ggplot2)
library(scales)
library(gridExtra)
#7. Plotting
# bars show each group's share of ALL passengers (count / grand total)
p <- ggplot(ForGraph, aes(x = Sex, fill = Pclass.f))+
geom_bar(aes(y=(..count..)/sum(..count..)), position="dodge", colour="black")+
scale_y_continuous(labels = percent_format(), breaks = c(0.05, 0.10, 0.15, 0.20, 0.25))+
facet_grid(Survived.f2 ~ Embarked.f2)+
labs(fill = "Passenger Class")+
ylab ("% of Total Passengers")+
ggtitle("TITANIC: Survival Rate by Point of Embarkation, Sex & Class \nby Marie Bakke")+
scale_fill_manual(values=c("#FF3399", "#33FFCC", "#FF3300"))+
theme(panel.background = element_rect(fill = "#FFFFFF", colour = "grey"),
panel.grid.major.y = element_line(colour = "grey"),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
axis.text.x = element_text(colour = "black"),
axis.text.y = element_text(colour = "black"))+
theme(strip.background = element_rect(fill = "#FFFF99", colour = "grey"))
p
# attach the data-exclusion footnote below the plot
g <- arrangeGrob(p, sub = textGrob("Footnote: Two observations are removed due to lack of data.", x = 0, hjust = -0.1, vjust=0.1, gp = gpar(fontface = "italic", fontsize = 8)))
g | /Assignments/midterm/bakke.R | no_license | aaronxhill/dataviz14f | R | false | false | 2,043 | r | ### MIDTERM ###
#1
setwd("C:/Users/Marie/Documents/Schools/The New School/Data Visualization/Assignment 3")
titanic <- read.csv("titanic.txt")
#2. Embarkation
ForGraph <- titanic[(titanic$Embarked == '') == FALSE , ]
ForGraph$Embarked.f <- factor(ForGraph$Embarked)
levels(ForGraph$Embarked.f) <- c("Cherbourg FR", "Queenstown IRE", "Southampton UK")
ForGraph$Embarked.f2 <- factor(ForGraph$Embarked.f, levels = c("Southampton UK", "Cherbourg FR", "Queenstown IRE"))
#3. Survived
ForGraph$Survived.f <- factor(ForGraph$Survived, labels = c("Non-Survivors", "Survivors"))
ForGraph$Survived.f2 <- factor(ForGraph$Survived.f, levels = c("Survivors", "Non-Survivors"))
#4. Sex
levels(ForGraph$Sex) <- c("Female", "Male")
#5. Pclass
ForGraph$Pclass.f <- factor(ForGraph$Pclass, labels = c("1st Class", "2nd Class", "3rd Class"))
#6. Plot Setup
library(ggplot2)
library(scales)
library(gridExtra)
#7. Plotting
p <- ggplot(ForGraph, aes(x = Sex, fill = Pclass.f))+
geom_bar(aes(y=(..count..)/sum(..count..)), position="dodge", colour="black")+
scale_y_continuous(labels = percent_format(), breaks = c(0.05, 0.10, 0.15, 0.20, 0.25))+
facet_grid(Survived.f2 ~ Embarked.f2)+
labs(fill = "Passenger Class")+
ylab ("% of Total Passengers")+
ggtitle("TITANIC: Survival Rate by Point of Embarkation, Sex & Class \nby Marie Bakke")+
scale_fill_manual(values=c("#FF3399", "#33FFCC", "#FF3300"))+
theme(panel.background = element_rect(fill = "#FFFFFF", colour = "grey"),
panel.grid.major.y = element_line(colour = "grey"),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
axis.text.x = element_text(colour = "black"),
axis.text.y = element_text(colour = "black"))+
theme(strip.background = element_rect(fill = "#FFFF99", colour = "grey"))
p
g <- arrangeGrob(p, sub = textGrob("Footnote: Two observations are removed due to lack of data.", x = 0, hjust = -0.1, vjust=0.1, gp = gpar(fontface = "italic", fontsize = 8)))
g |
# Fit a linear regression predicting Sepal.Length from all other iris columns.
reg_model <- lm(Sepal.Length ~ ., data=iris)
reg_model$coefficients # fitted coefficients
reg_model$residuals # residuals
summary(reg_model) # regression summary (coefficient tests, R^2, F-test)
# p-value < 0.01, so the model is statistically significant
# fitted values on the training data (no newdata supplied)
predict(reg_model)
| /R/Machine_Learning_Linear_Regression_Basic.R | no_license | nuno1111/Bigdata-ML-Source | R | false | false | 290 | r | # Sepal.Length 값을 예측하는 회귀분석 수행
reg_model <- lm(Sepal.Length ~ ., data=iris)
reg_model$coefficients # 계수
reg_model$residuals # 잔차
summary(reg_model) # 회귀분석 결과 정보
# p-value < 0.01이므로 유의미한 모델
# 예측
predict(reg_model)
|
#' Filter predictors according to specified criteria.
#'
#' @param traindata the train set
#' @param testdata the test set
#' @param y the response variable. Must be not \code{NULL} if \code{correlationThreshold} is not \code{NULL}.
#' @param removeOnlyZeroVariacePredictors \code{TRUE} to remove only zero variance predictors
#' @param performVarianceAnalysisOnTrainSetOnly \code{TRUE} to perform the variance analysis on the train set only
#' @param correlationThreshold a correlation threshold above which keeping predictors
#' (considered only if \code{removeOnlyZeroVariacePredictors} is \code{FALSE}).
#' @param removePredictorsMakingIllConditionedSquareMatrix \code{TRUE} to predictors making ill conditioned square matrices
#' @param removeHighCorrelatedPredictors \code{TRUE} to remove high correlared predictors
#' @param removeIdenticalPredictors \code{TRUE} to remove identical predictors (using \code{base::identical} function)
#' @param featureScaling \code{TRUE} to perform feature scaling
#' @param verbose \code{TRUE} to set verbose mode
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' l = ff.featureFilter (traindata = Xtrain,
#' testdata = Xtest,
#' removeOnlyZeroVariacePredictors=TRUE)
#' Xtrain = l$traindata
#' Xtest = l$testdata
#' @importFrom caret preProcess
#' @importFrom caret nearZeroVar
#' @importFrom subselect trim.matrix
#' @export
#' @return the list of trainset and testset after applying the specified filters
#'
ff.featureFilter <- function(traindata,
                             testdata,
                             y = NULL,
                             removeOnlyZeroVariacePredictors=FALSE,
                             performVarianceAnalysisOnTrainSetOnly = TRUE ,
                             correlationThreshold = NULL,
                             removePredictorsMakingIllConditionedSquareMatrix = TRUE,
                             removeIdenticalPredictors = TRUE,
                             removeHighCorrelatedPredictors = TRUE,
                             featureScaling = TRUE,
                             verbose = TRUE) {
  ## --- argument validation (see roxygen header for parameter semantics) ---
  stopifnot( ! (is.null(testdata) && is.null(traindata)) )
  stopifnot( ! (removeOnlyZeroVariacePredictors && (! is.null(correlationThreshold))) )
  stopifnot( ! (is.null(y) && (! is.null(correlationThreshold))) )
  ## Stack TEST rows first, then TRAIN rows; the re-split at the end of the
  ## function relies on this exact ordering.
  data = rbind(testdata,traindata)
  ### near-zero-variance filter (caret), or zero-variance-only filter
  if (! removeOnlyZeroVariacePredictors ) {
    PredToDel = NULL
    if (performVarianceAnalysisOnTrainSetOnly) {
      if (verbose) cat(">>> applying caret nearZeroVar performing caret nearZeroVar function on train set only ... \n")
      PredToDel = caret::nearZeroVar(traindata)
    } else {
      if (verbose) cat(">>> applying caret nearZeroVar performing caret nearZeroVar function on both train set and test set ... \n")
      PredToDel = caret::nearZeroVar(data)
    }
    if (! is.null(correlationThreshold) ) {
      if (verbose) cat(">>> computing correlation ... \n")
      ## correlation of each train-set predictor with the response y;
      ## predictors above the threshold are kept even if near-zero-variance
      corrValues <- apply(traindata,
                          MARGIN = 2,
                          FUN = function(x, y) cor(x, y),
                          y = y)
      PredToReinsert = as.numeric(which(! is.na(corrValues) & corrValues > correlationThreshold))
      if (verbose) cat(">> There are high correlated predictors with response variable. N. ",length(PredToReinsert)," - predictors: ",
                       paste(colnames(data) [PredToReinsert] , collapse=" " ) , " ... \n ")
      PredToDel = PredToDel[! PredToDel %in% PredToReinsert]
    }
    if (length(PredToDel) > 0) {
      if (verbose) cat("removing ",length(PredToDel)," nearZeroVar predictors: ",
                       paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
      data = data [,-PredToDel,drop=F]
    }
  } else {
    ## drop only predictors with a single distinct value
    if (verbose) cat(">>> removing zero variance predictors only ... \n")
    card = NULL
    if (performVarianceAnalysisOnTrainSetOnly) {
      if (verbose) cat(">>> removing zero variance predictors performing variance analysis on train set only ... \n")
      card = apply(traindata,2,function(x) length(unique(x)) )
    } else {
      if (verbose) cat(">>> removing zero variance predictors performing variance analysis on both train set and test set ... \n")
      card = apply(data,2,function(x) length(unique(x)) )
    }
    PredToDel = as.numeric(which(card < 2))
    if (length(PredToDel) > 0) {
      if (verbose) cat("removing ",length(PredToDel)," ZeroVariacePredictors predictors: ",
                       paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
      data = data [,-PredToDel,drop=F]
    }
  }
  ### remove predictors whose covariance matrix is ill-conditioned (subselect)
  if (removePredictorsMakingIllConditionedSquareMatrix) {
    if (verbose) cat(">>> finding for predictors that make ill-conditioned square matrix ... \n")
    PredToDel = subselect::trim.matrix( cov( data ) )
    if (length(PredToDel$numbers.discarded) > 0) {
      if (verbose) cat("removing ",length(PredToDel$numbers.discarded)," predictors that make ill-conditioned square matrix: ",
                       paste(colnames(data) [PredToDel$numbers.discarded] , collapse=" " ) , " ... \n ")
      data = data [,-PredToDel$numbers.discarded,drop=F]
    }
  }
  ## remove predictors that are exact duplicates of an earlier column
  if (removeIdenticalPredictors) {
    colToRemove = rep(F,ncol(data))
    ## guard: with a single column the old 1:(ncol(data)-1) evaluated to
    ## c(1, 0) and indexed out-of-bounds columns
    if (ncol(data) > 1) {
      for (i in seq_len(ncol(data) - 1)) {
        for (j in seq(i + 1, ncol(data))) {
          if (identical(data[,i],data[,j])) {
            colToRemove[j] = T
          }
        }
      }
    }
    if (sum(colToRemove) > 0) {
      if (verbose) cat("removing ",sum(colToRemove)," identical predictors: ",
                       paste(colnames(data) [colToRemove] , collapse=" " ) , " ... \n ")
      data = data[,-which(colToRemove),drop=F]
    }
  }
  # remove one member of each highly correlated predictor pair (caret heuristic)
  if (removeHighCorrelatedPredictors) {
    if (verbose) cat(">>> finding for high correlated predictors ... \n")
    PredToDel = caret::findCorrelation(cor( data ))
    if (length(PredToDel) > 0) {
      if (verbose) cat("removing ",length(PredToDel), " removing high correlated predictors: ",
                       paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
      data = data [,-PredToDel,drop=F]
    }
  }
  ## center and scale the surviving predictors
  if (featureScaling) {
    if (verbose) cat(">>> feature scaling ... \n")
    scaler = caret::preProcess(data,method = c("center","scale"))
    data = predict(scaler,data)
  }
  ## re-split in the same order used by the rbind above: test first, then train
  if ( ! is.null(testdata) && ! is.null(traindata) ) {
    nTest = dim(testdata)[1]  # row counts are unchanged by the column filters
    testdata = data[1:nTest,]
    traindata = data[(nTest+1):(dim(data)[1]),]
  } else if (is.null(testdata)) {
    traindata = data
  } else if (is.null(traindata)) {
    testdata = data
  }
  return(list(traindata = traindata,testdata = testdata))
}
#' Make polynomial terms of a \code{data.frame}
#'
#' @param x a \code{data.frame} of \code{numeric}
#' @param n the polynomial degree
#' @param direction if set to \code{0} returns the terms \code{x^(1/n),x^(1/(n-1)),...,x,x^2,...,x^n}.
#' If set to \code{-1} returns the terms \code{x^(1/n),x^(1/(n-1)),...,x}.
#' If set to \code{1} returns the terms \code{x,x^2,...,x^n}.
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' data = rbind(Xtrain,Xtest)
#' data.poly = ff.poly(x=data,n=3)
#' Xtrain.poly = data.poly[1:nrow(Xtrain),]
#' Xtest.poly = data.poly[(nrow(Xtrain)+1):nrow(data),]
#' @export
#' @return the \code{data.frame} with the specified polynomial terms
#'
ff.poly = function (x,n,direction=0) {
  ## Build polynomial terms for every column of the data.frame `x`
  ## (see roxygen header for the direction semantics):
  ##   direction > 0 : x, x^2, ..., x^n
  ##   direction = 0 : x, x^2..x^n, x^(1/2)..x^(1/n)
  ##   direction < 0 : x, x^(1/2), ..., x^(1/n)
  ## Column names follow the original convention, e.g. "a^2" and "a^1/2".
  ## Fix/generalization: the degree check now accepts integer degrees
  ## (e.g. 3L); the old identical(class(n),'numeric') rejected them.
  stopifnot(is.data.frame(x), is.numeric(n), length(n) == 1L)
  ## every column must be a plain atomic vector (no character/factor columns)
  ok <- vapply(x, function(col) {
    is.atomic(col) && !is.character(col) && !is.factor(col)
  }, logical(1))
  stopifnot(all(ok))
  if (n == 1) {
    return(x)
  }
  ## raise every column of x to the power `e` and name the result columns `nm`
  raise <- function(e, nm) {
    d <- as.data.frame(lapply(x, function(col) col^e), check.names = FALSE)
    colnames(d) <- nm
    d
  }
  pos.terms <- NULL
  frac.terms <- NULL
  if (direction >= 0) {
    ## integer powers x^2 .. x^n
    pos.terms <- do.call(cbind, lapply(2:n, function(i) {
      raise(i, paste0(colnames(x), '^', i))
    }))
  }
  if (direction <= 0) {
    ## fractional powers x^(1/2) .. x^(1/n)
    frac.terms <- do.call(cbind, lapply(2:n, function(i) {
      raise(1/i, paste0(colnames(x), '^1/', i))
    }))
  }
  if (direction > 0) {
    cbind(x, pos.terms)
  } else if (direction == 0) {
    cbind(x, pos.terms, frac.terms)
  } else {
    cbind(x, frac.terms)
  }
}
#' Filter a \code{data.frame} of numeric according to a given threshold of correlation
#'
#' @param Xtrain a train set \code{data.frame} of \code{numeric}
#' @param Xtest a test set \code{data.frame} of \code{numeric}
#' @param y the output variable (as numeric vector)
#' @param method a character string indicating which correlation method is to be used for the test. One of "pearson", "kendall", or "spearman".
#' @param abs_th an absolute threshold (= number of data frame columns)
#' @param rel_th a relative threshold (= percentage of data frame columns)
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' y = 1:6
#' l = ff.corrFilter(Xtrain=Xtrain,Xtest=Xtest,y=y,rel_th=0.5)
#' Xtrain.filtered = l$Xtrain
#' Xtest.filtered =l$Xtest
#' @export
#' @return a \code{list} of filtered train set and test set with correlation test results
#'
ff.corrFilter = function(Xtrain,Xtest,y,abs_th=NULL,rel_th=1,method = 'pearson') {
warn_def = getOption('warn')
options(warn=-1)
####
stopifnot(is.null(rel_th) || is.null(abs_th))
if (! is.null(rel_th) ) stopifnot( rel_th >0 && rel_th <=1 )
if (! is.null(abs_th) ) stopifnot( abs_th >0 && abs_th <=ncol(Xtrain) )
stopifnot( ! (is.null(Xtrain) || is.null(Xtest)) )
stopifnot( ncol(Xtrain) == ncol(Xtest) )
stopifnot( ncol(Xtrain) > 0 )
stopifnot( nrow(Xtrain) > 0 )
stopifnot( nrow(Xtest) > 0 )
stopifnot( sum(unlist(lapply(Xtrain,function(x) {
return(! (is.atomic(x) && ! is.character(x) && ! is.factor(x)))
}))) == 0 )
stopifnot( sum(unlist(lapply(Xtest,function(x) {
return(! (is.atomic(x) && ! is.character(x) && ! is.factor(x)))
}))) == 0 )
stopifnot(identical(method,'pearson') || identical(method,'kendall') || identical(method,'spearman'))
### TypeIError test
getPvalueTypeIError = function(x,y) {
test = method
pvalue = NULL
estimate = NULL
interpretation = NULL
### pearson / kendall / spearman
test.corr = cor.test(x = x , y = y , method = method)
pvalue = test.corr$p.value
estimate = test.corr$estimate
if (pvalue < 0.05) {
interpretation = 'there is correlation'
} else {
interpretation = 'data do not give you any reason to conclude that the correlation is real'
}
return(list(test=test,pvalue=pvalue,estimate=estimate,interpretation=interpretation))
}
##
int_rel_th = abs_th
if (! is.null(rel_th) ) {
int_rel_th = floor(ncol(Xtrain) * rel_th)
}
## corr analysis
aa = lapply(Xtrain , function(x) {
dummy = list(
test = method,
pvalue=1,
estimate = 0,
interpretation = "error")
setNames(object = dummy , nm = names(x))
ret = plyr::failwith( dummy, getPvalueTypeIError , quiet = TRUE)(x=x,y=y)
return (ret)
})
## make data frame test results
aadf = data.frame(predictor = rep(NA,length(aa)) ,
test = rep(NA,length(aa)) ,
pvalue = rep(NA,length(aa)) ,
estimate = rep(NA,length(aa)) ,
interpretation = rep(NA,length(aa)))
lapply(seq_along(aa) , function(i) {
aadf[i,]$predictor <<- names(aa[i])
aadf[i,]$test <<- aa[[i]]$test
aadf[i,]$pvalue <<- aa[[i]]$pvalue
aadf[i,]$estimate <<- aa[[i]]$estimate
aadf[i,]$interpretation <<- aa[[i]]$interpretation
})
aadf = aadf[order(abs(aadf$estimate) , decreasing = T), ]
## cut to the given threshold
aadf_cut = aadf[1:int_rel_th,,drop=F]
options(warn=warn_def)
return(list(
Xtrain = Xtrain[,aadf_cut$predictor,drop=F],
Xtest = Xtest[,aadf_cut$predictor,drop=F],
test.results = aadf
))
} | /R-package/R/featureFilter.R | permissive | fxcebx/fast-furious | R | false | false | 12,921 | r |
#' Filter predictors according to specified criteria.
#'
#' @param traindata the train set
#' @param testdata the test set
#' @param y the response variable. Must be not \code{NULL} if \code{correlationThreshold} is not \code{NULL}.
#' @param removeOnlyZeroVariacePredictors \code{TRUE} to remove only zero variance predictors
#' @param performVarianceAnalysisOnTrainSetOnly \code{TRUE} to perform the variance analysis on the train set only
#' @param correlationThreshold a correlation threshold above which keeping predictors
#' (considered only if \code{removeOnlyZeroVariacePredictors} is \code{FALSE}).
#' @param removePredictorsMakingIllConditionedSquareMatrix \code{TRUE} to predictors making ill conditioned square matrices
#' @param removeHighCorrelatedPredictors \code{TRUE} to remove high correlared predictors
#' @param removeIdenticalPredictors \code{TRUE} to remove identical predictors (using \code{base::identical} function)
#' @param featureScaling \code{TRUE} to perform feature scaling
#' @param verbose \code{TRUE} to set verbose mode
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' l = ff.featureFilter (traindata = Xtrain,
#' testdata = Xtest,
#' removeOnlyZeroVariacePredictors=TRUE)
#' Xtrain = l$traindata
#' Xtest = l$testdata
#' @importFrom caret preProcess
#' @importFrom caret nearZeroVar
#' @importFrom subselect trim.matrix
#' @export
#' @return the list of trainset and testset after applying the specified filters
#'
ff.featureFilter <- function(traindata,
testdata,
y = NULL,
removeOnlyZeroVariacePredictors=FALSE,
performVarianceAnalysisOnTrainSetOnly = TRUE ,
correlationThreshold = NULL,
removePredictorsMakingIllConditionedSquareMatrix = TRUE,
removeIdenticalPredictors = TRUE,
removeHighCorrelatedPredictors = TRUE,
featureScaling = TRUE,
verbose = TRUE) {
stopifnot( ! (is.null(testdata) && is.null(traindata)) )
stopifnot( ! (removeOnlyZeroVariacePredictors && (! is.null(correlationThreshold))) )
stopifnot( ! (is.null(y) && (! is.null(correlationThreshold))) )
data = rbind(testdata,traindata)
### removing near zero var predictors
if (! removeOnlyZeroVariacePredictors ) {
PredToDel = NULL
if (performVarianceAnalysisOnTrainSetOnly) {
if (verbose) cat(">>> applying caret nearZeroVar performing caret nearZeroVar function on train set only ... \n")
PredToDel = caret::nearZeroVar(traindata)
} else {
if (verbose) cat(">>> applying caret nearZeroVar performing caret nearZeroVar function on both train set and test set ... \n")
PredToDel = caret::nearZeroVar(data)
}
if (! is.null(correlationThreshold) ) {
if (verbose) cat(">>> computing correlation ... \n")
corrValues <- apply(traindata,
MARGIN = 2,
FUN = function(x, y) cor(x, y),
y = y)
PredToReinsert = as.numeric(which(! is.na(corrValues) & corrValues > correlationThreshold))
if (verbose) cat(">> There are high correlated predictors with response variable. N. ",length(PredToReinsert)," - predictors: ",
paste(colnames(data) [PredToReinsert] , collapse=" " ) , " ... \n ")
PredToDel = PredToDel[! PredToDel %in% PredToReinsert]
}
if (length(PredToDel) > 0) {
if (verbose) cat("removing ",length(PredToDel)," nearZeroVar predictors: ",
paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
data = data [,-PredToDel,drop=F]
}
} else {
if (verbose) cat(">>> removing zero variance predictors only ... \n")
card = NULL
if (performVarianceAnalysisOnTrainSetOnly) {
if (verbose) cat(">>> removing zero variance predictors performing variance analysis on train set only ... \n")
card = apply(traindata,2,function(x) length(unique(x)) )
} else {
if (verbose) cat(">>> removing zero variance predictors performing variance analysis on both train set and test set ... \n")
card = apply(data,2,function(x) length(unique(x)) )
}
PredToDel = as.numeric(which(card < 2))
if (length(PredToDel) > 0) {
if (verbose) cat("removing ",length(PredToDel)," ZeroVariacePredictors predictors: ",
paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
data = data [,-PredToDel,drop=F]
}
}
### removing predictors that make ill-conditioned square matrix
if (removePredictorsMakingIllConditionedSquareMatrix) {
if (verbose) cat(">>> finding for predictors that make ill-conditioned square matrix ... \n")
PredToDel = subselect::trim.matrix( cov( data ) )
if (length(PredToDel$numbers.discarded) > 0) {
if (verbose) cat("removing ",length(PredToDel$numbers.discarded)," predictors that make ill-conditioned square matrix: ",
paste(colnames(data) [PredToDel$numbers.discarded] , collapse=" " ) , " ... \n ")
data = data [,-PredToDel$numbers.discarded,drop=F]
}
}
## removing identical predictors
if (removeIdenticalPredictors) {
colToRemove = rep(F,ncol(data))
lapply( 1:(ncol(data)-1) , function(i) {
lapply( (i+1):ncol(data) ,function(j) {
if (identical(data[,i],data[,j])) {
colToRemove[j] <<- T
}
})
})
if (sum(colToRemove) > 0) {
if (verbose) cat("removing ",sum(colToRemove)," identical predictors: ",
paste(colnames(data) [colToRemove] , collapse=" " ) , " ... \n ")
data = data[,-which(colToRemove),drop=F]
}
}
# removing high correlated predictors
if (removeHighCorrelatedPredictors) {
if (verbose) cat(">>> finding for high correlated predictors ... \n")
PredToDel = caret::findCorrelation(cor( data ))
if (length(PredToDel) > 0) {
if (verbose) cat("removing ",length(PredToDel), " removing high correlated predictors: ",
paste(colnames(data) [PredToDel] , collapse=" " ) , " ... \n ")
data = data [,-PredToDel,drop=F]
}
}
## feature scaling
if (featureScaling) {
if (verbose) cat(">>> feature scaling ... \n")
scaler = caret::preProcess(data,method = c("center","scale"))
data = predict(scaler,data)
}
## reassembling
if ( ! is.null(testdata) && ! is.null(traindata) ) {
testdata = data[1:(dim(testdata)[1]),]
traindata = data[((dim(testdata)[1])+1):(dim(data)[1]),]
} else if (is.null(testdata)) {
traindata = data
} else if (is.null(traindata)) {
testdata = data
}
return(list(traindata = traindata,testdata = testdata))
}
#' Make polynomial terms of a \code{data.frame}
#'
#' @param x a \code{data.frame} of \code{numeric}
#' @param n the polynomial degree
#' @param direction if set to \code{0} returns the terms \code{x^(1/n),x^(1/(n-1)),...,x,x^2,...,x^n}.
#' If set to \code{-1} returns the terms \code{x^(1/n),x^(1/(n-1)),...,x}.
#' If set to \code{1} returns the terms \code{x,x^2,...,x^n}.
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' data = rbind(Xtrain,Xtest)
#' data.poly = ff.poly(x=data,n=3)
#' Xtrain.poly = data.poly[1:nrow(Xtrain),]
#' Xtest.poly = data.poly[(nrow(Xtrain)+1):nrow(data),]
#' @export
#' @return the \code{data.frame} with the specified polynomial terms
#'
ff.poly = function (x,n,direction=0) {
  # Validate inputs:
  #  - inherits() (not identical(class(...))) so data.frame subclasses work
  #  - is.numeric() (not class()=='numeric') so integer n (e.g. 3L) works
  stopifnot(inherits(x, 'data.frame'),
            is.numeric(n), length(n) == 1, n >= 1)
  # every column must be atomic and neither character nor factor
  col.ok <- vapply(x, function(col) {
    is.atomic(col) && !is.character(col) && !is.factor(col)
  }, logical(1))
  stopifnot(all(col.ok))
  if (n == 1) {
    return (x)
  }
  # Build the blocks x^f(2), ..., x^f(n) for a given exponent transform,
  # naming columns via nameFun, e.g. "a^2" or "a^1/2".
  makeTerms <- function(expFun, nameFun) {
    blocks <- lapply(2:n, function(i) {
      d <- x
      d[] <- lapply(x, function(col) expFun(col, i))
      colnames(d) <- nameFun(colnames(x), i)
      d
    })
    # cbind.data.frame preserves non-syntactic names like "a^2"
    do.call(cbind, blocks)
  }
  x.poly = NULL
  x.poly.2 = NULL
  ## positive powers x^2 ... x^n
  if (direction>=0) {
    x.poly <- makeTerms(function(col, i) col^i,
                        function(nms, i) paste(nms,'^',i,sep=''))
  }
  ## fractional powers x^(1/2) ... x^(1/n)
  if (direction<=0) {
    x.poly.2 <- makeTerms(function(col, i) col^(1/i),
                          function(nms, i) paste(nms,'^1/',i,sep=''))
  }
  ## assemble according to requested direction
  if (direction>0) {
    return (cbind(x,x.poly))
  } else if (direction==0) {
    return (cbind(x,x.poly,x.poly.2))
  } else {
    return (cbind(x,x.poly.2))
  }
}
#' Filter a \code{data.frame} of numeric according to a given threshold of correlation
#'
#' @param Xtrain a train set \code{data.frame} of \code{numeric}
#' @param Xtest a test set \code{data.frame} of \code{numeric}
#' @param y the output variable (as numeric vector)
#' @param method a character string indicating which correlation method is to be used for the test. One of "pearson", "kendall", or "spearman".
#' @param abs_th an absolute threshold (= number of data frame columns)
#' @param rel_th a relative threshold (= percentage of data frame columns)
#'
#' @examples
#' Xtrain <- data.frame( a = rep(1:3 , each = 2), b = c(4:1,6,6), c = rep(1,6))
#' Xtest <- Xtrain + runif(nrow(Xtrain))
#' y = 1:6
#' l = ff.corrFilter(Xtrain=Xtrain,Xtest=Xtest,y=y,rel_th=0.5)
#' Xtrain.filtered = l$Xtrain
#' Xtest.filtered =l$Xtest
#' @export
#' @return a \code{list} of filtered train set and test set with correlation test results
#'
ff.corrFilter = function(Xtrain,Xtest,y,abs_th=NULL,rel_th=1,method = 'pearson') {
  ## Silence cor.test warnings (e.g. ties with spearman). on.exit guarantees
  ## the 'warn' option is restored even if a stopifnot below aborts early
  ## (the original only restored it on the success path).
  warn_def = getOption('warn')
  options(warn=-1)
  on.exit(options(warn = warn_def), add = TRUE)
  #### input checks
  stopifnot(is.null(rel_th) || is.null(abs_th))   # thresholds are mutually exclusive
  if (! is.null(rel_th) ) stopifnot( rel_th >0 && rel_th <=1 )
  if (! is.null(abs_th) ) stopifnot( abs_th >0 && abs_th <=ncol(Xtrain) )
  stopifnot( ! (is.null(Xtrain) || is.null(Xtest)) )
  stopifnot( ncol(Xtrain) == ncol(Xtest) )
  stopifnot( ncol(Xtrain) > 0 , nrow(Xtrain) > 0 , nrow(Xtest) > 0 )
  # every column must be atomic and neither character nor factor
  checkNumericLike = function(df) {
    bad = vapply(df, function(x) {
      ! (is.atomic(x) && ! is.character(x) && ! is.factor(x))
    }, logical(1))
    stopifnot(sum(bad) == 0)
  }
  checkNumericLike(Xtrain)
  checkNumericLike(Xtest)
  stopifnot(length(method) == 1,
            method %in% c('pearson','kendall','spearman'))
  ### correlation test between one predictor and the response
  getPvalueTypeIError = function(x,y) {
    test.corr = cor.test(x = x , y = y , method = method)
    pvalue = test.corr$p.value
    if (pvalue < 0.05) {
      interpretation = 'there is correlation'
    } else {
      interpretation = 'data do not give you any reason to conclude that the correlation is real'
    }
    return(list(test=method,pvalue=pvalue,estimate=test.corr$estimate,
                interpretation=interpretation))
  }
  ## resolve the absolute number of predictors to keep
  int_rel_th = abs_th
  if (! is.null(rel_th) ) {
    int_rel_th = floor(ncol(Xtrain) * rel_th)
  }
  ## corr analysis: predictors where cor.test fails (e.g. zero variance /
  ## NA p-value) get a dummy "error" result instead of aborting.
  ## tryCatch replaces the deprecated plyr::failwith (same semantics:
  ## return the default on error); the discarded setNames() call of the
  ## original was dead code and is removed.
  dummy = list(test = method, pvalue = 1, estimate = 0, interpretation = "error")
  aa = lapply(Xtrain , function(x) {
    tryCatch(getPvalueTypeIError(x = x, y = y),
             error = function(e) dummy)
  })
  ## assemble per-predictor test results (vapply, no <<- fill loop)
  aadf = data.frame(
    predictor = names(aa),
    test = vapply(aa, function(r) as.character(r$test), character(1)),
    pvalue = vapply(aa, function(r) as.numeric(r$pvalue), numeric(1)),
    estimate = vapply(aa, function(r) as.numeric(r$estimate), numeric(1)),
    interpretation = vapply(aa, function(r) as.character(r$interpretation), character(1)),
    stringsAsFactors = FALSE, row.names = NULL)
  ## rank by absolute correlation, strongest first
  aadf = aadf[order(abs(aadf$estimate) , decreasing = T), ]
  ## cut to the given threshold; seq_len avoids the 1:0 pitfall of the
  ## original when the threshold resolves to zero predictors
  aadf_cut = aadf[seq_len(min(int_rel_th, nrow(aadf))),,drop=F]
  return(list(
    Xtrain = Xtrain[,aadf_cut$predictor,drop=F],
    Xtest = Xtest[,aadf_cut$predictor,drop=F],
    test.results = aadf
  ))
}
\name{PRESS}
\alias{PRESS}
\alias{PRESS.lm}
\alias{PRESS.fmo}
\title{ PRESS }
\description{
Convenience function to calculate the PRESS criterion value of a fitted lm object.
}
\author{Andrew K. Smith}
\keyword{models} | /man/PRESS.Rd | no_license | cran/CombMSC | R | false | false | 231 | rd | \name{PRESS}
\alias{PRESS}
\alias{PRESS.lm}
\alias{PRESS.fmo}
\title{ PRESS }
\description{
Convenience function to calculate the PRESS criterion value of a fitted lm object.
}
\author{Andrew K. Smith}
\keyword{models} |
\name{tableau_shape_pal}
\alias{tableau_shape_pal}
\title{Tableau Shape Palettes (discrete)}
\usage{
tableau_shape_pal(palette = "default")
}
\arguments{
\item{palette}{Palette name. One of
\Sexpr[results=rd,stage=build]{ggthemes:::charopts(names(ggthemes::ggthemes_data$tableau$shapes))}.}
}
\description{
Shape palettes used by
\href{http://www.tableausoftware.com/}{Tableau}.
}
\examples{
show_shapes(tableau_shape_pal()(5))
}
\seealso{
Other shape tableau: \code{\link{scale_shape_tableau}}
}
| /man/tableau_shape_pal.Rd | no_license | tomhiatt/ggthemes | R | false | false | 510 | rd | \name{tableau_shape_pal}
\alias{tableau_shape_pal}
\title{Tableau Shape Palettes (discrete)}
\usage{
tableau_shape_pal(palette = "default")
}
\arguments{
\item{palette}{Palette name. One of
\Sexpr[results=rd,stage=build]{ggthemes:::charopts(names(ggthemes::ggthemes_data$tableau$shapes))}.}
}
\description{
Shape palettes used by
\href{http://www.tableausoftware.com/}{Tableau}.
}
\examples{
show_shapes(tableau_shape_pal()(5))
}
\seealso{
Other shape tableau: \code{\link{scale_shape_tableau}}
}
|
## casecross.R
## time-stratified case-crossover
## Oct 2011
## assumes date variable is called 'date'
## quicker version
## Time-stratified case-crossover analysis.
## Compares exposure on case days with exposure on control days drawn from
## the same time stratum, fitted as a conditional logistic regression
## (coxph with strata). 'data' must contain a 'date' column of class Date.
##
## Arguments:
##  formula:        outcome ~ exposure(s)
##  data:           data frame with one row per day and a 'date' column
##  exclusion:      days either side of the case day excluded as controls
##  stratalength:   stratum length in days (ignored when stratamonth=TRUE)
##  matchdow:       additionally match control days on day of the week
##  usefinalwindow: keep a final stratum shorter than 'stratalength'
##  matchconf:      optional confounder column name to match controls on
##  confrange:      max absolute difference allowed for 'matchconf' matching
##  stratamonth:    use calendar months as strata instead of fixed windows
## Returns an object of class 'casecross' holding the fitted model ('c.model'),
## the call, and counts of cases, case days and control days.
casecross<-function(formula, data, exclusion=2, stratalength=28,
matchdow=FALSE, usefinalwindow=FALSE, matchconf='',
confrange=0,stratamonth=FALSE){
outcome <- dow <- case <-timex <- dow.x <- dow.y <- matchday.x <- matchday.y <- windownum.x <- windownum.y <- NULL # Setting some variables to NULL first (for R CMD check)
thisdata<-data
## Checks
# inherits() instead of class()== : class() can have length > 1 (which
# errors inside if() from R 4.2) and subclasses of Date are also valid
if (!inherits(thisdata$date, "Date")){
stop("date variable must be in date format, see ?Dates")}
if (exclusion<0){stop("Minimum value for exclusion is zero")}
parts<-paste(formula)
dep<-parts[2] # dependent variable
indep<-parts[3] # independent variable(s)
if (length(formula)<=2){stop("Must be at least one independent variable")}
## original call with defaults (see amer package)
ans <- as.list(match.call())
frmls <- formals(deparse(ans[[1]]))
add <- which(!(names(frmls) %in% names(ans)))
call<-as.call(c(ans, frmls[add]))
# day of week as 0-6 (Sunday = 0)
thisdata$dow<-as.numeric(format(thisdata$date,'%w'));
## Slim down the data: keep only the variables needed by the model
f<-as.formula(paste(parts[2],parts[1],parts[3],'+date+dow'))
if (substr(matchconf,1,1)!=""){
f<-as.formula(paste(dep,"~",indep,'+date+dow+',matchconf))
}
datatouse<-model.frame(f,data=thisdata,na.action=na.omit) # remove cases with missing covariates
## Check for irregularly spaced data
if(any(diff(datatouse$date)>1)){
cat('Note, irregularly spaced data...\n')
cat('...check your data for missing days\n')
}
datediff<-as.numeric(datatouse$date)-min(as.numeric(thisdata$date)) # use minimum data in entire sample
time<-as.numeric(datediff)+1 # used as strata number
## Create strata
if (stratamonth==TRUE){
month<-as.numeric(format(datatouse$date,'%m'));
year<-as.numeric(format(datatouse$date,'%Y'));
matchday<-as.numeric(format(datatouse$date,'%d'));
yrdiff<-year-min(year);
windownum<-(yrdiff*12)+month;
}
if (stratamonth==FALSE){
## Get the earliest time and difference all dates from this time
## Increase strata windows in jumps of 'stratalength'
windownum<-floor(datediff/stratalength)+1
nwindows<-floor(nrow(thisdata)/stratalength)+1
matchday<-datediff-((windownum-1)*stratalength)+1 # Day number in strata
## Exclude the last window if it is less than 'stratalength'
# NOTE(review): 'datatouse' has no 'windownum' column, so this subset is
# always empty and the final-window exclusion below never runs. A proper
# fix would also have to re-subset the parallel vectors time/windownum/
# matchday -- confirm the intended behaviour before changing this.
lastwindow<-datatouse[datatouse$windownum==nwindows,]
if (nrow(lastwindow)>0){ # only apply to data sets with some data in the final window
lastlength<-max(time[windownum==nwindows])-
min(time[windownum==nwindows])+1
if (lastlength<stratalength&usefinalwindow==FALSE) datatouse <-
datatouse[windownum<nwindows,]
}
}
## Create the case data
n<-nrow(datatouse)
cases<-datatouse
cases$case<-1 # binary indicator of case
cases$timex<-1 # Needed for conditional logistic regression
cases$windownum<-windownum
cases$time<-time
cases$diffdays<-NA
cases$matchday<-matchday
posout<-sum(as.numeric(names(datatouse)==as.character(f[2]))*
(1:ncol(datatouse))) # get the position of the dependent variable
cases$outcome<-datatouse[,c(posout)]
# October 2011, removed nonzerocases
# Create a case number for matching
if (substr(matchconf,1,1)==""){
cases.tomerge<-subset(cases,select=c(matchday,time,outcome,windownum,dow))}
if (substr(matchconf,1,1)!=""){
also<-sum(as.numeric(names(cases)==matchconf)*(1:length(names(cases))))
cases.tomerge<-subset(cases,
select=c(matchday,time,outcome,windownum,dow,also))
}
ncases<-nrow(cases)
cases.tomerge$casenum<-1:ncases
# Duplicate case series to make controls: every day in a stratum acts as
# a potential control for every other day in the same stratum
maxwindows<-max(cases$windownum)
rowstorep<-NA
casenum<-NA
# Fix for missing windows (thanks to Yuming)
windowrange<-as.numeric(levels(as.factor(windownum)))
for (k in windowrange){
small=min(cases$time[cases$windownum==k])
large=max(cases$time[cases$windownum==k])
these<-rep(small:large,large-small+1)
rowstorep<-c(rowstorep,these)
casenum<-c(casenum,these[order(these)])
}
controls<-cases[rowstorep[2:length(rowstorep)],] # can fall over if there's missing data
controls<-subset(controls,select=c(-case,-timex,-time,-outcome))
# Replace case number
controls$casenum<-casenum[2:length(rowstorep)]
# Merge cases with controls by case number
controls<-merge(controls,cases.tomerge,by='casenum')
controls<-controls[controls$windownum.x==controls$windownum.y,] # must be in same stratum window
controls$case<-0 # binary indicator of case
controls$timex<-2 # Needed for conditional logistic regression
controls$diffdays<-abs(controls$matchday.x-controls$matchday.y)
controls<-controls[controls$diffdays>exclusion,] # remove the exclusion window
# match on day of the week
if (matchdow==TRUE){controls<-controls[controls$dow.x==controls$dow.y,]}
# match on a confounder
if (substr(matchconf,1,1)!=""){
one<- paste(matchconf,'.x',sep='')
two<- paste(matchconf,'.y',sep='')
find1<-grep(one,names(controls))
find2<-grep(two,names(controls))
matchdiff<-abs(controls[,find1]-controls[,find2])
controls<-controls[matchdiff<=confrange,]
controls<-subset(controls,select=c(-casenum,-dow.x,-dow.y,-matchday.x,-matchday.y,-windownum.x,-windownum.y,-find1,-find2))
findc<-sum(as.numeric(names(cases)==matchconf)*(1:length(names(cases))))
final.cases<-subset(cases,select=c(-dow,-matchday,-windownum,-findc))
}
if (substr(matchconf,1,1)==""){
controls<-subset(controls,select=c(-casenum,-dow.x,-dow.y,-matchday.x,-matchday.y,-windownum.x,-windownum.y))
final.cases<-subset(cases,select=c(-dow,-matchday,-windownum))
}
finished<-rbind(final.cases,controls)
## Remove empty controls
finished<-finished[finished$outcome>0,]
## Count the number of control days without a case day, and the total number of cases
onlycntl<-finished[finished$case==0,]
ncases<-nrow(table(onlycntl$time))
which.times=unique(onlycntl$time)
extra.only=final.cases[final.cases$time%in%which.times,]
ncontrols<-round(mean(as.numeric(table(onlycntl$time))),1)
## Run the conditional logistic regression
## (coxph/Surv/strata are from the survival package -- presumably attached
## by the enclosing package; confirm before running stand-alone)
finalformula<-as.formula(paste('Surv(timex,case)~',indep,'+strata(time)'))
c.model<-coxph(finalformula,
weights=outcome,
data=finished,method=c("breslow"))
toret<-list()
toret$call<-call
toret$c.model<-c.model
class(toret$c.model)<-"coxph"
toret$ncases<-sum(extra.only$outcome)
toret$ncasedays<-ncases
toret$ncontroldays<-ncontrols
class(toret)<-'casecross'
return(toret)
}
| /season/R/casecross.R | no_license | ingted/R-Examples | R | false | false | 6,815 | r | ## casecross.R
## time-stratified case-crossover
## Oct 2011
## assumes date variable is called 'date'
## quicker version
## Time-stratified case-crossover analysis: compares exposure on case days
## with exposure on control days from the same time stratum, fitted as a
## conditional logistic regression (coxph with strata). 'data' must contain
## a 'date' column of class Date. Returns an object of class 'casecross'.
casecross<-function(formula, data, exclusion=2, stratalength=28,
matchdow=FALSE, usefinalwindow=FALSE, matchconf='',
confrange=0,stratamonth=FALSE){
outcome <- dow <- case <-timex <- dow.x <- dow.y <- matchday.x <- matchday.y <- windownum.x <- windownum.y <- NULL # Setting some variables to NULL first (for R CMD check)
thisdata<-data
## Checks
# NOTE(review): class(x) != "Date" errors in R >= 4.2 when class(x) has
# length > 1; inherits(thisdata$date, "Date") would be safer -- confirm.
if (class(thisdata$date)!="Date"){
stop("date variable must be in date format, see ?Dates")}
if (exclusion<0){stop("Minimum value for exclusion is zero")}
parts<-paste(formula)
dep<-parts[2] # dependent variable
indep<-parts[3] # dependent variable
if (length(formula)<=2){stop("Must be at least one independent variable")}
## original call with defaults (see amer package)
ans <- as.list(match.call())
frmls <- formals(deparse(ans[[1]]))
add <- which(!(names(frmls) %in% names(ans)))
call<-as.call(c(ans, frmls[add]))
# day of week coded 0-6 (Sunday = 0)
thisdata$dow<-as.numeric(format(thisdata$date,'%w'));
## Slim down the data
f<-as.formula(paste(parts[2],parts[1],parts[3],'+date+dow'))
if (substr(matchconf,1,1)!=""){
f<-as.formula(paste(dep,"~",indep,'+date+dow+',matchconf))
}
datatouse<-model.frame(f,data=thisdata,na.action=na.omit) # remove cases with missing covariates
## Check for irregularly spaced data
if(any(diff(datatouse$date)>1)){
cat('Note, irregularly spaced data...\n')
cat('...check your data for missing days\n')
}
datediff<-as.numeric(datatouse$date)-min(as.numeric(thisdata$date)) # use minimum data in entire sample
time<-as.numeric(datediff)+1 # used as strata number
## Create strata
if (stratamonth==TRUE){
month<-as.numeric(format(datatouse$date,'%m'));
year<-as.numeric(format(datatouse$date,'%Y'));
matchday<-as.numeric(format(datatouse$date,'%d'));
yrdiff<-year-min(year);
windownum<-(yrdiff*12)+month;
}
if (stratamonth==FALSE){
## Get the earliest time and difference all dates from this time
## Increase strata windows in jumps of 'stratalength'
windownum<-floor(datediff/stratalength)+1
nwindows<-floor(nrow(thisdata)/stratalength)+1
matchday<-datediff-((windownum-1)*stratalength)+1 # Day number in strata
## Exclude the last window if it is less than 'stratalength'
# NOTE(review): 'datatouse' has no 'windownum' column, so this subset is
# always empty and the exclusion below never triggers -- confirm intent.
lastwindow<-datatouse[datatouse$windownum==nwindows,]
if (nrow(lastwindow)>0){ # only apply to data sets with some data in the final window
lastlength<-max(time[windownum==nwindows])-
min(time[windownum==nwindows])+1
if (lastlength<stratalength&usefinalwindow==FALSE) datatouse <-
datatouse[windownum<nwindows,]
}
}
## Create the case data
n<-nrow(datatouse)
cases<-datatouse
cases$case<-1 # binary indicator of case
cases$timex<-1 # Needed for conditional logistic regression
cases$windownum<-windownum
cases$time<-time
cases$diffdays<-NA
cases$matchday<-matchday
posout<-sum(as.numeric(names(datatouse)==as.character(f[2]))*
(1:ncol(datatouse))) # get the position of the dependent variable
cases$outcome<-datatouse[,c(posout)]
# October 2011, removed nonzerocases
# Create a case number for matching
if (substr(matchconf,1,1)==""){
cases.tomerge<-subset(cases,select=c(matchday,time,outcome,windownum,dow))}
if (substr(matchconf,1,1)!=""){
also<-sum(as.numeric(names(cases)==matchconf)*(1:length(names(cases))))
cases.tomerge<-subset(cases,
select=c(matchday,time,outcome,windownum,dow,also))
}
ncases<-nrow(cases)
cases.tomerge$casenum<-1:ncases
# Duplicate case series to make controls
# (every day in a stratum is a potential control for every other day)
maxwindows<-max(cases$windownum)
rowstorep<-NA
casenum<-NA
# Fix for missing windows (thanks to Yuming)
windowrange<-as.numeric(levels(as.factor(windownum)))
for (k in windowrange){
small=min(cases$time[cases$windownum==k])
large=max(cases$time[cases$windownum==k])
these<-rep(small:large,large-small+1)
rowstorep<-c(rowstorep,these)
casenum<-c(casenum,these[order(these)])
}
controls<-cases[rowstorep[2:length(rowstorep)],] # can fall over if there's missing data
controls<-subset(controls,select=c(-case,-timex,-time,-outcome))
# Replace case number
controls$casenum<-casenum[2:length(rowstorep)]
# Merge cases with controls by case number
controls<-merge(controls,cases.tomerge,by='casenum')
controls<-controls[controls$windownum.x==controls$windownum.y,] # must be in same stratum window
controls$case<-0 # binary indicator of case
controls$timex<-2 # Needed for conditional logistic regression
controls$diffdays<-abs(controls$matchday.x-controls$matchday.y)
controls<-controls[controls$diffdays>exclusion,] # remove the exclusion window
# match on day of the week
if (matchdow==TRUE){controls<-controls[controls$dow.x==controls$dow.y,]}
# match on a confounder
if (substr(matchconf,1,1)!=""){
one<- paste(matchconf,'.x',sep='')
two<- paste(matchconf,'.y',sep='')
find1<-grep(one,names(controls))
find2<-grep(two,names(controls))
matchdiff<-abs(controls[,find1]-controls[,find2])
controls<-controls[matchdiff<=confrange,]
controls<-subset(controls,select=c(-casenum,-dow.x,-dow.y,-matchday.x,-matchday.y,-windownum.x,-windownum.y,-find1,-find2))
findc<-sum(as.numeric(names(cases)==matchconf)*(1:length(names(cases))))
final.cases<-subset(cases,select=c(-dow,-matchday,-windownum,-findc))
}
if (substr(matchconf,1,1)==""){
controls<-subset(controls,select=c(-casenum,-dow.x,-dow.y,-matchday.x,-matchday.y,-windownum.x,-windownum.y))
final.cases<-subset(cases,select=c(-dow,-matchday,-windownum))
}
finished<-rbind(final.cases,controls)
## Remove empty controls
finished<-finished[finished$outcome>0,]
## Count the number of control days without a case day, and the total number of cases
onlycntl<-finished[finished$case==0,]
ncases<-nrow(table(onlycntl$time))
which.times=unique(onlycntl$time)
extra.only=final.cases[final.cases$time%in%which.times,]
ncontrols<-round(mean(as.numeric(table(onlycntl$time))),1)
## Run the conditional logistic regression
# coxph/Surv/strata are from the survival package -- presumably attached
# by the enclosing package; confirm before running stand-alone
c.model<-coxph(finalformula,
weights=outcome,
data=finished,method=c("breslow"))
finalformula<-as.formula(paste('Surv(timex,case)~',indep,'+strata(time)'))
toret<-list()
toret$call<-call
toret$c.model<-c.model
class(toret$c.model)<-"coxph"
toret$ncases<-sum(extra.only$outcome)
toret$ncasedays<-ncases
toret$ncontroldays<-ncontrols
class(toret)<-'casecross'
return(toret)
}
|
# Demonstrate matrix construction and the is.matrix() predicate.
# matrix() fills its data column by column, so each group of four
# values below becomes one column of m.
m <- matrix(
  c(   1,    2,    3,    4,
      11,   22,   33,   44,
     111,  222,  333,  444,
    1111, 2222, 3333, 4444),
  nrow = 4,
  ncol = 4
)
is.matrix(m)
# [1] TRUE
| /functions/is.matrix.R | no_license | ReneNyffenegger/about-r | R | false | false | 209 | r | m = matrix( c( 1, 2, 3, 4,
11, 22, 33, 44,
111, 222, 333, 444,
1111,2222,3333,4444),
nrow=4,
ncol=4)
is.matrix(m)
# [1] TRUE
|
## Code chunks extracted (knitr purl) from the CrispRVariants user-guide
## vignette. Requires Bioconductor packages (CrispRVariants, sangerseqR,
## rtracklayer, GenomicFeatures, gdata, gridExtra, AnnotationDbi); chunks
## whose header says eval = FALSE were not run and are left commented out.
## ---- eval = FALSE---------------------------------------------------------
# crispr_set <- readsToTarget(reads, target = target, reference = reference,
# target.loc = target.loc)
# plotVariants(crispr_set)
# # or use plotVariants(crispr_set, txdb) to additionally show the target
# # location with respect to the transcripts if a Transcript Database
# # txdb is available
## ---- message=FALSE, warning=FALSE-----------------------------------------
library(CrispRVariants)
library(sangerseqR)
# List AB1 filenames, get sequence names, make names for the fastq files
# Note that we only include one ab1 file with CrispRVariants because
# of space constraints. All bam files are included
data_dir <- system.file(package="CrispRVariants", "extdata/ab1/ptena")
fq_dir <- tempdir()
ab1_fnames <- dir(data_dir, "ab1$", recursive=TRUE, full=TRUE)
sq_nms <- gsub(".ab1","",basename(ab1_fnames))
# Replace spaces and slashes in filename with underscores
fq_fnames <- paste0(gsub("[\ |\\/]", "_", dirname(ab1_fnames)), ".fastq")
# abifToFastq to read AB1 files and write to FASTQ
dummy <- mapply( function(u,v,w) {
abifToFastq(u,v,file.path(fq_dir,w))
}, sq_nms, ab1_fnames, fq_fnames)
## ---- message=FALSE, warning = FALSE---------------------------------------
length(unique(ab1_fnames))
length(unique(fq_fnames))
## ---- message = FALSE, warning=FALSE, eval=FALSE---------------------------
# library("Rsamtools")
#
# # BWA indices were generated using bwa version 0.7.10
# bwa_index <- "GRCHz10.fa.gz"
# bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
# fq_fnames <- file.path(fq_dir,unique(fq_fnames))
# bm_fnames <- gsub(".fastq$",".bam",basename(fq_fnames))
# srt_bm_fnames <- file.path(bam_dir, gsub(".bam","_s",bm_fnames))
#
# # Map, sort and index the bam files, remove the unsorted bams
# for(i in 1:length(fq_fnames)) {
# cmd <- paste0("bwa mem ", bwa_index, " ", fq_fnames[i],
# " | samtools view -Sb - > ", bm_fnames[i])
# message(cmd, "\n"); system(cmd)
# indexBam(sortBam(bm_fnames[i],srt_bm_fnames[i]))
# unlink(bm_fnames[i])
# }
## ---- message=FALSE--------------------------------------------------------
# The metadata and bam files for this experiment are included with CrispRVariants
library("gdata")
md_fname <- system.file(package="CrispRVariants", "extdata/metadata/metadata.xls")
md <- gdata::read.xls(md_fname, 1)
md
# Get the bam filenames from the metadata table
bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
bam_fnames <- file.path(bam_dir, md$bamfile)
# check that all files exist
all( file.exists(bam_fnames) )
## ---- message=FALSE--------------------------------------------------------
library(rtracklayer)
# Represent the guide as a GenomicRanges::GRanges object
gd_fname <- system.file(package="CrispRVariants", "extdata/bed/guide.bed")
gd <- rtracklayer::import(gd_fname)
gd
## ---- message=FALSE--------------------------------------------------------
# Widen the guide range by 5 bases on each side (total +10)
gdl <- GenomicRanges::resize(gd, width(gd) + 10, fix = "center")
## ---- eval=FALSE-----------------------------------------------------------
# system("samtools faidx GRCHz10.fa.gz")
#
# reference=system(sprintf("samtools faidx GRCHz10.fa.gz %s:%s-%s",
# seqnames(gdl)[1], start(gdl)[1], end(gdl)[1]),
# intern = TRUE)[[2]]
#
# # The guide is on the negative strand, so the reference needs to be reverse complemented
# reference=Biostrings::reverseComplement(Biostrings::DNAString(reference))
# save(reference, file = "ptena_GRCHz10_ref.rda")
## --------------------------------------------------------------------------
# Load the pre-computed reference sequence shipped with the package
ref_fname <- system.file(package="CrispRVariants", "extdata/ptena_GRCHz10_ref.rda")
load(ref_fname)
reference
## ---- tidy = FALSE---------------------------------------------------------
# First read the alignments into R. The alignments must include
# the read sequences and the MD tag
alns <- GenomicAlignments::readGAlignments(bam_fnames[[1]],
param = Rsamtools::ScanBamParam(tag = "MD", what = c("seq", "flag")),
use.names = TRUE)
# Then reconstruct the reference for the target region.
# If no target region is given, this function will reconstruct
# the complete reference sequence for all reads.
rfa <- refFromAlns(alns, gdl)
# The reconstructed reference sequence is identical to the sequence
# extracted from the reference above
print(rfa == reference)
## ---- message=FALSE--------------------------------------------------------
# Note that the zero point (target.loc parameter) is 22
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22)
crispr_set
# The counts table can be accessed with the "variantCounts" function
vc <- variantCounts(crispr_set)
print(class(vc))
## ---- eval = FALSE---------------------------------------------------------
# # In R
# library(GenomicFeatures)
# gtf_fname <- "Danio_rerio.GRCz10.81_chr17.gtf"
# txdb <- GenomicFeatures::makeTxDbFromGFF(gtf_fname, format = "gtf")
# saveDb(txdb, file= "GRCz10_81_chr17_txdb.sqlite")
## ---- echo=FALSE, message=FALSE--------------------------------------------
library(GenomicFeatures)
txdb_fname <- system.file("extdata/GRCz10_81_ptena_txdb.sqlite",
package="CrispRVariants")
txdb <- loadDb(txdb_fname)
## ---- message = FALSE------------------------------------------------------
# The gridExtra package is required to specify the legend.key.height
# as a "unit" object. It is not needed to call plotVariants() with defaults
library(gridExtra)
# Match the clutch id to the column names of the variants
group <- md$Group
## ----ptena-plot, fig.width = 8.5, fig.height = 7.5, message = FALSE, fig.cap = "(Top) schematic of gene structure showing guide location (left) consensus sequences for variants (right) variant counts in each embryo."----
p <- plotVariants(crispr_set, txdb = txdb, gene.text.size = 8,
row.ht.ratio = c(1,8), col.wdth.ratio = c(4,2),
plotAlignments.args = list(line.weight = 0.5, ins.size = 2,
legend.symbol.size = 4),
plotFreqHeatmap.args = list(plot.text.size = 3, x.size = 8, group = group,
legend.text.size = 8,
legend.key.height = grid::unit(0.5, "lines")))
## --------------------------------------------------------------------------
# Calculate the mutation efficiency, excluding indels that occur in the "control" sample
# and further excluding the "control" sample from the efficiency calculation
eff <- mutationEfficiency(crispr_set, filter.cols = "control", exclude.cols = "control")
eff
# Suppose we just wanted to filter particular variants, not an entire sample.
# This can be done using the "filter.vars" argument
eff2 <- mutationEfficiency(crispr_set, filter.vars = "6:1D", exclude.cols = "control")
# The results are the same in this case as only one variant was filtered from the control
identical(eff,eff2)
## --------------------------------------------------------------------------
sqs <- consensusSeqs(crispr_set)
sqs
# The ptena guide is on the negative strand.
# Confirm that the reverse complement of the "no variant" allele
# matches the reference sequence:
Biostrings::reverseComplement(sqs[["no variant"]]) == reference
## --------------------------------------------------------------------------
# Inspect chimeric (split) alignments for one sample
ch <- getChimeras(crispr_set, sample = "ptena 4")
# Confirm that all chimeric alignments are part of the same read
length(unique(names(ch))) == 1
# Set up points to annotate on the plot
annotations <- c(resize(gd, 1, fix = "start"), resize(gd, 1, fix = "end"))
annotations$name <- c("ptena_start", "ptena_end")
plotChimeras(ch, annotations = annotations)
## --------------------------------------------------------------------------
mutationEfficiency(crispr_set, filter.cols = "control", exclude.cols = "control",
include.chimeras = FALSE)
## ---- fig.width = 8.5, fig.height = 7.5, message = FALSE, warning = FALSE----
crispr_set_rev <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22,
orientation = "opposite")
plotVariants(crispr_set_rev)
## ---- warning = FALSE------------------------------------------------------
# We create a longer region to use as the "target"
# and the corresponding reference sequence
gdl <- GenomicRanges::resize(gd, width(gd) + 20, fix = "center")
reference <- Biostrings::DNAString("TCATTGCCATGGGCTTTCCAGCCGAACGATTGGAAGGTGTTTA")
# At this stage, target should be the entire region to display and target.loc should
# be the zero point with respect to this region
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 10,
verbose = FALSE)
# Multiple guides are added at the stage of plotting
# The boundaries of the guide regions must be specified with respect to the
# given target region
p <- plotVariants(crispr_set,
plotAlignments.args = list(pam.start = c(6,35),
target.loc = c(10, 32),
guide.loc = IRanges::IRanges(c(6, 25),c(20, 37))))
p
## ---- message = FALSE------------------------------------------------------
# Setup for ptena data set
library("CrispRVariants")
library("rtracklayer")
library("GenomicFeatures")
library("gdata")
# Load the guide location
gd_fname <- system.file(package="CrispRVariants", "extdata/bed/guide.bed")
gd <- rtracklayer::import(gd_fname)
gdl <- resize(gd, width(gd) + 10, fix = "center")
# The saved reference sequence corresponds to the guide
# plus 5 bases on either side, i.e. gdl
ref_fname <- system.file(package="CrispRVariants",
"extdata/ptena_GRCHz10_ref.rda")
load(ref_fname)
# Load the metadata table, which gives the sample names
md_fname <- system.file(package="CrispRVariants",
"extdata/metadata/metadata.xls")
md <- gdata::read.xls(md_fname, 1)
# Get the list of bam files
bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
bam_fnames <- file.path(bam_dir, md$bamfile)
# Check that all files were found
all(file.exists(bam_fnames))
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22,
verbose = FALSE)
# Load the transcript database
txdb_fname <- system.file("extdata/GRCz10_81_ptena_txdb.sqlite",
package="CrispRVariants")
txdb <- AnnotationDbi::loadDb(txdb_fname)
# The remaining chunks demonstrate plotVariants()/plotAlignments() options
## ---- fig.height = 5, warning = FALSE--------------------------------------
p <- plotVariants(crispr_set, txdb = txdb)
## ---- fig.height = 5, warning = FALSE--------------------------------------
p <- plotVariants(crispr_set, txdb = txdb, row.ht.ratio = c(1,3))
## ---- fig.height = 5, message = FALSE, warning = FALSE---------------------
p <- plotVariants(crispr_set, txdb = txdb, col.wdth.ratio = c(4,1))
## --------------------------------------------------------------------------
# Load gol data set
library("CrispRVariants")
data("gol_clutch1")
## ---- fig.height = 2.5, message = FALSE, warning = FALSE-------------------
library(GenomicFeatures)
p <- plotVariants(gol, plotAlignments.args = list(top.n = 3),
plotFreqHeatmap.args = list(top.n = 3),
left.plot.margin = ggplot2::unit(c(0.1,0,5,0.2), "lines"))
## ---- fig.height = 2.5, message = FALSE, warning = FALSE-------------------
plotVariants(gol, plotAlignments.args = list(top.n = 3),
plotFreqHeatmap.args = list(top.n = 3, order = c(1,5,3)),
left.plot.margin = ggplot2::unit(c(0.1,0,5,0.2), "lines"))
## ---- fig.height = 2.5, warning = FALSE------------------------------------
plotAlignments(gol, top.n = 3, ins.size = 6)
## ---- fig.height = 2.5-----------------------------------------------------
plotAlignments(gol, top.n = 3, legend.symbol.size = 6)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 5, max.insertion.size = 25)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Here we set a fairly high value of 50% for min.insertion.freq
# As ambiguous nucleotides occur frequently in this data set,
# there are no alleles passing this cutoff.
plotAlignments(gol, top.n = 5, min.insertion.freq = 50)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 5, max.insertion.size = 25, min.insertion.freq = 50)
## ---- fig.height = 2.5, warning = FALSE------------------------------------
# No white space between rows
plotAlignments(gol, top.n = 3, tile.height = 1)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# More white space between rows
plotAlignments(gol, top.n = 3, tile.height = 0.3)
## ---- fig.height = 2.5, warning = FALSE------------------------------------
plotAlignments(gol, top.n = 3, highlight.guide = FALSE)
## ---- fig.height = 3, message = FALSE--------------------------------------
library(IRanges)
guide <- IRanges::IRanges(15,28)
plotAlignments(gol, top.n = 3, guide.loc = guide)
## ---- fig.height = 2.5-----------------------------------------------------
# Here we increase the size of the axis labels and make
# two columns for the legend
plotAlignments(gol, top.n = 5, axis.text.size = 12,
legend.text.size = 12, legend.cols = 2)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Don't highlight the PAM sequence
plotAlignments(gol, top.n = 3, highlight.pam = FALSE)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Highlight 3 bases upstream to 3 bases downstream of the target.loc
plotAlignments(gol, top.n = 3, pam.start = 19, pam.end = 25)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, guide.loc = IRanges(5,10),
pam.start = 8, pam.end = 13)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, line.weight = 3)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, codon.frame = 1)
## ---- eval = FALSE---------------------------------------------------------
# plot_data <- plotAlignments(gol, top.n = 3, create.plot = FALSE)
# names(plot_data)
# # This data can be modified as required, then replotted using:
# do.call(plotAlignments, plot_data)
## ----hmap-default, fig.height = 3, fig.width = 4, fig.align='center', fig.cap = "plotFreqHeatmap with default options"----
# Save the plot to a variable then add a title using ggplot2 syntax.
# If the plot is not saved to a variable the unmodified plot is displayed.
p <- plotFreqHeatmap(gol, top.n = 3)
p + labs(title = "A. plotFreqHeatmap with default options")
## ---- fig.height = 2.5, fig.width = 5, fig.align='center', fig.cap = "plotFreqHeatmap showing allele proportions"----
plotFreqHeatmap(gol, top.n = 3, type = "proportions")
## ---- fig.height = 2.5, fig.width = 4, fig.align='center', fig.cap = "plotFreqHeatmap with X-axis labels coloured by experimental group and tiles coloured by count instead of proportion"----
ncolumns <- ncol(variantCounts(gol))
ncolumns
grp <- rep(c(1,2), each = ncolumns/2)
p <- plotFreqHeatmap(gol, top.n = 3, group = grp, as.percent = FALSE)
p + labs(title = "B. coloured X labels with tiles coloured by count")
## ---- fig.height = 2.5, fig.width = 5, fig.align='center', fig.cap = "plotFreqHeatmap with labels showing allele proportions, header showing counts per sample and modified legend position."----
grp_clrs <- c("red", "purple")
p <- plotFreqHeatmap(gol, top.n = 3, group = grp, group.colours = grp_clrs,
type = "proportions", header = "counts",
legend.position = "bottom")
p <- p + labs(title = "C. Modified plotFreqHeatmap")
p
## ---- fig.height = 2.5, fig.width = 4, fig.align='center'------------------
plotFreqHeatmap(gol, top.n = 3,
legend.key.height = ggplot2::unit(1.5, "lines"))
## ---- eval = FALSE---------------------------------------------------------
# var_counts <- variantCounts(gol, top.n = 3)
# # (additional modifications to var_counts can be added here)
# plotFreqHeatmap(var_counts)
## ---- fig.height = 2.5-----------------------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb)
## ---- fig.height = 2.5, message = FALSE------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb, palette = "bluered")
## ---- fig.height = 2.5, message = FALSE------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb, include.table = FALSE)
## ---- fig.height = 2.5-----------------------------------------------------
var_counts <- variantCounts(crispr_set)
barplotAlleleFreqs(var_counts)
## ---- fig.height = 2.5-----------------------------------------------------
rainbowPal9 <- c("#781C81","#3F4EA1","#4683C1",
"#57A3AD","#6DB388","#B1BE4E",
"#DFA53A","#E7742F","#D92120")
barplotAlleleFreqs(var_counts, classify = FALSE, bar.colours = rainbowPal9)
## ---- fig.height = 2.5-----------------------------------------------------
# Classify variants as insertion/deletion/mixed
byType <- crispr_set$classifyVariantsByType()
byType
# Classify variants by their location, without considering size
byLoc <- crispr_set$classifyVariantsByLoc(txdb=txdb)
byLoc
# Coding variants can then be classified by setting a size cutoff
byLoc <- crispr_set$classifyCodingBySize(byLoc, cutoff = 6)
byLoc
# Combine filtering and variant classification, using barplotAlleleFreqs.matrix
vc <- variantCounts(crispr_set)
# Select variants that occur in at least two samples
keep <- names(which(rowSums(vc > 0) > 1))
keep
# Use this classification and the selected variants
barplotAlleleFreqs(vc[keep,], category.labels = byLoc[keep])
## ---- fig.height = 2.5-----------------------------------------------------
p <- plotAlignments(gol, top.n = 3)
p + theme(legend.margin = ggplot2::unit(0, "cm"))
## ---- fig.height = 1-------------------------------------------------------
# Get a reference sequence
library("CrispRVariants")
data("gol_clutch1")
ref <- gol$ref
#Then to make the plot:
plotAlignments(ref, alns = NULL, target.loc = 22, ins.sites = data.frame())
## ---- message = FALSE, warning = FALSE-------------------------------------
library(Biostrings)
library(CrispRVariants)
library(rtracklayer)
## ---- warning = FALSE------------------------------------------------------
# This is a small, manually generated data set with a variety of different mutations
bam_fname <- system.file("extdata", "cntnap2b_test_data_s.bam",
package = "CrispRVariants")
guide_fname <- system.file("extdata", "cntnap2b_test_data_guide.bed",
package = "CrispRVariants")
guide <- rtracklayer::import(guide_fname)
guide <- guide + 5
reference <- Biostrings::DNAString("TAGGCGAATGAAGTCGGGGTTGCCCAGGTTCTC")
cset <- readsToTarget(bam_fname, guide, reference = reference, verbose = FALSE,
name = "Default")
cset2 <- readsToTarget(bam_fname, guide, reference = reference, verbose = FALSE,
chimera.to.target = 100, name = "Including long dels")
default_var_counts <- variantCounts(cset)
print(default_var_counts)
print(c("Total number of reads: ", colSums(default_var_counts)))
# With chimera.to.target = 100, an additional read representing a large deletion is
# reported in the "Other" category.
var_counts_inc_long_dels <- variantCounts(cset2)
print(var_counts_inc_long_dels)
print(c("Total number of reads: ", colSums(var_counts_inc_long_dels)))
# This alignment can be viewed using `plotChimeras`
ch <- getChimeras(cset2, sample = 1)
plotChimeras(ch, annotations = cset2$target)
| /inst/doc/user_guide.R | no_license | HLindsay/CrispRVariants | R | false | false | 20,184 | r | ## ---- eval = FALSE---------------------------------------------------------
# crispr_set <- readsToTarget(reads, target = target, reference = reference,
# target.loc = target.loc)
# plotVariants(crispr_set)
# # or use plotVariants(crispr_set, txdb) to additionally show the target
# # location with respect to the transcripts if a Transcript Database
# # txdb is available
## ---- message=FALSE, warning=FALSE-----------------------------------------
library(CrispRVariants)
library(sangerseqR)
# List AB1 filenames, get sequence names, make names for the fastq files
# Note that we only include one ab1 file with CrispRVariants because
# of space constraints. All bam files are included
data_dir <- system.file(package="CrispRVariants", "extdata/ab1/ptena")
fq_dir <- tempdir()
ab1_fnames <- dir(data_dir, "ab1$", recursive=TRUE, full=TRUE)
sq_nms <- gsub(".ab1","",basename(ab1_fnames))
# Replace spaces and slashes in filename with underscores
fq_fnames <- paste0(gsub("[\ |\\/]", "_", dirname(ab1_fnames)), ".fastq")
# abifToFastq to read AB1 files and write to FASTQ
dummy <- mapply( function(u,v,w) {
abifToFastq(u,v,file.path(fq_dir,w))
}, sq_nms, ab1_fnames, fq_fnames)
## ---- message=FALSE, warning = FALSE---------------------------------------
length(unique(ab1_fnames))
length(unique(fq_fnames))
## ---- message = FALSE, warning=FALSE, eval=FALSE---------------------------
# library("Rsamtools")
#
# # BWA indices were generated using bwa version 0.7.10
# bwa_index <- "GRCHz10.fa.gz"
# bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
# fq_fnames <- file.path(fq_dir,unique(fq_fnames))
# bm_fnames <- gsub(".fastq$",".bam",basename(fq_fnames))
# srt_bm_fnames <- file.path(bam_dir, gsub(".bam","_s",bm_fnames))
#
# # Map, sort and index the bam files, remove the unsorted bams
# for(i in 1:length(fq_fnames)) {
# cmd <- paste0("bwa mem ", bwa_index, " ", fq_fnames[i],
# " | samtools view -Sb - > ", bm_fnames[i])
# message(cmd, "\n"); system(cmd)
# indexBam(sortBam(bm_fnames[i],srt_bm_fnames[i]))
# unlink(bm_fnames[i])
# }
## ---- message=FALSE--------------------------------------------------------
# The metadata and bam files for this experiment are included with CrispRVariants
library("gdata")
md_fname <- system.file(package="CrispRVariants", "extdata/metadata/metadata.xls")
md <- gdata::read.xls(md_fname, 1)
md
# Get the bam filenames from the metadata table
bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
bam_fnames <- file.path(bam_dir, md$bamfile)
# check that all files exist
all( file.exists(bam_fnames) )
## ---- message=FALSE--------------------------------------------------------
library(rtracklayer)
# Represent the guide as a GenomicRanges::GRanges object
gd_fname <- system.file(package="CrispRVariants", "extdata/bed/guide.bed")
gd <- rtracklayer::import(gd_fname)
gd
## ---- message=FALSE--------------------------------------------------------
gdl <- GenomicRanges::resize(gd, width(gd) + 10, fix = "center")
## ---- eval=FALSE-----------------------------------------------------------
# system("samtools faidx GRCHz10.fa.gz")
#
# reference=system(sprintf("samtools faidx GRCHz10.fa.gz %s:%s-%s",
# seqnames(gdl)[1], start(gdl)[1], end(gdl)[1]),
# intern = TRUE)[[2]]
#
# # The guide is on the negative strand, so the reference needs to be reverse complemented
# reference=Biostrings::reverseComplement(Biostrings::DNAString(reference))
# save(reference, file = "ptena_GRCHz10_ref.rda")
## --------------------------------------------------------------------------
ref_fname <- system.file(package="CrispRVariants", "extdata/ptena_GRCHz10_ref.rda")
load(ref_fname)
reference
## ---- tidy = FALSE---------------------------------------------------------
# First read the alignments into R. The alignments must include
# the read sequences and the MD tag
alns <- GenomicAlignments::readGAlignments(bam_fnames[[1]],
param = Rsamtools::ScanBamParam(tag = "MD", what = c("seq", "flag")),
use.names = TRUE)
# Then reconstruct the reference for the target region.
# If no target region is given, this function will reconstruct
# the complete reference sequence for all reads.
rfa <- refFromAlns(alns, gdl)
# The reconstructed reference sequence is identical to the sequence
# extracted from the reference above
print(rfa == reference)
## ---- message=FALSE--------------------------------------------------------
# Note that the zero point (target.loc parameter) is 22
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22)
crispr_set
# The counts table can be accessed with the "variantCounts" function
vc <- variantCounts(crispr_set)
print(class(vc))
## ---- eval = FALSE---------------------------------------------------------
# # In R
# library(GenomicFeatures)
# gtf_fname <- "Danio_rerio.GRCz10.81_chr17.gtf"
# txdb <- GenomicFeatures::makeTxDbFromGFF(gtf_fname, format = "gtf")
# saveDb(txdb, file= "GRCz10_81_chr17_txdb.sqlite")
## ---- echo=FALSE, message=FALSE--------------------------------------------
library(GenomicFeatures)
txdb_fname <- system.file("extdata/GRCz10_81_ptena_txdb.sqlite",
package="CrispRVariants")
txdb <- loadDb(txdb_fname)
## ---- message = FALSE------------------------------------------------------
# The gridExtra package is required to specify the legend.key.height
# as a "unit" object. It is not needed to call plotVariants() with defaults
library(gridExtra)
# Match the clutch id to the column names of the variants
group <- md$Group
## ----ptena-plot, fig.width = 8.5, fig.height = 7.5, message = FALSE, fig.cap = "(Top) schematic of gene structure showing guide location (left) consensus sequences for variants (right) variant counts in each embryo."----
p <- plotVariants(crispr_set, txdb = txdb, gene.text.size = 8,
row.ht.ratio = c(1,8), col.wdth.ratio = c(4,2),
plotAlignments.args = list(line.weight = 0.5, ins.size = 2,
legend.symbol.size = 4),
plotFreqHeatmap.args = list(plot.text.size = 3, x.size = 8, group = group,
legend.text.size = 8,
legend.key.height = grid::unit(0.5, "lines")))
## --------------------------------------------------------------------------
# Calculate the mutation efficiency, excluding indels that occur in the "control" sample
# and further excluding the "control" sample from the efficiency calculation
eff <- mutationEfficiency(crispr_set, filter.cols = "control", exclude.cols = "control")
eff
# Suppose we just wanted to filter particular variants, not an entire sample.
# This can be done using the "filter.vars" argument
eff2 <- mutationEfficiency(crispr_set, filter.vars = "6:1D", exclude.cols = "control")
# The results are the same in this case as only one variant was filtered from the control
identical(eff,eff2)
## --------------------------------------------------------------------------
sqs <- consensusSeqs(crispr_set)
sqs
# The ptena guide is on the negative strand.
# Confirm that the reverse complement of the "no variant" allele
# matches the reference sequence:
Biostrings::reverseComplement(sqs[["no variant"]]) == reference
## --------------------------------------------------------------------------
ch <- getChimeras(crispr_set, sample = "ptena 4")
# Confirm that all chimeric alignments are part of the same read
length(unique(names(ch))) == 1
# Set up points to annotate on the plot
annotations <- c(resize(gd, 1, fix = "start"), resize(gd, 1, fix = "end"))
annotations$name <- c("ptena_start", "ptena_end")
plotChimeras(ch, annotations = annotations)
## --------------------------------------------------------------------------
mutationEfficiency(crispr_set, filter.cols = "control", exclude.cols = "control",
include.chimeras = FALSE)
## ---- fig.width = 8.5, fig.height = 7.5, message = FALSE, warning = FALSE----
crispr_set_rev <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22,
orientation = "opposite")
plotVariants(crispr_set_rev)
## ---- warning = FALSE------------------------------------------------------
# We create a longer region to use as the "target"
# and the corresponding reference sequence
gdl <- GenomicRanges::resize(gd, width(gd) + 20, fix = "center")
reference <- Biostrings::DNAString("TCATTGCCATGGGCTTTCCAGCCGAACGATTGGAAGGTGTTTA")
# At this stage, target should be the entire region to display and target.loc should
# be the zero point with respect to this region
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 10,
verbose = FALSE)
# Multiple guides are added at the stage of plotting
# The boundaries of the guide regions must be specified with respect to the
# given target region
p <- plotVariants(crispr_set,
plotAlignments.args = list(pam.start = c(6,35),
target.loc = c(10, 32),
guide.loc = IRanges::IRanges(c(6, 25),c(20, 37))))
p
## ---- message = FALSE------------------------------------------------------
# Setup for ptena data set
library("CrispRVariants")
library("rtracklayer")
library("GenomicFeatures")
library("gdata")
# Load the guide location
gd_fname <- system.file(package="CrispRVariants", "extdata/bed/guide.bed")
gd <- rtracklayer::import(gd_fname)
gdl <- resize(gd, width(gd) + 10, fix = "center")
# The saved reference sequence corresponds to the guide
# plus 5 bases on either side, i.e. gdl
ref_fname <- system.file(package="CrispRVariants",
"extdata/ptena_GRCHz10_ref.rda")
load(ref_fname)
# Load the metadata table, which gives the sample names
md_fname <- system.file(package="CrispRVariants",
"extdata/metadata/metadata.xls")
md <- gdata::read.xls(md_fname, 1)
# Get the list of bam files
bam_dir <- system.file(package="CrispRVariants", "extdata/bam")
bam_fnames <- file.path(bam_dir, md$bamfile)
# Check that all files were found
all(file.exists(bam_fnames))
crispr_set <- readsToTarget(bam_fnames, target = gdl, reference = reference,
names = md$Short.name, target.loc = 22,
verbose = FALSE)
# Load the transcript database
txdb_fname <- system.file("extdata/GRCz10_81_ptena_txdb.sqlite",
package="CrispRVariants")
txdb <- AnnotationDbi::loadDb(txdb_fname)
## ---- fig.height = 5, warning = FALSE--------------------------------------
p <- plotVariants(crispr_set, txdb = txdb)
## ---- fig.height = 5, warning = FALSE--------------------------------------
p <- plotVariants(crispr_set, txdb = txdb, row.ht.ratio = c(1,3))
## ---- fig.height = 5, message = FALSE, warning = FALSE---------------------
p <- plotVariants(crispr_set, txdb = txdb, col.wdth.ratio = c(4,1))
## --------------------------------------------------------------------------
# Load gol data set
library("CrispRVariants")
data("gol_clutch1")
## ---- fig.height = 2.5, message = FALSE, warning = FALSE-------------------
library(GenomicFeatures)
p <- plotVariants(gol, plotAlignments.args = list(top.n = 3),
plotFreqHeatmap.args = list(top.n = 3),
left.plot.margin = ggplot2::unit(c(0.1,0,5,0.2), "lines"))
## ---- fig.height = 2.5, message = FALSE, warning = FALSE-------------------
plotVariants(gol, plotAlignments.args = list(top.n = 3),
plotFreqHeatmap.args = list(top.n = 3, order = c(1,5,3)),
left.plot.margin = ggplot2::unit(c(0.1,0,5,0.2), "lines"))
## ---- fig.height = 2.5, warning = FALSE------------------------------------
plotAlignments(gol, top.n = 3, ins.size = 6)
## ---- fig.height = 2.5-----------------------------------------------------
plotAlignments(gol, top.n = 3, legend.symbol.size = 6)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 5, max.insertion.size = 25)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Here we set a fairly high value of 50% for min.insertion.freq
# As ambiguous nucleotides occur frequently in this data set,
# there are no alleles passing this cutoff.
plotAlignments(gol, top.n = 5, min.insertion.freq = 50)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 5, max.insertion.size = 25, min.insertion.freq = 50)
## ---- fig.height = 2.5, warning = FALSE------------------------------------
# No white space between rows
plotAlignments(gol, top.n = 3, tile.height = 1)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# More white space between rows
plotAlignments(gol, top.n = 3, tile.height = 0.3)
## ---- fig.height = 2.5, warning = FALSE------------------------------------
plotAlignments(gol, top.n = 3, highlight.guide = FALSE)
## ---- fig.height = 3, message = FALSE--------------------------------------
library(IRanges)
guide <- IRanges::IRanges(15,28)
plotAlignments(gol, top.n = 3, guide.loc = guide)
## ---- fig.height = 2.5-----------------------------------------------------
# Here we increase the size of the axis labels and make
# two columns for the legend
plotAlignments(gol, top.n = 5, axis.text.size = 12,
legend.text.size = 12, legend.cols = 2)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Don't highlight the PAM sequence
plotAlignments(gol, top.n = 3, highlight.pam = FALSE)
## ---- fig.height = 3, warning = FALSE--------------------------------------
# Highlight 3 bases upstream to 3 bases downstream of the target.loc
plotAlignments(gol, top.n = 3, pam.start = 19, pam.end = 25)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, guide.loc = IRanges(5,10),
pam.start = 8, pam.end = 13)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, line.weight = 3)
## ---- fig.height = 3, warning = FALSE--------------------------------------
plotAlignments(gol, top.n = 3, codon.frame = 1)
## ---- eval = FALSE---------------------------------------------------------
# plot_data <- plotAlignments(gol, top.n = 3, create.plot = FALSE)
# names(plot_data)
# # This data can be modified as required, then replotted using:
# do.call(plotAlignments, plot_data)
## ----hmap-default, fig.height = 3, fig.width = 4, fig.align='center', fig.cap = "plotFreqHeatmap with default options"----
# Save the plot to a variable then add a title using ggplot2 syntax.
# If the plot is not saved to a variable the unmodified plot is displayed.
p <- plotFreqHeatmap(gol, top.n = 3)
p + labs(title = "A. plotFreqHeatmap with default options")
## ---- fig.height = 2.5, fig.width = 5, fig.align='center', fig.cap = "plotFreqHeatmap showing allele proportions"----
plotFreqHeatmap(gol, top.n = 3, type = "proportions")
## ---- fig.height = 2.5, fig.width = 4, fig.align='center', fig.cap = "plotFreqHeatmap with X-axis labels coloured by experimental group and tiles coloured by count instead of proportion"----
ncolumns <- ncol(variantCounts(gol))
ncolumns
grp <- rep(c(1,2), each = ncolumns/2)
p <- plotFreqHeatmap(gol, top.n = 3, group = grp, as.percent = FALSE)
p + labs(title = "B. coloured X labels with tiles coloured by count")
## ---- fig.height = 2.5, fig.width = 5, fig.align='center', fig.cap = "plotFreqHeatmap with labels showing allele proportions, header showing counts per sample and modified legend position."----
grp_clrs <- c("red", "purple")
p <- plotFreqHeatmap(gol, top.n = 3, group = grp, group.colours = grp_clrs,
type = "proportions", header = "counts",
legend.position = "bottom")
p <- p + labs(title = "C. Modified plotFreqHeatmap")
p
## ---- fig.height = 2.5, fig.width = 4, fig.align='center'------------------
plotFreqHeatmap(gol, top.n = 3,
legend.key.height = ggplot2::unit(1.5, "lines"))
## ---- eval = FALSE---------------------------------------------------------
# var_counts <- variantCounts(gol, top.n = 3)
# # (additional modifications to var_counts can be added here)
# plotFreqHeatmap(var_counts)
## ---- fig.height = 2.5-----------------------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb)
## ---- fig.height = 2.5, message = FALSE------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb, palette = "bluered")
## ---- fig.height = 2.5, message = FALSE------------------------------------
barplotAlleleFreqs(crispr_set, txdb = txdb, include.table = FALSE)
## ---- fig.height = 2.5-----------------------------------------------------
var_counts <- variantCounts(crispr_set)
barplotAlleleFreqs(var_counts)
## ---- fig.height = 2.5-----------------------------------------------------
rainbowPal9 <- c("#781C81","#3F4EA1","#4683C1",
"#57A3AD","#6DB388","#B1BE4E",
"#DFA53A","#E7742F","#D92120")
barplotAlleleFreqs(var_counts, classify = FALSE, bar.colours = rainbowPal9)
## ---- fig.height = 2.5-----------------------------------------------------
# Classify variants as insertion/deletion/mixed
byType <- crispr_set$classifyVariantsByType()
byType
# Classify variants by their location, without considering size
byLoc <- crispr_set$classifyVariantsByLoc(txdb=txdb)
byLoc
# Coding variants can then be classified by setting a size cutoff
byLoc <- crispr_set$classifyCodingBySize(byLoc, cutoff = 6)
byLoc
# Combine filtering and variant classification, using barplotAlleleFreqs.matrix
vc <- variantCounts(crispr_set)
# Select variants that occur in at least two samples
keep <- names(which(rowSums(vc > 0) > 1))
keep
# Use this classification and the selected variants
barplotAlleleFreqs(vc[keep,], category.labels = byLoc[keep])
## ---- fig.height = 2.5-----------------------------------------------------
p <- plotAlignments(gol, top.n = 3)
p + theme(legend.margin = ggplot2::unit(0, "cm"))
## ---- fig.height = 1-------------------------------------------------------
# Get a reference sequence
library("CrispRVariants")
data("gol_clutch1")
ref <- gol$ref
#Then to make the plot:
plotAlignments(ref, alns = NULL, target.loc = 22, ins.sites = data.frame())
## ---- message = FALSE, warning = FALSE-------------------------------------
library(Biostrings)
library(CrispRVariants)
library(rtracklayer)
## ---- warning = FALSE------------------------------------------------------
# This is a small, manually generated data set with a variety of different mutations
bam_fname <- system.file("extdata", "cntnap2b_test_data_s.bam",
package = "CrispRVariants")
guide_fname <- system.file("extdata", "cntnap2b_test_data_guide.bed",
package = "CrispRVariants")
guide <- rtracklayer::import(guide_fname)
guide <- guide + 5
reference <- Biostrings::DNAString("TAGGCGAATGAAGTCGGGGTTGCCCAGGTTCTC")
cset <- readsToTarget(bam_fname, guide, reference = reference, verbose = FALSE,
name = "Default")
cset2 <- readsToTarget(bam_fname, guide, reference = reference, verbose = FALSE,
chimera.to.target = 100, name = "Including long dels")
default_var_counts <- variantCounts(cset)
print(default_var_counts)
print(c("Total number of reads: ", colSums(default_var_counts)))
# With chimera.to.target = 100, an additional read representing a large deletion is
# reported in the "Other" category.
var_counts_inc_long_dels <- variantCounts(cset2)
print(var_counts_inc_long_dels)
print(c("Total number of reads: ", colSums(var_counts_inc_long_dels)))
# This alignment can be viewed using `plotChimeras`
ch <- getChimeras(cset2, sample = 1)
plotChimeras(ch, annotations = cset2$target)
|
# S3 generic: extract the DynamoDB table name from an arbitrary object.
# Dispatches to a class-specific method; see the methods below.
get_tablename <- function(x) UseMethod("get_tablename")
# Default method: assume `x` is already a usable table name and return it
# unchanged.
get_tablename.default <- function(x) x
# Character method: a plain string is taken to be the table name itself.
get_tablename.character <- function(x) x
# Method for `aws_dynamodb_table` objects: pull the table name out of the
# object's "TableName" entry.
get_tablename.aws_dynamodb_table <- function(x) {
  # Use [[ rather than $ so that an object lacking an exact "TableName"
  # entry yields NULL instead of silently partial-matching another field.
  x[["TableName"]]
}
# Convert a named R list into the DynamoDB attribute-value wire format,
# wrapping each element in a one-entry list keyed by its DynamoDB type code:
#   NULL -> "NULL", nested list -> "M" (map) or "L" (list), raw -> "B"
#   (base64), logical -> "BOOL", numeric -> "N"/"NS", anything else -> "S"/"SS".
# Returns a list parallel to `item`, carrying the same names.
# NOTE(review): DynamoDB's JSON API expects "N"/"NS" values as strings;
# numbers are passed through unchanged here on the assumption that the
# caller's JSON serializer handles the conversion -- confirm upstream.
map_attributes <- function(item) {
  item_formatted <- list()
  for (i in seq_along(item)) {
    if (is.null(item[[i]])) {
      # "NULL" must be quoted: the reserved word NULL cannot be used as an
      # argument name (list(NULL = TRUE) is a parse error).
      item_formatted[[i]] <- list("NULL" = TRUE)
    } else if (is.list(item[[i]])) {
      nms <- names(item[[i]])
      # A list with no names at all (names() is NULL) or with any empty name
      # is a DynamoDB list ("L"); a fully named list is a map ("M").
      if (is.null(nms) || any(nms %in% "")) {
        item_formatted[[i]] <- list(L = unname(item[[i]]))
      } else {
        item_formatted[[i]] <- list(M = item[[i]])
      }
    } else if (is.raw(item[[i]])) {
      # Binary payloads must be base64-encoded for transport
      item_formatted[[i]] <- list(B = jsonlite::base64_enc(item[[i]]))
    } else if (is.logical(item[[i]])) {
      item_formatted[[i]] <- list(BOOL = item[[i]])
    } else if (is.numeric(item[[i]])) {
      # Scalar numbers map to "N", vectors to a number set "NS"
      if (length(item[[i]]) == 1L) {
        item_formatted[[i]] <- list(N = item[[i]])
      } else {
        item_formatted[[i]] <- list(NS = item[[i]])
      }
    } else {
      # Fallback: coerce to character ("S" scalar / "SS" string set)
      if (length(item[[i]]) == 1L) {
        item_formatted[[i]] <- list(S = as.character(item[[i]]))
      } else {
        item_formatted[[i]] <- list(SS = as.character(item[[i]]))
      }
    }
  }
  names(item_formatted) <- names(item)
  return(item_formatted)
}
| /R/utils.R | no_license | peetermeos/aws.dynamodb | R | false | false | 1,456 | r | get_tablename <- function(x) {
UseMethod("get_tablename")
}
# Default method of the get_tablename() generic: pass `x` through untouched,
# treating it as an already-resolved table name.
get_tablename.default <- function(x) x
# Character method of the get_tablename() generic: the string is the name.
get_tablename.character <- function(x) x
# aws_dynamodb_table method of the get_tablename() generic: read the
# "TableName" field from the table object.
get_tablename.aws_dynamodb_table <- function(x) {
  # [[ avoids $'s partial matching, so a missing "TableName" returns NULL
  # rather than accidentally matching a similarly named field.
  x[["TableName"]]
}
# Encode a named R list into DynamoDB attribute-value form. Each element is
# wrapped as a one-entry list keyed by its DynamoDB type descriptor:
# "NULL", "M"/"L" for named/unnamed lists, "B" (base64 raw), "BOOL",
# "N"/"NS" for numerics, and "S"/"SS" as the character fallback.
# The result carries the same names as `item`.
map_attributes <- function(item) {
  # Encode a single value into its DynamoDB attribute-value wrapper.
  encode_value <- function(value) {
    if (is.null(value)) {
      # "NULL" must be quoted: NULL is a reserved word and cannot appear as
      # a bare argument name (list(NULL = TRUE) would not parse).
      return(list("NULL" = TRUE))
    }
    if (is.list(value)) {
      nms <- names(value)
      # Fully or partially unnamed lists (names() NULL, or any empty name)
      # become DynamoDB lists ("L"); fully named lists become maps ("M").
      if (is.null(nms) || any(nms %in% "")) {
        return(list(L = unname(value)))
      }
      return(list(M = value))
    }
    if (is.raw(value)) {
      # Binary payloads are base64-encoded for transport
      return(list(B = jsonlite::base64_enc(value)))
    }
    if (is.logical(value)) {
      return(list(BOOL = value))
    }
    if (is.numeric(value)) {
      # Scalar -> "N", vector -> number set "NS"
      if (length(value) == 1L) list(N = value) else list(NS = value)
    } else {
      # Fallback: coerce to character; scalar -> "S", vector -> string set "SS"
      if (length(value) == 1L) {
        list(S = as.character(value))
      } else {
        list(SS = as.character(value))
      }
    }
  }
  out <- lapply(item, encode_value)
  names(out) <- names(item)
  out
}
|
# build_sql() should refuse to run without a live connection; the error is
# recorded as a snapshot rather than matched against exact wording.
# NOTE: expect_snapshot() stores the deparsed call text in the snapshot
# file, so the variable name and call must stay byte-stable.
test_that("build_sql() requires connection", {
x <- ident("TABLE")
expect_snapshot(error = TRUE, build_sql("SELECT * FROM ", x))
})
| /tests/testthat/test-build-sql.R | permissive | mgirlich/dbplyr | R | false | false | 136 | r | test_that("build_sql() requires connection", {
x <- ident("TABLE")
expect_snapshot(error = TRUE, build_sql("SELECT * FROM ", x))
})
|
# Minimal Shiny app demonstrating the `forcer` reactR htmlwidget.
library(shiny)
library(forcer)

# UI: a page title plus the widget's output container.
ui <- fluidPage(
  titlePanel("reactR HTMLWidget Example"),
  forcerOutput('widgetOutput')
)

# Server: render the widget with a fixed greeting message.
server <- function(input, output, session) {
  output$widgetOutput <- renderForcer(
    forcer("Hello world!")
  )
}

# Launch the application.
shinyApp(ui, server)
shinyApp(ui, server) | /app.R | permissive | react-R/forcer | R | false | false | 264 | r | library(shiny)
library(forcer)
ui <- fluidPage(
titlePanel("reactR HTMLWidget Example"),
forcerOutput('widgetOutput')
)
server <- function(input, output, session) {
output$widgetOutput <- renderForcer(
forcer("Hello world!")
)
}
shinyApp(ui, server) |
\name{Data}
\alias{Data}
\alias{DataCodominant}
\alias{DataDominant}
\alias{DataContingency}
\title{Data preparation}
\description{Read a text file with coordinates and markers in columns and individuals in rows.}
\usage{DataDominant(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")
DataCodominant(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")
DataContingency(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")}
\arguments{
\item{input_file}{Path of the input text file.
For dominant or codominant data, each row contains the name of the individual, the two coordinates (either abscissa and ordinates, or longitude and
latitude), and the genetic data in succession.
For contingency table, each row corresponds to a sampled point, with the name of the point, its coordinates, and the number of individuals for each modality of each variable.}
\item{conversion}{0 if the coordinates are cartesians, 1 if they are in degree and therefore need to be converted to cartesians.}
\item{nb_x,nb_y}{number of pixels in width and length of the grid.}
\item{output_coords}{the name of the file where the kilometer coordinates will be saved to. Default value is "coord\_km.txt".}
}
\value{a list of six items :
\item{spatial coordinates of individuals}{a matrix with one line per individual, and two columns containing abscissa and ordinates of individuals, (x,y).}
\item{genetic_encoded}{the genetic data, containing one column per locus. If data are dominant, it's the same table as the input file.}
\item{grid}{a list of the vector of x, and the vector of y.}
\item{cvx_vertices}{the vertices of the convex hull of sampling area (same format than individuals coordinates).}
\item{cvx_matrix}{a matrix containing a 1 if the corresponding point of the grid is in the convex hull, and a 0 otherwise.}
\item{nb_individual}{the number of individuals in the dataset.}
}
\keyword{manip}
| /man/Data.Rd | no_license | cran/wombsoft | R | false | false | 1,949 | rd | \name{Data}
\alias{Data}
\alias{DataCodominant}
\alias{DataDominant}
\alias{DataContingency}
\title{Data preparation}
\description{Read a text file with coordinates and markers in columns and individuals in rows.}
\usage{DataDominant(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")
DataCodominant(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")
DataContingency(input_file,conversion,nb_x,nb_y,output_coords="coord_km.txt")}
\arguments{
\item{input_file}{Path of the input text file.
For dominant or codominant data, each row contains the name of the individual, the two coordinates (either abscissa and ordinates, or longitude and
latitude), and the genetic data in succession.
For contingency table, each row corresponds to a sampled point, with the name of the point, its coordinates, and the number of individuals for each modality of each variable.}
\item{conversion}{0 if the coordinates are cartesians, 1 if they are in degree and therefore need to be converted to cartesians.}
\item{nb_x,nb_y}{number of pixels in width and length of the grid.}
\item{output_coords}{the name of the file where the kilometer coordinates will be saved to. Default value is "coord\_km.txt".}
}
\value{a list of six items :
\item{spatial coordinates of individuals}{a matrix with one line per individual, and two columns containing abscissa and ordinates of individuals, (x,y).}
\item{genetic_encoded}{the genetic data, containing one column per locus. If data are dominant, it's the same table as the input file.}
\item{grid}{a list of the vector of x, and the vector of y.}
\item{cvx_vertices}{the vertices of the convex hull of sampling area (same format than individuals coordinates).}
\item{cvx_matrix}{a matrix containing a 1 if the corresponding point of the grid is in the convex hull, and a 0 otherwise.}
\item{nb_individual}{the number of individuals in the dataset.}
}
\keyword{manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-dashboard.R
\name{sf_dashboard_refresh}
\alias{sf_dashboard_refresh}
\title{Refresh an existing dashboard}
\usage{
sf_dashboard_refresh(dashboard_id, dashboard_filters = c(character(0)))
}
\arguments{
\item{dashboard_id}{\code{character}; the Salesforce Id assigned to a created
dashboard. It will start with \code{"01Z"}.}
\item{dashboard_filters}{\code{character}; Dashboard results are always unfiltered, unless you
have specified filter parameters in your request. Use this argument to include
up to three optional filter Ids. You can obtain the list of defined filter Ids
from the dashboard metadata using \link{sf_dashboard_describe}.}
}
\value{
\code{list}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
}
| /man/sf_dashboard_refresh.Rd | permissive | jfer2pi/salesforcer | R | false | true | 948 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-dashboard.R
\name{sf_dashboard_refresh}
\alias{sf_dashboard_refresh}
\title{Refresh an existing dashboard}
\usage{
sf_dashboard_refresh(dashboard_id, dashboard_filters = c(character(0)))
}
\arguments{
\item{dashboard_id}{\code{character}; the Salesforce Id assigned to a created
dashboard. It will start with \code{"01Z"}.}
\item{dashboard_filters}{\code{character}; Dashboard results are always unfiltered, unless you
have specified filter parameters in your request. Use this argument to include
up to three optional filter Ids. You can obtain the list of defined filter Ids
from the dashboard metadata using \link{sf_dashboard_describe}.}
}
\value{
\code{list}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
}
|
# NOTE(review): this whole block is wrapped in if(FALSE) and therefore never
# runs. It is a work-in-progress draft of a measurement/structural layout
# algorithm; several branches (the multi-column case) are still empty stubs,
# and return() appears outside a function, which would error if enabled.
if(FALSE){
tab_res <- table_results(fit, columns = NULL)
# mm: measurement model rows (loadings); sem: structural rows (regressions
# and off-diagonal covariances).
mm <- tab_res[tab_res$op == "=~", ]
sem <- tab_res[tab_res$op == "~" | (tab_res$op == "~~" & !(tab_res$lhs == tab_res$rhs)), ]
selected = list("")
if(!any(sem$op == "~")){
selected[[1]] <- unique(c(sem$lhs, sem$rhs))
} else {
# Drop correlations
reg_only <- sem[!sem$op == "~~", ]
remaining <- unique(c(reg_only$lhs, reg_only$rhs))
maxit <- 0
# Peel off variables that are never used as predictors, column by column,
# until nothing remains (recursive topological layering).
while(length(selected[[length(selected)]]) > 0 & maxit < 100){
reg_only <- reg_only[!reg_only$lhs %in% unlist(selected), ]
selected <- c(selected, list(
unique(reg_only$lhs[!(reg_only$lhs %in% reg_only$rhs)])
))
maxit <- maxit + 1
}
if(maxit == 100){
stop("Maximum number of iterations exceeded in recursive layout algorithm.")
}
selected[[1]] <- NULL
selected[[length(selected)]] <- unique(remaining[!remaining %in% unlist(selected)])
selected <- selected[length(selected):1]
}
if(nrow(mm) > 0){ # If there is a measurement model
items_per_col <- lapply(selected, function(thisvar){unique(mm$rhs[mm$lhs == thisvar])})
lvs_per_col <- lapply(selected, function(thiscol){
unique(mm$lhs[mm$lhs %in% thiscol])
})
max_cells <- max(max(sapply(selected, length)), max(sapply(items_per_col, length)))
if(length(selected) == 1){
mm_col <- unlist(items_per_col)
lv_col <- space_these(selected[[1]], max_cells)
out <- rbind(mm_col, lv_col)
} else { # If there are multiple cols in the layout
# NOTE(review): unfinished — the multi-column branch bodies are empty.
for(this_col in 1:length(selected)){
if(this_col == 1){
}
if(this_col == length(selected)){
}
}
lapply(selected, function(this_col){
})
}
}
# Remove rownames
rownames(out) <- NULL
return(out)
}
space_these <- function(these, n){
  # Distribute the elements of `these` evenly over a vector of length n,
  # filling the remaining cells with NA. Used to centre latent variables
  # above/below their indicators in a layout column.
  #
  # these: vector of elements to place (e.g. node names).
  # n:     total number of cells in the output vector.
  # Returns a length-n vector with `these` at evenly spaced positions.
  out <- rep(NA, n)
  cellsper <- n/length(these)
  is_int <- (cellsper %% 1) == 0
  if(is_int){
    is_odd <- (cellsper %% 2) == 1
    if(is_odd){
      the_seq <- seq(from = ceiling(cellsper/2), to = n-floor(cellsper/2), length.out = length(these))
    } else {
      the_seq <- seq(from = floor(cellsper/2), to = n-floor(cellsper/2), length.out = length(these))
    }
  } else {
    # General case (n not a multiple of length(these)): space the positions
    # evenly and round to the nearest cell. Previously this branch hit
    # browser(), which halts execution and left the_seq undefined.
    the_seq <- round(seq(from = ceiling(cellsper/2), to = n - floor(cellsper/2), length.out = length(these)))
  }
  out[the_seq] <- these
  out
}
# @title Generate a rudimentary layout from a model object
# @description This is a wrapper function to the
# \code{\link[igraph]{layout_as_tree}} function, or other layout
# functions from the \code{\link[igraph]{igraph-package}}. It returns a layout
# in matrix format.
# @param x A model for which a method exists.
# @param layout_algorithm Which algorithm to use, from the
# \code{\link[igraph]{igraph-package}}. The default Reingold-Tilford algorithm
# is most suitable for SEM graphs.
# @return Matrix
# @examples
# \dontrun{
# library(lavaan)
# fit <- sem("Sepal.Length ~ Petal.Width", data = iris)
# generate_layout(fit)
# }
# @rdname generate_layout
#' @method get_layout lavaan
#' @rdname get_layout
#' @param layout_algorithm Optional argument for fit model objects. Character
#' string, indicating which \code{igraph}
#' layout algorithm to apply to position the nodes. Defaults to
#' \code{"layout_as_tree"}; see details for more options.
#' @export
get_layout.lavaan <- function(x, ..., layout_algorithm = "layout_as_tree"){
# Capture the caller's arguments unevaluated so optional settings such as
# layout_algorithm are forwarded verbatim to the next method.
Args <- as.list(match.call()[-1])
# Replace the fitted model with its tidied parameter table; do.call() then
# re-dispatches, landing in get_layout.tidy_results.
Args$x <- table_results(x, columns = NULL)
do.call(get_layout, Args)
}
# Mplus models are handled identically to lavaan models: both are first
# converted to a tidy parameter table via table_results().
#' @method get_layout mplus.model
#' @export
get_layout.mplus.model <- get_layout.lavaan
#' @method get_layout tidy_results
#' @export
#' @importFrom igraph graph.data.frame vertex.attributes
#' layout_as_star layout_as_tree layout_in_circle layout_nicely
#' layout_on_grid layout_randomly layout_with_dh layout_with_fr layout_with_gem
#' layout_with_graphopt layout_with_kk layout_with_lgl layout_with_mds
get_layout.tidy_results <- function(x, ..., layout_algorithm = "layout_as_tree"){
  # Generate a rudimentary grid layout for a SEM graph from a tidy results
  # table, using the requested igraph layout algorithm.
  tab_res <- x
  # Keep only edges: covariances (~~), regressions (~), and loadings (=~).
  df <- tab_res[tab_res$op %in% c("~~", "~", "=~"), c("lhs", "rhs")]
  g <- graph.data.frame(df, directed = TRUE)
  # Compute continuous coordinates, then snap them to integer grid cells.
  lo <- do.call(layout_algorithm, list(g))
  lo <- round(lo)
  if(any(duplicated(lo))){
    # Snapping can place two nodes on the same cell; nudge them apart.
    lo <- resolve_dups(lo)
  }
  # Shift coordinates so they start at (1, 1) and can index a matrix.
  lo <- sweep(lo, 2, (apply(lo, 2, min)-1), "-")
  out <- matrix(nrow = max(lo[,2]), ncol = max(lo[, 1]))
  vnames <- vertex.attributes(g)$name
  for(this_var in seq_along(vnames)){
    out[lo[this_var, 2], lo[this_var, 1]] <- vnames[this_var]
  }
  if(dim(out)[2] < dim(out)[1]){
    # Prefer landscape orientation.
    out <- t(out)
  } else {
    # Flip vertically so igraph's bottom-up y axis reads top-down.
    # drop = FALSE keeps a one-row layout a matrix instead of collapsing
    # it to a bare vector (which broke the class assignment below).
    out <- out[nrow(out):1, , drop = FALSE]
  }
  class(out) <- c("layout_matrix", class(out))
  return(out)
}
#' @importFrom utils tail
resolve_dups <- function(lo){
  # Recursively nudge duplicated grid coordinates to a free neighbouring
  # cell until every node occupies a unique location.
  #
  # lo: two-column numeric matrix of snapped (x, y) grid coordinates.
  # Returns a matrix like lo in which no row is duplicated; errors if a
  # duplicated node has no free neighbouring cell left.
  new_lo <- lo
  first_dup <- which(duplicated(lo))[1]
  dup_row <- lo[first_dup,]
  # The 3x3 neighbourhood around the duplicated cell (includes the cell
  # itself, which is filtered out as occupied below).
  neighboring_locs <- t(apply(expand.grid(c(-1,0,1), c(-1,0,1)), 1, `+`, dup_row))
  # Keep only neighbours not already occupied. drop = FALSE is required so
  # that a single remaining candidate stays a one-row matrix; without it the
  # subset collapses to a vector and nrow() below returns NULL, making the
  # emptiness check error with "argument is of length zero".
  free_locs <- neighboring_locs[tail(!duplicated(rbind(lo, neighboring_locs)), 9), , drop = FALSE]
  if(nrow(free_locs) == 0) stop("Could not generate layout automatically. Please specify a layout manually.")
  # Move the duplicated node to a randomly chosen free neighbour.
  new_lo[first_dup, ] <- free_locs[sample.int(nrow(free_locs), 1), ]
  if(any(duplicated(new_lo))){
    resolve_dups(new_lo)
  } else {
    return(new_lo)
  }
}
#' @title Generate graph layout
#' @description Generate a tidy_layout for a SEM graph.
#' @param x An object for which a method exists; currently, methods exist for
#' \code{character}, \code{lavaan}, and \code{mplus.model} objects.
#' @param ... Character arguments corresponding to layout elements. Use node
#' names, empty strings (""), or NA values.
#' @details There are three ways to generate a layout:
#' \enumerate{
#' \item Specify the layout in the call to \code{get_layout()} by providing
#' node names and the number of
#' rows to create a layout matrix. Empty strings (\code{""})
#' or \code{NA} can be used for empty cells. See Example 1.
#' \item Call \code{get_layout()} on a model object or \code{tidy_results}
#' object. It will use the function
#' \code{\link[igraph]{layout_as_tree}}, or any other layout function
#' from the \code{igraph} package, to generate a rudimentary layout. See
#' Example 2.
#' \item Instead of using \code{get_layout()}, just use a \code{matrix} or
#' \code{data.frame} with your layout. For example, specify the layout in a
#' spreadsheet program, and load it into R (see Example 3). Or, copy the
#' layout to the clipboard from your spreadsheet program, and load it from the
#' clipboard (see Example 4)
#' }
#' The layout algorithms imported from \code{igraph} are:
#' \code{c("layout_as_star",
#' "layout_as_tree", "layout_in_circle", "layout_nicely",
#' "layout_on_grid", "layout_randomly", "layout_with_dh", "layout_with_fr",
#' "layout_with_gem",
#' "layout_with_graphopt", "layout_with_kk", "layout_with_lgl",
#' "layout_with_mds")}. These can be used by specifying the optional argument
#' \code{layout_algorithm = ""}.
#' @return Object of class 'tidy_layout'
#' @examples
#' # Example 1
#' get_layout("c", NA, "d",
#' NA, "e", NA, rows = 2)
#'
#' # Example 2
#' library(lavaan)
#' fit <- cfa(' visual =~ x1 + x2 + x3 ',
#' data = HolzingerSwineford1939[1:50, ])
#' get_layout(fit)
#'
#' \dontrun{
#' # Example 3
#' # Here, we first write the layout to .csv, but you could create it in a
#' # spreadsheet program, and save the spreadsheet to .csv:
#' write.csv(matrix(c("c", "", "d", "", "e", ""), nrow = 2, byrow = TRUE),
#' file = file.path(tempdir(), "example3.csv"), row.names = FALSE)
#' # Now, we load the .csv:
#' read.csv(file.path(tempdir(), "example3.csv"))
#'
#' # Example 4
#' # For this example, make your layout in a spreadsheet program, select it, and
#' # copy to clipboard. Reading from the clipboard works differently in Windows
#' # and Mac. For this example, I used Microsoft Excel.
#' # On Windows, run:
#' read.table("clipboard", sep = "\t")
#' # On Mac, run:
#' read.table(pipe("pbpaste"), sep="\t")
#' }
#' @rdname get_layout
#' @keywords tidy_graph
# @seealso long_layout
#' @export
get_layout <- function(x, ...){
# S3 generic; see get_layout.default, get_layout.lavaan,
# get_layout.mplus.model and get_layout.tidy_results for the methods.
UseMethod("get_layout", x)
}
# @title Generate graph layout
# @description Generate a tidy_layout for a SEM graph by specifying node names,
# and empty strings or \code{NA} values for spaces.
# @param ... Character arguments corresponding to layout elements. Use node
# names, empty strings (""), or NA values.
# @param rows Numeric, indicating the number of rows of the graph.
# @return Object of class 'tidy_layout'
# @examples
# get_layout("c", "", "d",
# "", "e", "", rows = 2)
# @rdname layout
# @keywords tidy_graph
# @seealso long_layout
#' @param rows Numeric, indicating the number of rows of the graph.
#' @rdname get_layout
#' @method get_layout default
#' @export
get_layout.default <- function(x, ..., rows = NULL){
  # Build a layout matrix from node names supplied as individual arguments,
  # filled row-wise with `rows` rows.
  Args <- as.list(match.call()[-1])
  if("rows" %in% names(Args)){
    # rows was passed by name; the formal argument already holds its value,
    # so only remove it from the list of layout cells.
    Args$rows <- NULL
  } else {
    # rows was not named: treat a single numeric argument as the row count.
    # (The previous code tested length(sapply(Args, is.numeric)) == 1, which
    # counts ALL arguments rather than the numeric ones, and never assigned
    # the value to `rows`, so positional usage always failed.)
    num_args <- which(sapply(Args, is.numeric))
    if(length(num_args) == 1){
      rows <- Args[[num_args]]
      Args[num_args] <- NULL
    } else {
      stop("Provide 'rows' argument.", call. = FALSE)
    }
  }
  if(is.null(rows)){
    stop("Provide 'rows' argument.", call. = FALSE)
  }
  if(!(length(Args) %% rows == 0)){
    stop("Number of arguments is not a multiple of rows = ", rows, call. = FALSE)
  }
  vec <- do.call(c, Args)
  out <- do.call(matrix, list(
    data = vec,
    nrow = rows,
    byrow = TRUE
  ))
  class(out) <- c("layout_matrix", class(out))
  return(out)
}
# @title Convert object to layout
# @description Convert an object to a tidy_layout for a SEM graph.
# @param x Object to convert to a tidy_layout. The default argument reads a
# selected matrix from the clipboard.
# To use this functionality, specify your layout in a spreadsheet program,
# select the block of cells, and copy it to the clipboard.
# @return Object of class 'tidy_layout'
# @examples
# \dontrun{
# if(interactive()){
# #EXAMPLE1
# }
# }
# @rdname long_layout
# @keywords tidy_graph
# @export
long_layout <- function(x){
# S3 generic converting a layout specification to long format (one row per
# non-empty node); methods exist for data.frame and matrix.
UseMethod("long_layout")
}
#' @method long_layout data.frame
#' @export
long_layout.data.frame <- function(x){
  # Coerce the data.frame to a matrix and delegate to the matrix method.
  long_layout(as.matrix(x))
}
#' @method long_layout matrix
#' @export
long_layout.matrix <- function(x){
  # Convert a layout matrix into long format: one row per non-empty cell,
  # with columns y (row), x (column) and name (node label).
  lay <- x
  lay[is.na(lay)] <- ""
  long <- as.data.frame.table(lay)
  names(long) <- c("y", "x", "name")
  long$y <- as.numeric(long$y)
  long$x <- as.numeric(long$x)
  long$name <- as.character(long$name)
  # Flip vertically so the first matrix row ends up at the top of the plot.
  long$y <- (max(long$y) + 1) - long$y
  # Empty cells are spacing only; drop them.
  long <- long[long$name != "", ]
  row.names(long) <- NULL
  class(long) <- c("tidy_layout", class(long))
  long
}
| /R/plot-generate_layout.R | no_license | stjordanis/tidySEM | R | false | false | 11,015 | r | if(FALSE){
tab_res <- table_results(fit, columns = NULL)
mm <- tab_res[tab_res$op == "=~", ]
sem <- tab_res[tab_res$op == "~" | (tab_res$op == "~~" & !(tab_res$lhs == tab_res$rhs)), ]
selected = list("")
if(!any(sem$op == "~")){
selected[[1]] <- unique(c(sem$lhs, sem$rhs))
} else {
# Drop correlations
reg_only <- sem[!sem$op == "~~", ]
remaining <- unique(c(reg_only$lhs, reg_only$rhs))
maxit <- 0
while(length(selected[[length(selected)]]) > 0 & maxit < 100){
reg_only <- reg_only[!reg_only$lhs %in% unlist(selected), ]
selected <- c(selected, list(
unique(reg_only$lhs[!(reg_only$lhs %in% reg_only$rhs)])
))
maxit <- maxit + 1
}
if(maxit == 100){
stop("Maximum number of iterations exceeded in recursive layout algorithm.")
}
selected[[1]] <- NULL
selected[[length(selected)]] <- unique(remaining[!remaining %in% unlist(selected)])
selected <- selected[length(selected):1]
}
if(nrow(mm) > 0){ # If there is a measurement model
items_per_col <- lapply(selected, function(thisvar){unique(mm$rhs[mm$lhs == thisvar])})
lvs_per_col <- lapply(selected, function(thiscol){
unique(mm$lhs[mm$lhs %in% thiscol])
})
max_cells <- max(max(sapply(selected, length)), max(sapply(items_per_col, length)))
if(length(selected) == 1){
mm_col <- unlist(items_per_col)
lv_col <- space_these(selected[[1]], max_cells)
out <- rbind(mm_col, lv_col)
} else { # If there are multiple cols in the layout
for(this_col in 1:length(selected)){
if(this_col == 1){
}
if(this_col == length(selected)){
}
}
lapply(selected, function(this_col){
})
}
}
# Remove rownames
rownames(out) <- NULL
return(out)
}
space_these <- function(these, n){
  # Distribute the elements of `these` evenly over a vector of length n,
  # filling the remaining cells with NA. Used to centre latent variables
  # above/below their indicators in a layout column.
  #
  # these: vector of elements to place (e.g. node names).
  # n:     total number of cells in the output vector.
  # Returns a length-n vector with `these` at evenly spaced positions.
  out <- rep(NA, n)
  cellsper <- n/length(these)
  is_int <- (cellsper %% 1) == 0
  if(is_int){
    is_odd <- (cellsper %% 2) == 1
    if(is_odd){
      the_seq <- seq(from = ceiling(cellsper/2), to = n-floor(cellsper/2), length.out = length(these))
    } else {
      the_seq <- seq(from = floor(cellsper/2), to = n-floor(cellsper/2), length.out = length(these))
    }
  } else {
    # General case (n not a multiple of length(these)): space the positions
    # evenly and round to the nearest cell. Previously this branch hit
    # browser(), which halts execution and left the_seq undefined.
    the_seq <- round(seq(from = ceiling(cellsper/2), to = n - floor(cellsper/2), length.out = length(these)))
  }
  out[the_seq] <- these
  out
}
# @title Generate a rudimentary layout from a model object
# @description This is a wrapper function to the
# \code{\link[igraph]{layout_as_tree}} function, or other layout
# functions from the \code{\link[igraph]{igraph-package}}. It returns a layout
# in matrix format.
# @param x A model for which a method exists.
# @param layout_algorithm Which algorithm to use, from the
# \code{\link[igraph]{igraph-package}}. The default Reingold-Tilford algorithm
# is most suitable for SEM graphs.
# @return Matrix
# @examples
# \dontrun{
# library(lavaan)
# fit <- sem("Sepal.Length ~ Petal.Width", data = iris)
# generate_layout(fit)
# }
# @rdname generate_layout
#' @method get_layout lavaan
#' @rdname get_layout
#' @param layout_algorithm Optional argument for fit model objects. Character
#' string, indicating which \code{igraph}
#' layout algorithm to apply to position the nodes. Defaults to
#' \code{"layout_as_tree"}; see details for more options.
#' @export
get_layout.lavaan <- function(x, ..., layout_algorithm = "layout_as_tree"){
Args <- as.list(match.call()[-1])
Args$x <- table_results(x, columns = NULL)
do.call(get_layout, Args)
}
#' @method get_layout mplus.model
#' @export
get_layout.mplus.model <- get_layout.lavaan
#' @method get_layout tidy_results
#' @export
#' @importFrom igraph graph.data.frame vertex.attributes
#' layout_as_star layout_as_tree layout_in_circle layout_nicely
#' layout_on_grid layout_randomly layout_with_dh layout_with_fr layout_with_gem
#' layout_with_graphopt layout_with_kk layout_with_lgl layout_with_mds
get_layout.tidy_results <- function(x, ..., layout_algorithm = "layout_as_tree"){
tab_res <- x
df <- tab_res[tab_res$op %in% c("~~", "~", "=~"), c("lhs", "rhs")]
g <- graph.data.frame(df, directed = TRUE)
lo <- do.call(layout_algorithm, list(g))
lo <- round(lo)
if(any(duplicated(lo))){
lo <- resolve_dups(lo)
#stop("Could not snap to grid, some nodes were in the same location.")
}
lo <- sweep(lo, 2, (apply(lo, 2, min)-1), "-")
out <- matrix(nrow = max(lo[,2]), ncol = max(lo[, 1]))
vnames <- vertex.attributes(g)$name
for(this_var in 1:length(vnames)){
out[lo[this_var, 2], lo[this_var, 1]] <- vnames[this_var]
}
if(dim(out)[2] < dim(out)[1]){
out <- t(out)
} else {
out <- out[nrow(out):1, ]
}
class(out) <- c("layout_matrix", class(out))
return(out)
}
#' @importFrom utils tail
resolve_dups <- function(lo){
  # Recursively nudge duplicated grid coordinates to a free neighbouring
  # cell until every node occupies a unique location.
  #
  # lo: two-column numeric matrix of snapped (x, y) grid coordinates.
  # Returns a matrix like lo in which no row is duplicated; errors if a
  # duplicated node has no free neighbouring cell left.
  new_lo <- lo
  first_dup <- which(duplicated(lo))[1]
  dup_row <- lo[first_dup,]
  # The 3x3 neighbourhood around the duplicated cell (includes the cell
  # itself, which is filtered out as occupied below).
  neighboring_locs <- t(apply(expand.grid(c(-1,0,1), c(-1,0,1)), 1, `+`, dup_row))
  # Keep only neighbours not already occupied. drop = FALSE is required so
  # that a single remaining candidate stays a one-row matrix; without it the
  # subset collapses to a vector and nrow() below returns NULL, making the
  # emptiness check error with "argument is of length zero".
  free_locs <- neighboring_locs[tail(!duplicated(rbind(lo, neighboring_locs)), 9), , drop = FALSE]
  if(nrow(free_locs) == 0) stop("Could not generate layout automatically. Please specify a layout manually.")
  # Move the duplicated node to a randomly chosen free neighbour.
  new_lo[first_dup, ] <- free_locs[sample.int(nrow(free_locs), 1), ]
  if(any(duplicated(new_lo))){
    resolve_dups(new_lo)
  } else {
    return(new_lo)
  }
}
#' @title Generate graph layout
#' @description Generate a tidy_layout for a SEM graph.
#' @param x An object for which a method exists; currently, methods exist for
#' \code{character}, \code{lavaan}, and \code{mplus.model} objects.
#' @param ... Character arguments corresponding to layout elements. Use node
#' names, empty strings (""), or NA values.
#' @details There are three ways to generate a layout:
#' \enumerate{
#' \item Specify the layout in the call to \code{get_layout()} by providing
#' node names and the number of
#' rows to create a layout matrix. Empty strings (\code{""})
#' or \code{NA} can be used for empty cells. See Example 1.
#' \item Call \code{get_layout()} on a model object or \code{tidy_results}
#' object. It will use the function
#' \code{\link[igraph]{layout_as_tree}}, or any other layout function
#' from the \code{igraph} package, to generate a rudimentary layout. See
#' Example 2.
#' \item Instead of using \code{get_layout()}, just use a \code{matrix} or
#' \code{data.frame} with your layout. For example, specify the layout in a
#' spreadsheet program, and load it into R (see Example 3). Or, copy the
#' layout to the clipboard from your spreadsheet program, and load it from the
#' clipboard (see Example 4)
#' }
#' The layout algorithms imported from \code{igraph} are:
#' \code{c("layout_as_star",
#' "layout_as_tree", "layout_in_circle", "layout_nicely",
#' "layout_on_grid", "layout_randomly", "layout_with_dh", "layout_with_fr",
#' "layout_with_gem",
#' "layout_with_graphopt", "layout_with_kk", "layout_with_lgl",
#' "layout_with_mds")}. These can be used by specifying the optional argument
#' \code{layout_algorithm = ""}.
#' @return Object of class 'tidy_layout'
#' @examples
#' # Example 1
#' get_layout("c", NA, "d",
#' NA, "e", NA, rows = 2)
#'
#' # Example 2
#' library(lavaan)
#' fit <- cfa(' visual =~ x1 + x2 + x3 ',
#' data = HolzingerSwineford1939[1:50, ])
#' get_layout(fit)
#'
#' \dontrun{
#' # Example 3
#' # Here, we first write the layout to .csv, but you could create it in a
#' # spreadsheet program, and save the spreadsheet to .csv:
#' write.csv(matrix(c("c", "", "d", "", "e", ""), nrow = 2, byrow = TRUE),
#' file = file.path(tempdir(), "example3.csv"), row.names = FALSE)
#' # Now, we load the .csv:
#' read.csv(file.path(tempdir(), "example3.csv"))
#'
#' # Example 4
#' # For this example, make your layout in a spreadsheet program, select it, and
#' # copy to clipboard. Reading from the clipboard works differently in Windows
#' # and Mac. For this example, I used Microsoft Excel.
#' # On Windows, run:
#' read.table("clipboard", sep = "\t")
#' # On Mac, run:
#' read.table(pipe("pbpaste"), sep="\t")
#' }
#' @rdname get_layout
#' @keywords tidy_graph
# @seealso long_layout
#' @export
get_layout <- function(x, ...){
UseMethod("get_layout", x)
}
# @title Generate graph layout
# @description Generate a tidy_layout for a SEM graph by specifying node names,
# and empty strings or \code{NA} values for spaces.
# @param ... Character arguments corresponding to layout elements. Use node
# names, empty strings (""), or NA values.
# @param rows Numeric, indicating the number of rows of the graph.
# @return Object of class 'tidy_layout'
# @examples
# get_layout("c", "", "d",
# "", "e", "", rows = 2)
# @rdname layout
# @keywords tidy_graph
# @seealso long_layout
#' @param rows Numeric, indicating the number of rows of the graph.
#' @rdname get_layout
#' @method get_layout default
#' @export
get_layout.default <- function(x, ..., rows = NULL){
  # Build a layout matrix from node names supplied as individual arguments,
  # filled row-wise with `rows` rows.
  Args <- as.list(match.call()[-1])
  if("rows" %in% names(Args)){
    # rows was passed by name; the formal argument already holds its value,
    # so only remove it from the list of layout cells.
    Args$rows <- NULL
  } else {
    # rows was not named: treat a single numeric argument as the row count.
    # (The previous code tested length(sapply(Args, is.numeric)) == 1, which
    # counts ALL arguments rather than the numeric ones, and never assigned
    # the value to `rows`, so positional usage always failed.)
    num_args <- which(sapply(Args, is.numeric))
    if(length(num_args) == 1){
      rows <- Args[[num_args]]
      Args[num_args] <- NULL
    } else {
      stop("Provide 'rows' argument.", call. = FALSE)
    }
  }
  if(is.null(rows)){
    stop("Provide 'rows' argument.", call. = FALSE)
  }
  if(!(length(Args) %% rows == 0)){
    stop("Number of arguments is not a multiple of rows = ", rows, call. = FALSE)
  }
  vec <- do.call(c, Args)
  out <- do.call(matrix, list(
    data = vec,
    nrow = rows,
    byrow = TRUE
  ))
  class(out) <- c("layout_matrix", class(out))
  return(out)
}
# @title Convert object to layout
# @description Convert an object to a tidy_layout for a SEM graph.
# @param x Object to convert to a tidy_layout. The default argument reads a
# selected matrix from the clipboard.
# To use this functionality, specify your layout in a spreadsheet program,
# select the block of cells, and copy it to the clipboard.
# @return Object of class 'tidy_layout'
# @examples
# \dontrun{
# if(interactive()){
# #EXAMPLE1
# }
# }
# @rdname long_layout
# @keywords tidy_graph
# @export
long_layout <- function(x){
UseMethod("long_layout")
}
#' @method long_layout data.frame
#' @export
long_layout.data.frame <- function(x){
Args <- as.list(match.call()[-1])
Args$x <- as.matrix(x)
do.call(long_layout, Args)
}
#' @method long_layout matrix
#' @export
long_layout.matrix <- function(x){
mat <- x
mat[is.na(mat)] <- ""
nodes_long <- setNames(as.data.frame.table(mat), c("y", "x", "name"))
nodes_long[1:2] <- lapply(nodes_long[1:2], as.numeric)
nodes_long$y <- (max(nodes_long$y)+1)-nodes_long$y
nodes_long$name <- as.character(nodes_long$name)
nodes_long <- nodes_long[!nodes_long$name == "", ]
row.names(nodes_long) <- NULL
class(nodes_long) <- c("tidy_layout", class(nodes_long))
nodes_long
}
|
# Test suite label for the stop/route headway (frequency) calculations.
# Fixes grammar in the label: "are calculates" -> "are calculated".
context("Frequencies are calculated correctly")
test_that("Stop frequencies (headways) for included data are as expected", {
# NOTE(review): gtfs_obj is assumed to be created by the suite's setup code
# (e.g. a helper loading the bundled GTFS sample) -- confirm it exists
# before this file runs.
gtfs_obj <- get_stop_frequency(gtfs_obj, by_route=FALSE)
stop_frequency_summary <- gtfs_obj$stops_frequency_df
fifteenth_st_at_hillsborough_rd <- stop_frequency_summary[stop_frequency_summary$stop_id==778123,]$headway
# Compare truncated values: as.integer() drops the fractional part on both
# sides, so this asserts the headway is in [7, 8).
expect_equal(as.integer(7.8688), as.integer(fifteenth_st_at_hillsborough_rd))
})
test_that("Route frequencies (headways) for included data are as expected", {
# NOTE(review): gtfs_obj is assumed to come from the suite's setup code --
# confirm it is loaded before this file runs.
gtfs_obj <- get_route_frequency(gtfs_obj)
rf <- gtfs_obj$routes_frequency_df
# Route 1679's median headway in the bundled sample data should be 26.
expect_equal(rf[rf$route_id==1679,]$median_headways, 26)
})
test_that("Stop frequencies (headways) for included data are as expected", {
gtfs_obj <- get_stop_frequency(gtfs_obj, by_route=FALSE)
stop_frequency_summary <- gtfs_obj$stops_frequency_df
fifteenth_st_at_hillsborough_rd <- stop_frequency_summary[stop_frequency_summary$stop_id==778123,]$headway
expect_equal(as.integer(7.8688), as.integer(fifteenth_st_at_hillsborough_rd))
})
test_that("Route frequencies (headways) for included data are as expected", {
gtfs_obj <- get_route_frequency(gtfs_obj)
rf <- gtfs_obj$routes_frequency_df
expect_equal(rf[rf$route_id==1679,]$median_headways, 26)
}) |
# Temperature response multiplier for Vcmax and Jmax, following the peaked
# Arrhenius formulation of Kattge and Knorr (2007).
#
# tleaf: leaf temperature (deg C)
# tmean: mean growth temperature (deg C), used to acclimate the entropy term
# tref:  reference temperature (deg C); the multiplier is 1 at tleaf == tref
calc_tresp_mult <- function(tleaf, tmean, tref) {
  tleaf_k <- tleaf + 273.15
  tref_k <- tref + 273.15
  gas_const <- 8.314     # Universal gas constant (J mol^-1 K^-1)
  act_energy <- 71513    # Activation energy (J mol^-1), Massad 2007
  deact_energy <- 200000 # Deactivation energy (J mol^-1), Massad 2007
  # Entropy term acclimates linearly to the growth temperature.
  entropy <- 668.39 + -1.07 * tmean
  # Rising (Arrhenius) part of the response.
  arrhenius <- exp(act_energy * (tleaf_k - tref_k) / (tref_k * gas_const * tleaf_k))
  # High-temperature deactivation part.
  peak <- (1 + exp((tref_k * entropy - deact_energy) / (tref_k * gas_const))) /
    (1 + exp((tleaf_k * entropy - deact_energy) / (tleaf_k * gas_const)))
  # Equation 20 in Smith 2019
  arrhenius * peak
}
| /functions/calc_tresp_mult.R | no_license | hgscott/C4model | R | false | false | 635 | r | # calculate the temperature response multiplier for Vcmax and Jmax (Kattge and Knorr 2007)
calc_tresp_mult = function(tleaf, tmean, tref){
# Convert leaf and reference temperatures from deg C to Kelvin.
temp <- tleaf + 273.15
Ha <- 71513 # Activation Energy (J mol^-1) Massad 2007
Hd <- 200000 # Deactivation energy (J mol^-1) Massad 2007
# Entropy-term coefficients; the entropy acclimates linearly to tmean.
adelS <- 668.39
bdelS <- -1.07
trefK <- tref + 273.15
R <- 8.314 # Universal Gas constant (J mol^-1 K^-1)
# Rising (Arrhenius) part; equals 1 when tleaf == tref.
kbeg <- exp(Ha*(temp-trefK)/(trefK*R*temp))
# High-temperature deactivation part.
kend <-((1+exp((trefK*(adelS+bdelS*tmean)-Hd)/(trefK*R)))/(1+exp((temp*(adelS+bdelS*tmean)-Hd)/(temp*R))))
ktotal <- kbeg*kend # Equation 20 in Smith 2019
return(ktotal)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregation.R
\name{ckde}
\alias{ckde}
\title{Composite Kernel Density Estimates of Radiocarbon Dates}
\usage{
ckde(x, timeRange, bw, normalised = FALSE)
}
\arguments{
\item{x}{A \code{simdates} class object, generated using \code{\link{sampleDates}}.}
\item{timeRange}{A vector of length 2 indicating the start and end date of the analysis in cal BP.}
\item{bw}{Kernel bandwidth to be used.}
\item{normalised}{A logical variable indicating whether the contribution of individual dates should be equal (TRUE), or weighted based on the area under the curve of non-normalised calibration (FALSE). Default is FALSE.}
}
\value{
An object of class \code{ckdeSPD} with the following elements
\itemize{
\item{\code{timeRange}} {The \code{timeRange} setting used.}
\item{\code{res.matrix}} {A matrix containing the KDE values with rows representing calendar dates.}
}
}
\description{
Computes a Composite Kernel Density Estimate (CKDE) from multiple sets of randomly sampled calendar dates.
}
\details{
The function computes Kernel Density Estimates using randomly sampled calendar dates contained in a \code{simdates} class object (generated using the \code{simulate.dates()} function). The output contains \code{nsim} KDEs, where \code{nsim} is the argument used in \code{simulate.dates()}. The resulting object can be plotted to visualise a CKDE (cf Brown 2017), and if \code{boot} was set to \code{TRUE} in \code{sampleDates} its bootstraped variant (cf McLaughlin 2018 for a similar analysis). The shape of the CKDE is comparable to an SPD generated from non-normalised dates when the argument \code{normalised} is set to FALSE.
}
\examples{
data(emedyd)
x = calibrate(x=emedyd$CRA, errors=emedyd$Error,normalised=FALSE)
bins = binPrep(sites=emedyd$SiteName, ages=emedyd$CRA,h=50)
s = sampleDates(x,bins=bins,nsim=100,boot=FALSE)
ckdeNorm = ckde(s,timeRange=c(16000,9000),bw=100,normalised=TRUE)
plot(ckdeNorm,type='multiline',cal='BCAD')
}
\references{
Brown, W. A. 2017. The past and future of growth rate estimation in demographic temporal frequency analysis: Biodemographic interpretability and the ascendance of dynamic growth models. \emph{Journal of Archaeological Science}, 80: 96–108. DOI: https://doi.org/10.1016/j.jas.2017.02.003 \cr
McLaughlin, T. R. 2018. On Applications of Space–Time Modelling with Open-Source 14C Age Calibration. \emph{Journal of Archaeological Method and Theory}. DOI https://doi.org/10.1007/s10816-018-9381-3
}
\seealso{
\code{\link{sampleDates}}
}
| /man/ckde.Rd | no_license | f-silva-archaeo/rcarbon | R | false | true | 2,567 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregation.R
\name{ckde}
\alias{ckde}
\title{Composite Kernel Density Estimates of Radiocarbon Dates}
\usage{
ckde(x, timeRange, bw, normalised = FALSE)
}
\arguments{
\item{x}{A \code{simdates} class object, generated using \code{\link{sampleDates}}.}
\item{timeRange}{A vector of length 2 indicating the start and end date of the analysis in cal BP.}
\item{bw}{Kernel bandwidth to be used.}
\item{normalised}{A logical variable indicating whether the contribution of individual dates should be equal (TRUE), or weighted based on the area under the curve of non-normalised calibration (FALSE). Default is FALSE.}
}
\value{
An object of class \code{ckdeSPD} with the following elements
\itemize{
\item{\code{timeRange}} {The \code{timeRange} setting used.}
\item{\code{res.matrix}} {A matrix containing the KDE values with rows representing calendar dates.}
}
}
\description{
Computes a Composite Kernel Density Estimate (CKDE) from multiple sets of randomly sampled calendar dates.
}
\details{
The function computes Kernel Density Estimates using randomly sampled calendar dates contained in a \code{simdates} class object (generated using the \code{simulate.dates()} function). The output contains \code{nsim} KDEs, where \code{nsim} is the argument used in \code{simulate.dates()}. The resulting object can be plotted to visualise a CKDE (cf Brown 2017), and if \code{boot} was set to \code{TRUE} in \code{sampleDates} its bootstraped variant (cf McLaughlin 2018 for a similar analysis). The shape of the CKDE is comparable to an SPD generated from non-normalised dates when the argument \code{normalised} is set to FALSE.
}
\examples{
data(emedyd)
x = calibrate(x=emedyd$CRA, errors=emedyd$Error,normalised=FALSE)
bins = binPrep(sites=emedyd$SiteName, ages=emedyd$CRA,h=50)
s = sampleDates(x,bins=bins,nsim=100,boot=FALSE)
ckdeNorm = ckde(s,timeRange=c(16000,9000),bw=100,normalised=TRUE)
plot(ckdeNorm,type='multiline',cal='BCAD')
}
\references{
Brown, W. A. 2017. The past and future of growth rate estimation in demographic temporal frequency analysis: Biodemographic interpretability and the ascendance of dynamic growth models. \emph{Journal of Archaeological Science}, 80: 96–108. DOI: https://doi.org/10.1016/j.jas.2017.02.003 \cr
McLaughlin, T. R. 2018. On Applications of Space–Time Modelling with Open-Source 14C Age Calibration. \emph{Journal of Archaeological Method and Theory}. DOI https://doi.org/10.1007/s10816-018-9381-3
}
\seealso{
\code{\link{sampleDates}}
}
|
# Draw 100 standard-normal deviates and display their histogram.
x <- rnorm(100)
hist(x)
| /test.r | no_license | kkondo1981/R-tutorial | R | false | false | 25 | r |
x <- rnorm(100)
hist(x)
|
# Assignment 7 Problem 1
# install.packages("ggplot2")
# install.packages("maps")
# install.packages("ggmap")
library(ggplot2)
library(maps)
library(ggmap)
statesMap = map_data("state")
polling = read.csv("PollingImputed.csv")
Train = subset(polling, Year == 2004 | Year == 2008)
Test = subset(polling, Year == 2012)
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
TestPrediction = predict(mod2, newdata=Test, type="response")
TestPredictionBinary = as.numeric(TestPrediction > 0.5)
predictionDataFrame = data.frame(TestPrediction, TestPredictionBinary, Test$State)
predictionDataFrame$region = tolower(predictionDataFrame$Test.State)
predictionMap = merge(statesMap, predictionDataFrame, by = "region")
predictionMap = predictionMap[order(predictionMap$order),]
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) + geom_polygon(color = "black")
# Read in data
polling = read.csv("PollingData.csv")
str(polling)
table(polling$Year)
summary(polling)
# Install and load mice package
# install.packages("mice")
library(mice)
# Multiple imputation
simple = polling[c("Rasmussen", "SurveyUSA", "PropR", "DiffCount")]
summary(simple)
set.seed(144)
imputed = complete(mice(simple))
summary(imputed)
polling$Rasmussen = imputed$Rasmussen
polling$SurveyUSA = imputed$SurveyUSA
summary(polling)
# Video 3
# Subset data into training set and test set
Train = subset(polling, Year == 2004 | Year == 2008)
Test = subset(polling, Year == 2012)
# Smart Baseline
table(Train$Republican)
sign(20)
sign(-10)
sign(0)
table(sign(Train$Rasmussen))
table(Train$Republican, sign(Train$Rasmussen))
# Video 4
# Multicollinearity
cor(Train)
str(Train)
cor(Train[c("Rasmussen", "SurveyUSA", "PropR", "DiffCount", "Republican")])
# Logistic Regression Model
mod1 = glm(Republican~PropR, data=Train, family="binomial")
summary(mod1)
# Training set predictions
pred1 = predict(mod1, type="response")
table(Train$Republican, pred1 >= 0.5)
# Two-variable model
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
pred2 = predict(mod2, type="response")
table(Train$Republican, pred2 >= 0.5)
summary(mod2)
# Video 5
# Smart baseline accuracy
table(Test$Republican, sign(Test$Rasmussen))
# Test set predictions
TestPrediction = predict(mod2, newdata=Test, type="response")
table(Test$Republican, TestPrediction >= 0.5)
# Analyze mistake
subset(Test, TestPrediction >= 0.5 & Republican == 0) | /Assignment 7 Problem 1.R | no_license | ankitbhargava62/MITx-15.071x-The-Analytics-Edge | R | false | false | 2,469 | r | # Assignment 7 Problem 1
# Assignment 7, Problem 1: choropleth of 2012 US election predictions.
# 'maps'/'ggmap' supply the state polygon outlines consumed by ggplot2.
# install.packages("ggplot2")
# install.packages("maps")
# install.packages("ggmap")
library(ggplot2)
library(maps)
library(ggmap)
# Polygon coordinates for the lower-48 states, one row per vertex.
statesMap = map_data("state")
# Poll data with missing values already filled in (see the mice section below).
polling = read.csv("PollingImputed.csv")
Train = subset(polling, Year == 2004 | Year == 2008)
Test = subset(polling, Year == 2012)
# Logistic regression: P(Republican wins) from two poll summaries.
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
TestPrediction = predict(mod2, newdata=Test, type="response")
# Hard 0/1 call at the 0.5 probability cutoff.
TestPredictionBinary = as.numeric(TestPrediction > 0.5)
predictionDataFrame = data.frame(TestPrediction, TestPredictionBinary, Test$State)
# map_data() regions are lower-case state names, so join on that.
predictionDataFrame$region = tolower(predictionDataFrame$Test.State)
predictionMap = merge(statesMap, predictionDataFrame, by = "region")
# merge() scrambles row order; restore it so the polygons draw correctly.
predictionMap = predictionMap[order(predictionMap$order),]
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) + geom_polygon(color = "black")
# Read in data
polling = read.csv("PollingData.csv")
str(polling)
table(polling$Year)
summary(polling)
# Install and load mice package
# install.packages("mice")
library(mice)
# Multiple imputation
# Impute only the four poll variables; mice fills each NA from the others.
simple = polling[c("Rasmussen", "SurveyUSA", "PropR", "DiffCount")]
summary(simple)
# Fixed seed so the stochastic imputation is reproducible.
set.seed(144)
imputed = complete(mice(simple))
summary(imputed)
# Copy the imputed columns back into the full data frame.
polling$Rasmussen = imputed$Rasmussen
polling$SurveyUSA = imputed$SurveyUSA
summary(polling)
# Video 3
# Subset data into training set and test set
Train = subset(polling, Year == 2004 | Year == 2008)
Test = subset(polling, Year == 2012)
# Smart Baseline
table(Train$Republican)
# sign() maps a poll margin to -1/0/+1: a simple "who leads" baseline.
sign(20)
sign(-10)
sign(0)
table(sign(Train$Rasmussen))
table(Train$Republican, sign(Train$Rasmussen))
# Video 4
# Multicollinearity
# NOTE(review): cor() on the full frame presumably errors on the
# non-numeric columns (e.g. State) -- likely a deliberate teaching step.
cor(Train)
str(Train)
cor(Train[c("Rasmussen", "SurveyUSA", "PropR", "DiffCount", "Republican")])
# Logistic Regression Model
mod1 = glm(Republican~PropR, data=Train, family="binomial")
summary(mod1)
# Training set predictions
pred1 = predict(mod1, type="response")
table(Train$Republican, pred1 >= 0.5)
# Two-variable model
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
pred2 = predict(mod2, type="response")
table(Train$Republican, pred2 >= 0.5)
summary(mod2)
# Video 5
# Smart baseline accuracy
table(Test$Republican, sign(Test$Rasmussen))
# Test set predictions
TestPrediction = predict(mod2, newdata=Test, type="response")
table(Test$Republican, TestPrediction >= 0.5)
# Analyze mistake
subset(Test, TestPrediction >= 0.5 & Republican == 0)
subset(Test, TestPrediction >= 0.5 & Republican == 0) |
# Extracted runnable examples for gratia::evaluate_smooth() (from its .Rd).
library(gratia)
### Name: evaluate_smooth
### Title: Evaluate a smooth
### Aliases: evaluate_smooth evaluate_smooth.gam evaluate_smooth.gamm
###   evaluate_parametric_term evaluate_parametric_term.gam
### ** Examples
library("mgcv")
## Don't show:
# Fixed seed + plain-ASCII printing options so example output is stable.
set.seed(2)
op <- options(cli.unicode = FALSE, digits = 6)
## End(Don't show)
# Simulate Gaussian data and fit an additive model with four 1-d smooths.
dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
m1 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, method = "REML")
evaluate_smooth(m1, "s(x1)")
## 2d example
set.seed(2)
dat <- gamSim(2, n = 1000, dist = "normal", scale = 1)
m2 <- gam(y ~ s(x, z, k = 30), data = dat$data, method = "REML")
evaluate_smooth(m2, "s(x,z)", n = 100)
## Don't show:
# Restore the caller's options.
options(op)
## End(Don't show)
| /data/genthat_extracted_code/gratia/examples/evaluate_smooth.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 726 | r | library(gratia)
### Name: evaluate_smooth
### Title: Evaluate a smooth
### Aliases: evaluate_smooth evaluate_smooth.gam evaluate_smooth.gamm
### evaluate_parametric_term evaluate_parametric_term.gam
### ** Examples
library("mgcv")
## Don't show:
set.seed(2)
op <- options(cli.unicode = FALSE, digits = 6)
## End(Don't show)
dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
m1 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, method = "REML")
evaluate_smooth(m1, "s(x1)")
## 2d example
set.seed(2)
dat <- gamSim(2, n = 1000, dist = "normal", scale = 1)
m2 <- gam(y ~ s(x, z, k = 30), data = dat$data, method = "REML")
evaluate_smooth(m2, "s(x,z)", n = 100)
## Don't show:
options(op)
## End(Don't show)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.baseflow.R
\name{aggregate.baseflow}
\alias{aggregate.baseflow}
\title{Baseflow Summary}
\usage{
\method{aggregate}{baseflow}(x, by = "months", index = FALSE, ...)
}
\arguments{
\item{x}{an object of class "baseflow."}
\item{by}{the time period to aggregate by. See \bold{Details}.}
\item{index}{compute the baseflow index (proportion of baseflow
to total flow) rather than baseflow?}
\item{\dots}{not used, required for other methods.}
}
\value{
The baseflow for each period specified in \code{by}. The units are
the same as for \code{x}.
}
\description{
Computes baseflow statistics for user-specified periods of time.
}
\details{
The argument \code{by} can be either a character indicating the period, or a list
created by \code{setSeasons}. If a character, it must be "months," "years,"
"calendar years," "water years," "climate years," or "total."
May be abbreviated; and "years" is the same as "calendar years."
}
\examples{
\dontrun{
library(smwrData)
data(GlacialRidge)
G12.hysep <- with(ChoptankFlow, hysep(Flow, datetime, da=113,
STAID="01491000"))
# monthly summary of recharge in feet
aggregate(G12.hysep)
}
}
\seealso{
\code{\link{part}}, \code{\link{hysep}}, \code{\link{bfi}},
\code{\link{setSeasons}}
}
\keyword{baseflow}
| /man/aggregate.baseflow.Rd | permissive | ggurjar333/DVstats | R | false | true | 1,336 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.baseflow.R
\name{aggregate.baseflow}
\alias{aggregate.baseflow}
\title{Baseflow Summary}
\usage{
\method{aggregate}{baseflow}(x, by = "months", index = FALSE, ...)
}
\arguments{
\item{x}{an object of class "baseflow."}
\item{by}{the time period to aggregate by. See \bold{Details}.}
\item{index}{compute the baseflow index (proportion of baseflow
to total flow) rather than baseflow?}
\item{\dots}{not used, required for other methods.}
}
\value{
The baseflow for each period specified in \code{by}. The units are
the same as for \code{x}.
}
\description{
Computes baseflow statistics for user-specified periods of time.
}
\details{
The argument \code{by} can be either a character indicating the period, or a list
created by \code{setSeasons}. If a character, it must be "months," "years,"
"calendar years," "water years," "climate years," or "total."
May be abbreviated; and "years" is the same as "calendar years."
}
\examples{
\dontrun{
library(smwrData)
data(GlacialRidge)
G12.hysep <- with(ChoptankFlow, hysep(Flow, datetime, da=113,
STAID="01491000"))
# monthly summary of recharge in feet
aggregate(G12.hysep)
}
}
\seealso{
\code{\link{part}}, \code{\link{hysep}}, \code{\link{bfi}},
\code{\link{setSeasons}}
}
\keyword{baseflow}
|
#
# load data, extract variables, initialize ------------------------------------------------
#
# Explicitly attach the packages this script depends on: rtruncnorm()
# comes from 'truncnorm' and fitdist() from 'fitdistrplus'. The original
# script never loaded them and relied on the calling session.
library(truncnorm)
library(fitdistrplus)
# number of bootstrap iterations
R <- 100000
# load data table (includes biomass and se)
dat <- read.csv("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/106-Costa-2003.csv")
name <- c("106-Costa-2003")
# number of spp
sp <- max(unique(dat$Target))
# calculate sd from se (using number of replicates)
SD <- dat$SE * sqrt(dat$N)
dat$SD <- SD
#
# RII ------------------------------------------------
#
# calculate RII using bootstrap, biomass mean and sd
# assume normally distributed biomass, sample using mean and sd from paper
# generate distribution of possible RIIs and calculate mean and sd of this distribution (SE)
# Preallocate the bootstrap vectors: the original grew each one with
# indexed assignment from c(), which copies the vector on every iteration.
one.one <- numeric(R)
two.one <- numeric(R)
three.one <- numeric(R)
one.two <- numeric(R)
two.two <- numeric(R)
three.two <- numeric(R)
one.three <- numeric(R)
two.three <- numeric(R)
three.three <- numeric(R)
for (i in seq_len(R)) {
  # One bootstrap draw of every treatment mean, truncated at zero
  # (a = 0) because biomass cannot be negative.
  temp <- rtruncnorm(n = nrow(dat), mean = dat$Metric, sd = dat$SD, a = 0)
  # RII = (mixture - control) / (mixture + control) for each
  # target/neighbour combination; row indices follow the input table.
  one.one[i] <- (temp[11] - temp[8]) / (temp[11] + temp[8])
  two.one[i] <- (temp[10] - temp[7]) / (temp[10] + temp[7])
  three.one[i] <- (temp[12] - temp[9]) / (temp[12] + temp[9])
  one.two[i] <- (temp[5] - temp[1]) / (temp[5] + temp[1])
  two.two[i] <- (temp[4] - temp[2]) / (temp[4] + temp[2])
  three.two[i] <- (temp[6] - temp[3]) / (temp[6] + temp[3])
  one.three[i] <- (temp[17] - temp[14]) / (temp[17] + temp[14])
  two.three[i] <- (temp[16] - temp[13]) / (temp[16] + temp[13])
  three.three[i] <- (temp[18] - temp[15]) / (temp[18] + temp[15])
}
boot.rii <- as.matrix(data.frame(one.one, one.two, one.three, two.one, two.two, two.three, three.one, three.two, three.three))
rii.vec <- c()
rii.sd.vec <- c()
# Fit a normal (method of moments) to each bootstrap distribution; its
# mean and sd are the RII point estimate and its SE.
rii.fit <- apply(boot.rii, 2, function(x) fitdist(data = x, distr = "norm", method = "mme"))
for (w in seq_along(rii.fit)) {
  rii.vec[w] <- unname(rii.fit[[w]][[1]][1])
  rii.sd.vec[w] <- unname(rii.fit[[w]][[1]][2])
}
rii <- matrix(rii.vec, nrow = sp, ncol = sp, byrow = TRUE)
rii.sd <- matrix(rii.sd.vec, nrow = sp, ncol = sp, byrow = TRUE)
# save matrices of mean RII and sd RII
write.table(x = rii, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RII/Cntrl/", name, "-RII.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
write.table(x = rii.sd, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RII/Cntrl/", name, "-RIIsd.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
#
# RY ------------------------------------------------
#
# calculate RY (relative yield = mixture / control), yield mean and sd
one.one <- numeric(R)
two.one <- numeric(R)
three.one <- numeric(R)
one.two <- numeric(R)
two.two <- numeric(R)
three.two <- numeric(R)
one.three <- numeric(R)
two.three <- numeric(R)
three.three <- numeric(R)
for (i in seq_len(R)) {
  temp <- rtruncnorm(n = nrow(dat), mean = dat$Metric, sd = dat$SD, a = 0)
  one.one[i] <- temp[11] / temp[8]
  two.one[i] <- temp[10] / temp[7]
  three.one[i] <- temp[12] / temp[9]
  one.two[i] <- temp[5] / temp[1]
  two.two[i] <- temp[4] / temp[2]
  three.two[i] <- temp[6] / temp[3]
  one.three[i] <- temp[17] / temp[14]
  two.three[i] <- temp[16] / temp[13]
  three.three[i] <- temp[18] / temp[15]
}
boot.ry <- as.matrix(data.frame(one.one, one.two, one.three, two.one, two.two, two.three, three.one, three.two, three.three))
ry.vec <- c()
ry.sd.vec <- c()
ry.fit <- apply(boot.ry, 2, function(x) fitdist(data = x, distr = "norm", method = "mme"))
for (w in seq_along(ry.fit)) {
  ry.vec[w] <- unname(ry.fit[[w]][[1]][1])
  ry.sd.vec[w] <- unname(ry.fit[[w]][[1]][2])
}
ry <- matrix(ry.vec, nrow = sp, ncol = sp, byrow = TRUE)
ry.sd <- matrix(ry.sd.vec, nrow = sp, ncol = sp, byrow = TRUE)
write.table(x = ry, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RY/Cntrl/", name, "-RY.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
write.table(x = ry.sd, paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RY/Cntrl/", name, "-RYsd.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
| /spcase106Costa.R | no_license | IbrahimRadwan/NetworkMetaAnalysis | R | false | false | 4,071 | r |
#
# load data, extract variables, initialize ------------------------------------------------
#
# number of bootstrap iterations
R <- 100000
# load data table (includes biomass and se)
dat <- read.csv("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/106-Costa-2003.csv")
name <- c("106-Costa-2003")
# number of spp
sp <- max(unique(dat$Target))
# calculate sd from se (using number of replicates)
SD <- dat$SE * sqrt(dat$N)
dat$SD <- SD
#
# RII ------------------------------------------------
#
# calculate RII using bootstrap, biomass mean and sd
# assume normally distributed biomass, sample using mean and sd from paper
# generate distribution of possible RIIs and calculate mean and sd of this distribution (SE)
one.one <- c()
two.one <- c()
three.one <- c()
one.two <- c()
two.two <- c()
three.two <- c()
one.three <- c()
two.three <- c()
three.three <- c()
for (i in 1:R) {
temp <- rtruncnorm(n = nrow(dat), mean = dat$Metric, sd = dat$SD, a = 0)
one.one[i] <- (temp[11] - temp[8]) / (temp[11] + temp[8])
two.one[i] <- (temp[10] - temp[7]) / (temp[10] + temp[7])
three.one[i] <- (temp[12] - temp[9]) / (temp[12] + temp[9])
one.two[i] <- (temp[5] - temp[1]) / (temp[5] + temp[1])
two.two[i] <- (temp[4] - temp[2]) / (temp[4] + temp[2])
three.two[i] <- (temp[6] - temp[3]) / (temp[6] + temp[3])
one.three[i] <- (temp[17] - temp[14]) / (temp[17] + temp[14])
two.three[i] <- (temp[16] - temp[13]) / (temp[16] + temp[13])
three.three[i] <- (temp[18] - temp[15]) / (temp[18] + temp[15])
}
boot.rii <- as.matrix(data.frame(one.one, one.two, one.three, two.one, two.two, two.three, three.one, three.two, three.three))
rii.vec <- c()
rii.sd.vec <- c()
rii.fit <- apply(boot.rii, 2, function(x) fitdist(data = x, distr = "norm", method = "mme"))
for (w in 1:length(rii.fit)) {
rii.vec[w] <- unname(rii.fit[[w]][[1]][1])
rii.sd.vec[w] <- unname(rii.fit[[w]][[1]][2])
}
rii <- matrix(rii.vec, nrow = sp, ncol = sp, byrow = TRUE)
rii.sd <- matrix(rii.sd.vec, nrow = sp, ncol = sp, byrow = TRUE)
# save matrices of mean RII and sd RII
write.table(x = rii, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RII/Cntrl/", name, "-RII.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
write.table(x = rii.sd, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RII/Cntrl/", name, "-RIIsd.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
#
# RY ------------------------------------------------
#
# calculate RY, yield mean and sd
one.one <- c()
two.one <- c()
three.one <- c()
one.two <- c()
two.two <- c()
three.two <- c()
one.three <- c()
two.three <- c()
three.three <- c()
for (i in 1:R) {
temp <- rtruncnorm(n = nrow(dat), mean = dat$Metric, sd = dat$SD, a = 0)
one.one[i] <- temp[11] / temp[8]
two.one[i] <- temp[10] / temp[7]
three.one[i] <- temp[12] / temp[9]
one.two[i] <- temp[5] / temp[1]
two.two[i] <- temp[4] / temp[2]
three.two[i] <- temp[6] / temp[3]
one.three[i] <- temp[17] / temp[14]
two.three[i] <- temp[16] / temp[13]
three.three[i] <- temp[18] / temp[15]
}
boot.ry <- as.matrix(data.frame(one.one, one.two, one.three, two.one, two.two, two.three, three.one, three.two, three.three))
ry.vec <- c()
ry.sd.vec <- c()
ry.fit <- apply(boot.ry, 2, function(x) fitdist(data = x, distr = "norm", method = "mme"))
for (w in 1:length(ry.fit)) {
ry.vec[w] <- unname(ry.fit[[w]][[1]][1])
ry.sd.vec[w] <- unname(ry.fit[[w]][[1]][2])
}
ry <- matrix(ry.vec, nrow = sp, ncol = sp, byrow = TRUE)
ry.sd <- matrix(ry.sd.vec, nrow = sp, ncol = sp, byrow = TRUE)
write.table(x = ry, file = paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RY/Cntrl/", name, "-RY.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
write.table(x = ry.sd, paste("/Users/nicolekinlock/Documents/Plant Ecology/NetworkMetaAnalysis/Networks/Complete/RY/Cntrl/", name, "-RYsd.csv", sep = ""), sep = ",", row.names = FALSE, col.names = FALSE)
|
#########################################################
# Matrix I used for testing purposes
#########################################################
# m <- matrix(c(4:7), nrow = 2, ncol = 2, byrow = TRUE)
#########################################################
# makeCacheMatrix
#########################################################
makeCacheMatrix <- function(x = matrix())
{
    # Closure-based "cache matrix": wraps a matrix together with a slot
    # for a derived value (the inverse, filled in by cacheSolve).
    cached <- NULL
    # Replace the stored matrix and invalidate any stale cached value.
    set <- function(y)
    {
        x <<- y
        cached <<- NULL
    }
    # Accessors for the matrix itself and for the cached value.
    get <- function() x
    setmatrix <- function(solve) cached <<- solve
    getmatrix <- function() cached
    # Expose the four closures as a named list.
    list(set = set,
         get = get,
         setmatrix = setmatrix,
         getmatrix = getmatrix)
}
#########################################################
# cacheSolve function
#########################################################
cacheSolve <- function(x=matrix())
{
  # Return the inverse of the matrix wrapped by a makeCacheMatrix()
  # object, computing it at most once and caching it for later calls.
  #
  # BUG FIX: the original called x$getmatrix(x) -- getmatrix is a
  # zero-argument closure, so passing x raised
  # "$ operator is invalid for atomic vectors".
  m <- x$getmatrix()
  ### if the cached inverse exists, retrieve it and return it ###
  if(!is.null(m))
  {
    message("getting cached data") ### let the user know cached matrix is being retrieved
    return(m) ### return m and exit function cacheSolve
  }
  ### cache miss: fetch the matrix, invert it, and store the result ###
  # BUG FIX: the original did `matrix <- x$get`, storing the *function*
  # instead of calling it, so solve() was applied to a closure.
  mat <- x$get()
  m <- solve(mat)
  x$setmatrix(m)
  ### return the inverse of the matrix ###
  m
}
| /cacheSolve.R | no_license | npCoursera/ProgrammingAssignment2 | R | false | false | 1,518 | r | #########################################################
# Matrix I used for testing purposes
#########################################################
# m <- matrix(c(4:7), nrow = 2, ncol = 2, byrow = TRUE)
#########################################################
# makeCacheMatrix
#########################################################
makeCacheMatrix <- function(x = matrix())
{
    # Closure-based "cache matrix": wraps a matrix together with a slot
    # for a derived value (the inverse, filled in by cacheSolve).
    cached <- NULL
    # Replace the stored matrix and invalidate any stale cached value.
    set <- function(y)
    {
        x <<- y
        cached <<- NULL
    }
    # Accessors for the matrix itself and for the cached value.
    get <- function() x
    setmatrix <- function(solve) cached <<- solve
    getmatrix <- function() cached
    # Expose the four closures as a named list.
    list(set = set,
         get = get,
         setmatrix = setmatrix,
         getmatrix = getmatrix)
}
#########################################################
# cacheSolve function
#########################################################
cacheSolve <- function(x=matrix())
{
  # Return the inverse of the matrix wrapped by a makeCacheMatrix()
  # object, computing it at most once and caching it for later calls.
  #
  # BUG FIX: the original called x$getmatrix(x) -- getmatrix is a
  # zero-argument closure, so passing x raised
  # "$ operator is invalid for atomic vectors".
  m <- x$getmatrix()
  ### if the cached inverse exists, retrieve it and return it ###
  if(!is.null(m))
  {
    message("getting cached data") ### let the user know cached matrix is being retrieved
    return(m) ### return m and exit function cacheSolve
  }
  ### cache miss: fetch the matrix, invert it, and store the result ###
  # BUG FIX: the original did `matrix <- x$get`, storing the *function*
  # instead of calling it, so solve() was applied to a closure.
  mat <- x$get()
  m <- solve(mat)
  x$setmatrix(m)
  ### return the inverse of the matrix ###
  m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_crew.R
\name{tar_crew}
\alias{tar_crew}
\title{Get crew worker info.}
\usage{
tar_crew(store = targets::tar_config_get("store"))
}
\arguments{
\item{store}{Character of length 1, path to the
\code{targets} data store. Defaults to \code{tar_config_get("store")},
which in turn defaults to \verb{_targets/}.
When you set this argument, the value of \code{tar_config_get("store")}
is temporarily changed for the current function call.
See \code{\link[=tar_config_get]{tar_config_get()}} and \code{\link[=tar_config_set]{tar_config_set()}} for details
about how to set the data store path persistently
for a project.}
}
\value{
A data frame one row per \code{crew} worker and the following columns:
\itemize{
\item \code{controller}: name of the \code{crew} controller.
\item \code{launches}: number of times the worker was launched.
\item \code{seconds}: number of seconds the worker spent running tasks.
\item \code{targets}: number of targets the worker completed and delivered.
}
}
\description{
For the most recent run of the pipeline with \code{\link[=tar_make]{tar_make()}}
where a \code{crew} controller was started, get summary-level information
of the workers.
}
\section{Storage access}{
Several functions like \code{tar_make()}, \code{tar_read()}, \code{tar_load()},
\code{tar_meta()}, and \code{tar_progress()} read or modify
the local data store of the pipeline.
The local data store is in flux while a pipeline is running,
and depending on how distributed computing or cloud computing is set up,
not all targets can even reach it. So please do not call these
functions from inside a target as part of a running
pipeline. The only exception is literate programming
target factories in the \code{tarchetypes} package such as \code{tar_render()}
and \code{tar_quarto()}.
}
\examples{
if (identical(Sys.getenv("TAR_EXAMPLES"), "true")) { # for CRAN
tar_dir({ # tar_dir() runs code from a temp dir for CRAN.
if (requireNamespace("crew", quietly = TRUE)) {
tar_script({
tar_option_set(controller = crew::crew_controller_local())
list(
tar_target(x, seq_len(2)),
tar_target(y, 2 * x, pattern = map(x))
)
}, ask = FALSE)
tar_make()
    tar_crew()
}
})
}
}
\seealso{
Other data:
\code{\link{tar_load_everything}()},
\code{\link{tar_load_raw}()},
\code{\link{tar_load}()},
\code{\link{tar_objects}()},
\code{\link{tar_pid}()},
\code{\link{tar_process}()},
\code{\link{tar_read_raw}()},
\code{\link{tar_read}()}
}
\concept{data}
| /man/tar_crew.Rd | permissive | ropensci/targets | R | false | true | 3,130 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_crew.R
\name{tar_crew}
\alias{tar_crew}
\title{Get crew worker info.}
\usage{
tar_crew(store = targets::tar_config_get("store"))
}
\arguments{
\item{store}{Character of length 1, path to the
\code{targets} data store. Defaults to \code{tar_config_get("store")},
which in turn defaults to \verb{_targets/}.
When you set this argument, the value of \code{tar_config_get("store")}
is temporarily changed for the current function call.
See \code{\link[=tar_config_get]{tar_config_get()}} and \code{\link[=tar_config_set]{tar_config_set()}} for details
about how to set the data store path persistently
for a project.}
}
\value{
A data frame one row per \code{crew} worker and the following columns:
\itemize{
\item \code{controller}: name of the \code{crew} controller.
\item \code{launches}: number of times the worker was launched.
\item \code{seconds}: number of seconds the worker spent running tasks.
\item \code{targets}: number of targets the worker completed and delivered.
}
}
\description{
For the most recent run of the pipeline with \code{\link[=tar_make]{tar_make()}}
where a \code{crew} controller was started, get summary-level information
of the workers.
}
\section{Storage access}{
Several functions like \code{tar_make()}, \code{tar_read()}, \code{tar_load()},
\code{tar_meta()}, and \code{tar_progress()} read or modify
the local data store of the pipeline.
The local data store is in flux while a pipeline is running,
and depending on how distributed computing or cloud computing is set up,
not all targets can even reach it. So please do not call these
functions from inside a target as part of a running
pipeline. The only exception is literate programming
target factories in the \code{tarchetypes} package such as \code{tar_render()}
and \code{tar_quarto()}.
Several functions like \code{tar_make()}, \code{tar_read()}, \code{tar_load()},
\code{tar_meta()}, and \code{tar_progress()} read or modify
the local data store of the pipeline.
The local data store is in flux while a pipeline is running,
and depending on how distributed computing or cloud computing is set up,
not all targets can even reach it. So please do not call these
functions from inside a target as part of a running
pipeline. The only exception is literate programming
target factories in the \code{tarchetypes} package such as \code{tar_render()}
and \code{tar_quarto()}.
}
\examples{
if (identical(Sys.getenv("TAR_EXAMPLES"), "true")) { # for CRAN
tar_dir({ # tar_dir() runs code from a temp dir for CRAN.
if (requireNamespace("crew", quietly = TRUE)) {
tar_script({
tar_option_set(controller = crew::crew_controller_local())
list(
tar_target(x, seq_len(2)),
tar_target(y, 2 * x, pattern = map(x))
)
}, ask = FALSE)
tar_make()
    tar_crew()
}
})
}
}
\seealso{
Other data:
\code{\link{tar_load_everything}()},
\code{\link{tar_load_raw}()},
\code{\link{tar_load}()},
\code{\link{tar_objects}()},
\code{\link{tar_pid}()},
\code{\link{tar_process}()},
\code{\link{tar_read_raw}()},
\code{\link{tar_read}()}
}
\concept{data}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Thin R wrapper over the registered C++ symbol in the 'pairsnp' shared
# library; forwards `file` to the compiled routine via .Call().
# NOTE(review): return semantics (presumably one value per nucleotide of
# the FASTA at `file`) are defined in the C++ source -- confirm there.
# This file is regenerated by compileAttributes(), so edits here,
# including comments, will be overwritten.
import_fasta_to_vector_each_nt <- function(file) {
    .Call('_pairsnp_import_fasta_to_vector_each_nt', PACKAGE = 'pairsnp', file)
}
| /R/RcppExports.R | permissive | gtonkinhill/pairsnp-r | R | false | false | 261 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
import_fasta_to_vector_each_nt <- function(file) {
.Call('_pairsnp_import_fasta_to_vector_each_nt', PACKAGE = 'pairsnp', file)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/N_CVInfoLambda.R
\name{CVInfoLambda-methods}
\alias{CVInfoLambda-methods}
\title{Methods Available for Objects of Class \code{CVInfoLambda}}
\description{
Methods Available for Objects of Class \code{CVInfoLambda}
}
\keyword{internal}
| /man/CVInfoLambda-methods.Rd | no_license | cran/DynTxRegime | R | false | true | 313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/N_CVInfoLambda.R
\name{CVInfoLambda-methods}
\alias{CVInfoLambda-methods}
\title{Methods Available for Objects of Class \code{CVInfoLambda}}
\description{
Methods Available for Objects of Class \code{CVInfoLambda}
}
\keyword{internal}
|
# TO DO - check all data for correct datetime - e.g. current cont_obs are running on winter time, while all the rest of data runs on summer time
{# INFO
#ALT+0
# when birds arrive to nioz
#### !!! if blood not indicated in what, it is asumed that we have no clue whether blood was taken - if blood was taken upon capture, please indicate this, as well as whether biometry and ful, crc done
#### enter fields f_mass, project,species, age and subspecies when birds brought in
}
# Luc please install:
#install.packages('scales')
#### START HERE
# Luc or Martin
Luc = FALSE
# indicate in DB_LOG
dblog = TRUE
{# TOOLS
{# define working directories
if(Luc == TRUE){
wd0 = "C:/Users/ldemonte/Dropbox/data_entry/"
wd = "C:/Users/ldemonte/Dropbox/data_entry/ready_for_DB_upload/"
outdir = "C:/Users/ldemonte/Dropbox/data_entry/uploaded_to_DB/"
wd2 = "C:/Users/ldemonte/Dropbox/AVESatNIOZ/"
}else{
wd0 = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/"
wd = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/ready_for_DB_upload/"
outdir = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/uploaded_to_DB/"
wd2 = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/AVESatNIOZ/"
}
}
{# load packages
require(plyr)
require(XLConnect)
require("RSQLite")
#require("DBI")
require('Hmisc')
}
{# DB connection
db=paste(wd2,"AVESatNIOZ.sqlite",sep="")
#db=paste(wd2,"test.sqlite",sep="")
#db=paste(wd2,"test2.sqlite",sep="")
}
{# metadata
# birds table
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
# captures table
con = dbConnect(dbDriver("SQLite"),dbname = db)
z = dbGetQuery(con, "SELECT*FROM CAPTURES")
dbDisconnect(con)
# biometry
o = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = readWorksheetFromFile(paste(wd2, 'morphometrics+sex_2016.xlsx', sep = ''), sheet=1)
v$RNR[v$RNR%in%o$RINGNR]
# locations
g = readWorksheetFromFile(paste(wd2, 'catch_locations.xlsx', sep = ''), sheet=1)
}
{# !!!! DEFINE CONSTANTS
catch = c('Richel', 'Schier','Griend','Vistula', 'Mokbaai') # define off NIOZ catching locations
}
}
# CHECK BEFORE UPLOAD
{# prepare
con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS CAPTURES")
a = dbGetQuery(con, "SELECT*FROM CAPTURES")
oo = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'CAPTURES'")
dbDisconnect(con)
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
}
{# check WHAT entries to know whether additional uploads needed
l = list()
j =NA
for(i in 1:length(f)){#{2){#
#i = 1
m = readWorksheetFromFile(f[i], sheet=1)
{#### if this part does not work hashtag it out
#m[m==""] = NA
#m[m==" "] = NA
#m[m=="NA"] = NA
####
}
#print(i)
#print(unique(m$what)[!is.na(unique(m$what))])
l[[i]] = data.frame(f = i , what = if(length(unique(m$what)[!is.na(unique(m$what))])==0){NA}else{unique(m$what)[!is.na(unique(m$what))]}, stringsAsFactors = FALSE)
j = c(j,unique(m$what)[!is.na(unique(m$what))])
}
#ll = do.call(rbind,l)
#f2[i]
print( unique(j))
}
{# check HEALTH entries to know whether additional uploads needed - FINISH CLEANING
l = list()
j =NA
for(i in 1:length(f)){#{2){#
#i = 20
m = readWorksheetFromFile(f[i], sheet=1)
#print(i)
#print(unique(m$health)[!is.na(unique(m$health))])
l[[i]] = data.frame(f = i , health = if(length(unique(m$health)[!is.na(unique(m$health))])==0){NA}else{unique(m$health)[!is.na(unique(m$health))]}, stringsAsFactors = FALSE)
j = c(j,unique(m$health)[!is.na(unique(m$health))])
}
#ll = do.call(rbind,l)
#f2[i]
print(unique(j))
}
{# UPLOAD CAPTURES
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#for(i in 1:length(f)){#length(f)){#{2){#
i = 1
f2[i]
{# prepare
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
#names(m)[names(m) == 'now'] = 'at'
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
#m$capture = as.character(m$capture)
#m$release = as.character(m$release)
m$year_ = substring(m$capture, 1,4)
{#### if this part does not work, hashtag it out
m[m==""] = NA
m[m==" "] = NA
m[m=="NA"] = NA
####
}
if(length(names(m)[names(m)=='c_pk'])==0){m$c_pk = NA}
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# upload to captures
# Select the CAPTURES schema columns, then append to the CAPTURES table unless
# this file name already appears in DBLOG remarks (duplicate-upload guard).
#print(names(m)[!names(m)%in%c("year_", "capture", "at","release", "where", "bird_ID", "what", "what_ID", "health", "feet","mass", "remarks", "author", "plum", "molt","molt_col", "L01","L02","L03","L04","L05","L06","L07","L08","L09","L10","R01","R02","R03","R04","R05","R06","R07","R08","R09","R10","crc_now", "capture_pk")])
mm = m[,c("year_", "capture", "at","release", "where", "bird_ID", "what", "what_ID", "health", "feet","mass", "with", "remarks", "author", "plum", "molt","molt_col", "L01","L02","L03","L04","L05","L06","L07","L08","L09","L10","R01","R02","R03","R04","R05","R06","R07","R08","R09","R10","crc_now","pic", "c_pk")]
#mm$capture = as.character(mm$capture)
#mm$release = as.character(mm$release)
# oo holds previous DBLOG rows for CAPTURES (queried earlier) -- TODO confirm
if(f2[i]%in%oo$remarks){print('NO UPLOAD!!! - data already in DB - see DBLOG table')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
#print(names(z)[!names(z)%in%names(mm)])
#print(names(mm)[!names(mm)%in%names(z)])
dbWriteTable(con, name = "CAPTURES", value = mm, row.names = FALSE, append = TRUE)
# log the upload so the same file cannot be uploaded twice
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'CAPTURES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste(f2[i],'uploaded to captures'))
}
}
#}
}
{# create/update BIRDS entries
# Re-list the data_entry files and re-read the current workbook, applying the
# same column renaming / NA normalisation as the CAPTURES upload section above.
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#for(i in 1:length(f)){#{2){#
i = 1
{# prepare
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
#names(m)[names(m) == 'now'] = 'at'
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
#print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
#print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
m$year_ = substring(m$capture, 1,4)
m[m==""] = NA
m[m==" "] = NA
m[m=="NA"] = NA
# guarantee optional columns exist for later fixed column selections
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# IF BIRD ARRIVES to NIOZ - create its data entry line and if data missing create TO_DO
# Rows where the bird was caught (at a catching site, or "capt" in what) become
# new BIRDS entries; missing mandatory columns generate TO_DO reminders.
mm = m[m$at%in%catch | grepl("capt",m$what, perl = TRUE),]
if(nrow(mm)==0){print('no capt in what')}else{
# TO_DO entry if data missing: each flag is 1 when the column is present
mass_f = length(names(mm)[names(mm)=='mass_f'])
project = length(names(mm)[names(mm)=='project'])
species = length(names(mm)[names(mm)=='species'])
subspecies = length(names(mm)[names(mm)=='subspecies'])
age = length(names(mm)[names(mm)=='age'])
if((mass_f+project+species+subspecies+age) < 5){
mx = mm[,c('capture', 'bird_ID', 'what')]
# list only the missing column names in 'what'
mx$what = paste(if(mass_f==0){'mass_f'}, if(age==0){'age'},if(species==0){'species'},if(subspecies==0){'subspecies'},if(project==0){'project'}, sep =",")
mx$capture = as.character(mx$capture)
mx$datetime_solved = mx$remarks = mx$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no', paste(if(mass_f==0){'mass_f'}, if(age==0){'age'},if(species==0){'species'},if(subspecies==0){'subspecies'},if(project==0){'project'},sep=","), 'column names and data entry despite capt in what in capture sheet, to do created'))
}
# add data to birds
mm$capture = as.character(mm$capture)
mm$release = as.character(mm$release)
mm$year_ = substring(mm$capture, 1,4)
mm$blood = ifelse(grepl("blood",mm$what, perl = TRUE), 'yes',NA) #### !!! if blood not indicated in what, it is asumed that we have no clue whether blood taken
mm$sex_method = ifelse(grepl("blood",mm$what, perl = TRUE), 'blood',NA)
# BUG FIX: the else branches used '==' (comparison) instead of '=' (assignment),
# so when the measurement columns were present, bio_datetime / ful_datetime were
# never created and the column selection into v below failed.
if(length(names(mm)[names(mm)=='wing']) == 0){mm$wing = mm$bill = mm$totalhead = mm$tarsus = mm$tartoe = mm$bio_datetime = mm$bio_author = NA}else{mm$bio_datetime = mm$capture; mm$bio_author = mm$author}
if(length(names(mm)[names(mm)=='mass_f']) == 0){mm$mass_f = NA}
if(length(names(mm)[names(mm)=='project']) == 0){mm$project = NA}
if(length(names(mm)[names(mm)=='subspecies']) == 0){mm$subspecies = NA}
if(length(names(mm)[names(mm)=='species']) == 0){mm$species = NA}
if(length(names(mm)[names(mm)=='age']) == 0){mm$age = NA}
if(length(names(mm)[names(mm)=='height_1']) == 0){mm$muscle = mm$height_1 = mm$width_1 = mm$height_2 = mm$width_2 = mm$ful_datetime = mm$ful_author = NA}else{mm$ful_datetime = mm$capture; mm$ful_author = mm$author}
# fields unknown at arrival (duplicate site_r assignment removed)
mm$end_ = mm$end_type = mm$site_r = mm$bird_pk = mm$sex = mm$lat_r = mm$lon_r = NA
# UPDATE CATCHING LOCATIONS: rename workbook columns to BIRDS schema names
names(mm)[names(mm)=='capture'] = 'caught'
names(mm)[names(mm)=='release'] = 'start_'
names(mm)[names(mm)=='at'] = 'site_c'
names(mm)[names(mm)=='where'] = 'current_av'
names(mm)[names(mm)=='mass'] = 'mass_c'
mm$site_c = capitalize(tolower(mm$site_c))
mm$home_av = mm$current_av
mm$crc = mm$crc_now
# look up catching-site coordinates in the locations table g
mm$lat_c = g$lat[match(mm$site_c,g$abb)]
mm$lon_c = g$lon[match(mm$site_c,g$abb)]
#x = c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_","end_","end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","remarks", 'bird_pk')
#x[!x%in%names(mm)]
v = mm[,c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_","end_","end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", 'bird_pk')]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = v, row.names = FALSE, append = TRUE)
#dbGetQuery(con, "UPDATE BIRDS SET caught = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID,
#										start_ = (SELECT temp.release FROM temp WHERE temp.bird_ID = BIRDS.bird_ID,
#										site_c = (SELECT temp.at FROM temp WHERE temp.bird_ID = BIRDS.bird_ID
#										")
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'new birds', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('capt info uploaded to BIRDS for', mm$bird_ID))
}
}
{# IF BIRD ENDS at NIOZ
# Rows marking release/death fill end_, end_type and release coordinates in
# BIRDS via a temp table join on bird_ID.
# NOTE(review): m$at%in%catch also selects freshly caught birds here, mirroring
# the arrival block above -- confirm that is intended for the "ends" case.
mm = m[m$at%in%catch | grepl("free",m$what, perl = TRUE) | grepl("died",m$what, perl = TRUE) | grepl("dead",m$what, perl = TRUE) | grepl("killed",m$what, perl = TRUE) | grepl("killed",m$health, perl = TRUE) | grepl("died",m$health, perl = TRUE) | grepl("dead",m$health, perl = TRUE),]
if(nrow(mm) > 0){
# if the end marker lives in 'health' rather than 'what', use health
mm$what = ifelse(!mm$what%in%c("free","died","killed"), mm$health, mm$what)
mm$release = as.character(mm$release)
mm$type = ifelse(grepl("free",mm$what, perl = TRUE), 'released', ifelse(grepl("dead",mm$what, perl = TRUE), 'died', ifelse(grepl("died",mm$what, perl = TRUE), 'died', ifelse(grepl("killed",mm$what, perl = TRUE), 'killed', NA))))
mm$where = capitalize(tolower(mm$where))
# release-site coordinates from the locations table g
mm$lat_r = g$lat[match(mm$where,g$abb)]
mm$lon_r = g$lon[match(mm$where,g$abb)]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('bird_ID','release','where','type', 'lat_r', 'lon_r')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET end_ = (SELECT temp.release FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							site_r = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							lat_r = (SELECT temp.lat_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							lon_r = (SELECT temp.lon_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							end_type = (SELECT temp.type FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('end info uploaded to BIRDS', mm$bird_ID))
}else{print('no free, killed, died in what or health')}
}
{# IF WHAT = SWITCH THEN UPDATE HOME AVIARY FROM WHERE
# "switch" rows move the bird's home aviary; keep only the most recent
# capture per bird and push its 'where' into BIRDS.home_av.
mm = m[which(!is.na(m$what)),]
mm = mm[grepl("switch",mm$what, perl = TRUE),c('bird_ID', 'capture','where', 'what','home')]
# per bird, take 'where' from the latest capture row
mm = ddply(mm,.(bird_ID), summarise, where = where[capture == max(capture)])
if(nrow(mm) > 0){
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET home_av = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (SELECT * FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('home_av updated in BIRDS for',mm$bird_ID))
}else{print('no switch in what')}
}
{# update current aviary and mass values
{# update current aviary
# BUG FIX: the original filter used '|' between the two negated grepl() calls,
# which is TRUE for every row except those containing BOTH "obs" and "cons";
# the intent is to exclude pure observation rows, i.e. neither "obs" nor "cons".
mm = m[!grepl("obs",m$what, perl = TRUE) & !grepl("cons",m$what, perl = TRUE),]
# per bird, take 'where' from the latest capture row
mm = ddply(mm,.(bird_ID), summarise, where = where[capture == max(capture)])
# values that are catching-site abbreviations are not aviaries -> NA
mm$where = ifelse(tolower(mm$where)%in%tolower(unique(g$abb[!is.na(g$abb)])), NA, mm$where)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET current_av = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (SELECT * FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('current_av updated in BIRDS')
}
{# update current mass
m2=m
# fall back to the 'with' column when mass is missing -- presumably mass
# recorded together with an attachment; TODO confirm semantics of 'with'
m2$mass[is.na(m2$mass)] = m2$with[is.na(m2$mass)]
# per bird, take the mass from the latest capture row
m2 = ddply(m2[!is.na(m2$mass),],.(bird_ID), summarise, mass = mass[capture == max(capture)])
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = m2, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET mass_c = (SELECT temp.'mass' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (SELECT * FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('mass_c updated in BIRDS')
}
}
{# update BIRDS, if data present, or TO_DO where data missing but what is cr,blood,bio,ul,ful- note that blood means that update to SEX is needed
# update crc_now
# BUG FIX: 'capture' is needed by the TO_DO branch below but was dropped from
# the subset, making mx = mm[,c('capture',...)] fail with "undefined columns".
mm = m[!(is.na(m$crc_now)| m$crc_now%in%c('yes_flag','no_flag','no_metal','',' ')),c('capture','bird_ID', 'crc_now')]
if(nrow(mm) > 0){
# NOTE(review): rows with NA crc_now were already excluded above, so this
# TO_DO branch can never trigger -- kept for safety, confirm intent.
if(nrow(mm[!is.na(mm$crc_now),]) == 0){
mx = mm[,c('capture', 'bird_ID', 'crc_now')]
mx$what = 'cr'
mx$capture = as.character(mx$capture)
mx$datetime_solved = mx$remarks = mx$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
# BUG FIX: connection was never closed on this branch (leak)
dbDisconnect(con)
print('cr in what but not data in crc_now, TODO created')
}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# keep the temp schema as before: bird_ID + crc_now only
dbWriteTable(con, name = "temp", value = mm[!is.na(mm$crc_now),c('bird_ID','crc_now')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET crc_now = (SELECT temp.crc_now FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (SELECT * FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('crc_now updated in birds for',mm$bird_ID))
}
}else{print('no crc_now change')}
# update blood
# "blood" in what: flag blood = 'yes' in BIRDS and open a TO_DO for sexing.
mm = m[which(grepl("blood",m$what, perl = TRUE)) ,]
if(nrow(mm) > 0){
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'sex'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
mm$blood = 'yes'
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
							blood = (SELECT temp.blood FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
#dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'TO_DO', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
#dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('blood updated in BIRDS and TO_DO for sex created', mm$bird_ID))
}else{print('no blood in what')}
# update bio
# "bio" in what (excluding capture rows): push biometrics into BIRDS, or open
# a TO_DO when the biometric columns are absent from the sheet.
mm = m[ which(grepl("bio",m$what, perl = TRUE) & !grepl("capt",m$what, perl = TRUE)) ,]
if(nrow(mm)==0){print('no bio in what')}else{
if(length(names(mm)[names(mm)=='wing']) == 0){
con = dbConnect(dbDriver("SQLite"),dbname = db)
mm = mm[,c('capture', 'bird_ID', 'what','author')]
mm$what = 'bio'
mm$capture = as.character(mm$capture)
#mm$author = 'jh'
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
# still record who measured and when, even though values are missing
dbExecute(con, "UPDATE BIRDS SET
							bio_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('no bio columns in capture sheet; to do created')
print(paste('bio_datetime and bio_author updated in BIRDS for', mm$bird_ID))
}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('author','capture','bird_ID','wing','bill','totalhead', 'tarsus', 'tartoe')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
							bio_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							wing = (SELECT temp.wing FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							bill = (SELECT temp.'bill' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							totalhead = (SELECT temp.totalhead FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							tarsus = (SELECT temp.tarsus FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							tartoe = (SELECT temp.tartoe FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('bio updated in BIRDS for', mm$bird_ID))
}
}
# update cr
# "cr" (colour-ring) in what, excluding capture and "crc" rows: write crc_now
# into BIRDS.crc, or open a TO_DO when crc_now was left empty.
mm = m[ which(grepl("cr",m$what, perl = TRUE)& !grepl("capt",m$what, perl = TRUE) & !grepl("crc",m$what, perl = TRUE)) ,]
if(nrow(mm)==0){print('no cr in what')}else{
if(nrow(mm[is.na(mm$crc_now),]) > 0){
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'cr'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no crc_now entry despite cr in what in capture sheet, to do created for', mm$bird_ID))
}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('bird_ID','crc_now')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET crc = (SELECT temp.crc_now FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('crc updated in birds', mm$bird_ID))
}
}
# update ful
# "ful" (ultrasound fullness) in what, excluding capture rows: push muscle /
# height / width measurements into BIRDS, or open a TO_DO if columns missing.
mm = m[ grepl("ful",m$what, perl = TRUE) & !grepl("capt",m$what, perl = TRUE) ,]
if(nrow(mm)==0){print('no ful in what')}else{
if(length(names(mm)[names(mm)=='height_1']) == 0){
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'ful'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no ful column names and data entry despite ful in what in capture sheet, to do created', mm$bird_ID))
}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('author','capture','bird_ID','muscle','height_1','width_1','height_2', 'width_2')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
							ful_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							ful_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							muscle = (SELECT temp.muscle FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							height_1 = (SELECT temp.'height_1' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							width_1 = (SELECT temp.width_1 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							height_2 = (SELECT temp.height_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
							width_2 = (SELECT temp.width_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
							WHERE
							EXISTS (
							SELECT *
							FROM temp
							 WHERE temp.bird_ID = BIRDS.bird_ID)
							")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('ful updated in birds for', mm$bird_ID))
}
}
}
{# make entry in DB_LOG
# Record that this file's BIRDS updates were applied (weekly run).
con = dbConnect(dbDriver("SQLite"),dbname = db)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
}
print(paste(f2[i],'updated BIRDS'))
}
{# update SPECIAL tables
{# prepare
# Re-read the current data_entry workbook and the CAPTURES schema/DBLOG
# history needed by the BIO_TRAIN / ULTRASOUND / SAMPLES / HARN sections.
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#f = list.files(path=paste(outdir,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
#f2 = list.files(path=paste(outdir,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS CAPTURES")
a = dbGetQuery(con, "SELECT*FROM CAPTURES")
oo = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'CAPTURES'")
dbDisconnect(con)
i = 1
#for(i in 1:length(f)){#{2){#
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
#names(m)[names(m) == 'now'] = 'at'
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
m$year_ = substring(m$capture, 1,4)
#m[m==""] = NA
#m[m==" "] = NA
#m[m=="NA"] = NA
# guarantee optional columns exist for later fixed column selections
if(length(names(m)[names(m)=='c_pk'])==0){m$c_pk = NA}
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# update BIO_TRAIN if btrain or utrain or ult in WHAT
# Training measurements: btrain supplies biometrics (wing..tartoe), utrain
# supplies ultrasound columns; absent columns are filled with NA so the
# BIO_TRAIN schema selection below always succeeds.
mm = m[ grepl("btrain",m$what, perl = TRUE) | grepl("utrain",m$what, perl = TRUE) ,]
mm = mm[ !is.na(mm$what) ,]
if(nrow(mm)>0){
mm$datetime_ = as.character(mm$capture)
mm$year_ = substring(mm$capture, 1,4)
if(TRUE%in%unique(grepl("btrain",mm$what, perl = TRUE)) & TRUE%in%unique(grepl("utrain",mm$what, perl = TRUE))){
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle','height_1','width_1','height_2','width_2')]
mm$remarks = mm$bio_pk = NA
}else{ if(TRUE%in%unique(grepl("btrain",mm$what, perl = TRUE))){
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe')]
mm$muscle = mm$height_1 = mm$width_1 = mm$height_2 = mm$width_2 = mm$remarks = mm$bio_pk = NA
}else{ if(TRUE%in%unique(grepl("utrain",mm$what, perl = TRUE))){
mm$wing = mm$tarsus = mm$tartoe = mm$bill = mm$totalhead = NA
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle','height_1','width_1','height_2','width_2')]
mm$remarks = mm$bio_pk = NA
}}}
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIO_TRAIN", value = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle', 'height_1','width_1','height_2','width_2','remarks','bio_pk')], row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIO_TRAIN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
# NOTE(review): mm$capture was dropped by the subsets above, so paste() silently
# omits it here -- message prints without the capture datetime; confirm intent.
print(paste(mm$capture,'BIO_TRAIN data added for', mm$bird_ID))
dbDisconnect(con)
}else{print("no btrain or utrain in WHAT")}
}
{# update ULTRASOUND table if UL present
# "ul" in what (excluding "ful"): append ultrasound measurements, or open a
# TO_DO when the measurement columns are missing; guarded against re-upload
# of the same file via DBLOG remarks.
mm = m[ grepl("ul",m$what, perl = TRUE) ,]
mm = mm[ !is.na(mm$what) ,]
mm = mm[ !grepl("ful",mm$what, perl = TRUE) ,]
if(nrow(mm)==0){print('no ul in what')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
u = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'ULTRASOUND'")
dbDisconnect(con)
if(nrow(u)==0 | !f2[i]%in%u$remarks){
if(length(names(mm)[names(mm)=='height_1']) == 0){
con = dbConnect(dbDriver("SQLite"),dbname = db)
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'ul'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no ul column names and data entry despite ul in what in capture sheet, to do created for', mm$bird_ID))
}else{
mm$ultra_pk=NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# BUG FIX: append was FALSE, which would fail (or clobber) an existing
# ULTRASOUND table; every other table upload in this script appends.
dbWriteTable(con, name = "ULTRASOUND", value = mm[,c('author','capture','bird_ID','muscle','height_1','width_1','height_2', 'width_2','remarks','ultra_pk')], row.names = FALSE, append = TRUE)
# BUG FIX: the log row was assigned to 'v' but the stale 'dv' from a previous
# section was written to DBLOG, so the duplicate-upload guard never worked.
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'ULTRASOUND', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('ul added to ULTRASOUND for', mm$bird_ID))
}
}else{print('NO UPLOAD!!! - data already in ULTRASOUND table - see DBLOG table')}
}
}
{# update SAMPLE table
# blood / skin samples with a what_ID are appended to SAMPLES; blood stays at
# NIOZ, skin goes to MPIO. Guarded against re-upload via DBLOG remarks.
mm = m[ which((grepl("blood",m$what, perl = TRUE) & !is.na(m$what_ID))| (grepl("skin",m$what, perl = TRUE) & !is.na(m$what_ID))),]
mm = mm[ !is.na(mm$what) ,]
if(nrow(mm)==0){print('no blood or skin in what or no what_ID')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
u = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'SAMPLES'")
dbDisconnect(con)
if(nrow(u)==0 | !f2[i]%in%u$remarks){
mm$sample_pk=NA
mm$datetime_=as.character(mm$capture)
mm$type = ifelse(grepl("blood",mm$what, perl = TRUE), 'blood', ifelse(grepl("skin",mm$what, perl = TRUE), 'skin',NA))
mm$where = ifelse(mm$type == 'blood', 'NIOZ','MPIO')
mm$remarks = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "SAMPLES", value = mm[,c('datetime_','type','what_ID','where','remarks','sample_pk')], row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'SAMPLES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('samples added to samples for', mm$bird_ID))
}else{print('NO UPLOAD!!! - data already in SAMPLES table - see DBLOG table')}
}
}
{# update HARN table if all HARN columns present and 'neck' value entered
# harness-fitting measurements; rows are taken only when 'neck' is filled in
if(length(names(m)[names(m)=='neck']) == 1){
mm = m[!is.na(m$neck) & !m$neck %in% c(""," "),]
if(nrow(mm)>0){
mm = mm[,c('capture', 'bird_ID','what', 'what_ID', 'tilt','neck','armpit','back','size')]
mm$harn_pk= NA
mm$capture = as.character(mm$capture)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "HARN", value = mm, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'HARN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste(mm$capture,'HARN data added for', mm$bird_ID))
dbDisconnect(con)
}else{print('no harn data although neck column present')}}else{print('no harn additional data = no neck columnt')}
}
}
{# MOVE THE FILE TO DONE
# archive the processed data_entry workbook into outdir so it is not re-read
file.rename(f[i], paste(outdir,f2[i], sep = ''))
}
print(paste('uploaded',f2[i]))
###}
##### AFTER UPLOAD GREY OUT THE DATA IN THE SHEETS OF VISITS FILE
{# UPLOAD VISITS - current way or date time based ---- NA what check
{# prepare
# current visits data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM VISITS")
dbDisconnect(con)
# ignore continuous-observation session rows when counting existing visits
d = d[ !grepl("session", d$remarks, perl = TRUE) ,]
if(nrow(d)> 0){d$v_pk = 1:nrow(d)}
# current visits data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='visits')
# row position serves as an ad-hoc key to find rows not yet uploaded
v$v_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$v_pk>max(d$v_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check
# Interactive sanity checks on the new visit rows; each expression prints
# offending rows (empty output = check passed). Nothing is modified here.
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%c(a)] # "drew" "ih" "ms" "kc" "others"
# check whether 'where' field has only allowed values
v[!v$where%in%c(paste('o', seq(1,8,1), sep=""),paste('w', seq(1,7,1), sep=""), 'wu','out', 'hall', 'front', 'back','tech','attic'),]
# datetimes
v[is.na(v$start),] # check if start time is NA
v[is.na(v$end),] # check if end time is NA
v[which((!is.na(v$start) | !is.na(v$end)) & v$start>v$end), ] # check whether end happened before start
v[which(as.numeric(difftime(v$start,trunc(v$start,"day"), units = "hours"))<6),] # visits starting before 6:00
v[which(as.numeric(difftime(v$end,trunc(v$end,"day"), units = "hours"))<6),] # visits ending before 6:00
v[which(as.numeric(difftime(v$start,trunc(v$start,"day"), units = "hours"))>22),] # visits starting after 22:00
v[which(as.numeric(difftime(v$end,trunc(v$end,"day"), units = "hours"))>22),] # visits ending after 22:00
# check rows with NA in what
v[is.na(v$what),]
# check rows with multiple what info
#v[!v$what%in%c(NA,"check","floor","feather","food","fff", "catch", "release", "process", "clean", "bleach","clhall", "logger","harness","dummies", "things", "obs", "cons","ul"),]
# check whether all in what is defined and show the entries which are not
g = unique(unlist(strsplit(v$what, ',')))
gg = g[!g%in%c(NA,"check","dcheck","floor","feather","food","fff", "flood","catch", "release", "process", "clean", "bleach","clhall", "logger","harness","dummies", "things", "obs", "cons","ul","repair", "prep","light_off","set","water","rinse","noise")]
#gg
if(length(gg)>0){
for(i in 1:length(gg)){
print(v[grepl(gg[i],v$what, perl = TRUE),])
}
}else{print('no undefined what')}
}
{# upload
# Append the new (checked) visit rows to VISITS and log the run in DBLOG.
if(nrow(v)>0){
v$v_pk = NA
v$start = as.character(v$start)
v$end = as.character(v$end)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "VISITS", value = v[,c("author","where","start","what","end","comments","v_pk")], row.names = FALSE, append = TRUE)
# BUG FIX: script name was misspelled 'DB_uploda.R', breaking DBLOG queries
# that filter on the script column; every other log row uses 'DB_upload.R'.
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'VISITS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = '', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('VISITS data uploaded from', v$start[1], 'to', v$start[nrow(v)]))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
{# UPLOAD CONTINUOUS OBSERVATIONS - Z080710 needs SLEEP
{# prepare
# current CONS data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM CONT_OBS")
dbDisconnect(con)
# current con_obs data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='continuous_observations')
# row position serves as an ad-hoc key to find rows not yet uploaded
v$cont_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$cont_pk>max(d$cont_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check
# Interactive sanity checks on new continuous-observation rows. NOTE: the
# last part ADDS prev/dur columns to v; they are removed again before upload.
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%a] # "drew" "ih" "ms" "kc" "others"
# check aviary
v[!v$aviary%in%c(paste('o', seq(1,8,1), sep=""),paste('w', seq(1,7,1), sep="")),]
# check unique new sessions
unique(v$session)
# check if bird_ID correct
# birds table
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
v[!v$bird_ID%in%c(b$bird_ID),]
# check that each session has only one bird_ID
vv = ddply(v,.(session, bird_ID), summarise, n = length(bird_ID))
vv[duplicated(vv$session),]
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# check rows with NA in beh
v[is.na(v$beh),]
# check whether 'beh' field has only allowed values
v[!v$beh%in%c('sleep', 'rest', 'stand', 'preen','stretch','hop', 'hh', 'walk','fly', 'run', 'active', 'eat', 'prob', 'peck','drink', 'ruffle'),]
# sure - y,n
v[!v$sure%in%c('n','y'),]
# check whether all birds observed have rest or sleep OR not wrong time and hence too long sleep
# prev = previous observation time within the session (first row maps to itself)
v=ddply(v,.(session), transform, prev = c(datetime_[1],datetime_[-length(datetime_)]))
v$dur = difftime(v$datetime_,v$prev, units = 'secs')
v[as.numeric(v$dur)>5*60,] # shows lines with behaviour that lasted longer than 5 min
#v[v$bird_ID == 'Z080704',]
vv = ddply(v,.(bird_ID), summarise, sleep = length(sure[beh%in%c('sleep','rest')]),dur = sum(dur[beh%in%c('sleep','rest')]))
vv # shows duration of sleep/rest observation per bird
}
{# upload
if(nrow(v)>0){
v$cont_pk = NA
v$dur = v$prev = NULL
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
# to CONT_OBS
dbWriteTable(con, name = "CONT_OBS", value = v, row.names = FALSE, append = TRUE)
# to VISITS
names(v)[names(v)=='aviary'] = 'where'
vv = ddply(v,.(author, where, session, bird_ID), summarise, start = min(datetime_), what = 'cons', 'general_check' = 'n', end = max(datetime_), comments = NA)
vv$comments = paste('session', vv$session, 'bird_ID', vv$bird_ID)
vv$session = vv$bird_ID = NULL
vv$v_pk = NA
dbWriteTable(con, name = "VISITS", value = vv[,c("author","where","start","what","end","comments","v_pk")], row.names = FALSE, append = TRUE)
# update DBLOG
dv1 = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'CONT_OBS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE)
dv2 = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'VISITS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'cons', script = 'DB_uploda.R', remarks = NA, stringsAsFactors = FALSE)
dv = rbind(dv1, dv2)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('CONT_OBS and VISITS data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime))))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD AUTHORS
# Reads the 'authors' sheet, keeps only rows not yet in AUTHORS, prints checks,
# then appends the new rows to AUTHORS and logs the upload in DBLOG.
{# prepare
# current AUTHORS data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM AUTHORS")
dbDisconnect(con)
# current authors data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='authors')
v$authors_pk = seq_len(nrow(v)) # seq_len() is safe for an empty sheet
if(nrow(d)>0){
v = v[v$authors_pk>max(d$authors_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints suspect rows for visual inspection
# NAs in initials
v[is.na(v$initials),]
# NAs in name
v[is.na(v$name),]
# NAs in surname
v[is.na(v$surname),]
# NAs in contact
v[is.na(v$contact),]
# alias and project
unique(unlist(strsplit(v$alias, ',')))
unique(unlist(strsplit(v$project, ',')))
# datetimes
v$start_ = as.POSIXct(v$start_, format="%Y-%m-%d")
v$end_ = as.POSIXct(v$end_, format="%Y-%m-%d")
v[is.na(v$start_),] # check if start time is NA
v[is.na(v$end_),] # check if end time is NA
v[which((!is.na(v$start_) | !is.na(v$end_)) & v$start_>v$end_), ] # check whether end happened before start; which() drops the NA comparisons
}
{# upload
if(nrow(v)>0){
v$authors_pk = NA # FIX: was 'v$v_pk = NA' (copy-paste from the VISITS section); pk is assigned by the DB
v$start_ = as.character(v$start_)
v$end_ = as.character(v$end_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "AUTHORS", value = v, row.names = FALSE, append = TRUE) # FIX: rows were appended to the VISITS table instead of AUTHORS
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'AUTHORS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'updated', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # FIX: script was misspelled 'DB_uploda.R'
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('AUTHORS data uploaded'))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD DEVICE
# Reads the 'devices' sheet, keeps only rows not yet in DEVICES, prints checks,
# then appends the new rows to DEVICES and logs the upload in DBLOG.
{# prepare
# current DEVICES data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM DEVICES")
dbDisconnect(con)
# current devices data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='devices')
v$devices_pk = seq_len(nrow(v)) # seq_len() is safe for an empty sheet
if(nrow(d)>0){
v = v[v$devices_pk>max(d$devices_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints suspect rows for visual inspection
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# NAs in device
v[is.na(v$device),]
# check whether 'device' field has only allowed values
v[!v$device%in%c('acc', 'toa', 'harn', 'dummie'),]
# check ID
# # of characters shall be 3
v[nchar(v$ID)!=3,]
# first letter
unique(substring(v$ID,1,1))
# numbers
unique(substring(v$ID,2,3))
# what
v[!v$what%in%c('on', 'off', 'dd','fail'),]
# batt
unique(v$batt)
}
{# upload
if(nrow(v)>0){
v$devices_pk = NA # pk is assigned by the DB
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "DEVICES", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'DEVICES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # FIX: script was misspelled 'DB_uploda.R'
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('DEVICES data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime_)))) # FIX: was v$datetime, relying on fragile partial matching of '$'
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD AVIARIES
# Reads the 'aviaries' sheet, keeps only rows not yet in AVIARIES, prints checks,
# then appends the new rows to AVIARIES and logs the upload in DBLOG.
{# prepare
# current AVIARIES data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM AVIARIES")
dbDisconnect(con)
# current aviaries data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='aviaries')
v$av_pk = seq_len(nrow(v)) # seq_len() is safe for an empty sheet
if(nrow(d)>0){
v = v[v$av_pk>max(d$av_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints suspect rows for visual inspection
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%a] # "drew" "ih" "ms" "kc" "others"
# NAs in aviary
v[is.na(v$aviary),]
# check aviary: must be w1-w7
v[!v$aviary%in%paste('w', seq(1,7,1), sep=""),]
# check light_cycle
v[!v$light_cycle%in%c('constant','natural', '12'),]
# check T_cycle
v[!v$T_cycle%in%c('constant_seewater','natural', '12'),]
# light and T values
summary(v)
}
{# upload
if(nrow(v)>0){
v$av_pk = NA # pk is assigned by the DB
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "AVIARIES", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'AVIARIES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # FIX: script was misspelled 'DB_uploda.R'
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('AVIARIES data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime_)))) # FIX: was v$datetime, relying on fragile partial matching of '$'
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD TAGS
# Reads the 'tags' sheet, keeps only rows not yet in TAGS, prints checks,
# then appends the new rows to TAGS and logs the upload in DBLOG.
{# prepare
# current TAGS data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM TAGS")
dbDisconnect(con)
# current tags data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='tags')
v$tag_pk = seq_len(nrow(v)) # seq_len() is safe for an empty sheet
if(nrow(d)>0){
v = v[v$tag_pk>max(d$tag_pk),] # select only rows that are not in DB yet
#v = v[!is.na(v$start),]
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints suspect rows for visual inspection
# NAs in type
v[is.na(v$type),]
# types
unique(v$type)
# NAs in coating
v[is.na(v$coating),]
# coating
unique(v$coating)
# NAs in memmory
v[is.na(v$memmory),]
# memmory
unique(v$memmory)
# batt and mass values
summary(v)
}
{# upload
if(nrow(v)>0){
v$tag_pk = NA # pk is assigned by the DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TAGS", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'TAGS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # FIX: script was misspelled 'DB_uploda.R'
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('TAGS data uploaded'))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
##### DONE 2018-01-31 13:45:29
# if re-run needed - please use first BIRD TABLE, then above CAPTURE BIRDS update and only then the remaining 2
{# 1. BIRDS TABLE - first upload 2015 - 2017
# One-off initial population of BIRDS: 2015/2016 catches (plus two RUFA birds), then 2017 catches.
#con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS BIRDS")
#dbDisconnect(con)
# then make the table a new directly in SQLiteStudio
{# upload EVA's catches + RUFAs
v = readWorksheetFromFile(paste(wd2, 'morphometrics+sex_2016.xlsx', sep = ''), sheet=1)
v$RNR[v$RNR%in%o$RINGNR] # prints rings also present in the 2017 biometry file ('o' from the metadata section)
r = readWorksheetFromFile(paste(wd2, 'ColourRings2016.xlsx', sep = ''), sheet=1)
r$RNR = toupper(r$RNR)
v$colcom = r$complete_cr[match(v$RNR,r$RNR)]
v$colcom_now = r$actual_cr[match(v$RNR,r$RNR)]
v$year_ = substring(v$CatchDate,1,4)
v$CatchLocation[v$CatchLocation == 'Vistula Mouth'] = 'Vistula'
# map the spreadsheet columns onto the BIRDS schema
v = data.frame(year_ = v$year_, species = 'REKN', subspecies = v$Species, bird_ID = v$RNR, crc = v$colcom, crc_now = v$colcom_now, age = v$Age, sex = v$Sex, caught = v$CatchDate, site_c = v$CatchLocation, wing = v$WING, bill = v$BILL, totalhead = v$TOTHD, tarsus = v$TARS, tartoe = v$TATO, stringsAsFactors = FALSE) # FIX: was v$colcom_no, relying on fragile partial matching of '$'
# columns of the BIRDS schema with no data in this file
v$home_av = v$current_av = v$start_ = v$end_ = v$end_type = v$lat_c = v$lon_c = v$lat_r = v$lon_r = v$site_r = v$muscle = v$height_1 = v$width_1 = v$height_2 = v$width_2 = v$mass_f = v$mass_c = v$bio_author = v$ful_datetime = v$ful_author = v$remarks = v$bird_pk = v$blood = v$sex_method = v$bio_datetime = NA
v$project = 'MigrationOnthogeny'
#v[duplicated(v$RNR),]
# expected BIRDS column order; next line prints any that are missing from v
xx =c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", "bird_pk")
xx[!xx%in%names(v)]
v$caught = as.character(v$caught)
v$site_c = capitalize(tolower(v$site_c))
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
# two RUFA birds known only by ring number
vr = data.frame(species = 'REKN', subspecies = 'ruf', bird_ID = as.character(c('982284830', '982284831')), stringsAsFactors = FALSE)
vx = merge(v,vr,all=TRUE)
vx = vx[,c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", "bird_pk")]
#vr = vx[vx$bird_ID%in%c('982284830', '982284831'),]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = vx , row.names = FALSE, append = TRUE)
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = '2015-2016 catches', stringsAsFactors = FALSE) # FIX: added stringsAsFactors for consistency with the other DBLOG frames
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
}
{# upload 2017 catches (except for last one)
v = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = v[which(v$Nioz == 'Yes'),]
v$DNA = ifelse(v$DNA==TRUE, 'yes', NA)
v$CATCH_MONTH = ifelse(nchar(v$CATCH_MONTH)==1, paste(0, v$CATCH_MONTH, sep=""), v$CATCH_MONTH) # zero-pad the month
# ultrasound measurements recorded directly in the biometry file
vv = v[nchar(v$ULTRASOUN)>2,]
u1 = data.frame(measured = paste(vv$CATCH_YEAR,'08',vv$CATCH_DAY, sep = '-'), bird_ID = vv$RINGNR, muscle = vv$PECTORAL.MUSCLE, height_1 = vv$SH1 , width_1 = vv$SW1, height_2 = vv$SH2 , width_2 = vv$SW2, stringsAsFactors = FALSE)
# decimal commas -> decimal points
u1$muscle = gsub(",", ".", u1$muscle)
u1$height_1 = gsub(",", ".", u1$height_1)
u1$width_1 = gsub(",", ".", u1$width_1)
u1$width_2 = gsub(",", ".", u1$width_2)
u1$height_2 = gsub(",", ".", u1$height_2)
u2 = readWorksheetFromFile(paste(wd2, 'ultrasound.xlsx', sep = ''), sheet=1)
u2$mass = u2$age = u2$comments = u2$where = u2$released = NULL
u2$measured = as.character(u2$measured)
u = rbind(u1,u2)
v = merge(v,u, by.x = 'RINGNR', by.y = 'bird_ID', all.x = TRUE)
v$bio_datetime = ifelse(v$CATCH_MONTH == '09', '2017-10-04', ifelse(v$CATCH_MONTH == '08', '2017-09-04', paste(v$CATCH_YEAR,v$CATCH_MONTH,v$CATCH_DAY, sep = '-')))
v$start_ = ifelse(v$CATCH_MONTH == '09', '2017-09-22', NA)
v=v[v$CATCH_MONTH != '09',] # the September catch is handled separately (section 4)
v = data.frame(bird_pk = v$BIOKLRI_ID, year_ = v$CATCH_YEAR, species = 'REKN', subspecies = 'isl', bird_ID = v$RINGNR, crc = v$CR_CODE, crc_now = NA, age = v$AGE, sex = NA, caught = paste(v$CATCH_YEAR,v$CATCH_MONTH,v$CATCH_DAY, sep = '-'), site_c = v$CATCH_LOCATION, wing = v$WING, bill = v$BILL, totalhead = v$TOTHD, tarsus = v$TARS, tartoe = v$TATO, mass_f = v$MASS, giz_author = 'ad', bio_author = 'jth', blood = v$DNA, muscle = v$muscle, height_1 = v$height_1, width_1 = v$width_1, height_2 = v$height_2, width_2 = v$width_2, giz_datetime = v$measured, bio_datetime = v$bio_datetime, start_ = v$start_ , stringsAsFactors = FALSE)
v$home_av = v$current_av = v$end_ = v$end_type = v$lat_c = v$lon_c = v$lat_r = v$lon_r = v$site_r = v$mass_c = v$remarks = v$sex_method = NA
v$site_c = ifelse(v$site_c == 'GRIEND', 'Griend', ifelse( v$site_c == 'DE RICHEL', 'Richel', 'Schier'))
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
# birds assigned to Martin belong to the SocialJetLag project
x = readWorksheetFromFile(paste(wd2, 'captive_knots_2017_12+moving_2018_01.xlsx', sep = ''), sheet=1)
x = x[x$X2 == 'Martin',]
v$project = ifelse(v$bird_ID%in%x$ID,'SocialJetLag','MigrationOnthogeny')
xx =c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","giz_datetime","giz_author","project","remarks", "bird_pk")
xx[!xx%in%names(v)]
v1 = v[c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","giz_datetime","giz_author","project","remarks", "bird_pk")]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = v1, row.names = FALSE, append = TRUE)
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = '2017-07 and 08 catches', stringsAsFactors = FALSE) # FIX: added stringsAsFactors for consistency
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
}
}
# 2. capture BIRDs above
{# 3. update FUL from file, which has to have following info 'author','measured','bird_ID','muscle','height_1','width_1','height_2', 'width_2'
# Copies ultrasound (ful) measurements from a dated spreadsheet into BIRDS via a temp table,
# then stamps the matching '%ful%' items in TO_DO as solved.
ul_date = '2017-09-23' # DEFINE
u = readWorksheetFromFile(paste(wd2, 'ultrasound_',ul_date,'.xlsx', sep = ''), sheet=1)
u$measured = as.character(u$measured)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = u[,c('author','measured','bird_ID','muscle','height_1','width_1','height_2', 'width_2')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
ful_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
ful_datetime = (SELECT temp.measured FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
muscle = (SELECT temp.muscle FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_1 = (SELECT temp.'height_1' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_1 = (SELECT temp.width_1 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_2 = (SELECT temp.height_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_2 = (SELECT temp.width_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# mark the corresponding '%ful%' items in TO_DO as solved
x = data.frame(bird_ID = u$bird_ID, datetime_ = as.character(Sys.time()), stringsAsFactors=FALSE)
dbWriteTable(con, name = "temp", value = x, row.names = FALSE)
dbExecute(con, "UPDATE TO_DO SET
datetime_solved = (SELECT temp.datetime_ FROM temp WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%ful%')
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%ful%')
")
#dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE) # FIX: disabled - 'mx' is not defined in this section (stray copy-paste from the captures upload) and the line errored at runtime
dbGetQuery(con, "DROP TABLE IF EXISTS temp") # FIX: temp is now dropped BEFORE closing the connection; originally dbDisconnect() came first, so this drop and the DBLOG write below failed on a closed connection
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'ful of 2017-09 catch', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
print('ful updated in birds')
}
{# 4. DONE update biometrics and other info from JOBs DB for BIRDS 2017-09 catch DATA
# Copies biometry, project, site and blood info for the September-2017 catch into BIRDS
# via a temp table, then stamps the matching '%mass_f%' TO_DO items as solved.
v = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = v[which(v$Nioz == 'Yes'),]
v$DNA = ifelse(v$DNA==TRUE, 'yes', NA)
v$CATCH_MONTH = ifelse(nchar(v$CATCH_MONTH)==1, paste(0, v$CATCH_MONTH, sep=""), v$CATCH_MONTH) # zero-pad the month
v = v[v$CATCH_MONTH=='09',] # FIX: was v$CATCH_MONT, relying on fragile partial matching of '$'
v$site_c = ifelse(v$CATCH_LOCATION == 'GRIEND', 'Griend', ifelse( v$CATCH_LOCATION == 'DE RICHEL', 'Richel', 'Schier'))
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
v$project = 'SocialJetLag'
v$age = ifelse(v$AGE == 3, 'A', v$AGE) # EURING age 3 -> adult
v$bio_author = 'jh'
v$bird_ID = v$RINGNR
v$species = 'REKN'
v$subspecies = 'isl'
# UPDATE BIRDS
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = v[,c('bio_author','bird_ID','TOTHD','BILL','WING','TARS','TATO', 'age','project','site_c','lat_c','lon_c','DNA','MASS','species','subspecies')], row.names = FALSE, append = FALSE)
#bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
dbExecute(con, "UPDATE BIRDS SET
bio_author = (SELECT temp.bio_author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
species = (SELECT temp.species FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
subspecies = (SELECT temp.subspecies FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
mass_f = (SELECT temp.MASS FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
age = (SELECT temp.age FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
blood = (SELECT temp.DNA FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
project = (SELECT temp.project FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
site_c = (SELECT temp.site_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lat_c = (SELECT temp.lat_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_c = (SELECT temp.lon_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
wing = (SELECT temp.WING FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
bill = (SELECT temp.'BILL' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
totalhead = (SELECT temp.TOTHD FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tarsus = (SELECT temp.TARS FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tartoe = (SELECT temp.TATO FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# UPDATE TO_DO
x = data.frame(bird_ID = v$bird_ID, datetime_ = as.character(Sys.time()), stringsAsFactors=FALSE)
dbWriteTable(con, name = "temp", value = x, row.names = FALSE)
dbExecute(con, "UPDATE TO_DO SET
datetime_solved = (SELECT temp.datetime_ FROM temp WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%mass_f%')
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%mass_f%')
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'ful of 2017-09 bio, age, species, etc', stringsAsFactors = FALSE) # FIX: added stringsAsFactors for consistency
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
print('bio updated in birds')
}
{# update HARN table if 'harn' or 'on'/'off' and what_ID starting with 'H', 'P','D'
# Appends harness rows from the captures data-entry frame 'm' (set by the captures section) to HARN.
# NOTE(review): relies on '&' binding tighter than '|': keep rows where what contains "harn",
# OR contains "on"/"off" AND what_ID starts with H/D/P. grepl("on") also matches substrings;
# presumably acceptable given the what_ID filter - confirm against data-entry conventions.
mm = m[grepl("harn",m$what, perl = TRUE)| grepl("on",m$what, perl = TRUE) & substring(m$what_ID,1,1) %in%c('H','D','P') | grepl("off",m$what, perl = TRUE) & substring(m$what_ID,1,1) %in%c('H','D','P'),]
mm = mm[!is.na(mm$what),] # defensive: drop rows where 'what' is NA
if(nrow(mm)==0){print('no harn in what')}else{
if(length(names(mm)[names(mm)=='tilt']) == 0){
# harness-measurement columns absent in this data-entry file: create them as NA
mm = mm[,c('capture', 'bird_ID','what', 'what_ID')]
mm$tilt = mm$neck = mm$armpit = mm$back = mm$size = mm$harn_pk= NA
mm$capture = as.character(mm$capture)
}else{
# harness-measurement columns present: keep them, pk assigned by the DB
mm = mm[,c('capture', 'bird_ID', 'what','what_ID','tilt', 'neck', 'armpit','back','size')]
mm$harn_pk=NA
mm$capture = as.character(mm$capture)
}
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "HARN", value = mm, row.names = FALSE, append = TRUE)
# log the upload; f2[i] (current data-entry file name) comes from the captures loop context
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'HARN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste(mm$capture,'HARN data added for', mm$bird_ID))
dbDisconnect(con)
}
}
{# 5. update positions to decimals
# Re-derives decimal lat/lon for catching and ringing sites in BIRDS from catch_locations.xlsx
# (matched on the site abbreviation) and writes them back via a temp table.
v = readWorksheetFromFile(paste(wd2, 'catch_locations.xlsx', sep = ''), sheet=1)
# normalise the various empty-cell spellings to NA
v[v==""] = NA
v[v==" "] = NA
v[v=="NA"] = NA
#conv_unit("6 13 51", from = 'deg_min_sec', to = 'dec_deg')
#conv_unit("5 16 40", from = 'deg_min_sec', to = 'dec_deg')
#v$lat_deg = gsub('.', ' ', v$lat_deg, fixed = TRUE)
#v$lon_deg = gsub('.', ' ', v$lon_deg, fixed = TRUE)
#v$lat = ifelse(is.na(v$lat_deg), v$lat, conv_unit(v$lat_deg, from = 'deg_min_sec', to = 'dec_deg'))
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
# look up decimal coordinates by site abbreviation for catch (_c) and ringing (_r) sites
b$lat_c = v$lat[match(b$site_c, v$abb)]
b$lon_c = v$lon[match(b$site_c, v$abb)]
b$lat_r = v$lat[match(b$site_r, v$abb)]
b$lon_r = v$lon[match(b$site_r, v$abb)]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = b, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
lat_c = (SELECT temp.lat_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_c = (SELECT temp.lon_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lat_r = (SELECT temp.lat_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_r = (SELECT temp.lon_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# log the update
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'minor', script = 'DB_upload.R: 5. update positions to decimals', remarks = 'updated lat and lon', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
}
{# 6. update color combos
# Fills in missing colour-ring combinations (crc) in BIRDS from the BIOKLRI export,
# matched on ring number, via a temp table.
m = read.csv(paste(wd2,'BIOKLRI.csv', sep=""), stringsAsFactors=FALSE)
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS where crc is null") # only birds still lacking a combo
dbDisconnect(con)
b$crc = m$CR_CODE[match(b$bird_ID, m$RINGNR)] # unmatched rings stay NA
#b[,c('bird_ID','crc')]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = b[,c('bird_ID','crc')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
crc = (SELECT temp.crc FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# log the update
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'minor', script = 'DB_upload.R: update color combos', remarks = 'updated crc', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
}
} | /Data/AVESatNIOZ/DB_upload.R | no_license | MartinBulla/SocialJetLag | R | false | false | 67,262 | r | # TO DO - check all data for correct datetime - e.g. current cont_obs are running on winter time, while all the rest of data runs on summer time
{# INFO
#ALT+0
# when birds arrive to nioz
#### !!! if blood is not indicated in 'what', it is assumed that we have no clue whether blood was taken - if blood was taken upon capture, please indicate this, as well as whether biometry, ful and crc were done
#### enter fields f_mass, project,species, age and subspecies when birds brought in
}
# Luc please install:
#install.packages('scales')
#### START HERE
# Luc or Martin
Luc = FALSE # TRUE when Luc runs the script: selects his working directories and 'lm' as the DBLOG author
# indicate in DB_LOG
dblog = TRUE # if TRUE, every upload/update is also recorded in the DBLOG table
{# TOOLS
# Session setup: paths, packages, DB handle and reference (metadata) tables used by all sections.
{# define working directories
if(Luc == TRUE){
wd0 = "C:/Users/ldemonte/Dropbox/data_entry/"
wd = "C:/Users/ldemonte/Dropbox/data_entry/ready_for_DB_upload/"
outdir = "C:/Users/ldemonte/Dropbox/data_entry/uploaded_to_DB/"
wd2 = "C:/Users/ldemonte/Dropbox/AVESatNIOZ/"
}else{
wd0 = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/"
wd = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/ready_for_DB_upload/"
outdir = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/data_entry/uploaded_to_DB/"
wd2 = "C:/Users/mbulla/Documents/Dropbox/Science/Projects/MC/Data/AVESatNIOZ/"
}
}
{# load packages
# FIX: library() instead of require() - these packages are mandatory, so loading
# should fail loudly here rather than via a cryptic error later
library(plyr)
library(XLConnect)
library("RSQLite")
#require("DBI")
library('Hmisc')
}
{# DB connection
db=paste(wd2,"AVESatNIOZ.sqlite",sep="")
#db=paste(wd2,"test.sqlite",sep="")
#db=paste(wd2,"test2.sqlite",sep="")
}
{# metadata
# birds table
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
# captures table
con = dbConnect(dbDriver("SQLite"),dbname = db)
z = dbGetQuery(con, "SELECT*FROM CAPTURES")
dbDisconnect(con)
# biometry
o = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = readWorksheetFromFile(paste(wd2, 'morphometrics+sex_2016.xlsx', sep = ''), sheet=1)
v$RNR[v$RNR%in%o$RINGNR] # prints rings present in both biometry files (visual check)
# locations
g = readWorksheetFromFile(paste(wd2, 'catch_locations.xlsx', sep = ''), sheet=1)
}
{# !!!! DEFINE CONSTANTS
catch = c('Richel', 'Schier','Griend','Vistula', 'Mokbaai') # define off NIOZ catching locations
}
}
# CHECK BEFORE UPLOAD
{# prepare
# a  = current CAPTURES table (used below for column-name comparison with data-entry files)
# oo = DBLOG entries for CAPTURES (guards against uploading the same file twice)
# f/f2 = full and relative paths of the data_entry workbooks awaiting upload
con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS CAPTURES")
a = dbGetQuery(con, "SELECT*FROM CAPTURES")
oo = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'CAPTURES'")
dbDisconnect(con)
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
}
{# check WHAT entries to know whether additional uploads needed
# Scans every pending data-entry workbook and prints the distinct values of the 'what'
# column across all of them, so the operator knows which extra upload sections to run.
l = vector("list", length(f)) # pre-allocated; one summary data.frame per file
j = character(0) # collects the distinct 'what' values (was j = NA, which left a spurious NA in the output)
for(i in seq_along(f)){ # seq_along() is safe when f is empty; 1:length(f) would give c(1, 0)
m = readWorksheetFromFile(f[i], sheet=1)
{#### if this part does not work hashtag it out
#m[m==""] = NA
#m[m==" "] = NA
#m[m=="NA"] = NA
####
}
w = unique(m$what) # compute once instead of three times
w = w[!is.na(w)]
#print(i)
#print(w)
l[[i]] = data.frame(f = i , what = if(length(w)==0){NA}else{w}, stringsAsFactors = FALSE)
j = c(j, w)
}
#ll = do.call(rbind,l)
#f2[i]
print( unique(j))
}
{# check HEALTH entries to know whether additional uploads needed - FINISH CLEANING
# Same scan as the WHAT check above, but for the 'health' column.
l = vector("list", length(f)) # pre-allocated; one summary data.frame per file
j = character(0) # collects the distinct 'health' values (was j = NA, which left a spurious NA in the output)
for(i in seq_along(f)){ # seq_along() is safe when f is empty; 1:length(f) would give c(1, 0)
m = readWorksheetFromFile(f[i], sheet=1)
w = unique(m$health) # compute once instead of three times
w = w[!is.na(w)]
#print(i)
#print(w)
l[[i]] = data.frame(f = i , health = if(length(w)==0){NA}else{w}, stringsAsFactors = FALSE)
j = c(j, w)
}
#ll = do.call(rbind,l)
#f2[i]
print(unique(j))
}
{# UPLOAD CAPTURES
# Uploads ONE data-entry workbook (selected manually via i) into CAPTURES;
# run interactively, stepping i through the files. DBLOG remarks (file name) guard double uploads.
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#for(i in 1:length(f)){#length(f)){#{2){#
i = 1
f2[i]
{# prepare
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
# harmonise older column names with the CAPTURES schema
#names(m)[names(m) == 'now'] = 'at'
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
#m$capture = as.character(m$capture)
#m$release = as.character(m$release)
m$year_ = substring(m$capture, 1,4)
{#### if this part does not work, hashtag it out
# normalise the various empty-cell spellings to NA
m[m==""] = NA
m[m==" "] = NA
m[m=="NA"] = NA
####
}
# make sure optional columns exist before the fixed-column selection below
if(length(names(m)[names(m)=='c_pk'])==0){m$c_pk = NA}
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# upload to captures
#print(names(m)[!names(m)%in%c("year_", "capture", "at","release", "where", "bird_ID", "what", "what_ID", "health", "feet","mass", "remarks", "author", "plum", "molt","molt_col", "L01","L02","L03","L04","L05","L06","L07","L08","L09","L10","R01","R02","R03","R04","R05","R06","R07","R08","R09","R10","crc_now", "capture_pk")])
mm = m[,c("year_", "capture", "at","release", "where", "bird_ID", "what", "what_ID", "health", "feet","mass", "with", "remarks", "author", "plum", "molt","molt_col", "L01","L02","L03","L04","L05","L06","L07","L08","L09","L10","R01","R02","R03","R04","R05","R06","R07","R08","R09","R10","crc_now","pic", "c_pk")]
#mm$capture = as.character(mm$capture)
#mm$release = as.character(mm$release)
# skip files already logged in DBLOG (remarks holds the file name)
if(f2[i]%in%oo$remarks){print('NO UPLOAD!!! - data already in DB - see DBLOG table')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
#print(names(z)[!names(z)%in%names(mm)])
#print(names(mm)[!names(mm)%in%names(z)])
dbWriteTable(con, name = "CAPTURES", value = mm, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'CAPTURES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste(f2[i],'uploaded to captures'))
}
}
#}
}
{# create/update BIRDS entries
# (section continues beyond this excerpt)
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#for(i in 1:length(f)){#{2){#
i = 1
{# prepare
# read the i-th data-entry workbook and harmonise older column names with the schema
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
#names(m)[names(m) == 'now'] = 'at'
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
#print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
#print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
m$year_ = substring(m$capture, 1,4)
# normalise the various empty-cell spellings to NA
m[m==""] = NA
m[m==" "] = NA
m[m=="NA"] = NA
# make sure optional columns exist
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# IF BIRD ARRIVES to NIOZ - create its data entry line and if data missing create TO_DO
# Captures at a catching site, or with "capt" in 'what', represent new arrivals.
mm = m[m$at%in%catch | grepl("capt",m$what, perl = TRUE),]
if(nrow(mm)==0){print('no capt in what')}else{
# TO_DO entry if data missing: each counter is 1 when the column exists in the
# sheet, 0 otherwise; fewer than 5 means at least one required column is absent.
mass_f = length(names(mm)[names(mm)=='mass_f'])
project = length(names(mm)[names(mm)=='project'])
species = length(names(mm)[names(mm)=='species'])
subspecies = length(names(mm)[names(mm)=='subspecies'])
age = length(names(mm)[names(mm)=='age'])
if((mass_f+project+species+subspecies+age) < 5){
# Log one TO_DO row per capture naming the missing columns.
mx = mm[,c('capture', 'bird_ID', 'what')]
mx$what = paste(if(mass_f==0){'mass_f'}, if(age==0){'age'},if(species==0){'species'},if(subspecies==0){'subspecies'},if(project==0){'project'}, sep =",")
mx$capture = as.character(mx$capture)
mx$datetime_solved = mx$remarks = mx$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no', paste(if(mass_f==0){'mass_f'}, if(age==0){'age'},if(species==0){'species'},if(subspecies==0){'subspecies'},if(project==0){'project'},sep=","), 'column names and data entry despite capt in what in capture sheet, to do created'))
}
# add data to birds
mm$capture = as.character(mm$capture)
mm$release = as.character(mm$release)
mm$year_ = substring(mm$capture, 1,4)
mm$blood = ifelse(grepl("blood",mm$what, perl = TRUE), 'yes',NA) #### !!! if blood not indicated in what, it is assumed that we have no clue whether blood was taken
mm$sex_method = ifelse(grepl("blood",mm$what, perl = TRUE), 'blood',NA)
# BUGFIX: the else-branches below used '==' (a comparison whose result was
# computed and discarded) instead of '=' (assignment), so bio_datetime and
# ful_datetime were never filled in when the measurement columns existed.
if(length(names(mm)[names(mm)=='wing']) == 0){mm$wing = mm$bill = mm$totalhead = mm$tarsus = mm$tartoe = mm$bio_datetime = mm$bio_author = NA}else{mm$bio_datetime = mm$capture; mm$bio_author = mm$author}
if(length(names(mm)[names(mm)=='mass_f']) == 0){mm$mass_f = NA}
if(length(names(mm)[names(mm)=='project']) == 0){mm$project = NA}
if(length(names(mm)[names(mm)=='subspecies']) == 0){mm$subspecies = NA}
if(length(names(mm)[names(mm)=='species']) == 0){mm$species = NA}
if(length(names(mm)[names(mm)=='age']) == 0){mm$age = NA}
if(length(names(mm)[names(mm)=='height_1']) == 0){mm$muscle = mm$height_1 = mm$width_1 = mm$height_2 = mm$width_2 = mm$ful_datetime = mm$ful_author = NA}else{mm$ful_datetime = mm$capture; mm$ful_author = mm$author}
# Columns only known later (release/end info, sex, pk) start as NA.
# (duplicate 'mm$site_r' removed from the original assignment chain)
mm$end_ = mm$end_type = mm$site_r = mm$bird_pk = mm$sex = mm$lat_r = mm$lon_r = NA
# UPDATE CATCHING LOCATIONS - rename sheet columns to the BIRDS schema.
names(mm)[names(mm)=='capture'] = 'caught'
names(mm)[names(mm)=='release'] = 'start_'
names(mm)[names(mm)=='at'] = 'site_c'
names(mm)[names(mm)=='where'] = 'current_av'
names(mm)[names(mm)=='mass'] = 'mass_c'
mm$site_c = capitalize(tolower(mm$site_c))
mm$home_av = mm$current_av
mm$crc = mm$crc_now
# Catching-site coordinates looked up in the sites table g by abbreviation.
mm$lat_c = g$lat[match(mm$site_c,g$abb)]
mm$lon_c = g$lon[match(mm$site_c,g$abb)]
v = mm[,c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_","end_","end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", 'bird_pk')]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = v, row.names = FALSE, append = TRUE)
# Record the upload in DBLOG (type 'major' = new birds added).
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'new birds', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('capt info uploaded to BIRDS for', mm$bird_ID))
}
}
{# IF BIRD ENDS at NIOZ
# Rows that mark the end of a bird's stay: released ("free"), died/dead or
# killed - flagged either in 'what' or in 'health'.
# NOTE(review): the first condition m$at%in%catch also selects ordinary
# catch-site rows that are not end events - confirm this is intended.
mm = m[m$at%in%catch | grepl("free",m$what, perl = TRUE) | grepl("died",m$what, perl = TRUE) | grepl("dead",m$what, perl = TRUE) | grepl("killed",m$what, perl = TRUE) | grepl("killed",m$health, perl = TRUE) | grepl("died",m$health, perl = TRUE) | grepl("dead",m$health, perl = TRUE),]
if(nrow(mm) > 0){
# If 'what' is not exactly one of the end keywords, the keyword must have come
# from 'health' - use that field instead.
mm$what = ifelse(!mm$what%in%c("free","died","killed"), mm$health, mm$what)
mm$release = as.character(mm$release)
# Map keywords onto the BIRDS end_type vocabulary.
mm$type = ifelse(grepl("free",mm$what, perl = TRUE), 'released', ifelse(grepl("dead",mm$what, perl = TRUE), 'died', ifelse(grepl("died",mm$what, perl = TRUE), 'died', ifelse(grepl("killed",mm$what, perl = TRUE), 'killed', NA))))
mm$where = capitalize(tolower(mm$where))
# Release-site coordinates from the sites table g.
mm$lat_r = g$lat[match(mm$where,g$abb)]
mm$lon_r = g$lon[match(mm$where,g$abb)]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('bird_ID','release','where','type', 'lat_r', 'lon_r')], row.names = FALSE, append = FALSE)
# Correlated-subquery UPDATE: copy the end info into BIRDS for matching bird_IDs.
dbExecute(con, "UPDATE BIRDS SET end_ = (SELECT temp.release FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
site_r = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lat_r = (SELECT temp.lat_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_r = (SELECT temp.lon_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
end_type = (SELECT temp.type FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('end info uploaded to BIRDS', mm$bird_ID))
}else{print('no free, killed, died in what or health')}
}
{# IF WHAT = SWITCH THEN UPDATE HOME AVIARY FROM WHERE
# Birds whose 'what' contains "switch" changed home aviary; per bird, take the
# 'where' value of the most recent capture and write it into BIRDS.home_av.
mm = m[which(!is.na(m$what)),]
mm = mm[grepl("switch",mm$what, perl = TRUE),c('bird_ID', 'capture','where', 'what','home')]
mm = ddply(mm,.(bird_ID), summarise, where = where[capture == max(capture)])
if(nrow(mm) > 0){
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
# Correlated-subquery UPDATE keyed on bird_ID.
dbExecute(con, "UPDATE BIRDS SET home_av = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (SELECT * FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('home_av updated in BIRDS for',mm$bird_ID))
}else{print('no switch in what')}
}
{# update current aviary and mass values
{# update current aviary
# Exclude observation-only rows ("obs" / "cons" in 'what') - they do not
# change a bird's location.
# BUGFIX: the original filter '!grepl("obs",...) | !grepl("cons",...)' kept
# every row except those containing BOTH keywords (De Morgan); excluding rows
# with EITHER keyword requires '&'.
mm = m[!grepl("obs",m$what, perl = TRUE) & !grepl("cons",m$what, perl = TRUE),]
# Per bird, take 'where' from the most recent capture.
mm = ddply(mm,.(bird_ID), summarise, where = where[capture == max(capture)])
# Geographic site abbreviations (table g) are not aviaries - blank them out.
mm$where = ifelse(tolower(mm$where)%in%tolower(unique(g$abb[!is.na(g$abb)])), NA, mm$where)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET current_av = (SELECT temp.'where' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (SELECT * FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('current_av updated in BIRDS')
}
{# update current mass
# Fall back to the 'with' column when 'mass' is missing
# (assumes 'with' holds a mass-with-device reading - TODO confirm).
m2=m
m2$mass[is.na(m2$mass)] = m2$with[is.na(m2$mass)]
# Per bird, keep the mass of the latest capture that has one.
m2 = ddply(m2[!is.na(m2$mass),],.(bird_ID), summarise, mass = mass[capture == max(capture)])
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = m2, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET mass_c = (SELECT temp.'mass' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (SELECT * FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('mass_c updated in BIRDS')
}
}
{# update BIRDS, if data present, or TO_DO where data missing but what is cr,blood,bio,ul,ful- note that blood means that update to SEX is needed
# update crc_now: keep only rows with a real colour-ring combination
# (drop NA and the placeholder values).
mm = m[!(is.na(m$crc_now)| m$crc_now%in%c('yes_flag','no_flag','no_metal','',' ')),c('bird_ID', 'crc_now')]
if(nrow(mm) > 0){
# NOTE(review): this branch is unreachable - mm already excludes NA crc_now,
# so nrow(mm[!is.na(mm$crc_now),]) equals nrow(mm) > 0 here. If it ever ran it
# would also fail ('capture' was not selected into mm above) and it never
# calls dbDisconnect(con). Confirm intent before removing.
if(nrow(mm[!is.na(mm$crc_now),]) == 0){
mx = mm[,c('capture', 'bird_ID', 'crc_now')]
mx$what = 'cr'
mx$capture = as.character(mx$capture)
mx$datetime_solved = mx$remarks = mx$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
print('cr in what but not data in crc_now, TODO created')
}else{
# Copy the current colour-ring combination into BIRDS.crc_now per bird_ID.
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[!is.na(mm$crc_now),], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET crc_now = (SELECT temp.crc_now FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (SELECT * FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('crc_now updated in birds for',mm$bird_ID))
}
}else{print('no crc_now change')}
# update blood: any row with "blood" in 'what' means a sample was taken.
# A TO_DO row (what = 'sex') is created because molecular sexing follows
# from the blood sample, and BIRDS.blood is set to 'yes'.
mm = m[which(grepl("blood",m$what, perl = TRUE)) ,]
if(nrow(mm) > 0){
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'sex'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
mm$blood = 'yes'
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
blood = (SELECT temp.blood FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
#dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'TO_DO', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
#dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('blood updated in BIRDS and TO_DO for sex created', mm$bird_ID))
}else{print('no blood in what')}
# update bio: rows with "bio" in 'what' (but not "capt" - those were handled
# by the arrival section) carry biometric measurements.
mm = m[ which(grepl("bio",m$what, perl = TRUE) & !grepl("capt",m$what, perl = TRUE)) ,]
if(nrow(mm)==0){print('no bio in what')}else{
if(length(names(mm)[names(mm)=='wing']) == 0){
# Measurement columns missing despite 'bio' in what: create a TO_DO row and
# still stamp bio_datetime/bio_author in BIRDS (presumably to record that the
# measuring happened - TODO confirm).
con = dbConnect(dbDriver("SQLite"),dbname = db)
mm = mm[,c('capture', 'bird_ID', 'what','author')]
mm$what = 'bio'
mm$capture = as.character(mm$capture)
#mm$author = 'jh'
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
bio_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print('no bio columns in capture sheet; to do created')
print(paste('bio_datetime and bio_author updated in BIRDS for', mm$bird_ID))
}else{
# Measurement columns present: copy them into BIRDS per bird_ID.
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('author','capture','bird_ID','wing','bill','totalhead', 'tarsus', 'tartoe')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
bio_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
wing = (SELECT temp.wing FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
bill = (SELECT temp.'bill' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
totalhead = (SELECT temp.totalhead FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tarsus = (SELECT temp.tarsus FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tartoe = (SELECT temp.tartoe FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('bio updated in BIRDS for', mm$bird_ID))
}
}
# update cr: rows with "cr" in 'what' (excluding "capt" and "crc" matches,
# since grepl("cr") would also match those) mean the colour-ring combination
# was (re)applied; copy crc_now into the historical BIRDS.crc field.
mm = m[ which(grepl("cr",m$what, perl = TRUE)& !grepl("capt",m$what, perl = TRUE) & !grepl("crc",m$what, perl = TRUE)) ,]
if(nrow(mm)==0){print('no cr in what')}else{
if(nrow(mm[is.na(mm$crc_now),]) > 0){
# 'cr' flagged but crc_now empty for at least one row: create TO_DO entries.
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'cr'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no crc_now entry despite cr in what in capture sheet, to do created for', mm$bird_ID))
}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('bird_ID','crc_now')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET crc = (SELECT temp.crc_now FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('crc updated in birds', mm$bird_ID))
}
}
# update ful: rows with "ful" in 'what' (excluding "capt") carry full
# ultrasound/muscle measurements destined for BIRDS.
mm = m[ grepl("ful",m$what, perl = TRUE) & !grepl("capt",m$what, perl = TRUE) ,]
if(nrow(mm)==0){print('no ful in what')}else{
if(length(names(mm)[names(mm)=='height_1']) == 0){
# Measurement columns missing despite 'ful' in what: create TO_DO entries.
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'ful'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no ful column names and data entry despite ful in what in capture sheet, to do created', mm$bird_ID))
}else{
# Copy the measurements into BIRDS per bird_ID.
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = mm[,c('author','capture','bird_ID','muscle','height_1','width_1','height_2', 'width_2')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
ful_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
ful_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
muscle = (SELECT temp.muscle FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_1 = (SELECT temp.'height_1' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_1 = (SELECT temp.width_1 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_2 = (SELECT temp.height_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_2 = (SELECT temp.width_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbDisconnect(con)
print(paste('ful updated in birds for', mm$bird_ID))
}
}
}
{# make entry in DB_LOG
# Record that this data_entry file updated BIRDS (remarks = relative file path,
# used elsewhere to detect already-processed files).
con = dbConnect(dbDriver("SQLite"),dbname = db)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
}
print(paste(f2[i],'updated BIRDS'))
}
{# update SPECIAL tables
{# prepare
# Re-list the data_entry workbooks and re-read the first one; a = current
# CAPTURES schema (for column comparison), oo = DBLOG rows for CAPTURES.
f = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
f2 = list.files(path=paste(wd,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
#f = list.files(path=paste(outdir,sep =''),pattern='data_entry', recursive=TRUE,full.names=TRUE)
#f2 = list.files(path=paste(outdir,sep =''),pattern='data_entry', recursive=TRUE,full.names=FALSE)
con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS CAPTURES")
a = dbGetQuery(con, "SELECT*FROM CAPTURES")
oo = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'CAPTURES'")
dbDisconnect(con)
# NOTE(review): only the first file is processed - the loop is commented out.
i = 1
#for(i in 1:length(f)){#{2){#
print(i)
m = readWorksheetFromFile(f[i], sheet=1, colTypes = 'character')
#names(m)[names(m) == 'now'] = 'at'
# Harmonise sheet column names with the DB schema.
names(m)[names(m) == 'CGF'] = 'molt_col'
names(m)[names(m) == 'pk'] = 'c_pk'
print(names(a)[!names(a)%in%names(m) & !names(a)%in%c('year_')]) # names that are in the DB but not in the data_entry file (with exception of year_)
print(names(m)[!names(m)%in%names(a) & !names(m)%in%c('m_p','f_p','home')]) # names that are in the data_entry file but not in the DB (with exception of 'm_p','f_p','home')
m$year_ = substring(m$capture, 1,4)
#m[m==""] = NA
#m[m==" "] = NA
#m[m=="NA"] = NA
# Guarantee optional columns exist for later subsets.
if(length(names(m)[names(m)=='c_pk'])==0){m$c_pk = NA}
if(length(names(m)[names(m)=='pic'])==0){m$pic = NA}
if(length(names(m)[names(m)=='with'])==0){m$with = NA}
#m$capture = as.POSIXct(m$capture)
#m$release = as.POSIXct(m$release)
}
{# update BIO_TRAIN if btrain or utrain or ult in WHAT
# Training measurements: "btrain" = biometric training, "utrain" = ultrasound
# training. Depending on which flags occur, the missing measurement columns
# are filled with NA so the BIO_TRAIN schema is always complete.
mm = m[ grepl("btrain",m$what, perl = TRUE) | grepl("utrain",m$what, perl = TRUE) ,]
mm = mm[ !is.na(mm$what) ,]
if(nrow(mm)>0){
mm$datetime_ = as.character(mm$capture)
mm$year_ = substring(mm$capture, 1,4)
if(TRUE%in%unique(grepl("btrain",mm$what, perl = TRUE)) & TRUE%in%unique(grepl("utrain",mm$what, perl = TRUE))){
# Both training types present: all measurement columns are expected.
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle','height_1','width_1','height_2','width_2')]
mm$remarks = mm$bio_pk = NA
}else{ if(TRUE%in%unique(grepl("btrain",mm$what, perl = TRUE))){
# Only biometric training: pad the ultrasound columns with NA.
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe')]
mm$muscle = mm$height_1 = mm$width_1 = mm$height_2 = mm$width_2 = mm$remarks = mm$bio_pk = NA
}else{ if(TRUE%in%unique(grepl("utrain",mm$what, perl = TRUE))){
# Only ultrasound training: pad the biometric columns with NA.
mm$wing = mm$tarsus = mm$tartoe = mm$bill = mm$totalhead = NA
mm = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle','height_1','width_1','height_2','width_2')]
mm$remarks = mm$bio_pk = NA
}}}
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIO_TRAIN", value = mm[,c('year_','author', 'datetime_', 'bird_ID','wing', 'bill', 'totalhead','tarsus','tartoe','muscle', 'height_1','width_1','height_2','width_2','remarks','bio_pk')], row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIO_TRAIN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
# NOTE(review): 'capture' was dropped by the column subsets above, so
# mm$capture is NULL here and the print shows only the remaining text.
print(paste(mm$capture,'BIO_TRAIN data added for', mm$bird_ID))
dbDisconnect(con)
}else{print("no btrain or utrain in WHAT")}
}
{# update ULTRASOUND table if UL present
# Rows with "ul" in 'what' (excluding "ful", which is handled by the BIRDS
# section) carry ultrasound measurements for the ULTRASOUND table.
mm = m[ grepl("ul",m$what, perl = TRUE) ,]
mm = mm[ !is.na(mm$what) ,]
mm = mm[ !grepl("ful",mm$what, perl = TRUE) ,]
if(nrow(mm)==0){print('no ul in what')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
u = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'ULTRASOUND'")
dbDisconnect(con)
# Only upload when this file has not been logged for ULTRASOUND yet.
if(nrow(u)==0 | !f2[i]%in%u$remarks){
if(length(names(mm)[names(mm)=='height_1']) == 0){
# Measurement columns missing despite 'ul' in what: create TO_DO entries.
con = dbConnect(dbDriver("SQLite"),dbname = db)
mm = mm[,c('capture', 'bird_ID', 'what')]
mm$what = 'ul'
mm$capture = as.character(mm$capture)
mm$datetime_solved = mm$remarks = mm$todo_pk = NA
dbWriteTable(con, name = "TO_DO", value = mm[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('no ul column names and data entry despite ul in what in capture sheet, to do created for', mm$bird_ID))
}else{
mm$ultra_pk=NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# BUGFIX: was append = FALSE, which fails (or clobbers data) once the
# ULTRASOUND table exists; every other upload in this script appends.
dbWriteTable(con, name = "ULTRASOUND", value = mm[,c('author','capture','bird_ID','muscle','height_1','width_1','height_2', 'width_2','remarks','ultra_pk')], row.names = FALSE, append = TRUE)
# BUGFIX: the DBLOG row was built as 'v' but the stale 'dv' object from an
# earlier section was written, so ULTRASOUND uploads were never logged and
# the duplicate-upload guard above could not work.
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'ULTRASOUND', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('ul added to ULTRASOUND for', mm$bird_ID))
}
}else{print('NO UPLOAD!!! - data already in ULTRASOUND table - see DBLOG table')}
}
}
{# update SAMPLE table
# Rows with "blood" or "skin" in 'what' AND a sample label in 'what_ID' are
# physical samples; blood stays at NIOZ, skin goes to MPIO.
mm = m[ which((grepl("blood",m$what, perl = TRUE) & !is.na(m$what_ID))| (grepl("skin",m$what, perl = TRUE) & !is.na(m$what_ID))),]
mm = mm[ !is.na(mm$what) ,]
if(nrow(mm)==0){print('no blood or skin in what or no what_ID')}else{
con = dbConnect(dbDriver("SQLite"),dbname = db)
u = dbGetQuery(con, "SELECT*FROM DBLOG where DBLOG.'table' = 'SAMPLES'")
dbDisconnect(con)
# Only upload when this file has not been logged for SAMPLES yet.
if(nrow(u)==0 | !f2[i]%in%u$remarks){
mm$sample_pk=NA
mm$datetime_=as.character(mm$capture)
mm$type = ifelse(grepl("blood",mm$what, perl = TRUE), 'blood', ifelse(grepl("skin",mm$what, perl = TRUE), 'skin',NA))
mm$where = ifelse(mm$type == 'blood', 'NIOZ','MPIO')
mm$remarks = NA
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "SAMPLES", value = mm[,c('datetime_','type','what_ID','where','remarks','sample_pk')], row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'SAMPLES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
print(paste('samples added to samples for', mm$bird_ID))
}else{print('NO UPLOAD!!! - data already in SAMPLES table - see DBLOG table')}
}
}
{# update HARN table if all HARN columns present and 'neck' value entered
# Harness fitting data: only processed when the sheet has a 'neck' column
# and at least one row with a non-empty neck measurement.
if(length(names(m)[names(m)=='neck']) == 1){
mm = m[!is.na(m$neck) & !m$neck %in% c(""," "),]
if(nrow(mm)>0){
mm = mm[,c('capture', 'bird_ID','what', 'what_ID', 'tilt','neck','armpit','back','size')]
mm$harn_pk= NA
mm$capture = as.character(mm$capture)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "HARN", value = mm, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'HARN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste(mm$capture,'HARN data added for', mm$bird_ID))
dbDisconnect(con)
}else{print('no harn data although neck column present')}}else{print('no harn additional data = no neck columnt')}
}
}
{# MOVE THE FILE TO DONE
# Archive the processed workbook into outdir.
# NOTE(review): paste(outdir, f2[i], sep = '') assumes outdir ends with a
# path separator - confirm, or this would concatenate into one name.
file.rename(f[i], paste(outdir,f2[i], sep = ''))
}
print(paste('uploaded',f2[i]))
###}
##### AFTER UPLOAD GREY OUT THE DATA IN THE SHEETS OF VISITS FILE
{# UPLOAD VISITS - current way or date time based ---- NA what check
{# prepare
# current visits data from DB; ad-hoc 'session' rows are excluded from the
# row count used to detect which workbook rows are new
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM VISITS")
dbDisconnect(con)
d = d[ !grepl("session", d$remarks, perl = TRUE) ,]
if(nrow(d)> 0){d$v_pk = 1:nrow(d)}
# current visits data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='visits')
v$v_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$v_pk>max(d$v_pk),] # select only rows that are not in DB yet
# BUGFIX: the message was a bare string whose value is silently discarded
# when the script is sourced; wrap it in print() so it is actually shown.
if(nrow(v)==0){print('no need to check/upload - no new data')}
}
}
{# check
# Interactive sanity checks on the new VISITS rows.
# NOTE(review): the bare expressions below only display when run line-by-line
# in a console; when the script is sourced they are evaluated and discarded.
# NOTE(review): 'g' below overwrites the global sites table g used earlier for
# lat/lon lookups, and the for-loop reuses the file index 'i' - both are
# hazards if earlier sections are re-run afterwards.
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%c(a)] # "drew" "ih" "ms" "kc" "others"
# check whether 'where' field has only allowed values
v[!v$where%in%c(paste('o', seq(1,8,1), sep=""),paste('w', seq(1,7,1), sep=""), 'wu','out', 'hall', 'front', 'back','tech','attic'),]
# datetimes
v[is.na(v$start),] # check if start time is NA
v[is.na(v$end),] # check if end time is NA
v[which((!is.na(v$start) | !is.na(v$end)) & v$start>v$end), ] # check whether end happened before start
v[which(as.numeric(difftime(v$start,trunc(v$start,"day"), units = "hours"))<6),] # visits before 6:00
v[which(as.numeric(difftime(v$end,trunc(v$end,"day"), units = "hours"))<6),] # visits before 6:00
v[which(as.numeric(difftime(v$start,trunc(v$start,"day"), units = "hours"))>22),] # visits after 22:00
v[which(as.numeric(difftime(v$end,trunc(v$end,"day"), units = "hours"))>22),] # visits after 22:00
# check rows with NA in what
v[is.na(v$what),]
# check rows with multiple what info
#v[!v$what%in%c(NA,"check","floor","feather","food","fff", "catch", "release", "process", "clean", "bleach","clhall", "logger","harness","dummies", "things", "obs", "cons","ul"),]
# check whether all in what is defined and show the entries which are not
g = unique(unlist(strsplit(v$what, ',')))
gg = g[!g%in%c(NA,"check","dcheck","floor","feather","food","fff", "flood","catch", "release", "process", "clean", "bleach","clhall", "logger","harness","dummies", "things", "obs", "cons","ul","repair", "prep","light_off","set","water","rinse","noise")]
#gg
if(length(gg)>0){
for(i in 1:length(gg)){
print(v[grepl(gg[i],v$what, perl = TRUE),])
}
}else{print('no undefined what')}
}
{# upload
# Append the new VISITS rows and log the upload in DBLOG.
if(nrow(v)>0){
v$v_pk = NA
v$start = as.character(v$start)
v$end = as.character(v$end)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "VISITS", value = v[,c("author","where","start","what","end","comments","v_pk")], row.names = FALSE, append = TRUE)
# BUGFIX: the script name was misspelled 'DB_uploda.R'; use 'DB_upload.R' as
# in every other DBLOG entry so queries by script name find these rows.
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'VISITS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = '', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('VISITS data uploaded from', v$start[1], 'to', v$start[nrow(v)]))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD CONTINUOUS OBSERVATIONS - Z080710 needs SLEEP
{# prepare
# current CONS data from DB
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM CONT_OBS")
dbDisconnect(con)
# current con_obs data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='continuous_observations')
v$cont_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$cont_pk>max(d$cont_pk),] # select only rows that are not in DB yet
# BUGFIX: the message was a bare string whose value is silently discarded
# when the script is sourced; wrap it in print() so it is actually shown.
if(nrow(v)==0){print('no need to check/upload - no new data')}
}
}
{# check
# Interactive sanity checks on the new continuous-observation rows.
# NOTE(review): bare expressions only display when run line-by-line in a
# console; when sourced they are evaluated and discarded. 'g' again clobbers
# the global sites table. The ddply(transform, ...) call below MODIFIES v
# (adds prev/dur), which the upload section then has to strip.
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%a] # "drew" "ih" "ms" "kc" "others"
# check aviary
v[!v$aviary%in%c(paste('o', seq(1,8,1), sep=""),paste('w', seq(1,7,1), sep="")),]
# check unique new sessions
unique(v$session)
# check if bird_ID correct
# birds table
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
v[!v$bird_ID%in%c(b$bird_ID),]
# check that each session has only one bird_ID
vv = ddply(v,.(session, bird_ID), summarise, n = length(bird_ID))
vv[duplicated(vv$session),]
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# check rows with NA in beh
v[is.na(v$beh),]
# check whether 'beh' field has only allowed values
v[!v$beh%in%c('sleep', 'rest', 'stand', 'preen','stretch','hop', 'hh', 'walk','fly', 'run', 'active', 'eat', 'prob', 'peck','drink', 'ruffle'),]
# sure - y,n
v[!v$sure%in%c('n','y'),]
# check whether all birds observed have rest or sleep OR not wrong time and hence too long sleep
v=ddply(v,.(session), transform, prev = c(datetime_[1],datetime_[-length(datetime_)]))
v$dur = difftime(v$datetime_,v$prev, units = 'secs')
v[as.numeric(v$dur)>5*60,] # shows lines with behaviour that lasted longer than 5 min
#v[v$bird_ID == 'Z080704',]
vv = ddply(v,.(bird_ID), summarise, sleep = length(sure[beh%in%c('sleep','rest')]),dur = sum(dur[beh%in%c('sleep','rest')]))
vv # shows duration of sleep/rest observation per bird
}
{# upload
if(nrow(v)>0){
v$cont_pk = NA
v$dur = v$prev = NULL
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
# to CONT_OBS
dbWriteTable(con, name = "CONT_OBS", value = v, row.names = FALSE, append = TRUE)
# to VISITS
names(v)[names(v)=='aviary'] = 'where'
vv = ddply(v,.(author, where, session, bird_ID), summarise, start = min(datetime_), what = 'cons', 'general_check' = 'n', end = max(datetime_), comments = NA)
vv$comments = paste('session', vv$session, 'bird_ID', vv$bird_ID)
vv$session = vv$bird_ID = NULL
vv$v_pk = NA
dbWriteTable(con, name = "VISITS", value = vv[,c("author","where","start","what","end","comments","v_pk")], row.names = FALSE, append = TRUE)
# update DBLOG
dv1 = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'CONT_OBS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE)
dv2 = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'VISITS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'cons', script = 'DB_uploda.R', remarks = NA, stringsAsFactors = FALSE)
dv = rbind(dv1, dv2)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('CONT_OBS and VISITS data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime))))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD AUTHORS
{# prepare
# current AUTHORS data from DB (used to find which entry rows are new)
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM AUTHORS")
dbDisconnect(con)
# current authors data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='authors')
v$authors_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$authors_pk>max(d$authors_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints offending rows; expect 0-row output when data are clean
# NAs in initials
v[is.na(v$initials),]
# NAs in name
v[is.na(v$name),]
# NAs in surname
v[is.na(v$surname),]
# NAs in contact
v[is.na(v$contact),]
# alias and project
unique(unlist(strsplit(v$alias, ',')))
unique(unlist(strsplit(v$project, ',')))
# datetimes
v$start_ = as.POSIXct(v$start_, format="%Y-%m-%d")
v$end_ = as.POSIXct(v$end_, format="%Y-%m-%d")
v[is.na(v$start_),] # check if start time is NA
v[is.na(v$end_),] # check if end time is NA
v[which((!is.na(v$start_) | !is.na(v$end_)) & v$start_>v$end_), ] # check whether end happened before start
}
{# upload
if(nrow(v)>0){
v$authors_pk = NA # fixed: was 'v$v_pk = NA' (copy-paste from the VISITS block); pk is assigned by the DB
v$start_ = as.character(v$start_)
v$end_ = as.character(v$end_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
# fixed: authors data was written to table "VISITS"; target must be "AUTHORS"
# (consistent with the query above, the DBLOG entry and the print message below)
dbWriteTable(con, name = "AUTHORS", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'AUTHORS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'updated', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('AUTHORS data uploaded'))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD DEVICE
{# prepare
# current DEVICES data from DB (used to find which entry rows are new)
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM DEVICES")
dbDisconnect(con)
# current devices data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='devices')
v$devices_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$devices_pk>max(d$devices_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints offending rows; expect 0-row output when data are clean
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# NAs in devices
v[is.na(v$device),]
# check whether 'device' field has only allowed values
v[!v$device%in%c('acc', 'toa', 'harn', 'dummie'),]
# check ID
# # of characters shall be 3
v[nchar(v$ID)!=3,]
# first letter
unique(substring(v$ID,1,1))
# numbers
unique(substring(v$ID,2,3))
# what
v[!v$what%in%c('on', 'off', 'dd','fail'),]
# batt
unique(v$batt)
}
{# upload
if(nrow(v)>0){
v$devices_pk = NA # pk is assigned by the DB on insert
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "DEVICES", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'DEVICES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # fixed script-name typo ('DB_uploda.R')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
# fixed: 'v$datetime' only worked via silent $ partial matching of 'datetime_'
print(paste('DEVICES data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime_))))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD AVIARIES
{# prepare
# current AVIARIES data from DB (used to find which entry rows are new)
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM AVIARIES")
dbDisconnect(con)
# current aviaries data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='aviaries')
v$av_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$av_pk>max(d$av_pk),] # select only rows that are not in DB yet
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints offending rows; expect 0-row output when data are clean
# datetimes
v[is.na(v$datetime_),] # check if datetime_ is NA
# NAs in authors
v[is.na(v$author),]
# author field - show those that are not in authors
con = dbConnect(dbDriver("SQLite"),dbname = db)
a = dbGetQuery(con, "SELECT*FROM AUTHORS")
a = unique(a$initials[a$initials!=""])
dbDisconnect(con)
g = unique(unlist(strsplit(v$author, ',')))
g[!g%in%a] # "drew" "ih" "ms" "kc" "others"
# NAs in aviary
v[is.na(v$aviary),]
# check aviary (only w1-w7 expected on this sheet)
v[!v$aviary%in%paste('w', seq(1,7,1), sep=""),]
# check light_cycle
v[!v$light_cycle%in%c('constant','natural', '12'),]
# check T_cycle
v[!v$T_cycle%in%c('constant_seewater','natural', '12'),]
# light and T values
summary(v)
}
{# upload
if(nrow(v)>0){
v$av_pk = NA # pk is assigned by the DB on insert
v$datetime_ = as.character(v$datetime_)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "AVIARIES", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'AVIARIES', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # fixed script-name typo ('DB_uploda.R')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
# fixed: 'v$datetime' only worked via silent $ partial matching of 'datetime_'
print(paste('AVIARIES data uploaded from', min(as.POSIXct(v$datetime_)), 'to', max(as.POSIXct(v$datetime_))))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
{# UPLOAD TAGS
{# prepare
# current TAGS data from DB (used to find which entry rows are new)
con = dbConnect(dbDriver("SQLite"),dbname = db)
d = dbGetQuery(con, "SELECT*FROM TAGS")
dbDisconnect(con)
# current tags data_entry file
v = readWorksheetFromFile(paste(wd0, 'VISITS_cons_devices_aviary_ENTRY.xlsx', sep = ''), sheet='tags')
v$tag_pk = 1:nrow(v)
if(nrow(d)>0){
v = v[v$tag_pk>max(d$tag_pk),] # select only rows that are not in DB yet
#v = v[!is.na(v$start),]
if(nrow(v)==0){'no need to check/upload - no new data'}
}
}
{# check - each line prints offending rows; expect 0-row output when data are clean
# NAs in type
v[is.na(v$type),]
# types
unique(v$type)
# NAs in coating
v[is.na(v$coating),]
# coating
unique(v$coating)
# NAs in memmory
v[is.na(v$memmory),]
# memmory
unique(v$memmory)
# batt and mass values
summary(v)
}
{# upload
if(nrow(v)>0){
v$tag_pk = NA # pk is assigned by the DB on insert
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "TAGS", value = v, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'TAGS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = NA, stringsAsFactors = FALSE) # fixed script-name typo ('DB_uploda.R')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste('TAGS data uploaded'))
dbDisconnect(con)
}else{print('no new data, no upload')}
}
}
##### DONE 2018-01-31 13:45:29
# if re-run needed - please use first BIRD TABLE, then above CAPTURE BIRDS update and only then the remaining 2
{# 1. BIRDS TABLE - first upload 2015 - 2017
# One-off initial population of the BIRDS table (marked DONE above); kept for provenance.
# NOTE(review): this block uses objects from the surrounding workspace that are not
# created here ('o' and the gazetteer 'g') - confirm they exist before re-running.
#con = dbConnect(dbDriver("SQLite"),dbname = db)
#dbGetQuery(con, "DROP TABLE IF EXISTS BIRDS")
#bDisconnect(con)
# NOTE(review): the commented-out line above says 'bDisconnect' - presumably dbDisconnect was meant.
# then make the table a new directly in SQLiteStudio
{# upload EVA's catches + RUFAs
v = readWorksheetFromFile(paste(wd2, 'morphometrics+sex_2016.xlsx', sep = ''), sheet=1)
v$RNR[v$RNR%in%o$RINGNR]
r = readWorksheetFromFile(paste(wd2, 'ColourRings2016.xlsx', sep = ''), sheet=1)
r$RNR = toupper(r$RNR)
# attach complete and current colour-ring combinations by metal-ring number
v$colcom = r$complete_cr[match(v$RNR,r$RNR)]
v$colcom_now = r$actual_cr[match(v$RNR,r$RNR)]
v$year_ = substring(v$CatchDate,1,4)
v$CatchLocation[v$CatchLocation == 'Vistula Mouth'] = 'Vistula'
# NOTE(review): 'v$colcom_no' below resolves to 'colcom_now' only via silent $ partial matching.
v = data.frame(year_ = v$year_, species = 'REKN', subspecies = v$Species, bird_ID = v$RNR, crc = v$colcom, crc_now = v$colcom_no, age = v$Age, sex = v$Sex, caught = v$CatchDate, site_c = v$CatchLocation, wing = v$WING, bill = v$BILL, totalhead = v$TOTHD, tarsus = v$TARS, tartoe = v$TATO, stringsAsFactors = FALSE)
# initialise all remaining BIRDS columns as NA
v$home_av = v$current_av = v$start_ = v$end_ = v$end_type = v$lat_c = v$lon_c = v$lat_r = v$lon_r = v$site_r = v$muscle = v$height_1 = v$width_1 = v$height_2 = v$width_2 = v$mass_f = v$mass_c = v$bio_author = v$ful_datetime = v$ful_author = v$remarks = v$bird_pk = v$blood = v$sex_method = v$bio_datetime = NA
v$project = 'MigrationOnthogeny'
#v[duplicated(v$RNR),]
# expected BIRDS column set; the next line should print character(0) when nothing is missing
xx =c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", "bird_pk")
xx[!xx%in%names(v)]
v$caught = as.character(v$caught)
v$site_c = capitalize(tolower(v$site_c))
# look up catch coordinates in the gazetteer 'g'
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
# two RUFA birds known only by metal-ring number
vr = data.frame(species = 'REKN', subspecies = 'ruf', bird_ID = as.character(c('982284830', '982284831')), stringsAsFactors = FALSE)
vx = merge(v,vr,all=TRUE)
vx = vx[,c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","ful_datetime","ful_author","project","remarks", "bird_pk")]
#vr = vx[vx$bird_ID%in%c('982284830', '982284831'),]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = vx , row.names = FALSE, append = TRUE)
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = '2015-2016 catches')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
}
{# upload 2017 catches (except for last one)
v = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = v[which(v$Nioz == 'Yes'),]
v$DNA = ifelse(v$DNA==TRUE, 'yes', NA)
# zero-pad single-digit months so date strings sort/compare correctly
v$CATCH_MONTH = ifelse(nchar(v$CATCH_MONTH)==1, paste(0, v$CATCH_MONTH, sep=""), v$CATCH_MONTH)
# ultrasound measurements recorded directly on the catch sheet
vv = v[nchar(v$ULTRASOUN)>2,]
u1 = data.frame(measured = paste(vv$CATCH_YEAR,'08',vv$CATCH_DAY, sep = '-'), bird_ID = vv$RINGNR, muscle = vv$PECTORAL.MUSCLE, height_1 = vv$SH1 , width_1 = vv$SW1, height_2 = vv$SH2 , width_2 = vv$SW2, stringsAsFactors = FALSE)
# convert decimal commas to decimal points
u1$muscle = gsub(",", ".", u1$muscle)
u1$height_1 = gsub(",", ".", u1$height_1)
u1$width_1 = gsub(",", ".", u1$width_1)
u1$width_2 = gsub(",", ".", u1$width_2)
u1$height_2 = gsub(",", ".", u1$height_2)
u2 = readWorksheetFromFile(paste(wd2, 'ultrasound.xlsx', sep = ''), sheet=1)
u2$mass = u2$age = u2$comments = u2$where = u2$released = NULL
u2$measured = as.character(u2$measured)
u = rbind(u1,u2)
v = merge(v,u, by.x = 'RINGNR', by.y = 'bird_ID', all.x = TRUE)
v$bio_datetime = ifelse(v$CATCH_MONTH == '09', '2017-10-04', ifelse(v$CATCH_MONTH == '08', '2017-09-04', paste(v$CATCH_YEAR,v$CATCH_MONTH,v$CATCH_DAY, sep = '-')))
v$start_ = ifelse(v$CATCH_MONTH == '09', '2017-09-22', NA)
# the September catch ('last one') is handled separately below
v=v[v$CATCH_MONTH != '09',]
v = data.frame(bird_pk = v$BIOKLRI_ID, year_ = v$CATCH_YEAR, species = 'REKN', subspecies = 'isl', bird_ID = v$RINGNR, crc = v$CR_CODE, crc_now = NA, age = v$AGE, sex = NA, caught = paste(v$CATCH_YEAR,v$CATCH_MONTH,v$CATCH_DAY, sep = '-'), site_c = v$CATCH_LOCATION, wing = v$WING, bill = v$BILL, totalhead = v$TOTHD, tarsus = v$TARS, tartoe = v$TATO, mass_f = v$MASS, giz_author = 'ad', bio_author = 'jth', blood = v$DNA, muscle = v$muscle, height_1 = v$height_1, width_1 = v$width_1, height_2 = v$height_2, width_2 = v$width_2, giz_datetime = v$measured, bio_datetime = v$bio_datetime, start_ = v$start_ , stringsAsFactors = FALSE)
v$home_av = v$current_av = v$end_ = v$end_type = v$lat_c = v$lon_c = v$lat_r = v$lon_r = v$site_r = v$mass_c = v$remarks = v$sex_method = NA
v$site_c = ifelse(v$site_c == 'GRIEND', 'Griend', ifelse( v$site_c == 'DE RICHEL', 'Richel', 'Schier'))
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
# birds assigned to Martin belong to the SocialJetLag project
x = readWorksheetFromFile(paste(wd2, 'captive_knots_2017_12+moving_2018_01.xlsx', sep = ''), sheet=1)
x = x[x$X2 == 'Martin',]
v$project = ifelse(v$bird_ID%in%x$ID,'SocialJetLag','MigrationOnthogeny')
xx =c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","giz_datetime","giz_author","project","remarks", "bird_pk")
xx[!xx%in%names(v)]
v1 = v[c("year_","species","subspecies","bird_ID","crc","crc_now","home_av","current_av","age","sex" ,"start_", "end_", "end_type","caught", "lat_c","lon_c", "site_c", "lat_r", "lon_r", "site_r","muscle", "height_1","width_1", "height_2","width_2","mass_f", "mass_c", "wing","bill","totalhead", "tarsus", "tartoe", "blood","sex_method","bio_datetime","bio_author","giz_datetime","giz_author","project","remarks", "bird_pk")]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "BIRDS", value = v1, row.names = FALSE, append = TRUE)
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = '2017-07 and 08 catches')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
}
}
# 2. capture BIRDs above
{# 3. update FUL from file, which has to have following info 'author','measured','bird_ID','muscle','height_1','width_1','height_2', 'width_2'
# Copies fuel-load (ultrasound) measurements from a dated spreadsheet into BIRDS,
# then marks the corresponding 'ful' items in TO_DO as solved.
ul_date = '2017-09-23' # DEFINE
u = readWorksheetFromFile(paste(wd2, 'ultrasound_',ul_date,'.xlsx', sep = ''), sheet=1)
u$measured = as.character(u$measured)
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = u[,c('author','measured','bird_ID','muscle','height_1','width_1','height_2', 'width_2')], row.names = FALSE, append = FALSE)
# copy the ultrasound columns into BIRDS for every bird present in temp
dbExecute(con, "UPDATE BIRDS SET
ful_author = (SELECT temp.author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
ful_datetime = (SELECT temp.measured FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
muscle = (SELECT temp.muscle FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_1 = (SELECT temp.'height_1' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_1 = (SELECT temp.width_1 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
height_2 = (SELECT temp.height_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
width_2 = (SELECT temp.width_2 FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# update TO_DO: stamp 'ful' tasks of the measured birds as solved
x = data.frame(bird_ID = u$bird_ID, datetime_ = as.character(Sys.time()), stringsAsFactors=FALSE)
dbWriteTable(con, name = "temp", value = x, row.names = FALSE)
dbExecute(con, "UPDATE TO_DO SET
datetime_solved = (SELECT temp.datetime_ FROM temp WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%ful%')
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%ful%')
")
# NOTE(review): 'mx' is not defined anywhere in this block - likely a leftover from
# another section; confirm the intended object before re-running the next line.
dbWriteTable(con, name = "TO_DO", value = mx[,c('capture','bird_ID','what','datetime_solved','remarks','todo_pk')], row.names = FALSE, append = TRUE)
# NOTE(review): the connection is closed here but used again below (DROP temp and the
# DBLOG write), which will fail on a closed connection; dbDisconnect is then called a
# second time at the end. Reorder these statements before re-running.
dbDisconnect(con)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'ful of 2017-09 catch')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
print('ful updated in birds')
}
{# 4. DONE update biometrics and other info from JOBs DB for BIRDS 2017-09 catch DATA
# One-off (DONE) update of the September-2017 catch: pushes biometrics and metadata
# from the catch spreadsheet into BIRDS via a temporary table, then stamps the
# matching 'mass_f' TO_DO items as solved.
v = readWorksheetFromFile(paste(wd2, 'Biometry captive red knots 2017.xlsx', sep = ''), sheet=1)
v = v[which(v$Nioz == 'Yes'),]
v$DNA = ifelse(v$DNA==TRUE, 'yes', NA)
v$CATCH_MONTH = ifelse(nchar(v$CATCH_MONTH)==1, paste(0, v$CATCH_MONTH, sep=""), v$CATCH_MONTH)
# NOTE(review): 'CATCH_MONT' resolves to 'CATCH_MONTH' only via silent $ partial matching.
v = v[v$CATCH_MONT=='09',]
v$site_c = ifelse(v$CATCH_LOCATION == 'GRIEND', 'Griend', ifelse( v$CATCH_LOCATION == 'DE RICHEL', 'Richel', 'Schier'))
v$lat_c = g$lat[match(v$site_c,g$abb)]
v$lon_c = g$lon[match(v$site_c,g$abb)]
v$project = 'SocialJetLag'
v$age = ifelse(v$AGE == 3, 'A', v$AGE)
# NOTE(review): bio_author is 'jth' in the 2017-07/08 upload above - confirm 'jh' is intended here.
v$bio_author = 'jh'
v$bird_ID = v$RINGNR
v$species = 'REKN'
v$subspecies = 'isl'
# UPDATE BIRDS
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = v[,c('bio_author','bird_ID','TOTHD','BILL','WING','TARS','TATO', 'age','project','site_c','lat_c','lon_c','DNA','MASS','species','subspecies')], row.names = FALSE, append = FALSE)
#bio_datetime = (SELECT temp.capture FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
dbExecute(con, "UPDATE BIRDS SET
bio_author = (SELECT temp.bio_author FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
species = (SELECT temp.species FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
subspecies = (SELECT temp.subspecies FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
mass_f = (SELECT temp.MASS FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
age = (SELECT temp.age FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
blood = (SELECT temp.DNA FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
project = (SELECT temp.project FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
site_c = (SELECT temp.site_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lat_c = (SELECT temp.lat_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_c = (SELECT temp.lon_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
wing = (SELECT temp.WING FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
bill = (SELECT temp.'BILL' FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
totalhead = (SELECT temp.TOTHD FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tarsus = (SELECT temp.TARS FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
tartoe = (SELECT temp.TATO FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
# UPDATE TO_DO
x = data.frame(bird_ID = v$bird_ID, datetime_ = as.character(Sys.time()), stringsAsFactors=FALSE)
dbWriteTable(con, name = "temp", value = x, row.names = FALSE)
dbExecute(con, "UPDATE TO_DO SET
datetime_solved = (SELECT temp.datetime_ FROM temp WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%mass_f%')
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = TO_DO.bird_ID and TO_DO.what like '%mass_f%')
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
if(dblog == TRUE){
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'major', script = 'DB_upload.R', remarks = 'ful of 2017-09 bio, age, species, etc')
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
}
dbDisconnect(con)
print('bio updated in birds')
}
{# update HARN table if 'harn' or 'on'/'off' and what_ID starting with 'H', 'P','D'
# Appends harness records to HARN. 'm', 'f2' and 'i' are not created in this block -
# presumably they come from the enclosing per-file loop of this script (m = one parsed
# capture file, f2[i] = its file name) -- TODO confirm against the surrounding code.
# Note on the filter: & binds tighter than |, so rows are kept when 'harn' occurs in
# what, OR ('on' in what AND what_ID starts with H/D/P), OR ('off' in what AND idem).
mm = m[grepl("harn",m$what, perl = TRUE)| grepl("on",m$what, perl = TRUE) & substring(m$what_ID,1,1) %in%c('H','D','P') | grepl("off",m$what, perl = TRUE) & substring(m$what_ID,1,1) %in%c('H','D','P'),]
mm = mm[!is.na(mm$what),]
if(nrow(mm)==0){print('no harn in what')}else{
# older entry files lack the harness-fit columns (tilt, neck, ...): fill them with NA
if(length(names(mm)[names(mm)=='tilt']) == 0){
mm = mm[,c('capture', 'bird_ID','what', 'what_ID')]
mm$tilt = mm$neck = mm$armpit = mm$back = mm$size = mm$harn_pk= NA
mm$capture = as.character(mm$capture)
}else{
mm = mm[,c('capture', 'bird_ID', 'what','what_ID','tilt', 'neck', 'armpit','back','size')]
mm$harn_pk=NA
mm$capture = as.character(mm$capture)
}
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbWriteTable(con, name = "HARN", value = mm, row.names = FALSE, append = TRUE)
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'HARN', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'weekly', script = 'DB_upload.R', remarks = f2[i], stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
print(paste(mm$capture,'HARN data added for', mm$bird_ID))
dbDisconnect(con)
}
}
{# 5. update positions to decimals
# Re-derives decimal catch/release coordinates in BIRDS from the catch_locations
# gazetteer (matched on site abbreviation), via a temporary table.
v = readWorksheetFromFile(paste(wd2, 'catch_locations.xlsx', sep = ''), sheet=1)
# normalise the various 'empty' spreadsheet cells to NA
v[v==""] = NA
v[v==" "] = NA
v[v=="NA"] = NA
#conv_unit("6 13 51", from = 'deg_min_sec', to = 'dec_deg')
#conv_unit("5 16 40", from = 'deg_min_sec', to = 'dec_deg')
#v$lat_deg = gsub('.', ' ', v$lat_deg, fixed = TRUE)
#v$lon_deg = gsub('.', ' ', v$lon_deg, fixed = TRUE)
#v$lat = ifelse(is.na(v$lat_deg), v$lat, conv_unit(v$lat_deg, from = 'deg_min_sec', to = 'dec_deg'))
con = dbConnect(dbDriver("SQLite"),dbname = db)
b = dbGetQuery(con, "SELECT*FROM BIRDS")
dbDisconnect(con)
# overwrite catch (_c) and release (_r) coordinates from the gazetteer
b$lat_c = v$lat[match(b$site_c, v$abb)]
b$lon_c = v$lon[match(b$site_c, v$abb)]
b$lat_r = v$lat[match(b$site_r, v$abb)]
b$lon_r = v$lon[match(b$site_r, v$abb)]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = b, row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
lat_c = (SELECT temp.lat_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_c = (SELECT temp.lon_c FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lat_r = (SELECT temp.lat_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID),
lon_r = (SELECT temp.lon_r FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'minor', script = 'DB_upload.R: 5. update positions to decimals', remarks = 'updated lat and lon', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
}
{# 6. update color combos
# Fills in missing colour-ring combinations (crc) in BIRDS from the BIOKLRI export,
# matched on metal-ring number, via a temporary table.
m = read.csv(paste(wd2,'BIOKLRI.csv', sep=""), stringsAsFactors=FALSE)
con = dbConnect(dbDriver("SQLite"),dbname = db)
# only birds that currently have no colour combination
b = dbGetQuery(con, "SELECT*FROM BIRDS where crc is null")
dbDisconnect(con)
b$crc = m$CR_CODE[match(b$bird_ID, m$RINGNR)]
#b[,c('bird_ID','crc')]
con = dbConnect(dbDriver("SQLite"),dbname = db)
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dbWriteTable(con, name = "temp", value = b[,c('bird_ID','crc')], row.names = FALSE, append = FALSE)
dbExecute(con, "UPDATE BIRDS SET
crc = (SELECT temp.crc FROM temp WHERE temp.bird_ID = BIRDS.bird_ID)
WHERE
EXISTS (
SELECT *
FROM temp
WHERE temp.bird_ID = BIRDS.bird_ID)
")
dbGetQuery(con, "DROP TABLE IF EXISTS temp")
dv = data.frame(pk = NA, db = 'AVESatNIOZ', table = 'BIRDS', datetime_ = as.character(Sys.time()), author = if(Luc == TRUE){'lm'}else{'mb'}, type = 'minor', script = 'DB_upload.R: update color combos', remarks = 'updated crc', stringsAsFactors = FALSE)
dbWriteTable(con, name = "DBLOG", value = dv, row.names = FALSE, append = TRUE)
dbDisconnect(con)
} |
detectionPval.filter <-
function(methLumi_data, detectionPval.threshold=0.01, detectionPval.perc.threshold=80, projectName = NULL, PATH="./"){
    # Remove low-quality samples from a MethyLumi object based on detection p-values.
    #
    # A probe signal counts as significant when its detection p-value is
    # <= detectionPval.threshold; samples in which fewer than
    # detectionPval.perc.threshold percent of probes are significant are dropped.
    #
    # Args:
    #   methLumi_data: object carrying a "detection" assayData matrix (probes x samples).
    #   detectionPval.threshold: p-value cut-off for calling a probe detected.
    #   detectionPval.perc.threshold: minimum percentage of detected probes per sample.
    #   projectName: if non-NULL, a per-sample quality report is written to
    #     <PATH><projectName>_signifPvalStats_threshold<threshold>.txt (all samples,
    #     ordered from worst to best).
    #   PATH: output directory for the report (must end with a path separator).
    #
    # Returns: methLumi_data with failing samples removed.
    #get detection p-values
    detectPval <- assayDataElement(methLumi_data, "detection")
    #get sample names
    samples <- colnames(detectPval)
    # Per-sample count and percentage of significant probes, vectorized over the
    # whole matrix (replaces an element-growing 1:length() loop, which also failed
    # on zero samples). NAs never count as significant, matching the original
    # which() on an NA-propagating comparison.
    nbSignifPval <- unname(colSums(detectPval <= detectionPval.threshold, na.rm = TRUE))
    percentSignifPval <- nbSignifPval * 100 / nrow(detectPval)
    rm(detectPval)
    #get "bad" samples indices
    index2remove <- which(percentSignifPval < detectionPval.perc.threshold)
    #remove "bad" samples from methylumi object
    if(length(index2remove) > 0) methLumi_data <- methLumi_data[, -index2remove]
    # report order: worst sample first (order() is the idiomatic replacement for
    # sort(..., index.return=TRUE)$ix)
    index <- order(percentSignifPval)
    # save sample quality report as text file
    if(!is.null(projectName)) write.table(list(samples=samples[index], nbSignifPval=nbSignifPval[index], percentSignifPval=percentSignifPval[index]), file=paste(PATH, projectName, "_signifPvalStats_threshold", detectionPval.threshold, ".txt", sep=""), sep="\t", row.names=FALSE, col.names=TRUE)
    return(methLumi_data)
}
| /R/detectionPval.filter.R | no_license | schalkwyk/wateRmelon | R | false | false | 1,373 | r | detectionPval.filter <-
function(methLumi_data, detectionPval.threshold=0.01, detectionPval.perc.threshold=80, projectName = NULL, PATH="./"){
    # Remove low-quality samples from a MethyLumi object based on detection p-values.
    #
    # A probe signal counts as significant when its detection p-value is
    # <= detectionPval.threshold; samples in which fewer than
    # detectionPval.perc.threshold percent of probes are significant are dropped.
    #
    # Args:
    #   methLumi_data: object carrying a "detection" assayData matrix (probes x samples).
    #   detectionPval.threshold: p-value cut-off for calling a probe detected.
    #   detectionPval.perc.threshold: minimum percentage of detected probes per sample.
    #   projectName: if non-NULL, a per-sample quality report is written to
    #     <PATH><projectName>_signifPvalStats_threshold<threshold>.txt (all samples,
    #     ordered from worst to best).
    #   PATH: output directory for the report (must end with a path separator).
    #
    # Returns: methLumi_data with failing samples removed.
    #get detection p-values
    detectPval <- assayDataElement(methLumi_data, "detection")
    #get sample names
    samples <- colnames(detectPval)
    # Per-sample count and percentage of significant probes, vectorized over the
    # whole matrix (replaces an element-growing 1:length() loop, which also failed
    # on zero samples). NAs never count as significant, matching the original
    # which() on an NA-propagating comparison.
    nbSignifPval <- unname(colSums(detectPval <= detectionPval.threshold, na.rm = TRUE))
    percentSignifPval <- nbSignifPval * 100 / nrow(detectPval)
    rm(detectPval)
    #get "bad" samples indices
    index2remove <- which(percentSignifPval < detectionPval.perc.threshold)
    #remove "bad" samples from methylumi object
    if(length(index2remove) > 0) methLumi_data <- methLumi_data[, -index2remove]
    # report order: worst sample first (order() is the idiomatic replacement for
    # sort(..., index.return=TRUE)$ix)
    index <- order(percentSignifPval)
    # save sample quality report as text file
    if(!is.null(projectName)) write.table(list(samples=samples[index], nbSignifPval=nbSignifPval[index], percentSignifPval=percentSignifPval[index]), file=paste(PATH, projectName, "_signifPvalStats_threshold", detectionPval.threshold, ".txt", sep=""), sep="\t", row.names=FALSE, col.names=TRUE)
    return(methLumi_data)
}
|
#' Plot segmentation profile
#'
#' Creates an IGV-like graphical representation of the copy-number segments across the samples in a segmentation object, as output by \code{run_facets}.
#'
#' @param segs FACETS segment output, IGV formatted.
#' @param plotX If \code{TRUE}, includes chromosome X.
#' @param sample_order Manual order of samples.
#' @param cap_log_ratios Cap log-ratios at the absolute value. \code{TRUE} caps at 2, a number caps at that value, \code{FALSE} disables capping.
#' @param colors Vector of three colors, giving the low-, mid- and high-point of the color scale.
#' @param return_object If \code{TRUE}, returns \code{ggplot2} object instead of printing plot.
#'
#' @return Output plots in viewer, unless \code{return_object} is used and \code{ggplot2} objects are returned.
#'
#' @importFrom dplyr mutate left_join filter distinct
#' @importFrom purrr map_dfr
#' @import ggplot2
#' @export
plot_segmentation = function(segs,
                             plotX = FALSE,
                             sample_order = NULL,
                             cap_log_ratios = TRUE,
                             colors = c('darkblue', 'white', 'red'),
                             return_object = FALSE) {
    
    # Chromosome X is encoded as 23.
    # Bug fix: the two chromosome orderings were swapped -- the factor levels must
    # be 1:22 when X is dropped and 1:23 when X is kept (previously chromosome 23
    # became NA whenever plotX = TRUE, breaking the faceting).
    if (!plotX) {
        segs = filter(segs, chrom != 23)
        chrom_order = seq_len(22)
    } else {
        chrom_order = seq_len(23)
    }
    segs = mutate(segs, chrom = factor(chrom, levels = chrom_order, ordered = T))
    
    if (!is.null(sample_order)) {
        if (!all(segs$ID %in% sample_order)) stop('Samples missing from provided sample order', call. = FALSE)
        segs = mutate(segs, ID = factor(ID, levels = sample_order, ordered = T))
    }
    
    # Determine lengths of chromosomes and adjust X-axis accordingly
    chrom_lengths = map_dfr(unique(segs$chrom), function(x) {
        chrom_max = max(segs$loc.end[segs$chrom == x], na.rm = T)
        chrom_min = min(segs$loc.end[segs$chrom == x], na.rm = T)
        list(chrom = x,
             chrom_length = as.numeric(chrom_max - chrom_min))
    }) %>%
        mutate(rel_length = chrom_length/sum(chrom_length)) # bug fix: stray `.data` first argument broke the piped mutate()
    
    segs = left_join(segs, chrom_lengths, by = 'chrom')
    
    # Cap log-ratios and set color legend; a numeric cap is used as given, TRUE caps at 2
    if (cap_log_ratios != FALSE) {
        if (is.numeric(cap_log_ratios)) {
            segs$seg.mean[which(segs$seg.mean > cap_log_ratios)] = cap_log_ratios
            segs$seg.mean[which(segs$seg.mean < -cap_log_ratios)] = -cap_log_ratios
            legend_breaks = c(-cap_log_ratios, -cap_log_ratios/2, 0, cap_log_ratios/2, cap_log_ratios)
        } else {
            segs$seg.mean[which(segs$seg.mean > 2)] = 2
            segs$seg.mean[which(segs$seg.mean < -2)] = -2
            legend_breaks = seq(-2, 2, 1)
        }
    } else {
        legend_breaks = c(min(segs$seg.mean), min(segs$seg.mean)/2, 0, max(segs$seg.mean)/2, max(segs$seg.mean))
    }
    
    # Set Y-axis: each sample occupies an equal horizontal band, stacked to 100
    sample_number = length(unique(segs$ID))
    increments = 100 / sample_number
    max_vec = cumsum(rep(increments, sample_number))
    min_vec = c(0, max_vec[-length(max_vec)])
    segs = mutate(segs,
                  y_min = min_vec[match(ID, factor(unique(segs$ID)))],
                  y_max = max_vec[match(ID, factor(unique(segs$ID)))])
    
    # axis labels sit at the vertical center of each sample's band
    y_labs = distinct(segs, ID, .keep_all = T) %>%
        mutate(pos = (y_max-y_min)/2 + y_min)
    
    # Plot
    seg_plot = ggplot(segs, aes(xmin = loc.start, xmax = loc.end, ymin = y_min, ymax = y_max, fill = seg.mean)) +
        geom_rect() +
        scale_fill_gradient2(low = colors[1], mid = colors[2], high = colors[3], guide = 'colourbar',
                             'Log ratio', breaks = legend_breaks, labels = round(legend_breaks)) +
        scale_y_continuous(expand = c(0,0), breaks = y_labs$pos, labels = y_labs$ID) +
        facet_grid(.~chrom, space = 'free_x', scales = 'free_x', switch = 'x') +
        theme(
            axis.text.x = element_blank(), axis.ticks.x = element_blank(),
            axis.ticks.y = element_blank(),
            panel.spacing.x = unit(-.5, 'lines'),
            panel.spacing.y = unit(0, 'lines'),
            strip.background = element_rect(fill = 'white'),
            panel.background = element_rect(fill = 'white')
        )
    
    if (return_object == TRUE) {
        seg_plot
    } else {
        print(seg_plot)
    }
}
| /R/plot-segmentation.R | permissive | mskcc/facets-suite | R | false | false | 4,249 | r | #' Plot segmentation profile
#'
#' Creates an IGV-like graphical representation of the copy-number segments across the samples in a segmentation object, as output by \code{run_facets}.
#'
#' @param segs FACETS segment output, IGV formatted. Columns used here:
#'   \code{ID}, \code{chrom}, \code{loc.start}, \code{loc.end}, \code{seg.mean}.
#' @param plotX If \code{TRUE}, includes chromosome X (encoded as chromosome 23).
#' @param sample_order Manual order of samples.
#' @param cap_log_ratios Cap log-ratios at the absolute value. \code{TRUE} caps
#'   at 2, a number caps at that value, \code{FALSE} disables capping.
#' @param colors Vector of three colors, giving the low-, mid- and high-point of the color scale.
#' @param return_object If \code{TRUE}, returns \code{ggplot2} object instead of printing plot.
#'
#' @return Output plots in viewer, unless \code{return_object} is used and \code{ggplot2} objects are returned.
#'
#' @importFrom dplyr mutate left_join filter distinct
#' @importFrom purrr map_dfr
#' @import ggplot2
#' @export
plot_segmentation = function(segs,
                             plotX = FALSE,
                             sample_order = NULL,
                             cap_log_ratios = TRUE,
                             colors = c('darkblue', 'white', 'red'),
                             return_object = FALSE) {

    # Chromosome X is encoded as 23. Include level 23 only when it is plotted.
    # Bug fix: the two level sets were previously swapped, so plotX = TRUE
    # turned chromosome X into an NA factor level and dropped it from the plot.
    if (!plotX) {
        segs = filter(segs, chrom != 23)
        chrom_order = 1:22
    } else {
        chrom_order = 1:23
    }
    segs = mutate(segs, chrom = factor(chrom, levels = chrom_order, ordered = TRUE))

    if (!is.null(sample_order)) {
        if (!all(segs$ID %in% sample_order)) stop('Samples missing from provided sample order', call. = FALSE)
        segs = mutate(segs, ID = factor(ID, levels = sample_order, ordered = TRUE))
    }

    # Determine lengths of chromosomes and adjust X-axis accordingly.
    # NOTE(review): both bounds come from loc.end (not loc.start) -- confirm
    # this matches the intended definition of chromosome span.
    chrom_lengths = map_dfr(unique(segs$chrom), function(x) {
        chrom_max = max(segs$loc.end[segs$chrom == x], na.rm = TRUE)
        chrom_min = min(segs$loc.end[segs$chrom == x], na.rm = TRUE)
        list(chrom = x,
             chrom_length = as.numeric(chrom_max - chrom_min))
    }) %>%
        # Bug fix: a stray `.data` argument was passed to mutate() here; it is
        # not a column and broke the pipe.
        mutate(rel_length = chrom_length/sum(chrom_length))
    segs = left_join(segs, chrom_lengths, by = 'chrom')

    # Cap log-ratios and set color legend breaks.
    if (cap_log_ratios != FALSE) {
        if (is.numeric(cap_log_ratios)) {
            segs$seg.mean[which(segs$seg.mean > cap_log_ratios)] = cap_log_ratios
            segs$seg.mean[which(segs$seg.mean < -cap_log_ratios)] = -cap_log_ratios
            legend_breaks = c(-cap_log_ratios, -cap_log_ratios/2, 0, cap_log_ratios/2, cap_log_ratios)
        } else {
            # cap_log_ratios = TRUE: default cap at |2|
            segs$seg.mean[which(segs$seg.mean > 2)] = 2
            segs$seg.mean[which(segs$seg.mean < -2)] = -2
            legend_breaks = seq(-2, 2, 1)
        }
    } else {
        legend_breaks = c(min(segs$seg.mean), min(segs$seg.mean)/2, 0, max(segs$seg.mean)/2, max(segs$seg.mean))
    }

    # Set Y-axis: stack the samples as equal-height horizontal bands on [0, 100],
    # labelling each band at its vertical midpoint.
    sample_number = length(unique(segs$ID))
    increments = 100 / sample_number
    max_vec = cumsum(rep(increments, sample_number))
    min_vec = c(0, max_vec[-length(max_vec)])
    segs = mutate(segs,
                  y_min = min_vec[match(ID, factor(unique(segs$ID)))],
                  y_max = max_vec[match(ID, factor(unique(segs$ID)))])
    y_labs = distinct(segs, ID, .keep_all = TRUE) %>%
        mutate(pos = (y_max-y_min)/2 + y_min)

    # Plot: one rectangle per segment, faceted by chromosome with facet widths
    # proportional to chromosome span.
    seg_plot = ggplot(segs, aes(xmin = loc.start, xmax = loc.end, ymin = y_min, ymax = y_max, fill = seg.mean)) +
        geom_rect() +
        scale_fill_gradient2(low = colors[1], mid = colors[2], high = colors[3], guide = 'colourbar',
                             'Log ratio', breaks = legend_breaks, labels = round(legend_breaks)) +
        scale_y_continuous(expand = c(0,0), breaks = y_labs$pos, labels = y_labs$ID) +
        facet_grid(.~chrom, space = 'free_x', scales = 'free_x', switch = 'x') +
        theme(
            axis.text.x = element_blank(), axis.ticks.x = element_blank(),
            axis.ticks.y = element_blank(),
            panel.spacing.x = unit(-.5, 'lines'),
            panel.spacing.y = unit(0, 'lines'),
            strip.background = element_rect(fill = 'white'),
            panel.background = element_rect(fill = 'white')
        )

    if (return_object == TRUE) {
        seg_plot
    } else {
        print(seg_plot)
    }
}
|
# Install rmarkdown on first use. NOTE(review): `%in%` binds tighter than `==`,
# so this parses as ("rmarkdown" %in% ...) == FALSE and happens to work, but
# !requireNamespace("rmarkdown", quietly = TRUE) would be the idiomatic test.
if ("rmarkdown" %in% rownames(installed.packages()) == FALSE) {
install.packages("rmarkdown")
}
# Move into the project directory (machine-specific absolute path).
# NOTE(review): this setwd() call is duplicated; the second call is redundant.
setwd("C:/Users/Tess/Desktop/analytics-portfolio-t/ML sandbox/fraud_detection")
setwd("C:/Users/Tess/Desktop/analytics-portfolio-t/ML sandbox/fraud_detection")
# Point rmarkdown at the pandoc binary bundled with RStudio (Linux path).
Sys.setenv(RSTUDIO_PANDOC = "/usr/lib/rstudio/bin/pandoc")
## render HTML output
rmarkdown::render("fraud.Rmd", output_file = "fraud.html") | /ML_sandbox/fraud_detection/fraud.R | no_license | Tlarot/analytics-projects-ts | R | false | false | 400 | r | if ("rmarkdown" %in% rownames(installed.packages()) == FALSE) {
install.packages("rmarkdown")
}
setwd("C:/Users/Tess/Desktop/analytics-portfolio-t/ML sandbox/fraud_detection")
setwd("C:/Users/Tess/Desktop/analytics-portfolio-t/ML sandbox/fraud_detection")
Sys.setenv(RSTUDIO_PANDOC = "/usr/lib/rstudio/bin/pandoc")
## render HTML output
rmarkdown::render("fraud.Rmd", output_file = "fraud.html") |
#' Read key pause for plotting
#'
#' Blocks until the user presses Enter, so successive plots can be inspected
#' one at a time. In non-interactive sessions readline() returns immediately.
#' @return Invisibly, the line the user typed (usually "").
#' @export
readkey <- function() {
  cat("Press [enter] to continue")
  invisible(readline())
}
| /R/readkey.R | no_license | arcolombo/rToolKit | R | false | false | 129 | r | #' read key pause for plotting
#' @export
readkey<-function(){
# Pause until the user presses Enter (used between successive plots).
# readline() blocks in interactive sessions; in non-interactive sessions it
# returns "" immediately. The typed line is returned invisibly via `line <-`.
cat ("Press [enter] to continue")
line <- readline()
}
|
# ===================================================================
# Title: HW02
# Description:
#   Creates one shot chart per player plus a facetted team chart for
#   the 2016 GSW season, drawn over an NBA half-court image.
# Input(s): player CSV files in data/, court picture (downloaded)
# Output(s): PDF shot charts in images/
# Author: Molly Li
# Date: 03-01-2018
# ===================================================================
#load packages
library(ggplot2)
library(dplyr)
library(tibble)
library("jpeg")
library("grid")

# read data (one CSV per player; assumed to share columns x, y,
# shot_made_flag and name -- TODO confirm `name` exists in each file)
stephen  <- read.csv("data/stephen-curry.csv", stringsAsFactors = FALSE)
klay     <- read.csv("data/klay-thompson.csv", stringsAsFactors = FALSE)
kevin    <- read.csv("data/kevin-durant.csv", stringsAsFactors = FALSE)
draymond <- read.csv("data/draymond-green.csv", stringsAsFactors = FALSE)
andre    <- read.csv("data/andre-iguodala.csv", stringsAsFactors = FALSE)

# scatterplot (quick sanity check, never written to disk)
klay_scatterplot <- ggplot(data = klay) +
  geom_point(aes(x = x, y = y, color = shot_made_flag))

# court image (to be used as background of plot)
# NOTE(review): machine-specific absolute download path.
court_file <- download.file(
  "https://raw.githubusercontent.com/ucb-stat133/stat133-spring-2018/master/images/nba-court.jpg",
  '/Users/XuewenLi/desktop/133/hw02/images/nba-court.jpeg')

# create raster object stretched to fill the plotting panel
court_image <- rasterGrob(
  readJPEG('/Users/XuewenLi/desktop/133/hw02/images/nba-court.jpeg'),
  width = unit(1, "npc"), height = unit(1, "npc"))

# Helper: build one shot chart (points over the court background).
make_shot_chart <- function(shots, title) {
  ggplot(data = shots) +
    annotation_custom(court_image, -250, 250, -50, 420) +
    geom_point(aes(x = x, y = y, color = shot_made_flag)) +
    ylim(-50, 420) +
    ggtitle(title) +
    theme_minimal()
}

# Helper: render a chart to a PDF file.
save_chart <- function(chart, filename, width = 6.5, height = 5) {
  pdf(file = filename, width = width, height = height)
  print(chart)
  dev.off()
}

#4.1) Shot charts of each player (10 pts)
# Bug fix: several output filenames previously ended in a trailing space
# (e.g. "andre-iguodala-shot-chart.pdf "); those spaces are removed here.
andre_shot_chart <- make_shot_chart(andre, 'Shot Chart: Andre Iguodala (2016 season)')
save_chart(andre_shot_chart, "images/andre-iguodala-shot-chart.pdf")

draymond_shot_chart <- make_shot_chart(draymond, 'Shot Chart: Draymond Green (2016 season)')
save_chart(draymond_shot_chart, "images/draymond-green-shot-chart.pdf")

# Bug fix: title previously read 'Shot Chart:Kevin Durant' (missing space).
kevin_shot_chart <- make_shot_chart(kevin, 'Shot Chart: Kevin Durant (2016 season)')
save_chart(kevin_shot_chart, "images/kevin-durant-shot-chart.pdf")

klay_shot_chart <- make_shot_chart(klay, 'Shot Chart: Klay Thompson (2016 season)')
save_chart(klay_shot_chart, "images/klay-thompson-shot-chart.pdf")

stephen_shot_chart <- make_shot_chart(stephen, 'Shot Chart: Stephen Curry (2016 season)')
save_chart(stephen_shot_chart, "images/stephen-curry-shot-chart.pdf")

# 4.2) Facetted Shot Chart (10 pts)
igdtc <- rbind(andre, draymond, kevin, klay, stephen)
gsw_shot_charts <- make_shot_chart(igdtc, 'Shot Chart: GSW (2016 season)') +
  facet_wrap(~name)
# Bug fix: this previously wrote to "images/stephen-curry-shot-chart.pdf",
# silently overwriting Stephen Curry's individual chart.
save_chart(gsw_shot_charts, "images/gsw-shot-charts.pdf", width = 8, height = 7)
| /code/make-shot-charts-script.R | no_license | mollyli96/hw2 | R | false | false | 4,130 | r | # ===================================================================
# ===================================================================
# Title: HW02
# Description:
#   Creates one shot chart per player plus a facetted team chart for
#   the 2016 GSW season, drawn over an NBA half-court image.
# Input(s): player CSV files in data/, court picture (downloaded)
# Output(s): PDF shot charts in images/
# Author: Molly Li
# Date: 03-01-2018
# ===================================================================
#load packages
library(ggplot2)
library(dplyr)
library(tibble)
library("jpeg")
library("grid")

# read data (one CSV per player; assumed to share columns x, y,
# shot_made_flag and name -- TODO confirm `name` exists in each file)
stephen  <- read.csv("data/stephen-curry.csv", stringsAsFactors = FALSE)
klay     <- read.csv("data/klay-thompson.csv", stringsAsFactors = FALSE)
kevin    <- read.csv("data/kevin-durant.csv", stringsAsFactors = FALSE)
draymond <- read.csv("data/draymond-green.csv", stringsAsFactors = FALSE)
andre    <- read.csv("data/andre-iguodala.csv", stringsAsFactors = FALSE)

# scatterplot (quick sanity check, never written to disk)
klay_scatterplot <- ggplot(data = klay) +
  geom_point(aes(x = x, y = y, color = shot_made_flag))

# court image (to be used as background of plot)
# NOTE(review): machine-specific absolute download path.
court_file <- download.file(
  "https://raw.githubusercontent.com/ucb-stat133/stat133-spring-2018/master/images/nba-court.jpg",
  '/Users/XuewenLi/desktop/133/hw02/images/nba-court.jpeg')

# create raster object stretched to fill the plotting panel
court_image <- rasterGrob(
  readJPEG('/Users/XuewenLi/desktop/133/hw02/images/nba-court.jpeg'),
  width = unit(1, "npc"), height = unit(1, "npc"))

# Helper: build one shot chart (points over the court background).
make_shot_chart <- function(shots, title) {
  ggplot(data = shots) +
    annotation_custom(court_image, -250, 250, -50, 420) +
    geom_point(aes(x = x, y = y, color = shot_made_flag)) +
    ylim(-50, 420) +
    ggtitle(title) +
    theme_minimal()
}

# Helper: render a chart to a PDF file.
save_chart <- function(chart, filename, width = 6.5, height = 5) {
  pdf(file = filename, width = width, height = height)
  print(chart)
  dev.off()
}

#4.1) Shot charts of each player (10 pts)
# Bug fix: several output filenames previously ended in a trailing space
# (e.g. "andre-iguodala-shot-chart.pdf "); those spaces are removed here.
andre_shot_chart <- make_shot_chart(andre, 'Shot Chart: Andre Iguodala (2016 season)')
save_chart(andre_shot_chart, "images/andre-iguodala-shot-chart.pdf")

draymond_shot_chart <- make_shot_chart(draymond, 'Shot Chart: Draymond Green (2016 season)')
save_chart(draymond_shot_chart, "images/draymond-green-shot-chart.pdf")

# Bug fix: title previously read 'Shot Chart:Kevin Durant' (missing space).
kevin_shot_chart <- make_shot_chart(kevin, 'Shot Chart: Kevin Durant (2016 season)')
save_chart(kevin_shot_chart, "images/kevin-durant-shot-chart.pdf")

klay_shot_chart <- make_shot_chart(klay, 'Shot Chart: Klay Thompson (2016 season)')
save_chart(klay_shot_chart, "images/klay-thompson-shot-chart.pdf")

stephen_shot_chart <- make_shot_chart(stephen, 'Shot Chart: Stephen Curry (2016 season)')
save_chart(stephen_shot_chart, "images/stephen-curry-shot-chart.pdf")

# 4.2) Facetted Shot Chart (10 pts)
igdtc <- rbind(andre, draymond, kevin, klay, stephen)
gsw_shot_charts <- make_shot_chart(igdtc, 'Shot Chart: GSW (2016 season)') +
  facet_wrap(~name)
# Bug fix: this previously wrote to "images/stephen-curry-shot-chart.pdf",
# silently overwriting Stephen Curry's individual chart.
save_chart(gsw_shot_charts, "images/gsw-shot-charts.pdf", width = 8, height = 7)
|
# Draw N Poisson(lambda = 2) samples.
# NOTE(review): no set.seed(), so results differ from run to run.
N <- 1000000
X <- rpois(N, lambda = 2)
X.2 <- X*(X-1) | /test.R | no_license | youknowwhatmynameis/myrepo | R | false | false | 53 | r | N <- 1000000
# Draw the Poisson(lambda = 2) samples again (no set.seed(), so random).
X <- rpois(N, lambda = 2)
# X*(X-1) gives the second factorial moment terms: mean(X.2) estimates
# E[X(X-1)], which equals lambda^2 for a Poisson variable.
X.2 <- X*(X-1)
## Read electricity data from the local text file residing in the working directory
# Columns 1-2 (Date, Time) are read as character, the remaining 7 as numeric;
# the raw file uses both "NA" and "?" to mark missing values.
electricData <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", colClasses = c(rep("character", 2), rep("numeric", 7)), na.strings = c("NA", "?"))
# Dates in the file are day/month/year.
electricData$Date <- as.Date(electricData$Date, "%d/%m/%Y")
## Subset the dataframe to include only the wanted date range
# (the string bounds are coerced to Date for the comparison)
electricData <- electricData[(electricData$Date >= "2007-02-01" & electricData$Date <= "2007-02-02"), ]
## Convert the datetime variables
# Paste date + time, then parse into a POSIXlt timestamp for the x-axis.
electricData$DateTime <- paste(electricData$Date, electricData$Time)
electricData$DateTime <- strptime(electricData$DateTime, "%Y-%m-%d %H:%M:%S")
## Plot the plot2.png
# 480x480 px PNG with transparent background; line plot of power over time.
png(filename = "plot2.png", bg = "transparent", width = 480, height = 480, units = "px")
with(electricData, plot(DateTime, Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off() | /plot2.R | no_license | siowmeng/ExData_Plotting1 | R | false | false | 909 | r |
## Load the household power data; the raw file marks missing values as "NA" or "?"
electricData <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", colClasses = c(rep("character", 2), rep("numeric", 7)), na.strings = c("NA", "?"))
electricData$Date <- as.Date(electricData$Date, "%d/%m/%Y")
## Keep only the two days of interest (1-2 February 2007)
inRange <- electricData$Date >= "2007-02-01" & electricData$Date <= "2007-02-02"
electricData <- electricData[inRange, ]
## Build a single timestamp column from the separate date and time fields
electricData$DateTime <- strptime(paste(electricData$Date, electricData$Time), "%Y-%m-%d %H:%M:%S")
## Render the global-active-power line plot into plot2.png
png(filename = "plot2.png", bg = "transparent", width = 480, height = 480, units = "px")
plot(electricData$DateTime, electricData$Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
#' Estimate Lagging and Leading Times and Concentrations
#'
#' Estimates lagging and leading times and concentrations.
#' Used by correct.xx functions to estimate lagging and leading timepoints
#' and concentrations for each timepoint.
#' @param x data.frame
#' @param nomtimevar1 column name in x indicating nominal time after dose
#' @param depvar1 column name in x indicating concentration
#' @param timevar1 column name in x indicating actual time after dose
#' @param lagc concentration at previous sampling time
#' @param lagt previous sampling time
#' @param leadc concentration at next sampling time
#' @param leadt next sampling time
#' @param ... ignored
#' @return data.frame
#' @importFrom dplyr last lead lag left_join
lag_lead <- function(
x,nomtimevar1=NA,depvar1=NA,timevar1=NA,
lagc=NA,lagt=NA,leadc=NA,leadt=NA,...
){
# For every nominal timepoint, derive the previous/next (lag/lead) observed
# sampling time and concentration, skipping over missing concentrations.
# Dead code kept for reference: the tidy-eval version of the setup below.
# original <- x %>%
# mutate(depvar = !!depvar1, # dependent variable (internal)
# timevar = !!timevar1, # actual time variable (internal)
# ptime = !!nomtimevar1 # nominal time (internal)
# ) %>%
# mutate(flag=ifelse(!is.na(depvar),0,1)) %>% # flags type of missing value (in between or at the end)
# mutate(flag=ifelse(is.na(depvar)&timevar>last(timevar[!is.na(depvar)]),2,flag))
# Copy the caller-named columns into fixed internal names.
original <- x
original$depvar <- original[[depvar1]]
original$timevar <- original[[timevar1]]
original$ptime <- original[[nomtimevar1]]
# flag: 0 = observed, 1 = missing with later observations ("in between"),
# 2 = missing after the last observed concentration ("at the end").
original %<>%
mutate(flag=ifelse(!is.na(depvar),0,1)) %>% # flags type of missing value (in between or at the end)
mutate(flag=ifelse(is.na(depvar)&timevar>last(timevar[!is.na(depvar)]),2,flag))
#1 delete NA's
no.na=original %>%filter(!is.na(depvar))
#2 calc lead and lag
# (computed on observed rows only, so neighbours skip over missing records)
no.na=no.na %>% arrange(ptime) %>%
mutate(leadc=lead(depvar), # concentration at next sampling time (internal)
lagc=lag(depvar), # concentration at previous sampling time (internal)
leadt=lead(timevar), # next sampling time (internal)
lagt=lag(timevar) # previous sampling time (internal)
) %>%
select(ptime,leadc,lagc,leadt,lagt)
#3 merge with original
# (ptime is the only shared column, so the implicit join key is ptime)
# newdata=left_join(original,no.na,by="ptime")
newdata=left_join(original,no.na)
# Fill neighbours for missing records: carry lead values forward through
# "in between" gaps; pin the last observed value for trailing records.
newdata = newdata %>% arrange(ptime) %>%
mutate(leadc =ifelse(flag==1,locf(leadc),leadc),
leadt =ifelse(flag==1,locf(leadt),leadt),
lagc =ifelse(flag==2,last(depvar[!is.na(depvar)]),lagc),
lagt =ifelse(flag==2,last(timevar[!is.na(depvar)]),lagt)
)
# Reverse the order so locf() carries the previous observation backwards
# across "in between" gaps.
newdata = newdata %>% arrange(-ptime)
newdata = newdata %>%
mutate(lagc =ifelse(flag==1,locf(lagc),lagc),
lagt =ifelse(flag==1,locf(lagt),lagt)
)
# The final nominal timepoint has no "next" sample by definition.
newdata = newdata %>%
arrange(ptime) %>%
mutate(leadc =ifelse(ptime==last(ptime),NA,leadc),
leadt =ifelse(ptime==last(ptime),NA,leadt)
)
# Rename internal columns to the caller-supplied output names.
names(newdata)[names(newdata)=="lagc"]=lagc
names(newdata)[names(newdata)=="lagt"]=lagt
names(newdata)[names(newdata)=="leadc"]=leadc
names(newdata)[names(newdata)=="leadt"]=leadt
return(newdata)
}
| /R/lag.lead.r | no_license | billdenney/qpNCA | R | false | false | 3,162 | r | #' Estimate Lagging and Leading Times and Concentrations
#'
#' Estimates lagging and leading times and concentrations.
#' Used by correct.xx functions to estimate lagging and leading timepoints
#' and concentrations for each timepoint.
#' @param x data.frame
#' @param nomtimevar1 column name in x indicating nominal time after dose
#' @param depvar1 column name in x indicating concentration
#' @param timevar1 column name in x indicating actual time after dose
#' @param lagc concentration at previous sampling time
#' @param lagt previous sampling time
#' @param leadc concentration at next sampling time
#' @param leadt next sampling time
#' @param ... ignored
#' @return data.frame
#' @importFrom dplyr last lead lag left_join
lag_lead <- function(
  x, nomtimevar1 = NA, depvar1 = NA, timevar1 = NA,
  lagc = NA, lagt = NA, leadc = NA, leadt = NA, ...
){
  # Copy the caller-named columns into fixed internal working names.
  original <- x
  original$depvar <- original[[depvar1]]
  original$timevar <- original[[timevar1]]
  original$ptime <- original[[nomtimevar1]]
  # flag: 0 = observed, 1 = missing with later observations ("in between"),
  # 2 = missing after the last observed concentration ("at the end").
  original %<>%
    mutate(flag=ifelse(!is.na(depvar),0,1)) %>%
    mutate(flag=ifelse(is.na(depvar)&timevar>last(timevar[!is.na(depvar)]),2,flag))
  #1 delete NA's so neighbours skip over missing concentrations
  no.na = original %>% filter(!is.na(depvar))
  #2 calc lead and lag on the observed rows only
  no.na = no.na %>% arrange(ptime) %>%
    mutate(leadc=lead(depvar), # concentration at next sampling time (internal)
           lagc=lag(depvar),   # concentration at previous sampling time (internal)
           leadt=lead(timevar),# next sampling time (internal)
           lagt=lag(timevar)   # previous sampling time (internal)
    ) %>%
    select(ptime,leadc,lagc,leadt,lagt)
  #3 merge with original; join explicitly on the nominal time so dplyr does
  # not have to guess the key (silences the "Joining, by = ..." message and
  # guards against accidental extra shared columns).
  newdata = left_join(original, no.na, by = "ptime")
  # Fill neighbours for missing records: carry lead values forward through
  # "in between" gaps; pin the last observed value for trailing records.
  newdata = newdata %>% arrange(ptime) %>%
    mutate(leadc =ifelse(flag==1,locf(leadc),leadc),
           leadt =ifelse(flag==1,locf(leadt),leadt),
           lagc =ifelse(flag==2,last(depvar[!is.na(depvar)]),lagc),
           lagt =ifelse(flag==2,last(timevar[!is.na(depvar)]),lagt)
    )
  # Reverse the order so locf() carries the previous observation backwards.
  newdata = newdata %>% arrange(-ptime)
  newdata = newdata %>%
    mutate(lagc =ifelse(flag==1,locf(lagc),lagc),
           lagt =ifelse(flag==1,locf(lagt),lagt)
    )
  # The final nominal timepoint has no "next" sample by definition.
  newdata = newdata %>%
    arrange(ptime) %>%
    mutate(leadc =ifelse(ptime==last(ptime),NA,leadc),
           leadt =ifelse(ptime==last(ptime),NA,leadt)
    )
  # Rename internal columns to the caller-supplied output names.
  names(newdata)[names(newdata)=="lagc"]=lagc
  names(newdata)[names(newdata)=="lagt"]=lagt
  names(newdata)[names(newdata)=="leadc"]=leadc
  names(newdata)[names(newdata)=="leadt"]=leadt
  return(newdata)
}
|
# Fuzzer-generated regression input (libFuzzer/valgrind harness): a 9x2 matrix
# of extreme doubles (denormals and huge magnitudes) plus a denormal quantile q.
testlist <- list(data = structure(c(1.269748709812e-320, 1.49121849020985e-312, 6.92383063338793e-251, 3.49284541244692e+30, 3.5295369653595e+30, 6.31151507988387e-28, 9.93476335118854e+44, 3.52953664162775e+30, 7.24774526323231e+43, 2.4173705217461e+35, 6.74730202138664e+38, 2.55318533568062e-310, 8.28904605845809e-317, 5.41108926696144e-312, 3.8174704291228e-310, 2.80698452022019e-307, 3.3229862250991e+36, 1.44438129484958e-134), .Dim = c(9L, 2L)), q = 2.71615493501101e-312)
# Exercise the internal C++ routine with the fuzzed arguments.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556731-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 557 | r | testlist <- list(data = structure(c(1.269748709812e-320, 1.49121849020985e-312, 6.92383063338793e-251, 3.49284541244692e+30, 3.5295369653595e+30, 6.31151507988387e-28, 9.93476335118854e+44, 3.52953664162775e+30, 7.24774526323231e+43, 2.4173705217461e+35, 6.74730202138664e+38, 2.55318533568062e-310, 8.28904605845809e-317, 5.41108926696144e-312, 3.8174704291228e-310, 2.80698452022019e-307, 3.3229862250991e+36, 1.44438129484958e-134), .Dim = c(9L, 2L)), q = 2.71615493501101e-312)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
##URL: https://github.com/garciarb/ProgrammingAssignment2
## The makeCacheMatrix function creates a special "matrix",
## which is really a list containing a function to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
#makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet". Invalidated by set().
  inv_cache <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop the stale cached inverse.
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv_cache <<- inverse
  }
  getinverse <- function() {
    inv_cache
  }
  # The four closures share x/inv_cache; the named list is the public interface.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The cacheSolve function calculates the inverse of the special "matrix"
## that is returned by makeCacheMatrix.
##cacheSolve checks to see if the inverse has previously been caclculated.
##If it has been calculated and the matrix did not change, then cacheSolve
##retrieves the inverse from the cache.
##If it was not solved, it calculates the inverse of the matrix and sets
##the value through the usage of the setinverse function.
#cacheSolve
cacheSolve <- function(x, ...) {
# Return the inverse of the special "matrix" x (a makeCacheMatrix list);
# extra arguments in ... are forwarded to solve().
i <- x$getinverse()
# Cache hit: an inverse was stored and the matrix has not changed since.
if(!is.null(i)) {
message("getting cached data")
return(i)
}
# Cache miss: compute the inverse and store it for subsequent calls.
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
} | /cachematrix.R | no_license | garciarb/ProgrammingAssignment2 | R | false | false | 1,351 | r | ##URL: https://github.com/garciarb/ProgrammingAssignment2
## The makeCacheMatrix function creates a special "matrix",
## which is really a list containing a function to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
#makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL until setinverse() stores a value.
i <- NULL
# Replace the stored matrix and invalidate the cached inverse.
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
# The returned named list of closures is the object's public interface.
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The cacheSolve function calculates the inverse of the special "matrix"
## that is returned by makeCacheMatrix.
##cacheSolve checks to see if the inverse has previously been caclculated.
##If it has been calculated and the matrix did not change, then cacheSolve
##retrieves the inverse from the cache.
##If it was not solved, it calculates the inverse of the matrix and sets
##the value through the usage of the setinverse function.
#cacheSolve
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when available; otherwise compute, store, return.
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix (extra arguments go to solve()).
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
library(shiny)
# UI definition: a single page with an input sidebar and a main plot area.
# The matching server must render output$histogram and read input$quebecBox,
# input$mississippiBox and input$colorBox.
shinyUI(fluidPage(
# Descriptive main title
titlePanel("Carbon Dioxide Uptake in Grass Plants"),
sidebarLayout(
# Ask the user which locations they want data from
sidebarPanel(
helpText("This Shiny application uses the CO2 dataset in R to display carbon dioxide uptake data for grass plants. Simply pick which locations you want data from and which color you want the histogram to be created in."),
# Location toggles; both checked by default so all data shows initially.
checkboxInput(inputId="quebecBox", label="Show Quebec plants data",
value=TRUE),
# NOTE(review): this label string contains a literal newline.
checkboxInput(inputId="mississippiBox", label="Show Mississippi
plants data", value=TRUE),
# Histogram fill colour choice.
selectInput(inputId="colorBox", label="What color do you want the plot in?",
choices=c("Blue", "Green", "Purple", "Orange", "Cyan", "Maroon"))
),
# Display the histogram of the gas uptake for the selected data
mainPanel(
plotOutput("histogram")
)
)
))
| /ui.R | no_license | sulaksh555/developing-data-products-week-4-assignment | R | false | false | 1,044 | r | library(shiny)
# UI definition: a single page with an input sidebar and a main plot area.
# The matching server must render output$histogram and read input$quebecBox,
# input$mississippiBox and input$colorBox.
shinyUI(fluidPage(
# Descriptive main title
titlePanel("Carbon Dioxide Uptake in Grass Plants"),
sidebarLayout(
# Ask the user which locations they want data from
sidebarPanel(
helpText("This Shiny application uses the CO2 dataset in R to display carbon dioxide uptake data for grass plants. Simply pick which locations you want data from and which color you want the histogram to be created in."),
# Location toggles; both checked by default so all data shows initially.
checkboxInput(inputId="quebecBox", label="Show Quebec plants data",
value=TRUE),
# NOTE(review): this label string contains a literal newline.
checkboxInput(inputId="mississippiBox", label="Show Mississippi
plants data", value=TRUE),
# Histogram fill colour choice.
selectInput(inputId="colorBox", label="What color do you want the plot in?",
choices=c("Blue", "Green", "Purple", "Orange", "Cyan", "Maroon"))
),
# Display the histogram of the gas uptake for the selected data
mainPanel(
plotOutput("histogram")
)
)
))
|
# This is the absolute bare minimum of what I need to create a shiny app.
# Beware! ... This alone will be a REALLY boring app. A blank page :(
# You will create an app to compare states' cumulative number of COVID cases over time.
# The x-axis will be number of days since 20+ cases and the y-axis will be cumulative cases on the
# log scale (`scale_y_log10()`). We use number of days since 20+ cases on the x-axis so we can make
# better comparisons of the curve trajectories. You will have an input box where the user can choose
# which states to compare (`selectInput()`) and have a submit button to click once the user has chosen
# all states they're interested in comparing. The graph should display a different line for each state,
# with labels either on the graph or in a legend. Color can be used if needed.
library(shiny)
library(tidyverse)
ui <- fluidPage(
selectInput(inputId = "states",
label = "Choose a State",
choices = unique(covid19$state),
multiple = TRUE),
#submitButton(text = "Create Plot!"),
plotOutput(outputId = "covidtimeplot")
)
server <- function(input, output) {
  # Cumulative-cases-over-time plot for the states chosen in input$states.
  output$covidtimeplot <- renderPlot({
    covid19 %>%
      group_by(state) %>%
      filter(cases >= 20) %>%
      mutate(days_since = as.numeric(difftime(date, lag(date, 1))),
             # First row per state has no previous date; count it as day 0.
             Between = ifelse(is.na(days_since), 0, days_since),
             # Running total of day gaps = days since the state hit 20 cases.
             # Bug fix: this previously read `between` (the dplyr function),
             # not the Between column computed above.
             days_since20 = cumsum(as.numeric(Between))) %>%
      # Bug fix: was select(~days_since, ~Between), which passes formulas
      # (an error) and would in any case have dropped the columns the plot
      # needs; drop only the helper columns instead.
      select(-days_since, -Between) %>%
      # Bug fix: the input is registered with inputId = "states", not "state".
      filter(state %in% input$states) %>%
      ggplot(aes(x = days_since20 , y = cases, color = state)) +
      geom_line() +
      scale_y_log10() +
      # Bug fix: scale_x_discrete() removed -- days_since20 is numeric, so
      # the default continuous x scale applies.
      labs(title = "Days Since 20 Covid Cases by State",
           x = "Days Since 20 Cases",
           y = "Total Cases")
  })
}
shinyApp(ui = ui, server = server) | /Exercise_six_shiny/app.R | no_license | mpolemen/matt_excersize_six | R | false | false | 1,843 | r | # This is the absolute bare minimum of what I need to create a shiny app.
# Beware! ... This alone will be a REALLY boring app. A blank page :(
# You will create an app to compare states' cumulative number of COVID cases over time.
# The x-axis will be number of days since 20+ cases and the y-axis will be cumulative cases on the
# log scale (`scale_y_log10()`). We use number of days since 20+ cases on the x-axis so we can make
# better comparisons of the curve trajectories. You will have an input box where the user can choose
# which states to compare (`selectInput()`) and have a submit button to click once the user has chosen
# all states they're interested in comparing. The graph should display a different line for each state,
# with labels either on the graph or in a legend. Color can be used if needed.
library(shiny)
library(tidyverse)
## UI: a multi-select dropdown of states plus the output slot for the plot.
## NOTE(review): `covid19` is not defined in this file -- it must already be
## loaded into the session before the app launches; confirm its source.
ui <- fluidPage(
  selectInput(inputId = "states",
              label = "Choose a State",
              choices = unique(covid19$state),
              multiple = TRUE),
  #submitButton(text = "Create Plot!"),
  plotOutput(outputId = "covidtimeplot")
)
## Server: one log-scale line per selected state; x-axis is the number of
## days elapsed since that state first reported 20+ cases.
server <- function(input, output) {
  output$covidtimeplot <- renderPlot({
    covid19 %>%
      ## lag() and cumsum() below require rows ordered by date within state
      arrange(state, date) %>%
      group_by(state) %>%
      filter(cases >= 20) %>%
      ## gap (in days) between consecutive reports; 0 for each state's first row
      mutate(days_since = as.numeric(difftime(date, lag(date, 1), units = "days")),
             Between = ifelse(is.na(days_since), 0, days_since),
             ## bug fix: was cumsum(as.numeric(between)) -- lower-case
             ## `between` is dplyr's function, not the column created above
             days_since20 = cumsum(Between)) %>%
      ## bug fix: the original select(~days_since, ~Between) used invalid
      ## formula syntax and would have dropped state/cases/days_since20,
      ## which the plot needs -- so it is removed entirely.
      ## bug fix: the input is registered as "states", not "state"
      filter(state %in% input$states) %>%
      ggplot(aes(x = days_since20, y = cases, color = state)) +
      geom_line() +
      scale_y_log10() +
      ## bug fix: scale_x_discrete() dropped -- days_since20 is numeric and
      ## a discrete scale renders an empty axis
      labs(title = "Days Since 20 Covid Cases by State",
           x = "Days Since 20 Cases",
           y = "Total Cases")
  })
}
shinyApp(ui = ui, server = server) |
############################################################################
## Wrapper function for Fitting multiple factor DM model.
## Take a bsseq object and a design matrix
############################################################################
## Fit a linear model to arcsine-transformed methylation levels at every
## CpG site, for experiments with a general (multi-factor) design.
##
## Arguments:
##   BSobj          - bsseq object holding coverage ("Cov") and methylated
##                    counts ("M") for all samples.
##   design         - data.frame of sample covariates; rows must line up
##                    with the samples in BSobj.
##   formula        - model formula referring to columns of `design`.
##   smoothing      - if TRUE, the counts are smoothed along the genome
##                    before the methylation levels are computed.
##   smoothing.span - window size passed to smooth.chr when smoothing=TRUE
##                    (presumably in bp -- confirm against smooth.chr).
##
## Returns a list with the genomic ranges (gr), the inputs, the expanded
## design matrix X, and the per-CpG fit (beta and flattened var/cov rows).
DMLfit.multiFactor <- function(BSobj, design, formula, smoothing=FALSE, smoothing.span=500) {
    ## some checking to make sure input data is correct
    if(length(sampleNames(BSobj)) != nrow(design))
        stop("Dimension of data and design don't match. ")
    ## make design matrix out of formula
    X <- model.matrix(formula, design)
    ## need residual degrees of freedom to estimate the dispersion later
    if(nrow(X) <= ncol(X)+1)
        stop("No enough degree of freedom to fit the linear model. Drop some terms in formula.")
    ## take counts from BSobj
    N0 <- getBSseq(BSobj, "Cov")
    Y0 <- getBSseq(BSobj, "M")
    ## compute the response variable Z, which is transformed methylation level;
    ## c0 bounds the ratio away from 0/1 so asin() stays defined at zero or
    ## full methylation
    c0 = 0.1
    if(smoothing) { ## with smoothing. The mean methylation levels are smoothed
        allchr <- as.character(seqnames(BSobj))
        allpos <- start(BSobj)
        N0.sm = N0; Y0.sm = Y0
        for(i in 1:ncol(N0)) {
            N0.sm[,i] <- round(smooth.chr(as.double(N0[,i]), smoothing.span, allchr, allpos, "avg"))
            Y0.sm[,i] <- round(smooth.chr(as.double(Y0[,i]), smoothing.span, allchr, allpos, "avg"))
        }
        Z0 = asin(2*(Y0.sm+c0)/(N0.sm+2*c0) - 1)
    } else { ## no smoothing
        Z0 = asin(2*(Y0+c0)/(N0+2*c0) - 1)
    }
    ## fit the model at every CpG site
    fit <- DMLfit.multiFactor.engine(as.array(Y0), as.array(N0), X, as.array(Z0))
    ## return
    list(gr=getBSseq(BSobj, "gr"), design=design, formula=formula, X=X, fit=fit)
}
############################################################################
## Engine function for Fitting multiple factor DM model.
############################################################################
## Loop over all CpG sites and fit the weighted least squares model at each.
##
## Arguments:
##   Y0 - matrix (CpG x sample) of methylated counts.
##   N0 - matrix (CpG x sample) of total coverage.
##   X0 - design matrix (sample x covariate).
##   Z0 - arcsine-transformed methylation levels, same shape as Y0.
##
## Returns list(beta, var.beta): beta is CpG x p; each row of var.beta is a
## flattened p x p var/cov matrix. Rows stay NA for sites that were skipped.
DMLfit.multiFactor.engine <- function(Y0, N0, X0, Z0) {
    ## inherits() is robust when an object carries more than one class;
    ## the original `class(x) != "DelayedMatrix"` comparison yields a
    ## length > 1 condition for multi-class objects, which errors in if()
    ## under R >= 4.2
    if( (!is.matrix(Y0) | !is.matrix(N0)) &
        (!inherits(Y0, "DelayedMatrix") | !inherits(N0, "DelayedMatrix")) )
        stop("Y and N need to be matrices.\n")
    ## get dimensions
    p = NCOL(X0)
    n = NROW(X0)
    C = nrow(Y0)
    ## preallocate result holders; skipped sites remain NA
    beta = matrix(NA, nrow=C, ncol=p)
    var.beta = matrix(NA, nrow=C, ncol=p*p)
    cat("Fitting DML model for CpG site: ")
    ## seq_len() is safe when C == 0 (1:C would iterate over c(1, 0))
    for( i in seq_len(C) ) {
        ## progress report every 100k sites
        if(i %% 1e5 == 0)
            cat(i, ", ")
        ## take counts for current CpG and fit model
        tmp = DMLfit.oneCG(Y0[i,], N0[i,], X0, Z0[i,], n, p)
        if(is.null(tmp)) next
        ## save point estimates and flattened var/cov matrix for this CpG
        beta[i,] = tmp$beta0
        var.beta[i,] = tmp$var.beta0
    }
    list(beta=beta, var.beta=var.beta)
}
##############################################################
## DML model fitting for one CpG
## This is the "core" function and all methods are in here!!
##############################################################
## Two-pass weighted least squares fit at a single CpG site.
##   Y, N - methylated / total counts across samples (length n)
##   X    - design matrix (n x p)
##   Z    - arcsine-transformed methylation levels (length n)
##   n, p - dimensions of the full design (before dropping zero-coverage samples)
## Returns list(beta0, se.beta0, var.beta0) or NULL when the site cannot be
## fit (insufficient df or rank-deficient design after dropping samples).
DMLfit.oneCG <- function(Y, N, X, Z, n, p) {
    ## small constant to bound the dispersion estimate away from 0/1
    c1 = 0.001
    ## drop samples with zero coverage -- Z is undefined there
    ix <- N > 0
    if(mean(ix) < 1) { ## has missing entries
        X <- X[ix,,drop=FALSE]
        Y <- Y[ix]
        N <- N[ix]
        ## check design
        if(nrow(X) < ncol(X) + 1) ## not enough df for regression
            return(NULL)
        if(any(abs(svd(X)$d) <1e-8)) ## design is not of full rank because of missing. Skip
            return(NULL)
        Z <- Z[ix]
    }
    ## First round of weighted least squares, with coverage N as the weights.
    ## The linear system has to be solved per CpG because it's WLS!!!
    XTVinv = t(X * N)
    beta0 = solve(XTVinv %*% X) %*% (XTVinv %*% Z) ### this parenthesis is also helpful to speed up
    ## method-of-moments dispersion estimate, clipped into [c1, 1-c1]
    phiHat = (sum( (Z - X %*% beta0)^2 * N) - (n - p)) * n / (n - p) / sum(N-1)
    phiHat = min(max(c1, phiHat),1-c1)
    ## second round of regression with dispersion-adjusted weights
    XTVinv = t(X * (N/(1+(N-1)*phiHat))) ###t(X)%*%VInv
    XTVinvX.inv = solve(XTVinv %*% X)
    ## reuse the inverse computed above; the original called solve() on the
    ## same matrix a second time
    beta0 = XTVinvX.inv %*% (XTVinv %*% Z)
    se.beta0 = sqrt(diag(XTVinvX.inv))
    ## return. The var/cov matrix is flattened (column-major) for easy storing.
    list(beta0=beta0, se.beta0=se.beta0, var.beta0 = as.vector(XTVinvX.inv))
}
##############################################################
### hypothesis testing function
##############################################################
## User-facing test entry point. Exactly one of `coef`, `term`, or
## `Contrast` must be supplied; dispatches to the single-column Wald test
## (coef) or the compound contrast test (term / Contrast).
DMLtest.multiFactor <- function(DMLfit, coef=2, term, Contrast) {
    ## count how many of the three mutually exclusive selectors were given
    n.given = sum(!missing(coef), !missing(term), !missing(Contrast))
    if(n.given == 0)
        stop("Must specify one of the following parameter for testing: coef, term, or Contrast.\n")
    if(n.given > 1)
        stop("You can only specify one of the following parameter for testing: coef, term, or Contrast.\n")
    if(!missing(term)) {
        ## translate the term name(s) into a contrast matrix, then test it
        res = DMLtest.multiFactor.Contrast(DMLfit, makeContrast(DMLfit, term))
    } else if(!missing(Contrast)) {
        ## user-supplied contrast: rows must match the design matrix columns
        if( nrow(Contrast) != ncol(DMLfit$X) )
            stop("Input Contrast matrix has wrong dimension: its number of rows must match the number of columns of the design matrix.\n")
        res = DMLtest.multiFactor.Contrast(DMLfit, Contrast)
    } else {
        ## single design-matrix column, Wald test
        res = DMLtest.multiFactor.coef(DMLfit, coef)
    }
    class(res)[2] = "DMLtest.multiFactor"
    invisible(res)
}
##############################################################
## Hypothesis testing when specify a coef for testing.
## This only tests one column in the design matrix.
## Wald test will be used.
##############################################################
## Wald test of a single design-matrix column at every CpG site.
## `coef` may be a column index or a column name of the design matrix.
## Returns (invisibly) a data frame: chr, pos, stat, pvals, fdrs.
DMLtest.multiFactor.coef <- function(DMLfit, coef) {
    ## allow coef to be given as a column name of the design matrix
    if(is.character(coef)) {
        tmp = which(colnames(DMLfit$X) == coef)
        if(length(tmp) == 0)
            stop(paste0("Can't find terms to be tested: ", coef,
                        ". Make sure it matches a column name in design matrix."))
        coef = tmp
    }
    ## hypothesis testing
    p = ncol(DMLfit$X)
    fit = DMLfit$fit
    betas = fit$beta[,coef]
    ## each row of var.beta is a p x p var/cov matrix flattened column-major,
    ## so the coef-th diagonal entry sits at position (coef-1)*p + coef.
    ## Indexing it directly avoids rebuilding a matrix per CpG (and the
    ## original apply()/t() approach also collapsed incorrectly when p == 1).
    ses = sqrt(fit$var.beta[, (coef - 1) * p + coef])
    ## Wald test, get p-values and FDR
    stat = betas / ses
    pvals = 2*pnorm(-abs(stat)) # same as 2*(1-pnorm(abs(stat))), more accurate in the tail
    fdrs = p.adjust(pvals, method="BH")
    ## return a data frame
    gr = DMLfit$gr
    res = data.frame(chr=seqnames(gr), pos=start(gr), stat, pvals, fdrs)
    invisible(res)
}
##############################################################
## Hypothesis testing when specify a contrast matrix.
## This tests multiple columns in the design matrix.
## F-test will be used.
##############################################################
## Test the compound hypothesis Contrast^T * beta = 0 at every CpG site.
## Contrast is p x r (p = number of design-matrix columns, r = number of
## contrasts). Returns (invisibly) a data frame chr/pos/stat/pvals/fdrs,
## with the contrast attached as an attribute.
DMLtest.multiFactor.Contrast <- function(DMLfit, Contrast) {
    p = ncol(DMLfit$X)
    fit = DMLfit$fit
    betas = fit$beta
    ## A^T * beta  (one row per CpG, one column per contrast)
    Abeta = betas %*% Contrast
    ## loop through CpG sites -- have to do this since the var/cov matrices of the beta estimates
    ## are different for each site
    stat = rep( NA, nrow(betas) )
    for( i in 1:nrow(betas) ) {
        ## reconstruct this site's p x p var/cov matrix from its flattened row
        Sigma = matrix(fit$var.beta[i,], ncol=p)
        tmp = solve(t(Contrast) %*% Sigma %*% Contrast)
        thisAbeta = Abeta[i,,drop=FALSE]
        ## Wald-type quadratic form: (L^T b)^T [L^T Sigma L]^{-1} (L^T b)
        stat[i] = thisAbeta %*% tmp %*% t(thisAbeta)
    }
    ## get the sign of the contrast if there's only one contrast.
    ## This is to be added to test statistics
    ## When Contrast has multiple rows, there won't be a sign for test statistics.
    ## NOTE(review): this checks nrow(Contrast) == 1, but "only one contrast"
    ## corresponds to ncol(Contrast) == 1 (r is taken from ncol below) --
    ## verify the intended condition.
    if(nrow(Contrast) == 1)
        signs = sign(betas %*% Contrast)
    else signs = 1
    ## get p-values. Stat follows F_{r, R}
    ## I found that using F distribution, the p-values are pretty large.
    ## Use sqrt(f) and normal gives much smaller p-values,
    ## and this is consistent with the Wald test in two-group comparison.
    r = ncol(Contrast)
    ## R = nrow(DMLfit$X) - ncol(DMLfit$X)
    ## stat = stat / r
    ## pvals = 1 - pf(stat, r, R)
    stat = sqrt(stat / r) * signs
    pvals = 2*pnorm(-abs(stat))
    fdrs = p.adjust(pvals, method="BH")
    ## return a data frame
    gr = DMLfit$gr
    res = data.frame(chr=seqnames(gr), pos=start(gr), stat, pvals, fdrs)
    ## remember which contrast produced these results
    attr(res, "Contrast") = Contrast
    invisible(res)
}
##############################################################
## make contrast matrix given a model and a term to be tested
## Input term can be a vector (testing multiple terms)
##############################################################
## Build a 0/1 contrast matrix (p x k) selecting every design-matrix column
## whose name contains one of the requested terms (including interactions).
makeContrast <- function(fit, term) {
    formula.terms = terms(fit$formula)
    ix = match(term, attr(formula.terms, "term.labels"))
    ## bug fix: match() returns NA (not a zero-length vector) for unmatched
    ## terms, so the original length(ix) == 0 check could never fire; test
    ## with is.na() instead
    if( all(is.na(ix)) )
        stop("term(s) to be tested can't be found in the formula.\n")
    if( any(is.na(ix)) )
        warning("Some term(s) to be tested can't be found in the formula. Will proceed to test the locatable terms.\n")
    ## make contrast matrix. All columns in the design matrix related to
    ## the provided term (including interactions) should be tested.
    allcolnam = colnames(fit$X)
    ix.term = NULL
    for( t in term ) {
        ix.term = c(ix.term, grep(t, allcolnam))
        ## NOTE(review): grep() does substring matching, so terms with
        ## similar names (e.g. "aa" and "aaa") can select extra columns.
    }
    ## one row per selected column, with a single 1 marking that column;
    ## seq_len() keeps the loop safe when no column matched
    L = matrix(0, ncol=ncol(fit$X), nrow=length(ix.term))
    for(i in seq_len(nrow(L)))
        L[i, ix.term[i]] = 1
    return(t(L))
}
| /R/DML.multiFactor.R | no_license | hmyh1202/DSS | R | false | false | 10,072 | r | ############################################################################
## Wrapper function for Fitting multiple factor DM model.
## Take a bsseq object and a design matrix
############################################################################
## Fit a linear model to arcsine-transformed methylation levels at every
## CpG site, for experiments with a general (multi-factor) design.
##
## Arguments:
##   BSobj          - bsseq object holding coverage ("Cov") and methylated
##                    counts ("M") for all samples.
##   design         - data.frame of sample covariates; rows must line up
##                    with the samples in BSobj.
##   formula        - model formula referring to columns of `design`.
##   smoothing      - if TRUE, the counts are smoothed along the genome
##                    before the methylation levels are computed.
##   smoothing.span - window size passed to smooth.chr when smoothing=TRUE
##                    (presumably in bp -- confirm against smooth.chr).
##
## Returns a list with the genomic ranges (gr), the inputs, the expanded
## design matrix X, and the per-CpG fit (beta and flattened var/cov rows).
DMLfit.multiFactor <- function(BSobj, design, formula, smoothing=FALSE, smoothing.span=500) {
    ## some checking to make sure input data is correct
    if(length(sampleNames(BSobj)) != nrow(design))
        stop("Dimension of data and design don't match. ")
    ## make design matrix out of formula
    X <- model.matrix(formula, design)
    ## need residual degrees of freedom to estimate the dispersion later
    if(nrow(X) <= ncol(X)+1)
        stop("No enough degree of freedom to fit the linear model. Drop some terms in formula.")
    ## take counts from BSobj
    N0 <- getBSseq(BSobj, "Cov")
    Y0 <- getBSseq(BSobj, "M")
    ## compute the response variable Z, which is transformed methylation level;
    ## c0 bounds the ratio away from 0/1 so asin() stays defined at zero or
    ## full methylation
    c0 = 0.1
    if(smoothing) { ## with smoothing. The mean methylation levels are smoothed
        allchr <- as.character(seqnames(BSobj))
        allpos <- start(BSobj)
        N0.sm = N0; Y0.sm = Y0
        for(i in 1:ncol(N0)) {
            N0.sm[,i] <- round(smooth.chr(as.double(N0[,i]), smoothing.span, allchr, allpos, "avg"))
            Y0.sm[,i] <- round(smooth.chr(as.double(Y0[,i]), smoothing.span, allchr, allpos, "avg"))
        }
        Z0 = asin(2*(Y0.sm+c0)/(N0.sm+2*c0) - 1)
    } else { ## no smoothing
        Z0 = asin(2*(Y0+c0)/(N0+2*c0) - 1)
    }
    ## fit the model at every CpG site
    fit <- DMLfit.multiFactor.engine(as.array(Y0), as.array(N0), X, as.array(Z0))
    ## return
    list(gr=getBSseq(BSobj, "gr"), design=design, formula=formula, X=X, fit=fit)
}
############################################################################
## Engine function for Fitting multiple factor DM model.
############################################################################
## Loop over all CpG sites and fit the weighted least squares model at each.
##
## Arguments:
##   Y0 - matrix (CpG x sample) of methylated counts.
##   N0 - matrix (CpG x sample) of total coverage.
##   X0 - design matrix (sample x covariate).
##   Z0 - arcsine-transformed methylation levels, same shape as Y0.
##
## Returns list(beta, var.beta): beta is CpG x p; each row of var.beta is a
## flattened p x p var/cov matrix. Rows stay NA for sites that were skipped.
DMLfit.multiFactor.engine <- function(Y0, N0, X0, Z0) {
    ## inherits() is robust when an object carries more than one class;
    ## the original `class(x) != "DelayedMatrix"` comparison yields a
    ## length > 1 condition for multi-class objects, which errors in if()
    ## under R >= 4.2
    if( (!is.matrix(Y0) | !is.matrix(N0)) &
        (!inherits(Y0, "DelayedMatrix") | !inherits(N0, "DelayedMatrix")) )
        stop("Y and N need to be matrices.\n")
    ## get dimensions
    p = NCOL(X0)
    n = NROW(X0)
    C = nrow(Y0)
    ## preallocate result holders; skipped sites remain NA
    beta = matrix(NA, nrow=C, ncol=p)
    var.beta = matrix(NA, nrow=C, ncol=p*p)
    cat("Fitting DML model for CpG site: ")
    ## seq_len() is safe when C == 0 (1:C would iterate over c(1, 0))
    for( i in seq_len(C) ) {
        ## progress report every 100k sites
        if(i %% 1e5 == 0)
            cat(i, ", ")
        ## take counts for current CpG and fit model
        tmp = DMLfit.oneCG(Y0[i,], N0[i,], X0, Z0[i,], n, p)
        if(is.null(tmp)) next
        ## save point estimates and flattened var/cov matrix for this CpG
        beta[i,] = tmp$beta0
        var.beta[i,] = tmp$var.beta0
    }
    list(beta=beta, var.beta=var.beta)
}
##############################################################
## DML model fitting for one CpG
## This is the "core" function and all methods are in here!!
##############################################################
## Two-pass weighted least squares fit at a single CpG site.
##   Y, N - methylated / total counts across samples (length n)
##   X    - design matrix (n x p)
##   Z    - arcsine-transformed methylation levels (length n)
##   n, p - dimensions of the full design (before dropping zero-coverage samples)
## Returns list(beta0, se.beta0, var.beta0) or NULL when the site cannot be
## fit (insufficient df or rank-deficient design after dropping samples).
DMLfit.oneCG <- function(Y, N, X, Z, n, p) {
    ## small constant to bound the dispersion estimate away from 0/1
    c1 = 0.001
    ## drop samples with zero coverage -- Z is undefined there
    ix <- N > 0
    if(mean(ix) < 1) { ## has missing entries
        X <- X[ix,,drop=FALSE]
        Y <- Y[ix]
        N <- N[ix]
        ## check design
        if(nrow(X) < ncol(X) + 1) ## not enough df for regression
            return(NULL)
        if(any(abs(svd(X)$d) <1e-8)) ## design is not of full rank because of missing. Skip
            return(NULL)
        Z <- Z[ix]
    }
    ## First round of weighted least squares, with coverage N as the weights.
    ## The linear system has to be solved per CpG because it's WLS!!!
    XTVinv = t(X * N)
    beta0 = solve(XTVinv %*% X) %*% (XTVinv %*% Z) ### this parenthesis is also helpful to speed up
    ## method-of-moments dispersion estimate, clipped into [c1, 1-c1]
    phiHat = (sum( (Z - X %*% beta0)^2 * N) - (n - p)) * n / (n - p) / sum(N-1)
    phiHat = min(max(c1, phiHat),1-c1)
    ## second round of regression with dispersion-adjusted weights
    XTVinv = t(X * (N/(1+(N-1)*phiHat))) ###t(X)%*%VInv
    XTVinvX.inv = solve(XTVinv %*% X)
    ## reuse the inverse computed above; the original called solve() on the
    ## same matrix a second time
    beta0 = XTVinvX.inv %*% (XTVinv %*% Z)
    se.beta0 = sqrt(diag(XTVinvX.inv))
    ## return. The var/cov matrix is flattened (column-major) for easy storing.
    list(beta0=beta0, se.beta0=se.beta0, var.beta0 = as.vector(XTVinvX.inv))
}
##############################################################
### hypothesis testing function
##############################################################
## User-facing test entry point. Exactly one of `coef`, `term`, or
## `Contrast` must be supplied; dispatches to the single-column Wald test
## (coef) or the compound contrast test (term / Contrast).
DMLtest.multiFactor <- function(DMLfit, coef=2, term, Contrast) {
    ## count how many of the three mutually exclusive selectors were given
    n.given = sum(!missing(coef), !missing(term), !missing(Contrast))
    if(n.given == 0)
        stop("Must specify one of the following parameter for testing: coef, term, or Contrast.\n")
    if(n.given > 1)
        stop("You can only specify one of the following parameter for testing: coef, term, or Contrast.\n")
    if(!missing(term)) {
        ## translate the term name(s) into a contrast matrix, then test it
        res = DMLtest.multiFactor.Contrast(DMLfit, makeContrast(DMLfit, term))
    } else if(!missing(Contrast)) {
        ## user-supplied contrast: rows must match the design matrix columns
        if( nrow(Contrast) != ncol(DMLfit$X) )
            stop("Input Contrast matrix has wrong dimension: its number of rows must match the number of columns of the design matrix.\n")
        res = DMLtest.multiFactor.Contrast(DMLfit, Contrast)
    } else {
        ## single design-matrix column, Wald test
        res = DMLtest.multiFactor.coef(DMLfit, coef)
    }
    class(res)[2] = "DMLtest.multiFactor"
    invisible(res)
}
##############################################################
## Hypothesis testing when specify a coef for testing.
## This only tests one column in the design matrix.
## Wald test will be used.
##############################################################
## Wald test of a single design-matrix column at every CpG site.
## `coef` may be a column index or a column name of the design matrix.
## Returns (invisibly) a data frame: chr, pos, stat, pvals, fdrs.
DMLtest.multiFactor.coef <- function(DMLfit, coef) {
    ## allow coef to be given as a column name of the design matrix
    if(is.character(coef)) {
        tmp = which(colnames(DMLfit$X) == coef)
        if(length(tmp) == 0)
            stop(paste0("Can't find terms to be tested: ", coef,
                        ". Make sure it matches a column name in design matrix."))
        coef = tmp
    }
    ## hypothesis testing
    p = ncol(DMLfit$X)
    fit = DMLfit$fit
    betas = fit$beta[,coef]
    ## each row of var.beta is a p x p var/cov matrix flattened column-major,
    ## so the coef-th diagonal entry sits at position (coef-1)*p + coef.
    ## Indexing it directly avoids rebuilding a matrix per CpG (and the
    ## original apply()/t() approach also collapsed incorrectly when p == 1).
    ses = sqrt(fit$var.beta[, (coef - 1) * p + coef])
    ## Wald test, get p-values and FDR
    stat = betas / ses
    pvals = 2*pnorm(-abs(stat)) # same as 2*(1-pnorm(abs(stat))), more accurate in the tail
    fdrs = p.adjust(pvals, method="BH")
    ## return a data frame
    gr = DMLfit$gr
    res = data.frame(chr=seqnames(gr), pos=start(gr), stat, pvals, fdrs)
    invisible(res)
}
##############################################################
## Hypothesis testing when specify a contrast matrix.
## This tests multiple columns in the design matrix.
## F-test will be used.
##############################################################
## Test the compound hypothesis Contrast^T * beta = 0 at every CpG site.
## Contrast is p x r (p = number of design-matrix columns, r = number of
## contrasts). Returns (invisibly) a data frame chr/pos/stat/pvals/fdrs,
## with the contrast attached as an attribute.
DMLtest.multiFactor.Contrast <- function(DMLfit, Contrast) {
    p = ncol(DMLfit$X)
    fit = DMLfit$fit
    betas = fit$beta
    ## A^T * beta  (one row per CpG, one column per contrast)
    Abeta = betas %*% Contrast
    ## loop through CpG sites -- have to do this since the var/cov matrices of the beta estimates
    ## are different for each site
    stat = rep( NA, nrow(betas) )
    for( i in 1:nrow(betas) ) {
        ## reconstruct this site's p x p var/cov matrix from its flattened row
        Sigma = matrix(fit$var.beta[i,], ncol=p)
        tmp = solve(t(Contrast) %*% Sigma %*% Contrast)
        thisAbeta = Abeta[i,,drop=FALSE]
        ## Wald-type quadratic form: (L^T b)^T [L^T Sigma L]^{-1} (L^T b)
        stat[i] = thisAbeta %*% tmp %*% t(thisAbeta)
    }
    ## get the sign of the contrast if there's only one contrast.
    ## This is to be added to test statistics
    ## When Contrast has multiple rows, there won't be a sign for test statistics.
    ## NOTE(review): this checks nrow(Contrast) == 1, but "only one contrast"
    ## corresponds to ncol(Contrast) == 1 (r is taken from ncol below) --
    ## verify the intended condition.
    if(nrow(Contrast) == 1)
        signs = sign(betas %*% Contrast)
    else signs = 1
    ## get p-values. Stat follows F_{r, R}
    ## I found that using F distribution, the p-values are pretty large.
    ## Use sqrt(f) and normal gives much smaller p-values,
    ## and this is consistent with the Wald test in two-group comparison.
    r = ncol(Contrast)
    ## R = nrow(DMLfit$X) - ncol(DMLfit$X)
    ## stat = stat / r
    ## pvals = 1 - pf(stat, r, R)
    stat = sqrt(stat / r) * signs
    pvals = 2*pnorm(-abs(stat))
    fdrs = p.adjust(pvals, method="BH")
    ## return a data frame
    gr = DMLfit$gr
    res = data.frame(chr=seqnames(gr), pos=start(gr), stat, pvals, fdrs)
    ## remember which contrast produced these results
    attr(res, "Contrast") = Contrast
    invisible(res)
}
##############################################################
## make contrast matrix given a model and a term to be tested
## Input term can be a vector (testing multiple terms)
##############################################################
## Build a 0/1 contrast matrix (p x k) selecting every design-matrix column
## whose name contains one of the requested terms (including interactions).
makeContrast <- function(fit, term) {
    formula.terms = terms(fit$formula)
    ix = match(term, attr(formula.terms, "term.labels"))
    ## bug fix: match() returns NA (not a zero-length vector) for unmatched
    ## terms, so the original length(ix) == 0 check could never fire; test
    ## with is.na() instead
    if( all(is.na(ix)) )
        stop("term(s) to be tested can't be found in the formula.\n")
    if( any(is.na(ix)) )
        warning("Some term(s) to be tested can't be found in the formula. Will proceed to test the locatable terms.\n")
    ## make contrast matrix. All columns in the design matrix related to
    ## the provided term (including interactions) should be tested.
    allcolnam = colnames(fit$X)
    ix.term = NULL
    for( t in term ) {
        ix.term = c(ix.term, grep(t, allcolnam))
        ## NOTE(review): grep() does substring matching, so terms with
        ## similar names (e.g. "aa" and "aaa") can select extra columns.
    }
    ## one row per selected column, with a single 1 marking that column;
    ## seq_len() keeps the loop safe when no column matched
    L = matrix(0, ncol=ncol(fit$X), nrow=length(ix.term))
    for(i in seq_len(nrow(L)))
        L[i, ix.term[i]] = 1
    return(t(L))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run-source.R
\name{source_gist}
\alias{source_gist}
\title{Run a script on gist}
\usage{
source_gist(id, ..., filename = NULL, sha1 = NULL, quiet = FALSE)
}
\arguments{
\item{id}{either full url (character), gist ID (numeric or character of
numeric).}
\item{...}{other options passed to \code{\link[=source]{source()}}}
\item{filename}{if there is more than one R file in the gist, which one to
source (filename ending in '.R')? Default \code{NULL} will source the
first file.}
\item{sha1}{The SHA-1 hash of the file at the remote URL. This is highly
recommend as it prevents you from accidentally running code that's not
what you expect. See \code{\link[=source_url]{source_url()}} for more information on
using a SHA-1 hash.}
\item{quiet}{if \code{FALSE}, the default, prints informative messages.}
}
\description{
\dQuote{Gist is a simple way to share snippets and pastes with others.
All gists are git repositories, so they are automatically versioned,
forkable and usable as a git repository.}
\url{https://gist.github.com/}
}
\examples{
\dontrun{
# You can run gists given their id
source_gist(6872663)
source_gist("6872663")
# Or their html url
source_gist("https://gist.github.com/hadley/6872663")
source_gist("gist.github.com/hadley/6872663")
# It's highly recommend that you run source_gist with the optional
# sha1 argument - this will throw an error if the file has changed since
# you first ran it
source_gist(6872663, sha1 = "54f1db27e60")
# Wrong hash will result in error
source_gist(6872663, sha1 = "54f1db27e61")
# You can specify a particular R file in the gist
source_gist(6872663, filename = "hi.r")
source_gist(6872663, filename = "hi.r", sha1 = "54f1db27e60")
}
}
\seealso{
\code{\link[=source_url]{source_url()}}
}
| /man/source_gist.Rd | permissive | r-lib/devtools | R | false | true | 1,827 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run-source.R
\name{source_gist}
\alias{source_gist}
\title{Run a script on gist}
\usage{
source_gist(id, ..., filename = NULL, sha1 = NULL, quiet = FALSE)
}
\arguments{
\item{id}{either full url (character), gist ID (numeric or character of
numeric).}
\item{...}{other options passed to \code{\link[=source]{source()}}}
\item{filename}{if there is more than one R file in the gist, which one to
source (filename ending in '.R')? Default \code{NULL} will source the
first file.}
\item{sha1}{The SHA-1 hash of the file at the remote URL. This is highly
recommend as it prevents you from accidentally running code that's not
what you expect. See \code{\link[=source_url]{source_url()}} for more information on
using a SHA-1 hash.}
\item{quiet}{if \code{FALSE}, the default, prints informative messages.}
}
\description{
\dQuote{Gist is a simple way to share snippets and pastes with others.
All gists are git repositories, so they are automatically versioned,
forkable and usable as a git repository.}
\url{https://gist.github.com/}
}
\examples{
\dontrun{
# You can run gists given their id
source_gist(6872663)
source_gist("6872663")
# Or their html url
source_gist("https://gist.github.com/hadley/6872663")
source_gist("gist.github.com/hadley/6872663")
# It's highly recommend that you run source_gist with the optional
# sha1 argument - this will throw an error if the file has changed since
# you first ran it
source_gist(6872663, sha1 = "54f1db27e60")
# Wrong hash will result in error
source_gist(6872663, sha1 = "54f1db27e61")
# You can specify a particular R file in the gist
source_gist(6872663, filename = "hi.r")
source_gist(6872663, filename = "hi.r", sha1 = "54f1db27e60")
}
}
\seealso{
\code{\link[=source_url]{source_url()}}
}
|
## Compute entry (nrow, ncol) of exp(Q * k) for the n x n generator Q of a
## Markov process, via the spectral expansion  sum_j A_j * exp(lambda_j * k).
##   k    - power / time parameter of the matrix exponential
##   n    - dimension of the matrix
##   nrow - row index of the requested entry
##   ncol - column index of the requested entry
##   p    - forward probability
##   q    - backward probability
##   r    - returning probability
## A_j is obtained from findAj() (defined elsewhere in this project).
VB_exp <- function(k, n, nrow, ncol, p, q, r) {
  total <- 0
  for (j in 1:n) {
    ## eigenvalue lambda_j: plain r when 2j/(n+1) is an odd integer (the
    ## cosine term vanishes there), otherwise r + 2*sqrt(q*p)*cos(pi*j/(n+1))
    if ((2 * j / (n + 1)) %% 2 == 1) {
      eigval <- r
    } else {
      eigval <- r + 2 * sqrt(q * p) * cos((pi * j) / (n + 1))
    }
    ## spectral weight for this eigenvalue at entry (nrow, ncol)
    weight <- findAj(nrow, ncol, j, n, p, q, r)
    ## accumulate A_j * exp(lambda_j * k)
    total <- total + weight * exp(eigval * k)
  }
  total
}
| /VB_exp.R | no_license | JeremyJosephLin/markovcpp | R | false | false | 983 | r | VB_exp <- function(k,n,nrow,ncol,p,q,r){
# This Function calculated the value of element of exponential n by n matrix to the kth power
# This is for Markov process exp^{Qt} where Q is nxn matrix
# n = dimension of the matrix
# k = power of matrix
# p = forward probability
# q = backward probabiity
# r = returning probability
# nrow = the row of the matrix
# ncol = the column of the matrix
# Initialize lambda and A
lambda <- rep(0,n)
A <- rep(0, n)
for (nMatrix in 1:n){
if ((2 * nMatrix / (n + 1)) %% 2 == 1){
lambda[nMatrix] = r
}
else{
lambda[nMatrix] =
r + 2 * sqrt(q * p) * cos((pi * nMatrix) / (n+1))
} # end else
A[nMatrix] = findAj(nrow, ncol, nMatrix, n, p, q, r)
} #end for loop
# Initialize running sum
afSum = 0 ;
# Calculate the element of the matrix
for (nSum in 1:n){
afSum = A[nSum] * exp(lambda[nSum] * k) + afSum
}
return(afSum)
} # end function
|
%
% Copyright (c) 2013, 2014, IBM Corp. All rights reserved.
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
%
\name{ibmdbR-package}
\alias{ibmdbR-package}
\alias{ibmdbR}
\docType{package}
\title{
IBM In-Database Analytics
}
\description{
In-database analytics functions operate directly on data in a database,
rather than requiring that the data first be extracted to working memory.
This lets you analyze large amounts of data that would be impractical
or impossible to extract. It also avoids security issues associated
with extracting data, and ensures that the data being analyzed
is as current as possible. Some functions additionally use lazy loading
to load only those parts of the data that are actually required,
to further increase efficiency.
This package also contains a data structure called a \code{\link{ida.list}}, which you
can use to store R objects in the database. This simplifies the sharing
of R objects among users. Each user is assigned two tables for R object
storage: a private table, to which only that user has access, and a
public table, which can be read by other users. Use an IDA list to generate
a pointer to either of these tables, and use the pointer to list, store,
or retrieve R objects.}
| /man/overview.Rd | no_license | cran/ibmdbR | R | false | false | 1,845 | rd | %
% Copyright (c) 2013, 2014, IBM Corp. All rights reserved.
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
%
\name{ibmdbR-package}
\alias{ibmdbR-package}
\alias{ibmdbR}
\docType{package}
\title{
IBM In-Database Analytics
}
\description{
In-database analytics functions operate directly on data in a database,
rather than requiring that the data first be extracted to working memory.
This lets you analyze large amounts of data that would be impractical
or impossible to extract. It also avoids security issues associated
with extracting data, and ensures that the data being analyzed
is as current as possible. Some functions additionally use lazy loading
to load only those parts of the data that are actually required,
to further increase efficiency.
This package also contains a data structure called a \code{\link{ida.list}}, which you
can use to store R objects in the database. This simplifies the sharing
of R objects among users. Each user is assigned two tables for R object
storage: a private table, to which only that user has access, and a
public table, which can be read by other users. Use a IDA list to generate
a pointer to either of these tables, and use the pointer to list, store,
or retrieve R objects.}
|
# Verified 1.3.18
# Version 4.0
# panomapa -- multi-panel station map for a vetools "collection".
# Main panel: station positions (optionally drawn over their state
# polygons); each bubble's size is proportional to the station's number of
# available years and its fill colour encodes the station's fraction of
# missing data.  Side panels: a bubble-size ("longevity") legend, a
# "Data %" colour ramp, and an optional north arrow.
#
# `collection` is expected to provide $catalog (per-station metadata with
# $State, $Longitude, $Latitude, $Avble.yrs) and $data (per-station
# series) -- TODO confirm against the vetools documentation.
# Side effects only: draws on the active graphics device and restores
# par()/layout() before returning.  Depends on plyr and on the package
# helpers get.shape.state() and plotArrow().
#
# NOTE(review): the default `leg.pt.bg = pt.bg` refers to a local created
# inside the body (it only works via lazy evaluation and is then
# overwritten unconditionally below); `bg` and `leg.offset` are accepted
# but never used; "Lengevity" in the default legend title is a
# user-visible typo -- confirm before changing, since it is rendered text.
panomapa <- function(collection, main, axis = TRUE,
                     xlab = "Long",
                     ylab = "Lat",
                     lab.col = 'black',
                     bg = NA,
                     map.bg = NA,
                     map.col = 'black',
                     col.ramp = c("Green3", "darkorange1","red"),
                     arrow.cex = 4.5,
                     arrow.plot = TRUE,
                     pt.col = rgb(0, 0, 0, 0.75),
                     pt.cex = 4.5,
                     pt.pch = 21,
                     leg.pt.bg = pt.bg,
                     leg.bg = NA,
                     leg.title = "Lengevity\n(years)",# "Longevidad\n(años)",
                     leg.offset = c(0,0),
                     leg.y.intersp = 1.75) {
  # Map values in [0, 1] onto the col.ramp gradient; returns RGB rows 0-255.
  colf = function(x) {
    colorRamp(col.ramp)(x)
  }
  # Draw a vertical colour-bar panel via image(); ... is forwarded to both
  # image() and title().
  print_ramp <- function (ColorLevels, pal, mar = c(1,2.5,2.5,2), xlab="", ylab="", ...) {
    par(mar=mar)
    image(1, ColorLevels,
          matrix(data = ColorLevels, ncol = length(ColorLevels), nrow = 1),
          col = pal,
          xaxt = "n", ylab = ylab, xlab = xlab, ...)
    title(...)
  }
  # Function start ####
  # A named catalog means a single-station collection: wrap catalog/data in
  # one-element lists so the rest of the code can treat both shapes alike.
  if ( length(names(collection$catalog)) != 0 ) {
    cat = collection$catalog; catalogo = list(); catalogo[[1]] = cat; rm("cat")
    dat = collection$data; datos = list(); datos[[1]] = dat; rm("dat")
  } else {
    catalogo = collection$catalog
    datos = collection$data
  }
  # State abbreviation(s) covered by the stations.
  map.abb = unique(unlist(lapply( catalogo, function(x){x$State} )))
  if ( length(catalogo) != length(datos) ) {
    stop("In collection: catalogo and datos lengths differ.")
  }
  n = length(catalogo)
  # Station coordinates and their bounding box D (col 1 = long, col 2 = lat).
  pos = plyr::ldply(catalogo, function(x) {
    c(x$Longitude, x$Latitude)
  })
  D = matrix(c(range(pos[,1]), range(pos[,2])), ncol=2)
  # Per-station summary: count of available years, series length, fraction
  # of NAs, and fraction of negative values -- TODO confirm what negative
  # values encode (frac.ag).
  dpa = list()
  for (k in 1:n) {
    dpa[[k]] = list(qty.disp = length(catalogo[[k]]$Avble.yrs),
                    m = length(datos[[k]]), frac.na = sum(is.na(datos[[k]]))/length(datos[[k]]),
                    frac.ag = sum(datos[[k]] < 0, na.rm = T)/length(datos[[k]]))
  }
  pta = plyr::ldply(dpa, function(x) {
    c(x$qty.disp, x$m, x$frac.na, x$frac.ag)
  })
  # Bubble sizes relative to the longest-running station, floored at 0.1.
  m.disp = max(pta[, 1])
  pta[, 1] = pta[, 1]/m.disp
  pta[pta[, 1] <= 0.1, 1] = 0.1
  # Bubble fill colour from the fraction of missing data (column 3).
  pt.bg = rgb(colf(pta[, 3])/255, alpha = 0.75)
  leg.pt.bg = rgb(colf(rev(c(0.1, 0.25, 0.5, 0.75, 1)))/255,
                  alpha = 0.75)
  par.save <- par(no.readonly = TRUE)
  # One large map panel (left) plus three stacked legend panels (right).
  layout(mat = matrix(c(1,1,1,2,3,4), ncol=2), widths = c(4,1), heights = c(2,2,1))
  # Size scaling tied to the current figure width in inches (par("fin")[1]).
  ptatruescale = par()$fin[1] * 0.66
  # NOTE(review): this assumes a single state; if map.abb has length > 1
  # the condition warns/errors on recent R versions.
  if (!is.na(map.abb)) {
    ESS <- get.shape.state(map.abb)
    # Bounding box over every polygon of the state shape, so the plotting
    # window fits the map outline and not just the stations.
    SHP.range = matrix(ncol = 4, nrow = length(ESS))
    for (i in 1:length(ESS)) {
      d = slot(ESS, "polygons")[[i]]
      SHP.sub = matrix(ncol = 4, nrow = length(slot(d,"Polygons")))
      for (j in 1:length(slot(d, "Polygons"))) {
        d.sub = slot(d, "Polygons")[[j]]
        d.sub = slot(d.sub, "coords")
        SHP.sub[j, 1:2] = range(d.sub[, 1])
        SHP.sub[j, 3:4] = range(d.sub[, 2])
      }
      d = matrix(apply(SHP.sub, 2, range), ncol = 4)
      SHP.range[i, 1:2] = diag(d[1:2, 1:2])
      SHP.range[i, 3:4] = diag(d[1:2, 3:4])
    }
    d = matrix(apply(SHP.range, 2, range), ncol = 2)
    D = rbind(d, D) # max betwen points and shape border
  }
  # Empty plotting frame spanning the combined bounding box.
  plot(axes = F, asp = 1, bty = "n", type = "n", range(D[,1]), range(D[, 2]), ylab = ylab, xlab = xlab)
  if (!is.na(map.abb)) {
    plot(add = T, axes = F, ESS, bg = map.bg, border = map.col, asp = 1)
  }
  points(pos, cex = ptatruescale * pt.cex * pta[, 1], bg = pt.bg, pch = pt.pch,
         col = pt.col)
  # R's call lookup skips the logical `axis` argument here and still finds
  # graphics::axis().
  if (axis == T) {
    axis(1, col = lab.col, col.axis = lab.col)
    axis(2, col = lab.col, col.axis = lab.col)
  }
  # Default title: singular/plural wording plus state name(s) when known.
  if (missing(main)) {
    if ( length(catalogo) == 1) {
      main = "Station longevity"
    } else {
      main = "Stations longevity"
    }
    if ( ! is.na(map.abb) ) {
      estados.venezuela <- get.shape.state()
      main = paste(main, "for", paste(estados.venezuela[map.abb, "shape.name"], collapse = ", "))
    }
  }
  title(main = main, col.main = lab.col,cex.main=2.5)
  # Legend labels: "<x" thresholds mapped back to years on the raw scale.
  long = round(c(0.1, 0.25, 0.5, 0.75, 1) * m.disp, 0)
  long = apply(cbind(c("<", "<", "<", "<", "<"), long), 1, paste0, collapse = "")
  par(mar = c(0.5,0.5,0,0.5) + 0.1, mai=c(0,0,1,0))
  plot(c(-1,1), c(-1,6), typ='n', asp=1, axes=F, xlab=NA, ylab=NA)
  # NOTE(review): bubbles are drawn unfilled (pt.bg = NA) -- the leg.pt.bg
  # computed above is never used here; the trailing comma passes an empty
  # argument to legend().
  legend(x = -1, y = 5.9,
         legend = long,
         pt.cex = pt.cex * ptatruescale * c(0.2,0.25, 0.5, 0.75, 1),
         pch = 21, bg = leg.bg, pt.bg = NA,
         cex = 1.25, bty = "n", text.col = lab.col,
         y.intersp = leg.y.intersp, )
  title(main = leg.title, cex.main = 1.45, font.main = 2)
  # "Data %" colour-bar panel, 0-100 in steps of 5.
  leg.lvl = seq(0, 100, by=5)
  leg.col = rgb(colf(rev(leg.lvl/100))/255, alpha = 0.75)
  print_ramp(leg.lvl, leg.col, main="Data %",mar = c(1,5,7.5,3.5))
  par(mar = rep(0.5,4) + 0.1)
  plot(c(-1,1), c(-1,5), typ = 'n', asp = 1, axes = F, xlab = NA, ylab = NA)
  # Optional north-arrow panel, then restore the caller's par() settings.
  if (arrow.plot){ plotArrow(cex = arrow.cex) }
  par(par.save)
} | /vetools/R/panomapa.R | no_license | ingted/R-Examples | R | false | false | 5,085 | r | # Verified 1.3.18
# Version 4.0
# panomapa -- multi-panel station map for a vetools "collection".
# Main panel: station positions (optionally drawn over their state
# polygons); each bubble's size is proportional to the station's number of
# available years and its fill colour encodes the station's fraction of
# missing data.  Side panels: a bubble-size ("longevity") legend, a
# "Data %" colour ramp, and an optional north arrow.
#
# `collection` is expected to provide $catalog (per-station metadata with
# $State, $Longitude, $Latitude, $Avble.yrs) and $data (per-station
# series) -- TODO confirm against the vetools documentation.
# Side effects only: draws on the active graphics device and restores
# par()/layout() before returning.  Depends on plyr and on the package
# helpers get.shape.state() and plotArrow().
#
# NOTE(review): the default `leg.pt.bg = pt.bg` refers to a local created
# inside the body (it only works via lazy evaluation and is then
# overwritten unconditionally below); `bg` and `leg.offset` are accepted
# but never used; "Lengevity" in the default legend title is a
# user-visible typo -- confirm before changing, since it is rendered text.
panomapa <- function(collection, main, axis = TRUE,
                     xlab = "Long",
                     ylab = "Lat",
                     lab.col = 'black',
                     bg = NA,
                     map.bg = NA,
                     map.col = 'black',
                     col.ramp = c("Green3", "darkorange1","red"),
                     arrow.cex = 4.5,
                     arrow.plot = TRUE,
                     pt.col = rgb(0, 0, 0, 0.75),
                     pt.cex = 4.5,
                     pt.pch = 21,
                     leg.pt.bg = pt.bg,
                     leg.bg = NA,
                     leg.title = "Lengevity\n(years)",# "Longevidad\n(años)",
                     leg.offset = c(0,0),
                     leg.y.intersp = 1.75) {
  # Map values in [0, 1] onto the col.ramp gradient; returns RGB rows 0-255.
  colf = function(x) {
    colorRamp(col.ramp)(x)
  }
  # Draw a vertical colour-bar panel via image(); ... is forwarded to both
  # image() and title().
  print_ramp <- function (ColorLevels, pal, mar = c(1,2.5,2.5,2), xlab="", ylab="", ...) {
    par(mar=mar)
    image(1, ColorLevels,
          matrix(data = ColorLevels, ncol = length(ColorLevels), nrow = 1),
          col = pal,
          xaxt = "n", ylab = ylab, xlab = xlab, ...)
    title(...)
  }
  # Function start ####
  # A named catalog means a single-station collection: wrap catalog/data in
  # one-element lists so the rest of the code can treat both shapes alike.
  if ( length(names(collection$catalog)) != 0 ) {
    cat = collection$catalog; catalogo = list(); catalogo[[1]] = cat; rm("cat")
    dat = collection$data; datos = list(); datos[[1]] = dat; rm("dat")
  } else {
    catalogo = collection$catalog
    datos = collection$data
  }
  # State abbreviation(s) covered by the stations.
  map.abb = unique(unlist(lapply( catalogo, function(x){x$State} )))
  if ( length(catalogo) != length(datos) ) {
    stop("In collection: catalogo and datos lengths differ.")
  }
  n = length(catalogo)
  # Station coordinates and their bounding box D (col 1 = long, col 2 = lat).
  pos = plyr::ldply(catalogo, function(x) {
    c(x$Longitude, x$Latitude)
  })
  D = matrix(c(range(pos[,1]), range(pos[,2])), ncol=2)
  # Per-station summary: count of available years, series length, fraction
  # of NAs, and fraction of negative values -- TODO confirm what negative
  # values encode (frac.ag).
  dpa = list()
  for (k in 1:n) {
    dpa[[k]] = list(qty.disp = length(catalogo[[k]]$Avble.yrs),
                    m = length(datos[[k]]), frac.na = sum(is.na(datos[[k]]))/length(datos[[k]]),
                    frac.ag = sum(datos[[k]] < 0, na.rm = T)/length(datos[[k]]))
  }
  pta = plyr::ldply(dpa, function(x) {
    c(x$qty.disp, x$m, x$frac.na, x$frac.ag)
  })
  # Bubble sizes relative to the longest-running station, floored at 0.1.
  m.disp = max(pta[, 1])
  pta[, 1] = pta[, 1]/m.disp
  pta[pta[, 1] <= 0.1, 1] = 0.1
  # Bubble fill colour from the fraction of missing data (column 3).
  pt.bg = rgb(colf(pta[, 3])/255, alpha = 0.75)
  leg.pt.bg = rgb(colf(rev(c(0.1, 0.25, 0.5, 0.75, 1)))/255,
                  alpha = 0.75)
  par.save <- par(no.readonly = TRUE)
  # One large map panel (left) plus three stacked legend panels (right).
  layout(mat = matrix(c(1,1,1,2,3,4), ncol=2), widths = c(4,1), heights = c(2,2,1))
  # Size scaling tied to the current figure width in inches (par("fin")[1]).
  ptatruescale = par()$fin[1] * 0.66
  # NOTE(review): this assumes a single state; if map.abb has length > 1
  # the condition warns/errors on recent R versions.
  if (!is.na(map.abb)) {
    ESS <- get.shape.state(map.abb)
    # Bounding box over every polygon of the state shape, so the plotting
    # window fits the map outline and not just the stations.
    SHP.range = matrix(ncol = 4, nrow = length(ESS))
    for (i in 1:length(ESS)) {
      d = slot(ESS, "polygons")[[i]]
      SHP.sub = matrix(ncol = 4, nrow = length(slot(d,"Polygons")))
      for (j in 1:length(slot(d, "Polygons"))) {
        d.sub = slot(d, "Polygons")[[j]]
        d.sub = slot(d.sub, "coords")
        SHP.sub[j, 1:2] = range(d.sub[, 1])
        SHP.sub[j, 3:4] = range(d.sub[, 2])
      }
      d = matrix(apply(SHP.sub, 2, range), ncol = 4)
      SHP.range[i, 1:2] = diag(d[1:2, 1:2])
      SHP.range[i, 3:4] = diag(d[1:2, 3:4])
    }
    d = matrix(apply(SHP.range, 2, range), ncol = 2)
    D = rbind(d, D) # max betwen points and shape border
  }
  # Empty plotting frame spanning the combined bounding box.
  plot(axes = F, asp = 1, bty = "n", type = "n", range(D[,1]), range(D[, 2]), ylab = ylab, xlab = xlab)
  if (!is.na(map.abb)) {
    plot(add = T, axes = F, ESS, bg = map.bg, border = map.col, asp = 1)
  }
  points(pos, cex = ptatruescale * pt.cex * pta[, 1], bg = pt.bg, pch = pt.pch,
         col = pt.col)
  # R's call lookup skips the logical `axis` argument here and still finds
  # graphics::axis().
  if (axis == T) {
    axis(1, col = lab.col, col.axis = lab.col)
    axis(2, col = lab.col, col.axis = lab.col)
  }
  # Default title: singular/plural wording plus state name(s) when known.
  if (missing(main)) {
    if ( length(catalogo) == 1) {
      main = "Station longevity"
    } else {
      main = "Stations longevity"
    }
    if ( ! is.na(map.abb) ) {
      estados.venezuela <- get.shape.state()
      main = paste(main, "for", paste(estados.venezuela[map.abb, "shape.name"], collapse = ", "))
    }
  }
  title(main = main, col.main = lab.col,cex.main=2.5)
  # Legend labels: "<x" thresholds mapped back to years on the raw scale.
  long = round(c(0.1, 0.25, 0.5, 0.75, 1) * m.disp, 0)
  long = apply(cbind(c("<", "<", "<", "<", "<"), long), 1, paste0, collapse = "")
  par(mar = c(0.5,0.5,0,0.5) + 0.1, mai=c(0,0,1,0))
  plot(c(-1,1), c(-1,6), typ='n', asp=1, axes=F, xlab=NA, ylab=NA)
  # NOTE(review): bubbles are drawn unfilled (pt.bg = NA) -- the leg.pt.bg
  # computed above is never used here; the trailing comma passes an empty
  # argument to legend().
  legend(x = -1, y = 5.9,
         legend = long,
         pt.cex = pt.cex * ptatruescale * c(0.2,0.25, 0.5, 0.75, 1),
         pch = 21, bg = leg.bg, pt.bg = NA,
         cex = 1.25, bty = "n", text.col = lab.col,
         y.intersp = leg.y.intersp, )
  title(main = leg.title, cex.main = 1.45, font.main = 2)
  # "Data %" colour-bar panel, 0-100 in steps of 5.
  leg.lvl = seq(0, 100, by=5)
  leg.col = rgb(colf(rev(leg.lvl/100))/255, alpha = 0.75)
  print_ramp(leg.lvl, leg.col, main="Data %",mar = c(1,5,7.5,3.5))
  par(mar = rep(0.5,4) + 0.1)
  plot(c(-1,1), c(-1,5), typ = 'n', asp = 1, axes = F, xlab = NA, ylab = NA)
  # Optional north-arrow panel, then restore the caller's par() settings.
  if (arrow.plot){ plotArrow(cex = arrow.cex) }
  par(par.save)
} |
testlist <- list(a = -2L, b = -11206656L, x = c(-63998L, NA, -49153L, 656801545L, -1L, 1560737791L, -12961222L, 976894522L, 691681850L, 976894522L, 976894522L, 976894522L, 976894522L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886388474L, -1886417009L, NA, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886453513L, -1L, -53505L, -14804225L, -10872294L, -14745746L, 1028992767L, -65707L, 0L, 0L, 851967L, -215L, -250L, -20481L, -1L, -1L, 505085951L, -67207168L, 2097164L, 16777215L, 505085951L, 16777216L, 16383225L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130805-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 640 | r | testlist <- list(a = -2L, b = -11206656L, x = c(-63998L, NA, -49153L, 656801545L, -1L, 1560737791L, -12961222L, 976894522L, 691681850L, 976894522L, 976894522L, 976894522L, 976894522L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886388474L, -1886417009L, NA, -1886417009L, -1886417009L, -1886417009L, -1886417009L, -1886453513L, -1L, -53505L, -14804225L, -10872294L, -14745746L, 1028992767L, -65707L, 0L, 0L, 851967L, -215L, -250L, -20481L, -1L, -1L, 505085951L, -67207168L, 2097164L, 16777215L, 505085951L, 16777216L, 16383225L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
# invasives_.R -- class assignment: exploratory plots of Pennsylvania
# invasive-species observation records (iMapInvasives export, 18 Dec 2017).
# Author: Nicholas Huron.
#required packages
library(tidyverse)
#set directory
# NOTE(review): absolute, user-specific path -- not portable; prefer a
# relative path or here::here().
setwd("/Users/nicholashuron/Google Drive/")
#read in pa invasives data and check its dimensions
invasives <- read_csv("./QuantSci_GP/data/PA_Invasive_Species/all_obs_imap_18Dec17_v2_0.csv")
dim(invasives)
####################################################################################################
##Make a barchart of the proportional number of records for each species name.
#Make this figure neat, clean and titled. Use aesthetics that are unique.
#Note, I do not require you to plot all the species if you cannot get a good figure with so many species.
#Hint: Use your title to explain what you are plotting.
####################################################################################################
#check colnames to figure out where species names are found
colnames(invasives)
#state_scientific_name is a reliable name convention
#find the number of unique species names in this column and order them for easy dups check by sight
sort(unique(invasives$state_scientific_name))
#subset to only aquatic species
aqua.invasives <- filter(invasives, invasives$natlhabitat=="Aquatic")
#find the 25 aquatic species with the most observations
# NOTE(review): assumes at least 25 aquatic species; fewer would yield NA rows.
aqua <- aqua.invasives %>% count(state_scientific_name, sort = T) %>% .[1:25,]
#store the full records for just those top-25 species in a new object
aqua.invasives.top <- aqua.invasives %>% filter(state_scientific_name %in% aqua$state_scientific_name)
#reorder the summary object aqua for plotting (factor levels by descending count)
aqua$state_scientific_name <- factor(aqua$state_scientific_name, levels = aqua$state_scientific_name[order(-aqua$n)])
#plot relative proportional number of records for the top 25 species (version with counts)
ggplot(data = aqua.invasives.top) +
  geom_bar(mapping = aes(x = reorder(x = state_scientific_name,X = -table(state_scientific_name)[state_scientific_name]), group=factor(0), y = ..prop.., fill = factor(..x..)), show.legend=FALSE) +
  coord_flip() +
  labs(y = "Proportion of Observations", x = "Invasive Species", title = "Proportional Prevalance Among the Top Twenty-Five \nMost Sighted Invasive Aquatic Species in PA")
#version with summary table instead (minus sign is missing in reorder to do descending order)
ggplot(data = aqua) +
  geom_bar(mapping = aes(x = reorder(state_scientific_name, n), y = (n/sum(n)), fill = factor(..x..)), stat = "identity", show.legend=FALSE) +
  coord_flip() +
  labs(y = "Proportion of Observations", x = "Invasive Species", title = "Proportional Prevalance Among the Top Twenty-Five \nMost Sighted Invasive Aquatic Species in PA")
####################################################################################################
##In a single plot (facets are encouraged), summarize the relationship between two or more variables of your choosing.
#Use color, shape changes or other techniques you learned in Chapter 3.
#Make your figures unique as it is unlikely that two people would code the exact same thing...
####################################################################################################
# summary counts per county, species, and habitat
# NOTE(review): invasives.co/.sc/.na are computed but never referenced below.
invasives.co <- invasives %>% count(County, sort=T)
invasives.sc <- invasives %>% count(state_scientific_name, sort = T)
invasives.na <- invasives %>% count(natlhabitat, sort=T)
# set County factor levels in reverse alphabetical order
invasives$County <- factor(invasives$County, levels = unique(invasives$County[order(invasives$County, decreasing = T)]))
# counties reordered by per-county observation count, faceted by habitat
ggplot(data = invasives) +
  geom_bar(mapping = aes(x = reorder(County, table(County)[County]), group = factor(0), fill= factor(..x..)), show.legend = FALSE) +
  coord_flip() +
  facet_wrap(~natlhabitat) +
  labs(y = "Invasive Species Observations", x = "County (Pennsylvania)", title ="Aquatic and Terrestrial Invasive Species Sightings by County")
# same plot, but with counties in the factor-level order set above
ggplot(data = invasives) +
  geom_bar(mapping = aes(x = County, group = factor(0), fill= factor(..x..)), show.legend = FALSE) +
  coord_flip() +
  facet_wrap(~natlhabitat) +
labs(y = "Invasive Species Observations", x = "County (Pennsylvania)", title ="Aquatic and Terrestrial Invasive Species Sightings by County") | /invasives_.R | no_license | nahuron/QSGP | R | false | false | 4,059 | r | #file header
# invasives_.R -- class assignment: exploratory plots of Pennsylvania
# invasive-species observation records (iMapInvasives export, 18 Dec 2017).
# Author: Nicholas Huron.
#required packages
library(tidyverse)
#set directory
# NOTE(review): absolute, user-specific path -- not portable; prefer a
# relative path or here::here().
setwd("/Users/nicholashuron/Google Drive/")
#read in pa invasives data and check its dimensions
invasives <- read_csv("./QuantSci_GP/data/PA_Invasive_Species/all_obs_imap_18Dec17_v2_0.csv")
dim(invasives)
####################################################################################################
##Make a barchart of the proportional number of records for each species name.
#Make this figure neat, clean and titled. Use aesthetics that are unique.
#Note, I do not require you to plot all the species if you cannot get a good figure with so many species.
#Hint: Use your title to explain what you are plotting.
####################################################################################################
#check colnames to figure out where species names are found
colnames(invasives)
#state_scientific_name is a reliable name convention
#find the number of unique species names in this column and order them for easy dups check by sight
sort(unique(invasives$state_scientific_name))
#subset to only aquatic species
aqua.invasives <- filter(invasives, invasives$natlhabitat=="Aquatic")
#find the 25 aquatic species with the most observations
# NOTE(review): assumes at least 25 aquatic species; fewer would yield NA rows.
aqua <- aqua.invasives %>% count(state_scientific_name, sort = T) %>% .[1:25,]
#store the full records for just those top-25 species in a new object
aqua.invasives.top <- aqua.invasives %>% filter(state_scientific_name %in% aqua$state_scientific_name)
#reorder the summary object aqua for plotting (factor levels by descending count)
aqua$state_scientific_name <- factor(aqua$state_scientific_name, levels = aqua$state_scientific_name[order(-aqua$n)])
#plot relative proportional number of records for the top 25 species (version with counts)
ggplot(data = aqua.invasives.top) +
  geom_bar(mapping = aes(x = reorder(x = state_scientific_name,X = -table(state_scientific_name)[state_scientific_name]), group=factor(0), y = ..prop.., fill = factor(..x..)), show.legend=FALSE) +
  coord_flip() +
  labs(y = "Proportion of Observations", x = "Invasive Species", title = "Proportional Prevalance Among the Top Twenty-Five \nMost Sighted Invasive Aquatic Species in PA")
#version with summary table instead (minus sign is missing in reorder to do descending order)
ggplot(data = aqua) +
  geom_bar(mapping = aes(x = reorder(state_scientific_name, n), y = (n/sum(n)), fill = factor(..x..)), stat = "identity", show.legend=FALSE) +
  coord_flip() +
  labs(y = "Proportion of Observations", x = "Invasive Species", title = "Proportional Prevalance Among the Top Twenty-Five \nMost Sighted Invasive Aquatic Species in PA")
####################################################################################################
##In a single plot (facets are encouraged), summarize the relationship between two or more variables of your choosing.
#Use color, shape changes or other techniques you learned in Chapter 3.
#Make your figures unique as it is unlikely that two people would code the exact same thing...
####################################################################################################
# summary counts per county, species, and habitat
# NOTE(review): invasives.co/.sc/.na are computed but never referenced below.
invasives.co <- invasives %>% count(County, sort=T)
invasives.sc <- invasives %>% count(state_scientific_name, sort = T)
invasives.na <- invasives %>% count(natlhabitat, sort=T)
# set County factor levels in reverse alphabetical order
invasives$County <- factor(invasives$County, levels = unique(invasives$County[order(invasives$County, decreasing = T)]))
# counties reordered by per-county observation count, faceted by habitat
ggplot(data = invasives) +
  geom_bar(mapping = aes(x = reorder(County, table(County)[County]), group = factor(0), fill= factor(..x..)), show.legend = FALSE) +
  coord_flip() +
  facet_wrap(~natlhabitat) +
  labs(y = "Invasive Species Observations", x = "County (Pennsylvania)", title ="Aquatic and Terrestrial Invasive Species Sightings by County")
# same plot, but with counties in the factor-level order set above
ggplot(data = invasives) +
  geom_bar(mapping = aes(x = County, group = factor(0), fill= factor(..x..)), show.legend = FALSE) +
  coord_flip() +
  facet_wrap(~natlhabitat) +
labs(y = "Invasive Species Observations", x = "County (Pennsylvania)", title ="Aquatic and Terrestrial Invasive Species Sightings by County") |
# plot3.R -- Exploratory Data Analysis course, plot 3.
# Reads the household power consumption data for 2007-02-01/02 and draws
# the three energy sub-metering series as overlaid line plots in plot3.png.

# 1. Reading in data
# Define variables specific to input files and plots
input <- "../household_power_consumption.txt"
time.format <- "%d/%m/%Y %H:%M:%S"
start <- "01/02/2007 00:00:00"
end <- "02/02/2007 23:59:00"
col.classes <- c(rep("character", 2), rep("numeric", 7)) # Date/Time as text, 7 numeric measurements
# Read the first line of the file & find the first recorded date and time
dat0 <- read.csv(input, header = T, sep = ";", na.strings = "?",
                 colClasses = col.classes, nrows = 1)
date0 <- dat0$Date[1]
time0 <- dat0$Time[1]
d0 <- strptime(paste(c(date0, time0), collapse = " "), time.format)
var.names <- colnames(dat0) # save column names
# Calculate the corresponding rows for the target date and time from difference
# in minute because data are recorded with a one-minute sampling rate.
d1 <- strptime(start, time.format) # start point
d2 <- strptime(end, time.format) # end point
row.skip <- as.integer(difftime(d1, d0, units = "mins"))
row.read <- as.integer(difftime(d2, d1, units = "mins") + 1)
# Read the rows between the specified start and end dates & times.
# NOTE: skip drops the header line plus the first row.skip - 1 data rows,
# and header = TRUE then consumes one more line, so reading begins exactly
# at the `start` timestamp; col.names restores the saved column names.
dat <- read.csv(input, header = T, sep = ";", na.strings = "?",
                colClasses = col.classes, col.names = var.names,
                skip = row.skip, nrows = row.read)
# 2. data transformations
# Combine "Date" and "Time" columns and convert it to POSIXlt format
dat$Date <- paste(dat$Date, dat$Time, sep = " ")
dat$Date <- strptime(dat$Date, time.format)
# 3. plotting graph
png(file = "plot3.png", width = 480, height = 480, units = "px")
# Sub_metering_1 in black, then overlay metering 2 (red) and 3 (blue).
with(dat, plot(Date, Sub_metering_1, type = "l", col = "black", xlab = NA,
               ylab = "Energy sub metering"))
with(dat, lines(Date, Sub_metering_2, type = "l", col = "red"))
with(dat, lines(Date, Sub_metering_3, type = "l", col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | ZZen427/ExData_Plotting1 | R | false | false | 1,915 | r | # 1. Reading in data
# plot3.R -- Exploratory Data Analysis course, plot 3.
# Reads the household power consumption data for 2007-02-01/02 and draws
# the three energy sub-metering series as overlaid line plots in plot3.png.

# 1. Reading in data
# Define variables specific to input files and plots
input <- "../household_power_consumption.txt"
time.format <- "%d/%m/%Y %H:%M:%S"
start <- "01/02/2007 00:00:00"
end <- "02/02/2007 23:59:00"
col.classes <- c(rep("character", 2), rep("numeric", 7)) # Date/Time as text, 7 numeric measurements
# Read the first line of the file & find the first recorded date and time
dat0 <- read.csv(input, header = T, sep = ";", na.strings = "?",
                 colClasses = col.classes, nrows = 1)
date0 <- dat0$Date[1]
time0 <- dat0$Time[1]
d0 <- strptime(paste(c(date0, time0), collapse = " "), time.format)
var.names <- colnames(dat0) # save column names
# Calculate the corresponding rows for the target date and time from difference
# in minute because data are recorded with a one-minute sampling rate.
d1 <- strptime(start, time.format) # start point
d2 <- strptime(end, time.format) # end point
row.skip <- as.integer(difftime(d1, d0, units = "mins"))
row.read <- as.integer(difftime(d2, d1, units = "mins") + 1)
# Read the rows between the specified start and end dates & times.
# NOTE: skip drops the header line plus the first row.skip - 1 data rows,
# and header = TRUE then consumes one more line, so reading begins exactly
# at the `start` timestamp; col.names restores the saved column names.
dat <- read.csv(input, header = T, sep = ";", na.strings = "?",
                colClasses = col.classes, col.names = var.names,
                skip = row.skip, nrows = row.read)
# 2. data transformations
# Combine "Date" and "Time" columns and convert it to POSIXlt format
dat$Date <- paste(dat$Date, dat$Time, sep = " ")
dat$Date <- strptime(dat$Date, time.format)
# 3. plotting graph
png(file = "plot3.png", width = 480, height = 480, units = "px")
# Sub_metering_1 in black, then overlay metering 2 (red) and 3 (blue).
with(dat, plot(Date, Sub_metering_1, type = "l", col = "black", xlab = NA,
               ylab = "Energy sub metering"))
with(dat, lines(Date, Sub_metering_2, type = "l", col = "red"))
with(dat, lines(Date, Sub_metering_3, type = "l", col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
library(yaml)
library(tidyverse)
library(glue)
library(rprojroot)
score_submission <- function(submission_filename) {
  # Parse the participant's YAML submission into a named list of answers.
  submission <- yaml.load_file(submission_filename)
  # The two graded answers: which cell line shows more motors/clutches, and
  # whether its motility matched the clutch-motor-stiffness (CMS) prediction.
  chose_t47d_unmatched <-
    submission$high_cellline == "T-47D" & submission$cms_match == "no"
  chose_mda_matched <-
    submission$high_cellline == "MDA-MB-231" & submission$cms_match == "yes"
  if (chose_t47d_unmatched) {
    feedback <- glue(
      "Your interpretation matches what I found — T-47D appears to have a ",
      "greater abundance of clutches and motors, based on the expression of ",
      "37 cell adhesion and 68 myosin genes, respectively. Based on this ",
      "assumption, I would expect this cell line to exhibit greater motility ",
      "in the higher stiffness condition — but that's not the case."
    )
  } else if (chose_mda_matched) {
    feedback <- glue(
      "I can see how, if you interpreted MDA-MB-231 as the 'high motors and ",
      "clutches' cell line, then the fact that this cell line to exhibits ",
      "greater motility in the higher stiffness condition would indeed match ",
      "the CMS predictions. However, it's tough to justify that assumption ",
      "based on the expression of 37 cell adhesion and 68 myosin genes, ",
      "respectively, that we examined here."
    )
  } else {
    feedback <- glue(
      "I'm not sure how you reached that particular conclusion. Check out ",
      "some of the other submissions for a couple interpretations that we'd ",
      "expect to see, given the data used."
    )
  }
  # Return the answer list with the grader's comment attached.
  submission["comment"] <- feedback
  submission
}
| /modules/module7/.eval/eval_fxn.R | permissive | milen-sage/minidream-challenge | R | false | false | 1,516 | r | library(yaml)
library(tidyverse)
library(glue)
library(rprojroot)
score_submission <- function(submission_filename) {
  # Parse the participant's YAML submission into a named list of answers.
  submission <- yaml.load_file(submission_filename)
  # The two graded answers: which cell line shows more motors/clutches, and
  # whether its motility matched the clutch-motor-stiffness (CMS) prediction.
  chose_t47d_unmatched <-
    submission$high_cellline == "T-47D" & submission$cms_match == "no"
  chose_mda_matched <-
    submission$high_cellline == "MDA-MB-231" & submission$cms_match == "yes"
  if (chose_t47d_unmatched) {
    feedback <- glue(
      "Your interpretation matches what I found — T-47D appears to have a ",
      "greater abundance of clutches and motors, based on the expression of ",
      "37 cell adhesion and 68 myosin genes, respectively. Based on this ",
      "assumption, I would expect this cell line to exhibit greater motility ",
      "in the higher stiffness condition — but that's not the case."
    )
  } else if (chose_mda_matched) {
    feedback <- glue(
      "I can see how, if you interpreted MDA-MB-231 as the 'high motors and ",
      "clutches' cell line, then the fact that this cell line to exhibits ",
      "greater motility in the higher stiffness condition would indeed match ",
      "the CMS predictions. However, it's tough to justify that assumption ",
      "based on the expression of 37 cell adhesion and 68 myosin genes, ",
      "respectively, that we examined here."
    )
  } else {
    feedback <- glue(
      "I'm not sure how you reached that particular conclusion. Check out ",
      "some of the other submissions for a couple interpretations that we'd ",
      "expect to see, given the data used."
    )
  }
  # Return the answer list with the grader's comment attached.
  submission["comment"] <- feedback
  submission
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chromfx.R
\name{readPeakSummits}
\alias{readPeakSummits}
\title{readPeakSummits}
\usage{
readPeakSummits(psum, genome = "hg38")
}
\arguments{
\item{psum}{paths to sample peak summits bed file}
\item{genome}{reference genome, either "hg38" (default) or "mm10"}
}
\value{
GRanges
}
\description{
Function that reads ATAC-seq peak summits.
}
\keyword{peaks}
| /man/readPeakSummits.Rd | no_license | DoaneAS/chromfx | R | false | true | 429 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chromfx.R
\name{readPeakSummits}
\alias{readPeakSummits}
\title{readPeakSummits}
\usage{
readPeakSummits(psum, genome = "hg38")
}
\arguments{
\item{psum}{paths to sample peak summits bed file}
\item{genome}{reference genome, either "hg38" (default) or "mm10"}
}
\value{
GRanges
}
\description{
Function that reads ATAC-seq peak summits.
}
\keyword{peaks}
|
# MINISTRY OF LABOUR AND EMPLOYMENT PROMOTION (Peru) -- scraper for the
# SISCAS job-posting (convocatorias) portal.  Drives a headless PhantomJS
# browser with RSelenium to select a year/month, page through the results,
# and harvest posting titles and PDF links with rvest.
rm(list = ls()) # clear the workspace
library(tidyverse) # also attaches 8 additional packages
#library(readr)
#library(stringr) # string handling (object, pattern)
#library(dplyr)
#library(tidyr)
#library(purrr)
#library(ggplot2)
#library(forcats)
#library(tibble)
library(xml2)
library(rvest)
library(RSelenium) # scrapes dynamic pages
library(wdman) # headless-browser plumbing for RSelenium
library(robotstxt)
library(binman)
library(tm) # text mining
library(NLP)
library(pdftools)
library(tesseract)
library(magick)
URL<-"http://69.10.39.53/SISCAS/externo/portal/ConvocatoriasPortal.aspx" # go straight to the iframe target
# Ask whether the site permits downloading its data
#paths_allowed(paths = c(URL)) #
get_robotstxt(URL) # another way to check robots.txt
#
#acceptAlert()
# Accepts the alert dialog currently displayed;
# equivalent to clicking the "OK" button in the dialog.
#dismissAlert() # Dismisses the alert dialog currently displayed on the page.
# For confirm()/prompt() dialogs this is equivalent to clicking "Cancel";
# for alert() dialogs it is equivalent to clicking "OK".
# Use UTF-8 as the encoding
options(encoding = "utf-8")
# Open a session on the site:
# run the phantomjs server -- creates a headless ("ghost") browser
server<-phantomjs(port=5011L)
# open the browser
Browser <- remoteDriver(browserName = "phantomjs", port=5011L)
Browser$open()
# navigate to the saved web page
Browser$navigate(URL)
Browser$screenshot(display=TRUE) # show a snapshot of the page
# There is no alert button, therefore:
# pick the year
NodoYears<-Browser$findElement(using = 'xpath',
                               value='//*[@id="ddlanio"]')
Year<-NodoYears$selectTag()
Year$value[7] # year 2019
#Years<-NodoYears$getElementText()
# Enter the year we want
txtYear<- Browser$findElement(using = 'css', "#ddlanio")
txtYear$clickElement()
txtYear$sendKeysToElement(list(Year$value[7])) # selected year 2019
Browser$screenshot(display = TRUE)
# Pick the month
NodoMonths<-Browser$findElement(using = 'xpath',
                                value='//*[@id="ddlmes"]')
Meses<-NodoMonths$selectTag()
Meses$text[1] # gives the month chosen
# Checked beforehand which months to click and search (pages per month):
# February (2 pages), April (4), May (7), June (3), July (10), August (10),
# September (10), October (10), November (9) and December (2).
# Build just the month indices we need, for the eventual loop
Mesclick<-c(2,4,5:12) # the numbers corresponding to the months of interest
Mesclick<-as.list(Mesclick) # convert to a list for indexing
Meses$text[Mesclick[[1]]] # try out the indexing
length(Mesclick) # how many month indices there are to iterate over
# Enter the month
txtMes<- Browser$findElement(using = 'css', "#ddlmes")
txtMes$clickElement()
txtMes$sendKeysToElement(list(Meses$text[Mesclick[[1]]])) # selected the indexed month
Browser$screenshot(display = TRUE)
# Click "Search" and see how many pages each month has
Buscar<- Browser$findElement(using = 'xpath',
                             value = "//input[@id='btnbuscar']")
Buscar$clickElement()
Browser$screenshot(display = TRUE)
# Click next/previous; the order is arbitrary, but logically start with "next"
Siguiente<-Browser$findElement(using = "xpath",
                               value = "//*[@id='PaginadoControl1']")
Siguiente$clickElement()
Browser$screenshot(display = TRUE)
# Click "previous"
##Anterior<-browser$findElement(using = "xpath",
##                              value = "//input[@id='ctl00_cphBodyMain_reserva1_btnanterior']")
##Anterior$clickElement()
##browser$screenshot(display = TRUE)
#---- rvest section (single page) ----
# Now we can harvest the current page with rvest
Pagina_actual<-Browser$getPageSource()
# Extract only the text of page no. 01
Hoja1<-read_html(Pagina_actual[[1]])%>% # element 1 of the list holds the current page source
  html_nodes(css = ".etiketa")%>%
  html_text()%>%
  str_remove("AÑO")%>%
  str_remove("MES")%>%
  str_remove_all("Bases")%>%
  str_remove_all("Anexos")%>%
  str_remove_all("Resultado Final")%>%
  str_remove_all("Resultado de Evaluación Curricular")%>%
  str_subset("[:alnum:]")%>%# keep only entries with alphanumerics, dropping blank/newline-only ones
  str_replace_all("\n","")%>%
  str_trim()
# Pages per month (see note above): February (2), April (4), May (7),
# June (3), July (10), August (10), September (10), October (10),
# November (9) and December (2).
Hojas<-c(2,4,7,3,10,10,10,10,9,2)
Hojas<-as.list(Hojas) # will drive the loop that extracts each month's pages
# Extract the PDF links so they can be read (page 1), e.g.:
#CAS%20008-2019%20-%20SECRETARIA%20REGIONAL%20-%20CAJAMARCA.pdf
# these are the requirements PDFs
Hoja1_linkPdf<-read_html(Pagina_actual[[1]])%>%
  html_nodes(".etiketa")%>%
  html_nodes("input")%>%
  html_attr("value")%>%
  str_subset("[:alnum:]")%>%
  str_trim()
Hoja1_linkPdf[1]
# We cannot reach the PDFs from R (?)
# From here onward the code no longer works.
# NOTE(review): this base URL points at the MIDIS portal rather than the
# MTPE/SISCAS site scraped above -- likely why the download fails.
UrlMadrePdf<-"http://sdv.midis.gob.pe/sis_rrhh/externo/portal/convocatoriasportal.aspx/"
ReadPDF_MIDIS<-pdf_ocr_text(paste0(UrlMadrePdf,Hoja1_linkPdf[1]),pages = c(1:2),language = "spa")
#Pagina_actual<-Browser$getPageSource() # grab the current page again
# We stopped here
# always close the session
server$stop() | /R script/ScriptMTPE19_Rs.R | no_license | manosaladata/DataSet-CAS-PERU | R | false | false | 5,538 | r |
#MINISTERIO DEL TRABAJO Y PROMOCIÓN DEL EMPLEO
rm(list = ls()) # limpia la memoria de trabajo
library(tidyverse) #instala además 8 paquetes adicionales
#library(readr)
#library(stringr) # manejador de string (objeto, patron)
#library(dplyr)
#library(tidyr)
#library(purrr)
#library(ggplot2)
#library(forcats)
#library(tibble)
library(xml2)
library(rvest)
library(RSelenium) #escrapea paginas dinámicas
library(wdman) # Navegación fantasma para rselenium
library(robotstxt)
library(binman)
library(tm) # text mining
library(NLP)
library(pdftools)
library(tesseract)
library(magick)
URL<-"http://69.10.39.53/SISCAS/externo/portal/ConvocatoriasPortal.aspx" # Defrente al Iframe vamos
#Preguntar si esta premitio bajar los datos de la web
#paths_allowed(paths = c(URL)) #
get_robotstxt(URL) # otra forma de preguntar
#
#acceptAlert()
#Acepta el cuadro de diálogo de alerta que se muestra actualmente
#equivale a hacer clic el botón "Aceptar" en el cuadro de diálogo
#dismissAlert() #Descarta el cuadro de diálogo de alerta que se muestra actualmente en la página
#Para los cuadros de diálogo confirmar () y preguntar (),esto equivale a hacer clic en el botón "Cancelar"
#Para los cuadros de diálogo alert (), esto es equivalente hacer clic en el botón "Aceptar"
# Asignamos como encondig a UTF-8
options(encoding = "utf-8")
#Abrimos una sesion en la web
# Ejecutamos el servidor phantomjs -creamos un navegador fantasma
server<-phantomjs(port=5011L)
#Abrimos el navegador
Browser <- remoteDriver(browserName = "phantomjs", port=5011L)
Browser$open()
#Navegar la página web que guardamos
Browser$navigate(URL)
Browser$screenshot(display=TRUE) #Muéstrame en foto de la página
# No hay boton de alerta, por lo tanto,
# Eligimos los años
NodoYears<-Browser$findElement(using = 'xpath',
value='//*[@id="ddlanio"]')
Year<-NodoYears$selectTag()
Year$value[7] # año 2019
#Years<-NodoYears$getElementText()
# Introducimos el año que queremos
txtYear<- Browser$findElement(using = 'css', "#ddlanio")
txtYear$clickElement()
txtYear$sendKeysToElement(list(Year$value[7])) # le dije el año 2019
Browser$screenshot(display = TRUE)
# Eligimos los meses
NodoMonths<-Browser$findElement(using = 'xpath',
value='//*[@id="ddlmes"]')
Meses<-NodoMonths$selectTag()
Meses$text[1] # Me da el mes que elijo
#Ver previamente en que meses hacer click y buscar información
#Meses: Febrero(2Hojas),Abril(4), Mayo(7), junio(3), Julio(10),Agosto(10),
#Setiembre(10),Octubre(10),Noviembre(9) y Diciembre(2)
#Nos ingeniamos para buscar solo lo que queremos, para el bucle
Mesclick<-c(2,4,5:12) # Creamos el numero que corresponde a los meses
Mesclick<-as.list(Mesclick) # convertimos a lista para indexar
Meses$text[Mesclick[[1]]] # Probamos la indexación
length(Mesclick) # Para saber cuántas veces indexar el mes
#Introducimos el mes
txtMes<- Browser$findElement(using = 'css', "#ddlmes")
txtMes$clickElement()
txtMes$sendKeysToElement(list(Meses$text[Mesclick[[1]]])) # le dije el mes que está indexada
Browser$screenshot(display = TRUE)
# Hacer clic en Buscar y ver cuántas hojas tiene cada mes
Buscar<- Browser$findElement(using = 'xpath',
value = "//input[@id='btnbuscar']")
Buscar$clickElement()
Browser$screenshot(display = TRUE)
#Hacer clic en siguiente y anterior, es indistinto, pero es lógico, inicia con siguente
Siguiente<-Browser$findElement(using = "xpath",
value = "//*[@id='PaginadoControl1']")
Siguiente$clickElement()
Browser$screenshot(display = TRUE)
# Hacer clic en anterior
##Anterior<-browser$findElement(using = "xpath",
## value = "//input[@id='ctl00_cphBodyMain_reserva1_btnanterior']")
##Anterior$clickElement()
##browser$screenshot(display = TRUE)
#----Parte Rvest individual #----
# Ahora podemos bajar información con rvest sobre la web actual
Pagina_actual<-Browser$getPageSource()
# Extraemos sólo el texto de la hoja N° 01
Hoja1<-read_html(Pagina_actual[[1]])%>% # el elemento 1 de la lista esta la url de la página actual
html_nodes(css = ".etiketa")%>%
html_text()%>%
str_remove("AÑO")%>%
str_remove("MES")%>%
str_remove_all("Bases")%>%
str_remove_all("Anexos")%>%
str_remove_all("Resultado Final")%>%
str_remove_all("Resultado de Evaluación Curricular")%>%
str_subset("[:alnum:]")%>%# Extrea sólo los afanúmericos, sin los saltos
str_replace_all("\n","")%>%
str_trim()
#Meses: Febrero(2Hojas),Abril(4), Mayo(7), junio(3), Julio(10),Agosto(10),
#Setiembre(10),Octubre(10),Noviembre(9) y Diciembre(2)
Hojas<-c(2,4,7,3,10,10,10,10,9,2)
Hojas<-as.list(Hojas) # Servirá para el bucle que extraiga información de las hojas de cada mes
# Extraemos los link de los pdf para leerlos (Hoja 1)
#CAS%20008-2019%20-%20SECRETARIA%20REGIONAL%20-%20CAJAMARCA.pdf
#PDF DE REQUISITOS
Hoja1_linkPdf<-read_html(Pagina_actual[[1]])%>%
html_nodes(".etiketa")%>%
html_nodes("input")%>%
html_attr("value")%>%
str_subset("[:alnum:]")%>%
str_trim()
Hoja1_linkPdf[1]
# No podemos acceder a los pdfs desde R, ¿?
#De aquí para adelante ya no funciona
UrlMadrePdf<-"http://sdv.midis.gob.pe/sis_rrhh/externo/portal/convocatoriasportal.aspx/"
ReadPDF_MIDIS<-pdf_ocr_text(paste0(UrlMadrePdf,Hoja1_linkPdf[1]),pages = c(1:2),language = "spa")
#Pagina_actual<-Browser$getPageSource() #obtener de la página actual
# Nos quedamos aquí
# siempre cerrar la sesión
Browser$close()
server$stop() |
moving_average_plot <- function(mavg_data){
  # Draw a blue ribbon over a moving-average series: for each Index the
  # band runs from the AVG column (lower edge) up to the Average column
  # (upper edge), rendered on ggplot2's minimal theme.
  # NOTE(review): ymax is taken from `Average` while ymin comes from `AVG`
  # -- confirm both columns exist in mavg_data and the mixed naming is
  # intentional.
  base_plot <- ggplot(mavg_data, aes(x = Index, ymax = Average, ymin = AVG))
  base_plot +
    geom_ribbon(fill = "blue") +
    theme_minimal()
}
| /R/moving_average_plot.R | no_license | bayesball/BayesTestStreak | R | false | false | 165 | r | moving_average_plot <- function(mavg_data){
ggplot(mavg_data,
aes(x=Index, ymax=Average, ymin=AVG)) +
geom_ribbon(fill="blue") +
theme_minimal()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotmclust.R
\name{plotmclust}
\alias{plotmclust}
\title{plotmclust}
\usage{
plotmclust(
mclustobj,
x = 1,
y = 2,
MSTorder = NULL,
show_tree = T,
show_full_tree = F,
show_cell_names = F,
cell_name_size = 3,
cell_point_size = 3,
markerexpr = NULL,
showcluster = T
)
}
\arguments{
\item{mclustobj}{The exact output of \code{\link{exprmclust}} function.}
\item{x}{The column of data after dimension reduction to be plotted on the horizontal axis.}
\item{y}{The column of data after dimension reduction to be plotted on the vertical axis.}
\item{MSTorder}{The arbitrary order of clusters to be shown on the plot.}
\item{show_tree}{Whether to show the links between cells connected in the minimum spanning tree.}
\item{show_full_tree}{Whether to show the full tree or not. Only useful when show_tree=T. Overrides MSTorder.}
\item{show_cell_names}{Whether to draw the name of each cell in the plot.}
\item{cell_name_size}{The size of cell name labels if show_cell_names is TRUE.}
\item{cell_point_size}{The size of cell point.}
\item{markerexpr}{The gene expression used to define the size of nodes.}
}
\value{
A ggplot2 object.
}
\description{
Plot the model-based clustering results
}
\details{
This function will plot the gene expression data after dimension reduction and show the clustering results.
}
\examples{
data(lpsdata)
procdata <- preprocess(lpsdata)
lpsmclust <- exprmclust(procdata)
plotmclust(lpsmclust)
}
\author{
Zhicheng Ji, Hongkai Ji <zji4@zji4.edu>
}
| /man/plotmclust.Rd | no_license | wangyadong-bio/TSCAN | R | false | true | 1,575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotmclust.R
\name{plotmclust}
\alias{plotmclust}
\title{plotmclust}
\usage{
plotmclust(
mclustobj,
x = 1,
y = 2,
MSTorder = NULL,
show_tree = T,
show_full_tree = F,
show_cell_names = F,
cell_name_size = 3,
cell_point_size = 3,
markerexpr = NULL,
showcluster = T
)
}
\arguments{
\item{mclustobj}{The exact output of \code{\link{exprmclust}} function.}
\item{x}{The column of data after dimension reduction to be plotted on the horizontal axis.}
\item{y}{The column of data after dimension reduction to be plotted on the vertical axis.}
\item{MSTorder}{The arbitrary order of cluster to be shown on the plot.}
\item{show_tree}{Whether to show the links between cells connected in the minimum spanning tree.}
\item{show_full_tree}{Whether to show the full tree or not. Only useful when show_tree=T. Overrides MSTorder.}
\item{show_cell_names}{Whether to draw the name of each cell in the plot.}
\item{cell_name_size}{The size of cell name labels if show_cell_names is TRUE.}
\item{cell_point_size}{The size of cell point.}
\item{markerexpr}{The gene expression used to define the size of nodes.}
}
\value{
A ggplot2 object.
}
\description{
Plot the model-based clustering results
}
\details{
This function will plot the gene expression data after dimension reduction and show the clustering results.
}
\examples{
data(lpsdata)
procdata <- preprocess(lpsdata)
lpsmclust <- exprmclust(procdata)
plotmclust(lpsmclust)
}
\author{
Zhicheng Ji, Hongkai Ji <zji4@zji4.edu>
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2195
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2195
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_query71_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 913
c no.of clauses 2195
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2195
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_query71_1344n.qdimacs 913 2195 E1 [] 0 70 843 2195 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_query71_1344n/query50_query71_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 710 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2195
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2195
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_query71_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 913
c no.of clauses 2195
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2195
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_query71_1344n.qdimacs 913 2195 E1 [] 0 70 843 2195 NONE
|
#' bio17: Calculate precipitation of the driest quarter.
#'
#' @description `bio17` is used to calculate the total precipitation in the
#' driest quarter of the year
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec`
#' value.
#'
#' @return a single numeric value of total precipitation of the driest quarter.
#' @export
#'
#' @details Precipitation in quarter is calculated and total
#' precipitation in the driest quarter returned. If data span more than one
#' year, calculations are performed on all data and single value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' tme <- tmecreate(2010, 1)
#' plot(hourly_precip~as.POSIXct(tme), type = "l", xlab = "Month",
#' ylab = "Precipitation")
#' bio17(hourly_precip, tme)
bio17 <- function(prec, tme) {
  # Total precipitation in the driest quarter (a rolling ~91-day window).
  #
  # prec: vector of precipitation values (normally spanning one year).
  # tme:  POSIXlt times for each element of `prec`; the first two entries
  #       define the sampling interval.
  # Returns NA when the series carries no information (sd is NA, e.g. a
  # single observation or all-NA input); otherwise the smallest rolling
  # quarter total.
  if (is.na(sd(prec, na.rm = TRUE))) {
    pdry <- NA
  } else {
    if (length(unique(tme$year)) > 1) warnb()
    # Sampling interval in days, derived from the first two time stamps.
    id <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
    # Number of steps spanning one quarter (91 days).
    int <- 91 / id
    # Duplicate the series so windows can wrap past the end of the record.
    pw <- c(prec, prec)
    # Rolling quarter totals starting at every observation; vapply (rather
    # than sapply) guarantees a numeric result, and seq_along is safe for
    # zero-length input.
    qtr <- function(i) sum(pw[i:(i + int)], na.rm = TRUE)
    dq <- vapply(seq_along(prec), qtr, numeric(1))
    # The driest quarter is simply the smallest rolling total (the original
    # located the minimising index and re-summed the same window, which
    # recomputed an identical value).
    pdry <- min(dq, na.rm = TRUE)
  }
  return(pdry)
}
| /R/bio17.R | no_license | everydayduffy/climvars | R | false | false | 1,432 | r | #' bio17: Calculate precipitation of the driest quarter.
#'
#' @description `bio17` is used to calculate the total precipitation in the
#' driest quarter of the year
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec`
#' value.
#'
#' @return a single numeric value of total precipitation of the driest quarter.
#' @export
#'
#' @details Precipitation in quarter is calculated and total
#' precipitation in the driest quarter returned. If data span more than one
#' year, calculations are performed on all data and single value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' tme <- tmecreate(2010, 1)
#' plot(hourly_precip~as.POSIXct(tme), type = "l", xlab = "Month",
#' ylab = "Precipitation")
#' bio17(hourly_precip, tme)
bio17 <- function(prec, tme) {
if (is.na(sd(prec, na.rm = TRUE)))
pdry <- NA
else {
if (length(unique(tme$year)) > 1) warnb()
qtr <- function(i, int) {
pw <- c(prec, prec)
su <- sum(pw[i: (i + int)], na.rm = TRUE)
su
}
id <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
int <- 91 / id
dq <- sapply(c(1:length(prec)), qtr, int)
i <- which(dq == min(dq, na.rm = TRUE))[1]
pre2 <- c(prec, prec)
pdry <- sum(pre2[i:(i + int)], na.rm = TRUE)
}
return(pdry)
}
|
# Global setup for the shock-absorber teaching app: plot styling and packages.
# Serif, bold text for any base-graphics output produced by the app.
par(family = "serif", font = 2)
# pacman::p_load installs teachingApps first if it is missing, then loads it.
pacman::p_load(teachingApps)
library(SMRD)
ShockAbsorber.ld <-
frame.to.ld(shockabsorber,
response.column = 1,
censor.column = 3,
time.units = "Kilometers") | /inst/apps/shock_absorber8/global.R | no_license | Ammar-K/SMRD | R | false | false | 237 | r | par(family = "serif", font = 2)
pacman::p_load(teachingApps)
library(SMRD)
ShockAbsorber.ld <-
frame.to.ld(shockabsorber,
response.column = 1,
censor.column = 3,
time.units = "Kilometers") |
### type 1 error
### --------------------------------------------------------------------
### Simulation driver estimating empirical type-I-error rates for the
### Adaptive Fisher (AF) multi-trait association test and competitors
### (minP, GEEaSPU, MSKAT, MANOVA, TATES, MultiPhen) on continuous traits
### with no covariate adjustment.  Intended to run as one task of an HPC
### array job: the single command-line argument is the task index and is
### folded into the RNG seed so tasks generate disjoint replicates.
### All results are saved to one .RData file per task.
### --------------------------------------------------------------------
args <- commandArgs(TRUE)
core.i <- as.numeric(args)   # array-task index; also the seed offset below
# load packages, functions
path="/users/PAS1149/osu10039/PAS1149/qldeng/AF_data/june_simulations"
setwd(paste0(path,"/","code","/","functions"))
source("af_functions_noz.R")   # presumably defines the SIM_/AF_/score helpers used below -- confirm
n = 1000 # sample size
betaz = 0 # effect of covariates
t <- 100 # number of traits (shadows base::t(); harmless here since t is never called as a function)
cor_matrix = "cs" # correlation structure
rho = 0.3 # correlation strength
rep <- 50 # replicates per core (shadows base::rep(); calls like rep(NA, rep) still resolve the function)
# matrix/vectors save results
pval_AF_comb <- matrix(NA,nrow = rep,ncol = 3)   # cols: combined AF, AF w/ PCA, AF w/o PCA
pval_minP <- rep(NA,rep)
pval_MANOVA <- rep(NA,rep)
pval_SPU <- matrix(NA,nrow = rep,ncol = 10)
pval_SPU_ex <- matrix(NA,nrow = rep,ncol = 10)
pval_MSKAT <- matrix(NA,nrow = rep,ncol = 2)
pval_TATES <- rep(NA,rep)
pval_MultiPhen <- rep(NA,rep)
for(i in (1:rep)){
print(i)
set.seed(core.i*1000 + i) # per-task, per-replicate seed: tasks never overlap
# type1: null beta (all trait effects zero, so every rejection is a type-I error)
beta <- rep(0,t)
sim <- SIM_covariate_power_Z(betaz=betaz,constr = cor_matrix,beta,traits = t,cat_ratio =0,cor=rho,n.cov = 2, MAF = 0.3)
# do PCA
Y.res <- sim$Y
X.res <- sim$X
Y.pca <- Y_PCA(Y.res)
## Score test
# Score tests on PCA scores and raw traits stacked together: per the cbind
# order, columns 1:t of the result belong to the PCA scores and the
# remaining t columns to the raw traits.
score <- score_test(cbind(Y.pca,Y.res),X.res,n)
###################### Original AF ######################
## AF w./ PCA
PCA_res <- AdaptiveFisher_combo(score$p_l[,1:t],score$p_u[,1:t])
p_perm_pcares <- matrix(PCA_res$P_vector,ncol = 1)
# AF w.o. PCA
AF <- AdaptiveFisher_combo(score$p_l[,-(1:t)],score$p_u[,-(1:t)])
p_perm_af <- matrix(AF$P_vector,ncol = 1)
# combine the two AF variants into one omnibus p-value
combo <- AdaptiveFisher_compute(cbind(p_perm_af,p_perm_pcares))
pval_AF_comb[i,1] <- combo$`AF P_value`
# record each method's p-value and min
pval_AF_comb[i,2] <- PCA_res$AF_pvalue
pval_AF_comb[i,3] <- AF$AF_pvalue
#################### Other Methods #######################
### minP ###
# Convert one-sided score p-values on the raw traits to two-sided ones.
p_matrix_2side <- 2*pnorm(abs(qnorm(score$p_l[,-(1:t)])),lower.tail = FALSE)
pval_minP[i] <- min_p(p_matrix_2side)$p.min
### SPUs & aSPU ###
# capture.output/suppressMessages silence GEEaSPU's console chatter.
invisible(capture.output(suppressMessages(pan<-GEEaSPU(traits = sim$Y,geno = sim$X,model = "gaussian"))))
pval_SPU[i,] <- pan[1:10]   # first 10 returned p-values (SPU family + aSPU) -- TODO confirm ordering
invisible(capture.output(suppressMessages(pan<-GEEaSPU(traits = sim$Y,geno = sim$X,model = "gaussian",corstr = "exchangeable"))))
pval_SPU_ex[i,] <- pan[1:10]
### MSKAT ###
X_matrix <- matrix(sim$X,ncol = 1)
SKAT <- MSKAT(MSKAT.cnull(sim$Y,X=NULL), X_matrix, W.beta=c(1,25))
pval_MSKAT[i,] <- SKAT
#### MANOVA ####
man <- manova(sim$Y~sim$X)
pval_MANOVA[i] <- summary(man)$stats["sim$X","Pr(>F)"]
### TATES ###
TATEs <- TATES(sim$Y,score$p_l[,-(1:t)],t,n.snp = 1)
pval_TATES[i] <- TATEs[t+1]   # element t+1 taken as the combined TATES p-value -- TODO confirm
#### MultiPhen ###
# mPhen requires dimnames on both the genotype and phenotype matrices.
X = as.matrix(sim$X)
dimnames(X) <- list(1:n,1)
Y = as.matrix(sim$Y)
dimnames(Y) <- list(1:n,1:t)
opts = mPhen.options(c("regression","pheno.input"))
opts$mPhen.scoreTest = TRUE
invisible(capture.output(suppressMessages(m <- mPhen(X[,1,drop=FALSE],Y,phenotypes = "all", opts = opts))))
pval_MultiPhen[i] <- m$Results[,,,"pvalue"] ["JointModel"]
}
# Empirical type-I-error rates at the nominal 0.05 level (printed to the job log).
print(paste("type1_cont_noz",t,rho,sep = "_"))
colMeans(pval_AF_comb<0.05,na.rm = TRUE)
colMeans(pval_MSKAT<0.05,na.rm = TRUE)
colMeans(pval_SPU<0.05,na.rm = TRUE)
colMeans(pval_SPU_ex<0.05,na.rm = TRUE)
mean(pval_MANOVA<0.05,na.rm = TRUE)
mean(pval_minP<0.05,na.rm = TRUE)
mean(pval_MultiPhen<0.05,na.rm = TRUE)
mean(pval_TATES<0.05,na.rm = TRUE)
# Persist the full workspace for this task; the filename encodes settings + task id.
setwd(paste0(path,"/","type1/continuous/no_covariates/trait",t))
save(list = ls(all.names = TRUE), file = paste0("type1_cont_noz","_",t,"_",rho,"_",core.i,".RData"), envir = .GlobalEnv)
| /type1/sim_cont_noz.R | no_license | songbiostat/MTAF | R | false | false | 3,552 | r |
### type 1 error
args <- commandArgs(TRUE)
core.i <- as.numeric(args)
# load packages, functions
path="/users/PAS1149/osu10039/PAS1149/qldeng/AF_data/june_simulations"
setwd(paste0(path,"/","code","/","functions"))
source("af_functions_noz.R")
n = 1000 # sample size
betaz = 0 # effect of covariates
t <- 100 # traits
cor_matrix = "cs" # correlation structure
rho = 0.3 # correlation strength
rep <- 50 # permutation for each core
# matrix/vectors save results
pval_AF_comb <- matrix(NA,nrow = rep,ncol = 3)
pval_minP <- rep(NA,rep)
pval_MANOVA <- rep(NA,rep)
pval_SPU <- matrix(NA,nrow = rep,ncol = 10)
pval_SPU_ex <- matrix(NA,nrow = rep,ncol = 10)
pval_MSKAT <- matrix(NA,nrow = rep,ncol = 2)
pval_TATES <- rep(NA,rep)
pval_MultiPhen <- rep(NA,rep)
for(i in (1:rep)){
print(i)
set.seed(core.i*1000 + i) # random seed
# type1: null beta
beta <- rep(0,t)
sim <- SIM_covariate_power_Z(betaz=betaz,constr = cor_matrix,beta,traits = t,cat_ratio =0,cor=rho,n.cov = 2, MAF = 0.3)
# do PCA
Y.res <- sim$Y
X.res <- sim$X
Y.pca <- Y_PCA(Y.res)
## Score test
score <- score_test(cbind(Y.pca,Y.res),X.res,n)
###################### Original AF ######################
## AF w./ PCA
PCA_res <- AdaptiveFisher_combo(score$p_l[,1:t],score$p_u[,1:t])
p_perm_pcares <- matrix(PCA_res$P_vector,ncol = 1)
# AF w.o. PCA
AF <- AdaptiveFisher_combo(score$p_l[,-(1:t)],score$p_u[,-(1:t)])
p_perm_af <- matrix(AF$P_vector,ncol = 1)
# combine
combo <- AdaptiveFisher_compute(cbind(p_perm_af,p_perm_pcares))
pval_AF_comb[i,1] <- combo$`AF P_value`
# record each method's p-value and min
pval_AF_comb[i,2] <- PCA_res$AF_pvalue
pval_AF_comb[i,3] <- AF$AF_pvalue
#################### Other Methods #######################
### minP ###
p_matrix_2side <- 2*pnorm(abs(qnorm(score$p_l[,-(1:t)])),lower.tail = FALSE)
pval_minP[i] <- min_p(p_matrix_2side)$p.min
### SPUs & aSPU ###
invisible(capture.output(suppressMessages(pan<-GEEaSPU(traits = sim$Y,geno = sim$X,model = "gaussian"))))
pval_SPU[i,] <- pan[1:10]
invisible(capture.output(suppressMessages(pan<-GEEaSPU(traits = sim$Y,geno = sim$X,model = "gaussian",corstr = "exchangeable"))))
pval_SPU_ex[i,] <- pan[1:10]
### MSKAT ###
X_matrix <- matrix(sim$X,ncol = 1)
SKAT <- MSKAT(MSKAT.cnull(sim$Y,X=NULL), X_matrix, W.beta=c(1,25))
pval_MSKAT[i,] <- SKAT
#### MANOVA ####
man <- manova(sim$Y~sim$X)
pval_MANOVA[i] <- summary(man)$stats["sim$X","Pr(>F)"]
### TATES ###
TATEs <- TATES(sim$Y,score$p_l[,-(1:t)],t,n.snp = 1)
pval_TATES[i] <- TATEs[t+1]
#### MultiPhen ###
X = as.matrix(sim$X)
dimnames(X) <- list(1:n,1)
Y = as.matrix(sim$Y)
dimnames(Y) <- list(1:n,1:t)
opts = mPhen.options(c("regression","pheno.input"))
opts$mPhen.scoreTest = TRUE
invisible(capture.output(suppressMessages(m <- mPhen(X[,1,drop=FALSE],Y,phenotypes = "all", opts = opts))))
pval_MultiPhen[i] <- m$Results[,,,"pvalue"] ["JointModel"]
}
print(paste("type1_cont_noz",t,rho,sep = "_"))
colMeans(pval_AF_comb<0.05,na.rm = TRUE)
colMeans(pval_MSKAT<0.05,na.rm = TRUE)
colMeans(pval_SPU<0.05,na.rm = TRUE)
colMeans(pval_SPU_ex<0.05,na.rm = TRUE)
mean(pval_MANOVA<0.05,na.rm = TRUE)
mean(pval_minP<0.05,na.rm = TRUE)
mean(pval_MultiPhen<0.05,na.rm = TRUE)
mean(pval_TATES<0.05,na.rm = TRUE)
setwd(paste0(path,"/","type1/continuous/no_covariates/trait",t))
save(list = ls(all.names = TRUE), file = paste0("type1_cont_noz","_",t,"_",rho,"_",core.i,".RData"), envir = .GlobalEnv)
|
\name{adj2mat}
\alias{adj2mat}
\title{Convert adjacency list into an adjacency matrix.}
\description{
Converts an adjacency-like list (which may or may not
contain all the gene IDs in the network) into an adjacency
matrix. This function originally comes from the ENA R package; the pathDESeq package uses it internally in the \code{neibMat} function.
}
\author{
Jeffrey D. Allen \email{Jeffrey.Allen@UTSouthwestern.edu}
}
\seealso{\code{\link{neibMat}}}
\references{
Jeffrey, D. A., & Guanghua, X. (2014). ENA:Ensemble Network Aggregation R package version 1.3-0.
}
| /man/adj2mat.Rd | no_license | MalathiSIDona/pathDESeq | R | false | false | 580 | rd | \name{adj2mat}
\alias{adj2mat}
\title{Convert adjacency list into an adjacency matrix.}
\description{
Converts an adjacency-like list (which may or may not
contain all the gene IDs in the network) into an adjacency
matrix. This function is originally from ENA R package and the pathDESeq package uses this as an internal function for the \code{neibMat} function.
}
\author{
Jeffrey D. Allen \email{Jeffrey.Allen@UTSouthwestern.edu}
}
\seealso{\code{\link{neibMat}}}
\references{
Jeffrey, D. A., & Guanghua, X. (2014). ENA:Ensemble Network Aggregation R package version 1.3-0.
}
|
# This shiny user interface file is a work-in-progress, initially
# designed for Johns Hopkins Data Products class. It will ultimately try
# to teach an uninitiated adult learner the subject of Algebra.
#
# Cynthia Davies Cunha
# Johns Hopkins Developing Data Products
# October 2014
#
library(shiny)
shinyUI(fluidPage(
titlePanel("Learning Algebra"),
sidebarLayout(
sidebarPanel(
h2("Lesson One: Containers"),
helpText("A container is a representation of an unknown value. In the equation
x + 3 = 7, x is the unknown value, a container. Select a container and
an operator and we will build an equation to solve."),
selectInput("var",
label = "Choose a container to represent an unknown value",
choices = c("?", "x", "a", "y"),
selected = ""
),
selectInput("op",
label = "Choose an operation to perform:",
choices = c("+","-","/","*"),
selected = ""
),
numericInput("num", label = "Choose a number:", 0)
),
mainPanel(
p("Sometimes the nomenclature of a new subject can be intimidating. In Algebra,
we often use a ",
strong(em("container")),
" for an unknown value."),
br(),
p("Let's start with the familiar."),
br(),
p("If I were to ask:"),
p("What do you have to add to the number 3 to get 7, I'm sure you would readily answer 4."),
br(),
p("Suppose I write the above problem like this:"),
p(strong("?"),
" + 3 = 7", style = "color:blue"),
br(),
p("You would still answer 4, right? The ",
strong("?"),
" is just a container, it represents and holds an unknown value."),
br(),
p("What if I wrote the problem like this: "),
p(strong("x"),
" + 3 = 7", style = "color:blue"),
br(),
p("The letter ",
strong("x"),
"here is still just ",
em("a container"),
" for an unknown value."),
p("Your answer is still 4, as in x = 4, which means you substitute 4 into
the container x to get: "),
p("4 + 3 = 7", style = "color:blue"),
h3(textOutput("text1")),
h3(textOutput("text2"))
)
)
)) | /algebraVis/ui.R | no_license | CDCwrites/BuildingDataProducts | R | false | false | 2,754 | r | # This shiny user interface file is a work-in-progress, initially
# designed for Johns Hopkins Data Products class. It will ultimately try
# to teach an uninitiated adult learner the subject of Algebra.
#
# Cynthia Davies Cunha
# Johns Hopkins Developing Data Products
# October 2014
#
library(shiny)
shinyUI(fluidPage(
titlePanel("Learning Algebra"),
sidebarLayout(
sidebarPanel(
h2("Lesson One: Containers"),
helpText("A container is a representation of an unknown value. In the equation
x + 3 = 7, x is the unknown value, a container. Select a container and
an operator and we will build an equation to solve."),
selectInput("var",
label = "Choose a container to represent an unknown value",
choices = c("?", "x", "a", "y"),
selected = ""
),
selectInput("op",
label = "Choose an operation to perform:",
choices = c("+","-","/","*"),
selected = ""
),
numericInput("num", label = "Choose a number:", 0)
),
mainPanel(
p("Sometimes the nomenclature of a new subject can be intimidating. In Algebra,
we often use a ",
strong(em("container")),
" for an unknown value."),
br(),
p("Let's start with the familiar."),
br(),
p("If I were to ask:"),
p("What do you have to add to the number 3 to get 7, I'm sure you would readily answer 4."),
br(),
p("Suppose I write the above problem like this:"),
p(strong("?"),
" + 3 = 7", style = "color:blue"),
br(),
p("You would still answer 4, right? The ",
strong("?"),
" is just a container, it represents and holds an unknown value."),
br(),
p("What if I wrote the problem like this: "),
p(strong("x"),
" + 3 = 7", style = "color:blue"),
br(),
p("The letter ",
strong("x"),
"here is still just ",
em("a container"),
" for an unknown value."),
p("Your answer is still 4, as in x = 4, which means you substitute 4 into
the container x to get: "),
p("4 + 3 = 7", style = "color:blue"),
h3(textOutput("text1")),
h3(textOutput("text2"))
)
)
)) |
\name{are.parrice.valid}
\alias{are.parrice.valid}
\title{Are the Distribution Parameters Consistent with the Rice Distribution}
\description{
Is the distribution parameter object consistent with the corresponding distribution? The distribution functions (\code{\link{cdfrice}}, \code{\link{pdfrice}}, \code{\link{quarice}}, and
\code{\link{lmomrice}}) require consistent parameters to return the cumulative
probability (nonexceedance), density, quantile, and L-moments of the distribution,
respectively. These functions internally use the \code{\link{are.parrice.valid}}
function.
}
\usage{
are.parrice.valid(para, nowarn=FALSE)
}
\arguments{
\item{para}{A distribution parameter list returned by \code{\link{parrice}} or \code{\link{vec2par}}.}
\item{nowarn}{A logical switch on warning suppression. If \code{TRUE} then \code{options(warn=-1)} is made and restored on return. This switch is to permit calls in which warnings are not desired as the user knows how to handle the returned value---say in an optimization algorithm.}
}
\value{
\item{TRUE}{If the parameters are \code{rice} consistent.}
\item{FALSE}{If the parameters are not \code{rice} consistent.}
}
\note{
This function calls \code{\link{is.rice}} to verify consistency between the distribution parameter object and the intent of the user.
}
\author{W.H. Asquith}
\references{
Asquith, W.H., 2011, Distributional analysis with L-moment statistics using the R environment for statistical computing: Createspace Independent Publishing Platform, ISBN 978--146350841--8.
}
\seealso{\code{\link{is.rice}}, \code{\link{parrice}} }
\examples{
#para <- parrice(lmoms(c(123,34,4,654,37,78)))
#if(are.parrice.valid(para)) Q <- quarice(0.5,para)
}
\keyword{utility (distribution)}
\keyword{Distribution: Rice}
\keyword{utility (distribution/parameter validation)}
| /man/are.parrice.valid.Rd | no_license | wasquith/lmomco | R | false | false | 1,832 | rd | \name{are.parrice.valid}
\alias{are.parrice.valid}
\title{Are the Distribution Parameters Consistent with the Rice Distribution}
\description{
Is the distribution parameter object consistent with the corresponding distribution? The distribution functions (\code{\link{cdfrice}}, \code{\link{pdfrice}}, \code{\link{quarice}}, and
\code{\link{lmomrice}}) require consistent parameters to return the cumulative
probability (nonexceedance), density, quantile, and L-moments of the distribution,
respectively. These functions internally use the \code{\link{are.parrice.valid}}
function.
}
\usage{
are.parrice.valid(para, nowarn=FALSE)
}
\arguments{
\item{para}{A distribution parameter list returned by \code{\link{parrice}} or \code{\link{vec2par}}.}
\item{nowarn}{A logical switch on warning suppression. If \code{TRUE} then \code{options(warn=-1)} is made and restored on return. This switch is to permit calls in which warnings are not desired as the user knows how to handle the returned value---say in an optimization algorithm.}
}
\value{
\item{TRUE}{If the parameters are \code{rice} consistent.}
\item{FALSE}{If the parameters are not \code{rice} consistent.}
}
\note{
This function calls \code{\link{is.rice}} to verify consistency between the distribution parameter object and the intent of the user.
}
\author{W.H. Asquith}
\references{
Asquith, W.H., 2011, Distributional analysis with L-moment statistics using the R environment for statistical computing: Createspace Independent Publishing Platform, ISBN 978--146350841--8.
}
\seealso{\code{\link{is.rice}}, \code{\link{parrice}} }
\examples{
#para <- parrice(lmoms(c(123,34,4,654,37,78)))
#if(are.parrice.valid(para)) Q <- quarice(0.5,para)
}
\keyword{utility (distribution)}
\keyword{Distribution: Rice}
\keyword{utility (distribution/parameter validation)}
|
#' Align two functions
#'
#' This function aligns two SRSF functions using Dynamic Programming
#'
#' @param Q1 srsf of function 1
#' @param T1 sample points of function 1
#' @param Q2 srsf of function 2
#' @param T2 sample points of function 2
#' @param lambda controls amount of warping (default = 0)
#' @param method controls which optimization method (default="DP") options are
#' Dynamic Programming ("DP"), Coordinate Descent ("DP2"), and Riemannian BFGS
#' ("RBFGS")
#' @param w controls LRBFGS (default = 0.01)
#' @param f1o initial value of f1, vector or scalar depending on q1, defaults to zero
#' @param f2o initial value of f2, vector or scalar depending on q1, defaults to zero
#' @return gam warping function
#' @keywords srsf alignment
#' @references Srivastava, A., Wu, W., Kurtek, S., Klassen, E., Marron, J. S.,
#' May 2011. Registration of functional data using fisher-rao metric,
#' arXiv:1103.3817v2 [math.ST].
#' @references Tucker, J. D., Wu, W., Srivastava, A.,
#' Generative Models for Function Data using Phase and Amplitude Separation,
#' Computational Statistics and Data Analysis (2012), 10.1016/j.csda.2012.12.001.
#' @export
#' @examples
#' data("simu_data")
#' q = f_to_srvf(simu_data$f,simu_data$time)
#' gam = optimum.reparam(q[,1],simu_data$time,q[,2],simu_data$time)
optimum.reparam <- function(Q1,T1,Q2,T2,lambda=0,method="DP",w=0.01,f1o=0.0,
f2o=0.0){
# Aligns SRSF Q2 to Q1 by computing an optimal warping function gamma.
# Dispatches to one of four optimizers (per the roxygen header): "DP"
# (dynamic programming), "DPo" (DP variant assuming a common grid),
# "SIMUL" (simultaneous alignment), and "DP2"/anything else
# (coordinate descent / Riemannian BFGS via the compiled 'opt_reparam').
n = length(T1)
# Fall back from "DPo" to "DP" when the two sample grids differ.
# NOTE(review): all(T1!=T2) is TRUE only when *every* sample point
# differs; grids sharing any point (e.g. common endpoints) will not
# trigger the fallback -- any(T1 != T2) looks like the intended test.
if (method=="DPo" && all(T1!=T2))
method = "DP"
# Normalise both SRSFs to unit L2 norm before alignment.
Q1=(Q1/pvecnorm(Q1,2))
Q2=(Q2/pvecnorm(Q2,2))
# Recover the underlying functions (used by the SIMUL/DP2/RBFGS paths);
# f1o/f2o supply the initial values lost by the SRSF transform.
C1=srsf_to_f(Q1,T1,f1o)
C2=srsf_to_f(Q2,T2,f2o)
# Fixed flags forwarded positionally to the compiled 'opt_reparam' routine.
rotated = FALSE
isclosed = FALSE
skipm = 0
auto = 0
if (method=="DP"){
# Output buffers filled by the compiled DP routine.
G = rep(0,n)
T = rep(0,n)   # NOTE: shadows base::T (the TRUE alias) within this branch
size = 0;
ret = .Call('DPQ2', PACKAGE = 'fdasrvf', Q1, T1, Q2, T2, 1, n, n, T1, T2, n, n, G, T, size, lambda);
G = ret$G[1:ret$size]
Tf = ret$T[1:ret$size]
# Interpolate the DP solution back onto the T2 grid.
gam0 = approx(Tf,G,xout=T2)$y
} else if (method=="DPo"){
gam0 = .Call('DPQ', PACKAGE = 'fdasrvf', Q2, Q1, 1, n, lambda, 0, rep(0,n))
} else if (method=="SIMUL"){
out = simul_align(C1,C2)
u = seq(0,1,length.out=length(out$g1))
# Rescale the T1 grid to [0,1] before evaluating the warping.
tmin = min(T1)
tmax = max(T1)
timet2 = T1
timet2 = (timet2-tmin)/(tmax-tmin)
gam0 = simul_gam(u,out$g1,out$g2,timet2,out$s1,out$s2,timet2)
} else if (method=="DP2") {
# Coordinate-descent path: weight fixed to 0.0, refinement flag TRUE.
opt = rep(0,n+1+1);
swap = FALSE
fopts = rep(0,5)
comtime = rep(0,5)
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', C1,C2,n,1,0.0,TRUE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
# 'opt' was allocated with two extra trailing slots; drop them.
gam0 = out$opt
gam0 = gam0[1:(length(gam0)-2)]
if (out$swap){
gam0 = invertGamma(gam0);
}
} else {
# RBFGS path (any other 'method' value), weighted by w.
opt = rep(0,n+1+1);
swap = FALSE
fopts = rep(0,5)
comtime = rep(0,5)
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', C1,C2,n,1,w,FALSE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
# fopts[1] == 1000 appears to signal optimizer failure; retry with the
# DP2 settings (weight 0.0, refinement TRUE) -- confirm against the C code.
if (out$fopts[1] == 1000){
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', C1,C2,n,1,0.0,TRUE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
}
gam0 = out$opt
gam0 = gam0[1:(length(gam0)-2)]
if (out$swap){
gam0 = invertGamma(gam0);
}
}
gam = (gam0-gam0[1])/(gam0[length(gam0)]-gam0[1]) # rescale so gam runs exactly from 0 to 1
return(gam)
}
| /R/optimum.reparam.R | no_license | jasonradams47/fdasrvf_R | R | false | false | 3,498 | r | #' Align two functions
#'
#' This function aligns two SRSF functions using Dynamic Programming
#'
#' @param Q1 srsf of function 1
#' @param T1 sample points of function 1
#' @param Q2 srsf of function 2
#' @param T2 sample points of function 2
#' @param lambda controls amount of warping (default = 0)
#' @param method controls which optimization method (default="DP") options are
#' Dynamic Programming ("DP"), Coordinate Descent ("DP2"), and Riemannian BFGS
#' ("RBFGS")
#' @param w controls LRBFGS (default = 0.01)
#' @param f1o initial value of f1, vector or scalar depending on q1, defaults to zero
#' @param f2o initial value of f2, vector or scalar depending on q1, defaults to zero
#' @return gam warping function
#' @keywords srsf alignment
#' @references Srivastava, A., Wu, W., Kurtek, S., Klassen, E., Marron, J. S.,
#' May 2011. Registration of functional data using fisher-rao metric,
#' arXiv:1103.3817v2 [math.ST].
#' @references Tucker, J. D., Wu, W., Srivastava, A.,
#' Generative Models for Function Data using Phase and Amplitude Separation,
#' Computational Statistics and Data Analysis (2012), 10.1016/j.csda.2012.12.001.
#' @export
#' @examples
#' data("simu_data")
#' q = f_to_srvf(simu_data$f,simu_data$time)
#' gam = optimum.reparam(q[,1],simu_data$time,q[,2],simu_data$time)
optimum.reparam <- function(Q1, T1, Q2, T2, lambda = 0, method = "DP", w = 0.01,
                            f1o = 0.0, f2o = 0.0) {
  n = length(T1)
  # "DPo" requires both functions to share sample points; otherwise fall back
  # to the grid-based DP solver.
  # NOTE(review): all(T1 != T2) fires only when *every* point differs;
  # any(T1 != T2) may be the intended test -- confirm against upstream fdasrvf.
  if (method == "DPo" && all(T1 != T2))
    method = "DP"

  # Normalize both SRSFs to unit L2 norm, then recover the corresponding
  # functions (the curve-based optimizers below work on C1/C2).
  Q1 = (Q1 / pvecnorm(Q1, 2))
  Q2 = (Q2 / pvecnorm(Q2, 2))
  C1 = srsf_to_f(Q1, T1, f1o)
  C2 = srsf_to_f(Q2, T2, f2o)

  # Fixed settings forwarded to the compiled curve optimizer.
  rotated = FALSE
  isclosed = FALSE
  skipm = 0
  auto = 0

  if (method == "DP") {
    # Grid-restricted dynamic program; the piecewise warping (Tf, G) it
    # returns is interpolated back onto the T2 grid.
    G = rep(0, n)
    Tout = rep(0, n)  # renamed from `T` to avoid masking base TRUE
    size = 0
    ret = .Call('DPQ2', PACKAGE = 'fdasrvf', Q1, T1, Q2, T2, 1, n, n, T1, T2,
                n, n, G, Tout, size, lambda)
    G = ret$G[1:ret$size]
    Tf = ret$T[1:ret$size]
    gam0 = approx(Tf, G, xout = T2)$y
  } else if (method == "DPo") {
    # Original dynamic-programming solver on the SRSFs directly.
    gam0 = .Call('DPQ', PACKAGE = 'fdasrvf', Q2, Q1, 1, n, lambda, 0,
                 rep(0, n))
  } else if (method == "SIMUL") {
    # Simultaneous alignment; its internal grid is mapped onto T1 rescaled
    # to [0, 1].
    out = simul_align(C1, C2)
    u = seq(0, 1, length.out = length(out$g1))
    tmin = min(T1)
    tmax = max(T1)
    timet2 = (T1 - tmin) / (tmax - tmin)
    gam0 = simul_gam(u, out$g1, out$g2, timet2, out$s1, out$s2, timet2)
  } else {
    # "DP2" = coordinate descent (w = 0, DP flag TRUE); anything else is
    # treated as RBFGS (w = w, DP flag FALSE) with a coordinate-descent retry
    # when the optimizer reports failure.
    opt = rep(0, n + 1 + 1)
    swap = FALSE
    fopts = rep(0, 5)
    comtime = rep(0, 5)
    # Local helper: one call into the compiled optimizer with the shared
    # settings above; only the smoothing weight and DP flag vary.
    run_opt = function(wval, use_dp) {
      .Call('opt_reparam', PACKAGE = 'fdasrvf', C1, C2, n, 1, wval, use_dp,
            rotated, isclosed, skipm, auto, opt, swap, fopts, comtime)
    }
    if (method == "DP2") {
      out = run_opt(0.0, TRUE)
    } else {
      out = run_opt(w, FALSE)
      # NOTE(review): fopts[1] == 1000 appears to be the optimizer's failure
      # sentinel -- confirm in the compiled sources.
      if (out$fopts[1] == 1000) {
        out = run_opt(0.0, TRUE)
      }
    }
    # The last two entries of `opt` are bookkeeping, not part of gamma.
    gam0 = out$opt
    gam0 = gam0[1:(length(gam0) - 2)]
    if (out$swap) {
      gam0 = invertGamma(gam0)
    }
  }
  # Rescale so the warping function starts at 0 and ends at 1.
  gam = (gam0 - gam0[1]) / (gam0[length(gam0)] - gam0[1])
  return(gam)
}
|
systemContentDistribution <- function(lambda, m, buffer_size)
{
  # Stationary distribution of the number of jobs in the system: at most
  # buffer_size + m jobs can be present (at most m in service at any moment),
  # giving states 0 .. m + buffer_size.
  matrix_size = m + buffer_size + 1
  # cumulative_prob[i] = Pr(X < i): probability of *fewer than* i arrivals
  # (strict inequality, not <=). Built with scalar calls to
  # prob_n_arrivals(k, lambda) for k = 0 .. 2*matrix_size - 1, since that
  # helper is not known to be vectorized.
  cumulative_prob = cumsum(vapply(seq_len(2 * matrix_size) - 1,
                                  function(k) prob_n_arrivals(k, lambda),
                                  numeric(1)))
  # One-step transition matrix over the system states (1-based R indices).
  P = matrix(0, nrow = matrix_size, ncol = matrix_size)
  for (from in seq_len(matrix_size)) {
    for (to in seq_len(matrix_size)) {
      P[from, to] = prob_transition(from, to, lambda, m, matrix_size,
                                    cumulative_prob)
    }
  }
  # Approximate the stationary distribution by a high matrix power
  # (`%^%` is matrix exponentiation, e.g. from the expm package).
  nthP = P %^% 1000
  nthP[1, 1:matrix_size]
}
eSystemContent <- function(distribution)
{
  # Expected system content E[N] = sum_i i * Pr(N = i), where
  # distribution[k] is the probability of state k - 1 (0-based state index).
  # Vectorized form; also safe on an empty distribution (returns 0).
  sum((seq_along(distribution) - 1) * distribution)
}
eResponseTime <- function(lambda, distribution)
{
  # Mean response time: E[N] divided by the effective arrival rate
  # lambda * (1 - P_block), since blocked jobs never enter the system
  # (Little's law). The last entry of `distribution` is the full-system
  # (blocking) state.
  blocking_probability = distribution[length(distribution)]
  eSystemContent(distribution) / (lambda * (1 - blocking_probability))
}
# Blocking probability: the stationary probability of the full-system state,
# i.e. the last entry of `distribution`.
# NOTE(review): `lambda` is unused here; presumably kept so the signature
# matches the other e*-measures -- confirm with callers.
eBlockingProbability <-function(lambda,distribution)
{
	# Assigning to the function's own name returns the value invisibly.
	eBlockingProbability = distribution[length(distribution)];
} | /performance_measures.R | permissive | saxe405/edge | R | false | false | 1,371 | r | systemContentDistribution <- function(lambda,m,buffer_size)
{
	#a total of buffer_size + m jobs can be in the system, at max m can be in service in at any moment.
	# States run 0 .. m + buffer_size, so the transition matrix has this size.
	matrix_size = m+buffer_size+1;
	P = matrix(0, nrow = matrix_size, ncol = matrix_size);
	# index i contains probability that there are less than i arrivals (not less than equal to)
	# i.e. cumulative_prob[i] = Pr(X < i)
	cumulative_prob = rep_len(0,2*matrix_size)
	cumulative_prob[1] = prob_n_arrivals(0,lambda)
	# Running sum of scalar arrival probabilities builds the cumulative tail.
	for( i in 2:length(cumulative_prob))
	{
		cumulative_prob[i] = cumulative_prob[i-1] + prob_n_arrivals(i-1,lambda);
	}
	# Fill the one-step transition matrix entry by entry (1-based R indices).
	for(from in 1:matrix_size)
	{
		for( to in 1:matrix_size)
		{
			P[from,to] = prob_transition(from,to,lambda,m, matrix_size, cumulative_prob);
		}
	}
	# Approximate the stationary distribution by a high matrix power
	# (`%^%` is matrix exponentiation, e.g. from the expm package).
	nthP = P%^%1000;
	# Assigning to the function's own name returns the value invisibly.
	systemContentDistribution = nthP[1,1:matrix_size]
}
eSystemContent <- function(distribution)
{
  # Expected system content E[N] = sum_i i * Pr(N = i), where
  # distribution[k] is the probability of state k - 1 (0-based state index).
  # Vectorized form; also safe on an empty distribution (returns 0).
  sum((seq_along(distribution) - 1) * distribution)
}
eResponseTime <- function(lambda, distribution)
{
  # Mean response time: E[N] divided by the effective arrival rate
  # lambda * (1 - P_block), since blocked jobs never enter the system
  # (Little's law). The last entry of `distribution` is the full-system
  # (blocking) state.
  blocking_probability = distribution[length(distribution)]
  eSystemContent(distribution) / (lambda * (1 - blocking_probability))
}
# Blocking probability: the stationary probability of the full-system state,
# i.e. the last entry of `distribution`.
# NOTE(review): `lambda` is unused here; presumably kept so the signature
# matches the other e*-measures -- confirm with callers.
eBlockingProbability <-function(lambda,distribution)
{
	# Assigning to the function's own name returns the value invisibly.
	eBlockingProbability = distribution[length(distribution)];
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.