content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# plot4.R -- four-panel plot of household power consumption for 2007-02-01/02.
# Reads the UCI "household_power_consumption" extract (';'-separated, '?' = NA).
hpc<-read.csv("~/Documents/hpc.txt",header = TRUE,sep = ";",na.strings = "?",stringsAsFactors = FALSE,dec = ".")
#hpc$Date<-as.Date(hpc$Date,format = "%d/%m/%y")
# Keep only the two target days (Date is still character, d/m/Y, no zero padding).
newhpc<-hpc[hpc$Date %in% c("1/2/2007","2/2/2007"),]
# Combine Date and Time into a POSIXlt timestamp for the x axis.
datetime<-strptime(paste(newhpc$Date, newhpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(newhpc$Global_active_power)
subMetering1 <- as.numeric(newhpc$Sub_metering_1)
subMetering2 <- as.numeric(newhpc$Sub_metering_2)
subMetering3 <- as.numeric(newhpc$Sub_metering_3)
globalReactivePower <- as.numeric(newhpc$Global_reactive_power)
voltage <- as.numeric(newhpc$Voltage)
# Render the four panels to a 480x480 PNG laid out as a 2x2 grid.
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# BUG FIX: the original passed an empty `lty=` (a missing argument), so the
# legend drew no line samples; lty=1 draws solid key lines matching the plot.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
| /plot4.R | no_license | shukla-raj/ExData_Plotting1 | R | false | false | 1,245 | r | hpc<-read.csv("~/Documents/hpc.txt",header = TRUE,sep = ";",na.strings = "?",stringsAsFactors = FALSE,dec = ".")
#hpc$Date<-as.Date(hpc$Date,format = "%d/%m/%y")
newhpc<-hpc[hpc$Date %in% c("1/2/2007","2/2/2007"),]
datetime<-strptime(paste(newhpc$Date, newhpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(newhpc$Global_active_power)
subMetering1 <- as.numeric(newhpc$Sub_metering_1)
subMetering2 <- as.numeric(newhpc$Sub_metering_2)
subMetering3 <- as.numeric(newhpc$Sub_metering_3)
globalReactivePower <- as.numeric(newhpc$Global_reactive_power)
voltage <- as.numeric(newhpc$Voltage)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
# Bootstrap replicate output (written by a fitting script); values below are
# recorded results, not computed here.
seed <- 578
log.wt <- 0.0
# Shrinkage penalty used during estimation (exact role not visible here).
penalty <- 2.8115950178536287e-8
# Interval breakpoints -- send side unused; receive side presumably in
# seconds (56s doubling up to ~2 years) -- TODO confirm against the fitter.
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Names suggest null/residual deviance and degrees of freedom of the fit.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226473.87928833842
df.resid <- 35402
# Number of estimated coefficients (matches the length of `coefs` below).
df <- 165
coefs <- c(6.643197965109865, 6.06769819800986, 5.700118131810741, 5.395570720188193, 5.08712669633937, 4.911006542665757, 4.686394551730664, 4.613662841089397, 4.336754234073764, 4.229601321713079, 4.267602919805995, 4.148088726879516, 4.017076847520894, 3.9457845755875978, 3.7143662485309776, 3.5107598267154705, 3.2239347062079924, 2.952202258101428, 2.4441184880723474, 2.013976449209317, 1.5327963219928538, 0.9367719245923112, 1.0841531728245233, 5.126275104942396e-2, 0.6372768579396854, -0.7592663409952263, -0.3473788739491943, 1.0306977132193909, 1.02649763375229, -1.22920551819852, -1.9529877825380575, -2.3540865771971937, -0.3775565517148799, 0.8234881152777558, 1.4532471002030394, -0.9098940189272942, 0.526313219159461, -0.6978475198447243, -2.8572495124833175e-2, -0.9483757871639177, 0.8684256219823345, 0.8526244324914589, -0.7513697005626738, -2.084184202201395, -0.8956864215399208, -0.8695646674857578, -0.9563235084755988, 0.32878168859392687, 0.5662564285425037, -0.7905218751152978, -0.2986365494358533, 1.136138911160491, -2.4833990349669963, 1.6949203262135346, 0.6380774377731833, 0.999128543891688, -1.829206203371389, -0.18694985244708773, -0.788604717227715, 1.036181135886029, 0.83216844002715, 0.6653133451227337, -1.3732745058234108, -0.8102980157332447, -0.7992703939676096, -0.12375378435377596, 0.5640929717285733, -0.24998009491019868, -1.2609768643399384, -0.5073207229602625, -1.977035409746283, -0.3624958952277542, 0.8051805534291795, 0.9135720120723635, 0.7465466457356278, -1.3490698975680209, -1.0347426374795716, -1.4765022470271731, -9.277473818699995e-2, 0.7940069284478732, 1.134738744118038, 3.7192207650490604e-2, 0.2611460859289575, -1.6273299408739248, -0.9347968347832734, 0.4071577512111056, 1.224544142201952, 0.4928977382260774, 0.9003372926557672, -2.334741225378198, 0.47852761818721434, 0.5683289408814955, 0.8304453037702533, 0.32175638623780534, 5.614877411289097e-2, 1.2044002534139095, -0.35749508943625735, -0.15749279434352334, 
0.18077082298925995, 0.40494006273492816, 0.528015262624292, 7.77290991709862e-2, 0.6972738579685424, -6.429461324389793e-2, 0.5448082122151235, 0.872770659711502, 1.181820692157906, 0.19428108004147535, -1.3822373539173864, -0.8700472310185483, 0.34787993871461936, 0.8244090865560985, 1.5385894354601461, -0.47918462751028607, -0.16779219261279077, -0.5581551620494425, 0.7738806391106998, -0.32661289796904636, 0.47758849034632495, 0.6343409364071397, -0.34496855458325143, -0.3894151759635619, -1.517521776127729, -1.011282803143462, 0.36361514218561003, 0.969664737060768, -0.1122418130264879, 0.9560024688768796, -0.7798030443032278, -0.48818566247646794, 0.30216946535708555, 0.6971356901899202, 0.5748376851857994, 0.416858709143713, 5.3558854399053106e-2, 1.1185602127289183, -0.44680263288979005, 1.1304369234730243, 0.7134403585765369, 0.9278980349175122, 0.8061171172543875, -0.6290958487372017, -1.028120441677634, 0.4721492067221187, 0.4085038475923901, 0.45711475144947683, -0.193172390254507, -0.2891640348045964, -1.9527201673741703, 1.3045315079384647, 0.12470938376558711, 1.1751912694701467, -0.14140370139750913, 1.5750358605892096e-2, -6.618970380354151e-2, -1.7188128872749966, -1.3310391991560042, 0.8045743682814884, 1.2052561866749445, -0.5994531862126143, 1.4914544780177814, -0.3865196238803326, -0.1389554010726398, -0.10985553688353733, 1.1946427701982913)
| /analysis/boot/boot578.R | no_license | patperry/interaction-proc | R | false | false | 3,743 | r | seed <- 578
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226473.87928833842
df.resid <- 35402
df <- 165
coefs <- c(6.643197965109865, 6.06769819800986, 5.700118131810741, 5.395570720188193, 5.08712669633937, 4.911006542665757, 4.686394551730664, 4.613662841089397, 4.336754234073764, 4.229601321713079, 4.267602919805995, 4.148088726879516, 4.017076847520894, 3.9457845755875978, 3.7143662485309776, 3.5107598267154705, 3.2239347062079924, 2.952202258101428, 2.4441184880723474, 2.013976449209317, 1.5327963219928538, 0.9367719245923112, 1.0841531728245233, 5.126275104942396e-2, 0.6372768579396854, -0.7592663409952263, -0.3473788739491943, 1.0306977132193909, 1.02649763375229, -1.22920551819852, -1.9529877825380575, -2.3540865771971937, -0.3775565517148799, 0.8234881152777558, 1.4532471002030394, -0.9098940189272942, 0.526313219159461, -0.6978475198447243, -2.8572495124833175e-2, -0.9483757871639177, 0.8684256219823345, 0.8526244324914589, -0.7513697005626738, -2.084184202201395, -0.8956864215399208, -0.8695646674857578, -0.9563235084755988, 0.32878168859392687, 0.5662564285425037, -0.7905218751152978, -0.2986365494358533, 1.136138911160491, -2.4833990349669963, 1.6949203262135346, 0.6380774377731833, 0.999128543891688, -1.829206203371389, -0.18694985244708773, -0.788604717227715, 1.036181135886029, 0.83216844002715, 0.6653133451227337, -1.3732745058234108, -0.8102980157332447, -0.7992703939676096, -0.12375378435377596, 0.5640929717285733, -0.24998009491019868, -1.2609768643399384, -0.5073207229602625, -1.977035409746283, -0.3624958952277542, 0.8051805534291795, 0.9135720120723635, 0.7465466457356278, -1.3490698975680209, -1.0347426374795716, -1.4765022470271731, -9.277473818699995e-2, 0.7940069284478732, 1.134738744118038, 3.7192207650490604e-2, 0.2611460859289575, -1.6273299408739248, -0.9347968347832734, 0.4071577512111056, 1.224544142201952, 0.4928977382260774, 0.9003372926557672, -2.334741225378198, 0.47852761818721434, 0.5683289408814955, 0.8304453037702533, 0.32175638623780534, 5.614877411289097e-2, 1.2044002534139095, -0.35749508943625735, -0.15749279434352334, 
0.18077082298925995, 0.40494006273492816, 0.528015262624292, 7.77290991709862e-2, 0.6972738579685424, -6.429461324389793e-2, 0.5448082122151235, 0.872770659711502, 1.181820692157906, 0.19428108004147535, -1.3822373539173864, -0.8700472310185483, 0.34787993871461936, 0.8244090865560985, 1.5385894354601461, -0.47918462751028607, -0.16779219261279077, -0.5581551620494425, 0.7738806391106998, -0.32661289796904636, 0.47758849034632495, 0.6343409364071397, -0.34496855458325143, -0.3894151759635619, -1.517521776127729, -1.011282803143462, 0.36361514218561003, 0.969664737060768, -0.1122418130264879, 0.9560024688768796, -0.7798030443032278, -0.48818566247646794, 0.30216946535708555, 0.6971356901899202, 0.5748376851857994, 0.416858709143713, 5.3558854399053106e-2, 1.1185602127289183, -0.44680263288979005, 1.1304369234730243, 0.7134403585765369, 0.9278980349175122, 0.8061171172543875, -0.6290958487372017, -1.028120441677634, 0.4721492067221187, 0.4085038475923901, 0.45711475144947683, -0.193172390254507, -0.2891640348045964, -1.9527201673741703, 1.3045315079384647, 0.12470938376558711, 1.1751912694701467, -0.14140370139750913, 1.5750358605892096e-2, -6.618970380354151e-2, -1.7188128872749966, -1.3310391991560042, 0.8045743682814884, 1.2052561866749445, -0.5994531862126143, 1.4914544780177814, -0.3865196238803326, -0.1389554010726398, -0.10985553688353733, 1.1946427701982913)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shorts-S3.R
\name{fitted.shorts_model}
\alias{fitted.shorts_model}
\title{S3 method for returning predictions of \code{shorts_model}}
\usage{
\method{fitted}{shorts_model}(object, ...)
}
\arguments{
\item{object}{\code{shorts_model} object}
\item{...}{Extra arguments. Not used}
}
\description{
S3 method for returning predictions of \code{shorts_model}
}
\examples{
split_distances <- c(10, 20, 30, 40, 50)
split_times <- create_timing_gates_splits(
gates = split_distances,
MSS = 10,
MAC = 9,
FD = 0.25,
TC = 0
)
# Simple model
simple_model <- model_timing_gates(split_distances, split_times)
fitted(simple_model)
}
| /man/fitted.shorts_model.Rd | permissive | mladenjovanovic/shorts | R | false | true | 708 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shorts-S3.R
\name{fitted.shorts_model}
\alias{fitted.shorts_model}
\title{S3 method for returning predictions of \code{shorts_model}}
\usage{
\method{fitted}{shorts_model}(object, ...)
}
\arguments{
\item{object}{\code{shorts_model} object}
\item{...}{Extra arguments. Not used}
}
\description{
S3 method for returning predictions of \code{shorts_model}
}
\examples{
split_distances <- c(10, 20, 30, 40, 50)
split_times <- create_timing_gates_splits(
gates = split_distances,
MSS = 10,
MAC = 9,
FD = 0.25,
TC = 0
)
# Simple model
simple_model <- model_timing_gates(split_distances, split_times)
fitted(simple_model)
}
|
# --- Forward stepwise subset selection on the choice data ----------------
library(leaps)
?regsubsets
# Keep only the attribute, demographic and choice columns of interest.
train <- train[,c(1:83,95:99,110:154)]
which(colnames(train)=="age1")
# Reshape to long ("one row per alternative") format for mlogit: four
# alternatives per case (Ch1..Ch4), alternative-varying columns in `varying`.
S <- mlogit.data(train,shape="wide",choice="Choice",sep="",varying=c(4:83,89:132),alt.levels=c("Ch1","Ch2","Ch3","Ch4"),id.var="Case")
S$cate
summary(S)
summary(train)
# Recode the indicator column `cate` to numeric 0/1.
S$cate <- as.numeric(S$cate==TRUE)
which (colnames(S)=="CC")
which (colnames(S)=="Price")
# Forward selection: model1 on the long data (all columns minus identifiers),
# model2 on demographics only in the wide data.
model1 <- regsubsets(cate~.-Case-No-Task-chid-alt, data=S,nvmax = 30,method = "forward")
model2 <- regsubsets(cate~age+gender+educ+distance+night+region+Urb+ppark+money+year+car,data=train,nvmax = 10,method = "forward")
summary(model1)
# Model size minimising BIC for each search, then the coefficients of the
# 25-variable model from the first search.
which.min((summary(model2))$bic)
which.min((summary(model1))$bic)
coef(model1,25)
library(rpart)
library(rpart.plot)
set.seed(8)
# BUG FIX: `cate` was coerced to numeric above, so rpart() grew a regression
# (anova) tree and predict(..., type="class") is invalid for it.  Fit on
# as.factor(cate) instead (exactly as the cart1 model below already does) so
# this is a classification tree and class predictions / extra=2 plots work.
model3 <-rpart(as.factor(cate)~.-Case-No-Task-chid-alt-Ch1-Ch2-Ch3-Ch4-Choice,data=S)
prp(model3)
prp(model3,type=4,extra=2)   # extra=2: classification rate at each node
# In-sample class predictions and confusion table against the actual label.
predict1 <-predict(model3,newdata = S, type="class")
predict1
table(predict1, S$cate)
###### Random Forest ###########
S$cate
library("randomForest")
set.seed(8)
# Random forest on the choice outcome with product attributes and
# demographics as predictors; nodesize=50 limits tree growth, 100 trees.
rf <- randomForest(Choice~CC+GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark+year+car,data = S, nodesize = 50, ntree = 100)
summary(S)
?randomForest
summary(rf)
?predict
Prediction <- predict(rf, newdata = S)
Prediction
# NOTE(review): if rf is a classification forest, predict() returns a factor
# and as.double() yields class codes (1, 2, ...), not choice probabilities;
# predict(rf, newdata = S, type = "prob") may be what is intended -- confirm.
Prediction <-as.double(Prediction)
# Reshape to one row per case, one column per alternative; assumes exactly
# 14250 cases x 4 alternatives in alternative-major order -- TODO confirm.
PredRf <- matrix(Prediction,nrow=4, ncol=14250)
PredRf <-t(PredRf)
colnames(PredRf) <- c("Ch1", "Ch2", "Ch3","Ch4")
###### Cart ########
library(rpart)
library(rpart.plot)
# Classification tree (response coerced to factor) on the same attribute and
# demographic predictors as the random forest above.
cart1 <- rpart(as.factor(cate)~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark,data=S) #fast : X mostly are binary
summary(cart1)
cart1
prp(cart1) #same var occur at may points on the tree
prp(cart1, type = 1) #label all nodes
# type=4 labels all nodes; extra=4 adds per-class probabilities (see ?prp).
prp(cart1, type = 4,extra=4)
# In-sample class predictions.
predictcart1 <- predict(cart1, newdata=S, type='class')
predictcart1
# testIndexes <- which(folds==3,arr.ind=TRUE)
# testData <- S[testIndexes, ]
# trainData <- S[-testIndexes, ]
# set.seed(8)
# rf <- randomForest(Choice~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark,data = trainData, nodesize = 50, ntree = 100)
# summary(rf)
# Prediction <- predict(rf, newdata = testData, type="class")
# Prediction <-as.double(Prediction)
# PredRf <- matrix(Prediction,nrow=4, ncol=nrow(testData)/4)
# PredRf <-t(PredRf)
# colnames(PredRf) <- c("Ch1", "Ch2", "Ch3","Ch4")
# ActualChoice <- subset(testData, testData$Choice==TRUE)[,c("Ch1","Ch2","Ch3","Ch4")]
# ActualChoice <-as.matrix(ActualChoice)
# (-1*mean(log(PredRf[model.matrix(~ ActualChoice + 0) - PredRf > 0])))
# --- 10-fold cross-validated multinomial log-loss for the mnlogit model ---
folds <- cut(seq(1,nrow(S)),breaks=10,labels=FALSE)
tot <-0
for(k in 1:10){
#Segement your data by fold using the which() function
testIndexes <- which(folds==k,arr.ind=TRUE)
testData <- S[testIndexes, ]
trainData <- S[-testIndexes, ]
set.seed(8)
# BUG FIX: fit on the training fold only; the original fitted on the full
# data set S, so every test fold leaked into its own model.
Mn<- mnlogit(Choice~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+money+region+Urb+ppark,data=trainData)
# Predicted choice probabilities for the held-out fold (one row per case,
# one column per alternative).
Prediction <- predict(Mn, newdata = testData)
# Indicator matrix of the alternative actually chosen in each case.
ActualChoice <- subset(testData, testData$Choice==TRUE)[,c("Ch1","Ch2","Ch3","Ch4")]
ActualChoice <-as.matrix(ActualChoice)
# BUG FIX: score this fold's mnlogit predictions; the original indexed the
# stale random-forest matrix PredRf left over from the earlier section.
# The mask selects the predicted probability of the chosen alternative, so
# this accumulates the fold's mean negative log-likelihood.
tot <- tot +(-1*mean(log(Prediction[model.matrix(~ ActualChoice + 0) - Prediction > 0])))
#Use the test and train data partitions however you desire...
}
# Average cross-validated log-loss over the 10 folds.
tot/10
| /Analytics Edge Competition/src/FeatureSelection.R | no_license | wangyiranamy/Wang-Yiran-Undergrad | R | false | false | 3,533 | r | library(leaps)
?regsubsets
train <- train[,c(1:83,95:99,110:154)]
which(colnames(train)=="age1")
S <- mlogit.data(train,shape="wide",choice="Choice",sep="",varying=c(4:83,89:132),alt.levels=c("Ch1","Ch2","Ch3","Ch4"),id.var="Case")
S$cate
summary(S)
summary(train)
S$cate <- as.numeric(S$cate==TRUE)
which (colnames(S)=="CC")
which (colnames(S)=="Price")
model1 <- regsubsets(cate~.-Case-No-Task-chid-alt, data=S,nvmax = 30,method = "forward")
model2 <- regsubsets(cate~age+gender+educ+distance+night+region+Urb+ppark+money+year+car,data=train,nvmax = 10,method = "forward")
summary(model1)
which.min((summary(model2))$bic)
which.min((summary(model1))$bic)
coef(model1,25)
library(rpart)
library(rpart.plot)
set.seed(8)
model3 <-rpart(cate~.-Case-No-Task-chid-alt-Ch1-Ch2-Ch3-Ch4-Choice,data=S)
prp(model3)
prp(model3,type=4,extra=2)
predict1 <-predict(model3,newdata = S, type="class")
predict1
table(predict1, S$cate)
###### Random Forest ###########
S$cate
library("randomForest")
set.seed(8)
rf <- randomForest(Choice~CC+GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark+year+car,data = S, nodesize = 50, ntree = 100)
summary(S)
?randomForest
summary(rf)
?predict
Prediction <- predict(rf, newdata = S)
Prediction
Prediction <-as.double(Prediction)
PredRf <- matrix(Prediction,nrow=4, ncol=14250)
PredRf <-t(PredRf)
colnames(PredRf) <- c("Ch1", "Ch2", "Ch3","Ch4")
###### Cart ########
library(rpart)
library(rpart.plot)
cart1 <- rpart(as.factor(cate)~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark,data=S) #fast : X mostly are binary
summary(cart1)
cart1
prp(cart1) #same var occur at may points on the tree
prp(cart1, type = 1) #label all nodes
prp(cart1, type = 4,extra=4)
predictcart1 <- predict(cart1, newdata=S, type='class')
predictcart1
# testIndexes <- which(folds==3,arr.ind=TRUE)
# testData <- S[testIndexes, ]
# trainData <- S[-testIndexes, ]
# set.seed(8)
# rf <- randomForest(Choice~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+distance+night+money+region+Urb+ppark,data = trainData, nodesize = 50, ntree = 100)
# summary(rf)
# Prediction <- predict(rf, newdata = testData, type="class")
# Prediction <-as.double(Prediction)
# PredRf <- matrix(Prediction,nrow=4, ncol=nrow(testData)/4)
# PredRf <-t(PredRf)
# colnames(PredRf) <- c("Ch1", "Ch2", "Ch3","Ch4")
# ActualChoice <- subset(testData, testData$Choice==TRUE)[,c("Ch1","Ch2","Ch3","Ch4")]
# ActualChoice <-as.matrix(ActualChoice)
# (-1*mean(log(PredRf[model.matrix(~ ActualChoice + 0) - PredRf > 0])))
folds <- cut(seq(1,nrow(S)),breaks=10,labels=FALSE)
tot <-0
for(k in 1:10){
#Segement your data by fold using the which() function
testIndexes <- which(folds==k,arr.ind=TRUE)
testData <- S[testIndexes, ]
trainData <- S[-testIndexes, ]
set.seed(8)
Mn<- mnlogit(Choice~GN+NS+BU+FA+LD+BZ+FC+FP+RP+PP+KA+SC+TS+NV+MA+LB+AF+HU+Price+age+gender+educ+money+region+Urb+ppark,data=S)
Prediction <- predict(Mn, newdata = testData)
# Prediction <-as.double(Prediction)
# PredRf <- matrix(Prediction,nrow=4, ncol=14250)
# PredRf <-t(PredRf)
# colnames(PredRf) <- c("Ch1", "Ch2", "Ch3","Ch4")
ActualChoice <- subset(testData, testData$Choice==TRUE)[,c("Ch1","Ch2","Ch3","Ch4")]
ActualChoice <-as.matrix(ActualChoice)
tot <- tot +(-1*mean(log(PredRf[model.matrix(~ ActualChoice + 0) - PredRf > 0])))
#Use the test and train data partitions however you desire...
}
tot/10
|
### Exercise 2 ###
library(shiny)
# We'll look into these more next week: http://shiny.rstudio.com/gallery/widget-gallery.html
# Create a shiny server that creates a scatterplot.
# It should takes as an input the number of observations, and a color
# It should return a rendered plot
shinyServer(function(input, output) {
# Save a 'scatter' output which is a renderPlot object (renders a scatterplot)
# BUG FIX: the original used right-assignment (`->`) throughout, which tries
# to assign INTO the renderPlot()/rnorm() calls and fails at runtime; use
# `<-`.  It also read the non-existent input `sum` for y -- both axes should
# draw input$num random normal values, colored by input$color.
output$scatter <- renderPlot({
  x <- rnorm(input$num)
  y <- rnorm(input$num)
  return(plot(x, y, col = input$color))
})
}) | /exercise-2/server.R | permissive | kgoodman3/m18-shiny | R | false | false | 547 | r | ### Exercise 2 ###
library(shiny)
# We'll look into these more next week: http://shiny.rstudio.com/gallery/widget-gallery.html
# Create a shiny server that creates a scatterplot.
# It should takes as an input the number of observations, and a color
# It should return a rendered plot
shinyServer(function(input, output) {
# Save a 'scatter' property which is a renderPlot object (that renders a scatterplot)
output$scatter -> renderPlot({
x -> rnorm(input$num)
y -> rnorm(input$sum)
return(plot(x,y, col=input$color))
})
}) |
### Launching all pair spread computations
### Trying to forecast the spread between ?/? bonds futures
# library("SIT")
library("RPQuantUtils")
library("RPToolsDB")
require(ggplot2)
require("ppcor")
require(graphics)
require("TTR")
require(plyr)
# server fails if we not require this package loading
require(labeling)
# require(reshape)
require(reshape2)
require(RColorBrewer)
require(stats)
require(Rsolnp)
require(zoo)
require(xts)
require(vars)
# require(Quandl)
## for the modeling
require(rpart)
require(randomForest)
require(xgboost)
## require caret (for dummy variables)
# require(caret)
## require Metrics to compute error
require(Metrics)
source("./RCode/RP_Plotting_Utils.R")
source("./RCode/RP_Macro_Monthly_Utils.R")
source("./RCode/RP_Spread_Utils.R")
source("./RCode/RP_Dates_Utils.R")
source("./RCode/RP_Df_Utils.R")
# User / JIRA ticket identifiers used to build shared input/output folders.
user = 'sduprey'
# JIRA Code (e.g. NAR-#)
JIRACode = 'NAR-271'
repoPath = RP_GetSharedPath(user)
# Input Data Path
inputDataPath = paste(repoPath,'InputData/', user,'/',JIRACode,'/',sep="")
# Output Data Path
outputDataPath = paste(repoPath,'OutputData/', user,'/',JIRACode,'/',sep="")
outputDataPathWeek <- paste(outputDataPath,"Week_2007/",sep="")
# Backtest period and the country pairs (the two bond-futures legs) to test.
backtesting_starting_date <- "2007-01-01"
backtesting_ending_date <- "2015-10-01"
my_pairs = list(c("US","JP"),c("US","GB"),c("US","DE"))
# my_pairs = list (c("US","DE"))
# Silence warnings for the whole grid search (restore with options(warn=0)).
options(warn=-1)
for (my_pair in my_pairs){
  # Hyper-parameter grids for the search below; the commented vectors are
  # alternative values explored in earlier runs.
  # investment_horizon <- c(5,21,1)
  investment_horizon <- c(5)
  # my_depths <- c(3,4)
  my_depths <- c(3)
  # my_algorithms <- c("xgboost_cv","rpart_cv","rpart_pruned","xgboost","rpart","random_forest","rpart","svm")
  my_algorithms <- c("xgboost_cv")
  # my_algorithms <- c("rpart_unpruned")
  # europe_handling <- c(TRUE,FALSE)
  europe_handling <- c(TRUE)
  # to_zscore <- c(TRUE,FALSE)
  to_zscore <- c(TRUE)
  # spread_amplification_factors <- c(0.05,0.1,0.5,1,5)
  spread_amplification_factors <- c(0.5,1,0.05,0.1)
  # backtesting rolling window in year !!!
  # rolling_windows <- c(1,2,3,5,10,-1)
  rolling_windows <- c(3,5,10,-1)
  # recalibration_frequency <- c(10,20,50)
  # Recalibration frequencies are expressed in weeks (52 per year).
  recalibration_frequencies <- 52*rolling_windows
  # recalibration_frequencies <- 52*10
  # we calibrate our model just once
  # then we only retrain it
  # Grid search: run the backtest for every combination of the parameters
  # configured above and persist each run's outputs to disk.
  for (my_rolling_window in rolling_windows){
    for (depth in my_depths){
      for (algorithm in my_algorithms){
        for (europe_as_third_country in europe_handling){
          for (zscore in to_zscore){
            for (my_horizon in investment_horizon) {
              for (spread_amplification_factor in spread_amplification_factors){
                for(recalibration_frequency in recalibration_frequencies){
                  # capture.output(
                  # Backtest one parameter combination for the current pair.
                  all_results <- compute_spread_strategy_west_first_horizon(inputDataPath, outputDataPath, my_pair[1], my_pair[2], backtesting_starting_date, backtesting_ending_date, spread_amplification_factor,algorithm, europe_as_third_country, zscore, depth, my_horizon,my_rolling_window,recalibration_frequency)
                  # )
                  # Save each output table under a file name encoding the
                  # full parameter combination so runs can be compared later.
                  SaveDataFrame(all_results$results,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_results_week_2007",sep=""))
                  # SaveDataFrame(all_results$sentiments,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"spread_sentiments_week_2007",sep=""))
                  SaveDataFrame(all_results$first_leg,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_first_leg_week_2007",sep=""))
                  SaveDataFrame(all_results$second_leg,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_second_leg_week_2007",sep=""))
                  SaveDataFrame(all_results$calibration_parameters,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_calibration_week_2007",sep=""))
                  # importance_matrix <- xgb.importance(all_results$predicting_columns, model = all_results$first_leg)
                  # xgb.plot.importance(importance_matrix)
                  results <- all_results$results
                  #######toplot_df <- melt(results[,c("DATES","SPREAD_STRATEGY_TODAY", "SPREAD_STRATEGY_YESTERDAY", "SPREAD_STRATEGY_TOMORROW", "STRATEGY_TODAY", "STRATEGY_YESTERDAY","STRATEGY_TOMORROW","FIRST_BOND","SECOND_BOND")],"DATES")
                  # Long format for ggplot: one (DATES, series, value) row.
                  toplot_df <- melt(results[,c("DATES","STRATEGY_TODAY", "STRATEGY_YESTERDAY","STRATEGY_TOMORROW","FIRST_BOND","SECOND_BOND")],"DATES")
                  my_title <-paste("Macro Sentiment ML over Ravenpack ESS metrics and taxonomy ",my_pair[1],"/",my_pair[2],sep="")
                  # Plot the strategy/benchmark series as a visual check.
                  g<-ggplot(
                    toplot_df,aes(
                      x = DATES,y = value,group = variable,color = variable
                    )
                  ) +
                    geom_line() +
                    scale_x_date() +
                    ggtitle(my_title) + xlab("Time") + ylab("Cumulated sentiment") +
                    theme(title = element_text(size = 12, face = 'bold')) +
                    theme(legend.position = c(0.2,0.8), legend.box = "vertical") +
                    theme(legend.background = element_rect(fill = "gray90")) +
                    theme(legend.key.size = unit(0.7, "cm"))
                  print(g)
                  # Information ratio of the strategy returns at this horizon.
                  IR <- computeIR(results$STRATEGY_RETURN, my_horizon)
                  print("Information ratio for Ravenpack news trading strategy today s night return")
                  print(IR)
                }
              }
            }
          }
        }
      }
    }
  }
}
| /BOND_FOREX_ML/WEEK_OLD/NAR-269_CUM_US_WW_HORIZON_ALL_COMPUTATIONS_2007_WEEK.R | no_license | sduprey/ML_NEWS_STRATEGY | R | false | false | 6,650 | r | ### Launching all pair spread computations
### Trying to forecast the spread between ?/? bonds futures
# library("SIT")
library("RPQuantUtils")
library("RPToolsDB")
require(ggplot2)
require("ppcor")
require(graphics)
require("TTR")
require(plyr)
# server fails if we not require this package loading
require(labeling)
# require(reshape)
require(reshape2)
require(RColorBrewer)
require(stats)
require(Rsolnp)
require(zoo)
require(xts)
require(vars)
# require(Quandl)
## for the modeling
require(rpart)
require(randomForest)
require(xgboost)
## require caret (for dummy variables)
# require(caret)
## require Metrics to compute error
require(Metrics)
source("./RCode/RP_Plotting_Utils.R")
source("./RCode/RP_Macro_Monthly_Utils.R")
source("./RCode/RP_Spread_Utils.R")
source("./RCode/RP_Dates_Utils.R")
source("./RCode/RP_Df_Utils.R")
user = 'sduprey'
# JIRA Code (e.g. NAR-#)
JIRACode = 'NAR-271'
repoPath = RP_GetSharedPath(user)
# Input Data Path
inputDataPath = paste(repoPath,'InputData/', user,'/',JIRACode,'/',sep="")
# Output Data Path
outputDataPath = paste(repoPath,'OutputData/', user,'/',JIRACode,'/',sep="")
outputDataPathWeek <- paste(outputDataPath,"Week_2007/",sep="")
backtesting_starting_date <- "2007-01-01"
backtesting_ending_date <- "2015-10-01"
my_pairs = list(c("US","JP"),c("US","GB"),c("US","DE"))
# my_pairs = list (c("US","DE"))
options(warn=-1)
for (my_pair in my_pairs){
# investment_horizon <- c(5,21,1)
investment_horizon <- c(5)
# my_depths <- c(3,4)
my_depths <- c(3)
# my_algorithms <- c("xgboost_cv","rpart_cv","rpart_pruned","xgboost","rpart","random_forest","rpart","svm")
my_algorithms <- c("xgboost_cv")
# my_algorithms <- c("rpart_unpruned")
# europe_handling <- c(TRUE,FALSE)
europe_handling <- c(TRUE)
# to_zscore <- c(TRUE,FALSE)
to_zscore <- c(TRUE)
# spread_amplification_factors <- c(0.05,0.1,0.5,1,5)
spread_amplification_factors <- c(0.5,1,0.05,0.1)
# backtesting rolling window in year !!!
# rolling_windows <- c(1,2,3,5,10,-1)
rolling_windows <- c(3,5,10,-1)
# recalibration_frequency <- c(10,20,50)
recalibration_frequencies <- 52*rolling_windows
# recalibration_frequencies <- 52*10
# we calibrate our model just once
# then we only retrain it
for (my_rolling_window in rolling_windows){
for (depth in my_depths){
for (algorithm in my_algorithms){
for (europe_as_third_country in europe_handling){
for (zscore in to_zscore){
for (my_horizon in investment_horizon) {
for (spread_amplification_factor in spread_amplification_factors){
for(recalibration_frequency in recalibration_frequencies){
# capture.output(
all_results <- compute_spread_strategy_west_first_horizon(inputDataPath, outputDataPath, my_pair[1], my_pair[2], backtesting_starting_date, backtesting_ending_date, spread_amplification_factor,algorithm, europe_as_third_country, zscore, depth, my_horizon,my_rolling_window,recalibration_frequency)
# )
SaveDataFrame(all_results$results,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_results_week_2007",sep=""))
# SaveDataFrame(all_results$sentiments,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"spread_sentiments_week_2007",sep=""))
SaveDataFrame(all_results$first_leg,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_first_leg_week_2007",sep=""))
SaveDataFrame(all_results$second_leg,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_second_leg_week_2007",sep=""))
SaveDataFrame(all_results$calibration_parameters,outputDataPathWeek,paste(my_pair[1], my_pair[2], algorithm,"EU",europe_as_third_country,"ZS",zscore,"depth",depth,"amp",spread_amplification_factor,"inv_hor",my_horizon,"roll_win",my_rolling_window,"rec_freq",recalibration_frequency,"spread_calibration_week_2007",sep=""))
# importance_matrix <- xgb.importance(all_results$predicting_columns, model = all_results$first_leg)
# xgb.plot.importance(importance_matrix)
results <- all_results$results
#######toplot_df <- melt(results[,c("DATES","SPREAD_STRATEGY_TODAY", "SPREAD_STRATEGY_YESTERDAY", "SPREAD_STRATEGY_TOMORROW", "STRATEGY_TODAY", "STRATEGY_YESTERDAY","STRATEGY_TOMORROW","FIRST_BOND","SECOND_BOND")],"DATES")
toplot_df <- melt(results[,c("DATES","STRATEGY_TODAY", "STRATEGY_YESTERDAY","STRATEGY_TOMORROW","FIRST_BOND","SECOND_BOND")],"DATES")
my_title <-paste("Macro Sentiment ML over Ravenpack ESS metrics and taxonomy ",my_pair[1],"/",my_pair[2],sep="")
g<-ggplot(
toplot_df,aes(
x = DATES,y = value,group = variable,color = variable
)
) +
geom_line() +
scale_x_date() +
ggtitle(my_title) + xlab("Time") + ylab("Cumulated sentiment") +
theme(title = element_text(size = 12, face = 'bold')) +
theme(legend.position = c(0.2,0.8), legend.box = "vertical") +
theme(legend.background = element_rect(fill = "gray90")) +
theme(legend.key.size = unit(0.7, "cm"))
print(g)
IR <- computeIR(results$STRATEGY_RETURN, my_horizon)
print("Information ratio for Ravenpack news trading strategy today s night return")
print(IR)
}
}
}
}
}
}
}
}
}
|
# One-way ANOVA of prescription value across practices.
library(tidyverse)
library(dplyr)
# Prescription-level data and the per-practice actual-cost file.
pres_final_pros <- read.csv('pres_final_pros.csv')
data <- read.csv('actual_cost by practice_name.csv')
colnames(data) <- c('actual_cost', 'practice_name')
# Keep only prescriptions whose practice appears in the cost file.
lookup <- pres_final_pros %>% filter(practice_name %in% data$practice_name)
# Mean prescription value per practice.
df_aggr <- aggregate((lookup$value), list(lookup$practice_name), mean)
colnames(df_aggr) <- c('practice_name', 'mean')
# Normality check on the per-practice means before running the ANOVA.
shapiro.test(df_aggr$mean)
# One-way ANOVA: does mean value differ between practices?
model <- aov(lookup$value ~ lookup$practice_name)
summary(model)
| /SynopsisProject-ANOVA.R | no_license | marlonducille/SynopsisProject-Deliverable_MarlonDucille | R | false | false | 515 | r |
library(tidyverse)
library(dplyr)
pres_final_pros <- read.csv('pres_final_pros.csv')
data <- read.csv('actual_cost by practice_name.csv')
colnames(data) <- c('actual_cost', 'practice_name')
lookup <- pres_final_pros %>% filter(practice_name %in% data$practice_name)
df_aggr <- aggregate((lookup$value), list(lookup$practice_name), mean)
colnames(df_aggr) <- c('practice_name', 'mean')
shapiro.test(df_aggr$mean)
model <- aov(lookup$value ~ lookup$practice_name)
summary(model)
|
library(shiny)
# UI: a brushable boxplot plus two side-by-side tables ("data1"/"data2")
# presumably showing the data before and after the server removes the
# brushed outliers -- confirm against the matching server.R.
shinyUI(fluidPage(
  # Application title.
  # FIX: corrected the user-facing typo "interative" -> "interactive".
  titlePanel("Demo - interactive plots - remove outliers"),
  # The brushed region is reported to the server as input$plot_brush_.
  plotOutput("boxplot", brush = "plot_brush_"),
  fixedRow(
    column(width = 5, tags$b(tags$i("Actual Dataset")), tableOutput("data1")),
    column(width = 5, tags$b(tags$i("Updated Dataset")), tableOutput("data2"))
  )
))
| /Shiny/29_reomove_outliers/ui.R | permissive | Xiaozhu-Zhang1998/RDevCoursera | R | false | false | 422 | r | library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Demo - interative plots - remove outliers"),
plotOutput("boxplot", brush = "plot_brush_"),
fixedRow(
column(width = 5, tags$b(tags$i("Actual Dataset")), tableOutput("data1")),
column(width = 5, tags$b(tags$i("Updated Dataset")), tableOutput("data2"))
)
))
|
# Creating Rockland report
# Alene Onion
# April 2019
library(dplyr)
# Full backup of all lake sampling data; the LAKE_ID filtered below is
# presumably Rockland Lake -- confirm against the lake id registry.
data<-read.csv("sections/data/data.backup.all.csv", stringsAsFactors=FALSE)
Rockland<-data[data$LAKE_ID=="1403BIG0345",]
# Drop duplicate rows, keeping only the columns the report needs.
Rockland<-unique(Rockland[c('LAKE_ID','SAMPLE_ID','SAMPLE_NAME','LOCATION_ID','DATA_PROVIDER','SAMPLE_DATE','TIME','START_DEPTH','END_DEPTH','Characteristic.Name','Result.Value','Result.Unit','Result.Sample.Fraction','Depth','WATER','LocationName','Y_Coordinate','X_Coordinate','INFO_TYPE')])
Rockland$SAMPLE_DATE<-as.Date(Rockland$SAMPLE_DATE,format="%Y-%m-%d")
# Depth-profile parameters (temperature, pH, DO, etc.) plus secchi depth.
profiles<- Rockland %>%
  filter(Characteristic.Name %in% c('TEMPERATURE, WATER','PH','SPECIFIC CONDUCTANCE','DISSOLVED OXYGEN SATURATION','DISSOLVED OXYGEN (DO)','OXIDATION REDUCTION POTENTIAL (ORP)','DEPTH, SECCHI DISK DEPTH'))
# Tag each row as depth profile ("DP") or secchi disk depth ("SD").
profiles$INFO_TYPE<-"DP"
profiles$INFO_TYPE<-ifelse(profiles$Characteristic.Name=="DEPTH, SECCHI DISK DEPTH","SD",profiles$INFO_TYPE)
# Add 2017 Rockland secchi data (currently disabled)
#secchi<-read.csv("sections/data/Rockland.secchi.2017.csv",stringsAsFactors = FALSE)
#secchi$SAMPLE_DATE<-as.Date(secchi$SAMPLE_DATE,format="%m/%d/%Y")
#profiles<-merge(profiles,secchi,all=TRUE)
#rm(secchi)
# Phosphorus results, restricted to the LCI and IL data providers.
phosphorus<-Rockland %>%
  filter(Characteristic.Name %in% c('PHOSPHORUS')) %>%
  filter(DATA_PROVIDER %in% c('LCI','IL'))
phosphorus$START_DEPTH<-as.numeric(phosphorus$START_DEPTH)
titles<-Rockland$WATER[1]
# Render the report template; it reads the objects created above.
rmarkdown::render("Lake.Report.rmd")
# Scraps: quick look at distinct phosphorus sample depths.
junk<-unique(phosphorus[c('START_DEPTH','INFO_TYPE','DATA_PROVIDER','SAMPLE_NAME','SAMPLE_DATE')])
junk<-junk[order(junk$START_DEPTH),]
junk
| /Rockland.R | no_license | AleneOnion/LCIReports | R | false | false | 1,557 | r | #creating ROckland report
# Alene Onion
# April 2019
# Builds the Rockland lake report from the all-lakes data backup.
library(dplyr)
# Full backup of all lake sampling data; the LAKE_ID filtered below is
# presumably Rockland Lake -- confirm against the lake id registry.
data<-read.csv("sections/data/data.backup.all.csv", stringsAsFactors=FALSE)
Rockland<-data[data$LAKE_ID=="1403BIG0345",]
# Drop duplicate rows, keeping only the columns the report needs.
Rockland<-unique(Rockland[c('LAKE_ID','SAMPLE_ID','SAMPLE_NAME','LOCATION_ID','DATA_PROVIDER','SAMPLE_DATE','TIME','START_DEPTH','END_DEPTH','Characteristic.Name','Result.Value','Result.Unit','Result.Sample.Fraction','Depth','WATER','LocationName','Y_Coordinate','X_Coordinate','INFO_TYPE')])
Rockland$SAMPLE_DATE<-as.Date(Rockland$SAMPLE_DATE,format="%Y-%m-%d")
# Depth-profile parameters (temperature, pH, DO, etc.) plus secchi depth.
profiles<- Rockland %>%
  filter(Characteristic.Name %in% c('TEMPERATURE, WATER','PH','SPECIFIC CONDUCTANCE','DISSOLVED OXYGEN SATURATION','DISSOLVED OXYGEN (DO)','OXIDATION REDUCTION POTENTIAL (ORP)','DEPTH, SECCHI DISK DEPTH'))
# Tag each row as depth profile ("DP") or secchi disk depth ("SD").
profiles$INFO_TYPE<-"DP"
profiles$INFO_TYPE<-ifelse(profiles$Characteristic.Name=="DEPTH, SECCHI DISK DEPTH","SD",profiles$INFO_TYPE)
# Add 2017 Rockland secchi data (currently disabled)
#secchi<-read.csv("sections/data/Rockland.secchi.2017.csv",stringsAsFactors = FALSE)
#secchi$SAMPLE_DATE<-as.Date(secchi$SAMPLE_DATE,format="%m/%d/%Y")
#profiles<-merge(profiles,secchi,all=TRUE)
#rm(secchi)
# Phosphorus results, restricted to the LCI and IL data providers.
phosphorus<-Rockland %>%
  filter(Characteristic.Name %in% c('PHOSPHORUS')) %>%
  filter(DATA_PROVIDER %in% c('LCI','IL'))
phosphorus$START_DEPTH<-as.numeric(phosphorus$START_DEPTH)
titles<-Rockland$WATER[1]
# Render the report template; it reads the objects created above.
rmarkdown::render("Lake.Report.rmd")
# Scraps: quick look at distinct phosphorus sample depths.
junk<-unique(phosphorus[c('START_DEPTH','INFO_TYPE','DATA_PROVIDER','SAMPLE_NAME','SAMPLE_DATE')])
junk<-junk[order(junk$START_DEPTH),]
junk
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eml_data_qa.R
\name{qa_attributes}
\alias{qa_attributes}
\title{Test congruence of attributes and data for a given dataset and dataTable}
\usage{
qa_attributes(node, dataTable, data, checkEnumeratedDomains = TRUE)
}
\arguments{
\item{node}{(MNode) Member Node where the PID is associated with an object.}
\item{dataTable}{(dataTable) EML \code{dataTable} or \code{otherEntity} associated with the data object.}
\item{data}{(data.frame) Data frame of data object.}
\item{checkEnumeratedDomains}{(logical) Default TRUE. If True, will match unique values in data to defined EML enumerated domains.}
}
\description{
This function is called by \code{\link{qa_package}} but can be used on its own to test congruence
between a dataTable and a data object (data.frame). See \code{\link{qa_package}} help documentation for more details.
}
\details{
Functions:
Names: Check that all column names in attributes match the column names in the csv
Possible conditions to account for:
- attributeList does not exist for a csv
- Physical has not been set correctly
- Some of the attributes that exist in the data don't exist in the attributeList
- Some of the attributes that exist in the attributeList don't exist in the data
- There is a typo in one of the attributes or column names so they don't match (maybe covered by above)
Domains: Check that all attribute types match attribute types in the csv
Possible conditions to account for:
- nominal, ordinal, integer, ratio, dateTime
- If domain is enumerated domain, not all enumerated values in the data are accounted for in the enumerated definition
- If domain is enumerated domain, not all enumerated values in the enumerated definition are actually represented in the data
- Type of data does not match type
Values: Check for accidental characters in the csv (one char in a column of ints)
}
\examples{
\dontrun{
# For a package, run QA checks on a data.frame and its associated EML dataTable.
qa_attributes(mn, dataTable, dataObject)
}
}
\author{
Emily O'Dean \email{eodean10@gmail.com}
}
| /man/qa_attributes.Rd | permissive | jagoldstein/datamgmt | R | false | true | 2,169 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eml_data_qa.R
\name{qa_attributes}
\alias{qa_attributes}
\title{Test congruence of attributes and data for a given dataset and dataTable}
\usage{
qa_attributes(node, dataTable, data, checkEnumeratedDomains = TRUE)
}
\arguments{
\item{node}{(MNode) Member Node where the PID is associated with an object.}
\item{dataTable}{(dataTable) EML \code{dataTable} or \code{otherEntity} associated with the data object.}
\item{data}{(data.frame) Data frame of data object.}
\item{checkEnumeratedDomains}{(logical) Default TRUE. If True, will match unique values in data to defined EML enumerated domains.}
}
\description{
This function is called by \code{\link{qa_package}} but can be used on its own to test congruence
between a dataTable and a data object (data.frame). See \code{\link{qa_package}} help documentation for more details.
}
\details{
Functions:
Names: Check that all column names in attributes match the column names in the csv
Possible conditions to account for:
- attributeList does not exist for a csv
- Physical has not been set correctly
- Some of the attributes that exist in the data don't exist in the attributeList
- Some of the attributes that exist in the attributeList don't exist in the data
- There is a typo in one of the attributes or column names so they don't match (maybe covered by above)
Domains: Check that all attribute types match attribute types in the csv
Possible conditions to account for:
- nominal, ordinal, integer, ratio, dateTime
- If domain is enumerated domain, not all enumerated values in the data are accounted for in the enumerated definition
- If domain is enumerated domain, not all enumerated values in the enumerated definition are actually represented in the data
- Type of data does not match type
Values: Check for accidental characters in the csv (one char in a column of ints)
}
\examples{
\dontrun{
# For a package, run QA checks on a data.frame and its associated EML dataTable.
qa_attributes(mn, dataTable, dataObject)
}
}
\author{
Emily O'Dean \email{eodean10@gmail.com}
}
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(fig.width=7, fig.height = 5, fig.align = 'center', fig.show='hold',
warning=FALSE, message=FALSE, progress=FALSE, collapse=TRUE, comments="#>")
## ---- eval=FALSE---------------------------------------------------------
# install.packages('devtools')
# devtools::install_github('Keefe-Murphy/IMIFA')
## ---- eval=FALSE---------------------------------------------------------
# install.packages('IMIFA')
## ------------------------------------------------------------------------
library(IMIFA)
## ---- eval=FALSE---------------------------------------------------------
# # Simulate 100 observations from 3 balanced clusters with cluster-specific numbers of latent factors
# sim_data <- sim_IMIFA_data(N=100, G=3, P=20, Q=c(2, 2, 5),
# psi=matrix(rgamma(60, 2, 1), nrow=20, ncol=3),
# mu=matrix(rnorm(60, -2 + 1:3, 1), nrow=20, ncol=3, byrow=TRUE))
## ------------------------------------------------------------------------
data(olive)
## ---- eval=FALSE---------------------------------------------------------
# ?olive
## ---- eval=FALSE---------------------------------------------------------
# ?mcmc_IMIFA
## ---- eval=FALSE---------------------------------------------------------
# simMFA <- mcmc_IMIFA(olive, method="MFA", n.iters=10000, range.G=3:6, range.Q=0:3, centering=FALSE,
# scaling="unit", uni.type="isotropic", score.switch=FALSE)
## ---- eval=FALSE---------------------------------------------------------
# simMIFA <- mcmc_IMIFA(olive, method="MIFA", n.iters=10000, centering=TRUE,
# range.G=1:3, z.init="kmeans")
## ---- eval=FALSE---------------------------------------------------------
# simOMIFA <- mcmc_IMIFA(olive, method="OMIFA", n.iters=10000, range.G=10, alpha=0.8,
# alpha.d1=3.5, nu=3, alpha.d2=7, prop=0.6, epsilon=0.12)
## ---- eval=FALSE---------------------------------------------------------
# simIMIFA <- mcmc_IMIFA(olive, method="IMIFA", n.iters=50000, verbose=FALSE)
## ---- eval=FALSE---------------------------------------------------------
# resMFA <- get_IMIFA_results(simMFA)
## ---- eval=FALSE---------------------------------------------------------
# resMFA2 <- get_IMIFA_results(simMFA, G=3, criterion="aic.mcmc")
## ---- eval=FALSE---------------------------------------------------------
# resIMIFA <- get_IMIFA_results(simIMIFA, z.avgsim=TRUE)
## ---- include=FALSE------------------------------------------------------
load(file="res_olive_IMIFA__Edited-Vignette-only-Version.rda")
## ------------------------------------------------------------------------
summary(resIMIFA)
## ---- results='hide', eval=FALSE-----------------------------------------
# plot(resIMIFA, plot.meth="GQ")
## ---- results='hide', echo=FALSE-----------------------------------------
plot(resIMIFA, plot.meth="GQ", g=1)
## ---- results='hide', echo=FALSE-----------------------------------------
suppressWarnings(plot(resIMIFA, plot.meth="GQ", g=2))
## ---- results='hide', echo=FALSE-----------------------------------------
plot(resIMIFA, plot.meth="GQ", g=3)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area, g=1)
## ---- results="hide"-----------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area, g=2)
## ---- results="hide"-----------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", g=4)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="zlabels", g=5)
## ---- results='hide', echo=FALSE-----------------------------------------
suppressMessages(plot(resIMIFA, plot.meth="zlabels", g=5))
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="means", param="means", mat=TRUE, g=1)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="trace", param="scores", mat=TRUE, ind=1)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="trace", param="scores", mat=TRUE, by.fac=TRUE, fac=2)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="means", param="loadings", heat.map=TRUE, g=1)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="parallel.coords", param="uniquenesses")
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="errors", g=1)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="errors", g=3)
## ---- fig.height=7-------------------------------------------------------
plot(resIMIFA, plot.meth="all", param="alpha")
## ---- fig.height=7-------------------------------------------------------
plot(resIMIFA, plot.meth="all", param="discount")
| /data/genthat_extracted_code/IMIFA/vignettes/IMIFA.R | no_license | surayaaramli/typeRrh | R | false | false | 5,272 | r | ## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(fig.width=7, fig.height = 5, fig.align = 'center', fig.show='hold',
warning=FALSE, message=FALSE, progress=FALSE, collapse=TRUE, comments="#>")
## ---- eval=FALSE---------------------------------------------------------
# install.packages('devtools')
# devtools::install_github('Keefe-Murphy/IMIFA')
## ---- eval=FALSE---------------------------------------------------------
# install.packages('IMIFA')
## ------------------------------------------------------------------------
library(IMIFA)
## ---- eval=FALSE---------------------------------------------------------
# # Simulate 100 observations from 3 balanced clusters with cluster-specific numbers of latent factors
# sim_data <- sim_IMIFA_data(N=100, G=3, P=20, Q=c(2, 2, 5),
# psi=matrix(rgamma(60, 2, 1), nrow=20, ncol=3),
# mu=matrix(rnorm(60, -2 + 1:3, 1), nrow=20, ncol=3, byrow=TRUE))
## ------------------------------------------------------------------------
data(olive)
## ---- eval=FALSE---------------------------------------------------------
# ?olive
## ---- eval=FALSE---------------------------------------------------------
# ?mcmc_IMIFA
## ---- eval=FALSE---------------------------------------------------------
# simMFA <- mcmc_IMIFA(olive, method="MFA", n.iters=10000, range.G=3:6, range.Q=0:3, centering=FALSE,
# scaling="unit", uni.type="isotropic", score.switch=FALSE)
## ---- eval=FALSE---------------------------------------------------------
# simMIFA <- mcmc_IMIFA(olive, method="MIFA", n.iters=10000, centering=TRUE,
# range.G=1:3, z.init="kmeans")
## ---- eval=FALSE---------------------------------------------------------
# simOMIFA <- mcmc_IMIFA(olive, method="OMIFA", n.iters=10000, range.G=10, alpha=0.8,
# alpha.d1=3.5, nu=3, alpha.d2=7, prop=0.6, epsilon=0.12)
## ---- eval=FALSE---------------------------------------------------------
# simIMIFA <- mcmc_IMIFA(olive, method="IMIFA", n.iters=50000, verbose=FALSE)
## ---- eval=FALSE---------------------------------------------------------
# resMFA <- get_IMIFA_results(simMFA)
## ---- eval=FALSE---------------------------------------------------------
# resMFA2 <- get_IMIFA_results(simMFA, G=3, criterion="aic.mcmc")
## ---- eval=FALSE---------------------------------------------------------
# resIMIFA <- get_IMIFA_results(simIMIFA, z.avgsim=TRUE)
## ---- include=FALSE------------------------------------------------------
load(file="res_olive_IMIFA__Edited-Vignette-only-Version.rda")
## ------------------------------------------------------------------------
summary(resIMIFA)
## ---- results='hide', eval=FALSE-----------------------------------------
# plot(resIMIFA, plot.meth="GQ")
## ---- results='hide', echo=FALSE-----------------------------------------
plot(resIMIFA, plot.meth="GQ", g=1)
## ---- results='hide', echo=FALSE-----------------------------------------
suppressWarnings(plot(resIMIFA, plot.meth="GQ", g=2))
## ---- results='hide', echo=FALSE-----------------------------------------
plot(resIMIFA, plot.meth="GQ", g=3)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area, g=1)
## ---- results="hide"-----------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area, g=2)
## ---- results="hide"-----------------------------------------------------
plot(resIMIFA, plot.meth="zlabels", g=4)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="zlabels", g=5)
## ---- results='hide', echo=FALSE-----------------------------------------
suppressMessages(plot(resIMIFA, plot.meth="zlabels", g=5))
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="means", param="means", mat=TRUE, g=1)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="trace", param="scores", mat=TRUE, ind=1)
## ---- eval=FALSE---------------------------------------------------------
# plot(resIMIFA, plot.meth="trace", param="scores", mat=TRUE, by.fac=TRUE, fac=2)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="means", param="loadings", heat.map=TRUE, g=1)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="parallel.coords", param="uniquenesses")
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="errors", g=1)
## ------------------------------------------------------------------------
plot(resIMIFA, plot.meth="errors", g=3)
## ---- fig.height=7-------------------------------------------------------
plot(resIMIFA, plot.meth="all", param="alpha")
## ---- fig.height=7-------------------------------------------------------
plot(resIMIFA, plot.meth="all", param="discount")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_date_variables.R
\name{add_date_variables}
\alias{add_date_variables}
\title{Add year, month, and day of year variable columns to daily flows}
\usage{
add_date_variables(data, dates = Date, station_number, water_year_start = 1)
}
\arguments{
\item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers).
Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not
'Date' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database.
Leave blank if using \code{data} argument.}
\item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for
analysis. Default \code{1}.}
}
\value{
A tibble data frame of the source data with additional columns:
\item{CalendarYear}{calendar year}
\item{Month}{numeric month (1 to 12)}
\item{MonthName}{month abbreviation (Jan-Dec)}
\item{WaterYear}{year starting from the selected month start, water_year_start}
\item{DayofYear}{day of the year from the selected month start (1-365 or 366)}
}
\description{
Add columns of CalendarYear (YYYY), Month (MM), MonthName (e.g. 'Jan'), WaterYear (YYYY), and DayofYear (1-365 or 366;
of WaterYear); to a data frame with a column of dates called 'Date'. Water years are designated by the year in which they end.
For example, Water Year 1999 (starting Oct) is from 1 Oct 1998 (DayofYear 1) to 30 Sep 1999 (DayofYear 365)).
}
\examples{
# Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
if (file.exists(tidyhydat::hy_downloaded_db())) {
# Add date variables using calendar years
add_date_variables(station_number = "08NM116")
# Add date variables using water years starting in August
add_date_variables(station_number = "08NM116",
water_year_start = 8)
}
}
| /man/add_date_variables.Rd | no_license | cran/fasstr | R | false | true | 2,386 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_date_variables.R
\name{add_date_variables}
\alias{add_date_variables}
\title{Add year, month, and day of year variable columns to daily flows}
\usage{
add_date_variables(data, dates = Date, station_number, water_year_start = 1)
}
\arguments{
\item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers).
Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not
'Date' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database.
Leave blank if using \code{data} argument.}
\item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for
analysis. Default \code{1}.}
}
\value{
A tibble data frame of the source data with additional columns:
\item{CalendarYear}{calendar year}
\item{Month}{numeric month (1 to 12)}
\item{MonthName}{month abbreviation (Jan-Dec)}
\item{WaterYear}{year starting from the selected month start, water_year_start}
\item{DayofYear}{day of the year from the selected month start (1-365 or 366)}
}
\description{
Add columns of CalendarYear (YYYY), Month (MM), MonthName (e.g. 'Jan'), WaterYear (YYYY), and DayofYear (1-365 or 366;
of WaterYear); to a data frame with a column of dates called 'Date'. Water years are designated by the year in which they end.
For example, Water Year 1999 (starting Oct) is from 1 Oct 1998 (DayofYear 1) to 30 Sep 1999 (DayofYear 365)).
}
\examples{
# Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
if (file.exists(tidyhydat::hy_downloaded_db())) {
# Add date variables using calendar years
add_date_variables(station_number = "08NM116")
# Add date variables using water years starting in August
add_date_variables(station_number = "08NM116",
water_year_start = 8)
}
}
|
testlist <- list(metric = 0L, vec = NULL, vec = NULL, w_vec = structure(0, .Dim = c(1L, 1L)), y_vec = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 9:8))
result <- do.call(UniIsoRegression:::reg_2d,testlist)
str(result) | /UniIsoRegression/inst/testfiles/reg_2d/libFuzzer_reg_2d/reg_2d_valgrind_files/1612737060-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 413 | r | testlist <- list(metric = 0L, vec = NULL, vec = NULL, w_vec = structure(0, .Dim = c(1L, 1L)), y_vec = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 9:8))
result <- do.call(UniIsoRegression:::reg_2d,testlist)
str(result) |
# ui.R -- Shiny UI for the text prediction model.
#
# Fixes: removed the dangling comma left behind when submitButton() was
# commented out, and the empty argument in tabPanel("Probabilities", ).
# A trailing comma in an R call creates an empty argument, which makes
# sidebarPanel()/tabPanel() fail with "argument is empty" when the UI is
# built. Also balanced the closing parentheses.
library(shiny)
shinyUI(fluidPage(
  titlePanel("Text Prediction Model"),
  sidebarLayout(
    sidebarPanel(
      h3("PREDICTION TEXT"),
      # Free-text input read by the server as input$txt.
      textInput("txt", "Please enter the text below", value = "Write here")
      #submitButton("Submit")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Prediction",
                 textOutput("out1"),
                 textOutput("out2")
        ),
        # Placeholder tab; content to be added.
        tabPanel("Probabilities")
      )
    )
  )
))
) | /ui.R | no_license | Rahulg13/UltimateProject | R | false | false | 744 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Text Prediction Model"),
sidebarLayout(
sidebarPanel(
h3("PREDICTION TEXT"),
textInput("txt", "Please enter the text below", value = "Write here"),
#submitButton("Submit")
),
mainPanel(
tabsetPanel(
tabPanel("Prediction",
textOutput("out1"),
textOutput("out2")
),
tabPanel("Probabilities", )
)
)
)
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/describr.R
\name{describr}
\alias{describr}
\title{\code{describr} object}
\usage{
describr(df, by = NULL, theme_new = theme_default(), pvalues = FALSE,
totals = TRUE)
}
\arguments{
\item{df}{a data frame.}
\item{by}{unquoted name of optional stratifying factor}
\item{theme_new}{theme to use}
\item{pvalues}{Flag indicating whether p-values for homogeneity null hypothesis
should be printed if available.}
\item{totals}{Flag indicating whether all descriptors should also be given
for the total sample.}
}
\value{
An object of class \code{describr < list} with fields:
\describe{
\item{\code{df}}{data frame to describe}
\item{\code{by}}{variable name by which to stratify the description
(character or NULL)}
\item{\code{core}}{named list with names corresponding
to the variables in \code{df} which will be described and each element
is a list of \code{descriptor} objects used to describe that variable.
TODO: rename!}
\item{\code{group_descriptors}}{list of descriptors used for the grouping
(stratifying) variable \code{by}.}
\item{\code{pvalues}}{Boolean flag indicating whether p-values should be
displayed}
\item{\code{totals}}{Boolean flag indicating whether a total column
should be included when the description is stratified by a grouping
factor.}
\item{\code{theme_new}}{The theme to use for plotting. TODO: rename}
}
}
\description{
This is the core object for \code{describr} package.
All information required to create a descriptive table for a data frame is
stored a \code{describr}-object.
}
| /man/describr.Rd | no_license | imbi-heidelberg/describr | R | false | true | 1,686 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/describr.R
\name{describr}
\alias{describr}
\title{\code{describr} object}
\usage{
describr(df, by = NULL, theme_new = theme_default(), pvalues = FALSE,
totals = TRUE)
}
\arguments{
\item{df}{a data frame.}
\item{by}{unquoted name of optional stratifying factor}
\item{theme_new}{theme to use}
\item{pvalues}{Flag indicating whether p-values for homogeneity null hypothesis
should be printed if available.}
\item{totals}{Flag indicating whether all descriptors should also be given
for the total sample.}
}
\value{
An object of class \code{describr < list} with fields:
\describe{
\item{\code{df}}{data frame to describe}
\item{\code{by}}{variable name by which to stratify the description
(character or NULL)}
\item{\code{core}}{named list with names corresponding
to the variables in \code{df} which will be described and each element
is a list of \code{descriptor} objects used to describe that variable.
TODO: rename!}
\item{\code{group_descriptors}}{list of descriptors used for the grouping
(stratifying) variable \code{by}.}
\item{\code{pvalues}}{Boolean flag indicating whether p-values should be
displayed}
\item{\code{totals}}{Boolean flag indicating whether a total column
should be included when the description is stratified by a grouping
factor.}
\item{\code{theme_new}}{The theme to use for plotting. TODO: rename}
}
}
\description{
This is the core object for \code{describr} package.
All information required to create a descriptive table for a data frame is
stored a \code{describr}-object.
}
|
library(soundgen)
### Name: getCheckerboardKernel
### Title: Checkerboard kernel
### Aliases: getCheckerboardKernel
### Keywords: internal
### ** Examples
kernel = soundgen:::getCheckerboardKernel(size = 64, kernelSD = 0.2, plot = TRUE)
dim(kernel)
| /data/genthat_extracted_code/soundgen/examples/getCheckerboardKernel.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 256 | r | library(soundgen)
### Name: getCheckerboardKernel
### Title: Checkerboard kernel
### Aliases: getCheckerboardKernel
### Keywords: internal
### ** Examples
kernel = soundgen:::getCheckerboardKernel(size = 64, kernelSD = 0.2, plot = TRUE)
dim(kernel)
|
# Exam 2 script: derive a year column for the nest data, relabel the first
# two columns as coordinates, and aggregate young counts per year.
#
# Fix: library() calls moved to the top -- the original called
# stringr::str_sub() before library(stringr) was attached, which fails in
# a fresh R session.
library(stringr)
library(dplyr)
nest <- read.csv("C:/Users/hjp4906/Desktop/Exam 2/nest2.csv")
year <- nest$modayyr
# First four characters of the modayyr stamp are taken as the year.
year2 <- str_sub(string = year, start = 1, end = 4)
head(year)
head(year2)
nest$year <- year2
year2 <- as.Date(x = year2, format = '%Y')
colnames(nest)[1] <- "lon"
colnames(nest)[2] <- 'lat'
# NOTE(review): writes to the working directory, not the original path.
write.csv(nest, 'nest2.csv')
# Treat zeros as missing while summing young per year, then restore them.
nest[nest == 0] <- NA
sums <- aggregate(nest$youngnum, by = list(year = nest$year), FUN = sum)
nest[is.na(nest)] <- 0
| /Exam2.R | no_license | hprev30/Exam2 | R | false | false | 450 | r | nest <- read.csv("C:/Users/hjp4906/Desktop/Exam 2/nest2.csv")
# NOTE(review): this fragment uses str_sub() before library(stringr) is
# attached below -- it only runs if stringr was already loaded.
year <- nest$modayyr
# First four characters of the modayyr stamp are taken as the year.
year2 <- str_sub(string = year, start = 1, end=4)
head(year)
head(year2)
library(stringr)
nest$year <- year2
year2 <- as.Date(x = year2, format = '%Y')
# Relabel the first two columns as coordinates.
colnames(nest)[1] <- "lon"
colnames(nest)[2] <- 'lat'
write.csv(nest, 'nest2.csv')
library('dplyr')
# Treat zeros as missing while summing young per year, then restore them.
nest[nest == 0] <- NA
sums <- aggregate(nest$youngnum, by=list(year=nest$year), FUN=sum)
nest[is.na(nest)] <- 0
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536106886e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109045-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 257 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536106886e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#
# Copyright 2007-2021 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Collect the names of a model and all of its submodels, recursively.
#
# Fix: uses unlist(lapply(...)) instead of sapply(). When submodel
# subtrees have unequal depths, sapply() returns a list rather than a
# character vector, so c(ret, sapply(...)) silently became a list and the
# %in% duplicate-name check in mxRename then operated on a list. The
# result is now always a flat character vector, this model's name first.
#
# @param model an MxModel (slots @name, @submodels)
# @return character vector of every model name in the tree
getAllModelNames <- function(model) {
	ret <- model@name
	if (length(model@submodels) > 0) {
		ret <- c(ret, unlist(lapply(model@submodels, getAllModelNames), use.names = FALSE))
	}
	ret
}
# Rename a model (or one of its submodels).
#
# Validates the requested name, checks it does not collide with any name
# already in the model tree, then rewrites every reference from the old
# name to the new one throughout the hierarchy.
#
# @param model   MxModel to operate on
# @param newname character string; the replacement model name
# @param oldname character string naming the (sub)model to rename, or NA
#   (default) to rename the top-level model itself
# @return the updated MxModel
mxRename <- function(model, newname, oldname = NA) {
	warnModelCreatedByOldVersion(model)
	if (!is.character(newname)) {
		stop("'newname' argument is not a character string")
	}
	if (!(is.na(oldname) || is.character(oldname))) {
		stop("'oldname' argument is not either NA or character string")
	}
	imxVerifyName(newname, 0)
	# Default target is the top-level model's own name.
	target <- if (is.na(oldname)) model@name else oldname
	if (newname == target) {
		return(model)
	}
	taken <- getAllModelNames(model)
	if (newname %in% taken) {
		stop(paste("There is already a model named", omxQuotes(newname)))
	}
	propagateModelName(model, target, newname)
}
# Rewrite every occurrence of oldname to newname throughout a model tree:
# the model's own name, the references held by its matrices, algebras,
# constraints, confidence intervals, expectation, fit function, and cached
# output, then recurse into each submodel.
#
# @param model   MxModel whose tree is being rewritten
# @param oldname name being replaced
# @param newname replacement name
# @return the updated model
propagateModelName <- function(model, oldname, newname) {
	if(model@name == oldname) {
		model@name <- newname
	}
	model@matrices <- lapply(model@matrices, renameMatrix, oldname, newname)
	model@algebras <- lapply(model@algebras, renameAlgebra, oldname, newname)
	model@constraints <- lapply(model@constraints, renameConstraint, oldname, newname)
	model@intervals <- lapply(model@intervals, renameConfidenceIntervals, oldname, newname)
	model@expectation <- genericExpRename(model@expectation, oldname, newname)
	model@fitfunction <- genericFitRename(model@fitfunction, oldname, newname)
	model@submodels <- lapply(model@submodels, propagateModelName, oldname, newname)
	model@output <- renameModelOutput(model@output, oldname, newname)
	# The names attributes themselves embed references, so rebuild them
	# from the (already renamed) elements.
	names(model@intervals) <- imxExtractReferences(model@intervals)
	names(model@submodels) <- imxExtractNames(model@submodels)
	return(model)
}
# Rewrite a qualified reference ("model.entity") whose model part matches
# oldname. A bare single-component reference is left untouched: without
# more context we cannot tell whether it names a model or an entity.
# Zero-length and NA inputs pass through unchanged.
renameReference <- function(reference, oldname, newname) {
	if (length(reference) == 0 || is.na(reference)) {
		return(reference)
	}
	parts <- strsplit(reference, imxSeparatorChar, fixed = TRUE)[[1]]
	if (length(parts) == 2 && parts[[1]] == oldname) {
		return(paste(newname, parts[[2]], sep = imxSeparatorChar))
	}
	reference
}
renameUnqualifiedReference <- function(reference, oldname, newname) {
	# Like renameReference(), except that a bare (unqualified) name matching
	# 'oldname' is assumed to BE a model name and is rewritten as well.
	if (length(reference) == 0 || is.na(reference)) {
		return(reference)
	}
	parts <- unlist(strsplit(reference, imxSeparatorChar, fixed = TRUE))
	if (length(parts) == 2 && parts[[1]] == oldname) {
		paste(newname, parts[[2]], sep = imxSeparatorChar)
	} else if (length(parts) == 1 && parts[[1]] == oldname) {
		newname  # assume it's a model name
	} else {
		reference
	}
}
renameModelOutput <- function(output, oldname, newname) {
	# Rewrite model-name references in the row names of stored confidence
	# interval results so fitted output stays consistent after a rename.
	if (is.null(output)) {
		return(output)
	}
	# Helper shared by both CI matrices.  vapply() yields a character vector
	# directly; the original lapply() produced a list that dimnames<- had to
	# coerce, and the local 'names' shadowed base::names().
	renameRows <- function(mat) {
		dn <- dimnames(mat)
		rows <- vapply(dn[[1]], renameReference, character(1),
			       oldname, newname, USE.NAMES = FALSE)
		dimnames(mat) <- list(rows, dn[[2]])
		mat
	}
	if (!is.null(output$confidenceIntervals)) {
		output$confidenceIntervals <- renameRows(output$confidenceIntervals)
	}
	if (!is.null(output$confidenceIntervalCodes)) {
		output$confidenceIntervalCodes <- renameRows(output$confidenceIntervalCodes)
	}
	return(output)
}
renameMatrix <- function(matrix, oldname, newname) {
	# Rewrite qualified references appearing in a matrix's free-parameter
	# labels.  Assigning into labels[] keeps the dim and dimnames of the
	# labels matrix intact; the original apply(..., c(1,2)) rebuilt the
	# matrix and misbehaves on zero-extent label matrices.
	if (length(matrix@labels) > 0) {
		matrix@labels[] <- vapply(matrix@labels, renameReference, character(1),
					  oldname, newname, USE.NAMES = FALSE)
	}
	return(matrix)
}
renameConstraint <- function(constraint, oldname, newname) {
	# Update every place a constraint can mention a model name: its own
	# (possibly qualified) name, the constraint formula, and the two
	# internally generated algebra references.
	constraint@name <- renameReference(constraint@name, oldname, newname)
	constraint@formula <- renameFormula(constraint@formula, oldname, newname)
	constraint@alg1 <- renameReference(constraint@alg1, oldname, newname)
	constraint@alg2 <- renameReference(constraint@alg2, oldname, newname)
	constraint
}
renameSymbol <- function(symbol, oldname, newname) {
	# Leaf handler for formula rewriting: numeric and character literals
	# pass through unchanged; name symbols are round-tripped through
	# renameReference() and re-created as symbols.
	if (is.numeric(symbol) || is.character(symbol)) {
		return(symbol)
	}
	as.symbol(renameReference(as.character(symbol), oldname, newname))
}
renameAlgebra <- function(algebra, oldname, newname) {
	# An algebra only embeds model names inside its formula tree.
	algebra@formula <- renameFormula(algebra@formula, oldname, newname)
	algebra
}
renameConfidenceIntervals <- function(interval, oldname, newname) {
	# A confidence-interval request stores a single (possibly qualified)
	# reference to the quantity it tracks.
	interval@reference <- renameReference(interval@reference, oldname, newname)
	interval
}
renameFormula <- function(formula, oldname, newname) {
	# Recursively rewrite model-name references inside a formula/call tree.
	# Length-1 leaves are handled by renameSymbol(); for calls, the head
	# (operator/function) is kept and each argument is visited.
	len <- length(formula)
	if (len == 0) {
		stop("mxRename has reached an invalid state")
	} else if (len == 1) {
		formula <- renameSymbol(formula, oldname, newname)
	} else {
		args <- formula[-1]
		# Skip empty-symbol arguments (as.character() gives '' for them).
		# vapply() guarantees a logical vector where sapply() did not, and
		# 'args' avoids shadowing utils::tail().
		select <- vapply(args, function(x) !identical(as.character(x), ''), logical(1))
		args[select] <- lapply(args[select], renameFormula, oldname, newname)
		formula[-1] <- args
	}
	return(formula)
}
| /R/MxRename.R | no_license | OpenMx/OpenMx | R | false | false | 5,451 | r | #
# Copyright 2007-2021 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
getAllModelNames <- function(model) {
	# Collect the name of 'model' and, recursively, of all of its submodels.
	# Always returns a plain character vector.  unlist(lapply(...)) is used
	# instead of sapply() because sapply() returns a *list* when submodels
	# contain different numbers of nested models, and a list here would make
	# the later `newname %in% existing` check in mxRename() unreliable.
	ret <- c(model@name)
	if (length(model@submodels)) {
		ret <- c(ret, unlist(lapply(model@submodels, getAllModelNames), use.names = FALSE))
	}
	ret
}
mxRename <- function(model, newname, oldname = NA) {
	# Rename a model (by default the top-level model, otherwise the submodel
	# called 'oldname') to 'newname', rewriting every internal reference.
	warnModelCreatedByOldVersion(model)
	if (!is.character(newname)) {
		stop("'newname' argument is not a character string")
	}
	if (!(is.na(oldname) || is.character(oldname))) {
		stop("'oldname' argument is not either NA or character string")
	}
	imxVerifyName(newname, 0)
	# Default target: the top-level model itself.
	if (is.na(oldname)) {
		oldname <- model@name
	}
	# A no-op rename returns the model untouched.
	if (newname == oldname) {
		return(model)
	}
	taken <- getAllModelNames(model)
	if (newname %in% taken) {
		stop(paste("There is already a model named", omxQuotes(newname)))
	}
	propagateModelName(model, oldname, newname)
}
propagateModelName <- function(model, oldname, newname) {
	# Recursively rewrite 'oldname' to 'newname' throughout a model tree:
	# the model's own name, every component that can hold a qualified
	# reference, the expectation/fit function, all submodels, and any
	# already-computed output.
	if (model@name == oldname) {
		model@name <- newname
	}
	model@matrices <- lapply(model@matrices, renameMatrix, oldname, newname)
	model@algebras <- lapply(model@algebras, renameAlgebra, oldname, newname)
	model@constraints <- lapply(model@constraints, renameConstraint, oldname, newname)
	model@intervals <- lapply(model@intervals, renameConfidenceIntervals, oldname, newname)
	model@expectation <- genericExpRename(model@expectation, oldname, newname)
	model@fitfunction <- genericFitRename(model@fitfunction, oldname, newname)
	model@submodels <- lapply(model@submodels, propagateModelName, oldname, newname)
	model@output <- renameModelOutput(model@output, oldname, newname)
	# Re-derive the list element names, which embed the (renamed) references.
	names(model@intervals) <- imxExtractReferences(model@intervals)
	names(model@submodels) <- imxExtractNames(model@submodels)
	model
}
renameReference <- function(reference, oldname, newname) {
	# Rewrite a qualified reference of the form "model.entity" when its model
	# part equals 'oldname'.  Bare (unqualified) names are left untouched:
	# there is not enough context here to tell whether a bare name is a
	# model name or something else.
	if (length(reference) == 0 || is.na(reference)) {
		return(reference)
	}
	parts <- unlist(strsplit(reference, imxSeparatorChar, fixed = TRUE))
	if (length(parts) == 2 && parts[[1]] == oldname) {
		paste(newname, parts[[2]], sep = imxSeparatorChar)
	} else {
		reference
	}
}
renameUnqualifiedReference <- function(reference, oldname, newname) {
	# Like renameReference(), except that a bare (unqualified) name matching
	# 'oldname' is assumed to BE a model name and is rewritten as well.
	if (length(reference) == 0 || is.na(reference)) {
		return(reference)
	}
	parts <- unlist(strsplit(reference, imxSeparatorChar, fixed = TRUE))
	if (length(parts) == 2 && parts[[1]] == oldname) {
		paste(newname, parts[[2]], sep = imxSeparatorChar)
	} else if (length(parts) == 1 && parts[[1]] == oldname) {
		newname  # assume it's a model name
	} else {
		reference
	}
}
renameModelOutput <- function(output, oldname, newname) {
	# Rewrite model-name references in the row names of stored confidence
	# interval results so fitted output stays consistent after a rename.
	if (is.null(output)) {
		return(output)
	}
	# Helper shared by both CI matrices.  vapply() yields a character vector
	# directly; the original lapply() produced a list that dimnames<- had to
	# coerce, and the local 'names' shadowed base::names().
	renameRows <- function(mat) {
		dn <- dimnames(mat)
		rows <- vapply(dn[[1]], renameReference, character(1),
			       oldname, newname, USE.NAMES = FALSE)
		dimnames(mat) <- list(rows, dn[[2]])
		mat
	}
	if (!is.null(output$confidenceIntervals)) {
		output$confidenceIntervals <- renameRows(output$confidenceIntervals)
	}
	if (!is.null(output$confidenceIntervalCodes)) {
		output$confidenceIntervalCodes <- renameRows(output$confidenceIntervalCodes)
	}
	return(output)
}
renameMatrix <- function(matrix, oldname, newname) {
	# Rewrite qualified references appearing in a matrix's free-parameter
	# labels.  Assigning into labels[] keeps the dim and dimnames of the
	# labels matrix intact; the original apply(..., c(1,2)) rebuilt the
	# matrix and misbehaves on zero-extent label matrices.
	if (length(matrix@labels) > 0) {
		matrix@labels[] <- vapply(matrix@labels, renameReference, character(1),
					  oldname, newname, USE.NAMES = FALSE)
	}
	return(matrix)
}
renameConstraint <- function(constraint, oldname, newname) {
	# Update every place a constraint can mention a model name: its own
	# (possibly qualified) name, the constraint formula, and the two
	# internally generated algebra references.
	constraint@name <- renameReference(constraint@name, oldname, newname)
	constraint@formula <- renameFormula(constraint@formula, oldname, newname)
	constraint@alg1 <- renameReference(constraint@alg1, oldname, newname)
	constraint@alg2 <- renameReference(constraint@alg2, oldname, newname)
	constraint
}
renameSymbol <- function(symbol, oldname, newname) {
	# Leaf handler for formula rewriting: numeric and character literals
	# pass through unchanged; name symbols are round-tripped through
	# renameReference() and re-created as symbols.
	if (is.numeric(symbol) || is.character(symbol)) {
		return(symbol)
	}
	as.symbol(renameReference(as.character(symbol), oldname, newname))
}
renameAlgebra <- function(algebra, oldname, newname) {
	# An algebra only embeds model names inside its formula tree.
	algebra@formula <- renameFormula(algebra@formula, oldname, newname)
	algebra
}
renameConfidenceIntervals <- function(interval, oldname, newname) {
	# A confidence-interval request stores a single (possibly qualified)
	# reference to the quantity it tracks.
	interval@reference <- renameReference(interval@reference, oldname, newname)
	interval
}
renameFormula <- function(formula, oldname, newname) {
	# Recursively rewrite model-name references inside a formula/call tree.
	# Length-1 leaves are handled by renameSymbol(); for calls, the head
	# (operator/function) is kept and each argument is visited.
	len <- length(formula)
	if (len == 0) {
		stop("mxRename has reached an invalid state")
	} else if (len == 1) {
		formula <- renameSymbol(formula, oldname, newname)
	} else {
		args <- formula[-1]
		# Skip empty-symbol arguments (as.character() gives '' for them).
		# vapply() guarantees a logical vector where sapply() did not, and
		# 'args' avoids shadowing utils::tail().
		select <- vapply(args, function(x) !identical(as.character(x), ''), logical(1))
		args[select] <- lapply(args[select], renameFormula, oldname, newname)
		formula[-1] <- args
	}
	return(formula)
}
|
ram_local = function( returnvalue="ram", ram_main=NULL, ram_process=NULL ) {
  # Estimate available local RAM, or how many worker processes would fit in it.
  #   returnvalue = "ram"    -> return the free-memory estimate
  #   returnvalue = "ncores" -> floor((ram - ram_main) / ram_process),
  #                             capped at the number of cores detected
  #   ram_main    = memory reserved for the main process (same units as ram)
  #   ram_process = memory required per worker process
  # NOTE(review): units differ by platform (memory.size() reports MB, wmic
  # reports KB, `free -g` reports GB) -- confirm before mixing these values.
  out = NA_real_  # default so unsupported OSes return NA instead of failing
                  # with "object 'out' not found"
  os = Sys.info()[["sysname"]]
  if (os == "Windows"){
    x = try( memory.size(max = FALSE), silent = TRUE )
    if ( "try-error" %in% class(x) ) {
      # memory.size() unavailable (defunct in R >= 4.2): query WMI instead.
      x <- system2("wmic", args = "OS get FreePhysicalMemory /Value", stdout = TRUE)
      x <- x[grepl("FreePhysicalMemory", x)]
      x <- gsub("FreePhysicalMemory=", "", x, fixed = TRUE)
      x <- gsub("\r", "", x, fixed = TRUE)
      out = as.integer(x)
    } else {
      # BUG FIX: the original never assigned 'out' when memory.size() worked,
      # so the success path failed with "object 'out' not found".
      out = x
    }
  }
  if (os=="Linux") {
    # Parse `free -g`: the header row supplies the column names, the "Mem"
    # row the values; then pick out the "total" column.
    x = system2('free', args='-g', stdout=TRUE)
    y = x[ grepl("total", x) ]
    y = unlist( strsplit( y, "[[:space:]]+") )
    y[1] = "dummy"   # pad so header labels align with the value columns
    z = x[ grepl("Mem", x) ]
    z = unlist( strsplit( z, "[[:space:]]+") )
    z[1] = NA        # the row label "Mem:" is not a number
    z = as.numeric(z)
    names(z) = y
    # NOTE(review): this is *total* RAM, not free RAM, despite the function
    # name -- confirm this is the intended quantity.
    out = z["total"]
  }
  if (returnvalue=="ram") return (out)
  if (returnvalue=="ncores") {
    ncores_total = parallel::detectCores()
    ncores_required = floor( (out - ram_main) / ram_process )
    return( min( ncores_total, ncores_required ) )
  }
}
| /R/ram_local.R | permissive | PEDsnowcrab/aegis | R | false | false | 1,041 | r | ram_local = function( returnvalue="ram", ram_main=NULL, ram_process=NULL ) {
os = Sys.info()[["sysname"]]
if (os == "Windows"){
x = try( memory.size(max = FALSE))
if ( "try-error" %in% class(x) ) {
x <- system2("wmic", args = "OS get FreePhysicalMemory /Value", stdout = TRUE)
x <- x[grepl("FreePhysicalMemory", x)]
x <- gsub("FreePhysicalMemory=", "", x, fixed = TRUE)
x <- gsub("\r", "", x, fixed = TRUE)
out = as.integer(x)
}
}
if (os=="Linux") {
x = system2('free', args='-g', stdout=TRUE)
y = x[ grepl("total", x) ]
y = unlist( strsplit( y, "[[:space:]]+") )
y[1] = "dummy"
z = x[ grepl("Mem", x) ]
z = unlist( strsplit( z, "[[:space:]]+") )
z[1] = NA
z = as.numeric(z)
names(z) = y
out = z["total"]
}
if (returnvalue=="ram") return (out)
if (returnvalue=="ncores") {
ncores_total = parallel::detectCores()
ncores_required = floor( (out - ram_main) / ram_process )
return( min( ncores_total, ncores_required ) )
}
}
|
/16-Metodologia Box-Jenkings.R | no_license | richardsilva1/aula-16 | R | false | false | 5,624 | r | ||
# Exploratory look at activity bouts: read all bouts, parse the date/time
# columns, then search for duration/activity thresholds that isolate the
# main daily activity peaks.
# BUG FIX: times() below comes from the 'chron' package, which the original
# script used without ever loading.
library(chron)
setwd("~/Desktop")
alldogs<-read.table("~/Desktop/allbouts.txt",header=TRUE,sep="\t",stringsAsFactors = FALSE)
# Parse the character columns into proper Date / times / numeric types.
alldogs$LocalDate<-as.Date(alldogs$LocalDate,format="%d/%m/%Y")
alldogs$LocalTime<-times(alldogs$LocalTime,format="h:m:s")
alldogs$StartDate<-as.Date(alldogs$StartDate,format="%d/%m/%Y")
alldogs$StartTime<-times(alldogs$StartTime,format="h:m:s")
alldogs$SumActivity<-as.numeric(alldogs$SumActivity)
alldogs$Duration<-as.numeric(alldogs$Duration)
alldogs$StopDate<-as.Date(alldogs$StopDate,format="%d/%m/%Y")
alldogs$StopTime<-times(alldogs$StopTime,format="h:m:s")
alldogs$TimeToNextBout<-as.numeric(alldogs$TimeToNextBout)
# Overall distributions of bout duration and total activity.
hist(alldogs$Duration,breaks=100, xlim=c(1,100))
hist(alldogs$SumActivity,breaks=1000,xlim=c(1,200))
dur<-subset(alldogs$Duration, alldogs$Duration>20)
hist(dur)
sumact<-subset(alldogs$SumActivity, alldogs$SumActivity>50)
hist(sumact)
# Start-time histograms (1440 breaks ~ one per minute of the day) under
# increasingly strict duration / activity filters.
longbouts<-alldogs[(alldogs$Duration)>20, ]
hist(longbouts$StartTime,breaks=1440)
bigbouts50<-alldogs[(alldogs$SumActivity)>50, ]
hist(bigbouts50$StartTime)
hist(bigbouts50$StartTime,breaks=1440)
biglong<-bigbouts50[(bigbouts50$Duration)>20,]
hist(biglong$StartTime,breaks=1440)
bigbouts100<-alldogs[(alldogs$SumActivity)>100, ]
biglong100<-bigbouts100[(bigbouts100$Duration)>20,]
hist(biglong100$StartTime,breaks=1440)
bigbouts200<-alldogs[(alldogs$SumActivity)>200, ]
biglong200<-bigbouts200[(bigbouts200$Duration)>20,]
hist(biglong200$StartTime,breaks=1440)
bigbouts500<-alldogs[(alldogs$SumActivity)>500, ]
biglong500<-bigbouts500[(bigbouts500$Duration)>20,]
hist(biglong500$StartTime,breaks=1440)
bigbouts1000<-alldogs[(alldogs$SumActivity)>1000, ]
biglong1000<-bigbouts1000[(bigbouts1000$Duration)>20,]
hist(biglong1000$StartTime,breaks=1440)
# choose to use this one - duration>20 and sumact>1000
# gets rid of tail before second peak and can be more precise when isolating peaks
| /Archive/biglongbouts.R | no_license | Dani2107/Ben | R | false | false | 1,870 | r | setwd("~/Desktop")
alldogs<-read.table("~/Desktop/allbouts.txt",header=TRUE,sep="\t",stringsAsFactors = FALSE)
alldogs$LocalDate<-as.Date(alldogs$LocalDate,format="%d/%m/%Y")
alldogs$LocalTime<-times(alldogs$LocalTime,format="h:m:s")
alldogs$StartDate<-as.Date(alldogs$StartDate,format="%d/%m/%Y")
alldogs$StartTime<-times(alldogs$StartTime,format="h:m:s")
alldogs$SumActivity<-as.numeric(alldogs$SumActivity)
alldogs$Duration<-as.numeric(alldogs$Duration)
alldogs$StopDate<-as.Date(alldogs$StopDate,format="%d/%m/%Y")
alldogs$StopTime<-times(alldogs$StopTime,format="h:m:s")
alldogs$TimeToNextBout<-as.numeric(alldogs$TimeToNextBout)
hist(alldogs$Duration,breaks=100, xlim=c(1,100))
hist(alldogs$SumActivity,breaks=1000,xlim=c(1,200))
dur<-subset(alldogs$Duration, alldogs$Duration>20)
hist(dur)
sumact<-subset(alldogs$SumActivity, alldogs$SumActivity>50)
hist(sumact)
longbouts<-alldogs[(alldogs$Duration)>20, ]
hist(longbouts$StartTime,breaks=1440)
bigbouts50<-alldogs[(alldogs$SumActivity)>50, ]
hist(bigbouts50$StartTime)
hist(bigbouts50$StartTime,breaks=1440)
biglong<-bigbouts50[(bigbouts50$Duration)>20,]
hist(biglong$StartTime,breaks=1440)
bigbouts100<-alldogs[(alldogs$SumActivity)>100, ]
biglong100<-bigbouts100[(bigbouts100$Duration)>20,]
hist(biglong100$StartTime,breaks=1440)
bigbouts200<-alldogs[(alldogs$SumActivity)>200, ]
biglong200<-bigbouts200[(bigbouts200$Duration)>20,]
hist(biglong200$StartTime,breaks=1440)
bigbouts500<-alldogs[(alldogs$SumActivity)>500, ]
biglong500<-bigbouts500[(bigbouts500$Duration)>20,]
hist(biglong500$StartTime,breaks=1440)
bigbouts1000<-alldogs[(alldogs$SumActivity)>1000, ]
biglong1000<-bigbouts1000[(bigbouts1000$Duration)>20,]
hist(biglong1000$StartTime,breaks=1440)
# choose to use this one - duration>20 and sumact>1000
# gets rid of tail before second peak and can be more precise when isolating peaks
|
### Group Project - Maxime's code
# Setup: load packages, pull the Alloy data into a data frame, standardize
# column names, and fit an exploratory regression of unique visits on every
# other metric.
#library
library(readxl)
library(data.table)
library(ggplot2)
library(plotly)
library(moments)
library(corrplot)
library(PerformanceAnalytics)
library(rsq)
library(psych)
# NOTE(review): sourcing a script over plain http is insecure and fragile;
# consider vendoring rquery_cormat locally.
source("http://www.sthda.com/upload/rquery_cormat.r")
#convert into data frame
# NOTE(review): 'Alloy_working_data' is assumed to be already present in the
# session (e.g. imported via RStudio) -- it is not created in this script.
alloy <- as.data.frame(Alloy_working_data)
#summary of data frame
summary(alloy)
#rename columns to short snake_case names used throughout the script
names(alloy) <- c("week", "visits","unique","pageviews","pagevisits","avg_time","bounce","newvisits","revenue","profit","lbs_sold","inquiries")
#regression: unique visits explained by all other metrics
all_f <- glm(unique ~ visits+pageviews+pagevisits+avg_time+bounce+newvisits+revenue+profit+lbs_sold+inquiries, data=alloy)
summary(all_f)
#Scatter matrix of the main financial metrics
pairs(~revenue+profit+pageviews+inquiries,data=alloy,
   main="Simple Scatterplot Matrix")
#Scatter plot Q5 using Car package
library(car)
scatterplot(revenue ~ lbs_sold, data=alloy)
#Scatter plot Q5 using GGPLOT2: revenue vs. lbs_sold with a fitted line
library(ggplot2)
ggplot(alloy, aes(x=lbs_sold, y=revenue)) +
  geom_point()+
  geom_smooth(method=lm)
#Correlation of revenue with pounds sold.
# BUG FIX: the original called cor(revenue, lbs_sold) before attach(alloy),
# so neither column was in scope; qualify them explicitly instead.
cor(alloy$revenue, alloy$lbs_sold)
#Scatter plot Q6 using GGPLOT2: revenue vs. visits
ggplot(alloy, aes(x=visits, y=revenue)) +
  geom_point()+
  geom_smooth(method=lm)
#Correlation of revenue with visits.
# NOTE(review): attach() is error-prone; it is kept only because later
# sections rely on the attached column names.
attach(alloy)
cor(revenue,visits)
#Import weekly lbs. sold
# NOTE(review): 'alloy_2' is not created anywhere above this point -- it is
# presumably read in interactively; confirm before running this section.
alloy_2_df <- as.data.frame(alloy_2)
#Summary values Q8
summary(alloy_2_df$lbs_sold2)
#Histogram Q8
ggplot(alloy_2_df, aes(x=alloy_2_df$lbs_sold2)) + geom_histogram(bins = 40)
#Empirical rule check: does lbs_sold2 follow the 68/95/99 rule?
the_mean <- mean(alloy_2_df$lbs_sold2)
the_sd <- sd(alloy_2_df$lbs_sold2)
#Theoretical observation counts at 1, 2, and 3 standard deviations
lbs_obs <- length(alloy_2_df$lbs_sold2)
t1 <- round(lbs_obs * 0.68)
t2 <- round(lbs_obs * 0.95)
t3 <- round(lbs_obs * 0.99)
#Actual counts: observations strictly within mean +/- k*sd
lower_bounds <- the_mean - 1:3*the_sd
upper_bounds <- the_mean + 1:3*the_sd
one_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[1] & alloy_2_df$lbs_sold2 < upper_bounds[1])])
two_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[2] & alloy_2_df$lbs_sold2 < upper_bounds[2])])
three_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[3] & alloy_2_df$lbs_sold2 < upper_bounds[3])])
#Comparison table: theoretical vs actual counts
library(data.table)
data.table(interval = c("mean ± 1 std. dev","mean ± 2 std. dev","mean ± 3 std. dev"),
           Theoretical_Perc_of_Data = c("68", "95","99"),
           Theoretical_No_Obs = c(t1, t2, t3),
           Actual_No_Obs = c(one_sd, two_sd, three_sd))
#Actual counts in each one-sided band (upper and lower halves separately)
one_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + the_sd
                                        & alloy_2_df$lbs_sold2 > the_mean)])
# BUG FIX: the original lower-band conditions were contradictory (e.g.
# "x < mean - sd & x > mean" can never hold), so every *_low count was
# always 0.  Each lower band now mirrors its upper counterpart.
one_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - the_sd
                                        & alloy_2_df$lbs_sold2 < the_mean)])
two_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + 2*the_sd
                                        & alloy_2_df$lbs_sold2 > the_mean + the_sd)])
two_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - 2*the_sd
                                        & alloy_2_df$lbs_sold2 < the_mean - the_sd)])
three_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + 3*the_sd
                                        & alloy_2_df$lbs_sold2 > the_mean + 2*the_sd)])
three_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - 3*the_sd
                                        & alloy_2_df$lbs_sold2 < the_mean - 2*the_sd)])
#Comparison table: band-by-band theoretical vs actual counts
data.table(interval = c("mean + 1 std. dev","mean - 1 std. dev",
                        "mean + 2 std. dev","mean - 2 std. dev",
                        "mean + 3 std. dev","mean - 3 std. dev"),
           Theoretical_Perc_of_Data = c("34","34", "13.5","13.5","2","2"),
           Theoretical_No_Obs = c(t1/2, t1/2,
                                  (t2-t1)/2, (t2-t1)/2,
                                  (t3-t2)/2, (t3-t2)/2),
           Actual_No_Obs = c(one_sd_up,one_sd_low,
                             two_sd_up,two_sd_low,
                             three_sd_up,three_sd_low))
#Shape of the distribution
library(moments)
skewness(alloy_2_df$lbs_sold2)
kurtosis(alloy_2_df$lbs_sold2)
### Extra analysis ###
#Correlation matrix (rounded) of all numeric columns
round(cor(alloy[,c(2,3,4,5,6,7,8,9,10,11)]),2)
#Visual correlation heatmaps
# NOTE(review): rquery.cormat() comes from the script source()d at the top
# of this file, not from a package.
rquery.cormat(alloy[,c(2,3,4,5,6,7,8,9,10,11)], type = "full")
chart.Correlation(alloy[,c(2,3,4,5,6,7,8,9,10,11)], histogram=TRUE, pch=19)
pairs.panels(alloy[,c(2,3,4,5,6,7,8,9,10,11)])
heatmap(x = cor(alloy[,c(2,3,4,5,6,7,8,9,10,11)]), symm = TRUE)
###Other Graphs + Regression
#Profit per week
ggplot(alloy, aes( week, profit)) +
  geom_bar(stat="identity", width = 0.5, fill="green") +
  labs(title="Profit Per Week") +
  theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Visits per week
ggplot(alloy, aes(week, visits)) +
  geom_bar(stat="identity", width = 0.5, fill="blue") +
  labs(title="Unique Visit Per Week") +
  theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Revenue per week
ggplot(alloy, aes(week, revenue))+
  geom_bar(stat="identity", width = 0.5, fill="tomato2") +
  labs(title="Revenue Per Week") +
  theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Bounce rate per week
# BUG FIX: this chart plots 'bounce' but carried the copy-pasted title
# "Unique Visit Per Week" from the chart above.
ggplot(alloy, aes(week, bounce)) +
  geom_bar(stat="identity", width = 0.5, fill="blue") +
  labs(title="Bounce Rate Per Week") +
  theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Regression (scatter + lm fit) of Inquiries vs Revenue
ggplot(alloy, aes(x=inquiries, y=revenue))+
  geom_point()+stat_smooth(method=lm)
#Reload the raw data and re-apply the snake_case column names
alloy <- as.data.frame(Alloy_working_data)
summary(alloy)
names(alloy) <- c("week", "visits","unique","pageviews","pagevisits","avg_time","bounce","newvisits","revenue","profit","lbs_sold","inquiries")
# BUG FIX: the columns were just renamed to lowercase, so the original
# formula glm(Visits ~ Revenue + Profit + Pageviews + Inquiries) referenced
# columns that no longer exist in 'alloy'.
all_f <- glm(visits ~ revenue+profit+pageviews+inquiries, data=alloy)
summary(all_f)
library(ggplot2)
# NOTE(review): attach() is error-prone; kept because the plots below use
# the bare column names.
attach(alloy)
# Dark-themed area chart of unique visits per week.
# NOTE(review): element_text(family = 'bold.italic', ...) passes a font
# *face* name as a font family -- confirm the intended look (face= is the
# usual argument for bold italic).
ggplot(alloy, aes(week, unique,group=4)) +
  geom_line(color = 'cyan') +
  geom_area(fill = 'cyan', alpha = .3) +
  labs(x = 'Week'
       , y = 'Unique'
       , title = "Unique Visit per week") +
  scale_x_discrete(breaks = levels(alloy$week)[5])+
  theme(axis.text.x = element_text(angle=90, vjust=0.8))+
  theme(text = element_text(family = 'bold.italic', color = "#444444")
        ,panel.background = element_rect(fill = '#444B5A')
        ,panel.grid.minor = element_line(color = '#4d5566')
        ,panel.grid.major = element_line(color = '#586174')
        ,plot.title = element_text(size = 28)
        ,axis.title = element_text(size = 18, color = '#555555')
        ,axis.title.y = element_text(vjust = 1, angle = 0)
        ,axis.title.x = element_text(hjust = 0)
  )
# Same dark-themed area chart, for weekly revenue.
ggplot(alloy, aes(week, revenue,group=4)) +
  geom_line(color = 'green') +
  geom_area(fill = 'green', alpha = .3) +
  labs(x = 'Week'
       , y = 'Revenue'
       , title = "Revenue per week") +
  scale_x_discrete(breaks = levels(alloy$week)[5])+
  theme(axis.text.x = element_text(angle=90, vjust=0.8))+
  theme(text = element_text(family = 'bold.italic', color = "#444444")
        ,panel.background = element_rect(fill = '#444B5A')
        ,panel.grid.minor = element_line(color = '#4d5566')
        ,panel.grid.major = element_line(color = '#586174')
        ,plot.title = element_text(size = 28)
        ,axis.title = element_text(size = 18, color = '#555555')
        ,axis.title.y = element_text(vjust = 1, angle = 0)
        ,axis.title.x = element_text(hjust = 0)
  )
library(ggplot2)
#import different tabs of the same Excel workbook
# Sheet 1: weekly web metrics; sheet 2: weekly lbs sold; sheet 3: daily
# visits; sheet 4 holds several small summary tables at fixed cell ranges.
weeklyvisits<-read_excel("Alloy - working data.xls",sheet=1,col_names = TRUE,range = "A5:L67")
head(weeklyvisits)
lbssold <- read_excel("Alloy - working data.xls",sheet=2,col_names = TRUE,range ="A5:B295")
head(lbssold)
dailyvisits<-read_excel("Alloy - working data.xls",sheet=3,col_names = TRUE,range ="A5:B2467")
head(dailyvisits)
referingsites<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B7:C11")
head(referingsites)
leadssites<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B14:C24")
head(leadssites)
searchengine<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B27:C37")
head(searchengine)
geo<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B40:C50")
head(geo)
browsers<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B54:C64")
head(browsers)
operatingsystem<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B68:C78")
head(operatingsystem)
#Q5
# Change the point size, and shape
ggplot(alloy, aes(x=lbs_sold, y=revenue)) +
  geom_point(size=1.2, color='black', shape=23)+geom_smooth(method=lm, se=FALSE)
#Q6
ggplot(alloy, aes(x=visits, y=revenue)) +
  geom_point(size=1.2, color='brown', shape=23)+geom_smooth(method=lm, se=FALSE)
#Q7
# NOTE(review): alloy was already attached above, so this second attach()
# triggers a masking warning; summary() on a single correlation value adds
# nothing over printing it directly.
attach(alloy)
cor_1 <- cor(revenue,visits, method = "pearson")
summary(cor_1)
#Q8
#a) lbs-sold summary
alloy_2 <- as.data.frame(alloy_2)
summary(alloy_2$lbs_sold2)
#b) histogram
library(ggplot2)
h<-ggplot(alloy_2, aes(x=lbs_sold2)) +
  geom_histogram(color="black", fill="white")
h
#c) proportion of observations within mean +/- k*sd
the.mean = mean(alloy_2$lbs_sold2)
the.sd = sd(alloy_2$lbs_sold2)
lower.bounds = the.mean - 1:3*the.sd
upper.bounds = the.mean + 1:3*the.sd
# BUG FIX: one.sd was missing the mean() wrapper its siblings have, so it
# was a raw logical vector instead of a proportion.
one.sd = mean(alloy_2$lbs_sold2 > lower.bounds[1] & alloy_2$lbs_sold2 < upper.bounds[1])
two.sd = mean(alloy_2$lbs_sold2 > lower.bounds[2] & alloy_2$lbs_sold2 < upper.bounds[2])
three.sd = mean(alloy_2$lbs_sold2 > lower.bounds[3] & alloy_2$lbs_sold2 < upper.bounds[3])
#Lead_sites graph
# BUG FIX: these six plots referenced objects this script never creates
# (Lead_sites, Geographical_frequency, Website_Frequency,
# Browser_Distribution, OS_Distribution) and, in two cases, a bare 'Visits'.
# Each aesthetic now pulls from the data frame actually loaded above;
# column 1 is the category label and column 2 the visit count.
g<- ggplot(leadssites,aes(x=leadssites$...1, y=leadssites[[2]], fill=leadssites$...1)) +
  geom_bar(stat="identity")+theme_minimal() + guides(fill = guide_legend(reverse=TRUE))+
  xlab("") + ylab("") +
  ggtitle("Visit Per Leads") +labs(fill = "Leads") + scale_x_discrete(breaks = levels(leadssites$...1)[5])+
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=70, vjust = 0.8))
g
#Geographical frequency
h<-ggplot(geo,aes(x=geo[[1]], y=geo[[2]], fill=geo[[1]])) +
  geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
  xlab("") + ylab("") +ggtitle("Visit per Geo") + labs(fill = "Geo")+
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=90, vjust=0.8))
h
#Search engine frequency
i<-ggplot(searchengine,aes(x=searchengine[[1]], y=searchengine[[2]], fill=searchengine[[1]])) +
  geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
  xlab("") + ylab("") +ggtitle("Visit per Top 10 Web") + labs(fill = "Website")+
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=90, vjust=0.8))
i
# Browser distribution
j<-ggplot(browsers,aes(x=browsers[[1]], y=browsers[[2]], fill=browsers[[1]])) +
  geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
  xlab("") + ylab("") +ggtitle("Visit per Browser") + labs(fill = "Browser")+
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=-90, vjust=0.5,hjust=0))
j
#OS distribution
k<-ggplot(operatingsystem,aes(x=operatingsystem[[1]], y=operatingsystem[[2]], fill=operatingsystem[[1]])) +
  geom_bar(stat="identity")+theme_minimal() +
  xlab("") + ylab("") +ggtitle("Visit Per Device") + guides(fill = guide_legend(reverse=TRUE)) + labs(fill = "OS")+
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=90, vjust=0.8))
k
# Visits Source
# BUG FIX: the original chain ended at labs()/scale_x_discrete() without a
# trailing '+', so the theme() call on the following line was evaluated on
# its own and silently discarded.
l<-ggplot(referingsites,aes(x=referingsites$...1, y=referingsites[[2]], fill=referingsites$...1)) +
  geom_bar(stat="identity")+theme_minimal() + guides(fill = guide_legend(reverse=TRUE)) +
  xlab("") + ylab("") + ggtitle("Source of Visits") + labs(fill = "Visits") + scale_x_discrete(breaks = levels(referingsites$...1)[5]) +
  theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
        axis.text.x = element_text(angle=90, vjust=0.8))
l
#Dapo file
#install the packages if necessary
if(!require("tidyverse")) install.packages("tidyverse")
if(!require("fs")) install.packages("fs")
if(!require("readxl")) install.packages("readxl")
#load packages
library(tidyverse)
library(fs)
library(readxl)
library(ggplot2)
#import weekly visit sheet
weekly<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=2,col_names = TRUE,range = "A5:H71")
summary(weekly)
#import Financial sheet
financial<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=3,col_names = TRUE,range = "A5:E71")
summary(financial)
#merge the two sheets on their shared key column(s)
mydf<-merge(weekly,financial,sort=FALSE)
mydf
summary(mydf)
head(mydf)
tail(mydf)
#importing daily visit sheet
# NOTE(review): this overwrites the 'dailyvisits' object created earlier in
# the file from a different workbook -- confirm that is intended.
dailyvisits<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=5,col_names = TRUE,range = "A5:B467")
head(dailyvisits)
#Adding a new column (cost = Revenue - Profit) to mydf
# Vectorized: the original 1:nrow(mydf) element-by-element loop is slower
# and unsafe when mydf has zero rows (1:0 iterates over c(1, 0)).
mydf$cost <- mydf$Revenue - mydf$Profit
head(mydf)
#subsetting the Weekly means for the time periods
time_period<-c("May 25 to Aug 30, 2008","Aug 21 to Jan 24, 2009", "Jan 25 to May 23, 2009", "May 24 to Aug 29, 2009")
# Helper: mean of a weekly series within each of the four promotion periods
# (weeks 1-14, 15-35, 36-52, 53-66).  Replaces twelve copies of the same
# four-mean expression.
period_means <- function(v) {
  c(mean(v[1:14]), mean(v[15:35]), mean(v[36:52]), mean(v[53:66]))
}
mydf_visits <- period_means(mydf$Visits)
mydf_uniquevisits <- period_means(mydf$`Unique Visits`)
mydf_pageviews <- period_means(mydf$Pageviews)
mydf_pagespervisit <- period_means(mydf$`Pages/Visit`)
mydf_timeonsite <- period_means(mydf$`Avg. Time on Site (secs.)`)
mydf_bounce <- period_means(mydf$`Bounce Rate`)
mydf_revenue <- period_means(mydf$Revenue)
mydf_profit <- period_means(mydf$Profit)
mydf_lbsold <- period_means(mydf$`Lbs. Sold`)
mydf_inquiries <- period_means(mydf$Inquiries)
mydf_newvisit <- period_means(mydf$`% New Visits`)
mydf_cost <- period_means(mydf$cost)
# Print each period-mean vector for inspection.
mydf_visits
mydf_uniquevisits
mydf_pageviews
mydf_pagespervisit
mydf_timeonsite
mydf_bounce
mydf_revenue
mydf_profit
mydf_lbsold
mydf_inquiries
mydf_newvisit
mydf_cost
# Matrix of the periodic weekly means: one row per metric, one column per
# period; transposed so that rows become the four periods.
Values <- matrix(c(mydf_visits, mydf_uniquevisits,mydf_pageviews,mydf_pagespervisit,mydf_timeonsite,
                   mydf_bounce,mydf_revenue,mydf_profit,mydf_lbsold,mydf_inquiries,mydf_newvisit,mydf_cost ), nrow = 12, ncol = 4, byrow = TRUE)
Values<-t(Values)
Values
# Input vectors: bar colors, period labels, and one title per metric.
colors = c("green","orange","brown","blue","red","purple","yellow","violet","pink","grey",'black')
months <- c("Initial period","Pre-promotion","Promotion","Post-promotion")
regions <- c("Visits","Unique Visits","Pageviews","Pages/Visit","Avg. Time on Site (secs.)","Bounce Rate","Revenue",
             "Profit","Lbs. Sold","Inquiries","% New Visits",'cost')
# BUG FIX: the original opened a single-page "barchart_stacked.png" for
# twelve plots, so each chart overwrote the previous one, and the device
# was never closed with dev.off().  The %02d pattern writes one file per
# chart and the device is closed at the end.
png(file = "barchart_stacked%02d.png")
# One bar chart per metric; the four bars are the four periods.
for(i in seq_len(ncol(Values))) {
  barplot(Values[,i], main = regions[i], names.arg = months, xlab = "Time Period", ylab = regions[i], col = colors)
}
# Legend mapping the date ranges of the four periods to the first four
# colors (drawn on the last chart, before the device closes).
legend("bottomleft", time_period, cex = 1.3, fill = colors)
dev.off()
| /Quality Alloy Analysis.R | no_license | Maxlev3/Business-Case-Analysis-Quality-Alloy | R | false | false | 16,487 | r | ### Group Project - Maxime's code
#library
# Setup: load packages, pull the Alloy data into a data frame, standardize
# column names, and fit an exploratory regression of unique visits on every
# other metric.
library(readxl)
library(data.table)
library(ggplot2)
library(plotly)
library(moments)
library(corrplot)
library(PerformanceAnalytics)
library(rsq)
library(psych)
# NOTE(review): sourcing a script over plain http is insecure and fragile;
# consider vendoring rquery_cormat locally.
source("http://www.sthda.com/upload/rquery_cormat.r")
#convert into data frame
# NOTE(review): 'Alloy_working_data' is assumed to be already present in the
# session (e.g. imported via RStudio) -- it is not created in this script.
alloy <- as.data.frame(Alloy_working_data)
#summary of data frame
summary(alloy)
#rename columns to short snake_case names used throughout the script
names(alloy) <- c("week", "visits","unique","pageviews","pagevisits","avg_time","bounce","newvisits","revenue","profit","lbs_sold","inquiries")
#regression: unique visits explained by all other metrics
all_f <- glm(unique ~ visits+pageviews+pagevisits+avg_time+bounce+newvisits+revenue+profit+lbs_sold+inquiries, data=alloy)
summary(all_f)
#Scatter matrix of the main financial metrics
pairs(~revenue+profit+pageviews+inquiries,data=alloy,
   main="Simple Scatterplot Matrix")
#Scatter plot Q5 using Car package
library(car)
scatterplot(revenue ~ lbs_sold, data=alloy)
#Scatter plot Q5 using GGPLOT2: revenue vs. lbs_sold with a fitted line
library(ggplot2)
ggplot(alloy, aes(x=lbs_sold, y=revenue)) +
  geom_point()+
  geom_smooth(method=lm)
#Correlation of revenue with pounds sold.
# BUG FIX: the original called cor(revenue, lbs_sold) before attach(alloy),
# so neither column was in scope; qualify them explicitly instead.
cor(alloy$revenue, alloy$lbs_sold)
#Scatter plot Q6 using GGPLOT2: revenue vs. visits
ggplot(alloy, aes(x=visits, y=revenue)) +
  geom_point()+
  geom_smooth(method=lm)
#Correlation of revenue with visits.
# NOTE(review): attach() is error-prone; it is kept only because later
# sections rely on the attached column names.
attach(alloy)
cor(revenue,visits)
#Import weekly lbs. sold
# NOTE(review): 'alloy_2' is not created anywhere above this point -- it is
# presumably read in interactively; confirm before running this section.
alloy_2_df <- as.data.frame(alloy_2)
#Summary values Q8
summary(alloy_2_df$lbs_sold2)
#Histogram Q8
ggplot(alloy_2_df, aes(x=alloy_2_df$lbs_sold2)) + geom_histogram(bins = 40)
#Empirical rule check: does lbs_sold2 follow the 68/95/99 rule?
the_mean <- mean(alloy_2_df$lbs_sold2)
the_sd <- sd(alloy_2_df$lbs_sold2)
#Theoretical observation counts at 1, 2, and 3 standard deviations
lbs_obs <- length(alloy_2_df$lbs_sold2)
t1 <- round(lbs_obs * 0.68)
t2 <- round(lbs_obs * 0.95)
t3 <- round(lbs_obs * 0.99)
#Actual counts: observations strictly within mean +/- k*sd
lower_bounds <- the_mean - 1:3*the_sd
upper_bounds <- the_mean + 1:3*the_sd
one_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[1] & alloy_2_df$lbs_sold2 < upper_bounds[1])])
two_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[2] & alloy_2_df$lbs_sold2 < upper_bounds[2])])
three_sd <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > lower_bounds[3] & alloy_2_df$lbs_sold2 < upper_bounds[3])])
#Comparison table: theoretical vs actual counts
library(data.table)
data.table(interval = c("mean ± 1 std. dev","mean ± 2 std. dev","mean ± 3 std. dev"),
           Theoretical_Perc_of_Data = c("68", "95","99"),
           Theoretical_No_Obs = c(t1, t2, t3),
           Actual_No_Obs = c(one_sd, two_sd, three_sd))
#Actual values with ups and lows
#Observation counts within each one-standard-deviation band around the mean.
one_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + the_sd
                                               & alloy_2_df$lbs_sold2 > the_mean)])
# BUG FIX: every "low" band originally used contradictory bounds
# (e.g. x < mean - sd AND x > mean), which can never both hold, so all
# below-the-mean bands silently counted zero observations.
one_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - the_sd
                                                & alloy_2_df$lbs_sold2 < the_mean)])
two_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + 2*the_sd
                                               & alloy_2_df$lbs_sold2 > the_mean + the_sd)])
two_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - 2*the_sd
                                                & alloy_2_df$lbs_sold2 < the_mean - the_sd)])
three_sd_up <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 < the_mean + 3*the_sd
                                                 & alloy_2_df$lbs_sold2 > the_mean + 2*the_sd)])
three_sd_low <- length(alloy_2_df$lbs_sold2[which(alloy_2_df$lbs_sold2 > the_mean - 3*the_sd
                                                  & alloy_2_df$lbs_sold2 < the_mean - 2*the_sd)])
#Comparison table
data.table(interval = c("mean + 1 std. dev","mean - 1 std. dev",
"mean + 2 std. dev","mean - 2 std. dev",
"mean + 3 std. dev","mean - 3 std. dev"),
Theoretical_Perc_of_Data = c("34","34", "13.5","13.5","2","2"),
Theoretical_No_Obs = c(t1/2, t1/2,
(t2-t1)/2, (t2-t1)/2,
(t3-t2)/2, (t3-t2)/2),
Actual_No_Obs = c(one_sd_up,one_sd_low,
two_sd_up,two_sd_low,
three_sd_up,three_sd_low))
library(moments)
skewness(alloy_2_df$lbs_sold2)
kurtosis(alloy_2_df$lbs_sold2)
### Extra analysis ###
#Heatmaps
round(cor(alloy[,c(2,3,4,5,6,7,8,9,10,11)]),2)
#Visual heatmaps
rquery.cormat(alloy[,c(2,3,4,5,6,7,8,9,10,11)], type = "full")
chart.Correlation(alloy[,c(2,3,4,5,6,7,8,9,10,11)], histogram=TRUE, pch=19)
pairs.panels(alloy[,c(2,3,4,5,6,7,8,9,10,11)])
heatmap(x = cor(alloy[,c(2,3,4,5,6,7,8,9,10,11)]), symm = TRUE)
###Other Graphs + Regression
#Profit per week
ggplot(alloy, aes( week, profit)) +
geom_bar(stat="identity", width = 0.5, fill="green") +
labs(title="Profit Per Week") +
theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Visits per week
ggplot(alloy, aes(week, visits)) +
geom_bar(stat="identity", width = 0.5, fill="blue") +
labs(title="Unique Visit Per Week") +
theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Revenue per week
ggplot(alloy, aes(week, revenue))+
geom_bar(stat="identity", width = 0.5, fill="tomato2") +
labs(title="Revenue Per Week") +
theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Bounce rate per week
ggplot(alloy, aes(week, bounce)) +
geom_bar(stat="identity", width = 0.5, fill="blue") +
labs(title="Unique Visit Per Week") +
theme(axis.text.x = element_text(angle=90, vjust=0.8))
#Regression of Inquiries vs Revenue
ggplot(alloy, aes(x=inquiries, y=revenue))+
geom_point()+stat_smooth(method=lm)
alloy <- as.data.frame(Alloy_working_data)
summary(alloy)
names(alloy) <- c("week", "visits","unique","pageviews","pagevisits","avg_time","bounce","newvisits","revenue","profit","lbs_sold","inquiries")
all_f <- glm(Visits ~ Revenue+Profit+Pageviews+Inquiries, data=alloy)
summary(all_f)
library(ggplot2)
attach(alloy)
ggplot(alloy, aes(week, unique,group=4)) +
geom_line(color = 'cyan') +
geom_area(fill = 'cyan', alpha = .3) +
labs(x = 'Week'
, y = 'Unique'
, title = "Unique Visit per week") +
scale_x_discrete(breaks = levels(alloy$week)[5])+
theme(axis.text.x = element_text(angle=90, vjust=0.8))+
theme(text = element_text(family = 'bold.italic', color = "#444444")
,panel.background = element_rect(fill = '#444B5A')
,panel.grid.minor = element_line(color = '#4d5566')
,panel.grid.major = element_line(color = '#586174')
,plot.title = element_text(size = 28)
,axis.title = element_text(size = 18, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 0)
,axis.title.x = element_text(hjust = 0)
)
ggplot(alloy, aes(week, revenue,group=4)) +
geom_line(color = 'green') +
geom_area(fill = 'green', alpha = .3) +
labs(x = 'Week'
, y = 'Revenue'
, title = "Revenue per week") +
scale_x_discrete(breaks = levels(alloy$week)[5])+
theme(axis.text.x = element_text(angle=90, vjust=0.8))+
theme(text = element_text(family = 'bold.italic', color = "#444444")
,panel.background = element_rect(fill = '#444B5A')
,panel.grid.minor = element_line(color = '#4d5566')
,panel.grid.major = element_line(color = '#586174')
,plot.title = element_text(size = 28)
,axis.title = element_text(size = 18, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 0)
,axis.title.x = element_text(hjust = 0)
)
library(ggplot2)
#import different tabs in the same excel sheet
weeklyvisits<-read_excel("Alloy - working data.xls",sheet=1,col_names = TRUE,range = "A5:L67")
head(weeklyvisits)
lbssold <- read_excel("Alloy - working data.xls",sheet=2,col_names = TRUE,range ="A5:B295")
head(lbssold)
dailyvisits<-read_excel("Alloy - working data.xls",sheet=3,col_names = TRUE,range ="A5:B2467")
head(dailyvisits)
referingsites<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B7:C11")
head(referingsites)
leadssites<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B14:C24")
head(leadssites)
searchengine<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B27:C37")
head(searchengine)
geo<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B40:C50")
head(geo)
browsers<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B54:C64")
head(browsers)
operatingsystem<-read_excel("Alloy - working data.xls",sheet=4,col_names = TRUE,range ="B68:C78")
head(operatingsystem)
#Q5
# Change the point size, and shape
ggplot(alloy, aes(x=lbs_sold, y=revenue)) +
geom_point(size=1.2, color='black', shape=23)+geom_smooth(method=lm, se=FALSE)
#Q6
ggplot(alloy, aes(x=visits, y=revenue)) +
geom_point(size=1.2, color='brown', shape=23)+geom_smooth(method=lm, se=FALSE)
#Q7
attach(alloy)
cor_1 <- cor(revenue,visits, method = "pearson")
summary(cor_1)
#Q8
#a)
alloy_2 <- as.data.frame(alloy_2)
summary(alloy_2$lbs_sold2)
#b)
library(ggplot2)
h<-ggplot(alloy_2, aes(x=lbs_sold2)) +
geom_histogram(color="black", fill="white")
h
#c)
the.mean = mean(alloy_2$lbs_sold2)
the.sd = sd(alloy_2$lbs_sold2)
lower.bounds = the.mean - 1:3*the.sd
upper.bounds = the.mean + 1:3*the.sd
one.sd = (alloy_2$lbs_sold2 > lower.bounds[1] & alloy_2$lbs_sold2 < upper.bounds[1])
two.sd = mean(alloy_2$lbs_sold2 > lower.bounds[2] & alloy_2$lbs_sold2 < upper.bounds[2])
three.sd = mean(alloy_2$lbs_sold2 > lower.bounds[3] & alloy_2$lbs_sold2 < upper.bounds[3])
#Lead_sites graph
g<- ggplot(leadssites,aes(x=leadssites$...1, y=Lead_sites$Visits, fill=leadssites$...1)) +
geom_bar(stat="identity")+theme_minimal() + guides(fill = guide_legend(reverse=TRUE))+
xlab("") + ylab("") +
ggtitle("Visit Per Leads") +labs(fill = "Leads") + scale_x_discrete(breaks = levels(leadssites$...1)[5])+
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=70, vjust = 0.8))
g
#Geographical_frequency
h<-ggplot(geo,aes(x=Geographical_frequency$`Geographical Frequency`, y=Geographical_frequency$Visits, fill=Geographical_frequency$`Geographical Frequency`)) +
geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
xlab("") + ylab("") +ggtitle("Visit per Geo") + labs(fill = "Geo")+
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=90, vjust=0.8))
h
#Search Engine _Frequency
i<-ggplot(searchengine,aes(x=Website_Frequency$Website, y=Website_Frequency$Visits, fill=Website_Frequency$Website)) +
geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
xlab("") + ylab("") +ggtitle("Visit per Top 10 Web") + labs(fill = "Website")+
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=90, vjust=0.8))
i
# Browser_Distribution
j<-ggplot(browsers,aes(x=Browser_Distribution$`Browser Distribution`, y=Browser_Distribution$Visits, fill=Browser_Distribution$`Browser Distribution`)) +
geom_bar(stat="identity")+theme_minimal()+ guides(fill = guide_legend(reverse=TRUE)) +
xlab("") + ylab("") +ggtitle("Visit per Browser") + labs(fill = "Browser")+
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=-90, vjust=0.5,hjust=0))
j
#OS_Distrubution
k<-ggplot(operatingsystem,aes(x=OS_Distribution$`OS Distribution`, y=Visits, fill=OS_Distribution$`OS Distribution`)) +
geom_bar(stat="identity")+theme_minimal() +
xlab("") + ylab("") +ggtitle("Visit Per Device") + guides(fill = guide_legend(reverse=TRUE)) + labs(fill = "OS")+
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=90, vjust=0.8))
k
# Visits Source
l<-ggplot(referingsites,aes(x=referingsites$...1, y=Visits, fill=referingsites$...1)) +
geom_bar(stat="identity")+theme_minimal() + guides(fill = guide_legend(reverse=TRUE)) +
xlab("") + ylab("") + ggtitle("Source of Visits") + labs(fill = "Visits") + scale_x_discrete(breaks = levels(referingsites$...1)[5])
theme(plot.title = element_text(color="grey", size=14, face="bold.italic"),
axis.text.x = element_text(angle=90, vjust=0.8))
l
#Dapo file
#install the packages if necessary
if(!require("tidyverse")) install.packages("tidyverse")
if(!require("fs")) install.packages("fs")
if(!require("readxl")) install.packages("readxl")
#load packages
library(tidyverse)
library(fs)
library(readxl)
library(ggplot2)
#import weekly visit sheet
weekly<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=2,col_names = TRUE,range = "A5:H71")
summary(weekly)
#import Financial sheet
financial<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=3,col_names = TRUE,range = "A5:E71")
summary(financial)
#merging the sheets
mydf<-merge(weekly,financial,sort=FALSE)
mydf
summary(mydf)
head(mydf)
tail(mydf)
#importing daily visit sheet
dailyvisits<-read_excel("Web Analytics Case Student Spreadsheet-2.xls",sheet=5,col_names = TRUE,range = "A5:B467")
head(dailyvisits)
#Adding a new column (cost=Revenue-Profit) to mydf
mydf$Revenue
mydf$Profit
# Derived column: cost = Revenue - Profit. Vectorized subtraction gives the
# same per-row result as the original row-wise for loop, without the loop.
mydf$cost <- mydf$Revenue - mydf$Profit
head(mydf)
#subsetting the Weekly means for the time periods
time_period<-c("May 25 to Aug 30, 2008","Aug 21 to Jan 24, 2009", "Jan 25 to May 23, 2009", "May 24 to Aug 29, 2009")
# Weekly means of each metric within the four marketing time periods
# (rows 1-14, 15-35, 36-52 and 53-66 of mydf). The helper replaces twelve
# copy-pasted c(mean(...), ...) lines while producing identical vectors.
period_rows <- list(1:14, 15:35, 36:52, 53:66)
period_means <- function(v) vapply(period_rows, function(idx) mean(v[idx]), numeric(1))
mydf_visits        <- period_means(mydf$Visits)
mydf_uniquevisits  <- period_means(mydf$`Unique Visits`)
mydf_pageviews     <- period_means(mydf$Pageviews)
mydf_pagespervisit <- period_means(mydf$`Pages/Visit`)
mydf_timeonsite    <- period_means(mydf$`Avg. Time on Site (secs.)`)
mydf_bounce        <- period_means(mydf$`Bounce Rate`)
mydf_revenue       <- period_means(mydf$Revenue)
mydf_profit        <- period_means(mydf$Profit)
mydf_lbsold        <- period_means(mydf$`Lbs. Sold`)
mydf_inquiries     <- period_means(mydf$Inquiries)
mydf_newvisit      <- period_means(mydf$`% New Visits`)
mydf_cost          <- period_means(mydf$cost)
mydf_visits
mydf_uniquevisits
mydf_pageviews
mydf_pagespervisit
mydf_timeonsite
mydf_bounce
mydf_revenue
mydf_profit
mydf_lbsold
mydf_inquiries
mydf_newvisit
mydf_cost
# Create the matrix of the periodic weekly means values.
Values <- matrix(c(mydf_visits, mydf_uniquevisits,mydf_pageviews,mydf_pagespervisit,mydf_timeonsite,
mydf_bounce,mydf_revenue,mydf_profit,mydf_lbsold,mydf_inquiries,mydf_newvisit,mydf_cost ), nrow = 12, ncol = 4, byrow = TRUE)
Values<-t(Values)
Values
# Create the input vectors.
colors = c("green","orange","brown","blue","red","purple","yellow","violet","pink","grey",'black')
months <- c("Initial period","Pre-promotion","Promotion","Post-promotion")
regions <- c("Visits","Unique Visits","Pageviews","Pages/Visit","Avg. Time on Site (secs.)","Bounce Rate","Revenue",
"Profit","Lbs. Sold","Inquiries","% New Visits",'cost')
# Give the chart file a name
png(file = "barchart_stacked.png")
#creating loop for graphs
for(i in 1:ncol(Values)) {
barplot(Values[,i], main = regions[i], names.arg = months, xlab = "Time Period", ylab = regions[i], col = colors)
}
# Add the legend to the chart
legend("bottomleft", time_period, cex = 1.3, fill = colors)
|
insertSql <- function(myDataFrame, myTableName) {
  # Build a multi-row SQL INSERT statement of the form:
  #   INSERT INTO <table> (col1,col2) VALUES('v11','v12'),('v21','v22')
  #
  # @param myDataFrame data.frame whose rows become VALUES tuples.
  # @param myTableName Target table name (string).
  # @return A single character string containing the full statement.
  #
  # WARNING: values are single-quoted but NOT escaped; do not feed this
  # untrusted input (SQL injection / broken quoting risk).
  line1 <- paste("INSERT INTO", myTableName, "(")
  cols <- colnames(myDataFrame)
  line2 <- paste(cols, collapse = ",")
  line3 <- paste(") VALUES")
  nr <- nrow(myDataFrame)
  nc <- ncol(myDataFrame)
  # Start each row fragment with its first column: "('v1'".
  line4_1 <- sprintf("('%s'", myDataFrame[seq_len(nr), 1])
  # BUG FIX: the original looped `for (i in 2:nc)`; when nc == 1 that is
  # 2:1 == c(2, 1) and indexes a nonexistent second column. Guard first.
  if (nc >= 2) {
    for (i in 2:nc) {
      line4_1 <- paste0(line4_1, ",'", myDataFrame[seq_len(nr), i], "'")
    }
  }
  line4 <- paste0(line4_1, ")", collapse = ",")
  paste0(line1, line2, line3, line4)
}
myDataFrame <-data.frame(id=1:5,name=c("苹果","香蕉","梨子","玉米","西瓜"),price=c(8.8,4.98,7.8,6,2.1),status=c("无","打折","无","售罄","批发"))
strSql <- insertSql(myDataFrame, "test")
print(strSql)
| /genInserSqlFromDataFrame -V2.R | no_license | liango2/lianxi-rstat | R | false | false | 1,096 | r | insertSql <- function(myDataFrame, myTableName){
# INSERT INTO test (
# id, name, price, status
# ) VALUES
# (id, name, price, status)
# ,(id, name, price, status)
line1 <- paste("INSERT INTO", myTableName, "(")
cols <- colnames(myDataFrame)
line2 <- paste(cols, collapse=",")
line3 <- paste(") VALUES")
#line4_0 <- sprintf("('%s', '%s', '%s', '%s')", myDataFrame[2:5, 1], myDataFrame[1:5, 2], myDataFrame[1:5, 3], myDataFrame[1:5, 4])
nr <- nrow(myDataFrame)
nc <- ncol(myDataFrame)
line4_1 <- sprintf("('%s'", myDataFrame[1:nr, 1])
#
for(i in 2:nc) {
# line4_1 <- sprintf(paste0(line4_1, ",'%s'"), myDataFrame[1:nr, i])
line4_1 <-paste0(line4_1, ",'",myDataFrame[1:nr, i], "'")
}
line4 <- paste0(line4_1, ")", collapse = ",")
lines <- paste0(line1, line2, line3, line4)
return(lines)
}
myDataFrame <-data.frame(id=1:5,name=c("苹果","香蕉","梨子","玉米","西瓜"),price=c(8.8,4.98,7.8,6,2.1),status=c("无","打折","无","售罄","批发"))
strSql <- insertSql(myDataFrame, "test")
print(strSql)
|
#'
#'
#' ----------------- Additional Runit Utilities -----------------
#'
#'
read.zip <-
function(zipfile, exdir, header = TRUE) {
  # Extract `zipfile` into `exdir` and read the first extracted entry as CSV.
  #
  # @param zipfile Path to the .zip archive.
  # @param exdir   Directory to extract into.
  # @param header  Passed through to read.csv (default TRUE, was `T`).
  # @return A data.frame from read.csv.
  # NOTE: only the first file (in list.files order) is read; multi-file
  # archives silently ignore the rest.
  zipdir <- exdir
  unzip(zipfile, exdir = zipdir)
  files <- list.files(zipdir)
  file <- file.path(zipdir, files[1])  # file.path is portable vs paste(sep="/")
  read.csv(file, header = header)
}
sandbox<-
function() {
# Create a per-test "Rsandbox_<testname>" directory under the cwd, wipe its
# stale command/error logs, and start h2o REST logging into it.
# The test name comes from the "-f <file>" command-line argument; when the
# session is interactive (no -f), a placeholder name is used instead.
test_name <- R.utils::commandArgs(asValues=TRUE)$"f"
if (is.null(test_name)) {
test_name <- paste(getwd(), "r_command_line", sep="/")
}
Rsandbox <- paste("./Rsandbox_", basename(test_name), sep = "")
dir.create(Rsandbox, showWarnings = FALSE)
commandsLog <- paste(Rsandbox, "/commands.log", sep = "")
errorsLog <- paste(Rsandbox, "/errors.log", sep = "")
# Remove leftovers from a previous run so each test starts with clean logs.
if(file.exists(commandsLog)) file.remove(commandsLog)
if(file.exists(errorsLog)) file.remove(errorsLog)
h2o.startLogging(paste(Rsandbox, "/rest.log", sep = ""))
}
# Leveled logging helpers; all funnel through logging(), which timestamps.
Log.info<-
function(m) {
message <- paste("[INFO] : ",m,sep="")
logging(message)
}
Log.warn<-
function(m) {
logging(paste("[WARN] : ",m,sep=""))
# traceback() shows where the warning originated during test debugging.
traceback()
}
Log.err<-
function(m) {
logging(paste("[ERROR] : ",m,sep=""))
logging("[ERROR] : TEST FAILED")
traceback()
}
logging<-
function(m) {
# Single point of output: "[<timestamp>] <message>" to stdout.
cat(sprintf("[%s] %s\n", Sys.time(),m))
}
# ASCII-art result banners printed at the end of a test run so the outcome
# is unmistakable in console/CI output. The art strings are load-bearing
# output; do not reformat them.
PASS_BANNER<-
function() {
cat("\n")
cat("######## ### ###### ###### \n")
cat("## ## ## ## ## ## ## ##\n")
cat("## ## ## ## ## ## \n")
cat("######## ## ## ###### ###### \n")
cat("## ######### ## ##\n")
cat("## ## ## ## ## ## ##\n")
cat("## ## ## ###### ###### \n")
cat("\n")
}
FAIL_BANNER<-
function() {
cat("\n")
cat("######## ### #### ## \n")
cat("## ## ## ## ## \n")
cat("## ## ## ## ## \n")
cat("###### ## ## ## ## \n")
cat("## ######### ## ## \n")
cat("## ## ## ## ## \n")
cat("## ## ## #### ######## \n")
cat("\n")
}
# Test outcome handlers. PASS/FAIL/SKIP terminate the R process via q(),
# using the exit code as the signal to the test harness:
#   0 = pass, 1 = fail, 42 = skipped.
PASS<-
function() {
PASS_BANNER()
q("no",0,FALSE)
}
FAIL<-
function(e) {
# e: the condition object caught by doTest(); logged before exiting.
FAIL_BANNER()
Log.err(e)
q("no",1,FALSE) #exit with nonzero exit code
}
SKIP<-
function() {
q("no",42,FALSE) #exit with nonzero exit code
}
WARN<-
function(w) {
# Warnings are reported but do not fail the test run.
Log.warn(w)
}
#----------------------------------------------------------------------
# Print out a message with clear whitespace.
#
# Parameters: x -- Message to print out.
# n -- (optional) Step number.
#
# Returns: none
#----------------------------------------------------------------------
heading <- function(x, n = -1) {
  # Log a step banner padded by blank lines for visual separation.
  # When `n` is non-negative the step number is included in the banner.
  Log.info("")
  Log.info("")
  banner <- if (n < 0) sprintf("STEP: %s", x) else sprintf("STEP %2d: %s", n, x)
  Log.info(banner)
  Log.info("")
  Log.info("")
}
#----------------------------------------------------------------------
# "Safe" system. Error checks process exit status code. stop() if it failed.
#
# Parameters: x -- String of command to run (passed to system()).
#
# Returns: none
#----------------------------------------------------------------------
safeSystem <- function(x) {
  # Echo and run a shell command; abort with stop() on a nonzero exit status.
  print(sprintf("+ CMD: %s", x))
  exit_code <- system(x)
  print(exit_code)
  if (exit_code != 0) {
    stop(sprintf("SYSTEM COMMAND FAILED (exit status %d)", exit_code))
  }
}
parseArgs<-
function(args) {
# Walk the command-line args, setting harness configuration GLOBALS via <<-:
#   --usecloud IP:PORT  -> H2O.IP / H2O.PORT (consumes the following token)
#   --onJenkHadoop      -> ON.JENKINS.HADOOP flag
# Any unrecognized flag prints usage and exits the process.
i <- 1
while (i <= length(args)) {
s <- args[i]
if (s == "--usecloud") {
# Value flag: advance to the "IP:PORT" token; missing value is a usage error.
i <- i + 1
if (i > length(args)) {
usage()
}
argsplit <- strsplit(args[i], ":")[[1]]
H2O.IP <<- argsplit[1]
H2O.PORT <<- as.numeric(argsplit[2])
} else if (s == "--onJenkHadoop") {
ON.JENKINS.HADOOP <<- TRUE
} else {
unknownArg(s)
}
i <- i + 1
}
}
# Print command-line usage and terminate with exit code 1.
# NOTE(review): "runt" in the --onJenkHadoop line looks like a typo for
# "runit", but the string is runtime output and is left as-is here.
usage<-
function() {
print("")
print("Usage for: R -f runit.R --args [...options...]")
print("")
print("    --usecloud        connect to h2o on specified ip and port, where ip and port are specified as follows:")
print("                      IP:PORT")
print("")
print("    --onJenkHadoop    signal to runt that it will be run on h2o-hadoop cluster.")
print("")
q("no",1,FALSE) #exit with nonzero exit code
}
# Report an unrecognized command-line flag, then show usage (which exits).
unknownArg<-
function(arg) {
print("")
print(paste0("ERROR: Unknown argument: ",arg))
print("")
usage()
}
withWarnings <- function(expr) {
  # Evaluate `expr`, collecting (rather than printing) any warnings raised.
  # Each collected warning is passed to WARN(); the expression's value and
  # the warning list are returned for inspection.
  #
  # @param expr Expression to evaluate (lazily, inside the handler).
  # @return list(value = <result of expr>, warnings = <list of conditions>)
  myWarnings <- NULL
  wHandler <- function(w) {
    myWarnings <<- c(myWarnings, list(w))
    invokeRestart("muffleWarning")  # suppress default warning output
  }
  val <- withCallingHandlers(expr, warning = wHandler)
  for (w in myWarnings) WARN(w)
  # BUG FIX: the original built this list *before* the WARN() loop, so the
  # function actually returned the for-loop's invisible NULL, not the list.
  list(value = val, warnings = myWarnings)
}
doTest<-
function(testDesc, test) {
# Run a single runit test end-to-end against the connected h2o cluster:
# clear cluster state, open a fresh session, run the test with warnings
# routed to WARN() and errors to FAIL() (which exits), then PASS() (exits 0).
h2o.removeAll()
Log.info("======================== Begin Test ===========================\n")
conn <- h2o.getConnection()
conn@mutable$session_id <- .init.session_id()
tryCatch(test_that(testDesc, withWarnings(test())), warning = function(w) WARN(w), error =function(e) FAIL(e))
PASS()
}
cleanSummary <- function(mysum, alphabetical = FALSE) {
  # Parse a summary()-style character matrix of "name : value" cells into a
  # list (one element per column) of named vectors.
  #
  # Cells with a single ":" are treated as numeric ("Mean : 3.5" -> 3.5);
  # cells with several ":" (e.g. timestamps in min/max rows) keep everything
  # after the first ":" as a string.
  # WARNING: assumes categorical level names never contain ":".
  #
  # @param mysum        Character matrix as produced by summary() on a frame.
  # @param alphabetical Sort each column's entries by name?
  # @return List of named vectors, one per column of `mysum`.
  trim <- function(x) { gsub("^\\s+|\\s+$", "", x) }  # strip edge whitespace
  # seq_len(): safe for a 0-column matrix (1:ncol would yield c(1, 0)).
  lapply(seq_len(ncol(mysum)), function(i) {
    nams <- sapply(mysum[, i], function(x) { trim(unlist(strsplit(x, ":"))[1]) })
    vals <- sapply(mysum[, i], function(x) {
      numMatch <- sum(unlist(strsplit(x, "")) == ":")  # count ":" characters
      if (is.na(numMatch) || numMatch <= 1) {
        as.numeric(unlist(strsplit(x, ":"))[2])
      } else {
        # Rejoin everything after the first ":" for non-numeric entries.
        tmp <- unlist(strsplit(as.character(x), ":"))[-1]
        paste(tmp, collapse = ":")
      }
    })
    names(vals) <- nams
    vals <- vals[!is.na(nams)]  # drop cells that had no "name:" part (NA rows)
    if (alphabetical) vals <- vals[order(names(vals))]
    vals
  })
}
checkSummary <- function(object, expected, tolerance = 1e-6) {
# Compare two summary() matrices (e.g. h2o vs. R) entry by entry using
# testthat expectations: same number of columns, same names per column,
# and numerically equal values within `tolerance`.
# Both inputs are normalized via cleanSummary() before comparison.
sumR <- cleanSummary(expected, alphabetical = TRUE)
sumH2O <- cleanSummary(object, alphabetical = TRUE)
expect_equal(length(sumH2O), length(sumR))
lapply(1:length(sumR), function(i) {
vecR <- sumR[[i]]; vecH2O <- sumH2O[[i]]
expect_equal(length(vecH2O), length(vecR))
expect_equal(names(vecH2O), names(vecR))
for(j in 1:length(vecR))
expect_equal(vecH2O[j], vecR[j], tolerance = tolerance)
})
}
genDummyCols <- function(df, use_all_factor_levels = TRUE) {
# One-hot encode the factor columns of `df` (via ade4::acm.disjonctif),
# keeping numeric columns unchanged, and name the dummies "<col>.<level>".
# When use_all_factor_levels is FALSE the first level of each factor is
# dropped (reference coding).
# NOTE(review): installs "ade4" on the fly if missing - a side effect to be
# aware of in restricted environments.
NUM <- function(x) { x[,sapply(x, is.numeric)] }
FAC <- function(x) { x[,sapply(x, is.factor)] }
FAC_LEVS <- function(x) { sapply(x, function(z) { length(levels(z)) })}
df_fac <- data.frame(FAC(df))
if(ncol(df_fac) == 0) {
# No factors: return the numeric columns with their original names.
DF <- data.frame(NUM(df))
names(DF) <- colnames(df)[which(sapply(df, is.numeric))]
} else {
if(!"ade4" %in% rownames(installed.packages())) install.packages("ade4")
require(ade4)
df_fac_acm <- acm.disjonctif(df_fac)
if (!use_all_factor_levels) {
# fac_offs indexes the first dummy of each factor; drop those columns.
fac_offs <- cumsum(c(1, FAC_LEVS(df_fac)))
fac_offs <- fac_offs[-length(fac_offs)]
df_fac_acm <- data.frame(df_fac_acm[,-fac_offs])
}
DF <- data.frame(df_fac_acm, NUM(df))
# Rebuild dummy names as "<column>.<level>", matching the dropped-level rule.
fac_nams <- mapply(function(x, cname) {
levs <- levels(x)
if(!use_all_factor_levels) levs <- levs[-1]
paste(cname, levs, sep = ".") },
df_fac, colnames(df)[which(sapply(df, is.factor))])
fac_nams <- as.vector(unlist(fac_nams))
fac_range <- 1:ncol(df_fac_acm)
names(DF)[fac_range] <- fac_nams
if(ncol(NUM(df)) > 0) {
num_range <- (ncol(df_fac_acm)+1):ncol(DF)
names(DF)[num_range] <- colnames(df)[which(sapply(df, is.numeric))]
}
}
return(DF)
}
alignData <- function(df, center = FALSE, scale = FALSE, ignore_const_cols = TRUE, use_all_factor_levels = TRUE) {
# Preprocess a data frame to match h2o's column layout: optionally
# center/scale numeric columns, move categoricals to the front, drop
# constant columns, then one-hot encode factors via genDummyCols().
# NOTE(review): the constant-column check applies var() to every column,
# which assumes non-numeric columns tolerate var() - confirm for factor input.
df.clone <- df
is_num <- sapply(df.clone, is.numeric)
if(any(is_num)) {
df.clone[,is_num] <- scale(df.clone[,is_num], center = center, scale = scale)
df.clone <- df.clone[, c(which(!is_num), which(is_num))] # Move categorical column to front
}
if(ignore_const_cols) {
is_const <- sapply(df.clone, function(z) { var(z, na.rm = TRUE) == 0 })
if(any(is_const))
df.clone <- df.clone[,!is_const]
}
genDummyCols(df.clone, use_all_factor_levels)
}
#' HDFS helper: TRUE when the internal HDFS name node's web UI (port 50070)
#' is reachable within 5 seconds, i.e. we are running inside the h2o network.
#' Relies on the global H2O.INTERNAL.HDFS.NAME.NODE and RCurl::url.exists.
is.running.internal.to.h2o <- function() {
url <- sprintf("http://%s:50070", H2O.INTERNAL.HDFS.NAME.NODE);
internal <- url.exists(url, timeout = 5)
return(internal)
}
| /h2o-r/tests/Utils/utilsR.R | permissive | tomasgreif/h2o-3 | R | false | false | 8,474 | r | #'
#'
#' ----------------- Additional Runit Utilities -----------------
#'
#'
read.zip<-
function(zipfile, exdir,header=T) {
zipdir <- exdir
unzip(zipfile, exdir=zipdir)
files <- list.files(zipdir)
file <- paste(zipdir, files[1], sep="/")
read.csv(file,header=header)
}
sandbox<-
function() {
test_name <- R.utils::commandArgs(asValues=TRUE)$"f"
if (is.null(test_name)) {
test_name <- paste(getwd(), "r_command_line", sep="/")
}
Rsandbox <- paste("./Rsandbox_", basename(test_name), sep = "")
dir.create(Rsandbox, showWarnings = FALSE)
commandsLog <- paste(Rsandbox, "/commands.log", sep = "")
errorsLog <- paste(Rsandbox, "/errors.log", sep = "")
if(file.exists(commandsLog)) file.remove(commandsLog)
if(file.exists(errorsLog)) file.remove(errorsLog)
h2o.startLogging(paste(Rsandbox, "/rest.log", sep = ""))
}
Log.info<-
function(m) {
message <- paste("[INFO] : ",m,sep="")
logging(message)
}
Log.warn<-
function(m) {
logging(paste("[WARN] : ",m,sep=""))
traceback()
}
Log.err<-
function(m) {
logging(paste("[ERROR] : ",m,sep=""))
logging("[ERROR] : TEST FAILED")
traceback()
}
logging<-
function(m) {
cat(sprintf("[%s] %s\n", Sys.time(),m))
}
PASS_BANNER<-
function() {
cat("\n")
cat("######## ### ###### ###### \n")
cat("## ## ## ## ## ## ## ##\n")
cat("## ## ## ## ## ## \n")
cat("######## ## ## ###### ###### \n")
cat("## ######### ## ##\n")
cat("## ## ## ## ## ## ##\n")
cat("## ## ## ###### ###### \n")
cat("\n")
}
FAIL_BANNER<-
function() {
cat("\n")
cat("######## ### #### ## \n")
cat("## ## ## ## ## \n")
cat("## ## ## ## ## \n")
cat("###### ## ## ## ## \n")
cat("## ######### ## ## \n")
cat("## ## ## ## ## \n")
cat("## ## ## #### ######## \n")
cat("\n")
}
PASS<-
function() {
PASS_BANNER()
q("no",0,FALSE)
}
FAIL<-
function(e) {
FAIL_BANNER()
Log.err(e)
q("no",1,FALSE) #exit with nonzero exit code
}
SKIP<-
function() {
q("no",42,FALSE) #exit with nonzero exit code
}
WARN<-
function(w) {
Log.warn(w)
}
#----------------------------------------------------------------------
# Print out a message with clear whitespace.
#
# Parameters: x -- Message to print out.
# n -- (optional) Step number.
#
# Returns: none
#----------------------------------------------------------------------
heading <- function(x, n = -1) {
Log.info("")
Log.info("")
if (n < 0) {
Log.info(sprintf("STEP: %s", x))
} else {
Log.info(sprintf("STEP %2d: %s", n, x))
}
Log.info("")
Log.info("")
}
#----------------------------------------------------------------------
# "Safe" system. Error checks process exit status code. stop() if it failed.
#
# Parameters: x -- String of command to run (passed to system()).
#
# Returns: none
#----------------------------------------------------------------------
safeSystem <- function(x) {
print(sprintf("+ CMD: %s", x))
res <- system(x)
print(res)
if (res != 0) {
msg <- sprintf("SYSTEM COMMAND FAILED (exit status %d)", res)
stop(msg)
}
}
parseArgs<-
function(args) {
i <- 1
while (i <= length(args)) {
s <- args[i]
if (s == "--usecloud") {
i <- i + 1
if (i > length(args)) {
usage()
}
argsplit <- strsplit(args[i], ":")[[1]]
H2O.IP <<- argsplit[1]
H2O.PORT <<- as.numeric(argsplit[2])
} else if (s == "--onJenkHadoop") {
ON.JENKINS.HADOOP <<- TRUE
} else {
unknownArg(s)
}
i <- i + 1
}
}
usage<-
function() {
print("")
print("Usage for: R -f runit.R --args [...options...]")
print("")
print(" --usecloud connect to h2o on specified ip and port, where ip and port are specified as follows:")
print(" IP:PORT")
print("")
print(" --onJenkHadoop signal to runt that it will be run on h2o-hadoop cluster.")
print("")
q("no",1,FALSE) #exit with nonzero exit code
}
unknownArg<-
function(arg) {
print("")
print(paste0("ERROR: Unknown argument: ",arg))
print("")
usage()
}
withWarnings <- function(expr) {
  # Evaluate `expr`, collecting (rather than printing) any warnings raised.
  # Each collected warning is passed to WARN(); the expression's value and
  # the warning list are returned for inspection.
  #
  # @param expr Expression to evaluate (lazily, inside the handler).
  # @return list(value = <result of expr>, warnings = <list of conditions>)
  myWarnings <- NULL
  wHandler <- function(w) {
    myWarnings <<- c(myWarnings, list(w))
    invokeRestart("muffleWarning")  # suppress default warning output
  }
  val <- withCallingHandlers(expr, warning = wHandler)
  for (w in myWarnings) WARN(w)
  # BUG FIX: the original built this list *before* the WARN() loop, so the
  # function actually returned the for-loop's invisible NULL, not the list.
  list(value = val, warnings = myWarnings)
}
doTest<-
function(testDesc, test) {
h2o.removeAll()
Log.info("======================== Begin Test ===========================\n")
conn <- h2o.getConnection()
conn@mutable$session_id <- .init.session_id()
tryCatch(test_that(testDesc, withWarnings(test())), warning = function(w) WARN(w), error =function(e) FAIL(e))
PASS()
}
cleanSummary <- function(mysum, alphabetical = FALSE) {
# Returns string without leading or trailing whitespace
trim <- function(x) { gsub("^\\s+|\\s+$", "", x) }
lapply(1:ncol(mysum), {
function(i) {
nams <- sapply(mysum[,i], function(x) { trim(unlist(strsplit(x, ":"))[1]) })
vals <- sapply(mysum[,i], function(x) {
numMatch <- sum(unlist(strsplit(x, "")) == ":")
# If only one colon, then it contains numeric data
# WARNING: This assumes categorical levels don't contain colons
if(is.na(numMatch) || numMatch <= 1) {
as.numeric(unlist(strsplit(x, ":"))[2])
} else {
# Otherwise, return a string for min/max/quantile
tmp <- unlist(strsplit(as.character(x), ":"))[-1]
paste(tmp, collapse = ":")
}
})
names(vals) <- nams
vals <- vals[!is.na(nams)]
if(alphabetical) vals <- vals[order(names(vals))]
return(vals)
}
})
}
checkSummary <- function(object, expected, tolerance = 1e-6) {
sumR <- cleanSummary(expected, alphabetical = TRUE)
sumH2O <- cleanSummary(object, alphabetical = TRUE)
expect_equal(length(sumH2O), length(sumR))
lapply(1:length(sumR), function(i) {
vecR <- sumR[[i]]; vecH2O <- sumH2O[[i]]
expect_equal(length(vecH2O), length(vecR))
expect_equal(names(vecH2O), names(vecR))
for(j in 1:length(vecR))
expect_equal(vecH2O[j], vecR[j], tolerance = tolerance)
})
}
genDummyCols <- function(df, use_all_factor_levels = TRUE) {
NUM <- function(x) { x[,sapply(x, is.numeric)] }
FAC <- function(x) { x[,sapply(x, is.factor)] }
FAC_LEVS <- function(x) { sapply(x, function(z) { length(levels(z)) })}
df_fac <- data.frame(FAC(df))
if(ncol(df_fac) == 0) {
DF <- data.frame(NUM(df))
names(DF) <- colnames(df)[which(sapply(df, is.numeric))]
} else {
if(!"ade4" %in% rownames(installed.packages())) install.packages("ade4")
require(ade4)
df_fac_acm <- acm.disjonctif(df_fac)
if (!use_all_factor_levels) {
fac_offs <- cumsum(c(1, FAC_LEVS(df_fac)))
fac_offs <- fac_offs[-length(fac_offs)]
df_fac_acm <- data.frame(df_fac_acm[,-fac_offs])
}
DF <- data.frame(df_fac_acm, NUM(df))
fac_nams <- mapply(function(x, cname) {
levs <- levels(x)
if(!use_all_factor_levels) levs <- levs[-1]
paste(cname, levs, sep = ".") },
df_fac, colnames(df)[which(sapply(df, is.factor))])
fac_nams <- as.vector(unlist(fac_nams))
fac_range <- 1:ncol(df_fac_acm)
names(DF)[fac_range] <- fac_nams
if(ncol(NUM(df)) > 0) {
num_range <- (ncol(df_fac_acm)+1):ncol(DF)
names(DF)[num_range] <- colnames(df)[which(sapply(df, is.numeric))]
}
}
return(DF)
}
alignData <- function(df, center = FALSE, scale = FALSE, ignore_const_cols = TRUE, use_all_factor_levels = TRUE) {
df.clone <- df
is_num <- sapply(df.clone, is.numeric)
if(any(is_num)) {
df.clone[,is_num] <- scale(df.clone[,is_num], center = center, scale = scale)
df.clone <- df.clone[, c(which(!is_num), which(is_num))] # Move categorical column to front
}
if(ignore_const_cols) {
is_const <- sapply(df.clone, function(z) { var(z, na.rm = TRUE) == 0 })
if(any(is_const))
df.clone <- df.clone[,!is_const]
}
genDummyCols(df.clone, use_all_factor_levels)
}
#' HDFS helper
is.running.internal.to.h2o <- function() {
url <- sprintf("http://%s:50070", H2O.INTERNAL.HDFS.NAME.NODE);
internal <- url.exists(url, timeout = 5)
return(internal)
}
|
library(tidyr)
library(dplyr)
# Normalize company names: lower-case them, keep only the first letter,
# then expand that letter back to the canonical company name.
lowerletters <- tolower(refine_original$company)
refiner <- mutate(refine_original, company = substr(lowerletters, 1, stop = 1))
# FIX: inside a regex character class "|" is a literal pipe, not alternation,
# so the original "[p|f]" also matched "|"; "[pf]" is the intended set.
# Single-letter classes like "[a]" are simplified to the bare letter.
refiner$company <- gsub("^[pf]", "philips", refiner$company)
refiner$company <- gsub("^a", "akzo", refiner$company)
refiner$company <- gsub("^v", "van houten", refiner$company)
refiner$company <- gsub("^u", "unilever", refiner$company)
# Split "code-number" product identifiers and map the code to a category.
refiner <- separate(refiner, Product.code...number, c("product_code", "product_number"), sep = "-")
refiner <- mutate(refiner, "product_category" = product_code)
refiner$product_category <- gsub("^p", "Smartphone", refiner$product_category)
refiner$product_category <- gsub("^v", "TV", refiner$product_category)
refiner$product_category <- gsub("^x", "Laptop", refiner$product_category)
refiner$product_category <- gsub("^q", "Tablet", refiner$product_category)
# Collapse the address components into one comma-separated field.
refiner <- unite(refiner, "full_address", address : country, sep = ",")
# One-hot indicator columns for company and product category.
refiner <- mutate(refiner, company_philips = ifelse(company == "philips", 1, 0))
refiner <- mutate(refiner, company_akzo = ifelse(company == "akzo", 1, 0))
refiner <- mutate(refiner, company_unilever = ifelse(company == "unilever", 1, 0))
refiner <- mutate(refiner, company_van_houten = ifelse(company == "van houten", 1, 0))
refiner <- mutate(refiner, product_smartphone = ifelse(product_category == "Smartphone", 1, 0))
refiner <- mutate(refiner, product_laptop = ifelse(product_category == "Laptop", 1, 0))
refiner <- mutate(refiner, product_tv = ifelse(product_category == "TV", 1, 0))
refiner <- mutate(refiner, product_tablet = ifelse(product_category == "Tablet", 1, 0))
refine_clean <- refiner
write.csv(refine_clean, file = "refine_clean.csv")
| /SB-Data-Wrangling-Exercise-1_code.R | no_license | rlindberg3/SB-Data-Wrangling-Exercise-1 | R | false | false | 1,697 | r | library(tidyr)
library(tidyr)
library(dplyr)

# Clean the "refine_original" data set (data-wrangling exercise).
# Step 1: normalise company names -- lower-case them, keep only the first
# letter, then expand that letter back to the canonical company name.
lowerletters <- tolower(refine_original$company)
refiner <- mutate(refine_original, company = substr(lowerletters, 1, stop = 1))
# FIX: "^[pf]" instead of "^[p|f]" -- inside a character class "|" is a
# literal pipe, so the old pattern would also have matched a leading "|".
refiner$company <- gsub("^[pf]", "philips", refiner$company)
refiner$company <- gsub("^[a]", "akzo", refiner$company)
refiner$company <- gsub("^[v]", "van houten", refiner$company)
refiner$company <- gsub("^[u]", "unilever", refiner$company)

# Step 2: split the combined "Product.code...number" column (e.g. "x-1234")
# into a one-letter code and a number.
refiner <- separate(refiner, Product.code...number, c("product_code", "product_number"), sep = "-")

# Step 3: translate the one-letter product code into a readable category.
refiner <- mutate(refiner, "product_category" = product_code)
refiner$product_category <- gsub("^[p]", "Smartphone", refiner$product_category)
refiner$product_category <- gsub("^[v]", "TV", refiner$product_category)
refiner$product_category <- gsub("^[x]", "Laptop", refiner$product_category)
refiner$product_category <- gsub("^[q]", "Tablet", refiner$product_category)

# Step 4: collapse the address-to-country column range into one column.
refiner <- unite(refiner, "full_address", address : country, sep = "," )

# Step 5: one-hot indicator columns for company and product category.
refiner <- mutate(refiner, company_philips = ifelse(company == "philips", 1, 0))
refiner <- mutate(refiner, company_akzo = ifelse(company == "akzo", 1, 0))
refiner <- mutate(refiner, company_unilever = ifelse(company == "unilever", 1, 0))
refiner <- mutate(refiner, company_van_houten = ifelse(company == "van houten", 1, 0))
refiner <- mutate(refiner, product_smartphone = ifelse(product_category == "Smartphone", 1, 0))
refiner <- mutate(refiner, product_laptop = ifelse(product_category == "Laptop", 1, 0))
refiner <- mutate(refiner, product_tv = ifelse(product_category == "TV", 1, 0))
refiner <- mutate(refiner, product_tablet = ifelse(product_category == "Tablet", 1, 0))

# Write the cleaned result next to the script.
refine_clean <- refiner
write.csv(refine_clean, file = "refine_clean.csv")
|
##########################################################################################
### Load and clean results from previously run model for Bird to Mosquito transmission ###
##########################################################################################

## If the Stan model has been run previously and the user does not want to rerun it, load data
## (load_stan_bird_mos_res is a flag set elsewhere -- presumably by the calling script; confirm)
if (load_stan_bird_mos_res == TRUE & file.exists("saved_output/bird_to_mosquito_transmission.Rds")) {
## The RDS holds a 3-element list: summary object, prediction matrix, raw draws.
bird_mos_rds <- readRDS("saved_output/bird_to_mosquito_transmission.Rds")
bird_mos_model_out_summ <- bird_mos_rds[[1]]
bird_mos_pred <- bird_mos_rds[[2]]
samps_bird_mos <- bird_mos_rds[[3]]
rm(bird_mos_rds)
## Stack the 4 chains of the draws array (iterations x chains x parameters)
## into a single matrix of draws.
samps_bird_mos <- rbind(samps_bird_mos[,1,], samps_bird_mos[,2,], samps_bird_mos[,3,], samps_bird_mos[,4,])
} else {
mos_bird_trans <- read.csv("data/mos_bird_trans.csv", header = TRUE)
## Data associated with bird to mosquito transmission
## Keep only host-to-mosquito experiments that are not time series.
vectcomp <- mos_bird_trans %>% filter(Host_to_Mosquito == "Y" & Time_Series == "N")
## Needed to reset factors so that they are a continuous seq of numbers when converted to numeric for Stan model
vectcomp <- droplevels(vectcomp)
## Stan model data
bird_mos.data <-
with(vectcomp,
list(
"N" = nrow(vectcomp)
, "N_Samp" = Sample_Size
, "N_Inf" = Number_Infected
, "N_CIT" = length(unique(Citation))
, "N_VS" = length(unique(Vector_Species))
, "Samp_Max" = max(Sample_Size)
, "Inf_Max" = max(Number_Infected)
, "LD" = Log_Dose
, "Temp" = Temperature_C
, "VS" = as.numeric(Vector_Species)
, "CIT" = as.numeric(Citation)))
## Run model (4 chains, 14000 iterations each, 4000 warmup, thinned by 4)
bird_mos_model_out <- stan(
file = "stan/Bird_to_Mosquito.stan"
, data = bird_mos.data
, iter = 14000
, thin = 4
, warmup = 4000
, refresh = max(14000/100, 1)
, control = list(max_treedepth = 16, adapt_delta = .90)
, chains = 4)
## Pleasant way to look at convergence of the model
# launch_shinystan(bird_mos_model_out)
## NOTE(review): tidyr is detached before calling extract(), presumably so
## that rstan::extract() is not masked by tidyr::extract() -- confirm.
detach("package:tidyr", unload = TRUE)
samps_bird_mos <- extract(bird_mos_model_out, permuted = FALSE)
library(tidyr)
## NOTE(review): tidy() looks like broom::tidy, which is not attached in this
## chunk; tidy_bird_mos is also unused below -- confirm it is needed at all.
tidy_bird_mos <- tidy(bird_mos_model_out)
bird_mos_model_out_summ <- summary(bird_mos_model_out)
bird_mos_pred <- bird_mos_model_out_summ[["summary"]]
## Cache results so the expensive Stan run can be skipped next time.
saveRDS(list(bird_mos_model_out_summ, bird_mos_pred, samps_bird_mos)
, file = "saved_output/bird_to_mosquito_transmission.Rds")
## Stack the 4 chains into a single matrix of draws (as in the load branch).
samps_bird_mos <- rbind(samps_bird_mos[,1,], samps_bird_mos[,2,], samps_bird_mos[,3,], samps_bird_mos[,4,])
}
| /data_clean_bird_mos.R | no_license | morgankain/WNV_Mechanistic_Model | R | false | false | 2,580 | r | ##########################################################################################
### Load and clean results from previously run model for Bird to Mosquito transmission ###
##########################################################################################

## If the Stan model has been run previously and the user does not want to rerun it, load data
## (load_stan_bird_mos_res is a flag set elsewhere -- presumably by the calling script; confirm)
if (load_stan_bird_mos_res == TRUE & file.exists("saved_output/bird_to_mosquito_transmission.Rds")) {
## The RDS holds a 3-element list: summary object, prediction matrix, raw draws.
bird_mos_rds <- readRDS("saved_output/bird_to_mosquito_transmission.Rds")
bird_mos_model_out_summ <- bird_mos_rds[[1]]
bird_mos_pred <- bird_mos_rds[[2]]
samps_bird_mos <- bird_mos_rds[[3]]
rm(bird_mos_rds)
## Stack the 4 chains of the draws array (iterations x chains x parameters)
## into a single matrix of draws.
samps_bird_mos <- rbind(samps_bird_mos[,1,], samps_bird_mos[,2,], samps_bird_mos[,3,], samps_bird_mos[,4,])
} else {
mos_bird_trans <- read.csv("data/mos_bird_trans.csv", header = TRUE)
## Data associated with bird to mosquito transmission
## Keep only host-to-mosquito experiments that are not time series.
vectcomp <- mos_bird_trans %>% filter(Host_to_Mosquito == "Y" & Time_Series == "N")
## Needed to reset factors so that they are a continuous seq of numbers when converted to numeric for Stan model
vectcomp <- droplevels(vectcomp)
## Stan model data
bird_mos.data <-
with(vectcomp,
list(
"N" = nrow(vectcomp)
, "N_Samp" = Sample_Size
, "N_Inf" = Number_Infected
, "N_CIT" = length(unique(Citation))
, "N_VS" = length(unique(Vector_Species))
, "Samp_Max" = max(Sample_Size)
, "Inf_Max" = max(Number_Infected)
, "LD" = Log_Dose
, "Temp" = Temperature_C
, "VS" = as.numeric(Vector_Species)
, "CIT" = as.numeric(Citation)))
## Run model (4 chains, 14000 iterations each, 4000 warmup, thinned by 4)
bird_mos_model_out <- stan(
file = "stan/Bird_to_Mosquito.stan"
, data = bird_mos.data
, iter = 14000
, thin = 4
, warmup = 4000
, refresh = max(14000/100, 1)
, control = list(max_treedepth = 16, adapt_delta = .90)
, chains = 4)
## Pleasant way to look at convergence of the model
# launch_shinystan(bird_mos_model_out)
## NOTE(review): tidyr is detached before calling extract(), presumably so
## that rstan::extract() is not masked by tidyr::extract() -- confirm.
detach("package:tidyr", unload = TRUE)
samps_bird_mos <- extract(bird_mos_model_out, permuted = FALSE)
library(tidyr)
## NOTE(review): tidy() looks like broom::tidy, which is not attached in this
## chunk; tidy_bird_mos is also unused below -- confirm it is needed at all.
tidy_bird_mos <- tidy(bird_mos_model_out)
bird_mos_model_out_summ <- summary(bird_mos_model_out)
bird_mos_pred <- bird_mos_model_out_summ[["summary"]]
## Cache results so the expensive Stan run can be skipped next time.
saveRDS(list(bird_mos_model_out_summ, bird_mos_pred, samps_bird_mos)
, file = "saved_output/bird_to_mosquito_transmission.Rds")
## Stack the 4 chains into a single matrix of draws (as in the load branch).
samps_bird_mos <- rbind(samps_bird_mos[,1,], samps_bird_mos[,2,], samps_bird_mos[,3,], samps_bird_mos[,4,])
}
|
# Author: Asha Mohan
# Read rollingsales_statenisland.csv and do exploratory analysis.
# Plot the price of houses whose LAND square feet is between 2000 and 2500
# (note: despite the file name, the filter below uses LAND.SQUARE.FEET).
setwd("/Users/ASHMOHAN 1/Desktop/Data_Science/SMU_Term1_Doing_Data_science/Project1/dds_statenisland_assignment")
# So, save the file as a csv and use read.csv instead
statenisland_data <- read.csv("Data/rollingsales_statenisland.csv", header = TRUE, sep = ",")

## Check the data
head(statenisland_data)
summary(statenisland_data)
str(statenisland_data)
names(statenisland_data)

# Format some columns: strip every non-digit character (commas, dashes)
# before converting square-feet, year and price columns to numeric.
statenisland_data$LAND.SQUARE.FEET <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$LAND.SQUARE.FEET))
statenisland_data$GROSS.SQUARE.FEET <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$GROSS.SQUARE.FEET))
statenisland_data$YEAR.BUILT <- as.numeric(as.character(statenisland_data$YEAR.BUILT))
statenisland_data$SALE.PRICE <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$SALE.PRICE))
statenisland_data$SALE.PRICE

# Remove all the values that are NA
actual_sale <- statenisland_data$SALE.PRICE[!(is.na(statenisland_data$SALE.PRICE))]
# Finally get rid of sale values < 1000 as they are not plausible sales.
actual_sale_final <- actual_sale[actual_sale > 1000]
actual_sale_final

## For now, look at homes of 2000 - 2500 land square feet and plot the prices.
st.mid_size_home <- statenisland_data$LAND.SQUARE.FEET[which(statenisland_data$LAND.SQUARE.FEET > 2000 & statenisland_data$LAND.SQUARE.FEET < 2500)]
st.mid_size_home_value <- statenisland_data$SALE.PRICE[which(statenisland_data$LAND.SQUARE.FEET > 2000 & statenisland_data$LAND.SQUARE.FEET < 2500)]
# NOTE(review): count() is not base R -- it needs plyr (or dplyr) attached,
# and neither is loaded in this script. Confirm the intended package.
count(st.mid_size_home)
hist(st.mid_size_home)
# FIX: axis labels were swapped -- x is square feet, y is the sale price.
plot(st.mid_size_home, st.mid_size_home_value, main="Mid Size House Prices", xlab="Land square feet", ylab="Sale price in dollars", pch=19)
| /Analysis/gross_sqft_analysis.R | no_license | peter-smu/dds_statenisland_assignment | R | false | false | 1,848 | r | # Author: Asha Mohan
# Read rollingsales_statenisland.csv and do exploratory analysis.
# Plot the price of houses whose LAND square feet is between 2000 and 2500
# (note: despite the file name, the filter below uses LAND.SQUARE.FEET).
setwd("/Users/ASHMOHAN 1/Desktop/Data_Science/SMU_Term1_Doing_Data_science/Project1/dds_statenisland_assignment")
# So, save the file as a csv and use read.csv instead
statenisland_data <- read.csv("Data/rollingsales_statenisland.csv", header = TRUE, sep = ",")

## Check the data
head(statenisland_data)
summary(statenisland_data)
str(statenisland_data)
names(statenisland_data)

# Format some columns: strip every non-digit character (commas, dashes)
# before converting square-feet, year and price columns to numeric.
statenisland_data$LAND.SQUARE.FEET <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$LAND.SQUARE.FEET))
statenisland_data$GROSS.SQUARE.FEET <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$GROSS.SQUARE.FEET))
statenisland_data$YEAR.BUILT <- as.numeric(as.character(statenisland_data$YEAR.BUILT))
statenisland_data$SALE.PRICE <- as.numeric(gsub("[^[:digit:]]", "", statenisland_data$SALE.PRICE))
statenisland_data$SALE.PRICE

# Remove all the values that are NA
actual_sale <- statenisland_data$SALE.PRICE[!(is.na(statenisland_data$SALE.PRICE))]
# Finally get rid of sale values < 1000 as they are not plausible sales.
actual_sale_final <- actual_sale[actual_sale > 1000]
actual_sale_final

## For now, look at homes of 2000 - 2500 land square feet and plot the prices.
st.mid_size_home <- statenisland_data$LAND.SQUARE.FEET[which(statenisland_data$LAND.SQUARE.FEET > 2000 & statenisland_data$LAND.SQUARE.FEET < 2500)]
st.mid_size_home_value <- statenisland_data$SALE.PRICE[which(statenisland_data$LAND.SQUARE.FEET > 2000 & statenisland_data$LAND.SQUARE.FEET < 2500)]
# NOTE(review): count() is not base R -- it needs plyr (or dplyr) attached,
# and neither is loaded in this script. Confirm the intended package.
count(st.mid_size_home)
hist(st.mid_size_home)
# FIX: axis labels were swapped -- x is square feet, y is the sale price.
plot(st.mid_size_home, st.mid_size_home_value, main="Mid Size House Prices", xlab="Land square feet", ylab="Sale price in dollars", pch=19)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-plotCountDensity.R
\docType{methods}
\name{plotCountDensity}
\alias{plotCountDensity}
\alias{plotCountDensity,bcbioRNASeq-method}
\alias{plotCountDensity,data.frame-method}
\title{Plot Count Density}
\usage{
plotCountDensity(object, ...)
\S4method{plotCountDensity}{bcbioRNASeq}(object, interestingGroups,
normalized = "tmm", style = "solid",
color = scale_color_viridis(discrete = TRUE),
fill = scale_fill_viridis(discrete = TRUE))
\S4method{plotCountDensity}{data.frame}(object,
interestingGroups = "sampleName", style = "solid",
color = scale_color_viridis(discrete = TRUE),
fill = scale_fill_viridis(discrete = TRUE))
}
\arguments{
\item{object}{Object.}
\item{...}{\emph{Additional arguments (for the S4 generic definition).}}
\item{interestingGroups}{Category to use to group samples. In the plotting
functions, this will define color and shape, where applicable. If unset,
this is automatically determined by the metadata set inside the
\link{bcbioRNASeq} object. When set to \code{NULL}, this will default to
\code{sampleName}.}
\item{normalized}{Count normalization method. See \code{\link[=counts]{counts()}} documentation
for more information.}
\item{style}{Desired plot style (\code{line} or \code{solid}).}
\item{color}{Desired ggplot color scale. Defaults to
\code{\link[viridis:scale_color_viridis]{viridis::scale_color_viridis()}}. Must supply discrete values. When set to
\code{NULL}, the default ggplot2 color palette will be used. If manual color
definitions are desired, we recommend using
\code{\link[ggplot2:scale_color_manual]{ggplot2::scale_color_manual()}}.}
\item{fill}{Desired ggplot fill scale. Defaults to
\code{\link[viridis:scale_fill_viridis]{viridis::scale_fill_viridis()}}. Must supply discrete values. When set to
\code{NULL}, the default ggplot2 color palette will be used. If manual color
definitions are desired, we recommend using \code{\link[ggplot2:scale_fill_manual]{ggplot2::scale_fill_manual()}}.}
}
\value{
\link{ggplot}.
}
\description{
Plot Count Density
}
\examples{
plotCountDensity(bcb)
\dontrun{
plotCountDensity(
bcb,
interestingGroups = "group",
fill = NULL)
}
# data.frame
\dontrun{
meltLog10(bcb, normalized = "tmm") \%>\% plotCountDensity()
}
}
\seealso{
Other Quality Control Plots: \code{\link{plot53Bias}},
\code{\link{plotCountsPerGene}},
\code{\link{plotExonicMappingRate}},
\code{\link{plotGenderMarkers}},
\code{\link{plotGeneSaturation}},
\code{\link{plotGenesDetected}}, \code{\link{plotGene}},
\code{\link{plotIntronicMappingRate}},
\code{\link{plotMappedReads}},
\code{\link{plotMappingRate}},
\code{\link{plotRRNAMappingRate}},
\code{\link{plotTotalReads}}
}
\author{
Michael Steinbaugh, Rory Kirchner, Victor Barrera
}
\concept{Quality Control Plots}
| /man/plotCountDensity.Rd | permissive | YTLogos/bcbioRNASeq | R | false | true | 2,877 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-plotCountDensity.R
\docType{methods}
\name{plotCountDensity}
\alias{plotCountDensity}
\alias{plotCountDensity,bcbioRNASeq-method}
\alias{plotCountDensity,data.frame-method}
\title{Plot Count Density}
\usage{
plotCountDensity(object, ...)
\S4method{plotCountDensity}{bcbioRNASeq}(object, interestingGroups,
normalized = "tmm", style = "solid",
color = scale_color_viridis(discrete = TRUE),
fill = scale_fill_viridis(discrete = TRUE))
\S4method{plotCountDensity}{data.frame}(object,
interestingGroups = "sampleName", style = "solid",
color = scale_color_viridis(discrete = TRUE),
fill = scale_fill_viridis(discrete = TRUE))
}
\arguments{
\item{object}{Object.}
\item{...}{\emph{Additional arguments (for the S4 generic definition).}}
\item{interestingGroups}{Category to use to group samples. In the plotting
functions, this will define color and shape, where applicable. If unset,
this is automatically determined by the metadata set inside the
\link{bcbioRNASeq} object. When set to \code{NULL}, this will default to
\code{sampleName}.}
\item{normalized}{Count normalization method. See \code{\link[=counts]{counts()}} documentation
for more information.}
\item{style}{Desired plot style (\code{line} or \code{solid}).}
\item{color}{Desired ggplot color scale. Defaults to
\code{\link[viridis:scale_color_viridis]{viridis::scale_color_viridis()}}. Must supply discrete values. When set to
\code{NULL}, the default ggplot2 color palette will be used. If manual color
definitions are desired, we recommend using
\code{\link[ggplot2:scale_color_manual]{ggplot2::scale_color_manual()}}.}
\item{fill}{Desired ggplot fill scale. Defaults to
\code{\link[viridis:scale_fill_viridis]{viridis::scale_fill_viridis()}}. Must supply discrete values. When set to
\code{NULL}, the default ggplot2 color palette will be used. If manual color
definitions are desired, we recommend using \code{\link[ggplot2:scale_fill_manual]{ggplot2::scale_fill_manual()}}.}
}
\value{
\link{ggplot}.
}
\description{
Plot Count Density
}
\examples{
plotCountDensity(bcb)
\dontrun{
plotCountDensity(
bcb,
interestingGroups = "group",
fill = NULL)
}
# data.frame
\dontrun{
meltLog10(bcb, normalized = "tmm") \%>\% plotCountDensity()
}
}
\seealso{
Other Quality Control Plots: \code{\link{plot53Bias}},
\code{\link{plotCountsPerGene}},
\code{\link{plotExonicMappingRate}},
\code{\link{plotGenderMarkers}},
\code{\link{plotGeneSaturation}},
\code{\link{plotGenesDetected}}, \code{\link{plotGene}},
\code{\link{plotIntronicMappingRate}},
\code{\link{plotMappedReads}},
\code{\link{plotMappingRate}},
\code{\link{plotRRNAMappingRate}},
\code{\link{plotTotalReads}}
}
\author{
Michael Steinbaugh, Rory Kirchner, Victor Barrera
}
\concept{Quality Control Plots}
|
# Exploratory PCA on the built-in iris data set.
x <- iris
summary(x)
names(iris)
names(x)
# Pairwise scatter plots of the four measurements, coloured by species.
pairs(x[1:4], col = as.numeric(x$Species))
head(x)
irisdat <- x[1:4]
# Manual PCA: covariance matrix and its eigen decomposition.
iriscov <- cov(irisdat)
iriscov
iriseig <- eigen(iriscov)
iriseig
# princomp() repeats the steps above in one call.
# FIX: cor = FALSE (a logical, not the string "False") runs the PCA on the
# covariance matrix rather than the correlation matrix.
irispca <- princomp(irisdat, cor = FALSE)
irispca
summary(irispca)
screeplot(irispca)
irispca$loadings
summary(irispca)
irispca
# Run Naive Bayes classifier to Iris data
| /iris.R | no_license | robin93/CDS | R | false | false | 559 | r | x <- iris
# Exploratory PCA on the built-in iris data set
# (NOTE(review): this copy references x, which must be assigned `iris` upstream).
summary(x)
names(iris)
names(x)
# Pairwise scatter plots of the four measurements, coloured by species.
pairs(x[1:4], col = as.numeric(x$Species))
head(x)
irisdat <- x[1:4]
# Manual PCA: covariance matrix and its eigen decomposition.
iriscov <- cov(irisdat)
iriscov
iriseig <- eigen(iriscov)
iriseig
# princomp() repeats the steps above in one call.
# FIX: cor = FALSE (a logical, not the string "False") runs the PCA on the
# covariance matrix rather than the correlation matrix.
irispca <- princomp(irisdat, cor = FALSE)
irispca
summary(irispca)
screeplot(irispca)
irispca$loadings
summary(irispca)
irispca
# Run Naive Bayes classifier to Iris data
\name{soi_from_sot}
\Rdversion{1.1}
\alias{soi_from_sot}
\title{
Calculation of the Spillover Index for a given Spillover Table
}
\description{
Given a spillover table, this function calculates the corresponding spillover index.
}
\usage{soi_from_sot(input_table)}
\arguments{
\item{input_table}{
Either a spillover table or a list thereof
}
}
\details{
The spillover index was introduced by Diebold and Yilmaz in 2009 (see References). It is
based on a variance decomposition of the forecast error variances of an \eqn{N}-dimensional MA(\eqn{\infty}) process.
The underlying idea is to decompose the forecast error of each variable into own variance shares
and cross variance shares. The latter are interpreted as contributions of shocks of one variable
to the error variance in forecasting another variable (see also \code{\link{sot}}).
The spillover index then is a number between 0 and 100, describing the relative amount of forecast error variances that can
be explained by shocks coming from other variables in the model.
The typical application of the 'list' version of \code{soi_from_sot} is a rolling windows approach when \code{input_table} is a list representing the corresponding spillover tables at different points in time
(rolling windows).
}
\value{
Numeric value or a list thereof.
}
\references{
[1] Diebold, F. X. and Yilmaz, K. (2009): \href{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-0297.2008.02208.x/pdf}{Measuring financial asset return and volatility spillovers,
with application to global equity markets},
Economic Journal 199(534): 158-171.
[2] Kloessner, S. and Wagner, S. (2012): \href{http://onlinelibrary.wiley.com/doi/10.1002/jae.2366/pdf}{Exploring All VAR Orderings for Calculating Spillovers? Yes, We Can! -
A Note on Diebold and Yilmaz (2009)}, Journal of Applied Econometrics 29(1): 172-179
}
\author{
Stefan Kloessner (\email{S.Kloessner@mx.uni-saarland.de}), \cr
with contributions by Sven Wagner (\email{sven.wagner@mx.uni-saarland.de})
}
\seealso{\code{\link{fastSOM-package}}, \code{\link{soi}}, \code{\link{sot}} }
\examples{
# generate randomly positive definite matrix Sigma of dimension N
N <- 10
Sigma <- crossprod(matrix(rnorm(N*N),nrow=N))
# generate randomly coefficient matrices
H <- 10
A <- array(rnorm(N*N*H),dim=c(N,N,H))
# calculate spillover table
SOT <- sot(Sigma,A)
# calculate spillover index from spillover table
soi_from_sot(SOT)
}
\keyword{spillover index from spillover table} | /man/soi_from_sot.Rd | no_license | Allisterh/fastSOM | R | false | false | 2,702 | rd | \name{soi_from_sot}
\Rdversion{1.1}
\alias{soi_from_sot}
\title{
Calculation of the Spillover Index for a given Spillover Table
}
\description{
Given a spillover table, this function calculates the corresponding spillover index.
}
\usage{soi_from_sot(input_table)}
\arguments{
\item{input_table}{
Either a spillover table or a list thereof
}
}
\details{
The spillover index was introduced by Diebold and Yilmaz in 2009 (see References). It is
based on a variance decomposition of the forecast error variances of an \eqn{N}-dimensional MA(\eqn{\infty}) process.
The underlying idea is to decompose the forecast error of each variable into own variance shares
and cross variance shares. The latter are interpreted as contributions of shocks of one variable
to the error variance in forecasting another variable (see also \code{\link{sot}}).
The spillover index then is a number between 0 and 100, describing the relative amount of forecast error variances that can
be explained by shocks coming from other variables in the model.
The typical application of the 'list' version of \code{soi_from_sot} is a rolling windows approach when \code{input_table} is a list representing the corresponding spillover tables at different points in time
(rolling windows).
}
\value{
Numeric value or a list thereof.
}
\references{
[1] Diebold, F. X. and Yilmaz, K. (2009): \href{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-0297.2008.02208.x/pdf}{Measuring financial asset return and volatility spillovers,
with application to global equity markets},
Economic Journal 199(534): 158-171.
[2] Kloessner, S. and Wagner, S. (2012): \href{http://onlinelibrary.wiley.com/doi/10.1002/jae.2366/pdf}{Exploring All VAR Orderings for Calculating Spillovers? Yes, We Can! -
A Note on Diebold and Yilmaz (2009)}, Journal of Applied Econometrics 29(1): 172-179
}
\author{
Stefan Kloessner (\email{S.Kloessner@mx.uni-saarland.de}), \cr
with contributions by Sven Wagner (\email{sven.wagner@mx.uni-saarland.de})
}
\seealso{\code{\link{fastSOM-package}}, \code{\link{soi}}, \code{\link{sot}} }
\examples{
# generate randomly positive definite matrix Sigma of dimension N
N <- 10
Sigma <- crossprod(matrix(rnorm(N*N),nrow=N))
# generate randomly coefficient matrices
H <- 10
A <- array(rnorm(N*N*H),dim=c(N,N,H))
# calculate spillover table
SOT <- sot(Sigma,A)
# calculate spillover index from spillover table
soi_from_sot(SOT)
}
\keyword{spillover index from spillover table} |
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{vizgraph}
\alias{vizgraph}
\alias{vizgraph-package}
\title{Self-contained R package for Graphviz}
\description{
See http://www.graphviz.org/
}
| /man/vizgraph.Rd | no_license | igraph/vizgraph | R | false | false | 221 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{vizgraph}
\alias{vizgraph}
\alias{vizgraph-package}
\title{Self-contained R package for Graphviz}
\description{
See http://www.graphviz.org/
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/criaModeloGenerico2.R
\name{criaModeloGenerico2}
\alias{criaModeloGenerico2}
\title{Create function with generic model}
\usage{
criaModeloGenerico2(nome, formula, funcaoRegressao, variaveis, palpite = NULL,
maisParametros = NULL, requires = NULL)
}
\arguments{
\item{nome}{is the name of model}
\item{formula}{is the string formula begin with y2~y1}
\item{funcaoRegressao}{is the function that will make the regression, ex.: 'nlsLM'}
\item{variaveis}{list variables that are present in the model that are field database}
\item{palpite}{param start of funcaoRegressao}
\item{maisParametros}{string add in funcaoRegressao, ex lm(y2~y1, data=base, maisParametros)}
\item{requires}{list of string of packges used to work with funcaoRegressao}
}
\value{
will be returned function with generic model to map to a base
}
\description{
This function creates a generic model: a function whose parameters correspond to the variables, so it can be mapped to each different base. Its return value is a generic model that should be mapped before being used by the function avaliaEstimativas.
}
| /man/criaModeloGenerico2.Rd | no_license | GRSEB9S/ITGM | R | false | true | 1,158 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/criaModeloGenerico2.R
\name{criaModeloGenerico2}
\alias{criaModeloGenerico2}
\title{Create function with generic model}
\usage{
criaModeloGenerico2(nome, formula, funcaoRegressao, variaveis, palpite = NULL,
maisParametros = NULL, requires = NULL)
}
\arguments{
\item{nome}{is the name of model}
\item{formula}{is the string formula begin with y2~y1}
\item{funcaoRegressao}{is the function that will make the regression, ex.: 'nlsLM'}
\item{variaveis}{list variables that are present in the model that are field database}
\item{palpite}{param start of funcaoRegressao}
\item{maisParametros}{string add in funcaoRegressao, ex lm(y2~y1, data=base, maisParametros)}
\item{requires}{list of string of packges used to work with funcaoRegressao}
}
\value{
will be returned function with generic model to map to a base
}
\description{
This function creates a generic model: a function whose parameters correspond to the variables, so it can be mapped to each different base. Its return value is a generic model that should be mapped before being used by the function avaliaEstimativas.
}
|
library(datasets)
library(dplyr)

# Load data (semicolon-separated; missing power readings are coded "?").
mydata <- read.csv("./household_power_consumption.txt", sep = ";")
# Add a datetime column generated from the Date and Time columns.
mydata$DateTime <- strptime(paste(mydata$Date, mydata$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# Parse the string date column to Date type.
mydata$Date <- as.Date(mydata$Date, "%d/%m/%Y")
# Start and end dates used to filter the data (1-2 Feb 2007).
startdate <- as.Date("1/2/2007", "%d/%m/%Y")
enddate <- as.Date("2/2/2007", "%d/%m/%Y")
# Keep only the two days of interest, then free the full data set.
febdata <- mydata[mydata$Date >= startdate & mydata$Date <= enddate, ]
rm(mydata)

## Drop records whose power reading is the missing-value marker "?",
## then convert the remaining readings to numeric.
febdata <- febdata[febdata$Global_active_power != '?', ]
febdata$Global_active_power <- as.numeric(as.character(febdata$Global_active_power))

## Plot the time series of global active power to a 480x480 PNG.
# FIX: y-axis label typo ("killowatts" -> "kilowatts"); also removed the
# unused mainLable variable left over from a commented-out histogram.
histYLable <- "Global Active Power (kilowatts)"
png(filename = "plot2.png", width = 480, height = 480, units = "px")
with(febdata, plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = histYLable))
dev.off()
rm(febdata) | /plot2.R | no_license | hrpatel34/ExData_Plotting1 | R | false | false | 1,143 | r | library(datasets)
library(datasets)
library(dplyr)

# Load data (semicolon-separated; missing power readings are coded "?").
mydata <- read.csv("./household_power_consumption.txt", sep = ";")
# Add a datetime column generated from the Date and Time columns.
mydata$DateTime <- strptime(paste(mydata$Date, mydata$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# Parse the string date column to Date type.
mydata$Date <- as.Date(mydata$Date, "%d/%m/%Y")
# Start and end dates used to filter the data (1-2 Feb 2007).
startdate <- as.Date("1/2/2007", "%d/%m/%Y")
enddate <- as.Date("2/2/2007", "%d/%m/%Y")
# Keep only the two days of interest, then free the full data set.
febdata <- mydata[mydata$Date >= startdate & mydata$Date <= enddate, ]
rm(mydata)

## Drop records whose power reading is the missing-value marker "?",
## then convert the remaining readings to numeric.
febdata <- febdata[febdata$Global_active_power != '?', ]
febdata$Global_active_power <- as.numeric(as.character(febdata$Global_active_power))

## Plot the time series of global active power to a 480x480 PNG.
# FIX: y-axis label typo ("killowatts" -> "kilowatts"); also removed the
# unused mainLable variable left over from a commented-out histogram.
histYLable <- "Global Active Power (kilowatts)"
png(filename = "plot2.png", width = 480, height = 480, units = "px")
with(febdata, plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = histYLable))
dev.off()
rm(febdata)
# Data frame folio and Typo: map each farm (folio) to its cluster type (typo).
DF_FT <- FinalCluster %>%
select(folio, typo)
### Add Family_Lab "Family Labor (working days per ha)"
### (sum of the per-activity family working-day columns, divided by area x0)
RawData <- RawData %>%
mutate(Fam_Lab = (jh_pt_f + jh_s_f + jh_apf_f + jh_ahp_f + jh_r_f + jh_c_f)/x0)
# Data frame Labour
# NOTE(review): Hrd_Lab is not computed here -- it is presumably created
# upstream in RawData; confirm.
DF_Lab <- RawData %>%
select(folio, Fam_Lab, Hrd_Lab)
# Add "typo" variable to RawData
DF_FstClus <- left_join(RawData, DF_FT, by = "folio")
# Select variables for Data Base
DF_FstClus2 <- DF_FstClus %>%
select(typo, comuna, cultivo, sistema, x0, yield, p0, jh_c, jh, ctomaq, ctoinsumosh, ctototal)
# Add Labour Data to Cluster Data Frame
# NOTE(review): bind_cols pairs rows by position; this assumes DF_Lab rows
# are in the same order as DF_FstClus2 (both derive from RawData) -- confirm.
DF_FstClus3 <- bind_cols(DF_FstClus2, DF_Lab)
DF_FstClus3 <- DF_FstClus3 %>%
select(typo, comuna, cultivo, sistema, x0, yield, p0, Fam_Lab, Hrd_Lab, jh_c, jh, ctomaq, ctoinsumosh, ctototal)
# Adding Labour cost ($CLP/ha): price x quantity for hired and family
# labour, plus machinery and input costs, gives the total cost per ha.
DF_FstClus4 <- DF_FstClus3 %>%
mutate(HrdLabCst = Hrd_Lab*jh_c, FamLabCst = Fam_Lab*jh) %>%
mutate(TtlCst= HrdLabCst + FamLabCst + ctomaq + ctoinsumosh)
######################################--Household Type 1--#############################
# Farms classified as household type 1: drop incomplete records, then
# aggregate each activity (comuna x cultivo x sistema) to total area and
# mean yield, price, labour use and costs.
Household1 <- DF_FstClus4 %>%
  filter(typo == 1)
Household1 <- na.omit(Household1)
Household1 <- Household1 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))

# Area share per crop.
CrpsHT1 <- Household1 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))

# Area share per production system.
# FIX: the original grouped by "tipo_cultivo" (not a column of Household1)
# and left summarise() unclosed, which was a parse error. Grouping by
# "sistema" appears intended -- confirm.
SysHT1 <- Household1 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))

# Revenue share per crop (revenue = area x yield x price).
RevHT1 <- Household1 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))

Household1$HT <- "H1"
######################################--Household Type 2--#############################
# Farms classified as household type 2: drop incomplete records, then
# aggregate each activity (comuna x cultivo x sistema) to total area and
# mean yield, price, labour use and costs.
Household2 <- DF_FstClus4 %>%
  filter(typo == 2)
Household2 <- na.omit(Household2)
Household2 <- Household2 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))

# Area share per crop.
CrpsHT2 <- Household2 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))

# Area share per production system.
# FIX: the original grouped by "tipo_cultivo" (not a column of Household2)
# and left summarise() unclosed, which was a parse error. Grouping by
# "sistema" appears intended -- confirm.
SysHT2 <- Household2 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))

# Revenue share per crop (revenue = area x yield x price).
RevHT2 <- Household2 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))

Household2$HT <- "H2"
######################################--Household Type 3--#############################
# Farms classified as household type 3: drop incomplete records, then
# aggregate each activity (comuna x cultivo x sistema) to total area and
# mean yield, price, labour use and costs.
Household3 <- DF_FstClus4 %>%
  filter(typo == 3)
Household3 <- na.omit(Household3)
Household3 <- Household3 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))

# Area share per crop.
CrpsHT3 <- Household3 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))

# Area share per production system.
# FIX: the original grouped by "tipo_cultivo" (not a column of Household3)
# and left summarise() unclosed, which was a parse error. Grouping by
# "sistema" appears intended -- confirm.
SysHT3 <- Household3 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))

# Revenue share per crop (revenue = area x yield x price).
RevHT3 <- Household3 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))

Household3$HT <- "H3"
######################################--Household Type 4--#############################
# Farms classified as household type 4: drop incomplete records, then
# aggregate each activity (comuna x cultivo x sistema) to total area and
# mean yield, price, labour use and costs.
Household4 <- DF_FstClus4 %>%
  filter(typo == 4)
Household4 <- na.omit(Household4)
Household4 <- Household4 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))

# Area share per crop.
CrpsHT4 <- Household4 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))

# Area share per production system.
# FIX: the original grouped by "tipo_cultivo" (not a column of Household4)
# and left summarise() unclosed, which was a parse error. Grouping by
# "sistema" appears intended -- confirm.
SysHT4 <- Household4 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))

# Revenue share per crop (revenue = area x yield x price).
RevHT4 <- Household4 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))

Household4$HT <- "H4"
######################################--Household Type 5--#############################
# (header fixed: the original comment said "Type 4" but this block filters typo == 5)
# Farms classified as household type 5: drop incomplete records, then
# aggregate each activity (comuna x cultivo x sistema) to total area and
# mean yield, price, labour use and costs.
Household5 <- DF_FstClus4 %>%
  filter(typo == 5)
Household5 <- na.omit(Household5)
Household5 <- Household5 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))

# Area share per crop.
CrpsHT5 <- Household5 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))

# Area share per production system.
# FIX: the original grouped by "tipo_cultivo" (not a column of Household5)
# and left summarise() unclosed, which was a parse error. Grouping by
# "sistema" appears intended -- confirm.
SysHT5 <- Household5 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))

# Revenue share per crop (revenue = area x yield x price).
RevHT5 <- Household5 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))

Household5$HT <- "H5"
################ Binding Households Data frames --> Creating Final DB for household model
FinalDB_0803 <- bind_rows(Household1, Household2, Household3, Household4, Household5)
# Column 13 is HT (added last in each Household block); move it to the front.
FinalDB_0803 <- FinalDB_0803[,c(13,1:12)]
# English column names for the model input file.
FinalDB_0803 <- FinalDB_0803 %>%
rename(commune=comuna, crop=cultivo, system=sistema)
### Final Details
## Codes for communes (values not listed below are left unchanged)
FinalDB_0803$commune <- as.character(FinalDB_0803$commune)
FinalDB_0803$commune[FinalDB_0803$commune == "Pencahue"] <- "PEN"
FinalDB_0803$commune[FinalDB_0803$commune == "Cauquenes"] <- "CAU"
FinalDB_0803$commune[FinalDB_0803$commune == "San Clemente"] <- "SC"
FinalDB_0803$commune[FinalDB_0803$commune == "Parral"] <- "PAR"
## Codes for systems (riego = irrigated, secano = dryland)
FinalDB_0803$system <- as.character(FinalDB_0803$system)
FinalDB_0803$system[FinalDB_0803$system == "riego"] <- "irr"
FinalDB_0803$system[FinalDB_0803$system == "secano"] <- "dry"
## Codes for Crops: map crop names to short codes; names not listed are
## left unchanged.
FinalDB_0803$crop <- as.character(FinalDB_0803$crop)
FinalDB_0803$crop[FinalDB_0803$crop == "ARROZ"] <- "ric"
FinalDB_0803$crop[FinalDB_0803$crop == "ARVEJA"] <- "pea"
FinalDB_0803$crop[FinalDB_0803$crop == "AVENA"] <- "oat"
FinalDB_0803$crop[FinalDB_0803$crop == "CEBOLLA"] <- "oni"
FinalDB_0803$crop[FinalDB_0803$crop == "GARBANZO"] <- "chk"
FinalDB_0803$crop[FinalDB_0803$crop == "LENTEJA"] <- "len"
FinalDB_0803$crop[FinalDB_0803$crop == "MAIZ"] <- "mze"
FinalDB_0803$crop[FinalDB_0803$crop == "MAIZ_SEM"] <- "smze"
FinalDB_0803$crop[FinalDB_0803$crop == "MARAVILLA"] <- "snf"
FinalDB_0803$crop[FinalDB_0803$crop == "MELON"] <- "mel"
FinalDB_0803$crop[FinalDB_0803$crop == "MELON_SEM"] <- "smel"
FinalDB_0803$crop[FinalDB_0803$crop == "PAPA"] <- "pot"
FinalDB_0803$crop[FinalDB_0803$crop == "PEPINO_SEM"] <- "scuc"
FinalDB_0803$crop[FinalDB_0803$crop == "POROTO"] <- "cmb"
FinalDB_0803$crop[FinalDB_0803$crop == "POROTO_VER"] <- "gbn"
FinalDB_0803$crop[FinalDB_0803$crop == "REMOLACHA"] <- "sgb"
FinalDB_0803$crop[FinalDB_0803$crop == "REPOLLO"] <- "cbg"
FinalDB_0803$crop[FinalDB_0803$crop == "REPOLLO_SEM"] <- "scbg"
FinalDB_0803$crop[FinalDB_0803$crop == "SANDIA"] <- "wtm"
FinalDB_0803$crop[FinalDB_0803$crop == "SANDIA_SEM"] <- "swtm"
FinalDB_0803$crop[FinalDB_0803$crop == "SOYA"] <- "soy"
FinalDB_0803$crop[FinalDB_0803$crop == "TABACO"] <- "tob"
FinalDB_0803$crop[FinalDB_0803$crop == "TOMATE"] <- "tom"
FinalDB_0803$crop[FinalDB_0803$crop == "TRIGO"] <- "wht"
FinalDB_0803$crop[FinalDB_0803$crop == "ZAPALLO"] <- "sqh"
## Rounding values: 2 decimals for yields and labour, whole numbers for
## prices and costs.
FinalDB_0803$yield <-round(FinalDB_0803$yield,2)
FinalDB_0803$CropPrice <-round(FinalDB_0803$CropPrice)
FinalDB_0803$HiredLabor <- round(FinalDB_0803$HiredLabor,2)
FinalDB_0803$FamilyLab <- round(FinalDB_0803$FamilyLab,2)
FinalDB_0803$PriceHrdLab <- round(FinalDB_0803$PriceHrdLab)
FinalDB_0803$PriceFamLab <- round(FinalDB_0803$PriceFamLab)
FinalDB_0803$rentedMachCosts <- round(FinalDB_0803$rentedMachCosts)
FinalDB_0803$InputCosts <- round(FinalDB_0803$InputCosts)
# Saving FinalDB as csv
write.csv(FinalDB_0803, file="csv_files/FinalDB_0803.csv")
| /R/Step4_Final_Cluster_DB/Data_Base_develop.R | no_license | fjfernandezj/HHM_final_v1 | R | false | false | 9,259 | r | # Data frame folio and Typo
# Household id ('folio') and household type ('typo') from the cluster result.
DF_FT <- FinalCluster %>%
select(folio, typo)
### Add Family_Lab "Family Labor (working days per ha)"
# Sum of the per-task family working-day columns, divided by planted area x0.
RawData <- RawData %>%
mutate(Fam_Lab = (jh_pt_f + jh_s_f + jh_apf_f + jh_ahp_f + jh_r_f + jh_c_f)/x0)
# Data frame Labour
DF_Lab <- RawData %>%
select(folio, Fam_Lab, Hrd_Lab)
# Add "typo" variable to RawData
DF_FstClus <- left_join(RawData, DF_FT, by = "folio")
#Select variables for Data Base
DF_FstClus2 <- DF_FstClus %>%
select(typo, comuna, cultivo, sistema, x0, yield, p0, jh_c, jh, ctomaq, ctoinsumosh, ctototal)
#Add Labour Data to Cluster Data Frame
# NOTE(review): bind_cols() pairs rows purely by position; this relies on
# DF_FstClus2 and DF_Lab preserving the same row order from RawData --
# verify folio alignment after the left_join above.
DF_FstClus3 <- bind_cols(DF_FstClus2, DF_Lab)
DF_FstClus3 <- DF_FstClus3 %>%
select(typo, comuna, cultivo, sistema, x0, yield, p0, Fam_Lab, Hrd_Lab, jh_c, jh, ctomaq, ctoinsumosh, ctototal)
# Adding Labour cost ($CLP/ha)
# Hired and family labour costs (days/ha * wage), plus total variable cost.
DF_FstClus4 <- DF_FstClus3 %>%
mutate(HrdLabCst = Hrd_Lab*jh_c, FamLabCst = Fam_Lab*jh) %>%
mutate(TtlCst= HrdLabCst + FamLabCst + ctomaq + ctoinsumosh)
######################################--Household Type 1--#############################
# Household type 1: drop incomplete records, then aggregate to one row per
# commune x crop x system (total planted area; mean yield, prices, labour
# use and costs).
Household1 <- DF_FstClus4 %>%
  filter(typo == 1)
Household1 <- na.omit(Household1)
Household1 <- Household1 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))
# Share of total area by crop.
CrpsHT1 <- Household1 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))
# Share of total area by production system.
# FIX: summarise() was not closed before the pipe into mutate(), which made
# the whole script unparsable.
# NOTE(review): the original grouped by 'tipo_cultivo', a column that does
# not exist at this point; 'sistema' is the only system-type column
# available -- confirm this is the intended grouping.
SysHT1 <- Household1 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))
# Share of total revenue (area * yield * price) by crop.
RevHT1 <- Household1 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))
# Household-type tag used when the five types are row-bound later.
Household1$HT <- "H1"
######################################--Household Type 2--#############################
# Household type 2: drop incomplete records, then aggregate to one row per
# commune x crop x system (total planted area; mean yield, prices, labour
# use and costs).
Household2 <- DF_FstClus4 %>%
  filter(typo == 2)
Household2 <- na.omit(Household2)
Household2 <- Household2 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))
# Share of total area by crop.
CrpsHT2 <- Household2 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))
# Share of total area by production system.
# FIX: summarise() was not closed before the pipe into mutate(), which made
# the whole script unparsable.
# NOTE(review): the original grouped by 'tipo_cultivo', a column that does
# not exist at this point; 'sistema' is the only system-type column
# available -- confirm this is the intended grouping.
SysHT2 <- Household2 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))
# Share of total revenue (area * yield * price) by crop.
RevHT2 <- Household2 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))
# Household-type tag used when the five types are row-bound later.
Household2$HT <- "H2"
######################################--Household Type 3--#############################
# Household type 3: drop incomplete records, then aggregate to one row per
# commune x crop x system (total planted area; mean yield, prices, labour
# use and costs).
Household3 <- DF_FstClus4 %>%
  filter(typo == 3)
Household3 <- na.omit(Household3)
Household3 <- Household3 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))
# Share of total area by crop.
CrpsHT3 <- Household3 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))
# Share of total area by production system.
# FIX: summarise() was not closed before the pipe into mutate(), which made
# the whole script unparsable.
# NOTE(review): the original grouped by 'tipo_cultivo', a column that does
# not exist at this point; 'sistema' is the only system-type column
# available -- confirm this is the intended grouping.
SysHT3 <- Household3 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))
# Share of total revenue (area * yield * price) by crop.
RevHT3 <- Household3 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))
# Household-type tag used when the five types are row-bound later.
Household3$HT <- "H3"
######################################--Household Type 4--#############################
# Household type 4: drop incomplete records, then aggregate to one row per
# commune x crop x system (total planted area; mean yield, prices, labour
# use and costs).
Household4 <- DF_FstClus4 %>%
  filter(typo == 4)
Household4 <- na.omit(Household4)
Household4 <- Household4 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))
# Share of total area by crop.
CrpsHT4 <- Household4 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))
# Share of total area by production system.
# FIX: summarise() was not closed before the pipe into mutate(), which made
# the whole script unparsable.
# NOTE(review): the original grouped by 'tipo_cultivo', a column that does
# not exist at this point; 'sistema' is the only system-type column
# available -- confirm this is the intended grouping.
SysHT4 <- Household4 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))
# Share of total revenue (area * yield * price) by crop.
RevHT4 <- Household4 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))
# Household-type tag used when the five types are row-bound later.
Household4$HT <- "H4"
######################################--Household Type 5--#############################
# Household type 5 (the header comment wrongly said "Type 4"): drop
# incomplete records, then aggregate to one row per commune x crop x system
# (total planted area; mean yield, prices, labour use and costs).
Household5 <- DF_FstClus4 %>%
  filter(typo == 5)
Household5 <- na.omit(Household5)
Household5 <- Household5 %>%
  group_by(comuna, cultivo, sistema) %>%
  summarise(area = sum(x0), yield = mean(yield), CropPrice = mean(p0), HiredLabor = mean(Hrd_Lab),
            FamilyLab = mean(Fam_Lab), PriceHrdLab = mean(jh_c), PriceFamLab = mean(jh),
            rentedMachCosts = mean(ctomaq), InputCosts = mean(ctoinsumosh))
# Share of total area by crop.
CrpsHT5 <- Household5 %>%
  group_by(cultivo) %>%
  summarise(m_Area = sum(area)) %>%
  mutate(Area_shr = m_Area/sum(m_Area))
# Share of total area by production system.
# FIX: summarise() was not closed before the pipe into mutate(), which made
# the whole script unparsable.
# NOTE(review): the original grouped by 'tipo_cultivo', a column that does
# not exist at this point; 'sistema' is the only system-type column
# available -- confirm this is the intended grouping.
SysHT5 <- Household5 %>%
  group_by(sistema) %>%
  summarise(m_Sys = sum(area)) %>%
  mutate(Sys_shr = m_Sys/sum(m_Sys))
# Share of total revenue (area * yield * price) by crop.
RevHT5 <- Household5 %>%
  group_by(cultivo) %>%
  summarise(m_Rev = sum(area * yield * CropPrice)) %>%
  mutate(Rev_shr = m_Rev/sum(m_Rev))
# Household-type tag used when the five types are row-bound later.
Household5$HT <- "H5"
################ Binding Households Data frames --> Creating Final DB for household model
FinalDB_0803 <- bind_rows(Household1, Household2, Household3, Household4, Household5)
# Move the household-type column HT (added last, position 13) to the front.
FinalDB_0803 <- FinalDB_0803[,c(13,1:12)]
# English column names for the model database.
FinalDB_0803 <- FinalDB_0803 %>%
rename(commune=comuna, crop=cultivo, system=sistema)
### Final Details
## Codes for communes
# Recode commune names to short codes; unmatched names pass through unchanged.
FinalDB_0803$commune <- as.character(FinalDB_0803$commune)
FinalDB_0803$commune[FinalDB_0803$commune == "Pencahue"] <- "PEN"
FinalDB_0803$commune[FinalDB_0803$commune == "Cauquenes"] <- "CAU"
FinalDB_0803$commune[FinalDB_0803$commune == "San Clemente"] <- "SC"
FinalDB_0803$commune[FinalDB_0803$commune == "Parral"] <- "PAR"
## Codes for systems
# riego -> irr (irrigated), secano -> dry (rainfed).
FinalDB_0803$system <- as.character(FinalDB_0803$system)
FinalDB_0803$system[FinalDB_0803$system == "riego"] <- "irr"
FinalDB_0803$system[FinalDB_0803$system == "secano"] <- "dry"
## Codes for Crops
# Map Spanish crop names to short crop codes; unmapped values pass through
# unchanged (same effect as the original one-assignment-per-crop recodes).
FinalDB_0803$crop <- as.character(FinalDB_0803$crop)
FinalDB_0803$crop <- local({
  codes <- c(
    ARROZ = "ric", ARVEJA = "pea", AVENA = "oat", CEBOLLA = "oni",
    GARBANZO = "chk", LENTEJA = "len", MAIZ = "mze", MAIZ_SEM = "smze",
    MARAVILLA = "snf", MELON = "mel", MELON_SEM = "smel", PAPA = "pot",
    PEPINO_SEM = "scuc", POROTO = "cmb", POROTO_VER = "gbn",
    REMOLACHA = "sgb", REPOLLO = "cbg", REPOLLO_SEM = "scbg",
    SANDIA = "wtm", SANDIA_SEM = "swtm", SOYA = "soy", TABACO = "tob",
    TOMATE = "tom", TRIGO = "wht", ZAPALLO = "sqh"
  )
  crop <- FinalDB_0803$crop
  hit <- crop %in% names(codes)
  crop[hit] <- unname(codes[crop[hit]])
  crop
})
## Rounding values
# Yield and labour quantities keep two decimals; prices and costs are
# rounded to whole currency units (exactly as before).
two_dp_cols <- c("yield", "HiredLabor", "FamilyLab")
FinalDB_0803[two_dp_cols] <- lapply(FinalDB_0803[two_dp_cols], round, 2)
whole_cols <- c("CropPrice", "PriceHrdLab", "PriceFamLab",
                "rentedMachCosts", "InputCosts")
FinalDB_0803[whole_cols] <- lapply(FinalDB_0803[whole_cols], round)
#Saving FinalDB as csv
write.csv(FinalDB_0803, file="csv_files/FinalDB_0803.csv")
|
## Allows caching the inverse of matrix, since calculating inverse of matrix
## is a costly computation, stores the matrix inverse in a variable so
## it doesn't have to be calculated everytime.
## Creates a matrix which has an ability to cache its inverse,
## exposes a list of functions that can be used to get, set
## the matrix and its inverse.
## Note: Resets the inverse, when the matrix is updated using set function.
## Build a matrix wrapper that memoises its inverse. The four returned
## closures share 'x' (the matrix) and 'inv' (its cached inverse, NULL until
## computed) through the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setInv = function(inverse) inv <<- inverse,
    getInv = function() inv
  )
}
## Returns a matrix that is the inverse of 'x',
## The result is cached, so after calculating it once,
## if the matrix is not changed.
## Note: This function assumes matrix is inversible.
## Return the inverse of the special "cache matrix" 'x' created by
## makeCacheMatrix. The inverse is computed with solve() at most once; a
## cached value is reused (with a message) until x$set() invalidates it.
## Extra arguments are forwarded to solve(). Assumes the matrix is
## invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (is.null(cached)) {
    # Cache miss: invert and remember the result for later calls.
    cached <- solve(x$get(), ...)
    x$setInv(cached)
  } else {
    message("Returning cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | ahmednasir91/ProgrammingAssignment2 | R | false | false | 1,252 | r | ## Allows caching the inverse of matrix, since calculating inverse of matrix
## is a costly computation, stores the matrix inverse in a variable so
## it doesn't have to be calculated everytime.
## Creates a matrix which has an ability to cache its inverse,
## exposes a list of functions that can be used to get, set
## the matrix and its inverse.
## Note: Resets the inverse, when the matrix is updated using set function.
## Create a special "matrix" object that caches its inverse.
## All four returned closures share 'x' and 'inv' via the enclosing
## environment, which is what makes the cache persist between calls.
makeCacheMatrix <- function(x = matrix()) {
# initially set the cached value to NULL
inv <- NULL
# Set the matrix
set <- function (y) {
x <<- y
# invalidate the cached inverse -- it belonged to the old matrix
inv <<- NULL
}
# Get Matrix
get <- function () x
# Set Inverse
setInv <- function (inverse) inv <<- inverse
# Get Inverse
getInv <- function () inv
# Return list of functions
list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Returns a matrix that is the inverse of 'x',
## The result is cached, so after calculating it once,
## if the matrix is not changed.
## Note: This function assumes matrix is inversible.
## Return the inverse of a "cache matrix" made by makeCacheMatrix.
## Computes the inverse with solve() on first use and stores it via
## x$setInv(); later calls reuse the cached copy until x$set() resets it.
## '...' is forwarded to solve(). Assumes x$get() is invertible.
cacheSolve <- function(x, ...) {
inv <- x$getInv()
if (!is.null(inv)) {
message("Returning cached inverse")
return(inv)
}
# cache miss: invert and remember the result
data <- x$get()
inv <- solve(data, ...)
x$setInv(inv)
inv
}
|
library(MetaCycle)
### Name: meta3d
### Title: Detect rhythmic signals from time-series datasets with
###   individual information
### Aliases: meta3d
### ** Examples
# NOTE(review): auto-extracted Rd example. It writes two csv files and an
# 'example' output directory into the current working directory.
# write 'cycHumanBloodData' and 'cycHumanBloodDesign' into two 'csv' files
# (both objects are presumably datasets shipped with MetaCycle -- confirm)
write.csv(cycHumanBloodData, file="cycHumanBloodData.csv",
row.names=FALSE)
write.csv(cycHumanBloodDesign, file="cycHumanBloodDesign.csv",
row.names=FALSE)
# detect circadian transcripts with JTK in studied individuals
# Column indices below refer to the design file: 1 = library id,
# 2 = subject, 3 = group, 4 = sampling hour.
meta3d(datafile="cycHumanBloodData.csv", cycMethodOne="JTK",
designfile="cycHumanBloodDesign.csv", outdir="example",
filestyle="csv", design_libColm=1, design_subjectColm=2,
design_hrColm=4, design_groupColm=3)
| /data/genthat_extracted_code/MetaCycle/examples/meta3d.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 690 | r | library(MetaCycle)
### Name: meta3d
### Title: Detect rhythmic signals from time-series datasets with
### individual information
### Aliases: meta3d
### ** Examples
# write 'cycHumanBloodData' and 'cycHumanBloodDesign' into two 'csv' files
write.csv(cycHumanBloodData, file="cycHumanBloodData.csv",
row.names=FALSE)
write.csv(cycHumanBloodDesign, file="cycHumanBloodDesign.csv",
row.names=FALSE)
# detect circadian transcripts with JTK in studied individuals
meta3d(datafile="cycHumanBloodData.csv", cycMethodOne="JTK",
designfile="cycHumanBloodDesign.csv", outdir="example",
filestyle="csv", design_libColm=1, design_subjectColm=2,
design_hrColm=4, design_groupColm=3)
|
# CPP Projet Modelisation
# file name : plotSpikes.r
# NOTE(review): this header filename disagrees with the repository path
# (plotISIH.r); update one of the two.
# authors : Belmahi Asmae, Maxwell Sam
# date : 14/04/2021
# Open a fresh 7x4-inch plotting window.
dev.new(width=7, height=4)
# Inter-spike-interval data, read (headerless) from the working directory.
df=read.table("isih_2.txt",header=F)
# Histogram of the first column: distribution of time between consecutive
# spikes (ISIH), in milliseconds, with 40 bins.
hist(df[,1],
main = "ISIH\nDistribution of time between two spikes",
xlab = "delta time [ms]",
col = "#3182bd",
breaks=40
)
# file name : plotSpikes.r
# authors : Belmahi Asmae, Maxwell Sam
# date : 14/04/2021
dev.new(width=7, height=4)
df=read.table("isih_2.txt",header=F)
hist(df[,1],
main = "ISIH\nDistribution of time between two spikes",
xlab = "delta time [ms]",
col = "#3182bd",
breaks=40
)
|
# Load the mall customers data set (absolute Windows path from the original
# author's machine; adjust as needed).
mydata <- read.csv("E:\\Dataset\\customer-segmentation-dataset\\Mall_Customers.csv")
library(plotrix)
df <- mydata
# Lay the four plots out in a single column.
par(mfrow = c(4, 1))
# Gender visualization using bargraph.
group <- table(df$Gender)
# FIX: the legend referenced 'rownames(a)', but no object 'a' exists (the
# script errored here); use the category names of the gender table instead.
barplot(group, xlab = "Gender", ylab = "Number",
        main = "Classifcation of male & female", col = rainbow(2),
        legend.text = names(group))
# Gender visualization using piechart.
r <- round(group / sum(group) * 100)
# Use the table's own category names instead of a hard-coded
# c("Female", "Male"), so the labels stay correct if the data changes.
# (table() sorts levels alphabetically, so output is identical here.)
lbs <- paste(names(group), "", r, "%")
pie3D(group, labels = lbs, main = "Pie chart for love")
# visualization of age distribution using histogram.
ages <- df$Age
hist(ages, col = "green", xlab = "Ages", ylab = "Number of Gender",
     labels = TRUE, main = "Histogram")
# visualization of age using Boxplot for compare the values in graph.
boxplot(ages, main = "Discriptive analysis of ages", col = "blue")
| /customer_semgentation.R | no_license | CyberROOM05/Customer-data-visualization-in-R | R | false | false | 829 | r | mydata= read.csv("E:\\Dataset\\customer-segmentation-dataset\\Mall_Customers.csv")
library(plotrix)
df=mydata
par(mfrow= c(4,1))
# Gender visualization using bargraph.
group= table(df$Gender)
barplot(group, xlab = "Gender", ylab = "Number", main = "Classifcation of male & female", col = rainbow(2), legend.text =rownames(a))
# Gender visualization using piechart.
r= round(group/sum(group)*100)
lbs= paste(c("Female","Male"),"",r,"%")
pie3D(group, labels =lbs, main="Pie chart for love")
# visualization of age distribution using histogram.
ages=df$Age
hist(ages, col = "green", xlab = "Ages", ylab = "Number of Gender", labels = TRUE,main = "Histogram")
# visualization of age using Boxplot for compare the values in graph.
boxplot(ages, main="Discriptive analysis of ages", col = "blue")
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Project: NEXXUS
# Purpose: Matching Process
# Version: 0.1
# Programmer: Xin Huang
# Date: 09/09/2015
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# read in the fake data for testing
# Fake test data for the matching routine below; expected in the working
# directory. Column names are lower-cased so lookups like "id"/"case" work
# regardless of the csv header's capitalisation.
match_raw_data <- read.csv("match_raw_data.csv", header = TRUE,
stringsAsFactors = FALSE)
colnames(match_raw_data) <- tolower(colnames(match_raw_data))
# Greedy case-control matching with optional propensity score -- an R port
# of a SAS matching macro (parameter comments below are from the macro).
# NOTE(review): this function shadows base::match(); consider renaming.
# NOTE(review): the function appears unfinished -- it builds a pairwise
# squared-difference matrix (d.matrix_m) but never applies dmax/dmaxk,
# never uses the 'dist' choice, and never performs or returns the actual
# match; '.used'/'.small'/'.match' are initialised but unused.
match <- function(data, group, id, mvars, wts, dmaxk, dmax, dist = 1,
ncontls = 1, time, transf = 0, seedca, seedco,
print = TRUE, rsp_var, rsp_ind, pscore_cut = 0.0001,
out, outnmca, outnmcp){
# @@parameters:
# DATA = , /* SAS data set containing cases and potential controls */
# GROUP = , /* SAS variable defining cases. Group=1 if case, 0 if control*/
# ID = , /* SAS CHARACTER ID variable for the cases and controls*/
# MVARS = , /* List of numeric matching variables common to both case and control*/
# WTS = , /* List of non-negative weights corresponding to each matching vars*/
# DMAXK = , /* List of non-negative values corresponding to each matching vars*/
# DMAX = , /* Largest value of Distance(Dij) to be considered as a valid match*/
# DIST = 1,/* Type of distance to use 1=absolute difference; 2=Euclidean*/
# NCONTLS = 1,/* The number of controls to match to each case*/
# TIME = , /* Time variable used for risk set matching, only if ctrl time > case being matched*/
# TRANSF = 0,/* Whether all matching vars are to be transformed (0=no, 1=standardize, 2=use ranks)*/
# SEEDCA = , /* Seed value used to randomly sort the cases prior to match*/
# SEEDCO = , /* Seed value used to randomly sort the controls prior to match*/
# PRINT = y,/* Option to print data summary for matched cases */
# Rsp_var = , /* To include propensity score in match. Response variable to calculate propensity score */
# Rsp_Ind = , /* Independent variable list to be used for propensity score calculation, default=&MVars */
# Pscore_cut = 0.0001, /*set propensity score diff < 0.005 as valid match */
# OUT = __OUT, /* matched data in paired format, &out._Matched in original data format */
# OUTNMCA = __NMCA,
# OUTNMCO = __NMCO);
# NOTE(review): leftover debugging block -- the assignments below clobber
# every argument with hard-coded test values, so callers' inputs are
# ignored (and missing(...) checks below can never trigger). Remove before
# using this function for real.
data = match_raw_data
group = "case"
id = "id"
mvars = c("sex", "age")
wts = c(1, 1)
dmaxk = c(0, 2)
#dmax =
dist = 1
ncontls = 2
#time
transf = 0
seedca = 234
seedco = 489
print = TRUE
rsp_var = "resp"
rsp_ind = c("sex", "age")
pscore_cut = 0.0001
out = "outd"
outnmca = "matched"
#outnmcp
# do the logistic regression to get the propensity
# Fit rsp_var ~ rsp_ind, append the fitted propensity as a 'pscore' column,
# and treat it as an extra matching variable with weight 1 and caliper
# pscore_cut.
if (!missing(rsp_var)) {
if (missing(rsp_ind)) {
rsp_ind <- mvars
}
formu <- paste(rsp_var, " ~ ", paste(rsp_ind, collapse = " + "))
glm_model <- glm(formu, data, family = "binomial")
p_score <- predict(glm_model, data, type = "response")
.ndata <- cbind(data, pscore = p_score)
data <- .ndata
mvars <- c(mvars, "pscore")
wts <- c(wts, 1)
if (!missing(dmaxk)) dmaxk <- c(dmaxk, pscore_cut)
}
# Argument validation (ported from the SAS macro's error handling; 'bad'
# flags are kept but stop() aborts immediately anyway).
bad <- 0
if (missing(data)) {
bad <- 1
stop("ERROR: NO DATASET SUPPLIED")
}
if (missing(id)) {
bad <- 1
stop("ERROR: NO ID VARIABLE SUPPLIED")
}
if (missing(group)) {
bad <- 1
stop("ERROR: NO CASE(1)/CONTROL(0) GROUP VARIABLE SUPPLIED")
}
if (missing(wts)) {
bad <- 1
stop("ERROR: NO WEIGHTS SUPPLIED")
}
nvar <- length(mvars)
nwts <- length(wts)
if (nvar != nwts) {
bad <- 1
stop("ERROR: #VARS MUST EQUAL #WTS")
}
nk <- length(dmaxk)
if (nk > nvar) nk <- nvar
v <- mvars
# NOTE(review): 'w <- nwts' copies the weight COUNT, not the weight values;
# this was probably meant to be 'w <- wts'. As written, the weights are
# never used and the non-negativity check below tests a scalar count.
# (Also note the "EERROR" typo in the message below.)
w <- nwts
if (any(w < 0)) {
bad = 1
stop("EERROR: WEIGHTS MUST BE NON-NEGATIVE")
}
if (nk > 0) {
k <- dmaxk
if(any(k < 0)){
bad = 1
stop("ERROR: DMAXK VALUES MUST BE NON-NEGATIVE")
}
}
### for match #####
# remove the rows with missing values
.check <- data
# NOTE(review): this hard-codes column "id" instead of using the 'id'
# argument (data[, id]); it fails whenever the id column has another name.
.check[ ".id"] <- data[, "id"]
.check <- .check[complete.cases(.check), ]
# standardize the vars
# NOTE(review): the transf == 0 branch is empty, so for the default
# (no transformation) '.caco' is never assigned and the next section
# errors; it presumably should be '.caco <- .check'. The documented
# transf == 2 (ranks) option is not implemented either.
if (transf == 1) {
.stdzd <- scale(.check[, mvars], center = TRUE, scale = TRUE)
.caco <- cbind(.check[, setdiff(colnames(.check), mvars)], .stdzd)
} else if (transf == 0) {
} else {
.caco <- .check
}
# for case dataset
# Cases get '.ca1..can' copies of the matching vars and a random sort key
# '.r' (seeded by seedca) used to randomise match order.
.case <- .caco[.caco[, group] == 1, ]
.case[, ".idca"] <- .case[, id]
if (!missing(time)) .case[, ".catime"] <- .case[, time]
tmp <- paste(".ca", 1:nvar, sep = "")
.case[, tmp] <- .case[, v]
if (!missing(seedca)) {
set.seed(seedca)
.case[, ".r"] <- rnorm(nrow(.case))
} else {
.case[, ".r"] <- 1
}
if (missing(time)) {
.case <- .case[, c('.idca', tmp, ".r", mvars)]
} else {
.case <- .case[, c('.idca', tmp, ".r", mvars, ".catime")]
}
nca <- nrow(.case)
# for control dataset
# Same preparation for controls, with '.co1..con' copies and seed seedco.
.cont <- .caco[.caco[, group] == 0, ]
.cont[, ".idco"] <- .cont[, id]
if (!missing(time)) .cont[, ".cotime"] <- .cont[, time]
tmp1 <- paste(".co", 1:nvar, sep = "")
.cont[, tmp1] <- .cont[, v]
if (!missing(seedco)) {
set.seed(seedco)
.cont[, ".r"] <- rnorm(nrow(.cont))
} else {
.cont[, ".r"] <- 1
}
if (missing(time)) {
.cont <- .cont[, c('.idco', tmp1, ".r", mvars)]
} else {
.cont <- .cont[, c('.idco', tmp1, ".r", mvars, ".cotime")]
}
nco <- nrow(.cont)
bad2 <- 0
if (nco < nca * ncontls) {
bad2 <- 1
stop("ERROR: NOT ENOUGH CONTROLS TO MAKE REQUESTED MATCHES")
}
.cont <- .cont[order(.cont[, c(".r", ".idco")]), ]
#### do the matching #####
# set some flags
.used <- vector("integer", nco)
.small <- NULL
.match <- NULL
# distance matrix
# dm <- as.matrix(.case[, mvars]) %*% as.matrix(t(.cont[, mvars]))
# tmp <- NULL
# Pairwise case-minus-control differences on the matching variables,
# reduced to a (cases x controls) matrix of summed squared differences.
# The 'dist' argument (absolute vs Euclidean) is not consulted here.
d.matrix <- as.vector(apply(.case[, tmp], 1,
function(x) apply(.cont[, tmp1], 1,
function(y) x - y)))
d.matrix <- matrix(d.matrix, ncol = length(tmp), byrow = TRUE)
d.matrix_m <- apply(d.matrix, 1, function(x) sum(x ^ 2))
d.matrix_m <- matrix(d.matrix_m, ncol = nrow(.cont), byrow = TRUE)
}
| /match.R | no_license | HeraclesHX/ps_matching | R | false | false | 6,413 | r | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Project: NEXXUS
# Purpose: Matching Process
# Version: 0.1
# Programmer: Xin Huang
# Date: 09/09/2015
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# read in the fake data for testing
match_raw_data <- read.csv("match_raw_data.csv", header = TRUE,
stringsAsFactors = FALSE)
colnames(match_raw_data) <- tolower(colnames(match_raw_data))
match <- function(data, group, id, mvars, wts, dmaxk, dmax, dist = 1,
ncontls = 1, time, transf = 0, seedca, seedco,
print = TRUE, rsp_var, rsp_ind, pscore_cut = 0.0001,
out, outnmca, outnmcp){
# @@parameters:
# DATA = , /* SAS data set containing cases and potential controls */
# GROUP = , /* SAS variable defining cases. Group=1 if case, 0 if control*/
# ID = , /* SAS CHARACTER ID variable for the cases and controls*/
# MVARS = , /* List of numeric matching variables common to both case and control*/
# WTS = , /* List of non-negative weights corresponding to each matching vars*/
# DMAXK = , /* List of non-negative values corresponding to each matching vars*/
# DMAX = , /* Largest value of Distance(Dij) to be considered as a valid match*/
# DIST = 1,/* Type of distance to use 1=absolute difference; 2=Euclidean*/
# NCONTLS = 1,/* The number of controls to match to each case*/
# TIME = , /* Time variable used for risk set matching, only if ctrl time > case being matched*/
# TRANSF = 0,/* Whether all matching vars are to be transformed (0=no, 1=standardize, 2=use ranks)*/
# SEEDCA = , /* Seed value used to randomly sort the cases prior to match*/
# SEEDCO = , /* Seed value used to randomly sort the controls prior to match*/
# PRINT = y,/* Option to print data summary for matched cases */
# Rsp_var = , /* To include propensity score in match. Response variable to calculate propensity score */
# Rsp_Ind = , /* Independent variable list to be used for propensity score calculation, dafault=&MVars */
# Pscore_cut = 0.0001, /*set propensity score diff < 0.005 as valid match */
# OUT = __OUT, /* matched data in paired format, &out._Matched in original data format */
# OUTNMCA = __NMCA,
# OUTNMCO = __NMCO);
data = match_raw_data
group = "case"
id = "id"
mvars = c("sex", "age")
wts = c(1, 1)
dmaxk = c(0, 2)
#dmax =
dist = 1
ncontls = 2
#time
transf = 0
seedca = 234
seedco = 489
print = TRUE
rsp_var = "resp"
rsp_ind = c("sex", "age")
pscore_cut = 0.0001
out = "outd"
outnmca = "matched"
#outnmcp
# do the logistic regression to get the propensity
if (!missing(rsp_var)) {
if (missing(rsp_ind)) {
rsp_ind <- mvars
}
formu <- paste(rsp_var, " ~ ", paste(rsp_ind, collapse = " + "))
glm_model <- glm(formu, data, family = "binomial")
p_score <- predict(glm_model, data, type = "response")
.ndata <- cbind(data, pscore = p_score)
data <- .ndata
mvars <- c(mvars, "pscore")
wts <- c(wts, 1)
if (!missing(dmaxk)) dmaxk <- c(dmaxk, pscore_cut)
}
bad <- 0
if (missing(data)) {
bad <- 1
stop("ERROR: NO DATASET SUPPLIED")
}
if (missing(id)) {
bad <- 1
stop("ERROR: NO ID VARIABLE SUPPLIED")
}
if (missing(group)) {
bad <- 1
stop("ERROR: NO CASE(1)/CONTROL(0) GROUP VARIABLE SUPPLIED")
}
if (missing(wts)) {
bad <- 1
stop("ERROR: NO WEIGHTS SUPPLIED")
}
nvar <- length(mvars)
nwts <- length(wts)
if (nvar != nwts) {
bad <- 1
stop("ERROR: #VARS MUST EQUAL #WTS")
}
nk <- length(dmaxk)
if (nk > nvar) nk <- nvar
v <- mvars
w <- nwts
if (any(w < 0)) {
bad = 1
stop("EERROR: WEIGHTS MUST BE NON-NEGATIVE")
}
if (nk > 0) {
k <- dmaxk
if(any(k < 0)){
bad = 1
stop("ERROR: DMAXK VALUES MUST BE NON-NEGATIVE")
}
}
### for match #####
# remove the rows with missing values
.check <- data
.check[ ".id"] <- data[, "id"]
.check <- .check[complete.cases(.check), ]
# standardize the vars
if (transf == 1) {
.stdzd <- scale(.check[, mvars], center = TRUE, scale = TRUE)
.caco <- cbind(.check[, setdiff(colnames(.check), mvars)], .stdzd)
} else if (transf == 0) {
} else {
.caco <- .check
}
# for case dataset
.case <- .caco[.caco[, group] == 1, ]
.case[, ".idca"] <- .case[, id]
if (!missing(time)) .case[, ".catime"] <- .case[, time]
tmp <- paste(".ca", 1:nvar, sep = "")
.case[, tmp] <- .case[, v]
if (!missing(seedca)) {
set.seed(seedca)
.case[, ".r"] <- rnorm(nrow(.case))
} else {
.case[, ".r"] <- 1
}
if (missing(time)) {
.case <- .case[, c('.idca', tmp, ".r", mvars)]
} else {
.case <- .case[, c('.idca', tmp, ".r", mvars, ".catime")]
}
nca <- nrow(.case)
# for control dataset
.cont <- .caco[.caco[, group] == 0, ]
.cont[, ".idco"] <- .cont[, id]
if (!missing(time)) .cont[, ".cotime"] <- .cont[, time]
tmp1 <- paste(".co", 1:nvar, sep = "")
.cont[, tmp1] <- .cont[, v]
if (!missing(seedco)) {
set.seed(seedco)
.cont[, ".r"] <- rnorm(nrow(.cont))
} else {
.cont[, ".r"] <- 1
}
if (missing(time)) {
.cont <- .cont[, c('.idco', tmp1, ".r", mvars)]
} else {
.cont <- .cont[, c('.idco', tmp1, ".r", mvars, ".cotime")]
}
nco <- nrow(.cont)
bad2 <- 0
if (nco < nca * ncontls) {
bad2 <- 1
stop("ERROR: NOT ENOUGH CONTROLS TO MAKE REQUESTED MATCHES")
}
.cont <- .cont[order(.cont[, c(".r", ".idco")]), ]
#### do the matching #####
# set some flags
.used <- vector("integer", nco)
.small <- NULL
.match <- NULL
# distance matrix
# dm <- as.matrix(.case[, mvars]) %*% as.matrix(t(.cont[, mvars]))
# tmp <- NULL
d.matrix <- as.vector(apply(.case[, tmp], 1,
function(x) apply(.cont[, tmp1], 1,
function(y) x - y)))
d.matrix <- matrix(d.matrix, ncol = length(tmp), byrow = TRUE)
d.matrix_m <- apply(d.matrix, 1, function(x) sum(x ^ 2))
d.matrix_m <- matrix(d.matrix_m, ncol = nrow(.cont), byrow = TRUE)
}
|
# Auto-generated fuzzer test case for the unexported helper
# multivariance:::match_rows: A is a 5x7 matrix holding a few extreme
# doubles plus zeros, B a 1x1 zero matrix.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53792524541349e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613101147-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53792524541349e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# Join variable names into a lavaan-style sum formula string,
# e.g. c("x", "y", "z") -> "x + y + z". Inputs are coerced to character by
# paste(), so numeric vectors work too.
# FIX: the original interleaving implementation called seq(2, 1, by = 2)
# for a single-variable input and errored; paste(collapse) produces the
# same string for every length >= 1.
lav.form <- function(var.names) {
  paste(var.names, collapse = " + ")
}
lav.form(1:3)
lav.form(c("x", "y", "z")) | /lavaan_formula.R | no_license | denohora/myRtricks | R | false | false | 388 | r | lav.form <- function(var.names) {
var.vector = c(var.names, var.names)
nam.loc = seq(1, (length(var.vector)-1), by = 2)
plus.loc = seq(2, (length(var.vector)-1), by = 2)
var.vector[nam.loc] = var.names
var.vector[plus.loc] = " + "
var.vector = var.vector[-length(var.vector)]
form = paste(var.vector, collapse = "")
return(form)
}
lav.form(1:3)
lav.form(c("x", "y", "z")) |
#' Draw a heart curve
#'
#' Calculate the coordinates of a heart shape and draw it with a polygon.
#' @param n the number of points to use when calculating the coordinates of the
#' heart shape
#' @param ... other arguments to be passed to \code{\link[graphics]{polygon}},
#' e.g. the color of the polygon (usually red)
#' @return NULL
#' @author Yihui Xie <\url{http://yihui.name}>
#' @export
#' @examples heart_curve()
#' heart_curve(col = 'red')
#' heart_curve(col = 'pink', border = 'red')
heart_curve = function(n = 101, ...) {
  # Right half of the heart: a sine arc for the lower lobe...
  y0 = seq(0, pi/2, length = n)
  x0 = sin(y0)
  # ...topped by a semicircle of radius 1/2 centred at (1/2, pi/2).
  y0 = c(y0, sqrt(1/4 - c(rev(x0) - .5)^2) + pi/2)
  x0 = c(x0, rev(x0))
  # Mirror the right half across the y axis to close the outline.
  x0 = c(x0, -x0[c((2*n):(n+1), n:1)])
  y0 = c(y0, rev(y0))
  # FIX: restore the caller's graphical parameters on exit instead of
  # permanently shrinking the margins of the active device.
  op = par(mar = rep(.05, 4))
  on.exit(par(op), add = TRUE)
  plot(x0, y0, type = 'n', ann = FALSE, axes = FALSE)
  polygon(x0, y0, ...)
}
| /R/heart_curve.R | no_license | mlinking/MSG | R | false | false | 822 | r | #' Draw a heart curve
#'
#' Calculate the coordinates of a heart shape and draw it with a polygon.
#' @param n the number of points to use when calculating the coordinates of the
#' heart shape
#' @param ... other arguments to be passed to \code{\link[graphics]{polygon}},
#' e.g. the color of the polygon (usually red)
#' @return NULL
#' @author Yihui Xie <\url{http://yihui.name}>
#' @export
#' @examples heart_curve()
#' heart_curve(col = 'red')
#' heart_curve(col = 'pink', border = 'red')
heart_curve = function(n = 101, ...) {
y0 = seq(0, pi/2, length = n)
x0 = sin(y0)
y0 = c(y0, sqrt(1/4 - c(rev(x0) - .5)^2) + pi/2)
x0 = c(x0, rev(x0))
x0 = c(x0, -x0[c((2*n):(n+1), n:1)])
y0 = c(y0, rev(y0))
par(mar = rep(.05, 4))
plot(x0, y0, type = 'n', ann = FALSE, axes = FALSE)
polygon(x0, y0, ...)
}
|
#' Bayesian Mediation Analysis Controlling For False Discovery
#'
#' \code{fdr.bama} uses the permutation test to estimate the null PIP
#' distribution for each mediator and determines a threshold (based off of the
#' \code{fdr} parameter) for significance.
#'
#' @param Y Length \code{n} numeric outcome vector
#' @param A Length \code{n} numeric exposure vector
#' @param M \code{n x p} numeric matrix of mediators of Y and A
#' @param C1 \code{n x nc1} numeric matrix of extra covariates to include in the
#' outcome model
#' @param C2 \code{n x nc2} numeric matrix of extra covariates to include in the
#' mediator model
#' @param beta.m Length \code{p} numeric vector of initial \code{beta.m} in the
#' outcome model
#' @param alpha.a Length \code{p} numeric vector of initial \code{alpha.a} in
#' the mediator model
#' @param burnin Number of iterations to run the MCMC before sampling
#' @param ndraws Number of draws to take from MCMC after the burnin period
#' @param fdr False discovery rate. Default is 0.1
#' @param npermutations The number of permutations to generate while estimating
#' the null pip distribution. Default is 200
#' @param weights Length \code{n} numeric vector of weights
#' @param k Shape parameter prior for inverse gamma. Default is 2.0
#' @param lm0 Scale parameter prior for inverse gamma for the small normal
#' components. Default is 1e-4
#' @param lm1 Scale parameter prior for inverse gamma for the large normal
#' component of beta_m. Default is 1.0
#' @param lma1 Scale parameter prior for inverse gamma for the large normal
#' component of alpha_a. Default is 1.0
#' @param l Scale parameter prior for the other inverse gamma distributions.
#' Default is 1.0
#' @param mc.cores The number of cores to use while running \code{fdr.bama}.
#' \code{fdr.bama} uses the \code{parallel} package for parallelization,
#' so see that for more information. Default is 1 core
#' @param type Type of cluster to make when \code{mc.cores > 1}. See
#' \code{makeCluster} in the \code{parallel} package for more details.
#' Default is "PSOCK"
#' @return
#' \code{fdr.bama} returns a object of type "fdr.bama" with 5 elements:
#' \describe{
#' \item{bama.out}{Output from the \code{bama} run.}
#'    \item{pip.null}{A \code{p x npermutations} matrix containing the
#' estimated null PIP distribution for each mediator.
#' }
#' \item{threshold}{The cutoff significance threshold for each PIP controlling
#' for the false discovery rate.
#' }
#' \item{fdr}{The false discovery rate used to calculate \code{threshold}.}
#' \item{call}{The R call that generated the output.}
#' }
#' @examples
#' library(bama)
#'
#' Y <- bama.data$y
#' A <- bama.data$a
#'
#' # grab the mediators from the example data.frame
#' M <- as.matrix(bama.data[, paste0("m", 1:100)], nrow(bama.data))
#'
#' # We just include the intercept term in this example as we have no covariates
#' C1 <- matrix(1, 1000, 1)
#' C2 <- matrix(1, 1000, 1)
#' beta.m <- rep(0, 100)
#' alpha.a <- rep(0, 100)
#'
#' set.seed(12345)
#' \donttest{
#' out <- fdr.bama(Y, A, M, C1, C2, beta.m, alpha.a, burnin = 100,
#' ndraws = 120, npermutations = 10)
#'
#' # The package includes a function to summarise output from 'fdr.bama'
#' summary(out)
#' }
#' @references
#' Song, Y, Zhou, X, Zhang, M, et al. Bayesian shrinkage estimation of high
#' dimensional causal mediation effects in omics studies. Biometrics. 2019;
#' 1-11. \doi{10.1111/biom.13189}
#' @author Alexander Rix
#' @export
fdr.bama <- function(Y, A, M, C1, C2, beta.m, alpha.a, burnin, ndraws,
    weights = NULL, npermutations = 200, fdr = 0.1, k = 2.0,
    lm0 = 1e-4, lm1 = 1.0, lma1 = 1.0, l = 1.0, mc.cores = 1, type = "PSOCK")
{
    call <- match.call()

    # Basic argument checks.
    if (npermutations <= 0)
        stop("'npermutations' must be a positive integer.")
    if (fdr <= 0 || fdr >= 1)
        stop("'fdr' must be in the interval (0, 1).")

    # Fit the BSLMM mediation model on the observed (unpermuted) data.
    bama.out <- bama(Y = Y, A = A, M = M, C1 = C1, C2 = C2, method = "BSLMM",
        burnin = burnin, ndraws = ndraws, weights = weights,
        control = list(k = k, lm0 = lm0, lm1 = lm1, lma1 = lma1, l = l))

    # Posterior probabilities of the three "inactive" inclusion patterns,
    # used to mix the three permutation null distributions below.
    pi2 <- colMeans(bama.out$r1 == 0 & bama.out$r3 == 1)
    pi3 <- colMeans(bama.out$r1 == 1 & bama.out$r3 == 0)
    pi4 <- colMeans(bama.out$r1 == 0 & bama.out$r3 == 0)

    n <- length(Y)
    if (!is.null(weights)) {
        # FIX: removed a stray debug 'print(weights)' left in the original.
        # NOTE(review): this validation only runs after the initial bama()
        # fit above has already consumed 'weights'.
        if (!is.numeric(weights) || !is.vector(weights) ||
            length(weights) != n || any(weights < 0))
        {
            stop("'weights' must be a length 'n' nonnegative numeric vector.")
        }

        # Pre-multiply everything by sqrt(weights) so the permutation fits
        # below solve the weighted problem with unweighted machinery.
        w <- sqrt(weights)
        Y <- w * Y
        A <- w * A
        M  <- apply(M, 2, function(m) m * w)
        C1 <- apply(C1, 2, function(c1) c1 * w)
        C2 <- apply(C2, 2, function(c2) c2 * w)
    }

    # One RNG seed per permutation, plus one applied after the loop so the
    # global RNG state is reproducible either way.
    seeds <- sample(.Machine$integer.max, npermutations + 1)

    # Estimate the null PIP distribution for permutation i: refit with Y
    # permuted, with M's rows permuted, and with both permuted, then mix the
    # three null PIPs using the posterior pattern probabilities pi2/pi3/pi4.
    permute.bama <- function(i)
    {
        set.seed(seeds[i])

        n <- length(Y)
        bama.r1 <- run_bama_mcmc(Y[sample(n)], A, M, C1, C2, beta.m, alpha.a,
                                 burnin, ndraws, k, lm0, lm1, lma1, l)

        bama.r3 <- run_bama_mcmc(Y, A, M[sample(n), ], C1, C2, beta.m, alpha.a,
                                 burnin, ndraws, k, lm0, lm1, lma1, l)

        bama.r1.r3 <- run_bama_mcmc(Y[sample(n)], A, M[sample(n), ], C1, C2,
                                    beta.m, alpha.a, burnin, ndraws, k, lm0,
                                    lm1, lma1, l)

        p2 <- colMeans(bama.r1$r1 == 0 & bama.r1$r3 == 1)
        p3 <- colMeans(bama.r3$r1 == 1 & bama.r3$r3 == 0)
        p4 <- colMeans(bama.r1.r3$r1 == 0 & bama.r1.r3$r3 == 0)

        # Adding small number to prevent division by 0 in certain cases
        denom <- pi2 + pi3 + pi4 + 1e-9
        p2 * pi2 / denom + p3 * pi3 / denom + p4 * pi4 / denom
    }

    if (mc.cores == 1) {
        pip.null <- sapply(seq(npermutations), permute.bama)
    }
    else {
        # Parallel workers need every object referenced by permute.bama.
        cl <- parallel::makeCluster(mc.cores, type = type)
        parallel::clusterExport(cl, list("Y", "A", "M", "C1", "C2", "beta.m",
                                         "alpha.a", "burnin", "ndraws", "k",
                                         "lm0", "lm1","lma1","l", "pi2", "pi3",
                                         "pi4", "seeds"),
                                envir = environment()
        )
        pip.null <- parallel::parSapply(cl, seq(npermutations), permute.bama)

        parallel::stopCluster(cl)
    }
    set.seed(seeds[npermutations + 1])

    # Per-mediator PIP significance cutoff: the (1 - fdr) quantile of that
    # mediator's estimated null PIP distribution.
    threshold <- apply(pip.null, 1, stats::quantile, probs = 1 - fdr)
    names(threshold) <- colnames(M)

    structure(list(bama.out = bama.out, pip.null = pip.null,
                   threshold = threshold, fdr = fdr, call = call),
              class = "fdr.bama")
}
#' Summarize objects of type "fdr.bama"
#'
#' \code{summary.fdr.bama} summarizes the \code{beta.m} estimates from
#' \code{fdr.bama} and for each mediator generates an overall estimate,
#' credible interval, posterior inclusion probability (PIP), and PIP threshold
#' for significance controlling for the specified false discovery rate (FDR).
#' @return A data.frame with 4 elements. The beta.m estimates, the estimates'
#' *credible* interval (which by default is 95\%), and the posterior
#' inclusion probability (pip) of each 'beta.m'.
#' @param object An object of class "bama".
#' @param rank Whether or not to rank the output by posterior inclusion
#' probability. Default is TRUE.
#' @param ci The credible interval to calculate. \code{ci} should be a length 2
#' numeric vector specifying the upper and lower bounds of the CI. By
#' default, \code{ci = c(0.025, .975)}.
#' @param fdr False discovery rate. By default, it is set to whatever the
#' \code{fdr} of \code{object} is. However, it can be changed to recalculate
#' the PIP cutoff threshold.
#' @param filter Whether or not to filter out mediators with PIP less than the
#' PIP threshold.
#' @param ... Additional optional arguments to \code{summary}
#' @export
summary.fdr.bama <- function(object, rank = FALSE, ci = c(0.025, 0.975),
                             fdr = object$fdr, filter = TRUE, ...)
{
    ## Validate arguments early so failures are cheap and informative.
    if (!inherits(object, "fdr.bama"))
        stop("'object' is not an 'fdr.bama' object.")
    if (!is.logical(rank) || length(rank) != 1)
        stop("'rank' should be a length 1 logical.")
    if (!is.numeric(ci) || length(ci) != 2)
        stop("'ci' should be a length 2 numeric.")
    if (ci[1] >= ci[2])
        stop("'ci[1]' should be less than 'ci[2]'.")
    if (!is.numeric(fdr) || length(fdr) != 1 || fdr <= 0 || fdr >= 1)
        stop("'fdr' must be in the interval (0, 1).")

    ## Posterior inclusion probability: fraction of MCMC draws in which both
    ## the mediator's inclusion indicators (r1 and r3) are active.
    pip <- colMeans(object$bama.out$r1 * object$bama.out$r3)

    ## Posterior mean and credible interval of each mediator effect.
    beta.m <- colMeans(object$bama.out$beta.m)
    ci.l <- apply(object$bama.out$beta.m, 2, stats::quantile, probs = ci[1])
    ci.h <- apply(object$bama.out$beta.m, 2, stats::quantile, probs = ci[2])

    ## Recompute the significance threshold from the stored permutation null
    ## so that a user-supplied 'fdr' is actually honored (the documented
    ## behavior); for the default fdr = object$fdr this reproduces
    ## object$threshold exactly.
    threshold <- apply(object$pip.null, 1, stats::quantile, probs = 1 - fdr)

    out <- data.frame(estimate = beta.m, ci.lower = ci.l,
                      ci.upper = ci.h, pip = pip,
                      pip.threshold = threshold)

    if (rank)
        out <- out[order(pip, decreasing = TRUE), ]
    if (filter)
        out <- out[which(out$pip > out$pip.threshold), ]
    out
}
#' Printing bama objects
#'
#' Print a bama object.
#' @param x An object of class 'bama'.
#' @param ... Additional arguments to pass to print.data.frame or summary.bama
#' @export
print.fdr.bama <- function(x, ...)
{
    ## Delegate to the summary method; '...' reaches both the summary
    ## computation and the data.frame print method.
    smry <- summary(x, ...)
    print(smry, ...)
}
| /R/fdr.bama.R | no_license | umich-cphds/bama | R | false | false | 9,500 | r | #' Bayesian Mediation Analysis Controlling For False Discovery
#'
#' \code{fdr.bama} uses the permutation test to estimate the null PIP
#' distribution for each mediator and determines a threshold (based off of the
#' \code{fdr} parameter) for significance.
#'
#' @param Y Length \code{n} numeric outcome vector
#' @param A Length \code{n} numeric exposure vector
#' @param M \code{n x p} numeric matrix of mediators of Y and A
#' @param C1 \code{n x nc1} numeric matrix of extra covariates to include in the
#' outcome model
#' @param C2 \code{n x nc2} numeric matrix of extra covariates to include in the
#' mediator model
#' @param beta.m Length \code{p} numeric vector of initial \code{beta.m} in the
#' outcome model
#' @param alpha.a Length \code{p} numeric vector of initial \code{alpha.a} in
#' the mediator model
#' @param burnin Number of iterations to run the MCMC before sampling
#' @param ndraws Number of draws to take from MCMC after the burnin period
#' @param fdr False discovery rate. Default is 0.1
#' @param npermutations The number of permutations to generate while estimating
#' the null pip distribution. Default is 200
#' @param weights Length \code{n} numeric vector of weights
#' @param k Shape parameter prior for inverse gamma. Default is 2.0
#' @param lm0 Scale parameter prior for inverse gamma for the small normal
#' components. Default is 1e-4
#' @param lm1 Scale parameter prior for inverse gamma for the large normal
#' component of beta_m. Default is 1.0
#' @param lma1 Scale parameter prior for inverse gamma for the large normal
#' component of alpha_a. Default is 1.0
#' @param l Scale parameter prior for the other inverse gamma distributions.
#' Default is 1.0
#' @param mc.cores The number of cores to use while running \code{fdr.bama}.
#' \code{fdr.bama} uses the \code{parallel} package for parallelization,
#' so see that for more information. Default is 1 core
#' @param type Type of cluster to make when \code{mc.cores > 1}. See
#' \code{makeCluster} in the \code{parallel} package for more details.
#' Default is "PSOCK"
#' @return
#' \code{fdr.bama} returns a object of type "fdr.bama" with 5 elements:
#' \describe{
#' \item{bama.out}{Output from the \code{bama} run.}
#' \item{pip.null}{A \code{p x npermutations} matrices containing the
#' estimated null PIP distribution for each mediator.
#' }
#' \item{threshold}{The cutoff significance threshold for each PIP controlling
#' for the false discovery rate.
#' }
#' \item{fdr}{The false discovery rate used to calculate \code{threshold}.}
#' \item{call}{The R call that generated the output.}
#' }
#' @examples
#' library(bama)
#'
#' Y <- bama.data$y
#' A <- bama.data$a
#'
#' # grab the mediators from the example data.frame
#' M <- as.matrix(bama.data[, paste0("m", 1:100)], nrow(bama.data))
#'
#' # We just include the intercept term in this example as we have no covariates
#' C1 <- matrix(1, 1000, 1)
#' C2 <- matrix(1, 1000, 1)
#' beta.m <- rep(0, 100)
#' alpha.a <- rep(0, 100)
#'
#' set.seed(12345)
#' \donttest{
#' out <- fdr.bama(Y, A, M, C1, C2, beta.m, alpha.a, burnin = 100,
#' ndraws = 120, npermutations = 10)
#'
#' # The package includes a function to summarise output from 'fdr.bama'
#' summary(out)
#' }
#' @references
#' Song, Y, Zhou, X, Zhang, M, et al. Bayesian shrinkage estimation of high
#' dimensional causal mediation effects in omics studies. Biometrics. 2019;
#' 1-11. \doi{10.1111/biom.13189}
#' @author Alexander Rix
#' @export
fdr.bama <- function(Y, A, M, C1, C2, beta.m, alpha.a, burnin, ndraws,
    weights = NULL, npermutations = 200, fdr = 0.1, k = 2.0,
    lm0 = 1e-4, lm1 = 1.0, lma1 = 1.0, l = 1.0, mc.cores = 1, type = "PSOCK")
{
    call <- match.call()

    if (npermutations <= 0)
        stop("'npermutations' must be a positive integer.")
    if (fdr <= 0 || fdr >= 1)
        stop("'fdr' must be in the interval (0, 1).")

    n <- length(Y)

    ## Validate 'weights' up front, before any expensive MCMC work is done
    ## (previously this check only ran after 'bama' had already been fit).
    ## Also removed a leftover debug print(weights).
    if (!is.null(weights)) {
        if (!is.numeric(weights) || !is.vector(weights) ||
            length(weights) != n || any(weights < 0))
        {
            stop("'weights' must be a length 'n' nonnegative numeric vector.")
        }
    }

    ## Fit the BSLMM on the observed (unpermuted) data.
    bama.out <- bama(Y = Y, A = A, M = M, C1 = C1, C2 = C2, method = "BSLMM",
                     burnin = burnin, ndraws = ndraws, weights = weights,
                     control = list(k = k, lm0 = lm0, lm1 = lm1, lma1 = lma1, l = l))

    ## Posterior probabilities of the three "inactive" indicator
    ## configurations; they weight the mixture of permutation nulls below.
    pi2 <- colMeans(bama.out$r1 == 0 & bama.out$r3 == 1)
    pi3 <- colMeans(bama.out$r1 == 1 & bama.out$r3 == 0)
    pi4 <- colMeans(bama.out$r1 == 0 & bama.out$r3 == 0)

    ## The permutation runs call the internal sampler directly, so the
    ## weighting must be applied to the data by hand here.
    if (!is.null(weights)) {
        w <- sqrt(weights)
        Y <- w * Y
        A <- w * A
        M <- apply(M, 2, function(m) m * w)
        C1 <- apply(C1, 2, function(c1) c1 * w)
        C2 <- apply(C2, 2, function(c2) c2 * w)
    }

    ## One pre-drawn seed per permutation (plus one to reseed afterwards)
    ## keeps results reproducible across serial and parallel execution.
    seeds <- sample(.Machine$integer.max, npermutations + 1)

    ## Permute Y only, M only, and both, to estimate the null PIP
    ## distribution under each inactive configuration.
    permute.bama <- function(i)
    {
        set.seed(seeds[i])
        n <- length(Y)
        bama.r1 <- run_bama_mcmc(Y[sample(n)], A, M, C1, C2, beta.m, alpha.a,
                                 burnin, ndraws, k, lm0, lm1, lma1, l)
        bama.r3 <- run_bama_mcmc(Y, A, M[sample(n), ], C1, C2, beta.m, alpha.a,
                                 burnin, ndraws, k, lm0, lm1, lma1, l)
        bama.r1.r3 <- run_bama_mcmc(Y[sample(n)], A, M[sample(n), ], C1, C2,
                                    beta.m, alpha.a, burnin, ndraws, k, lm0,
                                    lm1, lma1, l)

        p2 <- colMeans(bama.r1$r1 == 0 & bama.r1$r3 == 1)
        p3 <- colMeans(bama.r3$r1 == 1 & bama.r3$r3 == 0)
        p4 <- colMeans(bama.r1.r3$r1 == 0 & bama.r1.r3$r3 == 0)

        # Small constant guards against division by zero when all three
        # posterior mixing weights vanish for a mediator.
        denom <- pi2 + pi3 + pi4 + 1e-9
        p2 * pi2 / denom + p3 * pi3 / denom + p4 * pi4 / denom
    }

    if (mc.cores == 1) {
        pip.null <- sapply(seq_len(npermutations), permute.bama)
    }
    else {
        cl <- parallel::makeCluster(mc.cores, type = type)
        parallel::clusterExport(cl, list("Y", "A", "M", "C1", "C2", "beta.m",
                                         "alpha.a", "burnin", "ndraws", "k",
                                         "lm0", "lm1", "lma1", "l", "pi2", "pi3",
                                         "pi4", "seeds"),
                                envir = environment()
        )
        pip.null <- parallel::parSapply(cl, seq_len(npermutations), permute.bama)
        parallel::stopCluster(cl)
    }

    set.seed(seeds[npermutations + 1])

    ## PIP cutoff per mediator: the (1 - fdr) quantile of its null PIPs.
    threshold <- apply(pip.null, 1, stats::quantile, probs = 1 - fdr)
    names(threshold) <- colnames(M)

    structure(list(bama.out = bama.out, pip.null = pip.null,
                   threshold = threshold, fdr = fdr, call = call),
              class = "fdr.bama")
}
#' Summarize objects of type "fdr.bama"
#'
#' \code{summary.fdr.bama} summarizes the \code{beta.m} estimates from
#' \code{fdr.bama} and for each mediator generates an overall estimate,
#' credible interval, posterior inclusion probability (PIP), and PIP threshold
#' for significance controlling for the specified false discovery rate (FDR).
#' @return A data.frame with 4 elements. The beta.m estimates, the estimates'
#' *credible* interval (which by default is 95\%), and the posterior
#' inclusion probability (pip) of each 'beta.m'.
#' @param object An object of class "bama".
#' @param rank Whether or not to rank the output by posterior inclusion
#' probability. Default is TRUE.
#' @param ci The credible interval to calculate. \code{ci} should be a length 2
#' numeric vector specifying the upper and lower bounds of the CI. By
#' default, \code{ci = c(0.025, .975)}.
#' @param fdr False discovery rate. By default, it is set to whatever the
#' \code{fdr} of \code{object} is. However, it can be changed to recalculate
#' the PIP cutoff threshold.
#' @param filter Whether or not to filter out mediators with PIP less than the
#' PIP threshold.
#' @param ... Additional optional arguments to \code{summary}
#' @export
summary.fdr.bama <- function(object, rank = FALSE, ci = c(0.025, 0.975),
                             fdr = object$fdr, filter = TRUE, ...)
{
    ## Validate arguments early so failures are cheap and informative.
    if (!inherits(object, "fdr.bama"))
        stop("'object' is not an 'fdr.bama' object.")
    if (!is.logical(rank) || length(rank) != 1)
        stop("'rank' should be a length 1 logical.")
    if (!is.numeric(ci) || length(ci) != 2)
        stop("'ci' should be a length 2 numeric.")
    if (ci[1] >= ci[2])
        stop("'ci[1]' should be less than 'ci[2]'.")
    if (!is.numeric(fdr) || length(fdr) != 1 || fdr <= 0 || fdr >= 1)
        stop("'fdr' must be in the interval (0, 1).")

    ## Posterior inclusion probability: fraction of MCMC draws in which both
    ## the mediator's inclusion indicators (r1 and r3) are active.
    pip <- colMeans(object$bama.out$r1 * object$bama.out$r3)

    ## Posterior mean and credible interval of each mediator effect.
    beta.m <- colMeans(object$bama.out$beta.m)
    ci.l <- apply(object$bama.out$beta.m, 2, stats::quantile, probs = ci[1])
    ci.h <- apply(object$bama.out$beta.m, 2, stats::quantile, probs = ci[2])

    ## Recompute the significance threshold from the stored permutation null
    ## so that a user-supplied 'fdr' is actually honored (the documented
    ## behavior); for the default fdr = object$fdr this reproduces
    ## object$threshold exactly.
    threshold <- apply(object$pip.null, 1, stats::quantile, probs = 1 - fdr)

    out <- data.frame(estimate = beta.m, ci.lower = ci.l,
                      ci.upper = ci.h, pip = pip,
                      pip.threshold = threshold)

    if (rank)
        out <- out[order(pip, decreasing = TRUE), ]
    if (filter)
        out <- out[which(out$pip > out$pip.threshold), ]
    out
}
#' Printing bama objects
#'
#' Print a bama object.
#' @param x An object of class 'bama'.
#' @param ... Additional arguments to pass to print.data.frame or summary.bama
#' @export
print.fdr.bama <- function(x, ...)
{
    ## Delegate to the summary method; '...' reaches both the summary
    ## computation and the data.frame print method.
    smry <- summary(x, ...)
    print(smry, ...)
}
|
#!/usr/bin/env Rscript

## SVD analysis of the 2016 US presidential vote in Michigan, by county.
## Decomposes the 2 x 83 (candidate x county) vote-count matrix and writes
## each factor of the decomposition to its own sheet of an Excel workbook.
## NOTE: the original script misspelled the year as "2106" and the output
## file as "Michgan"; both typos are fixed throughout.

library(MASS)
library("xlsx")

## Vote counts: row 1 = Clinton (D), row 2 = Trump (R); one column per county.
Michigan2016 <- matrix(c(
1732,1663,18050,4877,4448,2384,1156,9114,21642,4108,29495,5061,24157,7270,5137,4302,5379,4249,16492,2110,6436,3923,24938,6972,20965,102751,3794,2925,5666,4799,6018,4579,79110,8352,4345,2004,11404,25795,67148,2280,138683,527,1939,12734,6774,16750,34384,681,2085,176317,4979,16042,5281,5827,3539,15635,1565,26863,7874,1287,37304,6212,343070,3973,3030,1176,2705,1044,3556,44973,2400,4287,44396,4873,1369,12546,24553,7526,7429,13258,128483,519444,4436
,4201,2585,34183,9090,8469,4950,2158,19202,28328,5539,38647,11786,31494,14243,8674,8683,9122,8505,21636,4354,11121,8580,27609,10616,27413,84175,8124,4018,9880,14095,8475,10692,43868,16635,8345,3675,12338,39793,51034,6116,148180,814,3159,30037,7239,26430,65680,1756,3744,224665,6915,14646,8505,10305,6702,23846,5386,43261,16907,3498,36127,15173,289203,7228,6827,2066,7336,2843,8266,88467,4488,8141,45469,13446,2556,19230,49051,14884,17102,17890,50631,228993,10000
), nrow = 2, ncol = 83, byrow = TRUE)

row_names <- c(
    "PRESIDENT-Democratic-Clinton",
    "PRESIDENT-Republican-Trump"
)

column_names <- c(
    "ALCONA", "ALGER", "ALLEGAN", "ALPENA", "ANTRIM", "ARENAC",
    "BARAGA", "BARRY", "BAY", "BENZIE", "BERRIEN", "BRANCH",
    "CALHOUN", "CASS", "CHARLEVOIX", "CHEBOYGAN", "CHIPPEWA", "CLARE",
    "CLINTON", "CRAWFORD", "DELTA", "DICKINSON", "EATON", "EMMET",
    "GD. TRAVERSE", "GENESEE", "GLADWIN", "GOGEBIC", "GRATIOT", "HILLSDALE",
    "HOUGHTON", "HURON", "INGHAM", "IONIA", "IOSCO", "IRON",
    "ISABELLA", "JACKSON", "KALAMAZOO", "KALKASKA", "KENT", "KEWEENAW",
    "LAKE", "LAPEER", "LEELANAU", "LENAWEE", "LIVINGSTON", "LUCE",
    "MACKINAC", "MACOMB", "MANISTEE", "MARQUETTE", "MASON", "MECOSTA",
    "MENOMINEE", "MIDLAND", "MISSAUKEE", "MONROE", "MONTCALM", "MONTMORENCY",
    "MUSKEGON", "NEWAYGO", "OAKLAND", "OCEANA", "OGEMAW", "ONTONAGON",
    "OSCEOLA", "OSCODA", "OTSEGO", "OTTAWA", "PRESQUE ISLE", "ROSCOMMON",
    "SAGINAW", "SANILAC", "SCHOOLCRAFT", "SHIAWASSEE", "ST. CLAIR", "ST. JOSEPH",
    "TUSCOLA", "VAN BUREN", "WASHTENAW", "WAYNE", "WEXFORD")

rownames(Michigan2016) <- row_names
colnames(Michigan2016) <- column_names

## Singular value decomposition: Michigan2016 = U %*% S %*% t(V).
res <- svd(Michigan2016)
V <- res$v
S <- diag(res$d)
U <- res$u
rownames(U) <- row_names
rownames(V) <- column_names

Scaled <- U %*% S            # candidate scores scaled by the singular values
Recon <- Scaled %*% t(V)     # reconstruction; should equal Michigan2016

## Write each matrix to its own sheet; the first call creates the workbook.
file <- "Michigan2016_President_Analysis.xlsx"
write.xlsx(Michigan2016, file, sheetName = "Michigan2016", col.names = TRUE, row.names = TRUE, append = FALSE)
write.xlsx(U, file, sheetName = "U", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(S, file, sheetName = "S", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(Scaled, file, sheetName = "Scaled", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(V, file, sheetName = "V", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(Recon, file, sheetName = "Recon", col.names = TRUE, row.names = TRUE, append = TRUE)
| /R_scripts/src/main/R/michigan_2016.R | no_license | wallstft/elemental | R | false | false | 5,342 | r | #!/usr/bin/env Rscript
library(MASS)
library("xlsx")
Michigan2106 = matrix (c(
1732,1663,18050,4877,4448,2384,1156,9114,21642,4108,29495,5061,24157,7270,5137,4302,5379,4249,16492,2110,6436,3923,24938,6972,20965,102751,3794,2925,5666,4799,6018,4579,79110,8352,4345,2004,11404,25795,67148,2280,138683,527,1939,12734,6774,16750,34384,681,2085,176317,4979,16042,5281,5827,3539,15635,1565,26863,7874,1287,37304,6212,343070,3973,3030,1176,2705,1044,3556,44973,2400,4287,44396,4873,1369,12546,24553,7526,7429,13258,128483,519444,4436
,4201,2585,34183,9090,8469,4950,2158,19202,28328,5539,38647,11786,31494,14243,8674,8683,9122,8505,21636,4354,11121,8580,27609,10616,27413,84175,8124,4018,9880,14095,8475,10692,43868,16635,8345,3675,12338,39793,51034,6116,148180,814,3159,30037,7239,26430,65680,1756,3744,224665,6915,14646,8505,10305,6702,23846,5386,43261,16907,3498,36127,15173,289203,7228,6827,2066,7336,2843,8266,88467,4488,8141,45469,13446,2556,19230,49051,14884,17102,17890,50631,228993,10000
), 2,83, TRUE)
row_names <- c(
"PRESIDENT-Democratic-Clinton",
"PRESIDENT-Republican-Trump"
)
column_names <- c("ALCONA",
"ALGER",
"ALLEGAN",
"ALPENA",
"ANTRIM",
"ARENAC",
"BARAGA",
"BARRY",
"BAY",
"BENZIE",
"BERRIEN",
"BRANCH",
"CALHOUN",
"CASS",
"CHARLEVOIX",
"CHEBOYGAN",
"CHIPPEWA",
"CLARE",
"CLINTON",
"CRAWFORD",
"DELTA",
"DICKINSON",
"EATON",
"EMMET",
"GD. TRAVERSE",
"GENESEE",
"GLADWIN",
"GOGEBIC",
"GRATIOT",
"HILLSDALE",
"HOUGHTON",
"HURON",
"INGHAM",
"IONIA",
"IOSCO",
"IRON",
"ISABELLA",
"JACKSON",
"KALAMAZOO",
"KALKASKA",
"KENT",
"KEWEENAW",
"LAKE",
"LAPEER",
"LEELANAU",
"LENAWEE",
"LIVINGSTON",
"LUCE",
"MACKINAC",
"MACOMB",
"MANISTEE",
"MARQUETTE",
"MASON",
"MECOSTA",
"MENOMINEE",
"MIDLAND",
"MISSAUKEE",
"MONROE",
"MONTCALM",
"MONTMORENCY",
"MUSKEGON",
"NEWAYGO",
"OAKLAND",
"OCEANA",
"OGEMAW",
"ONTONAGON",
"OSCEOLA",
"OSCODA",
"OTSEGO",
"OTTAWA",
"PRESQUE ISLE",
"ROSCOMMON",
"SAGINAW",
"SANILAC",
"SCHOOLCRAFT",
"SHIAWASSEE",
"ST. CLAIR",
"ST. JOSEPH",
"TUSCOLA",
"VAN BUREN",
"WASHTENAW",
"WAYNE",
"WEXFORD" )
rownames (Michigan2106) <- row_names
colnames (Michigan2106) <- column_names
res = svd(Michigan2106)
#res$u
V = res$v
S = diag(res$d)
U = res$u
rownames (U) <- row_names
rownames (V) <- column_names
Scaled = U %*% S
Recon = Scaled %*% t(V)
file = "Michgan2016_President_Analysis.xlsx"
write.xlsx(Michigan2106, file, sheetName = "Michigan2106", col.names = TRUE, row.names = TRUE, append = FALSE)
write.xlsx(U, file, sheetName = "U", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(S, file, sheetName = "S", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(Scaled, file, sheetName = "Scaled", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(V, file, sheetName = "V", col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx(Recon, file, sheetName = "Recon", col.names = TRUE, row.names = TRUE, append = TRUE)
|
# 1 - load the packages used throughout the exercise -------------------------
library(tidyverse)
theme_set(theme_bw())
library(ggfortify)

# 2 - read the data; empty strings become NA ---------------------------------
pokemon <- read.csv(file = "Pokemon.csv", na.strings = "")
str(pokemon)

# 3 - number of pokemon per primary type, most frequent first ----------------
pokemon %>%
    group_by(Type.1) %>%
    count() %>%
    arrange(desc(n))

# 4 - attack by type, boxes ordered by the median attack ---------------------
ggplot(pokemon, aes(x = reorder(Type.1, Attack, FUN = median), y = Attack)) +
    geom_boxplot() +
    labs(x = "Tipo de Pokemon", y = "Ataque") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))

# 5 - linear regression of attack on defense ---------------------------------
ggplot(pokemon, aes(x = Defense, y = Attack)) +
    geom_point(alpha = 0.25) +
    geom_smooth(method = "lm", se = FALSE)

fit <- lm(Attack ~ Defense, data = pokemon)
summary(fit)
autoplot(fit)

# inspect two extreme observations and the model's predictions for them
pokemon[231, ]
predict(fit, newdata = data.frame(Defense = 230))
pokemon[430, ]
predict(fit, newdata = data.frame(Defense = 20))
# Packages --------------------------------------------------------------------
library(rvest)
library(dplyr)
library(ggplot2)
theme_set(theme_bw())
library(stringr)
library(scales)

# 1 - scrape the Wikipedia table of Brazilian municipalities by population ----
url <- "https://pt.wikipedia.org/wiki/Lista_de_munic%C3%ADpios_do_Brasil_por_popula%C3%A7%C3%A3o"
populacao <- url %>%
    read_html() %>%
    html_table(fill = TRUE)
populacao <- populacao[[1]]
names(populacao) <- c("Posição", "Código do IBGE", "Município",
                      "Unidade federativa", "População")
head(populacao)
tail(populacao)

# 2 - scrape the table of municipalities by (decreasing) area -----------------
url <- "https://pt.wikipedia.org/wiki/Lista_de_munic%C3%ADpios_brasileiros_por_%C3%A1rea_decrescente"
area <- url %>%
    read_html() %>%
    html_table(fill = TRUE)
area <- area[[1]]
head(area)
tail(area)

# 3 - join the two tables -----------------------------------------------------
# Joining on `Município` + `Unidade federativa` misses some cities because
# their names are spelled differently on the two pages.
dados <- left_join(populacao, area, by = c("Município", "Unidade federativa"))
head(dados)

# 4 - keep and clean the relevant columns -------------------------------------
dados <- dados %>%
    select(Município, `Unidade federativa`, `Área (km²)`, População)
names(dados) <- c("municipio", "estado", "area", "populacao")
head(dados)

# strip thousands separators and convert decimal commas before coercing
dados$area <- str_replace(dados$area, "[[:space:]]", "")
dados$area <- str_replace(dados$area, ",", ".")
dados$area <- as.numeric(dados$area)
dados$populacao <- str_replace_all(dados$populacao, "[[:space:]]", "")
dados$populacao <- as.numeric(dados$populacao)
head(dados)
str(dados)
dados <- na.omit(dados)

# 5 - population versus area, with and without log scales ---------------------
ggplot(dados, aes(x = area, y = populacao)) +
    geom_point() +
    labs(x = "Área (km^2)", y = "População")
ggplot(dados, aes(x = area, y = populacao)) +
    geom_point() +
    labs(x = "log(Área (km^2))", y = "População") +
    scale_x_log10(labels = comma)
ggplot(dados, aes(x = area, y = populacao)) +
    geom_point() +
    labs(x = "Área (km^2)", y = "log(População)") +
    scale_y_log10(labels = comma)
ggplot(dados, aes(x = area, y = populacao)) +
    geom_point() +
    labs(x = "log(Área (km^2))", y = "log(População)") +
    scale_y_log10(labels = comma) +
    scale_x_log10(labels = comma)

# 6 - most and least populous municipalities ----------------------------------
dados %>%
    arrange(desc(populacao)) %>%
    head(5)
dados %>%
    arrange(desc(populacao)) %>%
    tail(5)

# 7 - population density ------------------------------------------------------
dados <- dados %>%
    mutate(densidade = populacao / area)
dados %>%
    arrange(desc(densidade)) %>%
    head(5)
dados %>%
    arrange(desc(densidade)) %>%
    tail(5)
dados %>%
    filter(municipio == "Natal")
dados %>%
    filter(municipio == "Macaíba")

# 8 - Rio Grande do Norte: population extremes --------------------------------
dados %>%
    filter(estado == "Rio Grande do Norte") %>%
    arrange(desc(populacao)) %>%
    head(5)
dados %>%
    filter(estado == "Rio Grande do Norte") %>%
    arrange(desc(populacao)) %>%
    tail(5)

# 9 - Rio Grande do Norte: density extremes -----------------------------------
dados %>%
    filter(estado == "Rio Grande do Norte") %>%
    arrange(desc(densidade)) %>%
    head(5)
dados %>%
    filter(estado == "Rio Grande do Norte") %>%
    arrange(desc(densidade)) %>%
    tail(5)
| /aulas/01_Obtencao_de_Dados/01a_Obtencao_De_Dados_Exercicios.R | no_license | marcusfreire0504/introbigdata_material | R | false | false | 3,463 | r | # 1
library(tidyverse)
theme_set(theme_bw())
library(ggfortify)
# 2
pokemon <- read.csv(file="Pokemon.csv", na.strings = "")
str(pokemon)
# 3
pokemon %>%
group_by(Type.1) %>%
count() %>%
arrange(desc(n))
# 4
ggplot(pokemon, aes(x=reorder(Type.1, Attack, FUN=median), y=Attack)) +
geom_boxplot() +
labs(x="Tipo de Pokemon", y="Ataque") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# 5
ggplot(pokemon, aes(x=Defense, y=Attack)) +
geom_point(alpha=0.25) +
geom_smooth(method="lm", se=FALSE)
ajuste <- lm(Attack ~ Defense, data=pokemon)
summary(ajuste)
autoplot(ajuste)
pokemon[231, ]
predict(ajuste, newdata=data.frame(Defense=230))
pokemon[430, ]
predict(ajuste, newdata=data.frame(Defense=20))
#
library(rvest)
library(dplyr)
library(ggplot2)
theme_set(theme_bw())
library(stringr)
library(scales)
# 1
url <- "https://pt.wikipedia.org/wiki/Lista_de_munic%C3%ADpios_do_Brasil_por_popula%C3%A7%C3%A3o"
populacao <- url %>%
read_html()
populacao <- populacao %>%
html_table(fill=TRUE)
populacao <- populacao[[1]]
names(populacao) <- c("Posição", "Código do IBGE", "Município", "Unidade federativa", "População")
head(populacao)
tail(populacao)
# 2
url <- "https://pt.wikipedia.org/wiki/Lista_de_munic%C3%ADpios_brasileiros_por_%C3%A1rea_decrescente"
area <- url %>%
read_html()
area <- area %>%
html_table(fill=TRUE)
area <- area[[1]]
head(area)
tail(area)
# 3
# utilizar `Município` e `Unidade federativa` nao funciona porque
# algumas cidades estao com grafias difernetes nos dois data frames
dados <- left_join(populacao, area, by = c("Município", "Unidade federativa"))
head(dados)
# 4
dados <- dados %>%
select(Município, `Unidade federativa`, `Área (km²)`, População)
names(dados) <- c("municipio", "estado", "area", "populacao")
head(dados)
dados$area <- str_replace(dados$area, "[[:space:]]", "")
dados$area <- str_replace(dados$area, ",", ".")
dados$area <- as.numeric(dados$area)
dados$populacao <- str_replace_all(dados$populacao, "[[:space:]]", "")
dados$populacao <- as.numeric(dados$populacao)
head(dados)
str(dados)
dados <- na.omit(dados)
# 5
ggplot(dados, aes(x=area, y=populacao)) +
geom_point() +
labs(x="Área (km^2)", y="População")
ggplot(dados, aes(x=area, y=populacao)) +
geom_point() +
labs(x="log(Área (km^2))", y="População") +
scale_x_log10(labels=comma)
ggplot(dados, aes(x=area, y=populacao)) +
geom_point() +
labs(x="Área (km^2)", y="log(População)") +
scale_y_log10(labels=comma)
ggplot(dados, aes(x=area, y=populacao)) +
geom_point() +
labs(x="log(Área (km^2))", y="log(População)") +
scale_y_log10(labels=comma) +
scale_x_log10(labels=comma)
# 6
dados %>%
arrange(desc(populacao)) %>%
head(5)
dados %>%
arrange(desc(populacao)) %>%
tail(5)
# 7
dados <- dados %>%
mutate(densidade=populacao/area)
dados %>%
arrange(desc(densidade)) %>%
head(5)
dados %>%
arrange(desc(densidade)) %>%
tail(5)
dados %>%
filter(municipio=="Natal")
dados %>%
filter(municipio=="Macaíba")
# 8
dados %>%
filter(estado=="Rio Grande do Norte") %>%
arrange(desc(populacao)) %>%
head(5)
dados %>%
filter(estado=="Rio Grande do Norte") %>%
arrange(desc(populacao)) %>%
tail(5)
# 9
dados %>%
filter(estado=="Rio Grande do Norte") %>%
arrange(desc(densidade)) %>%
head(5)
dados %>%
filter(estado=="Rio Grande do Norte") %>%
arrange(desc(densidade)) %>%
tail(5)
|
context("utils")

test_that(".addColumn", {
    df <- data.frame(a = 1:3, b = 4:6)
    added <- data.frame(a = 1:3, b = 4:6, c = 7:9)
    replaced <- data.frame(a = 1:3, b = 7:9)

    ## invalid arguments must be rejected
    expect_error(Pbase:::.addColumn(list()))
    expect_error(Pbase:::.addColumn(df))
    expect_error(Pbase:::.addColumn(df, character()))
    expect_error(Pbase:::.addColumn(df, ""))
    expect_error(Pbase:::.addColumn(df, "b", integer()))
    expect_error(Pbase:::.addColumn(df, "b", 7:9),
                 "The column .*b.* already exists.")

    expect_equal(Pbase:::.addColumn(df, "c", 7:9), added)
    expect_equal(Pbase:::.addColumn(df, "b", 7:9, force = TRUE), replaced)
})

test_that(".isInRange", {
    values <- 1:10
    expected <- rep(c(FALSE, TRUE, FALSE), c(2, 4, 4))

    expect_error(Pbase:::.isInRange(values, 3))
    expect_error(Pbase:::.isInRange(values, "foo"))

    ## the two range bounds may be given in either order
    expect_equal(Pbase:::.isInRange(values, c(3, 6)), expected)
    expect_equal(Pbase:::.isInRange(values, c(6, 3)), expected)
    expect_equal(Pbase:::.isInRange(values, c(1, 20)), rep(TRUE, 10))
    expect_equal(Pbase:::.isInRange(values, c(11, 20)), rep(FALSE, 10))
})

test_that(".flatIRangesList", {
    expect_error(Pbase:::.flatIRangesList(list()))

    rl <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
                      IRanges(start = c(7, 10), end = c(9, 15)))
    flat <- IRanges(start = c(1, 5, 7, 10), end = c(3, 10, 9, 15))
    shifted <- IRanges(start = c(1, 5, 17, 20), end = c(3, 10, 19, 25))
    shifted.by <- IRanges(start = c(2, 6, 27, 30), end = c(4, 11, 29, 35))

    expect_equal(Pbase:::.flatIRangesList(rl), flat)
    expect_equal(Pbase:::.flatIRangesList(rl, shift = TRUE), shifted)
    expect_equal(Pbase:::.flatIRangesList(rl, shift = TRUE,
                                          shiftBy = c(1, 20)), shifted.by)
})

test_that(".splitIRanges", {
    expect_error(Pbase:::.splitIRanges(list()))

    rng <- IRanges(start = c(1, 5, 17, 20), end = c(3, 10, 19, 25))

    ## with no grouping factor every range goes into its own list element
    one.per.range <- IRangesList(IRanges(start = 1, end = 3),
                                 IRanges(start = 5, end = 10),
                                 IRanges(start = 17, end = 19),
                                 IRanges(start = 20, end = 25))
    names(one.per.range) <- as.character(1:4)

    grouped <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
                           IRanges(start = c(17, 20), end = c(19, 25)))
    names(grouped) <- as.character(1:2)

    unshifted <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
                             IRanges(start = c(7, 10), end = c(9, 15)))
    names(unshifted) <- as.character(1:2)

    expect_equal(Pbase:::.splitIRanges(rng), one.per.range)
    expect_equal(Pbase:::.splitIRanges(rng, f = c(1, 1, 2, 2)), grouped)
    expect_equal(Pbase:::.splitIRanges(rng, f = c(1, 1, 2, 2), unshift = TRUE),
                 unshifted)
})

test_that(".setNames2", {
    values <- 1:3
    plain <- LETTERS[1:3]
    named <- setNames(LETTERS[1:3], LETTERS[4:6])

    expect_equal(Pbase:::.setNames2(values, plain), setNames(values, plain))
    ## a named 'nm' contributes its *names*, not its values
    expect_equal(Pbase:::.setNames2(values, named),
                 setNames(values, names(named)))
})

test_that(".singleAA", {
    chr <- "ABC"
    chrs <- c(a = "ABC", b = "DEF")
    aa <- AAString(chr)
    aas <- AAStringSet(chrs)
    aasl <- AAStringSetList(list(P1 = chrs, P2 = chrs[2]))

    split1 <- list(LETTERS[1:3])
    split2 <- list(a = LETTERS[1:3], b = LETTERS[4:6])
    split5 <- list(P1 = LETTERS[1:3], P1 = LETTERS[4:6], P2 = LETTERS[4:6])

    expect_equal(Pbase:::.singleAA(chr), split1)
    expect_equal(Pbase:::.singleAA(chrs), split2)
    expect_equal(Pbase:::.singleAA(aa), split1)
    expect_equal(Pbase:::.singleAA(aas), split2)
    expect_equal(Pbase:::.singleAA(aasl), split5)
})

test_that(".singular", {
    expect_equal(Pbase:::.singular(c(1, 2, 2, 3, 1, 5, 4, 3, 2, 1)), c(5, 4))
    expect_equal(Pbase:::.singular(rep(1, 10)), double())
    expect_equal(Pbase:::.singular(1:5), 1:5)
})
| /tests/testthat/test_utils.R | no_license | thomasp85/Pbase | R | false | false | 3,962 | r | context("utils")
test_that(".addColumn", {
d <- data.frame(a = 1:3, b = 4:6)
result <- data.frame(a = 1:3, b = 4:6, c = 7:9)
resultForced <- data.frame(a = 1:3, b = 7:9)
expect_error(Pbase:::.addColumn(list()))
expect_error(Pbase:::.addColumn(d))
expect_error(Pbase:::.addColumn(d, character()))
expect_error(Pbase:::.addColumn(d, ""))
expect_error(Pbase:::.addColumn(d, "b", integer()))
expect_error(Pbase:::.addColumn(d, "b", 7:9),
"The column .*b.* already exists.")
expect_equal(Pbase:::.addColumn(d, "c", 7:9), result)
expect_equal(Pbase:::.addColumn(d, "b", 7:9, force = TRUE), resultForced)
})
test_that(".isInRange", {
x <- 1:10
result <- rep(c(FALSE, TRUE, FALSE), c(2, 4, 4))
expect_error(Pbase:::.isInRange(x, 3))
expect_error(Pbase:::.isInRange(x, "foo"))
expect_equal(Pbase:::.isInRange(x, c(3, 6)), result)
expect_equal(Pbase:::.isInRange(x, c(6, 3)), result)
expect_equal(Pbase:::.isInRange(x, c(1, 20)), rep(TRUE, 10))
expect_equal(Pbase:::.isInRange(x, c(11, 20)), rep(FALSE, 10))
})
test_that(".flatIRangesList", {
expect_error(Pbase:::.flatIRangesList(list()))
irl <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
IRanges(start = c(7, 10), end = c(9, 15)))
result <- IRanges(start = c(1, 5, 7, 10), end = c(3, 10, 9, 15))
resultShift <- IRanges(start = c(1, 5, 17, 20), end = c(3, 10, 19, 25))
resultShiftBy <- IRanges(start = c(2, 6, 27, 30), end = c(4, 11, 29, 35))
expect_equal(Pbase:::.flatIRangesList(irl), result)
expect_equal(Pbase:::.flatIRangesList(irl, shift = TRUE), resultShift)
expect_equal(Pbase:::.flatIRangesList(irl, shift = TRUE,
shiftBy = c(1, 20)), resultShiftBy)
})
test_that(".splitIRanges", {
expect_error(Pbase:::.splitIRanges(list()))
ir <- IRanges(start = c(1, 5, 17, 20), end = c(3, 10, 19, 25))
result <- IRangesList(IRanges(start = 1, end = 3),
IRanges(start = 5, end = 10),
IRanges(start = 17, end = 19),
IRanges(start = 20, end = 25))
names(result) <- as.character(1:4)
resultSplit <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
IRanges(start = c(17, 20), end = c(19, 25)))
names(resultSplit) <- as.character(1:2)
resultUnshift <- IRangesList(IRanges(start = c(1, 5), end = c(3, 10)),
IRanges(start = c(7, 10), end = c(9, 15)))
names(resultUnshift) <- as.character(1:2)
expect_equal(Pbase:::.splitIRanges(ir), result)
expect_equal(Pbase:::.splitIRanges(ir, f = c(1, 1, 2, 2)), resultSplit)
expect_equal(Pbase:::.splitIRanges(ir, f = c(1, 1, 2, 2), unshift = TRUE),
resultUnshift)
})
test_that(".setNames2", {
x <- 1:3
nm1 <- LETTERS[1:3]
nm2 <- setNames(LETTERS[1:3], LETTERS[4:6])
expect_equal(Pbase:::.setNames2(x, nm1), setNames(x, nm1))
expect_equal(Pbase:::.setNames2(x, nm2), setNames(x, names(nm2)))
})
test_that(".singleAA", {
x1 <- "ABC"
x2 <- c(a = "ABC", b = "DEF")
x3 <- AAString(x1)
x4 <- AAStringSet(x2)
x5 <- AAStringSetList(list(P1=x2, P2=x2[2]))
result1 <- list(LETTERS[1:3])
result2 <- list(a = LETTERS[1:3], b = LETTERS[4:6])
result5 <- list(P1=LETTERS[1:3], P1=LETTERS[4:6], P2=LETTERS[4:6])
expect_equal(Pbase:::.singleAA(x1), result1)
expect_equal(Pbase:::.singleAA(x2), result2)
expect_equal(Pbase:::.singleAA(x3), result1)
expect_equal(Pbase:::.singleAA(x4), result2)
expect_equal(Pbase:::.singleAA(x5), result5)
})
test_that(".singular", {
x1 <- c(1, 2, 2, 3, 1, 5, 4, 3, 2, 1)
x2 <- rep(1, 10)
x3 <- 1:5
expect_equal(Pbase:::.singular(x1), c(5, 4))
expect_equal(Pbase:::.singular(x2), double())
expect_equal(Pbase:::.singular(x3), 1:5)
})
|
#' Latent Space Joint Model
#'
#' Function for joint modelling of multiple network views using the Latent Space Joint Model (LSJM) of Gollini and Murphy (2014).
#' The LSJM merges the information given by the multiple network views by assuming that the probability of a node being connected with other nodes in each view is explained by a unique latent variable.
#'
#' @param Y list containing a (\code{N} x \code{N}) binary adjacency matrix for each network view.
#' @param D integer dimension of the latent space
#' @param sigma (\code{D} x \code{D}) variance/covariance matrix of the prior distribution for the latent positions. Default \code{sigma = 1}
#' @param xi vector of means of the prior distributions of \eqn{\alpha}. Default \code{xi = 0}
#' @param psi2 vector of variances of the prior distributions of \eqn{\alpha}. Default \code{psi2 = 2}
#' @param Niter maximum number of iterations. Default \code{Niter = 500}
#' @param tol desired tolerance. Default \code{tol = 0.1^2}
#' @param preit Preliminary number of iterations default \code{preit = 20}
#' @param randomZ logical; If \code{randomZ = TRUE} random initialization for the latent positions is used. If \code{randomZ = FALSE} and \code{D} = 2 or 3 the latent positions are initialized using the Fruchterman-Reingold method and multidimensional scaling is used for \code{D} = 1 or \code{D} > 3. Default \code{randomZ = FALSE}
#' @return List containing:
#' \itemize{
#' \item \code{EZ} (\code{N} x \code{D}) matrix containing the posterior means of the latent positions
#' \item \code{VZ} (\code{D} x \code{D}) matrix containing the posterior variance of the latent positions
#' \item \code{lsmEZ} list containing a (\code{N} x \code{D}) matrix for each network view containing the posterior means of the latent positions under each model in the latent space.
#' \item \code{lsmVZ} list containing a (\code{D} x \code{D}) matrix for each network view containing the posterior variance of the latent positions under each model in the latent space.
#' \item \code{xiT} vector of means of the posterior distributions of \eqn{\alpha}
#' \item \code{psi2T} vector of variances of the posterior distributions of \eqn{\alpha}
#' \item \code{Ell} expected log-likelihood
#' }
#' @references Gollini, I., and Murphy, T. B. (2014), "Joint Modelling of Multiple Network Views", Journal of Computational and Graphical Statistics \url{http://arxiv.org/abs/1301.3759}.
#' @export
#' @examples
#'## Simulate Undirected Network
#' N <- 20
#' Ndata <- 2
#' Y <- list()
#' Y[[1]] <- network(N, directed = FALSE)[,]
#' ### create a new view that is similar to the original
#'
#' for(nd in 2:Ndata){
#' Y[[nd]] <- Y[[nd - 1]] - sample(c(-1, 0, 1), N * N, replace = TRUE,
#' prob = c(.05, .85, .1))
#' Y[[nd]] <- 1 * (Y[[nd]] > 0 )
#' diag(Y[[nd]]) <- 0
#' }
#'
#' par(mfrow = c(1, 2))
#' z <- plotY(Y[[1]], verbose = TRUE, main = 'Network 1')
#' plotY(Y[[2]], EZ = z, main = 'Network 2')
#' par(mfrow = c(1, 1))
#'
#' modLSJM <- lsjm(Y, D = 2)
#' plot(modLSJM, Y, drawCB = TRUE)
#' plot(modLSJM, Y, drawCB = TRUE, plotZtilde = TRUE)
lsjm <- function(Y, D, sigma = 1, xi = rep(0, length(Y)), psi2 = rep(2, length(Y)),
                 Niter = 500, tol = 0.1^2, preit = 20, randomZ = FALSE)
{
  ## ---- Input validation ----
  stopifnot(is.list(Y), sapply(Y, is.adjacency))
  stopifnot(length(D) == 1, D > 0, D == floor(D))
  stopifnot(sigma > 0)
  stopifnot(length(xi) == length(Y), length(psi2) == length(Y), psi2 > 0)
  stopifnot(preit > 0, preit == floor(preit), Niter > preit, Niter == floor(Niter))
  stopifnot(tol > 0)
  stopifnot(is.logical(randomZ))

  N <- nrow(Y[[1]])
  Ndata <- length(Y)
  xiT <- xi
  psi2T <- psi2

  ## One latent-space model (LSM) per view; start each with identity variance.
  lsmVZ <- list()
  for (i in 1:Ndata) lsmVZ[[i]] <- diag(D)

  ## ---- Initialise latent positions ----
  if (randomZ) {
    lsmEZ <- list()
    for (i in 1:Ndata) lsmEZ[[i]] <- matrix(rnorm(N * D), ncol = D)
  } else {
    if (D %in% 2:3) { # Fruchterman-Reingold layout
      lsmEZ <- lapply(Y, frEZ, d = D)
    } else {          # Multidimensional scaling
      lsmEZ <- lapply(Y, function(y) cmdscale(as.dist(1 - y), k = D))
    }
  }
  ## Scale each latent dimension to unit standard deviation.
  lsmEZ <- lapply(lsmEZ, function(z) z / apply(z, 2, sd))

  Aezvz <- 0
  for (i in 1:Ndata)
  {
    Aezvz <- lsmEZ[[i]] %*% solve(lsmVZ[[i]]) + Aezvz
    ## Initialise alpha for view i from the intercept of a regression of the
    ## adjacency entries on squared latent distances.  coef() is used instead
    ## of the original `$coeff`, which relied on partial name matching.
    xiT[i] <- unname(coef(glm(c(Y[[i]]) ~ c(as.matrix(dist(lsmEZ[[i]])^2))))[1])
  }
  if (D > 1) {
    VZ <- solve(matrix(rowSums(sapply(lsmVZ, solve)), D, D) - (Ndata - 1) / sigma^2 * diag(D))
  } else {
    VZ <- as.matrix(1 / sum(sapply(lsmVZ, solve)) - (Ndata - 1) / sigma^2)
  }
  EZ <- Aezvz %*% VZ

  ## ---- Variational iterations ----
  iter <- 0
  dif <- 1
  l <- rep(0, 3)        # rolling window of the last three expected log-likelihoods
  ellm <- rep(0, Ndata)
  while (iter < Niter && dif > tol)
  {
    iter <- iter + 1
    ## Update each view's latent space model.
    for (i in 1:Ndata)
    {
      lsm <- mainLSM(psi2T[i], xiT[i], EZ, VZ, Y[[i]], xi[i], psi2[i], sigma^2)
      xiT[i] <- lsm$xiT
      psi2T[i] <- lsm$psi2T
      lsmVZ[[i]] <- as.matrix(lsm$lsmVZ)
      lsmEZ[[i]] <- lsm$lsmEZ
    }
    ######## Joint Model ##############
    ## After the preliminary iterations, rotate each view onto the first one.
    if (D > 1 && iter > preit)
    {
      for (i in 2:Ndata)
      {
        rotLSM <- rotXtoY(lsmEZ[[i]], lsmEZ[[1]] %*% lsmVZ[[i]])
        lsmEZ[[i]] <- rotLSM$X
      }
    }
    Aezvz <- 0
    for (i in 1:Ndata) {
      Aezvz <- lsmEZ[[i]] %*% solve(lsmVZ[[i]]) + Aezvz
    }
    ## Combine the per-view posteriors into the joint posterior for Z.
    if (D > 1) {
      VZ <- solve(matrix(rowSums(sapply(lsmVZ, solve)), D, D) - (Ndata - 1) / sigma^2 * diag(D))
    } else {
      VZ <- as.matrix(1 / sum(sapply(lsmVZ, solve)) - (Ndata - 1) / sigma^2)
    }
    EZ <- Aezvz %*% VZ
    ## Expected log-likelihood (summed over views) drives convergence.
    for (i in 1:Ndata)
    {
      ellm[i] <- Ell(psi2T[i], xiT[i], VZ, EZ, Y[[i]])
    }
    ell <- sum(ellm)
    l <- c(l[-1], ell)
    if (iter > preit) dif <- l[3] - l[2]
    if (dif < -tol) dif <- abs(dif) + 1  # keep iterating when the bound decreased
  }
  robj <- list(EZ = EZ, VZ = VZ, lsmEZ = lsmEZ, lsmVZ = lsmVZ, xiT = xiT, psi2T = psi2T, Ell = ell)
  class(robj) <- "lsjm"
  robj
}
| /lvm4net/R/lsjm.R | no_license | ingted/R-Examples | R | false | false | 5,749 | r | #' Latent Space Joint Model
#'
#' Function to joint modelling of multiple network views using the Latent Space Jont Model (LSJM) Gollini and Murphy (2014).
#' The LSJM merges the information given by the multiple network views by assuming that the probability of a node being connected with other nodes in each view is explained by a unique latent variable.
#'
#' @param Y list containing a (\code{N} x \code{N}) binary adjacency matrix for each network view.
#' @param D integer dimension of the latent space
#' @param sigma (\code{D} x \code{D}) variance/covariance matrix of the prior distribution for the latent positions. Default \code{sigma = 1}
#' @param xi vector of means of the prior distributions of \eqn{\alpha}. Default \code{xi = 0}
#' @param psi2 vector of variances of the prior distributions of \eqn{\alpha}. Default \code{psi2 = 2}
#' @param Niter maximum number of iterations. Default \code{Niter = 500}
#' @param tol desired tolerance. Default \code{tol = 0.1^2}
#' @param preit Preliminary number of iterations default \code{preit = 20}
#' @param randomZ logical; If \code{randomZ = TRUE} random initialization for the latent positions is used. If \code{randomZ = FALSE} and \code{D} = 2 or 3 the latent positions are initialized using the Fruchterman-Reingold method and multidimensional scaling is used for \code{D} = 1 or \code{D} > 3. Default \code{randomZ = FALSE}
#' @return List containing:
#' \itemize{
#' \item \code{EZ} (\code{N} x \code{D}) matrix containing the posterior means of the latent positions
#' \item \code{VZ} (\code{D} x \code{D}) matrix containing the posterior variance of the latent positions
#' \item \code{lsmEZ} list contatining a (\code{N} x \code{D}) matrix for each network view containing the posterior means of the latent positions under each model in the latent space.
#' \item \code{lsmVZ} list contatining a (\code{D} x \code{D}) matrix for each network view containing the posterior variance of the latent positions under each model in the latent space.
#' \item \code{xiT} vector of means of the posterior distributions of \eqn{\alpha}
#' \item \code{psi2T} vector of variances of the posterior distributions of \eqn{\alpha}
#' \item \code{Ell} expected log-likelihood
#' }
#' @references Gollini, I., and Murphy, T. B. (2014), "Joint Modelling of Multiple Network Views", Journal of Computational and Graphical Statistics \url{http://arxiv.org/abs/1301.3759}.
#' @export
#' @examples
#'## Simulate Undirected Network
#' N <- 20
#' Ndata <- 2
#' Y <- list()
#' Y[[1]] <- network(N, directed = FALSE)[,]
#' ### create a new view that is similar to the original
#'
#' for(nd in 2:Ndata){
#' Y[[nd]] <- Y[[nd - 1]] - sample(c(-1, 0, 1), N * N, replace = TRUE,
#' prob = c(.05, .85, .1))
#' Y[[nd]] <- 1 * (Y[[nd]] > 0 )
#' diag(Y[[nd]]) <- 0
#' }
#'
#' par(mfrow = c(1, 2))
#' z <- plotY(Y[[1]], verbose = TRUE, main = 'Network 1')
#' plotY(Y[[2]], EZ = z, main = 'Network 2')
#' par(mfrow = c(1, 1))
#'
#' modLSJM <- lsjm(Y, D = 2)
#' plot(modLSJM, Y, drawCB = TRUE)
#' plot(modLSJM, Y, drawCB = TRUE, plotZtilde = TRUE)
lsjm<-function(Y, D, sigma = 1, xi = rep(0, length(Y)), psi2 = rep(2, length(Y)),
Niter = 500, tol = 0.1^2, preit = 20, randomZ = FALSE)
{
stopifnot(is.list(Y), sapply(Y, is.adjacency))
stopifnot(length(D) == 1, D > 0, D == floor(D))
stopifnot(sigma > 0)
stopifnot(length(xi) == length(Y), length(psi2) == length(Y), psi2 > 0)
stopifnot(preit > 0, preit == floor(preit), Niter > preit, Niter == floor(Niter))
stopifnot(tol > 0)
stopifnot(is.logical(randomZ))
stepA <- 0
N <- nrow(Y[[1]])
Ndata <- length(Y)
xiT <- xi
psi2T<-psi2
lsmVZ <- list()
for(i in 1:Ndata) lsmVZ[[i]] <- diag(D)
if(randomZ){
lsmEZ <- list()
for(i in 1:Ndata) lsmEZ[[i]] <- matrix(rnorm(N * D), ncol = D)
} else {
if(D %in% 2:3){ # Fruchterman-Reingold
lsmEZ <- lapply(Y, frEZ, d = D)
} else { # Multidimensional Scaling
lsmEZ <- lapply(Y, function(y) cmdscale(as.dist(1 - y), k = D))
}
}
lsmEZ <- lapply(lsmEZ, function(z) z / apply(z, 2, sd))
Aezvz <- 0
for(i in 1:Ndata)
{
Aezvz <- lsmEZ[[i]] %*% solve(lsmVZ[[i]]) + Aezvz
xiT[i] <- glm(c(Y[[i]])~c(as.matrix(dist(lsmEZ[[i]])^2)))$coeff[1]
names(xiT[i]) <- NULL
}
if(D>1){
VZ <- solve(matrix(rowSums(sapply(lsmVZ, solve)), D, D) - (Ndata - 1) / sigma^2 * diag(D))
} else {
VZ <- as.matrix(1 / sum(sapply(lsmVZ, solve)) - (Ndata - 1) / sigma^2)
}
EZ <- Aezvz %*% VZ
############
############
iter <- 0
dif <- 1
l <- seq(0, 0, length.out=3)
ellm <- rep(0, Ndata)
while (iter<Niter & dif>tol)
{
iter<- iter+1
for(i in 1:Ndata)
{
lsm <- mainLSM(psi2T[i], xiT[i], EZ, VZ, Y[[i]], xi[i], psi2[i], sigma^2)
xiT[i] <- lsm$xiT
psi2T[i] <- lsm$psi2T
lsmVZ[[i]] <- as.matrix(lsm$lsmVZ)
lsmEZ[[i]] <- lsm$lsmEZ
}
######## Joint Model ##############
if(D > 1 & iter > preit)
{
for(i in 2:Ndata)
{
rotLSM <- rotXtoY(lsmEZ[[i]], lsmEZ[[1]] %*% lsmVZ[[i]])
lsmEZ[[i]] <- rotLSM$X
}
}
Aezvz <- 0
for(i in 1:Ndata) {
Aezvz <- lsmEZ[[i]] %*% solve(lsmVZ[[i]]) + Aezvz
}
#####
if(D>1){
VZ <- solve(matrix(rowSums(sapply(lsmVZ, solve)), D, D) - (Ndata - 1) / sigma^2 * diag(D))
} else {
VZ <- as.matrix(1 / sum(sapply(lsmVZ, solve)) - (Ndata - 1) / sigma^2)
}
EZ<- Aezvz %*% VZ
########
for(i in 1:Ndata)
{
ellm[i] <- Ell(psi2T[i], xiT[i], VZ, EZ, Y[[i]])
}
ell <-sum(ellm)
l <- c(l[-1], ell)
if(iter > preit) dif<- l[3] - l[2]
if(dif < -tol) dif <- abs(dif) + 1
}
robj <- list(EZ = EZ, VZ = VZ, lsmEZ = lsmEZ, lsmVZ = lsmVZ, xiT = xiT, psi2T = psi2T, Ell = ell)
class(robj) <- c("lsjm")
robj
}
|
getWellData = function(new.data = "Y", filled.Data = "Filled"){
if(new.data == "Y"){
if(filled.Data =="Original"){
model.domain.data1 = read.csv(paste("C:/all.agency.mdata.csv", sep = ""), header = TRUE) ### all agency mdata
} else {model.domain.data1 = read.csv(paste(mainDir, "data/Filled_stations_stack_All_agency_MaxCorr_0.9_MatchPair_10_MinRegPeriod3.csv", sep = ""), header = TRUE)} ### all agency mdata
}
return(model.domain.data1)
} | /getWellData.R | no_license | natefromcolorado/functions | R | false | false | 446 | r | getWellData = function(new.data = "Y", filled.Data = "Filled"){
if(new.data == "Y"){
if(filled.Data =="Original"){
model.domain.data1 = read.csv(paste("C:/all.agency.mdata.csv", sep = ""), header = TRUE) ### all agency mdata
} else {model.domain.data1 = read.csv(paste(mainDir, "data/Filled_stations_stack_All_agency_MaxCorr_0.9_MatchPair_10_MinRegPeriod3.csv", sep = ""), header = TRUE)} ### all agency mdata
}
return(model.domain.data1)
} |
library(TMDb)
### Name: tv_season
### Title: Retrieve basic informations about a TV season.
### Aliases: tv_season
### Keywords: tv_season
### ** Examples
## Not run:
##D
##D ## An example of an authenticated request,
##D ## where api_key is fictitious.
##D ## You can obtain your own at https://www.themoviedb.org/documentation/api
##D
##D api_key <- "key"
##D
##D tv_season(api_key = api_key, id = 1396, season_number = 3)
## End(Not run)
| /data/genthat_extracted_code/TMDb/examples/tv_season.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 456 | r | library(TMDb)
### Name: tv_season
### Title: Retrieve basic informations about a TV season.
### Aliases: tv_season
### Keywords: tv_season
### ** Examples
## Not run:
##D
##D ## An example of an authenticated request,
##D ## where api_key is fictitious.
##D ## You can obtain your own at https://www.themoviedb.org/documentation/api
##D
##D api_key <- "key"
##D
##D tv_season(api_key = api_key, id = 1396, season_number = 3)
## End(Not run)
|
\name{computeIsDayByHour}
\alias{computeIsDayByHour}
\title{computeIsDayByHour}
\description{Tell for each date whether it is daytime}
\usage{computeIsDayByHour(date, sunriseHour = 7,
sunsetHour = 18, duskOffset = 0)}
\arguments{
\item{date}{POSIXct vector}
\item{sunriseHour}{sunrise as fractional hour (0..24)
(vector of length date or length 1)}
\item{sunsetHour}{sunset as fractional hour
(vector of length date or length 1)}
\item{duskOffset}{integer scalar: time in hours after dusk for
which records are still regarded as day}
}
\value{logical vector (length(date)): TRUE if it is daytime}
\author{Thomas Wutzler}
| /man/computeIsDayByHour.Rd | no_license | cran/solartime | R | false | false | 662 | rd | \name{computeIsDayByHour}
\alias{computeIsDayByHour}
\title{computeIsDayByHour}
\description{tell for each date, whether its daytime}
\usage{computeIsDayByHour(date, sunriseHour = 7,
sunsetHour = 18, duskOffset = 0)}
\arguments{
\item{date}{POSIXct vector}
\item{sunriseHour}{sunrise as fractional hour (0..24)
(vector of length date or length 1)}
\item{sunsetHour}{sunset as fractional hour
(vector of length date or length 1)}
\item{duskOffset}{integer scalar: time in hours after dusk for
which records are still regarded as day}
}
\value{logical vector (length(date)): true if its daytime}
\author{Thomas Wutzler}
|
library(lidR)
library(rgdal)
library(raster)
library(stringr)
library(dplyr)
###############################################################
#Combining CUBE .xyz and CUBE Uncertainty .xyz into one file and removing overlappling/non-matching points. More specifically,
#The list of files is read in from the directory. Reg==ardless of the number of CUBE files in the directory, it will remove
#the cross check file and read in the uncertainty file separately while combining all of the xyz files into one data frame.
#This also binds the xyz and uncertainty columns while removing duplicates and creates two new columns for QA and Map.
###############################################################################################
XYZToTextFiles <- function(home_dir, lake_name, WSEL, dam_elev, alt_source_types) {
  # Merges CUBE multibeam soundings (*.xyz) with their CUBE uncertainty files
  # and any alternative-source points (ADCP/SB/GPS CSV folders), attributes
  # each point with Source/Map/QA/random-group columns, and writes
  # <lake_name>_xyz_uncert_source.csv/.txt into <home_dir>/RTextFiles.
  #
  # home_dir         root project directory containing Sort/ and RTextFiles/
  # lake_name        prefix used for the output file names
  # WSEL             water-surface elevation (meters); MB points more than
  #                  0.5 m above it are discarded
  # dam_elev         dam/spillway elevation (meters); unused here, kept for
  #                  interface symmetry with LidarToTextFiles
  # alt_source_types character vector of alternative-source folder names,
  #                  e.g. c("ADCP", "SB", "GPS"); may be empty
  gc()
  memory.size(max = TRUE)  # Windows-only; harmless warning elsewhere
  start.time <- Sys.time() # timing how long the program takes to run
  options(digits = 12)
  # Install any missing dependencies, then attach them.
  list.of.packages <- c("lidR", "rgdal", "raster", "stringr", "dplyr", "sf")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
  if (length(new.packages) > 0) install.packages(new.packages)
  options("rgdal_show_exportToProj4_warnings" = "none")
  library(lidR)
  library(rgdal)
  library(raster)
  library(stringr)
  library(dplyr)
  library(sf)

  print("Working on XYZ files...")
  ## Establishing Sort, Lidar, and output directories
  sort_path <- paste(home_dir, "/Sort", sep = "")
  lidar_path <- paste(home_dir, "/Lidar", sep = "")
  out_path <- paste(home_dir, "/RTextFiles", sep = "")
  setwd(sort_path)

  # Helper: read each file, name its columns, and stack all rows.
  # FIX: the original loops only ever combined the FIRST and LAST file
  # (the accumulator `all` was never updated inside the loop), silently
  # dropping data whenever more than two files were present.
  read_bind <- function(files, sep, col_names, header = FALSE) {
    tabs <- lapply(files, function(f) {
      tab <- read.table(f, sep = sep, header = header, stringsAsFactors = FALSE)
      colnames(tab) <- col_names
      tab
    })
    do.call(plyr::rbind.fill, tabs)
  }

  # CUBE multibeam soundings: every .xyz except cross-check and uncertainty files.
  xyz_files <- list.files(pattern = ".xyz$")
  xyz_files <- xyz_files[!str_detect(xyz_files, pattern = "XCheck")]
  xyz_files <- xyz_files[!str_detect(xyz_files, pattern = "Xcheck")]
  xyz_files <- xyz_files[!str_detect(xyz_files, pattern = "uncert")]
  print(xyz_files)
  xyz_all <- read_bind(xyz_files, sep = " ", col_names = c("X", "Y", "Z"))
  xyz_all[c("Source")] <- "MB"

  # CUBE uncertainty files: every .xyz except cross-check and CUBE sounding files.
  print("Working on CUBE Uncertainty files...")
  uncert_files <- list.files(pattern = ".xyz$")
  uncert_files <- uncert_files[!str_detect(uncert_files, pattern = "XCheck")]
  uncert_files <- uncert_files[!str_detect(uncert_files, pattern = "Xcheck")]
  uncert_files <- uncert_files[!str_detect(uncert_files, pattern = "CUBE.xyz$")]
  uncert_all <- read_bind(uncert_files, sep = " ", col_names = c("X", "Y", "TPU"))

  # Alternative-source points (if any): one folder per source type.
  have_alt <- FALSE
  if (length(alt_source_types) != 0) {
    alt_tables <- list()
    for (type in alt_source_types) {
      # FIX: the original built this status string but never printed it.
      print(paste("Working on", type, "files..."))
      alt_path <- paste(home_dir, "/", type, sep = "")
      if (dir.exists(alt_path)) {
        setwd(alt_path)
        alt_files <- list.files(pattern = ".csv$")
        alt_all <- read_bind(alt_files, sep = ",", col_names = c("X", "Y", "Z"), header = TRUE)
        alt_all[c("Source")] <- type
        alt_tables[[type]] <- alt_all
      }
    }
    # FIX: the original crashed ("object 'full_alt_table' not found") when the
    # first alternative-source folder did not exist on disk.
    if (length(alt_tables) > 0) {
      full_alt_table <- do.call(plyr::rbind.fill, alt_tables)
      have_alt <- TRUE
    }
  }

  clip_xyz <- subset(xyz_all, xyz_all$Z < (WSEL + 0.5))     # MB points at most 0.5 m above WSEL
  clip_uncert <- subset(uncert_all, uncert_all$TPU <= 1.52) # drop CUBE uncertainty > 1.52 m (~5 ft)

  ################# DATA FRAME COMBINING, UNIT CONVERSION, REARRANGING, EXPORTING
  print("Merging, combining, and moving some stuff around!")
  df <- merge(clip_xyz, clip_uncert, by = c("X", "Y"))  # join MB depths with uncertainty by XY location
  if (have_alt) {
    df <- plyr::rbind.fill(df, full_alt_table)
    df$TPU <- ifelse(df$Source != "MB", -9999, df$TPU)  # alt sources carry no CUBE uncertainty
  }
  df <- unique(df)       # remove duplicates that may have occurred during joining
  df$Z <- df$Z * 3.2808  # meters -> feet
  df$TPU <- ifelse(df$Source == "MB", df$TPU * 3.2808, df$TPU)

  # Setting the number of QA points and randomization groups
  total_points <- nrow(xyz_all)
  map_pt_count <- round(total_points * 0.25)
  lower_qa_pt_bound <- round(0.1 * map_pt_count)
  upper_qa_pt_bound <- round(0.15 * map_pt_count)
  qa_pt_count <- round(median(c(lower_qa_pt_bound, upper_qa_pt_bound)))
  approx_QAgroupsize <- round(qa_pt_count / 0.75)
  num_groups <- round(total_points / approx_QAgroupsize)
  FifteenPercContourQAsize <- round(qa_pt_count * 0.15)
  FivePercContourQAsize <- round(qa_pt_count * 0.05)
  # Printing the results
  cat(
    "Lake: ", lake_name, "\n",
    "Total points: ", total_points, "\n",
    "Map point count: ", map_pt_count, "\n",
    "QA point count: ", qa_pt_count, "\n",
    "Approx. QA group size: ", approx_QAgroupsize, "\n",
    "Number of random groups: ", num_groups, "\n",
    "15% Contour QA size: ", FifteenPercContourQAsize, "\n",
    "5% Contour QA size: ", FivePercContourQAsize, "\n"
  )
  # FIX: the original wrote `set.seed=(1996)`, which creates a variable named
  # `set.seed` instead of seeding the RNG, so group assignment was never
  # actually reproducible.
  set.seed(1996)
  df["RAND"] <- sample(1:num_groups, nrow(df), replace = TRUE)
  df[("QA")] <- 0
  df[("Map")] <- 1
  # Reorder/rename by column name (equivalent to the original positional reorder).
  df <- df[c("X", "Y", "Z", "TPU", "Map", "QA", "RAND", "Source")]
  colnames(df) <- c("X", "Y", "Z", "CUBE_Uncert", "Map", "QA", "Rand", "Source")

  print("Writing out tables...")
  write.table(df, file = paste(out_path, "/", lake_name, "_xyz_uncert_source.csv", sep = ""), sep = ",", row.names = FALSE)
  write.table(df, file = paste(out_path, "/", lake_name, "_xyz_uncert_source.txt", sep = ""), sep = ",", row.names = FALSE)

  # Stopping the clock to record how long the program took to run
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  print(time.taken)
}
LidarToTextFiles <- function(home_dir, lake_name, WSEL, dam_elev) {
  # Combines bare-earth lidar returns (Classification == 2) from all .las files
  # in <home_dir>/Lidar with points sampled from the projected NED DEM
  # ("*proj.tif"), clips points above the dam elevation, converts elevations to
  # feet, and writes <lake_name>_las.csv/.txt into <home_dir>/RTextFiles.
  #
  # home_dir  root project directory containing Lidar/ and RTextFiles/
  # lake_name prefix used for the output file names
  # WSEL      water-surface elevation in meters (unused here; kept for
  #           interface symmetry with XYZToTextFiles)
  # dam_elev  dam/spillway elevation in meters; points above dam_elev + 5 m
  #           are discarded
  memory.size(max = TRUE)  # Windows-only; harmless warning elsewhere
  start.time <- Sys.time() # timing how long the program takes to run
  # Install any missing dependencies, then attach them.
  list.of.packages <- c("lidR", "rgdal", "raster", "stringr", "dplyr", "sf")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
  if (length(new.packages) > 0) install.packages(new.packages)
  gc()
  library(lidR)
  library(rgdal)
  library(raster)
  library(stringr)
  library(dplyr)
  library(sf)

  lidar_path <- paste(home_dir, "/Lidar", sep = "")
  out_path <- paste(home_dir, "/RTextFiles", sep = "")

  print("Working on Lidar files...")
  setwd(lidar_path)
  ls <- list.files(lidar_path, pattern = (".las"))
  if (length(ls) != 0) {
    # Read every .las file, keep de-duplicated ground returns, and stack them.
    las_list <- vector("list", length(ls))
    for (i in seq_along(ls)) {
      las <- readLAS(paste(lidar_path, "/", ls[i], sep = ""))
      las <- filter_duplicates(las)                  # removing duplicate points
      las_sp <- as.spatial(las)                      # LAS -> SpatialPoints
      las_df <- as.data.frame(las_sp, stringsAsFactors = FALSE)
      las_df <- filter(las_df, Classification == 2)  # ground returns only
      las_list[[i]] <- subset(las_df, select = c("X", "Y", "Z"))
    }
    las_all <- do.call(rbind, las_list)
    rm(las)
    rm(las_sp)
    las_all$Source <- "Lidar"

    # Append NED DEM points from a 1000 m buffer around the lidar extent.
    print("Adding in the NED DEM...")
    ls <- list.files(pattern = "proj.tif$")
    dem_proj <- raster(ls[1], xy = TRUE)
    temp_las <- st_as_sf(las_all, coords = c("X", "Y"), crs = "EPSG:6344")
    # FIX: removed a stray trailing `y` argument in the original st_bbox()
    # call, which referenced an undefined object.
    las_bbox <- st_bbox(temp_las,
                        xmin = min(las_all$X), xmax = max(las_all$X),
                        ymin = min(las_all$Y), ymax = max(las_all$Y))
    las_bbox_poly <- st_as_sfc(las_bbox)
    las_bbox_buffer <- st_buffer(las_bbox_poly, 1000)
    las_buffer_extent <- st_bbox(las_bbox_buffer)
    dem_clipped <- crop(x = dem_proj, y = las_buffer_extent)
    dem_df <- as.data.frame(dem_clipped, xy = TRUE)
    dem_df$Source <- "NED"
    colnames(dem_df) <- c("X", "Y", "Z", "Source")
    dem_df <- na.omit(dem_df)
    las_all <- rbind(las_all, dem_df)
  } else {
    # No lidar available: fall back to NED DEM points only.
    print("Ope! No Lidar files here. Adding in the NED DEM...")
    ls <- list.files(pattern = "proj.tif$")
    dem_proj <- raster(ls[1], xy = TRUE)
    dem_df <- as.data.frame(dem_proj, xy = TRUE)
    dem_df$Source <- "NED"
    colnames(dem_df) <- c("X", "Y", "Z", "Source")
    dem_df <- na.omit(dem_df)
    las_all <- dem_df
  }
  las_all <- unique(las_all)

  # Cuts out points above dam/spillway elevation (+5 m head room).
  # Refer to Excel document for corrected WSE and the WSL Projections Proposal
  # for top of dam/spillway.
  print("Merging and moving stuff around...")
  clip_las <- subset(las_all, las_all$Z <= (dam_elev + 5))
  # FIX: use 3.2808 (meters -> feet) to match the conversion factor used in
  # XYZToTextFiles; the original used the coarser 3.28 here.
  clip_las$Z <- clip_las$Z * 3.2808

  print("Writing out the best-looking tables you've ever seen...")
  write.table(clip_las, file = paste(out_path, "/", lake_name, "_las.csv", sep = ""), sep = ",", row.names = FALSE)
  write.table(clip_las, file = paste(out_path, "/", lake_name, "_las.txt", sep = ""), sep = ",", row.names = FALSE)

  # Stopping the clock to record how long the program took to run
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  print(time.taken)
}
XCheckToTextFiles <- function(home_dir, lake_name) {
  # Reads all cross-check sounding files ("*k.xyz") from <home_dir>/Sort,
  # converts depths from meters to feet, and writes the combined table to
  # <lake_name>_xcheck.csv/.txt in <home_dir>/RTextFiles.
  #
  # home_dir  root project directory containing Sort/ and RTextFiles/
  # lake_name prefix used for the output file names
  sort_path <- paste(home_dir, "/Sort", sep = "")
  out_path <- paste(home_dir, "/RTextFiles", sep = "")
  setwd(sort_path)
  xcheck_files <- list.files(pattern = "k.xyz$")
  print(xcheck_files)
  # FIX: the original loop only ever combined the FIRST and LAST file (its
  # accumulator `all` was never updated), dropping data for > 2 files.
  xcheck_list <- lapply(xcheck_files, function(f) {
    tab <- read.table(f, sep = " ", stringsAsFactors = FALSE)
    colnames(tab) <- c("X", "Y", "Zxcheck")
    tab
  })
  xcheck_all <- do.call(plyr::rbind.fill, xcheck_list)
  xcheck_all$Zxcheck <- xcheck_all$Zxcheck * 3.2808  # meters -> feet
  write.table(xcheck_all, file = paste(out_path, "/", lake_name, "_xcheck.csv", sep = ""), sep = ",", row.names = FALSE)
  write.table(xcheck_all, file = paste(out_path, "/", lake_name, "_xcheck.txt", sep = ""), sep = ",", row.names = FALSE)
}
#### INITIAL INPUTS. THESE NEED TO BE SPECIFIED BY THE USER ####
# Merge the multibeam XYZ + CUBE uncertainty soundings (plus the ADCP/SB/GPS
# alternative-source points) into one attributed table for this lake.
XYZToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld",
WSEL =271.74,
dam_elev =271.874,
alt_source_types=c("ADCP","SB","GPS"))
# Combine lidar ground returns with NED DEM points around the same lake.
LidarToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld",
WSEL =271.74,
dam_elev =271.874)
# Export the cross-check soundings used for quality assurance.
XCheckToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld")
| /XYZtoTextFilesandLidarProcesing--NoMarkdown.R | no_license | loy74mst/USGS-Lake-Bathymetry | R | false | false | 13,209 | r | library(lidR)
library(rgdal)
library(raster)
library(stringr)
library(dplyr)
###############################################################
#Combining CUBE .xyz and CUBE Uncertainty .xyz into one file and removing overlappling/non-matching points. More specifically,
#The list of files is read in from the directory. Reg==ardless of the number of CUBE files in the directory, it will remove
#the cross check file and read in the uncertainty file separately while combining all of the xyz files into one data frame.
#This also binds the xyz and uncertainty columns while removing duplicates and creates two new columns for QA and Map.
###############################################################################################
XYZToTextFiles<-function(home_dir, lake_name, WSEL, dam_elev,alt_source_types){
gc()
memory.size(max=TRUE)
#Required libraries
start.time <- Sys.time() #Timing how long the program takes to run
options(digits=12)
list.of.packages <- c("lidR", "rgdal", "raster","stringr","dplyr","sf")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
options("rgdal_show_exportToProj4_warnings"="none")
library(lidR)
library(rgdal)
library(raster)
library(stringr)
library(dplyr)
library(sf)
print("Working on XYZ files...")
##Establishing Sort, Lidar, ADCP, GPS, and Output directories
sort_path = paste(home_dir,"/Sort",sep="")
lidar_path = paste(home_dir,"/Lidar",sep="")
out_path = paste(home_dir,"/RTextFiles",sep="")
#Concatenating XYZ files only
setwd(sort_path)
ls=list.files(pattern=".xyz$")
ls=ls[!str_detect(ls,pattern="XCheck")]
ls=ls[!str_detect(ls,pattern="Xcheck")]
ls=ls[!str_detect(ls,pattern="uncert")]
print(ls)
##############
#Looping through the various XYZ files and concatenating them
i=1
for(i in 1:length(ls)){
xyz=read.table(ls[i],sep=" ")
xyz=as.data.frame(xyz,stringsAsFactors=FALSE)
if(i==1){
colnames(xyz)=c("X","Y","Z")
all=xyz
temp=NULL
xyz_all=plyr::rbind.fill(all,temp)
}else{
colnames(xyz)=c("X","Y","Z")
temp=xyz
xyz_all=plyr::rbind.fill(all,temp)
}
}
xyz_all[c("Source")]="MB"
############
#Looping through the various CUBE uncertainty files and concatenating them
print("Working on CUBE Uncertainty files...")
i=1
ls=list.files(pattern=".xyz$")
ls=ls[!str_detect(ls,pattern="XCheck")]
ls=ls[!str_detect(ls,pattern="Xcheck")]
ls=ls[!str_detect(ls,pattern="CUBE.xyz$")]
for(i in 1:length(ls)){
uncert=read.table(ls[i],sep=" ")
uncert=as.data.frame(uncert,stringsAsFactors=FALSE)
if(i==1){
colnames(uncert)=c("X",'Y','TPU')
all=uncert
temp=NULL
uncert_all=plyr::rbind.fill(all,temp)
}else{
colnames(uncert)=c('X','Y','TPU')
temp=uncert
uncert_all=plyr::rbind.fill(all,temp)
}
}
#########
#Looping through the alternative source files (if they exist) and concatenating them. This loop is skipped if they do not exist
i=1
if(length(alt_source_types!=0)){
for(i in 1:length(alt_source_types)){
type = alt_source_types[[i]]
paste("Working on",type,"files...")
alt_path = paste(home_dir,"/",type,sep="")
if(dir.exists(alt_path)){
setwd(alt_path)
j=1
ls=list.files(pattern=".csv$")
for(j in 1:length(ls)){
alt=read.table(ls[j],sep=",",header=TRUE)
alt=as.data.frame(alt,stringsAsFactors=FALSE)
if(j==1){
colnames(alt)=c("X","Y","Z")
all=alt
temp=NULL
alt_all=plyr::rbind.fill(all,temp)
}else{
colnames(alt)=c("X","Y","Z")
temp=alt
alt_all=plyr::rbind.fill(all,temp)
}
}
alt_all[c("Source")]=type
if(i==1){
full_alt_table = plyr::rbind.fill(alt_all)
}else{
full_alt_table = plyr::rbind.fill(full_alt_table,alt_all)
}
}
}
}
clip_xyz=subset(xyz_all,xyz_all$Z < (WSEL+0.5)) #Selects MB points that are 0.5 meter above the WSEL and less
clip_uncert=subset(uncert_all,uncert_all$TPU<=1.52) #Removing points with a CUBE Uncertainty >1.52 meters (~5 feet)
#################DATA FRAME COMBINING, UNIT CONVERSION, REARRANGING, EXPORTING
#df=plyr::rbind.fill(clip_xyz,clip_uncert) #For emergency use only in case the number of Uncertainty points doesn't match the number of XYZ points
print("Merging, combining, and moving some stuff around!")
df=merge(clip_xyz,clip_uncert,by=(c("X","Y"))) #Merging the MB and CUBE uncertainty points by XY location
if(length(alt_source_types!=0)){
df=plyr::rbind.fill(df,full_alt_table)
df$TPU <- ifelse(df$Source!="MB",-9999,df$TPU)
} ##Adding alternative source points into the overall dataframe if they exist
df=unique(df) #Removing duplicates that may have occurred during joining
df$Z=df$Z*3.2808 #Converting the Z column to feet
df$TPU <- ifelse(df$Source=="MB",df$TPU*3.2808,df$TPU)
#df$TPU=df$TPU*3.28 #Converting the CUBE Uncert column
#Setting the number of QA points and randomization groups
total_points = nrow(xyz_all)
map_pt_count = round(total_points*0.25)
lower_qa_pt_bound = round(0.1*map_pt_count)
upper_qa_pt_bound = round(0.15*map_pt_count)
qa_pt_count = round(median(c(lower_qa_pt_bound, upper_qa_pt_bound)))
approx_QAgroupsize = round(qa_pt_count/0.75)
num_groups = round(total_points / approx_QAgroupsize)
FifteenPercContourQAsize = round(qa_pt_count*0.15)
FivePercContourQAsize = round(qa_pt_count*0.05)
#Printing the results
cat(
"Lake: ",lake_name,"\n",
"Total points: ",total_points,"\n",
"Map point count: ",map_pt_count,"\n",
"QA point count: ",qa_pt_count,"\n",
"Approx. QA group size: ",approx_QAgroupsize,"\n",
"Number of random groups: ",num_groups,"\n",
"15% Contour QA size: ",FifteenPercContourQAsize,"\n",
"5% Contour QA size: ",FivePercContourQAsize,"\n"
)
set.seed=(1996)
df["RAND"]=sample(1:num_groups,nrow(df),replace=TRUE)
df[("QA")]=0
df[("Map")]=1
df=df[c(1,2,3,5,8,7,6,4)]
colnames(df) = c("X","Y","Z","CUBE_Uncert","Map","QA","Rand","Source")
print('Writing out tables...')
write.table(df,file=paste(out_path,"/",lake_name,"_xyz_uncert_source.csv",sep=""),sep=",",row.names = FALSE)
write.table(df,file=paste(out_path,"/",lake_name,"_xyz_uncert_source.txt",sep=""),sep=",",row.names = FALSE)
#Stopping the clock to record how long the program took to run
end.time <- Sys.time()
time.taken <- end.time - start.time
print(time.taken)
}
# Convert Lidar .las files (plus the projected NED DEM raster) to XYZ text files.
#
# Reads every .las file in <home_dir>/Lidar, keeps ground returns
# (Classification == 2), merges them with points extracted from the projected
# NED DEM ("*proj.tif", clipped to a 1 km buffer around the Lidar extent),
# drops points more than 5 m above the dam/spillway, converts elevations from
# meters to feet, and writes <lake_name>_las.csv and <lake_name>_las.txt to
# <home_dir>/RTextFiles.
#
# Arguments:
#   home_dir  - project root containing "Lidar" and "RTextFiles" folders.
#   lake_name - prefix used for the output file names.
#   WSEL      - water surface elevation; not used in this function but kept so
#               the interface matches the other *ToTextFiles functions.
#   dam_elev  - dam/spillway elevation in meters; points above (dam_elev + 5)
#               meters are discarded.
#
# Side effects: changes the working directory to the Lidar folder and writes
# two output files. Prints progress messages and the elapsed time.
LidarToTextFiles <- function(home_dir, lake_name, WSEL, dam_elev){
  # NOTE: the old memory.size(max=TRUE) call was removed; it is Windows-only
  # and defunct in R >= 4.2.
  start.time <- Sys.time() # timing how long the program takes to run
  # Install any missing required packages, then attach them.
  list.of.packages <- c("lidR", "rgdal", "raster", "stringr", "dplyr", "sf")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
  if (length(new.packages) > 0) install.packages(new.packages)
  gc()
  library(lidR)
  library(rgdal)
  library(raster)
  library(stringr)
  library(dplyr)
  library(sf)
  lidar_path <- paste(home_dir, "/Lidar", sep = "")
  out_path <- paste(home_dir, "/RTextFiles", sep = "")
  ###########################################################################
  # Read and concatenate the Lidar files; keep only ground returns and X/Y/Z.
  print("Working on Lidar files...")
  setwd(lidar_path)
  ls <- list.files(lidar_path, pattern = "\\.las$")  # anchored: only *.las files
  if (length(ls) != 0) {
    for (i in seq_along(ls)) {
      las <- readLAS(paste(lidar_path, "/", ls[i], sep = ""))
      las <- filter_duplicates(las)                 # remove duplicate points
      las_sp <- as.spatial(las)                     # LAS -> SpatialPoints -> data.frame
      las_df <- as.data.frame(las_sp, stringsAsFactors = FALSE)
      las_df <- filter(las_df, Classification == 2) # ground returns only
      las_df <- subset(las_df, select = c("X", "Y", "Z"))
      if (i == 1) {
        las_all <- las_df
      } else {
        las_all <- rbind(las_all, las_df)
      }
    }
    rm(las, las_sp)
    las_all$Source <- "Lidar"
    # The National Map DEM --> points, clipped to a 1 km buffer around the
    # Lidar extent, then concatenated with the Lidar points.
    print("Adding in the NED DEM...")
    ls <- list.files(pattern = "proj.tif$")
    dem_proj <- raster(ls[1], xy = TRUE)
    temp_las <- st_as_sf(las_all, coords = c("X", "Y"), crs = "EPSG:6344")
    # st_bbox() computes the extent from the geometry itself; the extra
    # xmin/xmax/ymin/ymax arguments (and a stray, undefined `y`) in the old
    # call were ignored and have been dropped.
    las_bbox <- st_bbox(temp_las)
    las_bbox_poly <- st_as_sfc(las_bbox)
    las_bbox_buffer <- st_buffer(las_bbox_poly, 1000)
    las_buffer_extent <- st_bbox(las_bbox_buffer)
    dem_clipped <- crop(x = dem_proj, y = las_buffer_extent)
    dem_df <- as.data.frame(dem_clipped, xy = TRUE)
    dem_df$Source <- "NED"
    colnames(dem_df) <- c("X", "Y", "Z", "Source")
    dem_df <- na.omit(dem_df)
    las_all <- rbind(las_all, dem_df)
  } else {
    # No Lidar at all: the DEM points become the whole dataset.
    print("Ope! No Lidar files here. Adding in the NED DEM...")
    ls <- list.files(pattern = "proj.tif$")
    dem_proj <- raster(ls[1], xy = TRUE)
    dem_df <- as.data.frame(dem_proj, xy = TRUE)
    dem_df$Source <- "NED"
    colnames(dem_df) <- c("X", "Y", "Z", "Source")
    dem_df <- na.omit(dem_df)
    las_all <- dem_df
  }
  las_all <- unique(las_all)
  # Cut out points above the dam elevation (plus a 5 m allowance).
  # Refer to the Excel document for corrected WSE and the WSL Projections
  # Proposal for top of dam/spillway.
  print("Merging and moving stuff around...")
  clip_las <- subset(las_all, Z <= (dam_elev + 5))
  # Meters -> feet; same factor as XCheckToTextFiles (was 3.28, which silently
  # disagreed with the 3.2808 used for the cross-check data).
  clip_las$Z <- clip_las$Z * 3.2808
  print("Writing out the best-looking tables you've ever seen...")
  write.table(clip_las, file = paste(out_path, "/", lake_name, "_las.csv", sep = ""), sep = ",", row.names = FALSE)
  write.table(clip_las, file = paste(out_path, "/", lake_name, "_las.txt", sep = ""), sep = ",", row.names = FALSE)
  # Stop the clock and report how long the program took to run.
  end.time <- Sys.time()
  print(end.time - start.time)
}
# Concatenate cross-check XYZ files into one table and write it out.
#
# Reads every "*k.xyz" file in <home_dir>/Sort (space-separated X Y Z),
# stacks them, converts the cross-check elevation from meters to feet, and
# writes <lake_name>_xcheck.csv and <lake_name>_xcheck.txt to
# <home_dir>/RTextFiles.
#
# Arguments:
#   home_dir  - project root containing "Sort" and "RTextFiles" folders.
#   lake_name - prefix used for the output file names.
#
# Side effects: changes the working directory to the Sort folder and writes
# two output files.
#
# BUG FIX: the original loop did plyr::rbind.fill(all, temp) where `all` was
# only ever set on the first iteration and never grown, so with three or more
# input files only the FIRST and LAST files made it into the output. The
# rewrite collects every file and binds them all at once.
XCheckToTextFiles <- function(home_dir, lake_name){
  sort_path <- paste(home_dir, "/Sort", sep = "")
  out_path <- paste(home_dir, "/RTextFiles", sep = "")
  # Concatenating XYZ files only
  setwd(sort_path)
  ls <- list.files(pattern = "k\\.xyz$")  # anchored: "...k.xyz" files only
  print(ls)
  pieces <- vector("list", length(ls))    # preallocate, then bind once
  for (i in seq_along(ls)) {
    xcheck <- read.table(ls[i], sep = " ")
    xcheck <- as.data.frame(xcheck, stringsAsFactors = FALSE)
    colnames(xcheck) <- c("X", "Y", "Zxcheck")
    pieces[[i]] <- xcheck
  }
  # All pieces share identical column names, so base rbind is sufficient.
  xcheck_all <- do.call(rbind, pieces)
  xcheck_all$Zxcheck <- xcheck_all$Zxcheck * 3.2808  # meters -> feet
  write.table(xcheck_all, file = paste(out_path, "/", lake_name, "_xcheck.csv", sep = ""), sep = ",", row.names = FALSE)
  write.table(xcheck_all, file = paste(out_path, "/", lake_name, "_xcheck.txt", sep = ""), sep = ",", row.names = FALSE)
}
####INITIAL INPUTS. THESE NEED TO BE SPECIFIED BY THE USER
# Driver calls for one lake. All three steps share the same home_dir,
# lake_name, water surface elevation (WSEL), and dam elevation (dam_elev).
# XYZToTextFiles is defined elsewhere in this project; it processes the
# sounding data (ADCP/single-beam/GPS sources per alt_source_types).
XYZToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld",
WSEL =271.74,
dam_elev =271.874,
alt_source_types=c("ADCP","SB","GPS"))
# Lidar + NED DEM processing (writes <lake_name>_las.csv/.txt).
LidarToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld",
WSEL =271.74,
dam_elev =271.874)
# Cross-check data processing (writes <lake_name>_xcheck.csv/.txt).
XCheckToTextFiles(home_dir = "E:/HYPACK_Projects/2020_DNR_Lakes/GardenCityOldDEMO/2020-07_GardenCityOld",
lake_name = "GardenCityOld")
|
/*
File: ASRegistry.r
Contains: AppleScript Registry constants.
Version: Technology: AppleScript 1.3
Release: Universal Interfaces 3.2
Copyright: © 1991-1998 by Apple Computer, Inc., all rights reserved
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __ASREGISTRY_R__
#define __ASREGISTRY_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define keyAETarget 'targ' /* 0x74617267 */
#define keySubjectAttr 'subj' /* 0x7375626a */
/* Magic 'returning' parameter: */
#define keyASReturning 'Krtn' /* 0x4b72746e */
/* AppleScript Specific Codes: */
#define kASAppleScriptSuite 'ascr' /* 0x61736372 */
#define kASScriptEditorSuite 'ToyS' /* AppleScript 1.3 added from private headers */
#define kASTypeNamesSuite 'tpnm' /* 0x74706e6d */
/* dynamic terminologies */
#define typeAETE 'aete' /* 0x61657465 */
#define typeAEUT 'aeut' /* 0x61657574 */
#define kGetAETE 'gdte' /* 0x67647465 */
#define kGetAEUT 'gdut' /* 0x67647574 */
#define kUpdateAEUT 'udut' /* 0x75647574 */
#define kUpdateAETE 'udte' /* 0x75647465 */
#define kCleanUpAEUT 'cdut' /* 0x63647574 */
#define kASComment 'cmnt' /* 0x636d6e74 */
#define kASLaunchEvent 'noop' /* 0x6e6f6f70 */
#define keyScszResource 'scsz' /* 0x7363737A */
#define typeScszResource 'scsz' /* 0x7363737A */
/* subroutine calls */
#define kASSubroutineEvent 'psbr' /* 0x70736272 */
#define keyASSubroutineName 'snam' /* 0x736e616d */
#define kASPrepositionalSubroutine 'psbr' /* AppleScript 1.3 added from private headers */
#define keyASPositionalArgs 'parg' /* AppleScript 1.3 added from private headers */
/* Miscellaneous AppleScript commands */
#define kASStartLogEvent 'log1' /* AppleScript 1.3 Script Editor Start Log */
#define kASStopLogEvent 'log0' /* AppleScript 1.3 Script Editor Stop Log */
#define kASCommentEvent 'cmnt' /* AppleScript 1.3 magic "comment" event */
/* Binary: */
#define kASAdd '+ ' /* 0x2b202020 */
#define kASSubtract '- ' /* 0x2d202020 */
#define kASMultiply '* ' /* 0x2a202020 */
#define kASDivide '/ ' /* 0x2f202020 */
#define kASQuotient 'div ' /* 0x64697620 */
#define kASRemainder 'mod ' /* 0x6d6f6420 */
#define kASPower '^ ' /* 0x5e202020 */
#define kASEqual '= '
#define kASNotEqual '≠ ' /* 0xad202020 */
#define kASGreaterThan '> '
#define kASGreaterThanOrEqual '>= '
#define kASLessThan '< '
#define kASLessThanOrEqual '<= '
#define kASComesBefore 'cbfr' /* 0x63626672 */
#define kASComesAfter 'cafr' /* 0x63616672 */
#define kASConcatenate 'ccat' /* 0x63636174 */
#define kASStartsWith 'bgwt'
#define kASEndsWith 'ends'
#define kASContains 'cont'
#define kASAnd 'AND '
#define kASOr 'OR ' /* Unary: */
#define kASNot 'NOT '
#define kASNegate 'neg ' /* 0x6e656720 */
#define keyASArg 'arg ' /* 0x61726720 */
/* event code for the 'error' statement */
#define kASErrorEventCode 'err ' /* 0x65727220 */
#define kOSAErrorArgs 'erra' /* 0x65727261 */
#define keyAEErrorObject 'erob' /* Added in AppleScript 1.3 from AppleScript private headers */
/* Properties: */
#define pLength 'leng' /* 0x6c656e67 */
#define pReverse 'rvse' /* 0x72767365 */
#define pRest 'rest' /* 0x72657374 */
#define pInherits 'c@#^' /* 0x6340235e */
#define pProperties 'pALL' /* User-Defined Record Fields: */
#define keyASUserRecordFields 'usrf' /* 0x75737266 */
#define typeUserRecordFields 'list'
#define keyASPrepositionAt 'at ' /* 0x61742020 */
#define keyASPrepositionIn 'in ' /* 0x696e2020 */
#define keyASPrepositionFrom 'from' /* 0x66726f6d */
#define keyASPrepositionFor 'for ' /* 0x666f7220 */
#define keyASPrepositionTo 'to ' /* 0x746f2020 */
#define keyASPrepositionThru 'thru' /* 0x74687275 */
#define keyASPrepositionThrough 'thgh' /* 0x74686768 */
#define keyASPrepositionBy 'by ' /* 0x62792020 */
#define keyASPrepositionOn 'on ' /* 0x6f6e2020 */
#define keyASPrepositionInto 'into' /* 0x696e746f */
#define keyASPrepositionOnto 'onto' /* 0x6f6e746f */
#define keyASPrepositionBetween 'btwn' /* 0x6274776e */
#define keyASPrepositionAgainst 'agst' /* 0x61677374 */
#define keyASPrepositionOutOf 'outo' /* 0x6f75746f */
#define keyASPrepositionInsteadOf 'isto' /* 0x6973746f */
#define keyASPrepositionAsideFrom 'asdf' /* 0x61736466 */
#define keyASPrepositionAround 'arnd' /* 0x61726e64 */
#define keyASPrepositionBeside 'bsid' /* 0x62736964 */
#define keyASPrepositionBeneath 'bnth' /* 0x626e7468 */
#define keyASPrepositionUnder 'undr' /* 0x756e6472 */
#define keyASPrepositionOver 'over' /* 0x6f766572 */
#define keyASPrepositionAbove 'abve' /* 0x61627665 */
#define keyASPrepositionBelow 'belw' /* 0x62656c77 */
#define keyASPrepositionApartFrom 'aprt' /* 0x61707274 */
#define keyASPrepositionGiven 'givn' /* 0x6769766e */
#define keyASPrepositionWith 'with' /* 0x77697468 */
#define keyASPrepositionWithout 'wout' /* 0x776f7574 */
#define keyASPrepositionAbout 'abou' /* 0x61626f75 */
#define keyASPrepositionSince 'snce' /* 0x736e6365 */
#define keyASPrepositionUntil 'till' /* 0x74696c6c */
/* Terminology & Dialect things: */
#define kDialectBundleResType 'Dbdl' /* 0x4462646c */
/* AppleScript Classes and Enums: */
#define cConstant 'enum'
#define cClassIdentifier 'pcls'
#define cObjectBeingExamined 'exmn'
#define cList 'list'
#define cSmallReal 'sing'
#define cReal 'doub'
#define cRecord 'reco'
#define cReference 'obj '
#define cUndefined 'undf' /* 0x756e6466 */
#define cMissingValue 'msng' /* AppleScript 1.3 newly created */
#define cSymbol 'symb' /* 0x73796d62 */
#define cLinkedList 'llst' /* 0x6c6c7374 */
#define cVector 'vect' /* 0x76656374 */
#define cEventIdentifier 'evnt' /* 0x65766e74 */
#define cKeyIdentifier 'kyid' /* 0x6b796964 */
#define cUserIdentifier 'uid ' /* 0x75696420 */
#define cPreposition 'prep' /* 0x70726570 */
#define cKeyForm 'kfrm'
#define cScript 'scpt' /* 0x73637074 */
#define cHandler 'hand' /* 0x68616e64 */
#define cProcedure 'proc' /* 0x70726f63 */
#define cClosure 'clsr' /* 0x636c7372 */
#define cRawData 'rdat' /* 0x72646174 */
#define cString 'TEXT'
#define cStringClass 'TEXT'
#define cNumber 'nmbr' /* 0x6e6d6272 */
#define cListElement 'celm' /* AppleScript 1.3 added from private headers */
#define cListOrRecord 'lr ' /* 0x6c722020 */
#define cListOrString 'ls ' /* 0x6c732020 */
#define cListRecordOrString 'lrs ' /* 0x6c727320 */
#define cNumberOrString 'ns ' /* AppleScript 1.3 for Display Dialog */
#define cNumberOrDateTime 'nd ' /* 0x6e642020 */
#define cNumberDateTimeOrString 'nds ' /* 0x6e647320 */
#define cAliasOrString 'sf '
#define cSeconds 'scnd' /* 0x73636e64 */
#define typeSound 'snd '
#define enumBooleanValues 'boov' /* Use this instead of typeBoolean to avoid with/without conversion */
#define kAETrue 'true'
#define kAEFalse 'fals'
#define enumMiscValues 'misc' /* 0x6d697363 */
#define kASCurrentApplication 'cura' /* 0x63757261 */
/* User-defined property ospecs: */
#define formUserPropertyID 'usrp' /* 0x75737270 */
/* Global properties: */
#define pASIt 'it ' /* 0x69742020 */
#define pASMe 'me ' /* 0x6d652020 */
#define pASResult 'rslt' /* 0x72736c74 */
#define pASSpace 'spac' /* 0x73706163 */
#define pASReturn 'ret ' /* 0x72657420 */
#define pASTab 'tab ' /* 0x74616220 */
#define pASPi 'pi ' /* 0x70692020 */
#define pASParent 'pare' /* 0x70617265 */
#define kASInitializeEventCode 'init' /* 0x696e6974 */
#define pASPrintLength 'prln' /* 0x70726c6e */
#define pASPrintDepth 'prdp' /* 0x70726470 */
#define pASTopLevelScript 'ascr' /* 0x61736372 */
/* Considerations */
#define kAECase 'case' /* 0x63617365 */
#define kAEDiacritic 'diac' /* 0x64696163 */
#define kAEWhiteSpace 'whit' /* 0x77686974 */
#define kAEHyphens 'hyph' /* 0x68797068 */
#define kAEExpansion 'expa' /* 0x65787061 */
#define kAEPunctuation 'punc' /* 0x70756e63 */
#define kAEZenkakuHankaku 'zkhk' /* 0x7a6b686b */
#define kAESmallKana 'skna' /* 0x736b6e61 */
#define kAEKataHiragana 'hika' /* 0x68696b61 */
/* AppleScript considerations: */
#define kASConsiderReplies 'rmte' /* 0x726d7465 */
#define enumConsiderations 'cons' /* 0x636f6e73 */
#define cCoercion 'coec' /* 0x636f6563 */
#define cCoerceUpperCase 'txup' /* 0x74787570 */
#define cCoerceLowerCase 'txlo' /* 0x74786c6f */
#define cCoerceRemoveDiacriticals 'txdc' /* 0x74786463 */
#define cCoerceRemovePunctuation 'txpc' /* 0x74787063 */
#define cCoerceRemoveHyphens 'txhy' /* 0x74786879 */
#define cCoerceOneByteToTwoByte 'txex' /* 0x74786578 */
#define cCoerceRemoveWhiteSpace 'txws' /* 0x74787773 */
#define cCoerceSmallKana 'txsk' /* 0x7478736b */
#define cCoerceZenkakuhankaku 'txze' /* 0x74787a65 */
#define cCoerceKataHiragana 'txkh' /* 0x74786b68 */
/* Lorax things: */
#define cZone 'zone' /* 0x7a6f6e65 */
#define cMachine 'mach' /* 0x6d616368 */
#define cAddress 'addr' /* 0x61646472 */
#define cRunningAddress 'radd' /* 0x72616464 */
#define cStorage 'stor' /* 0x73746f72 */
/* DateTime things: */
#define pASWeekday 'wkdy' /* 0x776b6479 */
#define pASMonth 'mnth' /* 0x6d6e7468 */
#define pASDay 'day ' /* 0x64617920 */
#define pASYear 'year' /* 0x79656172 */
#define pASTime 'time' /* 0x74696d65 */
#define pASDateString 'dstr' /* 0x64737472 */
#define pASTimeString 'tstr' /* 0x74737472 */
/* Months */
#define cMonth 'mnth'
#define cJanuary 'jan ' /* 0x6a616e20 */
#define cFebruary 'feb ' /* 0x66656220 */
#define cMarch 'mar ' /* 0x6d617220 */
#define cApril 'apr ' /* 0x61707220 */
#define cMay 'may ' /* 0x6d617920 */
#define cJune 'jun ' /* 0x6a756e20 */
#define cJuly 'jul ' /* 0x6a756c20 */
#define cAugust 'aug ' /* 0x61756720 */
#define cSeptember 'sep ' /* 0x73657020 */
#define cOctober 'oct ' /* 0x6f637420 */
#define cNovember 'nov ' /* 0x6e6f7620 */
#define cDecember 'dec ' /* 0x64656320 */
/* Weekdays */
#define cWeekday 'wkdy'
#define cSunday 'sun ' /* 0x73756e20 */
#define cMonday 'mon ' /* 0x6d6f6e20 */
#define cTuesday 'tue ' /* 0x74756520 */
#define cWednesday 'wed ' /* 0x77656420 */
#define cThursday 'thu ' /* 0x74687520 */
#define cFriday 'fri ' /* 0x66726920 */
#define cSaturday 'sat ' /* 0x73617420 */
/* AS 1.1 Globals: */
#define pASQuote 'quot' /* 0x71756f74 */
#define pASSeconds 'secs' /* 0x73656373 */
#define pASMinutes 'min ' /* 0x6d696e20 */
#define pASHours 'hour' /* 0x686f7572 */
#define pASDays 'days' /* 0x64617973 */
#define pASWeeks 'week' /* 0x7765656b */
/* Writing Code things: */
#define cWritingCodeInfo 'citl' /* 0x6369746c */
#define pScriptCode 'pscd' /* 0x70736364 */
#define pLangCode 'plcd' /* 0x706c6364 */
/* Magic Tell and End Tell events for logging: */
#define kASMagicTellEvent 'tell' /* 0x74656c6c */
#define kASMagicEndTellEvent 'tend' /* 0x74656e64 */
#endif /* __ASREGISTRY_R__ */
| /3.2/Universal/Interfaces/RIncludes/ASRegistry.r | no_license | elliotnunn/UniversalInterfaces | R | false | false | 13,273 | r | /*
File: ASRegistry.r
Contains: AppleScript Registry constants.
Version: Technology: AppleScript 1.3
Release: Universal Interfaces 3.2
Copyright: © 1991-1998 by Apple Computer, Inc., all rights reserved
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __ASREGISTRY_R__
#define __ASREGISTRY_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define keyAETarget 'targ' /* 0x74617267 */
#define keySubjectAttr 'subj' /* 0x7375626a */
/* Magic 'returning' parameter: */
#define keyASReturning 'Krtn' /* 0x4b72746e */
/* AppleScript Specific Codes: */
#define kASAppleScriptSuite 'ascr' /* 0x61736372 */
#define kASScriptEditorSuite 'ToyS' /* AppleScript 1.3 added from private headers */
#define kASTypeNamesSuite 'tpnm' /* 0x74706e6d */
/* dynamic terminologies */
#define typeAETE 'aete' /* 0x61657465 */
#define typeAEUT 'aeut' /* 0x61657574 */
#define kGetAETE 'gdte' /* 0x67647465 */
#define kGetAEUT 'gdut' /* 0x67647574 */
#define kUpdateAEUT 'udut' /* 0x75647574 */
#define kUpdateAETE 'udte' /* 0x75647465 */
#define kCleanUpAEUT 'cdut' /* 0x63647574 */
#define kASComment 'cmnt' /* 0x636d6e74 */
#define kASLaunchEvent 'noop' /* 0x6e6f6f70 */
#define keyScszResource 'scsz' /* 0x7363737A */
#define typeScszResource 'scsz' /* 0x7363737A */
/* subroutine calls */
#define kASSubroutineEvent 'psbr' /* 0x70736272 */
#define keyASSubroutineName 'snam' /* 0x736e616d */
#define kASPrepositionalSubroutine 'psbr' /* AppleScript 1.3 added from private headers */
#define keyASPositionalArgs 'parg' /* AppleScript 1.3 added from private headers */
/* Miscellaneous AppleScript commands */
#define kASStartLogEvent 'log1' /* AppleScript 1.3 Script Editor Start Log */
#define kASStopLogEvent 'log0' /* AppleScript 1.3 Script Editor Stop Log */
#define kASCommentEvent 'cmnt' /* AppleScript 1.3 magic "comment" event */
/* Binary: */
#define kASAdd '+ ' /* 0x2b202020 */
#define kASSubtract '- ' /* 0x2d202020 */
#define kASMultiply '* ' /* 0x2a202020 */
#define kASDivide '/ ' /* 0x2f202020 */
#define kASQuotient 'div ' /* 0x64697620 */
#define kASRemainder 'mod ' /* 0x6d6f6420 */
#define kASPower '^ ' /* 0x5e202020 */
#define kASEqual '= '
#define kASNotEqual '≠ ' /* 0xad202020 */
#define kASGreaterThan '> '
#define kASGreaterThanOrEqual '>= '
#define kASLessThan '< '
#define kASLessThanOrEqual '<= '
#define kASComesBefore 'cbfr' /* 0x63626672 */
#define kASComesAfter 'cafr' /* 0x63616672 */
#define kASConcatenate 'ccat' /* 0x63636174 */
#define kASStartsWith 'bgwt'
#define kASEndsWith 'ends'
#define kASContains 'cont'
#define kASAnd 'AND '
#define kASOr 'OR ' /* Unary: */
#define kASNot 'NOT '
#define kASNegate 'neg ' /* 0x6e656720 */
#define keyASArg 'arg ' /* 0x61726720 */
/* event code for the 'error' statement */
#define kASErrorEventCode 'err ' /* 0x65727220 */
#define kOSAErrorArgs 'erra' /* 0x65727261 */
#define keyAEErrorObject 'erob' /* Added in AppleScript 1.3 from AppleScript private headers */
/* Properties: */
#define pLength 'leng' /* 0x6c656e67 */
#define pReverse 'rvse' /* 0x72767365 */
#define pRest 'rest' /* 0x72657374 */
#define pInherits 'c@#^' /* 0x6340235e */
#define pProperties 'pALL' /* User-Defined Record Fields: */
#define keyASUserRecordFields 'usrf' /* 0x75737266 */
#define typeUserRecordFields 'list'
#define keyASPrepositionAt 'at ' /* 0x61742020 */
#define keyASPrepositionIn 'in ' /* 0x696e2020 */
#define keyASPrepositionFrom 'from' /* 0x66726f6d */
#define keyASPrepositionFor 'for ' /* 0x666f7220 */
#define keyASPrepositionTo 'to ' /* 0x746f2020 */
#define keyASPrepositionThru 'thru' /* 0x74687275 */
#define keyASPrepositionThrough 'thgh' /* 0x74686768 */
#define keyASPrepositionBy 'by ' /* 0x62792020 */
#define keyASPrepositionOn 'on ' /* 0x6f6e2020 */
#define keyASPrepositionInto 'into' /* 0x696e746f */
#define keyASPrepositionOnto 'onto' /* 0x6f6e746f */
#define keyASPrepositionBetween 'btwn' /* 0x6274776e */
#define keyASPrepositionAgainst 'agst' /* 0x61677374 */
#define keyASPrepositionOutOf 'outo' /* 0x6f75746f */
#define keyASPrepositionInsteadOf 'isto' /* 0x6973746f */
#define keyASPrepositionAsideFrom 'asdf' /* 0x61736466 */
#define keyASPrepositionAround 'arnd' /* 0x61726e64 */
#define keyASPrepositionBeside 'bsid' /* 0x62736964 */
#define keyASPrepositionBeneath 'bnth' /* 0x626e7468 */
#define keyASPrepositionUnder 'undr' /* 0x756e6472 */
#define keyASPrepositionOver 'over' /* 0x6f766572 */
#define keyASPrepositionAbove 'abve' /* 0x61627665 */
#define keyASPrepositionBelow 'belw' /* 0x62656c77 */
#define keyASPrepositionApartFrom 'aprt' /* 0x61707274 */
#define keyASPrepositionGiven 'givn' /* 0x6769766e */
#define keyASPrepositionWith 'with' /* 0x77697468 */
#define keyASPrepositionWithout 'wout' /* 0x776f7574 */
#define keyASPrepositionAbout 'abou' /* 0x61626f75 */
#define keyASPrepositionSince 'snce' /* 0x736e6365 */
#define keyASPrepositionUntil 'till' /* 0x74696c6c */
/* Terminology & Dialect things: */
#define kDialectBundleResType 'Dbdl' /* 0x4462646c */
/* AppleScript Classes and Enums: */
#define cConstant 'enum'
#define cClassIdentifier 'pcls'
#define cObjectBeingExamined 'exmn'
#define cList 'list'
#define cSmallReal 'sing'
#define cReal 'doub'
#define cRecord 'reco'
#define cReference 'obj '
#define cUndefined 'undf' /* 0x756e6466 */
#define cMissingValue 'msng' /* AppleScript 1.3 newly created */
#define cSymbol 'symb' /* 0x73796d62 */
#define cLinkedList 'llst' /* 0x6c6c7374 */
#define cVector 'vect' /* 0x76656374 */
#define cEventIdentifier 'evnt' /* 0x65766e74 */
#define cKeyIdentifier 'kyid' /* 0x6b796964 */
#define cUserIdentifier 'uid ' /* 0x75696420 */
#define cPreposition 'prep' /* 0x70726570 */
#define cKeyForm 'kfrm'
#define cScript 'scpt' /* 0x73637074 */
#define cHandler 'hand' /* 0x68616e64 */
#define cProcedure 'proc' /* 0x70726f63 */
#define cClosure 'clsr' /* 0x636c7372 */
#define cRawData 'rdat' /* 0x72646174 */
#define cString 'TEXT'
#define cStringClass 'TEXT'
#define cNumber 'nmbr' /* 0x6e6d6272 */
#define cListElement 'celm' /* AppleScript 1.3 added from private headers */
#define cListOrRecord 'lr ' /* 0x6c722020 */
#define cListOrString 'ls ' /* 0x6c732020 */
#define cListRecordOrString 'lrs ' /* 0x6c727320 */
#define cNumberOrString 'ns ' /* AppleScript 1.3 for Display Dialog */
#define cNumberOrDateTime 'nd ' /* 0x6e642020 */
#define cNumberDateTimeOrString 'nds ' /* 0x6e647320 */
#define cAliasOrString 'sf '
#define cSeconds 'scnd' /* 0x73636e64 */
#define typeSound 'snd '
#define enumBooleanValues 'boov' /* Use this instead of typeBoolean to avoid with/without conversion */
#define kAETrue 'true'
#define kAEFalse 'fals'
#define enumMiscValues 'misc' /* 0x6d697363 */
#define kASCurrentApplication 'cura' /* 0x63757261 */
/* User-defined property ospecs: */
#define formUserPropertyID 'usrp' /* 0x75737270 */
/* Global properties: */
#define pASIt 'it ' /* 0x69742020 */
#define pASMe 'me ' /* 0x6d652020 */
#define pASResult 'rslt' /* 0x72736c74 */
#define pASSpace 'spac' /* 0x73706163 */
#define pASReturn 'ret ' /* 0x72657420 */
#define pASTab 'tab ' /* 0x74616220 */
#define pASPi 'pi ' /* 0x70692020 */
#define pASParent 'pare' /* 0x70617265 */
#define kASInitializeEventCode 'init' /* 0x696e6974 */
#define pASPrintLength 'prln' /* 0x70726c6e */
#define pASPrintDepth 'prdp' /* 0x70726470 */
#define pASTopLevelScript 'ascr' /* 0x61736372 */
/* Considerations */
#define kAECase 'case' /* 0x63617365 */
#define kAEDiacritic 'diac' /* 0x64696163 */
#define kAEWhiteSpace 'whit' /* 0x77686974 */
#define kAEHyphens 'hyph' /* 0x68797068 */
#define kAEExpansion 'expa' /* 0x65787061 */
#define kAEPunctuation 'punc' /* 0x70756e63 */
#define kAEZenkakuHankaku 'zkhk' /* 0x7a6b686b */
#define kAESmallKana 'skna' /* 0x736b6e61 */
#define kAEKataHiragana 'hika' /* 0x68696b61 */
/* AppleScript considerations: */
#define kASConsiderReplies 'rmte' /* 0x726d7465 */
#define enumConsiderations 'cons' /* 0x636f6e73 */
#define cCoercion 'coec' /* 0x636f6563 */
#define cCoerceUpperCase 'txup' /* 0x74787570 */
#define cCoerceLowerCase 'txlo' /* 0x74786c6f */
#define cCoerceRemoveDiacriticals 'txdc' /* 0x74786463 */
#define cCoerceRemovePunctuation 'txpc' /* 0x74787063 */
#define cCoerceRemoveHyphens 'txhy' /* 0x74786879 */
#define cCoerceOneByteToTwoByte 'txex' /* 0x74786578 */
#define cCoerceRemoveWhiteSpace 'txws' /* 0x74787773 */
#define cCoerceSmallKana 'txsk' /* 0x7478736b */
#define cCoerceZenkakuhankaku 'txze' /* 0x74787a65 */
#define cCoerceKataHiragana 'txkh' /* 0x74786b68 */
/* Lorax things: */
#define cZone 'zone' /* 0x7a6f6e65 */
#define cMachine 'mach' /* 0x6d616368 */
#define cAddress 'addr' /* 0x61646472 */
#define cRunningAddress 'radd' /* 0x72616464 */
#define cStorage 'stor' /* 0x73746f72 */
/* DateTime things: */
#define pASWeekday 'wkdy' /* 0x776b6479 */
#define pASMonth 'mnth' /* 0x6d6e7468 */
#define pASDay 'day ' /* 0x64617920 */
#define pASYear 'year' /* 0x79656172 */
#define pASTime 'time' /* 0x74696d65 */
#define pASDateString 'dstr' /* 0x64737472 */
#define pASTimeString 'tstr' /* 0x74737472 */
/* Months */
#define cMonth 'mnth'
#define cJanuary 'jan ' /* 0x6a616e20 */
#define cFebruary 'feb ' /* 0x66656220 */
#define cMarch 'mar ' /* 0x6d617220 */
#define cApril 'apr ' /* 0x61707220 */
#define cMay 'may ' /* 0x6d617920 */
#define cJune 'jun ' /* 0x6a756e20 */
#define cJuly 'jul ' /* 0x6a756c20 */
#define cAugust 'aug ' /* 0x61756720 */
#define cSeptember 'sep ' /* 0x73657020 */
#define cOctober 'oct ' /* 0x6f637420 */
#define cNovember 'nov ' /* 0x6e6f7620 */
#define cDecember 'dec ' /* 0x64656320 */
/* Weekdays */
#define cWeekday 'wkdy'
#define cSunday 'sun ' /* 0x73756e20 */
#define cMonday 'mon ' /* 0x6d6f6e20 */
#define cTuesday 'tue ' /* 0x74756520 */
#define cWednesday 'wed ' /* 0x77656420 */
#define cThursday 'thu ' /* 0x74687520 */
#define cFriday 'fri ' /* 0x66726920 */
#define cSaturday 'sat ' /* 0x73617420 */
/* AS 1.1 Globals: */
#define pASQuote 'quot' /* 0x71756f74 */
#define pASSeconds 'secs' /* 0x73656373 */
#define pASMinutes 'min ' /* 0x6d696e20 */
#define pASHours 'hour' /* 0x686f7572 */
#define pASDays 'days' /* 0x64617973 */
#define pASWeeks 'week' /* 0x7765656b */
/* Writing Code things: */
#define cWritingCodeInfo 'citl' /* 0x6369746c */
#define pScriptCode 'pscd' /* 0x70736364 */
#define pLangCode 'plcd' /* 0x706c6364 */
/* Magic Tell and End Tell events for logging: */
#define kASMagicTellEvent 'tell' /* 0x74656c6c */
#define kASMagicEndTellEvent 'tend' /* 0x74656e64 */
#endif /* __ASREGISTRY_R__ */
|
\name{SpatialVS}
\alias{SpatialVS}
\title{Function for spatial variable selection}
\usage{
SpatialVS(dat.obj, alpha.vec = seq(0.6, 1, by = 0.05),
lambda.vec = seq(0.15, 1, len = 50), method = "PQL", plots = F,
intercept = T, verbose = T)
}
\arguments{
\item{dat.obj}{List, input data. Must contain:
\enumerate{
\item \code{X} numeric matrix, the covariates.
\item \code{y} integer vector, the response in counts.
\item \code{dist} numeric matrix, the distance matrix.
\item \code{offset} numeric vector, the offset item.
}}
\item{alpha.vec}{numeric vector, a vector of possible values of regularization parameter. The range is [0,1].}
\item{lambda.vec}{numeric vector, a vector of positive values of regularization parameter. }
\item{method}{string, the method to be used. Options are:
\enumerate{
\item \code{"PQL"} penalized quasi-likelihood method that considers spatial correlation.
\item \code{"PQL.nocor"} penalized quasi-likelihood method that ignores spatial correlation.
\item \code{"APL"} approximate penalized loglikelihood method that considers spatial correlation.
\item \code{"APL.nocor"} approximate penalized loglikelihood method that ignores spatial correlation.
}}
\item{plots}{bool, if \code{TRUE}, a contour plot of AIC/BIC values is generated.}
\item{intercept}{bool, if \code{TRUE}, an intercept item will be included in the model.}
\item{verbose}{bool, if \code{TRUE}, various updates are printed during each iteration of the algorithm.}
}
\value{
A list of 13 items:
\enumerate{
\item \code{dat.obj}, List, a copy of the \code{dat.obj} input.
\item \code{start}, Initial values of parameters given by glmmPQL().
\item \code{L.obj}, Regression coefficients under each \code{alpha.vec} and \code{lambda.vec}, under the adaptive elastic net.
\item \code{Lout.obj}, AIC and BIC values under each \code{L.obj} value, under the adaptive elastic net.
\item \code{contour.out.obj}, Object used to generate contour plot as a function of \code{alpha.vec} and \code{lambda.vec}, with AIC or BIC as the output. Used to choose best penalty parameter, under the adaptive elastic net.
\item \code{L.best.obj}, Model fitting results under the best chosen \code{alpha.vec} and \code{lambda.vec}, under the adaptive elastic net.
\item \code{Lout.best.obj}, Best BIC value for \code{L.best.obj}.
\item \code{L.EN.obj, Lout.EN.obj, contour.out.EN.obj, L.EN.best.obj}, Similar items but under the elastic penalty.
\item \code{lasso.weight}, Numeric, specifies the adaptive Lasso weight.
\item \code{method}, String, the method used for computing the approximate likelihood function.
}
}
\description{
Perform variable selection for the spatial Poisson regression model under adaptive elastic net penalty.
}
\references{
Xie, Y., Xu, L., Li, J., Deng, X., Hong, Y., Kolivras, K., and Gaines, D. N. (2018). Spatial Variable Selection and An Application to Virginia Lyme Disease Emergence. Preprint, arXiv:1809.06418 [stat.AP].
}
\examples{
#use small.test.dat as the input to fit the spatial Poisson regression model.
#a grid of alpha.vec and lambda.vec is typically used.
#Here one point of alpha.vec and lambda.vec is used for fast illustration.
test.fit<-SpatialVS(dat.obj=small.test.dat, alpha.vec=0.5,
lambda.vec=5, method="PQL", intercept=TRUE, verbose=FALSE)
}
\keyword{function}
| /man/SpatialVS.Rd | no_license | cran/SpatialVS | R | false | false | 3,334 | rd | \name{SpatialVS}
\alias{SpatialVS}
\title{Function for spatial variable selection}
\usage{
SpatialVS(dat.obj, alpha.vec = seq(0.6, 1, by = 0.05),
lambda.vec = seq(0.15, 1, len = 50), method = "PQL", plots = F,
intercept = T, verbose = T)
}
\arguments{
\item{dat.obj}{List, input data. Must contains:
\enumerate{
\item \code{X} numeric matrix, the covariates.
\item \code{y} integer vector, the response in counts.
\item \code{dist} numeric matrix, the distance matrix.
\item \code{offset} numeric vector, the offset item.
}}
\item{alpha.vec}{numeric vector, a vector of possible values of regularization parameter. The range is [0,1].}
\item{lambda.vec}{numeric vector, a vector of positive values of regularization parameter. }
\item{method}{string, the method to be used. Options are:
\enumerate{
\item \code{"PQL"} penalized quasi-likelihood method that considers spatial correlation.
\item \code{"PQL.nocor"} penalized quasi-likelihood method that ignores spatial correlation.
\item \code{"APL"} approximate penalized loglikelihood method that considers spatial correlation.
\item \code{"APL.nocor"} approximate penalized loglikelihood method that ignores spatial correlation.
}}
\item{plots}{bool, if \code{True}, contour plot of AIC/BIC values is generated.}
\item{intercept}{bool, if \code{True}, an intercept item will be included in model.}
\item{verbose}{bool, if \code{True}, various updates are printed during each iteration of the algorithm.}
}
\value{
A list of 13 items:
\enumerate{
\item \code{dat.obj}, List, a copy of the \code{dat.obj} input.
\item \code{start}, Initial values of parameters given by glmmPQL().
\item \code{L.obj}, Regression coefficients under each \code{alpha.vec} and \code{lambda.vec}, under the adaptive elastic net.
\item \code{Lout.obj}, AIC and BIC values under each \code{L.obj value}, under the adaptive elastic net.
\item \code{contour.out.obj}, Object used to generate contour plot as a function of \code{alpha.vec} and \code{lambda.vec}, with AIC or BIC as the output. Used to choose best penalty parameter, under the adaptive elastic net.
\item \code{L.best.obj}, Model fitting results under the best chosen \code{alpha.vec} and \code{lambda.vec}, under the adaptive elastic net.
\item \code{Lout.best.obj}, Best BIC value for \code{L.best.obj}.
\item \code{L.EN.obj, Lout.EN.obj, contour.out.EN.obj, L.EN.best.obj}, Similar items but under the elastic penalty.
\item \code{lasso.weight}, Numeric, specifies the adaptive Lasso weight.
\item \code{method}, String, the method used for computing the approximate likelihood function.
}
}
\description{
Perform variable selection for the spatial Poisson regression model under adaptive elastic net penalty.
}
\references{
Xie, Y., Xu, L., Li, J., Deng, X., Hong, Y., Kolivras, K., and Gaines, D. N. (2018). Spatial Variable Selection and An Application to Virginia Lyme Disease Emergence. Preprint, arXiv:1809.06418 [stat.AP].
}
\examples{
#use small.test.dat as the input to fit the spatial Poisson regression model.
#a grid of alpha.vec and lambda.vec is typically used.
#Here one point of alpha.vec and lambda.vec is used for fast illustration.
test.fit<-SpatialVS(dat.obj=small.test.dat, alpha.vec=0.5,
lambda.vec=5, method="PQL", intercept=TRUE, verbose=FALSE)
}
\keyword{function}
|
#' Product metadata.
#'
#' Product metadata for all products purchased by households participating in
#' the Customer Journey study.
#'
#' @source 84.51, Customer Journey study, \url{http://www.8451.com/area51/}
#' @format A data frame with 92,331 rows and 7 variables
#' \describe{
#' \item{product_id}{Uniquely identifies each product}
#' \item{manufacturer_id}{Uniquely identifies each manufacturer}
#' \item{department}{Groups similar products together}
#' \item{brand}{Indicates Private or National label brand}
#' \item{product_category}{Groups similar products together at lower level}
#' \item{product_type}{Groups similar products together at lowest level}
#' \item{package_size}{Indicates package size (not available for all products)}
#' }
#' @examples
#' \dontrun{
#' if (require("dplyr")) {
#' products
#'
#' # Transaction line items that don't have product metadata
#' transactions %>%
#' anti_join(products, "product_id")
#'
#' }
#' }
"products"
#' @importFrom tibble tibble
NULL | /R/products.R | no_license | StevenMMortimer/completejourney | R | false | false | 997 | r | #' Product metadata.
#'
#' Product metadata for all products purchased by households participating in
#' the Customer Journey study.
#'
#' @source 84.51, Customer Journey study, \url{http://www.8451.com/area51/}
#' @format A data frame with 92,331 rows and 7 variables
#' \describe{
#' \item{product_id}{Uniquely identifies each product}
#' \item{manufacturer_id}{Uniquely identifies each manufacturer}
#' \item{department}{Groups similar products together}
#' \item{brand}{Indicates Private or National label brand}
#' \item{product_category}{Groups similar products together at lower level}
#' \item{product_type}{Groups similar products together at lowest level}
#' \item{package_size}{Indicates package size (not available for all products)}
#' }
#' @examples
#' \dontrun{
#' if (require("dplyr")) {
#' products
#'
#' # Transaction line items that don't have product metadata
#' transactions %>%
#' anti_join(products, "product_id")
#'
#' }
#' }
"products"
#' @importFrom tibble tibble
NULL |
#!/usr/bin/env Rscript
# Average ("bagging") the probabilities produced by several models and
# write the Kaggle stimulus file for the top-N clients.
#
# Usage: promedio.R <cantidad_de_estimulos> <path_probabilidades>
#   1: number of stimuli: 6000, 6500, 7000, ...
#   2: directory containing one probability CSV per model
#      (each CSV must have columns `numero_de_cliente` and `prob`)

args <- commandArgs(trailingOnly = TRUE)

# PARA DEBUG
# args <- c('0.025', '~/repos/dmeyf/path.csv.probs')

if (length(args) != 2) {
  stop("Tienen que ser 2 parametros:
    1: cantidad de estimulos: 6000, 6500, 7000, ...
    2: path probabilidades", call. = FALSE)
}

library(data.table)

cantidad_de_estimulos <- as.integer(args[1])
path_prob <- args[2]
# file.path() joins with '/' regardless of whether path_prob has a
# trailing slash (the original mixed paste0(path, file) and paste0(path, '/', file)).
path_salida <- file.path(path_prob, paste0('promedio_', cantidad_de_estimulos, '.csv'))

# Read every probability CSV, sorting each one by numero_de_cliente so the
# rows are aligned across models.
# BUGFIX: the original called csv[order(numero_de_cliente)] without
# assigning the result, so the reordering was silently discarded and the
# per-model columns could be misaligned.
columnas <- list()
for (path in list.files(path_prob)) {
  csv <- fread(file.path(path_prob, path))
  csv <- csv[order(numero_de_cliente)]
  columnas[[paste0('prob', length(columnas) + 1L)]] <- csv$prob
}
if (length(columnas) == 0L) {
  stop("No se encontraron archivos de probabilidades en: ", path_prob, call. = FALSE)
}

# Assemble one column per model (avoids `:=` on a null data.table, which
# data.table rejects) plus the client ids (same order in every file after
# the sort above).
probabilidades <- names(columnas)
votos <- as.data.table(columnas)
votos[, numero_de_cliente := csv$numero_de_cliente]

# Row-wise mean of all model probabilities.
votos[, promedio := rowMeans(.SD), .SDcols = probabilidades]

# Stimulate (1) the `cantidad_de_estimulos` clients with the highest
# average probability; 0 for the rest.
probs <- votos[order(-promedio)]
estimulos <- c(rep(1, cantidad_de_estimulos),
               rep(0, length(probs$numero_de_cliente) - cantidad_de_estimulos))

rutiles::kaggle_csv(clientes = probs$numero_de_cliente, estimulos = estimulos, path = path_salida)
| /scripts/promedio.R | permissive | miglesias91/dmeyf | R | false | false | 1,106 | r | #!/usr/bin/env Rscript
# borro todo
rm( list=ls() )
args = commandArgs(trailingOnly=TRUE)
# PARA DEBUG
# args = c('0.025', '~/repos/dmeyf/path.csv.probs')
if (length(args) != 2) {
stop("Tienen que ser 2 parametros:
1: cantidad de estimulos: 6000, 6500, 7000, ...
2: path probabilidades", call.=FALSE)
}
library(data.table)
cantidad_de_estimulos = as.integer(args[1])
path_prob = args[2]
path_salida = paste0(path_prob,'/promedio_', cantidad_de_estimulos, '.csv')
votos = data.table()
i = 0
for (path in list.files(path_prob)){
i = i + 1
csv = fread(paste0(path_prob,path))
csv[order(numero_de_cliente)]
votos = votos[, paste0('prob',i) := csv$prob]
}
probabilidades = paste0('prob', seq(1,i))
votos[, numero_de_cliente := csv$numero_de_cliente]
votos[, promedio := rowMeans(.SD) , .SDcols = probabilidades]
probs = votos[order(-promedio)]
estimulos = c(seq(1, 1, length.out = cantidad_de_estimulos), seq(0, 0, length.out = length(probs$numero_de_cliente) - cantidad_de_estimulos))
rutiles::kaggle_csv(clientes = probs$numero_de_cliente, estimulos = estimulos, path = path_salida)
|
library(readxl)

# Forecasting quarterly Coca-Cola sales:
#   1. build trend and quarterly-dummy features,
#   2. fit several trend/seasonality models on a chronological train/test split,
#   3. compare hold-out RMSE,
#   4. refit the chosen model on the full data and plot actual vs predicted.
cola <- read_excel(file.choose())
windows()  # Windows-only graphics device (kept from the original)
plot(cola$Sales, type = 'o')

# Quarterly seasonal dummies derived from the Quarter label (e.g. "Q1_86").
# BUGFIX: the original also built dummy matrices with outer(), one of which
# referenced an undefined object `qua` and errored; that dead code is removed.
Q1 <- ifelse(grepl("Q1", cola$Quarter), '1', '0')
Q2 <- ifelse(grepl("Q2", cola$Quarter), '1', '0')
Q3 <- ifelse(grepl("Q3", cola$Quarter), '1', '0')
Q4 <- ifelse(grepl("Q4", cola$Quarter), '1', '0')
CocacolaData <- cbind(cola, Q1, Q2, Q3, Q4)
View(CocacolaData)
colnames(CocacolaData)

# Linear time index, its square, and the log of the response.
CocacolaData["t"] <- 1:42
View(CocacolaData)
CocacolaData["log_Sales"] <- log(CocacolaData["Sales"])
CocacolaData["t_square"] <- CocacolaData["t"] * CocacolaData["t"]
attach(CocacolaData)  # kept for parity with the original script

# Chronological split: last 6 quarters held out for testing.
train <- CocacolaData[1:36, ]
test  <- CocacolaData[37:42, ]

# --- Linear trend ---
linear_model <- lm(Sales ~ t, data = train)
summary(linear_model)
linear_pred <- data.frame(predict(linear_model, interval = 'predict', newdata = test))
View(linear_pred)
rmse_linear <- sqrt(mean((test$Sales - linear_pred$fit)^2, na.rm = TRUE))
rmse_linear

# --- Exponential trend (linear model on log scale) ---
expo_model <- lm(log_Sales ~ t, data = train)
summary(expo_model)
expo_pred <- data.frame(predict(expo_model, interval = 'predict', newdata = test))
rmse_expo <- sqrt(mean((test$Sales - exp(expo_pred$fit))^2, na.rm = TRUE))
rmse_expo

# --- Quadratic trend ---
Quad_model <- lm(Sales ~ t + t_square, data = train)
summary(Quad_model)
Quad_pred <- data.frame(predict(Quad_model, interval = 'predict', newdata = test))
rmse_Quad <- sqrt(mean((test$Sales - Quad_pred$fit)^2, na.rm = TRUE))
rmse_Quad

# --- Additive seasonality with linear trend ---
Add_sea_Linear_model <- lm(Sales ~ t + Q1 + Q2 + Q3 + Q4, data = train)
summary(Add_sea_Linear_model)
Add_sea_Linear_pred <- data.frame(predict(Add_sea_Linear_model, interval = 'predict', newdata = test))
rmse_Add_sea_Linear <- sqrt(mean((test$Sales - Add_sea_Linear_pred$fit)^2, na.rm = TRUE))
rmse_Add_sea_Linear

# --- Additive seasonality with quadratic trend ---
Add_sea_Quad_model <- lm(Sales ~ t + t_square + Q1 + Q2 + Q3 + Q4, data = train)
summary(Add_sea_Quad_model)
Add_sea_Quad_pred <- data.frame(predict(Add_sea_Quad_model, interval = 'predict', newdata = test))
rmse_Add_sea_Quad <- sqrt(mean((test$Sales - Add_sea_Quad_pred$fit)^2, na.rm = TRUE))
rmse_Add_sea_Quad

# --- Multiplicative seasonality (no trend) ---
multi_sea_model <- lm(log_Sales ~ Q1 + Q2 + Q3 + Q4, data = train)
summary(multi_sea_model)
multi_sea_pred <- data.frame(predict(multi_sea_model, newdata = test, interval = 'predict'))
rmse_multi_sea <- sqrt(mean((test$Sales - exp(multi_sea_pred$fit))^2, na.rm = TRUE))
rmse_multi_sea  # BUGFIX: was the typo `rmse_multi_se` (undefined object)

# --- Multiplicative seasonality with linear trend ---
multi_add_sea_model <- lm(log_Sales ~ t + Q1 + Q2 + Q3 + Q4, data = train)
summary(multi_add_sea_model)
multi_add_sea_pred <- data.frame(predict(multi_add_sea_model, newdata = test, interval = 'predict'))
rmse_multi_add_sea <- sqrt(mean((test$Sales - exp(multi_add_sea_pred$fit))^2, na.rm = TRUE))
rmse_multi_add_sea

# Final model, refit on the complete data so fitted values cover all 42
# quarters (the original referenced an undefined `new_model`, and a
# train-only fit would yield only 36 fitted values for a 42-row cbind).
# Additive seasonality + quadratic trend is used here; confirm against the
# printed RMSEs above that it is indeed the lowest for your data.
new_model <- lm(Sales ~ t + t_square + Q1 + Q2 + Q3 + Q4, data = CocacolaData)
new_model_fin <- new_model$fitted.values
View(new_model_fin)

# Side-by-side table of actuals and predictions for plotting.
Quarter <- as.data.frame(CocacolaData$Quarter)
Final <- as.data.frame(cbind(Quarter, CocacolaData$Sales, new_model_fin))
colnames(Final) <- c("Quarter", "Sales", "New_Pred_Value")

plot(Final$Sales, main = "ActualGraph", xlab = "Sales(Actual)", ylab = "Quarter",
     col.axis = "blue", type = "o")
plot(Final$New_Pred_Value, main = "PredictedGraph", xlab = "Sales(Predicted)", ylab = "Quarter",
     col.axis = "Green", type = "s")
| /cococola.R | no_license | Sivakrishnairs/Sivakrishnairs | R | false | false | 3,073 | r | library(readxl)
cola<-read_excel(file.choose())
windows()
plot(cola$Sales,type='o')
X<- data.frame(outer(rep(cola$Quarter,length = 42), cola$Quarter,"==") + 0 )
colnames(X)<-ts (X, frequency = 42, start = c(1986, 1))
H<-data.frame(outer(rep(qua,length = 4), cola$Quarter,"==") + 0 )
Q1 <- ifelse(grepl("Q1",cola$Quarter),'1','0')
Q2 <- ifelse(grepl("Q2",cola$Quarter),'1','0')
Q3 <- ifelse(grepl("Q3",cola$Quarter),'1','0')
Q4 <- ifelse(grepl("Q4",cola$Quarter),'1','0')
CocacolaData<-cbind(cola,Q1,Q2,Q3,Q4)
View(CocacolaData)
colnames(CocacolaData)
CocacolaData["t"]<- 1:42
View(CocacolaData)
CocacolaData["log_Sales"]<-log(CocacolaData["Sales"])
CocacolaData["t_square"]<-CocacolaData["t"]*CocacolaData["t"]
attach(CocacolaData)
train<-CocacolaData[1:36,]
test<-CocacolaData[37:42,]
linear_model<-lm(Sales~t,data=train)
summary(linear_model)
linear_pred<-data.frame(predict(linear_model,interval='predict',newdata =test))
View(linear_pred)
rmse_linear<-sqrt(mean((test$Sales-linear_pred$fit)^2,na.rm = T))
rmse_linear
expo_model<-lm(log_Sales~t,data=train)
summary(expo_model)
expo_pred<-data.frame(predict(expo_model,interval='predict',newdata=test))
rmse_expo<-sqrt(mean((test$Sales-exp(expo_pred$fit))^2,na.rm = T))
rmse_expo
Quad_model<-lm(Sales~t+t_square,data=train)
summary(Quad_model)
Quad_pred<-data.frame(predict(Quad_model,interval='predict',newdata=test))
rmse_Quad<-sqrt(mean((test$Sales-Quad_pred$fit)^2,na.rm=T))
rmse_Quad
Add_sea_Linear_model<-lm(Sales~t+Q1+Q2+Q3+Q4,data=train)
summary(Add_sea_Linear_model)
Add_sea_Linear_pred<-data.frame(predict(Add_sea_Linear_model,interval='predict',newdata=test))
rmse_Add_sea_Linear<-sqrt(mean((test$Sales-Add_sea_Linear_pred$fit)^2,na.rm=T))
rmse_Add_sea_Linear
Add_sea_Quad_model<-lm(Sales~t+t_square+Q1+Q2+Q3+Q4,data=train)
summary(Add_sea_Quad_model)
Add_sea_Quad_pred<-data.frame(predict(Add_sea_Quad_model,interval='predict',newdata=test))
rmse_Add_sea_Quad<-sqrt(mean((test$Sales-Add_sea_Quad_pred$fit)^2,na.rm=T))
rmse_Add_sea_Quad
multi_sea_model<-lm(log_Sales~Q1+Q2+Q3+Q4,data = train)
summary(multi_sea_model)
multi_sea_pred<-data.frame(predict(multi_sea_model,newdata=test,interval='predict'))
rmse_multi_sea<-sqrt(mean((test$Sales-exp(multi_sea_pred$fit))^2,na.rm = T))
rmse_multi_se
multi_add_sea_model<-lm(log_Sales~t+Q1+Q2+Q3+Q4,data = train)
summary(multi_add_sea_model)
multi_add_sea_pred<-data.frame(predict(multi_add_sea_model,newdata=test,interval='predict'))
rmse_multi_add_sea<-sqrt(mean((test$Sales-exp(multi_add_sea_pred$fit))^2,na.rm = T))
new_model_fin <- new_model$fitted.values
View(new_model_fin)
Quarter <- as.data.frame(CocacolaData$Quarter)
Final <- as.data.frame(cbind(Quarter,CocacolaData$Sales,new_model_fin))
colnames(Final) <-c("Quarter","Sales","New_Pred_Value")
plot(Final$Sales,main = "ActualGraph", xlab="Sales(Actual)", ylab="Quarter",
col.axis="blue",type="o")
plot(Final$New_Pred_Value, main = "PredictedGraph", xlab="Sales(Predicted)", ylab="Quarter",
col.axis="Green",type="s")
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# general stuff needed outside of app -------------------------------------
library(shiny)
library(tidyverse)
library(htmltools)
set.seed(1834)
shinyServer(function(input, output, session){
my_data <- reactive({
ability_mu<- 100
ability_sigma <- 15
sigma_g <- input$grade_noise
sigma_s <- input$sat_noise
n <- input$n_samples
minority_perc <- input$minority_perc/100
dist_of_groups <- c(minority_perc, 1-minority_perc)
number_groups <- c(1,2)
group_1_sat_taken <- input$group_1_taken
group_2_sat_taken <- input$group_2_taken
initial_data <-data_frame(ability = rnorm(n, ability_mu, ability_sigma)) %>%
mutate(student = paste0("student_", row_number()),
group_membership = sample(number_groups, n, prob = dist_of_groups, T))
grades <- rnorm(n, initial_data$ability, sigma_g)
initial_data <- initial_data %>% add_column(grades = grades)
# Group 1 Test Takers
group_1 <- filter(initial_data, group_membership == 1)
group_1_sat_matrix <- matrix(
data = rnorm(group_1_sat_taken*nrow(group_1), mean = group_1$ability, sd =sigma_s),
nrow = nrow(group_1),
ncol = group_1_sat_taken,
byrow = F)
group_1_sat_matrix = cbind(group_1_sat_matrix,
max_score = apply(group_1_sat_matrix, MARGIN = 1, max))
group_1 <- group_1 %>%
bind_cols(as_data_frame(group_1_sat_matrix))
# Group 2 data prep
group_2 <- filter(initial_data, group_membership == 2)
group_2_sat_matrix <- matrix(
data = rnorm(group_2_sat_taken*nrow(group_2), mean = group_2$ability, sd =sigma_s),
nrow = nrow(group_2),
ncol = group_2_sat_taken,
byrow = F)
group_2_sat_matrix = cbind(group_2_sat_matrix,
max_score = apply(group_2_sat_matrix, MARGIN = 1, max))
group_2 <- group_2 %>%
bind_cols(as_data_frame(group_2_sat_matrix))
# Bring the Two Groups Together
combined_student_data <- bind_rows(group_1, group_2)
print(head(combined_student_data))
out <- combined_student_data
})
  # Estimate each student's ability from grades + best SAT score and flag
  # admission (1/0) against the ability cutoff slider. When input$mod_group
  # is TRUE a separate linear model is fit per group; otherwise one pooled
  # model is used.
  model_fits <- reactive({
    if(input$mod_group){
      my_data() %>%
        group_by(group_membership) %>%
        nest() %>%
        # Per-group fit; [".fitted"] keeps a one-column data frame so
        # unnest() splices the fitted values back beside the raw data.
        mutate(fit = map(data, ~broom::augment(lm(ability ~ grades + max_score, data = .))[".fitted"])) %>%
        unnest() %>%
        mutate(admitted = ifelse(.fitted >= input$intelligence_cut, 1, 0))
    } else{
      my_data() %>%
        # Pooled fit; [[".fitted"]] extracts the fitted values as a vector.
        add_column(.fitted = broom::augment(lm(ability ~ grades + max_score,
                                               data = my_data()))[[".fitted"]]) %>%
        mutate(admitted = ifelse(.fitted >= input$intelligence_cut, 1, 0))
    }
  })
  # Scatter of grades vs best SAT score coloured by group, with fitted
  # regression line(s). With input$mod_group the colour aesthetic sits in
  # ggplot(), so geom_smooth() draws one line per group; otherwise a single
  # pooled line is drawn over group-coloured points.
  # NOTE(review): the subtitle mentions a cutoff threshold line, but no
  # cutoff geom is drawn here -- confirm intended behaviour.
  make_first_plot <- function(){
    if(input$mod_group){
      # Point transparency shrinks as the number of plotted rows grows.
      my_alpha <- 1/ (nrow(model_fits())/5000)
      model_fits() %>%
        ggplot(aes(grades, max_score, color = as.factor(group_membership)))+
        geom_point(alpha = my_alpha)+
        labs(
          title = "Distribution of Scores",
          subtitle = "Line Represents Cutoff Threshold for Admission",
          color = "Group",
          x = "Grade Score",
          y= "Test Score (max)"
        )+
        theme_minimal()+
        geom_smooth(method = "lm", se = FALSE)
    } else{
      my_alpha <- 1/ (nrow(model_fits())/5000)
      model_fits() %>%
        ggplot()+
        geom_point(aes(grades, max_score, color = as.factor(group_membership)),alpha = my_alpha)+
        labs(
          title = "Distribution of Scores",
          subtitle = "Line Represents Cutoff Threshold for Admission",
          color = "Group",
          x = "Grade Score",
          y= "Test Score (max)"
        )+
        theme_minimal()+
        geom_smooth(aes(grades, max_score), method = "lm", se = FALSE)
    }
  }
  # Per-group summary table: group size, admission rate (%), and mean true
  # ability / grades / best SAT score, rounded to 2 decimals.
  make_summary_statz <- function(){
    dat <-model_fits() %>%
      group_by(group_membership) %>%
      summarise(total_students = n(),
                avg_true_ability = mean(ability),
                avg_grade = mean(grades),
                avg_sat = mean(max_score),
                admit = mean(admitted)*100) %>%
      mutate_if(is.numeric, round, 2) %>%
      # Rename the columns for display, then reorder them.
      set_names(c("Group", "#", "Avg True Ability", "Avg Grades", "Avg SAT", "% Admit")) %>%
      select(`Group`, `#`, `% Admit`, `Avg True Ability`, `Avg Grades`, `Avg SAT`)
    # The assignment is the last expression, so its value (the table) is
    # what the calling renderer receives.
  }
  # Bind the scatter plot and the summary table to their UI outputs.
  output$initial_plot <- renderPlot({make_first_plot()})
  output$initial_statz <- renderDataTable({make_summary_statz()})
}) | /server.R | no_license | medewitt/bias_simulator | R | false | false | 4,779 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# general stuff needed outside of app -------------------------------------
library(shiny)
library(tidyverse)
library(htmltools)
set.seed(1834)
# Server logic: simulate a student population, model admissions from noisy
# grade/SAT observations, and render a scatter plot plus per-group summary.
shinyServer(function(input, output, session){
  # Simulated students: latent ability, noisy grades, and per-group SAT
  # attempts (best of several noisy draws). Reactive on the sliders.
  my_data <- reactive({
    ability_mu<- 100
    ability_sigma <- 15
    sigma_g <- input$grade_noise
    sigma_s <- input$sat_noise
    n <- input$n_samples
    minority_perc <- input$minority_perc/100
    dist_of_groups <- c(minority_perc, 1-minority_perc)
    number_groups <- c(1,2)
    group_1_sat_taken <- input$group_1_taken
    group_2_sat_taken <- input$group_2_taken
    # NOTE(review): data_frame()/as_data_frame() below are deprecated in
    # tibble -- prefer tibble()/as_tibble().
    initial_data <-data_frame(ability = rnorm(n, ability_mu, ability_sigma)) %>%
      mutate(student = paste0("student_", row_number()),
             group_membership = sample(number_groups, n, prob = dist_of_groups, T))
    grades <- rnorm(n, initial_data$ability, sigma_g)
    initial_data <- initial_data %>% add_column(grades = grades)
    # Group 1 Test Takers: one column per SAT attempt, plus row-wise max.
    group_1 <- filter(initial_data, group_membership == 1)
    group_1_sat_matrix <- matrix(
      data = rnorm(group_1_sat_taken*nrow(group_1), mean = group_1$ability, sd =sigma_s),
      nrow = nrow(group_1),
      ncol = group_1_sat_taken,
      byrow = F)
    group_1_sat_matrix = cbind(group_1_sat_matrix,
                               max_score = apply(group_1_sat_matrix, MARGIN = 1, max))
    group_1 <- group_1 %>%
      bind_cols(as_data_frame(group_1_sat_matrix))
    # Group 2 data prep (same construction with its own attempt count).
    group_2 <- filter(initial_data, group_membership == 2)
    group_2_sat_matrix <- matrix(
      data = rnorm(group_2_sat_taken*nrow(group_2), mean = group_2$ability, sd =sigma_s),
      nrow = nrow(group_2),
      ncol = group_2_sat_taken,
      byrow = F)
    group_2_sat_matrix = cbind(group_2_sat_matrix,
                               max_score = apply(group_2_sat_matrix, MARGIN = 1, max))
    group_2 <- group_2 %>%
      bind_cols(as_data_frame(group_2_sat_matrix))
    # Bring the Two Groups Together
    combined_student_data <- bind_rows(group_1, group_2)
    print(head(combined_student_data))
    out <- combined_student_data
  })
  # Predicted ability from grades + best SAT score, with an admitted flag;
  # per-group models when input$mod_group is TRUE, one pooled model otherwise.
  model_fits <- reactive({
    if(input$mod_group){
      my_data() %>%
        group_by(group_membership) %>%
        nest() %>%
        mutate(fit = map(data, ~broom::augment(lm(ability ~ grades + max_score, data = .))[".fitted"])) %>%
        unnest() %>%
        mutate(admitted = ifelse(.fitted >= input$intelligence_cut, 1, 0))
    } else{
      my_data() %>%
        add_column(.fitted = broom::augment(lm(ability ~ grades + max_score,
                                               data = my_data()))[[".fitted"]]) %>%
        mutate(admitted = ifelse(.fitted >= input$intelligence_cut, 1, 0))
    }
  })
  # Scatter of grades vs best SAT score with fitted line(s); per-group
  # lines when modelling per group, a single pooled line otherwise.
  make_first_plot <- function(){
    if(input$mod_group){
      my_alpha <- 1/ (nrow(model_fits())/5000)
      model_fits() %>%
        ggplot(aes(grades, max_score, color = as.factor(group_membership)))+
        geom_point(alpha = my_alpha)+
        labs(
          title = "Distribution of Scores",
          subtitle = "Line Represents Cutoff Threshold for Admission",
          color = "Group",
          x = "Grade Score",
          y= "Test Score (max)"
        )+
        theme_minimal()+
        geom_smooth(method = "lm", se = FALSE)
    } else{
      my_alpha <- 1/ (nrow(model_fits())/5000)
      model_fits() %>%
        ggplot()+
        geom_point(aes(grades, max_score, color = as.factor(group_membership)),alpha = my_alpha)+
        labs(
          title = "Distribution of Scores",
          subtitle = "Line Represents Cutoff Threshold for Admission",
          color = "Group",
          x = "Grade Score",
          y= "Test Score (max)"
        )+
        theme_minimal()+
        geom_smooth(aes(grades, max_score), method = "lm", se = FALSE)
    }
  }
  # Per-group summary table: size, % admitted, and mean ability/grades/SAT.
  make_summary_statz <- function(){
    dat <-model_fits() %>%
      group_by(group_membership) %>%
      summarise(total_students = n(),
                avg_true_ability = mean(ability),
                avg_grade = mean(grades),
                avg_sat = mean(max_score),
                admit = mean(admitted)*100) %>%
      mutate_if(is.numeric, round, 2) %>%
      set_names(c("Group", "#", "Avg True Ability", "Avg Grades", "Avg SAT", "% Admit")) %>%
      select(`Group`, `#`, `% Admit`, `Avg True Ability`, `Avg Grades`, `Avg SAT`)
  }
  # UI bindings.
  output$initial_plot <- renderPlot({make_first_plot()})
  output$initial_statz <- renderDataTable({make_summary_statz()})
})
library(reshape2)
library(ggplot2)
library(plyr)
library(SPARQL)
#create the function
population_pyramid <- function(geography = "S92000003", year = 2016) {
  ## Draw a population pyramid for one Scottish statistical geography and
  ## year, using single-year age/sex population estimates queried live
  ## from statistics.gov.scot (males plotted to the left of the axis).
  endpoint <- "http://statistics.gov.scot/sparql"
  ## Assemble the SPARQL query around the requested area and period URIs.
  query <- paste(
    "PREFIX qb: <http://purl.org/linked-data/cube#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX purl: <http://purl.org/linked-data/sdmx/2009/dimension#>
    PREFIX scot: <http://statistics.gov.scot/def/dimension/>
    SELECT ?Age ?Sex ?Population
    WHERE {
    ?s qb:dataSet <http://statistics.gov.scot/data/population-estimates-current-geographic-boundaries>;
    purl:refArea <http://statistics.gov.scot/id/statistical-geography/",
    geography,
    ">;
    purl:refPeriod <http://reference.data.gov.uk/id/year/",
    year,
    ">;
    scot:sex ?SexURI;
    scot:age ?AgeURI;
    <http://statistics.gov.scot/def/measure-properties/count> ?Population.
    ?AgeURI rdfs:label ?Age;
    <http://www.w3.org/ns/ui#sortPriority> ?Sort.
    ?SexURI rdfs:label ?Sex.
    FILTER(?SexURI != <http://statistics.gov.scot/def/concept/sex/all>)
    FILTER(?AgeURI NOT IN (<http://statistics.gov.scot/def/concept/age/all>, <http://statistics.gov.scot/def/concept/age/children-under-16>, <http://statistics.gov.scot/def/concept/age/working-age-16-64>, <http://statistics.gov.scot/def/concept/age/pensionable-age-65-and-over> ))
    }
    ORDER BY ?Sex ?Sort", sep = "")

  pyramid_data <- SPARQL(endpoint, query)$results

  ## Negate male counts so the two sexes mirror each other around zero.
  pyramid_data$Population <- ifelse(pyramid_data$Sex == "Male",
                                    -pyramid_data$Population,
                                    pyramid_data$Population)

  ## The query already ordered ages by sort priority; freeze that order so
  ## categories such as "5-9" do not get re-sorted alphabetically.
  pyramid_data$Age <- factor(pyramid_data$Age, levels = unique(pyramid_data$Age))

  ## Axis labels show absolute counts even though males are negative.
  pop_breaks <- pretty(pyramid_data$Population)
  pyramid_plot <- ggplot(pyramid_data, aes(x = Age, y = Population, fill = Sex)) +
    geom_bar(stat = "identity") +
    scale_y_continuous(breaks = pop_breaks,
                       labels = abs(pop_breaks)) +
    coord_flip() +
    scale_fill_brewer(palette = "Set1") +
    theme_bw()
  pyramid_plot
}
#run the function for Scotland in 2016
population_pyramid("S92000003", 2016)
| /using-R/Population Pyramid R function for Statistics.gov.scot.R | permissive | GregorBoyd/sparql-queries | R | false | false | 2,190 | r | library(reshape2)
library(ggplot2)
library(plyr)
library(SPARQL)
#create the function
population_pyramid <- function(geography="S92000003", year=2016) {
endpoint <- "http://statistics.gov.scot/sparql"
# create query statement
query <- paste(
"PREFIX qb: <http://purl.org/linked-data/cube#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX purl: <http://purl.org/linked-data/sdmx/2009/dimension#>
PREFIX scot: <http://statistics.gov.scot/def/dimension/>
SELECT ?Age ?Sex ?Population
WHERE {
?s qb:dataSet <http://statistics.gov.scot/data/population-estimates-current-geographic-boundaries>;
purl:refArea <http://statistics.gov.scot/id/statistical-geography/",
geography,
">;
purl:refPeriod <http://reference.data.gov.uk/id/year/",
year,
">;
scot:sex ?SexURI;
scot:age ?AgeURI;
<http://statistics.gov.scot/def/measure-properties/count> ?Population.
?AgeURI rdfs:label ?Age;
<http://www.w3.org/ns/ui#sortPriority> ?Sort.
?SexURI rdfs:label ?Sex.
FILTER(?SexURI != <http://statistics.gov.scot/def/concept/sex/all>)
FILTER(?AgeURI NOT IN (<http://statistics.gov.scot/def/concept/age/all>, <http://statistics.gov.scot/def/concept/age/children-under-16>, <http://statistics.gov.scot/def/concept/age/working-age-16-64>, <http://statistics.gov.scot/def/concept/age/pensionable-age-65-and-over> ))
}
ORDER BY ?Sex ?Sort", sep = "")
qd <- SPARQL(endpoint, query)
df <- qd$results
df$Population <- ifelse(df$Sex == "Male",df$Population * -1,df$Population)
#Age has been ordered already by sparql query, so do this to stop 5-9 category appearing in wrong place
df$Age <- factor(df$Age, levels = unique(df$Age))
plot1 <- ggplot(df, aes(x = Age, y = Population, fill = Sex)) +
geom_bar(stat = "identity") +
scale_y_continuous(breaks = pretty(df$Population),
labels = abs(pretty(df$Population))) +
coord_flip() +
scale_fill_brewer(palette = "Set1") +
theme_bw()
plot1
}
#run the function for Scotland in 2016
population_pyramid("S92000003", 2016)
|
### =========================================================================
### SummarizedExperiment objects
### -------------------------------------------------------------------------
###
### A matrix-like container of one or more assays with the same dimensions,
### plus row annotations (elementMetadata), column annotations (colData)
### and optional row names (NAMES).
setClass("SummarizedExperiment",
    contains="Vector",
    representation(
        colData="DataFrame",             # columns and their annotations
        assays="Assays",                 # data -- e.g., list of matrices
        NAMES="character_OR_NULL",       # row names, or NULL
        elementMetadata="DataFrame"      # one row of annotation per object row
    ),
    prototype(
        assays=Assays()
    )
)

### Combine the new parallel slots with those of the parent class. Make sure
### to put the new parallel slots *first*.
setMethod("parallelSlotNames", "SummarizedExperiment",
    function(x) c("NAMES", callNextMethod())
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
### Returns NULL when there are no assays or when the assays have as many
### rows as the object itself; otherwise returns a diagnostic string.
.valid.SummarizedExperiment.assays_nrow <- function(x)
{
    if (length(x@assays) == 0L)
        return(NULL)
    n_assay_rows <- nrow(x@assays)
    n_row_data <- length(x)
    if (n_assay_rows == n_row_data)
        return(NULL)
    sprintf(
        "\n  nb of rows in 'assay' (%d) must equal nb of rows in 'rowData' (%d)",
        n_assay_rows, n_row_data)
}
### Returns NULL when there are no assays or when the assays have as many
### columns as colData has rows; otherwise returns a diagnostic string.
.valid.SummarizedExperiment.assays_ncol <- function(x)
{
    if (length(x@assays) == 0L)
        return(NULL)
    n_assay_cols <- ncol(x@assays)
    n_col_data <- nrow(colData(x))
    if (n_assay_cols == n_col_data)
        return(NULL)
    sprintf(
        "\n  nb of cols in 'assay' (%d) must equal nb of rows in 'colData' (%d)",
        n_assay_cols, n_col_data)
}
### Collect the row- and column-dimension checks (each NULL when valid).
.valid.SummarizedExperiment.assays_dim <- function(x)
{
    c(.valid.SummarizedExperiment.assays_nrow(x),
      .valid.SummarizedExperiment.assays_ncol(x))
}
### Top-level validity: currently only the assay-dimension checks.
.valid.SummarizedExperiment <- function(x)
{
    .valid.SummarizedExperiment.assays_dim(x)
}
setValidity2("SummarizedExperiment", .valid.SummarizedExperiment)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Low-level constructor (not exported).
###
### Low-level constructor (not exported). 'assays' may be anything Assays()
### accepts; 'names' is a character vector or NULL; 'rowData' a DataFrame
### or NULL; 'colData' a DataFrame; 'metadata' anything as.list() accepts.
new_SummarizedExperiment <- function(assays, names, rowData, colData,
                                     metadata)
{
    if (!is(assays, "Assays"))
        assays <- Assays(assays)
    if (is.null(rowData)) {
        ## Without explicit rowData, infer the number of rows from 'names'
        ## when available, otherwise from the assays.
        if (is.null(names))
            nrow <- nrow(assays)
        else
            nrow <- length(names)
        rowData <- S4Vectors:::make_zero_col_DataFrame(nrow)
    } else {
        ## Row names live in the NAMES slot, not on the rowData DataFrame.
        rownames(rowData) <- NULL
    }
    new("SummarizedExperiment", NAMES=names,
                                elementMetadata=rowData,
                                colData=colData,
                                assays=assays,
                                metadata=as.list(metadata))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters and setters.
###
### length() == number of rows (one elementMetadata row per object row).
setMethod("length", "SummarizedExperiment",
    function(x) nrow(x@elementMetadata)
)
### Row names (may be NULL).
setMethod("names", "SummarizedExperiment", function(x) x@NAMES)
setReplaceMethod("names", "SummarizedExperiment",
    function(x, value)
    {
        NAMES <- S4Vectors:::normalize_names_replacement_value(value, x)
        ## check=FALSE: only the NAMES slot changes, dimensions are untouched.
        BiocGenerics:::replaceSlots(x, NAMES=NAMES, check=FALSE)
    }
)
## rowData, colData seem too vague, but from eSet derived classes wanted to
## call the rows / cols something different from 'features' or 'samples', so
## might as well avoid the issue
setGeneric("rowData", function(x, ...) standardGeneric("rowData"))
### rowData() is an alias for mcols() (the per-row metadata).
setMethod("rowData", "SummarizedExperiment",
    function(x, ...) mcols(x, ...)
)
setGeneric("rowData<-",
    function(x, ..., value) standardGeneric("rowData<-"))
setReplaceMethod("rowData", "SummarizedExperiment",
    function(x, ..., value) `mcols<-`(x, ..., value=value)
)
### colData: per-column (sample) annotations.
setGeneric("colData", function(x, ...) standardGeneric("colData"))
setMethod("colData", "SummarizedExperiment", function(x, ...) x@colData)
setGeneric("colData<-",
    function(x, ..., value) standardGeneric("colData<-"))
setReplaceMethod("colData", c("SummarizedExperiment", "DataFrame"),
    function(x, ..., value)
    {
        ## Replacement must keep one colData row per object column.
        if (nrow(value) != ncol(x))
            stop("nrow of supplied 'colData' must equal ncol of object")
        BiocGenerics:::replaceSlots(x, colData=value, check=FALSE)
    })
setGeneric("assays",
    function(x, ..., withDimnames=TRUE) standardGeneric("assays"),
    signature="x")
### Return the assays as a SimpleList; with withDimnames=TRUE (default),
### the object's dimnames are copied onto each assay's first 2 dimensions.
setMethod("assays", "SummarizedExperiment",
    function(x, ..., withDimnames=TRUE)
{
    assays <- as(x@assays, "SimpleList")
    if (withDimnames) {
        assays <- endoapply(assays,
            function(assay) {
                dimnames(assay)[1:2] <- dimnames(x)
                assay
            }
        )
    }
    assays
})
setGeneric("assays<-",
    function(x, ..., withDimnames=TRUE, value) standardGeneric("assays<-"),
    signature=c("x", "value"))
### Shared implementation for the SimpleList and list replacement methods:
### each replacement assay must have NULL dimnames or dimnames matching the
### object's, then the whole assays slot is swapped and re-validated.
.SummarizedExperiment.assays.replace <-
    function(x, ..., withDimnames=TRUE, value)
{
    ## withDimnames arg allows names(assays(se, withDimnames=FALSE)) <- value
    ok <- vapply(value, function(elt, xdimnames) {
        e <- dimnames(elt)
        (is.null(e[[1]]) || identical(e[[1]], xdimnames[[1]])) &&
            (is.null(e[[2]]) || identical(e[[2]], xdimnames[[2]]))
    }, logical(1), xdimnames=dimnames(x))
    if (!all(ok))
        stop("current and replacement dimnames() differ")
    x <- BiocGenerics:::replaceSlots(x, assays=Assays(value), check=FALSE)
    ## validObject(x) should be called below because it would then fully
    ## re-validate objects that derive from SummarizedExperiment (e.g.
    ## DESeqDataSet objects) after the user sets the assays slot with
    ## assays(x) <- value. For example the assays slot of a DESeqDataSet
    ## object must contain a matrix named 'counts' and calling validObject(x)
    ## would check that but .valid.SummarizedExperiment(x) doesn't.
    ## The FourC() constructor function defined in the FourCSeq package
    ## actually takes advantage of the incomplete validation below to
    ## purposedly return invalid FourC objects!
    msg <- .valid.SummarizedExperiment(x)
    if (!is.null(msg))
        stop(msg)
    x
}
setReplaceMethod("assays", c("SummarizedExperiment", "SimpleList"),
    .SummarizedExperiment.assays.replace)
setReplaceMethod("assays", c("SummarizedExperiment", "list"),
    .SummarizedExperiment.assays.replace)
setGeneric("assay", function(x, i, ...) standardGeneric("assay"))
## convenience for common use case: assay(x) returns the first assay
setMethod("assay", c("SummarizedExperiment", "missing"),
    function(x, i, ...)
{
    assays <- assays(x, ...)
    if (0L == length(assays))
        stop("'assay(<", class(x), ">, i=\"missing\", ...) ",
            "length(assays(<", class(x), ">)) is 0'")
    assays[[1]]
})
### Extract assay by position; out-of-bounds errors are re-signalled with
### a message naming the class and subscript type.
setMethod("assay", c("SummarizedExperiment", "numeric"),
    function(x, i, ...)
{
    tryCatch({
        assays(x, ...)[[i]]
    }, error=function(err) {
        stop("'assay(<", class(x), ">, i=\"numeric\", ...)' ",
             "invalid subscript 'i'\n", conditionMessage(err))
    })
})
### Extract assay by name; [[ on a SimpleList returns NULL for a missing
### name, so that case is turned into an explicit error as well.
setMethod("assay", c("SummarizedExperiment", "character"),
    function(x, i, ...)
{
    msg <- paste0("'assay(<", class(x), ">, i=\"character\", ...)' ",
                  "invalid subscript 'i'")
    res <- tryCatch({
        assays(x, ...)[[i]]
    }, error=function(err) {
        stop(msg, "\n", conditionMessage(err))
    })
    if (is.null(res))
        stop(msg, "\n'i' not in names(assays(<", class(x), ">))")
    res
})
setGeneric("assay<-", signature=c("x", "i"),
    function(x, i, ..., value) standardGeneric("assay<-"))
### assay(x) <- value replaces the first assay (errors when there is none).
setReplaceMethod("assay", c("SummarizedExperiment", "missing"),
    function(x, i, ..., value)
{
    if (0L == length(assays(x)))
        stop("'assay(<", class(x), ">) <- value' ", "length(assays(<",
             class(x), ">)) is 0")
    assays(x)[[1]] <- value
    x
})
### Replace assay by position (defaults to the first).
setReplaceMethod("assay", c("SummarizedExperiment", "numeric"),
    function(x, i = 1, ..., value)
{
    assays(x, ...)[[i]] <- value
    x
})
### Replace (or add) assay by name.
setReplaceMethod("assay", c("SummarizedExperiment", "character"),
    function(x, i, ..., value)
{
    assays(x, ...)[[i]] <- value
    x
})
### Assay names; withDimnames=FALSE avoids the dimnames-copying work in the
### assays() getter since only the list names are needed.
setGeneric("assayNames", function(x, ...) standardGeneric("assayNames"))
setMethod("assayNames", "SummarizedExperiment",
    function(x, ...)
{
    names(assays(x, withDimnames=FALSE))
})
setGeneric("assayNames<-",
    function(x, ..., value) standardGeneric("assayNames<-"))
setReplaceMethod("assayNames", c("SummarizedExperiment", "character"),
    function(x, ..., value)
{
    names(assays(x, withDimnames=FALSE)) <- value
    x
})
## canonical location for dim, dimnames; dimnames should be checked
## for consistency (if non-null) and stripped from assays on
## construction, or added from assays if row/col names are NULL in
## <SummarizedExperiment> but not assays. dimnames need to be added on
## to assays when assays() invoked
### dim: (number of rows, number of colData rows).
setMethod("dim", "SummarizedExperiment",
    function(x)
{
    c(length(x), nrow(colData(x)))
})
### dimnames: row names from the NAMES slot, column names from colData.
setMethod("dimnames", "SummarizedExperiment",
    function(x)
{
    list(names(x), rownames(colData(x)))
})
setReplaceMethod("dimnames", c("SummarizedExperiment", "list"),
    function(x, value)
{
    ## value[[1]] -> NAMES slot, value[[2]] -> colData rownames.
    NAMES <- S4Vectors:::normalize_names_replacement_value(value[[1]], x)
    colData <- colData(x)
    rownames(colData) <- value[[2]]
    BiocGenerics:::replaceSlots(x, NAMES=NAMES, colData=colData, check=FALSE)
})
### dimnames(x) <- NULL clears both row and column names.
setReplaceMethod("dimnames", c("SummarizedExperiment", "NULL"),
    function(x, value)
{
    dimnames(x) <- list(NULL, NULL)
    x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
### Translate character subscripts to integer positions in 'txt'. Any name
### with no match triggers an error whose message is built from 'fmt' and
### a (possibly abbreviated) listing of the offending names.
.SummarizedExperiment.charbound <-
    function(idx, txt, fmt)
{
    pos <- match(idx, txt)
    unmatched <- is.na(pos)
    if (any(unmatched)) {
        offenders <- paste(S4Vectors:::selectSome(idx[unmatched]), collapse=" ")
        stop(sprintf(fmt, offenders))
    }
    pos
}
### Two-dimensional subsetting. 'drop' is accepted for matrix-API
### compatibility but ignored (with a warning when TRUE). Character
### subscripts are translated to positions via .SummarizedExperiment.charbound.
### The three branches below handle j-only, i-only and i+j subsetting, and
### the RangedSummarizedExperiment case keeps rowRanges instead of NAMES.
setMethod("[", c("SummarizedExperiment", "ANY", "ANY"),
    function(x, i, j, ..., drop=TRUE)
{
    if (1L != length(drop) || (!missing(drop) && drop))
        warning("'drop' ignored '[,", class(x), ",ANY,ANY-method'")
    if (missing(i) && missing(j))
        return(x)
    if (!missing(i)) {
        if (is.character(i)) {
            fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
            i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
        }
        ii <- as.vector(i)
        ans_elementMetadata <- x@elementMetadata[i, , drop=FALSE]
        if (is(x, "RangedSummarizedExperiment")) {
            ans_rowRanges <- x@rowRanges[i]
        } else {
            ans_NAMES <- x@NAMES[ii]
        }
    }
    if (!missing(j)) {
        if (is.character(j)) {
            fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
            j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
        }
        ans_colData <- x@colData[j, , drop=FALSE]
        jj <- as.vector(j)
    }
    ## Column-only subsetting.
    if (missing(i)) {
        ans_assays <- x@assays[ , jj]
        ans <- BiocGenerics:::replaceSlots(x, ...,
                   colData=ans_colData,
                   assays=ans_assays,
                   check=FALSE)
    ## Row-only subsetting.
    } else if (missing(j)) {
        ans_assays <- x@assays[ii, ]
        if (is(x, "RangedSummarizedExperiment")) {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       elementMetadata=ans_elementMetadata,
                       rowRanges=ans_rowRanges,
                       assays=ans_assays,
                       check=FALSE)
        } else {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       elementMetadata=ans_elementMetadata,
                       NAMES=ans_NAMES,
                       assays=ans_assays,
                       check=FALSE)
        }
    ## Row-and-column subsetting.
    } else {
        ans_assays <- x@assays[ii, jj]
        if (is(x, "RangedSummarizedExperiment")) {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       elementMetadata=ans_elementMetadata,
                       rowRanges=ans_rowRanges,
                       colData=ans_colData,
                       assays=ans_assays,
                       check=FALSE)
        } else {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       elementMetadata=ans_elementMetadata,
                       NAMES=ans_NAMES,
                       colData=ans_colData,
                       assays=ans_assays,
                       check=FALSE)
        }
    }
    ans
})
## Subassignment: 'x[i, j] <- value' replaces the selected rows/columns of
## every slot (metadata, rowData/rowRanges or NAMES, colData, assays) with
## the corresponding parts of 'value' (another SummarizedExperiment).
## Slots are rebuilt with check=FALSE, then only the assay-dimension
## invariant that could have been broken is re-validated at the end.
setReplaceMethod("[",
    c("SummarizedExperiment", "ANY", "ANY", "SummarizedExperiment"),
    function(x, i, j, ..., value)
{
    if (missing(i) && missing(j))
        return(value)
    ## metadata lists are concatenated, not replaced.
    ans_metadata <- c(metadata(x), metadata(value))
    if (!missing(i)) {
        if (is.character(i)) {
            fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
            i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
        }
        ii <- as.vector(i)
        ans_elementMetadata <- local({
            emd <- x@elementMetadata
            emd[i,] <- value@elementMetadata
            emd
        })
        ## Row identity: rowRanges for RangedSummarizedExperiment (names
        ## assigned separately), NAMES slot otherwise.
        if (is(x, "RangedSummarizedExperiment")) {
            ans_rowRanges <- local({
                r <- x@rowRanges
                r[i] <- value@rowRanges
                names(r)[ii] <- names(value@rowRanges)
                r
            })
        } else {
            ans_NAMES <- local({
                nms <- x@NAMES
                nms[ii] <- value@NAMES
                nms
            })
        }
    }
    if (!missing(j)) {
        if (is.character(j)) {
            fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
            j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
        }
        jj <- as.vector(j)
        ans_colData <- local({
            c <- x@colData
            c[j,] <- value@colData
            rownames(c)[jj] <- rownames(value@colData)
            c
        })
    }
    if (missing(i)) {
        ans_assays <- local({
            a <- x@assays
            a[ , jj] <- value@assays
            a
        })
        ans <- BiocGenerics:::replaceSlots(x, ...,
                   metadata=ans_metadata,
                   colData=ans_colData,
                   assays=ans_assays,
                   check=FALSE)
        ## only columns changed, so only the ncol invariant is checked.
        msg <- .valid.SummarizedExperiment.assays_ncol(ans)
    } else if (missing(j)) {
        ans_assays <- local({
            a <- x@assays
            a[ii, ] <- value@assays
            a
        })
        if (is(x, "RangedSummarizedExperiment")) {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       metadata=ans_metadata,
                       elementMetadata=ans_elementMetadata,
                       rowRanges=ans_rowRanges,
                       assays=ans_assays,
                       check=FALSE)
        } else {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       metadata=ans_metadata,
                       elementMetadata=ans_elementMetadata,
                       NAMES=ans_NAMES,
                       assays=ans_assays,
                       check=FALSE)
        }
        ## only rows changed, so only the nrow invariant is checked.
        msg <- .valid.SummarizedExperiment.assays_nrow(ans)
    } else {
        ans_assays <- local({
            a <- x@assays
            a[ii, jj] <- value@assays
            a
        })
        if (is(x, "RangedSummarizedExperiment")) {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       metadata=ans_metadata,
                       elementMetadata=ans_elementMetadata,
                       rowRanges=ans_rowRanges,
                       colData=ans_colData,
                       assays=ans_assays,
                       check=FALSE)
        } else {
            ans <- BiocGenerics:::replaceSlots(x, ...,
                       metadata=ans_metadata,
                       elementMetadata=ans_elementMetadata,
                       NAMES=ans_NAMES,
                       colData=ans_colData,
                       assays=ans_assays,
                       check=FALSE)
        }
        msg <- .valid.SummarizedExperiment.assays_dim(ans)
    }
    if (!is.null(msg))
        stop(msg)
    ans
})
## Vector API: extractROWS/replaceROWS operate on the first (row) dimension
## only, delegating to the 2-dimensional '[' / '[<-' methods above after
## normalizing the subscript.
setMethod("extractROWS", "SummarizedExperiment",
    function(x, i)
    {
        i <- normalizeSingleBracketSubscript(i, x)
        x[i, ]
    }
)
setMethod("replaceROWS", "SummarizedExperiment",
    function(x, i, value)
    {
        i <- normalizeSingleBracketSubscript(i, x)
        x[i, ] <- value
        x
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Quick colData access.
###
## Convenience accessors: '[[', '$' and their replacement forms operate on
## the columns of colData(x), so 'se$batch' is shorthand for
## 'colData(se)$batch'.
setMethod("[[", c("SummarizedExperiment", "ANY", "missing"),
    function(x, i, j, ...)
{
    colData(x)[[i, ...]]
})
setReplaceMethod("[[", c("SummarizedExperiment", "ANY", "missing"),
    function(x, i, j, ..., value)
{
    colData(x)[[i, ...]] <- value
    x
})
## Tab-completion after '$' offers colData column names.
.DollarNames.SummarizedExperiment <- function(x, pattern)
    grep(pattern, names(colData(x)), value=TRUE)
setMethod("$", "SummarizedExperiment",
    function(x, name)
{
    colData(x)[[name]]
})
setReplaceMethod("$", "SummarizedExperiment",
    function(x, name, value)
{
    colData(x)[[name]] <- value
    x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display.
###
## Compact textual summary: class, dim, then one line per component
## (metadata, assays, rownames, rowData, colnames, colData) showing the
## count and a selection of names.
## Fixes vs original: removed the unused local 'selectSome' binding (scat
## calls S4Vectors:::selectSome directly), and made the empty-rownames
## branch use cat() like the empty-colnames branch instead of passing a
## spec-free format string through scat()/sprintf().
setMethod("show", "SummarizedExperiment",
    function(object)
{
    ## scat: print "label(n): a b ... z" with the middle elided.
    scat <- function(fmt, vals=character(), exdent=2, ...)
    {
        vals <- ifelse(nzchar(vals), vals, "''")
        lbls <- paste(S4Vectors:::selectSome(vals), collapse=" ")
        txt <- sprintf(fmt, length(vals), lbls)
        cat(strwrap(txt, exdent=exdent, ...), sep="\n")
    }
    cat("class:", class(object), "\n")
    cat("dim:", dim(object), "\n")
    ## metadata(): unnamed entries are shown as ''.
    expt <- names(metadata(object))
    if (is.null(expt))
        expt <- character(length(metadata(object)))
    scat("metadata(%d): %s\n", expt)
    ## assays()
    nms <- assayNames(object)
    if (is.null(nms))
        nms <- character(length(assays(object, withDimnames=FALSE)))
    scat("assays(%d): %s\n", nms)
    ## rownames()
    dimnames <- dimnames(object)
    dlen <- sapply(dimnames, length)
    if (dlen[[1]]) scat("rownames(%d): %s\n", dimnames[[1]])
    else cat("rownames: NULL\n")
    ## rowData()
    scat("rowData names(%d): %s\n", names(rowData(object)))
    ## colnames()
    if (dlen[[2]]) scat("colnames(%d): %s\n", dimnames[[2]])
    else cat("colnames: NULL\n")
    ## colData()
    scat("colData names(%d): %s\n", names(colData(object)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combine.
###
### Appropriate for objects with different ranges and same samples.
setMethod("rbind", "SummarizedExperiment",
    function(..., deparse.level=1)
{
    args <- unname(list(...))
    .rbind.SummarizedExperiment(args)
})
## Row-wise combination: all objects must share colnames and number of
## samples. Rows (rowRanges or NAMES, elementMetadata, assays) are
## concatenated; colData columns are merged with duplicate checking.
.rbind.SummarizedExperiment <- function(args)
{
    if (!.compare(lapply(args, colnames)))
        stop("'...' objects must have the same colnames")
    if (!.compare(lapply(args, ncol)))
        stop("'...' objects must have the same number of samples")
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        rowRanges <- do.call(c, lapply(args, rowRanges))
    } else {
        ## Code below taken from combine_GAlignments_objects() from the
        ## GenomicAlignments package.

        ## Combine "NAMES" slots: if all are NULL the result is NULL;
        ## otherwise NULL slots are padded with "" before concatenation.
        NAMES_slots <- lapply(args, function(x) x@NAMES)
        ## TODO: Use elementIsNull() here when it becomes available.
        has_no_names <- sapply(NAMES_slots, is.null, USE.NAMES=FALSE)
        if (all(has_no_names)) {
            NAMES <- NULL
        } else {
            noname_idx <- which(has_no_names)
            if (length(noname_idx) != 0L)
                NAMES_slots[noname_idx] <-
                    lapply(elementNROWS(args[noname_idx]), character)
            NAMES <- unlist(NAMES_slots, use.names=FALSE)
        }
    }
    colData <- .cbind.DataFrame(args, colData, "colData")
    assays <- do.call(rbind, lapply(args, slot, "assays"))
    elementMetadata <- do.call(rbind, lapply(args, slot, "elementMetadata"))
    metadata <- do.call(c, lapply(args, metadata))
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        BiocGenerics:::replaceSlots(args[[1L]],
            rowRanges=rowRanges, colData=colData, assays=assays,
            elementMetadata=elementMetadata, metadata=metadata)
    } else {
        BiocGenerics:::replaceSlots(args[[1L]],
            NAMES=NAMES, colData=colData, assays=assays,
            elementMetadata=elementMetadata, metadata=metadata)
    }
}
### Appropriate for objects with same ranges and different samples.
setMethod("cbind", "SummarizedExperiment",
    function(..., deparse.level=1)
{
    args <- unname(list(...))
    .cbind.SummarizedExperiment(args)
})
## Column-wise combination: all objects must have compatible rows (identical
## ranges for RangedSummarizedExperiment). colData rows and assay columns
## are concatenated; row annotations are merged with duplicate checking.
.cbind.SummarizedExperiment <- function(args)
{
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        if (!.compare(lapply(args, rowRanges), TRUE))
            stop("'...' object ranges (rows) are not compatible")
        rowRanges <- rowRanges(args[[1L]])
        mcols(rowRanges) <- .cbind.DataFrame(args, mcols, "mcols")
    } else {
        elementMetadata <- .cbind.DataFrame(args, mcols, "mcols")
    }
    colData <- do.call(rbind, lapply(args, colData))
    assays <- do.call(cbind, lapply(args, slot, "assays"))
    metadata <- do.call(c, lapply(args, metadata))
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        BiocGenerics:::replaceSlots(args[[1L]],
            rowRanges=rowRanges,
            colData=colData, assays=assays, metadata=metadata)
    } else {
        BiocGenerics:::replaceSlots(args[[1L]],
            elementMetadata=elementMetadata,
            colData=colData, assays=assays, metadata=metadata)
    }
}
## Test whether all elements of list 'x' are pairwise "equal" to the first.
## When GenomicRanges=TRUE, compare genomic positions via identicalVals()
## (GRangesList objects are unlisted first so element values, not grouping,
## are compared); otherwise use strict identical().
## Fixes vs original: dropped the redundant all() wrapped around the scalar
## identical() result, and replaced sapply() with type-stable vapply().
.compare <- function(x, GenomicRanges=FALSE)
{
    x1 <- x[[1]]
    if (GenomicRanges) {
        if (is(x1, "GRangesList")) {
            x <- lapply(x, unlist)
            x1 <- x[[1]]
        }
        for (i in seq_along(x)[-1]) {
            if (!identicalVals(x1, x[[i]]))
                return(FALSE)
        }
        return(TRUE)
    }
    ## length-1 input vacuously compares equal (all() of logical(0) is TRUE)
    all(vapply(x[-1], function(xelt) identical(xelt, x1), logical(1)))
}
## Merge the DataFrames obtained by applying 'accessor' (e.g. colData,
## mcols) to each object in 'args'. If all are identical, the first is
## returned. Otherwise they are cbind-ed; columns sharing a name across
## objects must hold identical data (checked), and only one copy is kept.
## 'accessorName' is used in the error message.
.cbind.DataFrame <- function(args, accessor, accessorName)
{
    lst <- lapply(args, accessor)
    if (!.compare(lst)) {
        nms <- lapply(lst, names)
        nmsv <- unlist(nms, use.names=FALSE)
        names(nmsv) <- rep(seq_along(nms), elementNROWS(nms))
        dups <- duplicated(nmsv)
        ## no duplicates
        if (!any(dups))
            return(do.call(cbind, lst))
        ## confirm duplicates are the same
        lapply(nmsv[duplicated(nmsv)], function(d) {
            if (!.compare(lapply(lst, "[", d)))
                stop("column(s) '", unname(d),
                     "' in ", sQuote(accessorName),
                     " are duplicated and the data do not match")})
        ## remove duplicates
        do.call(cbind, lst)[,!dups]
    } else {
        lst[[1]]
    }
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### identicalVals()
###
### Internal generic and methods (i.e. not exported).
### Provides a fast implementation of 'length(x) == length(y) && all(x == y)'
### for various kinds of vector-like objects.
### TODO: Move this to S4Vectors (for the generic and methods for factor and
### Rle objects) and IRanges (for the method for Ranges objects).
###

setGeneric("identicalVals", function(x, y) standardGeneric("identicalVals"))

### Semantically equivalent to identical(as.character(x), as.character(y))
### but avoids turning the 2 factor objects into character vectors so is more
### efficient.
setMethod("identicalVals", c("factor", "factor"),
    function(x, y)
    {
        ## remap y's integer codes onto x's level ordering, then compare
        ## the integer codes directly (indexing 'm' by factor 'y' uses y's
        ## underlying integer codes).
        m <- match(levels(y), levels(x), nomatch=0L)
        identical(as.integer(x), m[y])
    }
)

### Only support factor-Rle objects at the moment!
### Semantically equivalent to identical(as.character(x), as.character(y))
### but avoids turning the 2 factor-Rle objects into character vectors so is
### more efficient.
setMethod("identicalVals", c("Rle", "Rle"),
    function(x, y) identical(runLength(x), runLength(y)) &&
                   identicalVals(runValue(x), runValue(y))
)

setMethod("identicalVals", c("Ranges", "Ranges"),
    function(x, y) identical(start(x), start(y)) &&
                   identical(width(x), width(y))
)

### Like 'x == y' this method ignores circularity of the underlying sequences
### e.g. ranges [1, 10] and [101, 110] represent the same position on a
### circular sequence of length 100 so should be considered equal. However
### for 'x == y' and the method below, they are not.
### TODO: Take circularity of the underlying sequences into account.
setMethod("identicalVals", c("GenomicRanges", "GenomicRanges"),
    function(x, y)
    {
        ## Trying to merge 'seqinfo(x)' and 'seqinfo(y)' will raise an error
        ## if 'x' and 'y' are not based on the same reference genome. This is
        ## the standard way to check that 'x' and 'y' are based on the same
        ## reference genome.
        merge(seqinfo(x), seqinfo(y))  # we ignore the returned value
        identicalVals(seqnames(x), seqnames(y)) &&
            identicalVals(ranges(x), ranges(y)) &&
            identicalVals(strand(x), strand(y))
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### On-disk realization.
###
## Realize every assay on disk (e.g. via the HDF5Array backend), replacing
## each in-memory assay with its realized counterpart.
setMethod("realize", "SummarizedExperiment",
    function(x)
    {
        for (i in seq_along(assays(x))) {
            ## We drop the dimnames of the individual assays for 2 reasons:
            ##   1) These dimnames are kind of irrelevant. The dimnames that
            ##      really matter are 'dimnames(x)' and they are stored
            ##      somewhere else in 'x'. So we don't lose them by not
            ##      realizing the assay dimnames on disk. As a little extra
            ##      bonus, this actually saves a little bit of time and disk
            ##      space.
            ##   2) Using the HDF5Array backend to realize an array-like object
            ##      on disk doesn't store the dimnames in the HDF5 file at the
            ##      moment.
            a <- assay(x, i, withDimnames=FALSE)
            dimnames(a) <- NULL
            assay(x, i) <- realize(a)
        }
        x
    }
)
| /R/SummarizedExperiment-class.R | no_license | AlfonsoRReyes/SummarizedExperiment | R | false | false | 26,440 | r | ### =========================================================================
### SummarizedExperiment objects
### -------------------------------------------------------------------------
###
## SummarizedExperiment: a Vector of features with per-feature annotations
## (elementMetadata), optional feature names (NAMES), per-sample annotations
## (colData), and a set of rectangular data matrices (assays) whose rows are
## features and whose columns are samples.
setClass("SummarizedExperiment",
    contains="Vector",
    representation(
        colData="DataFrame",             # columns (samples) and their annotations
        assays="Assays",                 # data -- e.g., list of matrices
        NAMES="character_OR_NULL",       # feature (row) names, or NULL
        elementMetadata="DataFrame"      # per-row annotations (rowData)
    ),
    prototype(
        assays=Assays()
    )
)
### Combine the new parallel slots with those of the parent class. Make sure
### to put the new parallel slots *first*. Parallel slots are those whose
### length must track length(x) (here: the NAMES slot).
setMethod("parallelSlotNames", "SummarizedExperiment",
    function(x) c("NAMES", callNextMethod())
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
## Validity: assay dimensions must agree with the row annotations (nrow) and
## with colData (ncol). Each helper returns NULL when valid, or a character
## message describing the problem. An object with zero assays is always
## dimensionally valid.
.valid.SummarizedExperiment.assays_nrow <- function(x)
{
    if (length(x@assays) == 0L)
        return(NULL)
    assays_nrow <- nrow(x@assays)
    rowData_nrow <- length(x)
    if (assays_nrow != rowData_nrow) {
        txt <- sprintf(
            "\n  nb of rows in 'assay' (%d) must equal nb of rows in 'rowData' (%d)",
            assays_nrow, rowData_nrow)
        return(txt)
    }
    NULL
}

.valid.SummarizedExperiment.assays_ncol <- function(x)
{
    if (length(x@assays) == 0L)
        return(NULL)
    assays_ncol <- ncol(x@assays)
    colData_nrow <- nrow(colData(x))
    if (assays_ncol != colData_nrow) {
        txt <- sprintf(
            "\n  nb of cols in 'assay' (%d) must equal nb of rows in 'colData' (%d)",
            assays_ncol, colData_nrow)
        return(txt)
    }
    NULL
}

## Both dimension checks; NULL results are silently dropped by c().
.valid.SummarizedExperiment.assays_dim <- function(x)
{
    c(.valid.SummarizedExperiment.assays_nrow(x),
      .valid.SummarizedExperiment.assays_ncol(x))
}

.valid.SummarizedExperiment <- function(x)
{
    .valid.SummarizedExperiment.assays_dim(x)
}

setValidity2("SummarizedExperiment", .valid.SummarizedExperiment)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Low-level constructor (not exported).
###
## Low-level constructor (not exported). 'assays' may be an Assays object or
## anything Assays() accepts. A NULL 'rowData' is replaced by a zero-column
## DataFrame whose nrow is taken from 'names' (if non-NULL) or from the
## assay dimensions; otherwise its rownames are stripped (row identity is
## kept in the NAMES slot, not in rowData).
new_SummarizedExperiment <- function(assays, names, rowData, colData,
                                     metadata)
{
    if (!is(assays, "Assays"))
        assays <- Assays(assays)
    if (is.null(rowData)) {
        if (is.null(names))
            nrow <- nrow(assays)
        else
            nrow <- length(names)
        rowData <- S4Vectors:::make_zero_col_DataFrame(nrow)
    } else {
        rownames(rowData) <- NULL
    }
    new("SummarizedExperiment", NAMES=names,
                                elementMetadata=rowData,
                                colData=colData,
                                assays=assays,
                                metadata=as.list(metadata))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters and setters.
###
## length(x) is the number of features (rows), taken from the per-row
## annotation slot.
setMethod("length", "SummarizedExperiment",
    function(x) nrow(x@elementMetadata)
)

## names(x) are the feature (row) names, stored in the NAMES slot.
setMethod("names", "SummarizedExperiment", function(x) x@NAMES)

setReplaceMethod("names", "SummarizedExperiment",
    function(x, value)
    {
        NAMES <- S4Vectors:::normalize_names_replacement_value(value, x)
        BiocGenerics:::replaceSlots(x, NAMES=NAMES, check=FALSE)
    }
)
## rowData, colData seem too vague, but from eSet derived classes wanted to
## call the rows / cols something different from 'features' or 'samples', so
## might as well avoid the issue

## rowData() is an alias for mcols(): the per-feature annotation DataFrame.
setGeneric("rowData", function(x, ...) standardGeneric("rowData"))

setMethod("rowData", "SummarizedExperiment",
    function(x, ...) mcols(x, ...)
)

setGeneric("rowData<-",
    function(x, ..., value) standardGeneric("rowData<-"))

setReplaceMethod("rowData", "SummarizedExperiment",
    function(x, ..., value) `mcols<-`(x, ..., value=value)
)

## colData(): the per-sample annotation DataFrame; its nrow must equal
## ncol(x), which the setter enforces.
setGeneric("colData", function(x, ...) standardGeneric("colData"))

setMethod("colData", "SummarizedExperiment", function(x, ...) x@colData)

setGeneric("colData<-",
    function(x, ..., value) standardGeneric("colData<-"))

setReplaceMethod("colData", c("SummarizedExperiment", "DataFrame"),
    function(x, ..., value)
{
    if (nrow(value) != ncol(x))
        stop("nrow of supplied 'colData' must equal ncol of object")
    BiocGenerics:::replaceSlots(x, colData=value, check=FALSE)
})
## assays(): the full set of assays as a SimpleList. With the default
## withDimnames=TRUE, 'dimnames(x)' is copied onto each assay before it is
## returned.
setGeneric("assays",
    function(x, ..., withDimnames=TRUE) standardGeneric("assays"),
    signature="x")

setMethod("assays", "SummarizedExperiment",
    function(x, ..., withDimnames=TRUE)
{
    assays <- as(x@assays, "SimpleList")
    if (withDimnames) {
        assays <- endoapply(assays,
            function(assay) {
                dimnames(assay)[1:2] <- dimnames(x)
                assay
            }
        )
    }
    assays
})

setGeneric("assays<-",
    function(x, ..., withDimnames=TRUE, value) standardGeneric("assays<-"),
    signature=c("x", "value"))

## Shared implementation for the SimpleList and list replacement methods.
## Each replacement assay may have NULL dimnames or dimnames identical to
## 'dimnames(x)'; anything else is rejected.
.SummarizedExperiment.assays.replace <-
    function(x, ..., withDimnames=TRUE, value)
{
    ## withDimnames arg allows names(assays(se, withDimnames=FALSE)) <- value
    ok <- vapply(value, function(elt, xdimnames) {
        e <- dimnames(elt)
        (is.null(e[[1]]) || identical(e[[1]], xdimnames[[1]])) &&
             (is.null(e[[2]]) || identical(e[[2]], xdimnames[[2]]))
    }, logical(1), xdimnames=dimnames(x))
    if (!all(ok))
        stop("current and replacement dimnames() differ")
    x <- BiocGenerics:::replaceSlots(x, assays=Assays(value), check=FALSE)
    ## validObject(x) should be called below because it would then fully
    ## re-validate objects that derive from SummarizedExperiment (e.g.
    ## DESeqDataSet objects) after the user sets the assays slot with
    ## assays(x) <- value. For example the assays slot of a DESeqDataSet
    ## object must contain a matrix named 'counts' and calling validObject(x)
    ## would check that but .valid.SummarizedExperiment(x) doesn't.
    ## The FourC() constructor function defined in the FourCSeq package
    ## actually takes advantage of the incomplete validation below to
    ## purposedly return invalid FourC objects!
    msg <- .valid.SummarizedExperiment(x)
    if (!is.null(msg))
        stop(msg)
    x
}

setReplaceMethod("assays", c("SummarizedExperiment", "SimpleList"),
    .SummarizedExperiment.assays.replace)

setReplaceMethod("assays", c("SummarizedExperiment", "list"),
    .SummarizedExperiment.assays.replace)
## assay(): extract a single assay, by position (numeric), by name
## (character), or the first one when 'i' is missing. Errors are re-raised
## with a message identifying which form of subscript failed.
setGeneric("assay", function(x, i, ...) standardGeneric("assay"))

## convenience for common use case: first assay.
setMethod("assay", c("SummarizedExperiment", "missing"),
    function(x, i, ...)
{
    assays <- assays(x, ...)
    if (0L == length(assays))
        stop("'assay(<", class(x), ">, i=\"missing\", ...) ",
             "length(assays(<", class(x), ">)) is 0'")
    assays[[1]]
})

setMethod("assay", c("SummarizedExperiment", "numeric"),
    function(x, i, ...)
{
    tryCatch({
        assays(x, ...)[[i]]
    }, error=function(err) {
        stop("'assay(<", class(x), ">, i=\"numeric\", ...)' ",
             "invalid subscript 'i'\n", conditionMessage(err))
    })
})

setMethod("assay", c("SummarizedExperiment", "character"),
    function(x, i, ...)
{
    msg <- paste0("'assay(<", class(x), ">, i=\"character\", ...)' ",
                  "invalid subscript 'i'")
    res <- tryCatch({
        assays(x, ...)[[i]]
    }, error=function(err) {
        stop(msg, "\n", conditionMessage(err))
    })
    ## [[<name>]] on a list returns NULL (not an error) for a missing name,
    ## so that case is turned into an error explicitly.
    if (is.null(res))
        stop(msg, "\n'i' not in names(assays(<", class(x), ">))")
    res
})

## assay<-: replace a single assay. All forms route through assays<-, which
## validates dimnames and dimensions.
setGeneric("assay<-", signature=c("x", "i"),
    function(x, i, ..., value) standardGeneric("assay<-"))

setReplaceMethod("assay", c("SummarizedExperiment", "missing"),
    function(x, i, ..., value)
{
    if (0L == length(assays(x)))
        stop("'assay(<", class(x), ">) <- value' ", "length(assays(<",
             class(x), ">)) is 0")
    assays(x)[[1]] <- value
    x
})

setReplaceMethod("assay", c("SummarizedExperiment", "numeric"),
    function(x, i = 1, ..., value)
{
    assays(x, ...)[[i]] <- value
    x
})

setReplaceMethod("assay", c("SummarizedExperiment", "character"),
    function(x, i, ..., value)
{
    assays(x, ...)[[i]] <- value
    x
})
## assayNames(): names of the assays list. withDimnames=FALSE avoids
## needlessly copying 'dimnames(x)' onto each assay just to read the list
## names.
setGeneric("assayNames", function(x, ...) standardGeneric("assayNames"))

setMethod("assayNames", "SummarizedExperiment",
    function(x, ...)
{
    names(assays(x, withDimnames=FALSE))
})

setGeneric("assayNames<-",
    function(x, ..., value) standardGeneric("assayNames<-"))

setReplaceMethod("assayNames", c("SummarizedExperiment", "character"),
    function(x, ..., value)
{
    names(assays(x, withDimnames=FALSE)) <- value
    x
})
## cannonical location for dim, dimnames; dimnames should be checked
## for consistency (if non-null) and stripped from assays on
## construction, or added from assays if row/col names are NULL in
## <SummarizedExperiment> but not assays. dimnames need to be added on
## to assays when assays() invoked
setMethod("dim", "SummarizedExperiment",
function(x)
{
c(length(x), nrow(colData(x)))
})
setMethod("dimnames", "SummarizedExperiment",
function(x)
{
list(names(x), rownames(colData(x)))
})
setReplaceMethod("dimnames", c("SummarizedExperiment", "list"),
function(x, value)
{
NAMES <- S4Vectors:::normalize_names_replacement_value(value[[1]], x)
colData <- colData(x)
rownames(colData) <- value[[2]]
BiocGenerics:::replaceSlots(x, NAMES=NAMES, colData=colData, check=FALSE)
})
setReplaceMethod("dimnames", c("SummarizedExperiment", "NULL"),
function(x, value)
{
dimnames(x) <- list(NULL, NULL)
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
## Translate character subscripts 'idx' into integer positions within 'txt'
## (e.g. rownames or colnames). Any subscript that does not match raises an
## error built from the sprintf template 'fmt', listing a sample of the
## offending names.
.SummarizedExperiment.charbound <-
    function(idx, txt, fmt)
{
    pos <- match(idx, txt)
    unmatched <- is.na(pos)
    if (any(unmatched)) {
        offenders <- paste(S4Vectors:::selectSome(idx[unmatched]),
                           collapse=" ")
        stop(sprintf(fmt, offenders))
    }
    pos
}
setMethod("[", c("SummarizedExperiment", "ANY", "ANY"),
function(x, i, j, ..., drop=TRUE)
{
if (1L != length(drop) || (!missing(drop) && drop))
warning("'drop' ignored '[,", class(x), ",ANY,ANY-method'")
if (missing(i) && missing(j))
return(x)
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- x@elementMetadata[i, , drop=FALSE]
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- x@rowRanges[i]
} else {
ans_NAMES <- x@NAMES[ii]
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
ans_colData <- x@colData[j, , drop=FALSE]
jj <- as.vector(j)
}
if (missing(i)) {
ans_assays <- x@assays[ , jj]
ans <- BiocGenerics:::replaceSlots(x, ...,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else if (missing(j)) {
ans_assays <- x@assays[ii, ]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
} else {
ans_assays <- x@assays[ii, jj]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
}
ans
})
setReplaceMethod("[",
c("SummarizedExperiment", "ANY", "ANY", "SummarizedExperiment"),
function(x, i, j, ..., value)
{
if (missing(i) && missing(j))
return(value)
ans_metadata <- c(metadata(x), metadata(value))
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- local({
emd <- x@elementMetadata
emd[i,] <- value@elementMetadata
emd
})
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- local({
r <- x@rowRanges
r[i] <- value@rowRanges
names(r)[ii] <- names(value@rowRanges)
r
})
} else {
ans_NAMES <- local({
nms <- x@NAMES
nms[ii] <- value@NAMES
nms
})
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
jj <- as.vector(j)
ans_colData <- local({
c <- x@colData
c[j,] <- value@colData
rownames(c)[jj] <- rownames(value@colData)
c
})
}
if (missing(i)) {
ans_assays <- local({
a <- x@assays
a[ , jj] <- value@assays
a
})
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
msg <- .valid.SummarizedExperiment.assays_ncol(ans)
} else if (missing(j)) {
ans_assays <- local({
a <- x@assays
a[ii, ] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment.assays_nrow(ans)
} else {
ans_assays <- local({
a <- x@assays
a[ii, jj] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment.assays_dim(ans)
}
if (!is.null(msg))
stop(msg)
ans
})
setMethod("extractROWS", "SummarizedExperiment",
function(x, i)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ]
}
)
setMethod("replaceROWS", "SummarizedExperiment",
function(x, i, value)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ] <- value
x
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Quick colData access.
###
setMethod("[[", c("SummarizedExperiment", "ANY", "missing"),
function(x, i, j, ...)
{
colData(x)[[i, ...]]
})
setReplaceMethod("[[", c("SummarizedExperiment", "ANY", "missing"),
function(x, i, j, ..., value)
{
colData(x)[[i, ...]] <- value
x
})
.DollarNames.SummarizedExperiment <- function(x, pattern)
grep(pattern, names(colData(x)), value=TRUE)
setMethod("$", "SummarizedExperiment",
function(x, name)
{
colData(x)[[name]]
})
setReplaceMethod("$", "SummarizedExperiment",
function(x, name, value)
{
colData(x)[[name]] <- value
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display.
###
setMethod("show", "SummarizedExperiment",
function(object)
{
selectSome <- S4Vectors:::selectSome
scat <- function(fmt, vals=character(), exdent=2, ...)
{
vals <- ifelse(nzchar(vals), vals, "''")
lbls <- paste(S4Vectors:::selectSome(vals), collapse=" ")
txt <- sprintf(fmt, length(vals), lbls)
cat(strwrap(txt, exdent=exdent, ...), sep="\n")
}
cat("class:", class(object), "\n")
cat("dim:", dim(object), "\n")
## metadata()
expt <- names(metadata(object))
if (is.null(expt))
expt <- character(length(metadata(object)))
scat("metadata(%d): %s\n", expt)
## assays()
nms <- assayNames(object)
if (is.null(nms))
nms <- character(length(assays(object, withDimnames=FALSE)))
scat("assays(%d): %s\n", nms)
## rownames()
dimnames <- dimnames(object)
dlen <- sapply(dimnames, length)
if (dlen[[1]]) scat("rownames(%d): %s\n", dimnames[[1]])
else scat("rownames: NULL\n")
## rowData()
scat("rowData names(%d): %s\n", names(rowData(object)))
## colnames()
if (dlen[[2]]) scat("colnames(%d): %s\n", dimnames[[2]])
else cat("colnames: NULL\n")
## colData()
scat("colData names(%d): %s\n", names(colData(object)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combine.
###
### Appropriate for objects with different ranges and same samples.
setMethod("rbind", "SummarizedExperiment",
function(..., deparse.level=1)
{
args <- unname(list(...))
.rbind.SummarizedExperiment(args)
})
.rbind.SummarizedExperiment <- function(args)
{
if (!.compare(lapply(args, colnames)))
stop("'...' objects must have the same colnames")
if (!.compare(lapply(args, ncol)))
stop("'...' objects must have the same number of samples")
if (is(args[[1L]], "RangedSummarizedExperiment")) {
rowRanges <- do.call(c, lapply(args, rowRanges))
} else {
## Code below taken from combine_GAlignments_objects() from the
## GenomicAlignments package.
## Combine "NAMES" slots.
NAMES_slots <- lapply(args, function(x) x@NAMES)
## TODO: Use elementIsNull() here when it becomes available.
has_no_names <- sapply(NAMES_slots, is.null, USE.NAMES=FALSE)
if (all(has_no_names)) {
NAMES <- NULL
} else {
noname_idx <- which(has_no_names)
if (length(noname_idx) != 0L)
NAMES_slots[noname_idx] <-
lapply(elementNROWS(args[noname_idx]), character)
NAMES <- unlist(NAMES_slots, use.names=FALSE)
}
}
colData <- .cbind.DataFrame(args, colData, "colData")
assays <- do.call(rbind, lapply(args, slot, "assays"))
elementMetadata <- do.call(rbind, lapply(args, slot, "elementMetadata"))
metadata <- do.call(c, lapply(args, metadata))
if (is(args[[1L]], "RangedSummarizedExperiment")) {
BiocGenerics:::replaceSlots(args[[1L]],
rowRanges=rowRanges, colData=colData, assays=assays,
elementMetadata=elementMetadata, metadata=metadata)
} else {
BiocGenerics:::replaceSlots(args[[1L]],
NAMES=NAMES, colData=colData, assays=assays,
elementMetadata=elementMetadata, metadata=metadata)
}
}
### Appropriate for objects with same ranges and different samples.
setMethod("cbind", "SummarizedExperiment",
function(..., deparse.level=1)
{
args <- unname(list(...))
.cbind.SummarizedExperiment(args)
})
.cbind.SummarizedExperiment <- function(args)
{
if (is(args[[1L]], "RangedSummarizedExperiment")) {
if (!.compare(lapply(args, rowRanges), TRUE))
stop("'...' object ranges (rows) are not compatible")
rowRanges <- rowRanges(args[[1L]])
mcols(rowRanges) <- .cbind.DataFrame(args, mcols, "mcols")
} else {
elementMetadata <- .cbind.DataFrame(args, mcols, "mcols")
}
colData <- do.call(rbind, lapply(args, colData))
assays <- do.call(cbind, lapply(args, slot, "assays"))
metadata <- do.call(c, lapply(args, metadata))
if (is(args[[1L]], "RangedSummarizedExperiment")) {
BiocGenerics:::replaceSlots(args[[1L]],
rowRanges=rowRanges,
colData=colData, assays=assays, metadata=metadata)
} else {
BiocGenerics:::replaceSlots(args[[1L]],
elementMetadata=elementMetadata,
colData=colData, assays=assays, metadata=metadata)
}
}
## Test whether all elements of list 'x' are pairwise "equal" to the first.
## When GenomicRanges=TRUE, compare genomic positions via identicalVals()
## (GRangesList objects are unlisted first so element values, not grouping,
## are compared); otherwise use strict identical().
## Fixes vs original: dropped the redundant all() wrapped around the scalar
## identical() result, and replaced sapply() with type-stable vapply().
.compare <- function(x, GenomicRanges=FALSE)
{
    x1 <- x[[1]]
    if (GenomicRanges) {
        if (is(x1, "GRangesList")) {
            x <- lapply(x, unlist)
            x1 <- x[[1]]
        }
        for (i in seq_along(x)[-1]) {
            if (!identicalVals(x1, x[[i]]))
                return(FALSE)
        }
        return(TRUE)
    }
    ## length-1 input vacuously compares equal (all() of logical(0) is TRUE)
    all(vapply(x[-1], function(xelt) identical(xelt, x1), logical(1)))
}
.cbind.DataFrame <- function(args, accessor, accessorName)
{
lst <- lapply(args, accessor)
if (!.compare(lst)) {
nms <- lapply(lst, names)
nmsv <- unlist(nms, use.names=FALSE)
names(nmsv) <- rep(seq_along(nms), elementNROWS(nms))
dups <- duplicated(nmsv)
## no duplicates
if (!any(dups))
return(do.call(cbind, lst))
## confirm duplicates are the same
lapply(nmsv[duplicated(nmsv)], function(d) {
if (!.compare(lapply(lst, "[", d)))
stop("column(s) '", unname(d),
"' in ", sQuote(accessorName),
" are duplicated and the data do not match")})
## remove duplicates
do.call(cbind, lst)[,!dups]
} else {
lst[[1]]
}
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### identicalVals()
###
### Internal generic and methods (i.e. not exported).
### Provides a fast implementation of 'length(x) == length(y) && all(x == y)'
### for various kinds of vector-like objects.
### TODO: Move this to S4Vectors (for the generic and methods for factor and
### Rle objects) and IRanges (for the method for Ranges objects).
###
setGeneric("identicalVals", function(x, y) standardGeneric("identicalVals"))
### Semantically equivalent to identical(as.character(x), as.character(y))
### but avoids turning the 2 factor objects into character vectors so is more
### efficient.
setMethod("identicalVals", c("factor", "factor"),
function(x, y)
{
m <- match(levels(y), levels(x), nomatch=0L)
identical(as.integer(x), m[y])
}
)
### Only factor-Rle objects are supported at the moment!
### Semantically equivalent to identical(as.character(x), as.character(y))
### but operates on the run-length encoding directly, which avoids expanding
### the two Rle objects and is therefore more efficient.
setMethod("identicalVals", c("Rle", "Rle"),
    function(x, y)
    {
        ## Mirrors the short-circuit '&&': unequal run lengths settle it.
        if (!identical(runLength(x), runLength(y)))
            return(FALSE)
        identicalVals(runValue(x), runValue(y))
    }
)
### Two Ranges objects hold the same ranges iff their start and width
### vectors match exactly (end is derived from start + width).
setMethod("identicalVals", c("Ranges", "Ranges"),
    function(x, y)
    {
        identical(start(x), start(y)) && identical(width(x), width(y))
    }
)
### Like 'x == y' this method ignores circularity of the underlying sequences
### e.g. ranges [1, 10] and [101, 110] represent the same position on a
### circular sequence of length 100 so should be considered equal. However
### for 'x == y' and the method below, they are not.
### TODO: Take circularity of the underlying sequences into account.
setMethod("identicalVals", c("GenomicRanges", "GenomicRanges"),
    function(x, y)
    {
        ## Trying to merge 'seqinfo(x)' and 'seqinfo(y)' will raise an error
        ## if 'x' and 'y' are not based on the same reference genome. This is
        ## the standard way to check that 'x' and 'y' are based on the same
        ## reference genome. Only the error side effect matters here.
        merge(seqinfo(x), seqinfo(y)) # we ignore the returned value
        ## Equal iff seqnames, ranges and strand all match position-wise;
        ## '&&' short-circuits on the first mismatch.
        identicalVals(seqnames(x), seqnames(y)) &&
            identicalVals(ranges(x), ranges(y)) &&
            identicalVals(strand(x), strand(y))
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### On-disk realization.
###
### Realize each assay of a SummarizedExperiment on disk and return the
### modified object.
setMethod("realize", "SummarizedExperiment",
    function(x)
    {
        nassay <- length(assays(x))
        for (i in seq_len(nassay)) {
            ## Dimnames of the individual assays are dropped before
            ## realization, for 2 reasons:
            ## (1) They are kind of irrelevant: the dimnames that matter are
            ##     'dimnames(x)', which are stored somewhere else in 'x', so
            ##     nothing is lost by not realizing the assay dimnames on
            ##     disk -- and skipping them saves a bit of time and disk
            ##     space.
            ## (2) The HDF5Array backend currently doesn't store dimnames in
            ##     the HDF5 file anyway.
            cur_assay <- assay(x, i, withDimnames=FALSE)
            dimnames(cur_assay) <- NULL
            assay(x, i) <- realize(cur_assay)
        }
        x
    }
)
|
## Integration tests for Screen/Plate/Well (SPW) objects against a live
## OMERO server. Connection details and expected sizes come from setup.csv
## (key/value rows; lines starting with '#' are comments).
setup <- read.csv("setup.csv", comment.char = "#", header = TRUE)

## Look up the value stored under a (case-insensitive) key in the setup table.
## Returned as character; callers convert with strtoi() where needed.
.setting <- function(key) {
  as.character(setup[grep(key, setup$Key, ignore.case = TRUE), ]$Value)
}

host <- .setting("omero.host")
port <- strtoi(.setting("omero.port"))
user <- .setting("omero.user")
pass <- .setting("omero.pass")
plateID <- strtoi(.setting("plateid"))
screenID <- strtoi(.setting("screenid"))
plateSize <- strtoi(.setting("platesize"))
wellSize <- strtoi(.setting("wellsize"))

server <- OMEROServer(host = host, port = port, username = user, password = pass)
server <- connect(server)

test_that("Test Plate getWells", {
  plate <- loadObject(server, "PlateData", plateID)
  wells <- getWells(plate)
  expect_length(wells, plateSize)
  expect_equal(class(wells[[1]])[[1]], "Well")
})

test_that("Test Plate getImages", {
  plate <- loadObject(server, "PlateData", plateID)
  imgs <- getImages(plate)
  expect_equal(dim(imgs), c(plateSize, wellSize))
  expect_equal(class(imgs[[1, 1]])[[1]], "Image")
})

test_that("Test Plate getImages of field 1", {
  plate <- loadObject(server, "PlateData", plateID)
  imgs <- getImages(plate, fieldIndex = 1)
  expect_length(imgs, plateSize)
  expect_equal(class(imgs[[1]])[[1]], "Image")
})

test_that("Test Screen getPlates", {
  screen <- loadObject(server, "ScreenData", screenID)
  plates <- getPlates(screen)
  expect_length(plates, 1)
  expect_equal(class(plates[[1]])[[1]], "Plate")
})

test_that("Test Screen getImages", {
  screen <- loadObject(server, "ScreenData", screenID)
  imgs <- getImages(screen)
  expect_length(imgs, plateSize)
  expect_equal(class(imgs[[1]])[[1]], "Image")
})

server <- disconnect(server)
| /tests/testthat/test-SPW.R | no_license | jburel/rOMERO-gateway | R | false | false | 2,075 | r | setup <- read.csv("setup.csv", comment.char = "#", header = TRUE)
host <- as.character(setup[grep("omero.host", setup$Key, ignore.case=T), ]$Value)
port <- strtoi(setup[grep("omero.port", setup$Key, ignore.case=T), ]$Value)
user <- as.character(setup[grep("omero.user", setup$Key, ignore.case=T), ]$Value)
pass <- as.character(setup[grep("omero.pass", setup$Key, ignore.case=T), ]$Value)
plateID <- strtoi(setup[grep("plateid", setup$Key, ignore.case=T), ]$Value)
screenID <- strtoi(setup[grep("screenid", setup$Key, ignore.case=T), ]$Value)
plateSize <- strtoi(setup[grep("platesize", setup$Key, ignore.case=T), ]$Value)
wellSize <- strtoi(setup[grep("wellsize", setup$Key, ignore.case=T), ]$Value)
server <- OMEROServer(host=host, port=port, username=user, password=pass)
server <- connect(server)
test_that("Test Plate getWells",{
plate <- loadObject(server, "PlateData", plateID)
wells <- getWells(plate)
expect_that(length(wells), equals(plateSize))
clazz <- class(wells[[1]])[[1]]
expect_that(clazz, equals('Well'))
})
test_that("Test Plate getImages",{
plate <- loadObject(server, "PlateData", plateID)
imgs <- getImages(plate)
expect_that(dim(imgs), equals(c(plateSize, wellSize)))
clazz <- class(imgs[[1, 1]])[[1]]
expect_that(clazz, equals('Image'))
})
test_that("Test Plate getImages of field 1",{
plate <- loadObject(server, "PlateData", plateID)
imgs <- getImages(plate, fieldIndex = 1)
expect_that(length(imgs), equals(plateSize))
clazz <- class(imgs[[1]])[[1]]
expect_that(clazz, equals('Image'))
})
test_that("Test Screen getPlates",{
screen <- loadObject(server, "ScreenData", screenID)
plates <- getPlates(screen)
expect_that(length(plates), equals(1))
clazz <- class(plates[[1]])[[1]]
expect_that(clazz, equals('Plate'))
})
test_that("Test Screen getImages",{
screen <- loadObject(server, "ScreenData", screenID)
imgs <- getImages(screen)
expect_that(length(imgs), equals(plateSize))
clazz <- class(imgs[[1]])[[1]]
expect_that(clazz, equals('Image'))
})
server <- disconnect(server)
|
# Ensure a package is attached, installing it first if necessary.
#
# x: package name as a character string.
#
# Fixes vs the original: after install.packages() the package was never
# loaded (require() had already failed and was not retried), and the
# partially-matched argument 'dep' is spelled out as 'dependencies'.
pkgTest <- function(x){
  if (!require(x, character.only = TRUE)){
    install.packages(x, dependencies = TRUE)
    # install.packages() does not attach the package; load it now so the
    # caller can rely on the package being available either way.
    library(x, character.only = TRUE)
  }
}
## Attach every package the app needs, installing missing ones via pkgTest().
## Same packages, same order as before; invisible() keeps lapply()'s NULL
## list out of the console, and the closure avoids leaking loop variables
## into the global environment.
invisible(lapply(
  c("shiny", "DT", "deSolve", "TTR", "forecast", "xlsx", "plotly",
    "ggthemes", "ggplot2", "cowplot", "shinyBS", "shinyjs", "dplyr",
    "magrittr"),
  function(pkg) {
    pkgTest(pkg)
    library(pkg, character.only = TRUE)
  }
))
#----- Read input data -------------------------------------------------------
# Excel workbook sheets (decimal comma). Sheet meanings inferred from the
# variable names -- TODO confirm against the workbook itself:
#   1: mortality parameters, 2: births, 3/4: transition matrices (women/men),
#   5: biological-state info, 6: sexual partners, 7: test sensitivities,
#   8: screening coverage.
nacimientos = as.matrix(read.xlsx("ParametrosGenerales.xlsx",2, dec =",", as.data.frame = TRUE))
parametros = as.matrix(read.xlsx("ParametrosGenerales.xlsx",1, dec =",", as.data.frame = TRUE))
transicionesM = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",3, dec =",", as.data.frame = TRUE))
transicionesH = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",4, dec =",", as.data.frame = TRUE))
# Miscellaneous scalar settings, one per row; row indices are used below.
otros = as.matrix(read.table("generales.txt", header = FALSE, dec=","))
bioloGeneral = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",5,dec =",", as.data.frame = TRUE))
parejasSexuales = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",6,dec =",", as.data.frame = TRUE))
sensibilidad = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",7,dec =",", as.data.frame = TRUE))
cobertura = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",8,dec =",", as.data.frame = TRUE))
#---- Derive model inputs ----------------------------------------------------
# Number of simulated years.
simu <- 50
# Number of sexes modelled.
gen = 2
# Drop the trailing column of each sheet (presumably a label/ID column --
# TODO confirm) and coerce back to plain matrices.
transicionesM <- as.matrix.data.frame(transicionesM[,-ncol(transicionesM)])
transicionesH <- as.matrix.data.frame(transicionesH[,-ncol(transicionesH)])
parametros <- as.matrix.data.frame(parametros[,-ncol(parametros)])
nacimientos <- as.matrix.data.frame(nacimientos[,-ncol(nacimientos)])
# Scalars from 'generales.txt' (row 1: compartment count; 2: time step 'te';
# 3: probability of a female birth; 4/5: fertile age range; 6/7: number of
# female/male transition states; 8: vaccine cost -- see assignments below).
compart <- as.numeric(otros[1,1])
nacDesde = as.numeric(otros[4,1])
nacHasta = as.numeric(otros[5,1])
# Per-state biological death rates, columns = (male, female).
muerteBiolo = cbind(as.numeric(bioloGeneral[,2]),as.numeric(bioloGeneral[,3]))
edadesFert = nacHasta-nacDesde
te <- as.numeric(otros[2,1])
probMujer <- as.numeric(otros[3,1])
# Age groups per compartment.
edades <- (ncol(parametros))/compart
# Sizes of the transition-probability matrices.
transM <- as.numeric(otros[6,1])
transH <- as.numeric(otros[7,1])
# Full table of biological states: index, state code, plus two extra columns.
vectorT <- cbind(seq(1,transM,by=1),bioloGeneral[,4],bioloGeneral[,5],bioloGeneral[,6])
# Susceptible (S) and post-infection (PI) states.
vectorS <- subset(vectorT,vectorT[,2]=="S")
vectorPI <- subset(vectorT,vectorT[,2]=="PI")
vectorSPI <- rbind(vectorS,vectorPI)
# Rebuild the state table with just (index, state code).
vectorT <- cbind(seq(1,transM,by=1),bioloGeneral[,4])
# Infected (I), lesion/progression (LP) and cancer (C) states -- codes
# inferred from usage; confirm against sheet 5.
vectorI <- subset(vectorT,vectorT[,2]=="I")
vectorLP <- subset(vectorT, vectorT[,2]=="LP")
vectorC <- subset(vectorT, vectorT[,2]=="C")
vectorILPC <- rbind(vectorI,vectorLP,vectorC)
vectorIncidence <- rbind(vectorLP,vectorC)
# Display names of the progression states (LP and C).
vectorNT <- cbind(bioloGeneral[,1],bioloGeneral[,4])
vectorNombresP <- subset(vectorNT[,1], vectorNT[,2] == "LP" | vectorNT[,2] == "C")
# Sexual-partner rates: column 3 = women, column 2 = men.
parejasSexualesM <- cbind(parejasSexuales[,1],parejasSexuales[,3])
parejasSexualesH <- cbind(parejasSexuales[,1],parejasSexuales[,2])
# Screening inputs: coverage by age, and per-test (sensitivity, cost).
coberturaTamizaje <- cbind(as.numeric(cobertura[,1]),as.numeric(cobertura[,2]),as.numeric(cobertura[,3]),as.numeric(cobertura[,4]))
sensibilidadPrubeas <- cbind(sensibilidad[,1],as.numeric(sensibilidad[,2]),as.numeric(sensibilidad[,3]))
# Cost of one vaccine dose.
costoVacuna = as.numeric(otros[8,1])
# Simulation year index 1..simu.
L=c(1:simu)
n.init <- 0.01
Kd.init <- 0.01
generarTasasMuerte <- function(){
    ## Build the simu x (edades*compart) mortality-rate matrix: for each
    ## column of 'parametros' (first row excluded -- it holds the initial
    ## population), fit exponential smoothing (Holt-Winters with trend and
    ## seasonality disabled) and replicate the one-step-ahead forecast down
    ## every simulated year. Cells beyond ncol(parametros) stay at 1, as in
    ## the original matrix(seq(1), ...) initialization.
    tasas <- matrix(1, nrow = simu, ncol = edades * compart)
    for (col in seq_len(ncol(parametros))) {
        serie <- as.double(parametros[-(1), col])
        pronostico <- forecast(HoltWinters(serie, beta = FALSE, gamma = FALSE), h = 9)
        tasas[, col] <- pronostico$mean[1]
    }
    tasas
}
tasaMuerteTotales <- generarTasasMuerte()
## Initial population of every age/compartment cell: first row of
## 'parametros' (as.numeric drops the column names, matching the old loop).
Humano <- as.numeric(parametros[1, ])
## Zero-initialized accumulators for vaccination, detection, costs,
## effectiveness and incidence (numeric(n) == rep(0, n)).
vacunadosHombres <- numeric(edades)
vacunadosMujeres <- numeric(edades)
progresionesDetectadas <- numeric(edades * nrow(vectorLP))
costoVacunacion <- 0
costoPrimaryIni <- 0
costoTriageIni <- 0
costoFollowUpIni <- 0
efectividadIni <- 0
incidenciaIni <- numeric(nrow(vectorIncidence))
#--- Birth rates: same exponential-smoothing scheme as the mortality rates ---
generarTasasNacimiento <- function(){
    tasas <- matrix(1, nrow = simu, ncol = ncol(nacimientos))
    for (col in seq_len(ncol(nacimientos))) {
        serie <- as.numeric(nacimientos[, col])
        pronostico <- forecast(HoltWinters(serie, beta = FALSE, gamma = FALSE), h = 9)
        ## every simulated year reuses the one-step-ahead forecast
        tasas[, col] <- pronostico$mean[1]
    }
    tasas
}
tasaNacimientosTotales <- generarTasasNacimiento()
#----Resolver las ecuaciones diferenciales------------------------------------------------
data <- function(vacRangeFemaleIni,vacRangeFemaleEnd,vacRangeMaleIni,vacRangeMaleEnd, vacYearsFemaleIni,vacYearsFemaleEnd, vacYearsMaleIni,vacYearsMaleEnd, vacSex, vacPorcentageFemale, vacPorcentageMale, primaryTest, iniPrimaryTest, maxPrimaryTest, stepPrimaryTest, triageTest, iniTriage, maxTriage, stepTriage,followUp, timeFollowUp){
a = 50;
nacimientosTotales = 0;
#Porcentaje de vacunación
vacMujeres = matrix(0, ncol = edades , nrow = 50)
vacHombres = matrix(0, ncol = edades , nrow = 50)
#Edades de vacunación
edadesVacFemale <- seq(vacRangeFemaleIni,vacRangeFemaleEnd,by=1)
edadesVacMale <- seq(vacRangeMaleIni,vacRangeMaleEnd,by=1)
#Años de Vacunación
yearsVacFemale <- seq(vacYearsFemaleIni,vacYearsFemaleEnd,by=1)
yearsVacMale <- seq(vacYearsMaleIni,vacYearsMaleEnd,by=1)
if(length(vacSex) == 2){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacFemale && i %in% yearsVacFemale){
vacMujeres[i,j] = vacPorcentageFemale
}
if(j %in% edadesVacMale && i %in% yearsVacMale){
vacHombres[i,j] = vacPorcentageMale
}
}
}
}else if(length(vacSex) == 1 && vacSex == "Mujeres"){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacFemale && i %in% yearsVacFemale){
vacMujeres[i,j] = vacPorcentageFemale
}
}
}
}else if(length(vacSex) == 1 && vacSex == "Hombres"){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacMale && i %in% yearsVacMale){
vacHombres[i,j] = vacPorcentageMale
}
}
}
}
#----------Vectores para screening-------------------
xij <- rep(0,times=edades)
triageij <- rep(0,times=edades)
folowUpHay <- 0
seguimiento <- followUp
sensibilidadPrimary = 0
sensibilidadTriage = 0
sensibilidadFollowUp = 0
costoPrimary = 0
costoTriage = 0
costoFollowUp = 0
contadorPrimi2 = 0
contadorPrimi = 1
if(primaryTest != "None"){
for(i in iniPrimaryTest:maxPrimaryTest){
contadorPrimi2 = contadorPrimi2+1
xij[i] = contadorPrimi
contadorPrimi = 0
if(contadorPrimi2%%stepPrimaryTest == 0){
contadorPrimi = 1
}
}
contadorPrimi2 = 0
contadorPrimi = 1
if(triageTest != "None"){
for(i in iniTriage:maxTriage){
contadorPrimi2 = contadorPrimi2+1
triageij[i] = contadorPrimi
contadorPrimi = 0
if(contadorPrimi2%%stepTriage == 0){
contadorPrimi = 1
}
}
}
}
for(i in 1:(length(sensibilidadPrubeas)/3)){
if(primaryTest != "None" && primaryTest == sensibilidadPrubeas[i,1]){
sensibilidadPrimary = as.numeric(sensibilidadPrubeas[i,2])
costoPrimary = as.numeric(sensibilidadPrubeas[i,3])
}
if(triageTest != "None" && triageTest == sensibilidadPrubeas[i,1]){
sensibilidadTriage = as.numeric(sensibilidadPrubeas[i,2])
costoTriage = as.numeric(sensibilidadPrubeas[i,3])
}
if(followUp != "None" && followUp == sensibilidadPrubeas[i,1]){
sensibilidadFollowUp = as.numeric(sensibilidadPrubeas[i,2])
costoFollowUp = as.numeric(sensibilidadPrubeas[i,3])
folowUpHay = 0
}
}
#---Generando vector de diferenciales--------
dH<-rep(0,times=ncol(parametros))
dvH<-rep(0,times=edades)
dvM<-rep(0,times=edades)
dvPD<-rep(0,times=edades*nrow(vectorLP))
dCostoVacunacion = 0
dCostoPrimary = 0
dCostoTriage = 0
dCostoFollowUp = 0
dEfectividad = 0
dIncidencia= rep(0, times = nrow(vectorIncidence))
vectorNoProgressNoSusc <- seq(1,transM, by = 1)[-c(as.numeric(vectorLP[,1]))]
vectorNoProgressNoSusc <- vectorNoProgressNoSusc[-c(as.numeric(vectorS[,1]))]
vectorProgress <-seq(1,transM, by = 1)[c(as.numeric(vectorLP[,1]))]
vectorSusc <- seq(1,transM, by = 1)[c(as.numeric(vectorS[,1]))]
#-- Escribiendo ecuaciones diferenciales-------------
poblacion <- function(t, y, parameters) {
with(as.list(y), {
#Matrices de probabilidad
auxiliarCostoVacuna = 0
auxiliarCostoPrimary = 0
auxiliarCostoTriage = 0
auxiliarCostoFollowUp = 0
auxiliarEfectividad = 0
auxiliarIncidence = rep(0, times = nrow(vectorIncidence))
mujeresTotales <- sum(y[((edades*compart/2)+1):(edades*2*transM)])+ sum(y[(transM*2*edades+edades+1):(transM*2*edades+edades*2)]) + sum(y[(compart*edades+edades*2+1):(compart*edades+edades*2+nrow(vectorLP)*edades)])
hombresTotales <- sum(y[1:(edades*compart/2)]) + sum(y[(transM*2*edades+1):(transM*2*edades+edades)])
mujeresTotalesInfectadas <- 0
hombresTotalesInfectados <- 0
for(k in as.numeric(vectorILPC[,1])){
for(l in ((edades*compart/2)+((k-1)*edades)+1):((edades*compart/2)+((k-1)*edades)+edades)){
mujeresTotalesInfectadas = mujeresTotalesInfectadas + y[l]
}
}
mujeresTotalesInfectadas = mujeresTotalesInfectadas + sum(y[(compart*edades+edades*2+1):(compart*edades+edades*2+nrow(vectorLP)*edades)])
for(k in as.numeric(vectorILPC[,1])){
for(l in (((k-1)*edades)+1):(((k-1)*edades)+edades)){
hombresTotalesInfectados = hombresTotalesInfectados + y[l]
}
}
newTransH <- transicionesH
contadorTrans1 <- 0
contadorTransi <- 0
#Matriz de Hombres:
for(i in as.numeric(vectorSPI[,1])){
contadorTransi = contadorTransi + 1
seqT <- seq(((as.numeric(parejasSexualesH[1,1])-1)*(transM+1))+1+i, 1+(transM+1)*edades ,by=transM+1)
contadorTrans1 = 0
for(j in seqT){
contadorTrans1 = contadorTrans1 +1
newTransH[j,as.numeric(vectorI[,1])]= 1-exp(-(mujeresTotalesInfectadas/mujeresTotales)*as.numeric(parejasSexualesH[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,3])*1)
newTransH[j,i]= 1-(1-exp(-(mujeresTotalesInfectadas/mujeresTotales)*as.numeric(parejasSexualesH[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,3])*1))
}
}
newTransM <- transicionesM
contadorTrans1 <- 0
contadorTransi <- 0
#Matriz de Mujeres:
for(i in as.numeric(vectorSPI[,1])){
contadorTransi = contadorTransi + 1
seqT <- seq(((as.numeric(parejasSexualesM[1,1])-1)*(transM+1))+1+i, 1+(transM+1)*edades ,by=transM+1)
contadorTrans1 = 0
for(j in seqT){
contadorTrans1 = contadorTrans1 +1
newTransM[j,as.numeric(vectorI[,1])]= 1-exp(-(hombresTotalesInfectados/hombresTotales)*as.numeric(parejasSexualesM[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,4])*te)
newTransM[j,i]= 1-(1-exp(-(hombresTotalesInfectados/hombresTotales)*as.numeric(parejasSexualesM[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,4])*te))
}
}
#contador para fertilidad
contador3 = 0
#---------Generando nacimientos------------
for(l in seq((edades*compart/2),(edades*(compart-1)),by=edades)){
for(k in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[l+k]*tasaNacimientosTotales[t,contador3]
}
}
#Generando nacimientos para las vacunadas
contador3 = 0
for(w in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[transM*2*edades+edades+w]*tasaNacimientosTotales[t,contador3]
}
#Generando nacimiento para las detectadas
contador3 = 0
conti = 0
for(l in 1:nrow(vectorLP)){
conti = conti +1
for(w in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[transM*2*edades+edades*2+(conti-1)*edades + w]*tasaNacimientosTotales[t,contador3]
}
}
#contador lleva la cuenta de cuál compartimiento biológico es
contador=0
#------------------Ecuaciones Humano-------------------------------------------------------------
# el primer for recorre cada número de edad (ej edad=3 entonces 0,3,6,9)
for(j in seq(0,edades*(compart-1),by=edades)){
contador = contador +1
#---Generando ecuaciones diferenciales por estad�o biológico
if(any(contador == as.numeric(vectorS[,1]))){
dH[1+j] = (1-probMujer)*(nacimientosTotales)-((tasaMuerteTotales[t,1+j]+muerteBiolo[contador,1])*y[1+j])-vacHombres[t,1]*y[1+j] -te*(y[1+j]-min(1,(tasaMuerteTotales[t,1+j]+muerteBiolo[contador,1]+vacHombres[t,1]))*y[1+j])
dvH[1] = vacHombres[t,1]*y[1+j] - tasaMuerteTotales[t,1+j]*y[transM*2*edades+1]- te*(y[transM*2*edades+1]-(tasaMuerteTotales[t,1+j]*y[transM*2*edades+1]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,1]*y[1+j]
}else if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[1+j] = (probMujer)*(nacimientosTotales)-((tasaMuerteTotales[t,1+j]+muerteBiolo[contador-transM,2])*y[1+j]) -vacMujeres[t,1]*y[1+j] -te*(y[1+j]-min(1,(tasaMuerteTotales[t,1+j]+muerteBiolo[contador-transM,2]+vacMujeres[t,1]))*y[1+j])
dvM[1] = vacMujeres[t,1]*y[1+j] - tasaMuerteTotales[t,1+j]*y[transM*2*edades+edades+1]- te*(y[transM*2*edades+edades+1]-(tasaMuerteTotales[t,1+j]*y[transM*2*edades+edades+1]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,1]*y[1+j]
}else{
dH[1+j]=0
}
#----Ecuaciones generales rangos medios de edad
if(edades >2){
if(contador <= compart/2){
for (i in 2:(edades-1)){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState <- seq(0+i-1,edades*(compart/2),by=edades)
if(any(contador == as.numeric(vectorS[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]-vacHombres[t,i-1])))) -vacHombres[t,i]*y[i+j] -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1]+vacHombres[t,i])*y[i+j])
dvH[i] = te*(y[transM*2*edades+i-1]-(tasaMuerteTotales[t,i-1]*y[transM*2*edades+i-1])) +vacHombres[t,i]*y[i+j] -tasaMuerteTotales[t,i]*y[transM*2*edades+i]- te*(y[transM*2*edades+i]-(tasaMuerteTotales[t,i]*y[transM*2*edades+i]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,i]*y[i+j]
}else{
dH[i+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1]))*y[i+j])
}
}
}else{ #acá empiezan las mujeres
for (i in 2:(edades-1)){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seqDetected <- seq(edades*compart+edades*2+ i-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +i-1,by=edades)
if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))-vacMujeres[t,i]*y[i+j] + te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2]))))-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]+vacMujeres[t,i])*y[i+j])
#Vacunas
dvM[i] = te*(y[transM*2*edades+edades+i-1]-tasaMuerteTotales[t,i-1]*y[transM*2*edades+edades+i-1]) + vacMujeres[t,i]*y[i+j] - tasaMuerteTotales[t,i]*y[transM*2*edades+edades+i]-te*(y[transM*2*edades+edades+i]-(tasaMuerteTotales[t,i]*y[transM*2*edades+edades+i]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,i]*y[i+j]
}else if(any(contador == transM + as.numeric(vectorPI[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j])+
te*xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(1-triageij[i-1])*sum(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
te*xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(triageij[i-1]*sensibilidadTriage*triageij[i-1])*sum(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
te*(1-xij[i-1])*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
te*(xij[i-1])*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*xij[i-1]*triageij[i-1]*(1-triageij[i-1]*sensibilidadTriage))*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
((timeFollowUp/12))*sensibilidadFollowUp*folowUpHay*sum(y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
((timeFollowUp/12))*folowUpHay*(1-sensibilidadFollowUp*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))+(1-((timeFollowUp/12)))*folowUpHay*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}else if(any(contador == transM + as.numeric(vectorI[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j])+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(triageij[i-1]*(1-sensibilidadTriage)))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-sensibilidadFollowUp*folowUpHay))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
((1-(timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))+((1-((timeFollowUp/12)))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}else if(any(contador == transM + as.numeric(vectorLP[,1]))){
vectorProgressc <-seq(1,transM, by = 1)[c(as.numeric(vectorC[,1]))]
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]))*y[i+j])+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*(triageij[i-1])*(1-sensibilidadTriage)*(1-folowUpHay))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))
auxiliarCostoPrimary = auxiliarCostoPrimary + xij[i]*as.numeric(coberturaTamizaje[i,2])*y[i+j]
auxiliarCostoTriage = auxiliarCostoPrimary + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*triageij[i]*y[i+j]
auxiliarCostoFollowUp = auxiliarCostoFollowUp + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*triageij[i]*sensibilidadTriage*y[i+j]
auxiliarEfectividad = auxiliarEfectividad + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*y[i+j]*as.numeric(coberturaTamizaje[i,4])
auxiliarIncidence[contador-transM - nrow(vectorIncidence)] = auxiliarIncidence[contador-transM - nrow(vectorIncidence)] + te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) +
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])) +
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*(triageij[i-1])*(1-sensibilidadTriage)*(1-folowUpHay))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))
}else if(any(contador == transM + as.numeric(vectorC[,1]))){
vectorProgressc <-seq(1,transM, by = 1)[c(as.numeric(vectorC[,1]))]
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorProgressc],contador-compart/2])*(y[seqState[vectorProgressc]]*(1-tasaMuerteTotales[t,seqState[vectorProgressc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]))*y[i+j])+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
te*(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1])))+ te*((1-(timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])))
auxiliarIncidence[contador-transM - nrow(vectorIncidence)] = auxiliarIncidence[contador-transM - nrow(vectorIncidence)] + te*(sum(as.numeric(newTransM[seq1[vectorProgressc],contador-compart/2])*(y[seqState[vectorProgressc]]*(1-tasaMuerteTotales[t,seqState[vectorProgressc]]-muerteBiolo[contador-transM,2]))))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
te*(((timeFollowUp/12))*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
#te*(1-((input$timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}
}
}
}
i = i+1
#Ecuación último rango de edad
if (contador <= compart/2){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState <- seq(0+i-1,edades*(compart/2),by=edades)
i = i+1
seq2 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState2 <- seq(0+i-1,edades*(compart/2),by=edades)
#Ecuaciones
if(any(contador == as.numeric(vectorS[,1]))){
dH[edades+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]-vacHombres[t,i-1]))))+
te*(sum(as.numeric(newTransH[seq2,contador])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador,1])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,1]+vacHombres[t,edades]))*y[edades+j])-vacHombres[t,edades]*y[edades+j]
dvH[edades] = te*(y[transM*2*edades+edades-1]-(tasaMuerteTotales[t,edades-1]*y[transM*2*edades+edades-1]))+ vacHombres[t,edades]*y[edades+j] - tasaMuerteTotales[t,edades]*y[transM*2*edades+edades]
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,edades]*y[edades+j]
}else{
dH[edades+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]))))+
te*(sum(as.numeric(newTransH[seq2,contador])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador,1])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,1]))*y[edades+j])
}
} else{
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
i = i+1
seq2 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState2 <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seqDetected <- seq(edades*compart+edades*2+ edades-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +edades-1,by=edades)
if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[edades+j] = -vacMujeres[t,edades]*y[edades+j] +te*(sum(as.numeric(newTransM[seq1,contador-compart/2])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador-compart/2,2] -vacMujeres[t,i-1]))))+
te*(sum(as.numeric(newTransM[seq2,contador-compart/2])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador-compart/2,2])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-transM,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-compart/2,1]+vacMujeres[t,edades]))*y[edades+j])
dvM[edades] = te*(y[transM*2*edades+edades+edades-1]-tasaMuerteTotales[t,edades-1]*y[transM*2*edades+edades+edades-1]) + vacMujeres[t,edades]*y[edades+j] - tasaMuerteTotales[t,edades]*y[transM*2*edades+edades+edades]-te*(y[transM*2*edades+edades+edades]-(tasaMuerteTotales[t,edades])*y[transM*2*edades+edades+edades])
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,edades]*y[edades+j]
}else{
dH[edades+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-compart/2,2]))))+
te*(sum(as.numeric(newTransM[seq2,contador-compart/2])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador-compart/2,2])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-transM,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-compart/2,1]))*y[edades+j])+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,edades-1]))))
}
}
}# cierra el ciclo en los j
dCostoVacunacion = auxiliarCostoVacuna
dCostoPrimary = auxiliarCostoPrimary
dCostoTriage = auxiliarCostoTriage
dCostoFollowUp = auxiliarCostoFollowUp
dEfectividad = auxiliarEfectividad
for(i in 1:nrow(vectorIncidence)){
dIncidencia[i] = auxiliarIncidence[i]
}
ayudaLP = 1
for(j in 1:nrow(vectorLP)){
dvPD[1+edades*(j-1)] = 0
for(i in 2:edades){
seqDetected <- seq(edades*compart+edades*2+ i-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +i-1,by=edades)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
dvPD[i+edades*(j-1)] = te*(((timeFollowUp/12)))*sum((y[seqDetected])*as.numeric(newTransM[seq1[vectorProgress],as.numeric(vectorLP[,1])]))+
(te*(1-(tasaMuerteTotales[t,i+(as.numeric(vectorLP[j,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))*sum((y[seqState[vectorProgress]])*as.numeric(newTransM[seq1[vectorProgress],as.numeric(vectorLP[j,1])])))*(xij[i-1]*as.numeric(coberturaTamizaje[edades-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage)*folowUpHay)-
te*(((timeFollowUp/12))*folowUpHay)*y[compart*edades+edades*2+i+edades*(j-1)]*(1-(tasaMuerteTotales[t,i+as.numeric(vectorLP[j,1])*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))-
te*y[compart*edades+edades*2+i+edades*(j-1)]*((tasaMuerteTotales[t,i+(as.numeric(vectorLP[j,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))+te*((1-((timeFollowUp/12)))*folowUpHay)*y[compart*edades+edades*2+i+edades*(j-1)]*(1-(tasaMuerteTotales[t,i+(as.numeric(vectorLP[ayudaLP,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[ayudaLP,1]),2]))
}
}
setWinProgressBar(pb, t,label=paste(round(t/50*100, 0), "% simulation that has been runed" ))
list(c(dH,dvH,dvM, dvPD,dCostoVacunacion,dCostoPrimary,dCostoTriage,dCostoFollowUp,dEfectividad,dIncidencia))
})
}
parameters <- c(te ,edades)
stateInicial<- c(Humano,vacunadosHombres,vacunadosMujeres,progresionesDetectadas,costoVacunacion,costoPrimaryIni,costoTriageIni,costoFollowUpIni,efectividadIni,incidenciaIni)
times <- seq(1, 50, by = 1)
pb <- winProgressBar(title = "Progress Bar for Simulation", label = " ",min = 0, max = a, width = 300)
out <- deSolve::ode(y = stateInicial, times = times, func = poblacion, parms = parameters)
close(pb)
#--------------Sacando Medidas de Desempeño----------------------------------
#matriz donde guarda la solución
bioloGeneral = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",5,dec =",", as.data.frame = TRUE))
nombresBiolo <- bioloGeneral[,1]
nombresFile <- vector(length = edades*length(nombresBiolo)*2)
nombresFileVacunasH <- vector(length=edades)
nombresFileVacunasM <- vector(length=edades)
nombresFileIncidence <- vector(length = nrow(vectorIncidence))
seqNombre <- seq(1,edades*length(nombresBiolo)*2,edades)
seqEdad <- seq(1,edades,1)
ayuda1 <- 0
contador <- 1
contador2 <- 1
for(i in 1:length(nombresFile)){
if(contador == (edades+1)){
contador = 1
contador2 = contador2 +1
}
ayuda1 = ayuda1 +1
if(i <= (length(nombresFile)/2)){
nombresFile[ayuda1] = paste0("M_",nombresBiolo[contador2],"_", contador)
}else{
nombresFile[ayuda1] = paste0("F_",nombresBiolo[contador2-transM],"_",contador)
}
contador = contador +1
}
for(i in 1:edades){
nombresFileVacunasH[i] = paste0("M_Vaccines_",i)
}
for(i in 1:edades){
nombresFileVacunasM[i] = paste0("F_Vaccines_",i)
}
for(i in 1:nrow(vectorIncidence)){
nombresFileIncidence[i] = i
}
dat <- matrix(rep(0),nrow=50,ncol=(ncol(parametros)+edades*2 + 1 + 3 + 1 + nrow(vectorIncidence)))
colnames(dat) <- c(nombresFile,nombresFileVacunasH,nombresFileVacunasM,"Vaccination Cost", "Primary Test Cost", "Triage Test Cost", "Follow Up Test Cost", "Efetivness",nombresFileIncidence)
#Por edad
for(i in 1:(ncol(parametros)+edades*2)){
dat[,i]=out[,i+1]
}
#Agregando los Detectados a los Progress
contadorPL = 0
for(j in 1:nrow(vectorLP)){
contadorPL = contadorPL + 1
for (i in 1:edades){
dat[,edades*compart/2+edades*(as.numeric(vectorLP[j,1])-1) + i] = dat[,edades*compart/2+edades*(as.numeric(vectorLP[j,1])-1) + i] + out[,1+edades*compart+edades*2+edades*(contadorPL-1) + i]
}
}
#Arreglando los datos de los costos y la efetividad
dat[1,ncol(parametros)+edades*2 + 1] = out[1,edades*(compart+2+nrow(vectorLP))+2]*costoVacuna
dat[1,ncol(parametros)+edades*2 + 2] = out[1,edades*(compart+2+nrow(vectorLP))+3]*costoPrimary
dat[1,ncol(parametros)+edades*2 + 3] = out[1,edades*(compart+2+nrow(vectorLP))+4]*costoTriage
dat[1,ncol(parametros)+edades*2 + 4] = out[1,edades*(compart+2+nrow(vectorLP))+5]*costoFollowUp
dat[1,ncol(parametros)+edades*2 + 5] = out[1,edades*(compart+2+nrow(vectorLP))+6]
for(i in 2:50){
dat[i,ncol(parametros)+edades*2 + 1] = (out[i,edades*(compart+2+nrow(vectorLP))+2]*costoVacuna - sum(dat[1:(i-1),edades*(compart+2)+1]))
dat[i,ncol(parametros)+edades*2 + 2] = (out[i,edades*(compart+2+nrow(vectorLP))+3]*costoPrimary - sum(dat[1:(i-1),edades*(compart+2)+2]))
dat[i,ncol(parametros)+edades*2 + 3] = (out[i,edades*(compart+2+nrow(vectorLP))+4]*costoTriage - sum(dat[1:(i-1),edades*(compart+2)+3]))
dat[i,ncol(parametros)+edades*2 + 4] = (out[i,edades*(compart+2+nrow(vectorLP))+5]*costoFollowUp - sum(dat[1:(i-1),edades*(compart+2)+4]))
dat[i,ncol(parametros)+edades*2 + 5] = (out[i,edades*(compart+2+nrow(vectorLP))+6] - sum(dat[1:(i-1),edades*(compart+2)+5]))
}
#Agregando la incidencia
for(i in 1:nrow(vectorIncidence)){
dat[1,ncol(parametros)+edades*2 + 5+ i] = out[1,edades*(compart+2+nrow(vectorLP))+6 + i]
}
for(i in 1:nrow(vectorIncidence)){
for(j in 2:50){
dat[j,ncol(parametros)+edades*2 + 5+ i] = (out[j,edades*(compart+2+nrow(vectorLP))+6+i] - sum(dat[1:(j-1),edades*(compart+2)+5+i]))
}
}
return(dat)
}
# Argument order reference for data():
# data(vacRangeFemaleIni,vacRangeFemaleEnd,vacRangeMaleIni,vacRangeMaleEnd, vacYearsFemaleIni,vacYearsFemaleEnd, vacYearsMaleIni,vacYearsMaleEnd, vacSex, vacPorcentageFemale, vacPorcentageMale, primaryTest, iniPrimaryTest, maxPrimaryTest, stepPrimaryTest, triageTest, iniTriage, maxTriage, stepTriage,followUp, timeFollowUp)
# Run the 50-year simulation once: vaccinate ages 15-30 of both sexes during
# years 5-10 at 15% coverage; screen ages 25-65 with an HPV-DNA primary test
# and cytology triage/follow-up, with a step of 5 between screenings
# (presumably every 5th age — TODO confirm against the xij construction) and a
# follow-up interval of 6 (months, per timeFollowUp/12 usage — confirm).
dat<- data(15,30,15,30,5,10,5,10,c("Hombres","Mujeres"),0.15,0.15,"HPV-DNA", 25,65,5,"Citology",25,65,5,"Citology",6)
#---------------------------- Plots ------------------------------------------------------
# buildPlot: build a plotly line chart of population totals over time.
#
# Aggregates the global simulation-result matrix `dat` (also reads the globals
# `edades`, `compart`, `transM`, `bioloGeneral`) over the selected genders,
# biological compartments and age range, then renders a ggplot converted to
# plotly.
#
# sexoUsuario ............ character vector of selected genders: "Mujeres",
#                          "Hombres" and/or the sentinel "sumSexo" (sum both).
# biologicalCompUsuario .. character vector of biological compartment names;
#                          may include "Vaccinated" and "Sum".
# edadUsuario ............ length-2 numeric vector: first and last age index.
# num .................... number of simulated years to plot.
# Returns: a plotly object.
buildPlot <- function(sexoUsuario, biologicalCompUsuario, edadUsuario, num){
#Plot showing the population total for a specific compartment selection.
#Encode the gender selection as an index and pick the plot title:
# 1 = females only, 2 = males only, 3 = both shown separately,
# 4 = both summed ("sumSexo" together with both genders).
sexoIndice <- 0
#enters the first branch when "sum" is not selected
if(length(sexoUsuario)==2 && ((sexoUsuario[1]=="Mujeres"||sexoUsuario[1]=="Hombres")&&(sexoUsuario[2]=="Mujeres"||sexoUsuario[2]=="Hombres"))){
titulo <- paste0("Number of Males and Females")
sexoIndice <- 3
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Hombres") || (length(sexoUsuario)==2 && sexoUsuario[1]=="Hombres" && sexoUsuario[2]=="sumSexo")){
titulo <- paste0("Number of Males")
sexoIndice <- 2
}else if(length(sexoUsuario) == 3){ #enters here when all three options are selected
titulo <- paste0("Total number of Males and Females")
sexoIndice <- 4
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Mujeres") || (length(sexoUsuario)==2 && (sexoUsuario[1]=="Mujeres" && sexoUsuario[2]=="sumSexo"))){
titulo <- paste0("Number of Females")
sexoIndice <- 1
}else if(length(sexoUsuario)==2 && sexoUsuario[2]=="Hombres" && sexoUsuario[1]=="sumSexo"){
titulo <- paste0("Number of Males")
sexoIndice <- 2
}
#Map the selected compartment names (biologicalCompUsuario) to their indices.
#Slots transM+1 and transM+2 are the synthetic "Vaccinated" and "Sum" entries.
nombresBiolo <- (bioloGeneral[,1])
nombresBiolo[transM+1] <- "Vaccinated"
nombresBiolo[transM+2] <- "Sum"
inputBiolo <- vector()
filasBiolo <- 0
for(j in 1:length(biologicalCompUsuario)){
for(i in 1:(transM+2)){
if(biologicalCompUsuario[j] == nombresBiolo[i]){
filasBiolo= filasBiolo + 1
inputBiolo[filasBiolo] = i
}
}
}
#Long-format accumulator for plotting (one row per year/compartment/gender).
ap <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("year","value", "biologicalCompartment","gender"))
ayuda <-0
#Selected age range (inclusive)
rangoEdad <- seq(edadUsuario[1],edadUsuario[2],by = 1)
#Was "Sum" selected among the biological compartments? If so, the synthetic
#"Sum" index is assumed to be the last selection and is excluded from the
#per-compartment loop below (hastaBio is shortened by one).
sumBiolo <- FALSE
hastaBio <- length(biologicalCompUsuario)
bioloCompSum <- inputBiolo
for(m in 1:length(bioloCompSum)){
if(bioloCompSum[m]==(transM + 2)){
sumBiolo <- TRUE
hastaBio <- length(biologicalCompUsuario)-1
}
}
#Extract the needed totals from the ODE result matrix `dat`.
#Column layout (as used below): male compartments first, then female
#compartments offset by compart*edades/2, then vaccinated males at
#2*transM*edades and vaccinated females edades further on.
for(k in 1:hastaBio) {
#NOTE: totalTipo/totalTipo1 are seeded with seq(1,num) (element i starts at
#the value i); the seed is removed again by the `- i` at the end of the
#inner loop, so they behave as zero-initialized accumulators.
totalTipo = seq(1,num,by=1)
totalTipo1 = seq(1,num,by=1)
for(i in 1:num){
if(sexoIndice==2){
if(inputBiolo[k] !=(transM+1)){
#males, regular compartment inputBiolo[k]
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
#males, vaccinated columns
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}else if(sexoIndice==1){
if(inputBiolo[k] !=(transM+1)){
#females, regular compartment (offset by the male half of the matrix)
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
}else{
#females, vaccinated columns
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades]
}
}
}else if(sexoIndice==4){
#both genders summed into a single series
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]+
dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades] + dat[i,j+2*transM*edades]
}
}
}else if (sexoIndice ==3){
#both genders as separate series: totalTipo = men, totalTipo1 = women
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,j+2*transM*edades+edades]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}
#remove the seq(1,num) seed (see note above) and round for display
totalTipo[i] = round(totalTipo[i]-i,digits = 2)
totalTipo1[i] = round(totalTipo1[i]-i, digits = 2)
}
ayuda <- nrow(ap)
nombre <- nombresBiolo[inputBiolo[k]]
#Append this compartment's series to the long-format data frame `ap`.
if(sexoIndice==3){
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Men"
}
ayuda <- nrow(ap)
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo1[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Women"
}
}else{
for(i in 1:
num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = ""
}
}
}
#Build the ggplot: one line per compartment (and per gender when sexoIndice==3);
#when "Sum" was selected, first collapse `ap` across compartments into `ap2`.
if(sumBiolo==FALSE){
if(sexoIndice != 3){
ggideal_point <- ggplot(ap, aes(x=year,y=value/1000000,colour=biologicalCompartment))
}else{
ggideal_point <- ggplot(ap, aes(x=year,y=value/1000000,group=interaction(biologicalCompartment,gender),colour=biologicalCompartment,linetype=factor(gender)))
}
}else if (sumBiolo == TRUE){
if(sexoIndice != 3){
ap2 <- setNames(data.frame(matrix(ncol = 2, nrow = num)), c("year","value"))
ap2[,2]<-0
for(l in 1:nrow(ap)){
ap2[ap[l,1],1]=ap[l,1]
ap2[ap[l,1],2]=ap2[ap[l,1],2]+ap[l,2]
}
ggideal_point <- ggplot(ap2, aes(x=year,y=value/1000000, colour="#C9A086"))
}else{
#two summed series: men rows first, women rows offset by num
ap2 <- setNames(data.frame(matrix(ncol = 3, nrow = 2*num)), c("year","value","gender"))
ap2[,2]<-0
for(l in 1:nrow(ap)){
if(ap[l,4]=="Men"){
ap2[ap[l,1],1]=ap[l,1]
ap2[ap[l,1],2]=ap2[ap[l,1],2]+ap[l,2]
ap2[ap[l,1],3]="Men"
}else if(ap[l,4]=="Women"){
ap2[ap[l,1]+num,1]=ap[l,1]
ap2[ap[l,1]+num,2]=ap2[ap[l,1]+num,2]+ap[l,2]
ap2[ap[l,1]+num,3]="Women"
}
}
ggideal_point <- ggplot(ap2, aes(x=year,y=value/1000000,colour=gender))
}
}
#Common styling: values plotted in millions, one tick per simulated year.
ggideal_point <- ggideal_point + geom_line() + geom_point(shape=5)+
labs(x = "Year", y= "Unit: Millions", title = titulo) +
scale_colour_hue("",l = 70, c = 150) +
theme(legend.title=element_blank(),legend.text=element_text(size=8),axis.title.y = element_text(size=12), legend.position = c(0.8, 0.2),axis.text.x = element_text(vjust=0.5, size=10),axis.text.y = element_text(vjust=0.5, size=10),axis.title.x = element_text(size = 12)) +
background_grid(major = "xy", minor = "xy",colour.major = "grey90",colour.minor = "grey90") +
panel_border() + scale_x_continuous(breaks=seq(1, num, 1))
#Axis fonts
f1 <- list(size = 13,color = "grey")
f2 <- list(size = 11,color = "black")
al <- list(titlefont = f1,showticklabels = TRUE,tickfont = f2,exponentformat = "E")
#Margins
m <- list(l = 50,r = 110,b = 100,t = 50,pad = 4,autoexpand = FALSE)
# Convert ggplot object to plotly with a fixed canvas size
gg <- plotly_build(ggideal_point)%>%layout(autosize = FALSE,width = 620, height = 450,margin=m,xaxis = al, yaxis = al)
gg
}
# Example: plot both genders, every biological compartment, ages 5-35, 30 years.
buildPlot(c("Mujeres","Hombres"), bioloGeneral[,1],c(5,35),30)
#--------------------------------------------- Tables ----------------------------------------
# buildTable: tabulate population totals over time.
#
# Same aggregation logic as buildPlot() (reads the globals `dat`, `edades`,
# `compart`, `transM`, `bioloGeneral`) but returns a long-format data frame
# instead of a chart.
#
# sexoUsuario ............ character vector of genders: "Mujeres", "Hombres"
#                          and/or the sentinel "sumSexo".
# biologicalCompUsuario .. character vector of compartment names (may include
#                          "Vaccinated"; "Sum" alone is rejected).
# edadUsuario ............ length-2 numeric vector: first and last age index.
# num .................... number of simulated years to tabulate.
# Returns: data frame with columns Year / Value / Biological Comp. (/ Gender
# when both genders are shown separately).
buildTable <- function(sexoUsuario, biologicalCompUsuario, edadUsuario, num){
#Input validation — validate()/need() come from the shiny package and are
#intended for use inside a reactive context.
validate(
need(length(sexoUsuario)>=2 || sexoUsuario[1] == "Mujeres" || sexoUsuario[1] == "Hombres", 'Please select at least one gender and update the graph'),
need(biologicalCompUsuario != "Sum", 'Please select at least one biological compartment and update the table'),
need(edadUsuario[1] >0 && edadUsuario[2] <= edades, 'Please select a valid age range and update the table')
)
#sexoIndice encodes the gender selection exactly as in buildPlot():
# 1 = females, 2 = males, 3 = both separately, 4 = both summed.
sexoIndice <- 0
sexoUsuario # no-op: value evaluated and discarded
#enters the first branch when "sum" is not selected
if(length(sexoUsuario)==2 && ((sexoUsuario[1]=="Mujeres"||sexoUsuario[1]=="Hombres")&&(sexoUsuario[2]=="Mujeres"||sexoUsuario[2]=="Hombres"))){
sexoIndice <- 3
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Hombres") || (length(sexoUsuario)==2 && sexoUsuario[1]=="Hombres" && sexoUsuario[2]=="sumSexo")){
sexoIndice <- 2
}else if(length(sexoUsuario) == 3){ #enters here when all three options are selected
sexoIndice <- 4
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Mujeres") || (length(sexoUsuario)==2 && (sexoUsuario[1]=="Mujeres" && sexoUsuario[2]=="sumSexo"))){
sexoIndice <- 1
}else if(length(sexoUsuario)==2 && sexoUsuario[2]=="Hombres" && sexoUsuario[1]=="sumSexo"){
sexoIndice <- 2
}
#Result accumulator: a Gender column is only needed when both genders are
#shown as separate rows (sexoIndice == 3).
if(sexoIndice == 3){
ap <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("Year","Value", "Biological Comp.","Gender"))
}else{
ap <- setNames(data.frame(matrix(ncol = 3, nrow = 0)), c("Year","Value", "Biological Comp."))
}
#Map selected compartment names to indices; slots transM+1/transM+2 are the
#synthetic "Vaccinated" and "Sum" entries.
nombresBiolo <- (bioloGeneral[,1])
nombresBiolo[transM+1] <- "Vaccinated"
nombresBiolo[transM+2] <- "Sum"
inputBiolo <- vector()
filasBiolo <- 0
for(j in 1:length(biologicalCompUsuario)){
for(i in 1:(transM+2)){
if(biologicalCompUsuario[j] == nombresBiolo[i]){
filasBiolo= filasBiolo + 1
inputBiolo[filasBiolo] = i
}
}
}
ayuda <-0
rangoEdad <- seq(edadUsuario[1],edadUsuario[2],by = 1)
#Was "Sum" selected among the biological compartments? If so, exclude that
#synthetic index from the per-compartment loop (hastaBio shortened by one).
sumBiolo <- FALSE
hastaBio <- length(biologicalCompUsuario)
bioloCompSum <- inputBiolo
for(m in 1:length(bioloCompSum)){
if(bioloCompSum[m]==(transM + 2)){
sumBiolo <- TRUE
hastaBio <- length(biologicalCompUsuario)-1
}
}
#Extract the needed totals from the ODE result matrix `dat` (same column
#layout as in buildPlot: males first, females offset by compart*edades/2,
#vaccinated males at 2*transM*edades, vaccinated females edades further on).
for(k in 1:hastaBio) {
#totalTipo/totalTipo1 are seeded with seq(1,num); the seed is removed by the
#`- i` at the end of the inner loop, so they act as zero-initialized sums.
totalTipo = seq(1,num,by=1)
totalTipo1 = seq(1,num,by=1)
for(i in 1:num){
if(sexoIndice==2){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}else if(sexoIndice==1){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades]
}
}
}else if(sexoIndice==4){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]+
dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades] + dat[i,j+2*transM*edades]
}
}
}else if (sexoIndice ==3){
#both genders separately: totalTipo = men, totalTipo1 = women
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,j+2*transM*edades+edades]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}
#remove the seq(1,num) seed and round for display
totalTipo[i] = round(totalTipo[i]-i,digits = 2)
totalTipo1[i] = round(totalTipo1[i]-i, digits = 2)
}
ayuda <- nrow(ap)
nombre <- nombresBiolo[inputBiolo[k]]
#Append this compartment's series to the output data frame.
if(sexoIndice==3){ #both genders selected: emit separate Men and Women rows
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Men"
}
ayuda <- nrow(ap)
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo1[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Women"
}
}else{
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
}
}
}
ap
}
tabla=buildTable(c("Mujeres","Hombres"), bioloGeneral[,1],c(5,35),30)
# ---- file boundary (dataset metadata): /ModeloCancerFinal/funciones.R | no_license | dcsolano10/evaluacioncc | R | 56,592 bytes ----
# Ensure that package `x` is installed and attached.
#
# @param x  package name as a character string.
#
# If the package cannot be attached, it is installed (with dependencies) and
# then attached. The original version only installed it: `require()` had
# already returned FALSE, so the session was left without the package until
# a later library() call.
pkgTest <- function(x){
  if (!require(x, character.only = TRUE)){
    install.packages(x, dep = TRUE)
    # attach the freshly installed package so callers can use it immediately
    library(x, character.only = TRUE)
  }
}
# Install (if missing) and attach every package the app depends on, in the
# same order as before.
required_pkgs <- c(
  "shiny", "DT", "deSolve", "TTR", "forecast", "xlsx", "plotly",
  "ggthemes", "ggplot2", "cowplot", "shinyBS", "shinyjs", "dplyr", "magrittr"
)
for (pkg in required_pkgs) {
  pkgTest(pkg)
  library(pkg, character.only = TRUE)
}
#----- Read input data --------------------------------------------------------------------
# All model inputs come from ParametrosGenerales.xlsx (one dataset per sheet)
# plus a small generales.txt with scalar settings. Decimal separator is ",".
# Sheet contents below are inferred from variable names/usage — confirm
# against the workbook itself.
# Sheet 2: age-specific birth rates
nacimientos = as.matrix(read.xlsx("ParametrosGenerales.xlsx",2, dec =",", as.data.frame = TRUE))
# Sheet 1: per-compartment parameters (row 1 = initial populations, remaining
# rows = historical death-rate series — see generarTasasMuerte)
parametros = as.matrix(read.xlsx("ParametrosGenerales.xlsx",1, dec =",", as.data.frame = TRUE))
# Sheets 3/4: transition-probability matrices, females (M) and males (H)
transicionesM = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",3, dec =",", as.data.frame = TRUE))
transicionesH = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",4, dec =",", as.data.frame = TRUE))
# generales.txt: scalar configuration values (compartment count, time step,
# fertility age window, etc. — unpacked right below)
otros = as.matrix(read.table("generales.txt", header = FALSE, dec=","))
# Sheet 5: biological-compartment catalogue (names, death rates, state codes)
bioloGeneral = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",5,dec =",", as.data.frame = TRUE))
# Sheet 6: sexual-partner rates by age (col 2 = males, col 3 = females)
parejasSexuales = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",6,dec =",", as.data.frame = TRUE))
# Sheet 7: screening-test sensitivities and costs
sensibilidad = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",7,dec =",", as.data.frame = TRUE))
# Sheet 8: screening coverage by age
cobertura = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",8,dec =",", as.data.frame = TRUE))
#---- Derive the model parameters from the raw inputs -------------------------------------
# Simulation horizon (years)
simu <- 50
# Number of genders
gen = 2
# Drop the last (empty/label) column of each sheet and coerce to numeric-friendly matrices
transicionesM <- as.matrix.data.frame(transicionesM[,-ncol(transicionesM)])
transicionesH <- as.matrix.data.frame(transicionesH[,-ncol(transicionesH)])
parametros <- as.matrix.data.frame(parametros[,-ncol(parametros)])
nacimientos <- as.matrix.data.frame(nacimientos[,-ncol(nacimientos)])
# Scalars from generales.txt
compart <- as.numeric(otros[1,1])     # total number of biological compartments (both genders)
nacDesde = as.numeric(otros[4,1])     # first fertile age index
nacHasta = as.numeric(otros[5,1])     # last fertile age index
# Compartment-specific death rates: column 1 = males, column 2 = females
muerteBiolo = cbind(as.numeric(bioloGeneral[,2]),as.numeric(bioloGeneral[,3]))
edadesFert = nacHasta-nacDesde
te <- as.numeric(otros[2,1])          # time-step factor used throughout the ODEs
probMujer <- as.numeric(otros[3,1])   # probability that a newborn is female
edades <- (ncol(parametros))/compart  # number of age classes per compartment
# Number of biological transitions, females (M) and males (H)
transM <- as.numeric(otros[6,1])
transH <- as.numeric(otros[7,1])
# Catalogue of biological states: index, state code, plus two extra columns
# (presumably per-gender infection-probability parameters — confirm vs sheet 5)
vectorT <- cbind(seq(1,transM,by=1),bioloGeneral[,4],bioloGeneral[,5],bioloGeneral[,6])
# Susceptible (S) and post-infection (PI) states
vectorS <- subset(vectorT,vectorT[,2]=="S")
vectorPI <- subset(vectorT,vectorT[,2]=="PI")
vectorSPI <- rbind(vectorS,vectorPI)
# Reduced catalogue: index + state code only
vectorT <- cbind(seq(1,transM,by=1),bioloGeneral[,4])
vectorI <- subset(vectorT,vectorT[,2]=="I")    # infected
vectorLP <- subset(vectorT, vectorT[,2]=="LP") # lesion/pre-cancer ("LP")
vectorC <- subset(vectorT, vectorT[,2]=="C")   # cancer
vectorILPC <- rbind(vectorI,vectorLP,vectorC)
vectorIncidence <- rbind(vectorLP,vectorC)     # states counted as incidence
# Display names of the progressed (LP/C) states
vectorNT <- cbind(bioloGeneral[,1],bioloGeneral[,4])
vectorNombresP <- subset(vectorNT[,1], vectorNT[,2] == "LP" | vectorNT[,2] == "C")
# Sexual-partner rates split per gender (col 3 = females, col 2 = males)
parejasSexualesM <- cbind(parejasSexuales[,1],parejasSexuales[,3])
parejasSexualesH <- cbind(parejasSexuales[,1],parejasSexuales[,2])
# Screening inputs: coverage by age, and test sensitivity/cost per test type
coberturaTamizaje <- cbind(as.numeric(cobertura[,1]),as.numeric(cobertura[,2]),as.numeric(cobertura[,3]),as.numeric(cobertura[,4]))
sensibilidadPrubeas <- cbind(sensibilidad[,1],as.numeric(sensibilidad[,2]),as.numeric(sensibilidad[,3]))
# Unit cost of one vaccine dose
costoVacuna = as.numeric(otros[8,1])
L=c(1:simu)
n.init <- 0.01
Kd.init <- 0.01
# Forecast one death rate per (age, compartment) column of `parametros` for
# the whole simulation horizon.
#
# Each column's historical series (row 1 is the initial population, so it is
# dropped) is fitted with simple exponential smoothing — HoltWinters with
# trend (beta) and seasonality (gamma) disabled — via the forecast package.
# The flat one-step-ahead forecast is held constant for every simulated year.
#
# Reads the globals `parametros`, `simu`, `edades`, `compart`.
# Returns: a `simu` x (edades*compart) numeric matrix of death rates
# (row = simulated year, column layout matching `parametros`).
generarTasasMuerte <- function(){
  tasaMuerteTotales <- matrix(0, nrow = simu, ncol = edades*compart)
  for(i in seq_len(ncol(parametros))){
    Human <- as.double(parametros[-(1),i])
    # fit simple exponential smoothing and forecast
    tasaMuerteiFuturo <- forecast(HoltWinters(Human, beta=FALSE, gamma=FALSE), h=9)
    # the level forecast is flat, so mean[1] is reused for every year
    # (vectorized column fill replaces the original per-row loop)
    tasaMuerteTotales[,i] <- tasaMuerteiFuturo$mean[1]
  }
  tasaMuerteTotales
}
# Materialize the death-rate forecast once, before solving the ODEs.
tasaMuerteTotales <- generarTasasMuerte()
# Initial state of the ODE system: starting population of every
# compartment/age (first row of `parametros`), plus zeroed accumulators for
# vaccinated people, detected progressions, costs, effectiveness and incidence.
Humano <- as.numeric(parametros[1, ])
vacunadosHombres <- numeric(edades)
vacunadosMujeres <- numeric(edades)
progresionesDetectadas <- numeric(edades * nrow(vectorLP))
costoVacunacion <- 0
costoPrimaryIni <- 0
costoTriageIni <- 0
costoFollowUpIni <- 0
efectividadIni <- 0
incidenciaIni <- numeric(nrow(vectorIncidence))
#--- Birth-rate forecasting ---------------------------------------------------
# Forecast age-specific fertility rates for the whole simulation horizon.
# Same approach as generarTasasMuerte(): each column of `nacimientos` is
# fitted with simple exponential smoothing (HoltWinters, beta/gamma disabled)
# and the flat one-step-ahead forecast is held constant for every year.
#
# Reads the globals `nacimientos`, `simu`.
# Returns: a `simu` x ncol(nacimientos) numeric matrix of birth rates.
generarTasasNacimiento <- function(){
  tasaNacimientosTotales <- matrix(0, nrow = simu, ncol = ncol(nacimientos))
  for(i in seq_len(ncol(nacimientos))){
    nac <- as.numeric(nacimientos[,i])
    tasaNacimientoiFuturo <- forecast(HoltWinters(nac, beta=FALSE, gamma=FALSE), h=9)
    # flat forecast reused for every year (vectorized column fill)
    tasaNacimientosTotales[,i] <- tasaNacimientoiFuturo$mean[1]
  }
  tasaNacimientosTotales
}
# Materialize the fertility-rate forecast once, before solving the ODEs.
tasaNacimientosTotales<-generarTasasNacimiento()
#---- Solve the differential equations ---------------------------------------------------
data <- function(vacRangeFemaleIni,vacRangeFemaleEnd,vacRangeMaleIni,vacRangeMaleEnd, vacYearsFemaleIni,vacYearsFemaleEnd, vacYearsMaleIni,vacYearsMaleEnd, vacSex, vacPorcentageFemale, vacPorcentageMale, primaryTest, iniPrimaryTest, maxPrimaryTest, stepPrimaryTest, triageTest, iniTriage, maxTriage, stepTriage,followUp, timeFollowUp){
a = 50;
nacimientosTotales = 0;
#Porcentaje de vacunación
vacMujeres = matrix(0, ncol = edades , nrow = 50)
vacHombres = matrix(0, ncol = edades , nrow = 50)
#Edades de vacunación
edadesVacFemale <- seq(vacRangeFemaleIni,vacRangeFemaleEnd,by=1)
edadesVacMale <- seq(vacRangeMaleIni,vacRangeMaleEnd,by=1)
#Años de Vacunación
yearsVacFemale <- seq(vacYearsFemaleIni,vacYearsFemaleEnd,by=1)
yearsVacMale <- seq(vacYearsMaleIni,vacYearsMaleEnd,by=1)
if(length(vacSex) == 2){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacFemale && i %in% yearsVacFemale){
vacMujeres[i,j] = vacPorcentageFemale
}
if(j %in% edadesVacMale && i %in% yearsVacMale){
vacHombres[i,j] = vacPorcentageMale
}
}
}
}else if(length(vacSex) == 1 && vacSex == "Mujeres"){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacFemale && i %in% yearsVacFemale){
vacMujeres[i,j] = vacPorcentageFemale
}
}
}
}else if(length(vacSex) == 1 && vacSex == "Hombres"){
for(i in 1:50){
for(j in 1:edades){
if(j %in% edadesVacMale && i %in% yearsVacMale){
vacHombres[i,j] = vacPorcentageMale
}
}
}
}
#----------Vectores para screening-------------------
xij <- rep(0,times=edades)
triageij <- rep(0,times=edades)
folowUpHay <- 0
seguimiento <- followUp
sensibilidadPrimary = 0
sensibilidadTriage = 0
sensibilidadFollowUp = 0
costoPrimary = 0
costoTriage = 0
costoFollowUp = 0
contadorPrimi2 = 0
contadorPrimi = 1
if(primaryTest != "None"){
for(i in iniPrimaryTest:maxPrimaryTest){
contadorPrimi2 = contadorPrimi2+1
xij[i] = contadorPrimi
contadorPrimi = 0
if(contadorPrimi2%%stepPrimaryTest == 0){
contadorPrimi = 1
}
}
contadorPrimi2 = 0
contadorPrimi = 1
if(triageTest != "None"){
for(i in iniTriage:maxTriage){
contadorPrimi2 = contadorPrimi2+1
triageij[i] = contadorPrimi
contadorPrimi = 0
if(contadorPrimi2%%stepTriage == 0){
contadorPrimi = 1
}
}
}
}
for(i in 1:(length(sensibilidadPrubeas)/3)){
if(primaryTest != "None" && primaryTest == sensibilidadPrubeas[i,1]){
sensibilidadPrimary = as.numeric(sensibilidadPrubeas[i,2])
costoPrimary = as.numeric(sensibilidadPrubeas[i,3])
}
if(triageTest != "None" && triageTest == sensibilidadPrubeas[i,1]){
sensibilidadTriage = as.numeric(sensibilidadPrubeas[i,2])
costoTriage = as.numeric(sensibilidadPrubeas[i,3])
}
if(followUp != "None" && followUp == sensibilidadPrubeas[i,1]){
sensibilidadFollowUp = as.numeric(sensibilidadPrubeas[i,2])
costoFollowUp = as.numeric(sensibilidadPrubeas[i,3])
folowUpHay = 0
}
}
#---Generando vector de diferenciales--------
dH<-rep(0,times=ncol(parametros))
dvH<-rep(0,times=edades)
dvM<-rep(0,times=edades)
dvPD<-rep(0,times=edades*nrow(vectorLP))
dCostoVacunacion = 0
dCostoPrimary = 0
dCostoTriage = 0
dCostoFollowUp = 0
dEfectividad = 0
dIncidencia= rep(0, times = nrow(vectorIncidence))
vectorNoProgressNoSusc <- seq(1,transM, by = 1)[-c(as.numeric(vectorLP[,1]))]
vectorNoProgressNoSusc <- vectorNoProgressNoSusc[-c(as.numeric(vectorS[,1]))]
vectorProgress <-seq(1,transM, by = 1)[c(as.numeric(vectorLP[,1]))]
vectorSusc <- seq(1,transM, by = 1)[c(as.numeric(vectorS[,1]))]
#-- Escribiendo ecuaciones diferenciales-------------
poblacion <- function(t, y, parameters) {
with(as.list(y), {
#Matrices de probabilidad
auxiliarCostoVacuna = 0
auxiliarCostoPrimary = 0
auxiliarCostoTriage = 0
auxiliarCostoFollowUp = 0
auxiliarEfectividad = 0
auxiliarIncidence = rep(0, times = nrow(vectorIncidence))
mujeresTotales <- sum(y[((edades*compart/2)+1):(edades*2*transM)])+ sum(y[(transM*2*edades+edades+1):(transM*2*edades+edades*2)]) + sum(y[(compart*edades+edades*2+1):(compart*edades+edades*2+nrow(vectorLP)*edades)])
hombresTotales <- sum(y[1:(edades*compart/2)]) + sum(y[(transM*2*edades+1):(transM*2*edades+edades)])
mujeresTotalesInfectadas <- 0
hombresTotalesInfectados <- 0
for(k in as.numeric(vectorILPC[,1])){
for(l in ((edades*compart/2)+((k-1)*edades)+1):((edades*compart/2)+((k-1)*edades)+edades)){
mujeresTotalesInfectadas = mujeresTotalesInfectadas + y[l]
}
}
mujeresTotalesInfectadas = mujeresTotalesInfectadas + sum(y[(compart*edades+edades*2+1):(compart*edades+edades*2+nrow(vectorLP)*edades)])
for(k in as.numeric(vectorILPC[,1])){
for(l in (((k-1)*edades)+1):(((k-1)*edades)+edades)){
hombresTotalesInfectados = hombresTotalesInfectados + y[l]
}
}
newTransH <- transicionesH
contadorTrans1 <- 0
contadorTransi <- 0
#Matriz de Hombres:
for(i in as.numeric(vectorSPI[,1])){
contadorTransi = contadorTransi + 1
seqT <- seq(((as.numeric(parejasSexualesH[1,1])-1)*(transM+1))+1+i, 1+(transM+1)*edades ,by=transM+1)
contadorTrans1 = 0
for(j in seqT){
contadorTrans1 = contadorTrans1 +1
newTransH[j,as.numeric(vectorI[,1])]= 1-exp(-(mujeresTotalesInfectadas/mujeresTotales)*as.numeric(parejasSexualesH[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,3])*1)
newTransH[j,i]= 1-(1-exp(-(mujeresTotalesInfectadas/mujeresTotales)*as.numeric(parejasSexualesH[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,3])*1))
}
}
newTransM <- transicionesM
contadorTrans1 <- 0
contadorTransi <- 0
#Matriz de Mujeres:
for(i in as.numeric(vectorSPI[,1])){
contadorTransi = contadorTransi + 1
seqT <- seq(((as.numeric(parejasSexualesM[1,1])-1)*(transM+1))+1+i, 1+(transM+1)*edades ,by=transM+1)
contadorTrans1 = 0
for(j in seqT){
contadorTrans1 = contadorTrans1 +1
newTransM[j,as.numeric(vectorI[,1])]= 1-exp(-(hombresTotalesInfectados/hombresTotales)*as.numeric(parejasSexualesM[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,4])*te)
newTransM[j,i]= 1-(1-exp(-(hombresTotalesInfectados/hombresTotales)*as.numeric(parejasSexualesM[contadorTrans1,2])*as.numeric(vectorSPI[contadorTransi,4])*te))
}
}
#contador para fertilidad
contador3 = 0
#---------Generando nacimientos------------
for(l in seq((edades*compart/2),(edades*(compart-1)),by=edades)){
for(k in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[l+k]*tasaNacimientosTotales[t,contador3]
}
}
#Generando nacimientos para las vacunadas
contador3 = 0
for(w in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[transM*2*edades+edades+w]*tasaNacimientosTotales[t,contador3]
}
#Generando nacimiento para las detectadas
contador3 = 0
conti = 0
for(l in 1:nrow(vectorLP)){
conti = conti +1
for(w in nacDesde:nacHasta){
contador3 = contador3 + 1
nacimientosTotales = nacimientosTotales + y[transM*2*edades+edades*2+(conti-1)*edades + w]*tasaNacimientosTotales[t,contador3]
}
}
#contador lleva la cuenta de cuál compartimiento biológico es
contador=0
#------------------Ecuaciones Humano-------------------------------------------------------------
# el primer for recorre cada número de edad (ej edad=3 entonces 0,3,6,9)
for(j in seq(0,edades*(compart-1),by=edades)){
contador = contador +1
#---Generating the differential equations per biological stage
if(any(contador == as.numeric(vectorS[,1]))){
dH[1+j] = (1-probMujer)*(nacimientosTotales)-((tasaMuerteTotales[t,1+j]+muerteBiolo[contador,1])*y[1+j])-vacHombres[t,1]*y[1+j] -te*(y[1+j]-min(1,(tasaMuerteTotales[t,1+j]+muerteBiolo[contador,1]+vacHombres[t,1]))*y[1+j])
dvH[1] = vacHombres[t,1]*y[1+j] - tasaMuerteTotales[t,1+j]*y[transM*2*edades+1]- te*(y[transM*2*edades+1]-(tasaMuerteTotales[t,1+j]*y[transM*2*edades+1]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,1]*y[1+j]
}else if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[1+j] = (probMujer)*(nacimientosTotales)-((tasaMuerteTotales[t,1+j]+muerteBiolo[contador-transM,2])*y[1+j]) -vacMujeres[t,1]*y[1+j] -te*(y[1+j]-min(1,(tasaMuerteTotales[t,1+j]+muerteBiolo[contador-transM,2]+vacMujeres[t,1]))*y[1+j])
dvM[1] = vacMujeres[t,1]*y[1+j] - tasaMuerteTotales[t,1+j]*y[transM*2*edades+edades+1]- te*(y[transM*2*edades+edades+1]-(tasaMuerteTotales[t,1+j]*y[transM*2*edades+edades+1]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,1]*y[1+j]
}else{
dH[1+j]=0
}
#----Ecuaciones generales rangos medios de edad
if(edades >2){
if(contador <= compart/2){
for (i in 2:(edades-1)){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState <- seq(0+i-1,edades*(compart/2),by=edades)
if(any(contador == as.numeric(vectorS[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]-vacHombres[t,i-1])))) -vacHombres[t,i]*y[i+j] -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1]+vacHombres[t,i])*y[i+j])
dvH[i] = te*(y[transM*2*edades+i-1]-(tasaMuerteTotales[t,i-1]*y[transM*2*edades+i-1])) +vacHombres[t,i]*y[i+j] -tasaMuerteTotales[t,i]*y[transM*2*edades+i]- te*(y[transM*2*edades+i]-(tasaMuerteTotales[t,i]*y[transM*2*edades+i]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,i]*y[i+j]
}else{
dH[i+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador,1]))*y[i+j])
}
}
}else{ #acá empiezan las mujeres
for (i in 2:(edades-1)){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seqDetected <- seq(edades*compart+edades*2+ i-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +i-1,by=edades)
if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))-vacMujeres[t,i]*y[i+j] + te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2]))))-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]+vacMujeres[t,i])*y[i+j])
#Vacunas
dvM[i] = te*(y[transM*2*edades+edades+i-1]-tasaMuerteTotales[t,i-1]*y[transM*2*edades+edades+i-1]) + vacMujeres[t,i]*y[i+j] - tasaMuerteTotales[t,i]*y[transM*2*edades+edades+i]-te*(y[transM*2*edades+edades+i]-(tasaMuerteTotales[t,i]*y[transM*2*edades+edades+i]))
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,i]*y[i+j]
}else if(any(contador == transM + as.numeric(vectorPI[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j])+
te*xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(1-triageij[i-1])*sum(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
te*xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(triageij[i-1]*sensibilidadTriage*triageij[i-1])*sum(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
te*(1-xij[i-1])*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
te*(xij[i-1])*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*xij[i-1]*triageij[i-1]*(1-triageij[i-1]*sensibilidadTriage))*sum(as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])*(y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2])))+
((timeFollowUp/12))*sensibilidadFollowUp*folowUpHay*sum(y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))+
((timeFollowUp/12))*folowUpHay*(1-sensibilidadFollowUp*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))+(1-((timeFollowUp/12)))*folowUpHay*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}else if(any(contador == transM + as.numeric(vectorI[,1]))){
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j])+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*(triageij[i-1]*(1-sensibilidadTriage)))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-sensibilidadFollowUp*folowUpHay))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
((1-(timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))+((1-((timeFollowUp/12)))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}else if(any(contador == transM + as.numeric(vectorLP[,1]))){
vectorProgressc <-seq(1,transM, by = 1)[c(as.numeric(vectorC[,1]))]
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]))*y[i+j])+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*(triageij[i-1])*(1-sensibilidadTriage)*(1-folowUpHay))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))
auxiliarCostoPrimary = auxiliarCostoPrimary + xij[i]*as.numeric(coberturaTamizaje[i,2])*y[i+j]
auxiliarCostoTriage = auxiliarCostoPrimary + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*triageij[i]*y[i+j]
auxiliarCostoFollowUp = auxiliarCostoFollowUp + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*triageij[i]*sensibilidadTriage*y[i+j]
auxiliarEfectividad = auxiliarEfectividad + xij[i]*as.numeric(coberturaTamizaje[i,2])*sensibilidadPrimary*y[i+j]*as.numeric(coberturaTamizaje[i,4])
auxiliarIncidence[contador-transM - nrow(vectorIncidence)] = auxiliarIncidence[contador-transM - nrow(vectorIncidence)] + te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-transM,2])))) +
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])) +
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*(as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary)*(triageij[i-1])*(1-sensibilidadTriage)*(1-folowUpHay))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1]))))
}else if(any(contador == transM + as.numeric(vectorC[,1]))){
vectorProgressc <-seq(1,transM, by = 1)[c(as.numeric(vectorC[,1]))]
dH[i+j] = te*(sum(as.numeric(newTransM[seq1[vectorProgressc],contador-compart/2])*(y[seqState[vectorProgressc]]*(1-tasaMuerteTotales[t,seqState[vectorProgressc]]-muerteBiolo[contador-transM,2])))) -(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2])*y[i+j] -te*(y[i+j]-min(1,(tasaMuerteTotales[t,i+j]+muerteBiolo[contador-transM,2]))*y[i+j])+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
te*(((timeFollowUp/12))*folowUpHay*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,i-1])))+ te*((1-(timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2])))
auxiliarIncidence[contador-transM - nrow(vectorIncidence)] = auxiliarIncidence[contador-transM - nrow(vectorIncidence)] + te*(sum(as.numeric(newTransM[seq1[vectorProgressc],contador-compart/2])*(y[seqState[vectorProgressc]]*(1-tasaMuerteTotales[t,seqState[vectorProgressc]]-muerteBiolo[contador-transM,2]))))+
te*(xij[i-1]*(1-as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(1-xij[i-1])*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))+
te*(xij[i-1]*as.numeric(coberturaTamizaje[i-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage))*sum((y[seqState[vectorProgress]]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
te*(((timeFollowUp/12))*(1-folowUpHay*sensibilidadFollowUp))*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
#te*(1-((input$timeFollowUp/12))*folowUpHay)*sum((y[seqDetected]*(1-tasaMuerteTotales[t,seqState[vectorProgress]]-muerteBiolo[contador-transM,2]))*as.numeric(newTransM[seq1[vectorProgress],contador-compart/2]))
}
}
}
}
i = i+1
#Ecuación último rango de edad
if (contador <= compart/2){
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState <- seq(0+i-1,edades*(compart/2),by=edades)
i = i+1
seq2 <- seq(((i-2)*(transH))+i, ((i-2)*(transH))+i+transH-1,by=1)
seqState2 <- seq(0+i-1,edades*(compart/2),by=edades)
#Ecuaciones
if(any(contador == as.numeric(vectorS[,1]))){
dH[edades+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]-vacHombres[t,i-1]))))+
te*(sum(as.numeric(newTransH[seq2,contador])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador,1])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,1]+vacHombres[t,edades]))*y[edades+j])-vacHombres[t,edades]*y[edades+j]
dvH[edades] = te*(y[transM*2*edades+edades-1]-(tasaMuerteTotales[t,edades-1]*y[transM*2*edades+edades-1]))+ vacHombres[t,edades]*y[edades+j] - tasaMuerteTotales[t,edades]*y[transM*2*edades+edades]
auxiliarCostoVacuna = auxiliarCostoVacuna + vacHombres[t,edades]*y[edades+j]
}else{
dH[edades+j] = te*(sum(as.numeric(newTransH[seq1,contador])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador,1]))))+
te*(sum(as.numeric(newTransH[seq2,contador])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador,1])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador,1]))*y[edades+j])
}
} else{
#secuencias para las transiciones
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
i = i+1
seq2 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
seqState2 <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seqDetected <- seq(edades*compart+edades*2+ edades-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +edades-1,by=edades)
if(any(contador == transM + as.numeric(vectorS[,1]))){
dH[edades+j] = -vacMujeres[t,edades]*y[edades+j] +te*(sum(as.numeric(newTransM[seq1,contador-compart/2])*(y[seqState]*(1-tasaMuerteTotales[t,seqState]-muerteBiolo[contador-compart/2,2] -vacMujeres[t,i-1]))))+
te*(sum(as.numeric(newTransM[seq2,contador-compart/2])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador-compart/2,2])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-transM,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-compart/2,1]+vacMujeres[t,edades]))*y[edades+j])
dvM[edades] = te*(y[transM*2*edades+edades+edades-1]-tasaMuerteTotales[t,edades-1]*y[transM*2*edades+edades+edades-1]) + vacMujeres[t,edades]*y[edades+j] - tasaMuerteTotales[t,edades]*y[transM*2*edades+edades+edades]-te*(y[transM*2*edades+edades+edades]-(tasaMuerteTotales[t,edades])*y[transM*2*edades+edades+edades])
auxiliarCostoVacuna = auxiliarCostoVacuna + vacMujeres[t,edades]*y[edades+j]
}else{
dH[edades+j] = te*(sum(as.numeric(newTransM[seq1[vectorNoProgressNoSusc],contador-compart/2])*(y[seqState[vectorNoProgressNoSusc]]*(1-tasaMuerteTotales[t,seqState[vectorNoProgressNoSusc]]-muerteBiolo[contador-compart/2,2]))))+
te*(sum(as.numeric(newTransM[seq2,contador-compart/2])*(y[seqState2]*(1-tasaMuerteTotales[t,seqState2]-muerteBiolo[contador-compart/2,2])))) -
(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-transM,2])*y[edades+j]-
te*(y[edades+j]-min(1,(tasaMuerteTotales[t,edades+j]+muerteBiolo[contador-compart/2,1]))*y[edades+j])+
te*(sum(as.numeric(newTransM[seq1[vectorSusc],contador-compart/2])*(y[seqState[vectorSusc]]*(1-tasaMuerteTotales[t,seqState[vectorSusc]]-muerteBiolo[contador-transM,2]-vacMujeres[t,edades-1]))))
}
}
}# cierra el ciclo en los j
dCostoVacunacion = auxiliarCostoVacuna
dCostoPrimary = auxiliarCostoPrimary
dCostoTriage = auxiliarCostoTriage
dCostoFollowUp = auxiliarCostoFollowUp
dEfectividad = auxiliarEfectividad
for(i in 1:nrow(vectorIncidence)){
dIncidencia[i] = auxiliarIncidence[i]
}
ayudaLP = 1
for(j in 1:nrow(vectorLP)){
dvPD[1+edades*(j-1)] = 0
for(i in 2:edades){
seqDetected <- seq(edades*compart+edades*2+ i-1, edades*compart+edades*2+edades*(nrow(vectorLP)-1) +i-1,by=edades)
seqState <- seq(edades*(compart/2)+i-1,edades*(compart),by=edades)
seq1 <- seq(((i-2)*(transM))+i, ((i-2)*(transM))+i+transH-1,by=1)
dvPD[i+edades*(j-1)] = te*(((timeFollowUp/12)))*sum((y[seqDetected])*as.numeric(newTransM[seq1[vectorProgress],as.numeric(vectorLP[,1])]))+
(te*(1-(tasaMuerteTotales[t,i+(as.numeric(vectorLP[j,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))*sum((y[seqState[vectorProgress]])*as.numeric(newTransM[seq1[vectorProgress],as.numeric(vectorLP[j,1])])))*(xij[i-1]*as.numeric(coberturaTamizaje[edades-1,2])*sensibilidadPrimary*triageij[i-1]*(1-sensibilidadTriage)*folowUpHay)-
te*(((timeFollowUp/12))*folowUpHay)*y[compart*edades+edades*2+i+edades*(j-1)]*(1-(tasaMuerteTotales[t,i+as.numeric(vectorLP[j,1])*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))-
te*y[compart*edades+edades*2+i+edades*(j-1)]*((tasaMuerteTotales[t,i+(as.numeric(vectorLP[j,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[j,1]),2]))+te*((1-((timeFollowUp/12)))*folowUpHay)*y[compart*edades+edades*2+i+edades*(j-1)]*(1-(tasaMuerteTotales[t,i+(as.numeric(vectorLP[ayudaLP,1])-1)*edades]+muerteBiolo[as.numeric(vectorLP[ayudaLP,1]),2]))
}
}
setWinProgressBar(pb, t,label=paste(round(t/50*100, 0), "% simulation that has been runed" ))
list(c(dH,dvH,dvM, dvPD,dCostoVacunacion,dCostoPrimary,dCostoTriage,dCostoFollowUp,dEfectividad,dIncidencia))
})
}
parameters <- c(te ,edades)
stateInicial<- c(Humano,vacunadosHombres,vacunadosMujeres,progresionesDetectadas,costoVacunacion,costoPrimaryIni,costoTriageIni,costoFollowUpIni,efectividadIni,incidenciaIni)
times <- seq(1, 50, by = 1)
pb <- winProgressBar(title = "Progress Bar for Simulation", label = " ",min = 0, max = a, width = 300)
out <- deSolve::ode(y = stateInicial, times = times, func = poblacion, parms = parameters)
close(pb)
#--------------Sacando Medidas de Desempeño----------------------------------
#matriz donde guarda la solución
bioloGeneral = as.matrix(read.xlsx2("ParametrosGenerales.xlsx",5,dec =",", as.data.frame = TRUE))
nombresBiolo <- bioloGeneral[,1]
nombresFile <- vector(length = edades*length(nombresBiolo)*2)
nombresFileVacunasH <- vector(length=edades)
nombresFileVacunasM <- vector(length=edades)
nombresFileIncidence <- vector(length = nrow(vectorIncidence))
seqNombre <- seq(1,edades*length(nombresBiolo)*2,edades)
seqEdad <- seq(1,edades,1)
ayuda1 <- 0
contador <- 1
contador2 <- 1
for(i in 1:length(nombresFile)){
if(contador == (edades+1)){
contador = 1
contador2 = contador2 +1
}
ayuda1 = ayuda1 +1
if(i <= (length(nombresFile)/2)){
nombresFile[ayuda1] = paste0("M_",nombresBiolo[contador2],"_", contador)
}else{
nombresFile[ayuda1] = paste0("F_",nombresBiolo[contador2-transM],"_",contador)
}
contador = contador +1
}
for(i in 1:edades){
nombresFileVacunasH[i] = paste0("M_Vaccines_",i)
}
for(i in 1:edades){
nombresFileVacunasM[i] = paste0("F_Vaccines_",i)
}
for(i in 1:nrow(vectorIncidence)){
nombresFileIncidence[i] = i
}
dat <- matrix(rep(0),nrow=50,ncol=(ncol(parametros)+edades*2 + 1 + 3 + 1 + nrow(vectorIncidence)))
colnames(dat) <- c(nombresFile,nombresFileVacunasH,nombresFileVacunasM,"Vaccination Cost", "Primary Test Cost", "Triage Test Cost", "Follow Up Test Cost", "Efetivness",nombresFileIncidence)
#Por edad
for(i in 1:(ncol(parametros)+edades*2)){
dat[,i]=out[,i+1]
}
#Agregando los Detectados a los Progress
contadorPL = 0
for(j in 1:nrow(vectorLP)){
contadorPL = contadorPL + 1
for (i in 1:edades){
dat[,edades*compart/2+edades*(as.numeric(vectorLP[j,1])-1) + i] = dat[,edades*compart/2+edades*(as.numeric(vectorLP[j,1])-1) + i] + out[,1+edades*compart+edades*2+edades*(contadorPL-1) + i]
}
}
#Arreglando los datos de los costos y la efetividad
dat[1,ncol(parametros)+edades*2 + 1] = out[1,edades*(compart+2+nrow(vectorLP))+2]*costoVacuna
dat[1,ncol(parametros)+edades*2 + 2] = out[1,edades*(compart+2+nrow(vectorLP))+3]*costoPrimary
dat[1,ncol(parametros)+edades*2 + 3] = out[1,edades*(compart+2+nrow(vectorLP))+4]*costoTriage
dat[1,ncol(parametros)+edades*2 + 4] = out[1,edades*(compart+2+nrow(vectorLP))+5]*costoFollowUp
dat[1,ncol(parametros)+edades*2 + 5] = out[1,edades*(compart+2+nrow(vectorLP))+6]
for(i in 2:50){
dat[i,ncol(parametros)+edades*2 + 1] = (out[i,edades*(compart+2+nrow(vectorLP))+2]*costoVacuna - sum(dat[1:(i-1),edades*(compart+2)+1]))
dat[i,ncol(parametros)+edades*2 + 2] = (out[i,edades*(compart+2+nrow(vectorLP))+3]*costoPrimary - sum(dat[1:(i-1),edades*(compart+2)+2]))
dat[i,ncol(parametros)+edades*2 + 3] = (out[i,edades*(compart+2+nrow(vectorLP))+4]*costoTriage - sum(dat[1:(i-1),edades*(compart+2)+3]))
dat[i,ncol(parametros)+edades*2 + 4] = (out[i,edades*(compart+2+nrow(vectorLP))+5]*costoFollowUp - sum(dat[1:(i-1),edades*(compart+2)+4]))
dat[i,ncol(parametros)+edades*2 + 5] = (out[i,edades*(compart+2+nrow(vectorLP))+6] - sum(dat[1:(i-1),edades*(compart+2)+5]))
}
#Agregando la incidencia
for(i in 1:nrow(vectorIncidence)){
dat[1,ncol(parametros)+edades*2 + 5+ i] = out[1,edades*(compart+2+nrow(vectorLP))+6 + i]
}
for(i in 1:nrow(vectorIncidence)){
for(j in 2:50){
dat[j,ncol(parametros)+edades*2 + 5+ i] = (out[j,edades*(compart+2+nrow(vectorLP))+6+i] - sum(dat[1:(j-1),edades*(compart+2)+5+i]))
}
}
return(dat)
}
# data(vacRangeFemaleIni,vacRangeFemaleEnd,vacRangeMaleIni,vacRangeMaleEnd, vacYearsFemaleIni,vacYearsFemaleEnd, vacYearsMaleIni,vacYearsMaleEnd, vacSex, vacPorcentageFemale, vacPorcentageMale, primaryTest, iniPrimaryTest, maxPrimaryTest, stepPrimaryTest, triageTest, iniTriage, maxTriage, stepTriage,followUp, timeFollowUp)
dat<- data(15,30,15,30,5,10,5,10,c("Hombres","Mujeres"),0.15,0.15,"HPV-DNA", 25,65,5,"Citology",25,65,5,"Citology",6)
#----------------------------Gráficas-----------------------------------------------------
buildPlot <- function(sexoUsuario, biologicalCompUsuario, edadUsuario, num){
#Gráfica que muestra el total de la población para un tipo espec�fico de compartimiento
#T�tulo de la gráfica
sexoIndice <- 0
#entra al primer if si no está seleccionado el sum
if(length(sexoUsuario)==2 && ((sexoUsuario[1]=="Mujeres"||sexoUsuario[1]=="Hombres")&&(sexoUsuario[2]=="Mujeres"||sexoUsuario[2]=="Hombres"))){
titulo <- paste0("Number of Males and Females")
sexoIndice <- 3
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Hombres") || (length(sexoUsuario)==2 && sexoUsuario[1]=="Hombres" && sexoUsuario[2]=="sumSexo")){
titulo <- paste0("Number of Males")
sexoIndice <- 2
}else if(length(sexoUsuario) == 3){ #entra a este else if si están los tres
titulo <- paste0("Total number of Males and Females")
sexoIndice <- 4
}else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Mujeres") || (length(sexoUsuario)==2 && (sexoUsuario[1]=="Mujeres" && sexoUsuario[2]=="sumSexo"))){
titulo <- paste0("Number of Females")
sexoIndice <- 1
}else if(length(sexoUsuario)==2 && sexoUsuario[2]=="Hombres" && sexoUsuario[1]=="sumSexo"){
titulo <- paste0("Number of Males")
sexoIndice <- 2
}
#Leer el input de biologicalCompUsuario
nombresBiolo <- (bioloGeneral[,1])
nombresBiolo[transM+1] <- "Vaccinated"
nombresBiolo[transM+2] <- "Sum"
inputBiolo <- vector()
filasBiolo <- 0
for(j in 1:length(biologicalCompUsuario)){
for(i in 1:(transM+2)){
if(biologicalCompUsuario[j] == nombresBiolo[i]){
filasBiolo= filasBiolo + 1
inputBiolo[filasBiolo] = i
}
}
}
ap <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("year","value", "biologicalCompartment","gender"))
ayuda <-0
#Sacando el rango de edad
rangoEdad <- seq(edadUsuario[1],edadUsuario[2],by = 1)
#¿Sum en el biological compartment?
sumBiolo <- FALSE
hastaBio <- length(biologicalCompUsuario)
bioloCompSum <- inputBiolo
for(m in 1:length(bioloCompSum)){
if(bioloCompSum[m]==(transM + 2)){
sumBiolo <- TRUE
hastaBio <- length(biologicalCompUsuario)-1
}
}
#Sacar los datos que necesito del resultado de las ecuaciones diferenciales
for(k in 1:hastaBio) {
totalTipo = seq(1,num,by=1)
totalTipo1 = seq(1,num,by=1)
for(i in 1:num){
if(sexoIndice==2){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}else if(sexoIndice==1){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades]
}
}
}else if(sexoIndice==4){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]+
dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades] + dat[i,j+2*transM*edades]
}
}
}else if (sexoIndice ==3){
if(inputBiolo[k] !=(transM+1)){
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
}
}else{
for(j in rangoEdad){
totalTipo1[i] = totalTipo1[i] + dat[i,j+2*transM*edades+edades]
}
for(j in rangoEdad){
totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
}
}
}
totalTipo[i] = round(totalTipo[i]-i,digits = 2)
totalTipo1[i] = round(totalTipo1[i]-i, digits = 2)
}
ayuda <- nrow(ap)
nombre <- nombresBiolo[inputBiolo[k]]
#Guardando los valores en el data frame para graficar
if(sexoIndice==3){
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Men"
}
ayuda <- nrow(ap)
for(i in 1:num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo1[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = "Women"
}
}else{
for(i in 1:
num){
ap[i+ayuda,1] = i
ap[i+ayuda,2] = totalTipo[i]
ap[i+ayuda,3] = nombre
ap[i+ayuda,4] = ""
}
}
}
if(sumBiolo==FALSE){
if(sexoIndice != 3){
ggideal_point <- ggplot(ap, aes(x=year,y=value/1000000,colour=biologicalCompartment))
}else{
ggideal_point <- ggplot(ap, aes(x=year,y=value/1000000,group=interaction(biologicalCompartment,gender),colour=biologicalCompartment,linetype=factor(gender)))
}
}else if (sumBiolo == TRUE){
if(sexoIndice != 3){
ap2 <- setNames(data.frame(matrix(ncol = 2, nrow = num)), c("year","value"))
ap2[,2]<-0
for(l in 1:nrow(ap)){
ap2[ap[l,1],1]=ap[l,1]
ap2[ap[l,1],2]=ap2[ap[l,1],2]+ap[l,2]
}
ggideal_point <- ggplot(ap2, aes(x=year,y=value/1000000, colour="#C9A086"))
}else{
ap2 <- setNames(data.frame(matrix(ncol = 3, nrow = 2*num)), c("year","value","gender"))
ap2[,2]<-0
for(l in 1:nrow(ap)){
if(ap[l,4]=="Men"){
ap2[ap[l,1],1]=ap[l,1]
ap2[ap[l,1],2]=ap2[ap[l,1],2]+ap[l,2]
ap2[ap[l,1],3]="Men"
}else if(ap[l,4]=="Women"){
ap2[ap[l,1]+num,1]=ap[l,1]
ap2[ap[l,1]+num,2]=ap2[ap[l,1]+num,2]+ap[l,2]
ap2[ap[l,1]+num,3]="Women"
}
}
ggideal_point <- ggplot(ap2, aes(x=year,y=value/1000000,colour=gender))
}
}
ggideal_point <- ggideal_point + geom_line() + geom_point(shape=5)+
labs(x = "Year", y= "Unit: Millions", title = titulo) +
scale_colour_hue("",l = 70, c = 150) +
theme(legend.title=element_blank(),legend.text=element_text(size=8),axis.title.y = element_text(size=12), legend.position = c(0.8, 0.2),axis.text.x = element_text(vjust=0.5, size=10),axis.text.y = element_text(vjust=0.5, size=10),axis.title.x = element_text(size = 12)) +
background_grid(major = "xy", minor = "xy",colour.major = "grey90",colour.minor = "grey90") +
panel_border() + scale_x_continuous(breaks=seq(1, num, 1))
#Formato de los ejes
f1 <- list(size = 13,color = "grey")
f2 <- list(size = 11,color = "black")
al <- list(titlefont = f1,showticklabels = TRUE,tickfont = f2,exponentformat = "E")
#Margenes
m <- list(l = 50,r = 110,b = 100,t = 50,pad = 4,autoexpand = FALSE)
# Convert ggplot object to plotly
gg <- plotly_build(ggideal_point)%>%layout(autosize = FALSE,width = 620, height = 450,margin=m,xaxis = al, yaxis = al)
gg
}
# Driver call: plot both genders, every biological compartment in
# bioloGeneral, ages 5-35, over 30 years (buildPlot is defined earlier).
buildPlot(c("Mujeres","Hombres"), bioloGeneral[,1],c(5,35),30)
#---------------------------------------------Tables----------------------------------------
# Build the table shown in the app: yearly totals of the selected biological
# compartments, summed over the selected age range, optionally split by gender.
#
# sexoUsuario           -- character vector of gender choices ("Mujeres",
#                          "Hombres", "sumSexo").
# biologicalCompUsuario -- character vector of compartment names (may include
#                          "Vaccinated"; "Sum" alone is rejected).
# edadUsuario           -- length-2 vector: min and max age of the range.
# num                   -- number of years (rows of `dat`) to tabulate.
#
# Relies on globals assumed to exist in the app environment: `dat` (matrix of
# ODE results), `edades`, `transM`, `compart`, `bioloGeneral`, and Shiny's
# validate()/need().  Returns a data frame with columns
# Year, Value, "Biological Comp." and, when both genders are shown, Gender.
buildTable <- function(sexoUsuario, biologicalCompUsuario, edadUsuario, num){
  # Data validation
  validate(
    need(length(sexoUsuario)>=2 || sexoUsuario[1] == "Mujeres" || sexoUsuario[1] == "Hombres", 'Please select at least one gender and update the graph'),
    need(biologicalCompUsuario != "Sum", 'Please select at least one biological compartment and update the table'),
    need(edadUsuario[1] >0 && edadUsuario[2] <= edades, 'Please select a valid age range and update the table')
  )
  sexoIndice <- 0
  sexoUsuario  # no-op: bare expression whose value is discarded
  # Encode the gender selection: 1 = women only, 2 = men only,
  # 3 = both kept separate, 4 = both summed together.
  # Enters the first if when "sum" is not selected.
  if(length(sexoUsuario)==2 && ((sexoUsuario[1]=="Mujeres"||sexoUsuario[1]=="Hombres")&&(sexoUsuario[2]=="Mujeres"||sexoUsuario[2]=="Hombres"))){
    sexoIndice <- 3
  }else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Hombres") || (length(sexoUsuario)==2 && sexoUsuario[1]=="Hombres" && sexoUsuario[2]=="sumSexo")){
    sexoIndice <- 2
  }else if(length(sexoUsuario) == 3){ # enters this else if when all three are selected
    sexoIndice <- 4
  }else if((length(sexoUsuario)==1 && sexoUsuario[1]=="Mujeres") || (length(sexoUsuario)==2 && (sexoUsuario[1]=="Mujeres" && sexoUsuario[2]=="sumSexo"))){
    sexoIndice <- 1
  }else if(length(sexoUsuario)==2 && sexoUsuario[2]=="Hombres" && sexoUsuario[1]=="sumSexo"){
    sexoIndice <- 2
  }
  # Showing the genders separately needs an extra Gender column.
  if(sexoIndice == 3){
    ap <- setNames(data.frame(matrix(ncol = 4, nrow = 0)), c("Year","Value", "Biological Comp.","Gender"))
  }else{
    ap <- setNames(data.frame(matrix(ncol = 3, nrow = 0)), c("Year","Value", "Biological Comp."))
  }
  # Read the biologicalCompUsuario input: map selected compartment names to
  # their numeric indices (slot transM+1 = "Vaccinated", slot transM+2 = "Sum").
  nombresBiolo <- (bioloGeneral[,1])
  nombresBiolo[transM+1] <- "Vaccinated"
  nombresBiolo[transM+2] <- "Sum"
  inputBiolo <- vector()
  filasBiolo <- 0
  for(j in 1:length(biologicalCompUsuario)){
    for(i in 1:(transM+2)){
      if(biologicalCompUsuario[j] == nombresBiolo[i]){
        filasBiolo= filasBiolo + 1
        inputBiolo[filasBiolo] = i
      }
    }
  }
  ayuda <-0
  rangoEdad <- seq(edadUsuario[1],edadUsuario[2],by = 1)
  # Is "Sum" selected among the biological compartments?
  sumBiolo <- FALSE
  hastaBio <- length(biologicalCompUsuario)
  bioloCompSum <- inputBiolo
  for(m in 1:length(bioloCompSum)){
    if(bioloCompSum[m]==(transM + 2)){
      sumBiolo <- TRUE
      hastaBio <- length(biologicalCompUsuario)-1  # drop the trailing "Sum" entry
    }
  }
  # Extract the needed data from the differential-equation results.
  # NOTE(review): the column layout of `dat` is inferred from the indexing
  # below (men's compartments first, women's offset by compart*edades/2,
  # vaccinated totals after 2*transM*edades) -- confirm against the solver
  # code that builds `dat`.
  for(k in 1:hastaBio) {
    # totalTipo/totalTipo1 start as 1..num; the `- i` in the rounding step at
    # the bottom of the year loop cancels that offset so sums start at zero.
    totalTipo = seq(1,num,by=1)
    totalTipo1 = seq(1,num,by=1)
    for(i in 1:num){
      if(sexoIndice==2){ # men only
        if(inputBiolo[k] !=(transM+1)){
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
          }
        }else{ # vaccinated compartment
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
          }
        }
      }else if(sexoIndice==1){ # women only
        if(inputBiolo[k] !=(transM+1)){
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
          }
        }else{
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades]
          }
        }
      }else if(sexoIndice==4){ # both genders summed together
        if(inputBiolo[k] !=(transM+1)){
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]+
              dat[i,j+edades*((inputBiolo[k])-1)]
          }
        }else{
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades+edades] + dat[i,j+2*transM*edades]
          }
        }
      }else if (sexoIndice ==3){ # both genders, kept separate (totalTipo1 = women)
        if(inputBiolo[k] !=(transM+1)){
          for(j in rangoEdad){
            totalTipo1[i] = totalTipo1[i] + dat[i,(compart*edades/2)+j+edades*((inputBiolo[k])-1)]
          }
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+edades*((inputBiolo[k])-1)]
          }
        }else{
          for(j in rangoEdad){
            totalTipo1[i] = totalTipo1[i] + dat[i,j+2*transM*edades+edades]
          }
          for(j in rangoEdad){
            totalTipo[i] = totalTipo[i] + dat[i,j+2*transM*edades]
          }
        }
      }
      # Subtract i to undo the 1..num initialisation, then round to 2 decimals.
      totalTipo[i] = round(totalTipo[i]-i,digits = 2)
      totalTipo1[i] = round(totalTipo1[i]-i, digits = 2)
    }
    ayuda <- nrow(ap)  # rows already filled by earlier compartments
    nombre <- nombresBiolo[inputBiolo[k]]
    # Store the values in the data frame for display
    if(sexoIndice==3){ # entered when both genders are shown separately
      for(i in 1:num){
        ap[i+ayuda,1] = i
        ap[i+ayuda,2] = totalTipo[i]
        ap[i+ayuda,3] = nombre
        ap[i+ayuda,4] = "Men"
      }
      ayuda <- nrow(ap)
      for(i in 1:num){
        ap[i+ayuda,1] = i
        ap[i+ayuda,2] = totalTipo1[i]
        ap[i+ayuda,3] = nombre
        ap[i+ayuda,4] = "Women"
      }
    }else{
      for(i in 1:num){
        ap[i+ayuda,1] = i
        ap[i+ayuda,2] = totalTipo[i]
        ap[i+ayuda,3] = nombre
      }
    }
  }
  ap
}
tabla=buildTable(c("Mujeres","Hombres"), bioloGeneral[,1],c(5,35),30)
|
#------------------------------------------------------------------
# Load libraries
#------------------------------------------------------------------
library(forecast)
library(TTR)
library(tseries)
library(urca)
# Set up the working environment
#------------------------------------------------------------------
# NOTE(review): hard-coded absolute path; assumes the project lives at
# /InsightProject.
setwd("/InsightProject")
# Load in the data
#------------------------------------------------------------------
mydata <- read.csv("predictEating.csv") # read in the csv file
# Time Series for Modeling
#------------------------------------------------------------------
#
#myvector<-c(7,10,5,11,16,14,9,12,19,21,51)
# Create My Time Series Model
#------------------------------------------------------------------
myvector <- as.numeric(mydata[["X3"]])
myts <- ts(myvector, start = c(2005, 1), end = c(2015, 1), frequency = 1)
# Double exponential (Holt) smoothing: level + trend, no seasonality.
fit <- HoltWinters(myts, gamma = FALSE)
plot(fit)
# Predict the next two future values.
# BUG FIX: the original three forecast() calls in this script ended with a
# trailing comma -- `forecast(fit, ..., )` -- which R rejects at run time
# with "argument is empty".
forecastplot <- forecast(fit, n.ahead = 2, prediction.interval = TRUE)
forecast(fit, 2)
adf.test(myts)  # augmented Dickey-Fuller stationarity test
# Save a numeric vector as a time series object
#------------------------------------------------------------------
myts <- ts(myvector, start = c(2005, 1), end = c(2014, 1), frequency = 1)
forecast(ets(myts), h = 2)
plot(forecast(ets(myts), h = 2))
# Subset the time series (June 2014 to December 2014)
#------------------------------------------------------------------
myts2 <- window(myts, start = c(2011, 1), end = c(2014, 1))
## Test for stationarity
#-----------------------------------------------------------------
adf.test(myts)
ndiffs(myts)
myts <- diff(log(myts), differences = 1)
# Plot series
#------------------
plot(myts)
# Exponential Models
#------------------------------------------------------------------------
## HoltWinters Forecast
#---------------------------
# Simple Exponential - models level
#--------------------------------------------------------
fit <- HoltWinters(myts, beta = FALSE, gamma = FALSE)
plot(fit)
# Double exponential - models level and trend
#--------------------------------------------------------
fit <- HoltWinters(myts, gamma = FALSE)
plot(fit)
# predict next two future values
forecastplot <- forecast(fit, n.ahead = 2, prediction.interval = TRUE)
forecast(fit, 2)
#plot(forecast(fit, 2))
plot(fit, forecastplot)
myvector <- as.numeric(c(mydata[["X15"]], 3))
myts <- ts(myvector, start = c(2005, 1), end = c(2015, 1), frequency = 1)
adf.test(myts)
ndiffs(myts)
fit <- HoltWinters(myts, gamma = FALSE)
plot(fit)
# predict next two future values
forecastplot <- forecast(fit, n.ahead = 2, prediction.interval = TRUE)
forecast(fit, 2)
forecast(myts)
fit <- ets(myts)
plot(forecast(fit, 1))
forecast(fit, 1)
# Triple exponential - models level, trend, and seasonal components
#--------------------------------------------------------
#fit <- HoltWinters(myts)
#plot(fit)
##-------------------------------------------------------
plotgraph <- HoltWinters(myts, gamma = FALSE)
# predictive accuracy
#--------------------------------------------------------
mytstrain <- ts(myvector, start = c(2005, 1), end = c(2015, 1), frequency = 1)
mytstest <- window(mytstrain, start = c(2013, 1), end = c(2015, 1), frequency = 1)
fittrain <- HoltWinters(mytstrain, beta = FALSE, gamma = FALSE)  # overwritten below
fittrain <- HoltWinters(mytstrain, gamma = FALSE)
accuracy(fittrain, test = mytstest)
#
#
#
# ARIMA Models
#------------------------------------------------------------------------
myvector <- as.numeric(mydata[["X15"]])
# Save a numeric vector as a time series object
#------------------------------------------------------------------
myts <- ts(myvector, start = c(2005, 1), end = c(2014, 1), frequency = 1)
sample <- window(myts, start = c(2011, 1), end = c(2014, 1))
#------------------------------------------------------------------------
ndiffs(myts)
adf.test(myts, alternative = "stationary")
ndiffs(sample)
fit <- auto.arima(myts, seasonal = FALSE)
fit
forecast(fit, 2)
plot(forecast(fit, 2))
myvector <- as.numeric(mydata[["X15"]])
myts <- ts(myvector, start = c(2005, 1), end = c(2014, 1), frequency = 1)
#------------------------------------------------------------------------------
myvector <- as.numeric(c(mydata[["X15"]], 51))
myts <- ts(myvector, start = c(2005, 1), end = c(2015, 1), frequency = 1)
adf.test(myts)
myts <- diff(myts, differences = 4)
adf.test(myts)
library(forecast)  # already attached at the top; harmless repeat
ndiffs(myts)
#Box.test(sample, lag = 1,type="Ljung-Box")
#acf(myts)
# pacf(myts)
# fit an ARIMA model of order P, D, Q
#--------------------------------------------------------
auto.arima(myts, ic = "aic", trace = TRUE)
fittwo <- arima(myts, order = c(0, 1, 0))
# predictive accuracy
#--------------------------------------------------------
accuracy(fittwo)
# predict next 2 observations
#--------------------------------------------------------
forecast(fittwo, 2)
#predict(fittwo,2)
plot(forecast(fittwo, 2))
# Automated forecasting
#--------------------------------------------------------
#
# Automated forecasting using an exponential model
#--------------------------------------------------------
fit <- ets(myts, additive.only = TRUE)
fit
#--------------------------------------------------------
# Automated forecasting using an ARIMA model
#--------------------------------------------------------
fit <- auto.arima(myts)
fit
#--------------------------------------------------------
auto.arima(myts, ic = "aic", trace = TRUE)
myvector <- as.numeric(c(mydata[["X14"]], 9))
myts <- ts(myvector, start = c(2005, 1), end = c(2015, 1), frequency = 1)
# predictive accuracy for ARIMA model
#--------------------------------------------------------
data <- ts(myts, start = c(2005, 1), end = c(2015, 1), frequency = 1)
train <- window(data, end = c(2013, 1), frequency = 1)
test <- window(data, start = c(2014, 1))
## fit a model on training data
aaFit <- auto.arima(train)
plot(aaFit)
## forecast the training model over the testing period
aaPred <- forecast(aaFit, h = length(test))
plot(aaPred)
## extract point forecasts
yhat <- aaPred$mean
## a few error-metric helpers
## mean squared (prediction) error
MSE <- function(y, yhat) {
  mean((y - yhat)^2)
}
## mean absolute (prediction) error
MAE <- function(y, yhat) {
  mean(abs(y - yhat))
}
## mean absolute percentage (prediction) error
MAPE <- function(y, yhat, percent = TRUE) {
  if (percent) {
    100 * mean(abs((y - yhat) / y))
  } else {
    mean(abs((y - yhat) / y))
  }
}
MSE(test, yhat)
MAE(test, yhat)
MAPE(test, yhat)
accuracy(aaPred, data)
## Examine measures for only the forecast
accuracy(yhat, test)
# --------------------------------------------------------------------
# BUG FIX: the separator above was previously a bare run of dashes (not a
# comment); the parser folds such a line into the next expression as chained
# unary minuses, which only worked by accident.
## predictive accuracy using HoltWinters double exponential
Data <- ts(myts, start = c(2005, 1), end = c(2015, 1), frequency = 1)
train2 <- window(Data, end = c(2013, 1), frequency = 1)
test2 <- window(Data, start = c(2014, 1))
## fit a model on training data
aaFit2 <- HoltWinters(train2, gamma = FALSE)
fit <- HoltWinters(train2, gamma = FALSE)
plot(fit)
## forecast the training model over the testing period
aaPred2 <- forecast(aaFit2, h = length(test2))
plot(aaPred2)
## extract point forecasts
yhat2 <- aaPred2$mean
# BUG FIX: these redefinitions named their argument `yhat2` but the bodies
# referenced the global `yhat`, silently scoring the ARIMA forecast instead
# of the Holt-Winters one.  The bodies now use the argument.
## mean squared (prediction) error
MSE <- function(y, yhat2) {
  mean((y - yhat2)^2)
}
## mean absolute (prediction) error
MAE <- function(y, yhat2) {
  mean(abs(y - yhat2))
}
## mean absolute percentage (prediction) error
MAPE <- function(y, yhat2, percent = TRUE) {
  if (percent) {
    100 * mean(abs((y - yhat2) / y))
  } else {
    mean(abs((y - yhat2) / y))
  }
}
MSE(test, yhat2)
MAE(test, yhat2)
MAPE(test, yhat2)
accuracy(aaPred2, Data)
## Examine measures for only the forecast
accuracy(yhat2, test2)
| /TimeSeriesModels.R | no_license | gatewj/Insight_Data_Science | R | false | false | 7,591 | r |
#------------------------------------------------------------------
# Load libraries
#------------------------------------------------------------------
library(forecast)
library(TTR)
library(tseries)
library(urca)
# Set up the working environment
#------------------------------------------------------------------
setwd("/InsightProject")
# Load in the data
#------------------------------------------------------------------
mydata <- read.csv("predictEating.csv") # read in the csv file
# Time Series for Modeling
#------------------------------------------------------------------
#
#myvector<-c(7,10,5,11,16,14,9,12,19,21,51)
# Create My Time Series Model
#------------------------------------------------------------------
myvector <- c(mydata[['X3']])
myvector <- as.numeric(myvector)
myts <- ts(myvector, start=c(2005, 1), end=c(2015, 1), frequency=1)
fit <- HoltWinters(myts, gamma=FALSE)
plot(fit)
# predict next two future values
forecastplot <- forecast(fit , n.ahead = 2, prediction.interval = T,)
forecast(fit, 2)
adf.test(myts)
# Save a numeric vector as a time series object
#------------------------------------------------------------------
myts <- ts(myvector, start=c(2005, 1), end=c(2014, 1), frequency=1)
forecast(ets(myts),h=2)
plot(forecast(ets(myts),h=2))
# Subset the time series (June 2014 to December 2014)
#------------------------------------------------------------------
myts2 <- window(myts, start=c(2011, 1), end=c(2014, 1))
## Test for stationary
#-----------------------------------------------------------------
adf.test(myts)
ndiffs(myts)
myts<-diff(log(myts), differences=1)
# Plot series
#------------------
plot(myts)
# Exponential Models
#------------------------------------------------------------------------
##HoltWinters Forecast
#---------------------------
# Simple Exponential - models level
#--------------------------------------------------------
fit <- HoltWinters(myts, beta=FALSE, gamma=FALSE)
plot(fit)
# Double exponential - models level and trend
#--------------------------------------------------------
fit <- HoltWinters(myts, gamma=FALSE)
plot(fit)
# predict next two future values
forecastplot <- forecast(fit , n.ahead = 2, prediction.interval = T,)
forecast(fit, 2)
#plot(forecast(fit, 2))
plot(fit,forecastplot)
myvector <- c(mydata[['X15']],3)
myvector <- as.numeric(myvector)
myts <- ts(myvector, start=c(2005, 1), end=c(2015, 1), frequency=1)
adf.test(myts)
ndiffs(myts)
fit <- HoltWinters(myts, gamma=FALSE)
plot(fit)
# predict next two future values
forecastplot <- forecast(fit , n.ahead = 2, prediction.interval = T,)
forecast(fit, 2)
forecast(myts)
fit <- ets(myts)
plot(forecast(fit, 1))
forecast(fit, 1)
# Triple exponential - models level, trend, and seasonal components
#--------------------------------------------------------
#fit <- HoltWinters(myts)
#plot(fit)
##-------------------------------------------------------
plotgraph<-HoltWinters(myts,gamma = F)
# predictive accuracy
#--------------------------------------------------------
mytstrain <- ts(myvector, start=c(2005, 1), end=c(2015, 1), frequency=1)
mytstest <- window(mytstrain, start=c(2013, 1), end=c(2015, 1), frequency=1)
fittrain <- HoltWinters(mytstrain, beta=FALSE, gamma=FALSE)
fittrain <- HoltWinters(mytstrain, gamma=FALSE)
accuracy(fittrain,test=mytstest)
#
#
#
#ARIMA Models
#------------------------------------------------------------------------
myvector <- as.numeric(mydata[['X15']])
# Save a numeric vector as a time series object
#------------------------------------------------------------------
myts <- ts(myvector, start=c(2005, 1), end=c(2014, 1), frequency=1)
sample <- window(myts, start=c(2011, 1), end=c(2014, 1))
#------------------------------------------------------------------------
ndiffs(myts)
adf.test(myts, alternative = "stationary")
ndiffs(sample)
fit <- auto.arima(myts, seasonal = FALSE)
fit
forecast(fit,2)
plot(forecast(fit,2))
myvector <- c(mydata[['X15']])
myvector <- as.numeric(myvector)
myts <- ts(myvector, start=c(2005, 1), end=c(2014, 1), frequency=1)
#------------------------------------------------------------------------------
myvector <- c(mydata[['X15']],51)
myvector <- as.numeric(myvector)
myts <- ts(myvector, start=c(2005, 1), end=c(2015, 1), frequency=1)
adf.test(myts)
myts<-diff(myts, differences=4)
adf.test(myts)
library(forecast)
ndiffs(myts)
#Box.test(sample, lag = 1,type="Ljung-Box")
#acf(myts)
# pacf(myts)
# fit an ARIMA model of order P, D, Q
#--------------------------------------------------------
auto.arima(myts,ic='aic',trace = T)
fittwo <- arima(myts, order=c(0,1, 0))
# predictive accuracy
#--------------------------------------------------------
accuracy(fittwo)
# predict next 2 observations
#--------------------------------------------------------
forecast(fittwo, 2)
#predict(fittwo,2)
plot(forecast(fittwo, 2))
# Automated forecasting
#--------------------------------------------------------
#
# Automated forecasting using an exponential model
#--------------------------------------------------------
fit <- ets(myts, additive.only = TRUE)
fit
#--------------------------------------------------------
# Automated forecasting using an ARIMA model
#--------------------------------------------------------
fit <- auto.arima(myts)
fit
#--------------------------------------------------------
auto.arima(myts,ic='aic',trace = T)
myvector <- c(mydata[['X14']],9)
myvector <- as.numeric(myvector)
myts <- ts(myvector, start=c(2005, 1), end=c(2015, 1), frequency=1)
# predictive accuracy for Arima Model
#--------------------------------------------------------
data <- ts(myts, start=c(2005, 1), end=c(2015, 1), frequency=1)
train <- window(data, end=c(2013, 1), frequency=1)
test <- window(data,start=c(2014,1))
## fit a model on training data
aaFit <- auto.arima(train)
plot(aaFit)
## forecaset training model
## over the testing period
aaPred <- forecast(aaFit,h=length(test))
plot(aaPred)
## extract point forecasts
yhat <- aaPred$mean
## a few functions
## mean squared (prediction) error
MSE <- function(y, yhat){
mean((y-yhat)**2)
}
## mean absolute (prediction) error
MAE <- function(y, yhat){
mean(abs(y-yhat))
}
## mean absolute percentage (prediction) error
MAPE <- function(y,yhat,percent=TRUE){
if(percent){
100*mean(abs((y-yhat)/y))
} else {
mean(abs((y-yhat)/y))
}
}
MSE(test,yhat)
MAE(test,yhat)
MAPE(test,yhat)
accuracy(aaPred, data)
## Examine measures for only forecast
accuracy(yhat, test)
# --------------------------------------------------------------------
# BUG FIX: this separator was previously a bare run of dashes (not a comment);
# the parser folds such a line into the next expression as chained unary
# minuses, which only worked by accident.
## predictive accuracy using HoltWinters double exponential
Data <- ts(myts, start = c(2005, 1), end = c(2015, 1), frequency = 1)
train2 <- window(Data, end = c(2013, 1), frequency = 1)
test2 <- window(Data, start = c(2014, 1))
## fit a model on training data
aaFit2 <- HoltWinters(train2, gamma=FALSE)
fit <- HoltWinters(train2, gamma=FALSE)
plot(fit)
## forecaset training model
## over the testing period
aaPred2 <- forecast(aaFit2,h=length(test2))
plot(aaPred2)
## extract point forecasts
yhat2 <- aaPred2$mean
## a few functions
## mean squared (prediction) error
## Mean squared (prediction) error between actuals `y` and forecasts `yhat2`.
## BUG FIX: the body previously referenced the global `yhat` instead of the
## `yhat2` argument, silently scoring the wrong forecast.
MSE <- function(y, yhat2){
  mean((y - yhat2)^2)
}
## mean absolute (prediction) error
## Mean absolute (prediction) error between actuals `y` and forecasts `yhat2`.
## BUG FIX: the body previously referenced the global `yhat` instead of the
## `yhat2` argument.
MAE <- function(y, yhat2){
  mean(abs(y - yhat2))
}
## mean absolute percentage (prediction) error
## Mean absolute percentage (prediction) error; returns a percentage by
## default, a proportion when percent = FALSE.
## BUG FIX: the body previously referenced the global `yhat` instead of the
## `yhat2` argument.
MAPE <- function(y, yhat2, percent=TRUE){
  if (percent) {
    100 * mean(abs((y - yhat2) / y))
  } else {
    mean(abs((y - yhat2) / y))
  }
}
MSE(test,yhat2)
MAE(test,yhat2)
MAPE(test,yhat2)
accuracy(aaPred2, Data)
## Examine measures for only forecast
accuracy(yhat2,test2)
|
cont_both_mod <- function(t, x, params) {
  # Right-hand side of the continuous-time competition/epidemic ODE system for
  # the three groups A, F and P, in the signature expected by deSolve::ode().
  #
  # t      -- time (required by the solver interface; not used explicitly).
  # x      -- state vector: log-biomasses LogB_A/LogB_F/LogB_P (x[1:3]),
  #           infected biomasses I_A/I_F/I_P (x[4:6]), dead biomass D (x[7]),
  #           and the free inoculum/conidia pool C (x[8]).
  # params -- data frame with columns `Parameter` (name) and `Estimate`
  #           (value) containing every coefficient referenced below.
  #
  # Returns a one-element list holding the 8 derivatives in the order of x.

  # Parameters: flatten the lookup table into named locals in a single pass
  # instead of 35 copies of as.numeric(params[params$Parameter == ..., ...]).
  # This binds r_*, alpha_*, beta_*, k_*, v_*, m_*, h and b in this frame.
  list2env(as.list(setNames(as.numeric(params$Estimate),
                            as.character(params$Parameter))),
           envir = environment())

  # Current state
  LogB_A <- x[1]
  LogB_F <- x[2]
  LogB_P <- x[3]
  I_A <- x[4]
  I_F <- x[5]
  I_P <- x[6]
  D <- x[7]  # tracked state; not needed to compute the derivatives below
  C <- x[8]

  # Derived values: biomasses on the natural scale and susceptible pools
  B_A <- exp(LogB_A)
  B_F <- exp(LogB_F)
  B_P <- exp(LogB_P)
  S_A <- B_A - I_A
  S_F <- B_F - I_F
  S_P <- B_P - I_P

  # Model with asymptotic transmission (infection saturates via k_* + B_*)
  dLogBAdt <- r_A * (1 - alpha_AA * B_A - alpha_AF * B_F - alpha_AP * B_P) - m_A - v_A * I_A / B_A
  dLogBFdt <- r_F * (1 - alpha_FA * B_A - alpha_FF * B_F - alpha_FP * B_P) - m_F - v_F * I_F / B_F
  dLogBPdt <- r_P * (1 - alpha_PA * B_A - alpha_PF * B_F - alpha_PP * B_P) - m_P - v_P * I_P / B_P
  dIAdt <- (beta_AC * S_A * C + beta_AA * S_A * I_A + beta_AF * S_A * I_F + beta_AP * S_A * I_P)/(k_A + B_A) - (m_A + v_A) * I_A
  dIFdt <- (beta_FC * S_F * C + beta_FA * S_F * I_A + beta_FF * S_F * I_F + beta_FP * S_F * I_P)/(k_F + B_F) - (m_F + v_F) * I_F
  dIPdt <- (beta_PC * S_P * C + beta_PA * S_P * I_A + beta_PF * S_P * I_F + beta_PP * S_P * I_P)/(k_P + B_P) - (m_P + v_P) * I_P
  dDdt <- m_A * B_A + v_A * I_A + m_F * B_F + v_F * I_F + m_P * B_P + v_P * I_P
  dCdt <- h * ((m_A + v_A) * I_A + (m_F + v_F) * I_F + (m_P + v_P) * I_P) - b * C
  # combine values in state order
  dxdt <- c(dLogBAdt, dLogBFdt, dLogBPdt, dIAdt, dIFdt, dIPdt, dDdt, dCdt)
  # output in the form deSolve expects
  list(dxdt)
} | /code/continuous_both_species_model.R | no_license | aekendig/microstegium-bipolaris | R | false | false | 3,899 | r | cont_both_mod <- function(t, x, params) {
# initial conditions
LogB_A <- x[1]
LogB_F <- x[2]
LogB_P <- x[3]
I_A <- x[4]
I_F <- x[5]
I_P <- x[6]
D <- x[7]
C <- x[8]
# parameters
r_A <- as.numeric(params[params$Parameter == "r_A", "Estimate"])
r_F <- as.numeric(params[params$Parameter == "r_F", "Estimate"])
r_P <- as.numeric(params[params$Parameter == "r_P", "Estimate"])
alpha_AA <- as.numeric(params[params$Parameter == "alpha_AA", "Estimate"])
alpha_AF <- as.numeric(params[params$Parameter == "alpha_AF", "Estimate"])
alpha_AP <- as.numeric(params[params$Parameter == "alpha_AP", "Estimate"])
alpha_FA <- as.numeric(params[params$Parameter == "alpha_FA", "Estimate"])
alpha_FF <- as.numeric(params[params$Parameter == "alpha_FF", "Estimate"])
alpha_FP <- as.numeric(params[params$Parameter == "alpha_FP", "Estimate"])
alpha_PA <- as.numeric(params[params$Parameter == "alpha_PA", "Estimate"])
alpha_PF <- as.numeric(params[params$Parameter == "alpha_PF", "Estimate"])
alpha_PP <- as.numeric(params[params$Parameter == "alpha_PP", "Estimate"])
beta_AC <- as.numeric(params[params$Parameter == "beta_AC", "Estimate"])
beta_FC <- as.numeric(params[params$Parameter == "beta_FC", "Estimate"])
beta_PC <- as.numeric(params[params$Parameter == "beta_PC", "Estimate"])
beta_AA <- as.numeric(params[params$Parameter == "beta_AA", "Estimate"])
beta_AF <- as.numeric(params[params$Parameter == "beta_AF", "Estimate"])
beta_AP <- as.numeric(params[params$Parameter == "beta_AP", "Estimate"])
beta_FA <- as.numeric(params[params$Parameter == "beta_FA", "Estimate"])
beta_FF <- as.numeric(params[params$Parameter == "beta_FF", "Estimate"])
beta_FP <- as.numeric(params[params$Parameter == "beta_FP", "Estimate"])
beta_PA <- as.numeric(params[params$Parameter == "beta_PA", "Estimate"])
beta_PF <- as.numeric(params[params$Parameter == "beta_PF", "Estimate"])
beta_PP <- as.numeric(params[params$Parameter == "beta_PP", "Estimate"])
k_A <- as.numeric(params[params$Parameter == "k_A", "Estimate"])
k_F <- as.numeric(params[params$Parameter == "k_F", "Estimate"])
k_P <- as.numeric(params[params$Parameter == "k_P", "Estimate"])
v_A <- as.numeric(params[params$Parameter == "v_A", "Estimate"])
v_F <- as.numeric(params[params$Parameter == "v_F", "Estimate"])
v_P <- as.numeric(params[params$Parameter == "v_P", "Estimate"])
m_A <- as.numeric(params[params$Parameter == "m_A", "Estimate"])
m_F <- as.numeric(params[params$Parameter == "m_F", "Estimate"])
m_P <- as.numeric(params[params$Parameter == "m_P", "Estimate"])
h <- as.numeric(params[params$Parameter == "h", "Estimate"])
b <- as.numeric(params[params$Parameter == "b", "Estimate"])
# derived values
B_A <- exp(LogB_A)
B_F <- exp(LogB_F)
B_P <- exp(LogB_P)
S_A <- B_A - I_A
S_F <- B_F - I_F
S_P <- B_P - I_P
# model with asymptotic transmission
dLogBAdt <- r_A * (1 - alpha_AA * B_A - alpha_AF * B_F - alpha_AP * B_P) - m_A - v_A * I_A / B_A
dLogBFdt <- r_F * (1 - alpha_FA * B_A - alpha_FF * B_F - alpha_FP * B_P) - m_F - v_F * I_F / B_F
dLogBPdt <- r_P * (1 - alpha_PA * B_A - alpha_PF * B_F - alpha_PP * B_P) - m_P - v_P * I_P / B_P
dIAdt <- (beta_AC * S_A * C + beta_AA * S_A * I_A + beta_AF * S_A * I_F + beta_AP * S_A * I_P)/(k_A + B_A) - (m_A + v_A) * I_A
dIFdt <- (beta_FC * S_F * C + beta_FA * S_F * I_A + beta_FF * S_F * I_F + beta_FP * S_F * I_P)/(k_F + B_F) - (m_F + v_F) * I_F
dIPdt <- (beta_PC * S_P * C + beta_PA * S_P * I_A + beta_PF * S_P * I_F + beta_PP * S_P * I_P)/(k_P + B_P) - (m_P + v_P) * I_P
dDdt <- m_A * B_A + v_A * I_A + m_F * B_F + v_F * I_F + m_P * B_P + v_P * I_P
dCdt <- h * ((m_A + v_A) * I_A + (m_F + v_F) * I_F + (m_P + v_P) * I_P) - b * C
# combine values
dxdt <- c(dLogBAdt, dLogBFdt, dLogBPdt, dIAdt, dIFdt, dIPdt, dDdt, dCdt)
# output
list(dxdt)
} |
find_modes <- function (b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis,
offset_zi_lis, betas, invD, phis, gammas,
canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun,
mu.eta_fun, score_eta_fun, score_phis_fun, score_eta_zi_fun) {
log_post_b <- function (b_i, y_i, N_i, X_i, Z_i, offset_i, X_zi_i, Z_zi_i, offset_zi_i,
betas, invD, phis, gammas, canonical,
user_defined, Zty_i, log_dens, mu_fun, var_fun, mu.eta_fun,
score_eta_fun, score_phis_fun, score_eta_zi_fun) {
ind_Z <- seq_len(ncol(Z_i))
eta_y <- as.vector(X_i %*% betas + Z_i %*% b_i[ind_Z])
if (!is.null(offset_i))
eta_y <- eta_y + offset_i
eta_zi <- if (!is.null(X_zi_i)) as.vector(X_zi_i %*% gammas)
if (!is.null(Z_zi_i))
eta_zi <- eta_zi + as.vector(Z_zi_i %*% b_i[-ind_Z])
if (!is.null(offset_zi_i))
eta_zi <- eta_zi + offset_zi_i
- sum(log_dens(y_i, eta_y, mu_fun, phis, eta_zi), na.rm = TRUE) +
c(0.5 * crossprod(b_i, invD) %*% b_i)
}
# Gradient of log_post_b with respect to b_i (negative score of the log
# posterior). Uses the analytic score when supplied, central finite
# differences of log_dens for user-defined families, or the standard GLM
# canonical / non-canonical forms otherwise.
score_log_post_b <- function (b_i, y_i, N_i, X_i, Z_i, offset_i,X_zi_i, Z_zi_i, offset_zi_i,
betas, invD, phis, gammas,
canonical, user_defined, Zty_i, log_dens, mu_fun,
var_fun, mu.eta_fun, score_eta_fun, score_phis_fun,
score_eta_zi_fun) {
# linear predictors of the mean and zero-inflation parts
eta_y <- as.vector(X_i %*% betas + Z_i %*% b_i[seq_len(ncol(Z_i))])
if (!is.null(offset_i))
eta_y <- eta_y + offset_i
eta_zi <- if (!is.null(X_zi_i)) as.vector(X_zi_i %*% gammas)
if (!is.null(Z_zi_i))
eta_zi <- eta_zi + as.vector(Z_zi_i %*% b_i[-seq_len(ncol(Z_i))])
if (!is.null(offset_zi_i))
eta_zi <- eta_zi + offset_zi_i
mu_y <- mu_fun(eta_y)
log_dens_part <- if (user_defined) {
# user-supplied family: analytic score if available, otherwise a
# central finite-difference approximation in each linear predictor
out <- if (!is.null(score_eta_fun)) {
- crossprod(Z_i, score_eta_fun(y_i, mu_y, phis, eta_zi))
} else {
l1 <- log_dens(y_i, eta_y + 1e-04, mu_fun, phis, eta_zi)
l2 <- log_dens(y_i, eta_y - 1e-04, mu_fun, phis, eta_zi)
- crossprod(Z_i, (l1 - l2) / (2 * 1e-04))
}
if (!is.null(Z_zi_i)) {
out <- if (!is.null(score_eta_zi_fun)) {
c(out, - crossprod(Z_zi_i, score_eta_zi_fun(y_i, mu_y, phis, eta_zi)))
} else {
l1 <- log_dens(y_i, eta_y, mu_fun, phis, eta_zi + 1e-04)
l2 <- log_dens(y_i, eta_y, mu_fun, phis, eta_zi - 1e-04)
c(out, - crossprod(Z_zi_i, (l1 - l2) / (2 * 1e-04)))
}
}
out
} else {
if (canonical) {
# canonical link: negative score reduces to -Z'y + Z'mu;
# N_i (when non-NULL) carries binomial totals
if (!is.null(N_i))- Zty_i + crossprod(Z_i, N_i * mu_y) else
- Zty_i + crossprod(Z_i, mu_y)
} else {
# general link: chain rule through the mean function and variance
var <- var_fun(mu_y)
deriv <- mu.eta_fun(eta_y)
if (!is.null(N_i)) - crossprod(Z_i, (y_i[, 1] - N_i * mu_y) * deriv / var) else
- crossprod(Z_i, (y_i - mu_y) * deriv / var)
}
}
# add the gradient of the Gaussian prior kernel
drop(log_dens_part + invD %*% b_i)
}
# Loop over subjects: find each subject's posterior mode of the random
# effects by BFGS, then approximate the Hessian at the mode via central
# finite differences (cd_vec) of the score function.
n <- length(y_lis)
post_modes <- b
post_hessians <- vector("list", n)
for (i in seq_len(n)) {
# extract per-subject data; several components may legitimately be NULL
y_i <- y_lis[[i]]
N_i <- if (!is.null(N_lis)) N_lis[[i]]
X_i <- X_lis[[i]]
Z_i <- Z_lis[[i]]
offset_i <- if (!is.null(offset_lis)) offset_lis[[i]]
Zty_i <- Zty_lis[[i]]
X_zi_i <- if (!is.null(X_zi_lis)) X_zi_lis[[i]]
Z_zi_i <- if (!is.null(Z_zi_lis)) Z_zi_lis[[i]]
offset_zi_i <- if (!is.null(offset_zi_lis)) offset_zi_lis[[i]]
b_i <- b[i, ]
# current random-effects values serve as starting values for BFGS
opt_i <- optim(par = b_i, fn = log_post_b, gr = score_log_post_b, method = "BFGS",
y_i = y_i, N_i = N_i, X_i = X_i, Z_i = Z_i, offset_i = offset_i,
X_zi_i = X_zi_i, Z_zi_i = Z_zi_i, offset_zi_i = offset_zi_i,
betas = betas, invD = invD, phis = phis, gammas = gammas,
canonical = canonical,
user_defined = user_defined, Zty_i = Zty_i, log_dens = log_dens,
mu_fun = mu_fun, var_fun = var_fun, mu.eta_fun = mu.eta_fun,
score_eta_fun = score_eta_fun, score_phis_fun = score_phis_fun,
score_eta_zi_fun = score_eta_zi_fun)
post_modes[i, ] <- opt_i$par
# numeric Hessian of the negative log posterior at the mode
post_hessians[[i]] <- cd_vec(post_modes[i, ], score_log_post_b,
y_i = y_i, N_i = N_i, X_i = X_i, Z_i = Z_i,
offset_i = offset_i, X_zi_i = X_zi_i, Z_zi_i = Z_zi_i,
offset_zi_i = offset_zi_i, betas = betas, invD = invD,
phis = phis, gammas = gammas, canonical = canonical,
user_defined = user_defined,
Zty_i = Zty_i, log_dens = log_dens, mu_fun = mu_fun,
var_fun = var_fun, mu.eta_fun = mu.eta_fun,
score_eta_fun = score_eta_fun,
score_phis_fun = score_phis_fun,
score_eta_zi_fun = score_eta_zi_fun)
}
list(post_modes = post_modes, post_hessians = post_hessians)
}
GHfun <- function (b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis, offset_zi_lis,
                   betas, inv_D, phis, gammas, k, q,
                   canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun, mu.eta_fun,
                   score_eta_fun, score_phis_fun, score_eta_zi_fun) {
    # Adaptive Gauss-Hermite quadrature machinery: build the k^q tensor grid
    # of standard GH abscissas and re-center/re-scale it at each subject's
    # posterior mode using the Cholesky factor of the posterior Hessian from
    # find_modes(). Returns the transformed abscissas (b), their outer
    # products (b2), the linear-predictor contributions Z'b, the adapted
    # weights, per-subject log-determinant terms, and the posterior
    # modes/variances.
    GH <- gauher(k)
    aGH <- find_modes(b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis,
                      offset_zi_lis, betas, inv_D, phis, gammas,
                      canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun,
                      mu.eta_fun, score_eta_fun, score_phis_fun, score_eta_zi_fun)
    modes <- aGH$post_modes
    # fix: use the full element name 'post_hessians'; the original spelled
    # 'post_hessian' and silently relied on `$` partial matching
    chol_hessians <- lapply(aGH$post_hessians, chol)
    # q-dimensional tensor-product grid of standard GH nodes
    b <- as.matrix(expand.grid(lapply(seq_len(q), function (j, u) u$x, u = GH)))
    n <- nrow(modes)
    b_new <- vector("list", n)
    log_dets <- numeric(n)
    for (i in seq_len(n)) {
        # shift/scale: b_i = mode_i + sqrt(2) * B_i^{-1} t(grid), with B_i
        # the upper Cholesky factor of the posterior Hessian
        b_new[[i]] <- t(sqrt(2) * solve(chol_hessians[[i]], t(b)) + modes[i, ])
        log_dets[i] <- - determinant.matrix(chol_hessians[[i]], logarithm = TRUE)$modulus
    }
    # product weights of the tensor grid, including the exp(b'b) factor of
    # the adaptive GH rule
    wGH <- as.matrix(expand.grid(lapply(seq_len(q), function (j, u) u$w, u = GH)))
    wGH <- 2^(q/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
    # outer products b b' per quadrature point (needed for D-matrix updates)
    b2 <- lapply(b_new, function (bb) if (q == 1) bb * bb else
        t(apply(bb, 1, function (x) x %o% x)))
    ind_Z <- seq_len(ncol(Z_lis[[1]]))
    Ztb <- do.call('rbind', mapply(function (z, bb) z %*% t(bb[, ind_Z, drop = FALSE]),
                                   Z_lis, b_new, SIMPLIFY = FALSE))
    # zero-inflation part uses the remaining columns of the abscissas
    Z_zitb <- if (!is.null(Z_zi_lis[[1]])) {
        do.call('rbind', mapply(function (z, bb) z %*% t(bb[, -ind_Z, drop = FALSE]),
                                Z_zi_lis, b_new, SIMPLIFY = FALSE))
    }
    list(b = do.call('rbind', b_new), b2 = do.call('rbind', b2), Ztb = Ztb, Z_zitb = Z_zitb,
         wGH = wGH, log_dets = log_dets, post_modes = modes,
         post_vars = lapply(aGH$post_hessians, solve))
}
chol_transf <- function (x) {
    # Log-Cholesky parameterization of a positive-definite matrix.
    # Matrix input: return the upper-triangular Cholesky factor (diagonal
    # log-transformed) packed column-major into a vector. Vector input:
    # invert the transform and return crossprod(U) with the corresponding
    # lower-triangular factor attached as attribute "L".
    if (any(is.na(x) | !is.finite(x)))
        stop("NA or infinite values in 'x'.\n")
    if (is.matrix(x)) {
        p <- nrow(x)
        diag_idx <- cbind(seq_len(p), seq_len(p))
        U <- chol(x)
        U[diag_idx] <- log(U[diag_idx])          # unconstrain the diagonal
        U[upper.tri(U, TRUE)]
    } else {
        # recover the dimension p from the triangular vector length
        p <- round((-1 + sqrt(1 + 8 * length(x))) / 2)
        diag_idx <- cbind(seq_len(p), seq_len(p))
        U <- matrix(0, p, p)
        U[upper.tri(U, TRUE)] <- x
        U[diag_idx] <- exp(U[diag_idx])          # back to a positive diagonal
        out <- crossprod(U)
        attr(out, "L") <- t(U)[lower.tri(U, TRUE)]
        out
    }
}
deriv_D <- function (D) {
    # Partial derivatives of the covariance matrix D with respect to each of
    # its free (lower-triangular, including diagonal) elements: a list of
    # symmetric indicator matrices with a 1 at the element's position and at
    # its transpose position.
    ncz <- nrow(D)
    ind <- which(lower.tri(D, TRUE), arr.ind = TRUE)
    dimnames(ind) <- NULL
    nind <- nrow(ind)
    # fix: the original computed svD <- solve(D) here but never used it;
    # the needless inverse is dropped (also avoids failing on singular D)
    lapply(seq_len(nind), function (x, ind) {
        mat <- matrix(0, ncz, ncz)
        ii <- ind[x, , drop = FALSE]
        mat[ii[1], ii[2]] <- mat[ii[2], ii[1]] <- 1
        mat
    }, ind = ind[, 2:1, drop = FALSE])
}
# Jacobian linking the free elements of a covariance matrix to its
# log-Cholesky parameters L (as produced by chol_transf). Heavy index
# bookkeeping: 'ind' enumerates the lower triangle column-major and
# 'lind' caches the symmetric index layout for each diagonal block.
jacobian2 <- function (L, ncz) {
ind <- which(lower.tri(matrix(0, ncz, ncz), TRUE), arr.ind = TRUE)
dimnames(ind) <- NULL
nind <- nrow(ind)
id <- 1:nind
# positions of the diagonal elements within the triangle enumeration
rind <- which(ind[, 1] == ind[, 2])
lind <- vector("list", length(rind))
for (i in seq_along(rind)) {
tt <- matrix(0, ncz - i + 1, ncz - i + 1)
tt[lower.tri(tt, TRUE)] <- seq(rind[i], nind)
tt <- tt + t(tt)
diag(tt) <- diag(tt)/2
lind[[i]] <- tt
}
out <- matrix(0, nind, nind)
for (g in 1:ncz) {
gind <- id[g == ind[, 2]]
vals <- L[gind]
for (j in gind) {
k <- which(j == gind)
# diagonal parameters pick up an extra vals[1] factor -- presumably the
# chain rule through exp() on the log-diagonal (see chol_transf); confirm
out[cbind(lind[[g]][k, ], j)] <- if (j %in% rind) vals[1] * vals else vals
}
}
out[rind, ] <- 2 * out[rind, ]
# reorder columns to the upper-triangle (transposed) storage order
col.ind <- matrix(0, ncz, ncz)
col.ind[lower.tri(col.ind, TRUE)] <- seq(1, length(L))
col.ind <- t(col.ind)
out[, col.ind[upper.tri(col.ind, TRUE)]]
}
fd <- function (x, f, ..., eps = .Machine$double.eps^0.25) {
    # Forward finite-difference gradient of a scalar-valued f at x.
    # The step is scaled relative to |x| (plus eps so zero entries still
    # get a nonzero step); the actually-realized step is used as divisor.
    f0 <- f(x, ...)
    steps <- eps * (abs(x) + eps)
    out <- numeric(length(x))
    for (j in seq_along(x)) {
        xp <- x
        xp[j] <- x[j] + steps[j]
        out[j] <- c(f(xp, ...) - f0) / (xp[j] - x[j])
    }
    out
}
fd_vec <- function (x, f, ..., eps = .Machine$double.eps^0.25) {
    # Forward finite-difference Jacobian of a vector-valued f, symmetrized;
    # intended for score functions, so the result approximates a Hessian.
    n <- length(x)
    f0 <- f(x, ...)
    scale <- pmax(abs(x), 1)
    jac <- matrix(0, n, n)
    for (j in seq_len(n)) {
        xp <- x
        xp[j] <- x[j] + eps * scale[j]
        jac[, j] <- c(f(xp, ...) - f0) / (xp[j] - x[j])
    }
    # enforce symmetry by averaging with the transpose
    (jac + t(jac)) / 2
}
cd <- function (x, f, ..., eps = 0.001) {
    # Central finite-difference gradient of a scalar-valued f at x
    # (O(eps^2) accurate). Steps are scaled by max(|x_j|, 1).
    out <- numeric(length(x))
    scale <- pmax(abs(x), 1)
    for (j in seq_along(x)) {
        up <- dn <- x
        up[j] <- x[j] + eps * scale[j]
        dn[j] <- x[j] - eps * scale[j]
        out[j] <- c(f(up, ...) - f(dn, ...)) / (up[j] - dn[j])
    }
    out
}
cd_vec <- function (x, f, ..., eps = 0.001) {
    # Central finite-difference Jacobian of a vector-valued f, symmetrized;
    # used to obtain (approximate) Hessians from score functions.
    n <- length(x)
    scale <- pmax(abs(x), 1)
    jac <- matrix(0, n, n)
    for (j in seq_len(n)) {
        up <- dn <- x
        up[j] <- x[j] + eps * scale[j]
        dn[j] <- x[j] - eps * scale[j]
        jac[, j] <- c(f(up, ...) - f(dn, ...)) / (up[j] - dn[j])
    }
    # enforce symmetry by averaging with the transpose
    (jac + t(jac)) / 2
}
# Multivariate normal density. Fast paths: univariate (p == 1) and
# diagonal Sigma (supplied as a vector of variances, or as a matrix whose
# off-diagonals are numerically zero); otherwise a spectral decomposition
# of Sigma is used.
dmvnorm <- function (x, mu, Sigma, log = FALSE) {
if (!is.matrix(x))
x <- rbind(x)
p <- length(mu)
if (p == 1) {
dnorm(x, mu, sqrt(Sigma), log = log)
} else {
# t1: Sigma given as a vector of variances; t2: diagonal matrix
t1 <- length(mu) == length(Sigma)
t2 <- all(abs(Sigma[lower.tri(Sigma)]) < sqrt(.Machine$double.eps))
if (t1 || t2) {
if (!t1)
Sigma <- diag(Sigma)
nx <- nrow(x)
# independent coordinates: sum of univariate log-densities per row
ff <- rowSums(dnorm(x, rep(mu, each = nx),
sd = rep(sqrt(Sigma), each = nx), log = TRUE))
if (log) ff else exp(ff)
} else {
# general covariance: invert through the eigen decomposition
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
if (!all(ev >= -1e-06 * abs(ev[1])))
stop("'Sigma' is not positive definite")
ss <- x - rep(mu, each = nrow(x))
inv.Sigma <- evec %*% (t(evec)/ev)
quad <- 0.5 * rowSums((ss %*% inv.Sigma) * ss)
fact <- - 0.5 * (p * log(2 * pi) + sum(log(ev)))
if (log) as.vector(fact - quad) else as.vector(exp(fact - quad))
}
}
}
unattr <- function (x) {
    # Strip all attributes from x (names, dimnames, class, ...) while
    # preserving the dim attribute of matrices so they stay matrices.
    was_matrix <- is.matrix(x)
    if (was_matrix)
        dims <- dim(x)
    attributes(x) <- NULL
    if (was_matrix)
        dim(x) <- dims
    x
}
# Abscissas and weights of n-point Gauss-Hermite quadrature (weight
# function exp(-x^2)), after the 'gauher' routine of Numerical Recipes:
# only the upper half of the (symmetric) roots is computed, each via
# Newton's method on the normalized Hermite recurrence.
gauher <- function (n) {
m <- trunc((n + 1) / 2)
x <- w <- rep(-1, n)
for (i in seq_len(m)) {
# initial guess for the i-th largest root (Numerical Recipes constants)
z <- if (i == 1) {
sqrt(2 * n + 1) - 1.85575 * (2 * n + 1)^(-0.16667)
} else if (i == 2) {
z - 1.14 * n^0.426/z
} else if (i == 3) {
1.86 * z - 0.86 * x[1]
} else if (i == 4) {
1.91 * z - 0.91 * x[2]
} else {
2 * z - x[i - 2]
}
for (its in seq_len(10)) {
# evaluate the normalized Hermite polynomial by its three-term
# recurrence; 0.7511255... = pi^(-1/4), the H_0 normalization
p1 <- 0.751125544464943
p2 <- 0
for (j in seq_len(n)) {
p3 <- p2
p2 <- p1
p1 <- z * sqrt(2 / j) * p2 - sqrt((j - 1) / j) * p3
}
# pp is the derivative at z; perform one Newton step
pp <- sqrt(2 * n) * p2
z1 <- z
z <- z1 - p1/pp
if (abs(z - z1) <= 3e-14)
break
}
# roots come in +/- pairs with equal weights
x[i] <- z
x[n + 1 - i] <- -z
w[i] <- 2 / (pp * pp)
w[n + 1 - i] <- w[i]
}
list(x = x, w = w)
}
nearPD <- function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08,
                    maxits = 100) {
    # Nearest positive-definite matrix in the spirit of Higham's
    # alternating-projections method (cf. Matrix::nearPD), followed by an
    # eigenvalue floor that rescales to keep the diagonal close to the
    # original. Input must be a symmetric numeric matrix.
    if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
        stop("Input matrix M must be square and symmetric.\n")
    inorm <- function(x) max(rowSums(abs(x)))   # infinity norm
    n <- ncol(M)
    U <- matrix(0.0, n, n)                      # Dykstra-style correction term
    X <- M
    iter <- 0
    converged <- FALSE
    while (iter < maxits && !converged) {
        Y <- X
        # renamed from 'T' (the original shadowed the TRUE alias)
        Tmat <- Y - U
        # NOTE(review): the classic Dykstra/Higham iteration would decompose
        # Tmat (= Y - U) here rather than Y -- confirm against Matrix::nearPD
        # before changing; behavior is kept identical to the original.
        e <- eigen(Y, symmetric = TRUE)
        Q <- e$vectors
        d <- e$values
        D <- if (length(d) > 1) diag(d) else as.matrix(d)
        # keep only eigenvalues above the relative tolerance
        p <- (d > eig.tol * d[1])
        QQ <- Q[, p, drop = FALSE]
        X <- QQ %*% D[p, p, drop = FALSE] %*% t(QQ)
        U <- X - Tmat
        X <- (X + t(X)) / 2                     # re-symmetrize against round-off
        conv <- inorm(Y - X)/inorm(Y)
        iter <- iter + 1
        converged <- conv <= conv.tol
    }
    X <- (X + t(X)) / 2
    # final safeguard: floor the eigenvalues at a small positive value and
    # rescale so the diagonal stays close to the pre-floor diagonal
    e <- eigen(X, symmetric = TRUE)
    d <- e$values
    Eps <- posd.tol * abs(d[1L])
    if (d[n] < Eps) {
        d[d < Eps] <- Eps
        Q <- e$vectors
        o.diag <- diag(X)
        X <- Q %*% (d * t(Q))
        D <- sqrt(pmax(Eps, o.diag) / diag(X))
        X[] <- D * X * rep(D, each = n)
    }
    (X + t(X)) / 2
}
getRE_Formula <- function (form) {
    # Extract the random-effects design part of a specification such as
    # `~ x | g` (or `~ x || g`), returning the one-sided formula `~ x`.
    if (!inherits(form, "formula")) {
        stop("formula(object) must return a formula")
    }
    rhs <- form[[length(form)]]
    # drop the grouping part when the top-level operator is `|` or `||`
    if (length(rhs) == 3 &&
        (identical(rhs[[1]], as.name("|")) || identical(rhs[[1]], as.name("||")))) {
        rhs <- rhs[[2]]
    }
    eval(substitute(~ form, list(form = rhs)))
}
getID_Formula <- function (form) {
    # Extract the grouping-factor part of a random-effects specification.
    # A named list of formulas encodes nested grouping (~ outer/inner);
    # otherwise take the `g` in `~ x | g` and return it one-sided.
    if (is.list(form)) {
        grp <- names(form)
        as.formula(paste0("~", grp[1L], "/", grp[2L]))
    } else {
        rhs <- form[[length(form)]]
        asOneSidedFormula(rhs[[3]])
    }
}
printCall <- function (call) {
    # Deparse a call for display, truncated to at most three lines with a
    # trailing "..." marker so very long calls do not flood the output.
    lines <- deparse(call)
    if (length(lines) > 3) {
        lines <- lines[1:3]
        lines[3] <- paste0(lines[3], "...")
    }
    paste(lines, sep = "\n", collapse = "\n")
}
dgt <- function (x, mu = 0, sigma = 1, df = stop("no df argument."), log = FALSE) {
    # Density of the location-scale ("generalized") Student-t distribution:
    # X = mu + sigma * T with T ~ t(df); the 1/sigma Jacobian is applied on
    # the natural or log scale according to `log`.
    z <- (x - mu) / sigma
    if (log) {
        dt(z, df = df, log = TRUE) - log(sigma)
    } else {
        dt(z, df = df) / sigma
    }
}
# Multivariate Student-t (log-)density. Either Sigma (a matrix, or a
# precomputed eigen() decomposition given as a list) or its inverse
# invSigma may be supplied. With prop = TRUE only the x-dependent part is
# returned (normalizing constant dropped), which suffices for MH ratios.
dmvt <- function (x, mu, Sigma = NULL, invSigma = NULL, df, log = TRUE, prop = TRUE) {
if (!is.numeric(x))
stop("'x' must be a numeric matrix or vector")
if (!is.matrix(x))
x <- rbind(x)
p <- length(mu)
if (is.null(Sigma) && is.null(invSigma))
stop("'Sigma' or 'invSigma' must be given.")
if (!is.null(Sigma)) {
if (is.list(Sigma)) {
# Sigma already supplied as an eigen() result
ev <- Sigma$values
evec <- Sigma$vectors
} else {
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
}
if (!all(ev >= -1e-06 * abs(ev[1])))
stop("'Sigma' is not positive definite")
# invert through the spectral decomposition
invSigma <- evec %*% (t(evec)/ev)
if (!prop)
logdetSigma <- sum(log(ev))
} else {
if (!prop)
logdetSigma <- c(-determinant(invSigma)$modulus)
}
# Mahalanobis-type quadratic form, scaled by df
ss <- x - rep(mu, each = nrow(x))
quad <- rowSums((ss %*% invSigma) * ss)/df
if (!prop)
fact <- lgamma((df + p)/2) - lgamma(df/2) -
0.5 * (p * (log(pi) + log(df)) + logdetSigma)
if (log) {
if (!prop) as.vector(fact - 0.5 * (df + p) * log(1 + quad)) else
as.vector(- 0.5 * (df + p) * log(1 + quad))
} else {
if (!prop) as.vector(exp(fact) * ((1 + quad)^(-(df + p)/2))) else
as.vector(((1 + quad)^(-(df + p)/2)))
}
}
# Draw n samples from a multivariate Student-t with location mu, scale
# matrix Sigma (or its precomputed eigen() decomposition as a list) and
# df degrees of freedom, via the normal/chi-square scale mixture.
rmvt <- function (n, mu, Sigma, df) {
p <- length(mu)
if (is.list(Sigma)) {
# Sigma already supplied as an eigen() result
ev <- Sigma$values
evec <- Sigma$vectors
} else {
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
}
# X = mu + A z / sqrt(chi2_df / df), A an eigen square root of Sigma;
# small negative eigenvalues from round-off are truncated at zero
X <- drop(mu) + tcrossprod(evec * rep(sqrt(pmax(ev, 0)), each = p),
matrix(rnorm(n * p), n)) / rep(sqrt(rchisq(n, df)/df), each = p)
# a single draw is returned as a vector, otherwise an n x p matrix
if (n == 1L) drop(X) else t.default(X)
}
# Register an S3 method for a generic owned by a suggested package, both
# immediately (when the package is already loaded) and via an onLoad hook
# (when it gets loaded later). Avoids a hard dependency on 'pkg'.
register_s3_method <- function (pkg, generic, class) {
# look up generic.class in the caller's environment (the package namespace)
fun <- get(paste0(generic, ".", class), envir = parent.frame())
if (isNamespaceLoaded(pkg))
registerS3method(generic, class, fun, envir = asNamespace(pkg))
# Also ensure registration is done if pkg is loaded later:
setHook(
packageEvent(pkg, "onLoad"),
function (...)
registerS3method(generic, class, fun, envir = asNamespace(pkg))
)
}
# Package load hook: dynamically register MixMod methods for the
# suggested 'emmeans' and 'effects' packages when they are available.
.onLoad <- function (libname, pkgname) {
if (requireNamespace("emmeans", quietly = TRUE)) {
register_s3_method("emmeans", "recover_data", "MixMod")
register_s3_method("emmeans", "emm_basis", "MixMod")
}
if (requireNamespace("effects", quietly = TRUE)) {
register_s3_method("effects", "Effect", "MixMod")
}
}
constructor_form_random <- function (formula, data) {
    # Normalize the random-effects specification into a named list with one
    # formula per grouping factor; for nested grouping (> 1 factor) the
    # inner levels are re-expressed as dummy plus interaction terms.
    # (`data` is part of the established interface and is not used here.)
    groups <- all.vars(getID_Formula(formula))
    ngroups <- length(groups)
    if (!is.list(formula)) {
        # single specification: replicate the random-effects part per group
        re_part <- getRE_Formula(formula)
        formula <- rep(list(re_part), ngroups)
        names(formula) <- groups
    }
    if (ngroups > 1) {
        nesting <- function (form, group_name) {
            # rewrite ~ x as ~ 0 + g + g:x so the inner level nests in g
            terms_form <- attr(terms(form), "term.labels")
            if (length(terms_form)) {
                interaction_terms <- paste0(group_name, ":", terms_form, collapse = " + ")
                as.formula(paste0("~ 0 + ", group_name, " + ", interaction_terms))
            } else {
                as.formula(paste0("~ 0 + ", group_name))
            }
        }
        formula[-1] <- mapply(nesting, formula[-1], groups[-1], SIMPLIFY = FALSE)
    }
    formula
}
# Assemble the random-effects design matrix subject-by-subject from the
# terms object and model frame, with columns regrouped by term.
# NOTE(review): the loop subsets with `id == i` for i in 1..n_unique,
# which assumes `id` holds consecutive integers 1..n -- confirm callers
# pass match(id, unique(id)) (or equivalent) rather than raw identifiers.
constructor_Z <- function (termsZ_i, mfZ_i, id) {
n <- length(unique(id))
Zmats <- vector("list", n)
for (i in seq_len(n)) {
#mf <- model.frame(termsZ_i, mfZ_i[id == i, , drop = FALSE],
# drop.unused.levels = TRUE)
mf <- mfZ_i[id == i, , drop = FALSE]
mm <- model.matrix(termsZ_i, mf)
# regroup columns by the model term they belong to ('assign' attribute)
assign <- attr(mm, "assign")
assgn <- sapply(unique(assign), function (x) which(assign == x))
if (is.list(assgn))
assgn <- unlist(assgn, use.names = FALSE)
Zmats[[i]] <- mm[, c(t(assgn)), drop = FALSE]
}
do.call("rbind", Zmats)
}
# Expand an ordinal response for continuation-ratio modelling (in the
# spirit of rms::cr.setup): each observation is replicated once per
# applicable conditional binary comparison. Returns the binary response
# 'y', the 'cohort' factor identifying the comparison, the row indices
# 'subs' mapping back to the original data, and the replication counts.
cr_setup <- function (y, direction = c("forward", "backward")) {
direction <- match.arg(direction)
# NOTE(review): substitute("y") quotes the string literal, so yname is
# always the literal "y"; if the caller's variable name was intended in
# the cohort labels this should be substitute(y) -- confirm before
# changing (substitute(y) can deparse to multiple elements for calls).
yname <- as.character(substitute("y"))
if (!is.factor(y)) {
y <- factor(y)
}
ylevels <- levels(y)
ncoefs <- length(ylevels) - 1
if (ncoefs < 2) {
stop("it seems that variable ", yname, " has two levels; use a mixed effects ",
"logistic regression instead.\n")
}
# recode to 0-based integer scores
y <- as.numeric(unclass(y) - 1)
if (direction == "forward") {
# how many conditional comparisons each observation contributes to
reps <- ifelse(is.na(y), 1, ifelse(y < ncoefs - 1, y + 1, ncoefs))
subs <- rep(seq_along(y), reps)
cuts <- vector("list", ncoefs + 2)
cuts[[1]] <- NA
for (j in seq(0, ncoefs)) {
cuts[[j + 2]] <- seq(0, if (j < ncoefs - 1) j else ncoefs - 1)
}
cuts <- unlist(cuts[ifelse(is.na(y), 1, y + 2)], use.names = FALSE)
labels <- c("all", paste0(yname, ">=", ylevels[2:ncoefs]))
y <- rep(y, reps)
Y <- as.numeric(y == cuts)
} else {
# backward direction: condition on being at or below each category
reps <- ifelse(is.na(y), 1, ifelse(y > ncoefs - 3, ncoefs - (y - 1), ncoefs))
subs <- rep(seq_along(y), reps)
cuts <- vector("list", ncoefs + 2)
cuts[[ncoefs + 2]] <- NA
for (j in seq(ncoefs, 0)) {
cuts[[j + 1]] <- seq(0, ncoefs - if (j > ncoefs - 3) j else 1)
}
cuts <- unlist(cuts[ifelse(is.na(y), 1, y + 1)], use.names = FALSE)
labels <- c("all", paste0(yname, "<=", ylevels[ncoefs:2]))
y <- rep(y, reps)
Y <- as.numeric(y == (ncoefs - cuts))
}
cohort <- factor(cuts, levels = seq(0, ncoefs - 1), labels = labels)
list(y = Y, cohort = cohort, subs = subs, reps = reps)
}
# Marginal category probabilities implied by continuation-ratio linear
# predictors (one column of eta per conditional logit): category j gets
# p_j times the product of (1 - p_l) over the preceding comparisons; the
# last (forward) or first (backward) category takes the remaining mass.
cr_marg_probs <- function (eta, direction = c("forward", "backward")) {
direction <- match.arg(direction)
ncoefs <- ncol(eta)
if (direction == "forward") {
# accumulate log(1 - p) terms on the log scale for numerical stability
cumsum_1_minus_p <- t(apply(plogis(eta[, -ncoefs], log.p = TRUE,
lower.tail = FALSE), 1, cumsum))
probs <- exp(plogis(eta, log.p = TRUE) + cbind(0, cumsum_1_minus_p))
cbind(probs, 1 - rowSums(probs))
} else {
# backward: accumulate from the last comparison towards the first
cumsum_1_minus_p <- t(apply(plogis(eta[, seq(ncoefs, 2)], log.p = TRUE,
lower.tail = FALSE), 1, cumsum))
probs <- exp(plogis(eta, log.p = TRUE) +
cbind(cumsum_1_minus_p[, seq(ncoefs - 1, 1)], 0))
cbind(1 - rowSums(probs), probs)
}
} | /R/Functions.R | no_license | Rowling2392/GLMMadaptive | R | false | false | 22,210 | r | find_modes <- function (b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis,
offset_zi_lis, betas, invD, phis, gammas,
canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun,
mu.eta_fun, score_eta_fun, score_phis_fun, score_eta_zi_fun) {
log_post_b <- function (b_i, y_i, N_i, X_i, Z_i, offset_i, X_zi_i, Z_zi_i, offset_zi_i,
betas, invD, phis, gammas, canonical,
user_defined, Zty_i, log_dens, mu_fun, var_fun, mu.eta_fun,
score_eta_fun, score_phis_fun, score_eta_zi_fun) {
ind_Z <- seq_len(ncol(Z_i))
eta_y <- as.vector(X_i %*% betas + Z_i %*% b_i[ind_Z])
if (!is.null(offset_i))
eta_y <- eta_y + offset_i
eta_zi <- if (!is.null(X_zi_i)) as.vector(X_zi_i %*% gammas)
if (!is.null(Z_zi_i))
eta_zi <- eta_zi + as.vector(Z_zi_i %*% b_i[-ind_Z])
if (!is.null(offset_zi_i))
eta_zi <- eta_zi + offset_zi_i
- sum(log_dens(y_i, eta_y, mu_fun, phis, eta_zi), na.rm = TRUE) +
c(0.5 * crossprod(b_i, invD) %*% b_i)
}
score_log_post_b <- function (b_i, y_i, N_i, X_i, Z_i, offset_i,X_zi_i, Z_zi_i, offset_zi_i,
betas, invD, phis, gammas,
canonical, user_defined, Zty_i, log_dens, mu_fun,
var_fun, mu.eta_fun, score_eta_fun, score_phis_fun,
score_eta_zi_fun) {
eta_y <- as.vector(X_i %*% betas + Z_i %*% b_i[seq_len(ncol(Z_i))])
if (!is.null(offset_i))
eta_y <- eta_y + offset_i
eta_zi <- if (!is.null(X_zi_i)) as.vector(X_zi_i %*% gammas)
if (!is.null(Z_zi_i))
eta_zi <- eta_zi + as.vector(Z_zi_i %*% b_i[-seq_len(ncol(Z_i))])
if (!is.null(offset_zi_i))
eta_zi <- eta_zi + offset_zi_i
mu_y <- mu_fun(eta_y)
log_dens_part <- if (user_defined) {
out <- if (!is.null(score_eta_fun)) {
- crossprod(Z_i, score_eta_fun(y_i, mu_y, phis, eta_zi))
} else {
l1 <- log_dens(y_i, eta_y + 1e-04, mu_fun, phis, eta_zi)
l2 <- log_dens(y_i, eta_y - 1e-04, mu_fun, phis, eta_zi)
- crossprod(Z_i, (l1 - l2) / (2 * 1e-04))
}
if (!is.null(Z_zi_i)) {
out <- if (!is.null(score_eta_zi_fun)) {
c(out, - crossprod(Z_zi_i, score_eta_zi_fun(y_i, mu_y, phis, eta_zi)))
} else {
l1 <- log_dens(y_i, eta_y, mu_fun, phis, eta_zi + 1e-04)
l2 <- log_dens(y_i, eta_y, mu_fun, phis, eta_zi - 1e-04)
c(out, - crossprod(Z_zi_i, (l1 - l2) / (2 * 1e-04)))
}
}
out
} else {
if (canonical) {
if (!is.null(N_i))- Zty_i + crossprod(Z_i, N_i * mu_y) else
- Zty_i + crossprod(Z_i, mu_y)
} else {
var <- var_fun(mu_y)
deriv <- mu.eta_fun(eta_y)
if (!is.null(N_i)) - crossprod(Z_i, (y_i[, 1] - N_i * mu_y) * deriv / var) else
- crossprod(Z_i, (y_i - mu_y) * deriv / var)
}
}
drop(log_dens_part + invD %*% b_i)
}
n <- length(y_lis)
post_modes <- b
post_hessians <- vector("list", n)
for (i in seq_len(n)) {
y_i <- y_lis[[i]]
N_i <- if (!is.null(N_lis)) N_lis[[i]]
X_i <- X_lis[[i]]
Z_i <- Z_lis[[i]]
offset_i <- if (!is.null(offset_lis)) offset_lis[[i]]
Zty_i <- Zty_lis[[i]]
X_zi_i <- if (!is.null(X_zi_lis)) X_zi_lis[[i]]
Z_zi_i <- if (!is.null(Z_zi_lis)) Z_zi_lis[[i]]
offset_zi_i <- if (!is.null(offset_zi_lis)) offset_zi_lis[[i]]
b_i <- b[i, ]
opt_i <- optim(par = b_i, fn = log_post_b, gr = score_log_post_b, method = "BFGS",
y_i = y_i, N_i = N_i, X_i = X_i, Z_i = Z_i, offset_i = offset_i,
X_zi_i = X_zi_i, Z_zi_i = Z_zi_i, offset_zi_i = offset_zi_i,
betas = betas, invD = invD, phis = phis, gammas = gammas,
canonical = canonical,
user_defined = user_defined, Zty_i = Zty_i, log_dens = log_dens,
mu_fun = mu_fun, var_fun = var_fun, mu.eta_fun = mu.eta_fun,
score_eta_fun = score_eta_fun, score_phis_fun = score_phis_fun,
score_eta_zi_fun = score_eta_zi_fun)
post_modes[i, ] <- opt_i$par
post_hessians[[i]] <- cd_vec(post_modes[i, ], score_log_post_b,
y_i = y_i, N_i = N_i, X_i = X_i, Z_i = Z_i,
offset_i = offset_i, X_zi_i = X_zi_i, Z_zi_i = Z_zi_i,
offset_zi_i = offset_zi_i, betas = betas, invD = invD,
phis = phis, gammas = gammas, canonical = canonical,
user_defined = user_defined,
Zty_i = Zty_i, log_dens = log_dens, mu_fun = mu_fun,
var_fun = var_fun, mu.eta_fun = mu.eta_fun,
score_eta_fun = score_eta_fun,
score_phis_fun = score_phis_fun,
score_eta_zi_fun = score_eta_zi_fun)
}
list(post_modes = post_modes, post_hessians = post_hessians)
}
GHfun <- function (b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis, offset_zi_lis,
                   betas, inv_D, phis, gammas, k, q,
                   canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun, mu.eta_fun,
                   score_eta_fun, score_phis_fun, score_eta_zi_fun) {
    # Adaptive Gauss-Hermite quadrature machinery: build the k^q tensor grid
    # of standard GH abscissas and re-center/re-scale it at each subject's
    # posterior mode using the Cholesky factor of the posterior Hessian from
    # find_modes(). Returns the transformed abscissas (b), their outer
    # products (b2), the linear-predictor contributions Z'b, the adapted
    # weights, per-subject log-determinant terms, and the posterior
    # modes/variances.
    GH <- gauher(k)
    aGH <- find_modes(b, y_lis, N_lis, X_lis, Z_lis, offset_lis, X_zi_lis, Z_zi_lis,
                      offset_zi_lis, betas, inv_D, phis, gammas,
                      canonical, user_defined, Zty_lis, log_dens, mu_fun, var_fun,
                      mu.eta_fun, score_eta_fun, score_phis_fun, score_eta_zi_fun)
    modes <- aGH$post_modes
    # fix: use the full element name 'post_hessians'; the original spelled
    # 'post_hessian' and silently relied on `$` partial matching
    chol_hessians <- lapply(aGH$post_hessians, chol)
    # q-dimensional tensor-product grid of standard GH nodes
    b <- as.matrix(expand.grid(lapply(seq_len(q), function (j, u) u$x, u = GH)))
    n <- nrow(modes)
    b_new <- vector("list", n)
    log_dets <- numeric(n)
    for (i in seq_len(n)) {
        # shift/scale: b_i = mode_i + sqrt(2) * B_i^{-1} t(grid), with B_i
        # the upper Cholesky factor of the posterior Hessian
        b_new[[i]] <- t(sqrt(2) * solve(chol_hessians[[i]], t(b)) + modes[i, ])
        log_dets[i] <- - determinant.matrix(chol_hessians[[i]], logarithm = TRUE)$modulus
    }
    # product weights of the tensor grid, including the exp(b'b) factor of
    # the adaptive GH rule
    wGH <- as.matrix(expand.grid(lapply(seq_len(q), function (j, u) u$w, u = GH)))
    wGH <- 2^(q/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
    # outer products b b' per quadrature point (needed for D-matrix updates)
    b2 <- lapply(b_new, function (bb) if (q == 1) bb * bb else
        t(apply(bb, 1, function (x) x %o% x)))
    ind_Z <- seq_len(ncol(Z_lis[[1]]))
    Ztb <- do.call('rbind', mapply(function (z, bb) z %*% t(bb[, ind_Z, drop = FALSE]),
                                   Z_lis, b_new, SIMPLIFY = FALSE))
    # zero-inflation part uses the remaining columns of the abscissas
    Z_zitb <- if (!is.null(Z_zi_lis[[1]])) {
        do.call('rbind', mapply(function (z, bb) z %*% t(bb[, -ind_Z, drop = FALSE]),
                                Z_zi_lis, b_new, SIMPLIFY = FALSE))
    }
    list(b = do.call('rbind', b_new), b2 = do.call('rbind', b2), Ztb = Ztb, Z_zitb = Z_zitb,
         wGH = wGH, log_dets = log_dets, post_modes = modes,
         post_vars = lapply(aGH$post_hessians, solve))
}
chol_transf <- function (x) {
if (any(is.na(x) | !is.finite(x)))
stop("NA or infinite values in 'x'.\n")
if (is.matrix(x)) {
k <- nrow(x)
U <- chol(x)
U[cbind(1:k, 1:k)] <- log(U[cbind(1:k, 1:k)])
U[upper.tri(U, TRUE)]
} else {
nx <- length(x)
k <- round((-1 + sqrt(1 + 8 * nx))/2)
mat <- matrix(0, k, k)
mat[upper.tri(mat, TRUE)] <- x
mat[cbind(1:k, 1:k)] <- exp(mat[cbind(1:k, 1:k)])
res <- crossprod(mat)
attr(res, "L") <- t(mat)[lower.tri(mat, TRUE)]
res
}
}
deriv_D <- function (D) {
    # Partial derivatives of the covariance matrix D with respect to each of
    # its free (lower-triangular, including diagonal) elements: a list of
    # symmetric indicator matrices with a 1 at the element's position and at
    # its transpose position.
    ncz <- nrow(D)
    ind <- which(lower.tri(D, TRUE), arr.ind = TRUE)
    dimnames(ind) <- NULL
    nind <- nrow(ind)
    # fix: the original computed svD <- solve(D) here but never used it;
    # the needless inverse is dropped (also avoids failing on singular D)
    lapply(seq_len(nind), function (x, ind) {
        mat <- matrix(0, ncz, ncz)
        ii <- ind[x, , drop = FALSE]
        mat[ii[1], ii[2]] <- mat[ii[2], ii[1]] <- 1
        mat
    }, ind = ind[, 2:1, drop = FALSE])
}
jacobian2 <- function (L, ncz) {
ind <- which(lower.tri(matrix(0, ncz, ncz), TRUE), arr.ind = TRUE)
dimnames(ind) <- NULL
nind <- nrow(ind)
id <- 1:nind
rind <- which(ind[, 1] == ind[, 2])
lind <- vector("list", length(rind))
for (i in seq_along(rind)) {
tt <- matrix(0, ncz - i + 1, ncz - i + 1)
tt[lower.tri(tt, TRUE)] <- seq(rind[i], nind)
tt <- tt + t(tt)
diag(tt) <- diag(tt)/2
lind[[i]] <- tt
}
out <- matrix(0, nind, nind)
for (g in 1:ncz) {
gind <- id[g == ind[, 2]]
vals <- L[gind]
for (j in gind) {
k <- which(j == gind)
out[cbind(lind[[g]][k, ], j)] <- if (j %in% rind) vals[1] * vals else vals
}
}
out[rind, ] <- 2 * out[rind, ]
col.ind <- matrix(0, ncz, ncz)
col.ind[lower.tri(col.ind, TRUE)] <- seq(1, length(L))
col.ind <- t(col.ind)
out[, col.ind[upper.tri(col.ind, TRUE)]]
}
fd <- function (x, f, ..., eps = .Machine$double.eps^0.25) {
n <- length(x)
res <- numeric(n)
ex <- eps * (abs(x) + eps)
f0 <- f(x, ...)
for (i in seq_len(n)) {
x1 <- x
x1[i] <- x[i] + ex[i]
diff.f <- c(f(x1, ...) - f0)
diff.x <- x1[i] - x[i]
res[i] <- diff.f / diff.x
}
res
}
fd_vec <- function (x, f, ..., eps = .Machine$double.eps^0.25) {
n <- length(x)
res <- matrix(0, n, n)
ex <- pmax(abs(x), 1)
f0 <- f(x, ...)
for (i in 1:n) {
x1 <- x
x1[i] <- x[i] + eps * ex[i]
diff.f <- c(f(x1, ...) - f0)
diff.x <- x1[i] - x[i]
res[, i] <- diff.f / diff.x
}
0.5 * (res + t(res))
}
cd <- function (x, f, ..., eps = 0.001) {
n <- length(x)
res <- numeric(n)
ex <- pmax(abs(x), 1)
for (i in seq_len(n)) {
x1 <- x2 <- x
x1[i] <- x[i] + eps * ex[i]
x2[i] <- x[i] - eps * ex[i]
diff.f <- c(f(x1, ...) - f(x2, ...))
diff.x <- x1[i] - x2[i]
res[i] <- diff.f / diff.x
}
res
}
cd_vec <- function (x, f, ..., eps = 0.001) {
n <- length(x)
res <- matrix(0, n, n)
ex <- pmax(abs(x), 1)
for (i in seq_len(n)) {
x1 <- x2 <- x
x1[i] <- x[i] + eps * ex[i]
x2[i] <- x[i] - eps * ex[i]
diff.f <- c(f(x1, ...) - f(x2, ...))
diff.x <- x1[i] - x2[i]
res[, i] <- diff.f / diff.x
}
0.5 * (res + t(res))
}
dmvnorm <- function (x, mu, Sigma, log = FALSE) {
if (!is.matrix(x))
x <- rbind(x)
p <- length(mu)
if (p == 1) {
dnorm(x, mu, sqrt(Sigma), log = log)
} else {
t1 <- length(mu) == length(Sigma)
t2 <- all(abs(Sigma[lower.tri(Sigma)]) < sqrt(.Machine$double.eps))
if (t1 || t2) {
if (!t1)
Sigma <- diag(Sigma)
nx <- nrow(x)
ff <- rowSums(dnorm(x, rep(mu, each = nx),
sd = rep(sqrt(Sigma), each = nx), log = TRUE))
if (log) ff else exp(ff)
} else {
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
if (!all(ev >= -1e-06 * abs(ev[1])))
stop("'Sigma' is not positive definite")
ss <- x - rep(mu, each = nrow(x))
inv.Sigma <- evec %*% (t(evec)/ev)
quad <- 0.5 * rowSums((ss %*% inv.Sigma) * ss)
fact <- - 0.5 * (p * log(2 * pi) + sum(log(ev)))
if (log) as.vector(fact - quad) else as.vector(exp(fact - quad))
}
}
}
unattr <- function (x) {
if (is_mat <- is.matrix(x)) {
d <- dim(x)
}
attributes(x) <- NULL
if (is_mat) {
dim(x) <- d
}
x
}
gauher <- function (n) {
m <- trunc((n + 1) / 2)
x <- w <- rep(-1, n)
for (i in seq_len(m)) {
z <- if (i == 1) {
sqrt(2 * n + 1) - 1.85575 * (2 * n + 1)^(-0.16667)
} else if (i == 2) {
z - 1.14 * n^0.426/z
} else if (i == 3) {
1.86 * z - 0.86 * x[1]
} else if (i == 4) {
1.91 * z - 0.91 * x[2]
} else {
2 * z - x[i - 2]
}
for (its in seq_len(10)) {
p1 <- 0.751125544464943
p2 <- 0
for (j in seq_len(n)) {
p3 <- p2
p2 <- p1
p1 <- z * sqrt(2 / j) * p2 - sqrt((j - 1) / j) * p3
}
pp <- sqrt(2 * n) * p2
z1 <- z
z <- z1 - p1/pp
if (abs(z - z1) <= 3e-14)
break
}
x[i] <- z
x[n + 1 - i] <- -z
w[i] <- 2 / (pp * pp)
w[n + 1 - i] <- w[i]
}
list(x = x, w = w)
}
nearPD <- function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08,
                    maxits = 100) {
    # Nearest positive-definite matrix in the spirit of Higham's
    # alternating-projections method (cf. Matrix::nearPD), followed by an
    # eigenvalue floor that rescales to keep the diagonal close to the
    # original. Input must be a symmetric numeric matrix.
    if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
        stop("Input matrix M must be square and symmetric.\n")
    inorm <- function(x) max(rowSums(abs(x)))   # infinity norm
    n <- ncol(M)
    U <- matrix(0.0, n, n)                      # Dykstra-style correction term
    X <- M
    iter <- 0
    converged <- FALSE
    while (iter < maxits && !converged) {
        Y <- X
        # renamed from 'T' (the original shadowed the TRUE alias)
        Tmat <- Y - U
        # NOTE(review): the classic Dykstra/Higham iteration would decompose
        # Tmat (= Y - U) here rather than Y -- confirm against Matrix::nearPD
        # before changing; behavior is kept identical to the original.
        e <- eigen(Y, symmetric = TRUE)
        Q <- e$vectors
        d <- e$values
        D <- if (length(d) > 1) diag(d) else as.matrix(d)
        # keep only eigenvalues above the relative tolerance
        p <- (d > eig.tol * d[1])
        QQ <- Q[, p, drop = FALSE]
        X <- QQ %*% D[p, p, drop = FALSE] %*% t(QQ)
        U <- X - Tmat
        X <- (X + t(X)) / 2                     # re-symmetrize against round-off
        conv <- inorm(Y - X)/inorm(Y)
        iter <- iter + 1
        converged <- conv <= conv.tol
    }
    X <- (X + t(X)) / 2
    # final safeguard: floor the eigenvalues at a small positive value and
    # rescale so the diagonal stays close to the pre-floor diagonal
    e <- eigen(X, symmetric = TRUE)
    d <- e$values
    Eps <- posd.tol * abs(d[1L])
    if (d[n] < Eps) {
        d[d < Eps] <- Eps
        Q <- e$vectors
        o.diag <- diag(X)
        X <- Q %*% (d * t(Q))
        D <- sqrt(pmax(Eps, o.diag) / diag(X))
        X[] <- D * X * rep(D, each = n)
    }
    (X + t(X)) / 2
}
getRE_Formula <- function (form) {
if (!(inherits(form, "formula"))) {
stop("formula(object) must return a formula")
}
form <- form[[length(form)]]
if (length(form) == 3 && (form[[1]] == as.name("|") || form[[1]] == as.name("||"))) {
form <- form[[2]]
}
eval(substitute(~form))
}
getID_Formula <- function (form) {
if (is.list(form)) {
nams <- names(form)
as.formula(paste0("~", nams[1L], "/", nams[2L]))
} else {
form <- form[[length(form)]]
asOneSidedFormula(form[[3]])
}
}
printCall <- function (call) {
d <- deparse(call)
if (length(d) <= 3) {
paste(d, sep = "\n", collapse = "\n")
} else {
d <- d[1:3]
d[3] <- paste0(d[3], "...")
paste(d, sep = "\n", collapse = "\n")
}
}
dgt <- function (x, mu = 0, sigma = 1, df = stop("no df argument."), log = FALSE) {
if (log) {
dt(x = (x - mu) / sigma, df = df, log = TRUE) - log(sigma)
} else {
dt(x = (x - mu) / sigma, df = df) / sigma
}
}
dmvt <- function (x, mu, Sigma = NULL, invSigma = NULL, df, log = TRUE, prop = TRUE) {
if (!is.numeric(x))
stop("'x' must be a numeric matrix or vector")
if (!is.matrix(x))
x <- rbind(x)
p <- length(mu)
if (is.null(Sigma) && is.null(invSigma))
stop("'Sigma' or 'invSigma' must be given.")
if (!is.null(Sigma)) {
if (is.list(Sigma)) {
ev <- Sigma$values
evec <- Sigma$vectors
} else {
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
}
if (!all(ev >= -1e-06 * abs(ev[1])))
stop("'Sigma' is not positive definite")
invSigma <- evec %*% (t(evec)/ev)
if (!prop)
logdetSigma <- sum(log(ev))
} else {
if (!prop)
logdetSigma <- c(-determinant(invSigma)$modulus)
}
ss <- x - rep(mu, each = nrow(x))
quad <- rowSums((ss %*% invSigma) * ss)/df
if (!prop)
fact <- lgamma((df + p)/2) - lgamma(df/2) -
0.5 * (p * (log(pi) + log(df)) + logdetSigma)
if (log) {
if (!prop) as.vector(fact - 0.5 * (df + p) * log(1 + quad)) else
as.vector(- 0.5 * (df + p) * log(1 + quad))
} else {
if (!prop) as.vector(exp(fact) * ((1 + quad)^(-(df + p)/2))) else
as.vector(((1 + quad)^(-(df + p)/2)))
}
}
rmvt <- function (n, mu, Sigma, df) {
p <- length(mu)
if (is.list(Sigma)) {
ev <- Sigma$values
evec <- Sigma$vectors
} else {
ed <- eigen(Sigma, symmetric = TRUE)
ev <- ed$values
evec <- ed$vectors
}
X <- drop(mu) + tcrossprod(evec * rep(sqrt(pmax(ev, 0)), each = p),
matrix(rnorm(n * p), n)) / rep(sqrt(rchisq(n, df)/df), each = p)
if (n == 1L) drop(X) else t.default(X)
}
register_s3_method <- function (pkg, generic, class) {
fun <- get(paste0(generic, ".", class), envir = parent.frame())
if (isNamespaceLoaded(pkg))
registerS3method(generic, class, fun, envir = asNamespace(pkg))
# Also ensure registration is done if pkg is loaded later:
setHook(
packageEvent(pkg, "onLoad"),
function (...)
registerS3method(generic, class, fun, envir = asNamespace(pkg))
)
}
.onLoad <- function (libname, pkgname) {
if (requireNamespace("emmeans", quietly = TRUE)) {
register_s3_method("emmeans", "recover_data", "MixMod")
register_s3_method("emmeans", "emm_basis", "MixMod")
}
if (requireNamespace("effects", quietly = TRUE)) {
register_s3_method("effects", "Effect", "MixMod")
}
}
constructor_form_random <- function (formula, data) {
groups <- all.vars(getID_Formula(formula))
ngroups <- length(groups)
formula <- if (!is.list(formula)) {
form_random <- vector("list", ngroups)
names(form_random) <- groups
form_random[] <- lapply(form_random, function (x) getRE_Formula(formula))
} else formula
if (ngroups > 1) {
nesting <- function (form, group_name) {
terms_form <- attr(terms(form), "term.labels")
if (length(terms_form)) {
interaction_terms <- paste0(group_name, ":", terms_form, collapse = " + ")
as.formula(paste0("~ 0 + ", group_name, " + ", interaction_terms))
} else {
as.formula(paste0("~ 0 + ", group_name))
}
}
formula[-1] <- mapply(nesting, formula[-1], groups[-1], SIMPLIFY = FALSE)
}
formula
}
constructor_Z <- function (termsZ_i, mfZ_i, id) {
n <- length(unique(id))
Zmats <- vector("list", n)
for (i in seq_len(n)) {
#mf <- model.frame(termsZ_i, mfZ_i[id == i, , drop = FALSE],
# drop.unused.levels = TRUE)
mf <- mfZ_i[id == i, , drop = FALSE]
mm <- model.matrix(termsZ_i, mf)
assign <- attr(mm, "assign")
assgn <- sapply(unique(assign), function (x) which(assign == x))
if (is.list(assgn))
assgn <- unlist(assgn, use.names = FALSE)
Zmats[[i]] <- mm[, c(t(assgn)), drop = FALSE]
}
do.call("rbind", Zmats)
}
cr_setup <- function (y, direction = c("forward", "backward")) {
direction <- match.arg(direction)
yname <- as.character(substitute("y"))
if (!is.factor(y)) {
y <- factor(y)
}
ylevels <- levels(y)
ncoefs <- length(ylevels) - 1
if (ncoefs < 2) {
stop("it seems that variable ", yname, " has two levels; use a mixed effects ",
"logistic regression instead.\n")
}
y <- as.numeric(unclass(y) - 1)
if (direction == "forward") {
reps <- ifelse(is.na(y), 1, ifelse(y < ncoefs - 1, y + 1, ncoefs))
subs <- rep(seq_along(y), reps)
cuts <- vector("list", ncoefs + 2)
cuts[[1]] <- NA
for (j in seq(0, ncoefs)) {
cuts[[j + 2]] <- seq(0, if (j < ncoefs - 1) j else ncoefs - 1)
}
cuts <- unlist(cuts[ifelse(is.na(y), 1, y + 2)], use.names = FALSE)
labels <- c("all", paste0(yname, ">=", ylevels[2:ncoefs]))
y <- rep(y, reps)
Y <- as.numeric(y == cuts)
} else {
reps <- ifelse(is.na(y), 1, ifelse(y > ncoefs - 3, ncoefs - (y - 1), ncoefs))
subs <- rep(seq_along(y), reps)
cuts <- vector("list", ncoefs + 2)
cuts[[ncoefs + 2]] <- NA
for (j in seq(ncoefs, 0)) {
cuts[[j + 1]] <- seq(0, ncoefs - if (j > ncoefs - 3) j else 1)
}
cuts <- unlist(cuts[ifelse(is.na(y), 1, y + 1)], use.names = FALSE)
labels <- c("all", paste0(yname, "<=", ylevels[ncoefs:2]))
y <- rep(y, reps)
Y <- as.numeric(y == (ncoefs - cuts))
}
cohort <- factor(cuts, levels = seq(0, ncoefs - 1), labels = labels)
list(y = Y, cohort = cohort, subs = subs, reps = reps)
}
cr_marg_probs <- function (eta, direction = c("forward", "backward")) {
direction <- match.arg(direction)
ncoefs <- ncol(eta)
if (direction == "forward") {
cumsum_1_minus_p <- t(apply(plogis(eta[, -ncoefs], log.p = TRUE,
lower.tail = FALSE), 1, cumsum))
probs <- exp(plogis(eta, log.p = TRUE) + cbind(0, cumsum_1_minus_p))
cbind(probs, 1 - rowSums(probs))
} else {
cumsum_1_minus_p <- t(apply(plogis(eta[, seq(ncoefs, 2)], log.p = TRUE,
lower.tail = FALSE), 1, cumsum))
probs <- exp(plogis(eta, log.p = TRUE) +
cbind(cumsum_1_minus_p[, seq(ncoefs - 1, 1)], 0))
cbind(1 - rowSums(probs), probs)
}
} |
install.packages("dplyr")
library("dplyr")
install.packages("NbClust")
library("NbClust")
cus <- read.csv("C:/Users/Customer2001.csv")
cus %>% head()
cus.spe <- cus %>% filter(cus$Peak_calls_Sum, cus$OffPeak_calls_Sum, cus$Weekend_calls_Sum, cus$AvePeak, cus$AveOffPeak, cus$AveWeekend)
cus.spe <- cus %>% select("Peak_calls_Sum", "OffPeak_calls_Sum", "Weekend_calls_Sum", "AvePeak", "AveOffPeak", "AveWeekend")
pred <- kc$cluster
target <- cus
### 최종 분류된 kmeans를 바탕으로 집단을 나누고 어떤 상품을 최종적으로 추천해줄지 결과물 도출하기
### 까지가 기말보고서
nc <- NbClust(cus.spe, min.nc = 2, max.nc = 10, method = "kmeans")
kc2 <- kmeans(cus.spe,2)
kc3 <- kmeans(cus.spe,3)
kc$cluster
kc$centers
############ 교수님 코드
data.raw <- read.csv("C:/Users/Customer2001.csv")
data.raw %>% dim()
data.raw %>% head()
library("NbClust")
nc <- NbClust(data.raw, min.nc=2, max.nc=10, method="kmeans")
kc <- kmeans(data.raw, 2)
kc$cluster
kc$centers
kc.3 <- kmeans(data.raw, 3)
kc.3$cluster
| /19_11_05_kmeans_useCustomer.R | no_license | johnjeongukhur/2nd_semester_opendata | R | false | false | 1,123 | r | install.packages("dplyr")
library("dplyr")
install.packages("NbClust")
library("NbClust")
cus <- read.csv("C:/Users/Customer2001.csv")
cus %>% head()
cus.spe <- cus %>% filter(cus$Peak_calls_Sum, cus$OffPeak_calls_Sum, cus$Weekend_calls_Sum, cus$AvePeak, cus$AveOffPeak, cus$AveWeekend)
cus.spe <- cus %>% select("Peak_calls_Sum", "OffPeak_calls_Sum", "Weekend_calls_Sum", "AvePeak", "AveOffPeak", "AveWeekend")
pred <- kc$cluster
target <- cus
### 최종 분류된 kmeans를 바탕으로 집단을 나누고 어떤 상품을 최종적으로 추천해줄지 결과물 도출하기
### 까지가 기말보고서
nc <- NbClust(cus.spe, min.nc = 2, max.nc = 10, method = "kmeans")
kc2 <- kmeans(cus.spe,2)
kc3 <- kmeans(cus.spe,3)
kc$cluster
kc$centers
############ 교수님 코드
data.raw <- read.csv("C:/Users/Customer2001.csv")
data.raw %>% dim()
data.raw %>% head()
library("NbClust")
nc <- NbClust(data.raw, min.nc=2, max.nc=10, method="kmeans")
kc <- kmeans(data.raw, 2)
kc$cluster
kc$centers
kc.3 <- kmeans(data.raw, 3)
kc.3$cluster
|
gendata10=function(p,set=1,method,s_sign,s=10,n=200,sigmaT=1.5,seed=555)
{ ## Input:
## n: Sample size
## p: Dimension
## s: Number of non-zero coefficients
## s_sign: sign of the non-zero coefficients, sX1 vector of 0 or 1
## sigmaT: True error standard deviation
## set: Takes value 1;
## If set=1, non-zero values of beta = {0.75, 1, 1.25,1.5, 1.75, 2, 2.25, 2.5,2.75,3}
## Else The non-zero coefficients are generated according to Fan and Lv (2007)
## method: Takes values {1, 2, 3, 4, 5};
## If method=1, rows of X are independently generated from Np(0,Ip)
## If method=2, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.5
## If method=3, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.5^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## If method=4, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.7^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## If method=5, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.9^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## seed: random seed
## Output:
## y: Response, a n by 1 vector
## X: Design matrix of dimension n by p
## betaT: True coefficient vector with s non-zero elements and (p-s) zeros
set.seed(seed)
library(MASS)
if(method==1)
{
X=matrix(rnorm(n*p),n,p)
}
else if(method==2)
{
X=matrix(mvrnorm(n,rep(0,p),diag(0.5,p)+matrix(0.5,p,p)),n,p)
}
else if(method==3)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.5^(0:(p-1)))),n,p)
}
else if(method==4)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.7^(0:(p-1)))),n,p)
}
else if(method==5)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.9^(0:(p-1)))),n,p)
}
else
{
stop("Invalid value for method")
}
#
if(set==1)
{
beta.true=(-1)^s_sign*(c(0.75, 1, 1.25,1.5, 1.75, 2, 2.25, 2.5,2.75,3))
}
else
{
a=4*log(n)/sqrt(n) # Refer to Fan and Lv (2007)
beta.true=(-1)^s_sign*(a+abs(rnorm(s)))
}
betaT=c(beta.true,rep(0,p-s))
y=X%*%betaT+rnorm(n)*sigmaT
result=list("Response"=y,"Design_matrix"=X,"True_Beta"=betaT)
return(result)
}
# End of function | /gendata10.R | no_license | raypallavi/SAVS | R | false | false | 2,613 | r | gendata10=function(p,set=1,method,s_sign,s=10,n=200,sigmaT=1.5,seed=555)
{ ## Input:
## n: Sample size
## p: Dimension
## s: Number of non-zero coefficients
## s_sign: sign of the non-zero coefficients, sX1 vector of 0 or 1
## sigmaT: True error standard deviation
## set: Takes value 1;
## If set=1, non-zero values of beta = {0.75, 1, 1.25,1.5, 1.75, 2, 2.25, 2.5,2.75,3}
## Else The non-zero coefficients are generated according to Fan and Lv (2007)
## method: Takes values {1, 2, 3, 4, 5};
## If method=1, rows of X are independently generated from Np(0,Ip)
## If method=2, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.5
## If method=3, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.5^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## If method=4, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.7^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## If method=5, rows of X are independently generated from Np(0,\Sigma)
## where diagonals of \Sigma are 1 and all off-diagonals are 0.9^|j-j'|
## i.e. TOEPLITZ STRUCTURE
## seed: random seed
## Output:
## y: Response, a n by 1 vector
## X: Design matrix of dimension n by p
## betaT: True coefficient vector with s non-zero elements and (p-s) zeros
set.seed(seed)
library(MASS)
if(method==1)
{
X=matrix(rnorm(n*p),n,p)
}
else if(method==2)
{
X=matrix(mvrnorm(n,rep(0,p),diag(0.5,p)+matrix(0.5,p,p)),n,p)
}
else if(method==3)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.5^(0:(p-1)))),n,p)
}
else if(method==4)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.7^(0:(p-1)))),n,p)
}
else if(method==5)
{
X=matrix(mvrnorm(n,rep(0,p),toeplitz(0.9^(0:(p-1)))),n,p)
}
else
{
stop("Invalid value for method")
}
#
if(set==1)
{
beta.true=(-1)^s_sign*(c(0.75, 1, 1.25,1.5, 1.75, 2, 2.25, 2.5,2.75,3))
}
else
{
a=4*log(n)/sqrt(n) # Refer to Fan and Lv (2007)
beta.true=(-1)^s_sign*(a+abs(rnorm(s)))
}
betaT=c(beta.true,rep(0,p-s))
y=X%*%betaT+rnorm(n)*sigmaT
result=list("Response"=y,"Design_matrix"=X,"True_Beta"=betaT)
return(result)
}
# End of function |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modRisk.R
\docType{methods}
\name{modRisk}
\alias{modRisk}
\title{Global risk using log-linear models.}
\usage{
modRisk(obj, method = "default", weights, formulaM, bound = Inf, ...)
}
\arguments{
\item{obj}{An \code{\link{sdcMicroObj-class}}-object or a numeric matrix
or data.frame containing all variables required in the specified model.}
\item{method}{chose method for model-based risk-estimation. Currently, the
following methods can be selected:
\itemize{
\item "default": the standard log-linear model.
\item "CE": the Clogg Eliason method, additionally, considers survey weights by using an offset term.
\item "PML": the pseudo maximum likelihood method.
\item "weightedLLM": the weighted maximum likelihood method, considers survey weights by including them as one of the predictors.
\item "IPF": iterative proportional fitting as used in deprecated method 'LLmodGlobalRisk'.
}}
\item{weights}{a variable name specifying sampling weights}
\item{formulaM}{A formula specifying the model.}
\item{bound}{a number specifying a threshold for 'risky' observations in the sample.}
\item{...}{additional parameters passed through, currently ignored.}
}
\value{
Two global risk measures and some model output given the specified model. If this method
is applied to an \code{\link{sdcMicroObj-class}}-object, the slot 'risk' in the object ist updated
with the result of the model-based risk-calculation.
}
\description{
The sample frequencies are assumed to be independent and following a Poisson
distribution. The parameters of the corresponding parameters are estimated
by a log-linear model including the main effects and possible interactions.
}
\details{
This measure aims to (1) calculate the number of sample uniques that are
population uniques with a probabilistic Poisson model and (2) to estimate
the expected number of correct matches for sample uniques.
ad 1) this risk measure is defined over all sample uniques as \deqn{ \tau_1
= \sum\limits_{j:f_j=1} P(F_j=1 | f_j=1) \quad , } i.e. the expected number
of sample uniques that are population uniques.
ad 2) this risk measure is defined over all sample uniques as \deqn{ \tau_2
= \sum\limits_{j:f_j=1} P(1 / F_j | f_j=1) \quad . }
Since population frequencies \eqn{F_k} are unknown, they need to be
estimated.
The iterative proportional fitting method is used to fit the parameters of
the Poisson distributed frequency counts related to the model specified to
fit the frequency counts. The obtained parameters are used to estimate a
global risk, defined in Skinner and Holmes (1998).
}
\examples{
## data.frame method
data(testdata2)
form <- ~sex+water+roof
w <- "sampling_weight"
(modRisk(testdata2, method="default", formulaM=form, weights=w))
(modRisk(testdata2, method="CE", formulaM=form, weights=w))
(modRisk(testdata2, method="PML", formulaM=form, weights=w))
(modRisk(testdata2, method="weightedLLM", formulaM=form, weights=w))
(modRisk(testdata2, method="IPF", formulaM=form, weights=w))
## application to a sdcMicroObj
data(testdata2)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- modRisk(sdc,form=~sex+water+roof)
slot(sdc, "risk")$model
}
\references{
Skinner, C.J. and Holmes, D.J. (1998) \emph{Estimating the
re-identification risk per record in microdata}. Journal of Official
Statistics, 14:361-372, 1998.
Rinott, Y. and Shlomo, N. (1998). \emph{A Generalized Negative Binomial
Smoothing Model for Sample Disclosure Risk Estimation}. Privacy in
Statistical Databases. Lecture Notes in Computer Science. Springer-Verlag,
82--93.
Clogg, C.C. and Eliasson, S.R. (1987). \emph{Some Common Problems in Log-Linear Analysis}. Sociological Methods and Research, 8-44.
}
\seealso{
\code{\link{loglm}}, \code{\link{measure_risk}}
}
\author{
Matthias Templ, Marius Totter, Bernhard Meindl
}
\keyword{manip}
| /man/modRisk.Rd | no_license | leebrian/sdcMicro | R | false | true | 3,982 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modRisk.R
\docType{methods}
\name{modRisk}
\alias{modRisk}
\title{Global risk using log-linear models.}
\usage{
modRisk(obj, method = "default", weights, formulaM, bound = Inf, ...)
}
\arguments{
\item{obj}{An \code{\link{sdcMicroObj-class}}-object or a numeric matrix
or data.frame containing all variables required in the specified model.}
\item{method}{chose method for model-based risk-estimation. Currently, the
following methods can be selected:
\itemize{
\item "default": the standard log-linear model.
\item "CE": the Clogg Eliason method, additionally, considers survey weights by using an offset term.
\item "PML": the pseudo maximum likelihood method.
\item "weightedLLM": the weighted maximum likelihood method, considers survey weights by including them as one of the predictors.
\item "IPF": iterative proportional fitting as used in deprecated method 'LLmodGlobalRisk'.
}}
\item{weights}{a variable name specifying sampling weights}
\item{formulaM}{A formula specifying the model.}
\item{bound}{a number specifying a threshold for 'risky' observations in the sample.}
\item{...}{additional parameters passed through, currently ignored.}
}
\value{
Two global risk measures and some model output given the specified model. If this method
is applied to an \code{\link{sdcMicroObj-class}}-object, the slot 'risk' in the object ist updated
with the result of the model-based risk-calculation.
}
\description{
The sample frequencies are assumed to be independent and following a Poisson
distribution. The parameters of the corresponding parameters are estimated
by a log-linear model including the main effects and possible interactions.
}
\details{
This measure aims to (1) calculate the number of sample uniques that are
population uniques with a probabilistic Poisson model and (2) to estimate
the expected number of correct matches for sample uniques.
ad 1) this risk measure is defined over all sample uniques as \deqn{ \tau_1
= \sum\limits_{j:f_j=1} P(F_j=1 | f_j=1) \quad , } i.e. the expected number
of sample uniques that are population uniques.
ad 2) this risk measure is defined over all sample uniques as \deqn{ \tau_2
= \sum\limits_{j:f_j=1} P(1 / F_j | f_j=1) \quad . }
Since population frequencies \eqn{F_k} are unknown, they need to be
estimated.
The iterative proportional fitting method is used to fit the parameters of
the Poisson distributed frequency counts related to the model specified to
fit the frequency counts. The obtained parameters are used to estimate a
global risk, defined in Skinner and Holmes (1998).
}
\examples{
## data.frame method
data(testdata2)
form <- ~sex+water+roof
w <- "sampling_weight"
(modRisk(testdata2, method="default", formulaM=form, weights=w))
(modRisk(testdata2, method="CE", formulaM=form, weights=w))
(modRisk(testdata2, method="PML", formulaM=form, weights=w))
(modRisk(testdata2, method="weightedLLM", formulaM=form, weights=w))
(modRisk(testdata2, method="IPF", formulaM=form, weights=w))
## application to a sdcMicroObj
data(testdata2)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- modRisk(sdc,form=~sex+water+roof)
slot(sdc, "risk")$model
}
\references{
Skinner, C.J. and Holmes, D.J. (1998) \emph{Estimating the
re-identification risk per record in microdata}. Journal of Official
Statistics, 14:361-372, 1998.
Rinott, Y. and Shlomo, N. (1998). \emph{A Generalized Negative Binomial
Smoothing Model for Sample Disclosure Risk Estimation}. Privacy in
Statistical Databases. Lecture Notes in Computer Science. Springer-Verlag,
82--93.
Clogg, C.C. and Eliasson, S.R. (1987). \emph{Some Common Problems in Log-Linear Analysis}. Sociological Methods and Research, 8-44.
}
\seealso{
\code{\link{loglm}}, \code{\link{measure_risk}}
}
\author{
Matthias Templ, Marius Totter, Bernhard Meindl
}
\keyword{manip}
|
setwd("~/Documents/Data Science Files/Course 4 Project 1")
table <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
table$Date <- as.Date(table$Date, "%d/%m/%Y")
table <- subset(table, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
table <- table[complete.cases(table), ]
dateTime <- paste(table$Date, table$Time)
dateTime <- setNames(dateTime, "Date/Time")
table <- table[ , !(names(table) %in% c("Date","Time"))]
table <- cbind(dateTime, table)
table$dateTime <- as.POSIXct(dateTime)
hist(table$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off()
| /plot1.R | no_license | vmagati/ExData_Plotting1 | R | false | false | 841 | r | setwd("~/Documents/Data Science Files/Course 4 Project 1")
table <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
table$Date <- as.Date(table$Date, "%d/%m/%Y")
table <- subset(table, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
table <- table[complete.cases(table), ]
dateTime <- paste(table$Date, table$Time)
dateTime <- setNames(dateTime, "Date/Time")
table <- table[ , !(names(table) %in% c("Date","Time"))]
table <- cbind(dateTime, table)
table$dateTime <- as.POSIXct(dateTime)
hist(table$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off()
|
install.packages("lubridate")
install.packages("forecast")
## Libaries I need
library(forecast)
library(caret)
library(tidyverse)
library(lubridate)
library(DataExplorer)
## Read in the data
store.train <- vroom::vroom("./train.csv")
store.test <- vroom::vroom("./test.csv")
#combine the dataset
store <-bind_rows(store.train, store.test)
glimpse(store)
#Feature Engineering
store$item <- as.factor(store$item)
store$store <- as.factor(store$store)
store$month <- month(store$date)
store$weekday <- weekdays(store$date)
store$year <- year(store$date)
store$quarter <- quarter(store$date)
store <- store %>%
group_by(store, item, month) %>%
mutate(mean_month_sales = mean(sales, na.rm = TRUE)) %>%
ungroup() %>%
group_by(store, item, weekday) %>%
mutate(mean_weekday_sales = mean(sales,na.rm = TRUE)) %>%
ungroup() %>%
group_by(store,item, year) %>%
mutate(mean_year_sales = mean(sales, na.rm = TRUE)) %>%
ungroup() %>%
group_by(store,item, quarter) %>%
mutate(mean_quarter_sales = mean(sales, na.rm = TRUE)) %>%
ungroup()
head(store)
summary(store)
store.train <- store[!is.na(store$sales),]
store.test <- store[is.na(store$sales),]
# EDA
# Distribution of Sales
ggplot(data = store.train,
mapping = aes(x = sales)) +
geom_histogram(bins = 20)
# Distribution of Sales by store
ggplot(data = store.train,
mapping = aes(x = sales)) +
geom_histogram(bins = 20) +
facet_wrap(~store)
# Since the above is right skewed, we can do transformation
#boxcox transformation
bc <- BoxCoxTrans(store.train$sales+1)
bc # "best" lambda value 0.3
#Predict sales after boxcox transformation
trans_sales<-predict(bc, as.vector(store.train$sales+1))
summary(trans_sales)
store.train$sales<-trans_sales
ggplot(store.train, aes(sales))+
geom_histogram()+
ggtitle("Distribution of Sales After Boxcox")
ggplot(store.train, aes(sales, fill = as.factor(store)))+
geom_histogram()+
facet_wrap(~as.factor(store))+
ggtitle("Distribution of Sales by Store After Boxcox")
#The overall distribution of sales is almost perfectly symmetrical
#The distributions of sales by store also look very symmetrical
#Impact of Time with Sales (YEAR)
ggplot(store.train, aes(as.factor(year), sales))+
geom_boxplot()
#No much difference between year in terms of sales
#Impact of Time with Sales (Quarter)
ggplot(store.train, aes(as.factor(quarter), sales))+
geom_boxplot()
ggplot(store.train, aes(as.factor(quarter), sales, fill = as.factor(year)))+
geom_boxplot()
#From the first plot we can see Quarter 2 and 3 has slightly higher sales and quarter 4 has lower sales
#second plot I group the quarter by years. There is some type of
#positive relationship between year and sales
#Impact of Time with Sales (Month)
ggplot(store.train, aes(as.factor(month), sales))+
geom_boxplot()
#distribution shift up and down but December seems to have much drop in sales.
#Change to log because the metric for the competition is rmsle and also the distribution of counts is skewed.
store$sales = log1p(store$sales)
#Split test/train
store.train <- store %>% filter(!is.na(sales))
store.test <- store %>% filter(is.na(sales))
plot_missing(store.train)
plot_missing(store.test)
#use gbm
gbm <- train(form=sales~.,
data=store.train,
method="gbm",
trControl=trainControl(method="repeatedcv",
number=3, #Number of pieces of your data
repeats=1)) #repeats=1 = "cv"
gbm$results
gbm.preds <- data.frame(Id=store.test$id, sales=predict(gbm, newdata=store.test))
| /Submission.R | no_license | JessieCMiao/Forecasting | R | false | false | 3,624 | r | install.packages("lubridate")
install.packages("forecast")
## Libaries I need
library(forecast)
library(caret)
library(tidyverse)
library(lubridate)
library(DataExplorer)
## Read in the data
store.train <- vroom::vroom("./train.csv")
store.test <- vroom::vroom("./test.csv")
#combine the dataset
store <-bind_rows(store.train, store.test)
glimpse(store)
#Feature Engineering
store$item <- as.factor(store$item)
store$store <- as.factor(store$store)
store$month <- month(store$date)
store$weekday <- weekdays(store$date)
store$year <- year(store$date)
store$quarter <- quarter(store$date)
store <- store %>%
group_by(store, item, month) %>%
mutate(mean_month_sales = mean(sales, na.rm = TRUE)) %>%
ungroup() %>%
group_by(store, item, weekday) %>%
mutate(mean_weekday_sales = mean(sales,na.rm = TRUE)) %>%
ungroup() %>%
group_by(store,item, year) %>%
mutate(mean_year_sales = mean(sales, na.rm = TRUE)) %>%
ungroup() %>%
group_by(store,item, quarter) %>%
mutate(mean_quarter_sales = mean(sales, na.rm = TRUE)) %>%
ungroup()
head(store)
summary(store)
store.train <- store[!is.na(store$sales),]
store.test <- store[is.na(store$sales),]
# EDA
# Distribution of Sales
ggplot(data = store.train,
mapping = aes(x = sales)) +
geom_histogram(bins = 20)
# Distribution of Sales by store
ggplot(data = store.train,
mapping = aes(x = sales)) +
geom_histogram(bins = 20) +
facet_wrap(~store)
# Since the above is right skewed, we can do transformation
#boxcox transformation
bc <- BoxCoxTrans(store.train$sales+1)
bc # "best" lambda value 0.3
#Predict sales after boxcox transformation
trans_sales<-predict(bc, as.vector(store.train$sales+1))
summary(trans_sales)
store.train$sales<-trans_sales
ggplot(store.train, aes(sales))+
geom_histogram()+
ggtitle("Distribution of Sales After Boxcox")
ggplot(store.train, aes(sales, fill = as.factor(store)))+
geom_histogram()+
facet_wrap(~as.factor(store))+
ggtitle("Distribution of Sales by Store After Boxcox")
#The overall distribution of sales is almost perfectly symmetrical
#The distributions of sales by store also look very symmetrical
#Impact of Time with Sales (YEAR)
ggplot(store.train, aes(as.factor(year), sales))+
geom_boxplot()
#No much difference between year in terms of sales
#Impact of Time with Sales (Quarter)
ggplot(store.train, aes(as.factor(quarter), sales))+
geom_boxplot()
ggplot(store.train, aes(as.factor(quarter), sales, fill = as.factor(year)))+
geom_boxplot()
#From the first plot we can see Quarter 2 and 3 has slightly higher sales and quarter 4 has lower sales
#second plot I group the quarter by years. There is some type of
#positive relationship between year and sales
#Impact of Time with Sales (Month)
ggplot(store.train, aes(as.factor(month), sales))+
geom_boxplot()
#distribution shift up and down but December seems to have much drop in sales.
#Change to log because the metric for the competition is rmsle and also the distribution of counts is skewed.
store$sales = log1p(store$sales)
#Split test/train
store.train <- store %>% filter(!is.na(sales))
store.test <- store %>% filter(is.na(sales))
plot_missing(store.train)
plot_missing(store.test)
# Fit a gradient-boosted tree model via caret on the log-transformed sales.
# "repeatedcv" with repeats = 1 is equivalent to plain 3-fold CV.
gbm <- train(form=sales~.,
data=store.train,
method="gbm",
trControl=trainControl(method="repeatedcv",
number=3, #Number of folds the data is split into
repeats=1)) #repeats=1 = "cv"
gbm$results
# Submission-style frame: one predicted sales value per test-set id.
# NOTE(review): predictions are on the log1p scale; apply expm1() before
# submitting if the competition expects raw sales — confirm with the spec.
gbm.preds <- data.frame(Id=store.test$id, sales=predict(gbm, newdata=store.test))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_checks.R
\name{check_metadata}
\alias{check_metadata}
\title{Check metadata variables against Seurat object}
\usage{
check_metadata(seurat_object, varnames)
}
\arguments{
\item{seurat_object}{An object of type 'Seurat'}
\item{varnames}{Character vector of metadata names}
}
\value{
No return value; the function stops with an error if any supplied metadata variable is not present in the object.
}
\description{
Check metadata variables against Seurat object
}
| /man/check_metadata.Rd | no_license | ebi-gene-expression-group/workflowscriptscommon | R | false | true | 503 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_checks.R
\name{check_metadata}
\alias{check_metadata}
\title{Check metadata variables against Seurat object}
\usage{
check_metadata(seurat_object, varnames)
}
\arguments{
\item{seurat_object}{An object of type 'Seurat'}
\item{varnames}{Character vector of metadata names}
}
\value{
No return value; the function stops with an error if any supplied metadata variable is not present in the object.
}
\description{
Check metadata variables against Seurat object
}
|
# Basic weighted analysis for the BGD MSNA: reads recoded HH/individual
# data, computes response rates and survey weights, then runs butteR
# mean/proportion analyses at several aggregation levels.
rm(list = ls())  # NOTE(review): clearing the global env in a script is discouraged
library(tidyverse)
library(butteR)
library(survey)
library(srvyr)
library(forcats)
library(dplyr)
# Manual toggles: which population to process and whether to write CSVs.
population<-c("host","refugee")[2]
write_output<-c("yes","no")[1]
day_to_run <- Sys.Date()
# active_path.R supplies the input paths and column-name strings used
# below (recoding_output_hh/indv, tool_path, pop_path, df_strata,
# sf_strata, sf_pop).
source("scripts/active_path.R")
# read_data ---------------------------------------------------------------
hh_data <- read.csv(recoding_output_hh, stringsAsFactors = FALSE, na.strings=c("", " ", NA))
ind_data <- read.csv(recoding_output_indv, stringsAsFactors = FALSE, na.strings=c("", " ", NA))
tool_survey_sheet <- readxl::read_xls(tool_path,sheet = "survey")
tool_choices_sheet <- readxl::read_xls(tool_path,sheet = "choices")
pop <- read.csv(pop_path,stringsAsFactors = FALSE, na.strings=c("", " ", NA))
# responserate ------------------------------------------------------------
# Keep only the join key plus the disaggregation variables needed for the
# individual-level response rates.
if (population == "host") {
  # df_strata holds a column NAME (string, from active_path.R); wrap it in
  # all_of() so tidyselect treats it as an external vector — passing a bare
  # external character vector to select() is deprecated and ambiguous.
  hh_data2 <- hh_data %>%
    select(X_uuid, all_of(df_strata), resp_gender,
           I.HH_CHAR.gender_hoh.HH, I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
if (population == "refugee") {
  hh_data2 <- hh_data %>%
    select(X_uuid, all_of("upazila"), resp_gender,
           I.HH_CHAR.gender_hoh.HH, I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
# Attach the HH disaggregation variables to each individual via the
# parent-submission uuid so individual rates can be split the same way.
ind_data2 <- ind_data %>% left_join(hh_data2,by =c("X_submission__uuid"= "X_uuid"))
hh_cols <- hh_data %>% colnames()
indv_cols <- ind_data2 %>% colnames()
# "Response rate" here = count of non-missing answers per question.
overall_rate_hh <- hh_data %>% summarise_at(.vars = hh_cols,.funs = ~ sum(!is.na(.)))
overall_rate_indv <- ind_data2 %>% summarise_at(.vars = indv_cols,.funs = ~ sum(!is.na(.)))
# Same counts, disaggregated by respondent gender...
overall_rate_by_resp_gender_hh <- hh_data %>% dplyr::group_by(resp_gender)%>% dplyr::summarise_at(names(hh_data %>% select(-resp_gender)),.funs = ~ sum(!is.na(.)))
overall_rate_by_resp_gender_indv <- ind_data2 %>% dplyr::group_by(resp_gender) %>% summarise_at(names(ind_data2 %>% select(-resp_gender)),.funs = ~ sum(!is.na(.)))
# ...by presence of an adult male in the household...
overall_rate_by_adult_male_hh <- hh_data %>% dplyr::group_by(I.HH_CHAR.adlt_male_in_hh.INDVHH)%>% dplyr::summarise_at(names(hh_data %>% select(-I.HH_CHAR.adlt_male_in_hh.INDVHH)),.funs = ~ sum(!is.na(.)))
overall_rate_by_adult_male_indv <- ind_data2 %>% dplyr::group_by(I.HH_CHAR.adlt_male_in_hh.INDVHH) %>% summarise_at(names(ind_data2 %>% select(-I.HH_CHAR.adlt_male_in_hh.INDVHH)),.funs = ~ sum(!is.na(.)))
# ...and by gender of the head of household.
overall_rate_by_hoh_gender_hh <- hh_data %>% dplyr::group_by(I.HH_CHAR.gender_hoh.HH)%>% dplyr::summarise_at(names(hh_data %>% select(-I.HH_CHAR.gender_hoh.HH)),.funs = ~ sum(!is.na(.)))
overall_rate_by_hoh_gender_indv <- ind_data2 %>% dplyr::group_by(I.HH_CHAR.gender_hoh.HH) %>% summarise_at(names(ind_data2 %>% select(-I.HH_CHAR.gender_hoh.HH)),.funs = ~ sum(!is.na(.)))
# Geographic split: host community data is grouped by upazila name,
# refugee data by its "upazila" column.
if(population == "host"){
overall_rate_by_upazila_hh <- hh_data %>% dplyr::group_by(upazilla_name)%>% dplyr::summarise_at(names(hh_data %>% select(-upazilla_name)),.funs = ~ sum(!is.na(.)))
overall_rate_by_upazila_indv <- ind_data2 %>% dplyr::group_by(upazilla_name) %>% summarise_at(names(ind_data2 %>% select(-upazilla_name)),.funs = ~ sum(!is.na(.)))
}
if(population == "refugee"){
overall_rate_by_upazila_hh <- hh_data %>% dplyr::group_by(upazila)%>% dplyr::summarise_at(names(hh_data %>% select(-upazila)),.funs = ~ sum(!is.na(.)))
overall_rate_by_upazila_indv <- ind_data2 %>% dplyr::group_by(upazila) %>% summarise_at(names(ind_data2 %>% select(-upazila)),.funs = ~ sum(!is.na(.)))
}
# Dump each response-rate table as CSV, stamped with the run date
# (dashes in the date are replaced so the stamp is filename-safe).
if(write_output == "yes"){
write.csv(overall_rate_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_HH.csv"))
write.csv(overall_rate_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_indv.csv"))
write.csv(overall_rate_by_resp_gender_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_resp_genderHH.csv"))
write.csv(overall_rate_by_resp_gender_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_resp_genderindv.csv"))
write.csv(overall_rate_by_hoh_gender_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_hoh_genderHH.csv"))
write.csv(overall_rate_by_hoh_gender_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_hoh_gender_indv.csv"))
write.csv(overall_rate_by_adult_male_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_adult_male_HH.csv"))
write.csv(overall_rate_by_adult_male_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_adult_male_indv.csv"))
write.csv(overall_rate_by_upazila_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_upazila_HH.csv"))
write.csv(overall_rate_by_upazila_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_upazila_indv.csv"))
}
# weighting ---------------------------------------------------------------
if (population == "refugee") {
  # Clean the population figures: keep camp-level totals only (rows with a
  # Camp value but no Block), and normalise camp labels so they join
  # against the survey strata column.
  pop <- pop %>%
    filter(!is.na(Camp), is.na(Block)) %>%
    # filter(Camp != "Kutupalong RC") %>%
    mutate(
      # Strip the trailing "Total" marker from camp-total labels first...
      !!(sf_strata) := stringr::str_replace(Camp, "Total", "") %>% trimws(),
      # ...then shorten "Extension" on the already-cleaned value.
      # BUG FIX: this previously re-derived the column from `Camp`, which
      # silently discarded the "Total" removal performed on the line above.
      !!(sf_strata) := stringr::str_replace_all(!!sym(sf_strata), "Extension", "Ext"),
      # Counts arrive as strings with thousands separators; parse to numeric.
      Total.Families = readr::parse_number(Total.Families %>% stringr::str_replace_all(",", "")),
      Total.Individuals = readr::parse_number(Total.Individuals %>% stringr::str_replace_all(",", ""))
    )
  # Recode kobo-style camp codes (e.g. "camp_4e") into the official labels
  # used by the population figures (e.g. "Camp 4 Ext").
  hh_data$camp_name_fix <- hh_data$camp_name %>% str_replace_all("_", " ") %>% str_replace_all("e", "E") %>%
    str_replace_all("w", "W") %>% str_replace_all("camp ktp", "Kutupalong RC") %>%
    str_replace_all("camp nya", "Nayapara RC") %>% str_replace_all("c", "C") %>%
    str_replace_all("20E", "20 Ext") %>% str_replace_all("4E", "4 Ext")
  # Stratified survey weight: (stratum population share) / (stratum sample share).
  sf_with_weights <- hh_data %>%
    group_by(!!sym(df_strata)) %>%
    summarise(sample_strata_num = n()) %>%
    right_join(pop, by = setNames(sf_strata, df_strata)) %>%
    mutate(sample_global = sum(sample_strata_num),
           pop_global = sum(!!sym(sf_pop)),
           survey_weight = (!!sym(sf_pop) / pop_global) / (sample_strata_num / sample_global)
    )
  write.csv(sf_with_weights, paste0("outputs/", population, "/weights/", population, "_weights.csv"))
  sf_with_weights <- sf_with_weights %>% select(camp_name_fix, survey_weight)
}
# Host-community weighting: collapse population figures to upazila level,
# normalise the upazila spelling, then compute stratified survey weights.
if(population == "host"){
pop <- pop %>% dplyr::group_by(Upazila) %>% dplyr::summarise(
HH_pop = sum(HH_pop)
)
# Capitalise upazila names so they match the population frame's labels.
hh_data$upazilla_name <- hh_data$upazilla_name %>% str_replace_all("teknaf","Teknaf") %>%
str_replace_all("ukhiya","Ukhiya")
# Stratified survey weight: (stratum pop share) / (stratum sample share).
sf_with_weights<-hh_data %>%
group_by(!!sym(df_strata)) %>%
summarise(sample_strata_num=n()) %>%
right_join(pop, by=setNames(sf_strata,df_strata)) %>%
mutate(sample_global=sum(sample_strata_num),
pop_global=sum(!!sym(sf_pop)),
survey_weight= (!!sym(sf_pop)/pop_global)/(sample_strata_num/sample_global)
)
write.csv(sf_with_weights,paste0("outputs/",population,"/weights/",population,"_weights.csv"))
sf_with_weights <- sf_with_weights %>% select(upazilla_name,survey_weight)
}
# data_for_butteR ---------------------------------------------------------
# Attach the weights to the HH data (joins on the strata column shared
# with sf_with_weights) and persist the weighted dataset.
df <- hh_data %>% left_join(sf_with_weights)
write.csv(df,paste0("outputs/",population,"/data_sets_with_weights/",population,"_data_set_weights_hh.csv"))
# Retained exploratory code: earlier attempt at expanding select-multiple
# factor levels via a lookup table; superseded by refactor_to_xlsform below.
# lt<-butteR::make_xlsform_lookup_table(kobo_survey = tool_survey_sheet,kobo_choices = tool_choices_sheet
# ,label_column = "label::english")
#
#
# select_multiples<-lt %>% filter(str_detect(question_type, "^select_mult")) %>% pull(xml_format_data_col) %>%
# unique()
#
# select_multiples_hh<-select_multiples[select_multiples %in% colnames(hh_data)]
#
#
# df<-df %>%
# mutate_at(select_multiples_hh, function(x) fct_expand(as.factor(x),c("0","1")))
# df<-df %>%
# mutate_at(.vars = select_multiples_hh, ~fct_expand("0","1"))
#
# df$modality_shelter.materials %>% AMR::freq()
# df$shelter_issues.shelter_is_hard_to_access %>% AMR::freq()
#
# df$shelter_issues.shelter_is_hard_to_access %>% unique()
# Re-type columns as factors with the full choice lists from the kobo tool.
df <- butteR::refactor_to_xlsform(data = df,kobo_survey = tool_survey_sheet ,
kobo_choices = tool_choices_sheet ,label_column = "label::english")
# Rank questions come back as factors from refactor_to_xlsform; convert
# them to integers via character (so the label text, not the internal
# factor level code, is parsed). Single across() pass replaces the
# superseded two-step mutate_at() chain.
rank_cols <- df %>% select(starts_with("rank"), -ends_with("label")) %>% colnames()
df <- df %>% mutate(across(all_of(rank_cols), ~ as.integer(as.character(.x))))
# butter analysis --------------------------------------------------------
# Stratified design with no clustering (ids = ~1); strata and weight
# formulas are built from the column-name strings.
dfsvy<-svydesign(ids = ~1,strata = formula(paste0("~",df_strata)),data = df,weights = formula(paste0("~", "survey_weight")))
# Administrative / identifier columns excluded from the analysis.
if (population == "refugee") {
dont_analyze<-c( "X", "survey_date", "survey_start", "deviceid", "end_survey",
"instance_name", "enum_organisation", "enumerator_id", "datearrival_shelter",
"enum_gender", "respondent_id", "upazila", "camp_name",
"informed_consent", "fcn_consent", "hh_fcn", "block_number",
"camp_name_fix","referral_contact", "phone_number", "enum_comment",
"X_id", "X_uuid", "X_submission_time",
"X_index")}
if (population == "host") {
dont_analyze<-c( "X", "survey_date", "survey_start", "deviceid", "end_survey",
"instance_name", "enum_organisation", "enumerator_id",
"enum_gender", "respondent_id", "upazilla_name", "union_name",
"ward_name",
"informed_consent",
"referral_contact", "phone_number", "enum_comment",
"X_id", "X_uuid", "X_submission_time",
"X_index")}
# Restrict the exclusion list to columns actually present in df.
dont_analyze_in_data<-dont_analyze[dont_analyze %in% colnames(df)]
is_not_empty<-function(x){ all(is.na(x))==FALSE}
# Analyse everything except free-text "other" columns, all-NA columns and
# the admin columns above.
# NOTE(review): bare `dont_analyze_in_data` in select() relies on
# deprecated tidyselect behaviour; -all_of(dont_analyze_in_data) is the
# modern form — confirm before changing.
cols_to_analyze<-df %>% select(-starts_with("Other"), -ends_with("_other")) %>%
select_if(.,is_not_empty) %>% select(-dont_analyze_in_data) %>% colnames()
# Re-add a handful of "_other"-style columns that should be analysed.
yes_other_cols <- df %>% select(ends_with(".yes_other")) %>% colnames() %>% dput()
cols_to_analyze <- c(cols_to_analyze,"masks_source.mask_source_other",yes_other_cols,"other_reasons.yes_covid","other_reasons.no")
# Ensure both yes/no levels exist so proportions are computed even when a
# level is absent from the (host) data.
if(population == "host"){
dfsvy$variables$I.HH_CHAR.enough_information_for_all.HH<- forcats::fct_expand(dfsvy$variables$I.HH_CHAR.enough_information_for_all.HH,c( "no", "yes"))
dfsvy$variables$I.FSL.food_source_assistance.HH<- forcats::fct_expand(dfsvy$variables$I.FSL.food_source_assistance.HH,c( "no", "yes"))
dfsvy$variables$I.HH_CHAR.no_working_age.INDVHH<- forcats::fct_expand(dfsvy$variables$I.HH_CHAR.no_working_age.INDVHH,c( "no", "yes"))
}
# Weighted means/proportions: overall, then by HoH gender, adult-male
# presence, respondent gender, and strata (alone and crossed with HoH).
basic_analysis<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze)
basic_analysis_by_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "I.HH_CHAR.gender_hoh.HH")
basic_analysis_by_adlt_male<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "I.HH_CHAR.adlt_male_in_hh.INDVHH")
basic_analysis_by_resp_gender<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "resp_gender")
if (population == "host") {
basic_analysis_by_strata<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = df_strata)
basic_analysis_by_strata_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = c(df_strata,"I.HH_CHAR.gender_hoh.HH"))
}
if (population == "refugee") {
basic_analysis_by_strata<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "upazila")
basic_analysis_by_strata_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = c("upazila","I.HH_CHAR.gender_hoh.HH"))
}
# Persist each HH-level analysis table, date-stamped.
if (write_output == "yes") {
write.csv(basic_analysis,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_HH.csv"))
write.csv(basic_analysis_by_hoh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_HoH_HH.csv"))
write.csv(basic_analysis_by_resp_gender,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_resp_gender_HH.csv"))
write.csv(basic_analysis_by_strata,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_HH.csv"))
write.csv(basic_analysis_by_strata_hoh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_HoH_HH.csv"))
write.csv(basic_analysis_by_adlt_male,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_adlt_male_HH.csv"))
}
# individual loop ---------------------------------------------------------
# Carry the HH-level weight and disaggregation variables down to the
# individual (roster) level via the parent-submission uuid.
if (population == "host") {
  # all_of() because df_strata is a column-name string from active_path.R,
  # not a data column — bare external vectors in select() are deprecated.
  df_weight <- df %>%
    select(X_uuid, survey_weight, all_of(df_strata), resp_gender,
           I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
if (population == "refugee") {
  df_weight <- df %>%
    select(X_uuid, survey_weight, all_of("upazila"), resp_gender,
           I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
# Join weights onto individuals; drop roster rows whose parent HH survey
# was removed (no matching weight).
indv_with_weights <- ind_data %>% left_join(df_weight,by=c("X_submission__uuid"="X_uuid"))
indv_with_weights<- indv_with_weights %>% filter(!is.na(survey_weight))
write.csv(indv_with_weights,paste0("outputs/",population,"/data_sets_with_weights/",population,"_data_set_weights_indv.csv"))
# Re-type columns as factors using the kobo tool choice lists.
indv_with_weights <- butteR::refactor_to_xlsform(data = indv_with_weights,kobo_survey = tool_survey_sheet ,
kobo_choices = tool_choices_sheet ,label_column = "label::english")
# Unstratified weighted design for the individual-level analysis.
dfsvy_indv<-svydesign(ids = ~1,data = indv_with_weights,weights = formula(paste0("~", "survey_weight")))
# Identifier/bookkeeping columns excluded from the individual analysis.
dont_analyze_indv<-c( "X", "parent_instance_name","repeat_instance_name",
"X_index", "X_parent_table_name", "X_parent_index", "X_submission__id",
"X_submission__uuid", "X_submission__submission_time")
dont_analyze_in_data_indv<-dont_analyze_indv[dont_analyze_indv %in% colnames(indv_with_weights)]
is_not_empty<-function(x){ all(is.na(x))==FALSE}
# Same filtering logic as the HH side: drop free-text "other" columns,
# all-NA columns and the admin columns above.
cols_to_analyze_indv<-indv_with_weights %>% select(-starts_with("Other"), -ends_with("_other")) %>%
select_if(.,is_not_empty) %>% select(-dont_analyze_in_data_indv) %>% colnames()
# if(population == "host"){
# dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f_60.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f_60.INDV,c( "no", "yes"))
# }
# Ensure both yes/no levels exist for the treatment-need indicators so
# proportions are reported even when a level is absent from the data.
if(population == "refugee"){
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_0_17.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_0_17.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_18_59.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_18_59.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_60.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_60.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_m.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_m.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f.INDV,c( "no", "yes"))
}
# Individual-level weighted means/proportions: overall, by respondent
# gender, adult-male presence, and strata; then write each table.
basic_analysis_indv<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv)
basic_analysis_indv_by_resp_gender<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "resp_gender" )
basic_analysis_indv_by_adlt_male<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "I.HH_CHAR.adlt_male_in_hh.INDVHH" )
if (population == "host"){
basic_analysis_indv_by_strata<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = df_strata )
}
if (population == "refugee"){
basic_analysis_indv_by_strata<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "upazila" )
}
if (write_output == "yes") {
write.csv(basic_analysis_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_INDV.csv"))
write.csv(basic_analysis_indv_by_strata,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_INDV.csv"))
write.csv(basic_analysis_indv_by_resp_gender,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_resp_gender_INDV.csv"))
write.csv(basic_analysis_indv_by_adlt_male,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_adlt_male_INDV.csv"))
}
| /scripts/basic_analysis.R | no_license | mhkhan27/2020_BGD_MSNAs | R | false | false | 17,859 | r | rm(list = ls())
library(tidyverse)
library(butteR)
library(survey)
library(srvyr)
library(forcats)
library(dplyr)
population<-c("host","refugee")[2]
write_output<-c("yes","no")[1]
day_to_run <- Sys.Date()
source("scripts/active_path.R")
# read_data ---------------------------------------------------------------
hh_data <- read.csv(recoding_output_hh, stringsAsFactors = FALSE, na.strings=c("", " ", NA))
ind_data <- read.csv(recoding_output_indv, stringsAsFactors = FALSE, na.strings=c("", " ", NA))
tool_survey_sheet <- readxl::read_xls(tool_path,sheet = "survey")
tool_choices_sheet <- readxl::read_xls(tool_path,sheet = "choices")
pop <- read.csv(pop_path,stringsAsFactors = FALSE, na.strings=c("", " ", NA))
# responserate ------------------------------------------------------------
if(population == "host"){
hh_data2 <- hh_data %>% select(X_uuid,df_strata,resp_gender,I.HH_CHAR.gender_hoh.HH,I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
if(population == "refugee"){
hh_data2 <- hh_data %>% select(X_uuid,"upazila",resp_gender,I.HH_CHAR.gender_hoh.HH,I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
ind_data2 <- ind_data %>% left_join(hh_data2,by =c("X_submission__uuid"= "X_uuid"))
hh_cols <- hh_data %>% colnames()
indv_cols <- ind_data2 %>% colnames()
overall_rate_hh <- hh_data %>% summarise_at(.vars = hh_cols,.funs = ~ sum(!is.na(.)))
overall_rate_indv <- ind_data2 %>% summarise_at(.vars = indv_cols,.funs = ~ sum(!is.na(.)))
overall_rate_by_resp_gender_hh <- hh_data %>% dplyr::group_by(resp_gender)%>% dplyr::summarise_at(names(hh_data %>% select(-resp_gender)),.funs = ~ sum(!is.na(.)))
overall_rate_by_resp_gender_indv <- ind_data2 %>% dplyr::group_by(resp_gender) %>% summarise_at(names(ind_data2 %>% select(-resp_gender)),.funs = ~ sum(!is.na(.)))
overall_rate_by_adult_male_hh <- hh_data %>% dplyr::group_by(I.HH_CHAR.adlt_male_in_hh.INDVHH)%>% dplyr::summarise_at(names(hh_data %>% select(-I.HH_CHAR.adlt_male_in_hh.INDVHH)),.funs = ~ sum(!is.na(.)))
overall_rate_by_adult_male_indv <- ind_data2 %>% dplyr::group_by(I.HH_CHAR.adlt_male_in_hh.INDVHH) %>% summarise_at(names(ind_data2 %>% select(-I.HH_CHAR.adlt_male_in_hh.INDVHH)),.funs = ~ sum(!is.na(.)))
overall_rate_by_hoh_gender_hh <- hh_data %>% dplyr::group_by(I.HH_CHAR.gender_hoh.HH)%>% dplyr::summarise_at(names(hh_data %>% select(-I.HH_CHAR.gender_hoh.HH)),.funs = ~ sum(!is.na(.)))
overall_rate_by_hoh_gender_indv <- ind_data2 %>% dplyr::group_by(I.HH_CHAR.gender_hoh.HH) %>% summarise_at(names(ind_data2 %>% select(-I.HH_CHAR.gender_hoh.HH)),.funs = ~ sum(!is.na(.)))
if(population == "host"){
overall_rate_by_upazila_hh <- hh_data %>% dplyr::group_by(upazilla_name)%>% dplyr::summarise_at(names(hh_data %>% select(-upazilla_name)),.funs = ~ sum(!is.na(.)))
overall_rate_by_upazila_indv <- ind_data2 %>% dplyr::group_by(upazilla_name) %>% summarise_at(names(ind_data2 %>% select(-upazilla_name)),.funs = ~ sum(!is.na(.)))
}
if(population == "refugee"){
overall_rate_by_upazila_hh <- hh_data %>% dplyr::group_by(upazila)%>% dplyr::summarise_at(names(hh_data %>% select(-upazila)),.funs = ~ sum(!is.na(.)))
overall_rate_by_upazila_indv <- ind_data2 %>% dplyr::group_by(upazila) %>% summarise_at(names(ind_data2 %>% select(-upazila)),.funs = ~ sum(!is.na(.)))
}
if(write_output == "yes"){
write.csv(overall_rate_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_HH.csv"))
write.csv(overall_rate_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_indv.csv"))
write.csv(overall_rate_by_resp_gender_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_resp_genderHH.csv"))
write.csv(overall_rate_by_resp_gender_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_resp_genderindv.csv"))
write.csv(overall_rate_by_hoh_gender_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_hoh_genderHH.csv"))
write.csv(overall_rate_by_hoh_gender_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_hoh_gender_indv.csv"))
write.csv(overall_rate_by_adult_male_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_adult_male_HH.csv"))
write.csv(overall_rate_by_adult_male_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_adult_male_indv.csv"))
write.csv(overall_rate_by_upazila_hh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_upazila_HH.csv"))
write.csv(overall_rate_by_upazila_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_response_rate_by_upazila_indv.csv"))
}
# weighting ---------------------------------------------------------------
if (population == "refugee") {
  # Clean the population figures: keep camp-level totals only (rows with a
  # Camp value but no Block), and normalise camp labels so they join
  # against the survey strata column.
  pop <- pop %>%
    filter(!is.na(Camp), is.na(Block)) %>%
    # filter(Camp != "Kutupalong RC") %>%
    mutate(
      # Strip the trailing "Total" marker from camp-total labels first...
      !!(sf_strata) := stringr::str_replace(Camp, "Total", "") %>% trimws(),
      # ...then shorten "Extension" on the already-cleaned value.
      # BUG FIX: this previously re-derived the column from `Camp`, which
      # silently discarded the "Total" removal performed on the line above.
      !!(sf_strata) := stringr::str_replace_all(!!sym(sf_strata), "Extension", "Ext"),
      # Counts arrive as strings with thousands separators; parse to numeric.
      Total.Families = readr::parse_number(Total.Families %>% stringr::str_replace_all(",", "")),
      Total.Individuals = readr::parse_number(Total.Individuals %>% stringr::str_replace_all(",", ""))
    )
  # Recode kobo-style camp codes (e.g. "camp_4e") into the official labels
  # used by the population figures (e.g. "Camp 4 Ext").
  hh_data$camp_name_fix <- hh_data$camp_name %>% str_replace_all("_", " ") %>% str_replace_all("e", "E") %>%
    str_replace_all("w", "W") %>% str_replace_all("camp ktp", "Kutupalong RC") %>%
    str_replace_all("camp nya", "Nayapara RC") %>% str_replace_all("c", "C") %>%
    str_replace_all("20E", "20 Ext") %>% str_replace_all("4E", "4 Ext")
  # Stratified survey weight: (stratum population share) / (stratum sample share).
  sf_with_weights <- hh_data %>%
    group_by(!!sym(df_strata)) %>%
    summarise(sample_strata_num = n()) %>%
    right_join(pop, by = setNames(sf_strata, df_strata)) %>%
    mutate(sample_global = sum(sample_strata_num),
           pop_global = sum(!!sym(sf_pop)),
           survey_weight = (!!sym(sf_pop) / pop_global) / (sample_strata_num / sample_global)
    )
  write.csv(sf_with_weights, paste0("outputs/", population, "/weights/", population, "_weights.csv"))
  sf_with_weights <- sf_with_weights %>% select(camp_name_fix, survey_weight)
}
if(population == "host"){
pop <- pop %>% dplyr::group_by(Upazila) %>% dplyr::summarise(
HH_pop = sum(HH_pop)
)
hh_data$upazilla_name <- hh_data$upazilla_name %>% str_replace_all("teknaf","Teknaf") %>%
str_replace_all("ukhiya","Ukhiya")
sf_with_weights<-hh_data %>%
group_by(!!sym(df_strata)) %>%
summarise(sample_strata_num=n()) %>%
right_join(pop, by=setNames(sf_strata,df_strata)) %>%
mutate(sample_global=sum(sample_strata_num),
pop_global=sum(!!sym(sf_pop)),
survey_weight= (!!sym(sf_pop)/pop_global)/(sample_strata_num/sample_global)
)
write.csv(sf_with_weights,paste0("outputs/",population,"/weights/",population,"_weights.csv"))
sf_with_weights <- sf_with_weights %>% select(upazilla_name,survey_weight)
}
# data_for_butteR ---------------------------------------------------------
df <- hh_data %>% left_join(sf_with_weights)
write.csv(df,paste0("outputs/",population,"/data_sets_with_weights/",population,"_data_set_weights_hh.csv"))
# lt<-butteR::make_xlsform_lookup_table(kobo_survey = tool_survey_sheet,kobo_choices = tool_choices_sheet
# ,label_column = "label::english")
#
#
# select_multiples<-lt %>% filter(str_detect(question_type, "^select_mult")) %>% pull(xml_format_data_col) %>%
# unique()
#
# select_multiples_hh<-select_multiples[select_multiples %in% colnames(hh_data)]
#
#
# df<-df %>%
# mutate_at(select_multiples_hh, function(x) fct_expand(as.factor(x),c("0","1")))
# df<-df %>%
# mutate_at(.vars = select_multiples_hh, ~fct_expand("0","1"))
#
# df$modality_shelter.materials %>% AMR::freq()
# df$shelter_issues.shelter_is_hard_to_access %>% AMR::freq()
#
# df$shelter_issues.shelter_is_hard_to_access %>% unique()
df <- butteR::refactor_to_xlsform(data = df,kobo_survey = tool_survey_sheet ,
kobo_choices = tool_choices_sheet ,label_column = "label::english")
rank_cols<-df %>% select(starts_with("rank"),-ends_with("label")) %>% colnames()
df <- df %>% mutate_at(rank_cols,function(x) (as.character(x)))
df <- df %>% mutate_at(rank_cols,function(x) (as.integer(x)))
# butter analysis --------------------------------------------------------
dfsvy<-svydesign(ids = ~1,strata = formula(paste0("~",df_strata)),data = df,weights = formula(paste0("~", "survey_weight")))
if (population == "refugee") {
dont_analyze<-c( "X", "survey_date", "survey_start", "deviceid", "end_survey",
"instance_name", "enum_organisation", "enumerator_id", "datearrival_shelter",
"enum_gender", "respondent_id", "upazila", "camp_name",
"informed_consent", "fcn_consent", "hh_fcn", "block_number",
"camp_name_fix","referral_contact", "phone_number", "enum_comment",
"X_id", "X_uuid", "X_submission_time",
"X_index")}
if (population == "host") {
dont_analyze<-c( "X", "survey_date", "survey_start", "deviceid", "end_survey",
"instance_name", "enum_organisation", "enumerator_id",
"enum_gender", "respondent_id", "upazilla_name", "union_name",
"ward_name",
"informed_consent",
"referral_contact", "phone_number", "enum_comment",
"X_id", "X_uuid", "X_submission_time",
"X_index")}
dont_analyze_in_data<-dont_analyze[dont_analyze %in% colnames(df)]
is_not_empty<-function(x){ all(is.na(x))==FALSE}
cols_to_analyze<-df %>% select(-starts_with("Other"), -ends_with("_other")) %>%
select_if(.,is_not_empty) %>% select(-dont_analyze_in_data) %>% colnames()
yes_other_cols <- df %>% select(ends_with(".yes_other")) %>% colnames() %>% dput()
cols_to_analyze <- c(cols_to_analyze,"masks_source.mask_source_other",yes_other_cols,"other_reasons.yes_covid","other_reasons.no")
if(population == "host"){
dfsvy$variables$I.HH_CHAR.enough_information_for_all.HH<- forcats::fct_expand(dfsvy$variables$I.HH_CHAR.enough_information_for_all.HH,c( "no", "yes"))
dfsvy$variables$I.FSL.food_source_assistance.HH<- forcats::fct_expand(dfsvy$variables$I.FSL.food_source_assistance.HH,c( "no", "yes"))
dfsvy$variables$I.HH_CHAR.no_working_age.INDVHH<- forcats::fct_expand(dfsvy$variables$I.HH_CHAR.no_working_age.INDVHH,c( "no", "yes"))
}
basic_analysis<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze)
basic_analysis_by_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "I.HH_CHAR.gender_hoh.HH")
basic_analysis_by_adlt_male<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "I.HH_CHAR.adlt_male_in_hh.INDVHH")
basic_analysis_by_resp_gender<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "resp_gender")
if (population == "host") {
basic_analysis_by_strata<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = df_strata)
basic_analysis_by_strata_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = c(df_strata,"I.HH_CHAR.gender_hoh.HH"))
}
if (population == "refugee") {
basic_analysis_by_strata<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = "upazila")
basic_analysis_by_strata_hoh<-butteR::mean_prop_working(design = dfsvy,list_of_variables = cols_to_analyze,
aggregation_level = c("upazila","I.HH_CHAR.gender_hoh.HH"))
}
if (write_output == "yes") {
write.csv(basic_analysis,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_HH.csv"))
write.csv(basic_analysis_by_hoh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_HoH_HH.csv"))
write.csv(basic_analysis_by_resp_gender,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_resp_gender_HH.csv"))
write.csv(basic_analysis_by_strata,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_HH.csv"))
write.csv(basic_analysis_by_strata_hoh,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_HoH_HH.csv"))
write.csv(basic_analysis_by_adlt_male,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_adlt_male_HH.csv"))
}
# individual loop ---------------------------------------------------------
if(population == "host"){
df_weight <- df %>% select(X_uuid,survey_weight,df_strata,resp_gender,I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
if(population == "refugee"){
df_weight <- df %>% select(X_uuid,survey_weight,"upazila",resp_gender,I.HH_CHAR.adlt_male_in_hh.INDVHH)
}
indv_with_weights <- ind_data %>% left_join(df_weight,by=c("X_submission__uuid"="X_uuid"))
indv_with_weights<- indv_with_weights %>% filter(!is.na(survey_weight))
write.csv(indv_with_weights,paste0("outputs/",population,"/data_sets_with_weights/",population,"_data_set_weights_indv.csv"))
indv_with_weights <- butteR::refactor_to_xlsform(data = indv_with_weights,kobo_survey = tool_survey_sheet ,
kobo_choices = tool_choices_sheet ,label_column = "label::english")
dfsvy_indv<-svydesign(ids = ~1,data = indv_with_weights,weights = formula(paste0("~", "survey_weight")))
dont_analyze_indv<-c( "X", "parent_instance_name","repeat_instance_name",
"X_index", "X_parent_table_name", "X_parent_index", "X_submission__id",
"X_submission__uuid", "X_submission__submission_time")
dont_analyze_in_data_indv<-dont_analyze_indv[dont_analyze_indv %in% colnames(indv_with_weights)]
is_not_empty<-function(x){ all(is.na(x))==FALSE}
cols_to_analyze_indv<-indv_with_weights %>% select(-starts_with("Other"), -ends_with("_other")) %>%
select_if(.,is_not_empty) %>% select(-dont_analyze_in_data_indv) %>% colnames()
# if(population == "host"){
# dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f_60.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f_60.INDV,c( "no", "yes"))
# }
if(population == "refugee"){
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_0_17.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_0_17.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_18_59.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_18_59.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_60.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_60.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_m.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_m.INDV,c( "no", "yes"))
dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f.INDV<- forcats::fct_expand(dfsvy_indv$variables$I.HEALTH.ind_need_treatment_DONT_KNOW_f.INDV,c( "no", "yes"))
}
basic_analysis_indv<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv)
basic_analysis_indv_by_resp_gender<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "resp_gender" )
basic_analysis_indv_by_adlt_male<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "I.HH_CHAR.adlt_male_in_hh.INDVHH" )
if (population == "host"){
basic_analysis_indv_by_strata<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = df_strata )
}
if (population == "refugee"){
basic_analysis_indv_by_strata<-butteR::mean_prop_working(design = dfsvy_indv,list_of_variables = cols_to_analyze_indv,
aggregation_level = "upazila" )
}
if (write_output == "yes") {
write.csv(basic_analysis_indv,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_INDV.csv"))
write.csv(basic_analysis_indv_by_strata,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_strata_INDV.csv"))
write.csv(basic_analysis_indv_by_resp_gender,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_resp_gender_INDV.csv"))
write.csv(basic_analysis_indv_by_adlt_male,paste0("outputs/butteR_basic_analysis/",population,"/",str_replace_all(day_to_run,"-","_"),"_basic_analysis_by_adlt_male_INDV.csv"))
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.24371410383372e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615827132-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 361 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.24371410383372e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
library(DBI)
library(ggplot2)
library(envDocument)
con <- dbConnect(RSQLite::SQLite(), "display/r/graphdata.db")
con
print(dbListTables(con))
query <- dbSendQuery(con, "SELECT * FROM scale")
data <- dbFetch(query, n=-1)
dbClearResult(query)
data$teamNumber <- as.factor(data$teamNumber) # who tf knows
head(data)
print(data)
ggplot(data, aes(x=teamNumber, y=mean)) +
geom_col(fill="steelblue", color="black") +
geom_text(aes(label=mean), vjust=1.6, color="white", size=3.5) +
geom_errorbar(aes(ymin=(mean-standev), ymax=(mean+standev)), width=.1) +
ggtitle("scale")
ggsave("display/r/graphs/scale.png")
| /display/r/scale.r | no_license | Team74/Scouting-App-2018 | R | false | false | 616 | r | library(DBI)
library(ggplot2)
library(envDocument)
con <- dbConnect(RSQLite::SQLite(), "display/r/graphdata.db")
con
print(dbListTables(con))
query <- dbSendQuery(con, "SELECT * FROM scale")
data <- dbFetch(query, n=-1)
dbClearResult(query)
data$teamNumber <- as.factor(data$teamNumber) # who tf knows
head(data)
print(data)
ggplot(data, aes(x=teamNumber, y=mean)) +
geom_col(fill="steelblue", color="black") +
geom_text(aes(label=mean), vjust=1.6, color="white", size=3.5) +
geom_errorbar(aes(ymin=(mean-standev), ymax=(mean+standev)), width=.1) +
ggtitle("scale")
ggsave("display/r/graphs/scale.png")
|
## `make.effect.figure.on.roger.newATACseq.R' makes effect size figures from multiseq, wavelets, DESeq for selected sites
#
## Example Usage : R CMD BATCH --no-save --no-restore "--args info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/summary/Copper.1024.both.msOnly.ms.DESeq300.info' DESeq.100.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.100.alt.run/output/res.Robj' DESeq.300.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.300.alt.run/output/res.Robj' DESeq.full.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.1024.alt.run/output/res.Robj' out.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/code/' wave.out.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/wave/' file.name='ES' siteSize=1024 treatment='Copper' strand='both' sig.level=2 wave.effect=TRUE multiseq.effect=TRUE deseq.100.effect=TRUE deseq.300.effect=TRUE" /mnt/lustre/home/shim/multiscale_analysis/src/R/make.effect.figure.on.roger.ATACseq.R
##
##
## info.path : path to file that contains information on sites of interest (index chr sites st.posi en.posi pval.wave pval.ms pval.deseq.full pval.deseq.600 pval.deseq.300 pval.deseq.100 qval.wave qval.ms qval.deseq.full qval.deseq.600 qval.deseq.300 qval.deseq.100 logLR.wave logLR.ms)
## DESeq.100.info.path : path to file that contains DESeq2 (bin : 100) results for sub windows (p-value and fold change)
## DESeq.300.info.path : path to file that contains DESeq2 (bin : 300) results for sub windows (p-value and fold change)
## DESeq.full.info.path : path to file that contains DESeq2 (bin : size size) results for sub windows (p-value or fold change)
## out.path : path to directory where figures will be saved
## wave.out.path : path to directory which contains results from wavelet analysis
## file.name : output figure file name
## siteSize : site size
## treatment : treatment name
## strand : 'both', 'plus', 'minus'; add two strands, use + strand, or use - strand
## sig.level : +/- sig.level * standard deviation
## wave.effect : indicate whether effect size from wavelet is plotted
## multiseq.effect : indicate whether effect size from multiseq is plotted
## deseq.100.effect : indicate whether effect size from DESeq2 (bin :100) is plotted
## deseq.300.effect : indicate whether effect size from DESeq2 (bin :300) is plotted
##
## Copyright (C) 2014 Heejung Shim
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
setwd("/mnt/lustre/home/shim/multiscale_analysis")
library("multiseq")
library("ashr")
multiscale.analysis.repodir <- scan(".multiscale_analysis.repodir.txt", what=character())
source(paste0(multiscale.analysis.repodir, "/src/R/utils.R"))
source(paste0(multiscale.analysis.repodir, "/src/R/my.utils.R"))
WaveQTL.repodir <- scan(".WaveQTL.repodir.txt", what=character())
##info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/summary/Copper.1024.both.all.ms.DESeq300.info'
##DESeq.100.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.100.alt.run/output/res.Robj'
##DESeq.300.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.300.alt.run/output/res.Robj'
##DESeq.full.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.1024.alt.run/output/res.Robj'
##out.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/gen.fig/Copper.1024.both/fig/'
##wave.out.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/wave/'
##file.name= 'ES'
##siteSize=1024
##treatment='Copper'
##strand='both'
##strand='plus'
##strand='minus'
##sig.level = 2
##wave.effect=TRUE
##multiseq.effect=TRUE
##deseq.100.effect=TRUE
##deseq.300.effect=TRUE
args = (commandArgs(TRUE))
eval(parse(text=args[[1]]))
eval(parse(text=args[[2]]))
eval(parse(text=args[[3]]))
eval(parse(text=args[[4]]))
eval(parse(text=args[[5]]))
eval(parse(text=args[[6]]))
eval(parse(text=args[[7]]))
eval(parse(text=args[[8]]))
eval(parse(text=args[[9]]))
eval(parse(text=args[[10]]))
eval(parse(text=args[[11]]))
eval(parse(text=args[[12]]))
eval(parse(text=args[[13]]))
eval(parse(text=args[[14]]))
eval(parse(text=args[[15]]))
## assigen treatment and control name according to input
## treatment alt null control
## Copper N702 N705 N706
## Selenium N703 N705 N706
## Retinoic N704 N706 N705
##############################################
## sample name and sample file for null data
##############################################
null = TRUE
name.treatment = NULL
name.control = NULL
if(treatment=='Copper'){
name.control = "N706"
if(!null){
name.treatment = "N702"
}else{
name.treatment = "N705"
}
}
if(treatment=='Selenium'){
name.control = "N706"
if(!null){
name.treatment = "N703"
}else{
name.treatment = "N705"
}
}
if(treatment=='Retinoic'){
name.control = "N705"
if(!null){
name.treatment = "N704"
}else{
name.treatment = "N706"
}
}
## sample names
names.Sam = c("N501", "N502", "N503")
## Make a list of sample names and a list of hdf5 file names : treatment first and control later.
sample.names = c(paste0(name.treatment, names.Sam), paste0(name.control, names.Sam))
sample.files = paste0(sample.names, ".qfiltered10")
sample.names.null = sample.names
sample.files.null = sample.files
##############################################
## sample name and sample file for alternative data
##############################################
null = FALSE
name.treatment = NULL
name.control = NULL
if(treatment=='Copper'){
name.control = "N706"
if(!null){
name.treatment = "N702"
}else{
name.treatment = "N705"
}
}
if(treatment=='Selenium'){
name.control = "N706"
if(!null){
name.treatment = "N703"
}else{
name.treatment = "N705"
}
}
if(treatment=='Retinoic'){
name.control = "N705"
if(!null){
name.treatment = "N704"
}else{
name.treatment = "N706"
}
}
## sample names
names.Sam = c("N501", "N502", "N503")
## Make a list of sample names and a list of hdf5 file names : treatment first and control later.
sample.names = c(paste0(name.treatment, names.Sam), paste0(name.control, names.Sam))
sample.files = paste0(sample.names, ".qfiltered10")
sample.names.alt = sample.names
sample.files.alt = sample.files
## Path to directory which contain ATAC-seq data as hdf5 format,
hdf5.data.path = "/data/share/genome_db/hg19/roger_atacseq2/"
## Make a covariate
g = c(rep(0, length(names.Sam)), rep(1, length(names.Sam)))
## Path to library read depth
library.read.depth.path = "/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/info/"
## read TF
all_bed = read.table(gzfile('/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints/OpenChromDnaseGm19239.6c.bed.gz'))
#TssAnno <- read.table('../data/Ensembl2.txt',header=F,as.is=T)
TssAnno = read.table(gzfile('/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints/Copper.TSS.DiffExpressed.FDR10.bed.gz'))
#/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints
#OpenChromDnaseGm19239.6c.bed.gz
#factorNames2.txt
## set up working directory and open figure file
setwd(out.path)
numfig = wave.effect + multiseq.effect + deseq.100.effect + deseq.300.effect + 2
if(numfig <= 2){
pdf(paste0(out.path, file.name, ".effect.pdf"), width=10, height=5)
}else{
pdf(paste0(out.path, file.name, ".effect.pdf"), width=10, height=7)
}
nf <- layout(matrix(1:numfig,numfig,1,byrow=TRUE),TRUE)
#############################
# read all information
#############################
dat.info = read.table(file=info.path, header = TRUE, as.is=TRUE)
if(deseq.100.effect){
load(DESeq.100.info.path)
deseq.100.info.pval = deseq.100.info.log2FC = rep(NA, length(use))
deseq.100.info.pval[use==TRUE] = res$pvalue
deseq.100.info.log2FC[use==TRUE] = res$log2FoldChange
window.size = 100
numC = siteSize%/%window.size
deseq.100.info.pval = (matrix(deseq.100.info.pval,ncol=numC,byrow=T))[dat.info$index,]
deseq.100.info.log2FC = (matrix(deseq.100.info.log2FC,ncol=numC,byrow=T))[dat.info$index,]
}
if(deseq.300.effect){
load(DESeq.300.info.path)
deseq.300.info.pval = deseq.300.info.log2FC = rep(NA, length(use))
deseq.300.info.pval[use==TRUE] = res$pvalue
deseq.300.info.log2FC[use==TRUE] = res$log2FoldChange
window.size = 300
numC = siteSize%/%window.size
deseq.300.info.pval = (matrix(deseq.300.info.pval,ncol=numC,byrow=T))[dat.info$index,]
deseq.300.info.log2FC = (matrix(deseq.300.info.log2FC,ncol=numC,byrow=T))[dat.info$index,]
}
load(DESeq.full.info.path)
deseq.full.info.pval = deseq.full.info.log2FC = rep(NA, length(use))
deseq.full.info.pval[use==TRUE] = res$pvalue
deseq.full.info.log2FC[use==TRUE] = res$log2FoldChange
deseq.full.info.pval = deseq.full.info.pval[dat.info$index]
deseq.full.info.log2FC = deseq.full.info.log2FC[dat.info$index]
numSites = dim(dat.info)[1]
for(ss in 1:numSites){
## ss = 1
## read location information
chr = dat.info$chr[ss]
st.posi = dat.info$st.posi[ss]
en.posi = dat.info$en.posi[ss]
#####################
# read data nulll
#####################
null = TRUE
sample.names = sample.names.null
sample.files = sample.files.null
numSam = length(sample.names)
numBPs = siteSize
library.read.depth = rep(0, numSam)
ATAC.dat = matrix(data=0, nr = numSam, nc = numBPs)
pcr.posi = vector("list", 2)
pcr.ix = 1
## for fwd
if((strand=='both') | (strand=='plus')){
## read library read depth
path.read.depth = paste0(library.read.depth.path, "library.read.depth.fwd")
library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
for(i in 1:numSam){
library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
}
## read read counts for a given region
## for + strand, we need get reads at locations that are shifted 4bp to left
ATAC.dat.fwd = matrix(data=NA, nr = numSam, nc = numBPs)
for(i in 1:numSam){
path.fwd = paste0(hdf5.data.path, sample.files[i] , ".fwd.h5")
ATAC.dat.fwd[i, 1:numBPs] = as.matrix(get.counts.h5(path.fwd, paste0("chr", chr), st.posi-4, en.posi-4))
}
## remove pcr artifacts
pcr.removed.fwd = remove.pcr.artifacts(data=ATAC.dat.fwd, win.half.size=50, prop.thresh=0.9)
ATAC.dat = ATAC.dat + pcr.removed.fwd$data
if(!is.null(pcr.removed.fwd$posi.with.pcr.artifacts)){
pcr.posi[[pcr.ix]] = pcr.removed.fwd$posi.with.pcr.artifacts
}
pcr.ix = pcr.ix + 1
}
## for reverse
if((strand=='both') | (strand=='minus')){
## read library read depth
path.read.depth = paste0(library.read.depth.path, "library.read.depth.rev")
library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
for(i in 1:6){
library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
}
## read read counts for a given region
## for - strand, we need get reads at locations that are shifted 4bp to right
ATAC.dat.rev = matrix(data=NA, nr = numSam, nc = numBPs)
for(i in 1:numSam){
path.rev = paste0(hdf5.data.path, sample.files[i] , ".rev.h5")
ATAC.dat.rev[i, 1:numBPs] = as.matrix(get.counts.h5(path.rev, paste0("chr", chr), st.posi+4, en.posi+4))
}
## remove pcr artifacts
pcr.removed.rev = remove.pcr.artifacts(data=ATAC.dat.rev, win.half.size=50, prop.thresh=0.9)
ATAC.dat = ATAC.dat + pcr.removed.rev$data
if(!is.null(pcr.removed.rev$posi.with.pcr.artifacts)){
pcr.posi[[pcr.ix]] = pcr.removed.rev$posi.with.pcr.artifacts
}
pcr.ix = pcr.ix + 1
}
phenoD = ATAC.dat
####################
# plot raw data
####################
## get phenotype
xmin = st.posi
xmax = en.posi
phe.D = phenoD/library.read.depth
trt.pheno = apply(phe.D[1:(numSam/2),], 2, mean)
ctr.pheno = apply(phe.D[(numSam/2+1):numSam,], 2, mean)
trt.RC = sum(phenoD[1:(numSam/2),])
ctr.RC = sum(phenoD[(numSam/2+1):numSam,])
ymin = 0
ymaxT = max(trt.pheno, ctr.pheno)*(1+ 0.05)
xval = xmin:xmax
ymax = ymaxT*10^6
## get pcr information
xval_mapp = NULL
if(length(unlist(pcr.posi)) > 0){
xval_mapp = xval[unlist(pcr.posi)]
}
## Make a raw phenotype figure
raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", control(red):", trt.RC, " control(blue):", ctr.RC)
par(mar = c(1,4,1,2))
plot(1,1,type="n", xlab = "position", ylab = "DNaseI cut rate per million reads",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =raw.title, axes=FALSE)
axis(1)
if(!is.null(xval_mapp)){
axis(1, at=xval_mapp, labels = FALSE, lwd.ticks = 2, col="dark green")
}
axis(2)
box()
### Transcription factor
sel.sites = all_bed[all_bed[,1] == paste("chr", chr, sep="") & all_bed[,2] < (xmax+1) & all_bed[,3] > xmin, ]
offset = -0.0025
if(dim(sel.sites)[1] > 0){
for(k in 1:dim(sel.sites)[1]){
offset = -offset
text(x=(sel.sites[k,2] + sel.sites[k,3])/2, y=(ymax -abs(offset) - offset), strsplit(as.character(sel.sites[k,4]), split="=")[[1]][2])
rect(sel.sites[k,2], 0, sel.sites[k,3], ymax + 1, col=rgb(0,1,0,0.3), border='NA')
}
}
points(xval, ctr.pheno*10^6, col = rgb(0,0,1,alpha=0.7), type="l")
points(xval, trt.pheno*10^6, col = rgb(1,0,0,alpha=0.7), type="l")
#GETS AND PLOTS ANY TSSs IN THE REGION
TSS <- TssAnno[(as.character(TssAnno[,1]) == paste("chr", chr, sep="")) & (TssAnno[,2] > xmin) & (TssAnno[,2] < (xmax+1)),]
if(dim(TSS)[1] > 0) {
for(k in 1:dim(TSS)[1]){
mtext('*', side=1, at=TSS[k,2], col='purple', cex=1.5, padj=1)
}
}
#####################
# read data alt
#####################
null = FALSE
sample.names = sample.names.alt
sample.files = sample.files.alt
numSam = length(sample.names)
numBPs = siteSize
library.read.depth = rep(0, numSam)
ATAC.dat = matrix(data=0, nr = numSam, nc = numBPs)
pcr.posi = vector("list", 2)
pcr.ix = 1
## for fwd
if((strand=='both') | (strand=='plus')){
## read library read depth
path.read.depth = paste0(library.read.depth.path, "library.read.depth.fwd")
library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
for(i in 1:numSam){
library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
}
## read read counts for a given region
## for + strand, we need get reads at locations that are shifted 4bp to left
ATAC.dat.fwd = matrix(data=NA, nr = numSam, nc = numBPs)
for(i in 1:numSam){
path.fwd = paste0(hdf5.data.path, sample.files[i] , ".fwd.h5")
ATAC.dat.fwd[i, 1:numBPs] = as.matrix(get.counts.h5(path.fwd, paste0("chr", chr), st.posi-4, en.posi-4))
}
## remove pcr artifacts
pcr.removed.fwd = remove.pcr.artifacts(data=ATAC.dat.fwd, win.half.size=50, prop.thresh=0.9)
ATAC.dat = ATAC.dat + pcr.removed.fwd$data
if(!is.null(pcr.removed.fwd$posi.with.pcr.artifacts)){
pcr.posi[[pcr.ix]] = pcr.removed.fwd$posi.with.pcr.artifacts
}
pcr.ix = pcr.ix + 1
}
## for reverse
if((strand=='both') | (strand=='minus')){
## read library read depth
path.read.depth = paste0(library.read.depth.path, "library.read.depth.rev")
library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
for(i in 1:6){
library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
}
## read read counts for a given region
## for - strand, we need get reads at locations that are shifted 4bp to right
ATAC.dat.rev = matrix(data=NA, nr = numSam, nc = numBPs)
for(i in 1:numSam){
path.rev = paste0(hdf5.data.path, sample.files[i] , ".rev.h5")
ATAC.dat.rev[i, 1:numBPs] = as.matrix(get.counts.h5(path.rev, paste0("chr", chr), st.posi+4, en.posi+4))
}
## remove pcr artifacts
pcr.removed.rev = remove.pcr.artifacts(data=ATAC.dat.rev, win.half.size=50, prop.thresh=0.9)
ATAC.dat = ATAC.dat + pcr.removed.rev$data
if(!is.null(pcr.removed.rev$posi.with.pcr.artifacts)){
pcr.posi[[pcr.ix]] = pcr.removed.rev$posi.with.pcr.artifacts
}
pcr.ix = pcr.ix + 1
}
phenoD = ATAC.dat
####################
# plot raw data
####################
## get phenotype
xmin = st.posi
xmax = en.posi
phe.D = phenoD/library.read.depth
trt.pheno = apply(phe.D[1:(numSam/2),], 2, mean)
ctr.pheno = apply(phe.D[(numSam/2+1):numSam,], 2, mean)
trt.RC = sum(phenoD[1:(numSam/2),])
ctr.RC = sum(phenoD[(numSam/2+1):numSam,])
ymin = 0
ymaxT = max(trt.pheno, ctr.pheno)*(1+ 0.05)
xval = xmin:xmax
ymax = ymaxT*10^6
## get pcr information
xval_mapp = NULL
if(length(unlist(pcr.posi)) > 0){
xval_mapp = xval[unlist(pcr.posi)]
}
## Make a raw phenotype figure
if(!null){
raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", treatment(red):", trt.RC, " control(blue):", ctr.RC)
}else{
raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", control(red):", trt.RC, " control(blue):", ctr.RC)
}
par(mar = c(1,4,4,2))
plot(1,1,type="n", xlab = "position", ylab = "DNaseI cut rate per million reads",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =raw.title, axes=FALSE)
axis(1)
if(!is.null(xval_mapp)){
axis(1, at=xval_mapp, labels = FALSE, lwd.ticks = 2, col="dark green")
}
axis(2)
box()
### Transcription factor
sel.sites = all_bed[all_bed[,1] == paste("chr", chr, sep="") & all_bed[,2] < (xmax+1) & all_bed[,3] > xmin, ]
offset = -0.0025
if(dim(sel.sites)[1] > 0){
for(k in 1:dim(sel.sites)[1]){
offset = -offset
text(x=(sel.sites[k,2] + sel.sites[k,3])/2, y=(ymax -abs(offset) - offset), strsplit(as.character(sel.sites[k,4]), split="=")[[1]][2])
rect(sel.sites[k,2], 0, sel.sites[k,3], ymax + 1, col=rgb(0,1,0,0.3), border='NA')
}
}
points(xval, ctr.pheno*10^6, col = rgb(0,0,1,alpha=0.7), type="l")
points(xval, trt.pheno*10^6, col = rgb(1,0,0,alpha=0.7), type="l")
#GETS AND PLOTS ANY TSSs IN THE REGION
TSS <- TssAnno[(as.character(TssAnno[,1]) == paste("chr", chr, sep="")) & (TssAnno[,2] > xmin) & (TssAnno[,2] < (xmax+1)),]
if(dim(TSS)[1] > 0) {
for(k in 1:dim(TSS)[1]){
mtext('*', side=1, at=TSS[k,2], col='purple', cex=1.5, padj=1)
}
}
########################
# multiseq effect size
########################
if(multiseq.effect){
## title
if(!null){
title = paste0("multiseq [+/-", sig.level, "] -log10(pval): ", round(-log(dat.info$pval.ms[ss],10),2), " logLR: ", round(dat.info$logLR.ms.alt[ss],2))
}else{
title = paste0("multiseq [+/-", sig.level, "] logLR: ", round(dat.info$logLR.ms.null[ss],2))
}
## get effect size
genoD = g
res = multiseq(x = phenoD, g = genoD, read.depth = library.read.depth)
effect.mean = -res$effect.mean
effect.sd = sqrt(res$effect.var)
effect.low = effect.mean - sig.level*effect.sd
effect.high= effect.mean + sig.level*effect.sd
ymax = max(effect.high) + 10^(-7)
ymin = min(effect.low) - 10^(-7)
wh.low = which(effect.low > 0)
wh.high = which(effect.high < 0)
high_wh = sort(unique(union(wh.low, wh.high)))
col_posi = xval[high_wh]
par(mar = c(1,4,4,2))
plot(1,1,type="n", xlab = "position", ylab = "Effect size",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =title, axes=FALSE)
axis(2)
if(length(col_posi) > 0){
for(j in 1:length(col_posi)){
polygon(c(col_posi[j]-0.5, col_posi[j]-0.5, col_posi[j]+0.5, col_posi[j]+0.5), c(ymin-2, ymax+2, ymax+2, ymin-2), col ="pink", border = NA)
}
}
abline(h = 0, col = "red")
points(xval, effect.mean, col = "blue", type="l")
points(xval, effect.high, col = "skyblue", type="l")
points(xval, effect.low, col = "skyblue", type="l")
box()
}
########################
# wavelet effect size
########################
if(wave.effect){
## title
if(!null){
title = paste0("wavelet [+/-", sig.level, "] -log10(pval): ", round(-log(dat.info$pval.wave[ss],10),2), " logLR: ", round(dat.info$logLR.wave.alt[ss],2))
}else{
title = paste0("wavelet [+/-", sig.level, "] logLR: ", round(dat.info$logLR.wave.null[ss],2))
}
## get effect size
Wmat = read.table(paste0(WaveQTL.repodir, "data/DWT/Wmat_", siteSize), as.is = TRUE)
W2mat = Wmat*Wmat
if(!null){
path.wave = paste0(wave.out.path, treatment, ".", siteSize, ".", strand, ".alt.run/output/", treatment, ".", siteSize, ".", strand, ".alt.", chr, ".", dat.info$sites[ss], ".fph.")
}else{
path.wave = paste0(wave.out.path, treatment, ".", siteSize, ".", strand, ".null.run/output/", treatment, ".", siteSize, ".", strand, ".null.", chr, ".", dat.info$sites[ss], ".fph.")
}
effect.mean.w = as.numeric(read.table(paste0(path.wave, "mean.txt", sep=""))[-1])
effect.mean = -matrix(data=effect.mean.w, nr = 1, nc = siteSize)%*%as.matrix(Wmat)
effect.var.w = as.numeric(read.table(paste0(path.wave, "var.txt", sep=""))[-1])
effect.sd = sqrt(matrix(data=effect.var.w, nr = 1, nc = siteSize)%*%as.matrix(W2mat))
effect.low = effect.mean - sig.level*effect.sd
effect.high= effect.mean + sig.level*effect.sd
ymax = max(effect.high) + 10^(-7)
ymin = min(effect.low) - 10^(-7)
wh.low = which(effect.low > 0)
wh.high = which(effect.high < 0)
high_wh = sort(unique(union(wh.low, wh.high)))
col_posi = xval[high_wh]
par(mar = c(1,4,2,2))
plot(1,1,type="n", xlab = "position", ylab = "Effect size",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =title, axes=FALSE)
axis(2)
if(length(col_posi) > 0){
for(j in 1:length(col_posi)){
polygon(c(col_posi[j]-0.5, col_posi[j]-0.5, col_posi[j]+0.5, col_posi[j]+0.5), c(ymin-2, ymax+2, ymax+2, ymin-2), col ="pink", border = NA)
}
}
abline(h = 0, col = "red")
points(xval, effect.mean, col = "blue", type="l")
points(xval, effect.high, col = "skyblue", type="l")
points(xval, effect.low, col = "skyblue", type="l")
box()
}
########################
## deseq 100 effect
########################
if(deseq.100.effect){
window.size = 100
numC = siteSize%/%window.size
## read p-value
deseq.mlogpval.all = -log(as.numeric(deseq.100.info.pval[ss,]),10)
deseq.mlogpval.max = max(deseq.mlogpval.all, na.rm = TRUE)
## read log2FC
deseq.log2FC = as.numeric(deseq.100.info.log2FC[ss,])
## title
if(!null){
title = paste0("DESeq2 -log10(pval): ", round(-log(dat.info$pval.deseq.100[ss],10),2), " -log(min(pval)): ", round(deseq.mlogpval.max,2), "-log10(pval) full: ", round(-log(dat.info$pval.deseq.1024[ss],10),2))
}else{
title = paste0("DESeq")
}
ymax.t = 4
ymin.t = 0
plot(1,1,type="n", xlab = "position", ylab = "DESeq -log10(pvalue)",ylim=c(ymin.t, ymax.t),xlim=c(xmin, xmax),main = title, axes=FALSE)
axis(2)
xleft = rep(NA, numC)
xright = rep(NA, numC)
xleft[1] = xmin
for(j in 1:(numC-1)){
xleft[j+1] = xleft[j] + window.size
xright[j] = xleft[j+1] - 1
}
xright[numC] = xmax
ybottom = ytop = rep(0,numC)
ytop = deseq.mlogpval.all
rect(xleft, ybottom, xright, ytop, col = "grey")
box()
}
########################
## deseq 300 effect
########################
if(deseq.300.effect){
window.size = 300
numC = siteSize%/%window.size
## read p-value
deseq.mlogpval.all = -log(as.numeric(deseq.300.info.pval[ss,]),10)
deseq.mlogpval.max = max(deseq.mlogpval.all, na.rm = TRUE)
## read log2FC
deseq.log2FC = as.numeric(deseq.300.info.log2FC[ss,])
## title
if(!null){
title = paste0("DESeq2 -log10(pval): ", round(-log(dat.info$pval.deseq.300[ss],10),2), " -log(min(pval)): ", round(deseq.mlogpval.max,2), "-log10(pval) full: ", round(-log(dat.info$pval.deseq.1024[ss],10),2))
}else{
title = paste0("DESeq")
}
ymax.t = 4
ymin.t = 0
plot(1,1,type="n", xlab = "position", ylab = "DESeq -log10(pvalue)",ylim=c(ymin.t, ymax.t),xlim=c(xmin, xmax),main = title, axes=FALSE)
axis(2)
xleft = rep(NA, numC)
xright = rep(NA, numC)
xleft[1] = xmin
for(j in 1:(numC-1)){
xleft[j+1] = xleft[j] + window.size
xright[j] = xleft[j+1] - 1
}
xright[numC] = xmax
ybottom = ytop = rep(0,numC)
ytop = deseq.mlogpval.all
rect(xleft, ybottom, xright, ytop, col = "grey")
box()
}
}
dev.off()
| /src/R/make.effect.figure.on.roger.newATACseq.R | no_license | heejungshim/multiscale_analysis | R | false | false | 25,153 | r | ## `make.effect.figure.on.roger.newATACseq.R' makes effect size figures from multiseq, wavelets, DESeq for selected sites
#
## Example Usage : R CMD BATCH --no-save --no-restore "--args info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/summary/Copper.1024.both.msOnly.ms.DESeq300.info' DESeq.100.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.100.alt.run/output/res.Robj' DESeq.300.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.300.alt.run/output/res.Robj' DESeq.full.info.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.1024.alt.run/output/res.Robj' out.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/code/' wave.out.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/wave/' file.name='ES' siteSize=1024 treatment='Copper' strand='both' sig.level=2 wave.effect=TRUE multiseq.effect=TRUE deseq.100.effect=TRUE deseq.300.effect=TRUE" /mnt/lustre/home/shim/multiscale_analysis/src/R/make.effect.figure.on.roger.ATACseq.R
##
##
## info.path : path to file that contains information on sites of interest (index chr sites st.posi en.posi pval.wave pval.ms pval.deseq.full pval.deseq.600 pval.deseq.300 pval.deseq.100 qval.wave qval.ms qval.deseq.full qval.deseq.600 qval.deseq.300 qval.deseq.100 logLR.wave logLR.ms)
## DESeq.100.info.path : path to file that contains DESeq2 (bin : 100) results for sub windows (p-value and fold change)
## DESeq.300.info.path : path to file that contains DESeq2 (bin : 300) results for sub windows (p-value and fold change)
## DESeq.full.info.path : path to file that contains DESeq2 (bin : size size) results for sub windows (p-value or fold change)
## out.path : path to directory where figures will be saved
## wave.out.path : path to directory which contains results from wavelet analysis
## file.name : output figure file name
## siteSize : site size
## treatment : treatment name
## strand : 'both', 'plus', 'minus'; add two strands, use + strand, or use - strand
## sig.level : +/- sig.level * standard deviation
## wave.effect : indicate whether effect size from wavelet is plotted
## multiseq.effect : indicate whether effect size from multiseq is plotted
## deseq.100.effect : indicate whether effect size from DESeq2 (bin :100) is plotted
## deseq.300.effect : indicate whether effect size from DESeq2 (bin :300) is plotted
##
## Copyright (C) 2014 Heejung Shim
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
setwd("/mnt/lustre/home/shim/multiscale_analysis")
library("multiseq")
library("ashr")
multiscale.analysis.repodir <- scan(".multiscale_analysis.repodir.txt", what=character())
source(paste0(multiscale.analysis.repodir, "/src/R/utils.R"))
source(paste0(multiscale.analysis.repodir, "/src/R/my.utils.R"))
WaveQTL.repodir <- scan(".WaveQTL.repodir.txt", what=character())
##info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/summary/Copper.1024.both.all.ms.DESeq300.info'
##DESeq.100.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.100.alt.run/output/res.Robj'
##DESeq.300.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.300.alt.run/output/res.Robj'
##DESeq.full.info.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/deseq/Copper.1024.both.1024.alt.run/output/res.Robj'
##out.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/gen.fig/Copper.1024.both/fig/'
##wave.out.path = '/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/run/wave/'
##file.name= 'ES'
##siteSize=1024
##treatment='Copper'
##strand='both'
##strand='plus'
##strand='minus'
##sig.level = 2
##wave.effect=TRUE
##multiseq.effect=TRUE
##deseq.100.effect=TRUE
##deseq.300.effect=TRUE
args = (commandArgs(TRUE))
eval(parse(text=args[[1]]))
eval(parse(text=args[[2]]))
eval(parse(text=args[[3]]))
eval(parse(text=args[[4]]))
eval(parse(text=args[[5]]))
eval(parse(text=args[[6]]))
eval(parse(text=args[[7]]))
eval(parse(text=args[[8]]))
eval(parse(text=args[[9]]))
eval(parse(text=args[[10]]))
eval(parse(text=args[[11]]))
eval(parse(text=args[[12]]))
eval(parse(text=args[[13]]))
eval(parse(text=args[[14]]))
eval(parse(text=args[[15]]))
## assigen treatment and control name according to input
## treatment alt null control
## Copper N702 N705 N706
## Selenium N703 N705 N706
## Retinoic N704 N706 N705
##############################################
## sample name and sample file for null data
##############################################
null = TRUE
name.treatment = NULL
name.control = NULL
if(treatment=='Copper'){
name.control = "N706"
if(!null){
name.treatment = "N702"
}else{
name.treatment = "N705"
}
}
if(treatment=='Selenium'){
name.control = "N706"
if(!null){
name.treatment = "N703"
}else{
name.treatment = "N705"
}
}
if(treatment=='Retinoic'){
name.control = "N705"
if(!null){
name.treatment = "N704"
}else{
name.treatment = "N706"
}
}
## sample names
names.Sam = c("N501", "N502", "N503")
## Make a list of sample names and a list of hdf5 file names : treatment first and control later.
sample.names = c(paste0(name.treatment, names.Sam), paste0(name.control, names.Sam))
sample.files = paste0(sample.names, ".qfiltered10")
sample.names.null = sample.names
sample.files.null = sample.files
##############################################
## sample name and sample file for alternative data
##############################################
null = FALSE
name.treatment = NULL
name.control = NULL
if(treatment=='Copper'){
name.control = "N706"
if(!null){
name.treatment = "N702"
}else{
name.treatment = "N705"
}
}
if(treatment=='Selenium'){
name.control = "N706"
if(!null){
name.treatment = "N703"
}else{
name.treatment = "N705"
}
}
if(treatment=='Retinoic'){
name.control = "N705"
if(!null){
name.treatment = "N704"
}else{
name.treatment = "N706"
}
}
## sample names
names.Sam = c("N501", "N502", "N503")
## Make a list of sample names and a list of hdf5 file names : treatment first and control later.
sample.names = c(paste0(name.treatment, names.Sam), paste0(name.control, names.Sam))
sample.files = paste0(sample.names, ".qfiltered10")
sample.names.alt = sample.names
sample.files.alt = sample.files
## Path to directory which contain ATAC-seq data as hdf5 format,
hdf5.data.path = "/data/share/genome_db/hg19/roger_atacseq2/"
## Make a covariate
## group indicator: first 3 samples (treatment) = 0, last 3 (control) = 1
g = c(rep(0, length(names.Sam)), rep(1, length(names.Sam)))
## Path to library read depth
library.read.depth.path = "/mnt/lustre/home/shim/multiscale_analysis/analysis/roger_ATAC2/info/"
## read TF
## DNase footprint annotation (chr, start, end, name) used to shade TF sites
all_bed = read.table(gzfile('/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints/OpenChromDnaseGm19239.6c.bed.gz'))
#TssAnno <- read.table('../data/Ensembl2.txt',header=F,as.is=T)
## TSS annotation; TSS positions are marked with '*' under the raw-data panels
TssAnno = read.table(gzfile('/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints/Copper.TSS.DiffExpressed.FDR10.bed.gz'))
#/mnt/gluster/data/external_private_supp/roger_atacseq/dnasefootprints
#OpenChromDnaseGm19239.6c.bed.gz
#factorNames2.txt
## set up working directory and open figure file
setwd(out.path)
## number of panels per site: optional effect panels + 2 raw-data panels
numfig = wave.effect + multiseq.effect + deseq.100.effect + deseq.300.effect + 2
if(numfig <= 2){
pdf(paste0(out.path, file.name, ".effect.pdf"), width=10, height=5)
}else{
pdf(paste0(out.path, file.name, ".effect.pdf"), width=10, height=7)
}
## stack all panels vertically on one page
nf <- layout(matrix(1:numfig,numfig,1,byrow=TRUE),TRUE)
#############################
# read all information
#############################
## per-site summary table (chr, st.posi, en.posi, index, p-values, logLRs)
dat.info = read.table(file=info.path, header = TRUE, as.is=TRUE)
if(deseq.100.effect){
## load() provides 'use' (logical filter) and 'res' (DESeq2 results)
## NOTE(review): inferred from usage below; confirm the .RData contents.
load(DESeq.100.info.path)
deseq.100.info.pval = deseq.100.info.log2FC = rep(NA, length(use))
deseq.100.info.pval[use==TRUE] = res$pvalue
deseq.100.info.log2FC[use==TRUE] = res$log2FoldChange
## reshape the window-level vectors to one row per site (100bp windows)
window.size = 100
numC = siteSize%/%window.size
deseq.100.info.pval = (matrix(deseq.100.info.pval,ncol=numC,byrow=T))[dat.info$index,]
deseq.100.info.log2FC = (matrix(deseq.100.info.log2FC,ncol=numC,byrow=T))[dat.info$index,]
}
if(deseq.300.effect){
load(DESeq.300.info.path)
deseq.300.info.pval = deseq.300.info.log2FC = rep(NA, length(use))
deseq.300.info.pval[use==TRUE] = res$pvalue
deseq.300.info.log2FC[use==TRUE] = res$log2FoldChange
## reshape to one row per site (300bp windows)
window.size = 300
numC = siteSize%/%window.size
deseq.300.info.pval = (matrix(deseq.300.info.pval,ncol=numC,byrow=T))[dat.info$index,]
deseq.300.info.log2FC = (matrix(deseq.300.info.log2FC,ncol=numC,byrow=T))[dat.info$index,]
}
## full-site (one window per site) DESeq2 results; same 'use'/'res' pattern
load(DESeq.full.info.path)
deseq.full.info.pval = deseq.full.info.log2FC = rep(NA, length(use))
deseq.full.info.pval[use==TRUE] = res$pvalue
deseq.full.info.log2FC[use==TRUE] = res$log2FoldChange
deseq.full.info.pval = deseq.full.info.pval[dat.info$index]
deseq.full.info.log2FC = deseq.full.info.log2FC[dat.info$index]
numSites = dim(dat.info)[1]
## ---------------------------------------------------------------------------
## For each candidate site: read the null- and alternative-comparison ATAC-seq
## data, plot the raw per-bp cut rates, then (depending on the *.effect flags)
## overlay the multiseq effect size, the wavelet (WaveQTL) effect size, and
## DESeq2 window-based -log10 p-values.  Panels go to the pdf opened above.
## ---------------------------------------------------------------------------

## The DWT matrix does not depend on the site; read it once instead of once
## per iteration (behaviour unchanged, hoisted loop-invariant I/O).
if(wave.effect){
  Wmat = read.table(paste0(WaveQTL.repodir, "data/DWT/Wmat_", siteSize), as.is = TRUE)
  W2mat = Wmat*Wmat
}

for(ss in 1:numSites){
  ## location of this site
  chr = dat.info$chr[ss]
  st.posi = dat.info$st.posi[ss]
  en.posi = dat.info$en.posi[ss]

  #####################
  # read data for the null comparison
  #####################
  null = TRUE
  sample.names = sample.names.null
  sample.files = sample.files.null
  numSam = length(sample.names)
  numBPs = siteSize
  library.read.depth = rep(0, numSam)
  ATAC.dat = matrix(data=0, nrow = numSam, ncol = numBPs)
  ## per-strand lists of positions flagged as PCR artifacts
  pcr.posi = vector("list", 2)
  pcr.ix = 1

  ## forward strand
  if((strand=='both') | (strand=='plus')){
    ## accumulate per-sample library read depth
    path.read.depth = paste0(library.read.depth.path, "library.read.depth.fwd")
    library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
    for(i in seq_len(numSam)){
      library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
    }
    ## read read counts for a given region
    ## for + strand, we need get reads at locations that are shifted 4bp to left
    ATAC.dat.fwd = matrix(data=NA, nrow = numSam, ncol = numBPs)
    for(i in seq_len(numSam)){
      path.fwd = paste0(hdf5.data.path, sample.files[i], ".fwd.h5")
      ATAC.dat.fwd[i, 1:numBPs] = as.matrix(get.counts.h5(path.fwd, paste0("chr", chr), st.posi-4, en.posi-4))
    }
    ## remove pcr artifacts
    pcr.removed.fwd = remove.pcr.artifacts(data=ATAC.dat.fwd, win.half.size=50, prop.thresh=0.9)
    ATAC.dat = ATAC.dat + pcr.removed.fwd$data
    if(!is.null(pcr.removed.fwd$posi.with.pcr.artifacts)){
      pcr.posi[[pcr.ix]] = pcr.removed.fwd$posi.with.pcr.artifacts
    }
    pcr.ix = pcr.ix + 1
  }

  ## reverse strand
  if((strand=='both') | (strand=='minus')){
    path.read.depth = paste0(library.read.depth.path, "library.read.depth.rev")
    library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
    ## BUG FIX: was 'for(i in 1:6)' (hard-coded sample count); the matching
    ## forward-strand block loops over all numSam samples.
    for(i in seq_len(numSam)){
      library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
    }
    ## read read counts for a given region
    ## for - strand, we need get reads at locations that are shifted 4bp to right
    ATAC.dat.rev = matrix(data=NA, nrow = numSam, ncol = numBPs)
    for(i in seq_len(numSam)){
      path.rev = paste0(hdf5.data.path, sample.files[i], ".rev.h5")
      ATAC.dat.rev[i, 1:numBPs] = as.matrix(get.counts.h5(path.rev, paste0("chr", chr), st.posi+4, en.posi+4))
    }
    ## remove pcr artifacts
    pcr.removed.rev = remove.pcr.artifacts(data=ATAC.dat.rev, win.half.size=50, prop.thresh=0.9)
    ATAC.dat = ATAC.dat + pcr.removed.rev$data
    if(!is.null(pcr.removed.rev$posi.with.pcr.artifacts)){
      pcr.posi[[pcr.ix]] = pcr.removed.rev$posi.with.pcr.artifacts
    }
    pcr.ix = pcr.ix + 1
  }
  phenoD = ATAC.dat

  ####################
  # plot raw data (null comparison)
  ####################
  xmin = st.posi
  xmax = en.posi
  ## normalise by library depth; first half of rows = "treatment" group,
  ## second half = control group
  phe.D = phenoD/library.read.depth
  trt.pheno = apply(phe.D[1:(numSam/2),], 2, mean)
  ctr.pheno = apply(phe.D[(numSam/2+1):numSam,], 2, mean)
  trt.RC = sum(phenoD[1:(numSam/2),])
  ctr.RC = sum(phenoD[(numSam/2+1):numSam,])
  ymin = 0
  ymaxT = max(trt.pheno, ctr.pheno)*(1+ 0.05)
  xval = xmin:xmax
  ymax = ymaxT*10^6
  ## positions with PCR artifacts get extra (dark green) tick marks
  xval_mapp = NULL
  if(length(unlist(pcr.posi)) > 0){
    xval_mapp = xval[unlist(pcr.posi)]
  }
  ## Make a raw phenotype figure (both groups are controls under the null)
  raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", control(red):", trt.RC, " control(blue):", ctr.RC)
  par(mar = c(1,4,1,2))
  plot(1,1,type="n", xlab = "position", ylab = "DNaseI cut rate per million reads",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =raw.title, axes=FALSE)
  axis(1)
  if(!is.null(xval_mapp)){
    axis(1, at=xval_mapp, labels = FALSE, lwd.ticks = 2, col="dark green")
  }
  axis(2)
  box()
  ### Transcription factor footprints overlapping the region (shaded green)
  sel.sites = all_bed[all_bed[,1] == paste("chr", chr, sep="") & all_bed[,2] < (xmax+1) & all_bed[,3] > xmin, ]
  offset = -0.0025
  if(dim(sel.sites)[1] > 0){
    for(k in seq_len(dim(sel.sites)[1])){
      offset = -offset  # alternate label height to reduce overlap
      text(x=(sel.sites[k,2] + sel.sites[k,3])/2, y=(ymax -abs(offset) - offset), strsplit(as.character(sel.sites[k,4]), split="=")[[1]][2])
      rect(sel.sites[k,2], 0, sel.sites[k,3], ymax + 1, col=rgb(0,1,0,0.3), border='NA')
    }
  }
  points(xval, ctr.pheno*10^6, col = rgb(0,0,1,alpha=0.7), type="l")
  points(xval, trt.pheno*10^6, col = rgb(1,0,0,alpha=0.7), type="l")
  #GETS AND PLOTS ANY TSSs IN THE REGION
  TSS <- TssAnno[(as.character(TssAnno[,1]) == paste("chr", chr, sep="")) & (TssAnno[,2] > xmin) & (TssAnno[,2] < (xmax+1)),]
  if(dim(TSS)[1] > 0) {
    for(k in seq_len(dim(TSS)[1])){
      mtext('*', side=1, at=TSS[k,2], col='purple', cex=1.5, padj=1)
    }
  }

  #####################
  # read data for the alternative (treatment vs control) comparison
  #####################
  null = FALSE
  sample.names = sample.names.alt
  sample.files = sample.files.alt
  numSam = length(sample.names)
  numBPs = siteSize
  library.read.depth = rep(0, numSam)
  ATAC.dat = matrix(data=0, nrow = numSam, ncol = numBPs)
  pcr.posi = vector("list", 2)
  pcr.ix = 1

  ## forward strand
  if((strand=='both') | (strand=='plus')){
    path.read.depth = paste0(library.read.depth.path, "library.read.depth.fwd")
    library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
    for(i in seq_len(numSam)){
      library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
    }
    ## for + strand, reads are shifted 4bp to the left
    ATAC.dat.fwd = matrix(data=NA, nrow = numSam, ncol = numBPs)
    for(i in seq_len(numSam)){
      path.fwd = paste0(hdf5.data.path, sample.files[i], ".fwd.h5")
      ATAC.dat.fwd[i, 1:numBPs] = as.matrix(get.counts.h5(path.fwd, paste0("chr", chr), st.posi-4, en.posi-4))
    }
    ## remove pcr artifacts
    pcr.removed.fwd = remove.pcr.artifacts(data=ATAC.dat.fwd, win.half.size=50, prop.thresh=0.9)
    ATAC.dat = ATAC.dat + pcr.removed.fwd$data
    if(!is.null(pcr.removed.fwd$posi.with.pcr.artifacts)){
      pcr.posi[[pcr.ix]] = pcr.removed.fwd$posi.with.pcr.artifacts
    }
    pcr.ix = pcr.ix + 1
  }

  ## reverse strand
  if((strand=='both') | (strand=='minus')){
    path.read.depth = paste0(library.read.depth.path, "library.read.depth.rev")
    library.read.depth.dat = read.table(path.read.depth, as.is=TRUE)
    ## BUG FIX: was 'for(i in 1:6)' (hard-coded sample count); use numSam.
    for(i in seq_len(numSam)){
      library.read.depth[i] = library.read.depth[i] + library.read.depth.dat[which(library.read.depth.dat[,1] == sample.names[i]),2]
    }
    ## for - strand, reads are shifted 4bp to the right
    ATAC.dat.rev = matrix(data=NA, nrow = numSam, ncol = numBPs)
    for(i in seq_len(numSam)){
      path.rev = paste0(hdf5.data.path, sample.files[i], ".rev.h5")
      ATAC.dat.rev[i, 1:numBPs] = as.matrix(get.counts.h5(path.rev, paste0("chr", chr), st.posi+4, en.posi+4))
    }
    ## remove pcr artifacts
    pcr.removed.rev = remove.pcr.artifacts(data=ATAC.dat.rev, win.half.size=50, prop.thresh=0.9)
    ATAC.dat = ATAC.dat + pcr.removed.rev$data
    if(!is.null(pcr.removed.rev$posi.with.pcr.artifacts)){
      pcr.posi[[pcr.ix]] = pcr.removed.rev$posi.with.pcr.artifacts
    }
    pcr.ix = pcr.ix + 1
  }
  phenoD = ATAC.dat

  ####################
  # plot raw data (alternative comparison)
  ####################
  xmin = st.posi
  xmax = en.posi
  phe.D = phenoD/library.read.depth
  trt.pheno = apply(phe.D[1:(numSam/2),], 2, mean)
  ctr.pheno = apply(phe.D[(numSam/2+1):numSam,], 2, mean)
  trt.RC = sum(phenoD[1:(numSam/2),])
  ctr.RC = sum(phenoD[(numSam/2+1):numSam,])
  ymin = 0
  ymaxT = max(trt.pheno, ctr.pheno)*(1+ 0.05)
  xval = xmin:xmax
  ymax = ymaxT*10^6
  ## get pcr information
  xval_mapp = NULL
  if(length(unlist(pcr.posi)) > 0){
    xval_mapp = xval[unlist(pcr.posi)]
  }
  ## Make a raw phenotype figure
  if(!null){
    raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", treatment(red):", trt.RC, " control(blue):", ctr.RC)
  }else{
    raw.title = paste0("chr", chr, ":", st.posi, "-", en.posi, ", control(red):", trt.RC, " control(blue):", ctr.RC)
  }
  par(mar = c(1,4,4,2))
  plot(1,1,type="n", xlab = "position", ylab = "DNaseI cut rate per million reads",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =raw.title, axes=FALSE)
  axis(1)
  if(!is.null(xval_mapp)){
    axis(1, at=xval_mapp, labels = FALSE, lwd.ticks = 2, col="dark green")
  }
  axis(2)
  box()
  ### Transcription factor footprints
  sel.sites = all_bed[all_bed[,1] == paste("chr", chr, sep="") & all_bed[,2] < (xmax+1) & all_bed[,3] > xmin, ]
  offset = -0.0025
  if(dim(sel.sites)[1] > 0){
    for(k in seq_len(dim(sel.sites)[1])){
      offset = -offset
      text(x=(sel.sites[k,2] + sel.sites[k,3])/2, y=(ymax -abs(offset) - offset), strsplit(as.character(sel.sites[k,4]), split="=")[[1]][2])
      rect(sel.sites[k,2], 0, sel.sites[k,3], ymax + 1, col=rgb(0,1,0,0.3), border='NA')
    }
  }
  points(xval, ctr.pheno*10^6, col = rgb(0,0,1,alpha=0.7), type="l")
  points(xval, trt.pheno*10^6, col = rgb(1,0,0,alpha=0.7), type="l")
  #GETS AND PLOTS ANY TSSs IN THE REGION
  TSS <- TssAnno[(as.character(TssAnno[,1]) == paste("chr", chr, sep="")) & (TssAnno[,2] > xmin) & (TssAnno[,2] < (xmax+1)),]
  if(dim(TSS)[1] > 0) {
    for(k in seq_len(dim(TSS)[1])){
      mtext('*', side=1, at=TSS[k,2], col='purple', cex=1.5, padj=1)
    }
  }

  ########################
  # multiseq effect size
  ########################
  if(multiseq.effect){
    ## panel title from the pre-computed summary table
    if(!null){
      title = paste0("multiseq [+/-", sig.level, "] -log10(pval): ", round(-log(dat.info$pval.ms[ss],10),2), " logLR: ", round(dat.info$logLR.ms.alt[ss],2))
    }else{
      title = paste0("multiseq [+/-", sig.level, "] logLR: ", round(dat.info$logLR.ms.null[ss],2))
    }
    ## effect size from multiseq on the alternative data
    ## NOTE(review): sign is flipped ('-res$effect.mean') -- presumably to
    ## match the plotting convention; confirm.
    genoD = g
    res = multiseq(x = phenoD, g = genoD, read.depth = library.read.depth)
    effect.mean = -res$effect.mean
    effect.sd = sqrt(res$effect.var)
    effect.low = effect.mean - sig.level*effect.sd
    effect.high= effect.mean + sig.level*effect.sd
    ymax = max(effect.high) + 10^(-7)
    ymin = min(effect.low) - 10^(-7)
    ## highlight positions whose +/- sig.level band excludes zero
    wh.low = which(effect.low > 0)
    wh.high = which(effect.high < 0)
    high_wh = sort(unique(union(wh.low, wh.high)))
    col_posi = xval[high_wh]
    par(mar = c(1,4,4,2))
    plot(1,1,type="n", xlab = "position", ylab = "Effect size",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =title, axes=FALSE)
    axis(2)
    if(length(col_posi) > 0){
      for(j in seq_along(col_posi)){
        polygon(c(col_posi[j]-0.5, col_posi[j]-0.5, col_posi[j]+0.5, col_posi[j]+0.5), c(ymin-2, ymax+2, ymax+2, ymin-2), col ="pink", border = NA)
      }
    }
    abline(h = 0, col = "red")
    points(xval, effect.mean, col = "blue", type="l")
    points(xval, effect.high, col = "skyblue", type="l")
    points(xval, effect.low, col = "skyblue", type="l")
    box()
  }

  ########################
  # wavelet effect size
  ########################
  if(wave.effect){
    ## title
    if(!null){
      title = paste0("wavelet [+/-", sig.level, "] -log10(pval): ", round(-log(dat.info$pval.wave[ss],10),2), " logLR: ", round(dat.info$logLR.wave.alt[ss],2))
    }else{
      title = paste0("wavelet [+/-", sig.level, "] logLR: ", round(dat.info$logLR.wave.null[ss],2))
    }
    ## back-transform WaveQTL's wavelet-space posterior mean/variance to
    ## data space using the DWT matrix (Wmat/W2mat read before the loop)
    if(!null){
      path.wave = paste0(wave.out.path, treatment, ".", siteSize, ".", strand, ".alt.run/output/", treatment, ".", siteSize, ".", strand, ".alt.", chr, ".", dat.info$sites[ss], ".fph.")
    }else{
      path.wave = paste0(wave.out.path, treatment, ".", siteSize, ".", strand, ".null.run/output/", treatment, ".", siteSize, ".", strand, ".null.", chr, ".", dat.info$sites[ss], ".fph.")
    }
    ## (paste0 has no 'sep' argument; the stray 'sep=""' was removed)
    effect.mean.w = as.numeric(read.table(paste0(path.wave, "mean.txt"))[-1])
    effect.mean = -matrix(data=effect.mean.w, nrow = 1, ncol = siteSize)%*%as.matrix(Wmat)
    effect.var.w = as.numeric(read.table(paste0(path.wave, "var.txt"))[-1])
    effect.sd = sqrt(matrix(data=effect.var.w, nrow = 1, ncol = siteSize)%*%as.matrix(W2mat))
    effect.low = effect.mean - sig.level*effect.sd
    effect.high= effect.mean + sig.level*effect.sd
    ymax = max(effect.high) + 10^(-7)
    ymin = min(effect.low) - 10^(-7)
    wh.low = which(effect.low > 0)
    wh.high = which(effect.high < 0)
    high_wh = sort(unique(union(wh.low, wh.high)))
    col_posi = xval[high_wh]
    par(mar = c(1,4,2,2))
    plot(1,1,type="n", xlab = "position", ylab = "Effect size",ylim=c(ymin, ymax),xlim=c(xmin, xmax),main =title, axes=FALSE)
    axis(2)
    if(length(col_posi) > 0){
      for(j in seq_along(col_posi)){
        polygon(c(col_posi[j]-0.5, col_posi[j]-0.5, col_posi[j]+0.5, col_posi[j]+0.5), c(ymin-2, ymax+2, ymax+2, ymin-2), col ="pink", border = NA)
      }
    }
    abline(h = 0, col = "red")
    points(xval, effect.mean, col = "blue", type="l")
    points(xval, effect.high, col = "skyblue", type="l")
    points(xval, effect.low, col = "skyblue", type="l")
    box()
  }

  ########################
  ## deseq 100 effect
  ########################
  if(deseq.100.effect){
    window.size = 100
    numC = siteSize%/%window.size
    ## -log10 p-value per 100bp window
    deseq.mlogpval.all = -log(as.numeric(deseq.100.info.pval[ss,]),10)
    deseq.mlogpval.max = max(deseq.mlogpval.all, na.rm = TRUE)
    ## log2 fold change per window (read but not plotted)
    deseq.log2FC = as.numeric(deseq.100.info.log2FC[ss,])
    ## title
    if(!null){
      title = paste0("DESeq2 -log10(pval): ", round(-log(dat.info$pval.deseq.100[ss],10),2), " -log(min(pval)): ", round(deseq.mlogpval.max,2), "-log10(pval) full: ", round(-log(dat.info$pval.deseq.1024[ss],10),2))
    }else{
      title = paste0("DESeq")
    }
    ymax.t = 4
    ymin.t = 0
    plot(1,1,type="n", xlab = "position", ylab = "DESeq -log10(pvalue)",ylim=c(ymin.t, ymax.t),xlim=c(xmin, xmax),main = title, axes=FALSE)
    axis(2)
    ## window boundaries (seq_len guards the degenerate numC == 1 case,
    ## where the original '1:(numC-1)' would run backwards)
    xleft = rep(NA, numC)
    xright = rep(NA, numC)
    xleft[1] = xmin
    for(j in seq_len(numC-1)){
      xleft[j+1] = xleft[j] + window.size
      xright[j] = xleft[j+1] - 1
    }
    xright[numC] = xmax
    ybottom = ytop = rep(0,numC)
    ytop = deseq.mlogpval.all
    rect(xleft, ybottom, xright, ytop, col = "grey")
    box()
  }

  ########################
  ## deseq 300 effect
  ########################
  if(deseq.300.effect){
    window.size = 300
    numC = siteSize%/%window.size
    ## -log10 p-value per 300bp window
    deseq.mlogpval.all = -log(as.numeric(deseq.300.info.pval[ss,]),10)
    deseq.mlogpval.max = max(deseq.mlogpval.all, na.rm = TRUE)
    ## log2 fold change per window (read but not plotted)
    deseq.log2FC = as.numeric(deseq.300.info.log2FC[ss,])
    ## title
    if(!null){
      title = paste0("DESeq2 -log10(pval): ", round(-log(dat.info$pval.deseq.300[ss],10),2), " -log(min(pval)): ", round(deseq.mlogpval.max,2), "-log10(pval) full: ", round(-log(dat.info$pval.deseq.1024[ss],10),2))
    }else{
      title = paste0("DESeq")
    }
    ymax.t = 4
    ymin.t = 0
    plot(1,1,type="n", xlab = "position", ylab = "DESeq -log10(pvalue)",ylim=c(ymin.t, ymax.t),xlim=c(xmin, xmax),main = title, axes=FALSE)
    axis(2)
    xleft = rep(NA, numC)
    xright = rep(NA, numC)
    xleft[1] = xmin
    for(j in seq_len(numC-1)){
      xleft[j+1] = xleft[j] + window.size
      xright[j] = xleft[j+1] - 1
    }
    xright[numC] = xmax
    ybottom = ytop = rep(0,numC)
    ytop = deseq.mlogpval.all
    rect(xleft, ybottom, xright, ytop, col = "grey")
    box()
  }
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vortexRdata_pkg_data.R
\name{sta.evy5.b11}
\alias{sta.evy5.b11}
\title{Collated results from Vortex scenarios - Campbell et al (2016)}
\format{a \code{data.frame} with 1020 observations of 47 variables.}
\source{
Campbell et al. (2016). Assessing the economic benefits of starling
detection and control to Western Australia. Australasian Journal of
Environmental Management, 23, 81-99
\href{https://dx.doi.org/10.1080/14486563.2015.1028486}{DOI:10.1080/14486563.2015.1028486}
}
\description{
A dataset with the results from Vortex scenarios used in Campbell
et al (2016) to simulate major application of control measures in every
5 year cycle, maintaining 2011 levels of investment. Vortex outputs, from
the project named 'Starlingv3PopBased' and the sensitivity test scenario
'MReduction_B11_09Evy5' (.stdat files), were collated with \code{collate_dat}.
}
\examples{
data("sta.evy5.b11")
head(sta.evy5.b11)
}
| /man/sta.evy5.b11.Rd | no_license | cran/vortexRdata | R | false | true | 1,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vortexRdata_pkg_data.R
\name{sta.evy5.b11}
\alias{sta.evy5.b11}
\title{Collated results from Vortex scenarios - Campbell et al (2016)}
\format{a \code{data.frame} with 1020 observations of 47 variables.}
\source{
Campbell et al. (2016). Assessing the economic benefits of starling
detection and control to Western Australia. Australasian Journal of
Environmental Management, 23, 81-99
\href{https://dx.doi.org/10.1080/14486563.2015.1028486}{DOI:10.1080/14486563.2015.1028486}
}
\description{
A dataset with the results from Vortex scenarios used in Campbell
et al (2016) to simulate major application of control measures in every
5 year cycle, maintaining 2011 levels of investment. Vortex outputs, from
the project named 'Starlingv3PopBased' and the sensitivity test scenario
'MReduction_B11_09Evy5' (.stdat files), were collated with \code{collate_dat}.
}
\examples{
data("sta.evy5.b11")
head(sta.evy5.b11)
}
|
#---------------------------------------------------------------------------------
# Modify MA data so that the number of negative peptides is
# of the same ratio to the number of positive peptides
#---------------------------------------------------------------------------------
library(data.table)
library(stringr)
# NOTE(review): dir0 is defined but not used anywhere in this script.
dir0 = "../../../data/NetMHCpan4_1_train"
dir1 = "../../../data/MA_data"
# process the five cross-validation splits (files 0..4)
for(i in 0:4){
fnm = sprintf("%s/train_v4_el_multi_HLA_%d.txt.gz", dir1, i)
cat(i, " ", fnm, "\n")
dat_i = fread(fnm)
# NOTE(review): the two lines below are not auto-printed inside a for loop,
# so they have no effect; presumably leftovers from interactive use.
dim(dat_i)
dat_i[1:5,]
# sanity checks: no duplicated peptide within a cell line, and all
# peptides are padded to exactly 15 characters
t2 = tapply(dat_i$peptide, dat_i$cell_line, anyDuplicated)
stopifnot(all(t2 == 0))
stopifnot(all(str_length(dat_i$peptide) == 15))
# true peptide length = 15 minus the number of 'X' padding characters
dat_i$len = 15 - str_count(dat_i$peptide, "X")
tb1 = table(dat_i$len)
tb2 = table(dat_i$binder, dat_i$len)
print(tb1)
print(tb2)
# negative:positive ratio per length, before balancing
print(tb2[1,]/tb2[2,])
# keep the number for 9aa peptide and select 5x negatives
# for all other lengths
w2kp = NULL
set.seed(111)
for(l1 in c(8, 10:15)){
print(l1)
w_pos = which(dat_i$len == l1 & dat_i$binder == 1)
w_neg = which(dat_i$len == l1 & dat_i$binder == 0)
n_pos = length(w_pos)
# downsample negatives to a 5:1 ratio against positives
n_neg = 5*n_pos
w2kp = c(w2kp, w_pos, sample(w_neg, n_neg))
}
dat_i_new = dat_i[w2kp,]
# 9-mers are kept in full (both classes, unchanged ratio)
dat_i_new = rbind(dat_i_new, dat_i[which(dat_i$len ==9),])
dim(dat_i_new)
# ratios after balancing, for inspection in the log
tb1 = table(dat_i_new$len)
tb2 = table(dat_i_new$binder, dat_i_new$len)
print(tb1)
print(tb2)
print(tb2[1,]/tb2[2,])
# write '<name>_balanced_<i>.txt' next to the input, then gzip it
# NOTE(review): '.' is unescaped in sub(".gz", ...); it works for these
# file names but 'fixed = TRUE' would be safer.
fnm1_new = sub("multi_HLA_", "multi_HLA_balanced_", fnm)
fnm1_new = sub(".gz", "", fnm1_new)
cols2kp = c("peptide", "binder", "cell_line")
write.table(dat_i_new[,..cols2kp], fnm1_new, sep="\t", quote = FALSE,
row.names = FALSE)
system(sprintf("gzip %s", fnm1_new))
}
sessionInfo()
q(save="no")
| /R/Train/step3a_other_MA_datasets/step3a_extract_MA_data_balanced.R | permissive | alexchen7/PEPPRMINT | R | false | false | 1,822 | r | #---------------------------------------------------------------------------------
# Modify MA data so that the number of negative peptides is
# of the same ratio to the number of positive peptides
#---------------------------------------------------------------------------------
library(data.table)
library(stringr)
dir0 = "../../../data/NetMHCpan4_1_train"
dir1 = "../../../data/MA_data"
for(i in 0:4){
fnm = sprintf("%s/train_v4_el_multi_HLA_%d.txt.gz", dir1, i)
cat(i, " ", fnm, "\n")
dat_i = fread(fnm)
dim(dat_i)
dat_i[1:5,]
t2 = tapply(dat_i$peptide, dat_i$cell_line, anyDuplicated)
stopifnot(all(t2 == 0))
stopifnot(all(str_length(dat_i$peptide) == 15))
dat_i$len = 15 - str_count(dat_i$peptide, "X")
tb1 = table(dat_i$len)
tb2 = table(dat_i$binder, dat_i$len)
print(tb1)
print(tb2)
print(tb2[1,]/tb2[2,])
# keep the number for 9aa peptide and select 5x negatives
# for all other lengths
w2kp = NULL
set.seed(111)
for(l1 in c(8, 10:15)){
print(l1)
w_pos = which(dat_i$len == l1 & dat_i$binder == 1)
w_neg = which(dat_i$len == l1 & dat_i$binder == 0)
n_pos = length(w_pos)
n_neg = 5*n_pos
w2kp = c(w2kp, w_pos, sample(w_neg, n_neg))
}
dat_i_new = dat_i[w2kp,]
dat_i_new = rbind(dat_i_new, dat_i[which(dat_i$len ==9),])
dim(dat_i_new)
tb1 = table(dat_i_new$len)
tb2 = table(dat_i_new$binder, dat_i_new$len)
print(tb1)
print(tb2)
print(tb2[1,]/tb2[2,])
fnm1_new = sub("multi_HLA_", "multi_HLA_balanced_", fnm)
fnm1_new = sub(".gz", "", fnm1_new)
cols2kp = c("peptide", "binder", "cell_line")
write.table(dat_i_new[,..cols2kp], fnm1_new, sep="\t", quote = FALSE,
row.names = FALSE)
system(sprintf("gzip %s", fnm1_new))
}
sessionInfo()
q(save="no")
|
#' @title Draw spiral plot
#'
#' @description Draws one "spiral" glyph per column of \code{data} at the
#'   positions supplied in \code{meta}, optionally connected by line
#'   segments and annotated with a legend.
#'
#' @param data matrix-like object; one glyph is drawn per column, rows are
#'   the fractions visualised as satellite circles (see \code{drawCircle}).
#' @param meta list of per-glyph metadata; each element provides
#'   \code{x}, \code{y}, \code{label}, \code{scale} and \code{phiOff}.
#' @param coord data.frame with columns \code{x} and \code{y} defining the
#'   extent of the plotting region.
#' @param off radius of the central labelled circle of each glyph.
#' @param col fill colours, one per row of \code{data}; \code{NULL} falls
#'   back to \code{pals::kelly()} inside \code{drawCircle}.
#' @param segm optional list of segments; each element provides
#'   \code{xFrom}, \code{yFrom}, \code{xTo}, \code{yTo}.
#' @param leg logical; draw a legend?
#' @param x.leg,y.leg legend position; default to \code{max(coord$x)} /
#'   \code{max(coord$y)}.
#' @param cex.leg legend text size.
#' @param leg.cut only rows whose maximum value exceeds this cutoff appear
#'   in the legend.
#'
#' @import pals
#' @import plotrix
#'
#' @export
drawSpiralPlot <- function(data, meta, coord, off=1, col=NULL, segm=NULL, leg=FALSE, x.leg=NULL, y.leg=NULL, cex.leg=1, leg.cut=1) {
    ## collect per-glyph metadata into a data.frame
    tmp <- data.frame(do.call(rbind, meta))
    tmp$x <- as.numeric(as.character(tmp$x))
    tmp$y <- as.numeric(as.character(tmp$y))
    ## legend defaults; widen the right margin to make room for it
    if (leg) {
        if (is.null(x.leg)) {
            x.leg <- max(coord$x)
        }
        if (is.null(y.leg)) {
            y.leg <- max(coord$y)
        }
        par(mar=c(4,4,4,10))
    }
    ## empty canvas spanning the supplied coordinates
    plot(coord$y~coord$x, data=tmp, xlab="", ylab="", bty='n', col="white", xaxt='n', yaxt='n')
    if (leg) {
        ## list only rows that exceed the cutoff somewhere
        m <- apply(data, 1, max)
        legend(x.leg, y.leg, cex=cex.leg, rownames(data)[which(m > leg.cut)], fill=col[which(m > leg.cut)], bty='n')
    }
    ## optional connecting segments between glyphs
    ## (seq_along handles an empty, non-NULL list; 1:length() did not)
    if (!is.null(segm)) {
        for (i in seq_along(segm)) {
            segments(segm[[i]]$xFrom, segm[[i]]$yFrom, segm[[i]]$xTo, segm[[i]]$yTo, lwd=3)
        }
    }
    ## one glyph per column of data
    for (i in seq_len(ncol(data))) {
        drawCircle(tmp$label[i], centr=c(tmp$x[i], tmp$y[i]),
                   data=data[,i,drop=FALSE], off, scale=tmp$scale[i],
                   phiOff=tmp$phiOff[i], col=col)
    }
}
## Draw a single glyph: a central labelled circle of radius 'off' with
## satellite circles around it whose areas are proportional to the values
## in data[,1].  Satellites are sorted by value; 'scale' stretches the
## angular span and 'phiOff' rotates the whole arrangement.
## Internal helper for drawSpiralPlot(); requires plotrix::draw.circle().
drawCircle <- function(label, centr=c(0,0), data, off=1, col=NULL, scale=1,phiOff=0) {
    ## default palette when the caller supplied none
    if (is.null(col)) {
        col <- kelly()
    }
    ## central circle, traced at small angular steps, with the label inside
    df <- list()
    for (phi in seq(0, 2*pi, 0.01)) {
        df[[length(df)+1]] <- data.frame(x=centr[1]+off*cos(phi),
                                         y=centr[2]+off*sin(phi),
                                         phi=phi)
    }
    df <- do.call(rbind, df)
    lines(y~x, data=df)
    text(centr[1], centr[2], label)
    ## sort satellites by value, keeping colours matched to their rows
    col <- col[order(data[,1])]
    data <- data[order(data[,1]),,drop=FALSE]
    ## area corresponds to fractions
    data$radius <- sqrt(data[,1]/pi)
    data$x <- NA
    data$y <- NA
    data$phi <- NA
    ####################
    ## half-angle each satellite subtends as seen from the glyph centre
    for (i in seq_len(nrow(data))) {
        data$phi[i] <- asin(data$radius[i]/(off+data$radius[i]))
    }
    ## rescale so the satellites together span pi (times 'scale')
    scl <- pi/sum(data$phi)*scale
    data$phi <- data$phi*scl
    data$radius <- data$radius*scl
    ## place each satellite tangent to the central circle, advancing the
    ## angle by the full width (2*phi) of all previously drawn satellites
    for (i in seq_len(nrow(data))) {
        sumPhi <- 0
        if (i > 1) {
            sumPhi <- sum(data$phi[1:(i-1)]*2)
        }
        data$x[i] <- centr[1]+(off+data$radius[i])*cos(data$phi[i]+sumPhi+phiOff)
        data$y[i] <- centr[2]+(off+data$radius[i])*sin(data$phi[i]+sumPhi+phiOff)
        draw.circle(data$x[i], data$y[i], data$radius[i], col=col[i])
    }
}
| /R/drawSpiralPlot.R | no_license | mknoll/dataAnalysisMisc | R | false | false | 3,044 | r | #' @title Draw spiral plot
#'
#' @description
#'
#' @import pals
#' @import plotrix
#'
#' @export
drawSpiralPlot <- function(data, meta, coord, off=1, col=NULL, segm=NULL, leg=F, x.leg=NULL, y.leg=NULL, cex.leg=1, leg.cut=1) {
## area
tmp <- data.frame(do.call(rbind, meta))
tmp$x <- as.numeric(as.character(tmp$x))
tmp$y <- as.numeric(as.character(tmp$y))
#print(tmp)
## plot + legende
if (leg) {
if (is.null(x.leg)) {
x.leg <- max(coord$x)
}
if (is.null(y.leg)) {
y.leg <- max(coord$y)
}
par(mar=c(4,4,4,10))
}
plot(coord$y~coord$x, data=tmp, xlab="", ylab="", bty='n', col="white", xaxt='n', yaxt='n')
if (leg) {
m <- apply(data, 1, max)
legend(x.leg, y.leg, cex=cex.leg, rownames(data)[which(m > leg.cut)], fill=col[which(m > leg.cut)], bty='n')
}
# points(tmp$y~tmp$x)
if (!is.null(segm)) {
for (i in 1:length(segm)) {
segments(segm[[i]]$xFrom, segm[[i]]$yFrom, segm[[i]]$xTo, segm[[i]]$yTo, lwd=3)
}
}
for (i in 1:length(data[1,])) {
drawCircle(tmp$label[i], centr=c(tmp$x[i], tmp$y[i]),
data=data[,i,drop=F], off, scale=tmp$scale[i],
phiOff=tmp$phiOff[i], col=col)
}
}
drawCircle <- function(label, centr=c(0,0), data, off=1, col=NULL, scale=1,phiOff=0) {
# col supplied
if (is.null(col)) {
col <- kelly()
}
#print(col)
##central circle with text
df <- list()
for (phi in seq(0, 2*pi, 0.01)) {
df[[length(df)+1]] <- data.frame(x=centr[1]+off*cos(phi),
y=centr[2]+off*sin(phi),
phi=phi)
}
df <- do.call(rbind, df)
#plot(y~x, data=df, pch=19, ylim=c(-3, 3), xlim=c(-3,3), t="l", lwd=1,
# xaxt='n', yaxt='n', xlab="", ylab="", bty='n')
lines(y~x, data=df)
#draw.circle(df$x, df$y, off)
#print(df)
text(centr[1], centr[2], label)
##Circles around
nCirc <- length(data[,1])
col <- col[order(data[,1])]
data <- data[order(data[,1]),,drop=F]
#Area corresponds to fractions
data$radius <- sqrt(data[,1]/pi)
data$x <- NA
data$y <- NA
data$phi <- NA
####################
##calculate phi
for (i in 1:length(data[,1])) {
data$phi[i] <- asin(data$radius[i]/(off+data$radius[i]))
}
### scale
scl <- pi/sum(data$phi)*scale
data$phi <- data$phi*scl
data$radius <- data$radius*scl
for (i in 1:length(data[,1])) {
sumPhi <- 0
if (i > 1) {
sumPhi <- sum(data$phi[1:(i-1)]*2)
}
data$x[i] <- centr[1]+(off+data$radius[i])*cos(data$phi[i]+sumPhi+phiOff)
data$y[i] <- centr[2]+(off+data$radius[i])*sin(data$phi[i]+sumPhi+phiOff)
#circ(data$x[i], data$y[i], data$radius[i])
draw.circle(data$x[i], data$y[i], data$radius[i], col=col[i])
}
}
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # ## # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ==========================================================================================================
# Comparative analyses of GC-biased gene conversion (B)
# ==========================================================================================================
# Jesper Boman 5 mar 2021
# ==========================================================================================================
# # # # # # # # # # # # # # # # # # # # # # # # # # # ## # # # # # # # # # # # # # # # # # # # # # # # # # # #
#### gBGC point estimates LRT tests ####
gBGC_genomewide_stats <- read.csv(file = file.choose() , header = T, sep = ";", dec =".", stringsAsFactors = F)
gBGC_genomewide_stats
library(wesanderson)
library(ggplot2)
col <- wes_palette("Darjeeling1")
col2 <-wes_palette("Darjeeling2")
#M1 vs M0
pchisq(-2*(gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
#
#M1* vs M1
pchisq(-2*(gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
#
#Examples of data exploration (Comparing different filter sets)
# Paired t-tests contrasting gBGC summary statistics between filter sets;
# the same six populations occur in each set, hence paired = TRUE
# (spelled out — T/F are reassignable variables, not reserved words).
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$eWS, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$eWS, paired = TRUE)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$eSW, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$eSW, paired = TRUE)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$B_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$B_M1, paired = TRUE)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1, paired = TRUE)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$B_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$B_M1, paired = TRUE)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$Bcor_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1, paired = TRUE)
# Presumably the expected equilibrium WS frequency, 1 / (1 + lambda * exp(-B)),
# under the corrected M1* estimates — TODO confirm against the model used.
1/(1+gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$lambdacor_M1*exp(-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1))
# Per-population differences in corrected lambda (M1*) between filter sets.
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$lambdacor_M1
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$lambdacor_M1
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$lambdacor_M1
#### Genomewide - bootstrapped sites, no exons ####
# Read per-population bootstrap replicates of the gBGC model fits.
# file.choose() is interactive: pick files in the order given below so that
# each table receives the correct population label.
swe_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_bs$Population <- "Swe_sin"
spa_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_bs$Population <- "Spa_sin"
kaz_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_bs$Population <- "Kaz_sin"
kaz_juv_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_bs$Population <- "Kaz_juv"
ire_juv_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_bs$Population <- "Ire_juv"
spa_rea_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) using mean log-likelihoods across bootstrap replicates.
pchisq(-2*(mean(swe_sin_gw_bs$lnL1)-mean(swe_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_bs$lnL1)-mean(spa_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_bs$lnL1)-mean(kaz_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_bs$lnL1)-mean(kaz_juv_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_bs$lnL1)-mean(ire_juv_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_bs$lnL1)-mean(spa_rea_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_bs <- rbind(swe_sin_gw_bs, spa_sin_gw_bs, kaz_sin_gw_bs, kaz_juv_gw_bs, ire_juv_gw_bs, spa_rea_gw_bs)
# Point estimates of corrected B (M1*) per population, overlaid on the violins.
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_NE <- data.frame(Population = unique(factor(comb_gw_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
# Figure size note: 6.04 x 5 inches
# Violin plot of bootstrap distributions of B; columns are referenced by bare
# name inside aes() (referencing comb_gw_bs$... inside aes() is an anti-pattern).
ggplot(comb_gw_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="All non-exonic sites") +
  ylim(0,1) +
  # Uncorrected B (M1) drawn as semi-transparent violins behind the corrected ones.
  geom_violin(data = comb_gw_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population), alpha = 0.5, draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_NE, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin")) +
  # Manual in-panel legend for the two violin layers.
  geom_text(x=6.17, y=0.9, label="B M1*") +
  annotate(geom="point", x=5.8, y=0.9, shape=15, size = 5) +
  geom_text(x=6.15, y=0.85, label="B M1") +
  annotate(geom="point", x=5.8, y=0.85, shape=15, alpha=0.5, size = 5)
#### Genomewide - bootstrapped sites, no exons no CpG ####
# Same analysis as above, restricted to sites that are not ancestrally CpG-prone.
# Pick files in the order below (interactive).
swe_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_filt_bs$Population <- "Swe_sin"
spa_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_filt_bs$Population <- "Spa_sin"
kaz_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_filt_bs$Population <- "Kaz_sin"
kaz_juv_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_filt_bs$Population <- "Kaz_juv"
ire_juv_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_filt_bs$Population <- "Ire_juv"
spa_rea_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_filt_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) on mean bootstrap log-likelihoods.
pchisq(-2*(mean(swe_sin_gw_filt_bs$lnL1)-mean(swe_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_filt_bs$lnL1)-mean(spa_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_filt_bs$lnL1)-mean(kaz_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_filt_bs$lnL1)-mean(kaz_juv_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_filt_bs$lnL1)-mean(ire_juv_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_filt_bs$lnL1)-mean(spa_rea_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_filt_bs <- rbind(swe_sin_gw_filt_bs, spa_sin_gw_filt_bs, kaz_sin_gw_filt_bs, kaz_juv_gw_filt_bs, ire_juv_gw_filt_bs, spa_rea_gw_filt_bs)
# Point estimates for the noExons_noCpG filter set. (Population levels are now
# taken from comb_gw_filt_bs itself instead of comb_gw_bs from the previous
# section — both hold the same six populations in the same order, but this
# removes a hidden dependency on having run the previous section first.)
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_noCpG <- data.frame(Population = unique(factor(comb_gw_filt_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$Bcor_M1)
ggplot(comb_gw_filt_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="No ancestral CpG-prone sites") +
  ylim(0,1.5) +
  # Uncorrected B (M1), with alpha mapped per population as in the original plot.
  geom_violin(data = comb_gw_filt_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population, alpha = Population), draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_noCpG, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin"))
#### Genomewide - bootstrapped sites, no exons CpGonly ####
# Same analysis, restricted to ancestrally CpG-prone sites.
# Pick files in the order below (interactive).
swe_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_CpG_bs$Population <- "Swe_sin"
spa_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_CpG_bs$Population <- "Spa_sin"
kaz_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_CpG_bs$Population <- "Kaz_sin"
kaz_juv_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_CpG_bs$Population <- "Kaz_juv"
ire_juv_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_CpG_bs$Population <- "Ire_juv"
spa_rea_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_CpG_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) on mean bootstrap log-likelihoods.
pchisq(-2*(mean(swe_sin_gw_CpG_bs$lnL1)-mean(swe_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_CpG_bs$lnL1)-mean(spa_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_CpG_bs$lnL1)-mean(kaz_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_CpG_bs$lnL1)-mean(kaz_juv_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_CpG_bs$lnL1)-mean(ire_juv_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_CpG_bs$lnL1)-mean(spa_rea_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_CpG_bs <- rbind(swe_sin_gw_CpG_bs, spa_sin_gw_CpG_bs, kaz_sin_gw_CpG_bs, kaz_juv_gw_CpG_bs, ire_juv_gw_CpG_bs, spa_rea_gw_CpG_bs)
#gBGC_genomewide_stats <- read.csv(file = file.choose() , header = T, sep = ";", dec =".", stringsAsFactors = F)
# Point estimates for the noExons_onlyCpG filter set. (Levels taken from
# comb_gw_CpG_bs itself rather than comb_gw_bs of an earlier section — same
# six populations, but avoids a hidden cross-section dependency.)
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_CpG <- data.frame(Population = unique(factor(comb_gw_CpG_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$Bcor_M1)
# Figure size note: 6.04 x 5 inches
ggplot(comb_gw_CpG_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="Ancestral CpG-prone sites") +
  ylim(0,1.5) +
  # Uncorrected B (M1), alpha mapped per population as in the original plot.
  geom_violin(data = comb_gw_CpG_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population, alpha = Population), draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_CpG, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin"))
#### DAF spectrum plot ####
# Derived allele frequency spectrum for Swe-sin; note the input file uses
# comma decimal marks (dec = ","), unlike the other inputs in this script.
DAF_swe_sin <- read.table(file.choose(), stringsAsFactors = FALSE, header = TRUE, dec = ",")
ggplot(data=DAF_swe_sin, aes(x=Freq_cat_num, y=Freq, fill=Class)) +
  geom_bar(stat="identity", color="black", position=position_dodge())+
  scale_fill_manual(values=c("Black", "Grey", "White"))+
  theme_classic()+
  ylab("Density") +
  xlab("Derived allele count") +
  labs(title="All non-exonic sites: Swe-sin") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
#### Leptidea phylogeny ####
#Read in phylogeny
library(ape)
leptidea_phylogeny <- read.tree(file = file.choose())
plot(leptidea_phylogeny)
# Collect the non-Leptidea tips and drop them, keeping only "leptidea" tips.
# grepl-based filtering is robust when there are zero matches, unlike the
# original tips[-(grep(...))], which returns an empty vector in that case.
tips <- leptidea_phylogeny$tip.label
tips <- tips[!grepl("leptidea", tips)]
leptidea_phylogeny_pruned <- drop.tip(leptidea_phylogeny, leptidea_phylogeny$tip.label[match(as.vector(tips), leptidea_phylogeny$tip.label)])
tips <- leptidea_phylogeny_pruned$tip.label
# NOTE(review): the replacement labels below assume a fixed tip order in the
# pruned tree — confirm against the plot() output before relying on them.
leptidea_phylogeny_pruned$tip.label <- c("Leptidea juvernica - Ireland", "Leptidea juvernica - Kazakhstan", "Leptidea reali - Spain", "Leptidea sinapis - Spain", "Leptidea sinapis - Kazakhstan", "Leptidea sinapis - Sweden" )
#Plot phylogeny
plot(leptidea_phylogeny_pruned, show.node.label = TRUE, direction="rightwards", tip.color=c(col[2], col[5], col2[2], col[1], col[4], col[3]))
plot(leptidea_phylogeny_pruned)
# Short population codes used by the downstream PIC analyses (e.g. drop.tip("Spa-sin")).
leptidea_phylogeny_pruned$tip.label <- c("Ire-juv", "Kaz-juv", "Spa-rea", "Spa-sin", "Kaz-sin", "Swe-sin" )
#### Leptidea phylostats ####
library("ape")
### Population comparative (dependent on pi) ####
#Needs GC_centiles.results from the script GC_centiles.R, or just add it to the gBGC_genomewide_stats input file
# Genome-wide pi per population: summed pairwise diversity (anchored +
# unanchored) divided by total callable length (invariant + variable sites).
genomewide_pi <- cbind(aggregate((pi_sum_unanchor+pi_sum_anchor)~population, GC_centiles.results, sum), aggregate((invar_L+unanchor_L+anchor_L)~population, GC_centiles.results, sum)[,2])
colnames(genomewide_pi) <- c("Population", "Pi_sum", "L")
genomewide_pi$pi <- genomewide_pi$Pi_sum/genomewide_pi$L
gBGC_genomewide_stats <- read.csv(file = file.choose(), header = TRUE, sep = ";", dec = ".", stringsAsFactors = FALSE)
gBGC_genomewide_stats$Chr_num_mean <- (gBGC_genomewide_stats$Chr_num_high+gBGC_genomewide_stats$Chr_num_low)/2
# Correlations of corrected B (M1*) with diversity and chromosome number.
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_low, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_high, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
#Plot π vs B - N.B needs plot p2 from below for inset
# Figure size note: 6.04 x 5 inches
ggplot(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",], aes(x=pi, y=Bcor_M1, col=Population)) + geom_point(size=4) +
  ylim(0,0.85)+
  scale_color_manual(name="Population", values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  ylab("B") +
  xlab(expression(pi)) +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14)) +
  # Inset: phylogenetically independent contrasts (plot p2, defined below).
  annotation_custom(ggplotGrob(p2), xmin = 0.005, xmax = 0.00703,
                    ymin = 0.45, ymax = 0.908)
#Plot chromosome number vs B- N.B needs plot p3 from below for inset
# Figure size note: 6.04 x 5 inches
# Each population is drawn at both its low and high diploid chromosome-number
# estimate, joined by a dotted segment.
ggplot(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",], aes(x=Chr_num_low, y=Bcor_M1, col=Population)) + geom_point(size=4) + geom_point(size=4, aes(x=Chr_num_high))+
  ylim(0,0.85)+
  scale_color_manual(name="Population", values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlim(50, 125)+
  ylab("B") +
  xlab("Diploid chromosome number") +
  geom_segment(aes(x=Chr_num_low, y=Bcor_M1, xend=Chr_num_high, yend=Bcor_M1), lty=3) +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))+
  # Inset: contrasts of B vs chromosome number (plot p3, defined below).
  annotation_custom(ggplotGrob(p3), xmin = 94, xmax = 129.9,
                    ymin = 0.45, ymax = 0.908)
#Produce phylogenetic independent contrasts using the method described by Felsenstein (1985)
# NOTE(review): pic() pairs trait values with tips by position — assumes the
# rows of gBGC_genomewide_stats[Filter == "noExons",] are ordered to match
# leptidea_phylogeny_pruned$tip.label; verify before interpreting results.
pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned)
pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi,leptidea_phylogeny_pruned)
# Unscaled contrasts (with variances) for pi, mean chromosome number and B.
pipic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi,leptidea_phylogeny_pruned, var.contrasts = TRUE, scaled = FALSE))
chrpic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_mean,leptidea_phylogeny_pruned, var.contrasts = TRUE, scaled = FALSE))
Bpic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned, var.contrasts = TRUE, scaled = FALSE))
# Regressions of B contrasts on pi / chromosome-number contrasts.
# (An exact duplicate of the chrpic regression was removed here.)
summary(lm(scale(Bpic$contrasts)~scale(pipic$contrasts)))
summary(lm(scale(Bpic$contrasts)~scale(chrpic$contrasts)))
summary(lm(Bpic$contrasts~chrpic$contrasts+pipic$contrasts))
# Inset panels used by the two scatter plots above:
# p2 = B contrasts vs pi contrasts; p3 = B contrasts vs chromosome contrasts.
p2 <- ggplot(Bpic, aes(x=pipic$contrasts, y=Bpic$contrasts))+geom_point(size=3)+
  theme_classic()+
  xlim(-0.0022, 0.0025)+
  xlab("") +
  ylab("") +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=8, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
p3 <- ggplot(Bpic, aes(x=chrpic$contrasts, y=Bpic$contrasts))+geom_point(size=3)+
  theme_classic()+
  xlim(-50,40)+
  xlab("") +
  ylab("") +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=8, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
#Same procedure as above but excluding the Spanish L. sinapis population
gBGC_genomewide_stats_noSpaSin <- gBGC_genomewide_stats[gBGC_genomewide_stats$Population != "Spa-sin",]
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_low, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_high, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_mean, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
# Prune the Spa-sin tip (requires the short tip labels assigned earlier).
leptidea_phylogeny_pruned_noSpasin <- drop.tip(leptidea_phylogeny_pruned,"Spa-sin")
plot(leptidea_phylogeny_pruned_noSpasin)
# Unscaled contrasts for the reduced (5-population) tree.
pipic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$pi,leptidea_phylogeny_pruned_noSpasin, var.contrasts = TRUE, scaled = FALSE))
chrpic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_mean,leptidea_phylogeny_pruned_noSpasin, var.contrasts = TRUE, scaled = FALSE))
Bpic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned_noSpasin, var.contrasts = TRUE, scaled = FALSE))
cor.test(chrpic_noSpasin$contrasts, Bpic_noSpasin$contrasts)
summary(lm(scale(Bpic_noSpasin$contrasts)~scale(chrpic_noSpasin$contrasts)))
summary(lm(scale(Bpic_noSpasin$contrasts)~scale(chrpic_noSpasin$contrasts)+scale(pipic_noSpasin$contrasts)))
plot(chrpic_noSpasin$contrasts, Bpic_noSpasin$contrasts)
| /Scripts/B_comp_GH.R | no_license | wangchengww/GC-biased-gene-conversion-and-genetic-diversity-in-butterflies | R | false | false | 22,483 | r | # # # # # # # # # # # # # # # # # # # # # # # # # # # ## # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ==========================================================================================================
# Comparative analyses of GC-biased gene conversion (B)
# ==========================================================================================================
# Jesper Boman 5 mar 2021
# ==========================================================================================================
# # # # # # # # # # # # # # # # # # # # # # # # # # # ## # # # # # # # # # # # # # # # # # # # # # # # # # # #
#### gBGC point estimates LRT tests ####
gBGC_genomewide_stats <- read.csv(file = file.choose() , header = T, sep = ";", dec =".", stringsAsFactors = F)
gBGC_genomewide_stats
library(wesanderson)
library(ggplot2)
col <- wes_palette("Darjeeling1")
col2 <-wes_palette("Darjeeling2")
#M1 vs M0
pchisq(-2*(gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL0-gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1), df=1, lower.tail=F)
#
#M1* vs M1
pchisq(-2*(gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Swe-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Spa-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Kaz-sin", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Kaz-juv", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Ire-juv", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
pchisq(-2*(gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1-gBGC_genomewide_stats[grep("Spa-rea", gBGC_genomewide_stats$Population),]$lnL1cor), df=3, lower.tail=F)
#
#Examples of data exploration (Comparing different filter sets)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$eWS, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$eWS, paired = T)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$eSW, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$eSW, paired = T)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$B_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$B_M1, paired = T)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1, paired = T)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$B_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$B_M1, paired = T)
t.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$Bcor_M1, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1, paired = T)
1/(1+gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$lambdacor_M1*exp(-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_lib",]$Bcor_M1))
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$lambdacor_M1
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$lambdacor_M1
gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$lambdacor_M1-gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$lambdacor_M1
#### Genomewide - bootstrapped sites, no exons ####
# Read per-population bootstrap replicates of the gBGC model fits.
# file.choose() is interactive: pick files in the order given below so that
# each table receives the correct population label.
swe_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_bs$Population <- "Swe_sin"
spa_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_bs$Population <- "Spa_sin"
kaz_sin_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_bs$Population <- "Kaz_sin"
kaz_juv_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_bs$Population <- "Kaz_juv"
ire_juv_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_bs$Population <- "Ire_juv"
spa_rea_gw_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) using mean log-likelihoods across bootstrap replicates.
pchisq(-2*(mean(swe_sin_gw_bs$lnL1)-mean(swe_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_bs$lnL1)-mean(spa_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_bs$lnL1)-mean(kaz_sin_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_bs$lnL1)-mean(kaz_juv_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_bs$lnL1)-mean(ire_juv_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_bs$lnL1)-mean(spa_rea_gw_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_bs <- rbind(swe_sin_gw_bs, spa_sin_gw_bs, kaz_sin_gw_bs, kaz_juv_gw_bs, ire_juv_gw_bs, spa_rea_gw_bs)
# Point estimates of corrected B (M1*) per population, overlaid on the violins.
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_NE <- data.frame(Population = unique(factor(comb_gw_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
# Figure size note: 6.04 x 5 inches
# Columns are referenced by bare name inside aes() (df$col in aes() is an anti-pattern).
ggplot(comb_gw_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="All non-exonic sites") +
  ylim(0,1) +
  # Uncorrected B (M1) drawn as semi-transparent violins behind the corrected ones.
  geom_violin(data = comb_gw_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population), alpha = 0.5, draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_NE, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin")) +
  # Manual in-panel legend for the two violin layers.
  geom_text(x=6.17, y=0.9, label="B M1*") +
  annotate(geom="point", x=5.8, y=0.9, shape=15, size = 5) +
  geom_text(x=6.15, y=0.85, label="B M1") +
  annotate(geom="point", x=5.8, y=0.85, shape=15, alpha=0.5, size = 5)
#### Genomewide - bootstrapped sites, no exons no CpG ####
# Same analysis, restricted to sites that are not ancestrally CpG-prone.
swe_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_filt_bs$Population <- "Swe_sin"
spa_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_filt_bs$Population <- "Spa_sin"
kaz_sin_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_filt_bs$Population <- "Kaz_sin"
kaz_juv_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_filt_bs$Population <- "Kaz_juv"
ire_juv_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_filt_bs$Population <- "Ire_juv"
spa_rea_gw_filt_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_filt_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) on mean bootstrap log-likelihoods.
pchisq(-2*(mean(swe_sin_gw_filt_bs$lnL1)-mean(swe_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_filt_bs$lnL1)-mean(spa_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_filt_bs$lnL1)-mean(kaz_sin_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_filt_bs$lnL1)-mean(kaz_juv_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_filt_bs$lnL1)-mean(ire_juv_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_filt_bs$lnL1)-mean(spa_rea_gw_filt_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_filt_bs <- rbind(swe_sin_gw_filt_bs, spa_sin_gw_filt_bs, kaz_sin_gw_filt_bs, kaz_juv_gw_filt_bs, ire_juv_gw_filt_bs, spa_rea_gw_filt_bs)
# Point estimates for the noExons_noCpG filter set; levels from this section's
# own combined table (removes dependency on the previous section's comb_gw_bs).
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_noCpG <- data.frame(Population = unique(factor(comb_gw_filt_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_noCpG",]$Bcor_M1)
ggplot(comb_gw_filt_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="No ancestral CpG-prone sites") +
  ylim(0,1.5) +
  geom_violin(data = comb_gw_filt_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population, alpha = Population), draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_noCpG, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin"))
#### Genomewide - bootstrapped sites, no exons CpGonly ####
# Same analysis, restricted to ancestrally CpG-prone sites.
swe_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
swe_sin_gw_CpG_bs$Population <- "Swe_sin"
spa_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_sin_gw_CpG_bs$Population <- "Spa_sin"
kaz_sin_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_sin_gw_CpG_bs$Population <- "Kaz_sin"
kaz_juv_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
kaz_juv_gw_CpG_bs$Population <- "Kaz_juv"
ire_juv_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
ire_juv_gw_CpG_bs$Population <- "Ire_juv"
spa_rea_gw_CpG_bs <- read.table(file = file.choose(), header = TRUE, dec = ".", stringsAsFactors = FALSE)
spa_rea_gw_CpG_bs$Population <- "Spa_rea"
# LRT of M1* vs M1 (df = 3) on mean bootstrap log-likelihoods.
pchisq(-2*(mean(swe_sin_gw_CpG_bs$lnL1)-mean(swe_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_sin_gw_CpG_bs$lnL1)-mean(spa_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_sin_gw_CpG_bs$lnL1)-mean(kaz_sin_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(kaz_juv_gw_CpG_bs$lnL1)-mean(kaz_juv_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(ire_juv_gw_CpG_bs$lnL1)-mean(ire_juv_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
pchisq(-2*(mean(spa_rea_gw_CpG_bs$lnL1)-mean(spa_rea_gw_CpG_bs$lnL1cor)), df=3, lower.tail=FALSE)
comb_gw_CpG_bs <- rbind(swe_sin_gw_CpG_bs, spa_sin_gw_CpG_bs, kaz_sin_gw_CpG_bs, kaz_juv_gw_CpG_bs, ire_juv_gw_CpG_bs, spa_rea_gw_CpG_bs)
#gBGC_genomewide_stats <- read.csv(file = file.choose() , header = T, sep = ";", dec =".", stringsAsFactors = F)
# Point estimates for the noExons_onlyCpG filter set; levels from this
# section's own combined table (avoids dependency on the earlier comb_gw_bs).
pop_levels <- c("Ire_juv", "Kaz_juv", "Spa_rea", "Kaz_sin", "Swe_sin", "Spa_sin")
leptidea_Bcor_M1_estimates_CpG <- data.frame(Population = unique(factor(comb_gw_CpG_bs$Population, levels = pop_levels)), Bcor_M1 = gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons_onlyCpG",]$Bcor_M1)
# Figure size note: 6.04 x 5 inches
ggplot(comb_gw_CpG_bs, aes(x = factor(Population, levels = pop_levels), y = Bcor_M1, fill = Population)) +
  geom_violin(draw_quantiles = 0.5) +
  scale_fill_manual(values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlab("Population") +
  ylab("B") +
  theme(legend.position="none", plot.title = element_text(face = "bold", hjust = 0.5), panel.border = element_rect(colour = "black", fill=NA, size=0.5), axis.text=element_text(size=11, colour="black"), axis.title=element_text(size=14)) +
  labs(title="Ancestral CpG-prone sites") +
  ylim(0,1.5) +
  geom_violin(data = comb_gw_CpG_bs, aes(x = factor(Population, levels = pop_levels), y = B_M1, fill = Population, alpha = Population), draw_quantiles = 0.5) +
  geom_violin(draw_quantiles = 0.5) +
  geom_point(data = leptidea_Bcor_M1_estimates_CpG, shape = 1, aes(x = Population, y = Bcor_M1)) +
  scale_x_discrete(labels = c("Ire-juv", "Kaz-juv", "Spa-rea", "Kaz-sin", "Swe-sin", "Spa-sin"))
#### DAF spectrum plot ####
# Derived allele frequency spectrum for Swe-sin. Note dec = ",": the input
# file uses comma as the decimal separator.
DAF_swe_sin <- read.table(file.choose(), stringsAsFactors = F, header=T, dec=",")
# Grouped bar chart: density of sites per derived allele count, dodged by
# mutation class (three classes, black/grey/white fills).
ggplot(data=DAF_swe_sin, aes(x=Freq_cat_num, y=Freq, fill=Class)) +
  geom_bar(stat="identity", color="black", position=position_dodge())+
  scale_fill_manual(values=c("Black", "Grey", "White"))+
  theme_classic()+
  ylab("Density") +
  xlab("Derived allele count") +
  labs(title="All non-exonic sites: Swe-sin") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
#### Leptidea phylogeny ####
#Read in phylogeny
library(ape)
leptidea_phylogeny <- read.tree(file = file.choose())
plot(leptidea_phylogeny)
# Prune the tree down to the Leptidea tips: collect all tip labels, drop the
# ones matching "leptidea" from the removal list, then drop the remainder.
tips <- leptidea_phylogeny$tip.label
tips <- tips[-(grep("leptidea", tips))]
leptidea_phylogeny_pruned <- drop.tip(leptidea_phylogeny, leptidea_phylogeny$tip.label[match(as.vector(tips), leptidea_phylogeny$tip.label)])
tips <- leptidea_phylogeny_pruned$tip.label
# NOTE(review): relabeling assigns names positionally and therefore assumes a
# fixed tip order in the pruned tree -- verify against the input tree file.
leptidea_phylogeny_pruned$tip.label <- c("Leptidea juvernica - Ireland", "Leptidea juvernica - Kazakhstan", "Leptidea reali - Spain", "Leptidea sinapis - Spain", "Leptidea sinapis - Kazakhstan", "Leptidea sinapis - Sweden" )
#Plot phylogeny
plot(leptidea_phylogeny_pruned, show.node.label =T, direction="rightwards", tip.color=c(col[2], col[5], col2[2], col[1], col[4], col[3]))
plot(leptidea_phylogeny_pruned)
# Shorter labels used by the comparative analyses below (e.g. drop.tip("Spa-sin")).
leptidea_phylogeny_pruned$tip.label <- c("Ire-juv", "Kaz-juv", "Spa-rea", "Spa-sin", "Kaz-sin", "Swe-sin" )
#### Leptidea phylostats ####
library("ape")
### Population comparative (dependent on pi) ####
#Needs GC_centiles.results from the script GC_centiles.R, or just add it to the gBGC_genomewide_stats input file
# Genome-wide nucleotide diversity per population: summed pi over anchored and
# unanchored scaffolds, divided by the total callable length (invariant +
# unanchored + anchored).
genomewide_pi <- cbind(aggregate((pi_sum_unanchor+pi_sum_anchor)~population, GC_centiles.results, sum), aggregate((invar_L+unanchor_L+anchor_L)~population, GC_centiles.results, sum)[,2])
colnames(genomewide_pi) <- c("Population", "Pi_sum", "L")
genomewide_pi$pi <- genomewide_pi$Pi_sum/genomewide_pi$L
# Semicolon-separated summary table of per-population gBGC statistics.
gBGC_genomewide_stats <- read.csv(file = file.choose() , header = T, sep = ";", dec =".", stringsAsFactors = F)
# Midpoint of the reported diploid chromosome-number range per population.
gBGC_genomewide_stats$Chr_num_mean <- (gBGC_genomewide_stats$Chr_num_high+gBGC_genomewide_stats$Chr_num_low)/2
# Pearson correlations between corrected B and pi / chromosome number
# (noExons filter only); results printed interactively.
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_low, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_high, gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1)
#Plot π vs B - N.B needs plot p2 from below for inset
# NOTE(review): forward dependency -- the inset objects p2 and p3 are assigned
# further down this script; run those assignments before these two plot calls.
# The bare "6.04 x 5" string literals record the intended export size.
"6.04 x 5"
# Scatter of B against genome-wide pi per population, with the PIC scatter
# (p2) embedded as an inset via annotation_custom().
ggplot(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",], aes(x=pi, y=Bcor_M1, col=Population)) + geom_point(size=4) +
  ylim(0,0.85)+
  scale_color_manual(name="Population", values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  ylab("B") +
  xlab(expression(pi)) +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14)) +
  annotation_custom(ggplotGrob(p2), xmin = 0.005, xmax = 0.00703,
                    ymin = 0.45, ymax = 0.908)
#Plot chromosome number vs B- N.B needs plot p3 from below for inset
"6.04 x 5"
# B against the low and high ends of the diploid chromosome-number range (two
# point layers joined by a dotted segment), with the chromosome-number PIC
# scatter (p3) as an inset.
ggplot(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",], aes(x=Chr_num_low, y=Bcor_M1, col=Population)) + geom_point(size=4) + geom_point(size=4, aes(x=Chr_num_high))+
  ylim(0,0.85)+
  scale_color_manual(name="Population", values = c(col[2], col[5], col[4], col2[2], col[1], col[3])) +
  theme_classic() +
  xlim(50, 125)+
  ylab("B") +
  xlab("Diploid chromosome number") +
  geom_segment(aes(x=Chr_num_low, y=Bcor_M1, xend=Chr_num_high, yend=Bcor_M1), lty=3) +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=12, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))+
  annotation_custom(ggplotGrob(p3), xmin = 94, xmax = 129.9,
                    ymin = 0.45, ymax = 0.908)
#Produce phylogenetic independent contrasts using the method described by Felsenstein (1985)
# NOTE(review): ape::pic() matches trait values to tips positionally, so the
# row order of gBGC_genomewide_stats (noExons filter) must match the tip order
# of leptidea_phylogeny_pruned -- confirm before trusting the contrasts.
pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned)
pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi,leptidea_phylogeny_pruned)
# Unscaled contrasts (with variances) for pi, mean chromosome number, and B.
pipic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$pi,leptidea_phylogeny_pruned, var.contrasts = T, scaled=F))
chrpic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Chr_num_mean,leptidea_phylogeny_pruned, var.contrasts = T, scaled=F))
Bpic <- as.data.frame(pic(gBGC_genomewide_stats[gBGC_genomewide_stats$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned, var.contrasts = T, scaled=F))
# Standardized regressions of B contrasts on pi / chromosome-number contrasts.
summary(lm(scale(Bpic$contrasts)~scale(pipic$contrasts)))
summary(lm(scale(Bpic$contrasts)~scale(chrpic$contrasts)))
# NOTE(review): the line below duplicates the previous statement (copy-paste?).
summary(lm(scale(Bpic$contrasts)~scale(chrpic$contrasts)))
summary(lm(Bpic$contrasts~chrpic$contrasts+pipic$contrasts))
# Inset scatter plots of the contrasts, used by annotation_custom() above.
p2 <- ggplot(Bpic, aes(x=pipic$contrasts, y=Bpic$contrasts))+geom_point(size=3)+
  theme_classic()+
  xlim(-0.0022, 0.0025)+
  xlab("") +
  ylab("") +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=8, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
p3 <- ggplot(Bpic, aes(x=chrpic$contrasts, y=Bpic$contrasts))+geom_point(size=3)+
  theme_classic()+
  xlim(-50,40)+
  xlab("") +
  ylab("") +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5), legend.position ="none", plot.title = element_text(face = "bold", hjust = 0.5), axis.text=element_text(size=8, colour="black"), axis.title=element_text(size=14), legend.text=element_text(size=12), legend.title=element_text(size=14))
#Same procedure as above but excluding the Spanish L. sinapis population
gBGC_genomewide_stats_noSpaSin <- gBGC_genomewide_stats[gBGC_genomewide_stats$Population != "Spa-sin",]
# Correlations of B with chromosome-number bounds and midpoint, Spa-sin removed.
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_low, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_high, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
cor.test(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_mean, gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1)
# Drop the Spa-sin tip (relies on the short tip labels assigned earlier).
leptidea_phylogeny_pruned_noSpasin <- drop.tip(leptidea_phylogeny_pruned,"Spa-sin")
plot(leptidea_phylogeny_pruned_noSpasin)
# Recompute unscaled contrasts on the reduced tree; same tip-order caveat as
# the full analysis applies.
pipic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$pi,leptidea_phylogeny_pruned_noSpasin, var.contrasts = T, scaled=F))
chrpic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Chr_num_mean,leptidea_phylogeny_pruned_noSpasin, var.contrasts = T, scaled=F))
Bpic_noSpasin <- as.data.frame(pic(gBGC_genomewide_stats_noSpaSin[gBGC_genomewide_stats_noSpaSin$Filter == "noExons",]$Bcor_M1,leptidea_phylogeny_pruned_noSpasin, var.contrasts = T, scaled=F))
cor.test(chrpic_noSpasin$contrasts, Bpic_noSpasin$contrasts)
summary(lm(scale(Bpic_noSpasin$contrasts)~scale(chrpic_noSpasin$contrasts)))
summary(lm(scale(Bpic_noSpasin$contrasts)~scale(chrpic_noSpasin$contrasts)+scale(pipic_noSpasin$contrasts)))
plot(chrpic_noSpasin$contrasts, Bpic_noSpasin$contrasts)
|
# dependencies
library(dplyr)
library(lazyeval)
#' group_lag.R
#' function to apply grouping and ordering to a dataframe, with associated lag function.
#'
#' @param mydf input dataframe
#' @param grouping colnames of vectors in mydf to use for dataframe grouping
#' @param ranking colname of column in mydf to use for ranking
#' @param lag number of entries to lag by
#' @param lagValue colname of column in mydf to use for lagged values
#'
#' @return mydf returned with additional column of lagged values
#' @export
#'
#' @examples
#'df <- data.frame(Names = c(rep('Dan',50),rep('Dave',100)),
#' Dates = c(seq(1,100,by=2),seq(1,100,by=1)),
#' Values = rnorm(150,0,1))
#'groupLag(df,c('Names'),c('Dates'),1,'Values')
groupLag <- function(mydf,grouping=NULL,ranking='Date',lag=1,lagValue='Values'){
  # Rank rows within each group by the ranking column and record which rank
  # each row should look back to (RankDown = Rank - lag).
  #
  # Rewritten with tidy evaluation: the original used dplyr's deprecated
  # standard-evaluation verbs (group_by_ / mutate_ / select_) plus
  # lazyeval::interp and a `~Rank-lag` formula whose `lag` was captured from
  # the function environment. `.data[[ ]]` / `.env$` make both lookups
  # explicit, and the lazyeval dependency is no longer needed here.
  df <- mydf %>%
    group_by(across(all_of(grouping))) %>%
    mutate(Rank = rank(.data[[ranking]]),
           RankDown = Rank - .env$lag)
  # Build the lookup table: group keys + Rank + the value column, with the
  # value column renamed to the fixed output name 'lagValue'.
  df2 <- df %>% select(all_of(c('Rank',lagValue,grouping)))
  colnames(df2) <- c('Rank','lagValue',grouping)
  # Self-join on (RankDown == Rank, group keys) to pull in the lagged value,
  # then drop the helper columns. Rows with no match get NA, matching the
  # original behaviour for the first `lag` rows of each group.
  df <- df %>% left_join(df2,by=c('RankDown'='Rank',grouping)) %>% select(-Rank,-RankDown)
  return(df)
}
| /group_lag.R | no_license | hoofay/groupLag | R | false | false | 1,386 | r | # dependencies
library(dplyr)
library(lazyeval)
#' group_lag.R
#' function to apply grouping and ordering to a dataframe, with associated lag function.
#'
#' @param mydf input dataframe
#' @param grouping colnames of vectors in mydf to use for dataframe grouping
#' @param ranking colname of column in mydf to use for ranking
#' @param lag number of entries to lag by
#' @param lagValue colname of column in mydf to use for lagged values
#'
#' @return mydf returned with additional column of lagged values
#' @export
#'
#' @examples
#'df <- data.frame(Names = c(rep('Dan',50),rep('Dave',100)),
#' Dates = c(seq(1,100,by=2),seq(1,100,by=1)),
#' Values = rnorm(150,0,1))
#'groupLag(df,c('Names'),c('Dates'),1,'Values')
# Group + rank a data frame, then join on the value found `lag` ranks earlier
# within the same group (returned as column 'lagValue'; NA where no earlier
# rank exists).
# NOTE(review): uses dplyr's standard-evaluation verbs (group_by_ / mutate_ /
# select_) and lazyeval, deprecated since dplyr 0.7 -- still functional but
# should be migrated to tidy evaluation.
groupLag <- function(mydf,grouping=NULL,ranking='Date',lag=1,lagValue='Values'){
  df <- mydf
  # symbols for NSE grouping; `names`/`foos` define the two derived columns:
  # Rank = rank of the ranking column, RankDown = Rank - lag (lag is captured
  # from this function's environment via the formula).
  groupL <- lapply(grouping,as.symbol)
  names <- c('Rank','RankDown')
  foos <- list(interp(~rank(var),var=as.name(ranking)),~Rank-lag)
  df <- df %>% group_by_(.dots=groupL) %>% mutate_(.dots=setNames(foos,names))
  # Lookup table: group keys + Rank + value column renamed to 'lagValue'.
  selectedNames <- c('Rank',lagValue,grouping)
  df2 <- df %>% select_(.dots=selectedNames)
  colnames(df2) <- c('Rank','lagValue',grouping)
  # Self-join on (RankDown == Rank, group keys), then drop helper columns.
  df <- df %>% left_join(df2,by=c('RankDown'='Rank',grouping)) %>% select(-Rank,-RankDown)
  return(df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-logical.R
\name{chk_logical}
\alias{chk_logical}
\alias{vld_logical}
\title{Check Logical}
\usage{
chk_logical(x, x_name = NULL)
vld_logical(x)
}
\arguments{
\item{x}{The object to check.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
The \code{chk_} function throws an informative error if the test fails or
returns the original object if successful so it can used in pipes.
The \code{vld_} function returns a flag indicating whether the test was met.
}
\description{
Checks if logical using
\code{is.logical(x)}
}
\section{Functions}{
\itemize{
\item \code{vld_logical}: Validate Logical
}}
\examples{
# chk_logical
chk_logical(TRUE)
try(chk_logical(1))
# vld_logical
vld_logical(TRUE)
vld_logical(matrix(TRUE))
vld_logical(logical(0))
vld_logical(NA)
vld_logical(1)
vld_logical("TRUE")
}
\seealso{
Other chk_typeof:
\code{\link{chk_character_or_factor}()},
\code{\link{chk_character}()},
\code{\link{chk_count}()},
\code{\link{chk_double}()},
\code{\link{chk_environment}()},
\code{\link{chk_factor}()},
\code{\link{chk_integer}()},
\code{\link{chk_list}()}
}
\concept{chk_typeof}
| /man/chk_logical.Rd | permissive | pre-processing-r/chk | R | false | true | 1,193 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-logical.R
\name{chk_logical}
\alias{chk_logical}
\alias{vld_logical}
\title{Check Logical}
\usage{
chk_logical(x, x_name = NULL)
vld_logical(x)
}
\arguments{
\item{x}{The object to check.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
The \code{chk_} function throws an informative error if the test fails or
returns the original object if successful so it can used in pipes.
The \code{vld_} function returns a flag indicating whether the test was met.
}
\description{
Checks if logical using
\code{is.logical(x)}
}
\section{Functions}{
\itemize{
\item \code{vld_logical}: Validate Logical
}}
\examples{
# chk_logical
chk_logical(TRUE)
try(chk_logical(1))
# vld_logical
vld_logical(TRUE)
vld_logical(matrix(TRUE))
vld_logical(logical(0))
vld_logical(NA)
vld_logical(1)
vld_logical("TRUE")
}
\seealso{
Other chk_typeof:
\code{\link{chk_character_or_factor}()},
\code{\link{chk_character}()},
\code{\link{chk_count}()},
\code{\link{chk_double}()},
\code{\link{chk_environment}()},
\code{\link{chk_factor}()},
\code{\link{chk_integer}()},
\code{\link{chk_list}()}
}
\concept{chk_typeof}
|
# format Internalizing GWAS data
# Reads raw Internalizing GWAS summary statistics, standardizes column names,
# lifts coordinates from hg19 to hg38, and saves the result as a GRanges RDS.
library(here)
library(dplyr)
library(magrittr)
library(readr)
library(GenomicRanges)
library(rtracklayer)
library(liftOver)
# OUTPUT FILES #########################################################################################################
output.granges.rds <- here("data/gwas_datasets/internalizing/internalizing.hg38.GRanges.rds")
# INPUT FILES ##########################################################################################################
raw.data <- here("data/gwas_datasets/internalizing/download/meta3.INTsnplist_F.txt.gz")
# chain file for hg19 to hg38 liftOver
path.hg19.to.hg38 <- system.file(package="liftOver", "extdata", "hg19ToHg38.over.chain")
# GLOBALS ##############################################################################################################
CHROMS <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12",
            "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX")
# NOTE(review): Seqinfo(genome = "hg38") looks up chromosome metadata for the
# assembly; confirm it behaves as expected when run without network access.
HG38.SEQINFO <- Seqinfo(genome = "hg38")
# Import Data ##########################################################################################################
print("Importing GWAS data...")
# Space-delimited summary statistics.
df.raw <- read_delim(raw.data, delim = " ")
# columns from educational attainment used as model for the rest
# RSID CHR POS A1.effect A2 EUR.freq Beta SE Pval
# rename columns for consistency
df.raw %<>%
  dplyr::select(RSID = SNP,
                CHR = chr,
                POS = pos,
                A1.effect = A1,
                A2,
                Beta = meta.effect,
                SE = meta.se,
                Pval = meta.pval)
# Upper-case the allele codes.
df.raw %<>%
  mutate(A1.effect = toupper(A1.effect),
         A2 = toupper(A2))
# Interactive sanity check of the chromosome codes (result not captured).
levels(factor(df.raw$CHR))
# remove NA positions
df.raw %<>%
  filter(!is.na(CHR))
# modify to UCSC naming, this dataset does not have X
df.raw %<>%
  mutate(CHR = paste0("chr", CHR))
#df.raw$CHR[df.raw$CHR == "chr23"] <- "chrX"
# Interactive sanity check: all chromosome names should be in CHROMS.
all(levels(factor(df.raw$CHR)) %in% CHROMS)
print("Converting to GRanges...")
# make GRanges on hg19 (single-base ranges: start == end == POS)
gr.raw <- makeGRangesFromDataFrame(df.raw,
                                   keep.extra.columns = TRUE,
                                   ignore.strand = TRUE,
                                   seqnames.field = "CHR",
                                   start.field = "POS",
                                   end.field = "POS")
print("Finished converting to GRanges.")
rm(df.raw)
# LiftOver to hg38 #####################################################################################################
print("LiftOver to hg38...")
# chain for hg19 to hg38 conversion
ch <- import.chain(path.hg19.to.hg38)
# GRangesList of GRanges conversion
lo <- liftOver(gr.raw, ch)
# unlist to get GRanges
# NOTE(review): unlist() silently drops positions that failed to map and would
# duplicate any position mapping to multiple targets -- verify this is
# acceptable for downstream use.
gr.raw.hg38 <- unlist(lo)
# modify seqlevels and seqinfo; keep only the canonical chromosomes, dropping
# ranges lifted onto other sequences (pruning.mode = "coarse").
seqlevels(gr.raw.hg38) <- CHROMS
seqinfo(gr.raw.hg38) <- HG38.SEQINFO
gr.raw.hg38 <- keepSeqlevels(gr.raw.hg38,
                             CHROMS,
                             pruning.mode = "coarse")
print("Finished liftOver.")
rm(ch, lo, gr.raw)
# Save RDS #############################################################################################################
print("Saving RDS...")
saveRDS(gr.raw.hg38, output.granges.rds)
print("Finished saving RDS.")
rm(gr.raw.hg38)
| /src/gwas_datasets/munge_internalizing.R | no_license | mikelaff/mirna-eqtl-manuscript | R | false | false | 3,381 | r | # format Internalizing GWAS data
library(here)
library(dplyr)
library(magrittr)
library(readr)
library(GenomicRanges)
library(rtracklayer)
library(liftOver)
# OUTPUT FILES #########################################################################################################
output.granges.rds <- here("data/gwas_datasets/internalizing/internalizing.hg38.GRanges.rds")
# INPUT FILES ##########################################################################################################
raw.data <- here("data/gwas_datasets/internalizing/download/meta3.INTsnplist_F.txt.gz")
# chain file for hg19 to hg38 liftOver
path.hg19.to.hg38 <- system.file(package="liftOver", "extdata", "hg19ToHg38.over.chain")
# GLOBALS ##############################################################################################################
CHROMS <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12",
"chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX")
HG38.SEQINFO <- Seqinfo(genome = "hg38")
# Import Data ##########################################################################################################
print("Importing GWAS data...")
df.raw <- read_delim(raw.data, delim = " ")
# columns from educational attainment used as model for the rest
# RSID CHR POS A1.effect A2 EUR.freq Beta SE Pval
# rename columns for consistency
df.raw %<>%
dplyr::select(RSID = SNP,
CHR = chr,
POS = pos,
A1.effect = A1,
A2,
Beta = meta.effect,
SE = meta.se,
Pval = meta.pval)
df.raw %<>%
mutate(A1.effect = toupper(A1.effect),
A2 = toupper(A2))
levels(factor(df.raw$CHR))
# remove NA positions
df.raw %<>%
filter(!is.na(CHR))
# modify to UCSC naming, this dataset does not have X
df.raw %<>%
mutate(CHR = paste0("chr", CHR))
#df.raw$CHR[df.raw$CHR == "chr23"] <- "chrX"
all(levels(factor(df.raw$CHR)) %in% CHROMS)
print("Converting to GRanges...")
# make GRanges on hg19
gr.raw <- makeGRangesFromDataFrame(df.raw,
keep.extra.columns = TRUE,
ignore.strand = TRUE,
seqnames.field = "CHR",
start.field = "POS",
end.field = "POS")
print("Finished converting to GRanges.")
rm(df.raw)
# LiftOver to hg38 #####################################################################################################
print("LiftOver to hg38...")
# chain for hg19 to hg38 conversion
ch <- import.chain(path.hg19.to.hg38)
# GRangesList of GRanges conversion
lo <- liftOver(gr.raw, ch)
# unlist to get GRanges
gr.raw.hg38 <- unlist(lo)
# modify seqlevels and seqinfo
seqlevels(gr.raw.hg38) <- CHROMS
seqinfo(gr.raw.hg38) <- HG38.SEQINFO
gr.raw.hg38 <- keepSeqlevels(gr.raw.hg38,
CHROMS,
pruning.mode = "coarse")
print("Finished liftOver.")
rm(ch, lo, gr.raw)
# Save RDS #############################################################################################################
print("Saving RDS...")
saveRDS(gr.raw.hg38, output.granges.rds)
print("Finished saving RDS.")
rm(gr.raw.hg38)
|
# Libraries -----
library(tidyverse)
library(gt)
library(scales)
library(extrafont)
library(readxl)
# General configuration ----------
# Colour palette
verde <- "#01B3B6"
negro <- "#333132"
gris <- "#AEB6BF"
color3 <- c(verde, gris, negro)
color2 <- c(verde, negro)
# Display options --------
options(scipen = 999) # Show axis values as plain numbers instead of scientific notation
loadfonts(quiet = TRUE) # Make additional font families available to R.
# Clean style with no background grid lines
estilo <- theme(panel.grid = element_blank(),
                plot.background = element_rect(fill = "#FBFCFC"),
                panel.background = element_blank(),
                text = element_text(family = "Ubuntu Mono"))
# Clean style with light-grey vertical reference lines
estilov <- theme(panel.grid = element_blank(),
                 plot.background = element_rect(fill = "#FBFCFC"),
                 panel.background = element_blank(),
                 panel.grid.major.x = element_line(color = "#ecf0f1"),
                 text = element_text(family = "Ubuntu Mono"))
# Clean style with light-grey horizontal reference lines
estiloh <- theme(panel.grid = element_blank(),
                 plot.background = element_rect(fill = "#FBFCFC"),
                 panel.background = element_blank(),
                 panel.grid.major.y = element_line(color = "#ecf0f1"),
                 text = element_text(family = "Ubuntu Mono"))
# Caption text reused throughout the analysis
fuente <- "Fuente: Datos Ficticios\nClub de R para RRHH"
# Helpers to format the x / y axis labels as percentages
eje_x_per <- scale_x_continuous(labels = scales::percent_format(accuracy = 1))
eje_y_per <- scale_y_continuous(labels = scales::percent_format(accuracy = 1))
# Data loading -----
encuesta <- read_excel("data/encuesta.xlsx")
plantel <- read_excel("data/plantel.xlsx")
# Data preparation -----------
# Pivot the survey answers (columns 7-11, one per question) into long format
enc <- encuesta %>%
  pivot_longer(cols = c(7:11),
               names_to = "pregunta",
               values_to = "valor")
# Rename columns and set the ordering of the ordinal job-level variable
enc <- enc %>%
  rename(id = "ID",
         genero = `¿Cómo definirías tu identidad de género?`,
         unidad = "Unidad de Negocio",
         pais = "País",
         sector = "Sector",
         cargo = "Tu cargo/nivel:") %>%
  mutate(cargo = factor(cargo,
                        levels = c("Management", "Líder", "Contribuidor individual")))
# Collapse the Likert answers into three result categories: the two "agree"
# levels become "Positivo", the neutral level "Neutral", the rest "Negativo"
enc <- enc %>%
  mutate(resultado = if_else(valor == "Totalmente de acuerdo", "Positivo",
                             if_else(valor == "De acuerdo", "Positivo",
                                     if_else(valor == "Ni de acuerdo ni en desacuerdo",
                                             "Neutral", "Negativo"
                                     )
                             )
  ),
  resultado = factor(resultado,
                     levels = c("Positivo", "Neutral", "Negativo")))
## ----grafico1--------------------------------------------------------------------------------
# Stacked (100%) bar chart of result categories by country
ggplot(enc, aes(x = pais, fill = resultado)) +
  geom_bar(position = "fill") +
  scale_fill_manual(values = c(color3)) +
  estiloh +
  eje_y_per +
  labs(title = "Resultados por país",
       fill = "Resultado",
       x = "", y = "",
       caption = fuente)
## ----etiq-largas-----------------------------------------------------------------------------
# Share of positive answers per question -- raw question text makes the y-axis
# labels unreadably long (motivates the str_wrap step below)
enc %>%
  group_by(pregunta, resultado) %>%
  summarise(cant = n()) %>%
  mutate(prop = cant/sum(cant)) %>%
  filter(resultado == "Positivo") %>%
  ggplot(aes(x = prop, y = pregunta)) +
  geom_col(fill = verde) +
  estilov +
  eje_x_per +
  labs(title = "Ranking de Respuestas Positivas",
       x = "", y = "",
       caption = fuente)
## ----etiq-largas1----------------------------------------------------------------------------
# Wrap the long question labels onto multiple lines (max 40 characters each)
enc$preg2 <- str_wrap(enc$pregunta, width = 40)
# Inspect how the wrapped labels look in the data frame
head(enc$preg2,5)
## ----etiq-largas2----------------------------------------------------------------------------
# Same ranking plot with wrapped labels, ordered by the positive share
enc %>%
  group_by(preg2, resultado) %>%
  summarise(cant = n()) %>%
  mutate(prop = cant/sum(cant)) %>%
  filter(resultado == "Positivo") %>%
  ggplot(aes(x = prop, y = reorder(preg2, prop))) +
  geom_col(fill = verde) +
  estilov +
  eje_x_per +
  labs(title = "Ranking de Respuestas Positivas",
       x = "", y = "",
       caption = fuente)
## ----etiq-largas3, fig.height=8--------------------------------------------------------------
# Store the plot so the next chunk can add value labels on top of it
ranking <- enc %>%
  group_by(preg2, resultado) %>%
  summarise(cant = n()) %>%
  mutate(prop = cant/sum(cant)) %>%
  filter(resultado == "Positivo") %>%
  ggplot(aes(x = prop, y = reorder(preg2, prop))) +
  geom_col(fill = verde) +
  estilov +
  eje_x_per +
  labs(title = "Ranking de Respuestas Positivas",
       x = "", y = "",
       caption = fuente)
ranking
## ----texto1----------------------------------------------------------------------------------
ranking +
  geom_text(aes(label = percent(prop, # show results as percentages
                                accuracy = 1)), # number of decimal places
            size = 3, # shrink the label font
            hjust = 1.2) # nudge the label leftwards, inside the bar
# Functions -------
# Interactive inspection of the prepared data frame's structure
str(enc)
# Count rows and within-group proportions for a set of grouping columns.
#
# df:     data frame with the survey responses.
# grupos: character vector of column names to group by. dplyr's summarise()
#         drops the last grouping level before mutate() runs, so `prop` is the
#         proportion within all but the last level. The default reproduces the
#         original hard-coded grouping by genero and resultado, keeping
#         existing calls like cant_prop_gen(enc) unchanged; passing other
#         columns generalizes the function (what the commented-out `sucundun`
#         stub below was aiming for).
cant_prop_gen <- function(df, grupos = c("genero", "resultado")){
  df %>%
    group_by(across(all_of(grupos))) %>%
    summarise(cant = n()) %>%
    mutate(prop = cant / sum(cant))
}
# Share of positive answers by gender
enc %>%
  cant_prop_gen() %>%
  filter(resultado == "Positivo")
# Abandoned draft of a generalized version (grouping columns as arguments);
# kept commented out by the author.
#sucundun <- function(df, ){
#  df %>%
#    group_by() %>%
#    summarise(cant = n()) %>%
#    mutate(prop = cant / sum(cant))
#}
# sucundun(enc, genero, resultado)
# Loops -----------
# Dodged bar chart of response counts by sector and gender (all sectors at once)
enc %>%
  group_by(sector, genero, resultado) %>%
  summarise(cant = n()) %>%
  mutate(prop = cant / sum(cant)) %>%
  ggplot(aes(y = sector, x = cant, fill = genero)) +
  geom_col(position = "dodge")
# Build one plot per sector: split the summary by sector, then map each piece
# to its own ggplot object (named list, names = sector)
graficos <- enc %>%
  group_by(sector, genero, resultado) %>%
  summarise(cant = n()) %>%
  mutate(prop = cant / sum(cant)) %>%
  split(.$sector) %>%
  map(~ggplot(.x, aes(y = sector, x = cant, fill = genero)) +
        geom_col(position = "dodge") + estilov)
# Save every plot as <sector>.png under files/ (pwalk pairs paths with plots)
paths <- stringr::str_c(names(graficos), ".png")
pwalk(list(paths, graficos), ggsave, path = "files/")
# Trust the Tidyverse ------
# Count leaders by country, division, sector and gender
# NOTE(review): lider is compared against the lowercase string "true" --
# presumably the Excel boolean was read as text; confirm the source encoding.
plantel <- plantel %>%
  rename(division = `Unidad de Negocio`,
         lider = Líder,
         sexo = Género,
         sector = Sector,
         pais = País) %>%
  filter(lider == "true") %>%
  group_by(pais, division, sector, lider, sexo) %>%
  tally() %>%
  ungroup()
# Pivot to wide format: one column of leader counts per gender
plantel <- plantel %>%
  pivot_wider(.,
              names_from = sexo,
              values_from = n)
# Replace NA counts (gender absent in a group) with 0
plantel[is.na(plantel)] <- 0
# Proportion of male leaders per group
# NOTE(review): the Femenino == 0 branch returns 1, which equals
# Masculino / (Masculino + Femenino) whenever Masculino > 0 -- the guard looks
# redundant except for the all-zero case; verify the intent.
plantel %>%
  mutate(prop_lider_hombre = if_else(Femenino == 0, 1, Masculino / (Masculino +Femenino))) %>%
  select(-lider)
| /r4hr_microaprendizajes.R | no_license | r4hr/r4hr_microaprendizajes | R | false | false | 7,273 | r | # Librerías -----
library(tidyverse)
library(gt)
library(scales)
library(extrafont)
library(readxl)
# Configuraciones generales ----------
# Colores
verde <- "#01B3B6"
negro <- "#333132"
gris <- "#AEB6BF"
color3 <- c(verde, gris, negro)
color2 <- c(verde, negro)
# Opciones de visualización --------
options(scipen = 999) # Modifica la visualización de los ejes numérico a valores nominales
loadfonts(quiet = TRUE) # Permite cargar en R otros tipos de fuentes.
# Estilo limpio sin líneas de fondo
estilo <- theme(panel.grid = element_blank(),
plot.background = element_rect(fill = "#FBFCFC"),
panel.background = element_blank(),
text = element_text(family = "Ubuntu Mono"))
# Estilo limpio con líneas de referencia verticales en gris claro
estilov <- theme(panel.grid = element_blank(),
plot.background = element_rect(fill = "#FBFCFC"),
panel.background = element_blank(),
panel.grid.major.x = element_line(color = "#ecf0f1"),
text = element_text(family = "Ubuntu Mono"))
# Estilo limpio con líneas de referencia horizontales en gris claro
estiloh <- theme(panel.grid = element_blank(),
plot.background = element_rect(fill = "#FBFCFC"),
panel.background = element_blank(),
panel.grid.major.y = element_line(color = "#ecf0f1"),
text = element_text(family = "Ubuntu Mono"))
# Creo un objeto con un texto que se va a repetir mucho a lo largo del análisis
fuente <- "Fuente: Datos Ficticios\nClub de R para RRHH"
# Creo objetos para formatear las etiquetas numéricas de los ejes x e y
eje_x_per <- scale_x_continuous(labels = scales::percent_format(accuracy = 1))
eje_y_per <- scale_y_continuous(labels = scales::percent_format(accuracy = 1))
# Carga de Datos -----
encuesta <- read_excel("data/encuesta.xlsx")
plantel <- read_excel("data/plantel.xlsx")
# Preparación de datos -----------
# Pivotea el dataset a un formato largo
enc <- encuesta %>%
pivot_longer(cols = c(7:11),
names_to = "pregunta",
values_to = "valor")
# Cambia nombres y Organiza variables ordinales
enc <- enc %>%
rename(id = "ID",
genero = `¿Cómo definirías tu identidad de género?`,
unidad = "Unidad de Negocio",
pais = "País",
sector = "Sector",
cargo = "Tu cargo/nivel:") %>%
mutate(cargo = factor(cargo,
levels = c("Management", "Líder", "Contribuidor individual")))
# Crea categorías de resultados
enc <- enc %>%
mutate(resultado = if_else(valor == "Totalmente de acuerdo", "Positivo",
if_else(valor == "De acuerdo", "Positivo",
if_else(valor == "Ni de acuerdo ni en desacuerdo",
"Neutral", "Negativo"
)
)
),
resultado = factor(resultado,
levels = c("Positivo", "Neutral", "Negativo")))
## ----grafico1--------------------------------------------------------------------------------
ggplot(enc, aes(x = pais, fill = resultado)) +
geom_bar(position = "fill") +
scale_fill_manual(values = c(color3)) +
estiloh +
eje_y_per +
labs(title = "Resultados por país",
fill = "Resultado",
x = "", y = "",
caption = fuente)
## ----etiq-largas-----------------------------------------------------------------------------
enc %>%
group_by(pregunta, resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant/sum(cant)) %>%
filter(resultado == "Positivo") %>%
ggplot(aes(x = prop, y = pregunta)) +
geom_col(fill = verde) +
estilov +
eje_x_per +
labs(title = "Ranking de Respuestas Positivas",
x = "", y = "",
caption = fuente)
## ----etiq-largas1----------------------------------------------------------------------------
# Divide el largo de 'función' en varias líneas
enc$preg2 <- str_wrap(enc$pregunta, width = 40)
# Veamos como queda esto en el df
head(enc$preg2,5)
## ----etiq-largas2----------------------------------------------------------------------------
enc %>%
group_by(preg2, resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant/sum(cant)) %>%
filter(resultado == "Positivo") %>%
ggplot(aes(x = prop, y = reorder(preg2, prop))) +
geom_col(fill = verde) +
estilov +
eje_x_per +
labs(title = "Ranking de Respuestas Positivas",
x = "", y = "",
caption = fuente)
## ----etiq-largas3, fig.height=8--------------------------------------------------------------
ranking <- enc %>%
group_by(preg2, resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant/sum(cant)) %>%
filter(resultado == "Positivo") %>%
ggplot(aes(x = prop, y = reorder(preg2, prop))) +
geom_col(fill = verde) +
estilov +
eje_x_per +
labs(title = "Ranking de Respuestas Positivas",
x = "", y = "",
caption = fuente)
ranking
## ----texto1----------------------------------------------------------------------------------
ranking +
geom_text(aes(label = percent(prop, # Muestra los resultados como porcentaje
accuracy = 1)), # Indica la cantidad de decimales
size = 3, # Cambia el tamaño de la letra
hjust = 1.2) # Mueve la etiqueta para la izquierda
# Funciones -------
str(enc)
cant_prop_gen <- function(df){
df %>%
group_by(genero,resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant / sum(cant))
}
enc %>%
cant_prop_gen() %>%
filter(resultado == "Positivo")
#sucundun <- function(df, ){
# df %>%
# group_by() %>%
# summarise(cant = n()) %>%
# mutate(prop = cant / sum(cant))
#}
# sucundun(enc, genero, resultado)
# Loops -----------
enc %>%
group_by(sector, genero, resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant / sum(cant)) %>%
ggplot(aes(y = sector, x = cant, fill = genero)) +
geom_col(position = "dodge")
graficos <- enc %>%
group_by(sector, genero, resultado) %>%
summarise(cant = n()) %>%
mutate(prop = cant / sum(cant)) %>%
split(.$sector) %>%
map(~ggplot(.x, aes(y = sector, x = cant, fill = genero)) +
geom_col(position = "dodge") + estilov)
paths <- stringr::str_c(names(graficos), ".png")
pwalk(list(paths, graficos), ggsave, path = "files/")
# Trust the Tidyverse ------
# Cuento la cantidad de líderes por sector y géenero
plantel <- plantel %>%
rename(division = `Unidad de Negocio`,
lider = Líder,
sexo = Género,
sector = Sector,
pais = País) %>%
filter(lider == "true") %>%
group_by(pais, division, sector, lider, sexo) %>%
tally() %>%
ungroup()
# Pivoteo el dataset a un dataset ancho
plantel <- plantel %>%
pivot_wider(.,
names_from = sexo,
values_from = n)
# Reemplaza los NA con un 0
plantel[is.na(plantel)] <- 0
# Calculo porcentaje de líderes hombres
plantel %>%
mutate(prop_lider_hombre = if_else(Femenino == 0, 1, Masculino / (Masculino +Femenino))) %>%
select(-lider)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QPredictions.R
\name{Get.Def.Par.QPredictions}
\alias{Get.Def.Par.QPredictions}
\title{Delivers some default Parameters of Q-Predictions}
\usage{
Get.Def.Par.QPredictions()
}
\description{
Delivers some default Parameters of Q-Predictions
}
| /man/Get.Def.Par.QPredictions.Rd | no_license | NiklasPaluszkiewicz/RLR | R | false | true | 319 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QPredictions.R
\name{Get.Def.Par.QPredictions}
\alias{Get.Def.Par.QPredictions}
\title{Delivers some default Parameters of Q-Predictions}
\usage{
Get.Def.Par.QPredictions()
}
\description{
Delivers some default Parameters of Q-Predictions
}
|
library(tidyverse)
library(janitor)
data_RABO_1 = read_csv("data/bank_account_transactions_RABO_1.csv")
data_RABO_2 = read_csv("data/bank_account_transactions_RABO_2.csv")
data_RABO_3 = read_csv("data/bank_account_transactions_RABO_3.csv")
data_INGB_1 = read_csv("data/bank_account_transactions_INGB_1.csv")
names(clean_names(data_INGB_1, case = "snake"))
# Clean and normalise a Rabobank transaction export.
#
# Standardises column names to snake_case, parses amounts and dates, derives
# day/month columns, collapses category synonyms, and returns a fixed set of
# columns: bedrag, naam, dag, maand, is_vaste_last, categorie, iban.
parse_RABO <- function(dataset) {
dataset <- clean_names(dataset, case = "snake")
dataset %>%
mutate(
bedrag = parse_number(bedrag)
# Raw exports mix several date formats; try each in turn.
,datum = as.Date(datum, tryFormats = c("%m/%d/%Y", "%d-%m-%y", "%d/%m/%Y"))
# NOTE(review): lubridate::day()/month() already return numbers, while
# readr::parse_number() documents character input — confirm these calls
# run cleanly on the installed readr version.
,dag = parse_number(lubridate::day(datum))
,maand = parse_number(lubridate::month(datum))
,is_vaste_last = parse_factor(is_vaste_last, levels = c(0,1))
# Normalise category labels: trim, lowercase, collapse synonyms to
# "overige uitgaven", then convert to a factor.
,categorie = categorie %>%
trimws() %>%
tolower() %>%
replace(categorie %in% c("uitgaven", "overige", "ovee"), "overige uitgaven") %>%
as_factor()
,naam = parse_character(naam_tegenpartij)
,iban = iban_bban
) %>%
select(
bedrag
,naam
,dag
,maand
,is_vaste_last
,categorie
,iban
)
}
# Clean and normalise an ING transaction export.
#
# ING exports use positional column names (kolom1..kolom7) plus a debit/credit
# flag in kolom6. The output mirrors parse_RABO() — same columns (bedrag, naam,
# dag, maand, is_vaste_last, categorie, iban) in the same order — so that the
# cleaned tables can be row-bound into one master table.
#
# Fixes vs. original: `iban` is now included in select() (without it, the
# rbind() that builds `master` fails on mismatched column counts), and a
# redundant replace(categorie == "uitgaven", ...) was dropped — that value is
# already collapsed by the %in% replace above it.
parse_INGB <- function(dataset) {
dataset <- clean_names(dataset, case = "snake")
dataset %>%
mutate(
# kolom6 == "Af" marks debits; negate those amounts.
# NOTE(review): if kolom7 is read as character, kolom7 * -1 will error —
# confirm the column type produced by read_csv for this export.
bedrag = parse_number(ifelse(kolom6 =="Af",kolom7 * -1, kolom7))
,datum = parse_date(kolom1,"%Y%m%d")
,dag = parse_number(lubridate::day(datum))
,maand = parse_number(lubridate::month(datum))
,is_vaste_last = parse_factor(is_vaste_last, levels = c(0,1))
# Normalise category labels: trim, lowercase, collapse synonyms.
,categorie = categorie %>%
trimws() %>%
tolower() %>%
replace(categorie %in% c("uitgaven", "overige", "ovee"), "overige uitgaven") %>%
replace(categorie %in% c("medisch"), "medische kosten") %>%
as_factor()
,naam = parse_character(kolom2)
,iban = kolom3
) %>%
select(
bedrag
,naam
,dag
,maand
,is_vaste_last
,categorie
,iban
)
}
# Apply the bank-specific parsers to each raw export.
data_RABO_1_clean <- parse_RABO(data_RABO_1)
data_RABO_2_clean <- parse_RABO(data_RABO_2)
data_RABO_3_clean <- parse_RABO(data_RABO_3)
data_INGB_1_clean <- parse_INGB(data_INGB_1)
# Quick sanity check on the normalised category levels.
table(data_RABO_1_clean$categorie)
# Persist each cleaned table as CSV (human-readable) ...
write_csv(data_RABO_1_clean, "data/bank_account_transactions_RABO_1_clean.csv")
write_csv(data_RABO_2_clean, "data/bank_account_transactions_RABO_2_clean.csv")
write_csv(data_RABO_3_clean, "data/bank_account_transactions_RABO_3_clean.csv")
write_csv(data_INGB_1_clean, "data/bank_account_transactions_INGB_1_clean.csv")
# ... and as RDS (preserves factor/date column types).
write_rds(data_RABO_1_clean, "data/bank_account_transactions_RABO_1_clean.rds")
write_rds(data_RABO_2_clean, "data/bank_account_transactions_RABO_2_clean.rds")
write_rds(data_RABO_3_clean, "data/bank_account_transactions_RABO_3_clean.rds")
write_rds(data_INGB_1_clean, "data/bank_account_transactions_INGB_1_clean.rds")
# Combine all cleaned tables into one master transaction table and drop
# incomplete rows.
# NOTE(review): rbind() requires every input to have the same column set —
# confirm parse_RABO() and parse_INGB() select identical columns.
master <- rbind(
data_RABO_1_clean
,data_RABO_2_clean
,data_RABO_3_clean
,data_INGB_1_clean
) %>%
drop_na()
write_csv(master, "data/transactions_master.csv")
write_rds(master, "data/transactions_master.rds")
| /analytics/01_cleanup.R | permissive | GewoonMaarten/StashDash | R | false | false | 3,141 | r | library(tidyverse)
library(janitor)
data_RABO_1 = read_csv("data/bank_account_transactions_RABO_1.csv")
data_RABO_2 = read_csv("data/bank_account_transactions_RABO_2.csv")
data_RABO_3 = read_csv("data/bank_account_transactions_RABO_3.csv")
data_INGB_1 = read_csv("data/bank_account_transactions_INGB_1.csv")
names(clean_names(data_INGB_1, case = "snake"))
parse_RABO <- function(dataset) {
dataset <- clean_names(dataset, case = "snake")
dataset %>%
mutate(
bedrag = parse_number(bedrag)
,datum = as.Date(datum, tryFormats = c("%m/%d/%Y", "%d-%m-%y", "%d/%m/%Y"))
,dag = parse_number(lubridate::day(datum))
,maand = parse_number(lubridate::month(datum))
,is_vaste_last = parse_factor(is_vaste_last, levels = c(0,1))
,categorie = categorie %>%
trimws() %>%
tolower() %>%
replace(categorie %in% c("uitgaven", "overige", "ovee"), "overige uitgaven") %>%
as_factor()
,naam = parse_character(naam_tegenpartij)
,iban = iban_bban
) %>%
select(
bedrag
,naam
,dag
,maand
,is_vaste_last
,categorie
,iban
)
}
parse_INGB <- function(dataset) {
dataset <- clean_names(dataset, case = "snake")
dataset %>%
mutate(
bedrag = parse_number(ifelse(kolom6 =="Af",kolom7 * -1, kolom7))
,datum = parse_date(kolom1,"%Y%m%d")
,dag = parse_number(lubridate::day(datum))
,maand = parse_number(lubridate::month(datum))
,is_vaste_last = parse_factor(is_vaste_last, levels = c(0,1))
,categorie = categorie %>%
trimws() %>%
tolower() %>%
replace(categorie %in% c("uitgaven", "overige", "ovee"), "overige uitgaven") %>%
replace(categorie %in% c("medisch"), "medische kosten") %>%
replace(categorie == "uitgaven", "overige uitgaven") %>%
as_factor()
,naam = parse_character(kolom2)
,iban = kolom3
) %>%
select(
bedrag
,naam
,dag
,maand
,is_vaste_last
,categorie
)
}
data_RABO_1_clean <- parse_RABO(data_RABO_1)
data_RABO_2_clean <- parse_RABO(data_RABO_2)
data_RABO_3_clean <- parse_RABO(data_RABO_3)
data_INGB_1_clean <- parse_INGB(data_INGB_1)
table(data_RABO_1_clean$categorie)
write_csv(data_RABO_1_clean, "data/bank_account_transactions_RABO_1_clean.csv")
write_csv(data_RABO_2_clean, "data/bank_account_transactions_RABO_2_clean.csv")
write_csv(data_RABO_3_clean, "data/bank_account_transactions_RABO_3_clean.csv")
write_csv(data_INGB_1_clean, "data/bank_account_transactions_INGB_1_clean.csv")
write_rds(data_RABO_1_clean, "data/bank_account_transactions_RABO_1_clean.rds")
write_rds(data_RABO_2_clean, "data/bank_account_transactions_RABO_2_clean.rds")
write_rds(data_RABO_3_clean, "data/bank_account_transactions_RABO_3_clean.rds")
write_rds(data_INGB_1_clean, "data/bank_account_transactions_INGB_1_clean.rds")
master <- rbind(
data_RABO_1_clean
,data_RABO_2_clean
,data_RABO_3_clean
,data_INGB_1_clean
) %>%
drop_na()
write_csv(master, "data/transactions_master.csv")
write_rds(master, "data/transactions_master.rds")
|
# Small demo of basic R data structures: vector, list, named matrix, array.
a <- 2:14                                   # integer sequence 2..14
b <- list(c("hello", "cat", "dog", "pig"))  # character vector wrapped in a list
m <- matrix(1:12,
            nrow = 4, ncol = 3,
            dimnames = list(paste0("r", 1:4),   # row names r1..r4
                            paste0("c", 1:3)))  # column names c1..c3
a.ray <- array(1:24, dim = c(3, 4, 2))      # 3 x 4 x 2 array filled 1..24
df = data.frame(a=c(1,2,3,4), b=c(1,2,3,4)) | /Lab2.R | no_license | lukeboagni/course_materials | R | false | false | 277 | r | a=c(2:14)
b=c("hello","cat","dog","pig")
b=list(b)
m = matrix(data=1:12, nrow=4, ncol=3,
dimnames = list(c("r1","r2","r3","r4"),
c("c1","c2","c3")))
a.ray = array(data=1:24, dim = c(3,4,2))
df = data.frame(a=c(1,2,3,4), b=c(1,2,3,4)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateArgFunctions.R
\name{createRunSelfControlledCohortArgs}
\alias{createRunSelfControlledCohortArgs}
\title{Create a parameter object for the function runSelfControlledCohort}
\usage{
createRunSelfControlledCohortArgs(
firstExposureOnly = TRUE,
firstOutcomeOnly = TRUE,
minAge = "",
maxAge = "",
studyStartDate = "",
studyEndDate = "",
addLengthOfExposureExposed = TRUE,
riskWindowStartExposed = 1,
riskWindowEndExposed = 30,
addLengthOfExposureUnexposed = TRUE,
riskWindowEndUnexposed = -1,
riskWindowStartUnexposed = -30,
hasFullTimeAtRisk = FALSE,
washoutPeriod = 0,
followupPeriod = 0,
computeTarDistribution = FALSE
)
}
\arguments{
\item{firstExposureOnly}{If TRUE, only use first occurrence of each drug concept id for
each person.}
\item{firstOutcomeOnly}{If TRUE, only use first occurrence of each condition concept id
for each person.}
\item{minAge}{Integer for minimum allowable age.}
\item{maxAge}{Integer for maximum allowable age.}
\item{studyStartDate}{Date for minimum allowable data for index exposure. Date format
is 'yyyymmdd'.}
\item{studyEndDate}{Date for maximum allowable data for index exposure. Date format
is 'yyyymmdd'.}
\item{addLengthOfExposureExposed}{If TRUE, use the duration from drugEraStart -> drugEraEnd
as part of timeAtRisk.}
\item{riskWindowStartExposed}{Integer of days to add to drugEraStart for start of timeAtRisk
(0 to include index date, 1 to start the day after).}
\item{riskWindowEndExposed}{Additional window to add to end of exposure period
(if addLengthOfExposureExposed = TRUE, then add to exposure
end date, else add to exposure start date).}
\item{addLengthOfExposureUnexposed}{If TRUE, use the duration from exposure start -> exposure end
as part of timeAtRisk looking back before exposure start.}
\item{riskWindowEndUnexposed}{Integer of days to add to exposure start for end of timeAtRisk
(0 to include index date, -1 to end the day before).}
\item{riskWindowStartUnexposed}{Additional window to add to start of exposure period
(if addLengthOfExposureUnexposed = TRUE, then add to
exposure end date, else add to exposure start date).}
\item{hasFullTimeAtRisk}{If TRUE, restrict to people who have full time-at-risk exposed
and unexposed.}
\item{washoutPeriod}{Integer to define required time observed before exposure start.}
\item{followupPeriod}{Integer to define required time observed after exposure start.}
\item{computeTarDistribution}{If TRUE, compute the distribution of time-at-risk and
average absolute time between treatment and outcome. Note,
may add significant computation time on some database
}
\description{
Create a parameter object for the function runSelfControlledCohort
}
\details{
Create an object defining the parameter values.
}
| /man/createRunSelfControlledCohortArgs.Rd | permissive | OHDSI/SelfControlledCohort | R | false | true | 2,894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateArgFunctions.R
\name{createRunSelfControlledCohortArgs}
\alias{createRunSelfControlledCohortArgs}
\title{Create a parameter object for the function runSelfControlledCohort}
\usage{
createRunSelfControlledCohortArgs(
firstExposureOnly = TRUE,
firstOutcomeOnly = TRUE,
minAge = "",
maxAge = "",
studyStartDate = "",
studyEndDate = "",
addLengthOfExposureExposed = TRUE,
riskWindowStartExposed = 1,
riskWindowEndExposed = 30,
addLengthOfExposureUnexposed = TRUE,
riskWindowEndUnexposed = -1,
riskWindowStartUnexposed = -30,
hasFullTimeAtRisk = FALSE,
washoutPeriod = 0,
followupPeriod = 0,
computeTarDistribution = FALSE
)
}
\arguments{
\item{firstExposureOnly}{If TRUE, only use first occurrence of each drug concept idfor
each person}
\item{firstOutcomeOnly}{If TRUE, only use first occurrence of each condition conceptid
for each person.}
\item{minAge}{Integer for minimum allowable age.}
\item{maxAge}{Integer for maximum allowable age.}
\item{studyStartDate}{Date for minimum allowable data for index exposure. Dateformat
is 'yyyymmdd'.}
\item{studyEndDate}{Date for maximum allowable data for index exposure. Dateformat
is 'yyyymmdd'.}
\item{addLengthOfExposureExposed}{If TRUE, use the duration from drugEraStart -> drugEraEnd
as part of timeAtRisk.}
\item{riskWindowStartExposed}{Integer of days to add to drugEraStart for start oftimeAtRisk
(0 to include index date, 1 to start the dayafter).}
\item{riskWindowEndExposed}{Additional window to add to end of exposure period
(ifaddLengthOfExposureExposed = TRUE, then add to exposure
enddate, else add to exposure start date).}
\item{addLengthOfExposureUnexposed}{If TRUE, use the duration from exposure start -> exposureend
as part of timeAtRisk looking back before exposurestart.}
\item{riskWindowEndUnexposed}{Integer of days to add to exposure start for end oftimeAtRisk
(0 to include index date, -1 to end the daybefore).}
\item{riskWindowStartUnexposed}{Additional window to add to start of exposure period
(ifaddLengthOfExposureUnexposed = TRUE, then add to
exposureend date, else add to exposure start date).}
\item{hasFullTimeAtRisk}{If TRUE, restrict to people who have full time-at-riskexposed
and unexposed.}
\item{washoutPeriod}{Integer to define required time observed before exposurestart.}
\item{followupPeriod}{Integer to define required time observed after exposurestart.}
\item{computeTarDistribution}{If TRUE, computer the distribution of time-at-risk and
average absolute time between treatment and outcome. Note,
may add significant computation time on some database
engines. If set true in one analysis will default to true for all others.}
}
\description{
Create a parameter object for the function runSelfControlledCohort
}
\details{
Create an object defining the parameter values.
}
|
library(segmented)
library(tidyverse)
library(changepoint.np)
library(cpm)
library(strucchange)
library(modelr)
library(dpseg)
# library(goepp/aspline)
#### Dataset 3
### Linear, 2 changepoints
## Generate X
xi_1 = 26
xi_2 = 70
xi = c(2,xi_1, xi_2)
n = 1000
sigma = 3
m_true = 2
X = seq(from=0, to=100, length.out = n)
X1 = X[X < xi_1]
X2 = X[X >= xi_1 & X < xi_2]
X3 = X[X >= xi_2]
y1 = -.6*X1 + 67.4
y2 = 2*X2
y3 = -.4*X3+168
y_true = c(y1,y2,y3)
eps = rnorm(n,0,sigma)
y = y_true + eps
df0 = tibble(X,y,y_true)
ggplot(df0) + geom_point(aes(X,y), color='gray') +
geom_line(aes(X,y_true), color='red')
#### Simulation Study
## Simulation study results: GA
# p.mut = .01
# max.itr = 150
# x.inc = 45
ga1 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_1_50_0.01_150_45')
ga2 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_51_100_0.01_150_45')
ga3 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_101_150_0.01_150_45')
ga4 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_151_200_0.01_150_45')
# ga20 = read_csv('data/sim_study/2_changepoints/ga/')
ga5 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_251_300_0.01_150_45')
ga6 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_301_350_0.01_150_45')
ga7 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_351_400_0.01_150_45')
ga8 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_401_450_0.01_150_45')
ga9 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_451_500_0.01_150_45')
ga10 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_501_550_0.01_150_45')
ga11 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_551_600_0.01_150_45')
ga12 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_601_650_0.01_150_45')
ga13 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_651_700_0.01_150_45')
ga14 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_701_751_0.01_150_45')
ga15 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_751_800_0.01_150_45')
ga16 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_801_850_0.01_150_45')
ga17 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_851_900_0.01_150_45')
ga18 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_901_950_0.01_150_45')
ga19 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_951_1000_0.01_150_45')
ga = rbind(ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,ga11,
ga12,ga13,ga14,ga15,ga16,ga17,ga18,ga19)
# Proportion where m==0
# Fraction of simulation runs whose estimated changepoint count `m`
# equals the true count `m_true`. Expects a data frame / tibble with
# a column named "m".
prop_correct <- function(df, m_true) {
  est <- df['m']                   # one-column frame of estimated counts
  hits <- est[est == m_true, ]     # rows matching the true count
  hits_n <- nrow(hits)
  hits_n / nrow(est)
}
prop_correct(ga,2)
ggplot(ga)+geom_histogram(aes(c1))
ggplot(ga)+geom_histogram(aes(c2))
temp = c(ga$c1,ga$c2)
hist(temp)
## Simulation study results: SG
sg1 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_1_50_0.01_150_45')
sg2 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_51_100_0.01_150_45')
sg3 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_101_150_0.01_150_45')
sg4 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_151_200_0.01_150_45')
# sg20 = read_csv('data/sim_study/2_changepoints/sg/')
sg5 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_251_300_0.01_150_45')
sg6 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_301_350_0.01_150_45')
sg7 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_351_400_0.01_150_45')
sg8 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_401_450_0.01_150_45')
sg9 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_451_500_0.01_150_45')
sg10 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_501_550_0.01_150_45')
sg11 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_551_600_0.01_150_45')
sg12 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_601_650_0.01_150_45')
sg13 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_651_700_0.01_150_45')
sg14 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_701_751_0.01_150_45')
sg15 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_751_800_0.01_150_45')
sg16 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_801_850_0.01_150_45')
sg17 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_851_900_0.01_150_45')
sg18 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_901_950_0.01_150_45')
sg19 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_951_1000_0.01_150_45')
sg = rbind(sg1,sg2,sg3,sg4,sg5,sg6,sg7,sg8,sg9,sg10,sg11,
sg12,sg13,sg14,sg15,sg16,sg17,sg18,sg19)
# Proportion where m==2
prop_correct(sg,2)
ggplot(sg)+geom_histogram(aes(c1))
ggplot(sg)+geom_histogram(aes(c2))
## Simulation study results: dp
dp1 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_1_50_0.01_150_45')
dp2 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_51_100_0.01_150_45')
dp3 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_101_150_0.01_150_45')
dp4 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_151_200_0.01_150_45')
# dp20 = read_csv('data/sim_study/2_changepoints/dp/')
dp5 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_251_300_0.01_150_45')
dp6 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_301_350_0.01_150_45')
dp7 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_351_400_0.01_150_45')
dp8 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_401_450_0.01_150_45')
dp9 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_451_500_0.01_150_45')
dp10 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_501_550_0.01_150_45')
dp11 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_551_600_0.01_150_45')
dp12 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_601_650_0.01_150_45')
dp13 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_651_700_0.01_150_45')
dp14 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_701_751_0.01_150_45')
dp15 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_751_800_0.01_150_45')
dp16 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_801_850_0.01_150_45')
dp17 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_851_900_0.01_150_45')
dp18 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_901_950_0.01_150_45')
dp19 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_951_1000_0.01_150_45')
dp = rbind(dp1,dp2,dp3,dp4,dp5,dp6,dp7,dp8,dp9,dp10,dp11,
dp12,dp13,dp14,dp15,dp16,dp17,dp18,dp19)
# Proportion where m==0
prop_correct = function(df,m_true){
m = df['m']
correct = m[m==m_true,]
good = nrow(correct)
total = nrow(m)
good/total
}
prop_correct(dp,2)
ggplot(dp)+geom_histogram(aes(c1))
ggplot(dp)+geom_histogram(aes(c2))
| /Results_2.R | no_license | claytonfields/simstudy | R | false | false | 7,082 | r | library(segmented)
library(tidyverse)
library(changepoint.np)
library(cpm)
library(strucchange)
library(modelr)
library(dpseg)
# library(goepp/aspline)
#### Dataset 3
### Linear, 2 changepoints
## Generate X
xi_1 = 26
xi_2 = 70
xi = c(2,xi_1, xi_2)
n = 1000
sigma = 3
m_true = 2
X = seq(from=0, to=100, length.out = n)
X1 = X[X < xi_1]
X2 = X[X >= xi_1 & X < xi_2]
X3 = X[X >= xi_2]
y1 = -.6*X1 + 67.4
y2 = 2*X2
y3 = -.4*X3+168
y_true = c(y1,y2,y3)
eps = rnorm(n,0,sigma)
y = y_true + eps
df0 = tibble(X,y,y_true)
ggplot(df0) + geom_point(aes(X,y), color='gray') +
geom_line(aes(X,y_true), color='red')
#### Simulation Study
## Simulation study results: GA
# p.mut = .01
# max.itr = 150
# x.inc = 45
ga1 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_1_50_0.01_150_45')
ga2 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_51_100_0.01_150_45')
ga3 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_101_150_0.01_150_45')
ga4 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_151_200_0.01_150_45')
# ga20 = read_csv('data/sim_study/2_changepoints/ga/')
ga5 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_251_300_0.01_150_45')
ga6 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_301_350_0.01_150_45')
ga7 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_351_400_0.01_150_45')
ga8 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_401_450_0.01_150_45')
ga9 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_451_500_0.01_150_45')
ga10 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_501_550_0.01_150_45')
ga11 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_551_600_0.01_150_45')
ga12 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_601_650_0.01_150_45')
ga13 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_651_700_0.01_150_45')
ga14 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_701_751_0.01_150_45')
ga15 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_751_800_0.01_150_45')
ga16 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_801_850_0.01_150_45')
ga17 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_851_900_0.01_150_45')
ga18 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_901_950_0.01_150_45')
ga19 = read_csv('data/sim_study/2_changepoints/ga/results_ga_2_v01_i_951_1000_0.01_150_45')
ga = rbind(ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,ga11,
ga12,ga13,ga14,ga15,ga16,ga17,ga18,ga19)
# Proportion where m==0
prop_correct = function(df,m_true){
m = df['m']
correct = m[m==m_true,]
good = nrow(correct)
total = nrow(m)
good/total
}
prop_correct(ga,2)
ggplot(ga)+geom_histogram(aes(c1))
ggplot(ga)+geom_histogram(aes(c2))
temp = c(ga$c1,ga$c2)
hist(temp)
## Simulation study results: SG
sg1 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_1_50_0.01_150_45')
sg2 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_51_100_0.01_150_45')
sg3 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_101_150_0.01_150_45')
sg4 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_151_200_0.01_150_45')
# sg20 = read_csv('data/sim_study/2_changepoints/sg/')
sg5 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_251_300_0.01_150_45')
sg6 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_301_350_0.01_150_45')
sg7 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_351_400_0.01_150_45')
sg8 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_401_450_0.01_150_45')
sg9 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_451_500_0.01_150_45')
sg10 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_501_550_0.01_150_45')
sg11 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_551_600_0.01_150_45')
sg12 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_601_650_0.01_150_45')
sg13 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_651_700_0.01_150_45')
sg14 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_701_751_0.01_150_45')
sg15 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_751_800_0.01_150_45')
sg16 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_801_850_0.01_150_45')
sg17 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_851_900_0.01_150_45')
sg18 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_901_950_0.01_150_45')
sg19 = read_csv('data/sim_study/2_changepoints/sg/results_sg_2_v01_i_951_1000_0.01_150_45')
sg = rbind(sg1,sg2,sg3,sg4,sg5,sg6,sg7,sg8,sg9,sg10,sg11,
sg12,sg13,sg14,sg15,sg16,sg17,sg18,sg19)
# Proportion where m==2
prop_correct(sg,2)
ggplot(sg)+geom_histogram(aes(c1))
ggplot(sg)+geom_histogram(aes(c2))
## Simulation study results: dp
dp1 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_1_50_0.01_150_45')
dp2 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_51_100_0.01_150_45')
dp3 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_101_150_0.01_150_45')
dp4 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_151_200_0.01_150_45')
# dp20 = read_csv('data/sim_study/2_changepoints/dp/')
dp5 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_251_300_0.01_150_45')
dp6 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_301_350_0.01_150_45')
dp7 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_351_400_0.01_150_45')
dp8 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_401_450_0.01_150_45')
dp9 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_451_500_0.01_150_45')
dp10 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_501_550_0.01_150_45')
dp11 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_551_600_0.01_150_45')
dp12 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_601_650_0.01_150_45')
dp13 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_651_700_0.01_150_45')
dp14 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_701_751_0.01_150_45')
dp15 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_751_800_0.01_150_45')
dp16 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_801_850_0.01_150_45')
dp17 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_851_900_0.01_150_45')
dp18 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_901_950_0.01_150_45')
dp19 = read_csv('data/sim_study/2_changepoints/dp/results_dp_2_v01_i_951_1000_0.01_150_45')
dp = rbind(dp1,dp2,dp3,dp4,dp5,dp6,dp7,dp8,dp9,dp10,dp11,
dp12,dp13,dp14,dp15,dp16,dp17,dp18,dp19)
# Proportion where m==0
prop_correct = function(df,m_true){
m = df['m']
correct = m[m==m_true,]
good = nrow(correct)
total = nrow(m)
good/total
}
prop_correct(dp,2)
ggplot(dp)+geom_histogram(aes(c1))
ggplot(dp)+geom_histogram(aes(c2))
|
\name{ConstantPortfolio}
\alias{ConstantPortfolio}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~]
Constant-weighted Portfolio
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
The function \code{ConstantPortfolio} creates an \code{fgp} object representing the constant-weighted portfolio with a given weight vector.
}
\usage{
ConstantPortfolio(weight)
}
\arguments{
\item{weight}{
%% ~~Describe \code{x} here~~
a numeric probability vector.
}
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
The constant-weighted portfolio is a functionally generated portfolio generated by the geometric mean (see \code{\link{GeometricMean}}). One example is the equal-weighted portfolio. The portfolio maintains the same weight in every period.
}
\value{
An \code{fgp} object.
}
\references{
%% ~put references to the literature/web site here ~
Fernholz, E. R. (2002) \emph{Stochastic portfolio theory}. Springer.
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{GeometricMean}}
}
\examples{
# Define the constant-weighted portfolio (0.2, 0.3, 0.5) for 3 stocks
portfolio <- ConstantPortfolio(c(0.2, 0.3, 0.5))
}
| /man/ConstantPortfolio.Rd | no_license | cran/RelValAnalysis | R | false | false | 1,349 | rd | \name{ConstantPortfolio}
\alias{ConstantPortfolio}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~]
Constant-weighted Portfolio
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
The function \code{ConstantPortfolio} is creates an \code{fgp} object representing the constant-weighted portfolio with a given weight vector.
}
\usage{
ConstantPortfolio(weight)
}
\arguments{
\item{weight}{
%% ~~Describe \code{x} here~~
a numeric probability vector.
}
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
The constant-weighetd portfolio is a functionally generated portfolio generated by the geometric mean (see \code{\link{GeometricMean}}). One example is the equal-weighted portfolio. The portfolio maintains the same weight in every period.
}
\value{
An \code{fgp} object.
}
\references{
%% ~put references to the literature/web site here ~
Fernholz, E. R. (2002) \emph{Stochastic portfolio theory}. Springer.
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{GeometricMean}}
}
\examples{
# Define the constant-weighted portfolio (0.2, 0.3, 0.5) for 3 stocks
portfolio <- ConstantPortfolio(c(0.2, 0.3, 0.5))
}
|
### Packages ----
# pacman::p_load() installs any missing package and attaches all of them.
library(pacman)
p_load(cowplot, extrafont, ggcal, ggrepel, grid, gridExtra, ineq, janitor, kableExtra, knitr, lubridate, readxl, rmarkdown, scales, sf, tidyverse, treemapify, wesanderson, zoo)
### General setup ----
# Spanish locale so month/weekday names parse and print in Spanish.
Sys.setlocale("LC_ALL", "es_ES.UTF-8")
# Heavy penalty against scientific notation: numbers print in full.
options(scipen = 9999)
theme_set(theme_gray())
### Define shared ggplot2 theme for all figures ----
# NOTE(review): custom fonts ("Didact Gothic Regular", "Trebuchet MS Bold")
# must be registered via extrafont on the host machine, otherwise plots fail.
tema <-
theme_minimal() +
theme(text = element_text(family = "Didact Gothic Regular", color = "grey35"),
plot.title = element_text(size = 28, face = "bold", margin = margin(10,0,20,0), family = "Trebuchet MS Bold", color = "grey25"),
plot.subtitle = element_text(size = 16, face = "bold", colour = "#666666", margin = margin(0, 0, 20, 0), family = "Didact Gothic Regular"),
plot.caption = element_text(hjust = 0, size = 15),
panel.grid = element_line(linetype = 2),
panel.grid.minor = element_blank(),
legend.position = "bottom",
legend.title = element_text(size = 14, face = "bold"),
legend.text = element_text(size = 12),
legend.title.align = 0.5,
axis.title = element_text(size = 14, hjust = 1, face = "bold", margin = margin(0,0,0,0)),
axis.text = element_text(size = 12),
strip.background = element_rect(color = "grey60", fill = "grey60"),
strip.text = element_text(color = "white", size = 14))
### Import investigation-file (carpetas de investigación) data ----
# Both date columns are read as character first because the raw file mixes
# two timestamp formats; they are parsed explicitly below.
bd <- read_delim("01_datos/adip/carpetas-de-investigacion-pgj-cdmx.csv",
";",
col_types = cols(fecha_hechos = col_character(),
fecha_inicio = col_character()),
escape_double = FALSE,
trim_ws = TRUE) %>%
clean_names() # standardize column names (snake_case, no accents)
# Print any parsing problems readr recorded, as a quick sanity check.
problems(bd)
### Fix mixed timestamp formats ----
# parse_date_time() tries each order in turn, handling both
# "YYYY-mm-dd HH:MM:SS" and "dd/mm/yy HH:MM" style values.
bd <-
bd %>%
mutate(fecha_hechos = parse_date_time(fecha_hechos,
orders = c("Ymd HMS", "d/m/y HM")),
fecha_inicio = parse_date_time(fecha_inicio,
orders = c("Ymd HMS", "d/m/y HM")))
### Rename variables ----
bd <-
bd %>%
rename(ano = ano_hechos,
mes = mes_hechos)
### Reorder the levels of the month variable (calendar order) ----
bd <-
bd %>%
mutate(mes = fct_relevel(mes,
"Enero", "Febrero", "Marzo", "Abril",
"Mayo", "Junio", "Julio", "Agosto",
"Septiembre", "Octubre", "Noviembre", "Diciembre"))
### Normalize capitalization: title case for boroughs, sentence case for crime fields ----
bd <-
bd %>%
mutate(alcaldia = str_to_title(alcaldia_hechos),
categoria_de_delito = str_to_sentence(categoria_delito),
delito = str_to_sentence(delito))
### Derive the weekday of the reported event and the day-of-year of each date ----
# dia_semana is taken from fecha_inicio (date the case file was opened) as a
# proxy for the date the alleged offense occurred.
bd <-
bd %>%
mutate(dia_semana = wday(fecha_inicio,
week_start = getOption("lubridate.week.start", 1), # week starts on Monday, not the Sunday default
locale = Sys.getlocale("LC_TIME")),
dia_ano = yday(fecha_inicio))
### Text version of the weekday on which the case file was opened ----
# Abbreviations stay in Spanish ("Lun" = Monday, ...) for plot labels.
bd <-
bd %>%
mutate(dia_semana_texto = case_when(dia_semana == 1 ~ "Lun",
dia_semana == 2 ~ "Mar",
dia_semana == 3 ~ "Mié",
dia_semana == 4 ~ "Jue",
dia_semana == 5 ~ "Vie",
dia_semana == 6 ~ "Sáb",
dia_semana == 7 ~ "Dom"),
dia_semana_texto = fct_relevel(dia_semana_texto, "Lun", "Mar", "Mié", "Jue", "Vie", "Sáb", "Dom"))
### Build a dataframe of alleged crimes committed inside CDMX boroughs ----
# Keeps only rows whose (title-cased, unaccented) borough name matches one of
# the 16 CDMX alcaldías; anything else (other states, typos) is dropped.
bd_cdmx <-
bd %>%
filter(alcaldia %in% c("Alvaro Obregon", "Azcapotzalco", "Benito Juarez", "Coyoacan", "Cuajimalpa De Morelos", "Cuauhtemoc", "Gustavo A Madero", "Iztacalco", "Iztapalapa", "La Magdalena Contreras", "Miguel Hidalgo", "Milpa Alta", "Tlahuac", "Tlalpan", "Venustiano Carranza", "Xochimilco"))
### Restore accents / canonical spelling of CDMX borough names ----
# case_when falls through to TRUE ~ alcaldia for names already correct.
bd_cdmx <-
bd_cdmx %>%
mutate(alcaldia = case_when(alcaldia == "Alvaro Obregon" ~ "Álvaro Obregón",
alcaldia == "Benito Juarez" ~ "Benito Juárez",
alcaldia == "Coyoacan" ~ "Coyoacán",
alcaldia == "Cuajimalpa De Morelos" ~ "Cuajimalpa",
alcaldia == "Cuauhtemoc" ~ "Cuauhtémoc",
alcaldia == "Gustavo A Madero" ~ "Gustavo A. Madero",
alcaldia == "La Magdalena Contreras" ~ "Magdalena Contreras",
alcaldia == "Tlahuac" ~ "Tláhuac",
TRUE ~ alcaldia))
### Build a human-readable text version of the case-opening date ----
# e.g. "3 de enero de 2018".
bd_cdmx <-
bd_cdmx %>%
mutate(dia = day(fecha_inicio),
fecha_texto = str_c(dia, "de", str_to_lower(mes), "de", ano, sep = " "))
### Insert commas into some crime-category names ----
# Order matters: the longer pattern " con y sin v" is replaced first so the
# second replacement does not double-insert a comma.
bd_cdmx <-
bd_cdmx %>%
mutate(categoria_de_delito = str_replace(categoria_de_delito, " con y sin v", ", con y sin v"),
categoria_de_delito = str_replace(categoria_de_delito, " con v", ", con v"))
### Keep only case files with timestamps between 2016-01-01 and 2019-05-31 ----
bd_cdmx <-
bd_cdmx %>%
filter(ano > 2015,
fecha_inicio < as_datetime("2019-06-01 00:00:00"))
library(pacman)
p_load(cowplot, extrafont, ggcal, ggrepel, grid, gridExtra, ineq, janitor, kableExtra, knitr, lubridate, readxl, rmarkdown, scales, sf, tidyverse, treemapify, wesanderson, zoo)
### Setup general ----
Sys.setlocale("LC_ALL", "es_ES.UTF-8")
options(scipen = 9999)
theme_set(theme_gray())
### Definir tema de gráficas ----
tema <-
theme_minimal() +
theme(text = element_text(family = "Didact Gothic Regular", color = "grey35"),
plot.title = element_text(size = 28, face = "bold", margin = margin(10,0,20,0), family = "Trebuchet MS Bold", color = "grey25"),
plot.subtitle = element_text(size = 16, face = "bold", colour = "#666666", margin = margin(0, 0, 20, 0), family = "Didact Gothic Regular"),
plot.caption = element_text(hjust = 0, size = 15),
panel.grid = element_line(linetype = 2),
panel.grid.minor = element_blank(),
legend.position = "bottom",
legend.title = element_text(size = 14, face = "bold"),
legend.text = element_text(size = 12),
legend.title.align = 0.5,
axis.title = element_text(size = 14, hjust = 1, face = "bold", margin = margin(0,0,0,0)),
axis.text = element_text(size = 12),
strip.background = element_rect(color = "grey60", fill = "grey60"),
strip.text = element_text(color = "white", size = 14))
### Importar datos de carpetas de investigación ----
bd <- read_delim("01_datos/adip/carpetas-de-investigacion-pgj-cdmx.csv",
";",
col_types = cols(fecha_hechos = col_character(),
fecha_inicio = col_character()),
escape_double = FALSE,
trim_ws = TRUE) %>%
clean_names() # "Limpiar" nombre de columnas
problems(bd)
### Corregir problemas con formatos en fechas ----
bd <-
bd %>%
mutate(fecha_hechos = parse_date_time(fecha_hechos,
orders = c("Ymd HMS", "d/m/y HM")),
fecha_inicio = parse_date_time(fecha_inicio,
orders = c("Ymd HMS", "d/m/y HM")))
### Renombrar variables ----
bd <-
bd %>%
rename(ano = ano_hechos,
mes = mes_hechos)
### Reordenar niveles de la variable mes ----
bd <-
bd %>%
mutate(mes = fct_relevel(mes,
"Enero", "Febrero", "Marzo", "Abril",
"Mayo", "Junio", "Julio", "Agosto",
"Septiembre", "Octubre", "Noviembre", "Diciembre"))
### Cambiar valores de diversas variables a mayúscula (primera letra) y nminúsculas (resto de las letras) ----
bd <-
bd %>%
mutate(alcaldia = str_to_title(alcaldia_hechos),
categoria_de_delito = str_to_sentence(categoria_delito),
delito = str_to_sentence(delito))
### Generar variable para registrar en qué día de la semana ocurrió el hecho denunciado, y otra para registrar el día del año al que corresponde cada día calendario ----
# Tomamos dia_semana como proxy de la fecha en que ocurrió el presunto delito
bd <-
bd %>%
mutate(dia_semana = wday(fecha_inicio,
week_start = getOption("lubridate.week.start", 1), # Especificar que semana empieza en lunes, no en domingo (el default)
locale = Sys.getlocale("LC_TIME")),
dia_ano = yday(fecha_inicio))
### Generar versión en texto del día de la semana se abrió la carpeta de investigación ----
bd <-
bd %>%
mutate(dia_semana_texto = case_when(dia_semana == 1 ~ "Lun",
dia_semana == 2 ~ "Mar",
dia_semana == 3 ~ "Mié",
dia_semana == 4 ~ "Jue",
dia_semana == 5 ~ "Vie",
dia_semana == 6 ~ "Sáb",
dia_semana == 7 ~ "Dom"),
dia_semana_texto = fct_relevel(dia_semana_texto, "Lun", "Mar", "Mié", "Jue", "Vie", "Sáb", "Dom"))
### Generar dataframe con presuntos delitos cometidos en las alcaldías de la CDMX ----
bd_cdmx <-
bd %>%
filter(alcaldia %in% c("Alvaro Obregon", "Azcapotzalco", "Benito Juarez", "Coyoacan", "Cuajimalpa De Morelos", "Cuauhtemoc", "Gustavo A Madero", "Iztacalco", "Iztapalapa", "La Magdalena Contreras", "Miguel Hidalgo", "Milpa Alta", "Tlahuac", "Tlalpan", "Venustiano Carranza", "Xochimilco"))
### Corregir nombres de alcaldías de la CDMX ----
bd_cdmx <-
bd_cdmx %>%
mutate(alcaldia = case_when(alcaldia == "Alvaro Obregon" ~ "Álvaro Obregón",
alcaldia == "Benito Juarez" ~ "Benito Juárez",
alcaldia == "Coyoacan" ~ "Coyoacán",
alcaldia == "Cuajimalpa De Morelos" ~ "Cuajimalpa",
alcaldia == "Cuauhtemoc" ~ "Cuauhtémoc",
alcaldia == "Gustavo A Madero" ~ "Gustavo A. Madero",
alcaldia == "La Magdalena Contreras" ~ "Magdalena Contreras",
alcaldia == "Tlahuac" ~ "Tláhuac",
TRUE ~ alcaldia))
### Construir una versión de la fecha de inicio de las carpetas en texto ----
bd_cdmx <-
bd_cdmx %>%
mutate(dia = day(fecha_inicio),
fecha_texto = str_c(dia, "de", str_to_lower(mes), "de", ano, sep = " "))
### Agregar comas a algunos nombres de categorías de delito ----
bd_cdmx <-
bd_cdmx %>%
mutate(categoria_de_delito = str_replace(categoria_de_delito, " con y sin v", ", con y sin v"),
categoria_de_delito = str_replace(categoria_de_delito, " con v", ", con v"))
### Filtrar datos para solo mantener CI de delitos con timestamp entre el 1 de enero de 2016 y el 31 de mayo de 2019 ----
bd_cdmx <-
bd_cdmx %>%
filter(ano > 2015,
fecha_inicio < as_datetime("2019-06-01 00:00:00")) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sptensor-group-generics.r
\docType{methods}
\name{sptensor-Summary}
\alias{sptensor-Summary}
\alias{Summary,sptensor-method}
\title{Summary Methods for sparse tensors}
\usage{
\S4method{Summary}{sptensor}(x, ..., na.rm = FALSE)
}
\arguments{
\item{x}{sparse tensor}
\item{...}{further arguments passed to or from methods.}
\item{na.rm}{logical: should missing values be removed?}
}
\description{
Summary Methods for sparse tensors
}
\keyword{internal}
| /man/sptensor-Summary.Rd | no_license | zamorarr/tensorr | R | false | true | 532 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sptensor-group-generics.r
\docType{methods}
\name{sptensor-Summary}
\alias{sptensor-Summary}
\alias{Summary,sptensor-method}
\title{Summary Methods for sparse tensors}
\usage{
\S4method{Summary}{sptensor}(x, ..., na.rm = FALSE)
}
\arguments{
\item{x}{sparse tensor}
\item{...}{further arguments passed to or from methods.}
\item{na.rm}{logical: should missing values be removed?}
}
\description{
Summary Methods for sparse tensors
}
\keyword{internal}
|
# Fit an elastic-net regression (alpha = 0.6) on the ReliefF-selected breast
# dataset and append the cross-validated fit summary to breast_067.txt.
library(glmnet)
# header = TRUE spelled out: the original relied on partial argument matching
# ("head = T") and the reassignable shortcut T.
mydata <- read.table("../../../../TrainingSet/FullSet/ReliefF/breast.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)]) # predictors: columns 4..end
y <- as.matrix(mydata[, 1])              # response: first column
set.seed(123)                            # reproducible CV fold assignment
# 10-fold CV with mean-absolute-error loss; named 'fit' to avoid masking stats::glm.
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.6, family = "gaussian", standardize = TRUE)
# capture.output() cannot leave a sink open if print() errors, unlike the
# original sink()/print()/sink() sequence.
capture.output(print(fit$glmnet.fit), file = "./breast_067.txt", append = TRUE)
| /Model/EN/ReliefF/breast/breast_067.R | no_license | esbgkannan/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.6,family="gaussian",standardize=TRUE)
sink('./breast_067.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# libFuzzer-generated regression input for grattan:::anyOutside (valgrind run).
# The NA entries and extreme integers in x are the interesting fuzzed values.
testlist <- list(a = -1L, b = -215L, x = c(-1L, -63998L, -855638017L, -1L, 1073686312L, 117438463L, -65536L, 0L, 505085951L, -1L, -16776961L, 116850943L, -16383488L, 1703936L, NA, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, NA, -16056321L, -230L, 436207616L, 0L, 0L, 0L, 219L, -620756992L, 0L, 0L, 0L, 6656L, -606381440L, 1510005723L, 1515870810L, 1515870810L, 1515870810L, 1515913215L, 105126655L, -1L, -12713984L, -65281L, -43521L))
# Invoke the internal function with the fuzzed argument list.
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131521-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 533 | r | testlist <- list(a = -1L, b = -215L, x = c(-1L, -63998L, -855638017L, -1L, 1073686312L, 117438463L, -65536L, 0L, 505085951L, -1L, -16776961L, 116850943L, -16383488L, 1703936L, NA, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, NA, -16056321L, -230L, 436207616L, 0L, 0L, 0L, 219L, -620756992L, 0L, 0L, 0L, 6656L, -606381440L, 1510005723L, 1515870810L, 1515870810L, 1515870810L, 1515913215L, 105126655L, -1L, -12713984L, -65281L, -43521L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
#===============================================================================
# Name   :
# Author : Jorge Flores
# Date   :
# Version:
# Aim    : Absolute larval transport/retention between Sechura Bay and Lobos
#          de Tierra Island, from ichthyop recruitment output (RUN2).
# URL    :
#===============================================================================
source('source/recruitment_day.R')
dirpath <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
out_path <- 'F:/ichthyop_output_analysis/RUN2/figures/recruited/'
### NUMBER OF LARVAE IN EACH ZONE
dat <- read.table('F:/TESIS/TESIS_MAESTRIA/concha_sechura_lobos.csv', sep = ';', header = TRUE)
#----- Population in each zone---------------#
# Populations are recorded in millions of individuals; Sechura uses the
# maximum observed value, Lobos the mean.
pop_sechura <- max(dat$Sechura_poblacion, na.rm = TRUE) # millions of individuals
pop_lobos <- mean(dat$Lobos_poblacion, na.rm = TRUE) # millions of individuals
#-------Number of Larvae in each zone-----------#
# 2e5 larvae per individual, population scaled from millions (1e6).
# NOTE(review): the trailing comment mentions a fecundity of 4 * 10^6 but the
# code multiplies by 2 * 10^5 — confirm which value is intended.
larvae_sechura <- 2 * 10^5 * pop_sechura * 10^6 # 4 * 10^6 Fecundity
larvae_lobos <- 2 * 10^5 * pop_lobos * 10^6 # 4 * 10^6 Fecundity
#### Total recruited larvae per simulation (daily-wind runs) ####
# The original computed each total with a four-fold copy-paste of
# read.table / recruitment_day / scale / sum; one helper does the same work.
# (The monthly-wind "clim_*" runs were already commented out and are dropped.)
#
# Reads one recruitment CSV from dirpath, converts the daily recruitment
# fractions (column 1 of recruitment_day()'s result) into absolute larval
# numbers and returns their total over the simulation.
total_recruited <- function(csv_name, larvae_total) {
  rec <- read.table(paste0(dirpath, csv_name), sep = '', header = TRUE)
  sum(recruitment_day(rec)[, 1] * larvae_total)
}
dat2 <- total_recruited('daily_sechura_lobos.csv', larvae_sechura) # transport Sechura -> Lobos
dat4 <- total_recruited('daily_lobos_sechura.csv', larvae_lobos)   # transport Lobos -> Sechura
dat6 <- total_recruited('daily_sechura.csv', larvae_sechura)       # retention in Sechura
dat8 <- total_recruited('daily_lobos.csv', larvae_lobos)           # retention in Lobos
###
# Scale to billions of larvae. rbind() turns each scalar into a 1-row matrix
# so the plotting code below can index with [, 1].
sechura_lobos <- rbind(dat2)/10^9
lobos_sechura <- rbind(dat4)/10^9
sechura <- rbind(dat6)/10^9
lobos <- rbind(dat8)/10^9
# png(paste0(out_path, 'plot_recruitment_by_area_comparison_RUN2_absolute_number.png') ,width = 1050 , height = 850 , res=120)
### plot
# 2x2 panel layout: transport Sechura->Lobos, transport Lobos->Sechura,
# retention Sechura, retention Lobos.
par(mfrow = c(2,2))
col_bars <- c('grey20','grey80')
labels <- c('Daily Winds', 'Monthly Winds')
# NOTE(review): this extra 4-bar plot consumes the first mfrow panel, so the
# fourth named panel below spills onto a new page — confirm it is intentional.
bars <- barplot(c(lobos, sechura_lobos, sechura, lobos_sechura),
col = col_bars)
#-------Sechura - Lobos -------#
ymax <- 19000
# NOTE(review): with only the daily-wind totals active, each series is a
# single bar, but `labels` has two entries — axis() below will fail with
# "'at' and 'labels' lengths differ". The commented backup version plotted
# both daily and monthly runs; confirm which layout is intended.
sechura_lobos_plot <- barplot(sechura_lobos[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(sechura_lobos_plot,100*(sechura_lobos[,2]+sechura_lobos[,3]),
# sechura_lobos_plot,100*(sechura_lobos[,2]-sechura_lobos[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# mtext(ylab, side=2, line=2.5 , cex=1.2)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars, cex = 0.5)
# TODO: look into bquote() as an alternative for building these expressions
Lines <- list('Larval Transport', 'Sechura - Lobos')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Lobos - Sechura -------#
ymax <- ymax
lobos_sechura_plot <- barplot(lobos_sechura[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(lobos_sechura_plot,100*(lobos_sechura[,2]+lobos_sechura[,3]),
# lobos_sechura_plot,100*(lobos_sechura[,2]-lobos_sechura[,3]),
# angle=90,code=3,length=0.05)
# Reuses the bar positions from the first panel (identical geometry).
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# TODO: look into bquote() as an alternative for building these expressions
Lines <- list('Larval Transport', 'Lobos - Sechura')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Sechura -------#
# Retention panels use a larger y-scale than the transport panels.
ymax <- 460000
sechura_plot <- barplot(sechura[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(sechura_plot,100*(sechura[,2]+sechura[,3]),
# sechura_plot,100*(sechura[,2]-sechura[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# TODO: look into bquote() as an alternative for building these expressions
Lines <- list('Larval Retention', 'Sechura Bay')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Lobos -------#
ymax <- ymax
lobos_plot <- barplot(lobos[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(lobos_plot,100*(lobos[,2]+lobos[,3]),
# lobos_plot,100*(lobos[,2]-lobos[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# TODO: look into bquote() as an alternative for building these expressions
Lines <- list('Larval Retention', 'Lobos de Tierra Island')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
# dev.off()
#---- back up ---------#
# source('source/recruitment_day.R')
#
# dirpath <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
# out_path <- 'F:/ichthyop_output_analysis/RUN2/figures/recruited/'
#
# ### NUEMERO DE LARVAS EN CADA ZONA
# dat <- read.table('F:/TESIS/TESIS_MAESTRIA/concha_sechura_lobos.csv', sep = ';', header = T)
#
# #----- Population in each zone---------------#
# pop_sechura <- max(dat$Sechura_poblacion, na.rm = T) # millones de individuos
# pop_lobos <- mean(dat$Lobos_poblacion, na.rm = T) # millones de individuos
#
# #-------Larvae number in each zone-----------#
# larvae_sechura <- 2 * 10^5 * pop_sechura * 10^6 # 4 * 10^6 Fecundity
# larvae_lobos <- 2 * 10^5 * pop_lobos * 10^6 # 4 * 10^6 Fecundity
#
# #### #### ###
# dat1 <- read.table(paste0(dirpath, 'clim_sechura_lobos.csv'), sep = '', header = T)
# dat2 <- read.table(paste0(dirpath, 'daily_sechura_lobos.csv'), sep = '', header = T)
# dat1 <- recruitment_day(dat1); dat1 <- dat1[,1] * larvae_sechura; dat1 <- sum(dat1)
# dat2 <-recruitment_day(dat2) ; dat2 <- dat2[,1] * larvae_sechura; dat2 <- sum(dat2)
#
# dat3 <- read.table(paste0(dirpath, 'clim_lobos_sechura.csv'), sep = '', header = T)
# dat4 <- read.table(paste0(dirpath, 'daily_lobos_sechura.csv'), sep = '', header = T)
# dat3 <- recruitment_day(dat3) ; dat3 <- dat3[,1] * larvae_lobos; dat3 <- sum(dat3)
# dat4 <-recruitment_day(dat4) ; dat4 <- dat4[,1] * larvae_lobos; dat4 <- sum(dat4)
#
# dat5 <- read.table(paste0(dirpath, 'clim_sechura.csv'), sep = '', header = T)
# dat6 <- read.table(paste0(dirpath, 'daily_sechura.csv'), sep = '', header = T)
# dat5 <- recruitment_day(dat5) ; dat5 <- dat5[,1] * larvae_sechura; dat5 <- sum(dat5)
# dat6 <- recruitment_day(dat6) ; dat6 <- dat6[,1] * larvae_sechura; dat6 <- sum(dat6)
#
# dat7 <- read.table(paste0(dirpath, 'clim_lobos.csv'), sep = '', header = T)
# dat8 <- read.table(paste0(dirpath, 'daily_lobos.csv'), sep = '', header = T)
# dat7 <- recruitment_day(dat7) ; dat7 <- dat7[,1] * larvae_lobos; dat7 <- sum(dat7)
# dat8 <- recruitment_day(dat8) ; dat8 <- dat8[,1] * larvae_lobos; dat8 <- sum(dat8)
#
# ###
# sechura_lobos <- rbind(dat2, dat1)/10^9
# lobos_sechura <- rbind(dat4, dat3)/10^9
# sechura <- rbind(dat6, dat5)/10^9
# lobos <- rbind(dat8, dat7)/10^9
#
# # png(paste0(out_path, 'plot_recruitment_by_area_comparison_RUN2_absolute_number.png') ,width = 1050 , height = 850 , res=120)
# ### plot
# par(mfrow = c(2,2))
# col_bars <- c('grey20','grey80')
# labels <- c('Daily Winds', 'Monthly Winds')
#
# #-------Sechura - Lobos -------#
# ymax <- 19000
#
# sechura_lobos_plot <- barplot(sechura_lobos[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(sechura_lobos_plot,100*(sechura_lobos[,2]+sechura_lobos[,3]),
# # sechura_lobos_plot,100*(sechura_lobos[,2]-sechura_lobos[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# # mtext(ylab, side=2, line=2.5 , cex=1.2)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars, cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Transport', 'Sechura - Lobos')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Lobos - Sechura -------#
# ymax <- ymax
# lobos_sechura_plot <- barplot(lobos_sechura[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(lobos_sechura_plot,100*(lobos_sechura[,2]+lobos_sechura[,3]),
# # lobos_sechura_plot,100*(lobos_sechura[,2]-lobos_sechura[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Transport', 'Lobos - Sechura')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Sechura -------#
# ymax <- 460000
# sechura_plot <- barplot(sechura[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(sechura_plot,100*(sechura[,2]+sechura[,3]),
# # sechura_plot,100*(sechura[,2]-sechura[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Retention', 'Sechura Bay')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Lobos -------#
# ymax <- ymax
# lobos_plot <- barplot(lobos[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(lobos_plot,100*(lobos[,2]+lobos[,3]),
# # lobos_plot,100*(lobos[,2]-lobos[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Retention', 'Lobos de Tierra Island')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# # dev.off()
#===============================================================================
# END OF PROGRAM
#===============================================================================
| /R/special_scripts/plot_recruitment_by_area_comparison_winds_absolute_number.R | no_license | jfloresvaliente/ichthyop_analysis | R | false | false | 11,882 | r | #===============================================================================
# Name :
# Author : Jorge Flores
# Date :
# Version:
# Aim :
# URL :
#===============================================================================
source('source/recruitment_day.R')
dirpath <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
out_path <- 'F:/ichthyop_output_analysis/RUN2/figures/recruited/'
### NUEMERO DE LARVAS EN CADA ZONA
dat <- read.table('F:/TESIS/TESIS_MAESTRIA/concha_sechura_lobos.csv', sep = ';', header = T)
#----- Population in each zone---------------#
pop_sechura <- max(dat$Sechura_poblacion, na.rm = T) # millones de individuos
pop_lobos <- mean(dat$Lobos_poblacion, na.rm = T) # millones de individuos
#-------Number of Larvae in each zone-----------#
larvae_sechura <- 2 * 10^5 * pop_sechura * 10^6 # 4 * 10^6 Fecundity
larvae_lobos <- 2 * 10^5 * pop_lobos * 10^6 # 4 * 10^6 Fecundity
#### #### ###
# dat1 <- read.table(paste0(dirpath, 'clim_sechura_lobos.csv'), sep = '', header = T)
dat2 <- read.table(paste0(dirpath, 'daily_sechura_lobos.csv'), sep = '', header = T)
# dat1 <- recruitment_day(dat1); dat1 <- dat1[,1] * larvae_sechura; dat1 <- sum(dat1)
dat2 <- recruitment_day(dat2) ; dat2 <- dat2[,1] * larvae_sechura; dat2 <- sum(dat2)
# dat3 <- read.table(paste0(dirpath, 'clim_lobos_sechura.csv'), sep = '', header = T)
dat4 <- read.table(paste0(dirpath, 'daily_lobos_sechura.csv'), sep = '', header = T)
# dat3 <- recruitment_day(dat3) ; dat3 <- dat3[,1] * larvae_lobos; dat3 <- sum(dat3)
dat4 <-recruitment_day(dat4) ; dat4 <- dat4[,1] * larvae_lobos; dat4 <- sum(dat4)
# dat5 <- read.table(paste0(dirpath, 'clim_sechura.csv'), sep = '', header = T)
dat6 <- read.table(paste0(dirpath, 'daily_sechura.csv'), sep = '', header = T)
# dat5 <- recruitment_day(dat5) ; dat5 <- dat5[,1] * larvae_sechura; dat5 <- sum(dat5)
dat6 <- recruitment_day(dat6) ; dat6 <- dat6[,1] * larvae_sechura; dat6 <- sum(dat6)
# dat7 <- read.table(paste0(dirpath, 'clim_lobos.csv'), sep = '', header = T)
dat8 <- read.table(paste0(dirpath, 'daily_lobos.csv'), sep = '', header = T)
# dat7 <- recruitment_day(dat7) ; dat7 <- dat7[,1] * larvae_lobos; dat7 <- sum(dat7)
dat8 <- recruitment_day(dat8) ; dat8 <- dat8[,1] * larvae_lobos; dat8 <- sum(dat8)
###
sechura_lobos <- rbind(dat2)/10^9
lobos_sechura <- rbind(dat4)/10^9
sechura <- rbind(dat6)/10^9
lobos <- rbind(dat8)/10^9
# png(paste0(out_path, 'plot_recruitment_by_area_comparison_RUN2_absolute_number.png') ,width = 1050 , height = 850 , res=120)
### plot
par(mfrow = c(2,2))
col_bars <- c('grey20','grey80')
labels <- c('Daily Winds', 'Monthly Winds')
bars <- barplot(c(lobos, sechura_lobos, sechura, lobos_sechura),
col = col_bars)
#-------Sechura - Lobos -------#
ymax <- 19000
sechura_lobos_plot <- barplot(sechura_lobos[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(sechura_lobos_plot,100*(sechura_lobos[,2]+sechura_lobos[,3]),
# sechura_lobos_plot,100*(sechura_lobos[,2]-sechura_lobos[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# mtext(ylab, side=2, line=2.5 , cex=1.2)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars, cex = 0.5)
# bquote aprender esta funcion
Lines <- list('Larval Transport', 'Sechura - Lobos')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Lobos - Sechura -------#
ymax <- ymax
lobos_sechura_plot <- barplot(lobos_sechura[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(lobos_sechura_plot,100*(lobos_sechura[,2]+lobos_sechura[,3]),
# lobos_sechura_plot,100*(lobos_sechura[,2]-lobos_sechura[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# bquote aprender esta funcion
Lines <- list('Larval Transport', 'Lobos - Sechura')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Sechura -------#
ymax <- 460000
sechura_plot <- barplot(sechura[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(sechura_plot,100*(sechura[,2]+sechura[,3]),
# sechura_plot,100*(sechura[,2]-sechura[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# bquote aprender esta funcion
Lines <- list('Larval Retention', 'Sechura Bay')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#-------Lobos -------#
ymax <- ymax
lobos_plot <- barplot(lobos[,1], xlab="" ,ylim = c(0,ymax),
axes = FALSE, axisnames = FALSE, col = col_bars)
mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# arrows(lobos_plot,100*(lobos[,2]+lobos[,3]),
# lobos_plot,100*(lobos[,2]-lobos[,3]),
# angle=90,code=3,length=0.05)
axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
axis(2, lwd = 3, cex.axis = 1.4)
# legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# bquote aprender esta funcion
Lines <- list('Larval Retention', 'Lobos de Tierra Island')
mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
# dev.off()
#---- back up ---------#
# source('source/recruitment_day.R')
#
# dirpath <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
# out_path <- 'F:/ichthyop_output_analysis/RUN2/figures/recruited/'
#
# ### NUEMERO DE LARVAS EN CADA ZONA
# dat <- read.table('F:/TESIS/TESIS_MAESTRIA/concha_sechura_lobos.csv', sep = ';', header = T)
#
# #----- Population in each zone---------------#
# pop_sechura <- max(dat$Sechura_poblacion, na.rm = T) # millones de individuos
# pop_lobos <- mean(dat$Lobos_poblacion, na.rm = T) # millones de individuos
#
# #-------Larvae number in each zone-----------#
# larvae_sechura <- 2 * 10^5 * pop_sechura * 10^6 # 4 * 10^6 Fecundity
# larvae_lobos <- 2 * 10^5 * pop_lobos * 10^6 # 4 * 10^6 Fecundity
#
# #### #### ###
# dat1 <- read.table(paste0(dirpath, 'clim_sechura_lobos.csv'), sep = '', header = T)
# dat2 <- read.table(paste0(dirpath, 'daily_sechura_lobos.csv'), sep = '', header = T)
# dat1 <- recruitment_day(dat1); dat1 <- dat1[,1] * larvae_sechura; dat1 <- sum(dat1)
# dat2 <-recruitment_day(dat2) ; dat2 <- dat2[,1] * larvae_sechura; dat2 <- sum(dat2)
#
# dat3 <- read.table(paste0(dirpath, 'clim_lobos_sechura.csv'), sep = '', header = T)
# dat4 <- read.table(paste0(dirpath, 'daily_lobos_sechura.csv'), sep = '', header = T)
# dat3 <- recruitment_day(dat3) ; dat3 <- dat3[,1] * larvae_lobos; dat3 <- sum(dat3)
# dat4 <-recruitment_day(dat4) ; dat4 <- dat4[,1] * larvae_lobos; dat4 <- sum(dat4)
#
# dat5 <- read.table(paste0(dirpath, 'clim_sechura.csv'), sep = '', header = T)
# dat6 <- read.table(paste0(dirpath, 'daily_sechura.csv'), sep = '', header = T)
# dat5 <- recruitment_day(dat5) ; dat5 <- dat5[,1] * larvae_sechura; dat5 <- sum(dat5)
# dat6 <- recruitment_day(dat6) ; dat6 <- dat6[,1] * larvae_sechura; dat6 <- sum(dat6)
#
# dat7 <- read.table(paste0(dirpath, 'clim_lobos.csv'), sep = '', header = T)
# dat8 <- read.table(paste0(dirpath, 'daily_lobos.csv'), sep = '', header = T)
# dat7 <- recruitment_day(dat7) ; dat7 <- dat7[,1] * larvae_lobos; dat7 <- sum(dat7)
# dat8 <- recruitment_day(dat8) ; dat8 <- dat8[,1] * larvae_lobos; dat8 <- sum(dat8)
#
# ###
# sechura_lobos <- rbind(dat2, dat1)/10^9
# lobos_sechura <- rbind(dat4, dat3)/10^9
# sechura <- rbind(dat6, dat5)/10^9
# lobos <- rbind(dat8, dat7)/10^9
#
# # png(paste0(out_path, 'plot_recruitment_by_area_comparison_RUN2_absolute_number.png') ,width = 1050 , height = 850 , res=120)
# ### plot
# par(mfrow = c(2,2))
# col_bars <- c('grey20','grey80')
# labels <- c('Daily Winds', 'Monthly Winds')
#
# #-------Sechura - Lobos -------#
# ymax <- 19000
#
# sechura_lobos_plot <- barplot(sechura_lobos[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(sechura_lobos_plot,100*(sechura_lobos[,2]+sechura_lobos[,3]),
# # sechura_lobos_plot,100*(sechura_lobos[,2]-sechura_lobos[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# # mtext(ylab, side=2, line=2.5 , cex=1.2)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars, cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Transport', 'Sechura - Lobos')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Lobos - Sechura -------#
# ymax <- ymax
# lobos_sechura_plot <- barplot(lobos_sechura[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(lobos_sechura_plot,100*(lobos_sechura[,2]+lobos_sechura[,3]),
# # lobos_sechura_plot,100*(lobos_sechura[,2]-lobos_sechura[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Transport', 'Lobos - Sechura')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Sechura -------#
# ymax <- 460000
# sechura_plot <- barplot(sechura[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(sechura_plot,100*(sechura[,2]+sechura[,3]),
# # sechura_plot,100*(sechura[,2]-sechura[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Retention', 'Sechura Bay')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# #-------Lobos -------#
# ymax <- ymax
# lobos_plot <- barplot(lobos[,1], xlab="" ,ylim = c(0,ymax),
# axes = FALSE, axisnames = FALSE, col = col_bars)
# mtext(text = 'Billions of larvae', side = 2, lwd = 3, line = 2.3)
# # arrows(lobos_plot,100*(lobos[,2]+lobos[,3]),
# # lobos_plot,100*(lobos[,2]-lobos[,3]),
# # angle=90,code=3,length=0.05)
# axis(1, at=sechura_lobos_plot, labels = labels, tick = FALSE)
# axis(2, lwd = 3, cex.axis = 1.4)
# # legend('topright', legend = c('Daily Winds', 'Monthly Winds'), bty = 'n', fill = col_bars,cex = 0.5)
# # bquote aprender esta funcion
# Lines <- list('Larval Retention', 'Lobos de Tierra Island')
# mtext(do.call(expression, Lines), side=3, line=0:1 , cex=0.9, adj = 1)
#
# # dev.off()
#===============================================================================
# END OF PROGRAM
#===============================================================================
|
# Reactive container shared across the "Load Data" module: holds the key
# configuration, the parsed RCC metadata, and the resulting expression
# set / raw count matrix.
load.data.values <- reactiveValues()
# Read the RCC/RLF paths and key configuration from the Shiny `input`,
# extract the RCC metadata and count matrix, publish the results to the
# global environment (consumed by other DEAKit scripts), and render the
# preview tables. Runs inside a busy indicator tied to "loadDataButton".
load.data.core <- function() {
withBusyIndicatorServer("loadDataButton", {
# Snapshot the current UI inputs.
rcc.path <- input$rcc.path
rlf.path <- input$rlf.path
key.of.interest <- input$key.of.interest
label.of.key <- input$label.of.key
key.value.1 <- input$key.value.1
key.value.2 <- input$key.value.2
# Setup: build the key names "key1".."keyN"; the last key is the one of
# interest. paste0() is vectorized, so no lapply()/unlist() round-trip
# is needed (same unnamed character vector as before).
load.data.values$keys.vector <- reactive(paste0("key", seq_len(key.of.interest)))
load.data.values$key.label <- reactive(tail(load.data.values$keys.vector(), 1))
load.data.values$values.of.key.of.interest <- reactive(c(key.value.1, key.value.2))
load.data.values$label.of.interest <- reactive(label.of.key)
# Load data + extract per-sample metadata from the RCC files.
load.data.values$metadata <- reactive(extract.rcc.metadata(rcc.path, load.data.values$keys.vector(), load.data.values$values.of.key.of.interest()))
# Retrieve the expression set and count matrix (computed eagerly once,
# unlike the two reactive accessors below).
load.data.values$rcc.set.and.count.matrix <- get.RCC.set.and.counts(load.data.values$metadata(), rcc.path, rlf.path)
load.data.values$eset <- reactive(load.data.values$rcc.set.and.count.matrix$set)
load.data.values$counts <- reactive(load.data.values$rcc.set.and.count.matrix$count.matrix)
# Store variables used by other scripts in the global environment.
# NOTE(review): globalenv() assignment couples the modules via global
# state; consider returning these values instead.
assign("key.label", load.data.values$key.label(), envir = globalenv())
assign("label.of.interest", load.data.values$label.of.interest(), envir = globalenv())
assign("metadata", load.data.values$metadata(), envir = globalenv())
assign("eset", load.data.values$eset(), envir = globalenv())
assign("counts", load.data.values$counts(), envir = globalenv())
# OUTPUT: preview tables (full metadata; first five count columns).
output$loaded.metadata <- renderDataTable(load.data.values$metadata(),
options = list(pageLength = 10, searching = FALSE, lengthChange = FALSE), escape=FALSE, selection = 'single'
)
output$loaded.counts <- renderDataTable(load.data.values$counts()[,c(1,2,3,4,5)],
options = list(pageLength = 10, searching = FALSE, lengthChange = FALSE), escape=FALSE, selection = 'single'
)
})
}
# Re-run the whole load pipeline whenever the button is pressed.
observeEvent(input$loadDataButton, load.data.core())
#hideElement("ld.res") | /modules/load_data/load_data.R | no_license | pabrodbra/DEAKit | R | false | false | 2,225 | r | load.data.values <- reactiveValues()
# Read the RCC/RLF paths and key configuration from the Shiny `input`,
# extract the RCC metadata and count matrix, publish the results to the
# global environment (consumed by other DEAKit scripts), and render the
# preview tables. Runs inside a busy indicator tied to "loadDataButton".
load.data.core <- function() {
withBusyIndicatorServer("loadDataButton", {
# Snapshot the current UI inputs.
rcc.path <- input$rcc.path
rlf.path <- input$rlf.path
key.of.interest <- input$key.of.interest
label.of.key <- input$label.of.key
key.value.1 <- input$key.value.1
key.value.2 <- input$key.value.2
# Setup: build the key names "key1".."keyN"; the last key is the key of
# interest, and its two selected values drive the group comparison.
load.data.values$keys.vector <- reactive(unlist(lapply(seq_len(key.of.interest), function(x) paste("key",x,sep = "")), use.names = FALSE))
load.data.values$key.label <- reactive(tail(load.data.values$keys.vector(), 1))
load.data.values$values.of.key.of.interest <- reactive(c(key.value.1, key.value.2))
load.data.values$label.of.interest <- reactive(label.of.key)
# Load data + extract per-sample metadata from the RCC files.
load.data.values$metadata <- reactive(extract.rcc.metadata(rcc.path, load.data.values$keys.vector(), load.data.values$values.of.key.of.interest()))
# Retrieve the expression set and count matrix (computed eagerly once,
# unlike the two reactive accessors below).
load.data.values$rcc.set.and.count.matrix <- get.RCC.set.and.counts(load.data.values$metadata(), rcc.path, rlf.path)
load.data.values$eset <- reactive(load.data.values$rcc.set.and.count.matrix$set)
load.data.values$counts <- reactive(load.data.values$rcc.set.and.count.matrix$count.matrix)
# Store variables used by other scripts in the global environment.
# NOTE(review): globalenv() assignment couples the modules via global
# state; consider returning these values instead.
assign("key.label", load.data.values$key.label(), envir = globalenv())
assign("label.of.interest", load.data.values$label.of.interest(), envir = globalenv())
assign("metadata", load.data.values$metadata(), envir = globalenv())
assign("eset", load.data.values$eset(), envir = globalenv())
assign("counts", load.data.values$counts(), envir = globalenv())
# OUTPUT: preview tables (full metadata; first five count columns).
output$loaded.metadata <- renderDataTable(load.data.values$metadata(),
options = list(pageLength = 10, searching = FALSE, lengthChange = FALSE), escape=FALSE, selection = 'single'
)
output$loaded.counts <- renderDataTable(load.data.values$counts()[,c(1,2,3,4,5)],
options = list(pageLength = 10, searching = FALSE, lengthChange = FALSE), escape=FALSE, selection = 'single'
)
})
}
# Re-run the whole load pipeline whenever the button is pressed.
observeEvent(input$loadDataButton, load.data.core())
#hideElement("ld.res") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dec_tree.R
\name{dec_tree.fit}
\alias{dec_tree.fit}
\title{Bayesian Decision Tree Fit}
\usage{
dec_tree.fit(formuler, data = NULL, d = NULL, alpha = NULL,
method = "classification", depth.max = 5L, size = 1L,
debug = FALSE, ...)
}
\arguments{
\item{formuler}{a formula of the form \code{response ~ predictors} specifying the model to fit.}
\item{data}{the data associated with the formuler. Note: if you want an intercept, you must
add it ahead of time.}
\item{d}{the number of features to subsample at each node. Defaults to \code{NULL}, which tries every feature.}
\item{alpha}{the prior parameters for the feature probabilities. A \code{[p]} vector. If \code{NULL}, samples uniformly.
Defaults to \code{NULL}.}
\item{method}{whether you want "classification" or "regression". Defaults to \code{"classification"}.}
\item{depth.max}{the maximum allowed tree depth. Defaults to \code{5L}.}
\item{size}{the minimum allowed number of samples for an individual node. Defaults to \code{1L}.}
\item{debug}{whether to save the predictors and responses that are categorized. Defaults to \code{FALSE}.}
\item{...}{trailing arguments.}
}
\value{
an object of class \code{dec.tree.class} containing the following:
\item{\code{tree}}{the decision tree.}
\item{\code{X}}{The training predictors.}
\item{\code{Y}}{the training responses.}
\item{\code{d}}{the number of features subsampled at each node.}
\item{\code{alpha}}{the sampling distribution for the features. A \code{[p]} vector.}
\item{\code{depth.max}}{the maximum allowed tree depth.}
\item{\code{size}}{the minimum allowed number of samples for an individual node.}
\item{\code{debug}}{whether to save the predictors and responses that are categorized.}
}
\description{
Fit a Bayesian Decision Tree with a \code{stats}-like formula frontend interface.
}
\author{
Eric Bridgeford
}
| /man/dec_tree.fit.Rd | permissive | ebridge2/badmf | R | false | true | 1,851 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dec_tree.R
\name{dec_tree.fit}
\alias{dec_tree.fit}
\title{Bayesian Decision Tree Fit}
\usage{
dec_tree.fit(formuler, data = NULL, d = NULL, alpha = NULL,
method = "classification", depth.max = 5L, size = 1L,
debug = FALSE, ...)
}
\arguments{
\item{formuler}{a formula of the form \code{response ~ predictors} specifying the model to fit.}
\item{data}{the data associated with the formuler. Note: if you want an intercept, you must
add it ahead of time.}
\item{d}{the number of features to subsample at each node. Defaults to \code{NULL}, which tries every feature.}
\item{alpha}{the prior parameters for the feature probabilities. A \code{[p]} vector. If \code{NULL}, samples uniformly.
Defaults to \code{NULL}.}
\item{method}{whether you want "classification" or "regression". Defaults to \code{"classification"}.}
\item{depth.max}{the maximum allowed tree depth. Defaults to \code{5L}.}
\item{size}{the minimum allowed number of samples for an individual node. Defaults to \code{1L}.}
\item{debug}{whether to save the predictors and responses that are categorized. Defaults to \code{FALSE}.}
\item{...}{trailing arguments.}
}
\value{
an object of class \code{dec.tree.class} containing the following:
\item{\code{tree}}{the decision tree.}
\item{\code{X}}{The training predictors.}
\item{\code{Y}}{the training responses.}
\item{\code{d}}{the number of features subsampled at each node.}
\item{\code{alpha}}{the sampling distribution for the features. A \code{[p]} vector.}
\item{\code{depth.max}}{the maximum allowed tree depth.}
\item{\code{size}}{the minimum allowed number of samples for an individual node.}
\item{\code{debug}}{whether to save the predictors and responses that are categorized.}
}
\description{
Fit a Bayesian Decision Tree with a \code{stats}-like formula frontend interface.
}
\author{
Eric Bridgeford
}
|
# Shiny dashboard: county-level forecasts of HIV testing services (HTS)
# outcomes for Kenya. `forecasts` is a named list with one element per
# county; each element carries the candidate model forecasts, their MAPE
# scores and the observed monthly series (num_pos, num_tests).
library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
library(lubridate)
library(plotly)
library(DT)
library(MLmetrics)
library(tidyr)
forecasts <- readRDS('./county_hts_forecasts.rds')
# ---- UI: county selector in the sidebar, two plotly panels in the body ----
ui <- dashboardPage(
dashboardHeader(
title="Kenya HTS Forecasts"
),
dashboardSidebar(
selectInput("county",
"Select a County:",
choices = names(forecasts))
),
dashboardBody(
fluidRow(
box(plotlyOutput("hts_forecast_plot"),
title = "HTS Positive Forecasts",
status = "primary",
solidHeader = TRUE,
width = 6),
box(plotlyOutput("hts_plot"),
title = "HTS Number of Tests",
status = "primary",
solidHeader = TRUE,
width = 6))
))
# ---- Server ----
server <- function(input, output) {
# Build the plotting data for the selected county: pick the candidate
# model with the lowest MAPE, then align its forecast with the observed
# positives and test counts on a common monthly Date axis.
dat_prep <- reactive({
# `[[` with a name selects the county's entry directly.
dat <- forecasts[[input$county]]
# Choose the best (lowest-MAPE) of the three STLF variants. Columns
# 4/6/8 hold the MAPEs; each forecast object sits one column to the
# left of its MAPE column.
low_mape <- min(dat$stlf_arima_mape, dat$stlf_naive_mape, dat$stlf_ets_mape)
mod_low_mape <- which(sapply(dat[c(4, 6, 8)], function(x) x == low_mape))
dat_forecasts <- dat[[which(names(dat) == names(mod_low_mape)) - 1]]
# Full monthly axis 2016-2021; forecast covers 2020-2021, actuals 2016-2020.
dates <- data.frame(Date = seq(as.Date("2016/01/01"), as.Date("2021/12/01"), by = "month"))
forecast <- cbind(Date = seq(as.Date("2020/01/01"), as.Date("2021/12/01"), by = "month"),
as.data.frame(dat_forecasts))
# BUG FIX: the original used cbind() on a Date plus a numeric vector,
# which produces a numeric matrix and drops the Date class before the
# merge; data.frame() keeps the Date column typed correctly.
actuals <- data.frame(Date = seq(as.Date("2016/01/01"), as.Date("2020/12/01"), by = "month"),
num_pos = dat$num_pos)
num_tests <- data.frame(Date = seq(as.Date("2016/01/01"), as.Date("2020/12/01"), by = "month"),
num_tests = dat$num_tests)
# Left-join everything onto the full date axis.
dates %>%
merge(forecast, by = "Date", all.x = TRUE) %>%
merge(actuals, by = "Date", all.x = TRUE) %>%
merge(num_tests, by = "Date", all.x = TRUE)
})
# Forecast vs actual positives, with the 95% prediction interval ribbon.
# NOTE(review): the window spans 24 months although the title says
# "Twelve-Month" -- confirm the intended horizon.
output$hts_forecast_plot <- renderPlotly({
dat_long <- pivot_longer(dat_prep(), cols = c("Point.Forecast", "num_pos"))
dat_plot <- dat_long %>% ggplot(aes(x = Date, y = value, color = name)) +
geom_line() +
geom_ribbon(aes(ymin = Lo.95, ymax = Hi.95),
fill = "grey70",
alpha = 0.5,
color = NA) +
ggtitle("Twelve-Month Forecast of Positive HIV Tests") +
xlab("Month of Testing") +
ylab("Forecast vs Actuals") +
theme(legend.position = c(.9, .9),
legend.title = element_blank())
ggplotly(dat_plot)
})
# Observed monthly testing volume.
output$hts_plot <- renderPlotly({
dat_plot <- dat_prep() %>% ggplot(aes(x = Date, y = num_tests)) +
geom_line() +
ggtitle("Number of HIV Tests") +
xlab("Month of Testing") +
ylab("Number of Tests") +
theme(legend.position = c(.9, .9),
legend.title = element_blank())
# BUG FIX: the original left the assignment as the last expression and
# never converted the plot; convert explicitly so this panel is
# interactive, consistent with hts_forecast_plot.
ggplotly(dat_plot)
})
}
# Run the application
shinyApp(ui = ui, server = server)
| /ARIMA Models/Arima HTS KHIS/app.R | no_license | MaringaM/Analytics | R | false | false | 3,607 | r | library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
library(lubridate)
library(plotly)
library(DT)
library(MLmetrics)
library(tidyr)
# Precomputed forecasts: a named list with one element per county; each
# element carries the candidate model forecasts, their MAPE scores and
# the observed monthly series (num_pos, num_tests).
forecasts <- readRDS('./county_hts_forecasts.rds')
# UI: dashboard with a county selector and two plotly panels.
ui <- dashboardPage(
dashboardHeader(
title="Kenya HTS Forecasts"
),
# Sidebar: county drop-down (choices come from the forecast list names).
dashboardSidebar(
selectInput("county",
"Select a County:",
choices = names(forecasts))
),
# Body: forecast panel (left) and raw test-volume panel (right).
dashboardBody(
fluidRow(
box(plotlyOutput("hts_forecast_plot"),
title = "HTS Positive Forecasts",
status = "primary",
solidHeader = TRUE,
width = 6),
box(plotlyOutput("hts_plot"),
title = "HTS Number of Tests",
status = "primary",
solidHeader = TRUE,
width = 6))
))
# Server: assemble the plotting data for the selected county and render
# the two panels.
server <- function(input, output) {
# Pick the lowest-MAPE STLF model for the county and align its forecast
# with the observed series on a common monthly Date axis.
dat_prep <- reactive({
dat <- forecasts[[which(names(forecasts) == input$county)]]
# low_mape <-min(dat$arima_mape, dat$var_mape, dat$stlf_naive_mape, dat$stlf_ets_mape, dat$stlf_arima_mape)
# mod_low_mape <- which(sapply(dat[c(2,4,6,8,10)], function(x) x == low_mape))
# Columns 4/6/8 hold the three STLF MAPEs; each forecast object sits
# one position to the left of its MAPE column.
low_mape <-min(dat$stlf_arima_mape, dat$stlf_naive_mape, dat$stlf_ets_mape)
mod_low_mape <- which(sapply(dat[c(4,6,8)], function(x) x == low_mape))
dat_forecasts <- dat[[which(names(dat) == names(mod_low_mape)) - 1]]
# Full monthly axis 2016-2021; forecast covers 2020-2021, actuals 2016-2020.
dates <- data.frame(Date = seq(as.Date("2016/01/01"), as.Date("2021/12/01"), by = "month"))
forecast <- cbind(Date = seq(as.Date("2020/01/01"), as.Date("2021/12/01"), by = "month"),
as.data.frame(dat_forecasts))
# NOTE(review): cbind() of a Date and a numeric vector yields a numeric
# matrix, dropping the Date class before the merge -- confirm intended.
actuals <- cbind(Date = seq(as.Date("2016/01/01"), as.Date("2020/12/01"), by = "month"),
num_pos = dat$num_pos)
num_tests <- cbind(Date = seq(as.Date("2016/01/01"), as.Date("2020/12/01"), by = "month"),
num_tests = dat$num_tests)
# Left-join everything onto the full date axis.
combined <- merge(dates, forecast, by = "Date", all.x = TRUE) %>%
merge(., actuals, by = "Date", all.x = TRUE) %>%
merge(., num_tests, by = "Date", all.x = TRUE)
combined
})
# Forecast vs actual positives, with the 95% prediction interval ribbon.
output$hts_forecast_plot <- renderPlotly({
dat_long <- pivot_longer(dat_prep(), cols = c("Point.Forecast", "num_pos"))
dat_plot <- dat_long %>% ggplot(aes(x = Date, y = value, color = name)) +
geom_line() +
geom_ribbon(aes(ymin = Lo.95, ymax = Hi.95),
fill = "grey70",
alpha = 0.5,
color = NA) +
ggtitle("Twelve-Month Forecast of Positive HIV Tests") +
xlab("Month of Testing") +
ylab("Forecast vs Actuals") +
theme(legend.position = c(.9, .9),
legend.title = element_blank())
ggplotly(dat_plot)
})
# Observed monthly testing volume.
# NOTE(review): unlike the panel above, this result is never passed
# through ggplotly() and the last expression is an assignment -- confirm
# the panel renders as intended.
output$hts_plot <- renderPlotly({
dat_plot <- dat_prep() %>% ggplot(aes(x = Date, y = num_tests)) +
geom_line() +
ggtitle("Number of HIV Tests") +
xlab("Month of Testing") +
ylab("Number of Tests") +
theme(legend.position = c(.9, .9),
legend.title = element_blank())
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
#PAGE=159
# Convert three raw scores (60, 93, 72) to standard units (z-scores) on
# a distribution with mean 72 and standard deviation 15; each bare name
# auto-prints its value.
m <- 72   # mean
s <- 15   # standard deviation
a1 <- 60
a <- (a1 - m) / s
a
b1 <- 93
b <- (b1 - m) / s
b
c1 <- 72
c <- (c1 - m) / s
c
| /Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH7/EX7.7.14/Ex7_7_14.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 95 | r | #PAGE=159
# Z-scores for raw scores 60, 93 and 72 on a distribution with mean 72
# and standard deviation 15; each bare name auto-prints its value.
m=72
s=15
# z = (60 - 72) / 15 = -0.8
a1=60
a=(a1-m)/s
a
# z = (93 - 72) / 15 = 1.4
b1=93
b=(b1-m)/s
b
# z = (72 - 72) / 15 = 0 (the score equals the mean)
c1=72
c=(c1-m)/s
c
|
# Tests for multivariate predictive mean matching in mice.post.matching().
context("Multivariate PMM")
## set random seed for reproducibility (matching draws random donors)
set.seed(4711)
## create data set to test for match_vars error handling: make "pi"
## constant wherever "sws" is observed so matching on "pi" must fail
mammal2 <- mammal_data
mammal2[!is.na(mammal2$sws),"pi"] <- 2L
# minor helper variables for error checks: deliberately malformed
# group-index vectors (out-of-bounds / not in the visit sequence)
mammal_blocks_fail1 <- c(0,0,0,1,1,0,0,2,2,0,0,0)
mammal_blocks_fail2 <- c(1,0,0,1,1,0,2,2,0,0,0)
## load other test data (pre-built mids fixtures shipped with the tests)
mids_nh <- readRDS("mids_nh.rds")
mids_mammal <- readRDS("mids_mammal.rds")
mids_fail1 <- readRDS("mids_fail1.rds")
mids_fail2 <- readRDS("mids_fail2.rds")
## setup result objects of mice.post.matching to compare those to reference data in test
# general functionality, euclidian metric to avoid issues with eigen()/LAPACK
post_nh1 <- mice.post.matching(mids_nh, distmetric = "euclidian")
# apply match_vars and further parameters
post_nh2 <- mice.post.matching(mids_nh, blocks = c("bmi","hyp"), donors = 2L, distmetric = "manhattan", matchtype = 2L, minvar = 0.0002, weights = c(1,5), match_vars = "age")
# apply blocks and weights using vector notation
post_nh3 <- mice.post.matching(mids_nh, blocks = c(0,1,1,0), weights = c(1,2,3,4), distmetric = "euclidian")
## test input checks: each malformed call must raise its specific error
test_that("Test input checks",
{
expect_error(mice.post.matching(mammal_data), "Argument 'obj' should be of type mids.")
expect_error(mice.post.matching(mids_mammal, blocks = "gt"), "Argument 'blocks' contains a tuple of length 1 with imputation method 'pmm'.")
expect_error(mice.post.matching(mids_mammal, blocks = "ct"), "Argument 'blocks' contains a tuple with invalid column names.")
expect_error(mice.post.matching(mids_mammal, blocks = c("sws","gt")), "Not all column tuples in given set of blocks are either blockwise NA or blockwise non-NA.")
expect_error(mice.post.matching(mids_mammal, blocks = c(10,20)), "Argument 'blocks' contains a tuple with an out-of-bounds column index.")
expect_error(mice.post.matching(mids_mammal, blocks = list(c("sws","ps"), c("sws","ps"))), "Argument 'blocks' contains duplicate columns among its elements.")
expect_error(mice.post.matching(mids_mammal, blocks = list(c("sws","ps"), c("mls","gt")), donors = c(1,2,3)), "Argument 'donors' has to be either a single number or a vector with as many elements as there are blocks.")
expect_error(mice.post.matching(mids_mammal, blocks = mammal_blocks_fail1), "Argument 'blocks' contains a tuple with an out-of-bounds column index.")
expect_error(mice.post.matching(mids_mammal, blocks = mammal_blocks_fail2), "Argument 'blocks' contains a tuple with a column index that is not in the visit sequence.")
expect_error(mice.post.matching(mids_mammal, weights = c(2,3)), "Input argument 'weights' must not be in list format if blocks haven't been explicitly specified.")
expect_error(mice.post.matching(mids_mammal, blocks = c("sws","ps"), weights = c(2,3,4)), "Length of weights tuple 1 does not match length of block 1.")
expect_error(mice.post.matching(mids_mammal, blocks = c(0,0,0,0,0,0,0,2,3,0,0)), "Argument 'blocks' contains invalid group indices.")
expect_error(mice.post.matching(mids_fail1), "There are no column tuples with identical missing data patterns and valid imputation methods.")
expect_error(mice.post.matching(mids_fail2, blocks = c("sws","ps"), match_vars = "pi"), "Column block*")
})
## test functionality: results must match the stored reference snapshots
test_that("Test functionality",
{
expect_equal_to_reference(post_nh1, file = "post_nh1.rds")
expect_equal_to_reference(post_nh2, file = "post_nh2.rds")
expect_equal_to_reference(post_nh3, file = "post_nh3.rds")
})
| /tests/testthat/test_multpmm.R | no_license | cran/miceExt | R | false | false | 3,793 | r | context("Multivariate PMM")
## set random seed for reproducibility (matching draws random donors)
set.seed(4711)
## create data set to test for match_vars error handling: make "pi"
## constant wherever "sws" is observed so matching on "pi" must fail
mammal2 <- mammal_data
mammal2[!is.na(mammal2$sws),"pi"] <- 2L
# minor helper variables for error checks: deliberately malformed
# group-index vectors (out-of-bounds / not in the visit sequence)
mammal_blocks_fail1 <- c(0,0,0,1,1,0,0,2,2,0,0,0)
mammal_blocks_fail2 <- c(1,0,0,1,1,0,2,2,0,0,0)
## load other test data (pre-built mids fixtures shipped with the tests)
mids_nh <- readRDS("mids_nh.rds")
mids_mammal <- readRDS("mids_mammal.rds")
mids_fail1 <- readRDS("mids_fail1.rds")
mids_fail2 <- readRDS("mids_fail2.rds")
## setup result objects of mice.post.matching to compare those to reference data in test
# general functionality, euclidian metric to avoid issues with eigen()/LAPACK
post_nh1 <- mice.post.matching(mids_nh, distmetric = "euclidian")
# apply match_vars and further parameters
post_nh2 <- mice.post.matching(mids_nh, blocks = c("bmi","hyp"), donors = 2L, distmetric = "manhattan", matchtype = 2L, minvar = 0.0002, weights = c(1,5), match_vars = "age")
# apply blocks and weights using vector notation
post_nh3 <- mice.post.matching(mids_nh, blocks = c(0,1,1,0), weights = c(1,2,3,4), distmetric = "euclidian")
## test input checks: each malformed call must raise its specific error
test_that("Test input checks",
{
expect_error(mice.post.matching(mammal_data), "Argument 'obj' should be of type mids.")
expect_error(mice.post.matching(mids_mammal, blocks = "gt"), "Argument 'blocks' contains a tuple of length 1 with imputation method 'pmm'.")
expect_error(mice.post.matching(mids_mammal, blocks = "ct"), "Argument 'blocks' contains a tuple with invalid column names.")
expect_error(mice.post.matching(mids_mammal, blocks = c("sws","gt")), "Not all column tuples in given set of blocks are either blockwise NA or blockwise non-NA.")
expect_error(mice.post.matching(mids_mammal, blocks = c(10,20)), "Argument 'blocks' contains a tuple with an out-of-bounds column index.")
expect_error(mice.post.matching(mids_mammal, blocks = list(c("sws","ps"), c("sws","ps"))), "Argument 'blocks' contains duplicate columns among its elements.")
expect_error(mice.post.matching(mids_mammal, blocks = list(c("sws","ps"), c("mls","gt")), donors = c(1,2,3)), "Argument 'donors' has to be either a single number or a vector with as many elements as there are blocks.")
expect_error(mice.post.matching(mids_mammal, blocks = mammal_blocks_fail1), "Argument 'blocks' contains a tuple with an out-of-bounds column index.")
expect_error(mice.post.matching(mids_mammal, blocks = mammal_blocks_fail2), "Argument 'blocks' contains a tuple with a column index that is not in the visit sequence.")
expect_error(mice.post.matching(mids_mammal, weights = c(2,3)), "Input argument 'weights' must not be in list format if blocks haven't been explicitly specified.")
expect_error(mice.post.matching(mids_mammal, blocks = c("sws","ps"), weights = c(2,3,4)), "Length of weights tuple 1 does not match length of block 1.")
expect_error(mice.post.matching(mids_mammal, blocks = c(0,0,0,0,0,0,0,2,3,0,0)), "Argument 'blocks' contains invalid group indices.")
expect_error(mice.post.matching(mids_fail1), "There are no column tuples with identical missing data patterns and valid imputation methods.")
expect_error(mice.post.matching(mids_fail2, blocks = c("sws","ps"), match_vars = "pi"), "Column block*")
})
## test functionality: results must match the stored reference snapshots
test_that("Test functionality",
{
expect_equal_to_reference(post_nh1, file = "post_nh1.rds")
expect_equal_to_reference(post_nh2, file = "post_nh2.rds")
expect_equal_to_reference(post_nh3, file = "post_nh3.rds")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/problem_samples.R
\name{problem_samples.log}
\alias{problem_samples.log}
\title{Find Problem Samples within Training Dataset}
\usage{
problem_samples.log(
model,
data,
k,
standard = 2,
student = 2,
df_fits = 1,
cooks = 1
)
}
\arguments{
\item{model}{the model to be used}
\item{data}{TRAINING data set}
\item{k}{number of predictors in model}
\item{standard}{cut-off for standardized residuals- samples with values above
abs(standard) will be returned}
\item{student}{cut-off for studentized residuals - samples with values above
abs(student) will be returned}
\item{df_fits}{cut-off for DFFITS - samples with values above
abs(df_fits) will be returned}
\item{cooks}{cut-off for Cook's distance - samples with values above
abs(cooks) will be returned}
}
\value{
a data frame
}
\description{
only supports binary logistic regression at this time.
doesn't work when dfbeta(model) is included - recommend still looking at
these values outside of this function
}
\examples{
\dontrun{
iris2 <- iris[stringr::str_detect(iris$Species, "setosa", negate = TRUE), ]
irismodel <- glm(Species ~ ., data = iris2, family = binomial)
problem_samples.log(irismodel, iris2, k = 4)}
}
| /man/problem_samples.log.Rd | permissive | bmcguir8/mcguiR | R | false | true | 1,258 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/problem_samples.R
\name{problem_samples.log}
\alias{problem_samples.log}
\title{Find Problem Samples within Training Dataset}
\usage{
problem_samples.log(
model,
data,
k,
standard = 2,
student = 2,
df_fits = 1,
cooks = 1
)
}
\arguments{
\item{model}{the model to be used}
\item{data}{TRAINING data set}
\item{k}{number of predictors in model}
\item{standard}{cut-off for standardized residuals- samples with values above
abs(standard) will be returned}
\item{student}{cut-off for studentized residuals - samples with values above
abs(student) will be returned}
\item{df_fits}{cut-off for DFFITS - samples with values above
abs(df_fits) will be returned}
\item{cooks}{cut-off for Cook's distance - samples with values above
abs(cooks) will be returned}
}
\value{
a data frame
}
\description{
only supports binary logistic regression at this time.
doesn't work when dfbeta(model) is included - recommend still looking at
these values outside of this function
}
\examples{
\dontrun{
iris2 <- iris[stringr::str_detect(iris$Species, "setosa", negate = TRUE), ]
irismodel <- glm(Species ~ ., data = iris2, family = binomial)
problem_samples.log(irismodel, iris2, k = 4)}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.