content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
\name{rp.ci}
\alias{rp.ci}
\title{Simulations of normal-based confidence intervals}
\description{
This function shows simulated confidence intervals for the mean of
a normal distribution. It also creates a panel which controls the
mean and standard deviation of the population and the size of the
simulated sample.
}
\usage{
rp.ci(mu = 0, sigma = 1, sample.sizes = c(30, 50, 100, 200, 500), confidence = 0.95,
panel = TRUE, panel.plot = TRUE, hscale = NA, vscale = hscale)
}
\arguments{
\item{mu, sigma}{the population mean and standard deviation.}
\item{sample.sizes}{the available sample sizes (\code{30}, \code{50}, \code{100}, \code{200}, \code{500}) for simulated data.}
\item{confidence}{the available confidence levels (\code{0.90}, \code{0.95}, \code{0.99}).}
\item{panel}{a logical parameter which determines whether interactive controls are provided or a simple static plot is produced.}
\item{panel.plot}{a logical parameter which determines whether the plot is placed inside the panel (TRUE) or the standard graphics window (FALSE). If the plot is to be placed inside the panel then the \code{tkrplot} library is required.}
\item{hscale, vscale}{scaling parameters for the size of the plot when \code{panel.plot} is set to \code{TRUE}. The default values are 1 on Unix platforms and 1.4 on Windows platforms.}
}
\details{A button is provided to sample repeatedly from the current settings. Confidence intervals which cover the population mean are coloured blue while those which miss are coloured red. Repeated simulations illustrate the property of confidence intervals to capture the true value with probability determined by the confidence level, which is set by the \code{confidence} argument (default 0.95).}
\value{Nothing is returned.}
\references{
rpanel: Simple interactive controls for R functions using the tcltk package.
Journal of Statistical Software, 17, issue 9.
}
\examples{
\dontrun{
rp.ci()
}}
\keyword{iplot}
\keyword{dynamic}
|
/man/rp.ci.Rd
|
no_license
|
cran/rpanel
|
R
| false
| false
| 2,005
|
rd
|
\name{rp.ci}
\alias{rp.ci}
\title{Simulations of normal-based confidence intervals}
\description{
This function shows simulated confidence intervals for the mean of
a normal distribution. It also creates a panel which controls the
mean and standard deviation of the population and the size of the
simulated sample.
}
\usage{
rp.ci(mu = 0, sigma = 1, sample.sizes = c(30, 50, 100, 200, 500), confidence = 0.95,
panel = TRUE, panel.plot = TRUE, hscale = NA, vscale = hscale)
}
\arguments{
\item{mu, sigma}{the population mean and standard deviation.}
\item{sample.sizes}{the available sample sizes (\code{30}, \code{50}, \code{100}, \code{200}, \code{500}) for simulated data.}
\item{confidence}{the available confidence levels (\code{0.90}, \code{0.95}, \code{0.99}).}
\item{panel}{a logical parameter which determines whether interactive controls are provided or a simple static plot is produced.}
\item{panel.plot}{a logical parameter which determines whether the plot is placed inside the panel (TRUE) or the standard graphics window (FALSE). If the plot is to be placed inside the panel then the \code{tkrplot} library is required.}
\item{hscale, vscale}{scaling parameters for the size of the plot when \code{panel.plot} is set to \code{TRUE}. The default values are 1 on Unix platforms and 1.4 on Windows platforms.}
}
\details{A button is provided to sample repeatedly from the current settings. Confidence intervals which cover the population mean are coloured blue while those which miss are coloured red. Repeated simulations illustrate the property of confidence intervals to capture the true value with probability determined by the confidence level, which is set by the \code{confidence} argument (default 0.95).}
\value{Nothing is returned.}
\references{
rpanel: Simple interactive controls for R functions using the tcltk package.
Journal of Statistical Software, 17, issue 9.
}
\examples{
\dontrun{
rp.ci()
}}
\keyword{iplot}
\keyword{dynamic}
|
test_that("Some data that cannot be fitted with nls_list also fail with nlme", {
  # With this seed, cf[10] does not fit with nls_list
  sim_data <- cleanup_data(simulate_breathtest_data(seed = 100)$data)
  fit <- nlme_fit(sim_data)
  expect_null(fit$coef)
})

test_that("One-group nlme fit returns valid result", {
  data("usz_13c")
  pat_ids <- sprintf("pat_%03d", 1:6)
  norm_ids <- sprintf("norm_%03d", 1:6)
  one_group <- usz_13c %>%
    dplyr::filter(patient_id %in% c(pat_ids, norm_ids),
                  group != "liquid_normal") %>%
    cleanup_data()
  comment(one_group) <- "comment"
  fit <- nlme_fit(one_group)
  expect_s3_class(fit, "breathtestfit")
  expect_s3_class(fit, "breathtestnlmefit")
  expect_equal(comment(fit$data), "comment")
  expect_identical(names(fit), c("coef", "data", "nlme_fit"))
  cf <- coef(fit)
  expect_equal(comment(cf), "comment")
  expect_equal(nrow(cf), 104)
  expect_identical(names(cf),
                   c("patient_id", "group", "parameter", "method", "value"))
  expect_type(AIC(fit), "double")
  expect_type(sigma(fit), "double")
  expect_gt(sigma(fit), 0.5)
  # Check that subsampling was done
  expect_equal(nrow(fit$data), 225)
  expect_identical(names(fit$data), c("patient_id", "group", "minute", "pdr"))
  # Check summary
  s <- summary(fit)
  expect_equal(comment(s), "comment")
  expect_identical(nrow(s), 13L)
  expect_identical(names(s), c("patient_id", "group", "value"))
})

test_that("Two-group nlme fit returns valid result", {
  data("usz_13c")
  norm_ids <- sprintf("norm_%03d", 1:6)
  two_group <- usz_13c %>%
    dplyr::filter(patient_id %in% norm_ids) %>%
    cleanup_data()
  fit <- nlme_fit(two_group)
  expect_identical(names(fit), c("coef", "data", "nlme_fit"))
  cf <- coef(fit)
  expect_equal(nrow(cf), 72)
  expect_identical(names(cf),
                   c("patient_id", "group", "parameter", "method", "value"))
  expect_type(AIC(fit), "double")
  expect_gt(sigma(fit), 0)
  expect_equal(unique(cf$group), c("liquid_normal", "solid_normal"))
  # Check that subsampling was done (denser sampling early)
  expect_equal(nrow(fit$data), 123)
  expect_identical(names(fit$data), c("patient_id", "group", "minute", "pdr"))
})

test_that("Three-group nlme fit returns valid result", {
  data("usz_13c")
  ids <- c("norm_001", "norm_002", "norm_003", "pat_001", "pat_003", "pat_016")
  three_group <- usz_13c %>%
    dplyr::filter(patient_id %in% ids) %>%
    breathtestcore::cleanup_data()
  fit_nlme <- breathtestcore::nlme_fit(three_group)
  expect_identical(names(fit_nlme), c("coef", "data", "nlme_fit"))
  cf <- coef(fit_nlme)
  expect_equal(nrow(cf), 64)
  expect_gt(sigma(fit_nlme), 0)
  expect_equal(unique(cf$group),
               c("liquid_normal", "solid_normal", "solid_patient"))
})
|
/tests/testthat/test_nlme_fit.R
|
no_license
|
dmenne/breathtestcore
|
R
| false
| false
| 2,732
|
r
|
test_that("Some data that cannot be fitted with nls_list also fail with nlme", {
  # With this seed, cf[10] does not fit with nls_list
  sim_data <- cleanup_data(simulate_breathtest_data(seed = 100)$data)
  fit <- nlme_fit(sim_data)
  expect_null(fit$coef)
})

test_that("One-group nlme fit returns valid result", {
  data("usz_13c")
  pat_ids <- sprintf("pat_%03d", 1:6)
  norm_ids <- sprintf("norm_%03d", 1:6)
  one_group <- usz_13c %>%
    dplyr::filter(patient_id %in% c(pat_ids, norm_ids),
                  group != "liquid_normal") %>%
    cleanup_data()
  comment(one_group) <- "comment"
  fit <- nlme_fit(one_group)
  expect_s3_class(fit, "breathtestfit")
  expect_s3_class(fit, "breathtestnlmefit")
  expect_equal(comment(fit$data), "comment")
  expect_identical(names(fit), c("coef", "data", "nlme_fit"))
  cf <- coef(fit)
  expect_equal(comment(cf), "comment")
  expect_equal(nrow(cf), 104)
  expect_identical(names(cf),
                   c("patient_id", "group", "parameter", "method", "value"))
  expect_type(AIC(fit), "double")
  expect_type(sigma(fit), "double")
  expect_gt(sigma(fit), 0.5)
  # Check that subsampling was done
  expect_equal(nrow(fit$data), 225)
  expect_identical(names(fit$data), c("patient_id", "group", "minute", "pdr"))
  # Check summary
  s <- summary(fit)
  expect_equal(comment(s), "comment")
  expect_identical(nrow(s), 13L)
  expect_identical(names(s), c("patient_id", "group", "value"))
})

test_that("Two-group nlme fit returns valid result", {
  data("usz_13c")
  norm_ids <- sprintf("norm_%03d", 1:6)
  two_group <- usz_13c %>%
    dplyr::filter(patient_id %in% norm_ids) %>%
    cleanup_data()
  fit <- nlme_fit(two_group)
  expect_identical(names(fit), c("coef", "data", "nlme_fit"))
  cf <- coef(fit)
  expect_equal(nrow(cf), 72)
  expect_identical(names(cf),
                   c("patient_id", "group", "parameter", "method", "value"))
  expect_type(AIC(fit), "double")
  expect_gt(sigma(fit), 0)
  expect_equal(unique(cf$group), c("liquid_normal", "solid_normal"))
  # Check that subsampling was done (denser sampling early)
  expect_equal(nrow(fit$data), 123)
  expect_identical(names(fit$data), c("patient_id", "group", "minute", "pdr"))
})

test_that("Three-group nlme fit returns valid result", {
  data("usz_13c")
  ids <- c("norm_001", "norm_002", "norm_003", "pat_001", "pat_003", "pat_016")
  three_group <- usz_13c %>%
    dplyr::filter(patient_id %in% ids) %>%
    breathtestcore::cleanup_data()
  fit_nlme <- breathtestcore::nlme_fit(three_group)
  expect_identical(names(fit_nlme), c("coef", "data", "nlme_fit"))
  cf <- coef(fit_nlme)
  expect_equal(nrow(cf), 64)
  expect_gt(sigma(fit_nlme), 0)
  expect_equal(unique(cf$group),
               c("liquid_normal", "solid_normal", "solid_patient"))
})
|
# Introduction to R

# Build a small data frame from two parallel vectors
Name <- c("Yash", "Yash2")
Mark <- c(90, 91)
Data <- data.frame(Name, Mark)

# To convert into different datatypes
a <- 85 / 23
# a is ~3.696
b <- as.integer(a)
# b is 3 (as.integer() truncates towards zero)

# To round off numbers, use ceiling(), floor() or round()
ceiling(a)            # fixed: these functions need an argument
floor(a)
round(a, digits = 1)

# To repeat numbers or vectors
x <- rep(c(1, 2, 3), c(2, 1, 4))  # fixed: closing parenthesis was missing

# To add rows to this dataframe, create another dataframe
Name <- c("Yash3", "Yash4")
Mark <- c(50, 60)
NewData <- data.frame(Name, Mark)
Combine <- rbind(Data, NewData)

#########################
# NOTE(review): everything below assumes the course data sets (WHO, USDA,
# wine, baseball, NBA_test, quality, framingham, stevens, Claims, boston)
# have already been loaded into the workspace -- confirm before running.
summary(WHO)
tapply(WHO$ChildMortality, WHO$Region, mean)
WHO_Europe <- subset(WHO, Region == "Europe")
str(WHO_Europe)
ls()
which.min(WHO$Under15)
plot(WHO$GNI, WHO$FertilityRate)
Outliers <- subset(WHO, GNI > 10000 & FertilityRate > 2.5)
hist(WHO$CellularSubscribers)
boxplot(WHO$LifeExpectancy ~ WHO$Region)
table(WHO$Region)
tapply(WHO$Over60, WHO$Region, mean)
match("Candy", USDA$Description)
x <- matrix(c(2, 3, 4, 5), nrow = 2, ncol = 2, byrow = TRUE)  # fixed: byrow is a logical, not the string "TRUE"
seq(0, 100, 2)  # 0 to 100 in steps of 2

# One-variable linear regression
model1 <- lm(Price ~ AGST, data = wine)
# lm() fits the linear regression; data says where the variables come from
summary(model1)
# Gives the intercept value
model1$residuals
# model1 carries the residuals used to compute SSE (sum of squared errors)
SSE <- sum(model1$residuals^2)
# Lower SSE means a better fit to the training data
model3 <- lm(Price ~ AGST + HarvestRain + WinterRain + Age + FrancePop, data = wine)
summary(model3)
SSE <- sum(model3$residuals^2)

# MONEYBALL
moneyball <- subset(baseball, Year < 2002)
moneyball$RD <- moneyball$RS - moneyball$RA
# Linear regression equations (lecture notes, kept as comments -- not valid R):
#   Runs Scored/Allowed = intercept estimate + OBP/OOBP + SLG/OSLG
#   Wins = intercept estimate + (W~RD)(Runs Scored - Runs Allowed)

# NBA points prediction for current season
NBA <- read.csv("NBA_train.csv")
str(NBA)
PointsReg <- lm(PTS ~ X2PA + X3PA + STL, data = NBA)
SSE <- sum(PointsReg$residuals^2)  # if SSE is too big, then go for RMSE
RMSE <- sqrt(SSE / nrow(NBA))
# Predict points for upcoming seasons using the test.csv file
# NOTE(review): PointsReg4 and NBA_test are not defined above -- presumably a
# refined model and read.csv("NBA_test.csv"); confirm before running.
PointsPrediction <- predict(PointsReg4, newdata = NBA_test)
SSE <- sum((PointsPrediction - NBA_test$PTS)^2)  # fixed: parentheses were unbalanced; residuals must be squared inside sum()
SST <- sum((mean(NBA$PTS) - NBA_test$PTS)^2)
R2 <- 1 - SSE / SST
RMSE <- sqrt(SSE / nrow(NBA_test))

# Logistic regression
# To split the training and testing sets we need an extra package
library(caTools)
split <- sample.split(quality$PoorCare, SplitRatio = 0.75)
qualityTrain <- subset(quality, split == TRUE)
qualityTest <- subset(quality, split == FALSE)
nrow(qualityTrain)
nrow(qualityTest)
LogRegr <- glm(PoorCare ~ OfficeVisits + Narcotics, data = qualityTrain, family = binomial)
summary(LogRegr)

# Predicting the quality of the logistic regression model
set.seed(1000)
framingham <- read.csv("framingham.csv")
split <- sample.split(framingham$TenYearCHD, SplitRatio = 0.65)
Train <- subset(framingham, split == TRUE)
Test <- subset(framingham, split == FALSE)
# Use all remaining fields as predictors
framinghamlog <- glm(TenYearCHD ~ ., data = Train, family = binomial)
summary(framinghamlog)
# Check for significant coefficients

# Designing a prediction model
predictTest <- predict(framinghamlog, type = "response", newdata = Test)
table(Test$TenYearCHD, predictTest > 0.5)
# Checking for accuracy -- example confusion-matrix output (comment, not code):
#        FALSE TRUE
#   0     1069    6
#   1      187   11
Accuracy <- (1069 + 11) / (1069 + 11 + 6 + 187)  # ~0.84 = 84% accuracy
# 0.5 is the threshold value, which is to be manually picked.
# Picking a threshold is better explained using an ROC curve.
install.packages("ROCR")
library(ROCR)  # fixed: package must be attached before prediction()/performance()
predictTrain <- predict(LogRegr, type = "response")  # fixed: predictTrain was never defined; training-set predictions
ROCRPred <- prediction(predictTrain, qualityTrain$PoorCare)  # fixed: ROCR's function is prediction(), not predict()
ROCRPerf <- performance(ROCRPred, "tpr", "fpr")
plot(ROCRPerf, colorize = TRUE)

# TREE USING RPART AND RPART.PLOT
spl <- sample.split(stevens$Reverse, SplitRatio = 0.7)
Train <- subset(stevens, spl == TRUE)
Test <- subset(stevens, spl == FALSE)  # fixed: spl=FALSE assigned instead of comparing
StevensTree <- rpart(Reverse ~ Circuit + Issue, data = Train, method = "class", minbucket = 25)
prp(StevensTree)
PredictCART <- predict(StevensTree, type = "class", newdata = Test)
table(Test$Reverse, PredictCART)
# method="class" marks a classification tree; minbucket controls the fit:
# lower values fit more tightly, higher values generalise more.

# Using the random forest method (to build a regression tree)
spl <- sample.split(stevens$Reverse, SplitRatio = 0.6)  # fixed: $reverse (wrong case) -> $Reverse
Train <- subset(stevens, spl == TRUE)
Test <- subset(stevens, spl == FALSE)
# randomForest() needs a factor outcome for classification, so convert first
# (fixed: removed the duplicate call made before the factor conversion)
Train$Reverse <- as.factor(Train$Reverse)
Test$Reverse <- as.factor(Test$Reverse)
StevensTree <- randomForest(Reverse ~ Circuit + Issue, data = Train, nodesize = 25, ntree = 200)
# To check the accuracy of the model:
PredictForest <- predict(StevensTree, newdata = Test)
table(Test$Reverse, PredictForest)

# Using the baseline method
spl <- sample.split(Claims$bucket2009, SplitRatio = 0.6)
Train <- subset(Claims, spl == TRUE)  # fixed: spl=TRUE assigned instead of comparing
Test <- subset(Claims, spl == FALSE)  # fixed: spl=FALSE assigned instead of comparing
table(Claims$bucket2009, Claims$bucket2008)
# Summing the diagonal and dividing by nrow(Claims) gives the model accuracy

# BOSTON DATA
plot(boston$LON, boston$LAT)
# CHAS: proximity to the river (1 = close to the river)
points(boston$LON[boston$CHAS == 1], boston$LAT[boston$CHAS == 1], col = "blue", pch = 19)
# TRACT 3531 represents MIT (according to the data)
points(boston$LON[boston$TRACT == 3531], boston$LAT[boston$TRACT == 3531], col = "red", pch = 19)
# NOX marks pollution; highlight the most polluted areas
points(boston$LON[boston$NOX >= 0.55], boston$LAT[boston$NOX >= 0.55], col = "green", pch = 19)  # fixed: pch=0.19 typo; plotting symbols are small integers
# MEDV marks housing cost; highlight the expensive areas
points(boston$LON[boston$MEDV >= 21.2], boston$LAT[boston$MEDV >= 21.2], col = "red", pch = 19)
# Draw a vertical or horizontal line in the plot
abline(v = -71.07)
abline(h = 42.17)
|
/commands.r
|
no_license
|
yashreds/hello-world
|
R
| false
| false
| 6,085
|
r
|
# Introduction to R

# Build a small data frame from two parallel vectors
Name <- c("Yash", "Yash2")
Mark <- c(90, 91)
Data <- data.frame(Name, Mark)

# To convert into different datatypes
a <- 85 / 23
# a is ~3.696
b <- as.integer(a)
# b is 3 (as.integer() truncates towards zero)

# To round off numbers, use ceiling(), floor() or round()
ceiling(a)            # fixed: these functions need an argument
floor(a)
round(a, digits = 1)

# To repeat numbers or vectors
x <- rep(c(1, 2, 3), c(2, 1, 4))  # fixed: closing parenthesis was missing

# To add rows to this dataframe, create another dataframe
Name <- c("Yash3", "Yash4")
Mark <- c(50, 60)
NewData <- data.frame(Name, Mark)
Combine <- rbind(Data, NewData)

#########################
# NOTE(review): everything below assumes the course data sets (WHO, USDA,
# wine, baseball, NBA_test, quality, framingham, stevens, Claims, boston)
# have already been loaded into the workspace -- confirm before running.
summary(WHO)
tapply(WHO$ChildMortality, WHO$Region, mean)
WHO_Europe <- subset(WHO, Region == "Europe")
str(WHO_Europe)
ls()
which.min(WHO$Under15)
plot(WHO$GNI, WHO$FertilityRate)
Outliers <- subset(WHO, GNI > 10000 & FertilityRate > 2.5)
hist(WHO$CellularSubscribers)
boxplot(WHO$LifeExpectancy ~ WHO$Region)
table(WHO$Region)
tapply(WHO$Over60, WHO$Region, mean)
match("Candy", USDA$Description)
x <- matrix(c(2, 3, 4, 5), nrow = 2, ncol = 2, byrow = TRUE)  # fixed: byrow is a logical, not the string "TRUE"
seq(0, 100, 2)  # 0 to 100 in steps of 2

# One-variable linear regression
model1 <- lm(Price ~ AGST, data = wine)
# lm() fits the linear regression; data says where the variables come from
summary(model1)
# Gives the intercept value
model1$residuals
# model1 carries the residuals used to compute SSE (sum of squared errors)
SSE <- sum(model1$residuals^2)
# Lower SSE means a better fit to the training data
model3 <- lm(Price ~ AGST + HarvestRain + WinterRain + Age + FrancePop, data = wine)
summary(model3)
SSE <- sum(model3$residuals^2)

# MONEYBALL
moneyball <- subset(baseball, Year < 2002)
moneyball$RD <- moneyball$RS - moneyball$RA
# Linear regression equations (lecture notes, kept as comments -- not valid R):
#   Runs Scored/Allowed = intercept estimate + OBP/OOBP + SLG/OSLG
#   Wins = intercept estimate + (W~RD)(Runs Scored - Runs Allowed)

# NBA points prediction for current season
NBA <- read.csv("NBA_train.csv")
str(NBA)
PointsReg <- lm(PTS ~ X2PA + X3PA + STL, data = NBA)
SSE <- sum(PointsReg$residuals^2)  # if SSE is too big, then go for RMSE
RMSE <- sqrt(SSE / nrow(NBA))
# Predict points for upcoming seasons using the test.csv file
# NOTE(review): PointsReg4 and NBA_test are not defined above -- presumably a
# refined model and read.csv("NBA_test.csv"); confirm before running.
PointsPrediction <- predict(PointsReg4, newdata = NBA_test)
SSE <- sum((PointsPrediction - NBA_test$PTS)^2)  # fixed: parentheses were unbalanced; residuals must be squared inside sum()
SST <- sum((mean(NBA$PTS) - NBA_test$PTS)^2)
R2 <- 1 - SSE / SST
RMSE <- sqrt(SSE / nrow(NBA_test))

# Logistic regression
# To split the training and testing sets we need an extra package
library(caTools)
split <- sample.split(quality$PoorCare, SplitRatio = 0.75)
qualityTrain <- subset(quality, split == TRUE)
qualityTest <- subset(quality, split == FALSE)
nrow(qualityTrain)
nrow(qualityTest)
LogRegr <- glm(PoorCare ~ OfficeVisits + Narcotics, data = qualityTrain, family = binomial)
summary(LogRegr)

# Predicting the quality of the logistic regression model
set.seed(1000)
framingham <- read.csv("framingham.csv")
split <- sample.split(framingham$TenYearCHD, SplitRatio = 0.65)
Train <- subset(framingham, split == TRUE)
Test <- subset(framingham, split == FALSE)
# Use all remaining fields as predictors
framinghamlog <- glm(TenYearCHD ~ ., data = Train, family = binomial)
summary(framinghamlog)
# Check for significant coefficients

# Designing a prediction model
predictTest <- predict(framinghamlog, type = "response", newdata = Test)
table(Test$TenYearCHD, predictTest > 0.5)
# Checking for accuracy -- example confusion-matrix output (comment, not code):
#        FALSE TRUE
#   0     1069    6
#   1      187   11
Accuracy <- (1069 + 11) / (1069 + 11 + 6 + 187)  # ~0.84 = 84% accuracy
# 0.5 is the threshold value, which is to be manually picked.
# Picking a threshold is better explained using an ROC curve.
install.packages("ROCR")
library(ROCR)  # fixed: package must be attached before prediction()/performance()
predictTrain <- predict(LogRegr, type = "response")  # fixed: predictTrain was never defined; training-set predictions
ROCRPred <- prediction(predictTrain, qualityTrain$PoorCare)  # fixed: ROCR's function is prediction(), not predict()
ROCRPerf <- performance(ROCRPred, "tpr", "fpr")
plot(ROCRPerf, colorize = TRUE)

# TREE USING RPART AND RPART.PLOT
spl <- sample.split(stevens$Reverse, SplitRatio = 0.7)
Train <- subset(stevens, spl == TRUE)
Test <- subset(stevens, spl == FALSE)  # fixed: spl=FALSE assigned instead of comparing
StevensTree <- rpart(Reverse ~ Circuit + Issue, data = Train, method = "class", minbucket = 25)
prp(StevensTree)
PredictCART <- predict(StevensTree, type = "class", newdata = Test)
table(Test$Reverse, PredictCART)
# method="class" marks a classification tree; minbucket controls the fit:
# lower values fit more tightly, higher values generalise more.

# Using the random forest method (to build a regression tree)
spl <- sample.split(stevens$Reverse, SplitRatio = 0.6)  # fixed: $reverse (wrong case) -> $Reverse
Train <- subset(stevens, spl == TRUE)
Test <- subset(stevens, spl == FALSE)
# randomForest() needs a factor outcome for classification, so convert first
# (fixed: removed the duplicate call made before the factor conversion)
Train$Reverse <- as.factor(Train$Reverse)
Test$Reverse <- as.factor(Test$Reverse)
StevensTree <- randomForest(Reverse ~ Circuit + Issue, data = Train, nodesize = 25, ntree = 200)
# To check the accuracy of the model:
PredictForest <- predict(StevensTree, newdata = Test)
table(Test$Reverse, PredictForest)

# Using the baseline method
spl <- sample.split(Claims$bucket2009, SplitRatio = 0.6)
Train <- subset(Claims, spl == TRUE)  # fixed: spl=TRUE assigned instead of comparing
Test <- subset(Claims, spl == FALSE)  # fixed: spl=FALSE assigned instead of comparing
table(Claims$bucket2009, Claims$bucket2008)
# Summing the diagonal and dividing by nrow(Claims) gives the model accuracy

# BOSTON DATA
plot(boston$LON, boston$LAT)
# CHAS: proximity to the river (1 = close to the river)
points(boston$LON[boston$CHAS == 1], boston$LAT[boston$CHAS == 1], col = "blue", pch = 19)
# TRACT 3531 represents MIT (according to the data)
points(boston$LON[boston$TRACT == 3531], boston$LAT[boston$TRACT == 3531], col = "red", pch = 19)
# NOX marks pollution; highlight the most polluted areas
points(boston$LON[boston$NOX >= 0.55], boston$LAT[boston$NOX >= 0.55], col = "green", pch = 19)  # fixed: pch=0.19 typo; plotting symbols are small integers
# MEDV marks housing cost; highlight the expensive areas
points(boston$LON[boston$MEDV >= 21.2], boston$LAT[boston$MEDV >= 21.2], col = "red", pch = 19)
# Draw a vertical or horizontal line in the plot
abline(v = -71.07)
abline(h = 42.17)
|
test_that("KeptPaths() works", {
  # Balanced 7-tip tree; edge lengths set to 1..12 so the expected path
  # lengths below can be computed by hand.
  tree <- BalancedTree(7)
  tree$edge.length <- 1:12
  paths <- PathLengths(tree)
  keptTips <- c(2, 4, 6)
  kept <- KeptVerts(tree, 1:7 %in% keptTips)
  # Hand-computed lengths (edge sums) of the paths retained when only
  # tips 2, 4 and 6 -- and the vertices connecting them -- are kept.
  lengths <- c(1 + 2 + 4,
               2 + 4,
               1 + 5 + 7,
               5 + 7,
               8 + 9 + 11,
               1)
  expect_equal(paths[KeptPaths(paths, kept), "length"], lengths)
  # With the third argument FALSE, only a subset (indices 2, 4:6) is
  # expected -- presumably excluding indirect paths; confirm semantics.
  expect_equal(paths[KeptPaths(paths, kept, FALSE), "length"],
               lengths[c(2, 4:6)])
  expected <- PathLengths(KeepTip(tree, keptTips))
  expected[, 1:2] <- which(kept)[unlist(expected[, 1:2])]
  # NOTE(review): `expected` is computed but never asserted against, and the
  # result of this final call is discarded -- it looks like an expectation
  # such as expect_equal(KeptPaths(...), expected) was intended; confirm.
  KeptPaths(PathLengths(tree, TRUE), kept)
})
|
/tests/testthat/test-KeptPaths.R
|
no_license
|
cran/TreeTools
|
R
| false
| false
| 669
|
r
|
test_that("KeptPaths() works", {
  # Balanced 7-tip tree; edge lengths set to 1..12 so the expected path
  # lengths below can be computed by hand.
  tree <- BalancedTree(7)
  tree$edge.length <- 1:12
  paths <- PathLengths(tree)
  keptTips <- c(2, 4, 6)
  kept <- KeptVerts(tree, 1:7 %in% keptTips)
  # Hand-computed lengths (edge sums) of the paths retained when only
  # tips 2, 4 and 6 -- and the vertices connecting them -- are kept.
  lengths <- c(1 + 2 + 4,
               2 + 4,
               1 + 5 + 7,
               5 + 7,
               8 + 9 + 11,
               1)
  expect_equal(paths[KeptPaths(paths, kept), "length"], lengths)
  # With the third argument FALSE, only a subset (indices 2, 4:6) is
  # expected -- presumably excluding indirect paths; confirm semantics.
  expect_equal(paths[KeptPaths(paths, kept, FALSE), "length"],
               lengths[c(2, 4:6)])
  expected <- PathLengths(KeepTip(tree, keptTips))
  expected[, 1:2] <- which(kept)[unlist(expected[, 1:2])]
  # NOTE(review): `expected` is computed but never asserted against, and the
  # result of this final call is discarded -- it looks like an expectation
  # such as expect_equal(KeptPaths(...), expected) was intended; confirm.
  KeptPaths(PathLengths(tree, TRUE), kept)
})
|
#make cellxgene
|
/make_cellxgene.R
|
no_license
|
GKild/neuroblastoma
|
R
| false
| false
| 16
|
r
|
#make cellxgene
|
#' Values as Functions
#'
#' @description
#' A **constant** is a fixed value that incorporates its very computation. This
#' is none other than a _function_ that computes a fixed value when called
#' without arguments. `constant()` declares such a function as a bona fide
#' constant by transforming it to a function that caches the value of its void
#' call (i.e., `constant()`
#' [memoizes](https://en.wikipedia.org/wiki/Memoization) void functions).
#'
#' Combine ``\link[=compose]{`%>>>%`}`` with `constant()` for a _lazy_,
#' _structured_ alternative to the
#' [\pkg{magrittr}](https://cran.r-project.org/package=magrittr) `` `%>%` ``
#' (see ‘Examples’).
#'
#' @param f Function, or symbol or name (string) thereof, that can be called
#' without arguments. (NB: `constant()` itself does not check whether `f()` is
#' indeed a valid call.)
#'
#' @return `constant()` yields a function without formal arguments that returns
#' the (cached, visibility-preserving) value of the void call `f()`.
#'
#' @seealso ``\link[=compose]{`%>>>%`}``
#'
#' @examples
#' # Function with a constant return value
#' val <- {message("Computing from scratch"); mtcars} %>>>%
#' split(.$cyl) %>>>%
#' lapply(function(data) lm(mpg ~ wt, data)) %>>>%
#' lapply(summary) %>>>%
#' sapply(`[[`, "r.squared")
#'
#' # With every invocation, `val()` is computed anew:
#' val()
#' val()
#'
#' # Declaring `val` as a constant ensures that its value is computed only once.
#' # On subsequent calls, the computed value is simply fetched:
#' const <- constant(val)
#' const()
#' const()
#'
#' # As values, `val()` and `const()` are identical. But `const()`, moreover,
#' # has structure, namely the function `const`:
#' const
#'
#' # For instance, you can inspect the intermediate summaries:
#' head(const, -1)()
#'
#' # Which can itself be a constant:
#' summ <- constant(head(const, -1))
#' summ()
#' summ()
#'
#' \dontrun{
#' # Think of `%>>>%` combined with `constant()` as a lazy, structured
#' # alternative to the magrittr `%>%`.
#' library(magrittr)
#'
#' val2 <- mtcars %>%
#' split(.$cyl) %>%
#' lapply(function(data) lm(mpg ~ wt, data)) %>%
#' lapply(summary) %>%
#' sapply(`[[`, "r.squared")
#'
#' # `val2` and `const()` are identical values. But whereas `val2` is computed
#' # immediately and carries no structure, `const` embodies the process that
#' # produces its value, and allows you to defer its realization to the
#' # invocation `const()`.
#' stopifnot(identical(val2, const()))}
#'
#' @export
constant <- local({
  # Template shared by every constant function. Each constant gets its own
  # copy of this template with an environment (assigned below) that carries
  # `__value__` (the wrapped function f) and `__const__` (the memoized void
  # call, initially NULL and filled in lazily on first invocation).
  const <- function() {
    if (is.null(`__const__`)) {
      # First call: evaluate f() exactly once, recording whether its result
      # was returned visibly, then cache a closure that replays the value
      # with the same visibility on all subsequent calls.
      res <- withVisible(`__value__`())
      val <- .subset2(res, "value")
      if (.subset2(res, "visible"))
        `__const__` <<- function() val
      else
        `__const__` <<- function() invisible(val)
    }
    `__const__`()
  }
  function(f) {
    f <- match.fun(f)
    # Idempotent: an existing constant is returned unchanged.
    if (inherits(f, "ConstantFunction"))
      return(f)
    # Scope the template over f's environment plus the two bookkeeping
    # bindings (`%encloses%` and envir() are package-internal helpers).
    environment(const) <- envir(f) %encloses% list(
      `__value__` = f,
      `__const__` = NULL
    )
    # Preserve f's attributes and prepend the ConstantFunction class so
    # print()/variable() can recognise and unwrap the constant.
    attributes(const) <- attributes(f)
    class(const) <- "ConstantFunction" %subclass% class(f)
    const
  }
})
#' @rdname constant
#'
#' @return `variable()` is the inverse transformation of `constant()`: it
#' recovers the underlying (uncached) function of a constant function.
#'
#' @examples
#' # Use `variable()` to recover the original (“variable”) function
#' val_var <- variable(const)
#' stopifnot(identical(val_var, val))
#' val_var()
#' val_var()
#'
#' @export
variable <- local({
  # Accessor for the `__value__` binding that constant() stores in a
  # ConstantFunction's environment (getter() is a package-internal helper).
  get_variable <- getter("__value__")
  function(f) {
    f <- match.fun(f)
    # Return the wrapped original function when present; otherwise f was
    # never a constant, so hand it back unchanged.
    get_variable(f) %||% f
  }
})
#' @export
# Print method for constants: label the object, then delegate to the print
# method of the underlying function class via NextMethod().
print.ConstantFunction <- function(x, ...) {
  cat("Constant Function:\n")
  NextMethod()
  # Return the object invisibly, as print() methods conventionally do.
  invisible(x)
}
|
/R/constant.R
|
permissive
|
mkcor/gestalt
|
R
| false
| false
| 3,763
|
r
|
#' Values as Functions
#'
#' @description
#' A **constant** is a fixed value that incorporates its very computation. This
#' is none other than a _function_ that computes a fixed value when called
#' without arguments. `constant()` declares such a function as a bona fide
#' constant by transforming it to a function that caches the value of its void
#' call (i.e., `constant()`
#' [memoizes](https://en.wikipedia.org/wiki/Memoization) void functions).
#'
#' Combine ``\link[=compose]{`%>>>%`}`` with `constant()` for a _lazy_,
#' _structured_ alternative to the
#' [\pkg{magrittr}](https://cran.r-project.org/package=magrittr) `` `%>%` ``
#' (see ‘Examples’).
#'
#' @param f Function, or symbol or name (string) thereof, that can be called
#' without arguments. (NB: `constant()` itself does not check whether `f()` is
#' indeed a valid call.)
#'
#' @return `constant()` yields a function without formal arguments that returns
#' the (cached, visibility-preserving) value of the void call `f()`.
#'
#' @seealso ``\link[=compose]{`%>>>%`}``
#'
#' @examples
#' # Function with a constant return value
#' val <- {message("Computing from scratch"); mtcars} %>>>%
#' split(.$cyl) %>>>%
#' lapply(function(data) lm(mpg ~ wt, data)) %>>>%
#' lapply(summary) %>>>%
#' sapply(`[[`, "r.squared")
#'
#' # With every invocation, `val()` is computed anew:
#' val()
#' val()
#'
#' # Declaring `val` as a constant ensures that its value is computed only once.
#' # On subsequent calls, the computed value is simply fetched:
#' const <- constant(val)
#' const()
#' const()
#'
#' # As values, `val()` and `const()` are identical. But `const()`, moreover,
#' # has structure, namely the function `const`:
#' const
#'
#' # For instance, you can inspect the intermediate summaries:
#' head(const, -1)()
#'
#' # Which can itself be a constant:
#' summ <- constant(head(const, -1))
#' summ()
#' summ()
#'
#' \dontrun{
#' # Think of `%>>>%` combined with `constant()` as a lazy, structured
#' # alternative to the magrittr `%>%`.
#' library(magrittr)
#'
#' val2 <- mtcars %>%
#' split(.$cyl) %>%
#' lapply(function(data) lm(mpg ~ wt, data)) %>%
#' lapply(summary) %>%
#' sapply(`[[`, "r.squared")
#'
#' # `val2` and `const()` are identical values. But whereas `val2` is computed
#' # immediately and carries no structure, `const` embodies the process that
#' # produces its value, and allows you to defer its realization to the
#' # invocation `const()`.
#' stopifnot(identical(val2, const()))}
#'
#' @export
constant <- local({
  # Template shared by every constant function. Each constant gets its own
  # copy of this template with an environment (assigned below) that carries
  # `__value__` (the wrapped function f) and `__const__` (the memoized void
  # call, initially NULL and filled in lazily on first invocation).
  const <- function() {
    if (is.null(`__const__`)) {
      # First call: evaluate f() exactly once, recording whether its result
      # was returned visibly, then cache a closure that replays the value
      # with the same visibility on all subsequent calls.
      res <- withVisible(`__value__`())
      val <- .subset2(res, "value")
      if (.subset2(res, "visible"))
        `__const__` <<- function() val
      else
        `__const__` <<- function() invisible(val)
    }
    `__const__`()
  }
  function(f) {
    f <- match.fun(f)
    # Idempotent: an existing constant is returned unchanged.
    if (inherits(f, "ConstantFunction"))
      return(f)
    # Scope the template over f's environment plus the two bookkeeping
    # bindings (`%encloses%` and envir() are package-internal helpers).
    environment(const) <- envir(f) %encloses% list(
      `__value__` = f,
      `__const__` = NULL
    )
    # Preserve f's attributes and prepend the ConstantFunction class so
    # print()/variable() can recognise and unwrap the constant.
    attributes(const) <- attributes(f)
    class(const) <- "ConstantFunction" %subclass% class(f)
    const
  }
})
#' @rdname constant
#'
#' @return `variable()` is the inverse transformation of `constant()`: it
#' recovers the underlying (uncached) function of a constant function.
#'
#' @examples
#' # Use `variable()` to recover the original (“variable”) function
#' val_var <- variable(const)
#' stopifnot(identical(val_var, val))
#' val_var()
#' val_var()
#'
#' @export
variable <- local({
  # Accessor for the `__value__` binding that constant() stores in a
  # ConstantFunction's environment (getter() is a package-internal helper).
  get_variable <- getter("__value__")
  function(f) {
    f <- match.fun(f)
    # Return the wrapped original function when present; otherwise f was
    # never a constant, so hand it back unchanged.
    get_variable(f) %||% f
  }
})
#' @export
# Print method for constants: label the object, then delegate to the print
# method of the underlying function class via NextMethod().
print.ConstantFunction <- function(x, ...) {
  cat("Constant Function:\n")
  NextMethod()
  # Return the object invisibly, as print() methods conventionally do.
  invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_setup.R
\name{slim_setup}
\alias{slim_setup}
\title{Attempt to install and / or setup SLiM for use with slimr}
\usage{
slim_setup(verbose = TRUE, force = FALSE)
}
\arguments{
\item{verbose}{Whether to print out progress of the installation.}
\item{force}{If \code{FALSE} (the default) \code{slim_setup} will not install SLiM if it is already
installed and can be found. If you want to force an installation, even if SLiM is already installed
(perhaps to install a newer version), then use \code{force=TRUE}.}
\item{install_dir}{Directory to install SLiM to. If "default" `slim_setup()` will install in the default
directory. Be careful to make sure you have write and execution permissions for the installation folder
you specify. We recommend using the default directory "~/slim", in which case you will not have to set an
environment variable to tell slimr where to find your SLiM installation. Note that for Windows, this
refers to a Linux path from the perspective of your Windows Subsystem for Linux (WSL) distribution, not a
Windows path.}
}
\description{
`slim_setup()` will attempt to determine the user's OS and install SLiM automatically.
Note that on Windows, this will attempt to download a precompiled executable.
}
\examples{
\dontrun{
slim_setup()
}
}
|
/man/slim_setup.Rd
|
permissive
|
rdinnager/slimr
|
R
| false
| true
| 1,354
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_setup.R
\name{slim_setup}
\alias{slim_setup}
\title{Attempt to install and / or setup SLiM for use with slimr}
\usage{
slim_setup(verbose = TRUE, force = FALSE)
}
\arguments{
\item{verbose}{Whether to print out progress of the installation.}
\item{force}{If \code{FALSE} (the default) \code{slim_setup} will not install SLiM if it is already
installed and can be found. If you want to force an installation, even if SLiM is already installed
(perhaps to install a newer version), then use \code{force=TRUE}.}
\item{install_dir}{Directory to install SLiM to. If "default" `slim_setup()` will install in the default
directory. Be careful to make sure you have write and execution permissions for the installation folder
you specify. We recommend using the default directory "~/slim", in which case you will not have to set an
environment variable to tell slimr where to find your SLiM installation. Note that for Windows, this
refers to a Linux path from the perspective of your Windows Subsystem for Linux (WSL) distribution, not a
Windows path.}
}
\description{
`slim_setup()` will attempt to determine the user's OS and install SLiM automatically.
Note that on Windows, this will attempt to download a precompiled executable.
}
\examples{
\dontrun{
slim_setup()
}
}
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties Class
#'
#' @field replication.content.useFileStorage
#' @field replication.content.maxCommitAttempts
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model class generated from the AEM OSGI OpenAPI spec. It wraps the two
# configuration properties of the replication content factory provider and
# converts them to/from the API's JSON representation.
ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties <- R6::R6Class(
  'ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties',
  public = list(
    # Property wrapper object (presumably ConfigNodePropertyBoolean, as used
    # by fromJSON below): whether replication content uses file storage.
    `replication.content.useFileStorage` = NULL,
    # Property wrapper object (presumably ConfigNodePropertyInteger): maximum
    # number of commit attempts for replication content.
    `replication.content.maxCommitAttempts` = NULL,
    # Constructor. Both arguments are optional; when supplied each must be an
    # R6 object (enforced via R6::is.R6), not a bare R value.
    initialize = function(`replication.content.useFileStorage`, `replication.content.maxCommitAttempts`){
      if (!missing(`replication.content.useFileStorage`)) {
        stopifnot(R6::is.R6(`replication.content.useFileStorage`))
        self$`replication.content.useFileStorage` <- `replication.content.useFileStorage`
      }
      if (!missing(`replication.content.maxCommitAttempts`)) {
        stopifnot(R6::is.R6(`replication.content.maxCommitAttempts`))
        self$`replication.content.maxCommitAttempts` <- `replication.content.maxCommitAttempts`
      }
    },
    # Build a named list of the non-NULL properties, delegating each value to
    # the property object's own toJSON(). NULL properties are simply omitted.
    toJSON = function() {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- list()
      if (!is.null(self$`replication.content.useFileStorage`)) {
        ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject[['replication.content.useFileStorage']] <- self$`replication.content.useFileStorage`$toJSON()
      }
      if (!is.null(self$`replication.content.maxCommitAttempts`)) {
        ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject[['replication.content.maxCommitAttempts']] <- self$`replication.content.maxCommitAttempts`$toJSON()
      }
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject
    },
    # Populate this object from a JSON string: each field present in the
    # parsed payload is re-serialized and fed into a freshly constructed
    # ConfigNodeProperty* wrapper, which is then stored on self.
    fromJSON = function(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson) {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- jsonlite::fromJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson)
      if (!is.null(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$`replication.content.useFileStorage`)) {
        replication.content.useFileStorageObject <- ConfigNodePropertyBoolean$new()
        replication.content.useFileStorageObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.useFileStorage, auto_unbox = TRUE))
        self$`replication.content.useFileStorage` <- replication.content.useFileStorageObject
      }
      if (!is.null(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$`replication.content.maxCommitAttempts`)) {
        replication.content.maxCommitAttemptsObject <- ConfigNodePropertyInteger$new()
        replication.content.maxCommitAttemptsObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.maxCommitAttempts, auto_unbox = TRUE))
        self$`replication.content.maxCommitAttempts` <- replication.content.maxCommitAttemptsObject
      }
    },
    # Render this object as a JSON string via sprintf interpolation.
    # NOTE(review): unlike toJSON(), this assumes BOTH properties are
    # non-NULL ($toJSON() on NULL would fail) -- confirm callers guarantee it.
    toJSONString = function() {
      sprintf(
      '{
        "replication.content.useFileStorage": %s,
        "replication.content.maxCommitAttempts": %s
      }',
      self$`replication.content.useFileStorage`$toJSON(),
      self$`replication.content.maxCommitAttempts`$toJSON()
      )
    },
    # Replace both properties from a JSON string, delegating parsing of each
    # field to the corresponding ConfigNodeProperty* fromJSON().
    # NOTE(review): stores the RETURN VALUE of fromJSON() rather than the
    # wrapper object itself (unlike fromJSON above) -- relies on fromJSON()
    # returning a usable value; confirm against the generated wrappers.
    fromJSONString = function(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson) {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- jsonlite::fromJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson)
      ConfigNodePropertyBooleanObject <- ConfigNodePropertyBoolean$new()
      self$`replication.content.useFileStorage` <- ConfigNodePropertyBooleanObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.useFileStorage, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`replication.content.maxCommitAttempts` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.maxCommitAttempts, auto_unbox = TRUE))
    }
  )
)
|
/clients/r/generated/R/ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties.r
|
permissive
|
shinesolutions/swagger-aem-osgi
|
R
| false
| false
| 4,745
|
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties Class
#'
#' @field replication.content.useFileStorage
#' @field replication.content.maxCommitAttempts
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model class generated from the AEM OSGI OpenAPI spec. It wraps the two
# configuration properties of the replication content factory provider and
# converts them to/from the API's JSON representation.
ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties <- R6::R6Class(
  'ComDayCqReplicationImplReplicationContentFactoryProviderImplProperties',
  public = list(
    # Property wrapper object (presumably ConfigNodePropertyBoolean, as used
    # by fromJSON below): whether replication content uses file storage.
    `replication.content.useFileStorage` = NULL,
    # Property wrapper object (presumably ConfigNodePropertyInteger): maximum
    # number of commit attempts for replication content.
    `replication.content.maxCommitAttempts` = NULL,
    # Constructor. Both arguments are optional; when supplied each must be an
    # R6 object (enforced via R6::is.R6), not a bare R value.
    initialize = function(`replication.content.useFileStorage`, `replication.content.maxCommitAttempts`){
      if (!missing(`replication.content.useFileStorage`)) {
        stopifnot(R6::is.R6(`replication.content.useFileStorage`))
        self$`replication.content.useFileStorage` <- `replication.content.useFileStorage`
      }
      if (!missing(`replication.content.maxCommitAttempts`)) {
        stopifnot(R6::is.R6(`replication.content.maxCommitAttempts`))
        self$`replication.content.maxCommitAttempts` <- `replication.content.maxCommitAttempts`
      }
    },
    # Build a named list of the non-NULL properties, delegating each value to
    # the property object's own toJSON(). NULL properties are simply omitted.
    toJSON = function() {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- list()
      if (!is.null(self$`replication.content.useFileStorage`)) {
        ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject[['replication.content.useFileStorage']] <- self$`replication.content.useFileStorage`$toJSON()
      }
      if (!is.null(self$`replication.content.maxCommitAttempts`)) {
        ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject[['replication.content.maxCommitAttempts']] <- self$`replication.content.maxCommitAttempts`$toJSON()
      }
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject
    },
    # Populate this object from a JSON string: each field present in the
    # parsed payload is re-serialized and fed into a freshly constructed
    # ConfigNodeProperty* wrapper, which is then stored on self.
    fromJSON = function(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson) {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- jsonlite::fromJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson)
      if (!is.null(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$`replication.content.useFileStorage`)) {
        replication.content.useFileStorageObject <- ConfigNodePropertyBoolean$new()
        replication.content.useFileStorageObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.useFileStorage, auto_unbox = TRUE))
        self$`replication.content.useFileStorage` <- replication.content.useFileStorageObject
      }
      if (!is.null(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$`replication.content.maxCommitAttempts`)) {
        replication.content.maxCommitAttemptsObject <- ConfigNodePropertyInteger$new()
        replication.content.maxCommitAttemptsObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.maxCommitAttempts, auto_unbox = TRUE))
        self$`replication.content.maxCommitAttempts` <- replication.content.maxCommitAttemptsObject
      }
    },
    # Render this object as a JSON string via sprintf interpolation.
    # NOTE(review): unlike toJSON(), this assumes BOTH properties are
    # non-NULL ($toJSON() on NULL would fail) -- confirm callers guarantee it.
    toJSONString = function() {
      sprintf(
      '{
        "replication.content.useFileStorage": %s,
        "replication.content.maxCommitAttempts": %s
      }',
      self$`replication.content.useFileStorage`$toJSON(),
      self$`replication.content.maxCommitAttempts`$toJSON()
      )
    },
    # Replace both properties from a JSON string, delegating parsing of each
    # field to the corresponding ConfigNodeProperty* fromJSON().
    # NOTE(review): stores the RETURN VALUE of fromJSON() rather than the
    # wrapper object itself (unlike fromJSON above) -- relies on fromJSON()
    # returning a usable value; confirm against the generated wrappers.
    fromJSONString = function(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson) {
      ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject <- jsonlite::fromJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesJson)
      ConfigNodePropertyBooleanObject <- ConfigNodePropertyBoolean$new()
      self$`replication.content.useFileStorage` <- ConfigNodePropertyBooleanObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.useFileStorage, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`replication.content.maxCommitAttempts` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplReplicationContentFactoryProviderImplPropertiesObject$replication.content.maxCommitAttempts, auto_unbox = TRUE))
    }
  )
)
|
##########################################################
# Section 3.3 Estimating a Heart Transplant Mortality Rate
##########################################################

# Gamma(alpha, beta) prior on the transplant mortality rate lambda
alpha <- 16
beta <- 15174

# Hospital A: yobs observed deaths with exposure ex
yobs <- 1
ex <- 66
y <- 0:10
lam <- alpha / beta

# Predictive probabilities f(y) via the prior/posterior gamma ratio identity
py <- dpois(y, lam * ex) * dgamma(lam, shape = alpha, rate = beta) /
  dgamma(lam, shape = alpha + y, rate = beta + ex)
cbind(y, round(py, 3))

# 1000 draws from the posterior Gamma(alpha + yobs, beta + ex)
lambdaA <- rgamma(1000, shape = alpha + yobs, rate = beta + ex)

# Hospital B: much larger exposure, four observed deaths
ex <- 1767
yobs <- 4
y <- 0:10
py <- dpois(y, lam * ex) * dgamma(lam, shape = alpha, rate = beta) /
  dgamma(lam, shape = alpha + y, rate = beta + ex)
cbind(y, round(py, 3))
lambdaB <- rgamma(1000, shape = alpha + yobs, rate = beta + ex)

# Prior vs posterior density, one panel per hospital
par(mfrow = c(2, 1))
plot(density(lambdaA), main = "HOSPITAL A", xlab = "lambdaA", lwd = 3)
curve(dgamma(x, shape = alpha, rate = beta), add = TRUE)
legend("topright", legend = c("prior", "posterior"), lwd = c(1, 3))
plot(density(lambdaB), main = "HOSPITAL B", xlab = "lambdaB", lwd = 3)
curve(dgamma(x, shape = alpha, rate = beta), add = TRUE)
legend("topright", legend = c("prior", "posterior"), lwd = c(1, 3))
|
/LearnBayes/demo/Chapter.3.3.R
|
permissive
|
solgenomics/R_libs
|
R
| false
| false
| 1,108
|
r
|
##########################################################
# Section 3.3 Estimating a Heart Transplant Mortality Rate
##########################################################

# Gamma(alpha, beta) prior on the transplant mortality rate lambda
alpha <- 16
beta <- 15174

# Hospital A: yobs observed deaths with exposure ex
yobs <- 1
ex <- 66
y <- 0:10
lam <- alpha / beta

# Predictive probabilities f(y) via the prior/posterior gamma ratio identity
py <- dpois(y, lam * ex) * dgamma(lam, shape = alpha, rate = beta) /
  dgamma(lam, shape = alpha + y, rate = beta + ex)
cbind(y, round(py, 3))

# 1000 draws from the posterior Gamma(alpha + yobs, beta + ex)
lambdaA <- rgamma(1000, shape = alpha + yobs, rate = beta + ex)

# Hospital B: much larger exposure, four observed deaths
ex <- 1767
yobs <- 4
y <- 0:10
py <- dpois(y, lam * ex) * dgamma(lam, shape = alpha, rate = beta) /
  dgamma(lam, shape = alpha + y, rate = beta + ex)
cbind(y, round(py, 3))
lambdaB <- rgamma(1000, shape = alpha + yobs, rate = beta + ex)

# Prior vs posterior density, one panel per hospital
par(mfrow = c(2, 1))
plot(density(lambdaA), main = "HOSPITAL A", xlab = "lambdaA", lwd = 3)
curve(dgamma(x, shape = alpha, rate = beta), add = TRUE)
legend("topright", legend = c("prior", "posterior"), lwd = c(1, 3))
plot(density(lambdaB), main = "HOSPITAL B", xlab = "lambdaB", lwd = 3)
curve(dgamma(x, shape = alpha, rate = beta), add = TRUE)
legend("topright", legend = c("prior", "posterior"), lwd = c(1, 3))
|
# File layout reference for the NIS 2017 core file:
# https://www.hcup-us.ahrq.gov/db/nation/nis/tools/stats/FileSpecifications_NIS_2017_Core.TXT

# Weighted discharge count (n) and weighted total length of stay per year.
# nis_core is a data.table loaded elsewhere; DISCWT is presumably the NIS
# discharge weight -- confirm against the file specification above.
nis_core[, .(n = sum(DISCWT), los = sum(LOS * DISCWT)), by = .(YEAR)]

# Weighted discharges per year and DRG, reshaped to one column per year
nis_core[, .(n = sum(DISCWT)), by = .(YEAR, DRG)] %>%
  pivot_wider(names_from = YEAR, values_from = n)

# Plot a value over year-quarter, one colored point/line series per level
# of the grouping column.
#   .df         data frame with YEAR and DQTR columns
#   .group_var  name (string) of the grouping column
#   .value_var  name (string) of the y-axis column, defaults to "value"
plot_yq <- function(.df, .group_var, .value_var = "value") {
  .df %>%
    mutate(yq = paste(YEAR, DQTR)) %>%   # "YEAR DQTR" label for the x axis
    ggplot(aes(
      yq, !!rlang::sym(.value_var),
      col = !!rlang::sym(.group_var),
      group = !!rlang::sym(.group_var)
    )) +
    geom_point() +
    geom_line()
}

# Summarise weighted discharges by year/quarter for one categorical column
# (given as a string), then plot the quarterly trend per category level.
summarise_category_and_plot <- function(.var) {
  summary <- nis_core[, .(value = sum(DISCWT)), by = c("YEAR", "DQTR", .var)] %>%
    filter(DQTR %in% 1:4) %>%            # keep only valid quarter codes
    drop_na()
  summary[[.var]] <- as.factor(summary[[.var]])  # force a discrete color scale
  plot_yq(summary, .var)
}

# Quarterly discharge trends for demographic / hospital covariates
summarise_category_and_plot("age")
summarise_category_and_plot("FEMALE")
summarise_category_and_plot("HOSP_DIVISION")
summarise_category_and_plot("PL_NCHS")
summarise_category_and_plot("ZIPINC_QRTL")
summarise_category_and_plot("sameday")

# Mean length of stay per quarter, overnight stays only
nis_core[sameday == "overnight",
         .(n = sum(DISCWT), los = sum(LOS * DISCWT)),
         by = .(YEAR, DQTR)] %>%
  filter(DQTR %in% 1:4) %>%
  mutate(value = los / n, group = "Total") %>%
  plot_yq("group") +
  ylim(0, 6)

# Note taking
# Data prior to 2014
|
/202105_eda.R
|
permissive
|
healthpolicy/HPA.inpatientProjection
|
R
| false
| false
| 1,320
|
r
|
# File layout reference for the NIS 2017 core file:
# https://www.hcup-us.ahrq.gov/db/nation/nis/tools/stats/FileSpecifications_NIS_2017_Core.TXT

# Weighted discharge count (n) and weighted total length of stay per year.
# nis_core is a data.table loaded elsewhere; DISCWT is presumably the NIS
# discharge weight -- confirm against the file specification above.
nis_core[, .(n = sum(DISCWT), los = sum(LOS * DISCWT)), by = .(YEAR)]

# Weighted discharges per year and DRG, reshaped to one column per year
nis_core[, .(n = sum(DISCWT)), by = .(YEAR, DRG)] %>%
  pivot_wider(names_from = YEAR, values_from = n)

# Plot a value over year-quarter, one colored point/line series per level
# of the grouping column.
#   .df         data frame with YEAR and DQTR columns
#   .group_var  name (string) of the grouping column
#   .value_var  name (string) of the y-axis column, defaults to "value"
plot_yq <- function(.df, .group_var, .value_var = "value") {
  .df %>%
    mutate(yq = paste(YEAR, DQTR)) %>%   # "YEAR DQTR" label for the x axis
    ggplot(aes(
      yq, !!rlang::sym(.value_var),
      col = !!rlang::sym(.group_var),
      group = !!rlang::sym(.group_var)
    )) +
    geom_point() +
    geom_line()
}

# Summarise weighted discharges by year/quarter for one categorical column
# (given as a string), then plot the quarterly trend per category level.
summarise_category_and_plot <- function(.var) {
  summary <- nis_core[, .(value = sum(DISCWT)), by = c("YEAR", "DQTR", .var)] %>%
    filter(DQTR %in% 1:4) %>%            # keep only valid quarter codes
    drop_na()
  summary[[.var]] <- as.factor(summary[[.var]])  # force a discrete color scale
  plot_yq(summary, .var)
}

# Quarterly discharge trends for demographic / hospital covariates
summarise_category_and_plot("age")
summarise_category_and_plot("FEMALE")
summarise_category_and_plot("HOSP_DIVISION")
summarise_category_and_plot("PL_NCHS")
summarise_category_and_plot("ZIPINC_QRTL")
summarise_category_and_plot("sameday")

# Mean length of stay per quarter, overnight stays only
nis_core[sameday == "overnight",
         .(n = sum(DISCWT), los = sum(LOS * DISCWT)),
         by = .(YEAR, DQTR)] %>%
  filter(DQTR %in% 1:4) %>%
  mutate(value = los / n, group = "Total") %>%
  plot_yq("group") +
  ylim(0, 6)

# Note taking
# Data prior to 2014
|
library(dplyr)

# Load the household power data; '?' marks missing values in this dataset
setwd("C:/Users/rr5743/Projects/2017/Data Science/Exploratory analysis/Wk1")
powerdata <- read.table(
  "household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?"
)
powerdata$Date <- as.Date(powerdata$Date, "%d/%m/%Y")

# Keep only the two February 2007 days of interest
globalpowerFeb <- powerdata %>%
  select(Date, Global_active_power) %>%
  filter(Date %in% as.Date(c("2007-02-01", "2007-02-02")))

# Histogram of global active power for those two days
hist(
  globalpowerFeb$Global_active_power,
  main = "Global Active Power",
  xlim = c(0, 6),
  col = "red",
  xlab = "Global Active Power (kilowatts)"
)
|
/plot1.r
|
no_license
|
ramrkris/Exploratory-analysis
|
R
| false
| false
| 585
|
r
|
library(dplyr)

# Load the household power data; '?' marks missing values in this dataset
setwd("C:/Users/rr5743/Projects/2017/Data Science/Exploratory analysis/Wk1")
powerdata <- read.table(
  "household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?"
)
powerdata$Date <- as.Date(powerdata$Date, "%d/%m/%Y")

# Keep only the two February 2007 days of interest
globalpowerFeb <- powerdata %>%
  select(Date, Global_active_power) %>%
  filter(Date %in% as.Date(c("2007-02-01", "2007-02-02")))

# Histogram of global active power for those two days
hist(
  globalpowerFeb$Global_active_power,
  main = "Global Active Power",
  xlim = c(0, 6),
  col = "red",
  xlab = "Global Active Power (kilowatts)"
)
|
# Standard testthat entry point: run the full test suite for the viafr
# package (tests live under tests/testthat/).
library(testthat)
library(viafr)
test_check("viafr")
|
/tests/testthat.R
|
no_license
|
cran/viafr
|
R
| false
| false
| 58
|
r
|
# Standard testthat entry point: run the full test suite for the viafr
# package (tests live under tests/testthat/).
library(testthat)
library(viafr)
test_check("viafr")
|
\name{SDMXType-class}
\docType{class}
\alias{SDMXType-class}
\alias{SDMXType}
\alias{SDMXType-method}
\title{Class "SDMXType"}
\description{ A basic class to handle the type of a SDMX-ML document}
\section{Objects from the Class}{are never to be generated; used by SDMX derived classes}
\section{Slots}{
\describe{
\item{\code{type}}{Object of class "character" giving the type of the SDMX-ML document}
}
}
\section{Methods}{
\describe{
\item{\code{getType}}{Returns the type of the SDMX-ML document}
}
}
\usage{
SDMXType(xmlObj)
}
\arguments{
\item{xmlObj}{an object of class "XMLInternalDocument"}
}
\author{ Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}}
\note{
Currently, the following types have been implemented and tested: \code{GenericDataType}, \code{CompactDataType}, and \code{MessageGroupType}. The \code{MessageGroupType} was modeled only to allow reading the OECD generic data that is provided. Other message types handled in \code{MessageGroup} document types are not yet supported.
}
\section{Warning }{this class is not useful in itself, but all SDMX non-abstract classes will encapsulate it as a slot when parsing an SDMX-ML document}
\keyword{classes}
|
/man/SDMXType-class.Rd
|
no_license
|
johndharrison/rsdmx
|
R
| false
| false
| 1,193
|
rd
|
\name{SDMXType-class}
\docType{class}
\alias{SDMXType-class}
\alias{SDMXType}
\alias{SDMXType-method}
\title{Class "SDMXType"}
\description{ A basic class to handle the type of a SDMX-ML document}
\section{Objects from the Class}{are never to be generated; used by SDMX derived classes}
\section{Slots}{
\describe{
\item{\code{type}}{Object of class "character" giving the type of the SDMX-ML document}
}
}
\section{Methods}{
\describe{
\item{\code{getType}}{Returns the type of the SDMX-ML document}
}
}
\usage{
SDMXType(xmlObj)
}
\arguments{
\item{xmlObj}{an object of class "XMLInternalDocument"}
}
\author{ Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}}
\note{
Currently, the following types have been implemented and tested: \code{GenericDataType}, \code{CompactDataType}, and \code{MessageGroupType}. The \code{MessageGroupType} was modeled only to allow reading the OECD generic data that is provided. Other message types handled in \code{MessageGroup} document types are not yet supported.
}
\section{Warning }{this class is not useful in itself, but all SDMX non-abstract classes will encapsulate it as a slot when parsing an SDMX-ML document}
\keyword{classes}
|
#' @rdname put_object
#' @title Put object
#' @description Puts an object into an S3 bucket
#' @param file A character string containing the filename (or full path) of the file you want to upload to S3. Alternatively, an raw vector containing the file can be passed directly, in which case \code{object} needs to be specified explicitly.
#' @param object A character string containing the name the object should have in S3 (i.e., its "object key"). If missing, the filename is used.
#' @param folder A character string containing a folder name. (A trailing slash is not required.)
#' @template bucket
#' @param multipart A logical indicating whether to use multipart uploads. See \url{http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html}. If \code{file} is less than 100 MB, this is ignored.
#' @template acl
#' @param headers List of request headers for the REST call.
#' @template dots
#' @details This provide a generic interface for sending files (or serialized, in-memory representations thereof) to S3. Some convenience wrappers are provided for common tasks: e.g., \code{\link{s3save}} and \code{\link{s3saveRDS}}.
#'
#' Note that S3 is a flat file store. So there is no folder hierarchy as in a traditional hard drive. However, S3 allows users to create pseudo-folders by prepending object keys with \code{foldername/}. The \code{put_folder} function is provided as a high-level convenience function for creating folders. This is not actually necessary as objects with slashes in their key will be displayed in the S3 web console as if they were in folders, but it may be useful for creating an empty directory (which is possible in the web console).
#'
#' @return If successful, \code{TRUE}.
#' @examples
#' \dontrun{
#' library("datasets")
#'
#' # write file to S3
#' tmp <- tempfile()
#' on.exit(unlink(tmp))
#' utils::write.csv(mtcars, file = tmp)
#' put_object(tmp, object = "mtcars.csv", bucket = "myexamplebucket")
#'
#' # create a "folder" in a bucket
#' put_folder("example", bucket = "myexamplebucket")
#' ## write object to the "folder"
#' put_object(tmp, object = "example/mtcars.csv", bucket = "myexamplebucket")
#'
#' # write serialized, in-memory object to S3
#' x <- rawConnection(raw(0), "w")
#' utils::write.csv(mtcars, x)
#' put_object(rawConnectionValue(x), object = "mtcars.csv", bucket = "myexamplebucketname")
#'
#' # use `headers` for server-side encryption
#' ## require appropriate bucket policy
#' put_object(file = tmp, object = "mtcars.csv", bucket = "myexamplebucket",
#' headers = c('x-amz-server-side-encryption' = 'AES256'))
#'
#' # alternative "S3 URI" syntax:
#' put_object(rawConnectionValue(x), object = "s3://myexamplebucketname/mtcars.csv")
#' close(x)
#'
#' # read the object back from S3
#' read.csv(text = rawToChar(get_object(object = "s3://myexamplebucketname/mtcars.csv")))
#' }
#' @references \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html}{API Documentation}
#' @seealso \code{\link{put_bucket}}, \code{\link{get_object}}, \code{\link{delete_object}}
#' @importFrom utils head
#' @export
put_object <-
function(file,
         object,
         bucket,
         multipart = FALSE,
         acl = c("private", "public-read", "public-read-write",
                 "aws-exec-read", "authenticated-read",
                 "bucket-owner-read", "bucket-owner-full-control"),
         headers = list(),
         ...) {
    # Resolve key and bucket: default the key to the file's basename;
    # otherwise support "s3://bucket/key" URIs via get_bucketname() /
    # get_objectkey().
    if (missing(object) && is.character(file)) {
        object <- basename(file)
    } else {
        if (missing(bucket)) {
            bucket <- get_bucketname(object)
        }
        object <- get_objectkey(object)
    }
    acl <- match.arg(acl)
    # The canned ACL always travels as the x-amz-acl request header.
    # NOTE(review): on the multipart path the recursive per-part calls below
    # prepend x-amz-acl AGAIN (default "private") on top of these headers --
    # confirm S3 tolerates the duplicate / that the first occurrence wins.
    headers <- c(list(`x-amz-acl` = acl), headers)
    if (isTRUE(multipart)) {
        # Multipart path operates on an in-memory raw vector.
        if (is.character(file) && file.exists(file)) {
            file <- readBin(file, what = "raw")
        }
        size <- length(file)
        partsize <- 1e8 # 100 MB
        nparts <- ceiling(size/partsize)
        # if file is small, there is no need for multipart upload
        if (size < partsize) {
            put_object(file = file, object = object, bucket = bucket, multipart = FALSE, headers = headers, ...)
            return(TRUE)
        }
        # function to call abort if any part fails (so S3 does not keep
        # orphaned parts for this uploadId)
        abort <- function(id) delete_object(object = object, bucket = bucket, query = list(uploadId = id), ...)
        # split object into parts; `file` is consumed destructively, 100 MB
        # at a time, to bound peak memory
        seqparts <- seq_len(partsize)
        parts <- list()
        for (i in seq_len(nparts)) {
            parts[[i]] <- head(file, partsize)
            if (i < nparts) {
                file <- file[-seqparts]
            }
        }
        # initialize the upload: POST with the "uploads" query returns the
        # UploadId all subsequent part requests must carry
        initialize <- post_object(file = NULL, object = object, bucket = bucket, query = list(uploads = ""), headers = headers, ...)
        id <- initialize[["UploadId"]]
        # loop over parts, uploading each with its partNumber and recording
        # the returned ETag for the completion manifest
        partlist <- list(Number = character(length(parts)),
                         ETag = character(length(parts)))
        for (i in seq_along(parts)) {
            query <- list(partNumber = i, uploadId = id)
            r <- try(put_object(file = parts[[i]], object = object, bucket = bucket,
                                multipart = FALSE, headers = headers, query = query),
                     silent = FALSE)
            if (inherits(r, "try-error")) {
                abort(id)
                stop("Multipart upload failed.")
            } else {
                partlist[["Number"]][i] <- i
                partlist[["ETag"]][i] <- attributes(r)[["ETag"]]
            }
        }
        # complete the upload by posting the part-number/ETag manifest
        complete_parts(object = object, bucket = bucket, id = id, parts = partlist, ...)
        return(TRUE)
    } else {
        # Simple path: a single PUT with an explicit Content-Length
        # (on-disk size for file paths, vector length for raw payloads).
        r <- s3HTTP(verb = "PUT",
                    bucket = bucket,
                    path = paste0('/', object),
                    headers = c(headers, list(
                      `Content-Length` = ifelse(is.character(file) && file.exists(file),
                                                file.size(file), length(file))
                    )),
                    request_body = file,
                    ...)
        return(TRUE)
    }
}
#' @rdname put_object
#' @export
put_folder <- function(folder, bucket, ...) {
    # S3 pseudo-folders are zero-byte objects whose key ends in "/";
    # append the slash unless the caller already supplied one.
    key <- if (grepl("/$", folder)) folder else paste0(folder, "/")
    put_object(raw(0), object = key, bucket = bucket, ...)
}
# Issue a POST against an object key (used to initiate and complete
# multipart uploads). Returns the response tagged with class "s3_object".
post_object <- function(file, object, bucket, headers = list(), ...) {
    # Resolve key/bucket exactly as put_object does: default the key to the
    # file's basename, otherwise accept "s3://bucket/key" URIs.
    if (missing(object) && is.character(file)) {
        object <- basename(file)
    } else {
        if (missing(bucket)) {
            bucket <- get_bucketname(object)
        }
        object <- get_objectkey(object)
    }
    # Content-Length: on-disk size for file paths, vector length otherwise.
    len <- ifelse(is.character(file) && file.exists(file),
                  file.size(file), length(file))
    resp <- s3HTTP(verb = "POST",
                   bucket = bucket,
                   path = paste0("/", object),
                   headers = c(headers, list(`Content-Length` = len)),
                   request_body = file,
                   ...)
    structure(resp, class = "s3_object")
}
# List the parts uploaded so far for a multipart upload: GET on the object
# key with the uploadId query parameter.
list_parts <- function(object, bucket, id, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    get_object(object = key, bucket = bucket, query = list(uploadId = id), ...)
}
# Upload a single multipart part: a plain PUT carrying the partNumber and
# uploadId query parameters.
upload_part <- function(part, object, bucket, number, id, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    put_object(file = part, object = key, bucket = bucket,
               query = list(partNumber = number, uploadId = id),
               multipart = FALSE, ...)
}
# Finish a multipart upload by POSTing the CompleteMultipartUpload manifest
# built from the collected part numbers and ETags.
complete_parts <- function(object, bucket, id, parts, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    # One <Part> element per uploaded part, concatenated in order.
    part_xml <- paste0("<Part><PartNumber>", parts[["Number"]], "</PartNumber>",
                       "<ETag>", parts[["ETag"]], "</ETag></Part>", collapse = "")
    bod <- paste0("<CompleteMultipartUpload>", part_xml,
                  "</CompleteMultipartUpload>", collapse = "")
    post_object(object = key, bucket = bucket, query = list(uploadId = id), body = bod, ...)
}
|
/R/put_object.R
|
no_license
|
kaneplusplus/aws.s3
|
R
| false
| false
| 8,387
|
r
|
#' @rdname put_object
#' @title Put object
#' @description Puts an object into an S3 bucket
#' @param file A character string containing the filename (or full path) of the file you want to upload to S3. Alternatively, an raw vector containing the file can be passed directly, in which case \code{object} needs to be specified explicitly.
#' @param object A character string containing the name the object should have in S3 (i.e., its "object key"). If missing, the filename is used.
#' @param folder A character string containing a folder name. (A trailing slash is not required.)
#' @template bucket
#' @param multipart A logical indicating whether to use multipart uploads. See \url{http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html}. If \code{file} is less than 100 MB, this is ignored.
#' @template acl
#' @param headers List of request headers for the REST call.
#' @template dots
#' @details This provide a generic interface for sending files (or serialized, in-memory representations thereof) to S3. Some convenience wrappers are provided for common tasks: e.g., \code{\link{s3save}} and \code{\link{s3saveRDS}}.
#'
#' Note that S3 is a flat file store. So there is no folder hierarchy as in a traditional hard drive. However, S3 allows users to create pseudo-folders by prepending object keys with \code{foldername/}. The \code{put_folder} function is provided as a high-level convenience function for creating folders. This is not actually necessary as objects with slashes in their key will be displayed in the S3 web console as if they were in folders, but it may be useful for creating an empty directory (which is possible in the web console).
#'
#' @return If successful, \code{TRUE}.
#' @examples
#' \dontrun{
#' library("datasets")
#'
#' # write file to S3
#' tmp <- tempfile()
#' on.exit(unlink(tmp))
#' utils::write.csv(mtcars, file = tmp)
#' put_object(tmp, object = "mtcars.csv", bucket = "myexamplebucket")
#'
#' # create a "folder" in a bucket
#' put_folder("example", bucket = "myexamplebucket")
#' ## write object to the "folder"
#' put_object(tmp, object = "example/mtcars.csv", bucket = "myexamplebucket")
#'
#' # write serialized, in-memory object to S3
#' x <- rawConnection(raw(0), "w")
#' utils::write.csv(mtcars, x)
#' put_object(rawConnectionValue(x), object = "mtcars.csv", bucket = "myexamplebucketname")
#'
#' # use `headers` for server-side encryption
#' ## require appropriate bucket policy
#' put_object(file = tmp, object = "mtcars.csv", bucket = "myexamplebucket",
#' headers = c('x-amz-server-side-encryption' = 'AES256'))
#'
#' # alternative "S3 URI" syntax:
#' put_object(rawConnectionValue(x), object = "s3://myexamplebucketname/mtcars.csv")
#' close(x)
#'
#' # read the object back from S3
#' read.csv(text = rawToChar(get_object(object = "s3://myexamplebucketname/mtcars.csv")))
#' }
#' @references \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html}{API Documentation}
#' @seealso \code{\link{put_bucket}}, \code{\link{get_object}}, \code{\link{delete_object}}
#' @importFrom utils head
#' @export
put_object <-
function(file,
         object,
         bucket,
         multipart = FALSE,
         acl = c("private", "public-read", "public-read-write",
                 "aws-exec-read", "authenticated-read",
                 "bucket-owner-read", "bucket-owner-full-control"),
         headers = list(),
         ...) {
    # Resolve key and bucket: default the key to the file's basename;
    # otherwise support "s3://bucket/key" URIs via get_bucketname() /
    # get_objectkey().
    if (missing(object) && is.character(file)) {
        object <- basename(file)
    } else {
        if (missing(bucket)) {
            bucket <- get_bucketname(object)
        }
        object <- get_objectkey(object)
    }
    acl <- match.arg(acl)
    # The canned ACL always travels as the x-amz-acl request header.
    # NOTE(review): on the multipart path the recursive per-part calls below
    # prepend x-amz-acl AGAIN (default "private") on top of these headers --
    # confirm S3 tolerates the duplicate / that the first occurrence wins.
    headers <- c(list(`x-amz-acl` = acl), headers)
    if (isTRUE(multipart)) {
        # Multipart path operates on an in-memory raw vector.
        if (is.character(file) && file.exists(file)) {
            file <- readBin(file, what = "raw")
        }
        size <- length(file)
        partsize <- 1e8 # 100 MB
        nparts <- ceiling(size/partsize)
        # if file is small, there is no need for multipart upload
        if (size < partsize) {
            put_object(file = file, object = object, bucket = bucket, multipart = FALSE, headers = headers, ...)
            return(TRUE)
        }
        # function to call abort if any part fails (so S3 does not keep
        # orphaned parts for this uploadId)
        abort <- function(id) delete_object(object = object, bucket = bucket, query = list(uploadId = id), ...)
        # split object into parts; `file` is consumed destructively, 100 MB
        # at a time, to bound peak memory
        seqparts <- seq_len(partsize)
        parts <- list()
        for (i in seq_len(nparts)) {
            parts[[i]] <- head(file, partsize)
            if (i < nparts) {
                file <- file[-seqparts]
            }
        }
        # initialize the upload: POST with the "uploads" query returns the
        # UploadId all subsequent part requests must carry
        initialize <- post_object(file = NULL, object = object, bucket = bucket, query = list(uploads = ""), headers = headers, ...)
        id <- initialize[["UploadId"]]
        # loop over parts, uploading each with its partNumber and recording
        # the returned ETag for the completion manifest
        partlist <- list(Number = character(length(parts)),
                         ETag = character(length(parts)))
        for (i in seq_along(parts)) {
            query <- list(partNumber = i, uploadId = id)
            r <- try(put_object(file = parts[[i]], object = object, bucket = bucket,
                                multipart = FALSE, headers = headers, query = query),
                     silent = FALSE)
            if (inherits(r, "try-error")) {
                abort(id)
                stop("Multipart upload failed.")
            } else {
                partlist[["Number"]][i] <- i
                partlist[["ETag"]][i] <- attributes(r)[["ETag"]]
            }
        }
        # complete the upload by posting the part-number/ETag manifest
        complete_parts(object = object, bucket = bucket, id = id, parts = partlist, ...)
        return(TRUE)
    } else {
        # Simple path: a single PUT with an explicit Content-Length
        # (on-disk size for file paths, vector length for raw payloads).
        r <- s3HTTP(verb = "PUT",
                    bucket = bucket,
                    path = paste0('/', object),
                    headers = c(headers, list(
                      `Content-Length` = ifelse(is.character(file) && file.exists(file),
                                                file.size(file), length(file))
                    )),
                    request_body = file,
                    ...)
        return(TRUE)
    }
}
#' @rdname put_object
#' @export
put_folder <- function(folder, bucket, ...) {
    # S3 pseudo-folders are zero-byte objects whose key ends in "/";
    # append the slash unless the caller already supplied one.
    key <- if (grepl("/$", folder)) folder else paste0(folder, "/")
    put_object(raw(0), object = key, bucket = bucket, ...)
}
# Issue a POST against an object key (used to initiate and complete
# multipart uploads). Returns the response tagged with class "s3_object".
post_object <- function(file, object, bucket, headers = list(), ...) {
    # Resolve key/bucket exactly as put_object does: default the key to the
    # file's basename, otherwise accept "s3://bucket/key" URIs.
    if (missing(object) && is.character(file)) {
        object <- basename(file)
    } else {
        if (missing(bucket)) {
            bucket <- get_bucketname(object)
        }
        object <- get_objectkey(object)
    }
    # Content-Length: on-disk size for file paths, vector length otherwise.
    len <- ifelse(is.character(file) && file.exists(file),
                  file.size(file), length(file))
    resp <- s3HTTP(verb = "POST",
                   bucket = bucket,
                   path = paste0("/", object),
                   headers = c(headers, list(`Content-Length` = len)),
                   request_body = file,
                   ...)
    structure(resp, class = "s3_object")
}
# List the parts uploaded so far for a multipart upload: GET on the object
# key with the uploadId query parameter.
list_parts <- function(object, bucket, id, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    get_object(object = key, bucket = bucket, query = list(uploadId = id), ...)
}
# Upload a single multipart part: a plain PUT carrying the partNumber and
# uploadId query parameters.
upload_part <- function(part, object, bucket, number, id, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    put_object(file = part, object = key, bucket = bucket,
               query = list(partNumber = number, uploadId = id),
               multipart = FALSE, ...)
}
# Finish a multipart upload by POSTing the CompleteMultipartUpload manifest
# built from the collected part numbers and ETags.
complete_parts <- function(object, bucket, id, parts, ...) {
    if (missing(bucket)) bucket <- get_bucketname(object)
    key <- get_objectkey(object)
    # One <Part> element per uploaded part, concatenated in order.
    part_xml <- paste0("<Part><PartNumber>", parts[["Number"]], "</PartNumber>",
                       "<ETag>", parts[["ETag"]], "</ETag></Part>", collapse = "")
    bod <- paste0("<CompleteMultipartUpload>", part_xml,
                  "</CompleteMultipartUpload>", collapse = "")
    post_object(object = key, bucket = bucket, query = list(uploadId = id), body = bod, ...)
}
|
# Example script (extracted from phylosim help): simulate site-specific
# omega (dN/dS) ratios for a codon sequence under the M4 (freqs) model.
library(phylosim)
### Name: omegaVarM4.CodonSequence
### Title: The M4 (freqs) model of variable omega ratios among sites
### Aliases: omegaVarM4.CodonSequence CodonSequence.omegaVarM4
### omegaVarM4,CodonSequence-method
### ** Examples
# create a GY94 codon substitution model object with kappa = 2
p<-GY94(kappa=2)
# create a 25-codon CodonSequence object, attach process p
s<-CodonSequence(length=25, processes=list(list(p)))
# sample states
sampleStates(s)
# sample omegas for sites 1:20 from model M4; the five probabilities
# weight the M4 omega categories (they sum to 1 here)
omegaVarM4(s,p,probs=c(2/5,1/5,1/5,1/10,1/10),1:20)
# get omega values
getOmegas(s,p)
# get a histogram of omega values in range 1:20
omegaHist(s,p,breaks=50,1:20)
|
/data/genthat_extracted_code/phylosim/examples/omegaVarM4.CodonSequence.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 642
|
r
|
# Example script (extracted from phylosim help): simulate site-specific
# omega (dN/dS) ratios for a codon sequence under the M4 (freqs) model.
library(phylosim)
### Name: omegaVarM4.CodonSequence
### Title: The M4 (freqs) model of variable omega ratios among sites
### Aliases: omegaVarM4.CodonSequence CodonSequence.omegaVarM4
### omegaVarM4,CodonSequence-method
### ** Examples
# create a GY94 codon substitution model object with kappa = 2
p<-GY94(kappa=2)
# create a 25-codon CodonSequence object, attach process p
s<-CodonSequence(length=25, processes=list(list(p)))
# sample states
sampleStates(s)
# sample omegas for sites 1:20 from model M4; the five probabilities
# weight the M4 omega categories (they sum to 1 here)
omegaVarM4(s,p,probs=c(2/5,1/5,1/5,1/10,1/10),1:20)
# get omega values
getOmegas(s,p)
# get a histogram of omega values in range 1:20
omegaHist(s,p,breaks=50,1:20)
|
# Auto-generated fuzz/regression input for a valgrind harness: a 5x7 numeric
# matrix A (a few extreme-magnitude values, rest zeros) and a 1x1 matrix B.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.19683994249215e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the internal (C-backed) match_rows with the captured argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Print the structure so the harness output can be compared to a baseline.
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613102577-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 343
|
r
|
# Auto-generated fuzz/regression input for a valgrind harness: a 5x7 numeric
# matrix A (a few extreme-magnitude values, rest zeros) and a 1x1 matrix B.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.19683994249215e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the internal (C-backed) match_rows with the captured argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Print the structure so the harness output can be compared to a baseline.
str(result)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/result.R
\docType{class}
\name{PqResult-class}
\alias{PqResult-class}
\alias{dbColumnInfo,PqResult-method}
\alias{dbGetRowCount,PqResult-method}
\alias{dbGetRowsAffected,PqResult-method}
\alias{dbGetStatement,PqResult-method}
\alias{dbIsValid,PqResult-method}
\alias{show,PqResult-method}
\title{PostgreSQL results.}
\usage{
\S4method{dbGetStatement}{PqResult}(res, ...)
\S4method{dbIsValid}{PqResult}(dbObj, ...)
\S4method{dbGetRowCount}{PqResult}(res, ...)
\S4method{dbGetRowsAffected}{PqResult}(res, ...)
\S4method{dbColumnInfo}{PqResult}(res, ...)
\S4method{show}{PqResult}(object)
}
\description{
PostgreSQL results.
}
\keyword{internal}
|
/man/PqResult-class.Rd
|
no_license
|
CesarMaalouf/RPostgres
|
R
| false
| false
| 735
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/result.R
\docType{class}
\name{PqResult-class}
\alias{PqResult-class}
\alias{dbColumnInfo,PqResult-method}
\alias{dbGetRowCount,PqResult-method}
\alias{dbGetRowsAffected,PqResult-method}
\alias{dbGetStatement,PqResult-method}
\alias{dbIsValid,PqResult-method}
\alias{show,PqResult-method}
\title{PostgreSQL results.}
\usage{
\S4method{dbGetStatement}{PqResult}(res, ...)
\S4method{dbIsValid}{PqResult}(dbObj, ...)
\S4method{dbGetRowCount}{PqResult}(res, ...)
\S4method{dbGetRowsAffected}{PqResult}(res, ...)
\S4method{dbColumnInfo}{PqResult}(res, ...)
\S4method{show}{PqResult}(object)
}
\description{
PostgreSQL results.
}
\keyword{internal}
|
#
# Author: Rui La <larui529@gmail.com>
#
# Note: this code can be run on Mac OSX
#
## download zip file from internet (skipped when the archive already exists)
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileName <- "powerData.zip"
#check if the file is exist
if (!file.exists(fileName)) {
  message("downloading file")
  download.file(fileURL, destfile = "powerData.zip", method = "curl")
}
fileName2 <- "household_power_consumption.txt"
## unzip the file (skipped when the text file is already extracted)
if (!file.exists(fileName2)) {
  message("unzip the file")
  unzip(zipfile = "powerData.zip")
}
## extract only the header plus the 1-2 Feb 2007 rows with grep.
## Fix: the original pattern "[1|2]" is a character class that also matches
## a literal "|" (harmless on this data but wrong intent); "[12]" is correct.
system("(head -1 household_power_consumption.txt;
grep '^[12]/2/2007' household_power_consumption.txt;)>mydata.csv")
## read the subset; '?' marks missing values in this dataset
Edata <- read.csv("mydata.csv", sep = ';', header = TRUE, na.strings = '?')
## output as PNG file (480x480 device)
png("plot1.png", width = 480, height = 480)
## plot the histogram of global active power
hist(Edata$Global_active_power,
     xlab = 'Global Active Power (kilowatts)',
     ylab = 'Frequency',
     col = "red",
     main = "Global Active Power")
## close PNG device so the file is flushed to disk
dev.off()
|
/plot1.R
|
no_license
|
larui529/ExData_Plotting1
|
R
| false
| false
| 1,112
|
r
|
# Plot 1: histogram of Global Active Power.
#
# Author: Rui La <larui529@gmail.com>
#
# Downloads the UCI household power consumption data (if not already
# present), extracts the rows for 1/2/2007 and 2/2/2007 with a shell
# pipeline, and writes a 480x480 PNG histogram.
# Note: the system() call needs a Unix-like shell (head/grep), so this
# runs on macOS/Linux but not plain Windows.

## Download zip file from the internet (skipped if already downloaded).
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileName <- "powerData.zip"
if (!file.exists(fileName)) {
  message("downloading file")
  download.file(fileURL, destfile = fileName, method = "curl")
}
## Unzip the archive (skipped if the txt file already exists).
fileName2 <- "household_power_consumption.txt"
if (!file.exists(fileName2)) {
  message("unzip the file")
  unzip(zipfile = fileName)
}
## Extract the header line plus the two target dates into mydata.csv.
system("(head -1 household_power_consumption.txt;
grep '^[1|2]/2/2007' household_power_consumption.txt;)>mydata.csv")
## Read the extracted subset; fields are ';'-separated and '?' marks NA.
Edata <- read.csv("mydata.csv", sep = ";", header = TRUE, na.strings = "?")
## Render the histogram to a PNG device.
png("plot1.png", width = 480, height = 480)
hist(Edata$Global_active_power,
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "red",
     main = "Global Active Power")
## Close the PNG device.
dev.off()
|
#' Format p values.
#'
#' Maps each p value onto a conventional significance label:
#' "< .001***", "< .01**", "< .05*", a trend annotation ("= x\xB0") for
#' p in [0.05, 0.1), and "> .1" otherwise. Vectorized.
#'
#' @param pvalues P values (scalar or vector).
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @export
format_p <- function(pvalues) {
  # Trend label is computed for every element (vectorized) but only
  # selected for values in [0.05, 0.1).
  trend <- paste0("= ", round(pvalues, 2), "\xB0")
  ifelse(pvalues >= 0.1, "> .1",
         ifelse(pvalues >= 0.05, trend,
                ifelse(pvalues >= 0.01, "< .05*",
                       ifelse(pvalues >= 0.001, "< .01**",
                              "< .001***"))))
}
|
/R/format_p.R
|
permissive
|
ericker/psycho.R
|
R
| false
| false
| 404
|
r
|
#' Format p values.
#'
#' Maps each p value onto a conventional significance label:
#' "< .001***", "< .01**", "< .05*", a trend annotation ("= x\xB0") for
#' p in [0.05, 0.1), and "> .1" otherwise. Vectorized.
#'
#' @param pvalues P values (scalar or vector).
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @export
format_p <- function(pvalues) {
  # Trend label is computed for every element (vectorized) but only
  # selected for values in [0.05, 0.1).
  trend <- paste0("= ", round(pvalues, 2), "\xB0")
  ifelse(pvalues >= 0.1, "> .1",
         ifelse(pvalues >= 0.05, trend,
                ifelse(pvalues >= 0.01, "< .05*",
                       ifelse(pvalues >= 0.001, "< .01**",
                              "< .001***"))))
}
|
# Exploratory script: builds character/state id objects from the Sharkey
# character report and character matrix (ontoFAST project).
# NOTE(review): this file uses the %>% pipe throughout, but only plyr is
# attached below — magrittr (or dplyr) must be loaded for %>% to exist.
# NOTE(review): table2list() (used below) is not defined in this file.
setwd("~/my-papers-2017/phyloBayesHMM/ontoFast/ontoFast/data")
library("plyr", lib.loc="~/.local/R/site-library")
# Operations over matrices and chars reports give the following subobjects.
# NOTE(review): the bare names below only print objects created later in
# the session; they error if the script is sourced top-to-bottom.
id_characters
name_characters
id_character_states
name_character_states #coding_states_report
contains_inapplicable
contains_missing
contains_polymorph
unused_chr_states
coding_states_matrix
same_chrs_patterns
taxa_missing_states
#######################
# Using pipeline
########################
# Incorporating Character Report
#
#######################
# creating character ids for all 392 characters (CHAR:1 ... CHAR:392)
paste("CHAR:",c(1:392), sep="")->id_characters
# reading characters and states
char_et_states<-read.csv("Sharkey-chars-and-states.csv", header=F, stringsAsFactors = F, na.strings = "")
# creating character name vector, named by character id
char_et_states[,1] %>% setNames(id_characters)-> name_characters
###
char_et_states %>% table2list(.) %>% setNames(id_characters) -> coding_states_report
# STATE:1..STATE:n id vector per character, one id per state in the report
coding_states_report %>%
#setNames(id_characters) %>%
lapply(function(x) {x<-paste0("STATE:", c(1:length(x)))}) -> id_character_states
# assigning states ids to state names: coding_states_report
for (x in seq_along(coding_states_report)) paste0("STATE:",
c(1:length(coding_states_report[[x]]))) -> names(coding_states_report[[x]])
########################
# Working on Matrix
#
#######################
char_matrix<-read.csv("Sharkey-matrix.csv", header=F, row.names=1, na.strings = "")
names(char_matrix)<-(id_characters) # names to data frame
names(char_matrix)[apply(char_matrix, 2, function(x) any(unique(x)=="-"))] ->contains_inapplicable #chrs with "-"
names(char_matrix)[apply(char_matrix, 2, function(x) any(unique(x)=="?"))] ->contains_missing #chrs with "?"
names(char_matrix)[apply(char_matrix, 2, function(x) any(grepl("/", unique(x))))]->contains_polymorph #contains polymorphic states
#############################
# Miscellaneous operations
#
############################
apply(char_matrix, 2, function(x) any(unique(x)=="-")) #if char contains symbol
contains_missing%>%length
apply(char_matrix, 2, function(x) (any(unique(x)=="-")*any(unique(x)=="?"))==T) #if char contains two symbols
#################################
# FUNCTIONS
#
#################################
#' @title Gives full report and all subobjects for character matrix
#' @description Takes a dataframe (character matrix) with taxa as rows and
#'   ids as column names and returns a list of derived objects:
#'   special-symbol characters, state codings, duplicated patterns and
#'   per-taxon missing-state counts.
#' @param char_matrix Character matrix (dataframe)
#' @param coding_states_report list of chrs and states from character report
#' @return List.
#' @examples
#' parse_matrix(char_matrix, coding_states_report)
parse_matrix <- function(char_matrix, coding_states_report) {
  col_ids <- names(char_matrix)
  # Characters containing the special coding symbols.
  has_inapplicable <- col_ids[apply(char_matrix, 2, function(x) any(unique(x) == "-"))]   # "-"
  has_missing <- col_ids[apply(char_matrix, 2, function(x) any(unique(x) == "?"))]        # "?"
  has_polymorph <- col_ids[apply(char_matrix, 2, function(x) any(grepl("/", unique(x))))] # "0/1" etc.
  # Derived summaries via the helper functions in this file.
  states_from_matrix <- coding_states_mt(char_matrix)
  duplicated_patterns <- same_patterns(char_matrix)
  unused <- unused_states(states_from_matrix, coding_states_report)
  missing_per_taxon <- taxa_missing_states(char_matrix)
  list(
    contains_inapplicable = has_inapplicable,
    contains_missing = has_missing,
    contains_polymorph = has_polymorph,
    unused_chr_states = unused,
    coding_states_matrix = states_from_matrix,
    same_chrs_patterns = duplicated_patterns,
    taxa_missing_states = missing_per_taxon
  )
}
# Ad-hoc export: flag, per character id, whether it contains inapplicable
# ("-") states, written to contains_inaplic.csv.
# NOTE(review): relies on %>% (magrittr not attached) and a global
# `mt_data` not defined in this file — leftover interactive code.
cbind(paste0("CHAR:", c(1:392)),(paste0("CHAR:", c(1:392)) %in% mt_data$contains_inapplicable)) %>% write.csv(., file="contains_inaplic.csv")
getwd()
#' @title Check if enumeration of states in matrix is sequential e.g. 0, 1, 2, ... per each character
#' @description Returns the ids of characters whose integer state codes are
#'   not a gapless 0..max sequence. Each character is assigned its own ID
#'   CHAR:XXXX.
#' @param char_matrix Character matrix (dataframe)
#' @return Character vector of non-sequential character ids.
#' @examples
#' not_seq=enumeration_not_seq(char_matrix)
#' lapply(not_seq, function(x) levels(char_matrix[[x]])) %>% setNames(not_seq) # get char info for not sequential characters
enumeration_not_seq <- function(char_matrix) {
  # Integer-coded states per character; entries containing any non-digit
  # ("-", "?", polymorphics like "0/1") are dropped before conversion.
  # lapply over columns (not apply) so the result is always a list, even
  # when every character has the same number of distinct states.
  numeric_states <- lapply(char_matrix, function(col) {
    vals <- unique(as.character(col))
    as.numeric(vals[!grepl("\\D", vals)])
  })
  # Sequential <=> sorted codes equal 0, 1, ..., max.
  # NOTE(review): a character with no numeric states errors here (max of
  # empty vector), as in the original implementation.
  is_sequential <- vapply(numeric_states, function(v) {
    identical(sort(v), as.numeric(0:max(v)))
  }, logical(1))
  names(which(!is_sequential))
}
#' @title Unused char states; compares state sets between report and matrix
#' @description Returns a list with two components: unused_matrix (state ids
#'   present in the report but absent from the matrix) and unused_chrs_report
#'   (state ids present in the matrix but absent from the report).
#'   Characters with no discrepancy are omitted from either component.
#'   Each character is assigned its own ID CHAR:XXXX.
#' @param coding_states_matrix list of states and chars from matrix
#' @param coding_states_report list of state and chars from report
#' @return The list.
#' @examples
#' unused_states(coding_states_matrix, coding_states_report)->unused_chr_states
unused_states <- function(coding_states_matrix, coding_states_report) {
  matrix_state_ids <- lapply(coding_states_matrix, names)
  report_state_ids <- lapply(coding_states_report, names)
  result <- list(unused_matrix = list(), unused_chrs_report = list())
  for (i in seq_along(matrix_state_ids)) {
    char_id <- names(matrix_state_ids[i])
    only_in_matrix <- setdiff(matrix_state_ids[[i]], report_state_ids[[i]])
    only_in_report <- setdiff(report_state_ids[[i]], matrix_state_ids[[i]])
    if (length(only_in_matrix) > 0) {
      result$unused_chrs_report[[char_id]] <- only_in_matrix
    }
    if (length(only_in_report) > 0) {
      result$unused_matrix[[char_id]] <- only_in_report
    }
  }
  result
}
#' @title List of ids: states and chars (including coding symbols) from matrix
#' @description Builds, per character (column), the sorted vector of integer
#'   state codes found in the matrix, named "STATE:k" with k = code + 1.
#'   Entries containing any non-digit ("-", "?", polymorphics like "0/1")
#'   are dropped before conversion.
#' @param char_matrix character matrix (dataframe, taxa in rows)
#' @return Named list (one element per character) of named numeric vectors.
#' @examples
#' coding_states_mt(char_matrix)->coding_states_matrix
coding_states_mt <- function(char_matrix) {
  # lapply over columns (not apply) so the result is always a list, even
  # when every character happens to have the same number of states.
  states <- lapply(char_matrix, function(col) {
    vals <- unique(as.character(col))
    sort(as.numeric(vals[!grepl("\\D", vals)]))
  })
  for (i in seq_along(states)) {
    names(states[[i]]) <- paste0("STATE:", states[[i]] + 1)
  }
  states
}
#' @title Characters with the same patterns are returned
#' @description Same chrs pattern
#' @param char_matrix character matrix
#' @return The list.
#' @examples
#' same_patterns(char_matrix)->same_chrs_patterns
##########
# NOTE(review): leftover interactive/debugging calls; `dt_rates` and
# `pattern_str` are not defined in this file, so these lines error when
# the script is sourced top-to-bottom. They also separate the roxygen
# block above from same_patterns(), detaching its documentation.
str(dt_rates)
char_matrix<-dt_rates
unlist(same_patterns(dt_rates))
unique(pattern_str)
# Return groups of characters sharing an identical state pattern across
# taxa. Each element of the returned list is a vector of column names with
# the same (recoded) pattern; no duplicates yields an empty list.
# Base-R replacement for plyr::mapvalues — the %>% pipe and plyr recode of
# the original are unavailable/undeclared here; match() against unique()
# maps each value to its first-occurrence index, exactly as the original.
same_patterns <- function(char_matrix) {
  char_chr <- apply(char_matrix, 2, as.character)
  # Recode each column so the k-th distinct value becomes "k": two columns
  # then share a pattern string iff their state partitions are identical.
  recoded <- apply(char_chr, 2, function(col) as.character(match(col, unique(col))))
  pattern_str <- apply(recoded, 2, paste, collapse = "")
  dup_patterns <- unique(pattern_str[duplicated(pattern_str)])
  # seq_along (not 1:length) so zero duplicated patterns returns list().
  lapply(seq_along(dup_patterns), function(k) {
    names(char_matrix[, which(pattern_str == dup_patterns[k]), drop = FALSE])
  })
}
#' @title Number of missing characters per each taxon
#' @description Counts "?" entries per row (taxon) and returns the counts
#'   sorted in decreasing order, named by taxon.
#' @param char_matrix character matrix (dataframe, taxa in rows)
#' @return Named integer vector, most-missing taxon first.
#' @examples
#' taxa_missing_states(char_matrix)->taxa_missing_chrs_states
#'
taxa_missing_states <- function(char_matrix) {
  # sum(..., na.rm = TRUE) matches the original length(which(...)):
  # NA cells never count as missing-state markers.
  miss <- apply(char_matrix, 1, function(row) sum(row == "?", na.rm = TRUE))
  sort(miss, decreasing = TRUE)
}
|
/test/Matrix_chrs_report.R
|
no_license
|
sergeitarasov/ontoFAST
|
R
| false
| false
| 8,209
|
r
|
# Exploratory script: builds character/state id objects from the Sharkey
# character report and character matrix (ontoFAST project).
# NOTE(review): this file uses the %>% pipe throughout, but only plyr is
# attached below — magrittr (or dplyr) must be loaded for %>% to exist.
# NOTE(review): table2list() (used below) is not defined in this file.
setwd("~/my-papers-2017/phyloBayesHMM/ontoFast/ontoFast/data")
library("plyr", lib.loc="~/.local/R/site-library")
# Operations over matrices and chars reports give the following subobjects.
# NOTE(review): the bare names below only print objects created later in
# the session; they error if the script is sourced top-to-bottom.
id_characters
name_characters
id_character_states
name_character_states #coding_states_report
contains_inapplicable
contains_missing
contains_polymorph
unused_chr_states
coding_states_matrix
same_chrs_patterns
taxa_missing_states
#######################
# Using pipeline
########################
# Incorporating Character Report
#
#######################
# creating character ids for all 392 characters (CHAR:1 ... CHAR:392)
paste("CHAR:",c(1:392), sep="")->id_characters
# reading characters and states
char_et_states<-read.csv("Sharkey-chars-and-states.csv", header=F, stringsAsFactors = F, na.strings = "")
# creating character name vector, named by character id
char_et_states[,1] %>% setNames(id_characters)-> name_characters
###
char_et_states %>% table2list(.) %>% setNames(id_characters) -> coding_states_report
# STATE:1..STATE:n id vector per character, one id per state in the report
coding_states_report %>%
#setNames(id_characters) %>%
lapply(function(x) {x<-paste0("STATE:", c(1:length(x)))}) -> id_character_states
# assigning states ids to state names: coding_states_report
for (x in seq_along(coding_states_report)) paste0("STATE:",
c(1:length(coding_states_report[[x]]))) -> names(coding_states_report[[x]])
########################
# Working on Matrix
#
#######################
char_matrix<-read.csv("Sharkey-matrix.csv", header=F, row.names=1, na.strings = "")
names(char_matrix)<-(id_characters) # names to data frame
names(char_matrix)[apply(char_matrix, 2, function(x) any(unique(x)=="-"))] ->contains_inapplicable #chrs with "-"
names(char_matrix)[apply(char_matrix, 2, function(x) any(unique(x)=="?"))] ->contains_missing #chrs with "?"
names(char_matrix)[apply(char_matrix, 2, function(x) any(grepl("/", unique(x))))]->contains_polymorph #contains polymorphic states
#############################
# Miscellaneous operations
#
############################
apply(char_matrix, 2, function(x) any(unique(x)=="-")) #if char contains symbol
contains_missing%>%length
apply(char_matrix, 2, function(x) (any(unique(x)=="-")*any(unique(x)=="?"))==T) #if char contains two symbols
#################################
# FUNCTIONS
#
#################################
#' @title Gives full report and all subobjects for character matrix
#' @description Takes a dataframe (character matrix) with taxa as rows and
#'   ids as column names and returns a list of derived objects:
#'   special-symbol characters, state codings, duplicated patterns and
#'   per-taxon missing-state counts.
#' @param char_matrix Character matrix (dataframe)
#' @param coding_states_report list of chrs and states from character report
#' @return List.
#' @examples
#' parse_matrix(char_matrix, coding_states_report)
parse_matrix <- function(char_matrix, coding_states_report) {
  col_ids <- names(char_matrix)
  # Characters containing the special coding symbols.
  has_inapplicable <- col_ids[apply(char_matrix, 2, function(x) any(unique(x) == "-"))]   # "-"
  has_missing <- col_ids[apply(char_matrix, 2, function(x) any(unique(x) == "?"))]        # "?"
  has_polymorph <- col_ids[apply(char_matrix, 2, function(x) any(grepl("/", unique(x))))] # "0/1" etc.
  # Derived summaries via the helper functions in this file.
  states_from_matrix <- coding_states_mt(char_matrix)
  duplicated_patterns <- same_patterns(char_matrix)
  unused <- unused_states(states_from_matrix, coding_states_report)
  missing_per_taxon <- taxa_missing_states(char_matrix)
  list(
    contains_inapplicable = has_inapplicable,
    contains_missing = has_missing,
    contains_polymorph = has_polymorph,
    unused_chr_states = unused,
    coding_states_matrix = states_from_matrix,
    same_chrs_patterns = duplicated_patterns,
    taxa_missing_states = missing_per_taxon
  )
}
# Ad-hoc export: flag, per character id, whether it contains inapplicable
# ("-") states, written to contains_inaplic.csv.
# NOTE(review): relies on %>% (magrittr not attached) and a global
# `mt_data` not defined in this file — leftover interactive code.
cbind(paste0("CHAR:", c(1:392)),(paste0("CHAR:", c(1:392)) %in% mt_data$contains_inapplicable)) %>% write.csv(., file="contains_inaplic.csv")
getwd()
#' @title Check if enumeration of states in matrix is sequential e.g. 0, 1, 2, ... per each character
#' @description Returns the ids of characters whose integer state codes are
#'   not a gapless 0..max sequence. Each character is assigned its own ID
#'   CHAR:XXXX.
#' @param char_matrix Character matrix (dataframe)
#' @return Character vector of non-sequential character ids.
#' @examples
#' not_seq=enumeration_not_seq(char_matrix)
#' lapply(not_seq, function(x) levels(char_matrix[[x]])) %>% setNames(not_seq) # get char info for not sequential characters
enumeration_not_seq <- function(char_matrix) {
  # Integer-coded states per character; entries containing any non-digit
  # ("-", "?", polymorphics like "0/1") are dropped before conversion.
  # lapply over columns (not apply) so the result is always a list, even
  # when every character has the same number of distinct states.
  numeric_states <- lapply(char_matrix, function(col) {
    vals <- unique(as.character(col))
    as.numeric(vals[!grepl("\\D", vals)])
  })
  # Sequential <=> sorted codes equal 0, 1, ..., max.
  # NOTE(review): a character with no numeric states errors here (max of
  # empty vector), as in the original implementation.
  is_sequential <- vapply(numeric_states, function(v) {
    identical(sort(v), as.numeric(0:max(v)))
  }, logical(1))
  names(which(!is_sequential))
}
#' @title Unused char states; compares state sets between report and matrix
#' @description Returns a list with two components: unused_matrix (state ids
#'   present in the report but absent from the matrix) and unused_chrs_report
#'   (state ids present in the matrix but absent from the report).
#'   Characters with no discrepancy are omitted from either component.
#'   Each character is assigned its own ID CHAR:XXXX.
#' @param coding_states_matrix list of states and chars from matrix
#' @param coding_states_report list of state and chars from report
#' @return The list.
#' @examples
#' unused_states(coding_states_matrix, coding_states_report)->unused_chr_states
unused_states <- function(coding_states_matrix, coding_states_report) {
  matrix_state_ids <- lapply(coding_states_matrix, names)
  report_state_ids <- lapply(coding_states_report, names)
  result <- list(unused_matrix = list(), unused_chrs_report = list())
  for (i in seq_along(matrix_state_ids)) {
    char_id <- names(matrix_state_ids[i])
    only_in_matrix <- setdiff(matrix_state_ids[[i]], report_state_ids[[i]])
    only_in_report <- setdiff(report_state_ids[[i]], matrix_state_ids[[i]])
    if (length(only_in_matrix) > 0) {
      result$unused_chrs_report[[char_id]] <- only_in_matrix
    }
    if (length(only_in_report) > 0) {
      result$unused_matrix[[char_id]] <- only_in_report
    }
  }
  result
}
#' @title List of ids: states and chars (including coding symbols) from matrix
#' @description Builds, per character (column), the sorted vector of integer
#'   state codes found in the matrix, named "STATE:k" with k = code + 1.
#'   Entries containing any non-digit ("-", "?", polymorphics like "0/1")
#'   are dropped before conversion.
#' @param char_matrix character matrix (dataframe, taxa in rows)
#' @return Named list (one element per character) of named numeric vectors.
#' @examples
#' coding_states_mt(char_matrix)->coding_states_matrix
coding_states_mt <- function(char_matrix) {
  # lapply over columns (not apply) so the result is always a list, even
  # when every character happens to have the same number of states.
  states <- lapply(char_matrix, function(col) {
    vals <- unique(as.character(col))
    sort(as.numeric(vals[!grepl("\\D", vals)]))
  })
  for (i in seq_along(states)) {
    names(states[[i]]) <- paste0("STATE:", states[[i]] + 1)
  }
  states
}
#' @title Characters with the same patterns are returned
#' @description Same chrs pattern
#' @param char_matrix character matrix
#' @return The list.
#' @examples
#' same_patterns(char_matrix)->same_chrs_patterns
##########
# NOTE(review): leftover interactive/debugging calls; `dt_rates` and
# `pattern_str` are not defined in this file, so these lines error when
# the script is sourced top-to-bottom. They also separate the roxygen
# block above from same_patterns(), detaching its documentation.
str(dt_rates)
char_matrix<-dt_rates
unlist(same_patterns(dt_rates))
unique(pattern_str)
# Return groups of characters sharing an identical state pattern across
# taxa. Each element of the returned list is a vector of column names with
# the same (recoded) pattern; no duplicates yields an empty list.
# Base-R replacement for plyr::mapvalues — the %>% pipe and plyr recode of
# the original are unavailable/undeclared here; match() against unique()
# maps each value to its first-occurrence index, exactly as the original.
same_patterns <- function(char_matrix) {
  char_chr <- apply(char_matrix, 2, as.character)
  # Recode each column so the k-th distinct value becomes "k": two columns
  # then share a pattern string iff their state partitions are identical.
  recoded <- apply(char_chr, 2, function(col) as.character(match(col, unique(col))))
  pattern_str <- apply(recoded, 2, paste, collapse = "")
  dup_patterns <- unique(pattern_str[duplicated(pattern_str)])
  # seq_along (not 1:length) so zero duplicated patterns returns list().
  lapply(seq_along(dup_patterns), function(k) {
    names(char_matrix[, which(pattern_str == dup_patterns[k]), drop = FALSE])
  })
}
#' @title Number of missing characters per each taxon
#' @description Counts "?" entries per row (taxon) and returns the counts
#'   sorted in decreasing order, named by taxon.
#' @param char_matrix character matrix (dataframe, taxa in rows)
#' @return Named integer vector, most-missing taxon first.
#' @examples
#' taxa_missing_states(char_matrix)->taxa_missing_chrs_states
#'
taxa_missing_states <- function(char_matrix) {
  # sum(..., na.rm = TRUE) matches the original length(which(...)):
  # NA cells never count as missing-state markers.
  miss <- apply(char_matrix, 1, function(row) sum(row == "?", na.rm = TRUE))
  sort(miss, decreasing = TRUE)
}
|
# MEPS summary-table script: mean expenditure per event, by event type and
# age group.
# NOTE(review): this is a code-generation TEMPLATE, not directly runnable
# R. Tokens such as .year., .yy., .FYC., .RX. are placeholders substituted
# when the per-year script is generated; leave them intact.
# NOTE(review): read_sas() requires the haven package (not loaded here);
# stack_events() comes from the MEPS package installed below.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file (full-year consolidated person-level file)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Older files use differently named design/weight variables; normalize.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative AGE values with NA, then take the first non-missing age.
# NOTE(review): funs() is deprecated in current dplyr; use list(~ ...).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(agegrps,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
# PR.yy.X combines the PV and TR payment columns; OZ.yy.X sums the
# remaining payment-source columns.
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
# Survey design object; means restricted to non-negative expenditures.
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
results <- svyby(~XP.yy.X, FUN=svymean, by = ~agegrps + event, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
|
/mepstrends/hc_use/json/code/r/meanEVT__event__agegrps__.r
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false
| false
| 3,121
|
r
|
# MEPS summary-table script: mean expenditure per event, by event type and
# age group.
# NOTE(review): this is a code-generation TEMPLATE, not directly runnable
# R. Tokens such as .year., .yy., .FYC., .RX. are placeholders substituted
# when the per-year script is generated; leave them intact.
# NOTE(review): read_sas() requires the haven package (not loaded here);
# stack_events() comes from the MEPS package installed below.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file (full-year consolidated person-level file)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Older files use differently named design/weight variables; normalize.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative AGE values with NA, then take the first non-missing age.
# NOTE(review): funs() is deprecated in current dplyr; use list(~ ...).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(agegrps,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
# PR.yy.X combines the PV and TR payment columns; OZ.yy.X sums the
# remaining payment-source columns.
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
# Survey design object; means restricted to non-negative expenditures.
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
results <- svyby(~XP.yy.X, FUN=svymean, by = ~agegrps + event, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
|
\name{wormParse}
\alias{wormParse}
\title{
Parse chromosome, bp matched and alignment length from wormBLAST output
}
\description{
Parses relevant information from the output returned by wormDownload. Returns some of
the information in a named list. The name of each entry is the name of the
corresponding chromosome. At this stage not all available information in the
blast result is parsed.
}
\usage{
wormParse(BLASTresult)
}
\arguments{
\item{BLASTresult}{HTML data from downloadForm, containing output
from wormblast form.}
}
\author{
Author: Gerben Gaastra
Maintainer: Gerben Gaastra <gaastra@gtga.nl>
}
\references{
\url{http://www.wormbase.org/db/searches/blast_blat},
\url{www.rug.nl/gbic}
}
\examples{
## List of fields and values
query <-"tcgtttattatttgtcaccgggttccatcccccttacgtttgacaatcattgcactcact"
eValue <- "1E-16"
db <- "elegans_genome"
handle <- getCurlHandle()
## Post form and retrieve result
BLASTresult <- wormDownload(query, eValue, db, handle)
## Parse the downloaded BLAST result
list <- wormParse(BLASTresult)
}
\seealso{
\code{\link{wormGetPos}} - Get data of the first BLAST result from wormbase.org,
\code{\link{wormDownload}} - Post and download one BLAST request from wormbase.org.
}
\keyword{ package }
|
/Build-Package/BLASTnParse/man/wormParse.Rd
|
no_license
|
GerbenGaastra/BLASTnParse
|
R
| false
| false
| 1,247
|
rd
|
\name{wormParse}
\alias{wormParse}
\title{
Parse chromosome, bp matched and alignment length from wormBLAST output
}
\description{
Parses relevant information from the output returned by wormDownload. Returns some of
the information in a named list. The name of each entry is the name of the
corresponding chromosome. At this stage not all available information in the
blast result is parsed.
}
\usage{
wormParse(BLASTresult)
}
\arguments{
\item{BLASTresult}{HTML data from downloadForm, containing output
from wormblast form.}
}
\author{
Author: Gerben Gaastra
Maintainer: Gerben Gaastra <gaastra@gtga.nl>
}
\references{
\url{http://www.wormbase.org/db/searches/blast_blat},
\url{www.rug.nl/gbic}
}
\examples{
## List of fields and values
query <-"tcgtttattatttgtcaccgggttccatcccccttacgtttgacaatcattgcactcact"
eValue <- "1E-16"
db <- "elegans_genome"
handle <- getCurlHandle()
## Post form and retrieve result
BLASTresult <- wormDownload(query, eValue, db, handle)
## Parse the result
list <- wormParse(BLASTresult)
}
\seealso{
\code{\link{wormGetPos}} - Get data of the first BLAST result from wormbase.org,
\code{\link{wormDownload}} - Post and download one BLAST request from wormbase.org.
}
\keyword{ package }
|
# Title: Devereux Student Strengths Assessment (DESSA) - Spring 2019
# Author: Annick Eudes JB Research Manager
# Date: July 23, 2018
# Version: 1.0
# Reviewed by :
# Date :
# ---- Preliminaries ----
# Loading the packages that we might need :
# Don't forget to load these at each session....
# library(tidyverse)
# library(ggplot2)
# library(plyr)
# library(dplyr)
# ---- Step 3 (B) : Determining DESSA Percentile Scores ----
# attach(sel_data_imputed) # Use this if you are starting a new R session.
# ---- 3.1 Percentile scores for each child's Personal Responsibility ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 8 map to 1,
# scores 9-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
PRPercentile <- local({
  pct <- c(2, 3, 4, 4, 7, 8, 10, 12, 14, 16, 18, 21, 27, 31, 34, 38,
           42, 46, 50, 54, 62, 66, 73, 79, 82, 86, 88, 90, 93, 96, 97, 99)
  idx <- match(Personal_Responsibility, 9:40)
  ifelse(Personal_Responsibility <= 8, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Personal Responsibility to the dataset:
sel_data_imputed$PRPercentile <- PRPercentile
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
table_PRPercentile_PR_Child <- select(sel_data_imputed, Child_ID, Personal_Responsibility, PRPercentile)
# View(table_PRPercentile_PR_Child)
# ---- 3.2 Percentile scores for each child's Optimistic Thinking ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 5 map to 1,
# scores 6-28 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
# Fix: the original NA fallback tested is.na(Personal_Responsibility) — a
# copy-paste slip from section 3.1 — instead of Optimistic_Thinking.
Percentile_OT <- local({
  pct <- c(2, 3, 4, 5, 7, 10, 14, 16, 21, 24, 31, 38, 46, 54, 62, 69,
           76, 84, 88, 92, 95, 97, 99)
  idx <- match(Optimistic_Thinking, 6:28)
  ifelse(Optimistic_Thinking <= 5, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Optimistic Thinking to the dataset:
sel_data_imputed$Percentile_OT <- Percentile_OT
# Verification: raw score and percentile side by side, per child.
table_Percentile_OT_Child <- select(sel_data_imputed, Child_ID, Optimistic_Thinking, Percentile_OT)
# View(table_Percentile_OT_Child)
# ---- 3.3 Percentile scores for each child's Goal-Directed Behavior ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_GDB <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 16, 18, 21, 24, 27, 31, 34,
           38, 42, 46, 54, 58, 62, 69, 76, 79, 82, 86, 88, 92, 93, 96, 97, 99)
  idx <- match(Goal_Directed_Behavior, 7:40)
  ifelse(Goal_Directed_Behavior <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Goal-Directed Behavior to the dataset:
sel_data_imputed$Percentile_GDB <- Percentile_GDB
# Verification: raw score and percentile side by side, per child.
table_Percentile_GDB_Child <- select(sel_data_imputed, Child_ID, Goal_Directed_Behavior, Percentile_GDB)
# View(table_Percentile_GDB_Child)
# ---- 3.4 Percentile scores for each child's Social Awareness ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-36 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_SocAw <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 10, 12, 14, 16, 18, 24, 27, 31, 38, 46,
           54, 58, 66, 73, 79, 84, 88, 92, 95, 96, 97, 98, 99)
  idx <- match(Social_Awareness, 7:36)
  ifelse(Social_Awareness <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Social Awareness to the dataset:
sel_data_imputed$Percentile_SocAw <- Percentile_SocAw
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Social_Awareness_Percentile <- select(sel_data_imputed, Child_ID, Social_Awareness, Percentile_SocAw)
# View(Social_Awareness_Percentile)
# ---- 3.5 Percentile scores for each child's Decision Making ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-32 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_DM <- local({
  pct <- c(2, 3, 4, 5, 7, 10, 12, 14, 18, 21, 24, 31, 34, 38, 42, 50,
           58, 66, 76, 82, 86, 90, 93, 96, 97, 99)
  idx <- match(Decision_Making, 7:32)
  ifelse(Decision_Making <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Decision Making to the dataset:
sel_data_imputed$Percentile_DM <- Percentile_DM
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Decision_Making_Percentile <- select(sel_data_imputed, Child_ID, Decision_Making, Percentile_DM)
# View(Decision_Making_Percentile)
# ---- 3.6 Percentile scores for each child's Relationship Skills ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 7 map to 1,
# scores 8-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_RS <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 14, 16, 21, 24, 27, 31, 34,
           38, 42, 50, 58, 62, 66, 73, 79, 82, 84, 88, 90, 92, 95, 97, 99)
  idx <- match(Relationship_Skills, 8:40)
  ifelse(Relationship_Skills <= 7, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Relationship Skills to the dataset:
sel_data_imputed$Percentile_RS <- Percentile_RS
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Relationship_Skills_Percentile <- select(sel_data_imputed, Child_ID, Relationship_Skills, Percentile_RS)
# View(Relationship_Skills_Percentile)
# ---- 3.7 Percentile scores for each child's Self-Awareness ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 3 map to 1,
# scores 4-28 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_Self_Aw <- local({
  pct <- c(2, 2, 3, 4, 5, 7, 8, 12, 16, 18, 21, 27, 34, 42, 50, 58, 69,
           76, 82, 86, 90, 93, 96, 98, 99)
  idx <- match(Self_Awareness, 4:28)
  ifelse(Self_Awareness <= 3, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Self-Awareness to the dataset:
sel_data_imputed$Percentile_Self_Aw <- Percentile_Self_Aw
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Self_Awareness_Percentile <- select(sel_data_imputed, Child_ID, Self_Awareness, Percentile_Self_Aw)
# View(Self_Awareness_Percentile)
# ---- 3.8 Percentile scores for each child's Self-Management ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 8 map to 1,
# scores 9-44 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
# Fix: the original NA fallback tested is.na(Self_Awareness) — a copy-paste
# slip from section 3.7 — instead of Self_Management.
Percentile_Self_M <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 14, 16, 18, 21, 24, 27, 31, 34,
           42, 46, 50, 58, 62, 66, 73, 79, 82, 84, 86, 90, 92, 95, 96, 97, 98, 99)
  idx <- match(Self_Management, 9:44)
  ifelse(Self_Management <= 8, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Self-Management to the dataset:
sel_data_imputed$Percentile_Self_M <- Percentile_Self_M
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
# Note: select() uses the dataset's columns, not the free-standing R objects.
Self_Management_Percentile <- select(sel_data_imputed, Child_ID, Self_Management, Percentile_Self_M)
# View(Self_Management_Percentile)
# ---- Percentile score results summary ----
# Export the percentile results to an Excel spreadsheet.
# Build a data frame with the percentiles for all 8 DESSA dimensions:
all_Percentile_Scores_table <- select(sel_data_imputed, Child_ID, PRPercentile, Percentile_OT, Percentile_GDB, Percentile_SocAw, Percentile_DM, Percentile_RS, Percentile_Self_Aw, Percentile_Self_M)
# View(all_Percentile_Scores_table)
# Write the table to an Excel file on the network share:
openxlsx::write.xlsx(all_Percentile_Scores_table, file = "P:/RE/Private/SEL Assessment Results/DESSA Scoring/results/Post_Test/Percentile_Post_Test_Scores.xlsx")
# Sanity check: confirm the file was actually created.
file.exists("P:/RE/Private/SEL Assessment Results/DESSA Scoring/results/Post_Test/Percentile_Post_Test_Scores.xlsx")
|
/Post_test/05_DESSA_post_test_Percentiles.R
|
no_license
|
aejb22122/Youth-Social-and-emotional-learning
|
R
| false
| false
| 47,395
|
r
|
# Title: Devereux Student Strengths Assessment (DESSA) - Spring 2019
# Author: Annick Eudes JB Research Manager
# Date: July 23, 2018
# Version: 1.0
# Reviewed by :
# Date :
# ---- Preliminaries ----
# Loading the packages that we might need :
# Don't forget to load these at each session....
# library(tidyverse)
# library(ggplot2)
# library(plyr)
# library(dplyr)
# ---- Step 3 (B) : Determining DESSA Percentile Scores ----
# attach(sel_data_imputed) # Use this if you are starting a new R session.
# ---- 3.1 Percentile scores for each child's Personal Responsibility ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 8 map to 1,
# scores 9-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
PRPercentile <- local({
  pct <- c(2, 3, 4, 4, 7, 8, 10, 12, 14, 16, 18, 21, 27, 31, 34, 38,
           42, 46, 50, 54, 62, 66, 73, 79, 82, 86, 88, 90, 93, 96, 97, 99)
  idx <- match(Personal_Responsibility, 9:40)
  ifelse(Personal_Responsibility <= 8, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Personal Responsibility to the dataset:
sel_data_imputed$PRPercentile <- PRPercentile
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
table_PRPercentile_PR_Child <- select(sel_data_imputed, Child_ID, Personal_Responsibility, PRPercentile)
# View(table_PRPercentile_PR_Child)
# ---- 3.2 Percentile scores for each child's Optimistic Thinking ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 5 map to 1,
# scores 6-28 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
# Fix: the original NA fallback tested is.na(Personal_Responsibility) — a
# copy-paste slip from section 3.1 — instead of Optimistic_Thinking.
Percentile_OT <- local({
  pct <- c(2, 3, 4, 5, 7, 10, 14, 16, 21, 24, 31, 38, 46, 54, 62, 69,
           76, 84, 88, 92, 95, 97, 99)
  idx <- match(Optimistic_Thinking, 6:28)
  ifelse(Optimistic_Thinking <= 5, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Optimistic Thinking to the dataset:
sel_data_imputed$Percentile_OT <- Percentile_OT
# Verification: raw score and percentile side by side, per child.
table_Percentile_OT_Child <- select(sel_data_imputed, Child_ID, Optimistic_Thinking, Percentile_OT)
# View(table_Percentile_OT_Child)
# ---- 3.3 Percentile scores for each child's Goal-Directed Behavior ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_GDB <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 16, 18, 21, 24, 27, 31, 34,
           38, 42, 46, 54, 58, 62, 69, 76, 79, 82, 86, 88, 92, 93, 96, 97, 99)
  idx <- match(Goal_Directed_Behavior, 7:40)
  ifelse(Goal_Directed_Behavior <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Goal-Directed Behavior to the dataset:
sel_data_imputed$Percentile_GDB <- Percentile_GDB
# Verification: raw score and percentile side by side, per child.
table_Percentile_GDB_Child <- select(sel_data_imputed, Child_ID, Goal_Directed_Behavior, Percentile_GDB)
# View(table_Percentile_GDB_Child)
# ---- 3.4 Percentile scores for each child's Social Awareness ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-36 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_SocAw <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 10, 12, 14, 16, 18, 24, 27, 31, 38, 46,
           54, 58, 66, 73, 79, 84, 88, 92, 95, 96, 97, 98, 99)
  idx <- match(Social_Awareness, 7:36)
  ifelse(Social_Awareness <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Social Awareness to the dataset:
sel_data_imputed$Percentile_SocAw <- Percentile_SocAw
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Social_Awareness_Percentile <- select(sel_data_imputed, Child_ID, Social_Awareness, Percentile_SocAw)
# View(Social_Awareness_Percentile)
# ---- 3.5 Percentile scores for each child's Decision Making ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 6 map to 1,
# scores 7-32 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_DM <- local({
  pct <- c(2, 3, 4, 5, 7, 10, 12, 14, 18, 21, 24, 31, 34, 38, 42, 50,
           58, 66, 76, 82, 86, 90, 93, 96, 97, 99)
  idx <- match(Decision_Making, 7:32)
  ifelse(Decision_Making <= 6, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Decision Making to the dataset:
sel_data_imputed$Percentile_DM <- Percentile_DM
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Decision_Making_Percentile <- select(sel_data_imputed, Child_ID, Decision_Making, Percentile_DM)
# View(Decision_Making_Percentile)
# ---- 3.6 Percentile scores for each child's Relationship Skills ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 7 map to 1,
# scores 8-40 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_RS <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 14, 16, 21, 24, 27, 31, 34,
           38, 42, 50, 58, 62, 66, 73, 79, 82, 84, 88, 90, 92, 95, 97, 99)
  idx <- match(Relationship_Skills, 8:40)
  ifelse(Relationship_Skills <= 7, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Relationship Skills to the dataset:
sel_data_imputed$Percentile_RS <- Percentile_RS
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Relationship_Skills_Percentile <- select(sel_data_imputed, Child_ID, Relationship_Skills, Percentile_RS)
# View(Relationship_Skills_Percentile)
# ---- 3.7 Percentile scores for each child's Self-Awareness ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 3 map to 1,
# scores 4-28 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
Percentile_Self_Aw <- local({
  pct <- c(2, 2, 3, 4, 5, 7, 8, 12, 16, 18, 21, 27, 34, 42, 50, 58, 69,
           76, 82, 86, 90, 93, 96, 98, 99)
  idx <- match(Self_Awareness, 4:28)
  ifelse(Self_Awareness <= 3, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Self-Awareness to the dataset:
sel_data_imputed$Percentile_Self_Aw <- Percentile_Self_Aw
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
Self_Awareness_Percentile <- select(sel_data_imputed, Child_ID, Self_Awareness, Percentile_Self_Aw)
# View(Self_Awareness_Percentile)
# ---- 3.8 Percentile scores for each child's Self-Management ----
# Raw-score -> national-percentile lookup (DESSA norms). Scores <= 8 map to 1,
# scores 9-44 come from the lookup vector, anything outside the table maps to
# 0, and NA raw scores stay NA (ifelse() propagates NA in its test argument).
# Fix: the original NA fallback tested is.na(Self_Awareness) — a copy-paste
# slip from section 3.7 — instead of Self_Management.
Percentile_Self_M <- local({
  pct <- c(2, 2, 3, 4, 4, 5, 7, 8, 10, 12, 14, 16, 18, 21, 24, 27, 31, 34,
           42, 46, 50, 58, 62, 66, 73, 79, 82, 84, 86, 90, 92, 95, 96, 97, 98, 99)
  idx <- match(Self_Management, 9:44)
  ifelse(Self_Management <= 8, 1,
         ifelse(!is.na(idx), pct[idx], 0))
})
# Attach the percentile scores for Self-Management to the dataset:
sel_data_imputed$Percentile_Self_M <- Percentile_Self_M
# View(sel_data_imputed)
# Verification: raw score and percentile side by side, per child.
# Note: select() uses the dataset's columns, not the free-standing R objects.
Self_Management_Percentile <- select(sel_data_imputed, Child_ID, Self_Management, Percentile_Self_M)
# View(Self_Management_Percentile)
# ---- Percentile score results summary ----
# Export the percentile results to an Excel spreadsheet.
# Build a data frame with the percentiles for all 8 DESSA dimensions:
all_Percentile_Scores_table <- select(sel_data_imputed, Child_ID, PRPercentile, Percentile_OT, Percentile_GDB, Percentile_SocAw, Percentile_DM, Percentile_RS, Percentile_Self_Aw, Percentile_Self_M)
# View(all_Percentile_Scores_table)
# Write the table to an Excel file on the network share:
openxlsx::write.xlsx(all_Percentile_Scores_table, file = "P:/RE/Private/SEL Assessment Results/DESSA Scoring/results/Post_Test/Percentile_Post_Test_Scores.xlsx")
# Sanity check: confirm the file was actually created.
file.exists("P:/RE/Private/SEL Assessment Results/DESSA Scoring/results/Post_Test/Percentile_Post_Test_Scores.xlsx")
|
library(data.table)
# Trailing command-line arguments only: args[1] is the input VCF(.gz) path.
args<-commandArgs(TRUE)
getScriptPath <- function(){
  # Locate the directory containing this script from the "--file=" argument
  # that Rscript places on the command line; fails fast when invoked any
  # other way (interactive session, sourced file, duplicate --file).
  all.args <- commandArgs()
  hit <- regexpr("(?<=^--file=).+", all.args, perl = TRUE)
  found <- dirname(regmatches(all.args, hit))
  if (length(found) == 0) stop("can't determine script dir: please call the script with Rscript")
  if (length(found) > 1) stop("can't determine script dir: more than one '--file' argument detected")
  normalizePath(found)
}
# Directory holding this script; the hgTable reference files are read from here.
scriptDir <- getScriptPath()
#print(scriptDir)
#Right now the comparison uses only SNPs that belong to the positive strand of hgTable files, SNPs from negative strand of hgTables are not compared to avoid complications
#Todo: Development of a function for detecting flips
#When flip detection is done, use the function below to fix flips by taking the complement (note: convertToComplement does not reverse the string, so it is not a full reverse-complement)
convertToComplement<-function(x){
  # Complement of a DNA string: A<->T, C<->G; any non-ACGT character becomes
  # "N". Input is upper-cased first, so case-insensitive. Note this is NOT a
  # reverse-complement: character order is preserved. A vector input is
  # collapsed into a single string (same as the original character-by-character
  # implementation).
  # Improvement: replaces the per-character if-chain with a vectorized
  # named-lookup, which is both idiomatic and O(n) without an R-level loop.
  comp.map <- c(A = "T", C = "G", G = "C", T = "A")
  chars <- unlist(strsplit(toupper(x), NULL))
  out <- comp.map[chars]          # NA for characters outside ACGT
  out[is.na(out)] <- "N"
  paste(unname(out), collapse = "")
}
# Main flow: sample the first 100K chr1 markers from the input VCF, count
# exact pos/ref/alt matches against per-build hgTable references, and report
# which genome build the dataset most likely uses plus the liftOver chain(s)
# needed to reach GRCh37.
filename <- args[1]
# enable reading both vcf and vcf.gz files
command <- paste0("less ", filename, " | grep -m 100000 -w \"^1\\|^chr1\" | cut -f 1,2,3,4,5")
#print(command)
print(paste0("Reading first 100K markers from chr1 in input file: ", filename, "..."))
# Columns kept by the shell pipeline: CHROM, POS, ID, REF, ALT.
input <- fread(cmd=command)
input <- as.data.frame(input)
names(input) <- c("chr", "pos", "originalID", "ref", "alt")
#head(input)
# One hgTable reference file (chr1 only, preprocessed) per candidate build,
# with the human-readable build names and chain files index-aligned to it.
hgtables <- list()
table_list <- c("chr1_GRCh37-hg19.hgTable.bi", "chr1_GRCh38-hg38.hgTable.bi", "chr1_NCBI34-hg16.hgTable.bi", "chr1_NCBI35-hg17.hgTable.bi", "chr1_NCBI36-hg18.hgTable.bi")
ref_names <- c("GRCh37/hg19", "GRCh38/hg38", "NCBI34/hg16", "NCBI35/hg17", "NCBI36/hg18")
chain_names1 <- c("none, already GRCh37", "GRCh38_to_GRCh37.chain", "NCBI34_to_GRCh37.chain", "NCBI35_to_GRCh37.chain", "NCBI36_to_GRCh37.chain")
chain_names2 <- c("hg19_to_GRCh37.chain", "hg38_to_GRCh37.chain", "hg16_to_hg19.chain -> hg19_to_GRCh37.chain", "hg17_to_hg19.chain -> hg19_to_GRCh37.chain", "hg18_to_hg19.chain -> hg19_to_GRCh37.chain")
#hg -> chr1
#GRCh -> 1
i<-1
Nmatches <- NULL
myheader <- c("chr", "start","pos", "rs", "strand", "ref", "alt")
# For each candidate build: load its hgTable, keep plus-strand SNPs only
# (negative-strand entries are skipped to avoid strand-flip complications),
# and count exact pos/ref/alt matches against the input markers.
for(hgtablename in table_list){
print(paste0("Reading ", hgtablename, " from ", scriptDir))
hgtablename <- paste0(scriptDir,"/",hgtablename)
hgtables[[i]] <- read.table(hgtablename, header=FALSE, sep="\t")
names(hgtables[[i]]) <- myheader
hgtables[[i]] <- subset(hgtables[[i]], strand == "+")
print(paste0("Identifying matches between ", hgtablename, " and ", filename))
matches <- merge(input, hgtables[[i]], by=c("pos", "ref", "alt"))
Nmatches[i] <- nrow(matches)
print(paste0("Exact pos/ref/alt matches found: ", filename, " versus ", hgtablename, " = ", Nmatches[i]))
i<-i+1
}
# The build with the most matches wins; the rest are treated as accidental.
# NOTE(review): which() returns every index on a tie, so maxindex could have
# length > 1 here and the subscripts below would print multiple entries --
# TODO confirm ties cannot occur in practice.
maxindex <- which(Nmatches==max(Nmatches))
others <- sum(Nmatches[-maxindex])
print(paste0("Dataset is based on build: ", ref_names[maxindex], ". Exact pos/ref/alt matches: ", Nmatches[maxindex], ". Total accidental matches from other builds, summed together: ", others) )
#hg -> chr1
#GRCh -> 1
# Chromosome naming convention decides which chain-file family to suggest:
# bare "1" -> GRCh-style chains, "chr1" -> UCSC hg-style chains.
if(input$chr[1]=="1"){
print(paste0("Use chain file(s): ", chain_names1[maxindex]))
}
if(input$chr[1]=="chr1"){
print(paste0("Use chain file(s): ", chain_names2[maxindex]))
}
|
/required_tools/check_vcf_build/.ipynb_checkpoints/check_vcf_build-checkpoint.R
|
no_license
|
bioinformatics-lab/Genotype_Imputation_Pipeline
|
R
| false
| false
| 3,411
|
r
|
library(data.table)
args<-commandArgs(TRUE)
## Return the normalized directory of the currently running script.
## Parses the "--file=<path>" element of commandArgs(), so it only works when
## the script is launched via Rscript (not in an interactive session).
getScriptPath <- function(){
cmd.args <- commandArgs()
# Perl lookbehind keeps just the path portion of the "--file=<path>" argument.
m <- regexpr("(?<=^--file=).+", cmd.args, perl=TRUE)
script.dir <- dirname(regmatches(cmd.args, m))
# regmatches() drops non-matching elements, so length 0 means no --file=
# argument was present (e.g. interactive session).
if(length(script.dir) == 0) stop("can't determine script dir: please call the script with Rscript")
if(length(script.dir) > 1) stop("can't determine script dir: more than one '--file' argument detected")
return(normalizePath(script.dir))
}
scriptDir <- getScriptPath()
#print(scriptDir)
#Right now the comparison uses only SNPs that belong to the positive strand of hgTable files, SNPs from negative strand of hgTables are not compared to avoid complications
#Todo: Development of function for detecting flips
#When flip detection is done, use the function below to fix the flip by generating the reverse complement
## Complement a DNA sequence string: A<->T and C<->G after upper-casing;
## any other character becomes "N". A character vector is flattened and the
## complemented characters are returned as a single collapsed string.
convertToComplement<-function(x){
  seq.chars <- unlist(strsplit(toupper(x), NULL))
  comp.chars <- vapply(seq.chars, function(base) {
    # switch() falls back to the unnamed default for non-ACGT characters.
    switch(base, A = "T", C = "G", G = "C", T = "A", "N")
  }, character(1), USE.NAMES = FALSE)
  paste(comp.chars, collapse = "")
}
# --- Main body: read up to 100K chr1 markers from the input VCF, count exact
# --- pos/ref/alt matches against per-build hgTable files, and report the most
# --- likely genome build plus the liftOver chain file(s) to use.
filename <- args[1]
# enable reading both vcf and vcf.gz files
# NOTE(review): shells out to `less`, `grep` and `cut`, so this is Unix-only
# and requires those tools on PATH.
command <- paste0("less ", filename, " | grep -m 100000 -w \"^1\\|^chr1\" | cut -f 1,2,3,4,5")
#print(command)
print(paste0("Reading first 100K markers from chr1 in input file: ", filename, "..."))
input <- fread(cmd=command)
input <- as.data.frame(input)
names(input) <- c("chr", "pos", "originalID", "ref", "alt")
#head(input)
hgtables <- list()
# One reference table per candidate build; ref_names/chain_names* are kept
# positionally parallel to table_list.
table_list <- c("chr1_GRCh37-hg19.hgTable.bi", "chr1_GRCh38-hg38.hgTable.bi", "chr1_NCBI34-hg16.hgTable.bi", "chr1_NCBI35-hg17.hgTable.bi", "chr1_NCBI36-hg18.hgTable.bi")
ref_names <- c("GRCh37/hg19", "GRCh38/hg38", "NCBI34/hg16", "NCBI35/hg17", "NCBI36/hg18")
chain_names1 <- c("none, already GRCh37", "GRCh38_to_GRCh37.chain", "NCBI34_to_GRCh37.chain", "NCBI35_to_GRCh37.chain", "NCBI36_to_GRCh37.chain")
chain_names2 <- c("hg19_to_GRCh37.chain", "hg38_to_GRCh37.chain", "hg16_to_hg19.chain -> hg19_to_GRCh37.chain", "hg17_to_hg19.chain -> hg19_to_GRCh37.chain", "hg18_to_hg19.chain -> hg19_to_GRCh37.chain")
#hg -> chr1
#GRCh -> 1
i<-1
Nmatches <- NULL
myheader <- c("chr", "start","pos", "rs", "strand", "ref", "alt")
# Count exact pos/ref/alt matches of the input against each build's table.
for(hgtablename in table_list){
print(paste0("Reading ", hgtablename, " from ", scriptDir))
hgtablename <- paste0(scriptDir,"/",hgtablename)
hgtables[[i]] <- read.table(hgtablename, header=FALSE, sep="\t")
names(hgtables[[i]]) <- myheader
# Only plus-strand SNPs are compared (see header comment re: flips).
hgtables[[i]] <- subset(hgtables[[i]], strand == "+")
print(paste0("Identifying matches between ", hgtablename, " and ", filename))
matches <- merge(input, hgtables[[i]], by=c("pos", "ref", "alt"))
Nmatches[i] <- nrow(matches)
print(paste0("Exact pos/ref/alt matches found: ", filename, " versus ", hgtablename, " = ", Nmatches[i]))
i<-i+1
}
# NOTE(review): a tie in Nmatches makes maxindex length > 1, which would
# garble the messages below -- confirm ties cannot occur in practice.
maxindex <- which(Nmatches==max(Nmatches))
others <- sum(Nmatches[-maxindex])
print(paste0("Dataset is based on build: ", ref_names[maxindex], ". Exact pos/ref/alt matches: ", Nmatches[maxindex], ". Total accidental matches from other builds, summed together: ", others) )
#hg -> chr1
#GRCh -> 1
# Chromosome naming convention ("1" vs "chr1") selects the chain-file advice.
if(input$chr[1]=="1"){
print(paste0("Use chain file(s): ", chain_names1[maxindex]))
}
if(input$chr[1]=="chr1"){
print(paste0("Use chain file(s): ", chain_names2[maxindex]))
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
render_scene_rcpp <- function(nx, ny, ns, fov, ambient_light, lookfromvec, lookatvec, aperture, camera_up, type, radius, shape, x, y, z, properties, velocity, moving, n, bghigh, bglow, shutteropen, shutterclose, ischeckered, checkercolors, noise, isnoise, noisephase, noiseintensity, noisecolorlist, angle, isimage, filelocation, islight, lightintensity, isflipped, focus_distance, isvolume, voldensity, parallel, implicit_sample, order_rotation_list, clampval, isgrouped, group_pivot, group_translate, group_angle, group_order_rotation, group_scale, tri_normal_bools, is_tri_color, tri_color_vert, fileinfo, filebasedir, toneval, progress_bar, numbercores, debugval, hasbackground, background, scale_list) {
.Call(`_rayrender_render_scene_rcpp`, nx, ny, ns, fov, ambient_light, lookfromvec, lookatvec, aperture, camera_up, type, radius, shape, x, y, z, properties, velocity, moving, n, bghigh, bglow, shutteropen, shutterclose, ischeckered, checkercolors, noise, isnoise, noisephase, noiseintensity, noisecolorlist, angle, isimage, filelocation, islight, lightintensity, isflipped, focus_distance, isvolume, voldensity, parallel, implicit_sample, order_rotation_list, clampval, isgrouped, group_pivot, group_translate, group_angle, group_order_rotation, group_scale, tri_normal_bools, is_tri_color, tri_color_vert, fileinfo, filebasedir, toneval, progress_bar, numbercores, debugval, hasbackground, background, scale_list)
}
|
/R/RcppExports.R
|
no_license
|
javierluraschi/rayrender
|
R
| false
| false
| 1,558
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated R shim: forwards every argument verbatim to the compiled C++
# routine registered as `_rayrender_render_scene_rcpp`. Any edits here will be
# overwritten on the next compileAttributes() run.
render_scene_rcpp <- function(nx, ny, ns, fov, ambient_light, lookfromvec, lookatvec, aperture, camera_up, type, radius, shape, x, y, z, properties, velocity, moving, n, bghigh, bglow, shutteropen, shutterclose, ischeckered, checkercolors, noise, isnoise, noisephase, noiseintensity, noisecolorlist, angle, isimage, filelocation, islight, lightintensity, isflipped, focus_distance, isvolume, voldensity, parallel, implicit_sample, order_rotation_list, clampval, isgrouped, group_pivot, group_translate, group_angle, group_order_rotation, group_scale, tri_normal_bools, is_tri_color, tri_color_vert, fileinfo, filebasedir, toneval, progress_bar, numbercores, debugval, hasbackground, background, scale_list) {
    .Call(`_rayrender_render_scene_rcpp`, nx, ny, ns, fov, ambient_light, lookfromvec, lookatvec, aperture, camera_up, type, radius, shape, x, y, z, properties, velocity, moving, n, bghigh, bglow, shutteropen, shutterclose, ischeckered, checkercolors, noise, isnoise, noisephase, noiseintensity, noisecolorlist, angle, isimage, filelocation, islight, lightintensity, isflipped, focus_distance, isvolume, voldensity, parallel, implicit_sample, order_rotation_list, clampval, isgrouped, group_pivot, group_translate, group_angle, group_order_rotation, group_scale, tri_normal_bools, is_tri_color, tri_color_vert, fileinfo, filebasedir, toneval, progress_bar, numbercores, debugval, hasbackground, background, scale_list)
}
|
library(mlbench)
library(parallel)
library(RMySQL)
source('./main.r')
#DB connection
con <- dbConnect(MySQL(), user='root', password='demos30', dbname='memoria', host='localhost')
set.seed(1)
#Data
data(Glass)
data(PimaIndiansDiabetes)
data(iris)
dataGlass<-Glass[,1:9]
dataDiabetes<-PimaIndiansDiabetes[,1:8]
dataIris<-iris[,1:4]
D1=dist(dataGlass)
D2=as.dist(cor(t(dataGlass)))
D3=dist(dataIris)
D4=as.dist(cor(t(dataIris)))
D5=dist(dataDiabetes)
D6=as.dist(cor(t(dataDiabetes)))
#Parameters
dataset=c('glass','iris','diabetes')
init=c('cmdscaleMean','cmdscalePca')
runs=c(1:11)
radio=seq(0.1,1,by=0.1)
popSize=c(10,50,100)
gen=100
parameters=expand.grid(dataset,init,radio,popSize,gen)
parameters$id=seq.int(nrow(parameters))
par=data.frame()
for(i in 1:nrow(parameters)){
for(j in runs){
par=rbind(par,cbind(parameters[i,],j))
}
}
parameters=par
names(parameters)=c('dataset','initialization','radio','population_size','generations','id','runs')
#save parameters
dbWriteTable(con,'parameters',parameters,row.name=FALSE,overwrite=TRUE)
#Clear results table
dbSendQuery(con,'truncate table results')
dbSendQuery(con,'truncate table individuals')
dbDisconnect(con)
#Parallel run
no_cores <- detectCores()
cl <- makeCluster(no_cores,outfile='log.txt',type='FORK')
#clusterExport(cl)#,list('geneticMds2','D1','D2','D3','D4','D5','D6','Population','Individual'))
parRapply(
cl,
parameters,
# Worker executed on each cluster node for one row of `parameters`: picks the
# two precomputed distance matrices for the requested dataset, runs the
# genetic MDS, and appends the results to MySQL.
# NOTE(review): DB credentials are hard-coded here (and in the setup above);
# consider moving them to environment variables or a config file.
function(param){
# Each worker opens its own DB connection; a handle cannot be shared across
# the forked processes.
con <- dbConnect(MySQL(), user='root', password='demos30', dbname='memoria', host='localhost')
# D1..D6 are globals inherited from the parent session via the FORK cluster.
if(param['dataset']=='glass'){
D01<-D1
D02<-D2
}
if(param['dataset']=='iris'){
D01<-D3
D02<-D4
}
if(param['dataset']=='diabetes'){
D01<-D5
D02<-D6
}
#Results(orderByFitness)
res<-geneticMds2(D01,D02,gen=as.numeric(param['generations']),size=param['population_size'],m=2,initMethod=param['initialization'],radio=as.numeric(param['radio']))
# Tag each result row with the parameter-set id and run number before saving.
res[[1]]<-cbind(param['id'],seq.int(nrow(res[[1]])),res[[1]],param['runs'])
names(res[[1]])<-c('parameters_id','individual_id','x','y','rank','crowding','generation','run')
dbWriteTable(con,'results',res[[1]],row.name=FALSE,append=TRUE)
#individuals (points)
res[[2]]=cbind(param['id'],param['runs'],res[[2]])
names(res[[2]])=c('parameters_id','run','individual_id','point_id','x','y')
dbWriteTable(con,'individuals',res[[2]],row.name=FALSE,append=TRUE)
dbDisconnect(con)
}
)
stopCluster(cl)
|
/src/par_exec.r
|
no_license
|
fealuin/app-genetic-mds-2d
|
R
| false
| false
| 2,460
|
r
|
library(mlbench)
library(parallel)
library(RMySQL)
source('./main.r')
#DB connection
con <- dbConnect(MySQL(), user='root', password='demos30', dbname='memoria', host='localhost')
set.seed(1)
#Data
data(Glass)
data(PimaIndiansDiabetes)
data(iris)
dataGlass<-Glass[,1:9]
dataDiabetes<-PimaIndiansDiabetes[,1:8]
dataIris<-iris[,1:4]
D1=dist(dataGlass)
D2=as.dist(cor(t(dataGlass)))
D3=dist(dataIris)
D4=as.dist(cor(t(dataIris)))
D5=dist(dataDiabetes)
D6=as.dist(cor(t(dataDiabetes)))
#Parameters
dataset=c('glass','iris','diabetes')
init=c('cmdscaleMean','cmdscalePca')
runs=c(1:11)
radio=seq(0.1,1,by=0.1)
popSize=c(10,50,100)
gen=100
parameters=expand.grid(dataset,init,radio,popSize,gen)
parameters$id=seq.int(nrow(parameters))
par=data.frame()
for(i in 1:nrow(parameters)){
for(j in runs){
par=rbind(par,cbind(parameters[i,],j))
}
}
parameters=par
names(parameters)=c('dataset','initialization','radio','population_size','generations','id','runs')
#save parameters
dbWriteTable(con,'parameters',parameters,row.name=FALSE,overwrite=TRUE)
#Clear results table
dbSendQuery(con,'truncate table results')
dbSendQuery(con,'truncate table individuals')
dbDisconnect(con)
#Paralel run
no_cores <- detectCores()
cl <- makeCluster(no_cores,outfile='log.txt',type='FORK')
#clusterExport(cl)#,list('geneticMds2','D1','D2','D3','D4','D5','D6','Population','Individual'))
parRapply(
cl,
parameters,
function(param){
con <- dbConnect(MySQL(), user='root', password='demos30', dbname='memoria', host='localhost')
if(param['dataset']=='glass'){
D01<-D1
D02<-D2
}
if(param['dataset']=='iris'){
D01<-D3
D02<-D4
}
if(param['dataset']=='diabetes'){
D01<-D5
D02<-D6
}
#Results(orderByFitness)
res<-geneticMds2(D01,D02,gen=as.numeric(param['generations']),size=param['population_size'],m=2,initMethod=param['initialization'],radio=as.numeric(param['radio']))
res[[1]]<-cbind(param['id'],seq.int(nrow(res[[1]])),res[[1]],param['runs'])
names(res[[1]])<-c('parameters_id','individual_id','x','y','rank','crowding','generation','run')
dbWriteTable(con,'results',res[[1]],row.name=FALSE,append=TRUE)
#individuals (points)
res[[2]]=cbind(param['id'],param['runs'],res[[2]])
names(res[[2]])=c('parameters_id','run','individual_id','point_id','x','y')
dbWriteTable(con,'individuals',res[[2]],row.name=FALSE,append=TRUE)
dbDisconnect(con)
}
)
stopCluster(cl)
|
load("benchmark with dakar dataset/person.state.d.RData")
#240
person.state.d=person.state.d[(240+1):nrow(person.state.d),]
n.row=nrow(person.state.d)
n.col=ncol(person.state.d)
n.locations=length(unique(as.vector(person.state.d)))
person.state.d=matrix(match(as.vector(person.state.d), sort(unique(as.vector(person.state.d)))),nrow = n.row,ncol = n.col)
locations=1:n.locations
loc.d = t(apply(person.state.d, 1, function(x) table(factor(x, levels=locations ))))
td=seq(from=0,to=24*3600,by=60)
m.time = table(c(head(person.state.d,-1)), c(tail(person.state.d,-1)),cut(td[row(head(person.state.d,-1)) + 240 ]/3600,breaks = 0:4 *6))
m.time = sweep(m.time*.99, MARGIN = 1:2, STATS = table(c(head(person.state.d,-1)), c(tail(person.state.d,-1)))*.01, FUN = '+')
m.time = sweep(m.time, MARGIN = c(1,3), STATS = colSums(aperm(m.time,perm = c(2,1,3)),dims = 1),FUN = '/')
dimnames(m.time)[[1]] = dimnames(m.time)[[2]] = 1:length(locations)
#only consider neighbors
rate_in=list()
rate_out=list()
loc_in=list()
loc_out=list()
for(i in 1:dim(m.time)[3]){
m=m.time[,,i]
diag(m)=0
rownames(m)=1:length(locations)
colnames(m)=1:length(locations)
rate_in[[i]]=lapply(1:ncol(m), function(n) {
m[,n][m[,n]!=0]
})
loc_in[[i]]=lapply(1:ncol(m), function(n) {
as.integer(names(rate_in[[i]][[n]]))
})
rate_out[[i]]=lapply(1:ncol(m), function(n) {
m[n,][m[n,]!=0]
})
loc_out[[i]]=lapply(1:ncol(m), function(n) {
as.integer(names(rate_out[[i]][[n]]))
})
}
rate_in_f=function(i) rate_in[[ceiling((i+240)/(nrow(loc.d)+240)*length(rate_in))]]
rate_out_f=function(i) rate_out[[ceiling((i+240)/(nrow(loc.d)+240)*length(rate_out))]]
loc_in_f=function(i) loc_in[[ceiling((i+240)/(nrow(loc.d)+240)*length(loc_in))]]
loc_out_f=function(i) loc_out[[ceiling((i+240)/(nrow(loc.d)+240)*length(loc_out))]]
## Estimate an observation model (rows = true counts, cols = observed counts)
## by repeatedly (100x) subsampling 1/obs.scale of the columns of
## person.state.d, tabulating the scaled-up observed counts against the
## ground-truth counts, and row-normalizing the resulting table.
## NOTE(review): reads the globals `loc.d` and `locations` from the enclosing
## script, and uses sample() -- results vary unless the caller sets a seed.
sample.obs.matrix = function(person.state.d, obs.scale){
obs.training = lapply(1:100, function(n){
ndx0 = sample(1:ncol(person.state.d), ceiling(ncol(person.state.d)/obs.scale))
obs = t(apply(person.state.d[,ndx0], 1, function(x) table(factor(x, levels=locations) )))
ndx2 = which(loc.d>0)
# Scale subsample counts back up to the full population size.
data.frame(groundtruth=loc.d[ndx2], obs = trunc(obs[ndx2]*ncol(person.state.d)/length(ndx0)) )
})
obs.training = do.call(rbind, obs.training)
# Append a (0,0) pair so the zero row/column is always present in the table.
obs.training = rbind(obs.training, c(0,0))
obs.table = table(obs.training[,1], obs.training[,2])
max.obs=max(obs.training)+10
# Expand the sparse table onto the full 0:max.obs grid by constant
# interpolation; rule = 2 extends edge values beyond the observed range.
obs.rows = approx(as.numeric(rownames(obs.table)), 1:nrow(obs.table), 0:max.obs, method = 'constant', ties = 'ordered', f = 0, rule = 2)
obs.cols = approx(as.numeric(colnames(obs.table)), 1:ncol(obs.table), 0:max.obs, method = 'constant', ties = 'ordered', f = 0, rule = 2)
obs.matrix=obs.table[obs.rows$y,obs.cols$y] #
# Row-normalize so each true-count row is a probability distribution.
obs.matrix=sweep(obs.matrix, 1, rowSums(obs.matrix),'/') #
obs.matrix
}
obs.scale = 2
obs.matrix = sample.obs.matrix(person.state.d, obs.scale = obs.scale)
#image(z=t(asinh(1000*obs.matrix)),x=1:ncol(obs.matrix) -1, y=1:nrow(obs.matrix) -1,xlab='# oberved vehicles scaled', ylab='# vehicles', asp=1)
#abline(coef=c(0,1))
obs.matrix[obs.matrix==0]=1e-20
obs.matrix=sweep(obs.matrix,1,rowSums(obs.matrix),FUN = '/')
maxloc.d=apply(loc.d,2, max )
max.person=ifelse(maxloc.d<=10,maxloc.d+5,maxloc.d+10)
dataempty=lapply(1:nrow(loc.d), function(n){
lapply(1:length(locations), function(m){
rep(1,max.person[m]+1)
})
})
sliceempty=lapply(1:length(locations), function(m){
rep(0,max.person[m]+1)
})
start=sliceempty
for( i in 1:length(locations)) start[[i]][loc.d[1,i]+1]=1
end=sliceempty
for( i in 1:length(locations)) end[[i]][loc.d[nrow(loc.d),i]+1]=1
la=dataempty
la[[1]]=start
lb=dataempty
lb[[length(lb)]]=end
## Double the capacity of list x when it is full, i.e. when the number of
## recorded time points in attr(x, 't') equals length(x). The 't' and 'c'
## attributes are saved and restored because `length<-` drops attributes;
## newly added slots are NULL.
alloc = function(x){
  saved.attrs <- list(t = attr(x, "t"), c = attr(x, "c"))
  if (length(saved.attrs$t) == length(x)) {
    length(x) <- length(x) * 2  # geometric growth to amortize appends
  }
  attr(x, "t") <- saved.attrs$t
  attr(x, "c") <- saved.attrs$c
  x
}
# read a slice from filtration, previous nearest one
## Fetch the stored slice of x nearest to time t, using the time stamps in
## attr(x, 't'): for a forward container (attr 'c' == "a") the latest slice
## at or before t; for a backward container ("b") the earliest slice at or
## after t.
getSlice <- function(x, t ){
  times <- attr(x, "t")
  direction <- attr(x, "c")
  if (direction == "a") {
    idx <- which(times == max(times[times <= t]))
    slice <- x[[idx]]
  }
  if (direction == "b") {
    idx <- which(times == min(times[times >= t]))
    slice <- x[[idx]]
  }
  slice
}
lg=la
for(i in 1:length(lg)){
lg[[i]]=lapply(1:length(locations), function(n) la[[i]][[n]]*lb[[i]][[n]]/sum(la[[i]][[n]]*lb[[i]][[n]]))
}
attr(la,'t') =attr(lb,'t') = attr(lg,'t') = 1:nrow(loc.d)
attr(la,'c')="a"
attr(lb,'c')="b"
attr(lg,'c')="a"
obs.scale=5
observable= sort( order( sapply(rate_in_f(500),length), decreasing = T) [1:ceiling(length(locations)/obs.scale)] ) # setdiff(dimnames(obs.prob)$location,c("h","w"))
unobservable=setdiff(1:length(locations),observable)
observable_nominal=as.character(observable)
if(max(max.person[observable])+1 > nrow(obs.matrix) ){
k1=max(max.person[observable])+1
k2=nrow(obs.matrix)
obs.matrix=rbind(obs.matrix,matrix(0,nrow = k1-k2,ncol=ncol(obs.matrix)))
for(k in (k2+1):k1){
obs.matrix[k,(1+k-k2):ncol(obs.matrix)]=obs.matrix[k2,1:(ncol(obs.matrix)+k2-k)]
}
}
obs.matrix[obs.matrix==0]=1e-20
obs.matrix=sweep(obs.matrix,1,rowSums(obs.matrix),FUN = '/')
#obs = t(apply(person.state.d[,observable], 1, function(x) table(factor(x, levels=locations) )))
#obs=sapply(1:ncol(obs),function(n) pmin(max.person[n],round(obs[,n]*obs.scale)))
obs=loc.d
observation=lapply(observable, function(n) obs[,n])
names(observation)=observable_nominal
remove(list = setdiff(ls(),c('observation','obs.matrix','lg','loc.d','rate_in','obs','person.state.d',
'rate_out','rate_in_f','rate_out_f',
'loc_in','loc_out','loc_in_f','loc_out_f',
'la','lb','m.time','max.person','observable_nominal','unobservable','observable','alloc','getSlice','locations')))
save.image(file = "benchmark with dakar dataset/inference.RData")
|
/Inference/VI/Senegal/1.preprocessing-senegal.R
|
no_license
|
Fanfanyang/Projects
|
R
| false
| false
| 5,776
|
r
|
load("benchmark with dakar dataset/person.state.d.RData")
#240
person.state.d=person.state.d[(240+1):nrow(person.state.d),]
n.row=nrow(person.state.d)
n.col=ncol(person.state.d)
n.locations=length(unique(as.vector(person.state.d)))
person.state.d=matrix(match(as.vector(person.state.d), sort(unique(as.vector(person.state.d)))),nrow = n.row,ncol = n.col)
locations=1:n.locations
loc.d = t(apply(person.state.d, 1, function(x) table(factor(x, levels=locations ))))
td=seq(from=0,to=24*3600,by=60)
m.time = table(c(head(person.state.d,-1)), c(tail(person.state.d,-1)),cut(td[row(head(person.state.d,-1)) + 240 ]/3600,breaks = 0:4 *6))
m.time = sweep(m.time*.99, MARGIN = 1:2, STATS = table(c(head(person.state.d,-1)), c(tail(person.state.d,-1)))*.01, FUN = '+')
m.time = sweep(m.time, MARGIN = c(1,3), STATS = colSums(aperm(m.time,perm = c(2,1,3)),dims = 1),FUN = '/')
dimnames(m.time)[[1]] = dimnames(m.time)[[2]] = 1:length(locations)
#only consider neighbors
rate_in=list()
rate_out=list()
loc_in=list()
loc_out=list()
for(i in 1:dim(m.time)[3]){
m=m.time[,,i]
diag(m)=0
rownames(m)=1:length(locations)
colnames(m)=1:length(locations)
rate_in[[i]]=lapply(1:ncol(m), function(n) {
m[,n][m[,n]!=0]
})
loc_in[[i]]=lapply(1:ncol(m), function(n) {
as.integer(names(rate_in[[i]][[n]]))
})
rate_out[[i]]=lapply(1:ncol(m), function(n) {
m[n,][m[n,]!=0]
})
loc_out[[i]]=lapply(1:ncol(m), function(n) {
as.integer(names(rate_out[[i]][[n]]))
})
}
rate_in_f=function(i) rate_in[[ceiling((i+240)/(nrow(loc.d)+240)*length(rate_in))]]
rate_out_f=function(i) rate_out[[ceiling((i+240)/(nrow(loc.d)+240)*length(rate_out))]]
loc_in_f=function(i) loc_in[[ceiling((i+240)/(nrow(loc.d)+240)*length(loc_in))]]
loc_out_f=function(i) loc_out[[ceiling((i+240)/(nrow(loc.d)+240)*length(loc_out))]]
sample.obs.matrix = function(person.state.d, obs.scale){
obs.training = lapply(1:100, function(n){
ndx0 = sample(1:ncol(person.state.d), ceiling(ncol(person.state.d)/obs.scale))
obs = t(apply(person.state.d[,ndx0], 1, function(x) table(factor(x, levels=locations) )))
ndx2 = which(loc.d>0)
data.frame(groundtruth=loc.d[ndx2], obs = trunc(obs[ndx2]*ncol(person.state.d)/length(ndx0)) )
})
obs.training = do.call(rbind, obs.training)
obs.training = rbind(obs.training, c(0,0))
obs.table = table(obs.training[,1], obs.training[,2])
max.obs=max(obs.training)+10
obs.rows = approx(as.numeric(rownames(obs.table)), 1:nrow(obs.table), 0:max.obs, method = 'constant', ties = 'ordered', f = 0, rule = 2)
obs.cols = approx(as.numeric(colnames(obs.table)), 1:ncol(obs.table), 0:max.obs, method = 'constant', ties = 'ordered', f = 0, rule = 2)
obs.matrix=obs.table[obs.rows$y,obs.cols$y] #
obs.matrix=sweep(obs.matrix, 1, rowSums(obs.matrix),'/') #
obs.matrix
}
obs.scale = 2
obs.matrix = sample.obs.matrix(person.state.d, obs.scale = obs.scale)
#image(z=t(asinh(1000*obs.matrix)),x=1:ncol(obs.matrix) -1, y=1:nrow(obs.matrix) -1,xlab='# oberved vehicles scaled', ylab='# vehicles', asp=1)
#abline(coef=c(0,1))
obs.matrix[obs.matrix==0]=1e-20
obs.matrix=sweep(obs.matrix,1,rowSums(obs.matrix),FUN = '/')
maxloc.d=apply(loc.d,2, max )
max.person=ifelse(maxloc.d<=10,maxloc.d+5,maxloc.d+10)
dataempty=lapply(1:nrow(loc.d), function(n){
lapply(1:length(locations), function(m){
rep(1,max.person[m]+1)
})
})
sliceempty=lapply(1:length(locations), function(m){
rep(0,max.person[m]+1)
})
start=sliceempty
for( i in 1:length(locations)) start[[i]][loc.d[1,i]+1]=1
end=sliceempty
for( i in 1:length(locations)) end[[i]][loc.d[nrow(loc.d),i]+1]=1
la=dataempty
la[[1]]=start
lb=dataempty
lb[[length(lb)]]=end
## Grow list x (doubling) when full, i.e. when the count of time stamps in
## attr(x, 't') equals length(x).
alloc = function(x){
# `length<-` drops attributes, so save 't' and 'c' and restore them after.
old.t = attr(x,'t')
old.c = attr(x,'c')
if(length(attr(x, 't'))==length(x)) length(x) = length(x)*2 #alloc memory
attr(x,'t') = old.t
attr(x,'c') = old.c
x
}
# read a slice from filtration, previous nearest one
## Return the stored slice of x nearest to time t: for attr 'c' == "a" the
## latest slice at or before t, for "b" the earliest slice at or after t.
## NOTE(review): duplicate time stamps in attr(x,'t') would make t0 length > 1
## and `[[` would error -- assumed unique; confirm with callers.
getSlice <- function(x, t ){
tt = attr(x, 't')
if(attr(x,'c')=="a"){
t0 = which(tt==max(tt[tt<=t]))
y=x[[t0]]
}
if(attr(x,'c')=="b"){
t0 = which(tt==min(tt[tt>=t]))
y=x[[t0]]
}
y
}
lg=la
for(i in 1:length(lg)){
lg[[i]]=lapply(1:length(locations), function(n) la[[i]][[n]]*lb[[i]][[n]]/sum(la[[i]][[n]]*lb[[i]][[n]]))
}
attr(la,'t') =attr(lb,'t') = attr(lg,'t') = 1:nrow(loc.d)
attr(la,'c')="a"
attr(lb,'c')="b"
attr(lg,'c')="a"
obs.scale=5
observable= sort( order( sapply(rate_in_f(500),length), decreasing = T) [1:ceiling(length(locations)/obs.scale)] ) # setdiff(dimnames(obs.prob)$location,c("h","w"))
unobservable=setdiff(1:length(locations),observable)
observable_nominal=as.character(observable)
if(max(max.person[observable])+1 > nrow(obs.matrix) ){
k1=max(max.person[observable])+1
k2=nrow(obs.matrix)
obs.matrix=rbind(obs.matrix,matrix(0,nrow = k1-k2,ncol=ncol(obs.matrix)))
for(k in (k2+1):k1){
obs.matrix[k,(1+k-k2):ncol(obs.matrix)]=obs.matrix[k2,1:(ncol(obs.matrix)+k2-k)]
}
}
obs.matrix[obs.matrix==0]=1e-20
obs.matrix=sweep(obs.matrix,1,rowSums(obs.matrix),FUN = '/')
#obs = t(apply(person.state.d[,observable], 1, function(x) table(factor(x, levels=locations) )))
#obs=sapply(1:ncol(obs),function(n) pmin(max.person[n],round(obs[,n]*obs.scale)))
obs=loc.d
observation=lapply(observable, function(n) obs[,n])
names(observation)=observable_nominal
remove(list = setdiff(ls(),c('observation','obs.matrix','lg','loc.d','rate_in','obs','person.state.d',
'rate_out','rate_in_f','rate_out_f',
'loc_in','loc_out','loc_in_f','loc_out_f',
'la','lb','m.time','max.person','observable_nominal','unobservable','observable','alloc','getSlice','locations')))
save.image(file = "benchmark with dakar dataset/inference.RData")
|
## The functions provided below can calculate the inverse of
## a matrix in a faster way by caching the result, since we
## suppose that the input matrix will not be changed in the
## future.
## Function name: makeCacheMatrix
## Inputs: A matrix x
## Outputs: A special matrix, with its inverse cached.
## Wrap matrix x in an object that can memoise its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setInverse/getInverse for the cached inverse (NULL until computed).
## Setting a new matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    cached.inverse <- NULL
    list(
        set = function(y) {
            x <<- y
            cached.inverse <<- NULL  # new matrix: drop the stale inverse
        },
        get = function() x,
        setInverse = function(inv) cached.inverse <<- inv,
        getInverse = function() cached.inverse
    )
}
## Function name: cacheSolve
## Inputs: The special matrix like the one produced by
## "makeCacheMatrix(x)"
## Outputs: The inverse of the matrix x
## Return the inverse of the special matrix object produced by
## makeCacheMatrix(): compute it with solve() only on a cache miss and store
## it; on a cache hit, emit a message and serve the stored copy.
cacheSolve <- function(x, ...) {
    cached <- x$getInverse()
    if (is.null(cached)) {
        # Cache miss: invert the stored matrix and remember the result.
        cached <- solve(x$get())
        x$setInverse(cached)
    } else {
        message("Getting cached data.")
    }
    cached
}
|
/datascience/R_Programming/ProgrammingAssignment2/cachematrix.R
|
no_license
|
elfinhuangcat/coursera
|
R
| false
| false
| 1,119
|
r
|
## The functions provided below can calculate the inverse of
## a matrix in a faster way by caching the result, since we
## suppose that the input matrix will not be changed in the
## future.
## Function name: makeCacheMatrix
## Inputs: A matrix x
## Outputs: A special matrix, with its inverse cached.
## Create a caching wrapper around matrix x: a list of closures sharing the
## enclosed matrix and its (initially NULL) inverse.
makeCacheMatrix <- function(x = matrix()) {
inverseX <- NULL
# Replacing the matrix invalidates the cached inverse.
set <- function(y) {
x <<- y
inverseX <<- NULL
}
get <- function() x
setInverse <- function(inv) inverseX <<- inv
getInverse <- function() inverseX
list(set = set, get = get,
setInverse = setInverse, getInverse = getInverse)
}
## Function name: cacheSolve
## Inputs: The special matrix like the one produced by
## "makeCacheMatrix(x)"
## Outputs: The inverse of the matrix x
## Return the inverse of the special matrix object from makeCacheMatrix(),
## serving a cached copy when available. Note that the `...` arguments are
## accepted but not forwarded to solve() below.
cacheSolve <- function(x, ...) {
inverseX <- x$getInverse()
if(!is.null(inverseX)) {
message("Getting cached data.")
return(inverseX)
}
## Else we need to calculate the inverse and cache it.
data <- x$get()
inverseX <- solve(data)
x$setInverse(inverseX)
inverseX
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pcm03extractProtFP.R
\name{extractProtFP}
\alias{extractProtFP}
\title{Amino Acid Properties Based Scales Descriptors (Protein Fingerprint)}
\usage{
extractProtFP(x, index = NULL, pc, lag, scale = TRUE, silent = TRUE)
}
\arguments{
\item{x}{A character vector, as the input protein sequence.}
\item{index}{Integer vector or character vector. Specify which AAindex
properties to select from the AAindex database by specifying the
numerical or character index of the properties in the
AAindex database.
Default is \code{NULL}, means selecting all the AA properties
in the AAindex database.}
\item{pc}{Integer. Use the first pc principal components as the scales.
Must be no greater than the number of AA properties provided.}
\item{lag}{The lag parameter. Must be less than the amino acids.}
\item{scale}{Logical. Should we auto-scale the property matrix
before PCA? Default is \code{TRUE}.}
\item{silent}{Logical. Whether we print the standard deviation,
proportion of variance and the cumulative proportion of
the selected principal components or not.
Default is \code{TRUE}.}
}
\value{
A length \code{lag * p^2} named vector,
\code{p} is the number of scales (principal components) selected.
}
\description{
Amino Acid Properties Based Scales Descriptors (Protein Fingerprint)
}
\details{
This function calculates amino acid properties based scales descriptors
(protein fingerprint). Users could specify which AAindex properties to
select from the AAindex database by specifying the numerical or character
index of the properties in the AAindex database.
}
\examples{
x = readFASTA(system.file('protseq/P00750.fasta', package = 'protr'))[[1]]
fp = extractProtFP(x, index = c(160:165, 258:296), pc = 5, lag = 7, silent = FALSE)
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\keyword{AAindex}
\keyword{extract}
\keyword{scales}
|
/man/extractProtFP.Rd
|
no_license
|
Jmonee/protr
|
R
| false
| false
| 1,942
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pcm03extractProtFP.R
\name{extractProtFP}
\alias{extractProtFP}
\title{Amino Acid Properties Based Scales Descriptors (Protein Fingerprint)}
\usage{
extractProtFP(x, index = NULL, pc, lag, scale = TRUE, silent = TRUE)
}
\arguments{
\item{x}{A character vector, as the input protein sequence.}
\item{index}{Integer vector or character vector. Specify which AAindex
properties to select from the AAindex database by specifying the
numerical or character index of the properties in the
AAindex database.
Default is \code{NULL}, means selecting all the AA properties
in the AAindex database.}
\item{pc}{Integer. Use the first pc principal components as the scales.
Must be no greater than the number of AA properties provided.}
\item{lag}{The lag parameter. Must be less than the amino acids.}
\item{scale}{Logical. Should we auto-scale the property matrix
before PCA? Default is \code{TRUE}.}
\item{silent}{Logical. Whether we print the standard deviation,
proportion of variance and the cumulative proportion of
the selected principal components or not.
Default is \code{TRUE}.}
}
\value{
A length \code{lag * p^2} named vector,
\code{p} is the number of scales (principal components) selected.
}
\description{
Amino Acid Properties Based Scales Descriptors (Protein Fingerprint)
}
\details{
This function calculates amino acid properties based scales descriptors
(protein fingerprint). Users could specify which AAindex properties to
select from the AAindex database by specifying the numerical or character
index of the properties in the AAindex database.
}
\examples{
x = readFASTA(system.file('protseq/P00750.fasta', package = 'protr'))[[1]]
fp = extractProtFP(x, index = c(160:165, 258:296), pc = 5, lag = 7, silent = FALSE)
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\keyword{AAindex}
\keyword{extract}
\keyword{scales}
|
## Compile/Filter Gene Coordinates ##
## Called by Gene_Score Pipeline ##
## February 18, 2015 ##
## Kristopher Standish ##
## Usage ##
# Rscript 2-Compile_Gene_Coords.R <Path/To/Out_Dir>
###############################################################
## PARSE COMMAND LINE #########################################
###############################################################
LINE <- commandArgs(trailingOnly = TRUE)
# LINE <- "/projects/janssen/Phased/20150218_Test_Genes"
PathToOut <- LINE[1]
# Specify other Paths
PathToGeneCoords <- paste( PathToOut, "Gene_Info.raw.txt", sep="/" )
# Check for proper parsing
print(paste( "Output:", PathToOut ))
print(paste( "Gene Coords:", PathToGeneCoords ))
###############################################################
## LOAD DATA ##################################################
###############################################################
## Load Gene Coordinate File
TAB <- read.table( PathToGeneCoords, sep="\t", header=T, comment.char="",quote="" )
###############################################################
## EDIT FOR NEW TABLES ########################################
###############################################################
## Combine Gene_Transcript Columns
GTX <- paste( TAB[,15],TAB[,1],sep="_")
## Rename Chromosome Column
CHR.range <- TAB[,2]
# Remove "chr" tag
CHR.plink <- gsub( "chr","", CHR.range )
## Buffer Gene Coordinates
# TX_S
TX_S.range <- TAB[,3]
TX_S.plink <- TAB[,3] - 5000
# TX_E
TX_E.range <- TAB[,4]
TX_E.plink <- TAB[,4] + 5000
###############################################################
## FILTER OUT TRANSCRIPTS #####################################
###############################################################
## Non-Autosomal
RM.chr <- which( !(CHR.plink %in% 1:22) )
## Transcripts with same coordinates
# String everything together: GENE_CHR_TXs_TXe_EXs_EXe_nEX
STR <- paste( TAB[,15],TAB[,2],TAB[,3],TAB[,4],TAB[,7],TAB[,8],TAB[,9], sep="_" )
RM.rpt <- which(duplicated( STR ))
## Combine Removal Criteria
RM <- Reduce( union, list(RM.chr,RM.rpt) )
## Create new Tables w/ Transcripts Removed
# BUG FIX: when RM is empty, x[ -integer(0), ] selects ZERO rows (it would
# silently drop every transcript). Only use negative indexing when there is
# actually something to remove.
KEEP <- if( length(RM) > 0 ) -RM else seq_len(nrow(TAB))
# New "Gene_Info" table
TAB.rm <- TAB[ KEEP, ]
# "Gene_Range" table (gene_transcript ID plus unbuffered coordinates)
RNG <- data.frame( GTX=GTX, CHR=CHR.range, TX_S=TX_S.range, TX_E=TX_E.range )
RNG.rm <- RNG[ KEEP, ]
# Plink-format table: numeric chromosome and +/- 5kb buffered coordinates
PLNK <- data.frame( CHR=CHR.plink, TX_S=TX_S.plink, TX_E=TX_E.plink, GTX=GTX )
PLNK.rm <- PLNK[ KEEP, ]
## Write Tables
write.table( TAB.rm, paste(PathToOut,"Gene_Info.txt",sep="/" ), sep="\t",row.names=F,col.names=T,quote=F )
write.table( RNG.rm, paste(PathToOut,"Gene_Range.txt",sep="/" ), sep="\t",row.names=F,col.names=T,quote=F )
write.table( PLNK.rm, paste(PathToOut,"Gene_Range.plink.txt",sep="/" ), sep="\t",row.names=F,col.names=F,quote=F )
###############################################################
## END OF DOC #################################################
###############################################################
|
/2-Compile_Gene_Coords.R
|
no_license
|
k26stan/Gene_Score
|
R
| false
| false
| 2,944
|
r
|
## Compile/Filter Gene Coordinates ##
## Called by Gene_Score Pipeline ##
## February 18, 2015 ##
## Kristopher Standish ##
## Usage ##
# Rscript 2-Compile_Gene_Coords.R <Path/To/Out_Dir>
###############################################################
## PARSE COMMAND LINE #########################################
###############################################################
LINE <- commandArgs(trailingOnly = TRUE)
# LINE <- "/projects/janssen/Phased/20150218_Test_Genes"
PathToOut <- LINE[1]
if (is.na(PathToOut)) {
  stop("Usage: Rscript 2-Compile_Gene_Coords.R <Path/To/Out_Dir>", call. = FALSE)
}
# Specify other paths
PathToGeneCoords <- file.path(PathToOut, "Gene_Info.raw.txt")
# Check for proper parsing
print(paste("Output:", PathToOut))
print(paste("Gene Coords:", PathToGeneCoords))
###############################################################
## LOAD DATA ##################################################
###############################################################
## Load gene coordinate file (tab-separated, with header row)
TAB <- read.table(PathToGeneCoords, sep = "\t", header = TRUE,
                  comment.char = "", quote = "")
###############################################################
## EDIT FOR NEW TABLES ########################################
###############################################################
## Combine gene (col 15) and transcript (col 1) identifiers
GTX <- paste(TAB[, 15], TAB[, 1], sep = "_")
## Chromosome column; strip the "chr" tag for the PLINK table
CHR.range <- TAB[, 2]
CHR.plink <- gsub("chr", "", CHR.range)
## Buffer gene coordinates by 5 kb on each side for the PLINK table.
# NOTE(review): the buffered start can go negative for genes within
# 5 kb of the chromosome start — confirm downstream tools accept
# this, otherwise clamp with pmax(TAB[, 3] - 5000, 0).
TX_S.range <- TAB[, 3]
TX_S.plink <- TAB[, 3] - 5000
TX_E.range <- TAB[, 4]
TX_E.plink <- TAB[, 4] + 5000
###############################################################
## FILTER OUT TRANSCRIPTS #####################################
###############################################################
## Non-autosomal transcripts (anything not on chr 1-22)
RM.chr <- which(!(CHR.plink %in% 1:22))
## Transcripts with identical coordinates:
## string everything together (GENE_CHR_TXs_TXe_EXs_EXe_nEX) and
## flag the duplicates
STR <- paste(TAB[, 15], TAB[, 2], TAB[, 3], TAB[, 4],
             TAB[, 7], TAB[, 8], TAB[, 9], sep = "_")
RM.rpt <- which(duplicated(STR))
## Combine removal criteria
RM <- Reduce(union, list(RM.chr, RM.rpt))
## Build the three output tables, then remove flagged rows.
## BUG FIX: the original used TAB[-RM, ] unconditionally — with an
## empty RM, negative indexing by integer(0) drops EVERY row, so a
## clean input file would produce empty outputs. Guard the empty case.
RNG <- data.frame(GTX = GTX, CHR = CHR.range, TX_S = TX_S.range, TX_E = TX_E.range)
PLNK <- data.frame(CHR = CHR.plink, TX_S = TX_S.plink, TX_E = TX_E.plink, GTX = GTX)
if (length(RM) > 0) {
  TAB.rm <- TAB[-RM, ]
  RNG.rm <- RNG[-RM, ]
  PLNK.rm <- PLNK[-RM, ]
} else {
  TAB.rm <- TAB
  RNG.rm <- RNG
  PLNK.rm <- PLNK
}
## Write tables
write.table(TAB.rm, file.path(PathToOut, "Gene_Info.txt"),
            sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(RNG.rm, file.path(PathToOut, "Gene_Range.txt"),
            sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(PLNK.rm, file.path(PathToOut, "Gene_Range.plink.txt"),
            sep = "\t", row.names = FALSE, col.names = FALSE, quote = FALSE)
###############################################################
## END OF DOC #################################################
###############################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coverage_matrix.R
\name{coverage_matrix}
\alias{coverage_matrix}
\title{Given a set of regions for a chromosome, compute the coverage matrix for a
given SRA study.}
\usage{
coverage_matrix(
project,
chr,
regions,
chunksize = 1000,
bpparam = NULL,
outdir = NULL,
chrlen = NULL,
verbose = TRUE,
verboseLoad = verbose,
scale = TRUE,
round = FALSE,
...
)
}
\arguments{
\item{project}{A character vector with one SRA study id.}
\item{chr}{A character vector with the name of the chromosome.}
\item{regions}{A \link[GenomicRanges:GRanges-class]{GRanges-class} object with regions
for \code{chr} for which to calculate the coverage matrix.}
\item{chunksize}{A single integer vector defining the chunksize to use for
computing the coverage matrix. Regions will be split into different chunks
which can be useful when using a parallel instance as defined by
\code{bpparam}.}
\item{bpparam}{A \link[BiocParallel:BiocParallelParam-class]{BiocParallelParam-class} instance which
will be used to calculate the coverage matrix in parallel. By default,
\link[BiocParallel:SerialParam-class]{SerialParam-class} will be used.}
\item{outdir}{The destination directory for the downloaded file(s) that were
previously downloaded with \link{download_study}. If the files are missing,
but \code{outdir} is specified, they will get downloaded first. By default
\code{outdir} is set to \code{NULL} which will use the data from the web.
We only recommend downloading the full data if you will use it several times.}
\item{chrlen}{The chromosome length in base pairs. If it's \code{NULL}, the
chromosome length is extracted from the Rail-RNA runs GitHub repository.
Alternatively check the \code{SciServer} section in the vignette to see
how to access all the recount data via an R Jupyter Notebook.}
\item{verbose}{If \code{TRUE} basic status updates will be printed along the
way.}
\item{verboseLoad}{If \code{TRUE} basic status updates for loading the data
will be printed.}
\item{scale}{If \code{TRUE}, the coverage counts will be scaled to read
counts based on a library size of 40 million reads. Set \code{scale} to
\code{FALSE} if you want the raw coverage counts. The scaling method is by
AUC, as in the default option of \link{scale_counts}.}
\item{round}{If \code{TRUE}, the counts are rounded to integers. Set to
\code{TRUE} if you want to match the defaults of \link{scale_counts}.}
\item{...}{Additional arguments passed to \link{download_study} when
\code{outdir} is specified but the required files are missing.}
}
\value{
A \link[SummarizedExperiment:RangedSummarizedExperiment-class]{RangedSummarizedExperiment-class}
object with the counts stored in the assays slot.
}
\description{
Given a set of genomic regions as created by \link{expressed_regions}, this
function computes the coverage matrix for a library size of 40 million 100 bp
reads for a given SRA study.
}
\details{
When using \code{outdir = NULL} the information will be accessed
from the web on the fly. If you encounter internet access problems, it might
be best to first download the BigWig files using \link{download_study}. This
might be the best option if you are accessing all chromosomes for a given
project and/or are thinking of using different sets of \code{regions} (for
example, from different cutoffs applied to \link{expressed_regions}).
Alternatively check the \code{SciServer} section in the vignette to see
how to access all the recount data via an R Jupyter Notebook.
If you have \code{bwtool} installed, you can use
\url{https://github.com/LieberInstitute/recount.bwtool} for faster results.
Note that you will need to run \link{scale_counts} after running
\code{coverage_matrix_bwtool()}.
}
\examples{
if (.Platform$OS.type != "windows") {
## Reading BigWig files is not supported by rtracklayer on Windows
## Define expressed regions for study DRP002835, chrY
regions <- expressed_regions("DRP002835", "chrY",
cutoff = 5L,
maxClusterGap = 3000L
)
## Now calculate the coverage matrix for this study
rse <- coverage_matrix("DRP002835", "chrY", regions)
## One row per region
identical(length(regions), nrow(rse))
}
}
\seealso{
\link{download_study}, \link[derfinder:findRegions]{findRegions},
\link[derfinder:railMatrix]{railMatrix}
}
\author{
Leonardo Collado-Torres
}
|
/man/coverage_matrix.Rd
|
no_license
|
leekgroup/recount
|
R
| false
| true
| 4,409
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coverage_matrix.R
\name{coverage_matrix}
\alias{coverage_matrix}
\title{Given a set of regions for a chromosome, compute the coverage matrix for a
given SRA study.}
\usage{
coverage_matrix(
project,
chr,
regions,
chunksize = 1000,
bpparam = NULL,
outdir = NULL,
chrlen = NULL,
verbose = TRUE,
verboseLoad = verbose,
scale = TRUE,
round = FALSE,
...
)
}
\arguments{
\item{project}{A character vector with one SRA study id.}
\item{chr}{A character vector with the name of the chromosome.}
\item{regions}{A \link[GenomicRanges:GRanges-class]{GRanges-class} object with regions
for \code{chr} for which to calculate the coverage matrix.}
\item{chunksize}{A single integer vector defining the chunksize to use for
computing the coverage matrix. Regions will be split into different chunks
which can be useful when using a parallel instance as defined by
\code{bpparam}.}
\item{bpparam}{A \link[BiocParallel:BiocParallelParam-class]{BiocParallelParam-class} instance which
will be used to calculate the coverage matrix in parallel. By default,
\link[BiocParallel:SerialParam-class]{SerialParam-class} will be used.}
\item{outdir}{The destination directory for the downloaded file(s) that were
previously downloaded with \link{download_study}. If the files are missing,
but \code{outdir} is specified, they will get downloaded first. By default
\code{outdir} is set to \code{NULL} which will use the data from the web.
We only recommend downloading the full data if you will use it several times.}
\item{chrlen}{The chromosome length in base pairs. If it's \code{NULL}, the
chromosome length is extracted from the Rail-RNA runs GitHub repository.
Alternatively check the \code{SciServer} section in the vignette to see
how to access all the recount data via an R Jupyter Notebook.}
\item{verbose}{If \code{TRUE} basic status updates will be printed along the
way.}
\item{verboseLoad}{If \code{TRUE} basic status updates for loading the data
will be printed.}
\item{scale}{If \code{TRUE}, the coverage counts will be scaled to read
counts based on a library size of 40 million reads. Set \code{scale} to
\code{FALSE} if you want the raw coverage counts. The scaling method is by
AUC, as in the default option of \link{scale_counts}.}
\item{round}{If \code{TRUE}, the counts are rounded to integers. Set to
\code{TRUE} if you want to match the defaults of \link{scale_counts}.}
\item{...}{Additional arguments passed to \link{download_study} when
\code{outdir} is specified but the required files are missing.}
}
\value{
A \link[SummarizedExperiment:RangedSummarizedExperiment-class]{RangedSummarizedExperiment-class}
object with the counts stored in the assays slot.
}
\description{
Given a set of genomic regions as created by \link{expressed_regions}, this
function computes the coverage matrix for a library size of 40 million 100 bp
reads for a given SRA study.
}
\details{
When using \code{outdir = NULL} the information will be accessed
from the web on the fly. If you encounter internet access problems, it might
be best to first download the BigWig files using \link{download_study}. This
might be the best option if you are accessing all chromosomes for a given
project and/or are thinking of using different sets of \code{regions} (for
example, from different cutoffs applied to \link{expressed_regions}).
Alternatively check the \code{SciServer} section in the vignette to see
how to access all the recount data via an R Jupyter Notebook.
If you have \code{bwtool} installed, you can use
\url{https://github.com/LieberInstitute/recount.bwtool} for faster results.
Note that you will need to run \link{scale_counts} after running
\code{coverage_matrix_bwtool()}.
}
\examples{
if (.Platform$OS.type != "windows") {
## Reading BigWig files is not supported by rtracklayer on Windows
## Define expressed regions for study DRP002835, chrY
regions <- expressed_regions("DRP002835", "chrY",
cutoff = 5L,
maxClusterGap = 3000L
)
## Now calculate the coverage matrix for this study
rse <- coverage_matrix("DRP002835", "chrY", regions)
## One row per region
identical(length(regions), nrow(rse))
}
}
\seealso{
\link{download_study}, \link[derfinder:findRegions]{findRegions},
\link[derfinder:railMatrix]{railMatrix}
}
\author{
Leonardo Collado-Torres
}
|
## Demo script: converting a character vector into a factor.
print("This file is created in RStudio")
print("And now it's live on GitHub")

## Same eleven compass labels as before, built by splitting one string.
data <- strsplit("East West East North North East West West West East North", " ")[[1]]
print(data)
print(is.factor(data))        # plain character vector -> FALSE

factor_data <- factor(data)
print(factor_data)
print(is.factor(factor_data)) # TRUE after conversion
|
/Demo.R
|
no_license
|
sanketpanasuriya/datasciencecoursera
|
R
| false
| false
| 284
|
r
|
## Demo script: converting a character vector into a factor.
print("This file is created in RStudio")
print("And now it's live on GitHub")

## Same eleven compass labels as before, built by splitting one string.
data <- strsplit("East West East North North East West West West East North", " ")[[1]]
print(data)
print(is.factor(data))        # plain character vector -> FALSE

factor_data <- factor(data)
print(factor_data)
print(is.factor(factor_data)) # TRUE after conversion
|
## Generate RevBayes job scripts from a template, one per combination of
## background model, variance-covariance model, and run number.

# Read the template ("stub") script.
lines <- readLines("analyses/empirical_analyses/haemulids/step_5/src/template.Rev")
dir.create("analyses/empirical_analyses/haemulids/step_5/jobs", showWarnings = FALSE)

# Factors of the experimental design.
background_model <- c("constant_rate", "lognormal_noise")
vcv_model <- c("univariate", "variance_covariance")
nruns <- 4
grid <- expand.grid(background_model = background_model, vcv_model = vcv_model,
                    run = 1:nruns, stringsAsFactors = FALSE)

for (row_idx in seq_len(nrow(grid))) {
  combo <- grid[row_idx, ]
  bg <- combo[[1]]
  vcv <- combo[[2]]
  run_id <- as.numeric(combo[[3]])

  job <- lines
  # Lines 9-11 and 15 of the template hold the settings to substitute.
  job[9] <- paste0("noise_model = \"", bg, "\"")
  job[10] <- paste0("mvbm_model = \"", vcv, "\"")
  job[11] <- paste0("run_ID = ", run_id)
  # Record a random 9-digit seed as a comment in the job file.
  job[15] <- paste0("# seed(", paste0(sample(0:9, 9, replace = TRUE), collapse = ""), ")")

  out_path <- paste0("analyses/empirical_analyses/haemulids/step_5/jobs/job_",
                     vcv, "_", bg, "_", run_id, ".Rev")
  cat(job, file = out_path, sep = "\n")
}
|
/analyses/empirical_analyses/haemulids/step_5/src/write_jobs.R
|
no_license
|
mikeryanmay/musscrat_supp_archive
|
R
| false
| false
| 1,127
|
r
|
## Generate RevBayes job scripts from a template, one per combination of
## background model, variance-covariance model, and run number.

# Read the template ("stub") script.
lines <- readLines("analyses/empirical_analyses/haemulids/step_5/src/template.Rev")
dir.create("analyses/empirical_analyses/haemulids/step_5/jobs", showWarnings = FALSE)

# Factors of the experimental design.
background_model <- c("constant_rate", "lognormal_noise")
vcv_model <- c("univariate", "variance_covariance")
nruns <- 4
grid <- expand.grid(background_model = background_model, vcv_model = vcv_model,
                    run = 1:nruns, stringsAsFactors = FALSE)

for (row_idx in seq_len(nrow(grid))) {
  combo <- grid[row_idx, ]
  bg <- combo[[1]]
  vcv <- combo[[2]]
  run_id <- as.numeric(combo[[3]])

  job <- lines
  # Lines 9-11 and 15 of the template hold the settings to substitute.
  job[9] <- paste0("noise_model = \"", bg, "\"")
  job[10] <- paste0("mvbm_model = \"", vcv, "\"")
  job[11] <- paste0("run_ID = ", run_id)
  # Record a random 9-digit seed as a comment in the job file.
  job[15] <- paste0("# seed(", paste0(sample(0:9, 9, replace = TRUE), collapse = ""), ")")

  out_path <- paste0("analyses/empirical_analyses/haemulids/step_5/jobs/job_",
                     vcv, "_", bg, "_", run_id, ".Rev")
  cat(job, file = out_path, sep = "\n")
}
|
## This code is part of the ips package
## © C. Heibl 2014 (last update 2019-11-05)
#' @title Trim Alignment Ends
#' @description Trims both ends of a DNA sequence alignment to the first and
#' last alignment positions that contain a minimum number of IUPAC base
#' characters (\code{"a"}, \code{"c"}, \code{"g"}, \code{"t"}, \code{"r"},
#' \code{"y"}, \code{"s"}, \code{"w"}, \code{"k"}, \code{"m"}, \code{"b"},
#' \code{"d"}, \code{"h"}, \code{"v"}). In addition, all gap characters
#' (\code{"-"}) beyond the first and last base characters of each sequence are
#' replaced by the character \code{"n"}.
#' @param x An object of class \code{DNAbin}.
#' @param min.n.seq A \code{numeric} giving the required minimum number of
#' sequences having an non-ambiguous base character (a, c, g, t) in the first
#' and last position of the alignment; defaults to \code{4}, which is the
#' minimum number of sequences needed to produce a non-trivial unrooted
#' topology. Can also be given as a fraction.
#' @return An object of class \code{DNAbin}.
#' @seealso \code{\link{deleteEmptyCells}}, \code{\link{deleteGaps}}
#' @examples
#' # simple example alignment:
#' x <- structure(list(nb = 5, seq = c("acaaggtaca", "-caaggtac-",
#' "acaaggtaca", "aca--gtaca", "-ccaggta--"), nam = LETTERS[1:5]),
#' .Names = c("nb", "seq", "nam"), class = "alignment")
#' # convert to DNAbin:
#' x <- as.DNAbin(x)
#' # fill missing nucleotides:
#' x <- trimEnds(x)
#' # show results:
#' as.character(x[2, ])
#' @export
trimEnds <- function(x, min.n.seq = 4){
  ## Input validation: x must be a matrix-shaped 'DNAbin' alignment.
  if ( !inherits(x, "DNAbin") ){
    stop("'x' is not of class 'DNAbin'")
  }
  if ( !is.matrix(x) ){
    stop("'x' must be a matrix")
  }
  ## Store confidence scores; if not present cs == NULL
  ## --------------------------------------------------
  cs <- attr(x, "cs")
  ## Turn fraction into numbers: min.n.seq < 1 is read as a
  ## fraction of the number of sequences (rows)
  ## --------------------------
  if (min.n.seq < 1){
    min.n.seq <- ceiling(nrow(x) * min.n.seq)
  }
  ## If alignment has less then min.n.seq sequences,
  ## min.n.seq has to be adjusted (capped at nrow(x))
  ## ----------------------------
  min.n.seq <- min(nrow(x), min.n.seq)
  ## Replace terminal '-' with 'N'. In the DNAbin raw encoding,
  ## as.raw(4) is the gap character '-' and as.raw(240) is 'n'.
  ## -----------------------------
  replaceWithN <- function(x){
    n <- vector()
    ## head (5'-end): indices of a leading run of gaps
    id <- which(x == as.raw(4))
    if ( 1 %in% id ) n <- c(n, which(id == 1:length(id)))
    ## tail (3'-end): same test on the reversed sequence
    id <- which(rev(x == as.raw(4)))
    if ( 1 %in% id ) n <- c(n, (length(x):1)[which(id == 1:length(id))])
    ## replace - by N at all collected positions
    if ( length(n) > 0 ){
      x[n] <- as.raw(240)
    }
    x
  }
  x <- t(apply(x, 1, replaceWithN))
  class(x) <- "DNAbin"  # apply() strips attributes; restore class
  ## Remove 'sandspit' pattern: a short (<= 10 positions) stretch of
  ## bases separated from the rest of the sequence by a long (>= 5)
  ## gap run ("lagoon") is masked with 'n'.
  ## -------------------------
  removeSandspit <- function(x){
    ## anything to do?
    id <- which(x == as.raw(4))
    if (length(id) == 0) return(x)
    ## head (5'-end)
    n <- vector()
    lagoon <- id[id == min(id) + (1:length(id)) - 1]  # contiguous gap run
    spit <- 1:(min(lagoon) - 1)                       # bases before the run
    if ( length(spit) <= 10 & length(lagoon) >= 5 ){
      n <- c(n, union(spit, lagoon))
    }
    ## tail (3'-end)
    id <- which(rev(x == as.raw(4)))
    ## NOTE(review): this line filters the HEAD branch's 'lagoon' rather
    ## than recomputing one from the reversed 'id' above — looks like a
    ## copy-paste slip; confirm against upstream ips before relying on
    ## tail-side sandspit masking.
    lagoon <- lagoon[lagoon == min(lagoon) + (1:length(lagoon)) - 1]
    spit <- 1:(min(lagoon) - 1)
    if ( length(spit) <= 10 & length(lagoon) >= 5 ){
      n <- c(n, (length(id):1)[union(spit, lagoon)])
    }
    if ( length(n) > 0 ){
      x[n] <- as.raw(240)
    }
    x
  }
  x <- t(apply(x, 1, removeSandspit))
  class(x) <- "DNAbin"
  ## Trim ends to the first/last column holding >= min.n.seq bases.
  ## Raw byte codes below are the DNAbin encodings of the IUPAC chars.
  ## ------------------------------
  iupac <- c(a = 136, c = 40, g = 72, t = 24,
             r = 192, y = 48, s = 96, w = 144, k = 80, m = 160,
             b = 112, d = 208, h = 176, v = 224)
  iupac <- as.raw(iupac)
  ## Count IUPAC base characters in one alignment column.
  percentInformation <- function(x, iupac){
    length(which(x %in% iupac))
  }
  m <- apply(x, 2, percentInformation, iupac)
  if ( max(m) < min.n.seq ) stop("alignment contains less sequences then required")
  m <- range(which(m >= min.n.seq))
  m <- seq(from = m[1], to = m[2])
  x <- x[, m]
  ## Trim confidence scores to the kept columns and reappend
  ## -----------------------------------
  if (!is.null(cs)){
    if (is.matrix(cs)){
      cs <- cs[, m]
    } else {
      cs <- cs[m]
    }
    attr(x, "cs") <- cs
  }
  x
}
|
/R/trimEnds.R
|
no_license
|
heibl/ips
|
R
| false
| false
| 4,278
|
r
|
## This code is part of the ips package
## © C. Heibl 2014 (last update 2019-11-05)
#' @title Trim Alignment Ends
#' @description Trims both ends of a DNA sequence alignment to the first and
#' last alignment positions that contain a minimum number of IUPAC base
#' characters (\code{"a"}, \code{"c"}, \code{"g"}, \code{"t"}, \code{"r"},
#' \code{"y"}, \code{"s"}, \code{"w"}, \code{"k"}, \code{"m"}, \code{"b"},
#' \code{"d"}, \code{"h"}, \code{"v"}). In addition, all gap characters
#' (\code{"-"}) beyond the first and last base characters of each sequence are
#' replaced by the character \code{"n"}.
#' @param x An object of class \code{DNAbin}.
#' @param min.n.seq A \code{numeric} giving the required minimum number of
#' sequences having an non-ambiguous base character (a, c, g, t) in the first
#' and last position of the alignment; defaults to \code{4}, which is the
#' minimum number of sequences needed to produce a non-trivial unrooted
#' topology. Can also be given as a fraction.
#' @return An object of class \code{DNAbin}.
#' @seealso \code{\link{deleteEmptyCells}}, \code{\link{deleteGaps}}
#' @examples
#' # simple example alignment:
#' x <- structure(list(nb = 5, seq = c("acaaggtaca", "-caaggtac-",
#' "acaaggtaca", "aca--gtaca", "-ccaggta--"), nam = LETTERS[1:5]),
#' .Names = c("nb", "seq", "nam"), class = "alignment")
#' # convert to DNAbin:
#' x <- as.DNAbin(x)
#' # fill missing nucleotides:
#' x <- trimEnds(x)
#' # show results:
#' as.character(x[2, ])
#' @export
trimEnds <- function(x, min.n.seq = 4){
  ## Input validation: x must be a matrix-shaped 'DNAbin' alignment.
  if ( !inherits(x, "DNAbin") ){
    stop("'x' is not of class 'DNAbin'")
  }
  if ( !is.matrix(x) ){
    stop("'x' must be a matrix")
  }
  ## Store confidence scores; if not present cs == NULL
  ## --------------------------------------------------
  cs <- attr(x, "cs")
  ## Turn fraction into numbers: min.n.seq < 1 is read as a
  ## fraction of the number of sequences (rows)
  ## --------------------------
  if (min.n.seq < 1){
    min.n.seq <- ceiling(nrow(x) * min.n.seq)
  }
  ## If alignment has less then min.n.seq sequences,
  ## min.n.seq has to be adjusted (capped at nrow(x))
  ## ----------------------------
  min.n.seq <- min(nrow(x), min.n.seq)
  ## Replace terminal '-' with 'N'. In the DNAbin raw encoding,
  ## as.raw(4) is the gap character '-' and as.raw(240) is 'n'.
  ## -----------------------------
  replaceWithN <- function(x){
    n <- vector()
    ## head (5'-end): indices of a leading run of gaps
    id <- which(x == as.raw(4))
    if ( 1 %in% id ) n <- c(n, which(id == 1:length(id)))
    ## tail (3'-end): same test on the reversed sequence
    id <- which(rev(x == as.raw(4)))
    if ( 1 %in% id ) n <- c(n, (length(x):1)[which(id == 1:length(id))])
    ## replace - by N at all collected positions
    if ( length(n) > 0 ){
      x[n] <- as.raw(240)
    }
    x
  }
  x <- t(apply(x, 1, replaceWithN))
  class(x) <- "DNAbin"  # apply() strips attributes; restore class
  ## Remove 'sandspit' pattern: a short (<= 10 positions) stretch of
  ## bases separated from the rest of the sequence by a long (>= 5)
  ## gap run ("lagoon") is masked with 'n'.
  ## -------------------------
  removeSandspit <- function(x){
    ## anything to do?
    id <- which(x == as.raw(4))
    if (length(id) == 0) return(x)
    ## head (5'-end)
    n <- vector()
    lagoon <- id[id == min(id) + (1:length(id)) - 1]  # contiguous gap run
    spit <- 1:(min(lagoon) - 1)                       # bases before the run
    if ( length(spit) <= 10 & length(lagoon) >= 5 ){
      n <- c(n, union(spit, lagoon))
    }
    ## tail (3'-end)
    id <- which(rev(x == as.raw(4)))
    ## NOTE(review): this line filters the HEAD branch's 'lagoon' rather
    ## than recomputing one from the reversed 'id' above — looks like a
    ## copy-paste slip; confirm against upstream ips before relying on
    ## tail-side sandspit masking.
    lagoon <- lagoon[lagoon == min(lagoon) + (1:length(lagoon)) - 1]
    spit <- 1:(min(lagoon) - 1)
    if ( length(spit) <= 10 & length(lagoon) >= 5 ){
      n <- c(n, (length(id):1)[union(spit, lagoon)])
    }
    if ( length(n) > 0 ){
      x[n] <- as.raw(240)
    }
    x
  }
  x <- t(apply(x, 1, removeSandspit))
  class(x) <- "DNAbin"
  ## Trim ends to the first/last column holding >= min.n.seq bases.
  ## Raw byte codes below are the DNAbin encodings of the IUPAC chars.
  ## ------------------------------
  iupac <- c(a = 136, c = 40, g = 72, t = 24,
             r = 192, y = 48, s = 96, w = 144, k = 80, m = 160,
             b = 112, d = 208, h = 176, v = 224)
  iupac <- as.raw(iupac)
  ## Count IUPAC base characters in one alignment column.
  percentInformation <- function(x, iupac){
    length(which(x %in% iupac))
  }
  m <- apply(x, 2, percentInformation, iupac)
  if ( max(m) < min.n.seq ) stop("alignment contains less sequences then required")
  m <- range(which(m >= min.n.seq))
  m <- seq(from = m[1], to = m[2])
  x <- x[, m]
  ## Trim confidence scores to the kept columns and reappend
  ## -----------------------------------
  if (!is.null(cs)){
    if (is.matrix(cs)){
      cs <- cs[, m]
    } else {
      cs <- cs[m]
    }
    attr(x, "cs") <- cs
  }
  x
}
|
## Hawks data-wrangling exercises (tidyverse + Stat2Data).

# Loading required libraries
library(tidyverse)
library(Stat2Data)

# Load the Hawks data set
data("Hawks")

# Red-tailed hawks weighing at least 1000 g, keeping three measurements.
hSF <- Hawks %>%
  filter(Species == "RT", Weight >= 1000) %>%
  select("Wing", "Weight", "Tail")

# First rows of hSF sorted by ascending wing size.
head(hSF %>% arrange(Wing))

# Lookup table mapping species codes to full names.
species_code <- c("RT", "SS", "CH")
species_name_full <- c("Red-tailed", "Sharp-shinned", "Cooper's")
species_full_name <- data.frame(species_code, species_name_full)

# Replace the species code column with the full species name.
hawksFullName <- Hawks %>%
  left_join(species_full_name, by = c("Species" = "species_code")) %>%
  select(-Species) %>%
  rename("Species" = "species_name_full")
head(hawksFullName %>%
  select("Species", "Wing", "Weight"))

# Bird "BMI": weight (g) relative to squared wing length.
hawksWithBMI <- Hawks %>%
  mutate(bird_BMI = 1000 * (Weight / Wing^2)) %>%
  select(Species, bird_BMI) %>%
  arrange(desc(bird_BMI))

# Violin plot of BMI by species, excluding extreme outliers.
# (bird_BMI <= 100 is the readable form of the original !bird_BMI > 100.)
hawksWithBMI %>%
  filter(bird_BMI <= 100) %>%
  ggplot(aes(x = bird_BMI, y = Species, fill = Species)) +
  xlab("Bird BMI") +
  geom_violin() +
  ylab("Species")

# Per-species wing summaries.
# FIX: use TRUE instead of T — T is an ordinary (reassignable) binding.
hawksFullName %>%
  group_by(Species) %>%
  summarize(
    num_rows = n(),
    min_wing = min(Wing, na.rm = TRUE),
    avg_wing = mean(Wing, na.rm = TRUE),
    max_wing = max(Wing, na.rm = TRUE),
    avg_tail_wing_ratio = mean(Wing / Tail, na.rm = TRUE)
  )

# Count of missing values per measurement column, by species.
# across() replaces the eight hand-written sum(is.na(...)) columns.
hawksFullName %>%
  group_by(Species) %>%
  summarize(across(
    c(Wing, Weight, Culmen, Hallux, Tail, StandardTail, Tarsus, Crop),
    ~ sum(is.na(.x))
  ))
|
/ass_4_data_wrangling.R
|
no_license
|
noah-sheldon/firstRProject
|
R
| false
| false
| 1,689
|
r
|
## Hawks data-wrangling exercises (tidyverse + Stat2Data).

# Loading required libraries
library(tidyverse)
library(Stat2Data)

# Load the Hawks data set
data("Hawks")

# Red-tailed hawks weighing at least 1000 g, keeping three measurements.
hSF <- Hawks %>%
  filter(Species == "RT", Weight >= 1000) %>%
  select("Wing", "Weight", "Tail")

# First rows of hSF sorted by ascending wing size.
head(hSF %>% arrange(Wing))

# Lookup table mapping species codes to full names.
species_code <- c("RT", "SS", "CH")
species_name_full <- c("Red-tailed", "Sharp-shinned", "Cooper's")
species_full_name <- data.frame(species_code, species_name_full)

# Replace the species code column with the full species name.
hawksFullName <- Hawks %>%
  left_join(species_full_name, by = c("Species" = "species_code")) %>%
  select(-Species) %>%
  rename("Species" = "species_name_full")
head(hawksFullName %>%
  select("Species", "Wing", "Weight"))

# Bird "BMI": weight (g) relative to squared wing length.
hawksWithBMI <- Hawks %>%
  mutate(bird_BMI = 1000 * (Weight / Wing^2)) %>%
  select(Species, bird_BMI) %>%
  arrange(desc(bird_BMI))

# Violin plot of BMI by species, excluding extreme outliers.
# (bird_BMI <= 100 is the readable form of the original !bird_BMI > 100.)
hawksWithBMI %>%
  filter(bird_BMI <= 100) %>%
  ggplot(aes(x = bird_BMI, y = Species, fill = Species)) +
  xlab("Bird BMI") +
  geom_violin() +
  ylab("Species")

# Per-species wing summaries.
# FIX: use TRUE instead of T — T is an ordinary (reassignable) binding.
hawksFullName %>%
  group_by(Species) %>%
  summarize(
    num_rows = n(),
    min_wing = min(Wing, na.rm = TRUE),
    avg_wing = mean(Wing, na.rm = TRUE),
    max_wing = max(Wing, na.rm = TRUE),
    avg_tail_wing_ratio = mean(Wing / Tail, na.rm = TRUE)
  )

# Count of missing values per measurement column, by species.
# across() replaces the eight hand-written sum(is.na(...)) columns.
hawksFullName %>%
  group_by(Species) %>%
  summarize(across(
    c(Wing, Weight, Culmen, Hallux, Tail, StandardTail, Tarsus, Crop),
    ~ sum(is.na(.x))
  ))
|
## Auto-generated fuzzing/valgrind regression case: replays a stored
## argument list against the internal DLMtool function LBSPRopt and
## prints the structure of whatever it returns.
## NOTE(review): the inputs are deliberately degenerate (length-0
## vectors, zeros, a 1x1 Prob matrix, a denormal double in pars) —
## this exercises crash-safety on edge cases, not realistic inputs.
testlist <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.81959242373749e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rLens = numeric(0))
result <- do.call(DLMtool:::LBSPRopt,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRopt/AFL_LBSPRopt/LBSPRopt_valgrind_files/1615838156-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 397
|
r
|
## Auto-generated fuzzing/valgrind regression case: replays a stored
## argument list against the internal DLMtool function LBSPRopt and
## prints the structure of whatever it returns.
## NOTE(review): the inputs are deliberately degenerate (length-0
## vectors, zeros, a 1x1 Prob matrix, a denormal double in pars) —
## this exercises crash-safety on edge cases, not realistic inputs.
testlist <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.81959242373749e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rLens = numeric(0))
result <- do.call(DLMtool:::LBSPRopt,testlist)
str(result)
|
## Plot three piecewise-linear ("tropical") surfaces with rgl.
## FIX: the original built Y for the first surface with
## dim(y) <- c(1:21), which assigns a 21-element dim vector and errors
## ("dims do not match the length of object"); intended was a 21x21
## layout. The list-append cbind/rbind loops (O(n^2) copies) are also
## replaced by direct matrix() construction.
library(rgl)

## Evaluate scalar f(x, y) on the grid x × y.
## Returns a length(x) x length(y) matrix; rows follow x, columns follow y.
eval_grid <- function(x, y, f) {
  z <- matrix(0, nrow = length(x), ncol = length(y))
  for (i in seq_along(x)) {
    for (j in seq_along(y)) {
      z[i, j] <- f(x[i], y[j])
    }
  }
  z
}

## Coordinate matrices matching eval_grid's layout:
## each column of grid_x repeats x; each row of grid_y repeats y.
grid_x <- function(x, y) matrix(x, nrow = length(x), ncol = length(y))
grid_y <- function(x, y) matrix(y, nrow = length(x), ncol = length(y), byrow = TRUE)

## Surface 1: z = min(0, x, y) on [-5, 5]^2 -----------------------------------
x <- seq(-5, 5, by = 0.5)
y <- seq(-5, 5, by = 0.5)
f1 <- function(x, y) min(0, x, y)
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f1))

## Surface 2: tropical quadratic 10+5.5x+0x^2+8.5y+6.5y^2+4.5xy ---------------
x <- seq(-10, 10, by = 0.1)
y <- seq(-10, 10, by = 0.1)
f2 <- function(x, y) max(10, 5.5 + x, 2 * x, 8.5 + y, 6.5 + 2 * y, 4.5 + x + y)
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f2), col = "blue")

## Surface 3: tropical cubic 5+4x+2.25x^2+0x^3+4y+2.5xy+x^2y+3y^2+1.5xy^2+1.5y^3
f3 <- function(x, y) {
  max(5, 4 + x, 2.25 + 2 * x, 3 * x, 4 + y, 2.5 + x + y,
      1 + 2 * x + y, 3 + 2 * y, 1.5 + x + 2 * y, 1.5 + 3 * y)
}
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f3), col = "blue")
|
/max.R
|
no_license
|
LZW-0313/Some-small-exercises-on-R
|
R
| false
| false
| 1,581
|
r
|
## Plot three piecewise-linear ("tropical") surfaces with rgl.
## FIX: the original built Y for the first surface with
## dim(y) <- c(1:21), which assigns a 21-element dim vector and errors
## ("dims do not match the length of object"); intended was a 21x21
## layout. The list-append cbind/rbind loops (O(n^2) copies) are also
## replaced by direct matrix() construction.
library(rgl)

## Evaluate scalar f(x, y) on the grid x × y.
## Returns a length(x) x length(y) matrix; rows follow x, columns follow y.
eval_grid <- function(x, y, f) {
  z <- matrix(0, nrow = length(x), ncol = length(y))
  for (i in seq_along(x)) {
    for (j in seq_along(y)) {
      z[i, j] <- f(x[i], y[j])
    }
  }
  z
}

## Coordinate matrices matching eval_grid's layout:
## each column of grid_x repeats x; each row of grid_y repeats y.
grid_x <- function(x, y) matrix(x, nrow = length(x), ncol = length(y))
grid_y <- function(x, y) matrix(y, nrow = length(x), ncol = length(y), byrow = TRUE)

## Surface 1: z = min(0, x, y) on [-5, 5]^2 -----------------------------------
x <- seq(-5, 5, by = 0.5)
y <- seq(-5, 5, by = 0.5)
f1 <- function(x, y) min(0, x, y)
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f1))

## Surface 2: tropical quadratic 10+5.5x+0x^2+8.5y+6.5y^2+4.5xy ---------------
x <- seq(-10, 10, by = 0.1)
y <- seq(-10, 10, by = 0.1)
f2 <- function(x, y) max(10, 5.5 + x, 2 * x, 8.5 + y, 6.5 + 2 * y, 4.5 + x + y)
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f2), col = "blue")

## Surface 3: tropical cubic 5+4x+2.25x^2+0x^3+4y+2.5xy+x^2y+3y^2+1.5xy^2+1.5y^3
f3 <- function(x, y) {
  max(5, 4 + x, 2.25 + 2 * x, 3 * x, 4 + y, 2.5 + x + y,
      1 + 2 * x + y, 3 + 2 * y, 1.5 + x + 2 * y, 1.5 + 3 * y)
}
plot3d(grid_x(x, y), grid_y(x, y), eval_grid(x, y, f3), col = "blue")
|
#' Monthly Australian Wine Sales
#'
#' Monthly sales of wine (in thousands of liters) in Australia from January 1980 through July 1995.
#'
#'
#' @format A data frame with 187 observations of 7 variables
#' \describe{
#'   \item{winet}{month index, 1-187}
#'   \item{fortw}{fortified wine}
#'   \item{dryw}{dry white}
#'   \item{sweetw}{sweet white}
#'   \item{red}{red wine}
#'   \item{rose}{rose wine}
#'   \item{spark}{sparkling wine}
#' }
#'
#' @source Introductory Time Series with R, by Cowpertwait and Metcalfe
"auswine"
|
/R/auswine.R
|
no_license
|
richujos/tswrdata
|
R
| false
| false
| 520
|
r
|
#' Monthly Australian Wine Sales
#'
#' Monthly sales of wine (in thousands of liters) in Australia from January 1980 through July 1995.
#'
#'
#' @format A data frame with 187 observations of 7 variables
#' \describe{
#'   \item{winet}{month index, 1-187}
#'   \item{fortw}{fortified wine}
#'   \item{dryw}{dry white}
#'   \item{sweetw}{sweet white}
#'   \item{red}{red wine}
#'   \item{rose}{rose wine}
#'   \item{spark}{sparkling wine}
#' }
#'
#' @source Introductory Time Series with R, by Cowpertwait and Metcalfe
"auswine"
|
## Assignment: Caching the Inverse of a Matrix
## Matrix inversion is usually a costly computation, so it can pay to
## cache the inverse of a matrix rather than recompute it repeatedly.

## Create a special "matrix": a list of four closures sharing one
## environment that holds the matrix and a cached copy of its inverse
## (NULL until computed; reset whenever a new matrix is assigned).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new data invalidates the cache
  }
  get <- function() x
  setInverse <- function(inverse) cached_inverse <<- inverse
  getInverse <- function() cached_inverse
  list(set = set, get = get,
       setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix" produced by
## makeCacheMatrix(). On a cache miss it computes the inverse with
## solve(), stores it back into the wrapper, and returns it; on a cache
## hit it announces "getting cached value" and returns the stored
## inverse. Extra arguments (...) are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: reuse the previously computed inverse.
  message("getting cached value")
  cached
}
|
/cachematrix.R
|
no_license
|
Devashree26-rai/ProgrammingAssignment2
|
R
| false
| false
| 986
|
r
|
## Assignment: Caching the Inverse of a Matrix
## Matrix inversion can be expensive, so rather than recomputing the
## inverse on every use we store it alongside the matrix.
## makeCacheMatrix() builds a matrix wrapper: a list of four closures
## sharing the matrix `x` and its cached inverse (NULL until computed,
## and reset to NULL whenever the matrix is replaced):
##   set(y)              -- replace the matrix and invalidate the cache
##   get()               -- return the current matrix
##   setInverse(inverse) -- store a computed inverse
##   getInverse()        -- return the cached inverse, or NULL
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed for the current matrix".
  cached <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Record a freshly computed inverse in the cache.
  setInverse <- function(inverse) {
    cached <<- inverse
  }
  # Return the cached inverse, or NULL if none has been stored.
  getInverse <- function() {
    cached
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix" produced by
## makeCacheMatrix(). On a cache miss it computes the inverse with
## solve(), stores it back into the wrapper, and returns it; on a cache
## hit it announces "getting cached value" and returns the stored
## inverse. Extra arguments (...) are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: reuse the previously computed inverse.
  message("getting cached value")
  cached
}
|
## Baseline time-series models for daily TOTAL_LOAD (tswge).
## Fits ARMA, ARIMA (first difference), log-difference, annual- and
## quarterly-seasonal-difference models, scoring each with a short-term
## (30-day) and long-term (180-day) ASE against a 1000-observation holdout.
library(tidyverse)
library(tswge)
#Read in file
df <-read.csv(file="C:/users/bodie/Documents/CAPSTONE_DATA.csv")
#Change Date format
df$DAY <- as.Date(df$DAY,format="%m/%d/%Y")
#Subset dataframe
Load_df <- df %>% select('DAY','TOTAL_LOAD')
#Quick check for na's and types
summary(Load_df)
str(Load_df)
###Boxplot of Total Load
Load_df %>% ggplot(aes(TOTAL_LOAD))+geom_boxplot()
##Histogram
Load_df %>% ggplot(aes(TOTAL_LOAD))+geom_histogram()
##plots
plotts.sample.wge(Load_df$TOTAL_LOAD)
###Doesn't look to meet three conditions of stationary but will proceed for base line
nrow(Load_df)
##Define short term Forecasting as 30 days
##Long term forecasting as 180 days.
##Can redefine later but lets use as base for now
##Update test size: everything but the last 1000 observations is training.
test_size = nrow(Load_df)-1000
#Checking length of test_size values
test_size
Train_Load_DF <- Load_df[1:test_size,]
Test_Load_DF <- Load_df[(test_size+1):nrow(Load_df),]
##Check the lengths tie out
((nrow(Train_Load_DF)+nrow(Test_Load_DF))==nrow(Load_df))
##Creating List for compare later on
Model_name <- vector(mode = "list")
Short_Term_ASE_Score <- vector(mode = "list")
Long_Term_ASE_Score <- vector(mode = "list")
##ARMA
aic5.wge(Train_Load_DF$TOTAL_LOAD,p=0:5,q=0:5,type="bic")
aic5.wge(Train_Load_DF$TOTAL_LOAD,p=0:5,q=0:5,type="aic")
#Bic picks 1,4 and in top for AIC.
arma_model = est.arma.wge(Train_Load_DF$TOTAL_LOAD,p=1,q=4)
##Short TERM forecasting for ARMA model 30 days
#NOTE(review): lastn = T makes fore.aruma.wge forecast the LAST n.ahead
#points of the series passed in (the training tail), yet the ASE below
#compares f$f against the holdout Test set -- confirm whether lastn = F
#(a true out-of-sample forecast) was intended. Same pattern repeats for
#every model below.
f = fore.aruma.wge(Train_Load_DF$TOTAL_LOAD,phi = arma_model$phi, theta = arma_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_ARMA_ASE = mean((Test_Load_DF$TOTAL_LOAD[1:30]- f$f)^2)
Short_Term_ARMA_ASE
#Long Term ARMA
f = fore.aruma.wge(Train_Load_DF$TOTAL_LOAD,phi = arma_model$phi, theta = arma_model$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_ARMA_ASE = mean((Test_Load_DF$TOTAL_LOAD[1:180]- f$f)^2)
Long_Term_ARMA_ASE
##Append List
Model_name <- c(Model_name,"ARMA")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_ARMA_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_ARMA_ASE)
#####ARIMA
##Here we are differencing the dataset then doing train/test split.
trans.p = artrans.wge(Load_df$TOTAL_LOAD,phi.tr = 1)
plotts.sample.wge(trans.p)
#Train test split
length(trans.p)
#4168
#Dataframe so we can subset
trans.p <- as.data.frame(trans.p)
test_size_difference = nrow(trans.p)-1000
#Checking length of test_size values
test_size_difference
#Subsetting a one-column data.frame with [rows,] drops to a plain vector,
#which is why length() (not nrow) is used in the checks below.
Train_Load_DF_Diff <- trans.p[1:test_size_difference,]
Test_Load_DF_Diff <- trans.p[(test_size_difference+1):nrow(trans.p),]
#Check the lengths tie out. Since the train/test sets aren't DF we have to use lengths instead of nrow.
(length(Train_Load_DF_Diff)+length(Test_Load_DF_Diff)) == nrow(trans.p)
#Find p and Q
aic5.wge(Train_Load_DF_Diff,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_Diff,p=0:5,q=0:5,type='bic')
#Both BIC & AIC pick 4,3.
arima_model = est.arma.wge(Train_Load_DF_Diff,p=4,q=3)
#short term
f = fore.aruma.wge(Train_Load_DF_Diff,phi = arima_model$phi, theta = arima_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_ARIMA_ASE = mean((Test_Load_DF_Diff[1:30] - f$f)^2)
Short_Term_ARIMA_ASE
##Long term
f = fore.aruma.wge(Train_Load_DF_Diff,phi = arima_model$phi, theta = arima_model$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_ARIMA_ASE = mean((Test_Load_DF_Diff[1:180] - f$f)^2)
Long_Term_ARIMA_ASE
##Append List
Model_name <- c(Model_name,"ARIMA")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_ARIMA_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_ARIMA_ASE)
###Why are we getting neg values when we difference the data? This is why the ASE is terrible.
###Log this works
#Let's do log then take the difference
Load_df$LOG_LOAD <- log(Load_df$TOTAL_LOAD)
LOG_DF <-Load_df %>% select('DAY','LOG_LOAD')
plotts.sample.wge(LOG_DF$LOG_LOAD)
###Difference
trans.log = artrans.wge(LOG_DF$LOG_LOAD,phi.tr = 1)
##Train test split
trans.log <-as.data.frame(trans.log)
test_size_log_difference = nrow(trans.log)-1000
#Checking length of test_size values
test_size_log_difference
Train_Load_DF_log_Diff <- trans.log[1:test_size_log_difference,]
Test_Load_DF_log_Diff <- trans.log[(test_size_log_difference+1):nrow(trans.log),]
((length(Train_Load_DF_log_Diff)+length(Test_Load_DF_log_Diff))==nrow(trans.log))
aic5.wge(Train_Load_DF_log_Diff,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_log_Diff,p=0:5,q=0:5,type='bic')
#Both BIC & AIC pick 4,3.
log_difference = est.arma.wge(Train_Load_DF_log_Diff,p=4,q=3)
#short term
f = fore.aruma.wge(Train_Load_DF_log_Diff,phi = log_difference$phi, theta = log_difference$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_log_difference_ASE = mean((Test_Load_DF_log_Diff[1:30] - f$f)^2)
Short_Term_log_difference_ASE
###Long
f = fore.aruma.wge(Train_Load_DF_log_Diff,phi = log_difference$phi, theta = log_difference$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_log_difference_ASE = mean((Test_Load_DF_log_Diff[1:180] - f$f)^2)
Long_Term_log_difference_ASE
##Append
Model_name <- c(Model_name,"Log Difference")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_log_difference_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_log_difference_ASE)
###Seasonality component
#Annual seasonal difference of the log series: c(rep(0,364),1) is a
#transform at lag 365, i.e. (1 - B^365).
Seasonality_removed_df <- artrans.wge(LOG_DF$LOG_LOAD,c(rep(0,364),1))
plotts.sample.wge(Seasonality_removed_df)
length(Seasonality_removed_df)
Seasonality_removed_df <- as.data.frame(Seasonality_removed_df)
test_size_Seasonality = nrow(Seasonality_removed_df)-1000
#Checking length of test_size values
test_size_Seasonality
Train_Load_DF_seasonality_removed <- Seasonality_removed_df[1:test_size_Seasonality,]
Test_Load_DF_seasonality_removed <- Seasonality_removed_df[(test_size_Seasonality+1):nrow(Seasonality_removed_df),]
#check the splits add up
((length(Train_Load_DF_seasonality_removed)+length(Test_Load_DF_seasonality_removed))==nrow(Seasonality_removed_df))
aic5.wge(Train_Load_DF_seasonality_removed,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_seasonality_removed,p=0:5,q=0:5,type='bic')
#NOTE(review): the original comment here claimed AIC/BIC picked (4,3),
#but the model below is fit with p=5, q=5 -- confirm which order was
#actually intended.
seasonality_removed_model = est.arma.wge(Train_Load_DF_seasonality_removed,p=5,q=5)
##short term
f = fore.aruma.wge(Train_Load_DF_seasonality_removed,phi = seasonality_removed_model$phi, theta = seasonality_removed_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_seasonality_ASE = mean((Test_Load_DF_seasonality_removed[1:30] - f$f)^2)
Short_Term_seasonality_ASE
##long term
f = fore.aruma.wge(Train_Load_DF_seasonality_removed,phi = seasonality_removed_model$phi, theta = seasonality_removed_model$theta, n.ahead = 180,limits = T, lastn = T)
long_Term_seasonality_ASE = mean((Test_Load_DF_seasonality_removed[1:180] - f$f)^2)
long_Term_seasonality_ASE
##Append List
Model_name <- c(Model_name,"Seasonality_removed")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_seasonality_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,long_Term_seasonality_ASE)
##quarterly data
#Lag-4 seasonal difference: (1 - B^4).
quarter_seasonality <- artrans.wge(Load_df$TOTAL_LOAD,c(0,0,0,1))
quarter_seasonality <- as.data.frame(quarter_seasonality)
##Train test split
test_size_quarter = nrow(quarter_seasonality)-1000
#Checking length of test_size values
test_size_quarter
Train_Load_DF_quarter <- quarter_seasonality[1:test_size_quarter,]
Test_Load_DF_quarter <- quarter_seasonality[(test_size_quarter+1):nrow(quarter_seasonality),]
#check the splits add up
((length(Train_Load_DF_quarter)+length(Test_Load_DF_quarter))==nrow(quarter_seasonality))
##Build model
aic5.wge(Train_Load_DF_quarter,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_quarter,p=0:5,q=0:5,type='bic')
#Both BIC & AIC 5,1 in their top
quarter_removed_model = est.arma.wge(Train_Load_DF_quarter,p=5,q=1)
##short term
f = fore.aruma.wge(Train_Load_DF_quarter,phi = quarter_removed_model$phi, theta = quarter_removed_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_quarter_ASE = mean((Test_Load_DF_quarter[1:30] - f$f)^2)
Short_Term_quarter_ASE
##long term
f = fore.aruma.wge(Train_Load_DF_quarter,phi = quarter_removed_model$phi, theta = quarter_removed_model$theta, n.ahead = 180,limits = T, lastn = T)
long_Term_quarter_ASE = mean((Test_Load_DF_quarter[1:180] - f$f)^2)
long_Term_quarter_ASE
##Append list
Model_name <- c(Model_name,"quarter_removed_model")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_quarter_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,long_Term_quarter_ASE)
#turn list into dataframe
#NOTE(review): cbind() on lists yields a list-matrix rather than a true
#data.frame -- presumably fine for display; confirm if a data.frame is
#needed downstream.
Model_results <-cbind(Model_name,Short_Term_ASE_Score,Long_Term_ASE_Score)
Model_results
|
/Version_Two_Capstone_Time_Series.R
|
no_license
|
bodief/Time_Series
|
R
| false
| false
| 8,918
|
r
|
## Baseline time-series models for daily TOTAL_LOAD (tswge).
## Fits ARMA, ARIMA (first difference), log-difference, annual- and
## quarterly-seasonal-difference models, scoring each with a short-term
## (30-day) and long-term (180-day) ASE against a 1000-observation holdout.
library(tidyverse)
library(tswge)
#Read in file
df <-read.csv(file="C:/users/bodie/Documents/CAPSTONE_DATA.csv")
#Change Date format
df$DAY <- as.Date(df$DAY,format="%m/%d/%Y")
#Subset dataframe
Load_df <- df %>% select('DAY','TOTAL_LOAD')
#Quick check for na's and types
summary(Load_df)
str(Load_df)
###Boxplot of Total Load
Load_df %>% ggplot(aes(TOTAL_LOAD))+geom_boxplot()
##Histogram
Load_df %>% ggplot(aes(TOTAL_LOAD))+geom_histogram()
##plots
plotts.sample.wge(Load_df$TOTAL_LOAD)
###Doesn't look to meet three conditions of stationary but will proceed for base line
nrow(Load_df)
##Define short term Forecasting as 30 days
##Long term forecasting as 180 days.
##Can redefine later but lets use as base for now
##Update test size: everything but the last 1000 observations is training.
test_size = nrow(Load_df)-1000
#Checking length of test_size values
test_size
Train_Load_DF <- Load_df[1:test_size,]
Test_Load_DF <- Load_df[(test_size+1):nrow(Load_df),]
##Check the lengths tie out
((nrow(Train_Load_DF)+nrow(Test_Load_DF))==nrow(Load_df))
##Creating List for compare later on
Model_name <- vector(mode = "list")
Short_Term_ASE_Score <- vector(mode = "list")
Long_Term_ASE_Score <- vector(mode = "list")
##ARMA
aic5.wge(Train_Load_DF$TOTAL_LOAD,p=0:5,q=0:5,type="bic")
aic5.wge(Train_Load_DF$TOTAL_LOAD,p=0:5,q=0:5,type="aic")
#Bic picks 1,4 and in top for AIC.
arma_model = est.arma.wge(Train_Load_DF$TOTAL_LOAD,p=1,q=4)
##Short TERM forecasting for ARMA model 30 days
#NOTE(review): lastn = T makes fore.aruma.wge forecast the LAST n.ahead
#points of the series passed in (the training tail), yet the ASE below
#compares f$f against the holdout Test set -- confirm whether lastn = F
#(a true out-of-sample forecast) was intended. Same pattern repeats for
#every model below.
f = fore.aruma.wge(Train_Load_DF$TOTAL_LOAD,phi = arma_model$phi, theta = arma_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_ARMA_ASE = mean((Test_Load_DF$TOTAL_LOAD[1:30]- f$f)^2)
Short_Term_ARMA_ASE
#Long Term ARMA
f = fore.aruma.wge(Train_Load_DF$TOTAL_LOAD,phi = arma_model$phi, theta = arma_model$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_ARMA_ASE = mean((Test_Load_DF$TOTAL_LOAD[1:180]- f$f)^2)
Long_Term_ARMA_ASE
##Append List
Model_name <- c(Model_name,"ARMA")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_ARMA_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_ARMA_ASE)
#####ARIMA
##Here we are differencing the dataset then doing train/test split.
trans.p = artrans.wge(Load_df$TOTAL_LOAD,phi.tr = 1)
plotts.sample.wge(trans.p)
#Train test split
length(trans.p)
#4168
#Dataframe so we can subset
trans.p <- as.data.frame(trans.p)
test_size_difference = nrow(trans.p)-1000
#Checking length of test_size values
test_size_difference
#Subsetting a one-column data.frame with [rows,] drops to a plain vector,
#which is why length() (not nrow) is used in the checks below.
Train_Load_DF_Diff <- trans.p[1:test_size_difference,]
Test_Load_DF_Diff <- trans.p[(test_size_difference+1):nrow(trans.p),]
#Check the lengths tie out. Since the train/test sets aren't DF we have to use lengths instead of nrow.
(length(Train_Load_DF_Diff)+length(Test_Load_DF_Diff)) == nrow(trans.p)
#Find p and Q
aic5.wge(Train_Load_DF_Diff,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_Diff,p=0:5,q=0:5,type='bic')
#Both BIC & AIC pick 4,3.
arima_model = est.arma.wge(Train_Load_DF_Diff,p=4,q=3)
#short term
f = fore.aruma.wge(Train_Load_DF_Diff,phi = arima_model$phi, theta = arima_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_ARIMA_ASE = mean((Test_Load_DF_Diff[1:30] - f$f)^2)
Short_Term_ARIMA_ASE
##Long term
f = fore.aruma.wge(Train_Load_DF_Diff,phi = arima_model$phi, theta = arima_model$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_ARIMA_ASE = mean((Test_Load_DF_Diff[1:180] - f$f)^2)
Long_Term_ARIMA_ASE
##Append List
Model_name <- c(Model_name,"ARIMA")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_ARIMA_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_ARIMA_ASE)
###Why are we getting neg values when we difference the data? This is why the ASE is terrible.
###Log this works
#Let's do log then take the difference
Load_df$LOG_LOAD <- log(Load_df$TOTAL_LOAD)
LOG_DF <-Load_df %>% select('DAY','LOG_LOAD')
plotts.sample.wge(LOG_DF$LOG_LOAD)
###Difference
trans.log = artrans.wge(LOG_DF$LOG_LOAD,phi.tr = 1)
##Train test split
trans.log <-as.data.frame(trans.log)
test_size_log_difference = nrow(trans.log)-1000
#Checking length of test_size values
test_size_log_difference
Train_Load_DF_log_Diff <- trans.log[1:test_size_log_difference,]
Test_Load_DF_log_Diff <- trans.log[(test_size_log_difference+1):nrow(trans.log),]
((length(Train_Load_DF_log_Diff)+length(Test_Load_DF_log_Diff))==nrow(trans.log))
aic5.wge(Train_Load_DF_log_Diff,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_log_Diff,p=0:5,q=0:5,type='bic')
#Both BIC & AIC pick 4,3.
log_difference = est.arma.wge(Train_Load_DF_log_Diff,p=4,q=3)
#short term
f = fore.aruma.wge(Train_Load_DF_log_Diff,phi = log_difference$phi, theta = log_difference$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_log_difference_ASE = mean((Test_Load_DF_log_Diff[1:30] - f$f)^2)
Short_Term_log_difference_ASE
###Long
f = fore.aruma.wge(Train_Load_DF_log_Diff,phi = log_difference$phi, theta = log_difference$theta, n.ahead = 180,limits = T, lastn = T)
Long_Term_log_difference_ASE = mean((Test_Load_DF_log_Diff[1:180] - f$f)^2)
Long_Term_log_difference_ASE
##Append
Model_name <- c(Model_name,"Log Difference")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_log_difference_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,Long_Term_log_difference_ASE)
###Seasonality component
#Annual seasonal difference of the log series: c(rep(0,364),1) is a
#transform at lag 365, i.e. (1 - B^365).
Seasonality_removed_df <- artrans.wge(LOG_DF$LOG_LOAD,c(rep(0,364),1))
plotts.sample.wge(Seasonality_removed_df)
length(Seasonality_removed_df)
Seasonality_removed_df <- as.data.frame(Seasonality_removed_df)
test_size_Seasonality = nrow(Seasonality_removed_df)-1000
#Checking length of test_size values
test_size_Seasonality
Train_Load_DF_seasonality_removed <- Seasonality_removed_df[1:test_size_Seasonality,]
Test_Load_DF_seasonality_removed <- Seasonality_removed_df[(test_size_Seasonality+1):nrow(Seasonality_removed_df),]
#check the splits add up
((length(Train_Load_DF_seasonality_removed)+length(Test_Load_DF_seasonality_removed))==nrow(Seasonality_removed_df))
aic5.wge(Train_Load_DF_seasonality_removed,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_seasonality_removed,p=0:5,q=0:5,type='bic')
#NOTE(review): the original comment here claimed AIC/BIC picked (4,3),
#but the model below is fit with p=5, q=5 -- confirm which order was
#actually intended.
seasonality_removed_model = est.arma.wge(Train_Load_DF_seasonality_removed,p=5,q=5)
##short term
f = fore.aruma.wge(Train_Load_DF_seasonality_removed,phi = seasonality_removed_model$phi, theta = seasonality_removed_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_seasonality_ASE = mean((Test_Load_DF_seasonality_removed[1:30] - f$f)^2)
Short_Term_seasonality_ASE
##long term
f = fore.aruma.wge(Train_Load_DF_seasonality_removed,phi = seasonality_removed_model$phi, theta = seasonality_removed_model$theta, n.ahead = 180,limits = T, lastn = T)
long_Term_seasonality_ASE = mean((Test_Load_DF_seasonality_removed[1:180] - f$f)^2)
long_Term_seasonality_ASE
##Append List
Model_name <- c(Model_name,"Seasonality_removed")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_seasonality_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,long_Term_seasonality_ASE)
##quarterly data
#Lag-4 seasonal difference: (1 - B^4).
quarter_seasonality <- artrans.wge(Load_df$TOTAL_LOAD,c(0,0,0,1))
quarter_seasonality <- as.data.frame(quarter_seasonality)
##Train test split
test_size_quarter = nrow(quarter_seasonality)-1000
#Checking length of test_size values
test_size_quarter
Train_Load_DF_quarter <- quarter_seasonality[1:test_size_quarter,]
Test_Load_DF_quarter <- quarter_seasonality[(test_size_quarter+1):nrow(quarter_seasonality),]
#check the splits add up
((length(Train_Load_DF_quarter)+length(Test_Load_DF_quarter))==nrow(quarter_seasonality))
##Build model
aic5.wge(Train_Load_DF_quarter,p=0:5,q=0:5,type='aic')
aic5.wge(Train_Load_DF_quarter,p=0:5,q=0:5,type='bic')
#Both BIC & AIC 5,1 in their top
quarter_removed_model = est.arma.wge(Train_Load_DF_quarter,p=5,q=1)
##short term
f = fore.aruma.wge(Train_Load_DF_quarter,phi = quarter_removed_model$phi, theta = quarter_removed_model$theta, n.ahead = 30,limits = T, lastn = T)
Short_Term_quarter_ASE = mean((Test_Load_DF_quarter[1:30] - f$f)^2)
Short_Term_quarter_ASE
##long term
f = fore.aruma.wge(Train_Load_DF_quarter,phi = quarter_removed_model$phi, theta = quarter_removed_model$theta, n.ahead = 180,limits = T, lastn = T)
long_Term_quarter_ASE = mean((Test_Load_DF_quarter[1:180] - f$f)^2)
long_Term_quarter_ASE
##Append list
Model_name <- c(Model_name,"quarter_removed_model")
Short_Term_ASE_Score <- c(Short_Term_ASE_Score,Short_Term_quarter_ASE)
Long_Term_ASE_Score <- c(Long_Term_ASE_Score,long_Term_quarter_ASE)
#turn list into dataframe
#NOTE(review): cbind() on lists yields a list-matrix rather than a true
#data.frame -- presumably fine for display; confirm if a data.frame is
#needed downstream.
Model_results <-cbind(Model_name,Short_Term_ASE_Score,Long_Term_ASE_Score)
Model_results
|
##' Access to FAOSTAT data
##'
##' A wrapper function to obtain multiple data sets under the FAOSTAT
##' domains: "Production"; "Trade" (excluding "Detailed trade matrix");
##' "Food Balance"; the subdomain "Suite of Food Security Indicators"
##' under the domain "Food Security"; "Prices"; "Inputs" (excluding
##' "Employment Indicators"); "Population"; "Investment" (excluding "Development
##' Flows to Agriculture"); "Macro-Statistics"; "Agri-Environmental Indicators";
##' "Emissions - Agriculture"; "Emissions - Land Use"; the subdomain
##' "Forestry Production and Trade" under the domain "Forestry"; "ASTI R&D Indicators".
##'
##' @param name The name to be given to the variable.
##' @param domainCode The domain code of the variable, see details.
##' @param elementCode The element code of the variable, see details.
##' @param itemCode The item code of the variable, see details.
##' @param yearRange A numeric vector containing the years to be downloaded.
##' @param countrySet A numeric vector of FAOSTAT codes of those countries to be downloaded.
##' @param query The object created if using the FAOsearch function.
##' @param toDataFrame Logical, whether to return the results in data frames instead of data tables.
##'
##' @return A list containing the following elements
##' \describe{
##' \item{entity}{The entity level data}
##' \item{aggregates}{The aggregates provided by the FAO}
##' \item{results}{The status of the download, whether success/failed}
##' }
##' @export
##'
##' @examples
##'
##' ## production.dt <-
##' ##     data.table(varName = c("Grapes_AreaHarv",
##' ##                            "Wine_ProdQuantity",
##' ##                            "CattleBuffaloes_Stocks",
##' ##                            "MilkProducts_Yield",
##' ##                            "Yoghurt_ProdQuantity",
##' ##                            "Food _GPIN",
##' ##                            "Agriculture_NetProdValue"),
##' ##                domainCode = c("QC", "QD", "QA", "QL", "QP", "QI", "QV"),
##' ##                itemCode = c(560, 564, 1746, "1780>", 891, 2054, 2051),
##' ##                elementCode = c(2312, 2510, 2111, 2413, 2510, 432, 154))
##' ## production.lst <- with(production.dt,
##' ##                        FAOSTAT(name = varName, domainCode = domainCode,
##' ##                                itemCode = itemCode, elementCode = elementCode,
##' ##                                yearRange = c(2005:2010), countrySet = c(106, 5400)))
##'
FAOSTAT <-
    function(name = NULL, domainCode = "QC", elementCode = 2510,
             itemCode = NULL, yearRange = NULL, countrySet = NULL,
             query, toDataFrame = FALSE){
        ## Year range: collapse the numeric vector into the comma-separated
        ## form expected by the FAOSTAT API ("2005,2006,...").
        if (!is.null(yearRange)) {
            if (!is.numeric(yearRange)) {
                stop("Please, provide a numeric vector for the year range.")
            } else {
                yearRange <- paste(yearRange, collapse = ",")
            }
        }
        ## Country set: same comma-separated collapse.
        if (!is.null(countrySet)) {
            countrySet <- paste(countrySet, collapse = ",")
        }
        ## Query: an FAOsearch() result overrides the individual arguments.
        if(!missing(query)){
            domainCode = query$domainCode
            itemCode = query$itemCode
            elementCode = query$elementCode
            countrySet = query$areaCode
            if(is.null(query$name)){
                name <- with(query, paste(domainCode, itemCode, elementCode, countrySet, sep = "_"))
            } else {
                name <- query$name
            }
        }
        ## Name: default to "<domain>_<item>_<element>" when not supplied.
        if(is.null(name))
            name <- paste(domainCode, itemCode, elementCode, sep = "_")
        n <- length(name)
        ## Check length
        if(any(length(domainCode) != n, length(elementCode) != n))
            stop("domainCode and elementCode should have the same length")
        ## Recycle itemCode up-front so per-variable indexing below matches
        ## the recycling the previous vectorised paste() performed.
        if (!is.null(itemCode))
            itemCode <- rep(itemCode, length.out = n)
        ## Initializing ...
        faoData <- data.table()
        results.dt <- data.frame(Name = name, Success = logical(length(name)),
                                 Reason = character(length(name)),
                                 Time = as.POSIXct(rep(NA, length(name))),
                                 stringsAsFactors = FALSE)
        printLab(paste("FAOSTAT Data Download (", n, " in Total)", sep = ""))
        i <- 1
        retry <- 1
        while(i <= n){
            if(retry == 1)
                cat(paste("(", i, "): Downloading variable ", name[i], " ... ",
                          sep = ""))
            if(any(is.na(domainCode[i]), is.na(elementCode[i]))){
                cat("FAIL\n\t Error: domain or element is missing\n")
                results.dt[i, "Success"] <- FALSE
                results.dt[i, "Reason"] <- "domain or element is missing"
                ## Bug fix: advance to the next variable. Previously the
                ## index was never incremented on this branch, so a single
                ## NA domain or element code made the while-loop spin forever.
                i <- i + 1
                retry <- 1
            } else {
                ## API: build the URL for THIS variable only. (Bug fix: the
                ## previous code pasted the whole domainCode/elementCode
                ## vectors and chose "element" vs "month" from domainCode[i]
                ## alone, producing wrong URLs whenever one call mixed
                ## monthly domains ("PM", "CP") with other domains.)
                baseUrl <- "http://fenixservices.fao.org/faostat/api/v1/en/data/"
                if (!domainCode[i] %in% c("PM", "CP")) {
                    url <- paste0(baseUrl, domainCode[i], "?element=", elementCode[i])
                } else {
                    ## Monthly domains take the element code as a month filter.
                    url <- paste0(baseUrl, domainCode[i], "?month=", elementCode[i])
                }
                if (!is.null(itemCode)) {
                    url <- paste0(url, "&item=", itemCode[i])
                }
                if (!is.null(yearRange)) {
                    url <- paste0(url, "&year=", yearRange)
                }
                if (!is.null(countrySet)) {
                    url <- paste0(url, "&area=", countrySet)
                }
                ## Download the data
                ## NOTE: the Flag variable sometimes is interpreted as logic by data.table as "F"
                ## is one of the flag codes, but then data.table understands that is not logic and
                ## goes back to character. However, the previously read flags "F" are converted in "0".
                ## For this reason I force Flag to be character.
                tmp <- try(fread(paste0(url, "&output_type=csv"), encoding = "UTF-8",
                                 colClasses = c(Flag = "character")))
                if(!inherits(tmp, "try-error")){
                    ## This was to account sometimes the download is successful, yet
                    ## the data frame is empty
                    if(NROW(tmp) != 0){
                        cat("OK\n")
                        results.dt[i, "Success"] <- TRUE
                        results.dt[i, "Reason"] <- "Download Successful"
                        results.dt[i, "Time"] <- Sys.time()
                        ## Mark the column names as UTF-8. (Previously done
                        ## inside a loop that re-marked ALL names once per
                        ## column; a single assignment is equivalent.)
                        Encoding(colnames(tmp)) <- "UTF-8"
                        if (domainCode[i] %in% c("PM", "CP")) {
                            names(tmp)[which(names(tmp) == "Months Code")] <- "Element Code"
                            names(tmp)[which(names(tmp) == "Months")] <- "Element"
                        }
                        if (domainCode[i] == "PE") {
                            names(tmp)[which(names(tmp) == "ISO Currency Code")] <- "Element Code"
                            names(tmp)[which(names(tmp) == "Currency")] <- "Element"
                        }
                        ## Add the Element List just before the Element Code
                        ## NOTE: the element list is the code used for the downloading
                        ## while the element code is the code used in dissemination.
                        target <- which(names(tmp) == 'Element Code')[1]
                        suppressWarnings(tmp[, `Element List` := elementCode[i]])
                        tmp <-
                            cbind(tmp[,1:(target-1), with = FALSE],
                                  tmp[, .(`Element List`)],
                                  tmp[, target:(ncol(tmp)-1), with = FALSE])
                        ## Remove Year Code
                        tmp[, `Year Code` := NULL]
                        ## Add the Name as first column
                        tmp[, "Name" := name[i]]
                        tmp <-
                            cbind(tmp[, .(`Name`)],
                                  tmp[,1:(ncol(tmp)-1), with = FALSE])
                        ## Convert to numeric
                        ## NOTE: sometimes there are strange symbols that do not allow
                        ## the value to be numeric
                        tmp[, Year := as.integer(Year)]
                        tmp[, Value := suppressWarnings(as.numeric(Value))]
                        ## Country vs Area
                        names(tmp)[which(names(tmp) == "Area")] <- "Country"
                        names(tmp)[which(names(tmp) %in% c("Area Code", "Country Code"))] <-
                            "FAOST_CODE"
                        ## Add Note
                        if (length(grep("Note", colnames(tmp))) == 0) {
                            suppressWarnings(tmp[, Note := ""])
                        }
                        faoData <- rbind(faoData, tmp)
                        i <- i + 1
                        retry <- 1
                    } else {
                        ## Empty payload: record the failure reason (it was
                        ## previously left blank in results.dt).
                        tmp <- c("The specified query has no data, consult FAOSTAT")
                        cat(paste(tmp, "\n"))
                        results.dt[i, "Reason"] <- tmp
                        class(tmp) <- "try-error"
                        attr(tmp, "condition") <-
                            list(message = tmp, call = NULL)
                        i <- i + 1
                        retry <- 1
                    }
                } else {
                    ## Transient download error: retry the same variable up
                    ## to 50 times before recording a failure.
                    if(retry <=50){
                        print(retry)
                        retry <- retry + 1
                    } else {
                        cat("Download fail after 50 tries\n")
                        results.dt[i, "Success"] <- FALSE
                        results.dt[i, "Reason"] <- attr(tmp, "condition")$message
                        i <- i + 1
                        retry <- 1
                    }
                }
            }
        }
        ## Split downloaded rows into country-level data (codes present in
        ## FAOcountryProfile) and FAO aggregates, each ordered by code/year.
        entity.dt <- arrange(with(faoData, faoData[FAOST_CODE %in%
            FAOcountryProfile[, FAOST_CODE], ]), FAOST_CODE, Year)
        region.dt <- arrange(with(faoData, faoData[!(FAOST_CODE %in%
            FAOcountryProfile[, FAOST_CODE]), ]), FAOST_CODE, Year)
        cat(paste("\n Number of variables successfully downloaded: ",
                  sum(results.dt$Success), " out of ", NROW(results.dt), "\n\n", sep = ""))
        if (toDataFrame) {
            list(entity = as.data.frame(entity.dt), aggregates = as.data.frame(region.dt),
                 results = as.data.frame(results.dt))
        } else {
            list(entity = entity.dt, aggregates = region.dt, results = results.dt)
        }
    }
## The following two variables are hard coded
utils::globalVariables(names = c("FAOST_CODE", "Year"))
|
/Codes/R/FAOSTAT.R
|
no_license
|
sebastian-c/FAOSTATpackage
|
R
| false
| false
| 9,616
|
r
|
##' Access to FAOSTAT data
##'
##' A wrapper function to obtain multiple data sets under the FAOSTAT
##' domains: "Production"; "Trade" (excluding "Detailed trade matrix");
##' "Food Balance"; the subdomain "Suite of Food Security Indicators"
##' under the domain "Food Security"; "Prices"; "Inputs" (excluding
##' "Employment Indicators"); "Population"; "Investment" (excluding "Development
##' Flows to Agriculture"); "Macro-Statistics"; "Agri-Environmental Indicators";
##' "Emissions - Agriculture"; "Emissions - Land Use"; the subdomain
##' "Forestry Production and Trade" under the domain "Forestry"; "ASTI R&D Indicators".
##'
##' @param name The name to be given to the variable.
##' @param domainCode The domain code of the variable, see details.
##' @param elementCode The element code of the variable, see details.
##' @param itemCode The item code of the variable, see details.
##' @param yearRange A numeric vector containing the years to be downloaded.
##' @param countrySet A numeric vector of FAOSTAT codes of those countries to be downloaded.
##' @param query The object created if using the FAOsearch function.
##' @param toDataFrame Logical, whether to return the results in data frames instead of data tables.
##'
##' @return A list containing the following elements
##' \describe{
##' \item{entity}{The entity level data}
##' \item{aggregates}{The aggregates provided by the FAO}
##' \item{results}{The status of the download, whether success/failed}
##' }
##' @export
##'
##' @examples
##'
##' ## production.dt <-
##' ## data.table(varName = c("Grapes_AreaHarv",
##' ## "Wine_ProdQuantity",
##' ## "CattleBuffaloes_Stocks",
##' ## "MilkProducts_Yield",
##' ## "Yoghurt_ProdQuantity",
##' ## "Food _GPIN",
##' ## "Agriculture_NetProdValue"),
##' ## domainCode = c("QC", "QD", "QA", "QL", "QP", "QI", "QV"),
##' ## itemCode = c(560, 564, 1746, "1780>", 891, 2054, 2051),
##' ## elementCode = c(2312, 2510, 2111, 2413, 2510, 432, 154))
##' ## production.lst <- with(production.dt,
##' ## FAOSTAT(name = varName, domainCode = domainCode,
##' ## itemCode = itemCode, elementCode = elementCode,
##' ## yearRange = c(2005:2010), countrySet = c(106, 5400)))
##'
FAOSTAT <-
function(name = NULL, domainCode = "QC", elementCode = 2510,
itemCode = NULL, yearRange = NULL, countrySet = NULL,
query, toDataFrame = FALSE){
## Year range
if (!is.null(yearRange)) {
if (!is.numeric(yearRange)) {
stop("Please, provide a numeric vector for the year range.")
} else {
yearRange <- paste(yearRange, collapse = ",")
}
}
## Country set
if (!is.null(countrySet)) {
countrySet <- paste(countrySet, collapse = ",")
}
## Query
if(!missing(query)){
domainCode = query$domainCode
itemCode = query$itemCode
elementCode = query$elementCode
countrySet = query$areaCode
if(is.null(query$name)){
name <- with(query, paste(domainCode, itemCode, elementCode, countrySet, sep = "_"))
} else {
name <- query$name
}
}
## Name
if(is.null(name))
name <- paste(domainCode, itemCode, elementCode, sep = "_")
n <- length(name)
## Check length
if(any(length(domainCode) != n, length(elementCode) != n))
stop("domainCode and elementCode should have the same length")
## Initializing ...
faoData <- data.table()
results.dt <- data.frame(Name = name, Success = logical(length(name)),
Reason = character(length(name)),
Time = as.POSIXct(rep(NA, length(name))),
stringsAsFactors = FALSE)
printLab(paste("FAOSTAT Data Download (", n, " in Total)", sep = ""))
i <- 1
retry <- 1
while(i <= n){
if(retry == 1)
cat(paste("(", i, "): Downloading variable ", name[i], " ... ",
sep = ""))
if(any(is.na(domainCode[i]), is.na(elementCode[i]))){
cat("FAIL\n\t Error: domain or element is missing\n")
results.dt[i, "Success"] <- FALSE
results.dt[i, "Reason"] <- "domain or element is missing"
} else {
## API
baseUrl <- paste0("http://fenixservices.fao.org/faostat/api/v1/en/data/")
if (!domainCode[i] %in% c("PM", "CP")) {
url <- paste0(baseUrl, domainCode, "?element=", elementCode)
} else {
url <- paste0(baseUrl, domainCode, "?month=", elementCode)
}
if (!is.null(itemCode)) {
url <- paste0(url, "&item=", itemCode)
}
if (!is.null(yearRange)) {
url <- paste0(url, "&year=", yearRange)
}
if (!is.null(countrySet)) {
url <- paste0(url, "&area=", countrySet)
}
## Download the data
## NOTE: the Flag variable sometimes is interpreted as logic by data.table as "F"
## is one of the flag codes, but then data.table understands that is not logic and
## goes back to character. However, the previos read flags "F" are converted in "0".
## For this reason I force Flag to be character.
tmp <- try(fread(paste0(url[i], "&output_type=csv"), encoding = "UTF-8",
colClasses = c(Flag = "character")))
if(!inherits(tmp, "try-error")){
## This was to account sometimes the download is successful, yet
## the data frame is empty
if(NROW(tmp) != 0){
cat("OK\n")
results.dt[i, "Success"] <- TRUE
results.dt[i, "Reason"] <- "Download Successful"
results.dt[i, "Time"] <- Sys.time()
## Adjust the encoding of the title
for (coltmp in colnames(tmp)){
Encoding(colnames(tmp)) <- "UTF-8"
}
if (domainCode[i] %in% c("PM", "CP")) {
names(tmp)[which(names(tmp) == "Months Code")] <- "Element Code"
names(tmp)[which(names(tmp) == "Months")] <- "Element"
}
if (domainCode[i] == "PE") {
names(tmp)[which(names(tmp) == "ISO Currency Code")] <- "Element Code"
names(tmp)[which(names(tmp) == "Currency")] <- "Element"
}
## Add the Element List just before the Element Code
## NOTE: the element list is the code used for the downloading
## while the element code is the code used in dissemination.
target <- which(names(tmp) == 'Element Code')[1]
suppressWarnings(tmp[, `Element List` := elementCode[i]])
tmp <-
cbind(tmp[,1:(target-1), with = FALSE],
tmp[, .(`Element List`)],
tmp[, target:(ncol(tmp)-1), with = FALSE])
## Remove Year Code
tmp[, `Year Code` := NULL]
## Add the Name as first column
tmp[, "Name" := name[i]]
tmp <-
cbind(tmp[, .(`Name`)],
tmp[,1:(ncol(tmp)-1), with = FALSE])
## Convert to numeric
## NOTE: sometimes there are strange symbols that do not allow
## the value to be numeric
tmp[, Year := as.integer(Year)]
tmp[, Value := suppressWarnings(as.numeric(Value))]
## Country vs Area
names(tmp)[which(names(tmp) == "Area")] <- "Country"
names(tmp)[which(names(tmp) %in% c("Area Code", "Country Code"))] <-
"FAOST_CODE"
## Add Note
if (length(grep("Note", colnames(tmp))) == 0) {
suppressWarnings(tmp[, Note := ""])
}
faoData <- rbind(faoData, tmp)
i <- i + 1
retry <- 1
} else {
tmp <- c("The specified query has no data, consult FAOSTAT")
cat(paste(tmp, "\n"))
class(tmp) <- "try-error"
attr(tmp, "condition") <-
list(message = tmp, call = NULL)
i <- i + 1
retry <- 1
}
} else {
if(retry <=50){
print(retry)
retry <- retry + 1
} else {
cat("Download fail after 50 tries\n")
results.dt[i, "Success"] <- FALSE
results.dt[i, "Reason"] <- attr(tmp, "condition")$message
i <- i + 1
retry <- 1
}
}
}
}
entity.dt <- arrange(with(faoData, faoData[FAOST_CODE %in%
FAOcountryProfile[, FAOST_CODE], ]), FAOST_CODE, Year)
region.dt <- arrange(with(faoData, faoData[!(FAOST_CODE %in%
FAOcountryProfile[, FAOST_CODE]), ]), FAOST_CODE, Year)
cat(paste("\n Number of variables successfully downloaded: ",
sum(results.dt$Success), " out of ", NROW(results.dt), "\n\n", sep = ""))
if (toDataFrame) {
list(entity = as.data.frame(entity.dt), aggregates = as.data.frame(region.dt),
results = as.data.frame(results.dt))
} else {
list(entity = entity.dt, aggregates = region.dt, results = results.dt)
}
}
## The following two variables are hard coded.
## Declare column names used via non-standard evaluation in the data.table
## expressions above so R CMD check does not flag them as undefined globals.
utils::globalVariables(names = c("FAOST_CODE", "Year"))
|
#' Compute similarity scores between strings
#'
#' \code{stringsim} computes pairwise string similarities between elements of
#' \code{character} vectors \code{a} and \code{b}, where the vector with less
#' elements is recycled.
#'
#' @param a R object (target); will be converted by \code{as.character}.
#' @param b R object (source); will be converted by \code{as.character}.
#' @param method Method for distance calculation. The default is \code{"osa"},
#' see \code{\link{stringdist-metrics}}.
#' @param useBytes Perform byte-wise comparison, see \code{\link{stringdist-encoding}}.
#' @param q Size of the \eqn{q}-gram; must be nonnegative. Only applies to
#' \code{method='qgram'}, \code{'jaccard'} or \code{'cosine'}.
#' @param ... additional arguments are passed on to \code{\link{stringdist}}.
#'
#' @return
#' Returns a vector with similarities, which are values between 0 and 1 where
#' 1 corresponds to perfect similarity (distance 0) and 0 to complete
#' dissimilarity. \code{NA} is returned when \code{\link{stringdist}} returns
#' \code{NA}. Distances equal to \code{Inf} are truncated to a similarity of
#' 0.
#'
#' @details
#' The similarity is calculated by first calculating the distance using
#' \code{\link{stringdist}}, dividing the distance by the maximum
#' possible distance, and subtracting the result from 1.
#' This results in a score between 0 and 1, with 1
#' corresponding to complete similarity and 0 to complete dissimilarity.
#' Note that complete similarity only means equality for distances satisfying
#' the identity property. This is not the case e.g. for q-gram based distances
#' (for example if q=1, anagrams are completely similar).
#' For distances where weights can be specified, the maximum distance
#' is currently computed by assuming that all weights are equal to 1.
#'
#' @example ../examples/stringsim.R
#' @export
stringsim <- function(a, b, method = c("osa", "lv", "dl", "hamming", "lcs",
    "qgram", "cosine", "jaccard", "jw", "soundex"), useBytes=FALSE, q = 1, ...) {
  method <- match.arg(method)
  # Compute the raw distance first, then rescale it to a similarity in [0, 1].
  d <- stringdist::stringdist(a, b, method = method, useBytes = useBytes, q = q, ...)
  counting <- if (useBytes) "bytes" else "char"  # unit used to measure string lengths
  normalize_dist(d, a, b, method = method, nctype = counting, q = q)
}
#' Compute similarity scores between sequences of integers
#'
#' @param a \code{list} of \code{integer} vectors (target)
#' @param b \code{list} of \code{integer} vectors (source). Optional for
#' \code{seq_distmatrix}.
#' @param method Method for distance calculation. The default is \code{"osa"},
#' see \code{\link{stringdist-metrics}}.
#' @param q Size of the \eqn{q}-gram; must be nonnegative. Only applies to
#' \code{method='qgram'}, \code{'jaccard'} or \code{'cosine'}.
#' @param ... additional arguments are passed on to \code{\link{seq_dist}}.
#'
#' @return
#' A \code{numeric} vector of length \code{max(length(a),length(b))}. If one of the
#' entries in \code{a} or \code{b} is \code{NA_integer_}, all comparisons with that
#' element result in \code{NA}. Missings occurring within the sequences are treated
#' as an ordinary number (the representation of \code{NA_integer_}).
#'
#' @example ../examples/seq_sim.R
#' @seealso \code{\link{seq_dist}}, \code{\link{seq_amatch}}
#' @export
seq_sim <- function(a, b, method = c("osa", "lv", "dl", "hamming", "lcs",
    "qgram", "cosine", "jaccard", "jw"), q = 1, ...) {
  # Distance between integer sequences, rescaled to a similarity in [0, 1].
  method <- match.arg(method)
  normalize_dist(stringdist::seq_dist(a, b, method = method, q = q, ...),
                 a, b, method = method, q = q)
}
#### HELPER FUNCTIONS ---------------------------------------------------------
# Internal generic: per-element "length" of a sequence collection, dispatching
# on class (character vectors count characters; lists use a native routine).
lengths <- function(x, ...) UseMethod("lengths")
# Character method: number of characters (or bytes, via `type`) in each string.
lengths.character <- function(x, type = "char", ...) {
  nchar(x, type = type)
}
# List method: delegates to a compiled routine that returns the length of
# each (integer-vector) element of the list.
# NOTE(review): requires the package's shared library to be loaded so the
# native symbol "R_lengths" resolves — confirm before sourcing standalone.
lengths.list <- function(x,...){
  .Call("R_lengths",x)
}
# Convert a distance into a similarity score in [0, 1].
#
# dist:   numeric vector of distances (may contain NA or Inf).
# a, b:   the original inputs; only their elementwise lengths are used.
# method: distance method name, as accepted by stringdist().
# nctype: how lengths are counted ("char" or "bytes"), passed to nchar().
# q:      q-gram size; only used when method == "qgram".
#
# The similarity is 1 - dist / max_dist, where max_dist is the maximum
# possible distance for `method` (weights assumed equal to 1). The original
# implementation repeated the identical normalisation for lv/osa/dl and for
# the four already-normalised metrics; those branches are merged here.
normalize_dist <- function(dist, a, b, method, nctype="char",q=1L){
  if (method == "hamming") {
    # Hamming distance requires equal lengths; use the longer argument's lengths.
    max_dist <- if (length(b) > length(a)) lengths(b, type = nctype) else lengths(a, type = nctype)
    max_dist[max_dist == 0] <- 1  # avoid 0/0 for empty strings
    sim <- 1 - dist / max_dist
  } else if (method == "lcs") {
    # LCS distance is at most the sum of both lengths.
    max_dist <- lengths(a, type = nctype) + lengths(b, type = nctype)
    max_dist[max_dist == 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method %in% c("lv", "osa", "dl")) {
    # Edit distances are bounded by the length of the longer string.
    max_dist <- pmax(lengths(a, type = nctype), lengths(b, type = nctype))
    max_dist[max_dist == 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method == "qgram") {
    max_dist <- lengths(a, type = nctype) + lengths(b, type = nctype) - 2 * q + 2
    max_dist[max_dist < 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method %in% c("cosine", "jaccard", "jw", "soundex")) {
    # These distances are already normalised to [0, 1].
    sim <- 1 - dist
  }
  # All metrics can yield dist == Inf; truncate the similarity to 0.
  sim[sim < 0] <- 0
  sim
}
|
/pkg/R/stringsim.R
|
no_license
|
huisman/stringdist
|
R
| false
| false
| 5,365
|
r
|
#' Compute similarity scores between strings
#'
#' \code{stringsim} computes pairwise string similarities between elements of
#' \code{character} vectors \code{a} and \code{b}, where the vector with less
#' elements is recycled.
#'
#' @param a R object (target); will be converted by \code{as.character}.
#' @param b R object (source); will be converted by \code{as.character}.
#' @param method Method for distance calculation. The default is \code{"osa"},
#' see \code{\link{stringdist-metrics}}.
#' @param useBytes Perform byte-wise comparison, see \code{\link{stringdist-encoding}}.
#' @param q Size of the \eqn{q}-gram; must be nonnegative. Only applies to
#' \code{method='qgram'}, \code{'jaccard'} or \code{'cosine'}.
#' @param ... additional arguments are passed on to \code{\link{stringdist}}.
#'
#' @return
#' Returns a vector with similarities, which are values between 0 and 1 where
#' 1 corresponds to perfect similarity (distance 0) and 0 to complete
#' dissimilarity. \code{NA} is returned when \code{\link{stringdist}} returns
#' \code{NA}. Distances equal to \code{Inf} are truncated to a similarity of
#' 0.
#'
#' @details
#' The similarity is calculated by first calculating the distance using
#' \code{\link{stringdist}}, dividing the distance by the maximum
#' possible distance, and subtracting the result from 1.
#' This results in a score between 0 and 1, with 1
#' corresponding to complete similarity and 0 to complete dissimilarity.
#' Note that complete similarity only means equality for distances satisfying
#' the identity property. This is not the case e.g. for q-gram based distances
#' (for example if q=1, anagrams are completely similar).
#' For distances where weights can be specified, the maximum distance
#' is currently computed by assuming that all weights are equal to 1.
#'
#' @example ../examples/stringsim.R
#' @export
stringsim <- function(a, b, method = c("osa", "lv", "dl", "hamming", "lcs",
    "qgram", "cosine", "jaccard", "jw", "soundex"), useBytes=FALSE, q = 1, ...) {
  method <- match.arg(method)
  # Compute the raw distance first, then rescale it to a similarity in [0, 1].
  d <- stringdist::stringdist(a, b, method = method, useBytes = useBytes, q = q, ...)
  counting <- if (useBytes) "bytes" else "char"  # unit used to measure string lengths
  normalize_dist(d, a, b, method = method, nctype = counting, q = q)
}
#' Compute similarity scores between sequences of integers
#'
#' @param a \code{list} of \code{integer} vectors (target)
#' @param b \code{list} of \code{integer} vectors (source). Optional for
#' \code{seq_distmatrix}.
#' @param method Method for distance calculation. The default is \code{"osa"},
#' see \code{\link{stringdist-metrics}}.
#' @param q Size of the \eqn{q}-gram; must be nonnegative. Only applies to
#' \code{method='qgram'}, \code{'jaccard'} or \code{'cosine'}.
#' @param ... additional arguments are passed on to \code{\link{seq_dist}}.
#'
#' @return
#' A \code{numeric} vector of length \code{max(length(a),length(b))}. If one of the
#' entries in \code{a} or \code{b} is \code{NA_integer_}, all comparisons with that
#' element result in \code{NA}. Missings occurring within the sequences are treated
#' as an ordinary number (the representation of \code{NA_integer_}).
#'
#' @example ../examples/seq_sim.R
#' @seealso \code{\link{seq_dist}}, \code{\link{seq_amatch}}
#' @export
seq_sim <- function(a, b, method = c("osa", "lv", "dl", "hamming", "lcs",
    "qgram", "cosine", "jaccard", "jw"), q = 1, ...) {
  # Distance between integer sequences, rescaled to a similarity in [0, 1].
  method <- match.arg(method)
  normalize_dist(stringdist::seq_dist(a, b, method = method, q = q, ...),
                 a, b, method = method, q = q)
}
#### HELPER FUNCTIONS ---------------------------------------------------------
# Internal generic: per-element "length" of a sequence collection, dispatching
# on class (character vectors count characters; lists use a native routine).
lengths <- function(x, ...) UseMethod("lengths")
# Character method: number of characters (or bytes, via `type`) in each string.
lengths.character <- function(x, type = "char", ...) {
  nchar(x, type = type)
}
# List method: delegates to a compiled routine that returns the length of
# each (integer-vector) element of the list.
# NOTE(review): requires the package's shared library to be loaded so the
# native symbol "R_lengths" resolves — confirm before sourcing standalone.
lengths.list <- function(x,...){
  .Call("R_lengths",x)
}
# Convert a distance into a similarity score in [0, 1].
#
# dist:   numeric vector of distances (may contain NA or Inf).
# a, b:   the original inputs; only their elementwise lengths are used.
# method: distance method name, as accepted by stringdist().
# nctype: how lengths are counted ("char" or "bytes"), passed to nchar().
# q:      q-gram size; only used when method == "qgram".
#
# The similarity is 1 - dist / max_dist, where max_dist is the maximum
# possible distance for `method` (weights assumed equal to 1). The original
# implementation repeated the identical normalisation for lv/osa/dl and for
# the four already-normalised metrics; those branches are merged here.
normalize_dist <- function(dist, a, b, method, nctype="char",q=1L){
  if (method == "hamming") {
    # Hamming distance requires equal lengths; use the longer argument's lengths.
    max_dist <- if (length(b) > length(a)) lengths(b, type = nctype) else lengths(a, type = nctype)
    max_dist[max_dist == 0] <- 1  # avoid 0/0 for empty strings
    sim <- 1 - dist / max_dist
  } else if (method == "lcs") {
    # LCS distance is at most the sum of both lengths.
    max_dist <- lengths(a, type = nctype) + lengths(b, type = nctype)
    max_dist[max_dist == 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method %in% c("lv", "osa", "dl")) {
    # Edit distances are bounded by the length of the longer string.
    max_dist <- pmax(lengths(a, type = nctype), lengths(b, type = nctype))
    max_dist[max_dist == 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method == "qgram") {
    max_dist <- lengths(a, type = nctype) + lengths(b, type = nctype) - 2 * q + 2
    max_dist[max_dist < 0] <- 1
    sim <- 1 - dist / max_dist
  } else if (method %in% c("cosine", "jaccard", "jw", "soundex")) {
    # These distances are already normalised to [0, 1].
    sim <- 1 - dist
  }
  # All metrics can yield dist == Inf; truncate the similarity to 0.
  sim[sim < 0] <- 0
  sim
}
|
## The two functions below create a special object that stores a matrix
## and caches its inverse.
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of accessor closures:
##   set(y)          replace the stored matrix (invalidates the cache)
##   get()           return the stored matrix
##   setinverse(inv) store a computed inverse
##   getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinverse() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so drop any stale cached inverse
  }
  get <- function() x
  # Parameter renamed from `solve`, which shadowed base::solve.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above, reusing the cached inverse when one is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # No cached value yet: compute, store, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
nvvrajan/ProgrammingAssignment2
|
R
| false
| false
| 790
|
r
|
## The two functions below create a special object that stores a matrix
## and caches its inverse.
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of accessor closures:
##   set(y)          replace the stored matrix (invalidates the cache)
##   get()           return the stored matrix
##   setinverse(inv) store a computed inverse
##   getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinverse() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so drop any stale cached inverse
  }
  get <- function() x
  # Parameter renamed from `solve`, which shadowed base::solve.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above, reusing the cached inverse when one is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # No cached value yet: compute, store, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
# Compare start-position frequencies between an input BED-like file and a
# selected subset, and plot per-position log2 enrichments to test.pdf.
# Usage: Rscript compareBED.R <input.tsv> <selected.tsv> <max_position>
args <- commandArgs(TRUE)
input <- args[1]
sel <- args[2]
# BUG FIX: command-line arguments are character. The original kept `l` as a
# string, so `as.numeric(dati$start) < l` coerced to a lexicographic string
# comparison (e.g. "9" > "100"). Convert to numeric explicitly.
l <- as.numeric(args[3])
dati <- read.delim(input, sep = '\t')
dats <- read.delim(sel, sep = '\t')
# Frequency of each start position below l, normalised by the total count.
dati.freq <- tabulate(dati$start[as.numeric(dati$start) < l]) / length(dati$start)
dats.freq <- tabulate(dats$start[as.numeric(dats$start) < l]) / length(dats$start)
enrichments <- log(dats.freq / dati.freq, 2)
pdf('test.pdf', width = 12, height = 8)
plot(dati.freq, dats.freq)
abline(a = 0, b = 1, lty = 2, col = 'red', lwd = 3)  # y = x reference line
hist(enrichments, 50)
plot(seq(dats.freq), enrichments)
dev.off()
|
/compareBED.R
|
no_license
|
msr2009/SCRIPTS
|
R
| false
| false
| 518
|
r
|
# Compare start-position frequencies between an input BED-like file and a
# selected subset, and plot per-position log2 enrichments to test.pdf.
# Usage: Rscript compareBED.R <input.tsv> <selected.tsv> <max_position>
args <- commandArgs(TRUE)
input <- args[1]
sel <- args[2]
# BUG FIX: command-line arguments are character. The original kept `l` as a
# string, so `as.numeric(dati$start) < l` coerced to a lexicographic string
# comparison (e.g. "9" > "100"). Convert to numeric explicitly.
l <- as.numeric(args[3])
dati <- read.delim(input, sep = '\t')
dats <- read.delim(sel, sep = '\t')
# Frequency of each start position below l, normalised by the total count.
dati.freq <- tabulate(dati$start[as.numeric(dati$start) < l]) / length(dati$start)
dats.freq <- tabulate(dats$start[as.numeric(dats$start) < l]) / length(dats$start)
enrichments <- log(dats.freq / dati.freq, 2)
pdf('test.pdf', width = 12, height = 8)
plot(dati.freq, dats.freq)
abline(a = 0, b = 1, lty = 2, col = 'red', lwd = 3)  # y = x reference line
hist(enrichments, 50)
plot(seq(dats.freq), enrichments)
dev.off()
|
# Estimate the skeleton of a graphical model (PC-algorithm style) using
# permutation-based (distance covariance) conditional independence tests.
#
# Arguments:
#   data  - matrix / data frame; columns are variables, rows are observations.
#   alpha - significance level for the (conditional) independence tests.
#   p     - number of permutations used by dcov.test inside the test helpers.
#
# Value: a list with
#   sepstep - separation sets: sepstep[[i]][[j]] is the conditioning set that
#             separated nodes i and j (-1 encodes the empty set).
#   G       - estimated skeleton as an adjacency matrix (1 = edge retained).
#   pval    - hash of cached p-values keyed by "<i>_<j>x<conditioning set>".
#
# NOTE(review): depends on adiac_D(), condIndepTestBr(), IndepTestBr() and
# hash() (presumably from the `hash` package) defined/loaded elsewhere —
# confirm before running this function in isolation.
Skeleton_R <- function(data, # data
                       alpha, # pvalue for CI test
                       p){ # num of permutation in dcov.test
  n = ncol(data)
  G = diag(x = -1, nrow = n, ncol = n)+1 # graph matrix with 0 on the diagonal
  sepstep = list() # I create the separation list
  check = hash() # cache of p-values so no pair/conditioning-set is tested twice
  for(i in 1:n)
  {
    sepstep[[i]] = list()
  }
  for (N in 0:n) # for each N = dimension subset to be taken
  {
    if(N!=0)
    {
      G_old = G
      print(N)
      for (i in 1:n) # for each node i
      {
        print(i)
        ad = adiac_D(G_old, i) # I look for the adjacent (double arrow) OUTPUT: POSITION
        n_ad = length(ad)
        if((n_ad-1)>=N) # if card of adj > N
        {
          for ( j in 1:n_ad) # for each node adjac to i
            # if n_ad = 0, then n_ad -1 is NEVER > N, so I don't enter here
          {
            if(n_ad==2)
            {
              sub = c(ad[3-j]) # because if combn(8, 1) return: 1, 2, 3, 4, 5, 6, 7, 8
              sub = as.matrix(sub)
            }
            else
            {
              sub = combn(c(ad[-c(which(ad==ad[j]))]), N) # all the subset of dimension N of the different POSITION of nodes
            }
            k = 1
            while(k<=ncol(sub) && G[i, ad[j]]==1 ) # while there are other subset to investigate for c.i. AND I still haven't found a subset for which i is indip to adj[j] given subset
            {
              # Cache key: sorted pair "i_j" + 'x' + sorted conditioning set.
              a = c(i, ad[j])
              a = sort(a)
              a = paste(a, collapse="_")
              b = sort(sub[1:N, k])
              b = paste(b, collapse="_")
              long = paste(a, b, sep='x')
              if( is.numeric(check[[long]]) == F ) # only test if not already cached
              {
                dep = condIndepTestBr(x= data[,i], y= data[,ad[j]], z = data[, sub[1:N , k] ], p = p)
                print(paste('I test', i , 'and', ad[j], 'given',sub[1:N , k]))
                print(paste('p value =',dep))
                if (dep > alpha) # if pval > alpha, I DON'T refuse H0 => i and adj[j] are c.i.
                {
                  G[i, ad[j]] = 0
                  G[ad[j], i] = 0
                  sepstep[[i]][[ad[j]]] = sub[1:N,k]
                  sepstep[[ad[j]]][[i]] = sub[1:N,k]
                }
                check[[long]]=dep
              }
              k = k+1
            } # closes while
          } # closes for ( j in 1:n_ad)
        } # closes if((n_ad-1)>=N)
      } # closes for (i in 1:n)
    } # closes if N!=0
    else
    {
      # N == 0: marginal (unconditional) independence test for every pair.
      print(N)
      for ( i in 1:n)
      {
        print(i)
        ad = adiac_D(G, i) # I look for the adjacent (double arrow) OUTPUT: POSITION
        n_ad = length(ad)
        if((n_ad-1)>=N) # if card of adj > N
        {
          for ( j in 1:n_ad) # for each node adjac to i
            # if n_ad = 0, then n_ad -1 is NEVER > N, so I don't enter here
          {
            a = c(i, ad[j])
            a = sort(a)
            a = paste(a, collapse="_")
            b = NULL
            b = paste(b, collapse="_")
            long = paste(a, b, sep='x')
            if( is.numeric(check[[long]]) == F ) # only test if not already cached
            {
              # test = gaussCItest(x = i, y = ad[j], S = NULL, suffStat = suffst )
              dep = IndepTestBr(x = data[,i], y = data[, ad[j]], p = p)
              if (dep > alpha) # if pval > alpha, I DON'T refuse H0 => i and adj[j] are c.i.
              {
                G[i, ad[j]] = 0
                G[ad[j], i] = 0
                sepstep[[i]][[ad[j]]] = -1 # it's a default value because it will never exist a variable in the -1 position
                sepstep[[ad[j]]][[i]] = -1 # it's a default value because it will never exist a variable in the -1 position
              }
              check[[long]]=dep
            }
          }
        }
      }
    }
  }
  Sk = list(sepstep = sepstep, G = G, pval = check)
  Sk
}
|
/Skeleton_R.R
|
no_license
|
NinaDes/code-bPC
|
R
| false
| false
| 4,362
|
r
|
# Estimate the skeleton of a graphical model (PC-algorithm style) using
# permutation-based (distance covariance) conditional independence tests.
#
# Arguments:
#   data  - matrix / data frame; columns are variables, rows are observations.
#   alpha - significance level for the (conditional) independence tests.
#   p     - number of permutations used by dcov.test inside the test helpers.
#
# Value: a list with
#   sepstep - separation sets: sepstep[[i]][[j]] is the conditioning set that
#             separated nodes i and j (-1 encodes the empty set).
#   G       - estimated skeleton as an adjacency matrix (1 = edge retained).
#   pval    - hash of cached p-values keyed by "<i>_<j>x<conditioning set>".
#
# NOTE(review): depends on adiac_D(), condIndepTestBr(), IndepTestBr() and
# hash() (presumably from the `hash` package) defined/loaded elsewhere —
# confirm before running this function in isolation.
Skeleton_R <- function(data, # data
                       alpha, # pvalue for CI test
                       p){ # num of permutation in dcov.test
  n = ncol(data)
  G = diag(x = -1, nrow = n, ncol = n)+1 # graph matrix with 0 on the diagonal
  sepstep = list() # I create the separation list
  check = hash() # cache of p-values so no pair/conditioning-set is tested twice
  for(i in 1:n)
  {
    sepstep[[i]] = list()
  }
  for (N in 0:n) # for each N = dimension subset to be taken
  {
    if(N!=0)
    {
      G_old = G
      print(N)
      for (i in 1:n) # for each node i
      {
        print(i)
        ad = adiac_D(G_old, i) # I look for the adjacent (double arrow) OUTPUT: POSITION
        n_ad = length(ad)
        if((n_ad-1)>=N) # if card of adj > N
        {
          for ( j in 1:n_ad) # for each node adjac to i
            # if n_ad = 0, then n_ad -1 is NEVER > N, so I don't enter here
          {
            if(n_ad==2)
            {
              sub = c(ad[3-j]) # because if combn(8, 1) return: 1, 2, 3, 4, 5, 6, 7, 8
              sub = as.matrix(sub)
            }
            else
            {
              sub = combn(c(ad[-c(which(ad==ad[j]))]), N) # all the subset of dimension N of the different POSITION of nodes
            }
            k = 1
            while(k<=ncol(sub) && G[i, ad[j]]==1 ) # while there are other subset to investigate for c.i. AND I still haven't found a subset for which i is indip to adj[j] given subset
            {
              # Cache key: sorted pair "i_j" + 'x' + sorted conditioning set.
              a = c(i, ad[j])
              a = sort(a)
              a = paste(a, collapse="_")
              b = sort(sub[1:N, k])
              b = paste(b, collapse="_")
              long = paste(a, b, sep='x')
              if( is.numeric(check[[long]]) == F ) # only test if not already cached
              {
                dep = condIndepTestBr(x= data[,i], y= data[,ad[j]], z = data[, sub[1:N , k] ], p = p)
                print(paste('I test', i , 'and', ad[j], 'given',sub[1:N , k]))
                print(paste('p value =',dep))
                if (dep > alpha) # if pval > alpha, I DON'T refuse H0 => i and adj[j] are c.i.
                {
                  G[i, ad[j]] = 0
                  G[ad[j], i] = 0
                  sepstep[[i]][[ad[j]]] = sub[1:N,k]
                  sepstep[[ad[j]]][[i]] = sub[1:N,k]
                }
                check[[long]]=dep
              }
              k = k+1
            } # closes while
          } # closes for ( j in 1:n_ad)
        } # closes if((n_ad-1)>=N)
      } # closes for (i in 1:n)
    } # closes if N!=0
    else
    {
      # N == 0: marginal (unconditional) independence test for every pair.
      print(N)
      for ( i in 1:n)
      {
        print(i)
        ad = adiac_D(G, i) # I look for the adjacent (double arrow) OUTPUT: POSITION
        n_ad = length(ad)
        if((n_ad-1)>=N) # if card of adj > N
        {
          for ( j in 1:n_ad) # for each node adjac to i
            # if n_ad = 0, then n_ad -1 is NEVER > N, so I don't enter here
          {
            a = c(i, ad[j])
            a = sort(a)
            a = paste(a, collapse="_")
            b = NULL
            b = paste(b, collapse="_")
            long = paste(a, b, sep='x')
            if( is.numeric(check[[long]]) == F ) # only test if not already cached
            {
              # test = gaussCItest(x = i, y = ad[j], S = NULL, suffStat = suffst )
              dep = IndepTestBr(x = data[,i], y = data[, ad[j]], p = p)
              if (dep > alpha) # if pval > alpha, I DON'T refuse H0 => i and adj[j] are c.i.
              {
                G[i, ad[j]] = 0
                G[ad[j], i] = 0
                sepstep[[i]][[ad[j]]] = -1 # it's a default value because it will never exist a variable in the -1 position
                sepstep[[ad[j]]][[i]] = -1 # it's a default value because it will never exist a variable in the -1 position
              }
              check[[long]]=dep
            }
          }
        }
      }
    }
  }
  Sk = list(sepstep = sepstep, G = G, pval = check)
  Sk
}
|
# Built in R 3.6.1
# This script scrapes all available tiger road data for a given year from the US census FTP site.
# The script then unzips all files for Virginia and combines into a single shapefile for further
# spatial analyses.
# This script was built to ease the labor intensive process of acquiring and combining tiger
# road data (available by county only at higher resolutions) for the watershed landcover
# analysis tool. The data is available yearly and requires many hours to organize.
# Author: Emma Jones (emma.jones@deq.virginia.gov)
# Last Updated: 11/22/2019
# Load libraries
library(rvest) # 0.3.4
library(tidyverse) #1.2.1
library(stringr) #1.4.0
library(sf) # 0.7-7
# Step 1: Identify year to pull data from. This is critical to getting the FTP web address correct for
# all subsequent analyses. If the web address fed to the subsequent function is incorrect, then
# no data can be pulled.
# e.g. https://www2.census.gov/geo/tiger/TIGER2018/ is the appropriate web address for 2018 data.
year <- 2022
FTPaddress <- paste0("https://www2.census.gov/geo/tiger/TIGER",year,"/ROADS")
dirName <- 'tigerRoadsPull' # create a folder in the project directory with this exact name to store the downloaded zip archives
#
# Step 2: Scrape FTP page for headers. The scraping_tiger object would contain all available files
# (kept commented out for reference; not needed for the download step below).
#scraping_tiger <- read_html(FTPaddress) %>% # edit this
# html_nodes("tr") %>% # find only table records
# html_text() # convert to character object
# Step 2.1: Identify all counties that intersect the VA NHD
#This was done in GIS by clipping US county data to the VA NHD. All the state and county FIPS codes that
# could fall into a given probmon watershed are then able to be efficiently downloaded from the TIGER FTP
#VANHD <- st_read('G:/evjones/GIS/ProbMonGIS/GISdata/nhd_83albers.shp')
#UScounties <- st_read('GISdata/tl_2017_us_county.shp') %>%
# st_transform(st_crs(VANHD))
#nhdCounties <- UScounties[VANHD, ]
#st_write(nhdCounties, "GISdata/counties.shp")
#rm(UScounties);rm(VANHD)
# Don't need to repeat above step more than once, here are counties we need
nhdCounties <- st_read('GISdata/counties.shp') %>%
  st_set_geometry(NULL) %>%
  distinct(GEOID) # keep only unique FIPS codes (already concatenated with state + county)
# Step 3: Download all appropriate files.
# custom function to do so
# Download TIGER/Line road zip archives for `year` into `outDirectory`.
#
# year:         census year (numeric or character), used to build the URL.
# fileName:     character vector of zip file names to fetch.
# outDirectory: existing directory the files are written to.
downloadTigerRoadsByYear <- function(year, fileName, outDirectory){
  for(i in seq_along(fileName)){ # seq_along() handles an empty vector safely (1:length would loop)
    download.file(url = paste0('https://www2.census.gov/geo/tiger/TIGER',
                               year, '/ROADS/', fileName[i]),
                  destfile = paste0(outDirectory, '/', fileName[i]),
                  mode = 'wb') # binary mode so zip archives are not corrupted on Windows
  }
}
downloadTigerRoadsByYear(year,
                         paste0('tl_',year,'_', as.character(nhdCounties$GEOID),'_roads.zip'),
                         paste0(dirName,'/', year))
#in unzip folder there is tl_2016_10005_roads.zip
# Step 4: Unzip and combine all files in the dirName directory into one shapefile
filenames <- list.files( paste0(dirName,'/', year), pattern="*.zip", full.names=TRUE)
#filenames <- filenames[-length(filenames)] # remove the last bogus record, only for 2018
lapply(filenames, unzip, exdir=paste0('tigerRoadsPull/unzipped/',year)) # unzip all filenames that end in zip into folder named unzipped
filenames_slim <- gsub('.zip', '' , gsub(paste0('tigerRoadsPull/',year,'/'),'', filenames ))
# check to make sure all downloaded files were unzipped correctly
filenamesUnzipped <- list.files(paste0(dirName,'/unzipped/',year), pattern="*.shx", full.names=F) # search by .shx bc .shp has a few extension options and duplicates unique names
filenamesUnzipped_slim <- gsub('.shx','',filenamesUnzipped)
all(filenames_slim %in% filenamesUnzipped_slim )
# if TRUE then all files unzipped correctly; otherwise list the missing ones
filenames_slim[!(filenames_slim %in% filenamesUnzipped_slim)]
# an output of character(0) is good because it means there are none missing
# Step 5/6: Read in the unzipped shapefiles in chunks of 50 (to conserve
# memory) and save each chunk out as its own shapefile. This replaces five
# copy-pasted read/combine/write stanzas; the output file names are unchanged
# (suffix = index of the last file in the chunk: 50, 100, 150, 200, 229, ...).
filenamesUnzipped <- paste0(dirName,'/unzipped/',year,'/',gsub('.shx','.shp', filenamesUnzipped)) # change .shx to .shp for file reading in
chunkSize <- 50
for (chunkStart in seq(1, length(filenamesUnzipped), by = chunkSize)) {
  chunkEnd <- min(chunkStart + chunkSize - 1, length(filenamesUnzipped))
  shapefiles <- filenamesUnzipped[chunkStart:chunkEnd] %>%
    map(st_read) %>%
    reduce(rbind)
  st_write(shapefiles, paste0('GISdata/TIGERroads/', year, chunkEnd, 'tigerRoads.shp'))
  rm(shapefiles) # free memory before reading the next chunk
}
|
/tigerRoadsWebScrapingScript.R
|
no_license
|
EmmaVJones/LandcoverAnalysis
|
R
| false
| false
| 5,544
|
r
|
# Built in R 3.6.1
# This script scrapes all available tiger road data for a given year from the US census FTP site.
# The script then unzips all files for Virginia and combines into a single shapefile for further
# spatial analyses.
# This script was built to ease the labor intensive process of acquiring and combining tiger
# road data (available by county only at higher resolutions) for the watershed landcover
# analysis tool. The data is available yearly and requires many hours to organize.
# Author: Emma Jones (emma.jones@deq.virginia.gov)
# Last Updated: 11/22/2019
# Load libraries
library(rvest) # 0.3.4
library(tidyverse) #1.2.1
library(stringr) #1.4.0
library(sf) # 0.7-7
# Step 1: Identify year to pull data from. This is critical to getting the FTP web address correct for
# all subsequent analyses. If the web address fed to the subsequent function is incorrect, then
# no data can be pulled.
# e.g. https://www2.census.gov/geo/tiger/TIGER2018/ is the appropriate web address for 2018 data.
year <- 2022
FTPaddress <- paste0("https://www2.census.gov/geo/tiger/TIGER",year,"/ROADS")
dirName <- 'tigerRoadsPull' # create a folder in the project directory with this exact name to store the downloaded zip archives
#
# Step 2: Scrape FTP page for headers. The scraping_tiger object would contain all available files
# (kept commented out for reference; not needed for the download step below).
#scraping_tiger <- read_html(FTPaddress) %>% # edit this
# html_nodes("tr") %>% # find only table records
# html_text() # convert to character object
# Step 2.1: Identify all counties that intersect the VA NHD
#This was done in GIS by clipping US county data to the VA NHD. All the state and county FIPS codes that
# could fall into a given probmon watershed are then able to be efficiently downloaded from the TIGER FTP
#VANHD <- st_read('G:/evjones/GIS/ProbMonGIS/GISdata/nhd_83albers.shp')
#UScounties <- st_read('GISdata/tl_2017_us_county.shp') %>%
# st_transform(st_crs(VANHD))
#nhdCounties <- UScounties[VANHD, ]
#st_write(nhdCounties, "GISdata/counties.shp")
#rm(UScounties);rm(VANHD)
# Don't need to repeat above step more than once, here are counties we need
nhdCounties <- st_read('GISdata/counties.shp') %>%
  st_set_geometry(NULL) %>%
  distinct(GEOID) # keep only unique FIPS codes (already concatenated with state + county)
# Step 3: Download all appropriate files.
# custom function to do so
# Download TIGER/Line road zip archives for `year` into `outDirectory`.
#
# year:         census year (numeric or character), used to build the URL.
# fileName:     character vector of zip file names to fetch.
# outDirectory: existing directory the files are written to.
downloadTigerRoadsByYear <- function(year, fileName, outDirectory){
  for(i in seq_along(fileName)){ # seq_along() handles an empty vector safely (1:length would loop)
    download.file(url = paste0('https://www2.census.gov/geo/tiger/TIGER',
                               year, '/ROADS/', fileName[i]),
                  destfile = paste0(outDirectory, '/', fileName[i]),
                  mode = 'wb') # binary mode so zip archives are not corrupted on Windows
  }
}
downloadTigerRoadsByYear(year,
                         paste0('tl_',year,'_', as.character(nhdCounties$GEOID),'_roads.zip'),
                         paste0(dirName,'/', year))
#in unzip folder there is tl_2016_10005_roads.zip
# Step 4: Unzip and combine all files in the dirName directory into one shapefile
filenames <- list.files( paste0(dirName,'/', year), pattern="*.zip", full.names=TRUE)
#filenames <- filenames[-length(filenames)] # remove the last bogus record, only for 2018
lapply(filenames, unzip, exdir=paste0('tigerRoadsPull/unzipped/',year)) # unzip all filenames that end in zip into folder named unzipped
filenames_slim <- gsub('.zip', '' , gsub(paste0('tigerRoadsPull/',year,'/'),'', filenames ))
# check to make sure all downloaded files were unzipped correctly
filenamesUnzipped <- list.files(paste0(dirName,'/unzipped/',year), pattern="*.shx", full.names=F) # search by .shx bc .shp has a few extension options and duplicates unique names
filenamesUnzipped_slim <- gsub('.shx','',filenamesUnzipped)
all(filenames_slim %in% filenamesUnzipped_slim )
# if TRUE then all files unzipped correctly; otherwise list the missing ones
filenames_slim[!(filenames_slim %in% filenamesUnzipped_slim)]
# an output of character(0) is good because it means there are none missing
# Step 5/6: Read in the unzipped shapefiles in chunks of 50 (to conserve
# memory) and save each chunk out as its own shapefile. This replaces five
# copy-pasted read/combine/write stanzas; the output file names are unchanged
# (suffix = index of the last file in the chunk: 50, 100, 150, 200, 229, ...).
filenamesUnzipped <- paste0(dirName,'/unzipped/',year,'/',gsub('.shx','.shp', filenamesUnzipped)) # change .shx to .shp for file reading in
chunkSize <- 50
for (chunkStart in seq(1, length(filenamesUnzipped), by = chunkSize)) {
  chunkEnd <- min(chunkStart + chunkSize - 1, length(filenamesUnzipped))
  shapefiles <- filenamesUnzipped[chunkStart:chunkEnd] %>%
    map(st_read) %>%
    reduce(rbind)
  st_write(shapefiles, paste0('GISdata/TIGERroads/', year, chunkEnd, 'tigerRoads.shp'))
  rm(shapefiles) # free memory before reading the next chunk
}
|
# Shiny UI for a cars data set exploration app (Project 3).
# Packages and data are loaded once at startup and shared by all sessions.
library(shiny)
library(tidyverse)
library(ggplot2)
library(knitr)
library(DT)
library(psych)
library(rpart)
library(partykit)
library(randomForest)
library(readr)
# Cars data: semicolon-delimited text file with a header row.
dat.car <- read_delim("car.txt",delim=";",col_names = TRUE)
# Origin is categorical (per the app text: USA / Europe / Japan).
dat.car$Origin <- as.factor(dat.car$Origin)
# Drop the car-name column, keeping only the measured characteristics.
car.int <- dat.car %>% select(-c(Car))
# UI definition: a navbar application with five tabs --
# Information, Exploration, Clustering, Modeling, Data.
# NOTE(review): every input/output id used here ("summary", "histogram",
# "distPlot", "downloadData", ...) must have a matching handler in server.R;
# verify against the server file.
shinyUI(navbarPage("Project 3",
                   tabPanel(
                     "Information",
                     #add in latex functionality if needed
                     withMathJax(),
                     strong('Note'),
                     p('This web application is developed with',
                       a("Shiny.", href="http://www.rstudio.com/shiny/", target="_blank"),
                       ''),
                     br(),
                     strong('List of Packages Used'), br(),
                     code('library(shiny)'),br(),
                     code('library(shinyAce)'),br(),
                     code('library(psych)'),br(),
                     code('library(rpart)'),br(),
                     code('library(partykit)'),br(),
                     code('library(randomForest)'),br(),
                     code('library(ggplot2)'),br(),
                     code('library(tidyverse)'),br(),
                     code('library(knitr)'),br(),
                     code('library(DT)'),br(),
                     code('library(readr)'),br(),
                     br(),
                     strong('Application'),
                     p('This application studies a cars data set by means of summary statistics, clustering, and modeling.',
                       'There are a total of 5 tabs, each with their own purpose - this tab is the',
                       'information tab - where information such as the purpose of the application and',
                       'the data is provided. :)'
                     ),
                     p('The EXPLORATION tab provides common numeric and',
                       'graphical summaries of the characteristics of the',
                       'cars data set.'),
                     p('The CLUSTERING tab applies the Ward hierarchical clustering and',
                       'the K-means clustering. On this tab, feel free to download',
                       'the output plots.'),
                     p('The MODELING tab applies the Decision tree and Random forest methods providing',
                       'variable importance and error rates.'
                     ),
                     strong('Data'),
                     p('The data consist of car information of different brands from',
                       'USA, Europe, and Japan obtained from',
                       a('here.', href='https://perso.telecom-paristech.fr/eagan/class/igr204/datasets',
                         target="_blank")),
                     p('It is a data set of 406 cars and 8 characteristics.'),
                     br(),
                     strong('Code'),
                     p('The code for this web application is available at',
                       a('GitHub.', href='https://github.com/srhvng/CarsRshiny', target="_blank")),
                     p('If you want to run this code on your computer (in a local R session), download the data and run the code below:',
                       br(),
                       code('library(shiny)'),br(),
                       code('runGitHub("CarsRshiny","srhvng")')
                     ),
                     p(br())
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Exploration: pick a column, show its summary and histogram,
                   # optionally faceted by Origin; click coordinates are echoed below.
                   tabPanel("Exploration",
                            # Application title
                            uiOutput("extitle"),
                            # Sidebar with options for the data set
                            sidebarLayout(
                              sidebarPanel(
                                h3("Cars Data Set"),
                                radioButtons("p", "Select column of Car dataset:",
                                             list("MPG"='MPG', "Cylinders"='Cylinders', "Displacement"='Displacement', "Horsepower"='Horsepower',
                                                  "Weight"='Weight', "Acceleration"='Acceleration',"Model"='Model')),
                                checkboxInput("facet", h4("View by Origin", style = "color:red;"))
                              ),
                              mainPanel(
                                verbatimTextOutput("summary"),
                                plotOutput("histogram",
                                           click = "plot_click",
                                           dblclick = "plot_dblclick",
                                           hover = "plot_hover",
                                           brush = "plot_brush"),
                                verbatimTextOutput("click")
                              )
                            )
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Clustering: one slider shared by both the Ward dendrogram
                   # (checkbox-selected variables) and the K-means scatter plot.
                   tabPanel(
                     "Clustering",
                     # Application title
                     headerPanel('Clustering of the Cars Data'),
                     # Sidebar with a slider input for number of observations and checkboxes
                     sidebarPanel(
                       sliderInput("clusters", "Number of clusters:",
                                   min = 2,    # 1 cluster is pointless
                                   max = 10,   # too many is too crowded
                                   value = 4) ,  # sensible start
                       br(),
                       h3("Cluster Dendrogram: Select Variable(s)"),
                       helpText("Note: This Cluster Dendrogram runs Ward hierarchical clustering",
                                "using a Euclidean distance metric" ,
                                "(\\(d_{ij}=d(\\{X_i\\}, \\{X_j\\}) = { \\|X_i - X_j\\|^2}\\))",
                                "and standardised versions of the variables" ,
                                "(i.e. with mean=0 sd=1) you select in the checkboxes below.",
                                "You can choose the number of clusters with the slider above.") ,
                       br(),
                       checkboxInput("mpg", "miles per gallon", TRUE) ,   # as in regression project
                       checkboxInput("Displacement", "displacement", FALSE) ,
                       checkboxInput("Horsepower", "gross horsepower", FALSE) ,
                       checkboxInput("Cylinders", "cylinders", FALSE) ,
                       checkboxInput("Weight", "weight", TRUE) ,
                       checkboxInput("Acceleration", "acceleration", FALSE),
                       checkboxInput("Model", "model", FALSE),
                       #download Dendrogram plot
                       downloadButton("dwnlddendrogram","Download Cluster Dendrogram"),
                       br(),
                       h3("Scatter Plot: Select Variable"),
                       helpText("Note: This cluster plot runs the K-means clustering.",
                                "You can choose the axis and the number of clusters with",
                                "the slider above."),
                       selectInput('xcol', 'X Variable', names(car.int)),
                       selectInput('ycol', 'Y Variable', names(car.int),
                                   selected=names(car.int)[[2]]),
                       #download K-means plot
                       downloadButton("downloadplot","Download K-means Plot")
                     ),
                     # Show a plot of the generated cluster dendrogram
                     mainPanel(
                       plotOutput("distPlot"),
                       br(),
                       plotOutput('plot1')
                     )
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Modeling: scatter-plot matrix, decision tree (all or chosen
                   # explanatory variables), and random forest diagnostics.
                   tabPanel(
                     "Modeling",
                     strong("Scatter plot matrices"),
                     br(),
                     plotOutput("corPlot", width="120%"),
                     br(),
                     h3("Decision tree"),
                     uiOutput("varselect1"),
                     radioButtons("explvars", "Choose explanatory variables:",
                                  list("All" = "all", "Select" = "select"), selected = "all"),
                     # Display this only if "expl.vars" is "select"
                     conditionalPanel(condition = "input.explvars == 'select'",
                                      uiOutput("varselect2")
                     ),
                     verbatimTextOutput("dtree"),
                     br(),
                     h3("Plotting the decision tree"),
                     br(),
                     plotOutput("dtreePlot", width="120%"),
                     br(),
                     h3("Random forest"),
                     verbatimTextOutput("randforest"),
                     plotOutput("errorPlot", width="80%"),
                     br(),
                     strong("Variable importance"),
                     plotOutput("varimPlot"),
                     br(),
                     br(),
                     br(),
                     strong('R session info'),
                     verbatimTextOutput("info.out")
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Data: browsable table of the full data set plus a download button.
                   tabPanel(
                     "Data",
                     sidebarLayout(
                       sidebarPanel(
                         h3("Cars Data Set"),
                         #Download Data
                         downloadButton("downloadData", "Download")
                       ),
                       mainPanel(
                         DT::dataTableOutput("table")
                       )
                     )
                   )))
|
/ui.R
|
no_license
|
srhvng/CarsRshiny
|
R
| false
| false
| 12,784
|
r
|
library(shiny)
library(tidyverse)
library(ggplot2)
library(knitr)
library(DT)
library(psych)
library(rpart)
library(partykit)
library(randomForest)
library(readr)
# Load the cars data set; fields are ';'-separated with a header row.
dat.car <- read_delim("car.txt",delim=";",col_names = TRUE)
# Origin is categorical (USA / Europe / Japan) -- store it as a factor.
dat.car$Origin <- as.factor(dat.car$Origin)
# Copy without the car-name column, used for the clustering variable pickers.
car.int <- dat.car %>% select(-c(Car))
# UI definition: a navbar application with five tabs --
# Information, Exploration, Clustering, Modeling, Data.
# NOTE(review): every input/output id used here ("summary", "histogram",
# "distPlot", "downloadData", ...) must have a matching handler in server.R;
# verify against the server file.
shinyUI(navbarPage("Project 3",
                   tabPanel(
                     "Information",
                     #add in latex functionality if needed
                     withMathJax(),
                     strong('Note'),
                     p('This web application is developed with',
                       a("Shiny.", href="http://www.rstudio.com/shiny/", target="_blank"),
                       ''),
                     br(),
                     strong('List of Packages Used'), br(),
                     code('library(shiny)'),br(),
                     code('library(shinyAce)'),br(),
                     code('library(psych)'),br(),
                     code('library(rpart)'),br(),
                     code('library(partykit)'),br(),
                     code('library(randomForest)'),br(),
                     code('library(ggplot2)'),br(),
                     code('library(tidyverse)'),br(),
                     code('library(knitr)'),br(),
                     code('library(DT)'),br(),
                     code('library(readr)'),br(),
                     br(),
                     strong('Application'),
                     p('This application studies a cars data set by means of summary statistics, clustering, and modeling.',
                       'There are a total of 5 tabs, each with their own purpose - this tab is the',
                       'information tab - where information such as the purpose of the application and',
                       'the data is provided. :)'
                     ),
                     p('The EXPLORATION tab provides common numeric and',
                       'graphical summaries of the characteristics of the',
                       'cars data set.'),
                     p('The CLUSTERING tab applies the Ward hierarchical clustering and',
                       'the K-means clustering. On this tab, feel free to download',
                       'the output plots.'),
                     p('The MODELING tab applies the Decision tree and Random forest methods providing',
                       'variable importance and error rates.'
                     ),
                     strong('Data'),
                     p('The data consist of car information of different brands from',
                       'USA, Europe, and Japan obtained from',
                       a('here.', href='https://perso.telecom-paristech.fr/eagan/class/igr204/datasets',
                         target="_blank")),
                     p('It is a data set of 406 cars and 8 characteristics.'),
                     br(),
                     strong('Code'),
                     p('The code for this web application is available at',
                       a('GitHub.', href='https://github.com/srhvng/CarsRshiny', target="_blank")),
                     p('If you want to run this code on your computer (in a local R session), download the data and run the code below:',
                       br(),
                       code('library(shiny)'),br(),
                       code('runGitHub("CarsRshiny","srhvng")')
                     ),
                     p(br())
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Exploration: pick a column, show its summary and histogram,
                   # optionally faceted by Origin; click coordinates are echoed below.
                   tabPanel("Exploration",
                            # Application title
                            uiOutput("extitle"),
                            # Sidebar with options for the data set
                            sidebarLayout(
                              sidebarPanel(
                                h3("Cars Data Set"),
                                radioButtons("p", "Select column of Car dataset:",
                                             list("MPG"='MPG', "Cylinders"='Cylinders', "Displacement"='Displacement', "Horsepower"='Horsepower',
                                                  "Weight"='Weight', "Acceleration"='Acceleration',"Model"='Model')),
                                checkboxInput("facet", h4("View by Origin", style = "color:red;"))
                              ),
                              mainPanel(
                                verbatimTextOutput("summary"),
                                plotOutput("histogram",
                                           click = "plot_click",
                                           dblclick = "plot_dblclick",
                                           hover = "plot_hover",
                                           brush = "plot_brush"),
                                verbatimTextOutput("click")
                              )
                            )
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Clustering: one slider shared by both the Ward dendrogram
                   # (checkbox-selected variables) and the K-means scatter plot.
                   tabPanel(
                     "Clustering",
                     # Application title
                     headerPanel('Clustering of the Cars Data'),
                     # Sidebar with a slider input for number of observations and checkboxes
                     sidebarPanel(
                       sliderInput("clusters", "Number of clusters:",
                                   min = 2,    # 1 cluster is pointless
                                   max = 10,   # too many is too crowded
                                   value = 4) ,  # sensible start
                       br(),
                       h3("Cluster Dendrogram: Select Variable(s)"),
                       helpText("Note: This Cluster Dendrogram runs Ward hierarchical clustering",
                                "using a Euclidean distance metric" ,
                                "(\\(d_{ij}=d(\\{X_i\\}, \\{X_j\\}) = { \\|X_i - X_j\\|^2}\\))",
                                "and standardised versions of the variables" ,
                                "(i.e. with mean=0 sd=1) you select in the checkboxes below.",
                                "You can choose the number of clusters with the slider above.") ,
                       br(),
                       checkboxInput("mpg", "miles per gallon", TRUE) ,   # as in regression project
                       checkboxInput("Displacement", "displacement", FALSE) ,
                       checkboxInput("Horsepower", "gross horsepower", FALSE) ,
                       checkboxInput("Cylinders", "cylinders", FALSE) ,
                       checkboxInput("Weight", "weight", TRUE) ,
                       checkboxInput("Acceleration", "acceleration", FALSE),
                       checkboxInput("Model", "model", FALSE),
                       #download Dendrogram plot
                       downloadButton("dwnlddendrogram","Download Cluster Dendrogram"),
                       br(),
                       h3("Scatter Plot: Select Variable"),
                       helpText("Note: This cluster plot runs the K-means clustering.",
                                "You can choose the axis and the number of clusters with",
                                "the slider above."),
                       selectInput('xcol', 'X Variable', names(car.int)),
                       selectInput('ycol', 'Y Variable', names(car.int),
                                   selected=names(car.int)[[2]]),
                       #download K-means plot
                       downloadButton("downloadplot","Download K-means Plot")
                     ),
                     # Show a plot of the generated cluster dendrogram
                     mainPanel(
                       plotOutput("distPlot"),
                       br(),
                       plotOutput('plot1')
                     )
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Modeling: scatter-plot matrix, decision tree (all or chosen
                   # explanatory variables), and random forest diagnostics.
                   tabPanel(
                     "Modeling",
                     strong("Scatter plot matrices"),
                     br(),
                     plotOutput("corPlot", width="120%"),
                     br(),
                     h3("Decision tree"),
                     uiOutput("varselect1"),
                     radioButtons("explvars", "Choose explanatory variables:",
                                  list("All" = "all", "Select" = "select"), selected = "all"),
                     # Display this only if "expl.vars" is "select"
                     conditionalPanel(condition = "input.explvars == 'select'",
                                      uiOutput("varselect2")
                     ),
                     verbatimTextOutput("dtree"),
                     br(),
                     h3("Plotting the decision tree"),
                     br(),
                     plotOutput("dtreePlot", width="120%"),
                     br(),
                     h3("Random forest"),
                     verbatimTextOutput("randforest"),
                     plotOutput("errorPlot", width="80%"),
                     br(),
                     strong("Variable importance"),
                     plotOutput("varimPlot"),
                     br(),
                     br(),
                     br(),
                     strong('R session info'),
                     verbatimTextOutput("info.out")
                   ),
                   ############################################################################################################################################
                   ###############---------- NEW TAB ----------################################################################################################
                   ############################################################################################################################################
                   # Data: browsable table of the full data set plus a download button.
                   tabPanel(
                     "Data",
                     sidebarLayout(
                       sidebarPanel(
                         h3("Cars Data Set"),
                         #Download Data
                         downloadButton("downloadData", "Download")
                       ),
                       mainPanel(
                         DT::dataTableOutput("table")
                       )
                     )
                   )))
|
# Compare the top-ranked ANOVA hits between the new-model/new-data run and
# the new-model/old-data run of the GDSC1000 pipeline, then overlay the
# overlap percentage with each run's FDR curve.
load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_newModel_new_data/OUTPUT/ANOVA_results.rdata')
nnTOTRES<-TOTRES
load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_newModel_old_data/OUTPUT/ANOVA_results.rdata')
noTOTRES<-TOTRES
# load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_oldModel_old_data/OUTPUT/MANOVA_results.rdata')
# ooTOTRES<-TOTRES

# Build the hit identifiers (columns 2:4 pasted together) ONCE. The original
# re-pasted both full key vectors on every loop iteration, which was O(n^2)
# paste calls; with the keys precomputed the loop is fast enough that the
# per-iteration progress print is no longer needed.
keys_no <- paste(noTOTRES[, 2], noTOTRES[, 3], noTOTRES[, 4])
keys_nn <- paste(nnTOTRES[, 2], nnTOTRES[, 3], nnTOTRES[, 4])

n_top <- 500
commons <- rep(0, n_top)  # hits shared by both runs among the top i
no_only <- rep(0, n_top)  # hits unique to the new-model/old-data run
nn_only <- rep(0, n_top)  # hits unique to the new-model/new-data run
for (i in seq_len(n_top)) {
  commons[i] <- length(intersect(keys_no[1:i], keys_nn[1:i]))
  no_only[i] <- length(setdiff(keys_no[1:i], keys_nn[1:i]))
  nn_only[i] <- length(setdiff(keys_nn[1:i], keys_no[1:i]))
}

# Percentage of shared hits among the top n (purple), overlaid via par(new=TRUE)
# with the FDR curves of each run (red = old data, blue = new data) on a
# shared 0-100 y-axis.
plot(100*commons[1:300]/(1:300),type='b',ylim=c(0,100),pch=16,col='purple',ylab='',xlab='top n.significant hits')
par(new=TRUE)
plot(as.numeric(noTOTRES[1:300,"ANOVA FEATURE FDR %"]),type='l',ylim=c(0,100),pch=16,col='red',ylab='',xlab='',xaxt='n',yaxt='n',frame.plot = FALSE)
par(new=TRUE)
plot(as.numeric(nnTOTRES[1:300,"ANOVA FEATURE FDR %"]),type='l',ylim=c(0,100),pch=16,col='blue',ylab='',xlab='',xaxt='n',yaxt='n',frame.plot = FALSE)
|
/R/Project_bench_01.R
|
no_license
|
francescojm/FI.GDSC.ANOVA.MULTIFACTORIAL
|
R
| false
| false
| 1,320
|
r
|
# Compare the top-ranked ANOVA hits between the new-model/new-data run and
# the new-model/old-data run of the GDSC1000 pipeline, then overlay the
# overlap percentage with each run's FDR curve.
load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_newModel_new_data/OUTPUT/ANOVA_results.rdata')
nnTOTRES<-TOTRES
load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_newModel_old_data/OUTPUT/ANOVA_results.rdata')
noTOTRES<-TOTRES
# load('Fi_gdsc1000_RESULTS/gdscANOVA/PANCAN_GDSC1000_oldModel_old_data/OUTPUT/MANOVA_results.rdata')
# ooTOTRES<-TOTRES

# Build the hit identifiers (columns 2:4 pasted together) ONCE. The original
# re-pasted both full key vectors on every loop iteration, which was O(n^2)
# paste calls; with the keys precomputed the loop is fast enough that the
# per-iteration progress print is no longer needed.
keys_no <- paste(noTOTRES[, 2], noTOTRES[, 3], noTOTRES[, 4])
keys_nn <- paste(nnTOTRES[, 2], nnTOTRES[, 3], nnTOTRES[, 4])

n_top <- 500
commons <- rep(0, n_top)  # hits shared by both runs among the top i
no_only <- rep(0, n_top)  # hits unique to the new-model/old-data run
nn_only <- rep(0, n_top)  # hits unique to the new-model/new-data run
for (i in seq_len(n_top)) {
  commons[i] <- length(intersect(keys_no[1:i], keys_nn[1:i]))
  no_only[i] <- length(setdiff(keys_no[1:i], keys_nn[1:i]))
  nn_only[i] <- length(setdiff(keys_nn[1:i], keys_no[1:i]))
}

# Percentage of shared hits among the top n (purple), overlaid via par(new=TRUE)
# with the FDR curves of each run (red = old data, blue = new data) on a
# shared 0-100 y-axis.
plot(100*commons[1:300]/(1:300),type='b',ylim=c(0,100),pch=16,col='purple',ylab='',xlab='top n.significant hits')
par(new=TRUE)
plot(as.numeric(noTOTRES[1:300,"ANOVA FEATURE FDR %"]),type='l',ylim=c(0,100),pch=16,col='red',ylab='',xlab='',xaxt='n',yaxt='n',frame.plot = FALSE)
par(new=TRUE)
plot(as.numeric(nnTOTRES[1:300,"ANOVA FEATURE FDR %"]),type='l',ylim=c(0,100),pch=16,col='blue',ylab='',xlab='',xaxt='n',yaxt='n',frame.plot = FALSE)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/NNshapeReg.r
\name{NNshapeReg}
\alias{NNshapeReg}
\title{Estimate the shape by averaging the shape of the nearest neighbours.}
\usage{
NNshapeReg(x, y = NULL, n = 3, mahalanobis = FALSE,
mc.cores = parallel::detectCores())
}
\arguments{
\item{x}{an array or matrix (one row per specimen) with data used for
estimating weights.}
\item{y}{an array or matrix (one row per specimen) with landmark data on which
the weighted averaging is applied for prediction. If NULL, x will be used
for both tasks.}
\item{n}{amount of nearest neighbours to consider}
\item{mahalanobis}{logical: use mahalanobis distance}
\item{mc.cores}{integer: amount of cores used for parallel processing.}
}
\value{
matrix or array of estimates.
}
\description{
Estimate the shape of one set of landmarks by averaging the shape of the
nearest neighbours obtained by a second set of landmarks. Weights are
calculated either from Mahalanobis or Procrustes distances. This can be
useful for data with missing landmarks.
}
\details{
This function calculates weights from one set of shape data and then
estimates the shape of another (or same) set of landmarks. CAUTION:
landmark data has to be registered beforehand.
}
\examples{
library(shapes)
proc <- procSym(gorf.dat)
#use the closest 3 specimen based on the first 4 landmarks
#to estimate the shape
estim <- NNshapeReg(proc$rotated[1:4,,],proc$rotated,n=3,mc.cores=1)
#compare estimation and true config
plot(proc$rotated[,,1],asp=1)
points(estim[,,1],col=2)
}
\seealso{
\code{\link{proc.weight}}, \code{\link{fixLMtps}}
}
|
/man/NNshapeReg.Rd
|
no_license
|
zedyautja/Morpho
|
R
| false
| false
| 1,633
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/NNshapeReg.r
\name{NNshapeReg}
\alias{NNshapeReg}
\title{Estimate the shape by averaging the shape of the nearest neighbours.}
\usage{
NNshapeReg(x, y = NULL, n = 3, mahalanobis = FALSE,
mc.cores = parallel::detectCores())
}
\arguments{
\item{x}{an array or matrix (one row per specimen) with data used for
estimating weights.}
\item{y}{an array or matrix (one row per specimen) with landmark data on which
the weighted averaging is applied for prediction. If NULL, x will be used
for both tasks.}
\item{n}{amount of nearest neighbours to consider}
\item{mahalanobis}{logical: use mahalanobis distance}
\item{mc.cores}{integer: amount of cores used for parallel processing.}
}
\value{
matrix or array of estimates.
}
\description{
Estimate the shape of one set of landmarks by averaging the shape of the
nearest neighbours obtained by a second set of landmarks. Weights are
calculated either from Mahalanobis or Procrustes distances. This can be
useful for data with missing landmarks.
}
\details{
This function calculates weights from one set of shape data and then
estimates the shape of another (or same) set of landmarks. CAUTION:
landmark data has to be registered beforehand.
}
\examples{
library(shapes)
proc <- procSym(gorf.dat)
#use the closest 3 specimen based on the first 4 landmarks
#to estimate the shape
estim <- NNshapeReg(proc$rotated[1:4,,],proc$rotated,n=3,mc.cores=1)
#compare estimation and true config
plot(proc$rotated[,,1],asp=1)
points(estim[,,1],col=2)
}
\seealso{
\code{\link{proc.weight}}, \code{\link{fixLMtps}}
}
|
#' Joint Random Forest for the simultaneous estimation of multiple related networks
#'
#' MAIN FUNCTION -- > iJRFNet
# Extract the second importance column from a fitted randomForest object.
#
# Modified from randomForest::importance. In the original copy the upstream
# `type` and `class` arguments were hard-set to NULL at the top of the body,
# which made every `hasType` / class-specific branch unreachable; that dead
# code has been removed. The live behaviour is unchanged: optionally scale
# the permutation-importance columns by their standard errors, then return
# column 2 of the importance matrix.
#
# Args:
#   x     - object of class "randomForest"; must carry an importance matrix
#           with at least two columns (and importanceSD when scale = TRUE).
#   scale - if TRUE, divide all but the last column of x$importance by
#           x$importanceSD (SDs below machine epsilon are treated as 1 to
#           avoid division by ~0).
# Returns:
#   numeric vector: the second column of the (possibly scaled) importance
#   matrix. NOTE(review): for the regression forests built here this is
#   presumably the node-purity measure -- confirm against the fitting code.
importance <- function(x, scale=TRUE) {
  if (!inherits(x, "randomForest"))
    stop("x is not of class randomForest")
  imp <- x$importance
  if (scale) {
    SD <- x$importanceSD
    imp[, -ncol(imp)] <-
      imp[, -ncol(imp), drop = FALSE] /
      ifelse(SD < .Machine$double.eps, 1, SD)
  }
  imp[, 2]
}
# --- Functions called by iJRFNet
# Fit a joint random forest for ONE target across `nclasses` data classes by
# calling the compiled C routine "iJRF_regRF". This is a modified copy of
# randomForest:::randomForest.default restricted to regression; several
# classification checks survive only as vestigial code. Statement order and
# the .C argument order are load-bearing -- do not reorder.
#
# Key arguments (others are inherited from randomForest and mostly
# hard-overridden inside the body):
#   x        - predictor matrix with variables in rows (nrow(x) = p * nclasses,
#              one stacked slab of p predictors per class) and samples in
#              columns.
#   y        - response matrix, one row per class; n = ncol(y) samples.
#   ntree    - number of trees to grow.
#   sampsize - per-tree sample size; ww = 1/sampsize is the per-sample weight
#              handed to the C code.
#   nclasses - number of data classes fitted jointly.
#   sw       - weights forwarded to the C code (NOTE(review): presumably
#              per-predictor sampling weights -- confirm against the C source).
# Returns a list of class "randomForest" (type "regression") whose
# `importance` is stacked over classes (p * nclasses rows).
"iJRF_onetarget" <-
  function(x, y=NULL, xtest=NULL, ytest=NULL, ntree,
           sampsize,
           totsize = if (replace) ncol(x) else ceiling(.632*ncol(x)),
           mtry=if (!is.null(y) && !is.factor(y))
             max(floor(nrow(x)/3), 1) else floor(sqrt(nrow(x))),
           replace=TRUE, classwt=NULL, cutoff, strata,
           nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
           maxnodes=NULL,
           importance=FALSE, localImp=FALSE, nPerm=1,
           proximity, oob.prox=proximity,
           norm.votes=TRUE, do.trace=FALSE,
           keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
           keep.inbag=FALSE, nclasses, sw,...) {
    # Uniform per-sample weight passed to the C routine.
    ww=1/sampsize;
    nclass=mylevels=ipi=NULL
    addclass <- is.null(y)
    classRF <- addclass || is.factor(y)
    if (!classRF && length(unique(y)) <= 5) {
      warning("The response has five or fewer unique values.  Are you sure you want to do regression?")
    }
    if (classRF && !addclass && length(unique(y)) < 2)
      stop("Need at least two classes to do classification.")
    n <- ncol(y)            # number of samples
    p <- nrow(x)/nclasses   # number of variables
    if (n == 0) stop("data (x) has 0 rows")
    x.row.names <- rownames(x)
    x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
    # NOTE: the test-set arguments are hard-disabled here; xtest/ytest passed
    # by the caller are ignored and the forest is always kept when y is given.
    keep.forest=!is.null(y)
    xtest=NULL; ytest=NULL
    testdat <- !is.null(xtest)
    if (testdat) {
      if (ncol(x) != ncol(xtest))
        stop("x and xtest must have same number of columns")
      ntest <- nrow(xtest)
      xts.row.names <- rownames(xtest)
    }
    prox <- proxts <- double(1)
    ## Check for NAs.
    if (any(is.na(x))) stop("NA not permitted in predictors")
    if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
    if (any(is.na(y))) stop("NA not permitted in response")
    if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
    if (is.data.frame(x)) {
      xlevels <- lapply(x, mylevels)
      ncat <- sapply(xlevels, length)
      ## Treat ordered factors as numerics.
      ncat <- ifelse(sapply(x, is.ordered), 1, ncat)
      x <- data.matrix(x)
      if(testdat) {
        if(!is.data.frame(xtest))
          stop("xtest must be data frame if x is")
        xfactor <- which(sapply(xtest, is.factor))
        if (length(xfactor) > 0) {
          for (i in xfactor) {
            if (any(! levels(xtest[[i]]) %in% xlevels[[i]]))
              stop("New factor levels in xtest not present in x")
            xtest[[i]] <-
              factor(xlevels[[i]][match(xtest[[i]], xlevels[[i]])],
                     levels=xlevels[[i]])
          }
        }
        xtest <- data.matrix(xtest)
      }
    } else {
      # All-numeric input: every predictor is continuous (category count 1).
      ncat <- rep(1, p)
      xlevels <- as.list(rep(0, p))
    }
    maxcat <- max(ncat)
    if (maxcat > 32)
      stop("Can not handle categorical predictors with more than 32 categories.")
    addclass <- FALSE
    proximity <- addclass
    # Pre-allocated output buffers filled in place by the C call; importance
    # rows are stacked over all classes.
    impout <- matrix(0.0, p*nclasses, 2)
    impSD <- matrix(0.0, p*nclasses, 1)
    #  names(impSD) <- x.col.names
    nsample <- if (addclass) 2 * n else n
    Stratify <- length(n) > 1
    nodesize=5;
    # Upper bound on nodes per tree for maximal regression trees.
    nrnodes <- 2 * trunc(n/max(1, nodesize - 4)) + 1
    maxnodes=NULL
    if (!is.null(maxnodes)) {
      ## convert # of terminal nodes to total # of nodes
      maxnodes <- 2 * maxnodes - 1
      if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
      nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
    }
    ## Compiled code expects variables in rows and observations in columns.
    #  x <- t(x)
    storage.mode(x) <- "double"
    xtest <- double(1)
    ytest <- double(1)
    ntest <- 1
    labelts <- FALSE
    nt <- if (keep.forest) ntree else 1
    nPerm=1
    do.trace=F; oob.prox=F
    corr.bias=FALSE
    keep.inbag=FALSE
    impmat <- double(1)
    replace=T
    # Argument order must match the C signature of iJRF_regRF exactly; the
    # trailing index vector keeps only the named output components.
    rfout <- .C("iJRF_regRF",
                x,
                y, ww,
                as.integer(c(totsize, p)),
                sampsize=as.integer(sampsize), as.integer(totsize),
                as.integer(nodesize),
                as.integer(nrnodes),
                as.integer(ntree),
                as.integer(mtry),
                as.integer(c(importance, localImp, nPerm)),
                as.integer(ncat),
                as.integer(maxcat),
                as.integer(do.trace),
                as.integer(proximity),
                as.integer(oob.prox),
                as.integer(corr.bias),
                ypred = double(n * nclasses),
                impout = impout,
                impmat = impmat,
                impSD = impSD,
                prox = prox,
                ndbigtree = integer(ntree),
                nodestatus = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                leftDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                rightDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                nodepred = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                bestvar = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                xbestsplit = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                mse = double(ntree * nclasses),
                keep = as.integer(c(keep.forest, keep.inbag)),
                replace = as.integer(replace),
                testdat = as.integer(testdat),
                xts = xtest,
                ntest = as.integer(ntest),
                yts = as.double(ytest),
                labelts = as.integer(labelts),
                ytestpred = double(ntest),
                proxts = proxts,
                msets = double(if (labelts) ntree else 1),
                coef = double(2),
                oob.times = integer(n),
                inbag = if (keep.inbag)
                  matrix(integer(n * ntree), n) else integer(1), as.integer(nclasses),
                sw = as.double(sw))[c(16:28, 36:41)]
    #  ## Format the forest component, if present.
    # Trim the tree arrays down to the largest tree actually grown.
    if (keep.forest) {
      max.nodes <- max(rfout$ndbigtree)
      rfout$nodestatus <-
        rfout$nodestatus[1:max.nodes, , drop=FALSE]
      rfout$bestvar <-
        rfout$bestvar[1:max.nodes, , drop=FALSE]
      rfout$nodepred <-
        rfout$nodepred[1:max.nodes, , drop=FALSE]
      rfout$xbestsplit <-
        rfout$xbestsplit[1:max.nodes, , drop=FALSE]
      rfout$leftDaughter <-
        rfout$leftDaughter[1:max.nodes, , drop=FALSE]
      rfout$rightDaughter <-
        rfout$rightDaughter[1:max.nodes, , drop=FALSE]
    }
    cl <- match.call()
    cl[[1]] <- as.name("randomForest")
    #  ## Make sure those obs. that have not been OOB get NA as prediction.
    ypred <- rfout$ypred
    if (any(rfout$oob.times < 1)) {
      ypred[rfout$oob.times == 0] <- NA
    }
    # Assemble a randomForest-compatible result object (regression type).
    out <- list(call = cl,
                type = "regression",
                predicted =0,
                mse = rfout$mse,
                rsq = 1 - rfout$mse / (var(y[1,]) * (n-1) / n),
                oob.times = rfout$oob.times,
                importance = if (importance) matrix(rfout$impout, p * nclasses, 2) else
                  matrix(rfout$impout, ncol=1),
                importanceSD=if (importance) rfout$impSD else NULL,
                localImportance = if (localImp)
                  matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
                                                           x.row.names)) else NULL,
                proximity = if (proximity) matrix(rfout$prox, n, n,
                                                  dimnames = list(x.row.names, x.row.names)) else NULL,
                ntree = ntree,
                mtry = mtry,
                forest = if (keep.forest)
                  c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
                            "rightDaughter", "nodepred", "bestvar",
                            "xbestsplit")],
                    list(ncat = ncat), list(nrnodes=max.nodes),
                    list(ntree=ntree), list(xlevels=xlevels)) else NULL,
                coefs = if (corr.bias) rfout$coef else NULL,
                y = y,
                test = if(testdat) {
                  list(predicted = structure(rfout$ytestpred,
                                             names=xts.row.names),
                       mse = if(labelts) rfout$msets else NULL,
                       rsq = if(labelts) 1 - rfout$msets /
                         (var(ytest) * (n-1) / n) else NULL,
                       proximity = if (proximity)
                         matrix(rfout$proxts / ntree, nrow = ntest,
                                dimnames = list(xts.row.names,
                                                c(xts.row.names,
                                                  x.row.names))) else NULL)
                } else NULL,
                inbag = if (keep.inbag)
                  matrix(rfout$inbag, nrow(rfout$inbag),
                         dimnames=list(x.row.names, NULL)) else NULL)
    #  print(rfout$mse)
    class(out) <- "randomForest"
    return(out)
  }
# Fit a single-target iRafNet regression forest by calling the compiled C
# routine "iRafNet_regRF". Close copy of randomForest:::randomForest.default
# with one extra argument, `sw`, forwarded to the C code (NOTE(review):
# presumably per-predictor sampling weights from prior knowledge -- confirm
# against the C source). The classification branches below are retained from
# the upstream function even though the callers here use regression.
# Statement order and the .C argument order are load-bearing -- do not reorder.
#
# Key arguments:
#   x, y     - predictor matrix (samples in rows here; transposed before the
#              C call) and response vector.
#   ntree    - number of trees; mtry, sampsize, nodesize as in randomForest.
#   sw       - numeric weights passed as the final .C argument.
# Returns a list of class "randomForest" (type "regression").
"irafnet_onetarget" <-
  function(x, y=NULL,  xtest=NULL, ytest=NULL, ntree,
           mtry=if (!is.null(y) && !is.factor(y))
             max(floor(ncol(x)/3), 1) else floor(sqrt(ncol(x))),
           replace=TRUE, classwt=NULL, cutoff, strata,
           sampsize = if (replace) nrow(x) else ceiling(.632*nrow(x)),
           nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
           maxnodes=NULL,
           importance=FALSE, localImp=FALSE, nPerm=1,
           proximity, oob.prox=proximity,
           norm.votes=TRUE, do.trace=FALSE,
           keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
           keep.inbag=FALSE, sw) {
    addclass <- is.null(y)
    classRF <- addclass || is.factor(y)
    if (!classRF && length(unique(y)) <= 5) {
      warning("The response has five or fewer unique values.  Are you sure you want to do regression?")
    }
    if (classRF && !addclass && length(unique(y)) < 2)
      stop("Need at least two classes to do classification.")
    n <- nrow(x)
    p <- ncol(x)
    if (n == 0) stop("data (x) has 0 rows")
    x.row.names <- rownames(x)
    x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
    ## overcome R's lazy evaluation:
    keep.forest <- keep.forest
    testdat <- !is.null(xtest)
    if (testdat) {
      if (ncol(x) != ncol(xtest))
        stop("x and xtest must have same number of columns")
      ntest <- nrow(xtest)
      xts.row.names <- rownames(xtest)
    }
    ## Make sure mtry is in reasonable range.
    if (mtry < 1 || mtry > p)
      warning("invalid mtry: reset to within valid range")
    mtry <- max(1, min(p, round(mtry)))
    if (!is.null(y)) {
      if (length(y) != n) stop("length of response must be the same as predictors")
      addclass <- FALSE
    } else {
      # Unsupervised mode: synthesize a two-class problem on a doubled x.
      if (!addclass) addclass <- TRUE
      y <- factor(c(rep(1, n), rep(2, n)))
      x <- rbind(x, x)
    }
    ## Check for NAs.
    if (any(is.na(x))) stop("NA not permitted in predictors")
    if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
    if (any(is.na(y))) stop("NA not permitted in response")
    if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
    # All predictors treated as continuous (category count 1 per column).
    ncat <- rep(1, p)
    xlevels <- as.list(rep(0, p))
    maxcat <- max(ncat)
    if (maxcat > 32)
      stop("Can not handle categorical predictors with more than 32 categories.")
    if (classRF) {
      # Classification-only validation of cutoff / classwt, kept from upstream.
      nclass <- length(levels(y))
      ## Check for empty classes:
      if (any(table(y) == 0)) stop("Can't have empty classes in y.")
      if (!is.null(ytest)) {
        if (!is.factor(ytest)) stop("ytest must be a factor")
        if (!all(levels(y) == levels(ytest)))
          stop("y and ytest must have the same levels")
      }
      if (missing(cutoff)) {
        cutoff <- rep(1 / nclass, nclass)
      } else {
        if (sum(cutoff) > 1 || sum(cutoff) < 0 || !all(cutoff > 0) ||
            length(cutoff) != nclass) {
          stop("Incorrect cutoff specified.")
        }
        if (!is.null(names(cutoff))) {
          if (!all(names(cutoff) %in% levels(y))) {
            stop("Wrong name(s) for cutoff")
          }
          cutoff <- cutoff[levels(y)]
        }
      }
      if (!is.null(classwt)) {
        if (length(classwt) != nclass)
          stop("length of classwt not equal to number of classes")
        ## If classwt has names, match to class labels.
        if (!is.null(names(classwt))) {
          if (!all(names(classwt) %in% levels(y))) {
            stop("Wrong name(s) for classwt")
          }
          classwt <- classwt[levels(y)]
        }
        if (any(classwt <= 0)) stop("classwt must be positive")
        ipi <- 1
      } else {
        classwt <- rep(1, nclass)
        ipi <- 0
      }
    } else addclass <- FALSE
    if (missing(proximity)) proximity <- addclass
    if (proximity) {
      prox <- matrix(0.0, n, n)
      proxts <- if (testdat) matrix(0, ntest, ntest + n) else double(1)
    } else {
      prox <- proxts <- double(1)
    }
    if (localImp) {
      importance <- TRUE
      impmat <- matrix(0, p, n)
    } else impmat <- double(1)
    # Pre-allocate importance output buffers filled in place by the C call.
    if (importance) {
      if (nPerm < 1) nPerm <- as.integer(1) else nPerm <- as.integer(nPerm)
      if (classRF) {
        impout <- matrix(0.0, p, nclass + 2)
        impSD <- matrix(0.0, p, nclass + 1)
      } else {
        impout <- matrix(0.0, p, 2)
        impSD <- double(p)
        names(impSD) <- x.col.names
      }
    } else {
      impout <- double(p)
      impSD <- double(1)
    }
    nsample <- if (addclass) 2 * n else n
    Stratify <- length(sampsize) > 1
    if ((!Stratify) && sampsize > nrow(x)) stop("sampsize too large")
    if (Stratify && (!classRF)) stop("sampsize should be of length one")
    if (classRF) {
      if (Stratify) {
        if (missing(strata)) strata <- y
        if (!is.factor(strata)) strata <- as.factor(strata)
        nsum <- sum(sampsize)
        if (length(sampsize) > nlevels(strata))
          stop("sampsize has too many elements.")
        if (any(sampsize <= 0) || nsum == 0)
          stop("Bad sampsize specification")
        ## If sampsize has names, match to class labels.
        if (!is.null(names(sampsize))) {
          sampsize <- sampsize[levels(strata)]
        }
        if (any(sampsize > table(strata)))
          stop("sampsize can not be larger than class frequency")
      } else {
        nsum <- sampsize
      }
      nrnodes <- 2 * trunc(nsum / nodesize) + 1
    } else {
      ## For regression trees, need to do this to get maximal trees.
      nrnodes <- 2 * trunc(sampsize/max(1, nodesize - 4)) + 1
    }
    if (!is.null(maxnodes)) {
      ## convert # of terminal nodes to total # of nodes
      maxnodes <- 2 * maxnodes - 1
      if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
      nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
    }
    ## Compiled code expects variables in rows and observations in columns.
    x <- t(x)
    storage.mode(x) <- "double"
    if (testdat) {
      xtest <- t(xtest)
      storage.mode(xtest) <- "double"
      if (is.null(ytest)) {
        ytest <- labelts <- 0
      } else {
        labelts <- TRUE
      }
    } else {
      xtest <- double(1)
      ytest <- double(1)
      ntest <- 1
      labelts <- FALSE
    }
    nt <- if (keep.forest) ntree else 1
    # Argument order must match the C signature of iRafNet_regRF exactly;
    # `sw` is the final argument, and the trailing index vector keeps only
    # the named output components.
    rfout <- .C("iRafNet_regRF",
                x,
                as.double(y),
                as.integer(c(n, p)),
                as.integer(sampsize),
                as.integer(nodesize),
                as.integer(nrnodes),
                as.integer(ntree),
                as.integer(mtry),
                as.integer(c(importance, localImp, nPerm)),
                as.integer(ncat),
                as.integer(maxcat),
                as.integer(do.trace),
                as.integer(proximity),
                as.integer(oob.prox),
                as.integer(corr.bias),
                ypred = double(n),
                impout = impout,
                impmat = impmat,
                impSD = impSD,
                prox = prox,
                ndbigtree = integer(ntree),
                nodestatus = matrix(integer(nrnodes * nt), ncol=nt),
                leftDaughter = matrix(integer(nrnodes * nt), ncol=nt),
                rightDaughter = matrix(integer(nrnodes * nt), ncol=nt),
                nodepred = matrix(double(nrnodes * nt), ncol=nt),
                bestvar = matrix(integer(nrnodes * nt), ncol=nt),
                xbestsplit = matrix(double(nrnodes * nt), ncol=nt),
                mse = double(ntree),
                keep = as.integer(c(keep.forest, keep.inbag)),
                replace = as.integer(replace),
                testdat = as.integer(testdat),
                xts = xtest,
                ntest = as.integer(ntest),
                yts = as.double(ytest),
                labelts = as.integer(labelts),
                ytestpred = double(ntest),
                proxts = proxts,
                msets = double(if (labelts) ntree else 1),
                coef = double(2),
                oob.times = integer(n),
                inbag = if (keep.inbag)
                  matrix(integer(n * ntree), n) else integer(1), sw = as.double(sw))[c(16:28, 36:41)]
    ## Format the forest component, if present.
    # Trim the tree arrays down to the largest tree actually grown.
    if (keep.forest) {
      max.nodes <- max(rfout$ndbigtree)
      rfout$nodestatus <-
        rfout$nodestatus[1:max.nodes, , drop=FALSE]
      rfout$bestvar <-
        rfout$bestvar[1:max.nodes, , drop=FALSE]
      rfout$nodepred <-
        rfout$nodepred[1:max.nodes, , drop=FALSE]
      rfout$xbestsplit <-
        rfout$xbestsplit[1:max.nodes, , drop=FALSE]
      rfout$leftDaughter <-
        rfout$leftDaughter[1:max.nodes, , drop=FALSE]
      rfout$rightDaughter <-
        rfout$rightDaughter[1:max.nodes, , drop=FALSE]
    }
    cl <- match.call()
    cl[[1]] <- as.name("randomForest")
    ## Make sure those obs. that have not been OOB get NA as prediction.
    ypred <- rfout$ypred
    if (any(rfout$oob.times < 1)) {
      ypred[rfout$oob.times == 0] <- NA
    }
    # Assemble a randomForest-compatible result object (regression type).
    out <- list(call = cl,
                type = "regression",
                predicted = structure(ypred, names=x.row.names),
                mse = rfout$mse,
                rsq = 1 - rfout$mse / (var(y) * (n-1) / n),
                oob.times = rfout$oob.times,
                importance = if (importance) matrix(rfout$impout, p, 2,
                                                    dimnames=list(x.col.names,
                                                                  c("%IncMSE","IncNodePurity"))) else
                  matrix(rfout$impout, ncol=1,
                         dimnames=list(x.col.names, "IncNodePurity")),
                importanceSD=if (importance) rfout$impSD else NULL,
                localImportance = if (localImp)
                  matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
                                                           x.row.names)) else NULL,
                proximity = if (proximity) matrix(rfout$prox, n, n,
                                                  dimnames = list(x.row.names, x.row.names)) else NULL,
                ntree = ntree,
                mtry = mtry,
                forest = if (keep.forest)
                  c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
                            "rightDaughter", "nodepred", "bestvar",
                            "xbestsplit")],
                    list(ncat = ncat), list(nrnodes=max.nodes),
                    list(ntree=ntree), list(xlevels=xlevels)) else NULL,
                coefs = if (corr.bias) rfout$coef else NULL,
                y = y,
                test = if(testdat) {
                  list(predicted = structure(rfout$ytestpred,
                                             names=xts.row.names),
                       mse = if(labelts) rfout$msets else NULL,
                       rsq = if(labelts) 1 - rfout$msets /
                         (var(ytest) * (n-1) / n) else NULL,
                       proximity = if (proximity)
                         matrix(rfout$proxts / ntree, nrow = ntest,
                                dimnames = list(xts.row.names,
                                                c(xts.row.names,
                                                  x.row.names))) else NULL)
                } else NULL,
                inbag = if (keep.inbag)
                  matrix(rfout$inbag, nrow(rfout$inbag),
                         dimnames=list(x.row.names, NULL)) else NULL)
    class(out) <- "randomForest"
    return(out)
  }
# Fit a joint random forest (JRF) for ONE target gene across `nclasses`
# related data sets.  `x` stacks the predictor matrices of all classes
# (variables in rows, one block of p rows per class; samples in columns),
# `y` holds the target values (one row per class).  The heavy lifting is
# done by the compiled routine "JRF_regRF"; many arguments are accepted
# for randomForest interface compatibility but are overwritten below
# before the .C call (xtest/ytest, nodesize, nPerm, do.trace, ...).
# Returns a "randomForest"-classed list whose $importance holds p*nclasses
# scores (stacked by class), consumed by importance() in iJRF().
"JRF_onetarget" <-
  function(x, y=NULL, xtest=NULL, ytest=NULL, ntree,
           sampsize,
           totsize = if (replace) ncol(x) else ceiling(.632*ncol(x)),
           mtry=if (!is.null(y) && !is.factor(y))
           max(floor(nrow(x)/3), 1) else floor(sqrt(nrow(x))),
           replace=TRUE, classwt=NULL, cutoff, strata,
           nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
           maxnodes=NULL,
           importance=FALSE, localImp=FALSE, nPerm=1,
           proximity, oob.prox=proximity,
           norm.votes=TRUE, do.trace=FALSE,
           keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
           keep.inbag=FALSE, nclasses, ...) {
    ww=1/sampsize;   # per-class weights: reciprocal of each class' sample size
    nclass=mylevels=ipi=sw=NULL
    addclass <- is.null(y)
    classRF <- addclass || is.factor(y)
    if (!classRF && length(unique(y)) <= 5) {
        warning("The response has five or fewer unique values. Are you sure you want to do regression?")
    }
    if (classRF && !addclass && length(unique(y)) < 2)
        stop("Need at least two classes to do classification.")
    n <- ncol(y) # number of samples
    p <- nrow(x)/nclasses # number of variables
    if (n == 0) stop("data (x) has 0 rows")
    x.row.names <- rownames(x)
    x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
    # Test-set support is disabled here: xtest/ytest are forced to NULL so
    # the testdat branch below never runs.
    keep.forest=!is.null(y)
    xtest=NULL; ytest=NULL
    testdat <- !is.null(xtest)
    if (testdat) {
        if (ncol(x) != ncol(xtest))
            stop("x and xtest must have same number of columns")
        ntest <- nrow(xtest)
        xts.row.names <- rownames(xtest)
    }
    prox <- proxts <- double(1)
    ## Check for NAs.
    if (any(is.na(x))) stop("NA not permitted in predictors")
    if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
    if (any(is.na(y))) stop("NA not permitted in response")
    if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
    if (is.data.frame(x)) {
        xlevels <- lapply(x, mylevels)
        ncat <- sapply(xlevels, length)
        ## Treat ordered factors as numerics.
        ncat <- ifelse(sapply(x, is.ordered), 1, ncat)
        x <- data.matrix(x)
        if(testdat) {
            if(!is.data.frame(xtest))
                stop("xtest must be data frame if x is")
            xfactor <- which(sapply(xtest, is.factor))
            if (length(xfactor) > 0) {
                for (i in xfactor) {
                    if (any(! levels(xtest[[i]]) %in% xlevels[[i]]))
                        stop("New factor levels in xtest not present in x")
                    xtest[[i]] <-
                        factor(xlevels[[i]][match(xtest[[i]], xlevels[[i]])],
                               levels=xlevels[[i]])
                }
            }
            xtest <- data.matrix(xtest)
        }
    } else {
        ncat <- rep(1, p)
        xlevels <- as.list(rep(0, p))
    }
    maxcat <- max(ncat)
    if (maxcat > 32)
        stop("Can not handle categorical predictors with more than 32 categories.")
    addclass <- FALSE
    proximity <- addclass
    # Output buffers sized for the stacked per-class importances.
    impout <- matrix(0.0, p*nclasses, 2)
    impSD <- matrix(0.0, p*nclasses, 1)
    # names(impSD) <- x.col.names
    nsample <- if (addclass) 2 * n else n
    Stratify <- length(n) > 1
    # Hard-coded regression settings (override whatever the caller passed).
    nodesize=5;
    nrnodes <- 2 * trunc(n/max(1, nodesize - 4)) + 1
    maxnodes=NULL
    if (!is.null(maxnodes)) {
        ## convert # of terminal nodes to total # of nodes
        maxnodes <- 2 * maxnodes - 1
        if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
        nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
    }
    ## Compiled code expects variables in rows and observations in columns.
    # x <- t(x)
    storage.mode(x) <- "double"
    xtest <- double(1)
    ytest <- double(1)
    ntest <- 1
    labelts <- FALSE
    nt <- if (keep.forest) ntree else 1
    nPerm=1
    do.trace=F; oob.prox=F
    corr.bias=FALSE
    keep.inbag=FALSE
    impmat <- double(1)
    replace=T
    # The argument order here must match the C signature of JRF_regRF
    # exactly; the trailing [c(16:28, 36:41)] keeps only the named output
    # components (ypred .. mse, xts .. ytestpred-era slots).
    rfout <- .C("JRF_regRF",
                x,
                y, ww,
                as.integer(c(totsize, p)),
                sampsize=as.integer(sampsize), as.integer(totsize),
                as.integer(nodesize),
                as.integer(nrnodes),
                as.integer(ntree),
                as.integer(mtry),
                as.integer(c(importance, localImp, nPerm)),
                as.integer(ncat),
                as.integer(maxcat),
                as.integer(do.trace),
                as.integer(proximity),
                as.integer(oob.prox),
                as.integer(corr.bias),
                ypred = double(n * nclasses),
                impout = impout,
                impmat = impmat,
                impSD = impSD,
                prox = prox,
                ndbigtree = integer(ntree),
                nodestatus = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                leftDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                rightDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                nodepred = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                bestvar = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                xbestsplit = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                mse = double(ntree * nclasses),
                keep = as.integer(c(keep.forest, keep.inbag)),
                replace = as.integer(replace),
                testdat = as.integer(testdat),
                xts = xtest,
                ntest = as.integer(ntest),
                yts = as.double(ytest),
                labelts = as.integer(labelts),
                ytestpred = double(ntest),
                proxts = proxts,
                msets = double(if (labelts) ntree else 1),
                coef = double(2),
                oob.times = integer(n),
                inbag = if (keep.inbag)
                matrix(integer(n * ntree), n) else integer(1), as.integer(nclasses))[c(16:28, 36:41)]
    # ## Format the forest component, if present.
    if (keep.forest) {
        # Trim tree matrices to the largest tree actually grown.
        max.nodes <- max(rfout$ndbigtree)
        rfout$nodestatus <-
            rfout$nodestatus[1:max.nodes, , drop=FALSE]
        rfout$bestvar <-
            rfout$bestvar[1:max.nodes, , drop=FALSE]
        rfout$nodepred <-
            rfout$nodepred[1:max.nodes, , drop=FALSE]
        rfout$xbestsplit <-
            rfout$xbestsplit[1:max.nodes, , drop=FALSE]
        rfout$leftDaughter <-
            rfout$leftDaughter[1:max.nodes, , drop=FALSE]
        rfout$rightDaughter <-
            rfout$rightDaughter[1:max.nodes, , drop=FALSE]
    }
    cl <- match.call()
    cl[[1]] <- as.name("randomForest")
    # ## Make sure those obs. that have not been OOB get NA as prediction.
    ypred <- rfout$ypred
    if (any(rfout$oob.times < 1)) {
        ypred[rfout$oob.times == 0] <- NA
    }
    # Assemble a randomForest-compatible result object.  NOTE(review):
    # predicted is hard-coded to 0 and rsq uses only the first class' y;
    # downstream code (iJRF) only reads $importance/$importanceSD.
    out <- list(call = cl,
                type = "regression",
                predicted =0,
                mse = rfout$mse,
                rsq = 1 - rfout$mse / (var(y[1,]) * (n-1) / n),
                oob.times = rfout$oob.times,
                importance = if (importance) matrix(rfout$impout, p * nclasses, 2) else
                matrix(rfout$impout, ncol=1),
                importanceSD=if (importance) rfout$impSD else NULL,
                localImportance = if (localImp)
                matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
                x.row.names)) else NULL,
                proximity = if (proximity) matrix(rfout$prox, n, n,
                dimnames = list(x.row.names, x.row.names)) else NULL,
                ntree = ntree,
                mtry = mtry,
                forest = if (keep.forest)
                c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
                          "rightDaughter", "nodepred", "bestvar",
                          "xbestsplit")],
                  list(ncat = ncat), list(nrnodes=max.nodes),
                  list(ntree=ntree), list(xlevels=xlevels)) else NULL,
                coefs = if (corr.bias) rfout$coef else NULL,
                y = y,
                test = if(testdat) {
                    list(predicted = structure(rfout$ytestpred,
                                               names=xts.row.names),
                         mse = if(labelts) rfout$msets else NULL,
                         rsq = if(labelts) 1 - rfout$msets /
                         (var(ytest) * (n-1) / n) else NULL,
                         proximity = if (proximity)
                         matrix(rfout$proxts / ntree, nrow = ntest,
                                dimnames = list(xts.row.names,
                                                c(xts.row.names,
                                                  x.row.names))) else NULL)
                } else NULL,
                inbag = if (keep.inbag)
                matrix(rfout$inbag, nrow(rfout$inbag),
                       dimnames=list(x.row.names, NULL)) else NULL)
    # print(rfout$mse)
    class(out) <- "randomForest"
    return(out)
  }
# Estimate multiple related gene networks jointly.
#
# X          : list of (p genes x n_c samples) expression matrices, one per class.
# W          : optional (p x p) matrix of prior interaction weights; when NULL a
#              standard (unweighted) JRF is fit, otherwise the integrative iJRF.
# ntree, mtry: forest size / candidate predictors per split (defaults 1000, sqrt(p)).
# genes.name : gene labels (defaults to G1..Gp).
#
# Returns a data frame with one row per unordered gene pair (gene1, gene2)
# and one importance column per class, where each pairwise score is the
# average of the two directed importances.
#
# Fix vs. original: removed a leftover debugging statement
# `print(is.null(W))` that wrote to the console on every call.
"iJRF" <-
  function(X, W=NULL,ntree=NULL,mtry=NULL,genes.name=NULL) {
    p<-dim(X[[1]])[1];
    if (is.null(mtry)) mtry=sqrt(p)
    if (is.null(ntree)) ntree=1000
    if (is.null(genes.name)) genes.name=paste("G",seq(1,p),sep="")
    nclasses<-length(X)
    sampsize<-rep(0,nclasses)
    imp<-array(0,c(p,length(genes.name),nclasses))
    imp.final<-matrix(0,p*(p-1)/2,nclasses);
    # vec1/vec2 enumerate the lower-triangular gene pairs in the same order
    # used to flatten the importance matrices below.
    vec1<-matrix(rep(genes.name,p),p,p)
    vec2<-t(vec1)
    vec1<-vec1[lower.tri(vec1,diag=FALSE)]
    vec2<-vec2[lower.tri(vec2,diag=FALSE)]
    index<-seq(1,p)
    # Standardize each gene within each class; record per-class sample sizes.
    for (j in 1:nclasses) { X[[j]] <- t(apply(X[[j]], 1, function(x) { (x - mean(x)) / sd(x) } ))
    sampsize[j]<-dim(X[[j]])[2] }
    tot<-max(sampsize);
    if (is.null(W)) { # -- implement standard JRF
      for (j in 1:length(genes.name)){
        # Stack the (p-1) non-target genes of every class into one predictor
        # matrix, zero-padded to the largest class.
        covar<-matrix(0,(p-1)*nclasses,tot)
        y<-matrix(0,nclasses,tot)
        for (c in 1:nclasses) {
          y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][j,])
          covar[seq((c-1)*(p-1)+1,c*(p-1)),seq(1,sampsize[c])]<-X[[c]][-j,]
        }
        jrf.out<-JRF_onetarget(x=covar,y=y,mtry=mtry,importance=TRUE,sampsize=sampsize,nclasses=nclasses,ntree=ntree)
        for (s in 1:nclasses) imp[-j,j,s]<-importance(jrf.out,scale=FALSE)[seq((p-1)*(s-1)+1,(p-1)*(s-1)+p-1)] #- save importance score per class
      }
    } else { # -- implement iJRF (integrative JRF)
      for (j in 1:length(genes.name)){
        # Sampling weights for target j: prior column j, self-weight zeroed,
        # renormalized, then sorted ascending (the C code expects sorted sw).
        weights.rf<-as.matrix(W[,j]);
        weights.rf[j]<-0
        weights.rf<-weights.rf/sum(weights.rf);
        w.sorted<-sort(weights.rf,decreasing = FALSE,index.return=TRUE)
        index<-w.sorted$ix
        w.sorted<-w.sorted$x
        covar<-matrix(0,p*nclasses,tot)
        y<-matrix(0,nclasses,tot)
        for (c in 1:nclasses) {
          y[c,seq(1,sampsize[c])]<-X[[c]][j,]
          covar[seq((c-1)*(p)+1,c*p),seq(1,sampsize[c])]<-X[[c]][index,]
        }
        jrf.out<-iJRF_onetarget(x=covar,y=y,mtry=mtry,importance=TRUE,sampsize=sampsize,
                                nclasses=nclasses,ntree=ntree,sw=as.double(w.sorted))
        # Undo the weight sorting when writing importances back.
        for (s in 1:nclasses) imp[index,j,s]<-importance(jrf.out,scale=FALSE)[seq(p*(s-1)+1,p*s)] #- save importance score per class
      }
    }
    # --- Derive importance score for each interaction (symmetrized average
    # of the two directed scores).
    for (s in 1:nclasses){
      imp.s<-imp[,,s]; t.imp<-t(imp.s)
      imp.final[,s]<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
    }
    out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
    colnames(out)<-c(paste0('gene',1:2),paste0('importance',1:nclasses))
    return(out)
  }
# iRafNet: sampling-weighted random forest network reconstruction on a
# single data set.
#
# X          : one-element list holding a (p genes x n samples) matrix.
# W          : (p x p) prior weight matrix (all-ones when NULL, i.e. no prior).
# ntree, mtry: forest size / candidate predictors per split (defaults 1000, sqrt(p)).
# genes.name : gene labels (defaults to G1..Gp).
#
# Returns a data frame with one row per unordered gene pair and the
# symmetrized importance score.
#
# Fix vs. original: the user-supplied (or defaulted) `ntree` and `mtry`
# were computed at the top but then ignored — the per-target call
# hard-coded ntree=1000 and mtry=round(sqrt(p-1)).  They are now passed
# through, so the arguments actually take effect.
"iRafNet" <- function(X,W,ntree=NULL,mtry=NULL,genes.name) {
  X<-t(X[[1]])                       # samples in rows, genes in columns
  p<-dim(X)[2]
  if (is.null(mtry)) mtry=sqrt(p)
  if (is.null(ntree)) ntree=1000
  if (is.null(genes.name)) genes.name=paste("G",seq(1,p),sep="")
  if (is.null(W)) W=matrix(1,p,p)
  imp<-matrix(0,p,p)
  imp.final<-matrix(0,p*(p-1)/2,1);
  # Lower-triangular enumeration of gene pairs, matching imp.final's order.
  vec1<-matrix(rep(genes.name,p),p,p)
  vec2<-t(vec1)
  vec1<-vec1[lower.tri(vec1,diag=FALSE)]
  vec2<-vec2[lower.tri(vec2,diag=FALSE)]
  # Standardize each gene (column) to mean 0, sd 1.
  X <- (apply(X, 2, function(x) { (x - mean(x)) / sd(x) } ))
  for (j in 1:p){
    y<-X[,j];
    # Prior weights for target j: zero out the self-weight, renormalize,
    # and sort ascending (the C code expects sorted sampling weights).
    weights.rf<-as.matrix(W[,j]);
    weights.rf[j]<-0
    weights.rf<-weights.rf/sum(weights.rf);
    w.sorted<-sort(weights.rf,decreasing = FALSE,index.return=TRUE)
    index<-w.sorted$ix
    x.sorted<-X[,index]
    w.sorted<-w.sorted$x
    rout<-irafnet_onetarget(x=x.sorted,y=as.double(y),importance=TRUE,
                            mtry=round(mtry),ntree=ntree,
                            sw=as.double(w.sorted))
    imp[index,j]<-c(importance(rout))   # undo the weight sorting
  }
  # --- Return importance score for each regulation (symmetrized average).
  imp.s<-imp; t.imp<-t(imp.s)
  imp.final<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
  out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
  colnames(out)<-c(paste0('gene',1:2),'importance')
  return(out)
}
# ptmJRF: joint random forest where class 1 measures post-translational
# modifications (possibly several PTM rows per gene, labelled by ptm.name)
# and the remaining classes are gene-level.  For each PTM target j the
# predictors are all PTMs of OTHER genes (class 1) plus all other genes
# (classes 2..nclasses); PTM-level importances are then averaged back to
# gene level and symmetrized into one score per gene pair and class.
"ptmJRF" <-
  function(X, ntree,mtry,genes.name,ptm.name) {
    nclasses<-length(X)
    sampsize<-rep(0,nclasses)
    # Standardize each row (gene/PTM) within each class; record sample sizes.
    for (j in 1:nclasses) { X[[j]] <- t(apply(X[[j]], 1, function(x) { (x - mean(x)) / sd(x) } ))
    sampsize[j]<-dim(X[[j]])[2] }
    p<-length(genes.name); ptm.p<-length(ptm.name)
    if (is.null(mtry)) mtry=p;
    # --- reorder rows in PTM object X[[1]] so that all PTMs of a gene are
    # contiguous; locptm[g] = first row of gene g's PTM block, numptm[g] =
    # number of PTMs of gene g.  ptm.name is rewritten to match the new order.
    X.ptm<-X[[1]]; s=0 ; locptm<-numptm<-rep(0,p)
    ptm.new<-ptm.name
    for (j in 1:p){
      ptm.j<-X[[1]][ptm.name==genes.name[j],]
      n.j<-sum(ptm.name==genes.name[j])
      X.ptm[seq(s+1,s+n.j),]<-ptm.j
      locptm[j]<-(s+1)
      numptm[j]<-n.j
      ptm.new[seq(s+1,s+n.j)]<-rep(genes.name[j],n.j)
      s<-s+n.j
    }
    X[[1]]<-X.ptm
    ptm.name<-ptm.new
    imp<-array(0,c(p,length(genes.name),nclasses))
    imp.final<-matrix(0,p*(p-1)/2,nclasses);
    # Lower-triangular enumeration of gene pairs for the output data frame.
    vec1<-matrix(rep(genes.name,p),p,p)
    vec2<-t(vec1)
    vec1<-vec1[lower.tri(vec1,diag=FALSE)]
    vec2<-vec2[lower.tri(vec2,diag=FALSE)]
    index<-seq(1,p)
    imp<-array(0,c(p,ptm.p,nclasses))   # genes x PTM targets x classes
    for (j in 1:ptm.p){
      covar<-matrix(0,ptm.p*nclasses,max(sampsize))
      y<-matrix(0,nclasses,max(sampsize))
      for (c in 1:nclasses) {
        if (c==1) {
          # Class 1 (PTM data): target is PTM row j; predictors are all PTM
          # rows except the block belonging to the target's gene.
          y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][j,])
          covar[seq(1,ptm.p-numptm[genes.name==ptm.name[j]]),seq(1,sampsize[c])]<-X[[c]][-seq(locptm[genes.name==ptm.name[j]],locptm[genes.name==ptm.name[j]]+numptm[genes.name==ptm.name[j]]-1),]
          n.covar<-ptm.p-numptm[genes.name==ptm.name[j]] } else {
            # Gene-level classes: target is the matching gene row; predictors
            # are the other p-1 genes, appended below the PTM block.
            y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][genes.name==ptm.name[j],])
            covar[seq(n.covar+1,n.covar+p-1),seq(1,sampsize[c])]<-X[[c]][-j,]
            n.covar<-n.covar+p-1
          }
      }
      covar<-covar[seq(1,n.covar),]
      # Adjust the PTM-block bookkeeping for the removed target gene: shift
      # block starts after it and drop its entry.
      numptm.j<-numptm[genes.name!=ptm.name[j]]
      index<-seq(1,length(locptm))
      index<-index[genes.name==ptm.name[j]]
      locptm.j<-locptm;
      if (index != p) locptm.j[seq(index+1,length(locptm))]<-locptm.j[seq(index+1,length(locptm))]-numptm[index]
      locptm.j<-locptm.j[-index]
      rfout<-ptmJRF_onetarget(x=covar,y=y,p=(p-1),mptm=ptm.p-numptm[genes.name==ptm.name[j]],
                              mtry=sqrt(p-1),importance=TRUE,sampsize=sampsize,nclasses=nclasses,
                              ntree=ntree,numptm=numptm.j,locptm=locptm.j)
      # NOTE(review): importance() is called with its default scale=TRUE here,
      # unlike iJRF/iRafNet which use scale=FALSE — confirm this is intended.
      imp.rfout<-importance(rfout)
      for (s in 1:nclasses) imp[genes.name!=ptm.name[j],j,s]<-imp.rfout[seq((p-1)*(s-1)+1,(p-1)*(s-1)+p-1)]
    }
    # Collapse PTM-level target columns to gene level (mean over a gene's PTMs).
    imp.new<-array(0,c(p,p,nclasses))
    for (j in 1:p){
      if (sum(ptm.name==genes.name[j])==1){
        for (c in 1:nclasses) imp.new[,j,c]<-imp[,ptm.name==genes.name[j],c]
      } else {
        for (c in 1:nclasses) imp.new[,j,c]<-apply(imp[,ptm.name==genes.name[j],c], 1, function(x) { mean(x) } )
      }
    }
    # --- Derive importance score for each interaction (symmetrized average).
    for (s in 1:nclasses){
      imp.s<-imp.new[,,s]; t.imp<-t(imp.s)
      imp.final[,s]<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
    }
    out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
    colnames(out)<-c(paste0('gene',1:2),paste0('importance',1:nclasses))
    return(out)
  }
# --- MAIN function
# Dispatch to one of the three network-estimation models.
#
# X          : list of expression matrices (one per class; see each model).
# W          : optional prior weight matrix (used by iJRF and iRafNet).
# ntree      : number of trees (default 1000).
# mtry       : candidate predictors per split (model-specific default).
# model      : "iJRF", "iRafNet" or "ptmJRF" (required).
# genes.name : gene labels; ptm.name : PTM labels (ptmJRF only).
#
# Returns the chosen model's pairwise-importance data frame.
#
# Fix vs. original: a missing model used to be reported via
# print("Error: Specify Model") — returning a string instead of failing —
# and an unrecognized model fell through to "object 'out' not found".
# Both cases now raise an informative error via stop().
"iJRFNet" <-
  function(X, W=NULL,ntree=NULL,mtry=NULL,model=NULL,genes.name,ptm.name=NULL) {
    if (is.null(model))
      stop("Specify 'model': one of \"iJRF\", \"iRafNet\" or \"ptmJRF\"",
           call. = FALSE)
    if (is.null(ntree)) ntree <- 1000
    out <- switch(model,
                  "iJRF"    = iJRF(X, W, ntree, mtry, genes.name),
                  "iRafNet" = iRafNet(X, W, ntree, mtry, genes.name),
                  "ptmJRF"  = ptmJRF(X, ntree, mtry, genes.name, ptm.name),
                  stop("Unknown model '", model,
                       "': must be one of \"iJRF\", \"iRafNet\" or \"ptmJRF\"",
                       call. = FALSE))
    return(out)
  }
|
/R/iJRFNet.R
|
no_license
|
petraf01/iJRF
|
R
| false
| false
| 40,225
|
r
|
#' Joint Random Forest for the simultaneous estimation of multiple related networks
#'
#' Main entry point: iJRFNet()
importance <- function(x, scale=TRUE) {
    # --- Modified version of randomForest::importance, specialized for the
    # objects built by the *_onetarget fitters in this file: it validates the
    # object, optionally rescales the non-final importance columns by their
    # standard deviations, and returns the SECOND importance column as a
    # plain vector.  (The original's `type`/`class` arguments were hard-coded
    # to NULL, so the corresponding branches were unreachable and are omitted.)
    if (!inherits(x, "randomForest"))
        stop("x is not of class randomForest")
    score <- x$importance
    if (scale) {
        # Divide every column but the last by its SD, guarding against
        # (numerically) zero standard deviations.
        sd.vals <- x$importanceSD
        divisor <- ifelse(sd.vals < .Machine$double.eps, 1, sd.vals)
        score[, -ncol(score)] <- score[, -ncol(score), drop=FALSE] / divisor
    }
    # Column 2 holds the node-purity style measure consumed by the callers.
    score[, 2]
}
# --- Functions called by iJRFNet
# Fit an integrative joint random forest (iJRF) for ONE target gene across
# `nclasses` related data sets, with prior sampling weights `sw` (sorted
# ascending by the caller).  Identical in structure to JRF_onetarget except
# that the compiled routine "iJRF_regRF" additionally receives `sw`.
# Many arguments are accepted for randomForest interface compatibility but
# are overwritten below before the .C call.
"iJRF_onetarget" <-
  function(x, y=NULL, xtest=NULL, ytest=NULL, ntree,
           sampsize,
           totsize = if (replace) ncol(x) else ceiling(.632*ncol(x)),
           mtry=if (!is.null(y) && !is.factor(y))
           max(floor(nrow(x)/3), 1) else floor(sqrt(nrow(x))),
           replace=TRUE, classwt=NULL, cutoff, strata,
           nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
           maxnodes=NULL,
           importance=FALSE, localImp=FALSE, nPerm=1,
           proximity, oob.prox=proximity,
           norm.votes=TRUE, do.trace=FALSE,
           keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
           keep.inbag=FALSE, nclasses, sw,...) {
    ww=1/sampsize;   # per-class weights: reciprocal of each class' sample size
    nclass=mylevels=ipi=NULL
    addclass <- is.null(y)
    classRF <- addclass || is.factor(y)
    if (!classRF && length(unique(y)) <= 5) {
        warning("The response has five or fewer unique values. Are you sure you want to do regression?")
    }
    if (classRF && !addclass && length(unique(y)) < 2)
        stop("Need at least two classes to do classification.")
    n <- ncol(y) # number of samples
    p <- nrow(x)/nclasses # number of variables
    if (n == 0) stop("data (x) has 0 rows")
    x.row.names <- rownames(x)
    x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
    # Test-set support is disabled: xtest/ytest are forced to NULL so the
    # testdat branch below never runs.
    keep.forest=!is.null(y)
    xtest=NULL; ytest=NULL
    testdat <- !is.null(xtest)
    if (testdat) {
        if (ncol(x) != ncol(xtest))
            stop("x and xtest must have same number of columns")
        ntest <- nrow(xtest)
        xts.row.names <- rownames(xtest)
    }
    prox <- proxts <- double(1)
    ## Check for NAs.
    if (any(is.na(x))) stop("NA not permitted in predictors")
    if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
    if (any(is.na(y))) stop("NA not permitted in response")
    if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
    if (is.data.frame(x)) {
        xlevels <- lapply(x, mylevels)
        ncat <- sapply(xlevels, length)
        ## Treat ordered factors as numerics.
        ncat <- ifelse(sapply(x, is.ordered), 1, ncat)
        x <- data.matrix(x)
        if(testdat) {
            if(!is.data.frame(xtest))
                stop("xtest must be data frame if x is")
            xfactor <- which(sapply(xtest, is.factor))
            if (length(xfactor) > 0) {
                for (i in xfactor) {
                    if (any(! levels(xtest[[i]]) %in% xlevels[[i]]))
                        stop("New factor levels in xtest not present in x")
                    xtest[[i]] <-
                        factor(xlevels[[i]][match(xtest[[i]], xlevels[[i]])],
                               levels=xlevels[[i]])
                }
            }
            xtest <- data.matrix(xtest)
        }
    } else {
        ncat <- rep(1, p)
        xlevels <- as.list(rep(0, p))
    }
    maxcat <- max(ncat)
    if (maxcat > 32)
        stop("Can not handle categorical predictors with more than 32 categories.")
    addclass <- FALSE
    proximity <- addclass
    # Output buffers sized for the stacked per-class importances.
    impout <- matrix(0.0, p*nclasses, 2)
    impSD <- matrix(0.0, p*nclasses, 1)
    # names(impSD) <- x.col.names
    nsample <- if (addclass) 2 * n else n
    Stratify <- length(n) > 1
    # Hard-coded regression settings (override whatever the caller passed).
    nodesize=5;
    nrnodes <- 2 * trunc(n/max(1, nodesize - 4)) + 1
    maxnodes=NULL
    if (!is.null(maxnodes)) {
        ## convert # of terminal nodes to total # of nodes
        maxnodes <- 2 * maxnodes - 1
        if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
        nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
    }
    ## Compiled code expects variables in rows and observations in columns.
    # x <- t(x)
    storage.mode(x) <- "double"
    xtest <- double(1)
    ytest <- double(1)
    ntest <- 1
    labelts <- FALSE
    nt <- if (keep.forest) ntree else 1
    nPerm=1
    do.trace=F; oob.prox=F
    corr.bias=FALSE
    keep.inbag=FALSE
    impmat <- double(1)
    replace=T
    # Argument order must match the C signature of iJRF_regRF exactly;
    # note the trailing nclasses and sw arguments, and the positional
    # [c(16:28, 36:41)] slice that keeps only the output components.
    rfout <- .C("iJRF_regRF",
                x,
                y, ww,
                as.integer(c(totsize, p)),
                sampsize=as.integer(sampsize), as.integer(totsize),
                as.integer(nodesize),
                as.integer(nrnodes),
                as.integer(ntree),
                as.integer(mtry),
                as.integer(c(importance, localImp, nPerm)),
                as.integer(ncat),
                as.integer(maxcat),
                as.integer(do.trace),
                as.integer(proximity),
                as.integer(oob.prox),
                as.integer(corr.bias),
                ypred = double(n * nclasses),
                impout = impout,
                impmat = impmat,
                impSD = impSD,
                prox = prox,
                ndbigtree = integer(ntree),
                nodestatus = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                leftDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                rightDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                nodepred = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                bestvar = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
                xbestsplit = matrix(double(nrnodes * nt * nclasses), ncol=nt),
                mse = double(ntree * nclasses),
                keep = as.integer(c(keep.forest, keep.inbag)),
                replace = as.integer(replace),
                testdat = as.integer(testdat),
                xts = xtest,
                ntest = as.integer(ntest),
                yts = as.double(ytest),
                labelts = as.integer(labelts),
                ytestpred = double(ntest),
                proxts = proxts,
                msets = double(if (labelts) ntree else 1),
                coef = double(2),
                oob.times = integer(n),
                inbag = if (keep.inbag)
                matrix(integer(n * ntree), n) else integer(1), as.integer(nclasses),
                sw = as.double(sw))[c(16:28, 36:41)]
    # ## Format the forest component, if present.
    if (keep.forest) {
        # Trim tree matrices to the largest tree actually grown.
        max.nodes <- max(rfout$ndbigtree)
        rfout$nodestatus <-
            rfout$nodestatus[1:max.nodes, , drop=FALSE]
        rfout$bestvar <-
            rfout$bestvar[1:max.nodes, , drop=FALSE]
        rfout$nodepred <-
            rfout$nodepred[1:max.nodes, , drop=FALSE]
        rfout$xbestsplit <-
            rfout$xbestsplit[1:max.nodes, , drop=FALSE]
        rfout$leftDaughter <-
            rfout$leftDaughter[1:max.nodes, , drop=FALSE]
        rfout$rightDaughter <-
            rfout$rightDaughter[1:max.nodes, , drop=FALSE]
    }
    cl <- match.call()
    cl[[1]] <- as.name("randomForest")
    # ## Make sure those obs. that have not been OOB get NA as prediction.
    ypred <- rfout$ypred
    if (any(rfout$oob.times < 1)) {
        ypred[rfout$oob.times == 0] <- NA
    }
    # Assemble a randomForest-compatible result object.  NOTE(review):
    # predicted is hard-coded to 0 and rsq uses only the first class' y;
    # downstream code (iJRF) only reads $importance/$importanceSD.
    out <- list(call = cl,
                type = "regression",
                predicted =0,
                mse = rfout$mse,
                rsq = 1 - rfout$mse / (var(y[1,]) * (n-1) / n),
                oob.times = rfout$oob.times,
                importance = if (importance) matrix(rfout$impout, p * nclasses, 2) else
                matrix(rfout$impout, ncol=1),
                importanceSD=if (importance) rfout$impSD else NULL,
                localImportance = if (localImp)
                matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
                x.row.names)) else NULL,
                proximity = if (proximity) matrix(rfout$prox, n, n,
                dimnames = list(x.row.names, x.row.names)) else NULL,
                ntree = ntree,
                mtry = mtry,
                forest = if (keep.forest)
                c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
                          "rightDaughter", "nodepred", "bestvar",
                          "xbestsplit")],
                  list(ncat = ncat), list(nrnodes=max.nodes),
                  list(ntree=ntree), list(xlevels=xlevels)) else NULL,
                coefs = if (corr.bias) rfout$coef else NULL,
                y = y,
                test = if(testdat) {
                    list(predicted = structure(rfout$ytestpred,
                                               names=xts.row.names),
                         mse = if(labelts) rfout$msets else NULL,
                         rsq = if(labelts) 1 - rfout$msets /
                         (var(ytest) * (n-1) / n) else NULL,
                         proximity = if (proximity)
                         matrix(rfout$proxts / ntree, nrow = ntest,
                                dimnames = list(xts.row.names,
                                                c(xts.row.names,
                                                  x.row.names))) else NULL)
                } else NULL,
                inbag = if (keep.inbag)
                matrix(rfout$inbag, nrow(rfout$inbag),
                       dimnames=list(x.row.names, NULL)) else NULL)
    # print(rfout$mse)
    class(out) <- "randomForest"
    return(out)
  }
# Fit a sampling-weighted random forest regression for ONE target gene on a
# single data set (iRafNet).  This is a near-verbatim copy of the
# randomForest regression code path with one addition: the prior sampling
# weights `sw` (sorted ascending by the caller) are passed through to the
# compiled routine "iRafNet_regRF".  x is (samples x predictors); y is the
# numeric response.  Returns a "randomForest"-classed list.
"irafnet_onetarget" <-
  function(x, y=NULL, xtest=NULL, ytest=NULL, ntree,
           mtry=if (!is.null(y) && !is.factor(y))
           max(floor(ncol(x)/3), 1) else floor(sqrt(ncol(x))),
           replace=TRUE, classwt=NULL, cutoff, strata,
           sampsize = if (replace) nrow(x) else ceiling(.632*nrow(x)),
           nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
           maxnodes=NULL,
           importance=FALSE, localImp=FALSE, nPerm=1,
           proximity, oob.prox=proximity,
           norm.votes=TRUE, do.trace=FALSE,
           keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
           keep.inbag=FALSE, sw) {
    addclass <- is.null(y)
    classRF <- addclass || is.factor(y)
    if (!classRF && length(unique(y)) <= 5) {
        warning("The response has five or fewer unique values. Are you sure you want to do regression?")
    }
    if (classRF && !addclass && length(unique(y)) < 2)
        stop("Need at least two classes to do classification.")
    n <- nrow(x)
    p <- ncol(x)
    if (n == 0) stop("data (x) has 0 rows")
    x.row.names <- rownames(x)
    x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
    ## overcome R's lazy evaluation:
    keep.forest <- keep.forest
    testdat <- !is.null(xtest)
    if (testdat) {
        if (ncol(x) != ncol(xtest))
            stop("x and xtest must have same number of columns")
        ntest <- nrow(xtest)
        xts.row.names <- rownames(xtest)
    }
    ## Make sure mtry is in reasonable range.
    if (mtry < 1 || mtry > p)
        warning("invalid mtry: reset to within valid range")
    mtry <- max(1, min(p, round(mtry)))
    if (!is.null(y)) {
        if (length(y) != n) stop("length of response must be the same as predictors")
        addclass <- FALSE
    } else {
        # Unsupervised mode: synthesize a second "random" class by
        # duplicating x (randomForest's standard trick).
        if (!addclass) addclass <- TRUE
        y <- factor(c(rep(1, n), rep(2, n)))
        x <- rbind(x, x)
    }
    ## Check for NAs.
    if (any(is.na(x))) stop("NA not permitted in predictors")
    if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
    if (any(is.na(y))) stop("NA not permitted in response")
    if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
    # All predictors are treated as numeric (no factor handling here).
    ncat <- rep(1, p)
    xlevels <- as.list(rep(0, p))
    maxcat <- max(ncat)
    if (maxcat > 32)
        stop("Can not handle categorical predictors with more than 32 categories.")
    if (classRF) {
        # Classification-only validation of cutoff/classwt (unused in the
        # regression calls made by iRafNet(), kept for compatibility).
        nclass <- length(levels(y))
        ## Check for empty classes:
        if (any(table(y) == 0)) stop("Can't have empty classes in y.")
        if (!is.null(ytest)) {
            if (!is.factor(ytest)) stop("ytest must be a factor")
            if (!all(levels(y) == levels(ytest)))
                stop("y and ytest must have the same levels")
        }
        if (missing(cutoff)) {
            cutoff <- rep(1 / nclass, nclass)
        } else {
            if (sum(cutoff) > 1 || sum(cutoff) < 0 || !all(cutoff > 0) ||
                length(cutoff) != nclass) {
                stop("Incorrect cutoff specified.")
            }
            if (!is.null(names(cutoff))) {
                if (!all(names(cutoff) %in% levels(y))) {
                    stop("Wrong name(s) for cutoff")
                }
                cutoff <- cutoff[levels(y)]
            }
        }
        if (!is.null(classwt)) {
            if (length(classwt) != nclass)
                stop("length of classwt not equal to number of classes")
            ## If classwt has names, match to class labels.
            if (!is.null(names(classwt))) {
                if (!all(names(classwt) %in% levels(y))) {
                    stop("Wrong name(s) for classwt")
                }
                classwt <- classwt[levels(y)]
            }
            if (any(classwt <= 0)) stop("classwt must be positive")
            ipi <- 1
        } else {
            classwt <- rep(1, nclass)
            ipi <- 0
        }
    } else addclass <- FALSE
    if (missing(proximity)) proximity <- addclass
    if (proximity) {
        prox <- matrix(0.0, n, n)
        proxts <- if (testdat) matrix(0, ntest, ntest + n) else double(1)
    } else {
        prox <- proxts <- double(1)
    }
    if (localImp) {
        importance <- TRUE
        impmat <- matrix(0, p, n)
    } else impmat <- double(1)
    if (importance) {
        if (nPerm < 1) nPerm <- as.integer(1) else nPerm <- as.integer(nPerm)
        if (classRF) {
            impout <- matrix(0.0, p, nclass + 2)
            impSD <- matrix(0.0, p, nclass + 1)
        } else {
            impout <- matrix(0.0, p, 2)
            impSD <- double(p)
            names(impSD) <- x.col.names
        }
    } else {
        impout <- double(p)
        impSD <- double(1)
    }
    nsample <- if (addclass) 2 * n else n
    Stratify <- length(sampsize) > 1
    if ((!Stratify) && sampsize > nrow(x)) stop("sampsize too large")
    if (Stratify && (!classRF)) stop("sampsize should be of length one")
    if (classRF) {
        if (Stratify) {
            if (missing(strata)) strata <- y
            if (!is.factor(strata)) strata <- as.factor(strata)
            nsum <- sum(sampsize)
            if (length(sampsize) > nlevels(strata))
                stop("sampsize has too many elements.")
            if (any(sampsize <= 0) || nsum == 0)
                stop("Bad sampsize specification")
            ## If sampsize has names, match to class labels.
            if (!is.null(names(sampsize))) {
                sampsize <- sampsize[levels(strata)]
            }
            if (any(sampsize > table(strata)))
                stop("sampsize can not be larger than class frequency")
        } else {
            nsum <- sampsize
        }
        nrnodes <- 2 * trunc(nsum / nodesize) + 1
    } else {
        ## For regression trees, need to do this to get maximal trees.
        nrnodes <- 2 * trunc(sampsize/max(1, nodesize - 4)) + 1
    }
    if (!is.null(maxnodes)) {
        ## convert # of terminal nodes to total # of nodes
        maxnodes <- 2 * maxnodes - 1
        if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
        nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
    }
    ## Compiled code expects variables in rows and observations in columns.
    x <- t(x)
    storage.mode(x) <- "double"
    if (testdat) {
        xtest <- t(xtest)
        storage.mode(xtest) <- "double"
        if (is.null(ytest)) {
            ytest <- labelts <- 0
        } else {
            labelts <- TRUE
        }
    } else {
        xtest <- double(1)
        ytest <- double(1)
        ntest <- 1
        labelts <- FALSE
    }
    nt <- if (keep.forest) ntree else 1
    # Argument order must match the C signature of iRafNet_regRF exactly;
    # the final `sw` carries the (sorted) prior sampling weights, and the
    # positional [c(16:28, 36:41)] slice keeps only the output components.
    rfout <- .C("iRafNet_regRF",
                x,
                as.double(y),
                as.integer(c(n, p)),
                as.integer(sampsize),
                as.integer(nodesize),
                as.integer(nrnodes),
                as.integer(ntree),
                as.integer(mtry),
                as.integer(c(importance, localImp, nPerm)),
                as.integer(ncat),
                as.integer(maxcat),
                as.integer(do.trace),
                as.integer(proximity),
                as.integer(oob.prox),
                as.integer(corr.bias),
                ypred = double(n),
                impout = impout,
                impmat = impmat,
                impSD = impSD,
                prox = prox,
                ndbigtree = integer(ntree),
                nodestatus = matrix(integer(nrnodes * nt), ncol=nt),
                leftDaughter = matrix(integer(nrnodes * nt), ncol=nt),
                rightDaughter = matrix(integer(nrnodes * nt), ncol=nt),
                nodepred = matrix(double(nrnodes * nt), ncol=nt),
                bestvar = matrix(integer(nrnodes * nt), ncol=nt),
                xbestsplit = matrix(double(nrnodes * nt), ncol=nt),
                mse = double(ntree),
                keep = as.integer(c(keep.forest, keep.inbag)),
                replace = as.integer(replace),
                testdat = as.integer(testdat),
                xts = xtest,
                ntest = as.integer(ntest),
                yts = as.double(ytest),
                labelts = as.integer(labelts),
                ytestpred = double(ntest),
                proxts = proxts,
                msets = double(if (labelts) ntree else 1),
                coef = double(2),
                oob.times = integer(n),
                inbag = if (keep.inbag)
                matrix(integer(n * ntree), n) else integer(1), sw = as.double(sw))[c(16:28, 36:41)]
    ## Format the forest component, if present.
    if (keep.forest) {
        # Trim tree matrices to the largest tree actually grown.
        max.nodes <- max(rfout$ndbigtree)
        rfout$nodestatus <-
            rfout$nodestatus[1:max.nodes, , drop=FALSE]
        rfout$bestvar <-
            rfout$bestvar[1:max.nodes, , drop=FALSE]
        rfout$nodepred <-
            rfout$nodepred[1:max.nodes, , drop=FALSE]
        rfout$xbestsplit <-
            rfout$xbestsplit[1:max.nodes, , drop=FALSE]
        rfout$leftDaughter <-
            rfout$leftDaughter[1:max.nodes, , drop=FALSE]
        rfout$rightDaughter <-
            rfout$rightDaughter[1:max.nodes, , drop=FALSE]
    }
    cl <- match.call()
    cl[[1]] <- as.name("randomForest")
    ## Make sure those obs. that have not been OOB get NA as prediction.
    ypred <- rfout$ypred
    if (any(rfout$oob.times < 1)) {
        ypred[rfout$oob.times == 0] <- NA
    }
    # Assemble a randomForest-compatible result object.
    out <- list(call = cl,
                type = "regression",
                predicted = structure(ypred, names=x.row.names),
                mse = rfout$mse,
                rsq = 1 - rfout$mse / (var(y) * (n-1) / n),
                oob.times = rfout$oob.times,
                importance = if (importance) matrix(rfout$impout, p, 2,
                dimnames=list(x.col.names,
                              c("%IncMSE","IncNodePurity"))) else
                matrix(rfout$impout, ncol=1,
                       dimnames=list(x.col.names, "IncNodePurity")),
                importanceSD=if (importance) rfout$impSD else NULL,
                localImportance = if (localImp)
                matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
                x.row.names)) else NULL,
                proximity = if (proximity) matrix(rfout$prox, n, n,
                dimnames = list(x.row.names, x.row.names)) else NULL,
                ntree = ntree,
                mtry = mtry,
                forest = if (keep.forest)
                c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
                          "rightDaughter", "nodepred", "bestvar",
                          "xbestsplit")],
                  list(ncat = ncat), list(nrnodes=max.nodes),
                  list(ntree=ntree), list(xlevels=xlevels)) else NULL,
                coefs = if (corr.bias) rfout$coef else NULL,
                y = y,
                test = if(testdat) {
                    list(predicted = structure(rfout$ytestpred,
                                               names=xts.row.names),
                         mse = if(labelts) rfout$msets else NULL,
                         rsq = if(labelts) 1 - rfout$msets /
                         (var(ytest) * (n-1) / n) else NULL,
                         proximity = if (proximity)
                         matrix(rfout$proxts / ntree, nrow = ntest,
                                dimnames = list(xts.row.names,
                                                c(xts.row.names,
                                                  x.row.names))) else NULL)
                } else NULL,
                inbag = if (keep.inbag)
                matrix(rfout$inbag, nrow(rfout$inbag),
                       dimnames=list(x.row.names, NULL)) else NULL)
    class(out) <- "randomForest"
    return(out)
  }
"JRF_onetarget" <-
function(x, y=NULL, xtest=NULL, ytest=NULL, ntree,
sampsize,
totsize = if (replace) ncol(x) else ceiling(.632*ncol(x)),
mtry=if (!is.null(y) && !is.factor(y))
max(floor(nrow(x)/3), 1) else floor(sqrt(nrow(x))),
replace=TRUE, classwt=NULL, cutoff, strata,
nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1,
maxnodes=NULL,
importance=FALSE, localImp=FALSE, nPerm=1,
proximity, oob.prox=proximity,
norm.votes=TRUE, do.trace=FALSE,
keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE,
keep.inbag=FALSE, nclasses, ...) {
ww=1/sampsize;
nclass=mylevels=ipi=sw=NULL
addclass <- is.null(y)
classRF <- addclass || is.factor(y)
if (!classRF && length(unique(y)) <= 5) {
warning("The response has five or fewer unique values. Are you sure you want to do regression?")
}
if (classRF && !addclass && length(unique(y)) < 2)
stop("Need at least two classes to do classification.")
n <- ncol(y) # number of samples
p <- nrow(x)/nclasses # number of variables
if (n == 0) stop("data (x) has 0 rows")
x.row.names <- rownames(x)
x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x)
keep.forest=!is.null(y)
xtest=NULL; ytest=NULL
testdat <- !is.null(xtest)
if (testdat) {
if (ncol(x) != ncol(xtest))
stop("x and xtest must have same number of columns")
ntest <- nrow(xtest)
xts.row.names <- rownames(xtest)
}
prox <- proxts <- double(1)
## Check for NAs.
if (any(is.na(x))) stop("NA not permitted in predictors")
if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest")
if (any(is.na(y))) stop("NA not permitted in response")
if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest")
if (is.data.frame(x)) {
xlevels <- lapply(x, mylevels)
ncat <- sapply(xlevels, length)
## Treat ordered factors as numerics.
ncat <- ifelse(sapply(x, is.ordered), 1, ncat)
x <- data.matrix(x)
if(testdat) {
if(!is.data.frame(xtest))
stop("xtest must be data frame if x is")
xfactor <- which(sapply(xtest, is.factor))
if (length(xfactor) > 0) {
for (i in xfactor) {
if (any(! levels(xtest[[i]]) %in% xlevels[[i]]))
stop("New factor levels in xtest not present in x")
xtest[[i]] <-
factor(xlevels[[i]][match(xtest[[i]], xlevels[[i]])],
levels=xlevels[[i]])
}
}
xtest <- data.matrix(xtest)
}
} else {
ncat <- rep(1, p)
xlevels <- as.list(rep(0, p))
}
maxcat <- max(ncat)
if (maxcat > 32)
stop("Can not handle categorical predictors with more than 32 categories.")
addclass <- FALSE
proximity <- addclass
impout <- matrix(0.0, p*nclasses, 2)
impSD <- matrix(0.0, p*nclasses, 1)
# names(impSD) <- x.col.names
nsample <- if (addclass) 2 * n else n
Stratify <- length(n) > 1
nodesize=5;
nrnodes <- 2 * trunc(n/max(1, nodesize - 4)) + 1
maxnodes=NULL
if (!is.null(maxnodes)) {
## convert # of terminal nodes to total # of nodes
maxnodes <- 2 * maxnodes - 1
if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.")
nrnodes <- min(c(nrnodes, max(c(maxnodes, 1))))
}
## Compiled code expects variables in rows and observations in columns.
# x <- t(x)
storage.mode(x) <- "double"
xtest <- double(1)
ytest <- double(1)
ntest <- 1
labelts <- FALSE
nt <- if (keep.forest) ntree else 1
nPerm=1
do.trace=F; oob.prox=F
corr.bias=FALSE
keep.inbag=FALSE
impmat <- double(1)
replace=T
rfout <- .C("JRF_regRF",
x,
y, ww,
as.integer(c(totsize, p)),
sampsize=as.integer(sampsize), as.integer(totsize),
as.integer(nodesize),
as.integer(nrnodes),
as.integer(ntree),
as.integer(mtry),
as.integer(c(importance, localImp, nPerm)),
as.integer(ncat),
as.integer(maxcat),
as.integer(do.trace),
as.integer(proximity),
as.integer(oob.prox),
as.integer(corr.bias),
ypred = double(n * nclasses),
impout = impout,
impmat = impmat,
impSD = impSD,
prox = prox,
ndbigtree = integer(ntree),
nodestatus = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
leftDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
rightDaughter = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
nodepred = matrix(double(nrnodes * nt * nclasses), ncol=nt),
bestvar = matrix(integer(nrnodes * nt * nclasses), ncol=nt),
xbestsplit = matrix(double(nrnodes * nt * nclasses), ncol=nt),
mse = double(ntree * nclasses),
keep = as.integer(c(keep.forest, keep.inbag)),
replace = as.integer(replace),
testdat = as.integer(testdat),
xts = xtest,
ntest = as.integer(ntest),
yts = as.double(ytest),
labelts = as.integer(labelts),
ytestpred = double(ntest),
proxts = proxts,
msets = double(if (labelts) ntree else 1),
coef = double(2),
oob.times = integer(n),
inbag = if (keep.inbag)
matrix(integer(n * ntree), n) else integer(1), as.integer(nclasses))[c(16:28, 36:41)]
# ## Format the forest component, if present.
if (keep.forest) {
max.nodes <- max(rfout$ndbigtree)
rfout$nodestatus <-
rfout$nodestatus[1:max.nodes, , drop=FALSE]
rfout$bestvar <-
rfout$bestvar[1:max.nodes, , drop=FALSE]
rfout$nodepred <-
rfout$nodepred[1:max.nodes, , drop=FALSE]
rfout$xbestsplit <-
rfout$xbestsplit[1:max.nodes, , drop=FALSE]
rfout$leftDaughter <-
rfout$leftDaughter[1:max.nodes, , drop=FALSE]
rfout$rightDaughter <-
rfout$rightDaughter[1:max.nodes, , drop=FALSE]
}
cl <- match.call()
cl[[1]] <- as.name("randomForest")
# ## Make sure those obs. that have not been OOB get NA as prediction.
ypred <- rfout$ypred
if (any(rfout$oob.times < 1)) {
ypred[rfout$oob.times == 0] <- NA
}
out <- list(call = cl,
type = "regression",
predicted =0,
mse = rfout$mse,
rsq = 1 - rfout$mse / (var(y[1,]) * (n-1) / n),
oob.times = rfout$oob.times,
importance = if (importance) matrix(rfout$impout, p * nclasses, 2) else
matrix(rfout$impout, ncol=1),
importanceSD=if (importance) rfout$impSD else NULL,
localImportance = if (localImp)
matrix(rfout$impmat, p, n, dimnames=list(x.col.names,
x.row.names)) else NULL,
proximity = if (proximity) matrix(rfout$prox, n, n,
dimnames = list(x.row.names, x.row.names)) else NULL,
ntree = ntree,
mtry = mtry,
forest = if (keep.forest)
c(rfout[c("ndbigtree", "nodestatus", "leftDaughter",
"rightDaughter", "nodepred", "bestvar",
"xbestsplit")],
list(ncat = ncat), list(nrnodes=max.nodes),
list(ntree=ntree), list(xlevels=xlevels)) else NULL,
coefs = if (corr.bias) rfout$coef else NULL,
y = y,
test = if(testdat) {
list(predicted = structure(rfout$ytestpred,
names=xts.row.names),
mse = if(labelts) rfout$msets else NULL,
rsq = if(labelts) 1 - rfout$msets /
(var(ytest) * (n-1) / n) else NULL,
proximity = if (proximity)
matrix(rfout$proxts / ntree, nrow = ntest,
dimnames = list(xts.row.names,
c(xts.row.names,
x.row.names))) else NULL)
} else NULL,
inbag = if (keep.inbag)
matrix(rfout$inbag, nrow(rfout$inbag),
dimnames=list(x.row.names, NULL)) else NULL)
# print(rfout$mse)
class(out) <- "randomForest"
return(out)
}
"iJRF" <-
function(X, W=NULL,ntree=NULL,mtry=NULL,genes.name=NULL) {
p<-dim(X[[1]])[1];
if (is.null(mtry)) mtry=sqrt(p)
if (is.null(ntree)) ntree=1000
if (is.null(genes.name)) genes.name=paste("G",seq(1,p),sep="")
nclasses<-length(X)
sampsize<-rep(0,nclasses)
imp<-array(0,c(p,length(genes.name),nclasses))
imp.final<-matrix(0,p*(p-1)/2,nclasses);
vec1<-matrix(rep(genes.name,p),p,p)
vec2<-t(vec1)
vec1<-vec1[lower.tri(vec1,diag=FALSE)]
vec2<-vec2[lower.tri(vec2,diag=FALSE)]
index<-seq(1,p)
for (j in 1:nclasses) { X[[j]] <- t(apply(X[[j]], 1, function(x) { (x - mean(x)) / sd(x) } ))
sampsize[j]<-dim(X[[j]])[2] }
tot<-max(sampsize);
print(is.null(W))
if (is.null(W)) { # -- implement standard JRF
for (j in 1:length(genes.name)){
covar<-matrix(0,(p-1)*nclasses,tot)
y<-matrix(0,nclasses,tot)
for (c in 1:nclasses) {
y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][j,])
covar[seq((c-1)*(p-1)+1,c*(p-1)),seq(1,sampsize[c])]<-X[[c]][-j,]
}
jrf.out<-JRF_onetarget(x=covar,y=y,mtry=mtry,importance=TRUE,sampsize=sampsize,nclasses=nclasses,ntree=ntree)
for (s in 1:nclasses) imp[-j,j,s]<-importance(jrf.out,scale=FALSE)[seq((p-1)*(s-1)+1,(p-1)*(s-1)+p-1)] #- save importance score for net1
}
} else { # -- implement iJRF (integrative JRF)
for (j in 1:length(genes.name)){
weights.rf<-as.matrix(W[,j]);
weights.rf[j]<-0
weights.rf<-weights.rf/sum(weights.rf);
w.sorted<-sort(weights.rf,decreasing = FALSE,index.return=T)
index<-w.sorted$ix
w.sorted<-w.sorted$x
covar<-matrix(0,p*nclasses,tot)
y<-matrix(0,nclasses,tot)
for (c in 1:nclasses) {
y[c,seq(1,sampsize[c])]<-X[[c]][j,]
covar[seq((c-1)*(p)+1,c*p),seq(1,sampsize[c])]<-X[[c]][index,]
}
jrf.out<-iJRF_onetarget(x=covar,y=y,mtry=mtry,importance=TRUE,sampsize=sampsize,
nclasses=nclasses,ntree=ntree,sw=as.double(w.sorted))
for (s in 1:nclasses) imp[index,j,s]<-importance(jrf.out,scale=FALSE)[seq(p*(s-1)+1,p*s)] #- save importance score for net1
}
}
# --- Derive importance score for each interaction
for (s in 1:nclasses){
imp.s<-imp[,,s]; t.imp<-t(imp.s)
imp.final[,s]<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
}
out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
colnames(out)<-c(paste0('gene',1:2),paste0('importance',1:nclasses))
return(out)
}
"iRafNet" <- function(X,W,ntree=NULL,mtry=NULL,genes.name) {
X<-t(X[[1]])
p<-dim(X)[2]
if (is.null(mtry)) mtry=sqrt(p)
if (is.null(ntree)) ntree=1000
if (is.null(genes.name)) genes.name=paste("G",seq(1,p),sep="")
if (is.null(W)) W=matrix(1,p,p)
imp<-matrix(0,p,p)
imp.final<-matrix(0,p*(p-1)/2,1);
vec1<-matrix(rep(genes.name,p),p,p)
vec2<-t(vec1)
vec1<-vec1[lower.tri(vec1,diag=FALSE)]
vec2<-vec2[lower.tri(vec2,diag=FALSE)]
X <- (apply(X, 2, function(x) { (x - mean(x)) / sd(x) } ))
for (j in 1:p){
y<-X[,j];
weights.rf<-as.matrix(W[,j]);
weights.rf[j]<-0
weights.rf<-weights.rf/sum(weights.rf);
w.sorted<-sort(weights.rf,decreasing = FALSE,index.return=T)
index<-w.sorted$ix
x.sorted<-X[,index]
w.sorted<-w.sorted$x
rout<-irafnet_onetarget(x=x.sorted,y=as.double(y),importance=TRUE,mtry=round(sqrt(p-1)),ntree=1000,
sw=as.double(w.sorted))
imp[index,j]<-c(importance(rout))
}
# --- Return importance score for each regulation
imp.s<-imp; t.imp<-t(imp.s)
imp.final<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
colnames(out)<-c(paste0('gene',1:2),'importance')
return(out)
}
"ptmJRF" <-
function(X, ntree,mtry,genes.name,ptm.name) {
nclasses<-length(X)
sampsize<-rep(0,nclasses)
for (j in 1:nclasses) { X[[j]] <- t(apply(X[[j]], 1, function(x) { (x - mean(x)) / sd(x) } ))
sampsize[j]<-dim(X[[j]])[2] }
p<-length(genes.name); ptm.p<-length(ptm.name)
if (is.null(mtry)) mtry=p;
# --- reorder rows in PTM object X[[1]]
X.ptm<-X[[1]]; s=0 ; locptm<-numptm<-rep(0,p)
ptm.new<-ptm.name
for (j in 1:p){
ptm.j<-X[[1]][ptm.name==genes.name[j],]
n.j<-sum(ptm.name==genes.name[j])
X.ptm[seq(s+1,s+n.j),]<-ptm.j
locptm[j]<-(s+1)
numptm[j]<-n.j
ptm.new[seq(s+1,s+n.j)]<-rep(genes.name[j],n.j)
s<-s+n.j
}
X[[1]]<-X.ptm
ptm.name<-ptm.new
imp<-array(0,c(p,length(genes.name),nclasses))
imp.final<-matrix(0,p*(p-1)/2,nclasses);
vec1<-matrix(rep(genes.name,p),p,p)
vec2<-t(vec1)
vec1<-vec1[lower.tri(vec1,diag=FALSE)]
vec2<-vec2[lower.tri(vec2,diag=FALSE)]
index<-seq(1,p)
imp<-array(0,c(p,ptm.p,nclasses))
for (j in 1:ptm.p){
covar<-matrix(0,ptm.p*nclasses,max(sampsize))
y<-matrix(0,nclasses,max(sampsize))
for (c in 1:nclasses) {
if (c==1) {
y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][j,])
covar[seq(1,ptm.p-numptm[genes.name==ptm.name[j]]),seq(1,sampsize[c])]<-X[[c]][-seq(locptm[genes.name==ptm.name[j]],locptm[genes.name==ptm.name[j]]+numptm[genes.name==ptm.name[j]]-1),]
n.covar<-ptm.p-numptm[genes.name==ptm.name[j]] } else {
y[c,seq(1,sampsize[c])]<-as.matrix(X[[c]][genes.name==ptm.name[j],])
covar[seq(n.covar+1,n.covar+p-1),seq(1,sampsize[c])]<-X[[c]][-j,]
n.covar<-n.covar+p-1
}
}
covar<-covar[seq(1,n.covar),]
numptm.j<-numptm[genes.name!=ptm.name[j]]
index<-seq(1,length(locptm))
index<-index[genes.name==ptm.name[j]]
locptm.j<-locptm;
if (index != p) locptm.j[seq(index+1,length(locptm))]<-locptm.j[seq(index+1,length(locptm))]-numptm[index]
locptm.j<-locptm.j[-index]
rfout<-ptmJRF_onetarget(x=covar,y=y,p=(p-1),mptm=ptm.p-numptm[genes.name==ptm.name[j]],
mtry=sqrt(p-1),importance=TRUE,sampsize=sampsize,nclasses=nclasses,
ntree=ntree,numptm=numptm.j,locptm=locptm.j)
imp.rfout<-importance(rfout)
for (s in 1:nclasses) imp[genes.name!=ptm.name[j],j,s]<-imp.rfout[seq((p-1)*(s-1)+1,(p-1)*(s-1)+p-1)]
}
imp.new<-array(0,c(p,p,nclasses))
for (j in 1:p){
if (sum(ptm.name==genes.name[j])==1){
for (c in 1:nclasses) imp.new[,j,c]<-imp[,ptm.name==genes.name[j],c]
} else {
for (c in 1:nclasses) imp.new[,j,c]<-apply(imp[,ptm.name==genes.name[j],c], 1, function(x) { mean(x) } )
}
}
# --- Derive importance score for each interaction
for (s in 1:nclasses){
imp.s<-imp.new[,,s]; t.imp<-t(imp.s)
imp.final[,s]<-(imp.s[lower.tri(imp.s,diag=FALSE)]+t.imp[lower.tri(t.imp,diag=FALSE)])/2
}
out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp.final),stringsAsFactors=FALSE)
colnames(out)<-c(paste0('gene',1:2),paste0('importance',1:nclasses))
return(out)
}
# --- MAIN function
## Dispatch to one of the three network-estimation models.
##
## model must be one of "iJRF", "iRafNet", "ptmJRF"; ptm.name is required
## only for "ptmJRF".  ntree defaults to 1000 when NULL.
## Returns the data frame produced by the selected model function.
"iJRFNet" <-
function(X, W=NULL,ntree=NULL,mtry=NULL,model=NULL,genes.name,ptm.name=NULL) {
  # BUGFIX: a missing model used to print() a message and return it
  # invisibly, and an unrecognized model raised "object 'out' not found".
  # Both cases now signal a proper error.
  if (is.null(model))
    stop("Specify 'model': one of \"iJRF\", \"iRafNet\", \"ptmJRF\".", call. = FALSE)
  if (is.null(ntree)) ntree=1000
  out <- switch(model,
                iJRF    = iJRF(X, W, ntree, mtry, genes.name),
                iRafNet = iRafNet(X, W, ntree, mtry, genes.name),
                ptmJRF  = ptmJRF(X, ntree, mtry, genes.name, ptm.name),
                stop("Unknown 'model': must be one of \"iJRF\", \"iRafNet\", \"ptmJRF\".",
                     call. = FALSE))
  return(out)
}
|
# Growth-curve plots (OD600 vs time) for two isolates and a DMSO control.
# Reads the per-condition Excel sheets, reshapes means/SDs to long format,
# and draws one error-bar plot per condition, combined with cowplot.
#install packages
install.packages("tidyverse")
library(tidyverse)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
install.packages("cowplot")
library(cowplot)
install.packages('Rmisc')
library(dplyr)
#load data
library(readxl)
LK135_pa <- read_excel("LK135_pa.xlsx")
LK369_pa <- read_excel("LK369_pa.xlsx")
DMSO_pa <- read_excel("DMSO_pa.xlsx")
#select mean and sd columns for plotting
new <- select(LK135_pa, 1,4,5,8,9,12,13,16,17,21,22)
new1 <- select(LK369_pa, 1,4,5,8,9,12,13,16,17,21,22)
new2 <- select(DMSO_pa, 1,4,5,8,9,12,13,16,17,21,22)
#gather means together into 4 columns with STDEV as long format
# NOTE: `sd` and `mean` shadow the base R functions of the same name within
# this script.
sd <- new %>% gather("Sample", "STDEV", starts_with("STDEV"))
#select time and mean columns
mean <- select(LK135_pa, 1, 4,8,12,16,21)
#melting mean data
mean <- melt(mean, id.vars = "Time")
#move long format STDEV to melted mean data
mean$sd <- sd$STDEV
sd1 <- new1 %>% gather("Sample", "STDEV", starts_with("STDEV"))
mean1 <- select(LK369_pa, 1, 4,8,12,16,21)
mean1 <- melt(mean1, id.vars = "Time")
# BUGFIX: attach each condition's own SDs (sd1/sd2); previously both lines
# reused `sd` (the LK135 SDs), so LK369 and DMSO plots showed wrong error bars.
mean1$sd <- sd1$STDEV
sd2 <- new2 %>% gather("Sample", "STDEV", starts_with("STDEV"))
mean2 <- select(DMSO_pa, 1, 4,8,12,16,21)
mean2 <- melt(mean2, id.vars = "Time")
mean2$sd <- sd2$STDEV
#plotting individual plots
LK135 <- ggplot(data = mean, aes(x = Time, y = value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y = "OD600") +
  ggtitle("Brevibacterium sp.") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey")) +
  scale_color_brewer(palette = "Set3")
LK135
LK369 <- ggplot(data = mean1, aes(x = Time, y= value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y= "OD600") +
  ggtitle("Microbacterium sp.") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey")) +
  scale_color_brewer(palette = "Set3")
LK369
DMSO <- ggplot(data = mean2, aes(x = Time, y=value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y = "OD600") +
  ggtitle("DMSO") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey"))+
  scale_color_brewer(palette = "Set3")
DMSO
#combining three plots
plot_grid(LK135, LK369, DMSO, labels = c("A", "B", "C"), ncol = 2)
|
/gcurve_LK135_LK369_DMSOpa.R
|
no_license
|
unguyen2/growth_curve_pa
|
R
| false
| false
| 3,071
|
r
|
# Growth-curve plots (OD600 vs time) for two isolates and a DMSO control.
# Reads the per-condition Excel sheets, reshapes means/SDs to long format,
# and draws one error-bar plot per condition, combined with cowplot.
#install packages
install.packages("tidyverse")
library(tidyverse)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
install.packages("cowplot")
library(cowplot)
install.packages('Rmisc')
library(dplyr)
#load data
library(readxl)
LK135_pa <- read_excel("LK135_pa.xlsx")
LK369_pa <- read_excel("LK369_pa.xlsx")
DMSO_pa <- read_excel("DMSO_pa.xlsx")
#select mean and sd columns for plotting
new <- select(LK135_pa, 1,4,5,8,9,12,13,16,17,21,22)
new1 <- select(LK369_pa, 1,4,5,8,9,12,13,16,17,21,22)
new2 <- select(DMSO_pa, 1,4,5,8,9,12,13,16,17,21,22)
#gather means together into 4 columns with STDEV as long format
# NOTE: `sd` and `mean` shadow the base R functions of the same name within
# this script.
sd <- new %>% gather("Sample", "STDEV", starts_with("STDEV"))
#select time and mean columns
mean <- select(LK135_pa, 1, 4,8,12,16,21)
#melting mean data
mean <- melt(mean, id.vars = "Time")
#move long format STDEV to melted mean data
mean$sd <- sd$STDEV
sd1 <- new1 %>% gather("Sample", "STDEV", starts_with("STDEV"))
mean1 <- select(LK369_pa, 1, 4,8,12,16,21)
mean1 <- melt(mean1, id.vars = "Time")
# BUGFIX: attach each condition's own SDs (sd1/sd2); previously both lines
# reused `sd` (the LK135 SDs), so LK369 and DMSO plots showed wrong error bars.
mean1$sd <- sd1$STDEV
sd2 <- new2 %>% gather("Sample", "STDEV", starts_with("STDEV"))
mean2 <- select(DMSO_pa, 1, 4,8,12,16,21)
mean2 <- melt(mean2, id.vars = "Time")
mean2$sd <- sd2$STDEV
#plotting individual plots
LK135 <- ggplot(data = mean, aes(x = Time, y = value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y = "OD600") +
  ggtitle("Brevibacterium sp.") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey")) +
  scale_color_brewer(palette = "Set3")
LK135
LK369 <- ggplot(data = mean1, aes(x = Time, y= value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y= "OD600") +
  ggtitle("Microbacterium sp.") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey")) +
  scale_color_brewer(palette = "Set3")
LK369
DMSO <- ggplot(data = mean2, aes(x = Time, y=value, color = variable)) +
  geom_point() +
  geom_errorbar(aes(ymin = value-sd, ymax = value+sd))+
  labs(x = "Time", y = "OD600") +
  ggtitle("DMSO") +
  theme(plot.title = element_text(family = "Times", face = "bold", hjust = 0.5)) +
  theme(legend.title = element_blank()) +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey"))+
  scale_color_brewer(palette = "Set3")
DMSO
#combining three plots
plot_grid(LK135, LK369, DMSO, labels = c("A", "B", "C"), ncol = 2)
|
# Parse the four raw Netflix Prize files into one long data frame
# (columns: item, string_item, user, rating, date).  Each input file
# interleaves item headers ("<movie_id>:") with rating lines
# ("user,rating,date").
library(dplyr)
df_total = data.frame()
start_outer = Sys.time()
for(data_id in 1:4){
  message("Reading data...")
  path = paste0("NetflixPrizeData/combined_data_", data_id ,".txt")
  data = readLines(path)
  total = length(data)
  start = Sys.time()
  item_id_loc = rep(NA, length(data))
  # get location of items (header lines are the only ones containing ":")
  message("Getting location of item headers...")
  for(i in seq_along(data)){
    if(grepl(x = data[i], pattern = ":", fixed = TRUE)){
      item_id_loc[i] = i
    }
    if(i %% 10000 == 0){
      cat("Dataset ", data_id, " out of ", 4, " | Percent item ID locations found: ", round(100*i/total,3), " %", "\n")
    }
  }
  item_loc_actual = item_id_loc[!is.na(item_id_loc)]
  # Start/end line of each item's rating block; the last block ends at EOF.
  df = data.frame(item_begin = item_loc_actual + 1,
                  item_end = lead(item_loc_actual) - 1)
  df[nrow(df),"item_end"] = length(data)
  lines_per_item = df %>% mutate(rows_per_item = item_end - item_begin)
  message("Determining item locations...")
  # Numeric movie id parsed from each "<id>:" header line.
  item_id = rep(NA, length(item_loc_actual))
  for(i in seq_along(item_loc_actual)){
    item_id_i = strsplit(data[item_loc_actual][i], split = ":")[[1]][1]
    #print(item_id_i)
    item_id_i = as.numeric(item_id_i)
    item_id[i] = item_id_i
  }
  df_rows_per_item = cbind(item_id, lines_per_item)
  message("Rearranging data to final form...")
  # PERF: build one small data frame per item and bind once, instead of
  # growing a data frame with bind_rows inside the loop (was O(n^2)).
  df_empty_data_frame = bind_rows(lapply(seq_len(nrow(df_rows_per_item)), function(i) {
    data.frame(item = df_rows_per_item[i,"item_id"],
               string_item = data[df_rows_per_item[i,"item_begin"]:df_rows_per_item[i,"item_end"]])
  }))
  message("Emergency dimensions check...")
  # Every line is either a header or exactly one rating row.
  stopifnot(nrow(df_empty_data_frame) == (length(data) - nrow(df_rows_per_item)))
  message("Final formatting")
  # PERF: split each "user,rating,date" line once and reuse the result
  # (the original called strsplit three times per row).
  parts = strsplit(df_empty_data_frame$string_item, ",")
  df_empty_data_frame = df_empty_data_frame %>%
    mutate(user = vapply(parts, function(x) x[[1]], character(1)),
           rating = vapply(parts, function(x) x[[2]], character(1)),
           date = vapply(parts, function(x) x[[3]], character(1)))
  rm(parts)
  df_total = bind_rows(df_total, df_empty_data_frame)
  message("Clearing garbage data frames...")
  rm(df_empty_data_frame)
  rm(df_rows_per_item)
  rm(lines_per_item)
  rm(df)
  rm(data)
  rm(item_id_loc)
  gc()
  #beepr::beep(1)
  end = Sys.time()
  print(end - start)
}
end_outer = Sys.time()
print(end_outer - start_outer)
|
/Handling Large Data/netflix_data_parsing.R
|
no_license
|
jacobmunson/RecommenderSystems
|
R
| false
| false
| 2,488
|
r
|
# Parse the four raw Netflix Prize files into one long data frame
# (columns: item, string_item, user, rating, date).  Each input file
# interleaves item headers ("<movie_id>:") with rating lines
# ("user,rating,date").
library(dplyr)
df_total = data.frame()
start_outer = Sys.time()
for(data_id in 1:4){
  message("Reading data...")
  path = paste0("NetflixPrizeData/combined_data_", data_id ,".txt")
  data = readLines(path)
  total = length(data)
  start = Sys.time()
  item_id_loc = rep(NA, length(data))
  # get location of items (header lines are the only ones containing ":")
  message("Getting location of item headers...")
  for(i in seq_along(data)){
    if(grepl(x = data[i], pattern = ":", fixed = TRUE)){
      item_id_loc[i] = i
    }
    if(i %% 10000 == 0){
      cat("Dataset ", data_id, " out of ", 4, " | Percent item ID locations found: ", round(100*i/total,3), " %", "\n")
    }
  }
  item_loc_actual = item_id_loc[!is.na(item_id_loc)]
  # Start/end line of each item's rating block; the last block ends at EOF.
  df = data.frame(item_begin = item_loc_actual + 1,
                  item_end = lead(item_loc_actual) - 1)
  df[nrow(df),"item_end"] = length(data)
  lines_per_item = df %>% mutate(rows_per_item = item_end - item_begin)
  message("Determining item locations...")
  # Numeric movie id parsed from each "<id>:" header line.
  item_id = rep(NA, length(item_loc_actual))
  for(i in seq_along(item_loc_actual)){
    item_id_i = strsplit(data[item_loc_actual][i], split = ":")[[1]][1]
    #print(item_id_i)
    item_id_i = as.numeric(item_id_i)
    item_id[i] = item_id_i
  }
  df_rows_per_item = cbind(item_id, lines_per_item)
  message("Rearranging data to final form...")
  # PERF: build one small data frame per item and bind once, instead of
  # growing a data frame with bind_rows inside the loop (was O(n^2)).
  df_empty_data_frame = bind_rows(lapply(seq_len(nrow(df_rows_per_item)), function(i) {
    data.frame(item = df_rows_per_item[i,"item_id"],
               string_item = data[df_rows_per_item[i,"item_begin"]:df_rows_per_item[i,"item_end"]])
  }))
  message("Emergency dimensions check...")
  # Every line is either a header or exactly one rating row.
  stopifnot(nrow(df_empty_data_frame) == (length(data) - nrow(df_rows_per_item)))
  message("Final formatting")
  # PERF: split each "user,rating,date" line once and reuse the result
  # (the original called strsplit three times per row).
  parts = strsplit(df_empty_data_frame$string_item, ",")
  df_empty_data_frame = df_empty_data_frame %>%
    mutate(user = vapply(parts, function(x) x[[1]], character(1)),
           rating = vapply(parts, function(x) x[[2]], character(1)),
           date = vapply(parts, function(x) x[[3]], character(1)))
  rm(parts)
  df_total = bind_rows(df_total, df_empty_data_frame)
  message("Clearing garbage data frames...")
  rm(df_empty_data_frame)
  rm(df_rows_per_item)
  rm(lines_per_item)
  rm(df)
  rm(data)
  rm(item_id_loc)
  gc()
  #beepr::beep(1)
  end = Sys.time()
  print(end - start)
}
end_outer = Sys.time()
print(end_outer - start_outer)
|
\name{ht_opt}
\alias{ht_opt}
\title{
Global Options for Heatmaps
}
\description{
Global Options for Heatmaps
}
\usage{
ht_opt(..., RESET = FALSE, READ.ONLY = NULL, LOCAL = FALSE, ADD = FALSE)
}
\arguments{
\item{...}{Options, see 'Details' section.}
\item{RESET}{Reset all the option values.}
\item{READ.ONLY}{Please ignore this argument.}
\item{LOCAL}{Please ignore this argument.}
\item{ADD}{Please ignore this argument.}
}
\details{
You can set some parameters for all heatmaps/annotations simultaneously by this global function.
Please note you should put it before your heatmap code and reset
all option values after drawing the heatmaps to avoid affecting the next heatmap.
There are following parameters to control all heatmaps:
\describe{
\item{heatmap_row_names_gp}{set \code{row_names_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_column_names_gp}{set \code{column_names_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_row_title_gp}{set \code{row_title_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_column_title_gp}{set \code{column_title_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_border}{set \code{border} in all \code{\link{Heatmap}}.}
}
Following parameters control the legends:
\describe{
\item{legend_title_gp}{set \code{title_gp} in all heatmap legends and annotation legends.}
\item{legend_title_position}{set \code{title_position} in all heatmap legends and annotation legends.}
\item{legend_labels_gp}{set \code{labels_gp} in all heatmap legends and annotation legends.}
\item{legend_grid_width}{set \code{grid_width} in all heatmap legends and annotation legends.}
\item{legend_grid_height}{set \code{grid_height} in all heatmap legends and annotation legends.}
\item{legend_border}{set \code{border} in all heatmap legends and annotation legends.}
\item{legend_gap}{Gap between legends. The value should be a vector of two units. One for gaps between vertical legends and one for the horizontal legends. If only one single unit is specified, the same gap set for the vertical and horizontal legends.}
\item{merge_legend}{whether to merge heatmap and annotation legends.}
}
Following parameters control heatmap annotations:
\describe{
\item{annotation_border}{\code{border} in all \code{\link{HeatmapAnnotation}}.}
\item{simple_anno_size}{size for the simple annotation.}
}
Following parameters control the space between heatmap components:
\describe{
\item{DENDROGRAM_PADDING}{space between dendrograms and heatmap body.}
\item{DIMNAME_PADDING}{space between row/column names and heatmap body.}
\item{TITLE_PADDING}{space between row/column titles and heatmap body. The value can have length of two which corresponds to the bottom and top padding.}
\item{COLUMN_ANNO_PADDING}{space between column annotations and heatmap body.}
\item{ROW_ANNO_PADDING}{space between row annotations and heatmap body.}
\item{HEATMAP_LEGEND_PADDING}{space between heatmap legends and heatmaps}
\item{ANNOTATION_LEGEND_PADDING}{space between annotation legends and heatmaps}
}
Other parameters:
\describe{
\item{fast_hclust}{whether use \code{\link[fastcluster]{hclust}} to speed up clustering?}
\item{show_parent_dend_line}{when heatmap is split, whether to add a dashed line to mark parent dendrogram and children dendrograms?}
\item{COLOR}{default colors for continuous color mapping.}
}
You can get or set option values by the traditional way (like \code{\link[base]{options}}) or by \code{$} operator:
\preformatted{
# to get option values
ht_opt("heatmap_row_names_gp")
ht_opt$heatmap_row_names_gp
# to set option values
ht_opt("heatmap_row_names_gp" = gpar(fontsize = 8))
ht_opt$heatmap_row_names_gp = gpar(fontsize = 8) }
Reset to the default values by \code{ht_opt(RESET = TRUE)}.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
ht_opt
}
|
/man/ht_opt.Rd
|
permissive
|
jokergoo/ComplexHeatmap
|
R
| false
| false
| 3,864
|
rd
|
\name{ht_opt}
\alias{ht_opt}
\title{
Global Options for Heatmaps
}
\description{
Global Options for Heatmaps
}
\usage{
ht_opt(..., RESET = FALSE, READ.ONLY = NULL, LOCAL = FALSE, ADD = FALSE)
}
\arguments{
\item{...}{Options, see 'Details' section.}
\item{RESET}{Reset all the option values.}
\item{READ.ONLY}{Please ignore this argument.}
\item{LOCAL}{Please ignore this argument.}
\item{ADD}{Please ignore this argument.}
}
\details{
You can set some parameters for all heatmaps/annotations simultaneously by this global function.
Please note you should put it before your heatmap code and reset
all option values after drawing the heatmaps to avoid affecting the next heatmap.
There are following parameters to control all heatmaps:
\describe{
\item{heatmap_row_names_gp}{set \code{row_names_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_column_names_gp}{set \code{column_names_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_row_title_gp}{set \code{row_title_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_column_title_gp}{set \code{column_title_gp} in all \code{\link{Heatmap}}.}
\item{heatmap_border}{set \code{border} in all \code{\link{Heatmap}}.}
}
Following parameters control the legends:
\describe{
\item{legend_title_gp}{set \code{title_gp} in all heatmap legends and annotation legends.}
\item{legend_title_position}{set \code{title_position} in all heatmap legends and annotation legends.}
\item{legend_labels_gp}{set \code{labels_gp} in all heatmap legends and annotation legends.}
\item{legend_grid_width}{set \code{grid_width} in all heatmap legends and annotation legends.}
\item{legend_grid_height}{set \code{grid_height} in all heatmap legends and annotation legends.}
\item{legend_border}{set \code{border} in all heatmap legends and annotation legends.}
\item{legend_gap}{Gap between legends. The value should be a vector of two units. One for gaps between vertical legends and one for the horizontal legends. If only one single unit is specified, the same gap set for the vertical and horizontal legends.}
\item{merge_legend}{whether to merge heatmap and annotation legends.}
}
Following parameters control heatmap annotations:
\describe{
\item{annotation_border}{\code{border} in all \code{\link{HeatmapAnnotation}}.}
\item{simple_anno_size}{size for the simple annotation.}
}
Following parameters control the space between heatmap components:
\describe{
\item{DENDROGRAM_PADDING}{space between dendrograms and heatmap body.}
\item{DIMNAME_PADDING}{space between row/column names and heatmap body.}
\item{TITLE_PADDING}{space between row/column titles and heatmap body. The value can have length of two which corresponds to the bottom and top padding.}
\item{COLUMN_ANNO_PADDING}{space between column annotations and heatmap body.}
\item{ROW_ANNO_PADDING}{space between row annotations and heatmap body.}
\item{HEATMAP_LEGEND_PADDING}{space between heatmap legends and heatmaps}
\item{ANNOTATION_LEGEND_PADDING}{space between annotation legends and heatmaps}
}
Other parameters:
\describe{
\item{fast_hclust}{whether use \code{\link[fastcluster]{hclust}} to speed up clustering?}
\item{show_parent_dend_line}{when heatmap is split, whether to add a dashed line to mark parent dendrogram and children dendrograms?}
\item{COLOR}{default colors for continuous color mapping.}
}
You can get or set option values by the traditional way (like \code{\link[base]{options}}) or by \code{$} operator:
\preformatted{
# to get option values
ht_opt("heatmap_row_names_gp")
ht_opt$heatmap_row_names_gp
# to set option values
ht_opt("heatmap_row_names_gp" = gpar(fontsize = 8))
ht_opt$heatmap_row_names_gp = gpar(fontsize = 8) }
Reset to the default values by \code{ht_opt(RESET = TRUE)}.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
ht_opt
}
|
# Auto-generated fuzz/valgrind regression input: invokes the internal
# IntervalSurgeon C++ routine rcpp_pile with a fixed, arbitrary argument
# list (including negative "coordinates" and members = NULL).  This
# exercises crash-safety of the compiled code, not meaningful output.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1635476531L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
# dump the structure of whatever the routine returned (or crash under valgrind)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609875160-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Auto-generated fuzz/valgrind regression input: invokes the internal
# IntervalSurgeon C++ routine rcpp_pile with a fixed, arbitrary argument
# list (including negative "coordinates" and members = NULL).  This
# exercises crash-safety of the compiled code, not meaningful output.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1635476531L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
# dump the structure of whatever the routine returned (or crash under valgrind)
str(result)
|
# Package load hook: runs when the namespace is loaded and registers the
# package's static web assets with Shiny (see addShinyEventsUIRessourcePath).
.onLoad = function(...) {
addShinyEventsUIRessourcePath()
}
# Locate the package's bundled www/ assets and register them with Shiny
# under the 'shinyEventsUI' URL prefix, so HTML generated by this package
# can reference its scripts and stylesheets.  Exported under two names
# for backward compatibility.
addShinyEventsUIRessourcePath = addShinyRessourcePath = function() {
  assets.dir = system.file('www', package='shinyEventsUI')
  shiny::addResourcePath(prefix = 'shinyEventsUI', directoryPath = assets.dir)
}
to.style = function(x, sep=";") {
paste0(names(x),": ", x, sep=sep)
}
# Build a named list from ..., borrowing names from the call expressions
# for any argument that was not explicitly named: nlist(x, b = 2) yields
# list(x = x, b = 2).  Explicit names always win.
nlist = function(...) {
  elems = list(...)
  given = names(elems)
  # the deparsed argument expressions of this call serve as fallback names
  auto = unlist(as.list(match.call())[-1])
  if (is.null(given)) {
    # nothing was named explicitly: name everything from the expressions
    names(elems) = auto
  } else {
    # fill in only the blanks, keeping user-supplied names untouched
    blank = given == ""
    names(elems)[blank] = auto[blank]
  }
  elems
}
# Drop every NULL entry from a list, preserving the order and names of
# the remaining elements.
remove.null = function(li) {
  Filter(Negate(is.null), li)
}
|
/R/shinyEventsUI.R
|
no_license
|
skranz/shinyEventsUI
|
R
| false
| false
| 733
|
r
|
# Package load hook: runs when the namespace is loaded and registers the
# package's static web assets with Shiny (see addShinyEventsUIRessourcePath).
.onLoad = function(...) {
addShinyEventsUIRessourcePath()
}
# Locate the package's bundled www/ assets and register them with Shiny
# under the 'shinyEventsUI' URL prefix, so HTML generated by this package
# can reference its scripts and stylesheets.  Exported under two names
# for backward compatibility.
addShinyEventsUIRessourcePath = addShinyRessourcePath = function() {
  assets.dir = system.file('www', package='shinyEventsUI')
  shiny::addResourcePath(prefix = 'shinyEventsUI', directoryPath = assets.dir)
}
to.style = function(x, sep=";") {
paste0(names(x),": ", x, sep=sep)
}
# Build a named list from ..., borrowing names from the call expressions
# for any argument that was not explicitly named: nlist(x, b = 2) yields
# list(x = x, b = 2).  Explicit names always win.
nlist = function(...) {
  elems = list(...)
  given = names(elems)
  # the deparsed argument expressions of this call serve as fallback names
  auto = unlist(as.list(match.call())[-1])
  if (is.null(given)) {
    # nothing was named explicitly: name everything from the expressions
    names(elems) = auto
  } else {
    # fill in only the blanks, keeping user-supplied names untouched
    blank = given == ""
    names(elems)[blank] = auto[blank]
  }
  elems
}
# Drop every NULL entry from a list, preserving the order and names of
# the remaining elements.
remove.null = function(li) {
  Filter(Negate(is.null), li)
}
|
# Auto-extracted example script for print.CGP() from the CGP
# (Composite Gaussian Process) package documentation.
library(CGP)
### Name: print.CGP
### Title: CGP model summary information
### Aliases: print.CGP
### ** Examples
# 24 two-dimensional design points on [0, 1]^2
x1<-c(0,.02,.075,.08,.14,.15,.155,.156,.18,.22,.29,.32,.36,
.37,.42,.5,.57,.63,.72,.785,.8,.84,.925,1)
x2<-c(.29,.02,.12,.58,.38,.87,.01,.12,.22,.08,.34,.185,.64,
.02,.93,.15,.42,.71,1,0,.21,.5,.785,.21)
X<-cbind(x1,x2)
# test-function response evaluated at the design points
yobs<-sin(1/((x1*0.7+0.3)*(x2*0.7+0.3)))
## Not run:
##D #Fit the CGP model
##D mod<-CGP(X,yobs)
##D print(mod)
## End(Not run)
|
/data/genthat_extracted_code/CGP/examples/print.CGP.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 481
|
r
|
# Auto-extracted example script for print.CGP() from the CGP
# (Composite Gaussian Process) package documentation.
library(CGP)
### Name: print.CGP
### Title: CGP model summary information
### Aliases: print.CGP
### ** Examples
# 24 two-dimensional design points on [0, 1]^2
x1<-c(0,.02,.075,.08,.14,.15,.155,.156,.18,.22,.29,.32,.36,
.37,.42,.5,.57,.63,.72,.785,.8,.84,.925,1)
x2<-c(.29,.02,.12,.58,.38,.87,.01,.12,.22,.08,.34,.185,.64,
.02,.93,.15,.42,.71,1,0,.21,.5,.785,.21)
X<-cbind(x1,x2)
# test-function response evaluated at the design points
yobs<-sin(1/((x1*0.7+0.3)*(x2*0.7+0.3)))
## Not run:
##D #Fit the CGP model
##D mod<-CGP(X,yobs)
##D print(mod)
## End(Not run)
|
###############################################################################
# R (https://r-project.org/) Numeric Methods for Optimization of Portfolios
#
# Copyright (c) 2004-2021 Brian G. Peterson, Peter Carl, Ross Bennett, Kris Boudt
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
# TODO add examples
# TODO add more details about the nuances of the optimization engines
#' @rdname constrained_objective
#' @name constrained_objective
#' @export
constrained_objective_v1 <- function(w, R, constraints, ..., trace=FALSE, normalize=TRUE, storage=FALSE)
{
# drop any extra return columns so R conforms to the weight vector length
if (ncol(R)>length(w)) {
R=R[,1:length(w)]
}
# 'penalty' may be supplied via ...; it scales every constraint-violation term
if(!hasArg(penalty)) penalty = 1e4
N = length(w)
# NOTE(review): 'T' masks the TRUE shorthand within this scope and appears
# unused below.
T = nrow(R)
if(hasArg(optimize_method))
optimize_method=match.call(expand.dots=TRUE)$optimize_method else optimize_method=''
if(hasArg(verbose))
verbose=match.call(expand.dots=TRUE)$verbose
else verbose=FALSE
# check for valid constraints
if (!is.constraint(constraints)) {
stop("constraints passed in are not of class constraint")
}
# check that the constraints and the weighting vector have the same length
if (N != length(constraints$assets)){
warning("length of constraints asset list and weights vector do not match, results may be bogus")
}
# 'out' accumulates the penalized objective value returned to the optimizer
out=0
# do the get here
# storage is only enabled when optimize.portfolio has set up .objectivestorage
store_output <- try(get('.objectivestorage',envir=.storage),silent=TRUE)
if(inherits(store_output,"try-error")) storage=FALSE else storage=TRUE
if(isTRUE(normalize)){
if(!is.null(constraints$min_sum) | !is.null(constraints$max_sum)){
# the user has passed in either min_sum or max_sum constraints for the portfolio, or both.
# we'll normalize the weights passed in to whichever boundary condition has been violated
# NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim
# might violate your constraints, so you'd need to renormalize them after optimizing
# we'll create functions for that so the user is less likely to mess it up.
# NOTE: need to normalize in the optimization wrapper too before we return, since we've normalized in here
# In Kris' original function, this was manifested as a full investment constraint
# the normalization process produces much faster convergence,
# and then we penalize parameters outside the constraints in the next block
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum=constraints$max_sum
if(sum(w)>max_sum) { w<-(max_sum/sum(w))*w } # normalize to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum=constraints$min_sum
if(sum(w)<min_sum) { w<-(min_sum/sum(w))*w } # normalize to min_sum
}
} # end min_sum and max_sum normalization
} else {
# the user wants the optimization algorithm to figure it out
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum=constraints$max_sum
if(sum(w)>max_sum) { out = out + penalty*(sum(w) - max_sum) } # penalize difference to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum=constraints$min_sum
if(sum(w)<min_sum) { out = out + penalty*(min_sum - sum(w)) } # penalize difference to min_sum
}
}
# penalize weights outside my constraints (can be caused by normalization)
if (!is.null(constraints$max)){
max = constraints$max
out = out + sum(w[which(w>max[1:N])]- constraints$max[which(w>max[1:N])])*penalty
}
if (!is.null(constraints$min)){
min = constraints$min
out = out + sum(constraints$min[which(w<min[1:N])] - w[which(w<min[1:N])])*penalty
}
# collect moment arguments passed via ... for the objective functions below
nargs <-list(...)
if(length(nargs)==0) nargs=NULL
# NOTE(review): length('...') is the length of the literal string "..."
# (always 1) and is.null('...') is always FALSE, so this branch is dead
# code; the line above already handles the empty-dots case.
if (length('...')==0 | is.null('...')) {
# rm('...')
nargs=NULL
}
# compute/augment the portfolio moments (mu, sigma, ...) needed by objectives
nargs<-set.portfolio.moments(R, constraints, momentargs=nargs)
if(is.null(constraints$objectives)) {
warning("no objectives specified in constraints")
} else{
if(isTRUE(trace) | isTRUE(storage)) tmp_return<-list()
# evaluate each enabled objective; dispatch to the measure function by name
for (objective in constraints$objectives){
#check for clean bits to pass in
if(objective$enabled){
tmp_measure = NULL
multiplier = objective$multiplier
#if(is.null(objective$arguments) | !is.list(objective$arguments)) objective$arguments<-list()
switch(objective$name,
mean =,
median = {
fun = match.fun(objective$name)
nargs$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
sd =,
StdDev = {
fun= match.fun(StdDev)
},
mVaR =,
VaR = {
fun= match.fun(VaR)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) nargs$portfolio_method='single'
if(is.null(objective$arguments$invert)) objective$arguments$invert = FALSE
},
es =,
mES =,
CVaR =,
cVaR =,
ES = {
fun = match.fun(ES)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method)& is.null(nargs$portfolio_method)) nargs$portfolio_method='single'
if(is.null(objective$arguments$invert)) objective$arguments$invert = FALSE
},
turnover = {
fun = match.fun(turnover) # turnover function included in objectiveFUN.R
},
{ # see 'S Programming p. 67 for this matching
fun<-try(match.fun(objective$name))
}
)
if(is.function(fun)){
# build the argument list for 'fun' by matching stored arguments and
# the moment list against the function's formals
.formals <- formals(fun)
onames <- names(.formals)
if(is.list(objective$arguments)){
#TODO FIXME only do this if R and weights are in the argument list of the fn
if(is.null(nargs$R) | !length(nargs$R)==length(R)) nargs$R <- R
if(is.null(nargs$weights)) nargs$weights <- w
pm <- pmatch(names(objective$arguments), onames, nomatch = 0L)
if (any(pm == 0L))
warning(paste("some arguments stored for",objective$name,"do not match"))
# this line overwrites the names of things stored in $arguments with names from formals.
# I'm not sure it's a good idea, so commenting for now, until we prove we need it
#names(objective$arguments[pm > 0L]) <- onames[pm]
.formals[pm] <- objective$arguments[pm > 0L]
#now add dots
if (length(nargs)) {
dargs<-nargs
pm <- pmatch(names(dargs), onames, nomatch = 0L)
names(dargs[pm > 0L]) <- onames[pm]
.formals[pm] <- dargs[pm > 0L]
}
.formals$... <- NULL
}
} # TODO do some funky return magic here on try-error
tmp_measure = try((do.call(fun,.formals)) ,silent=TRUE)
if(isTRUE(trace) | isTRUE(storage)) {
if(is.null(names(tmp_measure))) names(tmp_measure)<-objective$name
tmp_return[[objective$name]]<-tmp_measure
}
if(inherits(tmp_measure,"try-error")) {
message(paste("objective name",objective$name,"generated an error or warning:",tmp_measure))
}
# now set the new value of the objective output
if(inherits(objective,"return_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out = out + objective$multiplier*tmp_measure
} # end handling for return objectives
if(inherits(objective,"portfolio_risk_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out = out + abs(objective$multiplier)*tmp_measure
} # univariate risk objectives
if(inherits(objective,"turnover_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out = out + abs(objective$multiplier)*tmp_measure
} # univariate turnover objectives
if(inherits(objective,"minmax_objective")){
if (!is.null(objective$min) & !is.null(objective$max)){ # we have a min and max
if(tmp_measure > objective$max){
out = out + penalty * objective$multiplier * (tmp_measure - objective$max)
}
if(tmp_measure < objective$min){
out = out + penalty * objective$multiplier * (objective$min - tmp_measure)
}
}
} # temporary minmax objective
if(inherits(objective,"risk_budget_objective")){
# setup
# out = out + penalty*sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
# add risk budget constraint
if(!is.null(objective$target) & is.numeric(objective$target)){
#in addition to a risk budget constraint, we have a univariate target
# the first element of the returned list is the univariate measure
# we'll use the univariate measure exactly like we would as a separate objective
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure[[1]]-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
percrisk = tmp_measure[[3]] # third element is percent component contribution
RBupper = objective$max_prisk
RBlower = objective$min_prisk
if(!is.null(RBupper) | !is.null(RBlower)){
out = out + penalty * objective$multiplier * sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
}
# if(!is.null(objective$min_concentration)){
# if(isTRUE(objective$min_concentration)){
# max_conc<-max(tmp_measure[[2]]) #second element is the contribution in absolute terms
# # out=out + penalty * objective$multiplier * max_conc
# out = out + objective$multiplier * max_conc
# }
# }
# Combined min_con and min_dif to take advantage of a better concentration obj measure
if(!is.null(objective$min_difference) || !is.null(objective$min_concentration)){
if(isTRUE(objective$min_difference)){
# max_diff<-max(tmp_measure[[2]]-(sum(tmp_measure[[2]])/length(tmp_measure[[2]]))) #second element is the contribution in absolute terms
# Uses Herfindahl index to calculate concentration; added scaling perc diffs back to univariate numbers
max_diff <- sqrt(sum(tmp_measure[[3]]^2))/100 #third element is the contribution in percentage terms
# out = out + penalty * objective$multiplier * max_diff
out = out + penalty*objective$multiplier * max_diff
}
}
} # end handling of risk_budget objective
} # end enabled check
} # end loop over objectives
} # end objectives processing
if(isTRUE(verbose)) {
print('weights: ')
print(paste(w,' '))
print(paste("output of objective function",out))
print(unlist(tmp_return))
}
# guard: a non-finite objective value would break the optimizer, so
# substitute the (large) penalty instead
if(is.na(out) | is.nan(out) | is.null(out)){
#this should never happen
warning('NA or NaN produced in objective function for weights ',w)
out<-penalty
}
#return
if (isTRUE(storage)){
#add the new objective results
store_output[[length(store_output)+1]]<-list(out=as.numeric(out),weights=w,objective_measures=tmp_return)
# do the assign here
assign('.objectivestorage', store_output, envir=.storage)
}
if(!isTRUE(trace)){
return(out)
} else {
return(list(out=as.numeric(out),weights=w,objective_measures=tmp_return))
}
}
#' calculate a numeric return value for a portfolio based on a set of constraints and objectives
#'
#' Function to calculate a numeric return value for a portfolio based on a set of constraints and objectives.
#' We'll try to make as few assumptions as possible and only run objectives that are enabled by the user.
#'
#' If the user has passed in either min_sum or max_sum constraints for the portfolio, or both,
#' and are using a numerical optimization method like DEoptim, and normalize=TRUE,
#' we'll normalize the weights passed in to whichever boundary condition has been violated.
#' If using random portfolios, all the portfolios generated will meet the constraints by construction.
#' NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim, pso, or GenSA
#' might violate constraints, and will need to be renormalized after optimizing.
#' We apply the same normalization in \code{\link{optimize.portfolio}} so that the weights you see have been
#' normalized to min_sum if the generated portfolio is smaller than min_sum or max_sum if the
#' generated portfolio is larger than max_sum.
#' This normalization increases the speed of optimization and convergence by several orders of magnitude in many cases.
#'
#' You may find that for some portfolios, normalization is not desirable, if the algorithm
#' cannot find a direction in which to move to head towards an optimal portfolio. In these cases,
#' it may be best to set normalize=FALSE, and penalize the portfolios if the sum of the weighting
#' vector lies outside the min_sum and/or max_sum.
#'
#' Whether or not we normalize the weights using min_sum and max_sum, and are using a numerical optimization
#' engine like DEoptim, we will penalize portfolios that violate weight constraints in much the same way
#' we penalize other constraints. If a min_sum/max_sum normalization has not occurred, convergence
#' can take a very long time. We currently do not allow for a non-normalized full investment constraint.
#' Future version of this function could include this additional constraint penalty.
#'
#' When you are optimizing a return objective, you must specify a negative multiplier
#' for the return objective so that the function will maximize return. If you specify a target return,
#' any return that deviates from your target will be penalized. If you do not specify a target return,
#' you may need to specify a negative VTR (value to reach) , or the function will not converge.
#' Try the maximum expected return times the multiplier (e.g. -1 or -10).
#' Adding a return objective defaults the multiplier to -1.
#'
#' Additional parameters for other solvers
#' (e.g. random portfolios or
#' \code{\link[DEoptim]{DEoptim.control}} or pso or GenSA
#' may be passed in via \dots
#'
#'
#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns.
#' @param w a vector of weights to test.
#' @param portfolio an object of class \code{portfolio} specifying the constraints and objectives for the optimization, see \code{\link{portfolio}}.
#' @param \dots any other passthru parameters.
#' @param trace TRUE/FALSE whether to include debugging and additional detail in the output list. The default is FALSE. Several charting functions require that \code{trace=TRUE}.
#' @param normalize TRUE/FALSE whether to normalize results to min/max sum (TRUE), or let the optimizer penalize portfolios that do not conform (FALSE)
#' @param storage TRUE/FALSE default TRUE for DEoptim with trace, otherwise FALSE. not typically user-called.
#' @param constraints a v1_constraint object for backwards compatibility with \code{constrained_objective_v1}.
#' @param env environment of moments calculated in \code{optimize.portfolio}
#' @seealso \code{\link{constraint}}, \code{\link{objective}}, \code{\link[DEoptim]{DEoptim.control}}
#' @author Kris Boudt, Peter Carl, Brian G. Peterson, Ross Bennett
#' @aliases constrained_objective constrained_objective_v1 constrained_objective_v2
#' @rdname constrained_objective
#' @export constrained_objective
#' @export constrained_objective_v2
constrained_objective <- constrained_objective_v2 <- function(w, R, portfolio, ..., trace=FALSE, normalize=TRUE, storage=FALSE, env=NULL)
{
if (ncol(R) > length(w)) {
R <- R[ ,1:length(w)]
}
if(!hasArg(penalty)) penalty <- 1e4
N <- length(w)
T <- nrow(R)
if(hasArg(optimize_method))
optimize_method <- match.call(expand.dots=TRUE)$optimize_method else optimize_method <- ''
if(hasArg(verbose))
verbose <- match.call(expand.dots=TRUE)$verbose
else verbose <- FALSE
# initial weights
init_weights <- w
# get the constraints from the portfolio object
constraints <- get_constraints(portfolio)
# check for valid portfolio
if (!is.portfolio(portfolio)) {
stop("portfolio object passed in is not of class portfolio")
}
# check that the assets and the weighting vector have the same length
if (N != length(portfolio$assets)){
warning("length of portfolio asset list and weights vector do not match, results may be bogus")
}
out <- 0
# do the get here
store_output <- try(get('.objectivestorage',envir=.storage), silent=TRUE)
if(inherits(store_output,"try-error")) {
storage <- FALSE
# warning("could not get .objectivestorage")
} else {
storage <- TRUE
}
# use fn_map to normalize the weights
if(isTRUE(normalize)){
w <- fn_map(weights=w, portfolio=portfolio)$weights
# end fn_map transformation
} else {
# the user wants the optimization algorithm to figure it out
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum <- constraints$max_sum
if(sum(w) > max_sum) { out <- out + penalty * (sum(w) - max_sum) } # penalize difference to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum <- constraints$min_sum
if(sum(w) < min_sum) { out <- out + penalty * (min_sum - sum(w)) } # penalize difference to min_sum
}
}
# penalize weights outside min and max box constraints (can be caused by normalization)
if (!is.null(constraints$max)){
max <- constraints$max
# Only go to penalty term if any of the weights violate max
if(any(w > max)){
out <- out + sum(w[which(w > max[1:N])] - constraints$max[which(w > max[1:N])]) * penalty
}
}
if (!is.null(constraints$min)){
min <- constraints$min
# Only go to penalty term if any of the weights violate min
if(any(w < min)){
out <- out + sum(constraints$min[which(w < min[1:N])] - w[which(w < min[1:N])]) * penalty
}
}
# penalize weights that violate group constraints
if(!is.null(constraints$groups) & !is.null(constraints$cLO) & !is.null(constraints$cUP)){
groups <- constraints$groups
cLO <- constraints$cLO
cUP <- constraints$cUP
# Only go to penalty term if group constraint is violated
if(any(group_fail(w, groups, cLO, cUP))){
ngroups <- length(groups)
for(i in 1:ngroups){
tmp_w <- w[groups[[i]]]
# penalize for weights that are below cLO
if(sum(tmp_w) < cLO[i]){
out <- out + penalty * (cLO[i] - sum(tmp_w))
}
if(sum(tmp_w) > cUP[i]){
out <- out + penalty * (sum(tmp_w) - cUP[i])
}
}
}
} # End group constraint penalty
# penalize weights that violate max_pos constraints
if(!is.null(constraints$max_pos)){
max_pos <- constraints$max_pos
tolerance <- .Machine$double.eps^0.5
mult <- 1
# sum(abs(w) > tolerance) is the number of non-zero assets
nzassets <- sum(abs(w) > tolerance)
if(nzassets > max_pos){
# Do we need a small multiplier term here since (nzassets - max_pos)
# will be an integer and much larger than the weight penalty terms
out <- out + penalty * mult * (nzassets - max_pos)
}
} # End position_limit constraint penalty
# penalize weights that violate diversification constraint
if(!is.null(constraints$div_target)){
div_target <- constraints$div_target
div <- diversification(w)
mult <- 1
# only penalize if not within +/- 5% of target
if((div < div_target * 0.95) | (div > div_target * 1.05)){
out <- out + penalty * mult * abs(div - div_target)
}
} # End diversification constraint penalty
# penalize weights that violate turnover constraint
if(!is.null(constraints$turnover_target)){
turnover_target <- constraints$turnover_target
to <- turnover(w)
mult <- 1
# only penalize if not within +/- 5% of target
if((to < turnover_target * 0.95) | (to > turnover_target * 1.05)){
# print("transform or penalize to meet turnover target")
out = out + penalty * mult * abs(to - turnover_target)
}
} # End turnover constraint penalty
# penalize weights that violate return target constraint
if(!is.null(constraints$return_target)){
return_target <- constraints$return_target
mean_return <- port.mean(weights=w, mu=env$mu)
mult <- 1
out = out + penalty * mult * abs(mean_return - return_target)
} # End return constraint penalty
# penalize weights that violate factor exposure constraints
if(!is.null(constraints$B)){
t.B <- t(constraints$B)
lower <- constraints$lower
upper <- constraints$upper
mult <- 1
for(i in 1:nrow(t.B)){
tmpexp <- as.numeric(t(w) %*% t.B[i, ])
if(tmpexp < lower[i]){
out <- out + penalty * mult * (lower[i] - tmpexp)
}
if(tmpexp > upper[i]){
out <- out + penalty * mult * (tmpexp - upper[i])
}
}
} # End factor exposure constraint penalty
# Add penalty for transaction costs
if(!is.null(constraints$ptc)){
# calculate total transaction cost using portfolio$assets as initial set of weights
tc <- sum(abs(w - portfolio$assets) * constraints$ptc)
# for now use a multiplier of 1, may need to adjust this later
mult <- 1
out <- out + mult * tc
} # End transaction cost penalty
# Add penalty for leverage exposure
# This could potentially be added to random portfolios
if(!is.null(constraints$leverage)){
if((sum(abs(w)) > constraints$leverage)){
# only penalize if leverage is exceeded
mult <- 1/100
out <- out + penalty * mult * abs(sum(abs(w)) - constraints$leverage)
}
} # End leverage exposure penalty
# The "..." are passed in from optimize.portfolio and contain the output of
# momentFUN. The default is momentFUN=set.portfolio.moments and returns
# moments$mu, moments$sigma, moments$m3, moments$m4, etc. depending on the
# the functions corresponding to portfolio$objective$name. Would it be better
# to make this a formal argument for constrained_objective? This means that
# we completely avoid evaluating the set.portfolio.moments function. Can we
# trust that all the moments are correctly set in optimize.portfolio through
# momentFUN?
# Add R and w to the environment with the moments
# env$R <- R
# env$weights <- w
if(!is.null(env)){
nargs <- env
} else {
# print("calculating moments")
# calculating the moments
# nargs are used as the arguments for functions corresponding to
# objective$name called in the objective loop later
momentargs <- eval(substitute(alist(...)))
.formals <- formals(set.portfolio.moments)
.formals <- modify.args(formals=.formals, arglist=alist(momentargs=momentargs), dots=TRUE)
.formals <- modify.args(formals=.formals, arglist=NULL, R=R, dots=TRUE)
.formals <- modify.args(formals=.formals, arglist=NULL, portfolio=portfolio, dots=TRUE)
.formals$... <- NULL
# print(.formals)
nargs <- do.call(set.portfolio.moments, .formals)
}
# We should avoid modifying nargs in the loop below.
# If we modify nargs with something like nargs$x, nargs is copied and this
# should be avoided because nargs could be large because it contains the moments.
tmp_args <- list()
# JMU: Add all the variables in 'env' to tmp_args as names/symbols
# tmp_args[ls(env)] <- lapply(ls(env), as.name)
if(is.null(portfolio$objectives)) {
warning("no objectives specified in portfolio")
} else{
if(isTRUE(trace) | isTRUE(storage)) tmp_return <- list()
for (objective in portfolio$objectives){
#check for clean bits to pass in
if(objective$enabled){
tmp_measure <- NULL
multiplier <- objective$multiplier
#if(is.null(objective$arguments) | !is.list(objective$arguments)) objective$arguments<-list()
switch(objective$name,
mean =,
median = {
fun = match.fun(port.mean)
# would it be better to do crossprod(w, moments$mu)?
# tmp_args$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
median = {
fun = match.fun(objective$name)
tmp_args$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
sd =,
var =,
StdDev = {
fun = match.fun(StdDev)
},
mVaR =,
VaR = {
fun = match.fun(VaR)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) tmp_args$portfolio_method='single'
if(is.null(objective$arguments$invert)) tmp_args$invert = FALSE
},
es =,
mES =,
CVaR =,
cVaR =,
ETL=,
mETL=,
ES = {
fun = match.fun(ES)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) tmp_args$portfolio_method='single'
if(is.null(objective$arguments$invert)) tmp_args$invert = FALSE
},
turnover = {
fun = match.fun(turnover) # turnover function included in objectiveFUN.R
},
{ # see 'S Programming p. 67 for this matching
fun <- try(match.fun(objective$name))
}
)
if(is.function(fun)){
.formals <- formals(fun)
# Add the moments from the nargs object
# nargs contains the moments, these are being evaluated
.formals <- modify.args(formals=.formals, arglist=nargs, dots=TRUE)
# Add anything from tmp_args
.formals <- modify.args(formals=.formals, arglist=tmp_args, dots=TRUE)
# Now add the objective$arguments
.formals <- modify.args(formals=.formals, arglist=objective$arguments, dots=TRUE)
# Add R and weights if necessary
if("R" %in% names(.formals)) .formals <- modify.args(formals=.formals, arglist=NULL, R=R, dots=TRUE)
if("weights" %in% names(.formals)) .formals <- modify.args(formals=.formals, arglist=NULL, weights=w, dots=TRUE)
# .formals <- modify.args(formals=.formals, arglist=tmp_args, dots=TRUE)
.formals$... <- NULL
}
# tmp_measure <- try(do.call(fun, .formals, envir=env), silent=TRUE)
tmp_measure <- try(do.call(fun, .formals), silent=TRUE)
if(isTRUE(trace) | isTRUE(storage)) {
# Subsitute 'StdDev' if the objective name is 'var'
# if the user passes in var as an objective name, we are actually
# calculating StdDev, so we need to change the name here.
tmp_objname <- objective$name
if(tmp_objname == "var") tmp_objname <- "StdDev"
if(is.null(names(tmp_measure))) names(tmp_measure) <- tmp_objname
tmp_return[[tmp_objname]] <- tmp_measure
}
if(inherits(tmp_measure, "try-error")) {
message(paste("objective name", objective$name, "generated an error or warning:", tmp_measure))
}
# now set the new value of the objective output
if(inherits(objective, "return_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + objective$multiplier*tmp_measure
} # end handling for return objectives
if(inherits(objective, "portfolio_risk_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + abs(objective$multiplier)*tmp_measure
} # univariate risk objectives
if(inherits(objective, "turnover_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + abs(objective$multiplier)*tmp_measure
} # univariate turnover objectives
if(inherits(objective, "minmax_objective")){
if (!is.null(objective$min) & !is.null(objective$max)){ # we have a min and max
if(tmp_measure > objective$max){
out <- out + penalty * objective$multiplier * (tmp_measure - objective$max)
}
if(tmp_measure < objective$min){
out <- out + penalty * objective$multiplier * (objective$min - tmp_measure)
}
}
} # temporary minmax objective
if(inherits(objective, "risk_budget_objective")){
# setup
# out = out + penalty*sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
# add risk budget constraint
if(!is.null(objective$target) & is.numeric(objective$target)){
#in addition to a risk budget constraint, we have a univariate target
# the first element of the returned list is the univariate measure
# we'll use the univariate measure exactly like we would as a separate objective
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure[[1]]-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
percrisk = tmp_measure[[3]] # third element is percent component contribution
RBupper = objective$max_prisk
RBlower = objective$min_prisk
if(!is.null(RBupper) | !is.null(RBlower)){
out = out + penalty * objective$multiplier * sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
}
# if(!is.null(objective$min_concentration)){
# if(isTRUE(objective$min_concentration)){
# max_conc<-max(tmp_measure[[2]]) #second element is the contribution in absolute terms
# # out=out + penalty * objective$multiplier * max_conc
# out = out + objective$multiplier * max_conc
# }
# }
# Combined min_con and min_dif to take advantage of a better concentration obj measure
if(!is.null(objective$min_difference) || !is.null(objective$min_concentration)){
if(isTRUE(objective$min_difference)){
# max_diff<-max(tmp_measure[[2]]-(sum(tmp_measure[[2]])/length(tmp_measure[[2]]))) #second element is the contribution in absolute terms
# Uses Herfindahl index to calculate concentration; added scaling perc diffs back to univariate numbers
max_diff <- sqrt(sum(tmp_measure[[3]]^2))/100 #third element is the contribution in percentage terms
# out = out + penalty * objective$multiplier * max_diff
out = out + penalty*objective$multiplier * max_diff
}
if(isTRUE(objective$min_concentration)){
# use HHI to calculate concentration
# actual HHI
act_hhi <- sum(tmp_measure[[3]]^2)/100
# minimum possible HHI
min_hhi <- sum(rep(1/length(tmp_measure[[3]]), length(tmp_measure[[3]]))^2)/100
out <- out + penalty * objective$multiplier * abs(act_hhi - min_hhi)
}
}
} # end handling of risk_budget objective
if(inherits(objective, "weight_concentration_objective")){
# If the user does not pass in conc_groups, the output of HHI will be a scalar
if((length(objective$conc_aversion) == 1) & is.null(objective$conc_groups)){
# treat conc_aversion as a multiplier
out <- out + penalty * objective$conc_aversion * tmp_measure
}
# If the user passes in conc_groups, the output of HHI will be a list
# The second element of the list will be the group HHI
if(length(objective$conc_aversion > 1) & !is.null(objective$conc_groups)){
if(length(objective$conc_aversion) == length(tmp_measure[[2]])){
# treat the conc_aversion vector as a multiplier per group hhi
out <- out + penalty * sum(objective$conc_aversion * tmp_measure[[2]])
}
}
} # weight concentration objective
} # end enabled check
} # end loop over objectives
} # end objectives processing
if(isTRUE(verbose)) {
print('weights: ')
print(paste(w,' '))
print(paste("output of objective function", out))
print(unlist(tmp_return))
}
if(is.na(out) | is.nan(out) | is.null(out)){
#this should never happen
warning('NA or NaN produced in objective function for weights ',w)
out <- penalty
}
#return
if (isTRUE(storage)){
#add the new objective results
store_output[[length(store_output)+1]] <- list(out=as.numeric(out), weights=w, init_weights=init_weights, objective_measures=tmp_return)
# do the assign here
assign('.objectivestorage', store_output, envir=.storage)
}
if(!isTRUE(trace)){
return(out)
} else {
return(list(out=as.numeric(out), weights=w, objective_measures=tmp_return))
}
}
|
/R/constrained_objective.R
|
no_license
|
crconline/PortfolioAnalytics
|
R
| false
| false
| 37,337
|
r
|
###############################################################################
# R (https://r-project.org/) Numeric Methods for Optimization of Portfolios
#
# Copyright (c) 2004-2021 Brian G. Peterson, Peter Carl, Ross Bennett, Kris Boudt
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
# TODO add examples
# TODO add more details about the nuances of the optimization engines
#' @rdname constrained_objective
#' @name constrained_objective
#' @export
# ---------------------------------------------------------------------------
# Legacy (v1) objective evaluation, retained for backward compatibility with
# v1 'constraint' objects; superseded by constrained_objective (v2), which
# operates on 'portfolio' objects. Computes a single penalized numeric value
# for weight vector 'w' given returns 'R': constraint violations are scaled
# by 'penalty' and added to the accumulator 'out', then each enabled
# objective's measure (scaled by its multiplier) is added. With trace=TRUE a
# list(out, weights, objective_measures) is returned instead of the scalar.
# ---------------------------------------------------------------------------
constrained_objective_v1 <- function(w, R, constraints, ..., trace=FALSE, normalize=TRUE, storage=FALSE)
{
# drop extra return columns so R conforms with the weight vector
if (ncol(R)>length(w)) {
R=R[,1:length(w)]
}
# 'penalty' may be supplied through ...; it scales every violation term.
# NOTE(review): hasArg() only detects 'penalty' in the dots -- confirm the
# name is actually bound in this frame when a caller passes it, since dots
# are not automatically assigned to local variables.
if(!hasArg(penalty)) penalty = 1e4
N = length(w)
T = nrow(R)
# pull optional passthru flags out of the expanded call, if present
if(hasArg(optimize_method))
optimize_method=match.call(expand.dots=TRUE)$optimize_method else optimize_method=''
if(hasArg(verbose))
verbose=match.call(expand.dots=TRUE)$verbose
else verbose=FALSE
# check for valid constraints
if (!is.constraint(constraints)) {
stop("constraints passed in are not of class constraint")
}
# check that the constraints and the weighting vector have the same length
if (N != length(constraints$assets)){
warning("length of constraints asset list and weights vector do not match, results may be bogus")
}
# accumulator for the penalized objective value
out=0
# do the get here: storage stays enabled only if '.objectivestorage'
# already exists in the package-local .storage environment
store_output <- try(get('.objectivestorage',envir=.storage),silent=TRUE)
if(inherits(store_output,"try-error")) storage=FALSE else storage=TRUE
if(isTRUE(normalize)){
if(!is.null(constraints$min_sum) | !is.null(constraints$max_sum)){
# the user has passed in either min_sum or max_sum constraints for the portfolio, or both.
# we'll normalize the weights passed in to whichever boundary condition has been violated
# NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim
# might violate your constraints, so you'd need to renormalize them after optimizing
# we'll create functions for that so the user is less likely to mess it up.
# NOTE: need to normalize in the optimization wrapper too before we return, since we've normalized in here
# In Kris' original function, this was manifested as a full investment constraint
# the normalization process produces much faster convergence,
# and then we penalize parameters outside the constraints in the next block
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum=constraints$max_sum
if(sum(w)>max_sum) { w<-(max_sum/sum(w))*w } # normalize to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum=constraints$min_sum
if(sum(w)<min_sum) { w<-(min_sum/sum(w))*w } # normalize to min_sum
}
} # end min_sum and max_sum normalization
} else {
# the user wants the optimization algorithm to figure it out:
# no rescaling, just penalize sums outside [min_sum, max_sum]
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum=constraints$max_sum
if(sum(w)>max_sum) { out = out + penalty*(sum(w) - max_sum) } # penalize difference to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum=constraints$min_sum
if(sum(w)<min_sum) { out = out + penalty*(min_sum - sum(w)) } # penalize difference to min_sum
}
}
# penalize weights outside my constraints (can be caused by normalization)
if (!is.null(constraints$max)){
max = constraints$max
out = out + sum(w[which(w>max[1:N])]- constraints$max[which(w>max[1:N])])*penalty
}
if (!is.null(constraints$min)){
min = constraints$min
out = out + sum(constraints$min[which(w<min[1:N])] - w[which(w<min[1:N])])*penalty
}
# collect passthru arguments to forward into the moment calculation
nargs <-list(...)
if(length(nargs)==0) nargs=NULL
# NOTE(review): '...' here is a string literal, so length('...') is 1 and
# is.null('...') is FALSE -- this condition is always FALSE and the branch
# is dead code; the length(nargs)==0 check above already handles empty dots.
if (length('...')==0 | is.null('...')) {
# rm('...')
nargs=NULL
}
# compute (or complete) the portfolio moments needed by the objectives
nargs<-set.portfolio.moments(R, constraints, momentargs=nargs)
if(is.null(constraints$objectives)) {
warning("no objectives specified in constraints")
} else{
if(isTRUE(trace) | isTRUE(storage)) tmp_return<-list()
for (objective in constraints$objectives){
#check for clean bits to pass in
if(objective$enabled){
tmp_measure = NULL
multiplier = objective$multiplier
#if(is.null(objective$arguments) | !is.list(objective$arguments)) objective$arguments<-list()
# map the objective name to the function that computes its measure;
# empty-RHS cases fall through to the next populated branch
switch(objective$name,
mean =,
median = {
fun = match.fun(objective$name)
nargs$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
sd =,
StdDev = {
fun= match.fun(StdDev)
},
mVaR =,
VaR = {
fun= match.fun(VaR)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) nargs$portfolio_method='single'
if(is.null(objective$arguments$invert)) objective$arguments$invert = FALSE
},
es =,
mES =,
CVaR =,
cVaR =,
ES = {
fun = match.fun(ES)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method)& is.null(nargs$portfolio_method)) nargs$portfolio_method='single'
if(is.null(objective$arguments$invert)) objective$arguments$invert = FALSE
},
turnover = {
fun = match.fun(turnover) # turnover function included in objectiveFUN.R
},
{ # see 'S Programming p. 67 for this matching
# default: look the name up as a function; may yield a try-error
fun<-try(match.fun(objective$name))
}
)
if(is.function(fun)){
# build the call's argument list from the function's formals,
# filling in stored objective$arguments first, then the moments
.formals <- formals(fun)
onames <- names(.formals)
if(is.list(objective$arguments)){
#TODO FIXME only do this if R and weights are in the argument list of the fn
if(is.null(nargs$R) | !length(nargs$R)==length(R)) nargs$R <- R
if(is.null(nargs$weights)) nargs$weights <- w
pm <- pmatch(names(objective$arguments), onames, nomatch = 0L)
if (any(pm == 0L))
warning(paste("some arguments stored for",objective$name,"do not match"))
# this line overwrites the names of things stored in $arguments with names from formals.
# I'm not sure it's a good idea, so commenting for now, until we prove we need it
#names(objective$arguments[pm > 0L]) <- onames[pm]
.formals[pm] <- objective$arguments[pm > 0L]
#now add dots
if (length(nargs)) {
dargs<-nargs
pm <- pmatch(names(dargs), onames, nomatch = 0L)
# NOTE(review): this assigns names on a subset copy of dargs,
# so it has no effect on dargs itself -- likely intended to
# rename partially-matched arguments; verify before relying on it
names(dargs[pm > 0L]) <- onames[pm]
.formals[pm] <- dargs[pm > 0L]
}
.formals$... <- NULL
}
} # TODO do some funky return magic here on try-error
# evaluate the objective measure; errors are captured, not raised
tmp_measure = try((do.call(fun,.formals)) ,silent=TRUE)
if(isTRUE(trace) | isTRUE(storage)) {
if(is.null(names(tmp_measure))) names(tmp_measure)<-objective$name
tmp_return[[objective$name]]<-tmp_measure
}
if(inherits(tmp_measure,"try-error")) {
message(paste("objective name",objective$name,"generated an error or warning:",tmp_measure))
}
# now set the new value of the objective output
if(inherits(objective,"return_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
# (a negative multiplier turns minimization of 'out' into return maximization)
out = out + objective$multiplier*tmp_measure
} # end handling for return objectives
if(inherits(objective,"portfolio_risk_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out = out + abs(objective$multiplier)*tmp_measure
} # univariate risk objectives
if(inherits(objective,"turnover_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure-objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out = out + abs(objective$multiplier)*tmp_measure
} # univariate turnover objectives
if(inherits(objective,"minmax_objective")){
# penalize only when the measure falls outside the [min, max] band
if (!is.null(objective$min) & !is.null(objective$max)){ # we have a min and max
if(tmp_measure > objective$max){
out = out + penalty * objective$multiplier * (tmp_measure - objective$max)
}
if(tmp_measure < objective$min){
out = out + penalty * objective$multiplier * (objective$min - tmp_measure)
}
}
} # temporary minmax objective
if(inherits(objective,"risk_budget_objective")){
# setup
# out = out + penalty*sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
# add risk budget constraint
if(!is.null(objective$target) & is.numeric(objective$target)){
#in addition to a risk budget constraint, we have a univariate target
# the first element of the returned list is the univariate measure
# we'll use the univariate measure exactly like we would as a separate objective
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure[[1]]-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
percrisk = tmp_measure[[3]] # third element is percent component contribution
RBupper = objective$max_prisk
RBlower = objective$min_prisk
if(!is.null(RBupper) | !is.null(RBlower)){
out = out + penalty * objective$multiplier * sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
}
# if(!is.null(objective$min_concentration)){
# if(isTRUE(objective$min_concentration)){
# max_conc<-max(tmp_measure[[2]]) #second element is the contribution in absolute terms
# # out=out + penalty * objective$multiplier * max_conc
# out = out + objective$multiplier * max_conc
# }
# }
# Combined min_con and min_dif to take advantage of a better concentration obj measure
if(!is.null(objective$min_difference) || !is.null(objective$min_concentration)){
if(isTRUE(objective$min_difference)){
# max_diff<-max(tmp_measure[[2]]-(sum(tmp_measure[[2]])/length(tmp_measure[[2]]))) #second element is the contribution in absolute terms
# Uses Herfindahl index to calculate concentration; added scaling perc diffs back to univariate numbers
max_diff <- sqrt(sum(tmp_measure[[3]]^2))/100 #third element is the contribution in percentage terms
# out = out + penalty * objective$multiplier * max_diff
out = out + penalty*objective$multiplier * max_diff
}
}
} # end handling of risk_budget objective
} # end enabled check
} # end loop over objectives
} # end objectives processing
if(isTRUE(verbose)) {
print('weights: ')
print(paste(w,' '))
print(paste("output of objective function",out))
print(unlist(tmp_return))
}
if(is.na(out) | is.nan(out) | is.null(out)){
#this should never happen; fall back to the bare penalty so the
#optimizer sees a large (but finite) value instead of NA
warning('NA or NaN produced in objective function for weights ',w)
out<-penalty
}
#return
if (isTRUE(storage)){
#add the new objective results to the shared storage environment
store_output[[length(store_output)+1]]<-list(out=as.numeric(out),weights=w,objective_measures=tmp_return)
# do the assign here
assign('.objectivestorage', store_output, envir=.storage)
}
if(!isTRUE(trace)){
return(out)
} else {
return(list(out=as.numeric(out),weights=w,objective_measures=tmp_return))
}
}
#' calculate a numeric return value for a portfolio based on a set of constraints and objectives
#'
#' Function to calculate a numeric return value for a portfolio based on a set of constraints and objectives.
#' We'll try to make as few assumptions as possible and only run objectives that are enabled by the user.
#'
#' If the user has passed in either min_sum or max_sum constraints for the portfolio, or both,
#' and are using a numerical optimization method like DEoptim, and normalize=TRUE,
#' we'll normalize the weights passed in to whichever boundary condition has been violated.
#' If using random portfolios, all the portfolios generated will meet the constraints by construction.
#' NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim, pso, or GenSA
#' might violate constraints, and will need to be renormalized after optimizing.
#' We apply the same normalization in \code{\link{optimize.portfolio}} so that the weights you see have been
#' normalized to min_sum if the generated portfolio is smaller than min_sum or max_sum if the
#' generated portfolio is larger than max_sum.
#' This normalization increases the speed of optimization and convergence by several orders of magnitude in many cases.
#'
#' You may find that for some portfolios, normalization is not desirable, if the algorithm
#' cannot find a direction in which to move to head towards an optimal portfolio. In these cases,
#' it may be best to set normalize=FALSE, and penalize the portfolios if the sum of the weighting
#' vector lies outside the min_sum and/or max_sum.
#'
#' Whether or not we normalize the weights using min_sum and max_sum, and are using a numerical optimization
#' engine like DEoptim, we will penalize portfolios that violate weight constraints in much the same way
#' we penalize other constraints. If a min_sum/max_sum normalization has not occurred, convergence
#' can take a very long time. We currently do not allow for a non-normalized full investment constraint.
#' Future versions of this function could include this additional constraint penalty.
#'
#' When you are optimizing a return objective, you must specify a negative multiplier
#' for the return objective so that the function will maximize return. If you specify a target return,
#' any return that deviates from your target will be penalized. If you do not specify a target return,
#' you may need to specify a negative VTR (value to reach), or the function will not converge.
#' Try the maximum expected return times the multiplier (e.g. -1 or -10).
#' Adding a return objective defaults the multiplier to -1.
#'
#' Additional parameters for other solvers
#' (e.g. random portfolios,
#' \code{\link[DEoptim]{DEoptim.control}}, pso, or GenSA)
#' may be passed in via \dots.
#'
#'
#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns.
#' @param w a vector of weights to test.
#' @param portfolio an object of class \code{portfolio} specifying the constraints and objectives for the optimization, see \code{\link{portfolio}}.
#' @param \dots any other passthru parameters.
#' @param trace TRUE/FALSE whether to include debugging and additional detail in the output list. The default is FALSE. Several charting functions require that \code{trace=TRUE}.
#' @param normalize TRUE/FALSE whether to normalize results to min/max sum (TRUE), or let the optimizer penalize portfolios that do not conform (FALSE)
#' @param storage TRUE/FALSE default TRUE for DEoptim with trace, otherwise FALSE. not typically user-called.
#' @param constraints a v1_constraint object for backwards compatibility with \code{constrained_objective_v1}.
#' @param env environment of moments calculated in \code{optimize.portfolio}
#' @seealso \code{\link{constraint}}, \code{\link{objective}}, \code{\link[DEoptim]{DEoptim.control}}
#' @author Kris Boudt, Peter Carl, Brian G. Peterson, Ross Bennett
#' @aliases constrained_objective constrained_objective_v1 constrained_objective_v2
#' @rdname constrained_objective
#' @export constrained_objective
#' @export constrained_objective_v2
constrained_objective <- constrained_objective_v2 <- function(w, R, portfolio, ..., trace=FALSE, normalize=TRUE, storage=FALSE, env=NULL)
{
if (ncol(R) > length(w)) {
R <- R[ ,1:length(w)]
}
if(!hasArg(penalty)) penalty <- 1e4
N <- length(w)
T <- nrow(R)
if(hasArg(optimize_method))
optimize_method <- match.call(expand.dots=TRUE)$optimize_method else optimize_method <- ''
if(hasArg(verbose))
verbose <- match.call(expand.dots=TRUE)$verbose
else verbose <- FALSE
# initial weights
init_weights <- w
# get the constraints from the portfolio object
constraints <- get_constraints(portfolio)
# check for valid portfolio
if (!is.portfolio(portfolio)) {
stop("portfolio object passed in is not of class portfolio")
}
# check that the assets and the weighting vector have the same length
if (N != length(portfolio$assets)){
warning("length of portfolio asset list and weights vector do not match, results may be bogus")
}
out <- 0
# do the get here
store_output <- try(get('.objectivestorage',envir=.storage), silent=TRUE)
if(inherits(store_output,"try-error")) {
storage <- FALSE
# warning("could not get .objectivestorage")
} else {
storage <- TRUE
}
# use fn_map to normalize the weights
if(isTRUE(normalize)){
w <- fn_map(weights=w, portfolio=portfolio)$weights
# end fn_map transformation
} else {
# the user wants the optimization algorithm to figure it out
if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) {
max_sum <- constraints$max_sum
if(sum(w) > max_sum) { out <- out + penalty * (sum(w) - max_sum) } # penalize difference to max_sum
}
if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) {
min_sum <- constraints$min_sum
if(sum(w) < min_sum) { out <- out + penalty * (min_sum - sum(w)) } # penalize difference to min_sum
}
}
# penalize weights outside min and max box constraints (can be caused by normalization)
if (!is.null(constraints$max)){
max <- constraints$max
# Only go to penalty term if any of the weights violate max
if(any(w > max)){
out <- out + sum(w[which(w > max[1:N])] - constraints$max[which(w > max[1:N])]) * penalty
}
}
if (!is.null(constraints$min)){
min <- constraints$min
# Only go to penalty term if any of the weights violate min
if(any(w < min)){
out <- out + sum(constraints$min[which(w < min[1:N])] - w[which(w < min[1:N])]) * penalty
}
}
# penalize weights that violate group constraints
if(!is.null(constraints$groups) & !is.null(constraints$cLO) & !is.null(constraints$cUP)){
groups <- constraints$groups
cLO <- constraints$cLO
cUP <- constraints$cUP
# Only go to penalty term if group constraint is violated
if(any(group_fail(w, groups, cLO, cUP))){
ngroups <- length(groups)
for(i in 1:ngroups){
tmp_w <- w[groups[[i]]]
# penalize for weights that are below cLO
if(sum(tmp_w) < cLO[i]){
out <- out + penalty * (cLO[i] - sum(tmp_w))
}
if(sum(tmp_w) > cUP[i]){
out <- out + penalty * (sum(tmp_w) - cUP[i])
}
}
}
} # End group constraint penalty
# penalize weights that violate max_pos constraints
if(!is.null(constraints$max_pos)){
max_pos <- constraints$max_pos
tolerance <- .Machine$double.eps^0.5
mult <- 1
# sum(abs(w) > tolerance) is the number of non-zero assets
nzassets <- sum(abs(w) > tolerance)
if(nzassets > max_pos){
# Do we need a small multiplier term here since (nzassets - max_pos)
# will be an integer and much larger than the weight penalty terms
out <- out + penalty * mult * (nzassets - max_pos)
}
} # End position_limit constraint penalty
# penalize weights that violate diversification constraint
if(!is.null(constraints$div_target)){
div_target <- constraints$div_target
div <- diversification(w)
mult <- 1
# only penalize if not within +/- 5% of target
if((div < div_target * 0.95) | (div > div_target * 1.05)){
out <- out + penalty * mult * abs(div - div_target)
}
} # End diversification constraint penalty
# penalize weights that violate turnover constraint
if(!is.null(constraints$turnover_target)){
turnover_target <- constraints$turnover_target
to <- turnover(w)
mult <- 1
# only penalize if not within +/- 5% of target
if((to < turnover_target * 0.95) | (to > turnover_target * 1.05)){
# print("transform or penalize to meet turnover target")
out = out + penalty * mult * abs(to - turnover_target)
}
} # End turnover constraint penalty
# penalize weights that violate return target constraint
if(!is.null(constraints$return_target)){
return_target <- constraints$return_target
mean_return <- port.mean(weights=w, mu=env$mu)
mult <- 1
out = out + penalty * mult * abs(mean_return - return_target)
} # End return constraint penalty
# penalize weights that violate factor exposure constraints
if(!is.null(constraints$B)){
t.B <- t(constraints$B)
lower <- constraints$lower
upper <- constraints$upper
mult <- 1
for(i in 1:nrow(t.B)){
tmpexp <- as.numeric(t(w) %*% t.B[i, ])
if(tmpexp < lower[i]){
out <- out + penalty * mult * (lower[i] - tmpexp)
}
if(tmpexp > upper[i]){
out <- out + penalty * mult * (tmpexp - upper[i])
}
}
} # End factor exposure constraint penalty
# Add penalty for transaction costs
if(!is.null(constraints$ptc)){
# calculate total transaction cost using portfolio$assets as initial set of weights
tc <- sum(abs(w - portfolio$assets) * constraints$ptc)
# for now use a multiplier of 1, may need to adjust this later
mult <- 1
out <- out + mult * tc
} # End transaction cost penalty
# Add penalty for leverage exposure
# This could potentially be added to random portfolios
if(!is.null(constraints$leverage)){
if((sum(abs(w)) > constraints$leverage)){
# only penalize if leverage is exceeded
mult <- 1/100
out <- out + penalty * mult * abs(sum(abs(w)) - constraints$leverage)
}
} # End leverage exposure penalty
# The "..." are passed in from optimize.portfolio and contain the output of
# momentFUN. The default is momentFUN=set.portfolio.moments and returns
# moments$mu, moments$sigma, moments$m3, moments$m4, etc. depending on the
# the functions corresponding to portfolio$objective$name. Would it be better
# to make this a formal argument for constrained_objective? This means that
# we completely avoid evaluating the set.portfolio.moments function. Can we
# trust that all the moments are correctly set in optimize.portfolio through
# momentFUN?
# Add R and w to the environment with the moments
# env$R <- R
# env$weights <- w
if(!is.null(env)){
nargs <- env
} else {
# print("calculating moments")
# calculating the moments
# nargs are used as the arguments for functions corresponding to
# objective$name called in the objective loop later
momentargs <- eval(substitute(alist(...)))
.formals <- formals(set.portfolio.moments)
.formals <- modify.args(formals=.formals, arglist=alist(momentargs=momentargs), dots=TRUE)
.formals <- modify.args(formals=.formals, arglist=NULL, R=R, dots=TRUE)
.formals <- modify.args(formals=.formals, arglist=NULL, portfolio=portfolio, dots=TRUE)
.formals$... <- NULL
# print(.formals)
nargs <- do.call(set.portfolio.moments, .formals)
}
# We should avoid modifying nargs in the loop below.
# If we modify nargs with something like nargs$x, nargs is copied and this
# should be avoided because nargs could be large because it contains the moments.
tmp_args <- list()
# JMU: Add all the variables in 'env' to tmp_args as names/symbols
# tmp_args[ls(env)] <- lapply(ls(env), as.name)
if(is.null(portfolio$objectives)) {
warning("no objectives specified in portfolio")
} else{
if(isTRUE(trace) | isTRUE(storage)) tmp_return <- list()
for (objective in portfolio$objectives){
#check for clean bits to pass in
if(objective$enabled){
tmp_measure <- NULL
multiplier <- objective$multiplier
#if(is.null(objective$arguments) | !is.list(objective$arguments)) objective$arguments<-list()
switch(objective$name,
mean =,
median = {
fun = match.fun(port.mean)
# would it be better to do crossprod(w, moments$mu)?
# tmp_args$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
median = {
fun = match.fun(objective$name)
tmp_args$x <- ( R %*% w ) #do the multivariate mean/median with Kroneker product
},
sd =,
var =,
StdDev = {
fun = match.fun(StdDev)
},
mVaR =,
VaR = {
fun = match.fun(VaR)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) tmp_args$portfolio_method='single'
if(is.null(objective$arguments$invert)) tmp_args$invert = FALSE
},
es =,
mES =,
CVaR =,
cVaR =,
ETL=,
mETL=,
ES = {
fun = match.fun(ES)
if(!inherits(objective,"risk_budget_objective") & is.null(objective$arguments$portfolio_method) & is.null(nargs$portfolio_method)) tmp_args$portfolio_method='single'
if(is.null(objective$arguments$invert)) tmp_args$invert = FALSE
},
turnover = {
fun = match.fun(turnover) # turnover function included in objectiveFUN.R
},
{ # see 'S Programming p. 67 for this matching
fun <- try(match.fun(objective$name))
}
)
if(is.function(fun)){
.formals <- formals(fun)
# Add the moments from the nargs object
# nargs contains the moments, these are being evaluated
.formals <- modify.args(formals=.formals, arglist=nargs, dots=TRUE)
# Add anything from tmp_args
.formals <- modify.args(formals=.formals, arglist=tmp_args, dots=TRUE)
# Now add the objective$arguments
.formals <- modify.args(formals=.formals, arglist=objective$arguments, dots=TRUE)
# Add R and weights if necessary
if("R" %in% names(.formals)) .formals <- modify.args(formals=.formals, arglist=NULL, R=R, dots=TRUE)
if("weights" %in% names(.formals)) .formals <- modify.args(formals=.formals, arglist=NULL, weights=w, dots=TRUE)
# .formals <- modify.args(formals=.formals, arglist=tmp_args, dots=TRUE)
.formals$... <- NULL
}
# tmp_measure <- try(do.call(fun, .formals, envir=env), silent=TRUE)
tmp_measure <- try(do.call(fun, .formals), silent=TRUE)
if(isTRUE(trace) | isTRUE(storage)) {
# Subsitute 'StdDev' if the objective name is 'var'
# if the user passes in var as an objective name, we are actually
# calculating StdDev, so we need to change the name here.
tmp_objname <- objective$name
if(tmp_objname == "var") tmp_objname <- "StdDev"
if(is.null(names(tmp_measure))) names(tmp_measure) <- tmp_objname
tmp_return[[tmp_objname]] <- tmp_measure
}
if(inherits(tmp_measure, "try-error")) {
message(paste("objective name", objective$name, "generated an error or warning:", tmp_measure))
}
# now set the new value of the objective output
if(inherits(objective, "return_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + objective$multiplier*tmp_measure
} # end handling for return objectives
if(inherits(objective, "portfolio_risk_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + abs(objective$multiplier)*tmp_measure
} # univariate risk objectives
if(inherits(objective, "turnover_objective")){
if (!is.null(objective$target) & is.numeric(objective$target)){ # we have a target
out <- out + penalty*abs(objective$multiplier)*abs(tmp_measure - objective$target)
}
# target is null or doesn't exist, just maximize, or minimize violation of constraint
out <- out + abs(objective$multiplier)*tmp_measure
} # univariate turnover objectives
if(inherits(objective, "minmax_objective")){
if (!is.null(objective$min) & !is.null(objective$max)){ # we have a min and max
if(tmp_measure > objective$max){
out <- out + penalty * objective$multiplier * (tmp_measure - objective$max)
}
if(tmp_measure < objective$min){
out <- out + penalty * objective$multiplier * (objective$min - tmp_measure)
}
}
} # temporary minmax objective
if(inherits(objective, "risk_budget_objective")){
# setup
# out = out + penalty*sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
# add risk budget constraint
if(!is.null(objective$target) & is.numeric(objective$target)){
#in addition to a risk budget constraint, we have a univariate target
# the first element of the returned list is the univariate measure
# we'll use the univariate measure exactly like we would as a separate objective
out = out + penalty*abs(objective$multiplier)*abs(tmp_measure[[1]]-objective$target)
#should we also penalize risk too low for risk targets? or is a range another objective?
# # half penalty for risk lower than target
# if( prw < (.9*Riskupper) ){ out = out + .5*(penalty*( prw - Riskupper)) }
}
percrisk = tmp_measure[[3]] # third element is percent component contribution
RBupper = objective$max_prisk
RBlower = objective$min_prisk
if(!is.null(RBupper) | !is.null(RBlower)){
out = out + penalty * objective$multiplier * sum( (percrisk-RBupper)*( percrisk > RBupper ),na.rm=TRUE ) + penalty*sum( (RBlower-percrisk)*( percrisk < RBlower ),na.rm=TRUE )
}
# if(!is.null(objective$min_concentration)){
# if(isTRUE(objective$min_concentration)){
# max_conc<-max(tmp_measure[[2]]) #second element is the contribution in absolute terms
# # out=out + penalty * objective$multiplier * max_conc
# out = out + objective$multiplier * max_conc
# }
# }
# Combined min_con and min_dif to take advantage of a better concentration obj measure
if(!is.null(objective$min_difference) || !is.null(objective$min_concentration)){
if(isTRUE(objective$min_difference)){
# max_diff<-max(tmp_measure[[2]]-(sum(tmp_measure[[2]])/length(tmp_measure[[2]]))) #second element is the contribution in absolute terms
# Uses Herfindahl index to calculate concentration; added scaling perc diffs back to univariate numbers
max_diff <- sqrt(sum(tmp_measure[[3]]^2))/100 #third element is the contribution in percentage terms
# out = out + penalty * objective$multiplier * max_diff
out = out + penalty*objective$multiplier * max_diff
}
if(isTRUE(objective$min_concentration)){
# use HHI to calculate concentration
# actual HHI
act_hhi <- sum(tmp_measure[[3]]^2)/100
# minimum possible HHI
min_hhi <- sum(rep(1/length(tmp_measure[[3]]), length(tmp_measure[[3]]))^2)/100
out <- out + penalty * objective$multiplier * abs(act_hhi - min_hhi)
}
}
} # end handling of risk_budget objective
if(inherits(objective, "weight_concentration_objective")){
# If the user does not pass in conc_groups, the output of HHI will be a scalar
if((length(objective$conc_aversion) == 1) & is.null(objective$conc_groups)){
# treat conc_aversion as a multiplier
out <- out + penalty * objective$conc_aversion * tmp_measure
}
# If the user passes in conc_groups, the output of HHI will be a list
# The second element of the list will be the group HHI
if(length(objective$conc_aversion > 1) & !is.null(objective$conc_groups)){
if(length(objective$conc_aversion) == length(tmp_measure[[2]])){
# treat the conc_aversion vector as a multiplier per group hhi
out <- out + penalty * sum(objective$conc_aversion * tmp_measure[[2]])
}
}
} # weight concentration objective
} # end enabled check
} # end loop over objectives
} # end objectives processing
if(isTRUE(verbose)) {
print('weights: ')
print(paste(w,' '))
print(paste("output of objective function", out))
print(unlist(tmp_return))
}
if(is.na(out) | is.nan(out) | is.null(out)){
#this should never happen
warning('NA or NaN produced in objective function for weights ',w)
out <- penalty
}
#return
if (isTRUE(storage)){
#add the new objective results
store_output[[length(store_output)+1]] <- list(out=as.numeric(out), weights=w, init_weights=init_weights, objective_measures=tmp_return)
# do the assign here
assign('.objectivestorage', store_output, envir=.storage)
}
if(!isTRUE(trace)){
return(out)
} else {
return(list(out=as.numeric(out), weights=w, objective_measures=tmp_return))
}
}
|
# Example script for EDISON.run(), extracted from the package's help page.
library(EDISON)
### Name: EDISON.run
### Title: Wrapper function for starting an MCMC simulation
### Aliases: EDISON.run
### ** Examples
# Generate random gene network and simulate data from it
# (l = 25: size parameter for the simulated network -- see ?simulateNetwork)
dataset = simulateNetwork(l=25)
# Run MCMC simulation to infer networks and changepoint locations.
# Default settings use the Poisson prior and 1500 iterations;
# num.iter=500 overrides the iteration count to shorten this example run.
result.poisson = EDISON.run(dataset$sim_data, num.iter=500)
# Use the binomial information sharing prior with hard node coupling, and
# run for 5000 iterations
# NOT EXECUTED
#result.bino = EDISON.run(dataset$sim_data,
# information.sharing='bino_hard', num.iter=5000)
# Set options to allow saving network and changepoint samples to file
options = defaultOptions()
options$save.file = TRUE
# NOT EXECUTED
# result.bino2 = EDISON.run(dataset$sim_data,
# information.sharing='bino_hard',
# num.iter=5000, output.file='bino2.results',
# options=options)
|
/data/genthat_extracted_code/EDISON/examples/EDISON.run.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,009
|
r
|
# Example script for EDISON.run(), extracted from the package's help page.
library(EDISON)
### Name: EDISON.run
### Title: Wrapper function for starting an MCMC simulation
### Aliases: EDISON.run
### ** Examples
# Generate random gene network and simulate data from it
# (l = 25: size parameter for the simulated network -- see ?simulateNetwork)
dataset = simulateNetwork(l=25)
# Run MCMC simulation to infer networks and changepoint locations.
# Default settings use the Poisson prior and 1500 iterations;
# num.iter=500 overrides the iteration count to shorten this example run.
result.poisson = EDISON.run(dataset$sim_data, num.iter=500)
# Use the binomial information sharing prior with hard node coupling, and
# run for 5000 iterations
# NOT EXECUTED
#result.bino = EDISON.run(dataset$sim_data,
# information.sharing='bino_hard', num.iter=5000)
# Set options to allow saving network and changepoint samples to file
options = defaultOptions()
options$save.file = TRUE
# NOT EXECUTED
# result.bino2 = EDISON.run(dataset$sim_data,
# information.sharing='bino_hard',
# num.iter=5000, output.file='bino2.results',
# options=options)
|
#' @title Pluscode South Neighbour
#'
#' @description Retrieves the southerly neighbour of a valid pluscode, with a
#'   precision of 2, 4, 8, or 10 characters excluding the plus sign (which can
#'   be included in the input).
#'
#' @param pluscode A valid pluscode 2, 4, 8, or 10 characters in length
#'   excluding the plus sign (which can be included).
#'
#' @return A character string: the pluscode of the cell immediately to the
#'   south, at the same precision as the input.
#'
#' @examples pluscode_southneighbour("9C4MGC2M+H4")
#'
#' @export
pluscode_southneighbour <- function(pluscode) {
  # The 20-symbol pluscode alphabet, in ascending order.
  code <- c("2", "3", "4", "5", "6", "7", "8", "9", "C", "F", "G", "H",
            "J", "M", "P", "Q", "R", "V", "W", "X")
  # Normalise: upper case, strip the optional "+" separator.
  pluscode <- toupper(gsub(pattern = "\\+", replacement = "", pluscode))
  pluscode_length <- nchar(pluscode)
  # Note: only 2/4/8/10 digits are accepted (the old message also listed 6,
  # which this check has never allowed).
  if (!pluscode_length %in% c(2, 4, 8, 10)) {
    stop(paste0("The pluscode is not a valid length, please enter value with ",
                "length of 2/4/8/10 (without +), or 3/5/9/11 (with + character)"))
  }
  # Split once and reuse; the original re-split the string for every digit.
  chars <- strsplit(pluscode, "")[[1]]
  for (i in chars) {
    if (!any(grepl(i, code))) {
      stop(paste0("The character ", i, " is not a valid pluscode character"))
    }
  }
  # Pad with NA up to the 10 possible digit positions so d[k] always exists.
  d <- c(chars, rep(NA_character_, 10L - pluscode_length))
  # The first digit encodes the coarse latitude band; only 3-9 accepted here.
  if (!d[1] %in% as.character(3:9)) {
    stop(paste0("The character ", d[1],
                " is not a valid pluscode character for the first character"))
  }
  # A latitude digit is "at minimum" when it is absent (shorter code) or is
  # the lowest symbol "2"; moving south must then borrow from the next
  # coarser latitude digit. Using short-circuit || fixes the original bug
  # where `is.na(dk) | grep(dk, code) == 1` collapsed to logical(0) for
  # absent digits (grep with an NA pattern returns integer(0)), so short
  # codes were returned un-decremented.
  at_min <- function(ch) is.na(ch) || match(ch, code) == 1L
  # Decrement one base-20 latitude digit when a borrow is requested,
  # wrapping "2" -> "X". Absent digits stay absent.
  decrement <- function(ch, borrow) {
    if (is.na(ch)) return(NA_character_)
    if (!borrow) return(ch)
    pos <- match(ch, code)
    if (pos == 1L) code[20L] else code[pos - 1L]
  }
  # Latitude digits sit at the odd positions: 9 (finest), 7, 5, 3, then 1.
  # A borrow propagates to a level only when every finer latitude digit is
  # at its minimum (or absent).
  n9 <- decrement(d[9], TRUE)
  n7 <- decrement(d[7], at_min(d[9]))
  n5 <- decrement(d[5], at_min(d[9]) && at_min(d[7]))
  n3 <- decrement(d[3], at_min(d[9]) && at_min(d[7]) && at_min(d[5]))
  n1 <- if (at_min(d[9]) && at_min(d[7]) && at_min(d[5]) && at_min(d[3])) {
    # The first digit is decimal (3-9), not base-20.
    as.numeric(d[1]) - 1
  } else {
    d[1]
  }
  # Defensive bound check on the coarse latitude band.
  if (n1 > 9 | n1 < 1) {
    stop("Neighbour pluscode is out of bounds")
  }
  # Reassemble at the input precision, re-inserting "+" where appropriate.
  pluscode_neighbour <- if (pluscode_length == 10) {
    paste0(n1, d[2], n3, d[4], n5, d[6], n7, d[8], "+", n9, d[10])
  } else if (pluscode_length == 8) {
    paste0(n1, d[2], n3, d[4], n5, d[6], n7, d[8], "+")
  } else if (pluscode_length == 4) {
    paste0(n1, d[2], n3, d[4])
  } else {
    paste0(n1, d[2])
  }
  return(pluscode_neighbour)
}
|
/R/pluscode_southneighbour.R
|
no_license
|
cran/pluscode
|
R
| false
| false
| 3,405
|
r
|
#' @title Pluscode South Neighbour
#'
#' @description Retrieves the southerly neighbour of a valid pluscode, with a
#'   precision of 2, 4, 8, or 10 characters excluding the plus sign (which can
#'   be included in the input).
#'
#' @param pluscode A valid pluscode 2, 4, 8, or 10 characters in length
#'   excluding the plus sign (which can be included).
#'
#' @return A character string: the pluscode of the cell immediately to the
#'   south, at the same precision as the input.
#'
#' @examples pluscode_southneighbour("9C4MGC2M+H4")
#'
#' @export
pluscode_southneighbour <- function(pluscode) {
  # The 20-symbol pluscode alphabet, in ascending order.
  code <- c("2", "3", "4", "5", "6", "7", "8", "9", "C", "F", "G", "H",
            "J", "M", "P", "Q", "R", "V", "W", "X")
  # Normalise: upper case, strip the optional "+" separator.
  pluscode <- toupper(gsub(pattern = "\\+", replacement = "", pluscode))
  pluscode_length <- nchar(pluscode)
  # Note: only 2/4/8/10 digits are accepted (the old message also listed 6,
  # which this check has never allowed).
  if (!pluscode_length %in% c(2, 4, 8, 10)) {
    stop(paste0("The pluscode is not a valid length, please enter value with ",
                "length of 2/4/8/10 (without +), or 3/5/9/11 (with + character)"))
  }
  # Split once and reuse; the original re-split the string for every digit.
  chars <- strsplit(pluscode, "")[[1]]
  for (i in chars) {
    if (!any(grepl(i, code))) {
      stop(paste0("The character ", i, " is not a valid pluscode character"))
    }
  }
  # Pad with NA up to the 10 possible digit positions so d[k] always exists.
  d <- c(chars, rep(NA_character_, 10L - pluscode_length))
  # The first digit encodes the coarse latitude band; only 3-9 accepted here.
  if (!d[1] %in% as.character(3:9)) {
    stop(paste0("The character ", d[1],
                " is not a valid pluscode character for the first character"))
  }
  # A latitude digit is "at minimum" when it is absent (shorter code) or is
  # the lowest symbol "2"; moving south must then borrow from the next
  # coarser latitude digit. Using short-circuit || fixes the original bug
  # where `is.na(dk) | grep(dk, code) == 1` collapsed to logical(0) for
  # absent digits (grep with an NA pattern returns integer(0)), so short
  # codes were returned un-decremented.
  at_min <- function(ch) is.na(ch) || match(ch, code) == 1L
  # Decrement one base-20 latitude digit when a borrow is requested,
  # wrapping "2" -> "X". Absent digits stay absent.
  decrement <- function(ch, borrow) {
    if (is.na(ch)) return(NA_character_)
    if (!borrow) return(ch)
    pos <- match(ch, code)
    if (pos == 1L) code[20L] else code[pos - 1L]
  }
  # Latitude digits sit at the odd positions: 9 (finest), 7, 5, 3, then 1.
  # A borrow propagates to a level only when every finer latitude digit is
  # at its minimum (or absent).
  n9 <- decrement(d[9], TRUE)
  n7 <- decrement(d[7], at_min(d[9]))
  n5 <- decrement(d[5], at_min(d[9]) && at_min(d[7]))
  n3 <- decrement(d[3], at_min(d[9]) && at_min(d[7]) && at_min(d[5]))
  n1 <- if (at_min(d[9]) && at_min(d[7]) && at_min(d[5]) && at_min(d[3])) {
    # The first digit is decimal (3-9), not base-20.
    as.numeric(d[1]) - 1
  } else {
    d[1]
  }
  # Defensive bound check on the coarse latitude band.
  if (n1 > 9 | n1 < 1) {
    stop("Neighbour pluscode is out of bounds")
  }
  # Reassemble at the input precision, re-inserting "+" where appropriate.
  pluscode_neighbour <- if (pluscode_length == 10) {
    paste0(n1, d[2], n3, d[4], n5, d[6], n7, d[8], "+", n9, d[10])
  } else if (pluscode_length == 8) {
    paste0(n1, d[2], n3, d[4], n5, d[6], n7, d[8], "+")
  } else if (pluscode_length == 4) {
    paste0(n1, d[2], n3, d[4])
  } else {
    paste0(n1, d[2])
  }
  return(pluscode_neighbour)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f7Card.R
\name{f7ListCardItem}
\alias{f7ListCardItem}
\title{Create a Framework7 list card item}
\usage{
f7ListCardItem(url = NULL, title = NULL)
}
\arguments{
\item{url}{Item url.}
\item{title}{Item title.}
}
\description{
Build a Framework7 list card item
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
/man/f7ListCardItem.Rd
|
no_license
|
irudolf16/shinyF7
|
R
| false
| true
| 392
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f7Card.R
\name{f7ListCardItem}
\alias{f7ListCardItem}
\title{Create a Framework7 list card item}
\usage{
f7ListCardItem(url = NULL, title = NULL)
}
\arguments{
\item{url}{Item url.}
\item{title}{Item title.}
}
\description{
Build a Framework7 list card item
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
# ##############################################################################
# Author: Georgios Kampolis
#
# Description: Loads the hourly wind speed train & test sets for model building
# and validation. Defines the forecast horizon (length of the test set), both
# data frames and the corresponding time-series objects (msts / ts).
#
# NOTE(review): assumes readr (read_csv), a pipe provider (%>%) and the
# forecast package (msts) are already attached by the calling script - confirm.
# ##############################################################################
# Set forecast horizon h, in hours.
horizon <- 48
# Load data
windTrain <- read_csv("data/windTrainSet.csv")
windTest <- read_csv("data/windTestSet.csv") %>% head(horizon)
# Create time series
freq <- c(24, 24*365.25) # Diurnal & Annual seasonality
windTrainTS <- msts(windTrain$windSpeed, freq, ts.frequency = freq[1])
# Shift the Test data in time to accommodate plotting. Start point is the
# same as the forecast objects. This does not truncate the series.
# NOTE(review): start = c(746, 1) presumably places the test series at daily
# cycle 746, i.e. immediately after the training period - confirm against the
# forecast objects built elsewhere.
windTestTS <- ts(data = as.vector(head(windTest$windSpeed, horizon)),
frequency = 24,
start = c(746, 1)
)
## Notify that script's end has been reached ##
if (require(beepr)) {beepr::beep(1)}
|
/scripts/4_1Load.R
|
permissive
|
gkampolis/ChilWind
|
R
| false
| false
| 1,080
|
r
|
# ##############################################################################
# Author: Georgios Kampolis
#
# Description: Loads the hourly wind speed train & test sets for model building
# and validation. Defines the forecast horizon (length of the test set), both
# data frames and the corresponding time-series objects (msts / ts).
#
# NOTE(review): assumes readr (read_csv), a pipe provider (%>%) and the
# forecast package (msts) are already attached by the calling script - confirm.
# ##############################################################################
# Set forecast horizon h, in hours.
horizon <- 48
# Load data
windTrain <- read_csv("data/windTrainSet.csv")
windTest <- read_csv("data/windTestSet.csv") %>% head(horizon)
# Create time series
freq <- c(24, 24*365.25) # Diurnal & Annual seasonality
windTrainTS <- msts(windTrain$windSpeed, freq, ts.frequency = freq[1])
# Shift the Test data in time to accommodate plotting. Start point is the
# same as the forecast objects. This does not truncate the series.
# NOTE(review): start = c(746, 1) presumably places the test series at daily
# cycle 746, i.e. immediately after the training period - confirm against the
# forecast objects built elsewhere.
windTestTS <- ts(data = as.vector(head(windTest$windSpeed, horizon)),
frequency = 24,
start = c(746, 1)
)
## Notify that script's end has been reached ##
if (require(beepr)) {beepr::beep(1)}
|
# Clean the rush-trial vegetation dataset:
#  1. merge species columns that were duplicated or misspelled at data entry,
#  2. abbreviate species names via vegan::make.cepnames(),
#  3. fix column classes and replace missing abundances with zero,
#  4. rebuild the quadrat unique id and export the cleaned data.
library(tidyverse)
library(vegan)

rushdata <- read.csv("data/rushdata_w.csv", header = TRUE, row.names = 1)

# Columns 1:5 are design variables (location / plot / replicate / quad /
# year); treat them as factors.
rushdata <- rushdata %>%
  mutate_at(funs(as.factor), .vars = 1:5)

# Abbreviate species names
# make species list lookup
head(names(rushdata), 20) # check where species start
# spp start at col 6 (the original comment said col 17, which contradicts
# the 6:ncol() indexing used throughout)
spp_list <- data.frame(names(rushdata)[6:ncol(rushdata)])
names(spp_list) <- "spp_name"
spp_list$spp_abbr <- vegan::make.cepnames(spp_list$spp_name)
spp_list$spp_name[which(duplicated(spp_list$spp_abbr))]
spp_list <- separate(spp_list, spp_name, into = c("genus", "species"),
                     remove = FALSE)
# eyeball these to find duplicate species and other mistakes

### sort out species -----

# Plot the missingness pattern of every column whose name contains
# `fragment`, so conflicting duplicate columns can be spotted by eye.
plot_spp_overlap <- function(df, fragment) {
  cols <- select(df, contains(fragment))
  VIM::matrixplot(cols, labels = vegan::make.cepnames(names(cols)))
}

# Fold a duplicated species column (`drop`) into its canonical column
# (`keep`): fill NA gaps in `keep` from `drop`, then delete `drop`.
# Only safe when the two columns never both record the same quadrat -
# checked visually with plot_spp_overlap() before each merge.
merge_spp <- function(df, keep, drop) {
  gaps <- is.na(df[[keep]])
  df[[keep]][gaps] <- df[[drop]][gaps]
  df[[drop]] <- NULL
  df
}

# Rhytidiadelphus squarrosus
plot_spp_overlap(rushdata, "Rhytidiadelphus")
# Rhytidiadelphus.squarrosus and Rhytidiadelphus.squarrosus.1 almost
# completely overlap - checked the raw values below before merging.
# Rhytidiadelphus.squaros is different.
rhytid <- select(rushdata, contains("Rhytidiadelphus"))
summary(rhytid)
# by eyeballing, seems that there are no conflicts so can merge them
rushdata <- merge_spp(rushdata, "Rhytidiadelphus.squarrosus",
                      "Rhytidiadelphus.squarrosus.1")
rushdata <- merge_spp(rushdata, "Rhytidiadelphus.squarrosus",
                      "Rhytidiadelphus.squaros")

# Alopecurus pratensis
plot_spp_overlap(rushdata, "Alopecurus")
# no overlap, although there is also not much overlap with A. geniculatus -
# so maybe an issue here
rushdata <- merge_spp(rushdata, "Alopecurus.pratensis", "Alopecurus..pratensis")

# Anthoxanthum.odoratum
plot_spp_overlap(rushdata, "Anthoxanthum")
# diff 2016 v 2015/2017
rushdata <- merge_spp(rushdata, "Anthoxanthum.odoratum", "Anthoxanthum.odorat")

# Calliergonella.cuspidata
plot_spp_overlap(rushdata, "Calliergon")
rushdata <- merge_spp(rushdata, "Calliergonella.cuspidata", "Calliergon.cuspidatum")
rushdata <- merge_spp(rushdata, "Calliergonella.cuspidata", "Calliergonella.cuspidata.1")
rushdata <- merge_spp(rushdata, "Calliergonella.cuspidata", "Calliergonella.cuspidata.2")
rushdata <- merge_spp(rushdata, "Calliergonella.cuspidata", "Calliergonella.cuspidata.3")

# Carex
plot_spp_overlap(rushdata, "Carex")
rushdata <- merge_spp(rushdata, "Carex.echinata", "Carex.sp..echinata..")
rushdata <- merge_spp(rushdata, "Carex.hostiana", "Carex.hostiana.1")
rushdata <- merge_spp(rushdata, "Carex.pilulifera", "Carex.pilulifera.1")
rushdata <- merge_spp(rushdata, "Carex.viridula", "Carex.viridula.1")

# Cerastium.glomeratum
plot_spp_overlap(rushdata, "Cerastium")
rushdata <- merge_spp(rushdata, "Cerastium.glomeratum", "Cerastium..glomeratum")

# Hylocomium.splendens
plot_spp_overlap(rushdata, "Hyloc")
rushdata <- merge_spp(rushdata, "Hylocomium.splendens", "Hylocumium.splendens")

# Lathyrus.pratensis: the duplicate column is simply dropped (no merge),
# matching the original treatment.
plot_spp_overlap(rushdata, "Lathyrus")
rushdata$Lathyrus.pratensis.1 <- NULL

# Luzula sp
plot_spp_overlap(rushdata, "Luzula")
rushdata <- merge_spp(rushdata, "Luzula.sp", "Luzula.sp.")

# Lychnis.flos.cuculi
plot_spp_overlap(rushdata, "Lychnis")
rushdata <- merge_spp(rushdata, "Lychnis.flos.cuculi", "Lychnis.flos.cuculi.")

# Lysimachia.nemorum (fragment matches the lower-case misspelling too)
plot_spp_overlap(rushdata, "ysimach")
rushdata <- merge_spp(rushdata, "Lysimachia.nemorum", "lysimachia.nemorum")
rushdata <- merge_spp(rushdata, "Lysimachia.nemorum", "Lysimachia.nemorum.1")

# Polytrichum.formosum
plot_spp_overlap(rushdata, "formos")
rushdata <- merge_spp(rushdata, "Polytrichum.formosum", "Polytrichum.formosa")

# Potentilla.erecta
plot_spp_overlap(rushdata, "Potentilla")
rushdata <- merge_spp(rushdata, "Potentilla.erecta", "Potentilla.erecta.erecta")

# Ranunculus.repens
plot_spp_overlap(rushdata, "unculus")
rushdata <- merge_spp(rushdata, "Ranunculus.repens", "Ranunculus..repens")
rushdata <- merge_spp(rushdata, "Ranunculus.repens", "Ranuunculus..repens")

# Sphagnum.squarrosum
plot_spp_overlap(rushdata, "squarrosum")
rushdata <- merge_spp(rushdata, "Sphagnum.squarrosum", "Sphagnum_squarrosum")

# Valeriana.dioica
plot_spp_overlap(rushdata, "Valerian")
rushdata <- merge_spp(rushdata, "Valeriana.dioica", "Valerian.dioica")

# Veronica.serpyllifolia
plot_spp_overlap(rushdata, "serpyl")
rushdata <- merge_spp(rushdata, "Veronica.serpyllifolia", "Veronica.serpylifolia")
rushdata <- merge_spp(rushdata, "Veronica.serpyllifolia", "Veronica.serpyllifolia.1")

# Abbreviate species names: rebuild the lookup now the duplicates are merged
head(names(rushdata), 20) # check where species start
# spp start at col 6
spp_list <- data.frame(names(rushdata)[6:ncol(rushdata)])
names(spp_list) <- "spp_name"
spp_list$spp_abbr <- vegan::make.cepnames(spp_list$spp_name)
spp_list$spp_name[which(duplicated(spp_list$spp_abbr))]
spp_list <- separate(spp_list, spp_name, into = c("genus", "species"),
                     remove = FALSE)

# rename species columns to their abbreviations
names(rushdata)[6:ncol(rushdata)] <- spp_list$spp_abbr

# sort out classes
names(rushdata)
rushdata <- mutate_at(rushdata, funs(as.factor), .vars = 1:5)
rushdata <- mutate_at(rushdata, funs(as.numeric), .vars = 6:ncol(rushdata))

# absences were recorded as NA; convert to zero abundance
rushdata[6:ncol(rushdata)][is.na(rushdata[6:ncol(rushdata)])] <- 0

# have a look
VIM::matrixplot(rushdata, labels = names(rushdata))

# make unique id again and check it really is unique
uid <- paste(rushdata$location, rushdata$treat_plot, rushdata$replicate,
             rushdata$quad, rushdata$year, sep = ".")
dupes <- uid[which(duplicated(uid))]
uid[which(uid %in% dupes)]
# all clear

# add uid to rushdata rownames
rownames(rushdata) <- uid

# export -----
write.csv(rushdata, "data/rushdata_w.csv", row.names = TRUE)
write.csv(spp_list, "data/spp_list.csv", row.names = FALSE)
|
/scripts/rush_01.4_prep_species.R
|
no_license
|
bogsnork/rushtrial
|
R
| false
| false
| 11,227
|
r
|
library(tidyverse)
library(vegan)
rushdata <- read.csv("data/rushdata_w.csv", header = TRUE, row.names = 1)
rushdata <- rushdata %>%
mutate_at(funs(as.factor), .vars = 1:5)
# Abbreviate species names
#make species list lookup
head(names(rushdata), 20) #check where species start
#spp start at col 17
spp_list <- data.frame(names(rushdata)[6:ncol(rushdata)])
names(spp_list) <- "spp_name"
spp_list$spp_abbr <- vegan::make.cepnames(spp_list$spp_name)
spp_list$spp_name[which(duplicated(spp_list$spp_abbr))]
spp_list <- separate(spp_list, spp_name, into = c("genus", "species"),
remove = FALSE)
#eyeball these to find duplicate species and other mistakes
### sort out species -----
#Rhytidiadelphus squarrosus
VIM::matrixplot(select(rushdata, contains("Rhytidiadelphus")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Rhytidiadelphus")))))
#Rhytidiadelphus.squarrosus and Rhytidiadelphus.squarrosus.1 almost completely overlap - need to check if values are the same
#Rhytidiadelphus.squaros is different
rhytid <- select(rushdata, contains("Rhytidiadelphus"))
summary(rhytid)
#by eyeballing, seems that there are no conflicts so can merge them.
rushdata$Rhytidiadelphus.squarrosus[is.na(rushdata$Rhytidiadelphus.squarrosus)] <-
rushdata$Rhytidiadelphus.squarrosus.1[is.na(rushdata$Rhytidiadelphus.squarrosus)]
rushdata$Rhytidiadelphus.squarrosus[is.na(rushdata$Rhytidiadelphus.squarrosus)] <-
rushdata$Rhytidiadelphus.squaros[is.na(rushdata$Rhytidiadelphus.squarrosus)]
rushdata$Rhytidiadelphus.squarrosus.1 <- NULL
rushdata$Rhytidiadelphus.squaros <- NULL
#Alopecurus pratensis
VIM::matrixplot(select(rushdata, contains("Alopecurus")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Alopecurus")))))
#no overlap, although there is also not much overlap with A. geniculatus - so maybe an issue here.
rushdata$Alopecurus.pratensis[is.na(rushdata$Alopecurus.pratensis)] <-
rushdata$Alopecurus..pratensis[is.na(rushdata$Alopecurus.pratensis)]
rushdata$Alopecurus..pratensis <- NULL
#Anthoxanthum.odoratum
VIM::matrixplot(select(rushdata, contains("Anthoxanthum")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Anthoxanthum")))))
#diff 2016 v 2015/2017
rushdata$Anthoxanthum.odoratum[is.na(rushdata$Anthoxanthum.odoratum)] <-
rushdata$Anthoxanthum.odorat[is.na(rushdata$Anthoxanthum.odoratum)]
rushdata$Anthoxanthum.odorat <- NULL
#Calliergonella.cuspidata
VIM::matrixplot(select(rushdata, contains("Calliergon")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Calliergon")))))
rushdata$Calliergonella.cuspidata[is.na(rushdata$Calliergonella.cuspidata)] <-
rushdata$Calliergon.cuspidatum[is.na(rushdata$Calliergonella.cuspidata)]
rushdata$Calliergon.cuspidatum <- NULL
rushdata$Calliergonella.cuspidata[is.na(rushdata$Calliergonella.cuspidata)] <-
rushdata$Calliergonella.cuspidata.1[is.na(rushdata$Calliergonella.cuspidata)]
rushdata$Calliergonella.cuspidata.1 <- NULL
rushdata$Calliergonella.cuspidata[is.na(rushdata$Calliergonella.cuspidata)] <-
rushdata$Calliergonella.cuspidata.2[is.na(rushdata$Calliergonella.cuspidata)]
rushdata$Calliergonella.cuspidata.2<- NULL
rushdata$Calliergonella.cuspidata[is.na(rushdata$Calliergonella.cuspidata)] <-
rushdata$Calliergonella.cuspidata.3[is.na(rushdata$Calliergonella.cuspidata)]
rushdata$Calliergonella.cuspidata.3 <- NULL
#Carex
VIM::matrixplot(select(rushdata, contains("Carex")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Carex")))))
#Carex.echinata
rushdata$Carex.echinata[is.na(rushdata$Carex.echinata)] <-
rushdata$Carex.sp..echinata..[is.na(rushdata$Carex.echinata)]
rushdata$Carex.sp..echinata.. <- NULL
#Carex.hostiana
rushdata$Carex.hostiana[is.na(rushdata$Carex.hostiana)] <-
rushdata$Carex.hostiana.1[is.na(rushdata$Carex.hostiana)]
rushdata$Carex.hostiana.1 <- NULL
#Carex.pilulifera
rushdata$Carex.pilulifera[is.na(rushdata$Carex.pilulifera)] <-
rushdata$Carex.pilulifera.1[is.na(rushdata$Carex.pilulifera)]
rushdata$Carex.pilulifera.1 <- NULL
#Carex.viridula
rushdata$Carex.viridula[is.na(rushdata$Carex.viridula)] <-
rushdata$Carex.viridula.1[is.na(rushdata$Carex.viridula)]
rushdata$Carex.viridula.1 <- NULL
#Cerastium.glomeratum
VIM::matrixplot(select(rushdata, contains("Cerastium")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Cerastium")))))
rushdata$Cerastium.glomeratum[is.na(rushdata$Cerastium.glomeratum)] <-
rushdata$Cerastium..glomeratum[is.na(rushdata$Cerastium.glomeratum)]
rushdata$Cerastium..glomeratum <- NULL
#Hylocomium.splendens
VIM::matrixplot(select(rushdata, contains("Hyloc")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Hyloc")))))
rushdata$Hylocomium.splendens[is.na(rushdata$Hylocomium.splendens)] <-
rushdata$Hylocumium.splendens[is.na(rushdata$Hylocomium.splendens)]
rushdata$Hylocumium.splendens <- NULL
#Lathyrus.pratensis
VIM::matrixplot(select(rushdata, contains("Lathyrus")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Lathyrus")))))
rushdata$Lathyrus.pratensis.1 <- NULL
#Luzula sp
VIM::matrixplot(select(rushdata, contains("Luzula")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Luzula")))))
rushdata$Luzula.sp[is.na(rushdata$Luzula.sp)] <-
rushdata$Luzula.sp.[is.na(rushdata$Luzula.sp)]
rushdata$Luzula.sp. <- NULL
#Lychnis.flos.cuculi
VIM::matrixplot(select(rushdata, contains("Lychnis")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Lychnis")))))
rushdata$Lychnis.flos.cuculi[is.na(rushdata$Lychnis.flos.cuculi)] <-
rushdata$Lychnis.flos.cuculi.[is.na(rushdata$Lychnis.flos.cuculi)]
rushdata$Lychnis.flos.cuculi. <- NULL
#Lysimachia.nemorum
VIM::matrixplot(select(rushdata, contains("ysimach")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("ysimach")))))
rushdata$Lysimachia.nemorum[is.na(rushdata$Lysimachia.nemorum)] <-
rushdata$lysimachia.nemorum[is.na(rushdata$Lysimachia.nemorum)]
rushdata$lysimachia.nemorum <- NULL
rushdata$Lysimachia.nemorum[is.na(rushdata$Lysimachia.nemorum)] <-
rushdata$Lysimachia.nemorum.1[is.na(rushdata$Lysimachia.nemorum)]
rushdata$Lysimachia.nemorum.1 <- NULL
#Polytrichum.formosum
VIM::matrixplot(select(rushdata, contains("formos")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("formos")))))
rushdata$Polytrichum.formosum[is.na(rushdata$Polytrichum.formosum)] <-
rushdata$Polytrichum.formosa[is.na(rushdata$Polytrichum.formosum)]
rushdata$Polytrichum.formosa <- NULL
#Potentilla.erecta
VIM::matrixplot(select(rushdata, contains("Potentilla")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Potentilla")))))
rushdata$Potentilla.erecta[is.na(rushdata$Potentilla.erecta)] <-
rushdata$Potentilla.erecta.erecta[is.na(rushdata$Potentilla.erecta)]
rushdata$Potentilla.erecta.erecta <- NULL
#Ranunculus.repens
VIM::matrixplot(select(rushdata, contains("unculus")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("unculus")))))
rushdata$Ranunculus.repens[is.na(rushdata$Ranunculus.repens)] <-
rushdata$Ranunculus..repens[is.na(rushdata$Ranunculus.repens)]
rushdata$Ranunculus..repens <- NULL
rushdata$Ranunculus.repens[is.na(rushdata$Ranunculus.repens)] <-
rushdata$Ranuunculus..repens[is.na(rushdata$Ranunculus.repens)]
rushdata$Ranuunculus..repens <- NULL
#Sphagnum.squarrosum
VIM::matrixplot(select(rushdata, contains("squarrosum")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("squarrosum")))))
rushdata$Sphagnum.squarrosum[is.na(rushdata$Sphagnum.squarrosum)] <-
rushdata$Sphagnum_squarrosum[is.na(rushdata$Sphagnum.squarrosum)]
rushdata$Sphagnum_squarrosum <- NULL
#Valeriana.dioica
VIM::matrixplot(select(rushdata, contains("Valerian")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("Valerian")))))
rushdata$Valeriana.dioica[is.na(rushdata$Valeriana.dioica)] <-
rushdata$Valerian.dioica[is.na(rushdata$Valeriana.dioica)]
rushdata$Valerian.dioica <- NULL
#Veronica.serpyllifolia
VIM::matrixplot(select(rushdata, contains("serpyl")),
labels = vegan::make.cepnames(names(select(rushdata,
contains("serpyl")))))
rushdata$Veronica.serpyllifolia[is.na(rushdata$Veronica.serpyllifolia)] <-
rushdata$Veronica.serpylifolia[is.na(rushdata$Veronica.serpyllifolia)]
rushdata$Veronica.serpylifolia <- NULL
rushdata$Veronica.serpyllifolia[is.na(rushdata$Veronica.serpyllifolia)] <-
rushdata$Veronica.serpyllifolia.1[is.na(rushdata$Veronica.serpyllifolia)]
rushdata$Veronica.serpyllifolia.1 <- NULL
# Abbreviate species names
#make species list lookup
head(names(rushdata), 20) #check where species start
#spp start at col 6
spp_list <- data.frame(names(rushdata)[6:ncol(rushdata)])
names(spp_list) <- "spp_name"
spp_list$spp_abbr <- vegan::make.cepnames(spp_list$spp_name)
spp_list$spp_name[which(duplicated(spp_list$spp_abbr))]
spp_list <- separate(spp_list, spp_name, into = c("genus", "species"),
remove = FALSE)
#rename species columns
names(rushdata)[6:ncol(rushdata)] <- spp_list$spp_abbr
#sort out classes
names(rushdata)
# Columns 1-5 hold the design/identifier variables -> coerce to factor;
# columns 6+ are species abundance columns -> coerce to numeric.
# funs() is defunct in dplyr >= 1.0, so use across() inside mutate()
# instead of the superseded mutate_at().
rushdata <- mutate(rushdata, across(1:5, as.factor))
rushdata <- mutate(rushdata, across(-(1:5), as.numeric))
#add zeroes
# Species columns start at column 6; an NA means the species was not
# recorded in that quadrat, so treat it as zero abundance.
rushdata[6:ncol(rushdata)][is.na(rushdata[6:ncol(rushdata)])] <- 0
#have a look
VIM::matrixplot(rushdata, labels = names(rushdata))
# make unique id again
# A record is identified by location x treatment plot x replicate x
# quadrat x year; check there are no duplicate combinations left.
uid <- paste(rushdata$location, rushdata$treat_plot, rushdata$replicate,
rushdata$quad, rushdata$year, sep = ".")
dupes <- uid[which(duplicated(uid))]
uid[which(uid %in% dupes)]
#all clear
#add uid to rushdata rownames
rownames(rushdata) <- uid
# export -----
# Keep row names (the uid) for the wide data; the species lookup table
# needs no row names.
write.csv(rushdata, "data/rushdata_w.csv", row.names = TRUE)
write.csv(spp_list, "data/spp_list.csv", row.names = FALSE)
|
#### load required libraries ####
library(ggplot2)
library(dplyr)
#### load data ####
# Read only the first 77 rows of the metadata file.
df <- read.csv("Metadata.csv",nrows=77)
# alternatively, na.exclude(df) removes every row that contains an NA
# rowSums(is.na(df)) == ncol(df)  (note: original comment read "if.na" -- typo)
# na.omit(): rows containing NA are filtered out, but a regression function
# still knows which rows were filtered
# blank.lines.skip = TRUE as an argument to read.csv .. sometimes works,
# depends on the situation (stringsAsFactors...)
# the function of the last loaded library is used: eg. function filter from dplyr is used
# and not from stats
# if you want to use the original functions: two times :: ,eg. stats::filter
mean(df[df$Reactor.phase == "Control", "ph"])
#calc mean on dataframe df, subset : df[rows,columns] ,select all rows from column ph
# where reactor.phase is control
levels(df$Reactor.phase)
#### select ####
physicochem <- select(df,ph,temp,Conductivity)
# piping symbol short cut: ctrl shift m, now you can use type completion
physicochem <- df %>% select(ph,temp,Conductivity)
physicochem.control <- df %>%
filter(Reactor.phase == "Control") %>%
select(ph,temp,Conductivity)
physicochem.control
#challenge: select only the diversity parameter for reactor phase startup
diversity <- df %>%
filter(Reactor.phase == "Startup") %>%
select(Diversity...D0,Diversity...D1,Diversity...D2)
# shorter: match the diversity columns by name pattern
diversity2 <- df %>%
filter(Reactor.phase == "Startup") %>%
select(contains("Diversity"))
#### grouping by ####
# Per reactor phase: mean pH, mean D2 diversity, and SD of pH.
meanph <- df %>%
group_by(Reactor.phase) %>%
summarise(mean.ph = mean(ph),
mean.d2 = mean(Diversity...D2),
sd.ph = sd(ph))
#challenge
#generate a summary for reactor cycle 2 and add standard deviation and
#log10 transformed cell count
df$Reactor.cycle <- as.factor(df$Reactor.cycle)
d2 <- df %>%
filter(Reactor.cycle == "2") %>%
mutate(condratio = Conductivity/temp) %>%
summarise(sd.d2 = sd(Diversity...D2),
avelog10.celldens = mean(log10(Cell.density..cells.mL.)),
mean.condrat = mean(condratio))
# Restore Reactor.cycle to an integer after the factor-based filtering.
df$Reactor.cycle <- as.integer(df$Reactor.cycle)
#### join data sets ####
physicochem <- df %>%
select(sample_title,temp,ph,Conductivity)
diversity <- df %>%
select(sample_title,contains("Diversity"))
meanph <- df %>%
filter(Reactor.cycle == 2) %>%
group_by(Reactor.phase) %>%
mutate(condratio=Conductivity/temp) %>%
summarise(mean.ph = mean(ph),
mean.d2 = mean(Diversity...D2),
sd.ph = sd(ph),
sd.d2 = sd(Diversity...D2),
avelog10.celldens = mean(log10(Cell.density..cells.mL.)),
mean.condrat = mean(condratio))
# Full join keeps every sample present in either table, matched on sample_title.
physicodiversity <- dplyr::full_join(physicochem,diversity,by = "sample_title")
physicodiversity
#### combining dplyr and ggplot2 ####
p1 <- ggplot(data = df, aes(x = Timepoint,y = Cell.density..cells.mL., fill = Cell.density..cells.mL.)) +
geom_point(shape = 21, size = 4)
df.2 <- df %>% filter(Reactor.cycle==2)
# Pipe the filtered data straight into ggplot instead of a temporary object.
p2 <- df %>% filter(Reactor.cycle==2) %>%
ggplot(aes(x = Timepoint,y = Cell.density..cells.mL., fill = Cell.density..cells.mL.)) +geom_point(shape = 21, size = 4)
p2 + scale_y_log10()
|
/dplyrexercises.R
|
no_license
|
KlaasVanderpoorten/SWC_test
|
R
| false
| false
| 3,114
|
r
|
#### load required libraries ####
library(ggplot2)
library(dplyr)
#### load data ####
df <- read.csv("Metadata.csv",nrows=77)
#of na.exclude(df) verwijderd elke rij met een NA
# rowSums(if.na(df)) == ncol(df)
#na.omit() rijen die NA bevatten, worden weggefilterd maar regressiefunctie weet nog dat je die gefilterd hebt
# blank.lines.skip = TRUE als argument bij read.csv .. werkt soms, hangt er van af (stringsAsFactors...)
# the function of the last loaded library is used: eg. function filter from dplyr is used
# and not from stats
# if you want to use the original functions: two times :: ,eg. stats::filter
mean(df[df$Reactor.phase == "Control", "ph"])
#calc mean on dataframe df, subset : df[rows,columns] ,select all rows from column ph
# where reactor.phase is control
levels(df$Reactor.phase)
#### select ####
physicochem <- select(df,ph,temp,Conductivity)
# piping symbol short cut: ctrl shift m, now you can use type completion
physicochem <- df %>% select(ph,temp,Conductivity)
physicochem.control <- df %>%
filter(Reactor.phase == "Control") %>%
select(ph,temp,Conductivity)
physicochem.control
#challenge: select only the diversity parameter for reactor phase startup
diversity <- df %>%
filter(Reactor.phase == "Startup") %>%
select(Diversity...D0,Diversity...D1,Diversity...D2)
#korter:
diversity2 <- df %>%
filter(Reactor.phase == "Startup") %>%
select(contains("Diversity"))
#### grouping by ####
meanph <- df %>%
group_by(Reactor.phase) %>%
summarise(mean.ph = mean(ph),
mean.d2 = mean(Diversity...D2),
sd.ph = sd(ph))
#challenge
#generate a summary for reactor cycle 2 and add standard deviation and
#log10 transformed cell count
df$Reactor.cycle <- as.factor(df$Reactor.cycle)
d2 <- df %>%
filter(Reactor.cycle == "2") %>%
mutate(condratio = Conductivity/temp) %>%
summarise(sd.d2 = sd(Diversity...D2),
avelog10.celldens = mean(log10(Cell.density..cells.mL.)),
mean.condrat = mean(condratio))
df$Reactor.cycle <- as.integer(df$Reactor.cycle)
#### join data sets ####
physicochem <- df %>%
select(sample_title,temp,ph,Conductivity)
diversity <- df %>%
select(sample_title,contains("Diversity"))
meanph <- df %>%
filter(Reactor.cycle == 2) %>%
group_by(Reactor.phase) %>%
mutate(condratio=Conductivity/temp) %>%
summarise(mean.ph = mean(ph),
mean.d2 = mean(Diversity...D2),
sd.ph = sd(ph),
sd.d2 = sd(Diversity...D2),
avelog10.celldens = mean(log10(Cell.density..cells.mL.)),
mean.condrat = mean(condratio))
physicodiversity <- dplyr::full_join(physicochem,diversity,by = "sample_title")
physicodiversity
#### combining dplyr and ggplot2 ####
p1 <- ggplot(data = df, aes(x = Timepoint,y = Cell.density..cells.mL., fill = Cell.density..cells.mL.)) +
geom_point(shape = 21, size = 4)
df.2 <- df %>% filter(Reactor.cycle==2)
p2 <- df %>% filter(Reactor.cycle==2) %>%
ggplot(aes(x = Timepoint,y = Cell.density..cells.mL., fill = Cell.density..cells.mL.)) +geom_point(shape = 21, size = 4)
p2 + scale_y_log10()
|
#' Return the constant string "B" (package identification stub).
#'
#' @return The character scalar "B".
# Use `<-` for assignment rather than `=`, per R style conventions.
foo <- function() {
  "B"
}
|
/B/R/foo.R
|
no_license
|
dsidavis/RPackagesWorkshop
|
R
| false
| false
| 28
|
r
|
#' Return the constant string "B" (package identification stub).
#'
#' @return The character scalar "B".
# Use `<-` for assignment rather than `=`, per R style conventions.
foo <- function() {
  "B"
}
|
#Loading required libraries for cleaning and managing the data
library(dplyr)

# Download and extract the data only if the extracted "UCI HAR Dataset"
# directory is not already in the working directory.
# NOTE: the original script downloaded the archive to a file literally
# named "UCI HAR Dataset", which collides with the directory of the same
# name that unzip() creates; the archive is now saved under a distinct
# file name.
if (!file.exists("UCI HAR Dataset")) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, "UCI_HAR_Dataset.zip")
  unzip("UCI_HAR_Dataset.zip")
}

# Read the feature/activity lookups and the test/train splits once.
# (The original duplicated all eight read.table() calls in two
# if-branches; the data is identical either way.)
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n", "functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")

#1) Merges the training and the test sets to create one data set.
#Merged from subject, y, x. In that respective order the M stands for Merged.
MSubject <- rbind(subject_train, subject_test)
MY <- rbind(y_train, y_test)
MX <- rbind(x_train, x_test)
MData <- cbind(MSubject, MY, MX)

#2) Extracts only the measurements on the mean and standard deviation
# for each measurement, using select() helpers.
MData2 <- select(MData, subject, code, contains("mean"), contains("std"))

#3) Uses descriptive activity names to name the activities in the data set:
# look up each numeric code in the activities table.
MData3 <- MData2
MData3$code <- activities[MData2$code, 2]

#4) Appropriately labels the data set with descriptive variable names
# by expanding the abbreviated sensor-signal tokens.
MData4 <- MData3
names(MData4)[2] <- "activity"
names(MData4) <- gsub("Acc", "Accelerometer", names(MData4))
names(MData4) <- gsub("Gyro", "Gyroscope", names(MData4))
names(MData4) <- gsub("BodyBody", "Body", names(MData4))
names(MData4) <- gsub("Mag", "Magnitude", names(MData4))
names(MData4) <- gsub("^t", "Time", names(MData4))
names(MData4) <- gsub("^f", "Frequency", names(MData4))
names(MData4) <- gsub("tBody", "TimeBody", names(MData4))
names(MData4) <- gsub("-mean()", "Mean", names(MData4), ignore.case = TRUE)
names(MData4) <- gsub("-std()", "STD", names(MData4), ignore.case = TRUE)
names(MData4) <- gsub("-freq()", "Frequency", names(MData4), ignore.case = TRUE)
names(MData4) <- gsub("angle", "Angle", names(MData4))
names(MData4) <- gsub("gravity", "Gravity", names(MData4))

#5) From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
# funs() is defunct in dplyr >= 1.0; pass the function directly.
MData5 <- MData4 %>%
  group_by(subject, activity) %>%
  summarize_all(mean)
write.table(MData5, "TidyData.txt", row.names = FALSE)
str(MData5)
|
/run_analysis.R
|
no_license
|
EPriske/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 4,072
|
r
|
#Loading required libraries for cleaning and managing the data
library(dplyr)
#Downloading the data. First we check if we have the file -
#-already in our working directory. If not we download it.
if(!file.exists("UCI HAR Dataset")){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, "UCI HAR Dataset")
unzip("UCI HAR Dataset")
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
}
if(file.exists("UCI HAR Dataset")){
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
}
#1) Merges the training and the test sets to create one data set.
#Merged from subject, y, x. In that respective order the M stands for Merged.
MSubject <- rbind(subject_train, subject_test)
MY <- rbind(y_train, y_test)
MX <- rbind(x_train, x_test)
MData <- cbind(MSubject, MY, MX)
#2) Extracts only the measurements on the mean and standard deviation for each measurement.
# We use the select() function together with regular expressions.
MData2 <- select(MData, subject, code, contains("mean"), contains("std"))
#3) Uses descriptive activity names to name the activities in the data set
# Here we merge the data from activities to change the numbers to their respective activitie
MData3 <- MData2
MData3$code <- activities[MData2$code, 2]
#4) Appropriately labels the data set with descriptive variable names.
# Here we use the function gsub() together with Regular expressions
# To find out the labels in the columns and rename them with Descriptive
# Variable names
MData4 <- MData3
names(MData4)[2] = "activity"
names(MData4)<-gsub("Acc", "Accelerometer", names(MData4))
names(MData4)<-gsub("Gyro", "Gyroscope", names(MData4))
names(MData4)<-gsub("BodyBody", "Body", names(MData4))
names(MData4)<-gsub("Mag", "Magnitude", names(MData4))
names(MData4)<-gsub("^t", "Time", names(MData4))
names(MData4)<-gsub("^f", "Frequency", names(MData4))
names(MData4)<-gsub("tBody", "TimeBody", names(MData4))
names(MData4)<-gsub("-mean()", "Mean", names(MData4), ignore.case = TRUE)
names(MData4)<-gsub("-std()", "STD", names(MData4), ignore.case = TRUE)
names(MData4)<-gsub("-freq()", "Frequency", names(MData4), ignore.case = TRUE)
names(MData4)<-gsub("angle", "Angle", names(MData4))
names(MData4)<-gsub("gravity", "Gravity", names(MData4))
#5) From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# Here we use the functions group_by() and summarize_all().
MData5 <- MData4
MData5 <- group_by(MData5, subject, activity)
MData5 <- summarize_all(MData5, funs(mean))
write.table(MData5,"TidyData.txt", row.names=F)
str(MData5)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4636
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4635
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4635
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_79_91.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1447
c no.of clauses 4636
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4635
c
c QBFLIB/Basler/terminator/stmt21_79_91.qdimacs 1447 4636 E1 [1] 0 105 1341 4635 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt21_79_91/stmt21_79_91.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 709
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4636
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4635
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4635
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_79_91.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1447
c no.of clauses 4636
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4635
c
c QBFLIB/Basler/terminator/stmt21_79_91.qdimacs 1447 4636 E1 [1] 0 105 1341 4635 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_sections.R
\name{get_sections}
\alias{get_sections}
\title{A function that returns sections delimited by local maxima with height > hmin.}
\usage{
get_sections(sr, hmin, hmax)
}
\arguments{
\item{sr}{tibble with columns l and z describing first profile}
\item{hmin}{the minimum height (relative to local minimum of z) of levees-defining local maxima}
\item{hmax}{the maximum height (relative to minimum of z) of a channel-defining local minima}
}
\value{
hmin
}
\description{
A function that returns sections delimited by local maxima with height > hmin.
}
\examples{
data(s1)
get_sections(s1, hmin=1,hmax=5)
}
|
/man/get_sections.Rd
|
no_license
|
lvaudor/riverbed
|
R
| false
| true
| 695
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_sections.R
\name{get_sections}
\alias{get_sections}
\title{A function that returns sections delimited by local maxima with height > hmin.}
\usage{
get_sections(sr, hmin, hmax)
}
\arguments{
\item{sr}{tibble with columns l and z describing first profile}
\item{hmin}{the minimum height (relative to local minimum of z) of levees-defining local maxima}
\item{hmax}{the maximum height (relative to minimum of z) of a channel-defining local minima}
}
\value{
hmin
}
\description{
A function that returns sections delimited by local maxima with height > hmin.
}
\examples{
data(s1)
get_sections(s1, hmin=1,hmax=5)
}
|
#' Trace coverage at log level.
#'
#' Computes how many of the most frequent distinct traces are needed to
#' reach the requested cumulative coverage of the event log.
#'
#' @param eventlog  An event log object (validated by stop_eventlog()).
#' @param threshold Target cumulative coverage in [0, 1]; defaults to 0.8
#'   with a warning when NULL.
#' @return A tibble with columns number_of_traces and coverage.
trace_coverage_log <- function(eventlog,
                               threshold = NULL) {
  stop_eventlog(eventlog)

  if (is.null(threshold)) {
    warning("Threshold defaulted to 0.8. Use `threshold = x` to adjust this")
    threshold <- 0.8
  }

  tra <- traces(eventlog)

  # Collapse traces by relative frequency (most frequent first) and
  # accumulate coverage over the frequency levels.
  tr <- tra %>%
    arrange(desc(relative_frequency)) %>%
    group_by(relative_frequency) %>%
    summarize(s = sum(relative_frequency)) %>%
    arrange(desc(relative_frequency))
  tr$c <- cumsum(tr$s)

  if (tr$c[1] >= threshold) {
    # The single most frequent level already covers the threshold.
    tr <- tr[1, ]
  } else if (threshold == 1) {
    # Full coverage requires every level; keep only the last row.
    tr <- tr[nrow(tr), ]
  } else {
    # Keep the two adjacent levels that straddle the threshold.
    # seq_len(...)[-1] replaces the unsafe 2:nrow(tr) pattern, and
    # break replaces the original stop-flag bookkeeping.
    for (i in seq_len(nrow(tr))[-1]) {
      if (tr$c[i - 1] <= threshold && tr$c[i] >= threshold) {
        tr <- tr[(i - 1):i, ]
        break
      }
    }
  }

  # For each retained frequency level, count the traces at least that
  # frequent.
  tr$cnt <- 0L
  for (i in seq_len(nrow(tr))) {
    tr$cnt[i] <- nrow(filter(tra, relative_frequency >= tr$relative_frequency[i]))
  }

  tr <- tr %>% select(cnt, c)
  colnames(tr) <- c("number_of_traces", "coverage")
  # tbl_df() is defunct in dplyr >= 1.0; as_tibble() is the replacement.
  tr <- as_tibble(tr)
  return(tr)
}
|
/edeaR/R/trace_coverage_log.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 951
|
r
|
trace_coverage_log <- function(eventlog,
threshold = NULL) {
stop_eventlog(eventlog)
if(is.null(threshold)) {
warning("Threshold defaulted to 0.8. Use `threshold = x` to adjust this")
threshold = 0.8
}
tra <- traces(eventlog)
tr <- tra %>% arrange(desc(relative_frequency)) %>% group_by(relative_frequency) %>% summarize(s = sum(relative_frequency)) %>% arrange(desc(relative_frequency))
tr$c <- cumsum(tr$s)
if(tr$c[1] >= threshold){
tr <- tr[1,]
}
else if(threshold == 1)
tr <- tr[nrow(tr),]
else {
stop = FALSE
for(i in 2:nrow(tr)){
if(!stop && tr$c[i-1] <= threshold && tr$c[i] >= threshold){
tr <- tr[(i-1):i,]
stop = TRUE
}
}
}
tr$cnt <- 0:0
for(i in 1:nrow(tr))
tr$cnt[i] <- nrow(filter(tra, relative_frequency >= tr$relative_frequency[i] ))
tr <- tr %>% select(cnt,c)
colnames(tr) <- c("number_of_traces","coverage")
tr <- tbl_df(tr)
return(tr)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/average_vegpar.R
\name{get_basin_avg}
\alias{get_basin_avg}
\title{Get Basin Average}
\usage{
get_basin_avg(r)
}
\arguments{
\item{r}{Input raster stack of monthly average parameter values}
}
\description{
Calculates the basin average values of a distributed parameter, given monthly maps
}
\details{
This function is not limited to vegetation parameters. It's just a function for calculating the average value of a raster (stack).
}
\examples{
LAI_wt_avg <- vegpar_wt_avg(LAI, CV)
spatial_avg_LAI <- get_basin_avg(LAI_wt_avg)
}
|
/man/get_basin_avg.Rd
|
no_license
|
jschap1/vegpar
|
R
| false
| true
| 606
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/average_vegpar.R
\name{get_basin_avg}
\alias{get_basin_avg}
\title{Get Basin Average}
\usage{
get_basin_avg(r)
}
\arguments{
\item{r}{Input raster stack of monthly average parameter values}
}
\description{
Calculates the basin average values of a distributed parameter, given monthly maps
}
\details{
This function is not limited to vegetation parameters. It's just a function for calculating the average value of a raster (stack).
}
\examples{
LAI_wt_avg <- vegpar_wt_avg(LAI, CV)
spatial_avg_LAI <- get_basin_avg(LAI_wt_avg)
}
|
## MODIFYING THE PREDICT.DIRICHLETREGMODEL FUNCTION TO MAKE IT PREDICT FROM A NEW SET OF COVARIATES
## USED IN THE EN MATRIX REGRESSION PROCESS
## BASE FUNCTION THAT I'VE CHANGED IS FROM HERE: https://github.com/cran/DirichletReg/blob/master/R/predict.DirichletRegModel.R
## Arguments:
##   object       - fitted DirichletRegModel (supplies parametrization, Y,
##                  base category, and the original model formula)
##   newdata      - data.frame of covariates to predict from
##   newcoef      - replacement coefficient list used in place of the fitted
##                  coefficients (this is the modification vs. the original)
##   mu/alpha/phi - flags selecting which prediction components to return
predict.DirichletRegModel.newcoef <- function (object, newdata, newcoef, mu = TRUE, alpha = FALSE, phi = FALSE,
...)
{
## Without newdata, defer to the fitted values of the original model.
if (missing(newdata))
return(fitted(object, mu, alpha, phi))
## "alternative" parametrization = common design matrix for the means plus
## a precision (phi) submodel; otherwise one alpha submodel per dimension.
repar <- object$parametrization == "alternative"
dims <- ncol(object$Y)
## Rebuild the stored model-frame call so it evaluates against newdata,
## with the response (lhs) dropped.
model_formula <- object$mf_formula
model_formula$formula <- as.Formula(deparse(model_formula$formula))
model_formula$data <- as.name("newdata")
model_formula$lhs <- 0
## Pad single-part formulas so the rhs count matches what each
## parametrization expects (mean | precision, or one part per dimension).
if (repar && (length(model_formula$formula)[2L] == 1L)) {
model_formula$formula <- as.Formula(paste0(deparse(model_formula$formula),
" | 1"))
}
if (!repar && (length(model_formula$formula)[2L] == 1L)) {
model_formula$formula <- as.Formula(paste0(deparse(model_formula$formula),
" | ", paste0(rep(deparse(model_formula$formula[[3]]),
dims - 1L), collapse = " | ")))
}
model_formula[["drop.unused.levels"]] <- FALSE
mf <- eval(model_formula)
## Build the design matrices: a list of per-dimension matrices (common
## parametrization) or a mean matrix X plus precision matrix Z (alternative).
if (!repar) {
X <- lapply(seq_len(dims), function(i) {
model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = i)), mf)
})
Z <- NULL
}
else {
X <- model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = 1L)), mf)
Z <- model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = 2L)), mf)
}
## Substitute the supplied coefficients for the fitted ones.
cc <- newcoef
if (repar) {
## Alternative parametrization: linear predictors for all non-base
## dimensions, softmax to get MU, exp(Z b) for PHI, ALPHA = MU * PHI.
base <- object$base
cc[[1L]] <- split(unlist(cc[[1L]]), factor(seq_len(dims))[rep(seq_len(dims)[-base],
each = ncol(X))])
cc[[2L]] <- unlist(cc[[2L]])
ETA <- matrix(0, nrow = nrow(newdata), ncol = dims)
for (i in seq_len(dims)[-base]) {
ETA[, i] <- X %*% cc[[1]][[i]]
}
MU <- exp(ETA)/rowSums(exp(ETA))
PHI <- exp(Z %*% cc[[2L]])
ALPHA <- MU * as.numeric(PHI)
}
else {
## Common parametrization: ALPHA per dimension via exp(X b); PHI is the
## row sum and MU the normalized alphas.
ALPHA <- matrix(0, nrow = nrow(newdata), ncol = dims)
for (i in seq_len(dims)) {
ALPHA[, i] <- exp(X[[i]] %*% cc[[i]])
}
PHI <- rowSums(ALPHA)
MU <- ALPHA/PHI
}
## Return a single component directly, or a named list when several are
## requested.
if (!any(mu || alpha || phi))
stop("Either mu, alpha or phi has to be requested.")
if (sum(mu + alpha + phi) == 1) {
if (mu)
return(MU)
if (alpha)
return(ALPHA)
if (phi)
return(PHI)
}
else {
res <- list()
if (mu)
res[["mu"]] <- MU
if (alpha)
res[["alpha"]] <- ALPHA
if (phi)
res[["phi"]] <- PHI
return(res)
}
}
|
/gbd_2017/nonfatal_code/nonfatal_injuries/prep/en_matrices/dirichlet_predict_fun.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false
| false
| 3,003
|
r
|
## MODIFYING THE PREDICT.DIRICHLETREGMODEL FUNCTION TO MAKE IT PREDICT FROM A NEW SET OF COVARIATES
## USED IN THE EN MATRIX REGRESSION PROCESS
## BASE FUNCTION THAT I'VE CHANGED IS FROM HERE: https://github.com/cran/DirichletReg/blob/master/R/predict.DirichletRegModel.R
predict.DirichletRegModel.newcoef <- function (object, newdata, newcoef, mu = TRUE, alpha = FALSE, phi = FALSE,
...)
{
if (missing(newdata))
return(fitted(object, mu, alpha, phi))
repar <- object$parametrization == "alternative"
dims <- ncol(object$Y)
model_formula <- object$mf_formula
model_formula$formula <- as.Formula(deparse(model_formula$formula))
model_formula$data <- as.name("newdata")
model_formula$lhs <- 0
if (repar && (length(model_formula$formula)[2L] == 1L)) {
model_formula$formula <- as.Formula(paste0(deparse(model_formula$formula),
" | 1"))
}
if (!repar && (length(model_formula$formula)[2L] == 1L)) {
model_formula$formula <- as.Formula(paste0(deparse(model_formula$formula),
" | ", paste0(rep(deparse(model_formula$formula[[3]]),
dims - 1L), collapse = " | ")))
}
model_formula[["drop.unused.levels"]] <- FALSE
mf <- eval(model_formula)
if (!repar) {
X <- lapply(seq_len(dims), function(i) {
model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = i)), mf)
})
Z <- NULL
}
else {
X <- model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = 1L)), mf)
Z <- model.matrix(Formula(terms(model_formula$formula,
data = newdata, rhs = 2L)), mf)
}
cc <- newcoef
if (repar) {
base <- object$base
cc[[1L]] <- split(unlist(cc[[1L]]), factor(seq_len(dims))[rep(seq_len(dims)[-base],
each = ncol(X))])
cc[[2L]] <- unlist(cc[[2L]])
ETA <- matrix(0, nrow = nrow(newdata), ncol = dims)
for (i in seq_len(dims)[-base]) {
ETA[, i] <- X %*% cc[[1]][[i]]
}
MU <- exp(ETA)/rowSums(exp(ETA))
PHI <- exp(Z %*% cc[[2L]])
ALPHA <- MU * as.numeric(PHI)
}
else {
ALPHA <- matrix(0, nrow = nrow(newdata), ncol = dims)
for (i in seq_len(dims)) {
ALPHA[, i] <- exp(X[[i]] %*% cc[[i]])
}
PHI <- rowSums(ALPHA)
MU <- ALPHA/PHI
}
if (!any(mu || alpha || phi))
stop("Either mu, alpha or phi has to be requested.")
if (sum(mu + alpha + phi) == 1) {
if (mu)
return(MU)
if (alpha)
return(ALPHA)
if (phi)
return(PHI)
}
else {
res <- list()
if (mu)
res[["mu"]] <- MU
if (alpha)
res[["alpha"]] <- ALPHA
if (phi)
res[["phi"]] <- PHI
return(res)
}
}
|
# Test whether x looks like a set of 3D points: a two-dimensional array
# (e.g. a matrix) with at least three columns. Returns a single logical.
is.3dpoints <- function(x) {
  is.array(x) && length(dim(x)) == 2 && dim(x)[2] >= 3
}
|
/stpp/R/is.3dpoints.R
|
no_license
|
albrizre/spatstat.revdep
|
R
| false
| false
| 176
|
r
|
# Test whether x looks like a set of 3D points: a two-dimensional array
# (e.g. a matrix) with at least three columns. Returns a single logical.
is.3dpoints <- function(x) {
  is.array(x) && length(dim(x)) == 2 && dim(x)[2] >= 3
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{monthly}
\alias{monthly}
\title{Add a monthly variable to group by.}
\usage{
monthly(df, col)
}
\arguments{
\item{df}{Data frame.}
\item{col}{Column name of Date or POSIXct type.}
}
\value{
Data frame with a new column \code{mo}.
}
\description{
Add a monthly variable to group by.
}
|
/man/monthly.Rd
|
no_license
|
pdrhlik/pdtools
|
R
| false
| true
| 396
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{monthly}
\alias{monthly}
\title{Add a monthly variable to group by.}
\usage{
monthly(df, col)
}
\arguments{
\item{df}{Data frame.}
\item{col}{Column name of Date or POSIXct type.}
}
\value{
Data frame with a new column \code{mo}.
}
\description{
Add a monthly variable to group by.
}
|
# Auto-generated fuzzer reproduction case: replays a recorded argument list
# against the unexported grattan:::anyOutside routine (libFuzzer/valgrind
# harness output). Do not hand-edit the literal values.
testlist <- list(a = -1L, b = -1342221858L, x = c(-55794L, 134898732L, -768856879L, -741081336L, 16751333L, -16187640L, -9605779L, 1835887981L, -16187640L, 1090519031L, -16187640L, -1L, -248L, -16252929L, -16742L, -1L, -1414852865L, 134898732L, -766375926L, -2133718785L, -29044224L, -134217902L, 134921257L, NA, -1138161655L, 65535L, -20481L, -1097138177L, -2945528L, 174337234L, 741134803L, -738260992L, 150931503L, 1499027801L, 1499027801L, 1499027801L, 1499027801L, 134220031L, 1499027801L, 1499027801L, 1499027801L, 235407972L, 751971372L, -774646785L, 134220031L, 134217916L, -450454785L, -1L, -1703937L, -14221414L, -436207617L, 0L, 0L, 8388608L, 89L, 1499027801L, 1493172224L, 8L, 62L, 0L, 0L, 0L, 0L, 16973824L, -1L, -1L, -5566876L))
# Invoke the fuzz target with the recorded arguments.
result <- do.call(grattan:::anyOutside,testlist)
# Print the result structure for comparison with the recorded run.
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610387044-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 813
|
r
|
testlist <- list(a = -1L, b = -1342221858L, x = c(-55794L, 134898732L, -768856879L, -741081336L, 16751333L, -16187640L, -9605779L, 1835887981L, -16187640L, 1090519031L, -16187640L, -1L, -248L, -16252929L, -16742L, -1L, -1414852865L, 134898732L, -766375926L, -2133718785L, -29044224L, -134217902L, 134921257L, NA, -1138161655L, 65535L, -20481L, -1097138177L, -2945528L, 174337234L, 741134803L, -738260992L, 150931503L, 1499027801L, 1499027801L, 1499027801L, 1499027801L, 134220031L, 1499027801L, 1499027801L, 1499027801L, 235407972L, 751971372L, -774646785L, 134220031L, 134217916L, -450454785L, -1L, -1703937L, -14221414L, -436207617L, 0L, 0L, 8388608L, 89L, 1499027801L, 1493172224L, 8L, 62L, 0L, 0L, 0L, 0L, 16973824L, -1L, -1L, -5566876L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
# Draw two independent standard-normal samples of size 100 and show them
# as a scatter plot (repository/collaboration smoke test).
x<-rnorm(100)
y<-rnorm(100)
plot(x,y)
#nikhiltesting
#test
|
/Nikhil Trial/Nikhil's_Code_Test.R
|
no_license
|
wcornwell/bigdatatest
|
R
| false
| false
| 59
|
r
|
# Duplicate copy (scrape artifact): draws two independent standard-normal
# samples and scatter-plots them.
x<-rnorm(100)
y<-rnorm(100)
plot(x,y)
#nikhiltesting
#test
|
# Workbook pipeline driver: each template_function_*.R script, when
# sourced, creates an object named after the file (and presumably persists
# its output as a side effect — TODO confirm against the templates); the
# object is removed immediately afterwards to free memory before the next
# step. Order matters: later steps consume artifacts produced by earlier
# ones, so do not reorder these source() calls.
source("template_function_count_table_corrected.R")
rm(count_table_corrected)
source("template_function_metadata_fixed.R")
rm(metadata_fixed)
source("template_function_remove_ercc.R")
rm(remove_ercc)
source("template_function_lymph_node_filtered.R")
rm(lymph_node_filtered)
source("template_function_DEG_lymph.R")
rm(DEG_lymph)
source("template_function_ld2_go_pathway_up.R")
rm(ld2_go_pathway_up)
source("template_function_ld3_go_pathway_up.R")
rm(ld3_go_pathway_up)
source("template_function_ld2_go_collect.R")
rm(ld2_go_collect)
source("template_function_ld3_go_collect.R")
rm(ld3_go_collect)
source("template_function_FIGS2D_S4E_L_leukocyte_migration_new.R")
rm(FIGS2D_S4E_L_leukocyte_migration_new)
source("template_function_FIGS2E_S4F_L_tcell_activation_new.R")
rm(FIGS2E_S4F_L_tcell_activation_new)
|
/Workbook_2/run_pipeline_R.R
|
no_license
|
NCI-VB/pavlakis_TNBC_hetIL-15
|
R
| false
| false
| 806
|
r
|
# Duplicate copy of the pipeline driver (scrape artifact). Each sourced
# template creates an object named after the file, which is then removed
# to free memory; the source() order is significant.
source("template_function_count_table_corrected.R")
rm(count_table_corrected)
source("template_function_metadata_fixed.R")
rm(metadata_fixed)
source("template_function_remove_ercc.R")
rm(remove_ercc)
source("template_function_lymph_node_filtered.R")
rm(lymph_node_filtered)
source("template_function_DEG_lymph.R")
rm(DEG_lymph)
source("template_function_ld2_go_pathway_up.R")
rm(ld2_go_pathway_up)
source("template_function_ld3_go_pathway_up.R")
rm(ld3_go_pathway_up)
source("template_function_ld2_go_collect.R")
rm(ld2_go_collect)
source("template_function_ld3_go_collect.R")
rm(ld3_go_collect)
source("template_function_FIGS2D_S4E_L_leukocyte_migration_new.R")
rm(FIGS2D_S4E_L_leukocyte_migration_new)
source("template_function_FIGS2E_S4F_L_tcell_activation_new.R")
rm(FIGS2E_S4F_L_tcell_activation_new)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
getallMenu <- function() {
  # Return the full menu as a data frame: a placeholder row, the three
  # dishes, and a "None" row, with calories and hunger effect per item.
  data.frame(
    menuitem     = c("Choose Something", "fish", "chicken", "beef", "None"),
    calories     = c(NA, 100, 200, 300, 0),
    hungereffect = c(NA, 1, 1, 1, 1)
  )
}
getMenu <- function() {
  # Extract a random menu from the database (currently stubbed with a
  # fixed test menu identical to getallMenu()). The result is a data
  # frame of menuitem / calories / hungereffect, including the
  # "Choose Something" placeholder and a "None" option.
  data.frame(
    menuitem     = c("Choose Something", "fish", "chicken", "beef", "None"),
    calories     = c(NA, 100, 200, 300, 0),
    hungereffect = c(NA, 1, 1, 1, 1)
  )
}
# Build the "starving" modal dialog: shown when the player must eat.
# The only exit is the "starvingok" action button, whose click is
# handled by the Shiny server logic elsewhere.
starvingModal <- function(){
modalDialog(
title = "You are starving. You have no choice but to stuff your face with food.",
footer = tagList(
actionButton("starvingok", "OK")
)
)
}
# Build the menu-selection modal dialog.
# @param failed when TRUE, a red "Please choose an item" message is shown
#   (used on re-display after the player confirmed without selecting).
# The item list comes from getMenu()'s first column; confirmation is via
# the "menuok" action button, handled by the server elsewhere.
menuModal <- function(failed = FALSE){
modalDialog(
title = "Menu",
selectInput("menuitem","Select Menu item:",getMenu()[,1]),
# when failed is FALSE this if-expression yields NULL, which modalDialog ignores
if (failed)
div(tags$b("Please choose an item", style = "color: red;")),
footer = tagList(
actionButton("menuok", "OK")
)
)
}
# Classify the board tile at (row, col): returns 0 for an event tile,
# otherwise -1. Relies on the global `eventlist`, a vector of "row col"
# strings (space-separated, as produced by paste()).
# NOTE(review): the comment below says tile 1 means restaurant, but no
# branch here ever returns 1 — presumably handled elsewhere or
# unfinished; confirm before relying on the restaurant case.
checktile <- function(row,col){
tile <- -1
playerpos <- paste(row,col)
if (playerpos %in% eventlist) {tile <- 0}
#tile 0 means event, tile 1 means restaurant
tile
}
getSingleStepLocation <- function(gridrow, gridcol) {
  # Advance one step clockwise around the border of a GRIDSIZE x GRIDSIZE
  # board (GRIDSIZE is a global). Interior cells on the left/right edges
  # move up/down; top and bottom rows sweep right/left respectively.
  # Returns list(row = , col = ) for the next cell.
  nxt_row <- gridrow
  nxt_col <- gridcol
  if (gridrow == GRIDSIZE) {
    # bottom row: sweep left until column 1, then turn upward
    if (gridcol > 1) nxt_col <- gridcol - 1 else nxt_row <- gridrow - 1
  } else if (gridrow == 1) {
    # top row: sweep right until the last column, then turn downward
    if (gridcol < GRIDSIZE) nxt_col <- gridcol + 1 else nxt_row <- gridrow + 1
  } else if (gridcol == GRIDSIZE) {
    # right edge (interior row): move down
    nxt_row <- gridrow + 1
  } else {
    # left edge assumed (gridcol == 1): move up
    nxt_row <- gridrow - 1
  }
  list(row = nxt_row, col = nxt_col)
}
# Move the red piece dieNumber steps clockwise around the board and
# return the updated pieces matrix.
# @param pieces a GRIDSIZE x GRIDSIZE matrix of cell codes (globals
#   CELLRED / CELLEMPTY mark contents) — assumed exactly one CELLRED cell;
#   which() returning 0 or >1 hits would break the index math below.
# @param dieNumber number of single clockwise steps to take.
updateBoardState <- function(pieces,dieNumber){
# For each game variant, there is exactly one piece of each color (red and blue) on the board.
# Find the cell with the piece whose turn is next
#targetcontent <- CELLRED
#misscontent <- CELLBLUE
#if (playerturn==BLUETURN) {
# targetcontent <- CELLBLUE
# misscontent <- CELLRED
#}
# Convert the column-major linear index from which() back to (row, col).
locationindex <- which(pieces == CELLRED)
gridcol <- as.integer((locationindex-1)/GRIDSIZE)+1
gridrow <- locationindex - (gridcol-1)*GRIDSIZE
# Now we know the gridrow and gridcol where the piece is located
# Remove the piece from that location
pieces[gridrow,gridcol] <- CELLEMPTY
newlocation <- list(row=gridrow,col=gridcol)
# MONOPOLY
# Walk one border step at a time, dieNumber times.
while (dieNumber>0) {
newlocation <- getSingleStepLocation(newlocation$row,newlocation$col)
dieNumber <- dieNumber -1
}
# Update the newlocation with the pieces that are there
gridrow <- newlocation$row
gridcol <- newlocation$col
pieces[gridrow,gridcol] <- CELLRED
# Return the pieces matrix
pieces
}
getImageId <- function(gridrow, gridcol, boardstate) {
  # The image shown in a cell is simply the piece code stored at that
  # position in the board state's pieces matrix.
  boardstate$pieces[gridrow, gridcol]
}
getImageStyle <- function(gridrow, gridcol, boardstate) {
  # Currently every cell gets the same blue border; the commented-out
  # line shows how a per-cell highlight would be applied instead.
  # if (boardstate$highlights[gridrow, gridcol]) imgstyle <- "border: 2px solid blue;"
  "border: 2px solid blue;"
}
|
/maxcode.R
|
no_license
|
max-koh/ESA-2021-Team10
|
R
| false
| false
| 3,680
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
getallMenu <- function(){
#extract a df of all menu items
restaurantmenu <- data.frame(menuitem = c("Choose Something","fish","chicken","beef","None"),calories = c(NA,100,200,300,0), hungereffect = c(NA,1,1,1,1))
restaurantmenu
}
getMenu <- function(){
# Extract random menu from database
# output will be dataframe containing menuitem,calories,hungereffect and append a none option and null option
#test menu
restaurantmenu <- data.frame(menuitem = c("Choose Something","fish","chicken","beef","None"),calories = c(NA,100,200,300,0), hungereffect = c(NA,1,1,1,1))
restaurantmenu
}
starvingModal <- function(){
modalDialog(
title = "You are starving. You have no choice but to stuff your face with food.",
footer = tagList(
actionButton("starvingok", "OK")
)
)
}
menuModal <- function(failed = FALSE){
modalDialog(
title = "Menu",
selectInput("menuitem","Select Menu item:",getMenu()[,1]),
if (failed)
div(tags$b("Please choose an item", style = "color: red;")),
footer = tagList(
actionButton("menuok", "OK")
)
)
}
checktile <- function(row,col){
tile <- -1
playerpos <- paste(row,col)
if (playerpos %in% eventlist) {tile <- 0}
#tile 0 means event, tile 1 means restaurant
tile
}
getSingleStepLocation <- function(gridrow,gridcol){
# We assume that the direction is clockwise
newgridrow <- gridrow
newgridcol <- gridcol
if (gridrow==GRIDSIZE){
if (gridcol>1){
newgridcol <- gridcol-1
} else newgridrow <- gridrow-1
}else{if (gridrow==1){
if (gridcol<GRIDSIZE){
newgridcol <- gridcol+1
} else newgridrow <- gridrow+1
}else {# gridrow is neither 1 nor GRIDSIZE
if (gridcol==GRIDSIZE){
newgridrow <- gridrow+1
} else { # Assume gridcol==1
newgridrow <- gridrow-1
}
}
}
newlocation <- list(row=newgridrow,col=newgridcol)
newlocation
}
updateBoardState <- function(pieces,dieNumber){
# For each game variant, there is exactly one piece of each color (red and blue) on the board.
# Find the cell with the piece whose turn is next
#targetcontent <- CELLRED
#misscontent <- CELLBLUE
#if (playerturn==BLUETURN) {
# targetcontent <- CELLBLUE
# misscontent <- CELLRED
#}
locationindex <- which(pieces == CELLRED)
gridcol <- as.integer((locationindex-1)/GRIDSIZE)+1
gridrow <- locationindex - (gridcol-1)*GRIDSIZE
# Now we know the gridrow and gridcol where the piece is located
# Remove the piece from that location
pieces[gridrow,gridcol] <- CELLEMPTY
newlocation <- list(row=gridrow,col=gridcol)
# MONOPOLY
while (dieNumber>0) {
newlocation <- getSingleStepLocation(newlocation$row,newlocation$col)
dieNumber <- dieNumber -1
}
# Update the newlocation with the pieces that are there
gridrow <- newlocation$row
gridcol <- newlocation$col
pieces[gridrow,gridcol] <- CELLRED
# Return the pieces matrix
pieces
}
getImageId <- function(gridrow,gridcol,boardstate){
imageid <- boardstate$pieces[gridrow,gridcol]
imageid
}
getImageStyle <- function(gridrow,gridcol,boardstate){
imgstyle <- "border: 2px solid blue;"
# If the cell should be highlighted draw a blue border around it
#if (boardstate$highlights[gridrow,gridcol])imgstyle <- "border: 2px solid blue;"
imgstyle
}
|
# Unit tests for the C++ backend of CMF.
# NOTE(review): these tests are deliberately order-dependent — `d` and
# `res` are assigned with <<- so later test_that() blocks can reuse them.
# Re-running individual tests in isolation will fail on the missing
# globals; keep the execution order intact.
context("Data Generation")
test_that("Data generation works", {
# generate data
set.seed(45)
apaths <- runif(100, -1, 1)
bpaths <- runif(100, -1, 1)
# NOTE(review): dir = F uses the reassignable alias F — prefer FALSE.
d <<- generateMed(50, apaths, bpaths, r2y = .6, dir = F)
expect_s3_class(d, "data.frame")
})
test_that("Complex data generation works", {
set.seed(45)
p <- 16
# Build a random correlation matrix with a geometric eigenvalue decay.
P <- qr.Q(qr(matrix(rnorm(p^2), p))) # eigenvectors
rate <- 1.1
e <- (rate^(p:1)/rate*p)/sum(rate^(p:1)/rate) # eigenvalues sum to p
S <- cov2cor(crossprod(P, P * e))
apaths <- c(0.3, rep(0, 15))
bpaths <- c(0.3, sign(S)[-1,1]*c(rep(0.8, 3), rep(0.4, 12)))
Sigma <- diag(1 - apaths^2)
S <- S * tcrossprod(diag(Sigma))
diag(S) <- 0
Sigma <- Sigma + S
rsquared <- 0.5
d <- generateMed(n = 400,
a = apaths,
b = bpaths,
Sigma = Sigma,
residual = TRUE,
r2y = rsquared,
empirical = TRUE)
expect_s3_class(d, "data.frame")
})
context("CPP backend Product of Coefficients")
test_that("Single-core prodcoef cmf works", {
# `res` is promoted to a global for the method tests below.
res <<- cmf(
d,
nStarts = 100,
decisionFunction = "prodcoef",
nCores = 1,
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Multi-core prodcoef cmf works", {
res <<- cmf(
d,
nStarts = 400,
decisionFunction = "prodcoef",
nCores = parallel::detectCores(),
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Update method works", {
oldNstarts <- res$call$nStarts
res <- update(res, 100)
expect_equal(res$call$nStarts, oldNstarts + 100)
})
test_that("Print and summary methods work", {
ptest <- capture_output_lines(print(res))
expect_equal(ptest[2], "CMF Algorithm Results")
stest <- capture_output_lines(summary(res))
expect_equal(stest[2], "CMF Algorithm Results")
})
test_that("Screeplot method works", {
# Render to a temporary png; a file larger than an empty device's
# output (318 bytes) is taken as evidence that something was drawn.
fn <- tempfile(fileext = ".png")
png(fn)
screeplot(res, topn = 100)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Plot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
plot(res)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Cutoff setting works", {
res <- setCutoff(res, cutoff = 0.1)
expect_equal(res$call$cutoff, .1)
})
test_that("Adding method works", {
res1 <- res
res2 <- res
res3 <- res1 + res2
expect_equal(res3$selectionRate, (res1$selectionRate + res2$selectionRate)/2)
})
# Same battery of tests repeated for the causal-steps decision function.
context("CPP backend Causal Steps")
test_that("Single-core csteps cmf works", {
res <<- cmf(
d,
nStarts = 100,
decisionFunction = "causalsteps",
nCores = 1,
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Multi-core csteps cmf works", {
res <<- cmf(
d,
nStarts = 400,
decisionFunction = "causalsteps",
nCores = parallel::detectCores(),
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Update method works", {
oldNstarts <- res$call$nStarts
res <- update(res, 100)
expect_equal(res$call$nStarts, oldNstarts + 100)
})
test_that("Print and summary methods work", {
ptest <- capture_output_lines(print(res))
expect_equal(ptest[2], "CMF Algorithm Results")
stest <- capture_output_lines(summary(res))
expect_equal(stest[2], "CMF Algorithm Results")
})
test_that("Screeplot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
screeplot(res, topn = 100)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Plot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
plot(res)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Cutoff setting works", {
res <- setCutoff(res, cutoff = 0.1)
expect_equal(res$call$cutoff, .1)
})
test_that("Adding method works", {
res1 <- res
res2 <- res
res3 <- res1 + res2
expect_equal(res3$selectionRate, (res1$selectionRate + res2$selectionRate)/2)
})
|
/tests/testthat/test_01_cpp_backend.R
|
permissive
|
guhjy/cmfilter
|
R
| false
| false
| 3,974
|
r
|
# Unit tests for cpp backend of CMF\
context("Data Generation")
test_that("Data generation works", {
# generate data
set.seed(45)
apaths <- runif(100, -1, 1)
bpaths <- runif(100, -1, 1)
d <<- generateMed(50, apaths, bpaths, r2y = .6, dir = F)
expect_s3_class(d, "data.frame")
})
test_that("Complex data generation works", {
set.seed(45)
p <- 16
P <- qr.Q(qr(matrix(rnorm(p^2), p))) # eigenvectors
rate <- 1.1
e <- (rate^(p:1)/rate*p)/sum(rate^(p:1)/rate) # eigenvalues sum to p
S <- cov2cor(crossprod(P, P * e))
apaths <- c(0.3, rep(0, 15))
bpaths <- c(0.3, sign(S)[-1,1]*c(rep(0.8, 3), rep(0.4, 12)))
Sigma <- diag(1 - apaths^2)
S <- S * tcrossprod(diag(Sigma))
diag(S) <- 0
Sigma <- Sigma + S
rsquared <- 0.5
d <- generateMed(n = 400,
a = apaths,
b = bpaths,
Sigma = Sigma,
residual = TRUE,
r2y = rsquared,
empirical = TRUE)
expect_s3_class(d, "data.frame")
})
context("CPP backend Product of Coefficients")
test_that("Single-core prodcoef cmf works", {
res <<- cmf(
d,
nStarts = 100,
decisionFunction = "prodcoef",
nCores = 1,
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Multi-core prodcoef cmf works", {
res <<- cmf(
d,
nStarts = 400,
decisionFunction = "prodcoef",
nCores = parallel::detectCores(),
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Update method works", {
oldNstarts <- res$call$nStarts
res <- update(res, 100)
expect_equal(res$call$nStarts, oldNstarts + 100)
})
test_that("Print and summary methods work", {
ptest <- capture_output_lines(print(res))
expect_equal(ptest[2], "CMF Algorithm Results")
stest <- capture_output_lines(summary(res))
expect_equal(stest[2], "CMF Algorithm Results")
})
test_that("Screeplot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
screeplot(res, topn = 100)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Plot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
plot(res)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Cutoff setting works", {
res <- setCutoff(res, cutoff = 0.1)
expect_equal(res$call$cutoff, .1)
})
test_that("Adding method works", {
res1 <- res
res2 <- res
res3 <- res1 + res2
expect_equal(res3$selectionRate, (res1$selectionRate + res2$selectionRate)/2)
})
context("CPP backend Causal Steps")
test_that("Single-core csteps cmf works", {
res <<- cmf(
d,
nStarts = 100,
decisionFunction = "causalsteps",
nCores = 1,
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Multi-core csteps cmf works", {
res <<- cmf(
d,
nStarts = 400,
decisionFunction = "causalsteps",
nCores = parallel::detectCores(),
pb = FALSE
)
expect(inherits(res, "cmf"), "Result is not of class CMF")
})
test_that("Update method works", {
oldNstarts <- res$call$nStarts
res <- update(res, 100)
expect_equal(res$call$nStarts, oldNstarts + 100)
})
test_that("Print and summary methods work", {
ptest <- capture_output_lines(print(res))
expect_equal(ptest[2], "CMF Algorithm Results")
stest <- capture_output_lines(summary(res))
expect_equal(stest[2], "CMF Algorithm Results")
})
test_that("Screeplot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
screeplot(res, topn = 100)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Plot method works", {
fn <- tempfile(fileext = ".png")
png(fn)
plot(res)
dev.off()
expect_gt(file.size(fn), 318)
})
test_that("Cutoff setting works", {
res <- setCutoff(res, cutoff = 0.1)
expect_equal(res$call$cutoff, .1)
})
test_that("Adding method works", {
res1 <- res
res2 <- res
res3 <- res1 + res2
expect_equal(res3$selectionRate, (res1$selectionRate + res2$selectionRate)/2)
})
|
#' Find the three best-performing and worst-performing commodities
#'
#' This function takes in a data set and returns the best three and worst three
#' commodity names and their respective pnls. All these data will be presented
#' as a formatted table.
#'
#' @param x A data frame that contains data for individual commodities. Must
#'   contain at least the columns \code{name} and \code{pnl}.
#'
#' @return output A list with elements \code{best.3} and \code{worst.3}, each a
#'   data frame whose first row is a header row ("Instruments" / "P&L ($)") and
#'   whose remaining rows (up to three) are the instrument names and their
#'   total pnls formatted as character.
best_worst_three <- function(x){
  message(paste0("best_worst_three with parameter: \n", head(x), "\n\n"))
  ## Initiate so that CMD Check won't release notes
  name <- pnl <- desc <- NULL
  ## Initiate a list to contain all outputs, and the first rows of the two output
  ## data frames (these first rows act as table headers)
  output <- best.frame <- worst.frame <- list()
  best.frame[[1]] <- data.frame(Instruments = "Instruments", pnl = "P&L ($)")
  worst.frame[[1]] <- data.frame(Instruments = "Instruments", pnl = "P&L ($)")
  ## Select columns of the name and pnl in the data frame. Then group the data
  ## set by name, summarize pnl for individual instruments, and order instruments
  ## by pnl value (best first)
  x <- x %>%
    select(pnl, name) %>%
    group_by(name) %>%
    summarise(pnl = sum(pnl, na.rm = TRUE)) %>%
    ungroup() %>%
    arrange(desc(pnl))
  ## Take out the best/worst performers, then the second best/worst and the
  ## third. If there are less than three instruments available, the function
  ## will only look at all the available ones.
  ## BUG FIX: use seq_len() rather than 1:min(3, ...) — with a zero-row
  ## input, 1:0 yields c(1, 0) and the loop body would index out of range;
  ## seq_len(0) correctly skips the loop. (After summarise(), names are
  ## unique, so nrow(x) equals the previous length(unique(x$name)).)
  n <- nrow(x)
  for(i in seq_len(min(3, n))){
    best.frame[[i+1]] <- data.frame(Instruments = x$name[i],
                                    pnl = as.character(x$pnl[i]))
    worst.frame[[i+1]] <- data.frame(Instruments = x$name[n - i + 1],
                                    pnl = as.character(x$pnl[n - i + 1]))
  }
  output$best.3 <- do.call("rbind", best.frame)
  output$worst.3 <- do.call("rbind", worst.frame)
  return(output)
}
|
/R/best_worst_three.R
|
no_license
|
rynkwn/backtestGraphics
|
R
| false
| false
| 1,977
|
r
|
#' Find the three best-performing and worst-performing commodities
#'
#' This function takes in a data set and returns the best three and worst three
#' commodity names and their respective pnls. All these data will be presented
#' as a formatted table.
#'
#' @param x A data frame that contains data for individual commodities.
#'
#' @return output A list of the best three and worst three performers with their
#' commodity names, ids and respective pnls
## NOTE(review): this is a byte-duplicate of the copy earlier in this
## concatenated dump; if one copy is changed, keep the other in sync.
best_worst_three <- function(x){
message(paste0("best_worst_three with parameter: \n", head(x), "\n\n"))
## Initiate so that CMD Check won't release notes
name <- pnl <- desc <- NULL
## Initiate a list to contain all outputs, and the first rows of the two output
## data frames
output <- best.frame <- worst.frame <- list()
best.frame[[1]] <- data.frame(Instruments = "Instruments", pnl = "P&L ($)")
worst.frame[[1]] <- data.frame(Instruments = "Instruments", pnl = "P&L ($)")
## Select columns of the name and pnl in the data frame. Then group the data
## set by name, summarize pnl for individual instruments, and order instruments
## by pnl value
x <- x %>%
select(pnl, name) %>%
group_by(name) %>%
summarise(pnl = sum(pnl, na.rm = TRUE)) %>%
ungroup() %>%
arrange(desc(pnl))
## Take out the best/worst performers, then the second best/worst and the
## third. If there are less than three instruments available, the function
## will only look at all the available ones
## NOTE(review): with a zero-row x, 1:min(3, 0) is c(1, 0), so the loop
## still runs and indexes out of range — consider seq_len(min(3, n)).
n <- nrow(x)
for(i in 1:min(3, length(unique(x$name)))){
best.frame[[i+1]] <- data.frame(Instruments = x$name[i],
pnl = as.character(x$pnl[i]))
worst.frame[[i+1]] <- data.frame(Instruments = x$name[n - i + 1],
pnl = as.character(x$pnl[n - i + 1]))
}
output$best.3 <- do.call("rbind", best.frame)
output$worst.3 <- do.call("rbind", worst.frame)
return(output)
}
|
###########################################################################################################
############################ Exploring data for indiviudal bid offers ####################################
###########################################################################################################
# This code specifies the descriptive statistics used for the indiviudal data for AMO Border.
# 1.0 Set working directory and load the data ----
# NOTE(review): hard-coded absolute setwd() makes the script
# machine-specific — consider relative paths or here::here().
setwd ("C:/Users/wwainwright/Documents/R/Zambia_Analysis")
# Individual-level bid-offer data, one row per respondent. Column schema
# (AGE, COMMUNITY, GMA, USDHA, ...) is assumed from later usage — TODO
# confirm against the CSV.
Zambia <- read.csv("C:/Users/wwainwright/Documents/R/Zambia_Analysis/AllData/INDIVIDUALAll.csv")
# 2.0 Load the packages ----
# Load packages
library(tidyr)
library(dplyr)
library(ggplot2)
library(readr)
library(gridExtra)
library(scales)
library(psych)
library(corrplot)
# 3.0 Explore the data ----
names(Zambia)
show(Zambia)
# 4.0 summary of all data ----
summary(Zambia$AGE)
summary(Zambia$COMMUNITY)
summary(Zambia$PROVINCE)
summary(Zambia$ECOREGION)
summary(Zambia$GMA)
summary(Zambia$FARMSIZE)
summary(Zambia$MALE)
summary(Zambia$FEMALE)
summary(Zambia$PLOTS)
summary(Zambia$HA)
summary(Zambia$PROPORTION)
summary(Zambia$USD)
summary(Zambia$USDHA)
summary(Zambia$USDPLOT)
# 5.0 Aggregate the data for summary stats----
# GMA / non-GMA sites
aggregate(Zambia[, 10:20], list(Zambia$GMA), mean)
# Ecoregion 1 / Ecoregion 2
aggregate(Zambia[, 10:20], list(Zambia$ECOREGION), mean)
# Male / Female
aggregate(Zambia[, 10:20], list(Zambia$MALE), mean)
# 6.0 Subset data into GMA and non-GMA / Ecoregion 1 and Ecoregion 2 ----
GMA <- Zambia[Zambia$GMA == "1" ,]
nonGMA <- Zambia[Zambia$GMA == "0" ,]
Eco1 <- Zambia[Zambia$ECOREGION == "1" ,]
Eco2 <- Zambia[Zambia$ECOREGION == "2" ,]
# Makes all the 0 values in data sheet be N/A
Zambia[Zambia==0]<-NA
GMA[GMA==0]<-NA
nonGMA[nonGMA==0]<-NA
Eco1[Eco1==0]<-NA
Eco2[Eco2==0]<-NA
# Summary of data
summary(GMA)
# 7.0 t-test (Ecoregion and GMA differences) ----
# Using a Fisher's F-test to verify the homoskedasticity (homogeneity of variances).
var.test(GMA$FarmSize, nonGMA$FarmSize) # Sig diff
var.test(GMA$Age, nonGMA$Age) # Not sig
var.test(GMA$Plots, nonGMA$Plots) # Not sig
var.test(GMA$Area, nonGMA$Area) # Sig dif
var.test(GMA$Proportion, nonGMA$Proportion) # Not sig
var.test(GMA$Bidoffer, nonGMA$Bidoffer) # Sig dif
var.test(GMA$PriceHa, nonGMA$PriceHa) # Sig dif
var.test(GMA$PricePlot, nonGMA$PricePlot) # Sig dif
var.test(GMA$RichnessIndex, nonGMA$RichnessIndex) #Sig dif
var.test(GMA$AveSizePlot, nonGMA$Averagesizeplot) #Sig dif
# independent 2-sample t-test for GMA differences
t.test(GMA$FarmSize, nonGMA$FarmSize, var.equal=FALSE, paired=FALSE)
t.test(GMA$Age, nonGMA$Age, var.equal=TRUE, paired=FALSE)
t.test(GMA$Plots, nonGMA$Plots, var.equal=TRUE, paired=FALSE)
t.test(GMA$Area, nonGMA$Area, var.equal=FALSE, paired=FALSE)
t.test(GMA$Proportion, nonGMA$Proportion, var.equal=TRUE, paired=FALSE)
t.test(GMA$Bidoffer, nonGMA$Bidoffer, var.equal=FALSE, paired=FALSE)
t.test(GMA$PriceHa, nonGMA$PriceHa, var.equal=FALSE, paired=FALSE)
t.test(GMA$PricePlot, nonGMA$PricePlot, var.equal=FALSE, paired=FALSE)
t.test(GMA$RichnessIndex, nonGMA$RichnessIndex, var.equal=FALSE, paired=FALSE)
t.test(GMA$Averagesizeplot, nonGMA$Averagesizeplot, var.equal=FALSE, paired=FALSE)
# 8.0 Standard deviations of parameters ----
# GMA
sd(GMA$FarmSize)
sd(GMA$Age, na.rm=TRUE)
sd(GMA$Plots, na.rm=TRUE)
sd(GMA$Area)
sd(GMA$Proportion, na.rm=TRUE)
sd(GMA$Bidoffer)
sd(GMA$PriceHa)
sd(GMA$PricePlot, na.rm=TRUE)
sd(GMA$RichnessIndex)
sd(GMA$AveSizePlot, na.rm=TRUE)
# non-GMA
sd(nonGMA$FarmSize)
sd(nonGMA$Age, na.rm=TRUE)
sd(nonGMA$Plots, na.rm=TRUE)
sd(nonGMA$Area)
sd(nonGMA$Proportion, na.rm=TRUE)
sd(nonGMA$Bidoffer)
sd(nonGMA$PriceHa)
sd(nonGMA$PricePlot, na.rm=TRUE)
sd(nonGMA$RichnessIndex)
sd(nonGMA$AveSizePlot, na.rm=TRUE)
# 9.0 Bar Plot farmer bids as cost per hectare for GMA and non-GMA sites----
# Plot all farmer bids from GMA sites
(wbar1 <- ggplot(GMA, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer GMA sites") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Plot all farmer bids from nonGMA sites
(wbar2 <- ggplot(nonGMA, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer non-GMA sites") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
## Arrange the two plots into a Panel
limits <- c(0, 1500)
breaks <- seq(limits[1], limits[2], by=100)
# assign common axis to both plots
wbar1.common.y <- wbar1 + scale_y_continuous(limits=limits, breaks=breaks)
wbar2.common.y <- wbar2 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wbar1.common.y <- ggplot_gtable(ggplot_build(wbar1.common.y))
wbar2.common.y <- ggplot_gtable(ggplot_build(wbar2.common.y))
# copy the plot height from p1 to p2
wbar1.common.y$heights <- wbar2.common.y$heights
# Display
grid.arrange(wbar1.common.y,wbar2.common.y,ncol=2,widths=c(11,9))
# 10.0 Supply curve of farmer bids as cost per hectare for Ecoregion1 and Ecoregion2 ----
# Creating an object called x, based on x variables, and then plotting in a model, ordered by Price per Ha
x = Zambia %>%
select(COMMUNITYID, USDHA, Gender, RESPONDENT, GMASite, EcoregionSite)
p <- x %>%
mutate(RESPONDENT=reorder(RESPONDENT, USDHA)) %>%
ggplot(aes(RESPONDENT, USDHA,colour=GMASite, group=1)) +
geom_point() +
labs(x="Farmer bidding in tender", y="Price per hectare (USD)")+
#ggtitle("B) Size of farms selected for conservation services")+
facet_wrap(~EcoregionSite) +
theme(#axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line = element_line(colour = "black"),
panel.background = element_blank(),
legend.position = c(.02, .98),
legend.justification = c("left", "top"),
legend.box.just = "left",
legend.margin = margin(6, 6, 6, 6) ,legend.direction="horizontal",
legend.title = element_blank())
p
# 11.0 Bar Plot farmer bids as cost per hectare for Ecoregion1 and Ecoregion2 ----
(wbar3 <- ggplot(Eco1, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer Ecoregion 1") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Plot all farmer bids for Ecoregion 2
(wbar4 <- ggplot(Eco2, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer Ecoregion 2") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Arrange the two plots into a Panel
limits <- c(0, 1500)
breaks <- seq(limits[1], limits[2], by=100)
# assign common axis to both plots
wbar3.common.y <- wbar3 + scale_y_continuous(limits=limits, breaks=breaks)
wbar4.common.y <- wbar4 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wbar3.common.y <- ggplot_gtable(ggplot_build(wbar3.common.y))
wbar4.common.y <- ggplot_gtable(ggplot_build(wbar4.common.y))
# copy the plot height from p1 to p2
wbar3.common.y$heights <- wbar4.common.y$heights
# Display
grid.arrange(wbar3.common.y,wbar4.common.y,ncol=2,widths=c(11,9))
# 12.0 Boxplot ----
# Box plot of community farmer bids (US/ha) for GMA/non-GMA sites
(box1 <- ggplot (Zambia, aes(COMMUNITY, USDHA)) + geom_boxplot(aes(fill=GMA))+
geom_point()+
ylab("Total cost per hectare (USD))") +
xlab("\nCommunity") +
guides(fill=guide_legend(title="GMA or non-GMA sites")) +
theme(axis.text.x=element_text(size=11, angle=90, vjust=1, hjust=1)))
# Ordered box plot of individual community bids (US/ha) for GMA/non-GMA sites
(box2 <- ggplot (Zambia, aes(x=reorder(COMMUNITY, USDHA, FUN=median),y=USDHA)) + geom_boxplot(aes(fill=GMA2))+
ylab("Total cost per hectare (USD))") +
xlab("\nCommunity") +
guides(fill=guide_legend(title="GMA or non-GMA sites")) +
theme(
axis.text.x=element_text(size=11, angle=90, vjust=1, hjust=1),
axis.line = element_line(color="black", size = 0.1),
panel.background = element_blank()))
# 13.0 Bar plot and trendline of farmer proportions enrolled for conservation services relative ----
# Ecoregion 1
(wline <- ggplot(Eco1, aes(x=reorder(FARMSIZE, PROPORTION), y=PROPORTION)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Proportion (%) of land enrolled in bid offers") +
xlab("Farm size (ha)") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Ecoregion 2
(wline2 <- ggplot(Eco2, aes(x=reorder(FARMSIZE, PROPORTION), y=PROPORTION)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Proportion (%) of land enrolled in bid offers") +
xlab("Farm size (ha)") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Arrange the two plots into a Panel
limits <- c(0, 100)
breaks <- seq(limits[1], limits[2], by=25)
# assign common axis to both plots
wline.common.y <- wline + scale_y_continuous(limits=limits, breaks=breaks)
wline2.common.y <- wline2 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wline.common.y <- ggplot_gtable(ggplot_build(wline.common.y))
wline2.common.y <- ggplot_gtable(ggplot_build(wline2.common.y))
# copy the plot height from p1 to p2
wline.common.y$heights <- wline2.common.y$heights
# Display
grid.arrange(wline.common.y,wline2.common.y,ncol=2,widths=c(10,10))
# 14.0 Regression analysis ----
# Linear models of the bid offer price (USD) against plot/farm covariates,
# fitted on the full data and on the GMA / non-GMA and Ecoregion subsets.
# Overarching regression model using bid offer price
mod.1 <- lm(formula = USD ~ HA + PLOTS + ECOREGION + GMA + FARMSIZE, data = Zambia)
summary(mod.1)
# Plot of all regressions.
# BUG FIX: `Zamcor` is only created in section 15.0 below, so an
# unconditional plot(Zamcor) here aborted a top-to-bottom run with
# "object 'Zamcor' not found". Guard the call so the pairs plot is drawn
# only when Zamcor already exists (e.g. on a re-run after section 15.0).
if (exists("Zamcor")) {
  plot(Zamcor, pch = 16, col = "blue")
}
# Other regression models
# GMA using bid offer price
mod.2 <- lm(formula = USD ~ HA + ECOREGION + PROPORTION + FARMSIZE + RICHNESSINDEX, data = GMA)
summary(mod.2)
# non-GMA using bid offer price
mod.3 <- lm(formula = USD ~ HA + PROPORTION + FARMSIZE, data = nonGMA)
summary(mod.3)
# Ecoregion 1 using bid offer price
mod.4 <- lm(formula = USD ~ HA + GMA + PROPORTION + RICHNESSINDEX, data = Eco1)
summary(mod.4)
# Ecoregion 2 using bid offer price
mod.5 <- lm(formula = USD ~ HA + FARMSIZE, data = Eco2)
summary(mod.5)
# 15.0 Correlation matrix analysis ----
# Create data frame of variables with selected columns using column indices.
# NOTE(review): these magic indices assume a fixed column order in the CSV;
# selecting by column name would be far less brittle — confirm the mapping
# against the colnames() assignment below (Price/ha, Ecoregion, ..., Bid offer).
Zamcor <- Zambia[,c(14,22,4,26,28,12,11,8,7,5,3)]
names(Zamcor)
# Replace missing values with 0 before correlating (changes the estimates
# relative to pairwise deletion — deliberate? TODO confirm).
Zamcor[is.na(Zamcor)] <- 0
# Group correlation test
corr.test(Zamcor[1:11])
# Visualisations
pairs.panels(Zamcor[1:11])
# Simple visualisation of correlation analysis effect size including significance
x <- cor(Zamcor[1:11])
# NOTE(review): this cor() call's result is discarded (printed only).
cor(x,use="pairwise.complete.obs")
colnames (x) <- c("Price/ha", "Ecoregion 1", "Socio-status index", "GMA", "Farm size", "Gender", "Age", "Plots", "Area", "Proportion enrolled", "Bid offer")
rownames(x) <- c("Price/ha", "Ecoregion 1", "Socio-status index", "GMA", "Farm size", "Gender", "Age", "Plots", "Area", "Proportion enrolled", "Bid offer")
# NOTE(review): the first cor.mtest() result is immediately overwritten by
# the second assignment — the conf.level = .95 call is redundant work.
p.mat <- cor.mtest(Zamcor, conf.level = .95)
p.mat <- cor.mtest(Zamcor)$p
corrplot(x, p.mat = p.mat, sig.level = .05)
# Blank the diagonal so self-correlations are not drawn.
M2 <- x
diag(M2) = NA
corrplot(M2, na.label = "NA", type="upper",tl.col = "black", p.mat = p.mat, sig.level = 0.05, insig = "blank") # Ensures non-significant values are not displayed
#corrplot(x, order = "hclust", addrect = 2)
# ***The below is a longer version of the above***
# Bid offer
cor.test(Zambia$USD, Zambia$HA)
cor.test(Zambia$USD, Zambia$PLOTS)
cor.test(Zambia$USD, Zambia$AGE)
cor.test(Zambia$USD, Zambia$FARMSIZE)
cor.test(Zambia$USD, Zambia$GENDER)
cor.test(Zambia$USD, Zambia$PROPORTION)
cor.test(Zambia$USD, Zambia$GMA)
cor.test(Zambia$USD, Zambia$ECOREGION)
cor.test(Zambia$USD, Zambia$COMMUNITYID)
cor.test(Zambia$USD, Zambia$RICHNESSINDEX)
# Area
cor.test(Zambia$HA, Zambia$PLOTS)
cor.test(Zambia$HA, Zambia$AGE)
cor.test(Zambia$HA, Zambia$FARMSIZE)
cor.test(Zambia$HA, Zambia$GENDER)
cor.test(Zambia$HA, Zambia$PROPORTION)
cor.test(Zambia$HA, Zambia$GMA)
cor.test(Zambia$HA, Zambia$ECOREGION)
cor.test(Zambia$HA, Zambia$COMMUNITYID)
cor.test(Zambia$HA, Zambia$RICHNESSINDEX)
# Plots
cor.test(Zambia$PLOTS, Zambia$AGE)
cor.test(Zambia$PLOTS, Zambia$FARMSIZE)
cor.test(Zambia$PLOTS, Zambia$GENDER)
cor.test(Zambia$PLOTS, Zambia$PROPORTION)
cor.test(Zambia$PLOTS, Zambia$GMA)
cor.test(Zambia$PLOTS, Zambia$ECOREGION)
cor.test(Zambia$PLOTS, Zambia$COMMUNITYID)
cor.test(Zambia$PLOTS, Zambia$RICHNESSINDEX)
# Age
cor.test(Zambia$AGE, Zambia$FARMSIZE)
cor.test(Zambia$AGE, Zambia$GENDER)
cor.test(Zambia$AGE, Zambia$PROPORTION)
cor.test(Zambia$AGE, Zambia$GMA)
cor.test(Zambia$AGE, Zambia$ECOREGION)
cor.test(Zambia$AGE, Zambia$COMMUNITYID)
cor.test(Zambia$AGE, Zambia$RICHNESSINDEX)
# Farm size
cor.test(Zambia$FARMSIZE, Zambia$GENDER)
cor.test(Zambia$FARMSIZE, Zambia$PROPORTION)
cor.test(Zambia$FARMSIZE, Zambia$GMA)
cor.test(Zambia$FARMSIZE, Zambia$ECOREGION)
cor.test(Zambia$FARMSIZE, Zambia$COMMUNITYID)
cor.test(Zambia$FARMSIZE, Zambia$RICHNESSINDEX)
# Gender
cor.test(Zambia$GENDER, Zambia$PROPORTION)
cor.test(Zambia$GENDER, Zambia$GMA)
cor.test(Zambia$GENDER, Zambia$ECOREGION)
cor.test(Zambia$GENDER, Zambia$COMMUNITYID)
cor.test(Zambia$GENDER, Zambia$RICHNESSINDEX)
# Proportion
cor.test(Zambia$PROPORTION, Zambia$GMA)
cor.test(Zambia$PROPORTION, Zambia$ECOREGION)
cor.test(Zambia$PROPORTION, Zambia$COMMUNITYID)
cor.test(Zambia$PROPORTION, Zambia$RICHNESSINDEX)
# GMA
cor.test(Zambia$GMA, Zambia$ECOREGION)
cor.test(Zambia$GMA, Zambia$COMMUNITYID)
cor.test(Zambia$GMA, Zambia$RICHNESSINDEX)
# ECoregion
cor.test(Zambia$ECOREGION, Zambia$COMMUNITYID)
cor.test(Zambia$ECOREGION, Zambia$RICHNESSINDEX)
# Community
cor.test(Zambia$COMMUNITYID, Zambia$RICHNESSINDEX)
|
/Scripts/Exploredata_Individualbids.R
|
no_license
|
wainwrigh/Zambia-CWR-Data-
|
R
| false
| false
| 15,888
|
r
|
###########################################################################################################
############################ Exploring data for individual bid offers ####################################
###########################################################################################################
# This code specifies the descriptive statistics used for the individual data for AMO Border.
# 1.0 Set working directory and load the data ----
# NOTE(review): absolute, user-specific paths; adjust before running elsewhere.
setwd ("C:/Users/wwainwright/Documents/R/Zambia_Analysis")
Zambia <- read.csv("C:/Users/wwainwright/Documents/R/Zambia_Analysis/AllData/INDIVIDUALAll.csv")
# 2.0 Load the packages ----
# Load packages
library(tidyr)
library(dplyr)
library(ggplot2)
library(readr)
library(gridExtra)
library(scales)
library(psych)
library(corrplot)
# 3.0 Explore the data ----
names(Zambia)
show(Zambia)
# 4.0 summary of all data ----
# Five-number summaries of the key survey variables
summary(Zambia$AGE)
summary(Zambia$COMMUNITY)
summary(Zambia$PROVINCE)
summary(Zambia$ECOREGION)
summary(Zambia$GMA)
summary(Zambia$FARMSIZE)
summary(Zambia$MALE)
summary(Zambia$FEMALE)
summary(Zambia$PLOTS)
summary(Zambia$HA)
summary(Zambia$PROPORTION)
summary(Zambia$USD)
summary(Zambia$USDHA)
summary(Zambia$USDPLOT)
# 5.0 Aggregate the data for summary stats----
# Group means of columns 10:20 by GMA status, ecoregion and gender
# GMA / non-GMA sites
aggregate(Zambia[, 10:20], list(Zambia$GMA), mean)
# Ecoregion 1 / Ecoregion 2
aggregate(Zambia[, 10:20], list(Zambia$ECOREGION), mean)
# Male / Female
aggregate(Zambia[, 10:20], list(Zambia$MALE), mean)
# 6.0 Subset data into GMA and non-GMA / Ecoregion 1 and Ecoregion 2 ----
GMA <- Zambia[Zambia$GMA == "1" ,]
nonGMA <- Zambia[Zambia$GMA == "0" ,]
Eco1 <- Zambia[Zambia$ECOREGION == "1" ,]
Eco2 <- Zambia[Zambia$ECOREGION == "2" ,]
# Makes all the 0 values in data sheet be N/A
# NOTE(review): this recodes EVERY zero in EVERY column to NA, including
# indicator columns (e.g. GMA, GENDER) where 0 is meaningful. The subsets
# above are taken first, but later models on `Zambia` (section 14) will see
# these NAs -- confirm this is intended.
Zambia[Zambia==0]<-NA
GMA[GMA==0]<-NA
nonGMA[nonGMA==0]<-NA
Eco1[Eco1==0]<-NA
Eco2[Eco2==0]<-NA
# Summary of data
summary(GMA)
# 7.0 t-test (Ecoregion and GMA differences) ----
# Using a Fisher's F-test to verify the homoskedasticity (homogeneity of variances).
# NOTE(review): the column names used in this section (FarmSize, Age, Plots,
# Area, Bidoffer, ...) differ in case from those used elsewhere in this script
# (FARMSIZE, AGE, PLOTS, HA, USD, ...). Confirm these mixed-case columns exist
# in the CSV; otherwise `GMA$FarmSize` is NULL and these calls fail.
var.test(GMA$FarmSize, nonGMA$FarmSize) # Sig diff
var.test(GMA$Age, nonGMA$Age) # Not sig
var.test(GMA$Plots, nonGMA$Plots) # Not sig
var.test(GMA$Area, nonGMA$Area) # Sig dif
var.test(GMA$Proportion, nonGMA$Proportion) # Not sig
var.test(GMA$Bidoffer, nonGMA$Bidoffer) # Sig dif
var.test(GMA$PriceHa, nonGMA$PriceHa) # Sig dif
var.test(GMA$PricePlot, nonGMA$PricePlot) # Sig dif
var.test(GMA$RichnessIndex, nonGMA$RichnessIndex) #Sig dif
# NOTE(review): AveSizePlot vs Averagesizeplot -- inconsistent spelling between
# the two arguments; verify which column name is correct.
var.test(GMA$AveSizePlot, nonGMA$Averagesizeplot) #Sig dif
# independent 2-sample t-test for GMA differences
# var.equal follows the F-test results above (Welch's t-test where unequal)
t.test(GMA$FarmSize, nonGMA$FarmSize, var.equal=FALSE, paired=FALSE)
t.test(GMA$Age, nonGMA$Age, var.equal=TRUE, paired=FALSE)
t.test(GMA$Plots, nonGMA$Plots, var.equal=TRUE, paired=FALSE)
t.test(GMA$Area, nonGMA$Area, var.equal=FALSE, paired=FALSE)
t.test(GMA$Proportion, nonGMA$Proportion, var.equal=TRUE, paired=FALSE)
t.test(GMA$Bidoffer, nonGMA$Bidoffer, var.equal=FALSE, paired=FALSE)
t.test(GMA$PriceHa, nonGMA$PriceHa, var.equal=FALSE, paired=FALSE)
t.test(GMA$PricePlot, nonGMA$PricePlot, var.equal=FALSE, paired=FALSE)
t.test(GMA$RichnessIndex, nonGMA$RichnessIndex, var.equal=FALSE, paired=FALSE)
t.test(GMA$Averagesizeplot, nonGMA$Averagesizeplot, var.equal=FALSE, paired=FALSE)
# 8.0 Standard deviations of parameters ----
# GMA
sd(GMA$FarmSize)
sd(GMA$Age, na.rm=TRUE)
sd(GMA$Plots, na.rm=TRUE)
sd(GMA$Area)
sd(GMA$Proportion, na.rm=TRUE)
sd(GMA$Bidoffer)
sd(GMA$PriceHa)
sd(GMA$PricePlot, na.rm=TRUE)
sd(GMA$RichnessIndex)
sd(GMA$AveSizePlot, na.rm=TRUE)
# non-GMA
sd(nonGMA$FarmSize)
sd(nonGMA$Age, na.rm=TRUE)
sd(nonGMA$Plots, na.rm=TRUE)
sd(nonGMA$Area)
sd(nonGMA$Proportion, na.rm=TRUE)
sd(nonGMA$Bidoffer)
sd(nonGMA$PriceHa)
sd(nonGMA$PricePlot, na.rm=TRUE)
sd(nonGMA$RichnessIndex)
sd(nonGMA$AveSizePlot, na.rm=TRUE)
# 9.0 Bar Plot farmer bids as cost per hectare for GMA and non-GMA sites----
# Plot all farmer bids from GMA sites
# Bars = individual bids ordered by USD/ha; a loess curve summarises the trend.
(wbar1 <- ggplot(GMA, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer GMA sites") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Plot all farmer bids from nonGMA sites
(wbar2 <- ggplot(nonGMA, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer non-GMA sites") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
## Arrange the two plots into a Panel
# Force an identical y-axis on both plots, then equalise gtable panel heights
# so the side-by-side display is directly comparable.
limits <- c(0, 1500)
breaks <- seq(limits[1], limits[2], by=100)
# assign common axis to both plots
wbar1.common.y <- wbar1 + scale_y_continuous(limits=limits, breaks=breaks)
wbar2.common.y <- wbar2 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wbar1.common.y <- ggplot_gtable(ggplot_build(wbar1.common.y))
wbar2.common.y <- ggplot_gtable(ggplot_build(wbar2.common.y))
# copy the plot height from p1 to p2
wbar1.common.y$heights <- wbar2.common.y$heights
# Display
grid.arrange(wbar1.common.y,wbar2.common.y,ncol=2,widths=c(11,9))
# 10.0 Supply curve of farmer bids as cost per hectare for Ecoregion1 and Ecoregion2 ----
# Creating an object called x, based on x variables, and then plotting in a model, ordered by Price per Ha
# NOTE(review): column names Gender/GMASite/EcoregionSite differ in case from
# those used elsewhere (GENDER, GMA, ECOREGION); confirm they exist in the CSV.
# NOTE(review): `x` is reused for the correlation matrix in section 15, which
# clobbers this data frame -- consider a more specific name.
x = Zambia %>%
select(COMMUNITYID, USDHA, Gender, RESPONDENT, GMASite, EcoregionSite)
p <- x %>%
mutate(RESPONDENT=reorder(RESPONDENT, USDHA)) %>%
ggplot(aes(RESPONDENT, USDHA,colour=GMASite, group=1)) +
geom_point() +
labs(x="Farmer bidding in tender", y="Price per hectare (USD)")+
#ggtitle("B) Size of farms selected for conservation services")+
facet_wrap(~EcoregionSite) +
theme(#axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line = element_line(colour = "black"),
panel.background = element_blank(),
legend.position = c(.02, .98),
legend.justification = c("left", "top"),
legend.box.just = "left",
legend.margin = margin(6, 6, 6, 6) ,legend.direction="horizontal",
legend.title = element_blank())
p
# 11.0 Bar Plot farmer bids as cost per hectare for Ecoregion1 and Ecoregion2 ----
# Same layout as section 9, split by ecoregion instead of GMA status.
(wbar3 <- ggplot(Eco1, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer Ecoregion 1") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Plot all farmer bids for Ecoregion 2
(wbar4 <- ggplot(Eco2, aes(x=reorder(RESPONDENT, USDHA), y=USDHA)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Cost per hectare (USD)") +
xlab("Farmer bid offer Ecoregion 2") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Arrange the two plots into a Panel
# Common y-axis and equalised panel heights, as in section 9.
limits <- c(0, 1500)
breaks <- seq(limits[1], limits[2], by=100)
# assign common axis to both plots
wbar3.common.y <- wbar3 + scale_y_continuous(limits=limits, breaks=breaks)
wbar4.common.y <- wbar4 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wbar3.common.y <- ggplot_gtable(ggplot_build(wbar3.common.y))
wbar4.common.y <- ggplot_gtable(ggplot_build(wbar4.common.y))
# copy the plot height from p1 to p2
wbar3.common.y$heights <- wbar4.common.y$heights
# Display
grid.arrange(wbar3.common.y,wbar4.common.y,ncol=2,widths=c(11,9))
# 12.0 Boxplot ----
# Box plot of community farmer bids (US/ha) for GMA/non-GMA sites
# NOTE(review): box1 fills by GMA while box2 fills by GMA2 -- confirm which
# column holds the site label; a numeric GMA column gives a continuous fill
# scale rather than discrete boxes.
(box1 <- ggplot (Zambia, aes(COMMUNITY, USDHA)) + geom_boxplot(aes(fill=GMA))+
geom_point()+
ylab("Total cost per hectare (USD))") +
xlab("\nCommunity") +
guides(fill=guide_legend(title="GMA or non-GMA sites")) +
theme(axis.text.x=element_text(size=11, angle=90, vjust=1, hjust=1)))
# Ordered box plot of individual community bids (US/ha) for GMA/non-GMA sites
# Communities are ordered left-to-right by their median bid (FUN=median).
(box2 <- ggplot (Zambia, aes(x=reorder(COMMUNITY, USDHA, FUN=median),y=USDHA)) + geom_boxplot(aes(fill=GMA2))+
ylab("Total cost per hectare (USD))") +
xlab("\nCommunity") +
guides(fill=guide_legend(title="GMA or non-GMA sites")) +
theme(
axis.text.x=element_text(size=11, angle=90, vjust=1, hjust=1),
axis.line = element_line(color="black", size = 0.1),
panel.background = element_blank()))
# 13.0 Bar plot and trendline of farmer proportions enrolled for conservation services relative ----
# Proportion of each farm enrolled, ordered by proportion, one panel per
# ecoregion; same common-axis panel layout as sections 9 and 11.
# Ecoregion 1
(wline <- ggplot(Eco1, aes(x=reorder(FARMSIZE, PROPORTION), y=PROPORTION)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Proportion (%) of land enrolled in bid offers") +
xlab("Farm size (ha)") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Ecoregion 2
(wline2 <- ggplot(Eco2, aes(x=reorder(FARMSIZE, PROPORTION), y=PROPORTION)) +
geom_bar(position=position_dodge(width=0.1), width = 0.15, stat="identity", colour="black", fill="#00868B") +
geom_smooth(method = "loess", se=TRUE, color="blue", aes(group=1)) +
ylab("Proportion (%) of land enrolled in bid offers") +
xlab("Farm size (ha)") +
theme(
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid', colour = "black")))
# Arrange the two plots into a Panel
limits <- c(0, 100)
breaks <- seq(limits[1], limits[2], by=25)
# assign common axis to both plots
wline.common.y <- wline + scale_y_continuous(limits=limits, breaks=breaks)
wline2.common.y <- wline2 + scale_y_continuous(limits=limits, breaks=breaks)
# build the plots
wline.common.y <- ggplot_gtable(ggplot_build(wline.common.y))
wline2.common.y <- ggplot_gtable(ggplot_build(wline2.common.y))
# copy the plot height from p1 to p2
wline.common.y$heights <- wline2.common.y$heights
# Display
grid.arrange(wline.common.y,wline2.common.y,ncol=2,widths=c(10,10))
# 14.0 Regression analysis ----
# Overarching regression model using bid offer price
# Linear model of the bid offer (USD) on area, number of plots, ecoregion,
# GMA status and farm size, fitted on the full individual-level data.
mod.1 = lm(formula = USD ~ HA + PLOTS + ECOREGION + GMA + FARMSIZE, data = Zambia)
summary(mod.1)
# Plot of all regressions
# NOTE(review): Zamcor is only created in section 15.0 below; run that section
# first, otherwise this call fails with "object 'Zamcor' not found".
plot(Zamcor, pch=16, col="blue")
# Other regression models
# GMA using bid offer price
mod.2 = lm(formula = USD ~ HA + ECOREGION + PROPORTION + FARMSIZE + RICHNESSINDEX, data = GMA)
summary(mod.2)
# non-GMA using bid offer price
mod.3 = lm(formula = USD ~ HA + PROPORTION + FARMSIZE , data = nonGMA)
summary(mod.3)
# Ecoregion 1 using bid offer price
mod.4 = lm(formula = USD ~ HA + GMA + PROPORTION + RICHNESSINDEX , data = Eco1)
summary(mod.4)
# Ecoregion 2 using bid offer price
mod.5 = lm(formula = USD ~ HA + FARMSIZE , data = Eco2)
summary(mod.5)
# 15.0 Correlation matrix analysis ----
# Create data frame of variables with selected columns using column indices
# NOTE(review): column positions are hard-coded; confirm via names(Zamcor)
# that they match the labels assigned to colnames/rownames below.
Zamcor <- Zambia[,c(14,22,4,26,28,12,11,8,7,5,3)]
names(Zamcor)
# NOTE(review): reintroduces zeros for the NAs created in section 6, so the
# correlations treat missing values as 0 -- confirm this is intended.
Zamcor[is.na(Zamcor)] <- 0
# Group correlation test
corr.test(Zamcor[1:11])
# Visualisations
pairs.panels(Zamcor[1:11])
# Simple visualisation of correlation analysis effect size including significance
x <- cor(Zamcor[1:11])
# NOTE(review): the next line correlates the correlation matrix itself and
# discards the result -- probably a leftover; consider removing.
cor(x,use="pairwise.complete.obs")
colnames (x) <- c("Price/ha", "Ecoregion 1", "Socio-status index", "GMA", "Farm size", "Gender", "Age", "Plots", "Area", "Proportion enrolled", "Bid offer")
rownames(x) <- c("Price/ha", "Ecoregion 1", "Socio-status index", "GMA", "Farm size", "Gender", "Age", "Plots", "Area", "Proportion enrolled", "Bid offer")
# NOTE(review): the first p.mat assignment is immediately overwritten below.
p.mat <- cor.mtest(Zamcor, conf.level = .95)
p.mat <- cor.mtest(Zamcor)$p
corrplot(x, p.mat = p.mat, sig.level = .05)
# Blank the diagonal and show only the upper triangle
M2 <- x
diag(M2) = NA
corrplot(M2, na.label = "NA", type="upper",tl.col = "black", p.mat = p.mat, sig.level = 0.05, insig = "blank") # Ensures non-significant values are not displayed
#corrplot(x, order = "hclust", addrect = 2)
# ***The below is a longer version of the above***
# Pairwise Pearson correlation tests between every pair of variables.
# Bid offer
cor.test(Zambia$USD, Zambia$HA)
cor.test(Zambia$USD, Zambia$PLOTS)
cor.test(Zambia$USD, Zambia$AGE)
cor.test(Zambia$USD, Zambia$FARMSIZE)
cor.test(Zambia$USD, Zambia$GENDER)
cor.test(Zambia$USD, Zambia$PROPORTION)
cor.test(Zambia$USD, Zambia$GMA)
cor.test(Zambia$USD, Zambia$ECOREGION)
cor.test(Zambia$USD, Zambia$COMMUNITYID)
cor.test(Zambia$USD, Zambia$RICHNESSINDEX)
# Area
cor.test(Zambia$HA, Zambia$PLOTS)
cor.test(Zambia$HA, Zambia$AGE)
cor.test(Zambia$HA, Zambia$FARMSIZE)
cor.test(Zambia$HA, Zambia$GENDER)
cor.test(Zambia$HA, Zambia$PROPORTION)
cor.test(Zambia$HA, Zambia$GMA)
cor.test(Zambia$HA, Zambia$ECOREGION)
cor.test(Zambia$HA, Zambia$COMMUNITYID)
cor.test(Zambia$HA, Zambia$RICHNESSINDEX)
# Plots
cor.test(Zambia$PLOTS, Zambia$AGE)
cor.test(Zambia$PLOTS, Zambia$FARMSIZE)
cor.test(Zambia$PLOTS, Zambia$GENDER)
cor.test(Zambia$PLOTS, Zambia$PROPORTION)
cor.test(Zambia$PLOTS, Zambia$GMA)
cor.test(Zambia$PLOTS, Zambia$ECOREGION)
cor.test(Zambia$PLOTS, Zambia$COMMUNITYID)
cor.test(Zambia$PLOTS, Zambia$RICHNESSINDEX)
# Age
cor.test(Zambia$AGE, Zambia$FARMSIZE)
cor.test(Zambia$AGE, Zambia$GENDER)
cor.test(Zambia$AGE, Zambia$PROPORTION)
cor.test(Zambia$AGE, Zambia$GMA)
cor.test(Zambia$AGE, Zambia$ECOREGION)
cor.test(Zambia$AGE, Zambia$COMMUNITYID)
cor.test(Zambia$AGE, Zambia$RICHNESSINDEX)
# Farm size
cor.test(Zambia$FARMSIZE, Zambia$GENDER)
cor.test(Zambia$FARMSIZE, Zambia$PROPORTION)
cor.test(Zambia$FARMSIZE, Zambia$GMA)
cor.test(Zambia$FARMSIZE, Zambia$ECOREGION)
cor.test(Zambia$FARMSIZE, Zambia$COMMUNITYID)
cor.test(Zambia$FARMSIZE, Zambia$RICHNESSINDEX)
# Gender
cor.test(Zambia$GENDER, Zambia$PROPORTION)
cor.test(Zambia$GENDER, Zambia$GMA)
cor.test(Zambia$GENDER, Zambia$ECOREGION)
cor.test(Zambia$GENDER, Zambia$COMMUNITYID)
cor.test(Zambia$GENDER, Zambia$RICHNESSINDEX)
# Proportion
cor.test(Zambia$PROPORTION, Zambia$GMA)
cor.test(Zambia$PROPORTION, Zambia$ECOREGION)
cor.test(Zambia$PROPORTION, Zambia$COMMUNITYID)
cor.test(Zambia$PROPORTION, Zambia$RICHNESSINDEX)
# GMA
cor.test(Zambia$GMA, Zambia$ECOREGION)
cor.test(Zambia$GMA, Zambia$COMMUNITYID)
cor.test(Zambia$GMA, Zambia$RICHNESSINDEX)
# Ecoregion
cor.test(Zambia$ECOREGION, Zambia$COMMUNITYID)
cor.test(Zambia$ECOREGION, Zambia$RICHNESSINDEX)
# Community
cor.test(Zambia$COMMUNITYID, Zambia$RICHNESSINDEX)
|
\name{qType}
\alias{qType}
\title{Determines question type as single/grid question and single/multi response.}
\usage{
qType(s)
}
\arguments{
\item{s}{A surveyorStats object}
}
\description{
Determines question type as single/grid question and
single/multi response.
}
\seealso{
\code{\link{as.surveyorStats}}
Other stats helper functions: \code{\link{allNA}},
\code{\link{allNull}}, \code{\link{is.yesno}},
\code{\link{reorderQuestion}},
\code{\link{reorderResponse}},
\code{\link{splitBinCombine}},
\code{\link{splitMeanCombine}},
\code{\link{splitPercentCombine}},
\code{\link{weightedCount}}, \code{\link{weightedMean}},
\code{\link{weightedMedian}}, \code{\link{weightedSum}}
}
\keyword{internal}
|
/man/qType.Rd
|
no_license
|
andrie/surveyor
|
R
| false
| false
| 732
|
rd
|
\name{qType}
\alias{qType}
\title{Determines question type as single/grid question and single/multi response.}
\usage{
qType(s)
}
\arguments{
\item{s}{A surveyorStats object}
}
\description{
Determines question type as single/grid question and
single/multi response.
}
\seealso{
\code{\link{as.surveyorStats}}
Other stats helper functions: \code{\link{allNA}},
\code{\link{allNull}}, \code{\link{is.yesno}},
\code{\link{reorderQuestion}},
\code{\link{reorderResponse}},
\code{\link{splitBinCombine}},
\code{\link{splitMeanCombine}},
\code{\link{splitPercentCombine}},
\code{\link{weightedCount}}, \code{\link{weightedMean}},
\code{\link{weightedMedian}}, \code{\link{weightedSum}}
}
\keyword{internal}
|
### reading data
# Semicolon-separated file; "?", "NA" and "NaN" all mark missing values.
RT <- read.csv("household_power_consumption.txt", sep=";", colClasses=c("character", "character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"), na.strings = c("NA", "NaN", "?"))
### subset to two specific dates
# Keep only the observations for 1 and 2 February 2007 (dates stored as d/m/Y)
RS1 <- RT[RT$Date=="1/2/2007",]
RS2 <- RT[RT$Date=="2/2/2007",]
SS <- rbind(RS1,RS2)
### parse the time and date
# Build a single POSIXlt timestamp column (DT) from the Date and Time fields
Dparsed <- as.Date(SS$Date, "%d/%m/%Y")
DT <- paste(Dparsed,SS$Time)
DT <- strptime(DT, "%Y-%m-%d %H:%M:%S")
NewData <- cbind(DT,SS)
### output plot 1
# Red histogram of global active power, written to plot1.png (default 480x480)
png("plot1.png")
hist(NewData$Global_active_power,col="red", main="",xlab="")
title(main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
|
/figure/plot1.R
|
no_license
|
vivtyng/ExData_Plotting1
|
R
| false
| false
| 698
|
r
|
### reading data
# Semicolon-separated file; "?", "NA" and "NaN" all mark missing values.
RT <- read.csv("household_power_consumption.txt", sep=";", colClasses=c("character", "character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"), na.strings = c("NA", "NaN", "?"))
### subset to two specific dates
# Keep only the observations for 1 and 2 February 2007 (dates stored as d/m/Y)
RS1 <- RT[RT$Date=="1/2/2007",]
RS2 <- RT[RT$Date=="2/2/2007",]
SS <- rbind(RS1,RS2)
### parse the time and date
# Build a single POSIXlt timestamp column (DT) from the Date and Time fields
Dparsed <- as.Date(SS$Date, "%d/%m/%Y")
DT <- paste(Dparsed,SS$Time)
DT <- strptime(DT, "%Y-%m-%d %H:%M:%S")
NewData <- cbind(DT,SS)
### output plot 1
# Red histogram of global active power, written to plot1.png (default 480x480)
png("plot1.png")
hist(NewData$Global_active_power,col="red", main="",xlab="")
title(main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
|
#' Google geocoding
#'
#' Geocoding is the process of converting addresses (like "1600 Amphitheatre
#' Parkway, Mountain View, CA") into geographic coordinates (like latitude 37.423021
#' and longitude -122.083739), which you can use to place markers on a map, or position the map.
#'
#' @param address \code{string}. The street address that you want to geocode, in the
#' format used by the national postal service of the country concerned
#' @param bounds list of two, each element is a vector of lat/lon coordinates
#' representing the south-west and north-east bounding box
#' @param language \code{string}. Specifies the language in which to return the results.
#' See the list of supported languages:
#' \url{https://developers.google.com/maps/faq#using-google-maps-apis}. If no
#' language is supplied, the service will attempt to use the language of the domain
#' from which the request was sent
#' @param region \code{string}. Specifies the region code, specified as a ccTLD
#' ("top-level domain"). See region basing for details
#' \url{https://developers.google.com/maps/documentation/directions/intro#RegionBiasing}
#' @param key \code{string}. A valid Google Developers Geocode API key
#' @param components \code{data.frame} of two columns, component and value. Restricts
#' the results to a specific area. One or more of "route","locality","administrative_area",
#' "postal_code","country"
#' @param simplify \code{logical} - TRUE indicates the returned JSON will be coerced into a list. FALSE indicates the returned JSON will be returned as a string
#' @param curl_proxy a curl proxy object
#' @return Either list or JSON string of the geocoded address
#' @examples
#' \dontrun{
#' df <- google_geocode(address = "MCG, Melbourne, Australia",
#' key = "<your valid api key>",
#' simplify = TRUE)
#'
#' df$results$geometry$location
#' lat lng
#' 1 -37.81659 144.9841
#'
#' ## using bounds
#' bounds <- list(c(34.172684,-118.604794),
#' c(34.236144,-118.500938))
#'
#' js <- google_geocode(address = "Winnetka",
#' bounds = bounds,
#' key = "<your valid api key>",
#' simplify = FALSE)
#'
#' ## using components
#' components <- data.frame(component = c("postal_code", "country"),
#' value = c("3000", "AU"))
#'
#'df <- google_geocode(address = "Flinders Street Station",
#' key = "<your valid api key>",
#' components = components,
#' simplify = FALSE)
#'
#' }
#' @export
google_geocode <- function(address,
                           bounds = NULL,
                           key = get_api_key("geocode"),
                           language = NULL,
                           region = NULL,
                           components = NULL,
                           simplify = TRUE,
                           curl_proxy = NULL) {

  ## A key is mandatory for the geocode web service
  if (is.null(key)) {
    stop("A Valid Google Developers API key is required")
  }

  ## simplify must be a single logical
  logicalCheck(simplify)

  ## Normalise the address, then validate each optional restriction;
  ## NULL entries are silently dropped by c() when the URL is assembled
  address <- tolower(check_address(address))
  params <- c(
    "address"    = address,
    "bounds"     = validateBounds(bounds),
    "language"   = validateLanguage(language),
    "region"     = validateRegion(region),
    "components" = validateComponents(components),
    "key"        = key
  )

  ## Assemble the request URL and fetch (optionally through a curl proxy)
  base_url <- "https://maps.googleapis.com/maps/api/geocode/json?"
  downloadData(constructURL(base_url, params), simplify, curl_proxy)
}
|
/R/google_geocode.R
|
no_license
|
HONG1992/googleway
|
R
| false
| false
| 3,812
|
r
|
#' Google geocoding
#'
#' Geocoding is the process of converting addresses (like "1600 Amphitheatre
#' Parkway, Mountain View, CA") into geographic coordinates (like latitude 37.423021
#' and longitude -122.083739), which you can use to place markers on a map, or position the map.
#'
#' @param address \code{string}. The street address that you want to geocode, in the
#' format used by the national postal service of the country concerned
#' @param bounds list of two, each element is a vector of lat/lon coordinates
#' representing the south-west and north-east bounding box
#' @param language \code{string}. Specifies the language in which to return the results.
#' See the list of supported languages:
#' \url{https://developers.google.com/maps/faq#using-google-maps-apis}. If no
#' language is supplied, the service will attempt to use the language of the domain
#' from which the request was sent
#' @param region \code{string}. Specifies the region code, specified as a ccTLD
#' ("top-level domain"). See region basing for details
#' \url{https://developers.google.com/maps/documentation/directions/intro#RegionBiasing}
#' @param key \code{string}. A valid Google Developers Geocode API key
#' @param components \code{data.frame} of two columns, component and value. Restricts
#' the results to a specific area. One or more of "route","locality","administrative_area",
#' "postal_code","country"
#' @param simplify \code{logical} - TRUE indicates the returned JSON will be coerced into a list. FALSE indicates the returned JSON will be returned as a string
#' @param curl_proxy a curl proxy object
#' @return Either list or JSON string of the geocoded address
#' @examples
#' \dontrun{
#' df <- google_geocode(address = "MCG, Melbourne, Australia",
#' key = "<your valid api key>",
#' simplify = TRUE)
#'
#' df$results$geometry$location
#' lat lng
#' 1 -37.81659 144.9841
#'
#' ## using bounds
#' bounds <- list(c(34.172684,-118.604794),
#' c(34.236144,-118.500938))
#'
#' js <- google_geocode(address = "Winnetka",
#' bounds = bounds,
#' key = "<your valid api key>",
#' simplify = FALSE)
#'
#' ## using components
#' components <- data.frame(component = c("postal_code", "country"),
#' value = c("3000", "AU"))
#'
#'df <- google_geocode(address = "Flinders Street Station",
#' key = "<your valid api key>",
#' components = components,
#' simplify = FALSE)
#'
#' }
#' @export
## Geocode an address via the Google Maps Geocoding API. Returns the parsed
## response (list) when simplify = TRUE, otherwise the raw JSON string.
## See the roxygen block above for parameter details.
google_geocode <- function(address,
bounds = NULL,
key = get_api_key("geocode"),
language = NULL,
region = NULL,
components = NULL,
simplify = TRUE,
curl_proxy = NULL
){
## parameter check - key
if(is.null(key))
stop("A Valid Google Developers API key is required")
## simplify must be a single logical
logicalCheck(simplify)
## normalise the address string
address <- check_address(address)
address <- tolower(address)
## validate the optional restriction arguments (validators defined elsewhere
## in the package)
bounds <- validateBounds(bounds)
language <- validateLanguage(language)
region <- validateRegion(region)
components <- validateComponents(components)
## assemble the request URL; NULL entries are dropped by c()
map_url <- "https://maps.googleapis.com/maps/api/geocode/json?"
map_url <- constructURL(map_url, c("address" = address,
"bounds" = bounds,
"language" = language,
"region" = region,
"components" = components,
"key" = key))
return(downloadData(map_url, simplify, curl_proxy))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connectparticipant_service.R
\name{connectparticipant}
\alias{connectparticipant}
\title{Amazon Connect Participant Service}
\usage{
connectparticipant(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Amazon Connect is a cloud-based contact center solution that makes it
easy to set up and manage a customer contact center and provide reliable
customer engagement at any scale.
Amazon Connect enables customer contacts through voice or chat.
The APIs described here are used by chat participants, such as agents
and customers.
}
\section{Service syntax}{
\preformatted{svc <- connectparticipant(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=connectparticipant_create_participant_connection]{create_participant_connection} \tab Creates the participant's connection \cr
\link[=connectparticipant_disconnect_participant]{disconnect_participant} \tab Disconnects a participant \cr
\link[=connectparticipant_get_transcript]{get_transcript} \tab Retrieves a transcript of the session\cr
\link[=connectparticipant_send_event]{send_event} \tab Sends an event \cr
\link[=connectparticipant_send_message]{send_message} \tab Sends a message
}
}
\examples{
\donttest{svc <- connectparticipant()
svc$create_participant_connection(
Foo = 123
)}
}
|
/paws/man/connectparticipant.Rd
|
permissive
|
ryanb8/paws
|
R
| false
| true
| 1,658
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connectparticipant_service.R
\name{connectparticipant}
\alias{connectparticipant}
\title{Amazon Connect Participant Service}
\usage{
connectparticipant(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Amazon Connect is a cloud-based contact center solution that makes it
easy to set up and manage a customer contact center and provide reliable
customer engagement at any scale.
Amazon Connect enables customer contacts through voice or chat.
The APIs described here are used by chat participants, such as agents
and customers.
}
\section{Service syntax}{
\preformatted{svc <- connectparticipant(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=connectparticipant_create_participant_connection]{create_participant_connection} \tab Creates the participant's connection \cr
\link[=connectparticipant_disconnect_participant]{disconnect_participant} \tab Disconnects a participant \cr
\link[=connectparticipant_get_transcript]{get_transcript} \tab Retrieves a transcript of the session\cr
\link[=connectparticipant_send_event]{send_event} \tab Sends an event \cr
\link[=connectparticipant_send_message]{send_message} \tab Sends a message
}
}
\examples{
\donttest{svc <- connectparticipant()
svc$create_participant_connection(
Foo = 123
)}
}
|
################################################################
# Build adjacency matrices from dyadic data
# Dyad data must identify countries by variables
# ccode_1 & ccode_2 and the time aspect by a variable called year
# time is a simple vector of years
# panel is a dataset with country codes
# Build one adjacency matrix per year from dyadic data.
#
# Args:
#   variable:   name of the dyadic variable used to fill the matrix
#   dyadData:   data.frame of dyads; must contain the columns named by
#               cntry1, cntry2 and time
#   cntry1, cntry2: column names holding the two country codes
#   cntryYear1, cntryYear2: optional country-year id columns; when supplied,
#               panel rows absent from BOTH sides of the dyad data are
#               dropped (NA, the default, keeps the full panel)
#   time:       column name holding the year
#   pd:         vector of years to build matrices for
#   panel:      data.frame of country codes with columns 'ccode' and 'year'
#               (and 'cnameYear' when cntryYear1/2 are supplied)
#   directed:   FALSE (default) mirrors each value into both triangles
#
# Returns: a named list (one element per year in pd) of square numeric
# adjacency matrices with country codes as dimnames; unobserved dyads are 0.
DyadBuild <- function(variable, dyadData, cntry1, cntry2, cntryYear1 = NA, cntryYear2 = NA, time, pd, panel=panel, directed=FALSE){
  if (is.na(cntryYear1)) {
    panelMatch <- panel
  } else {
    # Drop panel country-years that appear on neither side of the dyad data
    panelMatch <- panel[-which(panel$cnameYear
                               %in% intersect(setdiff(panel$cnameYear, dyadData[,cntryYear1]),
                                              setdiff(panel$cnameYear, dyadData[,cntryYear2])) ),]
  }

  # Countries present in the panel for each requested year
  countryList <- lapply(pd, function(x) FUN=panelMatch[panelMatch$year==x,'ccode'])
  names(countryList) <- pd

  # Subset to the needed columns and coerce to a numeric matrix ONCE, before
  # the year loop (previously redone on every iteration, which was redundant)
  dyadData <- dyadData[,c(cntry1,cntry2,time,variable)]
  dyadData <- data.matrix(dyadData)

  Mats <- list()
  for(ii in seq_along(pd)){
    countries <- countryList[[ii]]
    yearMatrix <- matrix(0, nrow=length(countries), ncol=length(countries))
    rownames(yearMatrix) <- colnames(yearMatrix) <- countries

    # Dyads observed in this year
    data <- matrix(dyadData[which(dyadData[,time] %in% pd[ii]),], ncol=4,
                   dimnames=list(NULL, c(cntry1,cntry2,time,variable)))

    for(jj in seq_len(nrow(yearMatrix))){
      # Rows where country jj is the sender
      slice <- matrix(data[which(data[,cntry1] %in% countries[jj]), c(cntry2,variable)], ncol=2,
                      dimnames=list(NULL, c(cntry2,variable)))
      rownames(slice) <- slice[,cntry2]
      # Keep only receivers present in this year's panel
      x <- intersect(countries, as.vector(slice[,cntry2]))
      slice2 <- matrix(slice[as.character(x),], ncol=2,
                       dimnames=list(NULL, c(cntry2,variable)))
      rownames(slice2) <- slice2[,cntry2]
      yearMatrix[as.character(countries[jj]), rownames(slice2)] <- slice2[,variable]
      # Undirected networks get the mirrored entry as well
      if(directed==FALSE){yearMatrix[rownames(slice2), as.character(countries[jj])] <- slice2[,variable]}
    }
    Mats[[ii]] <- yearMatrix
    print(pd[ii])
  }
  names(Mats) <- pd
  Mats
}
################################################################
################################################################
# Calculate moving average or sum for network data
# Pad a (square) named adjacency matrix with NA rows and columns so that it
# contains an entry for every identifier in `toAdd`.
#   matToMatch : square matrix with row names (assumed square -- the new rows
#                are sized from nrow())
#   toAdd      : character vector of identifiers the result must cover
# Returns the enlarged matrix; newly added cells are NA.
matrixMatcher = function(matToMatch, toAdd) {
  # Identifiers requested but absent from the matrix rows
  missing_ids <- setdiff(toAdd, rownames(matToMatch))
  n_missing <- length(missing_ids)
  # Append one NA row per missing identifier
  na_rows <- matrix(NA, nrow = n_missing, ncol = nrow(matToMatch),
                    dimnames = list(missing_ids, NULL))
  padded <- rbind(matToMatch, na_rows)
  # Append matching NA columns so the result stays square
  na_cols <- matrix(NA, nrow = nrow(padded), ncol = n_missing,
                    dimnames = list(NULL, missing_ids))
  cbind(padded, na_cols)
}
# Moving-window average (avg=TRUE) or sum (avg=FALSE) of adjacency matrices.
#   years : years to compute the statistic for
#   wdow  : window length in years (window is [year-wdow+1, year])
#   mats  : list of square adjacency matrices, named by year as strings
#   avg   : TRUE for element-wise mean, FALSE for element-wise sum
# Returns a list (named by year) of matrices aligned to the country set of the
# window's final year. Relies on matrixMatcher() defined above.
mvaStatMat=function(years, wdow, mats, avg=TRUE){
	matListStat=list()
	for(ii in 1:length(years) ){
		# Window [sy1, sy2] ending at the current year
		sy1=years[ii]-wdow+1; sy2=years[ii]
		ys=as.character(sy1:sy2)
		matList=mats[ys]
		# Keep only the years actually present in mats (absent years get NA names)
		namL=names(matList); namL=namL[!is.na(namL)]
		matList=mats[namL]
		# Countries of the window's final year define the output dimensions
		kmat=mats[as.character(sy2)][[1]]
		cntries=rownames(kmat); lcnt=length(cntries)
		# Pad every matrix in the window with NA rows/cols, then align ordering
		matList=lapply(matList, function(x) FUN=matrixMatcher(x, cntries))
		matList2=lapply(matList, function(x) FUN=x[cntries,cntries])
		# Stack the window into an lcnt x lcnt x w array and reduce over slices
		if(avg){matStat=rowMeans(
			array(unlist(matList2), dim = c(lcnt,lcnt,length(matList2))),
			dims=2, na.rm=T) }
		if(!avg){matStat=rowSums(
			array(unlist(matList2), dim = c(lcnt,lcnt,length(matList2))),
			dims=2, na.rm=T) }
		matStat=matrix(matStat, nrow=lcnt, ncol=lcnt, dimnames=list(cntries, cntries))
		matListStat[[ii]]=matStat; print(years[ii])  # progress indicator
	}
	names(matListStat)=years; matListStat
}
################################################################
|
/RCode/Funcs/adjMatHelpers.R
|
no_license
|
s7minhas/ForeignAid
|
R
| false
| false
| 3,465
|
r
|
################################################################
# Build adjacency matrices from dyadic data
# Dyad data must identify countries by variables
# ccode_1 & ccode_2 and the time aspect by a variable called year
# time is a simple vector of years
# panel is a dataset with country codes
# Build one adjacency matrix per period in `pd` from dyadic data.
#   variable     : name of the dyadic value column
#   dyadData     : dyadic dataset containing the cntry1/cntry2/time/variable columns
#   cntry1/2     : names of the two country-code columns
#   cntryYear1/2 : optional country-year label columns; when supplied, panel
#                  rows absent from BOTH sides of the dyad data are dropped
#   time         : name of the year column in dyadData
#   pd           : vector of periods (years) to build matrices for
#   panel        : country panel with columns 'ccode', 'year', 'cnameYear' -- TODO confirm schema
#   directed     : if FALSE, each value is mirrored into both triangles
# Returns a named list with one square (countries x countries) matrix per period.
DyadBuild <- function(variable, dyadData, cntry1, cntry2, cntryYear1 = NA, cntryYear2 = NA, time, pd, panel=panel, directed=FALSE){
	# No country-year columns supplied: use the panel as-is
	if ( is.na(cntryYear1) ==T){
	panelMatch <- panel
	}else {
	# Drop panel country-years that appear on neither side of the dyad data
	panelMatch <- panel[-which(panel$cnameYear
		%in% intersect(setdiff(panel$cnameYear, dyadData[,cntryYear1]),
		setdiff(panel$cnameYear, dyadData[,cntryYear2])) ),] }
	# Country codes present in the panel for each period
	countryList <- lapply(pd, function(x) FUN=panelMatch[panelMatch$year==x,'ccode'])
	names(countryList) <- pd
	Mats <- list()
	for(ii in 1:length(pd)){
		countries <- countryList[[ii]]
		# Start from an all-zero square matrix indexed by country code
		yearMatrix <- matrix(0, nrow=length(countries), ncol=length(countries))
		rownames(yearMatrix) <- colnames(yearMatrix) <- countries
		# Reduce dyad data to the four needed columns (idempotent across iterations)
		dyadData <- dyadData[,c(cntry1,cntry2,time,variable)]
		dyadData <- data.matrix(dyadData)
		# Rows of dyad data belonging to the current period
		data <- matrix(dyadData[which(dyadData[,time] %in% pd[ii]),], ncol=4,
			dimnames=list(NULL, c(cntry1,cntry2,time,variable)))
		for(jj in 1:nrow(yearMatrix)){
			# Dyads whose first country is countries[jj]
			slice <- matrix(data[which(data[,cntry1] %in% countries[jj]), c(cntry2,variable)], ncol=2,
				dimnames=list(NULL, c(cntry2,variable)))
			rownames(slice) <- slice[,cntry2]
			# Keep only partner countries that are in this period's panel
			x <- intersect(countries, as.vector(slice[,cntry2]))
			slice2 <- matrix(slice[as.character(x),], ncol=2,
				dimnames=list(NULL, c(cntry2,variable)))
			rownames(slice2) <- slice2[,cntry2]
			# Fill the row for countries[jj]; mirror for undirected networks
			yearMatrix[as.character(countries[jj]), rownames(slice2)] <- slice2[,variable]
			if(directed==FALSE){yearMatrix[rownames(slice2), as.character(countries[jj])] <- slice2[,variable]}
		}
		Mats[[ii]] <- yearMatrix
		print(pd[ii])  # progress indicator
	}
	names(Mats) <- pd
	Mats
}
################################################################
################################################################
# Calculate moving average or sum for network data
# Pad a (square) named adjacency matrix with NA rows and columns so that it
# contains an entry for every identifier in `toAdd`.
#   matToMatch : square matrix with row names (assumed square -- the new rows
#                are sized from nrow())
#   toAdd      : character vector of identifiers the result must cover
# Returns the enlarged matrix; newly added cells are NA.
matrixMatcher = function(matToMatch, toAdd) {
  # Identifiers requested but absent from the matrix rows
  missing_ids <- setdiff(toAdd, rownames(matToMatch))
  n_missing <- length(missing_ids)
  # Append one NA row per missing identifier
  na_rows <- matrix(NA, nrow = n_missing, ncol = nrow(matToMatch),
                    dimnames = list(missing_ids, NULL))
  padded <- rbind(matToMatch, na_rows)
  # Append matching NA columns so the result stays square
  na_cols <- matrix(NA, nrow = nrow(padded), ncol = n_missing,
                    dimnames = list(NULL, missing_ids))
  cbind(padded, na_cols)
}
# Moving-window average (avg=TRUE) or sum (avg=FALSE) of adjacency matrices.
#   years : years to compute the statistic for
#   wdow  : window length in years (window is [year-wdow+1, year])
#   mats  : list of square adjacency matrices, named by year as strings
#   avg   : TRUE for element-wise mean, FALSE for element-wise sum
# Returns a list (named by year) of matrices aligned to the country set of the
# window's final year. Relies on matrixMatcher() defined above.
mvaStatMat=function(years, wdow, mats, avg=TRUE){
	matListStat=list()
	for(ii in 1:length(years) ){
		# Window [sy1, sy2] ending at the current year
		sy1=years[ii]-wdow+1; sy2=years[ii]
		ys=as.character(sy1:sy2)
		matList=mats[ys]
		# Keep only the years actually present in mats (absent years get NA names)
		namL=names(matList); namL=namL[!is.na(namL)]
		matList=mats[namL]
		# Countries of the window's final year define the output dimensions
		kmat=mats[as.character(sy2)][[1]]
		cntries=rownames(kmat); lcnt=length(cntries)
		# Pad every matrix in the window with NA rows/cols, then align ordering
		matList=lapply(matList, function(x) FUN=matrixMatcher(x, cntries))
		matList2=lapply(matList, function(x) FUN=x[cntries,cntries])
		# Stack the window into an lcnt x lcnt x w array and reduce over slices
		if(avg){matStat=rowMeans(
			array(unlist(matList2), dim = c(lcnt,lcnt,length(matList2))),
			dims=2, na.rm=T) }
		if(!avg){matStat=rowSums(
			array(unlist(matList2), dim = c(lcnt,lcnt,length(matList2))),
			dims=2, na.rm=T) }
		matStat=matrix(matStat, nrow=lcnt, ncol=lcnt, dimnames=list(cntries, cntries))
		matListStat[[ii]]=matStat; print(years[ii])  # progress indicator
	}
	names(matListStat)=years; matListStat
}
################################################################
|
# Auto-generated fuzz/regression fixture (from a valgrind run) for the internal
# C++ routine IntervalSurgeon:::rcpp_pile.
# NOTE(review): the integer vectors are arbitrary fuzzer output, not meaningful
# interval data; the call exists to check for crashes/memory errors only.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090195120L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
# Invoke the internal function with the fixture arguments
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
# Display the structure of the returned value
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609873728-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Auto-generated fuzz/regression fixture (from a valgrind run) for the internal
# C++ routine IntervalSurgeon:::rcpp_pile.
# NOTE(review): the integer vectors are arbitrary fuzzer output, not meaningful
# interval data; the call exists to check for crashes/memory errors only.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090195120L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
# Invoke the internal function with the fixture arguments
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
# Display the structure of the returned value
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estpoly.R
\name{arx}
\alias{arx}
\title{Estimate ARX Models}
\usage{
arx(x, order = c(1, 1, 1), lambda = 0.1, intNoise = FALSE, fixed = NULL)
}
\arguments{
\item{x}{an object of class \code{idframe}}
\item{order}{Specification of the orders: the three integer components
(na,nb,nk) are the order of polynomial A, (order of polynomial B + 1) and
the input-output delay}
\item{lambda}{Regularization parameter(Default=\code{0.1})}
\item{intNoise}{Logical variable indicating whether to add integrators in
the noise channel (Default=\code{FALSE})}
\item{fixed}{list containing fixed parameters. If supplied, only \code{NA} entries
will be varied. Specified as a list of two vectors, each containing the parameters
of polynomials A and B respectively.}
}
\value{
An object of class \code{estpoly} containing the following elements:
\item{sys}{an \code{idpoly} object containing the
fitted ARX coefficients}
\item{fitted.values}{the predicted response}
\item{residuals}{the residuals}
\item{input}{the input data used}
\item{call}{the matched call}
\item{stats}{A list containing the following fields: \cr
\code{vcov} - the covariance matrix of the fitted coefficients \cr
\code{sigma} - the standard deviation of the innovations\cr
\code{df} - the residual degrees of freedom}
}
\description{
Fit an ARX model of the specified order given the input-output data
}
\details{
SISO ARX models are of the form
\deqn{
y[k] + a_1 y[k-1] + \ldots + a_{na} y[k-na] = b_{nk} u[k-nk] +
\ldots + b_{nk+nb} u[k-nk-nb] + e[k]
}
The function estimates the coefficients using linear least squares (with
regularization).
\cr
The data is expected to have no offsets or trends. They can be removed
using the \code{\link{detrend}} function.
\cr
To estimate finite impulse response(\code{FIR}) models, specify the first
order to be zero.
}
\examples{
data(arxsim)
mod_arx <- arx(arxsim,c(1,2,2))
mod_arx
plot(mod_arx) # plot the predicted and actual responses
}
\references{
Arun K. Tangirala (2015), \emph{Principles of System Identification:
Theory and Practice}, CRC Press, Boca Raton. Section 21.6.1
Lennart Ljung (1999), \emph{System Identification: Theory for the User},
2nd Edition, Prentice Hall, New York. Section 10.1
}
|
/man/arx.Rd
|
no_license
|
cran/sysid
|
R
| false
| true
| 2,404
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estpoly.R
\name{arx}
\alias{arx}
\title{Estimate ARX Models}
\usage{
arx(x, order = c(1, 1, 1), lambda = 0.1, intNoise = FALSE, fixed = NULL)
}
\arguments{
\item{x}{an object of class \code{idframe}}
\item{order}{Specification of the orders: the three integer components
(na,nb,nk) are the order of polynomial A, (order of polynomial B + 1) and
the input-output delay}
\item{lambda}{Regularization parameter(Default=\code{0.1})}
\item{intNoise}{Logical variable indicating whether to add integrators in
the noise channel (Default=\code{FALSE})}
\item{fixed}{list containing fixed parameters. If supplied, only \code{NA} entries
will be varied. Specified as a list of two vectors, each containing the parameters
of polynomials A and B respectively.}
}
\value{
An object of class \code{estpoly} containing the following elements:
\item{sys}{an \code{idpoly} object containing the
fitted ARX coefficients}
\item{fitted.values}{the predicted response}
\item{residuals}{the residuals}
\item{input}{the input data used}
\item{call}{the matched call}
\item{stats}{A list containing the following fields: \cr
\code{vcov} - the covariance matrix of the fitted coefficients \cr
\code{sigma} - the standard deviation of the innovations\cr
\code{df} - the residual degrees of freedom}
}
\description{
Fit an ARX model of the specified order given the input-output data
}
\details{
SISO ARX models are of the form
\deqn{
y[k] + a_1 y[k-1] + \ldots + a_{na} y[k-na] = b_{nk} u[k-nk] +
\ldots + b_{nk+nb} u[k-nk-nb] + e[k]
}
The function estimates the coefficients using linear least squares (with
regularization).
\cr
The data is expected to have no offsets or trends. They can be removed
using the \code{\link{detrend}} function.
\cr
To estimate finite impulse response(\code{FIR}) models, specify the first
order to be zero.
}
\examples{
data(arxsim)
mod_arx <- arx(arxsim,c(1,2,2))
mod_arx
plot(mod_arx) # plot the predicted and actual responses
}
\references{
Arun K. Tangirala (2015), \emph{Principles of System Identification:
Theory and Practice}, CRC Press, Boca Raton. Section 21.6.1
Lennart Ljung (1999), \emph{System Identification: Theory for the User},
2nd Edition, Prentice Hall, New York. Section 10.1
}
|
# Plot the share of ICOS sites under drought conditions over time.
# Loads three standardised drought indices (365-day SPEI/SPI, full-period SMI),
# classifies each site-date under both the McKee-style and Agnew-style
# thresholds, and writes a faceted bar chart to plots/icos_sites_affected.eps.
# Relies on read_SDI()/save_plot() and tidyverse/cowplot helpers loaded elsewhere.
# Returns NULL invisibly via an explicit return(NULL); called for its side effect.
# Fixes: removed the empty, unused nested `plot_function`, the facet_wrap()
# call that was immediately overridden by facet_grid(), and commented-out layers.
plot_ts_icos_affected <- function() {
  df_spei <- read_SDI("SPEI", agg = 365)
  df_spi <- read_SDI("SPI", agg = 365)
  df_smi <- read_SDI("SMI", agg = "full")
  df <-
    left_join(df_spei, df_spi) %>%
    left_join(df_smi) %>%
    pivot_longer(-c(Date, site), names_to = "index") %>%
    drop_na() %>%
    # McKee-style drought categories from the index value
    mutate(drought_mckee = case_when(
      value <= -1 & value >-1.5 ~ "moderate",
      value <= -1.5 & value >-2. ~ "severe",
      value <= -2. ~ "extreme",
      TRUE ~ NA_character_
    )) %>%
    # Agnew-style drought categories from the index value
    mutate(drought_agnew = case_when(
      value <= -0.84 & value >-1.28 ~ "moderate",
      value <= -1.28 & value >-1.65 ~ "severe",
      value <= -1.65 ~ "extreme",
      TRUE ~ NA_character_
    )) %>%
    # First two characters of the site code give the country
    mutate(country = str_extract(site, "^.{2}"))
  ### how many icos sites affected
  nn <- length(unique(df$site))
  palette <- c("#481D24","#F03B20","#FFC857")
  df_plot <- df %>%
    drop_na() %>%
    pivot_longer(-c(Date,site,index,value,country),
                 names_to = "category_source",
                 values_to = "drought_category") %>%
    mutate(drought_category = factor(drought_category, levels = c("extreme", "severe", "moderate"))) %>%
    group_by(Date, index, category_source, drought_category) %>%
    # Fraction of all sites in each category on each date
    summarise(n = n() / nn)
  p <- df_plot %>%
    filter(year(Date) > 2010) %>%
    ggplot(aes(Date, y = n)) +
    theme_cowplot() +
    background_grid(major = "y") +
    geom_col(mapping = aes(fill = drought_category), colour = NA, width = 2, alpha = 1) +
    scale_x_date(expand = c(0,0), breaks = pretty_breaks(n = 7)) +
    scale_y_continuous(expand = c(0,0), limits = c(0,1), labels = percent_format(), breaks = c(0.25,0.5,0.75,1)) +
    xlab(NULL) + ylab("number of ICOS sites affected") +
    scale_fill_manual("drought", values = palette) +
    facet_grid(vars(index), vars(category_source))
  save_plot("plots/icos_sites_affected.eps", p,
            bg = "white",
            base_width = 10)
  return(NULL)
}
|
/code/plot_ts_icos_affected.R
|
no_license
|
pohlf/icosdroughtindices
|
R
| false
| false
| 2,251
|
r
|
# Plot the share of ICOS sites under drought conditions over time.
# Loads three standardised drought indices (365-day SPEI/SPI, full-period SMI),
# classifies each site-date under both the McKee-style and Agnew-style
# thresholds, and writes a faceted bar chart to plots/icos_sites_affected.eps.
# Relies on read_SDI()/save_plot() and tidyverse/cowplot helpers loaded elsewhere.
# Returns NULL invisibly via an explicit return(NULL); called for its side effect.
# Fixes: removed the empty, unused nested `plot_function`, the facet_wrap()
# call that was immediately overridden by facet_grid(), and commented-out layers.
plot_ts_icos_affected <- function() {
  df_spei <- read_SDI("SPEI", agg = 365)
  df_spi <- read_SDI("SPI", agg = 365)
  df_smi <- read_SDI("SMI", agg = "full")
  df <-
    left_join(df_spei, df_spi) %>%
    left_join(df_smi) %>%
    pivot_longer(-c(Date, site), names_to = "index") %>%
    drop_na() %>%
    # McKee-style drought categories from the index value
    mutate(drought_mckee = case_when(
      value <= -1 & value >-1.5 ~ "moderate",
      value <= -1.5 & value >-2. ~ "severe",
      value <= -2. ~ "extreme",
      TRUE ~ NA_character_
    )) %>%
    # Agnew-style drought categories from the index value
    mutate(drought_agnew = case_when(
      value <= -0.84 & value >-1.28 ~ "moderate",
      value <= -1.28 & value >-1.65 ~ "severe",
      value <= -1.65 ~ "extreme",
      TRUE ~ NA_character_
    )) %>%
    # First two characters of the site code give the country
    mutate(country = str_extract(site, "^.{2}"))
  ### how many icos sites affected
  nn <- length(unique(df$site))
  palette <- c("#481D24","#F03B20","#FFC857")
  df_plot <- df %>%
    drop_na() %>%
    pivot_longer(-c(Date,site,index,value,country),
                 names_to = "category_source",
                 values_to = "drought_category") %>%
    mutate(drought_category = factor(drought_category, levels = c("extreme", "severe", "moderate"))) %>%
    group_by(Date, index, category_source, drought_category) %>%
    # Fraction of all sites in each category on each date
    summarise(n = n() / nn)
  p <- df_plot %>%
    filter(year(Date) > 2010) %>%
    ggplot(aes(Date, y = n)) +
    theme_cowplot() +
    background_grid(major = "y") +
    geom_col(mapping = aes(fill = drought_category), colour = NA, width = 2, alpha = 1) +
    scale_x_date(expand = c(0,0), breaks = pretty_breaks(n = 7)) +
    scale_y_continuous(expand = c(0,0), limits = c(0,1), labels = percent_format(), breaks = c(0.25,0.5,0.75,1)) +
    xlab(NULL) + ylab("number of ICOS sites affected") +
    scale_fill_manual("drought", values = palette) +
    facet_grid(vars(index), vars(category_source))
  save_plot("plots/icos_sites_affected.eps", p,
            bg = "white",
            base_width = 10)
  return(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conserveR-package.R
\docType{data}
\name{traits}
\alias{traits}
\title{Traits}
\format{
A data frame with thirty-one variables:
\describe{
\item{author}{The last name of the first author.}
\item{year}{The publication year.}
\item{acronym}{The acronym of the method as suggested by the authors, or a custom one if there was none available}
\item{method_name}{The full name of the method as suggested by the authors.}
\item{scale}{}
\item{scalability}{A qualitative assessment how scalable the methods are to large-scale analyses
with hundreds of species or global extent.}
\item{implementation_standard}{How is the method implemented/how can it be used by people, i.e. a software or similar that anyone that would like to use the method could use. "none" if no implementation exists.}
\item{target}{The target of the method. Either "species" or "area".}
\item{scope_terrestrial}{Can the method be applied to terrestrial species/systems? 1 = yes, 0 = no.}
\item{scope_marine}{Can the method be applied to marine species/systems? 1 = yes, 0 = no.}
\item{scope_limnic}{Can the method be applied to limnic species/systems? 1 = yes, 0 = no}
\item{phylogeny}{Does the method include evolutionary aspects (i.e. does it at any point use a phylogeny as input) to prioritize conservation efforts? 1 = yes, 0 = no.}
\item{distribution}{Does the method include distribution aspects of species (i.e. any type of distribution information as input, e.g. species ranges, grid-cell occupancy or occurrence records) to prioritize conservation? 1 = yes, 0 = no.}
\item{functional}{Does the method include functional aspects of species (i.e. functional traits as input data) to prioritize conservation? 1 = yes, 0 = no.}
\item{rarity}{Does the method include species' rarity or commonness (i.e. species abundances as input data) to prioritize conservation? 1 = yes, 0 = no.}
\item{pop_dynamics}{Does the method include changes in species' population density through time? 1 = yes, 0 = no.}
\item{genetics}{Does the method include genetic aspects (i.e. sequence data as input)? For example genetic diversity. 1 = yes, 0 = no.}
\item{ecosystem_services}{Does the method include the importance of species or areas for ecosystem services to prioritize conservation? 1 = yes, 0 = no.}
\item{socio_economic}{Does the method include socio-economic values of species or areas to prioritize conservation effort? 1 = yes, 0 = no.}
\item{landscape_connectivity}{Does the method include landscape connectivity to prioritize conservation effort? 1 = yes, 0 = no.}
\item{land_use}{Does the method include land use factors (i.e. land use data, for instance modeled or remotely sensed) for conservation prioritization? 1 = yes, 0 = no.}
\item{protected_area}{Does the method include protected areas in some way to prioritize conservation effort? 1 = yes, 0 = no.}
\item{extinction_risk}{Does the method include species extinction risk in any way (i.e. the International Union for the Conservation of Nature assessment categories as input data) for conservation prioritization? 1 = yes, 0 = no.}
\item{environment}{Does the method include environmental variables (e.g., modeled precipitation, species niche or worldclim data as input data)?}
\item{vulnerability}{Does the method include assessments of the vulnerability of species or areas to specific threats, for instance pollution, hunting or logging? 1 = yes, 0 = no}
\item{climate_change}{Does the method include climate change as explicit factor for conservation prioritization?}
\item{includes_simulation}{Does the method include the possibility to conduct simulations?}
\item{free_text_description}{}
\item{DOI/link}{The digital object identifier or link to a scientific publication.}
\item{ID}{The ID to link with the \code{\link{literature}}}
\item{example_taxon_standard}{On which taxon was the method developed/tested?}
\item{example_area_standard}{In which area was the method developed/tested?}
}
}
\usage{
traits
}
\description{
The dataset of conservation prioritization methods relevant for
macro-evolution and macro-ecology, including information on data needs, "method traits" and meta-data
}
\keyword{datasets}
|
/man/traits.Rd
|
no_license
|
cd-barratt/conserveR
|
R
| false
| true
| 4,223
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conserveR-package.R
\docType{data}
\name{traits}
\alias{traits}
\title{Traits}
\format{
A data frame with thirty-one variables:
\describe{
\item{author}{The last name of the first author.}
\item{year}{The publication year.}
\item{acronym}{The acronym of the method as suggested by the authors, or a custom one if there was none available}
\item{method_name}{The full name of the method as suggested by the authors.}
\item{scale}{}
\item{scalability}{A qualitative assessment how scalable the methods are to large-scale analyses
with hundreds of species or global extent.}
\item{implementation_standard}{How is the method implemented/how can it be used by people, i.e. a software or similar that anyone that would like to use the method could use. "none" if no implementation exists.}
\item{target}{The target of the method. Either "species" or "area".}
\item{scope_terrestrial}{Can the method be applied to terrestrial species/systems? 1 = yes, 0 = no.}
\item{scope_marine}{Can the method be applied to marine species/systems? 1 = yes, 0 = no.}
\item{scope_limnic}{Can the method be applied to limnic species/systems? 1 = yes, 0 = no}
\item{phylogeny}{Does the method include evolutionary aspects (i.e. does it at any point use a phylogeny as input) to prioritize conservation efforts? 1 = yes, 0 = no.}
\item{distribution}{Does the method include distribution aspects of species (i.e. any type of distribution information as input, e.g. species ranges, grid-cell occupancy or occurrence records) to prioritize conservation? 1 = yes, 0 = no.}
\item{functional}{Does the method include functional aspects of species (i.e. functional traits as input data) to prioritize conservation? 1 = yes, 0 = no.}
\item{rarity}{Does the method include species' rarity or commonness (i.e. species abundances as input data) to prioritize conservation? 1 = yes, 0 = no.}
\item{pop_dynamics}{Does the method include changes in species' population density through time? 1 = yes, 0 = no.}
\item{genetics}{Does the method include genetic aspects (i.e. sequence data as input)? For example genetic diversity. 1 = yes, 0 = no.}
\item{ecosystem_services}{Does the method include the importance of species or areas for ecosystem services to prioritize conservation? 1 = yes, 0 = no.}
\item{socio_economic}{Does the method include socio-economic values of species or areas to prioritize conservation effort? 1 = yes, 0 = no.}
\item{landscape_connectivity}{Does the method include landscape connectivity to prioritize conservation effort? 1 = yes, 0 = no.}
\item{land_use}{Does the method include land use factors (i.e. land use data, for instance modeled or remotely sensed) for conservation prioritization? 1 = yes, 0 = no.}
\item{protected_area}{Does the method include protected areas in some way to prioritize conservation effort? 1 = yes, 0 = no.}
\item{extinction_risk}{Does the method include species extinction risk in any way (i.e. the International Union for the Conservation of Nature assessment categories as input data) for conservation prioritization? 1 = yes, 0 = no.}
\item{environment}{Does the method include environmental variables (e.g., modeled precipitation, species niche or worldclim data as input data)?}
\item{vulnerability}{Does the method include assessments of the vulnerability of species or areas to specific threats, for instance pollution, hunting or logging? 1 = yes, 0 = no}
\item{climate_change}{Does the method include climate change as explicit factor for conservation prioritization?}
\item{includes_simulation}{Does the method include the possibility to conduct simulations?}
\item{free_text_description}{}
\item{DOI/link}{The digital object identifier or link to a scientific publication.}
\item{ID}{The ID to link with the \code{\link{literature}}}
\item{example_taxon_standard}{On which taxon was the method developed/tested?}
\item{example_area_standard}{In which area was the method developed/tested?}
}
}
\usage{
traits
}
\description{
The dataset of conservation prioritization methods relevant for
macro-evolution and macro-ecology, including information on data needs, "method traits" and meta-data
}
\keyword{datasets}
|
# dplyr tutorial script using the nycflights13 flight data.
# (Comments translated to English from the original Korean.)
if(!requireNamespace("nycflights13")) install.packages("nycflights13")
library(nycflights13)
nycflights13::flights
str(flights)
# the dplyr package
library(dplyr)
# Column-wise: selection - select()
# Pick columns from the data; the output columns follow the declared order
select(flights,year,month,day)
# The from:to syntax, normally only for numbers, works on column positions
select(flights, year:day)
# - (minus) means everything except the specified columns
select(flights, -(year:day))
# Helpers such as everything() are provided -> all remaining columns except those already declared
select(flights, time_hour, air_time, everything())
# Helpers like ends_with() select every column matching part of a name; key regular-expression features are exposed as functions
# see ?select for details
select(flights, year:day, ends_with("delay"), distance, air_time)
# Column-wise: computation - mutate()
# Use a subset of the data for easier printing
flights_sml <- select(flights, year:day, ends_with("delay"), distance, air_time)
flights_sml
# New columns can be computed from existing columns
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  speed = distance / air_time * 60
)
# Columns can also be dropped or existing columns modified
mutate(flights_sml,
  arr_delay = NULL,
  air_time = air_time / 60
)
# transmute() builds a table containing only the computed columns
transmute(flights,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
# Window functions are handy in combination with group_by()
flights_smlg <- group_by(flights_sml, month)
mutate(flights_smlg, rank = row_number(desc(arr_delay)))
# Row-wise: conditions - filter()
# filter() keeps only the rows that satisfy the condition.
filter(flights, month == 1)
# & == and; used to combine conditions
filter(flights, month == 1 & day == 1)
#filter(flights, month == 1 and day == 1) # this one does not work.
# | == or
filter(flights, month == 11 | month == 12 )
# %in% is a useful logical operator: TRUE where the left-hand vector matches any element of the right-hand vector
filter(flights, month %in% c(11,12))
# ! flips the logical result
# As with parentheses in mathematics, spelling out the scope of the operation reduces the chance of mistakes
filter(flights, !(arr_delay > 120 | dep_delay > 120))
# Row-wise: appending - bind_rows()
feb <- filter(flights, month == 2)
dec <- filter(flights, month == 12)
dim(feb); dim(dec)
nrow(feb)+nrow(dec)
# bind_rows() stacks data sets on top of each other, matching columns by name.
bind_rows(feb, dec)
# Data supplied as a list() is bound as well
bind_rows(list(feb, dec))
# split() divides its first argument into a list() according to a column.
flights_mon <- split(flights, flights$month)
summary(flights_mon)
# The 12 list() elements produced by split() are combined back correctly
nrow(flights)
bind_rows(flights_mon)
# Different kinds of data are bound too; c() creates a vector, data_frame creates a data.frame
bind_rows(
  c(a = 1, b = 2),
  data_frame(a = 3:4, b = 5:6),
  c(a = 7, b = 8)
)
# A column identifying each source can be added while binding
bind_rows(list(feb, dec), .id = "id")
# How names behave for the source-identifying column
bind_rows(list(a = feb, b = dec), .id = "data")
# When a column name is missing on one side, the gap is filled with NA
bind_rows(data.frame(x = 1:3), data.frame(y = 1:4))
# Row-wise: sorting - arrange()
# arrange() sorts rows in ascending order of the given columns
arrange(flights, dep_delay)
# desc() is a helper that switches the direction to descending
arrange(flights, desc(month), dep_delay)
# Grouped computation: group_by() + summarise()
# summarise() applies functions that condense many values into summary statistics
summarise(flights, mean = mean(dep_delay, na.rm = T), n = n())
# group_by() attaches a per-group condition on the specified columns
flights_g <- group_by(flights, month)
flights_g
# summarise() is then evaluated per group defined by group_by()
summarise(flights_g, mean = mean(dep_delay, na.rm = T), n = n())
# Column joins - left_join()
flights2 <- select(flights, year:day, hour, origin, dest, tailnum, carrier)
View(flights2)
# left_join() keeps the left-hand data as the base; the columns named in 'by'
# act as keys identifying matching rows, and the right-hand data is joined on
View(left_join(flights2, airlines, by = "carrier"))
# The equivalent built from mutate(), match(), etc. looks like this
mutate(flights2, name = airlines$name[match(carrier, airlines$carrier)])
# With no key columns given, every column name shared by both data sets becomes a key automatically
View(left_join(flights2, weather))
# When several columns could act as keys, an explicit 'by' restricts the key to the listed columns
left_join(flights2, planes, by="tailnum")
# Example of data going wrong when several columns act as keys
left_join(flights2, planes)
# Use the syntax below when the key column names differ
left_join(flights2, airports, c("dest" = "faa"))
# Aligning the names with rename() also works
left_join(flights2, rename(airports, dest = faa), by = "dest")
|
/dplyr package.R
|
no_license
|
KCY0409/data-retreatment
|
R
| false
| false
| 5,711
|
r
|
# dplyr tutorial script using the nycflights13 flight data.
# (Comments translated to English from the original Korean.)
if(!requireNamespace("nycflights13")) install.packages("nycflights13")
library(nycflights13)
nycflights13::flights
str(flights)
# the dplyr package
library(dplyr)
# Column-wise: selection - select()
# Pick columns from the data; the output columns follow the declared order
select(flights,year,month,day)
# The from:to syntax, normally only for numbers, works on column positions
select(flights, year:day)
# - (minus) means everything except the specified columns
select(flights, -(year:day))
# Helpers such as everything() are provided -> all remaining columns except those already declared
select(flights, time_hour, air_time, everything())
# Helpers like ends_with() select every column matching part of a name; key regular-expression features are exposed as functions
# see ?select for details
select(flights, year:day, ends_with("delay"), distance, air_time)
# Column-wise: computation - mutate()
# Use a subset of the data for easier printing
flights_sml <- select(flights, year:day, ends_with("delay"), distance, air_time)
flights_sml
# New columns can be computed from existing columns
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  speed = distance / air_time * 60
)
# Columns can also be dropped or existing columns modified
mutate(flights_sml,
  arr_delay = NULL,
  air_time = air_time / 60
)
# transmute() builds a table containing only the computed columns
transmute(flights,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
# Window functions are handy in combination with group_by()
flights_smlg <- group_by(flights_sml, month)
mutate(flights_smlg, rank = row_number(desc(arr_delay)))
# Row-wise: conditions - filter()
# filter() keeps only the rows that satisfy the condition.
filter(flights, month == 1)
# & == and; used to combine conditions
filter(flights, month == 1 & day == 1)
#filter(flights, month == 1 and day == 1) # this one does not work.
# | == or
filter(flights, month == 11 | month == 12 )
# %in% is a useful logical operator: TRUE where the left-hand vector matches any element of the right-hand vector
filter(flights, month %in% c(11,12))
# ! flips the logical result
# As with parentheses in mathematics, spelling out the scope of the operation reduces the chance of mistakes
filter(flights, !(arr_delay > 120 | dep_delay > 120))
# Row-wise: appending - bind_rows()
feb <- filter(flights, month == 2)
dec <- filter(flights, month == 12)
dim(feb); dim(dec)
nrow(feb)+nrow(dec)
# bind_rows() stacks data sets on top of each other, matching columns by name.
bind_rows(feb, dec)
# Data supplied as a list() is bound as well
bind_rows(list(feb, dec))
# split() divides its first argument into a list() according to a column.
flights_mon <- split(flights, flights$month)
summary(flights_mon)
# The 12 list() elements produced by split() are combined back correctly
nrow(flights)
bind_rows(flights_mon)
# Different kinds of data are bound too; c() creates a vector, data_frame creates a data.frame
bind_rows(
  c(a = 1, b = 2),
  data_frame(a = 3:4, b = 5:6),
  c(a = 7, b = 8)
)
# A column identifying each source can be added while binding
bind_rows(list(feb, dec), .id = "id")
# How names behave for the source-identifying column
bind_rows(list(a = feb, b = dec), .id = "data")
# When a column name is missing on one side, the gap is filled with NA
bind_rows(data.frame(x = 1:3), data.frame(y = 1:4))
# Row-wise: sorting - arrange()
# arrange() sorts rows in ascending order of the given columns
arrange(flights, dep_delay)
# desc() is a helper that switches the direction to descending
arrange(flights, desc(month), dep_delay)
# Grouped computation: group_by() + summarise()
# summarise() applies functions that condense many values into summary statistics
summarise(flights, mean = mean(dep_delay, na.rm = T), n = n())
# group_by() attaches a per-group condition on the specified columns
flights_g <- group_by(flights, month)
flights_g
# summarise() is then evaluated per group defined by group_by()
summarise(flights_g, mean = mean(dep_delay, na.rm = T), n = n())
# Column joins - left_join()
flights2 <- select(flights, year:day, hour, origin, dest, tailnum, carrier)
View(flights2)
# left_join() keeps the left-hand data as the base; the columns named in 'by'
# act as keys identifying matching rows, and the right-hand data is joined on
View(left_join(flights2, airlines, by = "carrier"))
# The equivalent built from mutate(), match(), etc. looks like this
mutate(flights2, name = airlines$name[match(carrier, airlines$carrier)])
# With no key columns given, every column name shared by both data sets becomes a key automatically
View(left_join(flights2, weather))
# When several columns could act as keys, an explicit 'by' restricts the key to the listed columns
left_join(flights2, planes, by="tailnum")
# Example of data going wrong when several columns act as keys
left_join(flights2, planes)
# Use the syntax below when the key column names differ
left_join(flights2, airports, c("dest" = "faa"))
# Aligning the names with rename() also works
left_join(flights2, rename(airports, dest = faa), by = "dest")
|
# knitr-extracted vignette code (chunk headers preserved below).
## ---- echo=FALSE---------------------------------------------------------
library(ggplot2)
## ---- fig.width=6, fig.height=6------------------------------------------
# Scatter plot of gross horsepower vs. weight from mtcars,
# coloured by number of cylinders (as a discrete factor)
ggplot(mtcars) +
geom_point(aes(x=hp, y=wt, colour=factor(cyl)))
|
/data/genthat_extracted_code/ggconf/vignettes/Introduction-to-ggconf.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 238
|
r
|
# knitr-extracted vignette code (chunk headers preserved below).
## ---- echo=FALSE---------------------------------------------------------
library(ggplot2)
## ---- fig.width=6, fig.height=6------------------------------------------
# Scatter plot of gross horsepower vs. weight from mtcars,
# coloured by number of cylinders (as a discrete factor)
ggplot(mtcars) +
geom_point(aes(x=hp, y=wt, colour=factor(cyl)))
|
# alignPeaks method for a RangedDataList: align each RangedData element in
# parallel and reassemble the list, preserving element names.
# NOTE(review): uses the (now-deprecated) IRanges RangedData classes -- confirm
# against the Bioconductor version this package targets.
setMethod("alignPeaks",signature(x='RangedDataList', strand='character'),
function(x, strand, npeaks=1000, bandwidth=150, mc.cores=1) {
  x <- as.list(x)
  # Each element is processed independently; mc.preschedule=FALSE because
  # per-element workloads can differ in size
  ans <- parallel::mclapply(x, FUN=function(z) alignPeaks(z, strand=strand, npeaks=npeaks, bandwidth=bandwidth), mc.cores=mc.cores, mc.preschedule=FALSE)
  ans <- RangedDataList(ans)
  names(ans) <- names(x)
  return(ans)
}
)
# alignPeaks method for a single RangedData: pull out the named strand column,
# delegate the alignment to the IRangesList method, then restore value columns.
setMethod("alignPeaks",signature(x='RangedData',strand='character'),
function(x, strand, npeaks=1000, bandwidth=150, mc.cores=1) {
  # Strand factor per space, looked up by the supplied column name
  strandnew <- lapply(values(x),function(z) as.factor(z[,strand]))
  ans <- alignPeaks(x=ranges(x), strand=strandnew, npeaks=npeaks, bandwidth=bandwidth)
  ans <- RangedData(ans, values=values(x))
  # Drop the 'values.' prefix that RangedData() prepends to column names
  colnames(ans) <- sub('values.','',colnames(ans))
  return(ans)
}
)
# alignPeaks workhorse: estimates the strand-specific read shift `d` from the
# `npeaks` highest coverage peaks, then moves '+' strand reads downstream by d
# and '-' strand reads upstream by d so the two strand profiles align.
# x: IRangesList of reads (one element per space/chromosome); strand: list of
# strand factors parallel to x. Returns the shifted ranges.
# NOTE(review): mc.cores is accepted but not used inside this method.
setMethod("alignPeaks",signature(x='IRangesList',strand='list'),
function(x, strand, npeaks=1000, bandwidth=150, mc.cores=1) {
  if (length(x) != length(strand)) stop('x and strand must have the same length')
  # Reorder strand elements so they are parallel to x (matched by name)
  strand <- strand[names(x)]
  #Find highest peaks
  xcov <- vector("list",length(x)); names(xcov) <- names(x)
  xcov <- coverage(x)
  #for (i in 1:length(xcov)) xcov[[i]] <- coverage(x[[i]])
  # Coverage "islands" (runs with coverage >= 1), their maxima and positions
  xislands <- lapply(xcov,function(x) slice(x,lower=1))
  xmax <- lapply(xislands,function(x) viewMaxs(x))
  xwhichmax <- lapply(xislands,function(x) viewWhichMaxs(x))
  # Chromosome label for each island, then keep only the npeaks highest peaks
  chr <- rep(names(xmax),lapply(xmax,function(x) length(x)))
  xmax <- unlist(xmax)
  xwhichmax <- unlist(xwhichmax)
  chr <- chr[order(xmax,decreasing=TRUE)][1:npeaks]
  xwhichmax <- xwhichmax[order(xmax,decreasing=TRUE)][1:npeaks]
  xmax <- xmax[order(xmax,decreasing=TRUE)][1:npeaks]
  #Format peaks +/- bandwidth as an IRangesList
  peakranges <- vector("list",length(x))
  names(peakranges) <- names(x)
  for (i in 1:length(peakranges)) {
    sel <- chr %in% names(peakranges)[i]
    if (any(sel)) peakranges[[i]] <- IRanges(start=xwhichmax[sel]-bandwidth,end=xwhichmax[sel]+bandwidth)
  }
  class(peakranges) <- 'IRangesList'
  #Find center of reads overlapping with peaks (+/- bandwidth)
  # For one chromosome: offset of each overlapping read start from the center
  # of the peak it overlaps, together with the read's strand. y == NULL means
  # no peaks were selected on this chromosome.
  f <- function(x,strand,y) {
    if (!is.null(y)) {
      # o <- findOverlaps(y,query=x,multiple=TRUE)
      o <- findOverlaps(y,query=x, select='all') # Removed by Oscar on may 19. Check complained with unused argument(s) (multiple = TRUE)
      midpoint <- start(x)[queryHits(o)] - .5*(start(y)[subjectHits(o)]+end(y)[subjectHits(o)])
      # midpoint <- start(x)[as.matrix(o)[,'query']] - .5*(start(y)[as.matrix(o)[,'subject']]+end(y)[as.matrix(o)[,'subject']])
      strandSel <- strand[queryHits(o)]
      #strandSel <- strand[as.matrix(o)[,'query']]
    } else {
      midpoint <- strandSel <- NULL
    }
    return(list(midpoint=midpoint,strand=strandSel))
  }
  readct <- vector("list",length(x))
  for (i in 1:length(x)) readct[[i]] <- f(x=x[[i]],strand=strand[[i]],y=peakranges[[i]])
  #readct <- mapply(f,x,strand,peakranges)
  readstrand <- unlist(sapply(readct,function(x) as.character(x$strand)))
  readct <- unlist(sapply(readct,'[[','midpoint'))
  #Distance between peaks
  # Shift estimate: mean '-' strand offset minus mean '+' strand offset,
  # clipped to the interval [0, 300] with a warning when clipping occurs
  d <- mean(readct[readstrand=='-']) - mean(readct[readstrand=='+'])
  if (d<0) {
    d <- 0
    warning('The estimated shift size was below zero. Set to zero instead.')
  } else if (d>300) {
    d <- 300
    warning('The estimated shift size was > 300. Set to 300 instead.')
  }
  #Adjust reads
  if (d!=0) {
    # Shift '+' reads by +d and '-' reads by -d; reads that would start below
    # position zero are reverted to their original coordinates
    adj <- ifelse(unlist(strand)=='+',d,-d)
    s <- unlist(start(x)) + adj
    e <- unlist(end(x)) + adj
    negs <- s<0
    e[negs] <- e[negs] - adj[negs]
    s[negs] <- s[negs] - adj[negs]
    space <- rep(names(x),sapply(x,length))
    x <- ranges(RangedData(IRanges(start=s,end=e),space=space))
    #
    #for (i in 1:length(x)) {
    #  sel <- ifelse(strand[[i]]=='+',1,-1)
    #  adj <- sel*d
    #  e <- end(x[[i]]) + adj
    #  s <- start(x[[i]]) + adj
    #  negs <- s<0
    #  e[negs] <- e[negs] - adj[negs]
    #  s[negs] <- s[negs] - adj[negs]
    #  x[[i]] <- IRanges(start=s,end=e)
    #}
  }
  cat('Estimated shift size is',d,'\n')
  return(x)
}
)
# alignPeaks for GRanges: round-trip through the RangedData method.
setMethod("alignPeaks", signature(x = 'GRanges', strand = 'character'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  rd <- as(x, 'RangedData')
  aligned <- alignPeaks(rd, strand = strand, npeaks = npeaks, bandwidth = bandwidth, mc.cores = mc.cores)
  as(aligned, 'GRanges')
}
)
# alignPeaks for GRangesList: round-trip through the RangedDataList method.
setMethod("alignPeaks", signature(x = 'GRangesList'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  rdl <- RangedDataList(lapply(x, function(y) as(y, 'RangedData')))
  aligned <- alignPeaks(rdl, strand = strand, npeaks = npeaks, bandwidth = bandwidth, mc.cores = mc.cores)
  as(aligned, 'GRangesList')
}
)
|
/R/alignPeaks.R
|
no_license
|
singlecoated/htSeqTools
|
R
| false
| false
| 4,831
|
r
|
# alignPeaks for a RangedDataList: dispatch the RangedData method over each
# element (optionally in parallel via mclapply) and re-wrap the results,
# preserving the element names.
setMethod("alignPeaks", signature(x = 'RangedDataList', strand = 'character'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  elems <- as.list(x)
  aligned <- parallel::mclapply(
    elems,
    FUN = function(z) alignPeaks(z, strand = strand, npeaks = npeaks, bandwidth = bandwidth),
    mc.cores = mc.cores, mc.preschedule = FALSE
  )
  out <- RangedDataList(aligned)
  names(out) <- names(elems)
  out
}
)
# alignPeaks for a single RangedData: pull out the per-space strand factors,
# align the underlying ranges, then rebuild the RangedData with the original
# value columns (dropping the 'values.' prefix the rebuild introduces).
setMethod("alignPeaks", signature(x = 'RangedData', strand = 'character'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  strandFactors <- lapply(values(x), function(z) as.factor(z[, strand]))
  alignedRanges <- alignPeaks(x = ranges(x), strand = strandFactors, npeaks = npeaks, bandwidth = bandwidth)
  out <- RangedData(alignedRanges, values = values(x))
  colnames(out) <- sub('values.', '', colnames(out))
  out
}
)
# alignPeaks workhorse: estimates the strand-specific read shift `d` from the
# `npeaks` highest coverage peaks, then moves '+' strand reads downstream by d
# and '-' strand reads upstream by d so the two strand profiles align.
# x: IRangesList of reads (one element per space/chromosome); strand: list of
# strand factors parallel to x. Returns the shifted ranges.
# NOTE(review): mc.cores is accepted but not used inside this method.
setMethod("alignPeaks",signature(x='IRangesList',strand='list'),
function(x, strand, npeaks=1000, bandwidth=150, mc.cores=1) {
  if (length(x) != length(strand)) stop('x and strand must have the same length')
  # Reorder strand elements so they are parallel to x (matched by name)
  strand <- strand[names(x)]
  #Find highest peaks
  xcov <- vector("list",length(x)); names(xcov) <- names(x)
  xcov <- coverage(x)
  #for (i in 1:length(xcov)) xcov[[i]] <- coverage(x[[i]])
  # Coverage "islands" (runs with coverage >= 1), their maxima and positions
  xislands <- lapply(xcov,function(x) slice(x,lower=1))
  xmax <- lapply(xislands,function(x) viewMaxs(x))
  xwhichmax <- lapply(xislands,function(x) viewWhichMaxs(x))
  # Chromosome label for each island, then keep only the npeaks highest peaks
  chr <- rep(names(xmax),lapply(xmax,function(x) length(x)))
  xmax <- unlist(xmax)
  xwhichmax <- unlist(xwhichmax)
  chr <- chr[order(xmax,decreasing=TRUE)][1:npeaks]
  xwhichmax <- xwhichmax[order(xmax,decreasing=TRUE)][1:npeaks]
  xmax <- xmax[order(xmax,decreasing=TRUE)][1:npeaks]
  #Format peaks +/- bandwidth as an IRangesList
  peakranges <- vector("list",length(x))
  names(peakranges) <- names(x)
  for (i in 1:length(peakranges)) {
    sel <- chr %in% names(peakranges)[i]
    if (any(sel)) peakranges[[i]] <- IRanges(start=xwhichmax[sel]-bandwidth,end=xwhichmax[sel]+bandwidth)
  }
  class(peakranges) <- 'IRangesList'
  #Find center of reads overlapping with peaks (+/- bandwidth)
  # For one chromosome: offset of each overlapping read start from the center
  # of the peak it overlaps, together with the read's strand. y == NULL means
  # no peaks were selected on this chromosome.
  f <- function(x,strand,y) {
    if (!is.null(y)) {
      # o <- findOverlaps(y,query=x,multiple=TRUE)
      o <- findOverlaps(y,query=x, select='all') # Removed by Oscar on may 19. Check complained with unused argument(s) (multiple = TRUE)
      midpoint <- start(x)[queryHits(o)] - .5*(start(y)[subjectHits(o)]+end(y)[subjectHits(o)])
      # midpoint <- start(x)[as.matrix(o)[,'query']] - .5*(start(y)[as.matrix(o)[,'subject']]+end(y)[as.matrix(o)[,'subject']])
      strandSel <- strand[queryHits(o)]
      #strandSel <- strand[as.matrix(o)[,'query']]
    } else {
      midpoint <- strandSel <- NULL
    }
    return(list(midpoint=midpoint,strand=strandSel))
  }
  readct <- vector("list",length(x))
  for (i in 1:length(x)) readct[[i]] <- f(x=x[[i]],strand=strand[[i]],y=peakranges[[i]])
  #readct <- mapply(f,x,strand,peakranges)
  readstrand <- unlist(sapply(readct,function(x) as.character(x$strand)))
  readct <- unlist(sapply(readct,'[[','midpoint'))
  #Distance between peaks
  # Shift estimate: mean '-' strand offset minus mean '+' strand offset,
  # clipped to the interval [0, 300] with a warning when clipping occurs
  d <- mean(readct[readstrand=='-']) - mean(readct[readstrand=='+'])
  if (d<0) {
    d <- 0
    warning('The estimated shift size was below zero. Set to zero instead.')
  } else if (d>300) {
    d <- 300
    warning('The estimated shift size was > 300. Set to 300 instead.')
  }
  #Adjust reads
  if (d!=0) {
    # Shift '+' reads by +d and '-' reads by -d; reads that would start below
    # position zero are reverted to their original coordinates
    adj <- ifelse(unlist(strand)=='+',d,-d)
    s <- unlist(start(x)) + adj
    e <- unlist(end(x)) + adj
    negs <- s<0
    e[negs] <- e[negs] - adj[negs]
    s[negs] <- s[negs] - adj[negs]
    space <- rep(names(x),sapply(x,length))
    x <- ranges(RangedData(IRanges(start=s,end=e),space=space))
    #
    #for (i in 1:length(x)) {
    #  sel <- ifelse(strand[[i]]=='+',1,-1)
    #  adj <- sel*d
    #  e <- end(x[[i]]) + adj
    #  s <- start(x[[i]]) + adj
    #  negs <- s<0
    #  e[negs] <- e[negs] - adj[negs]
    #  s[negs] <- s[negs] - adj[negs]
    #  x[[i]] <- IRanges(start=s,end=e)
    #}
  }
  cat('Estimated shift size is',d,'\n')
  return(x)
}
)
# alignPeaks for GRanges: round-trip through the RangedData method.
setMethod("alignPeaks", signature(x = 'GRanges', strand = 'character'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  rd <- as(x, 'RangedData')
  aligned <- alignPeaks(rd, strand = strand, npeaks = npeaks, bandwidth = bandwidth, mc.cores = mc.cores)
  as(aligned, 'GRanges')
}
)
# alignPeaks for GRangesList: round-trip through the RangedDataList method.
setMethod("alignPeaks", signature(x = 'GRangesList'),
function(x, strand, npeaks = 1000, bandwidth = 150, mc.cores = 1) {
  rdl <- RangedDataList(lapply(x, function(y) as(y, 'RangedData')))
  aligned <- alignPeaks(rdl, strand = strand, npeaks = npeaks, bandwidth = bandwidth, mc.cores = mc.cores)
  as(aligned, 'GRangesList')
}
)
|
# Exploratory Data Analysis course project, Plot 2: Global Active Power over
# time for 1-2 Feb 2007, from the UCI household power consumption dataset.
library(lubridate)
#Import the data to a dataframe
##############################################################################
#
# PART I: import data, filter and subset
#
##############################################################################
# 1. Setting directory and files
#1.a Check if working directory exists and create one
if (!file.exists("Course_Project_1")) {
        dir.create("Course_Project_1")
}
# NOTE(review): setwd() mutates global state; re-running this script creates a
# second Course_Project_1 nested inside the first.
setwd("./Course_Project_1")
#1.b download and unzip files
file_url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file_url,destfile = "dataset.zip")
unzip("dataset.zip")
unlink("dataset.zip")
#remove unnecessary variables
rm(file_url)
dir()
file_txt<-"household_power_consumption.txt"
# Read a few rows first only to infer the column classes; supplying
# colClasses speeds up the full read below
temp_class<-read.table(file_txt,
                       sep=";",
                       header = TRUE,
                       nrows = 5)
classes<-sapply(temp_class,class)
#now we can specify the class of every column, making it easier for
#the read.table function
# '?' marks missing values in this dataset
data_raw <- read.table(file_txt,
                       sep=";",
                       header = TRUE,
                       colClasses = classes,
                       na.strings = "?")
#remove unnecessary variables
rm(temp_class, file_txt, classes)
head(data_raw)
#format factor variables to date:
data_raw$datetime<-dmy_hms(
  paste(as.character(data_raw$Date),as.character(data_raw$Time)))
data_raw$Date<-dmy(data_raw$Date)
data_raw$Time<-hms(data_raw$Time)
#create filtered table
# Keep only 2007-02-01 and 2007-02-02; the strings are coerced to Date by
# the Date comparison method
epc<-subset(data_raw, Date=="2007/02/01" | Date=="2007/02/02")
rm(data_raw)
########################################################################
##############################################################################
#
# PART II: PLOTS
#
##############################################################################
#Plot 2
# Draw an empty frame first, then add the line trace
with(epc, plot(datetime, Global_active_power,
               type="n",
               xlab="",
               ylab= "Global Active Power (Kilowatts)"
               ))
with(epc, lines(datetime,Global_active_power))
# NOTE(review): dev.copy() from the screen device may resize/re-render the
# plot; opening png() before plotting is the more reliable pattern.
dev.copy(png, "Plot2.png", height = 480, width = 480)
dev.off()
|
/Plot2.R
|
no_license
|
Pedro-Agudo/ExData_Plotting1
|
R
| false
| false
| 2,179
|
r
|
# Exploratory Data Analysis course project, Plot 2: Global Active Power over
# time for 1-2 Feb 2007, from the UCI household power consumption dataset.
library(lubridate)
#Import the data to a dataframe
##############################################################################
#
# PART I: import data, filter and subset
#
##############################################################################
# 1. Setting directory and files
#1.a Check if working directory exists and create one
if (!file.exists("Course_Project_1")) {
        dir.create("Course_Project_1")
}
# NOTE(review): setwd() mutates global state; re-running this script creates a
# second Course_Project_1 nested inside the first.
setwd("./Course_Project_1")
#1.b download and unzip files
file_url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file_url,destfile = "dataset.zip")
unzip("dataset.zip")
unlink("dataset.zip")
#remove unnecessary variables
rm(file_url)
dir()
file_txt<-"household_power_consumption.txt"
# Read a few rows first only to infer the column classes; supplying
# colClasses speeds up the full read below
temp_class<-read.table(file_txt,
                       sep=";",
                       header = TRUE,
                       nrows = 5)
classes<-sapply(temp_class,class)
#now we can specify the class of every column, making it easier for
#the read.table function
# '?' marks missing values in this dataset
data_raw <- read.table(file_txt,
                       sep=";",
                       header = TRUE,
                       colClasses = classes,
                       na.strings = "?")
#remove unnecessary variables
rm(temp_class, file_txt, classes)
head(data_raw)
#format factor variables to date:
data_raw$datetime<-dmy_hms(
  paste(as.character(data_raw$Date),as.character(data_raw$Time)))
data_raw$Date<-dmy(data_raw$Date)
data_raw$Time<-hms(data_raw$Time)
#create filtered table
# Keep only 2007-02-01 and 2007-02-02; the strings are coerced to Date by
# the Date comparison method
epc<-subset(data_raw, Date=="2007/02/01" | Date=="2007/02/02")
rm(data_raw)
########################################################################
##############################################################################
#
# PART II: PLOTS
#
##############################################################################
#Plot 2
# Draw an empty frame first, then add the line trace
with(epc, plot(datetime, Global_active_power,
               type="n",
               xlab="",
               ylab= "Global Active Power (Kilowatts)"
               ))
with(epc, lines(datetime,Global_active_power))
# NOTE(review): dev.copy() from the screen device may resize/re-render the
# plot; opening png() before plotting is the more reliable pattern.
dev.copy(png, "Plot2.png", height = 480, width = 480)
dev.off()
|
#############################################
### Positive unlabelled ensemble learning ###
#############################################
#################################################################
## Apply the ensemblePrediction to the feature enhanced data ##
#################################################################
#####################################
## Data preparation and cleansing ##
#####################################
# Get idx for positive and negative labels
# ========================================
# Row names of the rows whose `target_attr` value is NOT one of `attr_values`.
# Returned as a character vector (whatever rownames() yields for the subset),
# matching get_positive_idx's complement.
get_negative_idx <- function(df, target_attr, attr_values) {
  excluded <- df[!(df[[target_attr]] %in% attr_values), ]
  rownames(excluded)
}
# Numeric row indices of the rows whose `target_attr` value IS one of
# `attr_values` (row names of the subset coerced to numeric).
get_positive_idx <- function(df, target_attr, attr_values) {
  matched <- df[df[[target_attr]] %in% attr_values, ]
  as.numeric(rownames(matched))
}
############################################################
## Build ensemble Predictors: SVM, GLM, RF, XGBoost, kNN ##
############################################################
# Train an ensemble of positive-unlabelled (PU) base classifiers for one
# kinase and return ensemble-averaged predictions for the held-out test rows
# and for the full dataset.
#
# Arguments:
#   kinase        - target kinase name (also a feature/label column when other_ksr=TRUE)
#   kinases       - list keyed by kinase holding 'positive-idx' / 'unlabelled-idx' row indices
#   full_dataset  - feature data frame indexed by those row indices
#   test_idx      - rows held out of the positive training set; predicted as `prediction`
#   ensemble_size - number of base classifiers, each trained on a fresh negative sample
#   negative_size - number of unlabelled rows sampled as pseudo-negatives per classifier
#   model_type    - 'svm', 'glm', 'rf' or 'xgb' ('knn' / 'nn' branches are empty stubs)
#   svm_* / glm_* / xgb_* / rf_* - hyper-parameters for the matching model type only
#   estimate_c    - if TRUE, divide each base prediction by its estimated labelling
#                   frequency c (PU-learning correction)
#   other_ksr     - if TRUE, use the kinase column itself as the response and drop it
#                   from the predictors
#
# Depends on names not defined in this block: the global `features` (named lists
# of feature-column groups) plus createFolds (caret), svm (e1071), cv.glmnet
# (glmnet), randomForest and xgboost.
#
# Returns a list: `prediction` (test rows, rescaled so max == 1),
# `prediction_full` (all rows, rescaled so max == 1), `pred_models`
# (the base classifiers) and `estimated_c`.
get_prediction <- function(
  kinase,
  kinases,
  full_dataset,
  test_idx,
  ensemble_size,
  negative_size,
  model_type,
  svm_kernel,
  svm_cost,
  glm_family,
  glm_alpha,
  xgb_rounds,
  xgb_depth,
  rf_ntree,
  rf_nodesize,
  estimate_c = FALSE,
  other_ksr = FALSE
){
  # Positives minus the held-out test rows
  positive_idx <- setdiff(kinases[[kinase]][['positive-idx']], test_idx)
  positive_train <- full_dataset[positive_idx, ]
  test_data <- full_dataset[test_idx, ]
  # Building the positive training set
  negative_unlabelled <- full_dataset[kinases[[kinase]][['unlabelled-idx']], ]
  # Sampling from the pool of 'negative' unlabelled
  pred_models <- list();
  model_estimated_c <- c();
  for (r in 1:ensemble_size){
    # Seed per replicate so each base classifier's negative sample is reproducible
    set.seed(r)
    idx <- sample(1:nrow(negative_unlabelled), size = negative_size, replace = F)
    negative_train <- negative_unlabelled[idx, ]
    # Creating the training set with positive and negative samples
    train_df <- rbind(positive_train, negative_train)
    rownames(train_df) <- NULL;
    # estimating c
    # NOTE(review): k-fold split is created but only Fold1 is ever used below
    k <- 3
    cls <- as.factor(rep(c(1, 2), times=c(nrow(positive_train), negative_size)))
    fold <- createFolds(cls, k);
    # label 1 correspond to positive labelled examples
    p <- which(cls[fold$Fold1] == 1)
    if(model_type == "svm"){
      # Drop non-numeric columns (and the label column) before fitting the SVM
      train_df <- train_df[, !(names(train_df) %in% features$nonnumeric)]
      train_df <- train_df[, !(names(train_df) %in% c('substrate_type'))]
      if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
      pred_model <- svm(train_df, cls, kernel=svm_kernel, probability=TRUE, scale = FALSE)
      # c is estimated as the mean positive-class probability the held-out
      # positives receive from a model trained without them
      c_model <- svm(train_df[-fold$Fold1,], cls[-fold$Fold1], kernel=svm_kernel, probability=TRUE, scale = FALSE)
      c_pred <- predict(c_model, train_df[fold$Fold1,][p,], decision.values=F, probability=T);
      estimated_c <- sum(attr(c_pred, "probabilities")[,1]) / nrow(attr(c_pred, "probabilities"))
    } else if (model_type == 'glm'){
      # Guard against p > n for the elastic net design matrix
      if(ncol(train_df) > nrow(train_df)) { train_df = train_df[,1:nrow(train_df)-1]}
      train_df$substrate_type <- ifelse(train_df$substrate_type == "<unlabeled>", 0, 1)
      if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
      train_x <- as.matrix(train_df[, !(names(train_df) %in% features$nonnumeric)])
      train_y <- as.matrix(train_df$substrate_type)
      cdf <- train_df[-fold$Fold1,]
      cx <- as.matrix(cdf[, !(colnames(train_df) %in% features$nonnumeric)])
      cy <- as.matrix(cdf$substrate_type)
      tx = as.matrix(cdf[fold$Fold1, !(colnames(train_df) %in% c('substrate_type'))][p,])
      pred_model <- suppressWarnings(cv.glmnet(x=train_x, y=train_y, type.measure='mse', family=glm_family, alpha=glm_alpha))
      c_model <- suppressWarnings(cv.glmnet(x=cx, y=cy, type.measure='mse', family=glm_family, alpha=glm_alpha))
      c_pred <- predict(c_model, s=c_model$lambda.1se, newx=tx, type='response');
      estimated_c <- sum(c_pred) / length(c_pred)
    } else if (model_type == 'rf'){
      if(other_ksr == TRUE){
        # Use the kinase column itself as the RF response
        train_df <- train_df[, !(names(train_df) %in% 'substrate_type')]
        train_df[[kinase]] <- factor(train_df[[kinase]])
        pred_model <- randomForest(y=train_df[[kinase]], x=train_df[, !(names(train_df) %in% kinase)], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
        c_model <- randomForest(y=train_df[-fold$Fold1,kinase], x=train_df[-fold$Fold1, !(names(train_df) %in% kinase)], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
        c_pred <- predict(c_model, train_df[fold$Fold1,][p,], type='prob');
      } else {
        train_df$substrate_type <- factor(train_df$substrate_type)
        pred_model <- randomForest(y=train_df$substrate_type, x=train_df[, !(names(train_df) %in% c('substrate_type')) ], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
        c_model <- randomForest(y=train_df[-fold$Fold1,'substrate_type'], x=train_df[-fold$Fold1, !(names(train_df) %in% c('substrate_type'))], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
        c_pred <- predict(c_model, train_df[fold$Fold1,][p,], type='prob');
      }
      # Second prob column: mean predicted probability of the labelled class
      estimated_c <- sum(c_pred[,2]) / nrow(c_pred)
    } else if (model_type == 'xgb'){
      if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
      # features$nonnumeric[-31] keeps one of the non-numeric columns
      # (presumably substrate_type, recoded to 0/1 below) -- TODO confirm
      train_df <- train_df[, !(names(train_df) %in% features$nonnumeric[-31])]
      param <- list(max.depth = xgb_depth, eta = 0.01, objective="binary:logistic", subsample=0.9)
      train_df$substrate_type <- ifelse(train_df$substrate_type == "<unlabeled>", 0, 1)
      pred_model <- xgboost(param, label=data.matrix(train_df$substrate_type), data=data.matrix(train_df[, !(names(train_df) %in% c('substrate_type')) ]), objective='binary:logistic', nrounds=xgb_rounds, verbose=0)
      c_model <- xgboost(param, label=data.matrix(train_df[-fold$Fold1,'substrate_type']), data=data.matrix(train_df[-fold$Fold1, !(names(train_df) %in% c('substrate_type'))]), objective='binary:logistic', nrounds=xgb_rounds, verbose=0)
      c_pred <- predict(c_model, data.matrix(train_df[fold$Fold1,][p,]), type='prob');
      estimated_c <- sum(c_pred) / length(c_pred)
    } else if (model_type == 'knn'){
    } else if (model_type == 'nn'){
    }
    model_estimated_c <- c(model_estimated_c, estimated_c)
    # training base classifiers
    pred_models[[r]] <- pred_model
  }
  # an ensemble approach for prediction
  # Prepare the test rows and the full dataset in the form each model expects
  predict_df <- test_data
  predict_full <- full_dataset
  if (model_type == 'xgb') {
    predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric[-31])]
    predict_df$substrate_type <- ifelse(predict_df$substrate_type == "<unlabeled>", 0, 1)
    predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric[-31])]
    predict_full$substrate_type <- ifelse(predict_full$substrate_type == "<unlabeled>", 0, 1)
  } else if (model_type == 'rf') {
    predict_df$substrate_type = factor(predict_df$substrate_type)
    predict_full$substrate_type = factor(predict_full$substrate_type)
  } else if (model_type == 'glm') {
    predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric)]
    predict_df <- as.matrix(predict_df)
    predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric)]
    predict_full <- as.matrix(predict_full)
  }
  # correcting base classifiers with estimated c values
  # Accumulate (optionally c-corrected) predictions over all base classifiers
  pred_corrected <- 0
  pred_corrected_full <- 0
  for(i in 1:length(pred_models)) {
    if(model_type == 'svm'){
      # Column filtering is idempotent, so repeating it each iteration is harmless
      predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric)]
      predict_df <- predict_df[, !(names(predict_df) %in% c('substrate_type'))]
      if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
      pred <- predict(pred_models[[i]], predict_df, decision.values=T, probability=T)
      pred <- attr(pred,"probabilities")[,1]
      if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
      pred_corrected <- pred_corrected + pred
      predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric)]
      predict_full <- predict_full[, !(names(predict_full) %in% c('substrate_type'))]
      if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
      full_pred <- predict(pred_models[[i]], predict_full, decision.values=T, probability=T)
      full_pred <- attr(full_pred,"probabilities")[,1]
      if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
      pred_corrected_full <- pred_corrected_full + full_pred
    } else if (model_type == 'glm') {
      if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
      pred <- predict(pred_models[[i]], s=pred_models[[i]]$lambda.1se, newx=predict_df, type='response');
      if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
      pred_corrected <- pred_corrected + pred
      if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
      full_pred <- predict(pred_models[[i]], s=pred_models[[i]]$lambda.1se, newx=predict_full, type='response');
      if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
      pred_corrected_full <- pred_corrected_full + full_pred
    } else if (model_type == 'rf') {
      if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
      pred <- predict(pred_models[[i]], predict_df, type='prob')[, 2]
      if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
      pred_corrected <- pred_corrected + pred
      if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
      full_pred <- predict(pred_models[[i]], predict_full, type='prob')[, 2]
      if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
      pred_corrected_full <- pred_corrected_full + full_pred
    } else if (model_type == 'xgb') {
      if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
      pred <- predict(pred_models[[i]], data.matrix(predict_df))
      if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
      pred_corrected <- pred_corrected + pred
      if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
      full_pred <- predict(pred_models[[i]], data.matrix(predict_full))
      if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
      pred_corrected_full <- pred_corrected_full + full_pred
    }
  }
  # return prediction results and base classifiers
  results <- list()
  predicts <- (pred_corrected / ensemble_size)
  predicts_full <- (pred_corrected_full / ensemble_size)
  # NOTE(review): the next two pairs of assignments overwrite each other --
  # only the max-normalised versions are returned; the raw averages are dead.
  results$prediction <- predicts
  results$prediction <- predicts / max(predicts)
  results$prediction_full <- predicts_full
  results$prediction_full <- predicts_full / max(predicts_full)
  results$pred_models <- pred_models;
  # NOTE(review): this is only the LAST base model's c estimate; the full
  # vector is in model_estimated_c -- confirm which was intended.
  results$estimated_c <- estimated_c
  return(results);
}
#######################################
### Models used in final prediction ###
#######################################
######################################
### Models used for Akt prediction ###
######################################
# Build the Akt ensemble: five heterogeneous base models over different
# feature views, combined by probability averaging and threshold voting.
# The local names model1..model5 are kept because cbind() uses them as the
# column names of the returned matrix.
generate_akt_prob <- function(){
  n_positive <- floor(length(kinases[['Akt']][['positive-idx']]))
  # Model 1: XGB on the temporal feature view
  model1 <- get_prediction(
    kinase = 'Akt',
    kinases = kinases,
    full_dataset = full_data[, features$temporal],
    test_idx = 1,
    ensemble_size = 50,
    negative_size = n_positive * 1,
    model_type = 'xgb',
    xgb_rounds = 10,
    xgb_depth = 2,
    estimate_c = TRUE
  )$prediction_full
  # Model 2: Random Forest on the final feature view
  model2 <- get_prediction(
    kinase = 'Akt',
    kinases = kinases,
    full_dataset = full_data[, features$final],
    test_idx = 1,
    ensemble_size = 100,
    negative_size = n_positive * 1,
    model_type = 'rf',
    rf_ntree = 500,
    rf_nodesize = 4,
    estimate_c = TRUE
  )$prediction_full
  # Model 3: SVM with a radial kernel on the relative feature view
  model3 <- get_prediction(
    kinase = 'Akt',
    kinases = kinases,
    full_dataset = full_data[, features$relative],
    test_idx = 1,
    ensemble_size = 100,
    negative_size = n_positive * 1,
    model_type = 'svm',
    svm_kernel = 'radial',
    svm_cost = 1,
    estimate_c = TRUE
  )$prediction_full
  # Model 4: Gaussian GLM (elastic net, alpha = 1) on the temporal view
  model4 <- get_prediction(
    kinase = 'Akt',
    kinases = kinases,
    full_dataset = full_data[, features$temporal],
    test_idx = 1,
    ensemble_size = 75,
    negative_size = n_positive * 1.1,
    model_type = 'glm',
    glm_family = 'gaussian',
    glm_alpha = 1,
    estimate_c = FALSE
  )$prediction_full
  # Model 5: Random Forest motif classifier on the sequence view
  model5 <- get_prediction(
    kinase = 'Akt',
    kinases = kinases,
    full_dataset = full_data[, features$sequence],
    test_idx = 1,
    ensemble_size = 25,
    negative_size = n_positive * 1,
    model_type = 'rf',
    rf_ntree = 750,
    rf_nodesize = 4,
    estimate_c = TRUE
  )$prediction_full
  # Combine the five Akt models into a voting/averaging ensemble
  all_predictions <- cbind(model1, model2, model3, model4, model5)
  prediction_threshold <- 0.8
  # Three voting options: average probability, majority vote and unanimous
  # vote, all evaluated against prediction_threshold
  ensemble_probability <- rowMeans(all_predictions)
  ensemble_yes_vote <- apply(all_predictions, 1, function(p) sum(p > prediction_threshold))
  ensemble_no_vote <- apply(all_predictions, 1, function(p) sum(p <= prediction_threshold))
  probability_decision <- as.numeric(ensemble_probability > prediction_threshold)
  majority_decision <- as.numeric(ensemble_yes_vote > ensemble_no_vote)
  all_decision <- as.numeric(ensemble_yes_vote == ncol(all_predictions))
  cbind(all_predictions, ensemble_yes_vote, ensemble_no_vote, ensemble_probability,
        probability_decision, majority_decision, all_decision)
}
#######################################
### Models used for mTOR prediction ###
#######################################
# Build the mTOR ensemble: five heterogeneous base models over different
# feature views, combined by probability averaging and threshold voting.
# The local names model1..model5 are kept because cbind() uses them as the
# column names of the returned matrix.
generate_mtor_prob <- function(){
  n_positive <- floor(length(kinases[['mTOR']][['positive-idx']]))
  # Model 1: XGB on the relative feature view
  model1 <- get_prediction(
    kinase = 'mTOR',
    kinases = kinases,
    full_dataset = full_data[, features$relative],
    test_idx = 1,
    ensemble_size = 50,
    negative_size = n_positive * 1.2,
    model_type = 'xgb',
    xgb_rounds = 50,
    xgb_depth = 2,
    estimate_c = FALSE
  )$prediction_full
  # Model 2: Random Forest on the final feature view
  model2 <- get_prediction(
    kinase = 'mTOR',
    kinases = kinases,
    full_dataset = full_data[, features$final],
    test_idx = 1,
    ensemble_size = 25,
    negative_size = n_positive * 1,
    model_type = 'rf',
    rf_ntree = 750,
    rf_nodesize = 4,
    estimate_c = TRUE
  )$prediction_full
  # Model 3: SVM with a radial kernel on the relative feature view
  model3 <- get_prediction(
    kinase = 'mTOR',
    kinases = kinases,
    full_dataset = full_data[, features$relative],
    test_idx = 1,
    ensemble_size = 100,
    negative_size = n_positive * 1,
    model_type = 'svm',
    svm_kernel = 'radial',
    svm_cost = 1,
    estimate_c = TRUE
  )$prediction_full
  # Model 4: Binomial GLM (lasso, alpha = 1) on the temporal view
  model4 <- get_prediction(
    kinase = 'mTOR',
    kinases = kinases,
    full_dataset = full_data[, features$temporal],
    test_idx = 1,
    ensemble_size = 100,
    negative_size = n_positive * 1.1,
    model_type = 'glm',
    glm_family = 'binomial',
    glm_alpha = 1,
    estimate_c = FALSE
  )$prediction_full
  # Model 5: Random Forest motif classifier on the sequence view
  model5 <- get_prediction(
    kinase = 'mTOR',
    kinases = kinases,
    full_dataset = full_data[, features$sequence],
    test_idx = 1,
    ensemble_size = 25,
    negative_size = n_positive * 1,
    model_type = 'rf',
    rf_ntree = 500,
    rf_nodesize = 4,
    estimate_c = TRUE
  )$prediction_full
  # Combine the five mTOR models into a voting/averaging ensemble
  all_predictions <- cbind(model1, model2, model3, model4, model5)
  prediction_threshold <- 0.7
  # Three voting options: average probability, majority vote and unanimous
  # vote, all evaluated against prediction_threshold
  ensemble_probability <- rowMeans(all_predictions)
  ensemble_yes_vote <- apply(all_predictions, 1, function(p) sum(p > prediction_threshold))
  ensemble_no_vote <- apply(all_predictions, 1, function(p) sum(p <= prediction_threshold))
  probability_decision <- as.numeric(ensemble_probability > prediction_threshold)
  majority_decision <- as.numeric(ensemble_yes_vote > ensemble_no_vote)
  all_decision <- as.numeric(ensemble_yes_vote == ncol(all_predictions))
  cbind(all_predictions, ensemble_yes_vote, ensemble_no_vote, ensemble_probability,
        probability_decision, majority_decision, all_decision)
}
# Create single probability file
# Combine the Akt and mTOR ensemble probabilities with the site identifier
# columns of the global `full_data` into a single annotated table.
generate_all_prob <- function(akt_prob, mtor_prob){
  id_cols <- full_data[, c('Identifier', 'Seq.Window', 'substrate_type')]
  all_prob <- cbind(id_cols,
                    akt_prob[, 'ensemble_probability'],
                    mtor_prob[, 'ensemble_probability'])
  colnames(all_prob) <- c('Identifier', 'Seq.Window', 'substrate_type',
                          'akt$prediction_full', 'mtor$prediction_full')
  all_prob
}
|
/R/learning.R
|
no_license
|
kristopherlopez/imbalanced_unlabelled_learning
|
R
| false
| false
| 18,559
|
r
|
#############################################
### Positive unlabelled ensemble learning ###
#############################################
#################################################################
## Apply the ensemblePrediction to the feature enhanced data ##
#################################################################
#####################################
## Data preparation and cleansing ##
#####################################
# Get idx for positive and negative labels
# ========================================
# Row names of the rows whose `target_attr` value is NOT one of `attr_values`.
# Returned as a character vector (whatever rownames() yields for the subset),
# matching get_positive_idx's complement.
get_negative_idx <- function(df, target_attr, attr_values) {
  excluded <- df[!(df[[target_attr]] %in% attr_values), ]
  rownames(excluded)
}
# Numeric row indices of the rows whose `target_attr` value IS one of
# `attr_values` (row names of the subset coerced to numeric).
get_positive_idx <- function(df, target_attr, attr_values) {
  matched <- df[df[[target_attr]] %in% attr_values, ]
  as.numeric(rownames(matched))
}
############################################################
## Build ensemble Predictors: SVM, GLM, RF, XGBoost, kNN ##
############################################################
get_prediction <- function(
kinase,
kinases,
full_dataset,
test_idx,
ensemble_size,
negative_size,
model_type,
svm_kernel,
svm_cost,
glm_family,
glm_alpha,
xgb_rounds,
xgb_depth,
rf_ntree,
rf_nodesize,
estimate_c = FALSE,
other_ksr = FALSE
){
positive_idx <- setdiff(kinases[[kinase]][['positive-idx']], test_idx)
positive_train <- full_dataset[positive_idx, ]
test_data <- full_dataset[test_idx, ]
# Building the positive training set
negative_unlabelled <- full_dataset[kinases[[kinase]][['unlabelled-idx']], ]
# Sampling from the pool of 'negative' unlabelled
pred_models <- list();
model_estimated_c <- c();
for (r in 1:ensemble_size){
set.seed(r)
idx <- sample(1:nrow(negative_unlabelled), size = negative_size, replace = F)
negative_train <- negative_unlabelled[idx, ]
# Creating the training set with positive and negative samples
train_df <- rbind(positive_train, negative_train)
rownames(train_df) <- NULL;
# estimating c
k <- 3
cls <- as.factor(rep(c(1, 2), times=c(nrow(positive_train), negative_size)))
fold <- createFolds(cls, k);
# label 1 correspond to positive labelled examples
p <- which(cls[fold$Fold1] == 1)
if(model_type == "svm"){
train_df <- train_df[, !(names(train_df) %in% features$nonnumeric)]
train_df <- train_df[, !(names(train_df) %in% c('substrate_type'))]
if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
pred_model <- svm(train_df, cls, kernel=svm_kernel, probability=TRUE, scale = FALSE)
c_model <- svm(train_df[-fold$Fold1,], cls[-fold$Fold1], kernel=svm_kernel, probability=TRUE, scale = FALSE)
c_pred <- predict(c_model, train_df[fold$Fold1,][p,], decision.values=F, probability=T);
estimated_c <- sum(attr(c_pred, "probabilities")[,1]) / nrow(attr(c_pred, "probabilities"))
} else if (model_type == 'glm'){
if(ncol(train_df) > nrow(train_df)) { train_df = train_df[,1:nrow(train_df)-1]}
train_df$substrate_type <- ifelse(train_df$substrate_type == "<unlabeled>", 0, 1)
if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
train_x <- as.matrix(train_df[, !(names(train_df) %in% features$nonnumeric)])
train_y <- as.matrix(train_df$substrate_type)
cdf <- train_df[-fold$Fold1,]
cx <- as.matrix(cdf[, !(colnames(train_df) %in% features$nonnumeric)])
cy <- as.matrix(cdf$substrate_type)
tx = as.matrix(cdf[fold$Fold1, !(colnames(train_df) %in% c('substrate_type'))][p,])
pred_model <- suppressWarnings(cv.glmnet(x=train_x, y=train_y, type.measure='mse', family=glm_family, alpha=glm_alpha))
c_model <- suppressWarnings(cv.glmnet(x=cx, y=cy, type.measure='mse', family=glm_family, alpha=glm_alpha))
c_pred <- predict(c_model, s=c_model$lambda.1se, newx=tx, type='response');
estimated_c <- sum(c_pred) / length(c_pred)
} else if (model_type == 'rf'){
if(other_ksr == TRUE){
train_df <- train_df[, !(names(train_df) %in% 'substrate_type')]
train_df[[kinase]] <- factor(train_df[[kinase]])
pred_model <- randomForest(y=train_df[[kinase]], x=train_df[, !(names(train_df) %in% kinase)], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
c_model <- randomForest(y=train_df[-fold$Fold1,kinase], x=train_df[-fold$Fold1, !(names(train_df) %in% kinase)], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
c_pred <- predict(c_model, train_df[fold$Fold1,][p,], type='prob');
} else {
train_df$substrate_type <- factor(train_df$substrate_type)
pred_model <- randomForest(y=train_df$substrate_type, x=train_df[, !(names(train_df) %in% c('substrate_type')) ], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
c_model <- randomForest(y=train_df[-fold$Fold1,'substrate_type'], x=train_df[-fold$Fold1, !(names(train_df) %in% c('substrate_type'))], norm.votes=TRUE, proximity=TRUE, ntree=rf_ntree, maxnodes=rf_nodesize, keep.forest = TRUE)
c_pred <- predict(c_model, train_df[fold$Fold1,][p,], type='prob');
}
estimated_c <- sum(c_pred[,2]) / nrow(c_pred)
} else if (model_type == 'xgb'){
if(other_ksr == TRUE){ train_df <- train_df[, !(names(train_df) %in% kinase)]}
train_df <- train_df[, !(names(train_df) %in% features$nonnumeric[-31])]
param <- list(max.depth = xgb_depth, eta = 0.01, objective="binary:logistic", subsample=0.9)
train_df$substrate_type <- ifelse(train_df$substrate_type == "<unlabeled>", 0, 1)
pred_model <- xgboost(param, label=data.matrix(train_df$substrate_type), data=data.matrix(train_df[, !(names(train_df) %in% c('substrate_type')) ]), objective='binary:logistic', nrounds=xgb_rounds, verbose=0)
c_model <- xgboost(param, label=data.matrix(train_df[-fold$Fold1,'substrate_type']), data=data.matrix(train_df[-fold$Fold1, !(names(train_df) %in% c('substrate_type'))]), objective='binary:logistic', nrounds=xgb_rounds, verbose=0)
c_pred <- predict(c_model, data.matrix(train_df[fold$Fold1,][p,]), type='prob');
estimated_c <- sum(c_pred) / length(c_pred)
} else if (model_type == 'knn'){
} else if (model_type == 'nn'){
}
model_estimated_c <- c(model_estimated_c, estimated_c)
# training base classifiers
pred_models[[r]] <- pred_model
}
# an ensemble approach for prediction
predict_df <- test_data
predict_full <- full_dataset
if (model_type == 'xgb') {
predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric[-31])]
predict_df$substrate_type <- ifelse(predict_df$substrate_type == "<unlabeled>", 0, 1)
predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric[-31])]
predict_full$substrate_type <- ifelse(predict_full$substrate_type == "<unlabeled>", 0, 1)
} else if (model_type == 'rf') {
predict_df$substrate_type = factor(predict_df$substrate_type)
predict_full$substrate_type = factor(predict_full$substrate_type)
} else if (model_type == 'glm') {
predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric)]
predict_df <- as.matrix(predict_df)
predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric)]
predict_full <- as.matrix(predict_full)
}
# correcting base classifiers with estimated c values
pred_corrected <- 0
pred_corrected_full <- 0
for(i in 1:length(pred_models)) {
if(model_type == 'svm'){
predict_df <- predict_df[, !(names(predict_df) %in% features$nonnumeric)]
predict_df <- predict_df[, !(names(predict_df) %in% c('substrate_type'))]
if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
pred <- predict(pred_models[[i]], predict_df, decision.values=T, probability=T)
pred <- attr(pred,"probabilities")[,1]
if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
pred_corrected <- pred_corrected + pred
predict_full <- predict_full[, !(names(predict_full) %in% features$nonnumeric)]
predict_full <- predict_full[, !(names(predict_full) %in% c('substrate_type'))]
if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
full_pred <- predict(pred_models[[i]], predict_full, decision.values=T, probability=T)
full_pred <- attr(full_pred,"probabilities")[,1]
if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
pred_corrected_full <- pred_corrected_full + full_pred
} else if (model_type == 'glm') {
if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
pred <- predict(pred_models[[i]], s=pred_models[[i]]$lambda.1se, newx=predict_df, type='response');
if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
pred_corrected <- pred_corrected + pred
if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
full_pred <- predict(pred_models[[i]], s=pred_models[[i]]$lambda.1se, newx=predict_full, type='response');
if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
pred_corrected_full <- pred_corrected_full + full_pred
} else if (model_type == 'rf') {
if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
pred <- predict(pred_models[[i]], predict_df, type='prob')[, 2]
if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
pred_corrected <- pred_corrected + pred
if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
full_pred <- predict(pred_models[[i]], predict_full, type='prob')[, 2]
if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
pred_corrected_full <- pred_corrected_full + full_pred
} else if (model_type == 'xgb') {
if(other_ksr == TRUE){ predict_df <- predict_df[, !(names(predict_df) %in% kinase)]}
pred <- predict(pred_models[[i]], data.matrix(predict_df))
if(estimate_c == TRUE) { pred <- pred / model_estimated_c[i]}
pred_corrected <- pred_corrected + pred
if(other_ksr == TRUE){ predict_full <- predict_full[, !(names(predict_full) %in% kinase)]}
full_pred <- predict(pred_models[[i]], data.matrix(predict_full))
if(estimate_c == TRUE) { full_pred <- full_pred / model_estimated_c[i]}
pred_corrected_full <- pred_corrected_full + full_pred
}
}
# return prediction results and base classifiers
results <- list()
predicts <- (pred_corrected / ensemble_size)
predicts_full <- (pred_corrected_full / ensemble_size)
results$prediction <- predicts
results$prediction <- predicts / max(predicts)
results$prediction_full <- predicts_full
results$prediction_full <- predicts_full / max(predicts_full)
results$pred_models <- pred_models;
results$estimated_c <- estimated_c
return(results);
}
#######################################
### Models used in final prediction ###
#######################################
######################################
### Models used for Akt prediction ###
######################################
# Ensemble predictor for Akt substrates.
#
# Trains five heterogeneous base models (XGBoost, two random forests, an SVM
# and a penalised GLM) via get_prediction() and combines their genome-wide
# probabilities with probability averaging plus threshold voting.
# Relies on globals: kinases, full_data, features, get_prediction().
#
# Returns a matrix with one row per site: the five model probabilities,
# yes/no vote counts, averaged probability, and three 0/1 decision columns.
generate_akt_prob <- function(){
  # Size of the positive (known substrate) set; scales the negative sample.
  kinase_count <- floor(length(kinases[['Akt']][['positive-idx']]))

  # Base model 1: gradient boosting on temporal features.
  model1 <- get_prediction(kinase = 'Akt', kinases = kinases,
                           full_dataset = full_data[, features$temporal],
                           test_idx = 1, ensemble_size = 50,
                           negative_size = kinase_count * 1,
                           model_type = 'xgb', xgb_rounds = 10, xgb_depth = 2,
                           estimate_c = TRUE)$prediction_full

  # Base model 2: random forest on the final feature set.
  model2 <- get_prediction(kinase = 'Akt', kinases = kinases,
                           full_dataset = full_data[, features$final],
                           test_idx = 1, ensemble_size = 100,
                           negative_size = kinase_count * 1,
                           model_type = 'rf', rf_ntree = 500, rf_nodesize = 4,
                           estimate_c = TRUE)$prediction_full

  # Base model 3: SVM with a radial kernel on relative features.
  model3 <- get_prediction(kinase = 'Akt', kinases = kinases,
                           full_dataset = full_data[, features$relative],
                           test_idx = 1, ensemble_size = 100,
                           negative_size = kinase_count * 1,
                           model_type = 'svm', svm_kernel = 'radial',
                           svm_cost = 1,
                           estimate_c = TRUE)$prediction_full

  # Base model 4: penalised GLM (gaussian family, lasso) on temporal features.
  model4 <- get_prediction(kinase = 'Akt', kinases = kinases,
                           full_dataset = full_data[, features$temporal],
                           test_idx = 1, ensemble_size = 75,
                           negative_size = kinase_count * 1.1,
                           model_type = 'glm', glm_family = 'gaussian',
                           glm_alpha = 1,
                           estimate_c = FALSE)$prediction_full

  # Base model 5: random forest on sequence (motif) features.
  model5 <- get_prediction(kinase = 'Akt', kinases = kinases,
                           full_dataset = full_data[, features$sequence],
                           test_idx = 1, ensemble_size = 25,
                           negative_size = kinase_count * 1,
                           model_type = 'rf', rf_ntree = 750, rf_nodesize = 4,
                           estimate_c = TRUE)$prediction_full

  # Combine the base models. NOTE: the result's column names come from these
  # variable names via cbind() deparsing, so the names must stay as-is.
  all_predictions <- cbind(model1, model2, model3, model4, model5)
  prediction_threshold <- 0.8

  # Averaged probability plus per-site counts of models voting above / at-or-
  # below the threshold.
  ensemble_probability <- rowMeans(all_predictions)
  ensemble_yes_vote <- rowSums(all_predictions > prediction_threshold)
  ensemble_no_vote <- rowSums(all_predictions <= prediction_threshold)

  # Three decision rules: averaged probability, majority vote, unanimous vote.
  probability_decision <- ifelse(ensemble_probability > prediction_threshold, 1, 0)
  majority_decision <- ifelse(ensemble_yes_vote > ensemble_no_vote, 1, 0)
  all_decision <- ifelse(ensemble_yes_vote == ncol(all_predictions), 1, 0)

  akt_prob <- cbind(all_predictions, ensemble_yes_vote, ensemble_no_vote,
                    ensemble_probability, probability_decision,
                    majority_decision, all_decision)
  return(akt_prob)
}
#######################################
### Models used for mTOR prediction ###
#######################################
# Ensemble predictor for mTOR substrates.
#
# Same architecture as the Akt predictor: five base models trained through
# get_prediction(), combined by probability averaging and threshold voting
# (threshold 0.7 here). Relies on globals: kinases, full_data, features,
# get_prediction().
#
# Returns a matrix with one row per site: the five model probabilities,
# yes/no vote counts, averaged probability, and three 0/1 decision columns.
generate_mtor_prob <- function(){
  # Size of the positive (known substrate) set; scales the negative sample.
  kinase_count <- floor(length(kinases[['mTOR']][['positive-idx']]))

  # Base model 1: gradient boosting on relative features.
  model1 <- get_prediction(kinase = 'mTOR', kinases = kinases,
                           full_dataset = full_data[, features$relative],
                           test_idx = 1, ensemble_size = 50,
                           negative_size = kinase_count * 1.2,
                           model_type = 'xgb', xgb_rounds = 50, xgb_depth = 2,
                           estimate_c = FALSE)$prediction_full

  # Base model 2: random forest on the final feature set.
  model2 <- get_prediction(kinase = 'mTOR', kinases = kinases,
                           full_dataset = full_data[, features$final],
                           test_idx = 1, ensemble_size = 25,
                           negative_size = kinase_count * 1,
                           model_type = 'rf', rf_ntree = 750, rf_nodesize = 4,
                           estimate_c = TRUE)$prediction_full

  # Base model 3: SVM with a radial kernel on relative features.
  model3 <- get_prediction(kinase = 'mTOR', kinases = kinases,
                           full_dataset = full_data[, features$relative],
                           test_idx = 1, ensemble_size = 100,
                           negative_size = kinase_count * 1,
                           model_type = 'svm', svm_kernel = 'radial',
                           svm_cost = 1,
                           estimate_c = TRUE)$prediction_full

  # Base model 4: penalised GLM (binomial family, lasso) on temporal features.
  model4 <- get_prediction(kinase = 'mTOR', kinases = kinases,
                           full_dataset = full_data[, features$temporal],
                           test_idx = 1, ensemble_size = 100,
                           negative_size = kinase_count * 1.1,
                           model_type = 'glm', glm_family = 'binomial',
                           glm_alpha = 1,
                           estimate_c = FALSE)$prediction_full

  # Base model 5: random forest on sequence (motif) features.
  model5 <- get_prediction(kinase = 'mTOR', kinases = kinases,
                           full_dataset = full_data[, features$sequence],
                           test_idx = 1, ensemble_size = 25,
                           negative_size = kinase_count * 1,
                           model_type = 'rf', rf_ntree = 500, rf_nodesize = 4,
                           estimate_c = TRUE)$prediction_full

  # Combine the base models. NOTE: the result's column names come from these
  # variable names via cbind() deparsing, so the names must stay as-is.
  all_predictions <- cbind(model1, model2, model3, model4, model5)
  prediction_threshold <- 0.7

  # Averaged probability plus per-site counts of models voting above / at-or-
  # below the threshold.
  ensemble_probability <- rowMeans(all_predictions)
  ensemble_yes_vote <- rowSums(all_predictions > prediction_threshold)
  ensemble_no_vote <- rowSums(all_predictions <= prediction_threshold)

  # Three decision rules: averaged probability, majority vote, unanimous vote.
  probability_decision <- ifelse(ensemble_probability > prediction_threshold, 1, 0)
  majority_decision <- ifelse(ensemble_yes_vote > ensemble_no_vote, 1, 0)
  all_decision <- ifelse(ensemble_yes_vote == ncol(all_predictions), 1, 0)

  mtor_prob <- cbind(all_predictions, ensemble_yes_vote, ensemble_no_vote,
                     ensemble_probability, probability_decision,
                     majority_decision, all_decision)
  return(mtor_prob)
}
# Create single probability file
# Merge site identifiers with the Akt and mTOR ensemble probabilities into a
# single table (one row per phosphosite in the global full_data).
#
# @param akt_prob matrix returned by generate_akt_prob() (row order must match
#   full_data); its 'ensemble_probability' column is extracted.
# @param mtor_prob matrix returned by generate_mtor_prob(); same requirement.
# @return data frame: Identifier, Seq.Window, substrate_type, and the two
#   ensemble probability columns.
generate_all_prob <- function(akt_prob, mtor_prob){
  id_cols <- full_data[, c('Identifier', 'Seq.Window', 'substrate_type')]
  all_prob <- cbind(id_cols,
                    akt_prob[, 'ensemble_probability'],
                    mtor_prob[, 'ensemble_probability'])
  names(all_prob) <- c('Identifier', 'Seq.Window', 'substrate_type',
                       'akt$prediction_full', 'mtor$prediction_full')
  return(all_prob)
}
|
#-------------------- DATA layer
# Import the movie ratings data. NOTE(review): assumes the CSV sits in the
# current working directory — confirm before sourcing.
movies <- read.csv("Section6Datafile - Movie-Ratings.csv")
movies # print the full data frame to inspect it
head(movies)
# Grammar of Graphics - background reading on the layered-plot philosophy:
# https://sds-platform-private.s3-us-east-2.amazonaws.com/uploads/P2-Section6-Grammar-Of-Graphics.pdf
# Rename the columns to short, consistent names.
colnames(movies) <- c("Film","Genre","CriticRating","AudienceRating","BudgetMillions","Year")
head(movies)
str(movies) # inspect the structure (column types)
# Note: factor columns are stored internally as integer codes.
summary(movies) # per-column summary statistics
# Year is currently numeric, which we don't want for a categorical axis;
# convert it to a factor.
factor(movies$Year) # preview the conversion without assigning
# Apply the conversion: factor() transforms the column and we reassign the
# result back to the same column.
movies$Year <- factor(movies$Year)
str(movies)
# Year is now a factor.
#----------------- AESTHETIC layer
library(ggplot2)
# Aesthetics define how data columns map onto visual properties — they are
# the mapping, not the rendering itself.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating)) # no geometry yet, so this draws an empty plot
# Jump ahead to GEOMETRY briefly so something is visible.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating)) +
geom_point()
# Add more aesthetics: colour and size.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=Genre)) +
geom_point()
# Expected warning: "Using size for a discrete variable is not advised."
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions)) +
geom_point()
# Budget (continuous) is a much more sensible size mapping.
#>>> This is the 1st requested chart (improved below)
#---------------- GEOMETRIES layer - plotting with layers
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions)) +
geom_point()
# Store the data + aesthetics in an object for reuse.
p <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions))
# Add geometries to the stored object.
p + geom_point()
p + geom_line()
# Layer geometries: later layers are drawn on top of earlier ones.
p + geom_line() + geom_point()
#---------------- Back to aesthetics: OVERRIDING AESTHETICS
# Make a fresh object.
q <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions))
# View it, then override the aesthetics declared above per-layer.
q + geom_point()
# Per-layer aes() overrides the object-level mapping.
q + geom_point(aes(size=CriticRating))
q + geom_point(aes(colour=BudgetMillions))
q + geom_point(aes(x=BudgetMillions)) +
xlab("Budget Millions $$$")
p + geom_line(size=1) + geom_point() # size=1 here is SET (constant), not
# MAPPED to data — an important distinction.
|
/Section6Lecture60 - Overriding Aethetics.R
|
no_license
|
ghettocounselor/R
|
R
| false
| false
| 2,888
|
r
|
#-------------------- DATA layer
# Import the movie ratings data. NOTE(review): assumes the CSV sits in the
# current working directory — confirm before sourcing.
movies <- read.csv("Section6Datafile - Movie-Ratings.csv")
movies # print the full data frame to inspect it
head(movies)
# Grammar of Graphics - background reading on the layered-plot philosophy:
# https://sds-platform-private.s3-us-east-2.amazonaws.com/uploads/P2-Section6-Grammar-Of-Graphics.pdf
# Rename the columns to short, consistent names.
colnames(movies) <- c("Film","Genre","CriticRating","AudienceRating","BudgetMillions","Year")
head(movies)
str(movies) # inspect the structure (column types)
# Note: factor columns are stored internally as integer codes.
summary(movies) # per-column summary statistics
# Year is currently numeric, which we don't want for a categorical axis;
# convert it to a factor.
factor(movies$Year) # preview the conversion without assigning
# Apply the conversion: factor() transforms the column and we reassign the
# result back to the same column.
movies$Year <- factor(movies$Year)
str(movies)
# Year is now a factor.
#----------------- AESTHETIC layer
library(ggplot2)
# Aesthetics define how data columns map onto visual properties — they are
# the mapping, not the rendering itself.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating)) # no geometry yet, so this draws an empty plot
# Jump ahead to GEOMETRY briefly so something is visible.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating)) +
geom_point()
# Add more aesthetics: colour and size.
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=Genre)) +
geom_point()
# Expected warning: "Using size for a discrete variable is not advised."
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions)) +
geom_point()
# Budget (continuous) is a much more sensible size mapping.
#>>> This is the 1st requested chart (improved below)
#---------------- GEOMETRIES layer - plotting with layers
ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions)) +
geom_point()
# Store the data + aesthetics in an object for reuse.
p <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions))
# Add geometries to the stored object.
p + geom_point()
p + geom_line()
# Layer geometries: later layers are drawn on top of earlier ones.
p + geom_line() + geom_point()
#---------------- Back to aesthetics: OVERRIDING AESTHETICS
# Make a fresh object.
q <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating, colour=Genre, size=BudgetMillions))
# View it, then override the aesthetics declared above per-layer.
q + geom_point()
# Per-layer aes() overrides the object-level mapping.
q + geom_point(aes(size=CriticRating))
q + geom_point(aes(colour=BudgetMillions))
q + geom_point(aes(x=BudgetMillions)) +
xlab("Budget Millions $$$")
p + geom_line(size=1) + geom_point() # size=1 here is SET (constant), not
# MAPPED to data — an important distinction.
|
# Load the training set interactively. NOTE(review): `T` should be `TRUE`
# (T is reassignable) and file.choose() makes this non-reproducible.
train <- read.csv(file.choose(),header = T)
str(train)
# The dataset uses -999 as a missing-value sentinel; recode it to NA.
train[train==-999.000] <- NA
# Recode the Label factor to 0/1 numeric, then back to a two-level factor.
train$Label=as.numeric(train$Label)-1
train$Label <- as.factor(train$Label)
summary(train)
# Scaling: min-max normalization to [0, 1].
#
# @param x numeric vector (or all-numeric data frame) to rescale.
# @param na.rm drop NAs when computing the range. Defaults to TRUE because the
#   data above recodes -999 sentinels to NA, which would otherwise propagate
#   and make the entire result NA.
# @return x rescaled so that min(x) -> 0 and max(x) -> 1 (NAs stay NA).
#   A constant input maps to all zeros instead of NaN (0/0 in the original).
normalizer <- function(x, na.rm = TRUE){
  rng <- range(x, na.rm = na.rm)
  if (rng[1] == rng[2]) {
    return(x - rng[1])  # constant input: avoid division by zero -> NaN
  }
  (x - rng[1]) / (rng[2] - rng[1])
}
# Keep only columns with no missing values, then drop column 22.
train_missing <- subset(train, select=colMeans(is.na(train)) == 0)
data <- train_missing[,-22]
# NOTE(review): train_norm is computed but never used — the sampling below
# draws from the UNnormalized `data`, so KNN distances are dominated by
# large-scale features. Presumably train_norm was meant to be sampled instead.
train_norm<- normalizer(data)
train_norm <- as.data.frame(train_norm)
# Split into train/test by row position (first 200k / next 50k rows).
train_sam <- data[1:200000,]
test_sam <- data[200001:250000,]
# Labels come from hard-coded column 33 of the original frame —
# TODO confirm that column 33 is Label after any upstream schema change.
train_Lab <- train[1:200000,33]
test_Lab <- train[200001:250000,33]
# Model: k-nearest neighbours.
# NOTE(review): install.packages() inside a script is an anti-pattern;
# install once interactively and keep only library() here.
install.packages("class")
library(class)
# k = number of nearest neighbours to vote over.
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 20)
# Confusion matrix for k = 20.
table(actual= test_Lab, pred= modelknn)
# Correct classifications: 32819 + 23 = 32842
# Misclassifications: 17140 + 18 = 17158
# Accuracy: 32842/50000 = 65.68%
table(train$Label)
# Baseline: majority-class rate (predicting the majority class every time).
164333/250000
# KNN is performing below this naive baseline.
# Try k = 10:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 10)
# Confusion matrix for k = 10.
table(actual= test_Lab, pred= modelknn)
# Accuracy decreases.
# Try k = 15:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 15)
# Confusion matrix for k = 15.
table(actual= test_Lab, pred= modelknn)
# Try k = 30:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 30)
# Confusion matrix for k = 30.
table(actual= test_Lab, pred= modelknn)
|
/knn.R
|
no_license
|
puneetgajwal/higgs-boson-decay-detection
|
R
| false
| false
| 1,462
|
r
|
# Load the training set interactively. NOTE(review): `T` should be `TRUE`
# (T is reassignable) and file.choose() makes this non-reproducible.
train <- read.csv(file.choose(),header = T)
str(train)
# The dataset uses -999 as a missing-value sentinel; recode it to NA.
train[train==-999.000] <- NA
# Recode the Label factor to 0/1 numeric, then back to a two-level factor.
train$Label=as.numeric(train$Label)-1
train$Label <- as.factor(train$Label)
summary(train)
# Scaling: min-max normalization of a vector to [0, 1].
# NOTE(review): applied to a whole data frame below, this scales by the
# GLOBAL min/max, not per column; also NA input makes the result all-NA.
normalizer <- function(x){
return((x-min(x))/(max(x)-min(x)))
}
# Keep only columns with no missing values, then drop column 22.
train_missing <- subset(train, select=colMeans(is.na(train)) == 0)
data <- train_missing[,-22]
# NOTE(review): train_norm is computed but never used — the sampling below
# draws from the UNnormalized `data`.
train_norm<- normalizer(data)
train_norm <- as.data.frame(train_norm)
# Split into train/test by row position (first 200k / next 50k rows).
train_sam <- data[1:200000,]
test_sam <- data[200001:250000,]
# Labels come from hard-coded column 33 — TODO confirm it is Label.
train_Lab <- train[1:200000,33]
test_Lab <- train[200001:250000,33]
# Model: k-nearest neighbours.
# NOTE(review): install.packages() inside a script is an anti-pattern.
install.packages("class")
library(class)
# k = number of nearest neighbours to vote over.
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 20)
# Confusion matrix for k = 20.
table(actual= test_Lab, pred= modelknn)
# Correct classifications: 32819 + 23 = 32842
# Misclassifications: 17140 + 18 = 17158
# Accuracy: 32842/50000 = 65.68%
table(train$Label)
# Baseline: majority-class rate.
164333/250000
# KNN is performing below this naive baseline.
# Try k = 10:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 10)
# Confusion matrix for k = 10.
table(actual= test_Lab, pred= modelknn)
# Accuracy decreases.
# Try k = 15:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 15)
# Confusion matrix for k = 15.
table(actual= test_Lab, pred= modelknn)
# Try k = 30:
modelknn <- knn(train = train_sam, test = test_sam, cl= train_Lab, k= 30)
# Confusion matrix for k = 30.
table(actual= test_Lab, pred= modelknn)
|
# Read a Newick tree, strip its root, and write the unrooted form
# (codeml requires unrooted input trees).
library(ape)
rooted_tree <- read.tree("5359_2.txt")
write.tree(unroot(rooted_tree), file = "5359_2_unrooted.txt")
|
/codeml_files/newick_trees_processed/5359_2/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Read a Newick tree, strip its root, and write the unrooted form
# (codeml requires unrooted input trees).
library(ape)
rooted_tree <- read.tree("5359_2.txt")
write.tree(unroot(rooted_tree), file = "5359_2_unrooted.txt")
|
# DMR Overlaps
# Plots and enrichment analyses for overlaps between datasets from the
# genomic coordinate and gene symbol perspectives.
# Ben Laufer
# Packages ----------------------------------------------------------------
# Use the lab-shared R 4.0 library path.
.libPaths("/share/lasallelab/programs/DMRichR/R_4.0")
# Vennerable is GitHub-only; install it on first run.
if (!requireNamespace("Vennerable", quietly = TRUE))
BiocManager::install("js229/Vennerable")
packages <- c("DMRichR", "ChIPpeakAnno", "Vennerable", "TxDb.Mmusculus.UCSC.mm10.knownGene", "ggplot2",
"org.Mm.eg.db", "kableExtra", "regioneR", "BSgenome.Mmusculus.UCSC.mm10.masked", "UpSetR",
"ComplexUpset", "metap", "pheatmap", "magrittr", "Hmisc", "tidyverse")
# Attach everything; stop immediately if any package fails to load.
stopifnot(suppressMessages(sapply(packages, require, character.only = TRUE)))
# NOTE(review): calls enrichR's internal .onAttach via ::: to initialise its
# connection state — fragile against enrichR version changes.
enrichR:::.onAttach()
# Load DMRs ---------------------------------------------------------------
# NOTE(review): setwd() in a script is an anti-pattern; all paths below are
# relative to this directory.
setwd("/share/lasallelab/Ben/PEBBLES/DNA/DMRs/")
dir.create("overlaps")
# Load the significant DMRs for one contrast and expose them as a global
# variable named after the contrast (side effect by design: downstream code
# accesses them via get(paste0(...))).
#
# @param name contrast name, e.g. "placenta_male"; also the RData directory.
loadDMRs <- function(name){
load(glue::glue("{name}/RData/DMRs.RData"))
# The RData file provides `sigRegions` and `regions`; keep only sigRegions,
# re-bound in the global environment under the contrast name.
assign(name, sigRegions, envir = .GlobalEnv)
rm(sigRegions, regions)
}
# The four tissue-by-sex contrasts analysed throughout this script.
contrasts <- c("placenta_male", "placenta_female", "brain_male", "brain_female")
purrr::walk(contrasts, loadDMRs)
# Genomic coordinate ------------------------------------------------------
# For each sex: Venn of placenta-vs-brain DMR coordinate overlaps, then an
# annotated spreadsheet + HTML table of the shared regions.
dir.create("overlaps/coordinate")
purrr::walk(c("male", "female"), function(sex){
print(glue::glue("Obtaining genomic coordinate overlaps for {sex} samples"))
# Count pairwise coordinate overlaps between the placenta and brain DMR sets.
res <- suppressMessages(ChIPpeakAnno::makeVennDiagram(Peaks = list(get(paste0("placenta_",sex)),
get(paste0("brain_",sex))),
NameOfPeaks = c(paste0("placenta_",sex),
paste0("brain_",sex))))
print(glue::glue("Plotting Venn of genomic coordinate overlaps for {sex} samples"))
# Convert a ChIPpeakAnno vennCounts matrix to a Vennerable::Venn object.
# ref: https://support.bioconductor.org/p/67429/
venn_cnt2venn <- function(venn_cnt){
n <- which(colnames(venn_cnt) == "Counts") - 1
SetNames <- colnames(venn_cnt)[1:n]
Weight <- venn_cnt[,"Counts"]
names(Weight) <- apply(venn_cnt[,1:n], 1, paste, collapse="")
Vennerable::Venn(SetNames = SetNames, Weight = Weight)
}
v <- venn_cnt2venn(res$vennCounts)
svg(glue::glue("overlaps/coordinate/{sex}_venn_coordinate.svg"),
height = 8.5,
width = 12)
plot(v)
dev.off()
print(glue::glue("Annotating genomic coordinate overlaps for {sex} samples"))
# Merge both DMR sets, collapse to non-overlapping ranges, then keep only
# ranges overlapped by BOTH the placenta and brain sets.
shared <- c(get(paste0("placenta_",sex)),
get(paste0("brain_",sex))) %>%
GenomicRanges::sort() %>%
plyranges::reduce_ranges() %>%
plyranges::filter_by_overlaps(get(paste0("placenta_",sex))) %>%
plyranges::filter_by_overlaps(get(paste0("brain_",sex)))
# Annotate the shared ranges with nearest-gene information.
sharedAnnotated <- shared %>%
ChIPseeker::annotatePeak(TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db",
overlap = "all",
verbose = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::select(seqnames, start, end, width, annotation, geneSymbol = SYMBOL, gene = GENENAME)
# magrittr functional sequence: restrict a DMR set to the shared ranges and
# extract its per-region symbol/difference/p-value columns.
sharedOverlaps <- . %>%
plyranges::filter_by_overlaps(shared) %>%
DMRichR::annotateRegions(TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db") %>%
dplyr::select(geneSymbol, difference, `p-value`)
brainShared <- get(paste0("brain_",sex)) %>%
sharedOverlaps()
placentaShared <- get(paste0("placenta_",sex)) %>%
sharedOverlaps
# Join tissue-specific statistics onto the shared annotation, tidy the
# column names, and write the spreadsheet sorted by placenta p-value.
sharedAnnotated %>%
dplyr::left_join(placentaShared) %>%
dplyr::rename("placenta p-value" = `p-value`, "placenta Difference" = difference) %>%
dplyr::left_join(brainShared) %>%
dplyr::rename("brain p-value" = `p-value`, "brain Difference" = difference) %>%
dplyr::rename(chr = "seqnames", symbol = geneSymbol, Name = "gene") %>%
dplyr::rename_with(Hmisc::capitalize) %>%
dplyr::mutate(Name = Hmisc::capitalize(Name)) %>%
dplyr::mutate(Annotation = gsub(" \\(.*","", Annotation)) %>%
dplyr::arrange(`Placenta p-value`) %>%
openxlsx::write.xlsx(file = glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps_annotated.xlsx"))
print(glue::glue("Creating a kable of genomic coordinate overlaps for {sex} samples"))
# Re-read the spreadsheet and render a formatted HTML table.
readxl::read_xlsx(glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps_annotated.xlsx"),
col_types = c(rep("text", 7), rep(c("text","numeric"), 2))) %>%
dplyr::mutate_if(is.numeric, function(x) {
formatC(x, digits = 1, format = "e", drop0trailing = TRUE)
}) %>%
dplyr::mutate("Placenta Difference" = paste0(`Placenta Difference`, "%"),
"Brain Difference" = paste0(`Brain Difference`, "%")) %>%
kbl(align = c(rep("l",7), rep(c("r", "l"),2)),
col.names = c("Chr", "Start", "End", "Width",
"Region", "Symbol", "Name",
"Difference", "p-value", "Difference", "p-value")) %>%
kable_classic(full_width = FALSE, html_font = "Cambria") %>%
column_spec(6, italic = TRUE) %>%
add_header_above(c("Coordinates" = 4, "Annotation" = 3, "Placenta" = 2, "Brain" = 2)) %>%
kableExtra::save_kable(file = glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps.html"))
print(glue::glue("{sex} genomic coordinate overlaps pipeline is complete"))
})
# regioneR ----------------------------------------------------------------
# Permutation tests: is the brain/placenta DMR coordinate overlap greater
# than expected by chance in mm10?
dir.create("overlaps/regioneR")
# magrittr functional sequence: merge ranges into a sorted, non-overlapping
# consensus set.
getConsensus <- . %>%
plyranges::as_granges() %>%
GenomicRanges::sort() %>%
plyranges::reduce_ranges()
# Sex-combined consensus DMR sets, used by the "both" iteration below.
placenta_both <- c(placenta_male, placenta_female) %>%
getConsensus
brain_both <- c(brain_male, brain_female) %>%
getConsensus
purrr::walk(c("male", "female", "both"), function(sex){
print(glue::glue("Counting overlaps for {sex} samples"))
# Observed overlap count (each brain DMR counted at most once).
print(regioneR::numOverlaps(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
count.once = TRUE))
print(glue::glue("Running permutation test for {sex} samples"))
# 10k permutations against the mm10 genome; one-sided (greater overlap).
pt <- regioneR::overlapPermTest(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
alternative = "greater",
genome = "mm10",
ntimes = 10000,
count.once = TRUE)
pdf(glue::glue("overlaps/regioneR/regioneR_{sex}_Brain_Placenta_Overlap.pdf"),
height = 7.50,
width = 11.50)
plot(pt)
dev.off()
print(glue::glue("Calculating local Z-score for {sex} samples"))
# Local Z-score: how sensitive the association is to shifting A; the window
# and step are scaled to the mean brain-DMR width.
lz <- regioneR::localZScore(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
pt = pt,
window = 10*mean(width(get(paste0("brain_",sex)))),
step = mean(width(get(paste0("brain_",sex))))/2,
count.once = TRUE)
pdf(glue::glue("overlaps/regioneR/regioneR_{sex}_Brain_Placenta_Overlap_Zscore.pdf"),
height = 7.50,
width = 11.50)
plot(lz)
dev.off()
print(glue::glue("Saving data for {sex} samples"))
save(pt, lz, file = glue::glue("overlaps/regioneR/regioneR_{sex}_brain_placenta_overlap.RData"))
print(glue::glue("regioneR Brain DMR within Placenta DMR overlap finished for {sex} samples"))
})
# Gene symbol -------------------------------------------------------------
# Annotate a set of DMRs and return the de-duplicated, non-NA gene symbols
# they map to.
#
# @param regions GRanges of significant regions (default: `sigRegions` from
#   the calling environment).
# @param TxDb,annoDb annotation databases forwarded to
#   DMRichR::annotateRegions(). NOTE: the self-referential defaults only
#   resolve if same-named objects exist where the function is called.
# @return character vector of unique gene symbols.
getSymbol <- function(regions = sigRegions,
                      TxDb = TxDb,
                      annoDb = annoDb){
  annotated <- DMRichR::annotateRegions(regions,
                                        TxDb = TxDb,
                                        annoDb = annoDb)
  # (A filter removing "Distal Intergenic" annotations was deliberately
  # disabled here.)
  symbols <- purrr::pluck(dplyr::distinct(annotated), "geneSymbol")
  na.omit(symbols)
}
# Re-declare the four contrasts (shadowing any earlier value) and build a
# named list mapping each contrast to its DMR gene symbols.
contrasts <- c("placenta_male",
"placenta_female",
"brain_male",
"brain_female")
print(glue::glue("Annotating all DMRs"))
genes <- contrasts %>%
purrr::set_names() %>%
purrr::map(function(contrast){
getSymbol(get(contrast),
TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db")
})
dir.create("overlaps/symbol")
print(glue::glue("Creating UpSet plot of all gene symbol overlaps"))
# Build a ComplexUpset plot of gene-symbol overlaps across contrasts.
#
# @param list named list of character vectors (gene symbols per contrast);
#   names must match the set names referenced in the queries below
#   ("Placenta Female", "Placenta Male", "Brain Female", "Brain Male").
# @return a ggplot/ComplexUpset object (print it inside a device to render).
geneUpsetPlot <- function(list = list){
list %>%
UpSetR::fromList() %>%
ComplexUpset::upset(.,
names(.),
n_intersections = 40, # Default from UpSetR
width_ratio = 0.2,
height_ratio = 0.4,
# Intersection-size bars with counts and a minimal theme.
base_annotations = list(
'Intersection size'= intersection_size(
counts = TRUE
) +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = 'black'),
axis.text = element_text(size = 20),
axis.title = element_text(size = 20)
) +
scale_y_continuous(expand = c(0, 0, 0.1, 0))
),
sort_sets = FALSE,
# Highlight biologically interesting intersections: all four sets (red),
# placenta-only pair (orange), brain-only pair (green); colour the set
# size bars to match tissue.
queries = list(
upset_query(
intersect = c("Placenta Female", "Placenta Male",
"Brain Female", "Brain Male"),
color = '#E41A1C',
fill = '#E41A1C',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(
intersect = c("Placenta Female", "Placenta Male"),
color = '#FF7F00',
fill = '#FF7F00',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(
intersect = c("Brain Female", "Brain Male"),
color = '#4DAF4A',
fill = '#4DAF4A',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(set = 'Placenta Female', fill = '#FF7F00'),
upset_query(set = 'Placenta Male', fill = '#FF7F00'),
upset_query(set = 'Brain Female', fill = '#4DAF4A'),
upset_query(set = 'Brain Male', fill = '#4DAF4A')
),
matrix = intersection_matrix(
geom = geom_point(
shape = 'circle filled',
size = 3.5,
stroke = 0.45
)
),
set_sizes = (
upset_set_size(geom = geom_bar(color = 'black')
)
),
themes = upset_modify_themes( # names(upset_themes)
list(
'intersections_matrix' = theme(
axis.text = element_text(size = 20),
axis.title = element_blank()
),
'overall_sizes' = theme(
axis.title = element_text(size = 14)
)
)
)
)
}
# Render the four-set UpSet plot to PDF. Sets are renamed for display and
# reversed so they appear in the intended top-to-bottom order.
pdf(glue::glue("overlaps/symbol/UpSet_Genes_All_Symbol.pdf"),
height = 6, width = 10, onefile = FALSE)
print({
genes %>%
magrittr::set_names(c("Placenta Male", "Placenta Female",
"Brain Male", "Brain Female")) %>%
rev() %>%
geneUpsetPlot()
})
dev.off()
# For each comparison ("all" four contrasts, or within-sex pairs): Venn of
# gene-symbol overlaps, then enrichR GO/pathway analysis of the intersection.
purrr::walk(c("all", "male", "female"), function(contrast){
print(glue::glue("Generating gene symbol overlaps for {contrast} samples"))
# For "all", no branch fires: the outer four-element `contrasts` vector is
# used via closure capture. `genes <- genes[contrasts]` only rebinds a local
# copy inside this iteration.
if(contrast == "male"){
contrasts <- c("placenta_male", "brain_male")
}else if(contrast == "female"){
contrasts <- c("placenta_female", "brain_female")
}
genes <- genes[contrasts]
v <- Vennerable::Venn(genes,
SetNames = contrasts)
print(glue::glue("Saving gene symbol Venn of overlaps for {contrast} samples"))
svg(glue::glue("overlaps/symbol/{contrast}_symbols_Venn.svg"),
height = 8.5,
width = 12)
plot(v)
dev.off()
print(glue::glue("Performing GO analysis for {contrast} samples"))
# Vennerable intersection keys are membership bit strings: "1111" = in all
# four sets; "11" = in both sets of a pairwise Venn.
intersects <- dplyr::case_when(contrast == "all" ~ "1111",
contrast == "male" ~ "11",
contrast == "female" ~ "11")
# Tee-pipe (%T>%) writes intermediate results to disk while the main chain
# continues: symbols -> enrichR -> GO slimming -> plot.
v@IntersectionSets[[intersects]] %T>%
openxlsx::write.xlsx(glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_annotated.xlsx")) %>%
enrichR::enrichr(c("GO_Biological_Process_2018",
"GO_Cellular_Component_2018",
"GO_Molecular_Function_2018",
"KEGG_2019_Mouse",
"Panther_2016",
"Reactome_2016",
"RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO")) %T>% # %>%
#purrr::map(~ dplyr::filter(., Adjusted.P.value < 0.05)) %T>%
openxlsx::write.xlsx(file = glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr.xlsx")) %>%
DMRichR::slimGO(tool = "enrichR",
annoDb = "org.Mm.eg.db",
plots = TRUE) %T>%
openxlsx::write.xlsx(file =glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr_rrvgo_results.xlsx")) %>%
DMRichR::GOplot() %>%
ggplot2::ggsave(glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr_plot.pdf"),
plot = .,
device = NULL,
height = 8.5,
width = 10)
print(glue::glue("Finished gene symbol overlap pipe for {contrast} samples"))
})
# Meta p-value ------------------------------------------------------------
# Combine female and male enrichment p-values per term with Fisher's
# method (metap::sumlog), FDR-adjust the combined p, and keep terms
# significant in the meta analysis and nominally (p < 0.05) in both sexes.
print(glue::glue("Running meta p-value analysis for female and male samples"))
contrasts <- c("female", "male")
contrasts %>%
purrr::set_names() %>%
purrr::map(function(sex){
DMRichR::read_excel_all(glue::glue("overlaps/symbol/{sex}_symbol_overlaps_enrichr.xlsx")) %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble()}) %>%
purrr::map(~ dplyr::select(., Term, P.value, "Gene Ontology")) %>%
# Rename each sex's P.value column to the sex name so the join keeps both.
purrr::map2(contrasts, ~ data.table::setnames(.x, "P.value", .y)) %>%
purrr::reduce(dplyr::inner_join, by = c("Term", "Gene Ontology")) %>%
dplyr::rowwise(Term, "Gene Ontology") %>%
dplyr::mutate(meta_p = metap::sumlog(dplyr::c_across(where(is.numeric)))$p) %>%
dplyr::ungroup() %>%
dplyr::arrange(meta_p) %>%
# NOTE(review): after this step P.value holds the FDR-adjusted meta p
# (effectively a q-value), not the raw per-sex p-value.
dplyr::mutate(P.value = stats::p.adjust(meta_p, method = 'fdr')) %>%
dplyr::filter(P.value < 0.05, female < 0.05, male < 0.05) %>%
split(.$`Gene Ontology`) %>%
purrr::map(~ dplyr::select(., -`Gene Ontology`)) %T>%
openxlsx::write.xlsx(file = "overlaps/symbol/meta_p_all_symbol_overlaps_enrichr.xlsx") %>%
DMRichR::slimGO(tool = "enrichR",
annoDb = "org.Mm.eg.db",
plots = FALSE) %T>%
openxlsx::write.xlsx(file = "overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_rrvgo_results.xlsx")
## Production plot --------------------------------------------------------
# Assemble the plotting table: significant pathway terms (Panther, GEO)
# plus the rrvgo-slimmed GO terms, with -log10 q-values and tidied labels.
plotData <- DMRichR::read_excel_all("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr.xlsx") %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble() %>%
dplyr::filter(`Gene Ontology` %in% c( "Panther_2016", "RNA-Seq_Disease_Gene_and_Drug_S")) %>% # "KEGG_2019_Mouse",
dplyr::filter(P.value < 0.05, female < 0.05, male < 0.05) %>%
dplyr::mutate("-log10.p-value" = -log10(P.value)) %>%
dplyr::select("Gene Ontology", Term, "-log10.p-value") %>%
dplyr::bind_rows(readxl::read_xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_rrvgo_results.xlsx"), .) %>%
dplyr::mutate("Database" = dplyr::recode_factor(`Gene Ontology`,
"Biological Process" = "GO Biological Process",
"Cellular Component" = "GO Cellular Component",
"Molecular Function" = "GO Molecular Function",
#"KEGG_2019_Mouse" = "KEGG Pathways",
"Panther_2016" = "Panther Pathways",
"RNA-Seq_Disease_Gene_and_Drug_S" = "GEO RNA-seq Disease and Drug")) %>%
dplyr::select(-`Gene Ontology`) %>%
dplyr::mutate(Term = stringr::str_trim(Term)) %>%
dplyr::mutate(Term = Hmisc::capitalize(Term)) %>%
# Strip the "Homo sapiens ..." suffix carried by some pathway term names.
dplyr::mutate(Term = stringr::str_remove(Term, "Homo sapiens.*$")) %T>%
openxlsx::write.xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_plot_table.xlsx") %>%
dplyr::mutate(Term = stringr::str_trunc(Term, 50)) %>%
# Keep the top 7 terms per database (rows arrive sorted by meta p-value).
dplyr::group_by(Database) %>%
dplyr::slice(1:7) %>%
dplyr::ungroup() %>%
dplyr::mutate(Term = stringr::str_replace(Term, "up", "Up"),
Term = stringr::str_replace(Term, "down", "Down")) %>%
# Order Term factor levels by database then score so bars plot sorted.
dplyr::mutate(Term = factor(.$Term, levels = unique(.$Term[order(forcats::fct_rev(.$Database), .$`-log10.p-value`)])))
# Produce a horizontal, dodged bar chart of enrichment terms scored by
# -log10 transformed meta q-values, colored by source database.
# `data` must contain columns Term, `-log10.p-value`, and Database.
metaPlot <- function(data){
p <- ggplot2::ggplot(data,
aes(x = Term,
y = `-log10.p-value`,
fill = Database,
group = Database))
p <- p +
ggplot2::geom_bar(stat = "identity",
position = position_dodge(),
color = "Black") +
ggplot2::coord_flip() +
ggplot2::scale_y_continuous(expand = c(0, 0)) +
ggsci::scale_fill_d3() +
ggplot2::labs(y = expression("-log"[10](italic(q))))
# Classic theme with enlarged text; the y-axis title is dropped because
# the term labels are self-explanatory after coord_flip().
p +
ggplot2::theme_classic() +
ggplot2::theme(text = element_text(size = 40),
axis.title.y = element_blank(),
axis.title.x = element_text(size = 25),
legend.position = "none")
}
# Stack the GO/pathway panel above the GEO panel; the GEO panel is filled
# with #9467BDFF (the 5th d3 palette color, per the inline hint) so its
# color continues the palette used by the first panel.
cowplot::plot_grid(plotData %>%
dplyr::filter(Database != "GEO RNA-seq Disease and Drug") %>%
metaPlot() +
ggplot2::scale_y_continuous(breaks = c(0,5,10),
expand = c(0, 0)) +
ggplot2::theme(axis.title.x = element_blank()),
plotData %>%
dplyr::filter(Database == "GEO RNA-seq Disease and Drug") %>%
metaPlot() +
ggplot2::scale_fill_manual(values = "#9467BDFF"), # ggsci::pal_d3()(5)
align = "v",
ncol = 1,
rel_heights = c(11,4)) %>%
ggplot2::ggsave(glue::glue("overlaps/symbol/meta_p_all_production_symbol_overlaps_enrichr_plot.pdf"),
plot = .,
width = 14,
height = 16)
# Heatmap -----------------------------------------------------------------
# Modified from UpSetR
# https://github.com/hms-dbmi/UpSetR/blob/master/R/fromList.R
# Convert a named list of character vectors into a binary membership
# data frame: one row per unique element, one 0/1 column per input set.
# Row names are the elements, lower-cased then first-letter capitalized.
fromList2 <- function(input){
elements <- unique(unlist(input))
# One column per set: 1 if the element occurs in that set, else 0.
membership <- unlist(lapply(input, function(x) match(elements, x)))
membership[is.na(membership)] <- 0L
membership[membership != 0] <- 1L
data <- data.frame(matrix(membership, ncol = length(input), byrow = FALSE))
# All-zero rows cannot occur (every element comes from some set), but keep
# the original guard. drop = FALSE fixes the silent collapse to a vector
# that data.frame row-subsetting performs when only one set is supplied.
data <- data[which(rowSums(data) != 0), , drop = FALSE]
names(data) <- names(input)
lowered <- tolower(elements)
rownames(data) <- paste0(toupper(substring(lowered, 1, 1)), substring(lowered, 2))
data
}
# Modified from: https://stackoverflow.com/a/60177668
# Build an expression vector of italicized row labels (e.g. gene symbols)
# from a matrix/data frame, for use as pheatmap's labels_row argument.
make_italics <- function(x) {
labels <- lapply(rownames(x), function(label) bquote(italic(.(label))))
as.expression(labels)
}
## Make list --------------------------------------------------------------
# Build a named list of gene vectors: for each top GEO RNA-seq signature,
# the genes shared by the female and male enrichment results for that term.
geneList <- c("female", "male") %>%
purrr::set_names() %>%
purrr::map_dfr(function(sex){
DMRichR::read_excel_all(glue::glue("overlaps/symbol/{sex}_symbol_overlaps_enrichr.xlsx")) %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble() %>%
# enrichR reports Genes as one ";"-separated string; split into vectors.
dplyr::mutate(Genes = Genes %>%
purrr::map(~ stringr::str_split(., pattern = ";")) %>%
purrr::flatten()) %>%
dplyr::mutate("Database" = dplyr::recode_factor(`Gene Ontology`,
"GO_Biological_Process_2018" = "GO Biological Process",
"GO_Cellular_Component_2018" = "GO Cellular Component",
"GO_Molecular_Function_2018" = "GO Molecular Function",
"Panther_2016" = "Panther Pathways",
"RNA-Seq_Disease_Gene_and_Drug_S" = "GEO RNA-seq Disease and Drug")) %>%
dplyr::mutate(Term = stringr::str_remove(.$Term, "\\(GO.*")) %>%
dplyr::mutate(Term = stringr::str_remove(Term, "Homo sapiens.*$")) %>%
dplyr::mutate(Term = stringr::str_trim(Term)) %>%
dplyr::mutate(Term = Hmisc::capitalize(Term)) %>%
dplyr::select(Term, Database, Genes)
}, .id = "Sex") %>%
# Keep only terms present in both sexes, then spread Genes into one
# list-column per sex and intersect them.
dplyr::group_by(Term) %>%
dplyr::filter(dplyr::n() > 1) %>%
dplyr::ungroup() %>%
dplyr::mutate(Sex = Hmisc::capitalize(Sex)) %>%
tidyr::pivot_wider(names_from = Sex, values_from = Genes) %>%
dplyr::inner_join(readxl::read_xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_plot_table.xlsx"),
.,
by = c("Term", "Database")) %>%
dplyr::mutate(Shared = purrr::map2(Female, Male, intersect)) %T>%
openxlsx::write.xlsx("overlaps/symbol/enrichr_meta_p.xlsx") %>%
dplyr::filter(Database == "GEO RNA-seq Disease and Drug") %>%
dplyr::select(Term, `-log10.p-value`, Database, Shared) %>%
dplyr::slice(1:7) %>%
purrr::pluck("Shared") %>%
# NOTE(review): names below are hand-curated labels assumed to match the
# top-7 term order of the table above -- re-verify if inputs change.
purrr::set_names(c("MeCP2 Hypothalamus Knockout Up",
"Topotecan Cortical neurons 300 nM Down",
"MeCP2 Hypothalamus Transgenic Down",
"LPS Neuron Down",
"TAF15 Striatum Knockdown Down",
"Bicuculin Hippocampus 20 uM Down",
"MeCP2 Visual Cortex Knockout Up"))
## Main plot --------------------------------------------------------------
# Binary membership heatmap of the genes shared between the two MeCP2
# hypothalamus signatures. `.` inside the pheatmap call is the fromList2()
# output (magrittr also pipes it in as the first argument).
geneList %>%
magrittr::extract(c("MeCP2 Hypothalamus Knockout Up", "MeCP2 Hypothalamus Transgenic Down")) %>%
magrittr::set_names(c("MeCP2 Knockout Up", "MeCP2 Transgenic Down")) %>%
fromList2() %>%
pheatmap::pheatmap(angle_col = 45,
legend = FALSE,
labels_row = make_italics(.),
border_color = "black",
treeheight_col = 10,
cluster_rows = FALSE,
cluster_cols = FALSE,
filename = "overlaps/symbol/Rett_heatmap.pdf",
width = 1.1,
height = 8)
## Supplementary Plot -----------------------------------------------------
# Membership heatmap across all curated signatures (output file name
# indicates a neurodevelopmental-disorder context); rows keep input order,
# columns are clustered.
geneList %>%
fromList2() %>%
pheatmap::pheatmap(angle_col = 45,
legend = FALSE,
labels_row = make_italics(.),
border_color = "black",
treeheight_col = 10,
cluster_rows = FALSE,
filename = "overlaps/symbol/full_NDD_heatmap.pdf",
width = 2,
height = 12)
|
/DMRoverlaps_region.R
|
permissive
|
ben-laufer/PCB-Placenta-and-Brain
|
R
| false
| false
| 24,054
|
r
|
# DMR Overlaps
# Plots and enrichment analyses for overlaps between datasets from the genomic coordinate and gene symbol perspectives
# Ben Laufer
# Packages ----------------------------------------------------------------
# Cluster-specific library path; this script targets the lab's R 4.0 tree.
.libPaths("/share/lasallelab/programs/DMRichR/R_4.0")
if (!requireNamespace("Vennerable", quietly = TRUE))
BiocManager::install("js229/Vennerable")
packages <- c("DMRichR", "ChIPpeakAnno", "Vennerable", "TxDb.Mmusculus.UCSC.mm10.knownGene", "ggplot2",
"org.Mm.eg.db", "kableExtra", "regioneR", "BSgenome.Mmusculus.UCSC.mm10.masked", "UpSetR",
"ComplexUpset", "metap", "pheatmap", "magrittr", "Hmisc", "tidyverse")
# Attach everything up front; stopifnot() aborts if any package fails to load.
stopifnot(suppressMessages(sapply(packages, require, character.only = TRUE)))
# NOTE(review): calls an unexported enrichR internal, presumably to force
# its site/connection setup -- may break on an enrichR upgrade; confirm.
enrichR:::.onAttach()
# Load DMRs ---------------------------------------------------------------
setwd("/share/lasallelab/Ben/PEBBLES/DNA/DMRs/")
dir.create("overlaps")
# Load one contrast's significant DMRs from its RData file and bind them
# to a variable named after the contrast in the global environment.
loadDMRs <- function(name){
load(glue::glue("{name}/RData/DMRs.RData"))
# The RData file provides `sigRegions` and `regions`; only sigRegions is
# kept (as a global named `name`), the locals are then discarded.
assign(name, sigRegions, envir = .GlobalEnv)
rm(sigRegions, regions)
}
contrasts <- c("placenta_male", "placenta_female", "brain_male", "brain_female")
purrr::walk(contrasts, loadDMRs)
# Genomic coordinate ------------------------------------------------------
dir.create("overlaps/coordinate")
# For each sex: overlap placenta vs brain DMRs at the coordinate level,
# plot a Venn, annotate the shared regions, and save an annotated
# spreadsheet plus a formatted HTML table.
purrr::walk(c("male", "female"), function(sex){
print(glue::glue("Obtaining genomic coordinate overlaps for {sex} samples"))
res <- suppressMessages(ChIPpeakAnno::makeVennDiagram(Peaks = list(get(paste0("placenta_",sex)),
get(paste0("brain_",sex))),
NameOfPeaks = c(paste0("placenta_",sex),
paste0("brain_",sex))))
print(glue::glue("Plotting Venn of genomic coordinate overlaps for {sex} samples"))
# ref: https://support.bioconductor.org/p/67429/
# Convert ChIPpeakAnno's venn count matrix into a Vennerable::Venn object
# (weights keyed by membership bit-strings such as "11").
venn_cnt2venn <- function(venn_cnt){
n <- which(colnames(venn_cnt) == "Counts") - 1
SetNames <- colnames(venn_cnt)[1:n]
Weight <- venn_cnt[,"Counts"]
names(Weight) <- apply(venn_cnt[,1:n], 1, paste, collapse="")
Vennerable::Venn(SetNames = SetNames, Weight = Weight)
}
v <- venn_cnt2venn(res$vennCounts)
svg(glue::glue("overlaps/coordinate/{sex}_venn_coordinate.svg"),
height = 8.5,
width = 12)
plot(v)
dev.off()
print(glue::glue("Annotating genomic coordinate overlaps for {sex} samples"))
# Merge placenta+brain DMRs, then keep merged ranges overlapping BOTH sets.
shared <- c(get(paste0("placenta_",sex)),
get(paste0("brain_",sex))) %>%
GenomicRanges::sort() %>%
plyranges::reduce_ranges() %>%
plyranges::filter_by_overlaps(get(paste0("placenta_",sex))) %>%
plyranges::filter_by_overlaps(get(paste0("brain_",sex)))
sharedAnnotated <- shared %>%
ChIPseeker::annotatePeak(TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db",
overlap = "all",
verbose = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::select(seqnames, start, end, width, annotation, geneSymbol = SYMBOL, gene = GENENAME)
# magrittr functional sequence: subset a DMR set to the shared regions and
# pull per-region statistics for the joins below.
sharedOverlaps <- . %>%
plyranges::filter_by_overlaps(shared) %>%
DMRichR::annotateRegions(TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db") %>%
dplyr::select(geneSymbol, difference, `p-value`)
brainShared <- get(paste0("brain_",sex)) %>%
sharedOverlaps()
placentaShared <- get(paste0("placenta_",sex)) %>%
sharedOverlaps
# Join tissue-specific stats onto the shared annotation and export.
sharedAnnotated %>%
dplyr::left_join(placentaShared) %>%
dplyr::rename("placenta p-value" = `p-value`, "placenta Difference" = difference) %>%
dplyr::left_join(brainShared) %>%
dplyr::rename("brain p-value" = `p-value`, "brain Difference" = difference) %>%
dplyr::rename(chr = "seqnames", symbol = geneSymbol, Name = "gene") %>%
dplyr::rename_with(Hmisc::capitalize) %>%
dplyr::mutate(Name = Hmisc::capitalize(Name)) %>%
dplyr::mutate(Annotation = gsub(" \\(.*","", Annotation)) %>%
dplyr::arrange(`Placenta p-value`) %>%
openxlsx::write.xlsx(file = glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps_annotated.xlsx"))
print(glue::glue("Creating a kable of genomic coordinate overlaps for {sex} samples"))
readxl::read_xlsx(glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps_annotated.xlsx"),
col_types = c(rep("text", 7), rep(c("text","numeric"), 2))) %>%
dplyr::mutate_if(is.numeric, function(x) {
formatC(x, digits = 1, format = "e", drop0trailing = TRUE)
}) %>%
dplyr::mutate("Placenta Difference" = paste0(`Placenta Difference`, "%"),
"Brain Difference" = paste0(`Brain Difference`, "%")) %>%
kbl(align = c(rep("l",7), rep(c("r", "l"),2)),
col.names = c("Chr", "Start", "End", "Width",
"Region", "Symbol", "Name",
"Difference", "p-value", "Difference", "p-value")) %>%
kable_classic(full_width = FALSE, html_font = "Cambria") %>%
column_spec(6, italic = TRUE) %>%
add_header_above(c("Coordinates" = 4, "Annotation" = 3, "Placenta" = 2, "Brain" = 2)) %>%
kableExtra::save_kable(file = glue::glue("overlaps/coordinate/{sex}_coordinate_overlaps.html"))
print(glue::glue("{sex} genomic coordinate overlaps pipeline is complete"))
})
# regioneR ----------------------------------------------------------------
dir.create("overlaps/regioneR")
# magrittr functional sequence: merge ranges into a sorted, non-overlapping
# consensus set.
getConsensus <- . %>%
plyranges::as_granges() %>%
GenomicRanges::sort() %>%
plyranges::reduce_ranges()
# Sex-combined consensus DMR sets, used when sex == "both" below.
placenta_both <- c(placenta_male, placenta_female) %>%
getConsensus
brain_both <- c(brain_male, brain_female) %>%
getConsensus
# Permutation tests (10,000 shuffles on mm10) asking whether brain DMRs
# overlap placenta DMRs more than expected by chance, per sex and combined.
purrr::walk(c("male", "female", "both"), function(sex){
print(glue::glue("Counting overlaps for {sex} samples"))
print(regioneR::numOverlaps(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
count.once = TRUE))
print(glue::glue("Running permutation test for {sex} samples"))
pt <- regioneR::overlapPermTest(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
alternative = "greater",
genome = "mm10",
ntimes = 10000,
count.once = TRUE)
pdf(glue::glue("overlaps/regioneR/regioneR_{sex}_Brain_Placenta_Overlap.pdf"),
height = 7.50,
width = 11.50)
plot(pt)
dev.off()
print(glue::glue("Calculating local Z-score for {sex} samples"))
# Window and step are scaled to the mean brain DMR width so the local
# z-score profile is resolved relative to region size.
lz <- regioneR::localZScore(A = get(paste0("brain_",sex)),
B = get(paste0("placenta_",sex)),
pt = pt,
window = 10*mean(width(get(paste0("brain_",sex)))),
step = mean(width(get(paste0("brain_",sex))))/2,
count.once = TRUE)
pdf(glue::glue("overlaps/regioneR/regioneR_{sex}_Brain_Placenta_Overlap_Zscore.pdf"),
height = 7.50,
width = 11.50)
plot(lz)
dev.off()
print(glue::glue("Saving data for {sex} samples"))
save(pt, lz, file = glue::glue("overlaps/regioneR/regioneR_{sex}_brain_placenta_overlap.RData"))
print(glue::glue("regioneR Brain DMR within Placenta DMR overlap finished for {sex} samples"))
})
# Gene symbol -------------------------------------------------------------
# Annotate a set of DMRs and return the unique, non-NA gene symbols.
# NOTE(review): the defaults `TxDb = TxDb` and `annoDb = annoDb` are
# self-referential promises and would error if relied upon; callers must
# always supply both arguments (they do below) -- consider removing them.
getSymbol <- function(regions = sigRegions,
TxDb = TxDb,
annoDb = annoDb){
regions %>%
DMRichR::annotateRegions(TxDb = TxDb,
annoDb = annoDb) %>%
#dplyr::filter(annotation != "Distal Intergenic") %>%
dplyr::distinct() %>%
purrr::pluck("geneSymbol") %>%
na.omit()
}
contrasts <- c("placenta_male",
"placenta_female",
"brain_male",
"brain_female")
print(glue::glue("Annotating all DMRs"))
# Named list of gene-symbol vectors, one element per contrast; each
# contrast's DMR GRanges was bound as a global by loadDMRs() above.
genes <- contrasts %>%
purrr::set_names() %>%
purrr::map(function(contrast){
getSymbol(get(contrast),
TxDb = TxDb.Mmusculus.UCSC.mm10.knownGene,
annoDb = "org.Mm.eg.db")
})
dir.create("overlaps/symbol")
print(glue::glue("Creating UpSet plot of all gene symbol overlaps"))
# Build a ComplexUpset plot of gene-symbol overlaps across the four
# contrasts. Queries highlight the key intersections: red = all four sets,
# orange = placenta (both sexes), green = brain (both sexes).
# NOTE(review): the parameter is named `list` (shadowing base::list inside
# the body) and its default `list = list` is self-referential; callers
# always pipe a value in, so the default is never evaluated.
geneUpsetPlot <- function(list = list){
list %>%
UpSetR::fromList() %>%
ComplexUpset::upset(.,
names(.),
n_intersections = 40, # Default from UpSetR
width_ratio = 0.2,
height_ratio = 0.4,
base_annotations = list(
'Intersection size'= intersection_size(
counts = TRUE
) +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = 'black'),
axis.text = element_text(size = 20),
axis.title = element_text(size = 20)
) +
scale_y_continuous(expand = c(0, 0, 0.1, 0))
),
sort_sets = FALSE,
queries = list(
upset_query(
intersect = c("Placenta Female", "Placenta Male",
"Brain Female", "Brain Male"),
color = '#E41A1C',
fill = '#E41A1C',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(
intersect = c("Placenta Female", "Placenta Male"),
color = '#FF7F00',
fill = '#FF7F00',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(
intersect = c("Brain Female", "Brain Male"),
color = '#4DAF4A',
fill = '#4DAF4A',
only_components = c('intersections_matrix', 'Intersection size')
),
upset_query(set = 'Placenta Female', fill = '#FF7F00'),
upset_query(set = 'Placenta Male', fill = '#FF7F00'),
upset_query(set = 'Brain Female', fill = '#4DAF4A'),
upset_query(set = 'Brain Male', fill = '#4DAF4A')
),
matrix = intersection_matrix(
geom = geom_point(
shape = 'circle filled',
size = 3.5,
stroke = 0.45
)
),
set_sizes = (
upset_set_size(geom = geom_bar(color = 'black')
)
),
themes = upset_modify_themes( # names(upset_themes)
list(
'intersections_matrix' = theme(
axis.text = element_text(size = 20),
axis.title = element_blank()
),
'overall_sizes' = theme(
axis.title = element_text(size = 14)
)
)
)
)
}
# Render the UpSet plot of gene-symbol overlaps across all four contrasts
# to a single-page PDF. onefile = FALSE presumably avoids the blank first
# page grid-based devices can emit -- confirm if the output looks wrong.
pdf(glue::glue("overlaps/symbol/UpSet_Genes_All_Symbol.pdf"),
height = 6, width = 10, onefile = FALSE)
print({
genes %>%
magrittr::set_names(c("Placenta Male", "Placenta Female",
"Brain Male", "Brain Female")) %>%
# rev() reverses list order so set bars display in the intended order.
rev() %>%
geneUpsetPlot()
})
dev.off()
# For each comparison ("all" four contrasts, the male pair, the female
# pair): plot a Venn of gene-symbol overlaps, then run enrichment on the
# genes common to every set in that comparison.
purrr::walk(c("all", "male", "female"), function(contrast){
print(glue::glue("Generating gene symbol overlaps for {contrast} samples"))
if(contrast == "male"){
contrasts <- c("placenta_male", "brain_male")
}else if(contrast == "female"){
contrasts <- c("placenta_female", "brain_female")
}
# NOTE(review): for contrast == "all" no local `contrasts` is assigned, so
# the four-element `contrasts` vector from the enclosing environment is
# used via lexical scoping -- confirm this is intentional.
genes <- genes[contrasts]
v <- Vennerable::Venn(genes,
SetNames = contrasts)
print(glue::glue("Saving gene symbol Venn of overlaps for {contrast} samples"))
svg(glue::glue("overlaps/symbol/{contrast}_symbols_Venn.svg"),
height = 8.5,
width = 12)
plot(v)
dev.off()
print(glue::glue("Performing GO analysis for {contrast} samples"))
# Vennerable intersection keys are membership bit-strings: "1111" = genes
# present in all four sets; "11" = genes in both sets of a pairwise Venn.
intersects <- dplyr::case_when(contrast == "all" ~ "1111",
contrast == "male" ~ "11",
contrast == "female" ~ "11")
# %T>% tees the pipeline: write each intermediate to disk, then continue.
v@IntersectionSets[[intersects]] %T>%
openxlsx::write.xlsx(glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_annotated.xlsx")) %>%
enrichR::enrichr(c("GO_Biological_Process_2018",
"GO_Cellular_Component_2018",
"GO_Molecular_Function_2018",
"KEGG_2019_Mouse",
"Panther_2016",
"Reactome_2016",
"RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO")) %T>% # %>%
#purrr::map(~ dplyr::filter(., Adjusted.P.value < 0.05)) %T>%
openxlsx::write.xlsx(file = glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr.xlsx")) %>%
DMRichR::slimGO(tool = "enrichR",
annoDb = "org.Mm.eg.db",
plots = TRUE) %T>%
openxlsx::write.xlsx(file =glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr_rrvgo_results.xlsx")) %>%
DMRichR::GOplot() %>%
ggplot2::ggsave(glue::glue("overlaps/symbol/{contrast}_symbol_overlaps_enrichr_plot.pdf"),
plot = .,
device = NULL,
height = 8.5,
width = 10)
print(glue::glue("Finished gene symbol overlap pipe for {contrast} samples"))
})
# Meta p-value ------------------------------------------------------------
# Combine female and male enrichment p-values per term with Fisher's
# method (metap::sumlog), FDR-adjust the combined p, and keep terms
# significant in the meta analysis and nominally (p < 0.05) in both sexes.
print(glue::glue("Running meta p-value analysis for female and male samples"))
contrasts <- c("female", "male")
contrasts %>%
purrr::set_names() %>%
purrr::map(function(sex){
DMRichR::read_excel_all(glue::glue("overlaps/symbol/{sex}_symbol_overlaps_enrichr.xlsx")) %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble()}) %>%
purrr::map(~ dplyr::select(., Term, P.value, "Gene Ontology")) %>%
# Rename each sex's P.value column to the sex name so the join keeps both.
purrr::map2(contrasts, ~ data.table::setnames(.x, "P.value", .y)) %>%
purrr::reduce(dplyr::inner_join, by = c("Term", "Gene Ontology")) %>%
dplyr::rowwise(Term, "Gene Ontology") %>%
dplyr::mutate(meta_p = metap::sumlog(dplyr::c_across(where(is.numeric)))$p) %>%
dplyr::ungroup() %>%
dplyr::arrange(meta_p) %>%
# NOTE(review): after this step P.value holds the FDR-adjusted meta p
# (effectively a q-value), not the raw per-sex p-value.
dplyr::mutate(P.value = stats::p.adjust(meta_p, method = 'fdr')) %>%
dplyr::filter(P.value < 0.05, female < 0.05, male < 0.05) %>%
split(.$`Gene Ontology`) %>%
purrr::map(~ dplyr::select(., -`Gene Ontology`)) %T>%
openxlsx::write.xlsx(file = "overlaps/symbol/meta_p_all_symbol_overlaps_enrichr.xlsx") %>%
DMRichR::slimGO(tool = "enrichR",
annoDb = "org.Mm.eg.db",
plots = FALSE) %T>%
openxlsx::write.xlsx(file = "overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_rrvgo_results.xlsx")
## Production plot --------------------------------------------------------
# Assemble the plotting table: significant pathway terms (Panther, GEO)
# plus the rrvgo-slimmed GO terms, with -log10 q-values and tidied labels.
plotData <- DMRichR::read_excel_all("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr.xlsx") %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble() %>%
dplyr::filter(`Gene Ontology` %in% c( "Panther_2016", "RNA-Seq_Disease_Gene_and_Drug_S")) %>% # "KEGG_2019_Mouse",
dplyr::filter(P.value < 0.05, female < 0.05, male < 0.05) %>%
dplyr::mutate("-log10.p-value" = -log10(P.value)) %>%
dplyr::select("Gene Ontology", Term, "-log10.p-value") %>%
dplyr::bind_rows(readxl::read_xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_rrvgo_results.xlsx"), .) %>%
dplyr::mutate("Database" = dplyr::recode_factor(`Gene Ontology`,
"Biological Process" = "GO Biological Process",
"Cellular Component" = "GO Cellular Component",
"Molecular Function" = "GO Molecular Function",
#"KEGG_2019_Mouse" = "KEGG Pathways",
"Panther_2016" = "Panther Pathways",
"RNA-Seq_Disease_Gene_and_Drug_S" = "GEO RNA-seq Disease and Drug")) %>%
dplyr::select(-`Gene Ontology`) %>%
dplyr::mutate(Term = stringr::str_trim(Term)) %>%
dplyr::mutate(Term = Hmisc::capitalize(Term)) %>%
# Strip the "Homo sapiens ..." suffix carried by some pathway term names.
dplyr::mutate(Term = stringr::str_remove(Term, "Homo sapiens.*$")) %T>%
openxlsx::write.xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_plot_table.xlsx") %>%
dplyr::mutate(Term = stringr::str_trunc(Term, 50)) %>%
# Keep the top 7 terms per database (rows arrive sorted by meta p-value).
dplyr::group_by(Database) %>%
dplyr::slice(1:7) %>%
dplyr::ungroup() %>%
dplyr::mutate(Term = stringr::str_replace(Term, "up", "Up"),
Term = stringr::str_replace(Term, "down", "Down")) %>%
# Order Term factor levels by database then score so bars plot sorted.
dplyr::mutate(Term = factor(.$Term, levels = unique(.$Term[order(forcats::fct_rev(.$Database), .$`-log10.p-value`)])))
# Produce a horizontal, dodged bar chart of enrichment terms scored by
# -log10 transformed meta q-values, colored by source database.
# `data` must contain columns Term, `-log10.p-value`, and Database.
metaPlot <- function(data){
p <- ggplot2::ggplot(data,
aes(x = Term,
y = `-log10.p-value`,
fill = Database,
group = Database))
p <- p +
ggplot2::geom_bar(stat = "identity",
position = position_dodge(),
color = "Black") +
ggplot2::coord_flip() +
ggplot2::scale_y_continuous(expand = c(0, 0)) +
ggsci::scale_fill_d3() +
ggplot2::labs(y = expression("-log"[10](italic(q))))
# Classic theme with enlarged text; the y-axis title is dropped because
# the term labels are self-explanatory after coord_flip().
p +
ggplot2::theme_classic() +
ggplot2::theme(text = element_text(size = 40),
axis.title.y = element_blank(),
axis.title.x = element_text(size = 25),
legend.position = "none")
}
# Stack the GO/pathway panel above the GEO panel; the GEO panel is filled
# with #9467BDFF (the 5th d3 palette color, per the inline hint) so its
# color continues the palette used by the first panel.
cowplot::plot_grid(plotData %>%
dplyr::filter(Database != "GEO RNA-seq Disease and Drug") %>%
metaPlot() +
ggplot2::scale_y_continuous(breaks = c(0,5,10),
expand = c(0, 0)) +
ggplot2::theme(axis.title.x = element_blank()),
plotData %>%
dplyr::filter(Database == "GEO RNA-seq Disease and Drug") %>%
metaPlot() +
ggplot2::scale_fill_manual(values = "#9467BDFF"), # ggsci::pal_d3()(5)
align = "v",
ncol = 1,
rel_heights = c(11,4)) %>%
ggplot2::ggsave(glue::glue("overlaps/symbol/meta_p_all_production_symbol_overlaps_enrichr_plot.pdf"),
plot = .,
width = 14,
height = 16)
# Heatmap -----------------------------------------------------------------
# Modified from UpSetR
# https://github.com/hms-dbmi/UpSetR/blob/master/R/fromList.R
# Convert a named list of character vectors into a binary membership
# data frame: one row per unique element, one 0/1 column per input set.
# Row names are the elements, lower-cased then first-letter capitalized.
fromList2 <- function(input){
elements <- unique(unlist(input))
# One column per set: 1 if the element occurs in that set, else 0.
membership <- unlist(lapply(input, function(x) match(elements, x)))
membership[is.na(membership)] <- 0L
membership[membership != 0] <- 1L
data <- data.frame(matrix(membership, ncol = length(input), byrow = FALSE))
# All-zero rows cannot occur (every element comes from some set), but keep
# the original guard. drop = FALSE fixes the silent collapse to a vector
# that data.frame row-subsetting performs when only one set is supplied.
data <- data[which(rowSums(data) != 0), , drop = FALSE]
names(data) <- names(input)
lowered <- tolower(elements)
rownames(data) <- paste0(toupper(substring(lowered, 1, 1)), substring(lowered, 2))
data
}
# Modified from: https://stackoverflow.com/a/60177668
# Build an expression vector of italicized row labels (e.g. gene symbols)
# from a matrix/data frame, for use as pheatmap's labels_row argument.
make_italics <- function(x) {
labels <- lapply(rownames(x), function(label) bquote(italic(.(label))))
as.expression(labels)
}
## Make list --------------------------------------------------------------
# Build a named list of gene vectors: for each top GEO RNA-seq signature,
# the genes shared by the female and male enrichment results for that term.
geneList <- c("female", "male") %>%
purrr::set_names() %>%
purrr::map_dfr(function(sex){
DMRichR::read_excel_all(glue::glue("overlaps/symbol/{sex}_symbol_overlaps_enrichr.xlsx")) %>%
data.table::rbindlist(idcol = "Gene Ontology") %>%
dplyr::as_tibble() %>%
# enrichR reports Genes as one ";"-separated string; split into vectors.
dplyr::mutate(Genes = Genes %>%
purrr::map(~ stringr::str_split(., pattern = ";")) %>%
purrr::flatten()) %>%
dplyr::mutate("Database" = dplyr::recode_factor(`Gene Ontology`,
"GO_Biological_Process_2018" = "GO Biological Process",
"GO_Cellular_Component_2018" = "GO Cellular Component",
"GO_Molecular_Function_2018" = "GO Molecular Function",
"Panther_2016" = "Panther Pathways",
"RNA-Seq_Disease_Gene_and_Drug_S" = "GEO RNA-seq Disease and Drug")) %>%
dplyr::mutate(Term = stringr::str_remove(.$Term, "\\(GO.*")) %>%
dplyr::mutate(Term = stringr::str_remove(Term, "Homo sapiens.*$")) %>%
dplyr::mutate(Term = stringr::str_trim(Term)) %>%
dplyr::mutate(Term = Hmisc::capitalize(Term)) %>%
dplyr::select(Term, Database, Genes)
}, .id = "Sex") %>%
# Keep only terms present in both sexes, then spread Genes into one
# list-column per sex and intersect them.
dplyr::group_by(Term) %>%
dplyr::filter(dplyr::n() > 1) %>%
dplyr::ungroup() %>%
dplyr::mutate(Sex = Hmisc::capitalize(Sex)) %>%
tidyr::pivot_wider(names_from = Sex, values_from = Genes) %>%
dplyr::inner_join(readxl::read_xlsx("overlaps/symbol/meta_p_all_symbol_overlaps_enrichr_plot_table.xlsx"),
.,
by = c("Term", "Database")) %>%
dplyr::mutate(Shared = purrr::map2(Female, Male, intersect)) %T>%
openxlsx::write.xlsx("overlaps/symbol/enrichr_meta_p.xlsx") %>%
dplyr::filter(Database == "GEO RNA-seq Disease and Drug") %>%
dplyr::select(Term, `-log10.p-value`, Database, Shared) %>%
dplyr::slice(1:7) %>%
purrr::pluck("Shared") %>%
# NOTE(review): names below are hand-curated labels assumed to match the
# top-7 term order of the table above -- re-verify if inputs change.
purrr::set_names(c("MeCP2 Hypothalamus Knockout Up",
"Topotecan Cortical neurons 300 nM Down",
"MeCP2 Hypothalamus Transgenic Down",
"LPS Neuron Down",
"TAF15 Striatum Knockdown Down",
"Bicuculin Hippocampus 20 uM Down",
"MeCP2 Visual Cortex Knockout Up"))
## Main plot --------------------------------------------------------------
# Binary membership heatmap of the genes shared between the two MeCP2
# hypothalamus signatures. `.` inside the pheatmap call is the fromList2()
# output (magrittr also pipes it in as the first argument).
geneList %>%
magrittr::extract(c("MeCP2 Hypothalamus Knockout Up", "MeCP2 Hypothalamus Transgenic Down")) %>%
magrittr::set_names(c("MeCP2 Knockout Up", "MeCP2 Transgenic Down")) %>%
fromList2() %>%
pheatmap::pheatmap(angle_col = 45,
legend = FALSE,
labels_row = make_italics(.),
border_color = "black",
treeheight_col = 10,
cluster_rows = FALSE,
cluster_cols = FALSE,
filename = "overlaps/symbol/Rett_heatmap.pdf",
width = 1.1,
height = 8)
## Supplementary Plot -----------------------------------------------------
# Membership heatmap across all curated signatures (output file name
# indicates a neurodevelopmental-disorder context); rows keep input order,
# columns are clustered.
geneList %>%
fromList2() %>%
pheatmap::pheatmap(angle_col = 45,
legend = FALSE,
labels_row = make_italics(.),
border_color = "black",
treeheight_col = 10,
cluster_rows = FALSE,
filename = "overlaps/symbol/full_NDD_heatmap.pdf",
width = 2,
height = 12)
|
\name{OBrienKaiserLong}
\alias{OBrienKaiserLong}
\docType{data}
\title{O'Brien and Kaiser's Repeated-Measures Data in "Long" Format}
\description{Contrived repeated-measures data from O'Brien and Kaiser (1985). For details see \code{\link{OBrienKaiser}}, which is for the "wide" form of the same data.}
\usage{OBrienKaiserLong}
\format{
A data frame with 240 observations on the following 6 variables.
\describe{
\item{\code{treatment}}{a between-subjects factor with levels \code{control}, \code{A}, \code{B}.}
\item{\code{gender}}{a between-subjects factor with levels \code{F}, \code{M}.}
\item{\code{score}}{the numeric response variable.}
\item{\code{id}}{the subject id number.}
\item{\code{phase}}{a within-subjects factor with levels \code{pre}, \code{post}, \code{fup}.}
\item{\code{hour}}{a within-subjects factor with levels \code{1}, \code{2}, \code{3}, \code{4}, \code{5}.}
}
}
\source{
O'Brien, R. G., and Kaiser, M. K. (1985)
MANOVA method for analyzing repeated measures designs: An extensive primer.
\emph{Psychological Bulletin} \bold{97}, 316--333, Table 7.
}
\examples{
head(OBrienKaiserLong, 15) # first subject
}
\seealso{\code{\link{OBrienKaiser}}.}
\keyword{datasets}
|
/man/OBrienKaiserLong.Rd
|
no_license
|
cran/carData
|
R
| false
| false
| 1,236
|
rd
|
\name{OBrienKaiserLong}
\alias{OBrienKaiserLong}
\docType{data}
\title{O'Brien and Kaiser's Repeated-Measures Data in "Long" Format}
\description{Contrived repeated-measures data from O'Brien and Kaiser (1985). For details see \code{\link{OBrienKaiser}}, which is for the "wide" form of the same data.}
\usage{OBrienKaiserLong}
\format{
A data frame with 240 observations on the following 6 variables.
\describe{
\item{\code{treatment}}{a between-subjects factor with levels \code{control}, \code{A}, \code{B}.}
\item{\code{gender}}{a between-subjects factor with levels \code{F}, \code{M}.}
\item{\code{score}}{the numeric response variable.}
\item{\code{id}}{the subject id number.}
\item{\code{phase}}{a within-subjects factor with levels \code{pre}, \code{post}, \code{fup}.}
\item{\code{hour}}{a within-subjects factor with levels \code{1}, \code{2}, \code{3}, \code{4}, \code{5}.}
}
}
\source{
O'Brien, R. G., and Kaiser, M. K. (1985)
MANOVA method for analyzing repeated measures designs: An extensive primer.
\emph{Psychological Bulletin} \bold{97}, 316--333, Table 7.
}
\examples{
head(OBrienKaiserLong, 15) # first subject
}
\seealso{\code{\link{OBrienKaiser}}.}
\keyword{datasets}
|
#' Data for three letter airport code lookup
#'
#' This table contains a list of airports identified by three letter codes.
#' Each airport code has a corresponding longitude and latitude in degrees.
#'
#' @format A table of airport codes with their latitude and longitude in
#'   degrees; for example, YYJ has latitude 48.64694 and longitude
#'   -123.425833.
#' @source from the web
"airport_codes"
|
/hw09/carbonfly/R/airport_codes.R
|
no_license
|
swynes/carbonfly
|
R
| false
| false
| 315
|
r
|
#' Data for three letter airport code lookup
#'
#' This table contains a list of airports identified by three letter codes.
#' Each airport code has a corresponding longitude and latitude in degrees.
#'
#' @format A table of airport codes with their latitude and longitude in
#'   degrees; for example, YYJ has latitude 48.64694 and longitude
#'   -123.425833.
#' @source from the web
"airport_codes"
|
###############################################################
# DetermineStateSpace_out.R
#
# Organizes output for DetermineStateSpace.R
###############################################################
# Ryan Hastings, 26 May 2020..stage four 11 jun
###############################################################
# Fold compartments into reported totals: H becomes the sum of H, G and Q,
# and C the sum of G and Q. NOTE(review): H, G, Q, Icum, N, maxt, Dday,
# AugReduction, stage5_weeks, the loop indices and StateSpace are all
# globals created by DetermineStateSpace.R -- confirm meanings there.
H=H+G+Q
C=G+Q
# Percent of the population ever infected at each timestep.
percent_infected<-c(rep(0,maxt))
for (t in 1: maxt) {
percent_infected[t]=round(100*Icum[t]/N)
}
# Fill six output variables per timestep; the commented block below
# documents the 13-variable layout used by earlier model versions.
# NOTE(review): percent_infected is computed but not stored in the current
# six-variable layout -- possibly dead code.
for (t in 1:maxt) {
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,1,t]=round(S[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,2,t]=round(E[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,3,t]=round(In[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,4,t]=round(H[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,5,t]=round(C[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,6,t]=round(D[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,7,t]=round(R[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,8,t]=round(Ecum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,9,t]=round(Icum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,10,t]=round(Hcum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,11,t]=round(Ccum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,12,t]=round(percent_infected[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,13,t]=round(Dday[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,1,t]=round(H[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,2,t]=round(C[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,3,t]=round(D[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,4,t]=round(Dday[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,5,t]=AugReduction
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,6,t]=stage5_weeks
}
|
/model_v3.0/DetermineStateSpaceStageFiveAugust_out.R
|
no_license
|
RyanHastings/COVID19
|
R
| false
| false
| 1,632
|
r
|
###############################################################
# DetermineStateSpace_out.R
#
# Organizes output for DetermineStateSpace.R
###############################################################
# Ryan Hastings, 26 May 2020..stage four 11 jun
###############################################################
# Fold compartments into reported totals: H becomes the sum of H, G and Q,
# and C the sum of G and Q. NOTE(review): H, G, Q, Icum, N, maxt, Dday,
# AugReduction, stage5_weeks, the loop indices and StateSpace are all
# globals created by DetermineStateSpace.R -- confirm meanings there.
H=H+G+Q
C=G+Q
# Percent of the population ever infected at each timestep.
percent_infected<-c(rep(0,maxt))
for (t in 1: maxt) {
percent_infected[t]=round(100*Icum[t]/N)
}
# Fill six output variables per timestep; the commented block below
# documents the 13-variable layout used by earlier model versions.
# NOTE(review): percent_infected is computed but not stored in the current
# six-variable layout -- possibly dead code.
for (t in 1:maxt) {
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,1,t]=round(S[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,2,t]=round(E[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,3,t]=round(In[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,4,t]=round(H[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,5,t]=round(C[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,6,t]=round(D[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,7,t]=round(R[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,8,t]=round(Ecum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,9,t]=round(Icum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,10,t]=round(Hcum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,11,t]=round(Ccum[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,12,t]=round(percent_infected[t])
# StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,13,t]=round(Dday[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,1,t]=round(H[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,2,t]=round(C[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,3,t]=round(D[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,4,t]=round(Dday[t])
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,5,t]=AugReduction
StateSpace[Rcrit_vec_i,Rhosp_vec_i,j,i,6,t]=stage5_weeks
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.