content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events_guildmemberupdate.r
\name{events.guild_member_update}
\alias{events.guild_member_update}
\title{Event, emits whenever a member updates its state}
\usage{
events.guild_member_update(data, client)
}
\arguments{
\item{data}{The event fields}
\item{client}{The client object}
}
\description{
Event, emits whenever a member updates its state
}
\section{Disclaimer}{
This event will return the guild id instead of the guild object if the
guild is not cached; the id can then be used to fetch the guild from the API.
Additionally, old_member will be returned as NA if it is not cached.
}
\section{Differences}{
This event will not return differences because it will be too expensive
of an operation.
}
\examples{
\dontrun{
client$emitter$on("GUILD_MEMBER_UPDATE", function(guild, old_member, new_member) {
cat("Old nick", old_member$nick, "New:", new_member$nick)
})
}
}
| /man/events.guild_member_update.Rd | no_license | TheOnlyArtz/Pirate | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events_guildmemberupdate.r
\name{events.guild_member_update}
\alias{events.guild_member_update}
\title{Event, emits whenever a member updates its state}
\usage{
events.guild_member_update(data, client)
}
\arguments{
\item{data}{The event fields}
\item{client}{The client object}
}
\description{
Event, emits whenever a member updates its state
}
\section{Disclaimer}{
This event will return the guild id instead of the guild object if the
guild is not cached; the id can then be used to fetch the guild from the API.
Additionally, old_member will be returned as NA if it is not cached.
}
\section{Differences}{
This event will not return differences because it will be too expensive
of an operation.
}
\examples{
\dontrun{
client$emitter$on("GUILD_MEMBER_UPDATE", function(guild, old_member, new_member) {
cat("Old nick", old_member$nick, "New:", new_member$nick)
})
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linkage-methods.R
\docType{methods}
\name{lva.internal}
\alias{lva.internal}
\alias{lva.internal,array-method}
\title{lva.internal}
\usage{
lva.internal(x, ...)
\S4method{lva.internal}{array}(
x,
grp,
element = 3,
type = "lm",
subject = NULL,
covariates = matrix(),
...
)
}
\arguments{
\item{x}{regionSummary array phased for maternal allele}
\item{...}{arguments to forward to internal functions}
\item{grp}{group 1-3 (1 for 0:0, 2 for 1:0 or 0:1, and 3 for 1:1)}
\item{element}{which column in x contains the values to use with lm.}
\item{type}{type of regression to use, e.g. "lm" (the default).}
\item{subject}{which samples belongs to the same individual}
\item{covariates}{add data.frame with covariates (only integers and numeric)}
}
\description{
make an almlof regression for arrays (internal function)
}
\details{
internal method that takes one array with results from regionSummary
and one matrix with group information for each risk SNP (based on phase).
Input and output objects can change format slightly in future.
}
\examples{
data(ASEset)
a <- ASEset
# Add phase
set.seed(1)
p1 <- matrix(sample(c(1,0),replace=TRUE, size=nrow(a)*ncol(a)),nrow=nrow(a), ncol(a))
p2 <- matrix(sample(c(1,0),replace=TRUE, size=nrow(a)*ncol(a)),nrow=nrow(a), ncol(a))
p <- matrix(paste(p1,sample(c("|","|","/"), size=nrow(a)*ncol(a), replace=TRUE), p2, sep=""),
nrow=nrow(a), ncol(a))
phase(a) <- p
#add alternative allele information
mcols(a)[["alt"]] <- inferAltAllele(a)
# in this example two overlapping subsets of snps in the ASEset defines the region
region <- split(granges(a)[c(1,2,2,3)], c(1,1,2,2))
rs <- regionSummary(a, region, return.class="array", return.meta=FALSE)
# use (change to generated riskSNP phase later)
phs <- array(c(phase(a,return.class="array")[1,,c(1, 2)],
phase(a,return.class="array")[2,,c(1, 2)]), dim=c(20,2,2))
grp <- matrix(2, nrow=dim(phs)[1], ncol=dim(phs)[2])
grp[(phs[,,1] == 0) & (phs[,,2] == 0)] <- 1
grp[(phs[,,1] == 1) & (phs[,,2] == 1)] <- 3
#only use mean.fr at the moment, which is col 3
lva.internal(x=assays(rs)[["rs1"]],grp=grp, element=3)
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{phase}
| /man/lva.internal.Rd | no_license | pappewaio/AllelicImbalance | R | false | true | 2,266 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linkage-methods.R
\docType{methods}
\name{lva.internal}
\alias{lva.internal}
\alias{lva.internal,array-method}
\title{lva.internal}
\usage{
lva.internal(x, ...)
\S4method{lva.internal}{array}(
x,
grp,
element = 3,
type = "lm",
subject = NULL,
covariates = matrix(),
...
)
}
\arguments{
\item{x}{regionSummary array phased for maternal allele}
\item{...}{arguments to forward to internal functions}
\item{grp}{group 1-3 (1 for 0:0, 2 for 1:0 or 0:1, and 3 for 1:1)}
\item{element}{which column in x contains the values to use with lm.}
\item{type}{type of regression to use, e.g. "lm" (the default).}
\item{subject}{which samples belongs to the same individual}
\item{covariates}{add data.frame with covariates (only integers and numeric)}
}
\description{
make an almlof regression for arrays (internal function)
}
\details{
internal method that takes one array with results from regionSummary
and one matrix with group information for each risk SNP (based on phase).
Input and output objects can change format slightly in future.
}
\examples{
data(ASEset)
a <- ASEset
# Add phase
set.seed(1)
p1 <- matrix(sample(c(1,0),replace=TRUE, size=nrow(a)*ncol(a)),nrow=nrow(a), ncol(a))
p2 <- matrix(sample(c(1,0),replace=TRUE, size=nrow(a)*ncol(a)),nrow=nrow(a), ncol(a))
p <- matrix(paste(p1,sample(c("|","|","/"), size=nrow(a)*ncol(a), replace=TRUE), p2, sep=""),
nrow=nrow(a), ncol(a))
phase(a) <- p
#add alternative allele information
mcols(a)[["alt"]] <- inferAltAllele(a)
# in this example two overlapping subsets of snps in the ASEset defines the region
region <- split(granges(a)[c(1,2,2,3)], c(1,1,2,2))
rs <- regionSummary(a, region, return.class="array", return.meta=FALSE)
# use (change to generated riskSNP phase later)
phs <- array(c(phase(a,return.class="array")[1,,c(1, 2)],
phase(a,return.class="array")[2,,c(1, 2)]), dim=c(20,2,2))
grp <- matrix(2, nrow=dim(phs)[1], ncol=dim(phs)[2])
grp[(phs[,,1] == 0) & (phs[,,2] == 0)] <- 1
grp[(phs[,,1] == 1) & (phs[,,2] == 1)] <- 3
#only use mean.fr at the moment, which is col 3
lva.internal(x=assays(rs)[["rs1"]],grp=grp, element=3)
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{phase}
|
# --- Load and clean the raw car price data ---------------------------------
# Strings are kept as character so the company names can be cleaned below.
car <- read.csv("CarPrice_Assignment.csv", stringsAsFactors = F, header = T)
library(dplyr)
library(stringr)
library(MASS)
library(car)
# NOTE(review): MASS masks dplyr::select(); qualify dplyr::select() if column
# selection is added later.

# Check the fraction of NA values in each column.
# funs() is deprecated since dplyr 0.8; use a lambda instead.
na_values <- car %>% summarise_all(~ sum(is.na(.) / n()))
View(na_values)
# No NA values

# Derive the car company as an independent variable: the company is the first
# word of CarName, so split the name at the first space.
name <- car$CarName
lname <- str_split_fixed(name, " ", 2)
colnames(lname) <- c("First_Name", "Last_Name")
# str_split_fixed() returns a character matrix; convert it to a data frame
# before pulling a column into `car`.
lname <- data.frame(lname)
car$Company <- lname$First_Name

# Normalise misspelled company names so they do not create spurious levels.
# The replacements are applied in the same order as the original script.
name_fixes <- c(vw = "volkswagen", vokswagen = "volkswagen",
                toyouta = "toyota", porcshce = "porsche",
                Nissan = "nissan", maxda = "mazda")
for (bad in names(name_fixes)) {
  car$Company <- gsub(bad, name_fixes[[bad]], car$Company)
}
# Convert to factor BEFORE summarising so summary() shows level counts
# (summary() on a character vector only reports length/class).
car$Company <- as.factor(car$Company)
summary(car$Company)

# Encode the two-level categoricals as 0/1 numeric indicators.
car$fueltype <- as.numeric(ifelse(car$fueltype == "gas", 1, 0))
car$aspiration <- as.numeric(ifelse(car$aspiration == "std", 1, 0))
car$doornumber <- as.numeric(ifelse(car$doornumber == "two", 1, 0))
car$enginelocation <- as.numeric(ifelse(car$enginelocation == "front", 1, 0))

# Map the spelled-out cylinder count to its numeric value via a lookup table.
# Anything not in the table falls back to 12 ("twelve"), matching the
# original nested-ifelse behaviour.
cyl_map <- c(two = 2, three = 3, four = 4, five = 5, six = 6, eight = 8)
car$cylindernumber <- ifelse(car$cylindernumber %in% names(cyl_map),
                             cyl_map[car$cylindernumber], 12)
car$cylindernumber <- as.numeric(car$cylindernumber)
# --- One-hot encode the remaining multi-level categoricals -----------------
# For each categorical column: build the model matrix, drop the intercept
# column, remove the original column, and append the dummy columns at the
# end of the data frame (same column order as the original copy-pasted
# cbind() stanzas, so later positional indexing is unaffected).
add_dummies <- function(df, var) {
  # model.matrix() creates one indicator column per level (minus baseline
  # once the intercept is dropped).
  dmy <- data.frame(model.matrix(reformulate(var), data = df))
  dmy <- dmy[, -1]  # drop the intercept column
  cbind(df[, setdiff(names(df), var)], dmy)
}

car_1 <- add_dummies(car, "carbody")
car_2 <- add_dummies(car_1, "drivewheel")
car_3 <- add_dummies(car_2, "enginetype")
car_4 <- add_dummies(car_3, "fuelsystem")
car_5 <- add_dummies(car_4, "Company")

# Drop identifiers that carry no predictive information: car_ID and the raw
# CarName (Company has already been extracted from it). Name-based removal
# replaces the original positional drops wrapped in a misused drop() call
# (drop() is for arrays and is a no-op on data frames).
car_5 <- car_5[, setdiff(names(car_5), c("car_ID", "CarName"))]
View(car_5)
# --- Derived metrics and train/test split ----------------------------------
#derived metrics
#compare the enginesize with citympg
car_5$engine_city <- car_5$enginesize/car_5$citympg
#compare the cylindernumber with citympg
car_5$city_cyl <- car_5$citympg/car_5$cylindernumber
#convenience: store car_5 under another variable name
price_car <- car_5
#separate training and testing dataset (70/30 split, seeded for
#reproducibility)
set.seed(100)
trainindices= sample(1:nrow(price_car), 0.7*nrow(price_car))
train = price_car[trainindices,]
test = price_car[-trainindices,]
#build the model with all variables
model1 <- lm(price~., data = train)
summary(model1)
#execute the stepAIC to obtain the most relevant variables
#(direction="both" allows variables to be added back as well as dropped)
step <- stepAIC(model1, direction="both")
# NOTE(review): every model built after stepAIC below is fit with
# data = price_car (the FULL data set) instead of data = train, which leaks
# the test rows into model fitting. Confirm and refit on `train`.
# --- Iterative variable elimination, phase 1: high-VIF variables -----------
# At each step: drop the variable with the highest VIF (and an insignificant
# p-value), refit, and re-check summary()/vif().
# NOTE(review): these models are fit on price_car (full data), not train —
# see the note above the stepAIC call.
#based on the stepAIC, Create the model2
model2 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
cylindernumber + boreratio + compressionratio + horsepower +
peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen + engine_city, data = price_car)
summary(model2)
#To find the multicollinearity, using VIF
vif(model2)
#Horsepower has the largest vif of 25.55 with max p value of 0.619
#lets build the model by removing the horsepower from the model2
model3 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen + engine_city, data = price_car)
vif(model3)
#Curbweight has the largest VIF of 23.77 and 0.002 p value
#lets build the model by removing the Curbweight from the model3
model4 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen + engine_city, data = price_car)
summary(model4)
vif(model4)
#Engine_City has the largest VIF of 17.192
#lets build the model by removing the Engine_city from the model4
model5 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model5)
vif(model5)
#CarbodySedan has the largest VIF of 12.166
#lets build the model by removing the carbodysedan from the model5
model6 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model6)
vif(model6)
#fuelsystemmpfi has the largest VIF of 7.07
#lets build the model by removing the fuelsystemmpfi from the previous model
model7 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model7)
vif(model7)
#citympg has the largest VIF of 6.227
#lets build the model by removing the citympg from the previous model
model8 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model8)
vif(model8)
#enginetypeohc has the largest VIF of 4.9889
#lets build the model by removing the enginetypeohc from the previous model
model9 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model9)
vif(model9)
#NOTE(review): the comment here was copy-pasted from the step above; the
#variable actually removed in model_10 is carwidth (high VIF in model9).
#lets build the model by removing carwidth from the previous model
model_10 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_10)
vif(model_10)
#fuelsystem2bbl has the largest VIF of 2.9141 with high pvalue
#lets build the model by removing the fuelsystem2bbl from the previous model
model_11 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_11)
vif(model_11)
#drivewheelfwd has the largest VIF of 2.5889 with high pvalue
#lets build the model by removing drivewheelfwd from the previous model
#(the original comment said fuelsystem2bbl — copy-paste error)
model_12 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_12)
vif(model_12)
#peakrpm has the largest VIF of 2.404 with high pvalue
#lets build the model by removing the peakrpm from the previous model
model_13 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_13)
vif(model_13)
#fuelsystemspdi has the largest VIF of 2.302 with high pvalue
#lets build the model by removing the fuelsystemspdi from the previous model
model_14 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_14)
vif(model_14)
#from model_14, there will be no high high vif value and p value variables - This is one of the final model
#all multicollinearity was deducted and removed with the help of vif function.
#Till now we considered the variable which has high vif and p values are removed,
#now consider only p value to remove the insignificant variables.
#from the model_14, enginetypedohcv has high p value of 0.688541,
#lets remove the enginetypedohcv from the previous model
model_15 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypel +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_15)
vif(model_15)
#after removing the enginetypedohcv from the model_14, adjusted rsquare increases from 0.8793 to 0.8798
#from the model_15, carbodywagon has high p value of 0.618468,
#lets remove the carbodywagon from the previous model
model_16 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + enginetypel +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_16)
vif(model_16)
#after removing the carbodywagon from the model_15, adjusted rsquare increases from 0.8798 to 0.8803
#from the model_16, carbodyhardtop has high p value of 0.544754,
#lets remove the carbodyhardtop from the previous model
model_17 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhatchback + enginetypel +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_17)
vif(model_17)
#after removing the carbodyhardtop from the model_16, adjusted rsquare increases from 0.8803 to 0.8807
#from the model_17, enginetypel has high p value of 0.356290,
#lets remove the enginetypel from the previous model
model_18 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhatchback +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_18)
vif(model_18)
#after removing the enginetypel from the model_17, adjusted rsquare increases from 0.8807 to 0.8808
#from the model_18, compressionratio has high p value of 0.200736,
#lets remove the compressionratio from the previous model
model_19 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + carbodyhatchback +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_19)
vif(model_19)
#after removing the compressionratio from the model_18, adjusted rsquare little decreases from 0.8808 to 0.8804
#from the model_19, carbodyhatchback has high p value of 0.217082,
#lets remove the carbodyhatchback from the previous model
model_20 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companychevrolet + Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_20)
vif(model_20)
#after removing the carbodyhatchback from the model_19, adjusted rsquare little decreases from 0.8804 to 0.8801
#from the model_20, Companychevrolet has high p value of 0.106923,
#lets remove the Companychevrolet from the previous model
model_21 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota + Companyvolkswagen, data = price_car)
summary(model_21)
vif(model_21)
#after removing the Companychevrolet from the model_20, adjusted rsquare little decreases from 0.8801 to 0.879
#from the model_21, Companyvolkswagen has high p value of 0.078943,
#lets remove the Companyvolkswagen from the previous model
model_22 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companyisuzu + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota , data = price_car)
summary(model_22)
vif(model_22)
#after removing the Companyvolkswagen from the model_21, adjusted rsquare little decreases from 0.879 to 0.8777
#(the trailing value above was missing in the original comment; 0.8777 is
#inferred from the following step's comment — verify against summary(model_22))
#from the model_22, Companyisuzu has high p value of 0.10312,
#lets remove the Companyisuzu from the previous model
model_23 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
Companytoyota , data = price_car)
summary(model_23)
vif(model_23)
#after removing the Companyisuzu from the model_22, adjusted rsquare little decreases from 0.8777 to 0.8766
#Now, we get the model with only significant variables. In this significant variables some of the variables
#are less significant. In order to make the model to strong, lets remove the less significant variables
#from the model_23, Companyrenault has high p value of 0.049975,
#lets remove the Companyrenault from the previous model
model_24 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companymazda +
Companymitsubishi + Companynissan + Companyplymouth +
Companytoyota , data = price_car)
summary(model_24)
vif(model_24)
#after removing the Companyrenault from the model_23, adjusted rsquare little decreases from 0.8766 to 0.8747
#from the model_24, Companymazda has high p value of 0.029303,
#lets remove the Companymazda from the previous model
model_25 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companymitsubishi + Companynissan + Companyplymouth +
Companytoyota , data = price_car)
summary(model_25)
vif(model_25)
#after removing the Companymazda from the model_24, adjusted rsquare little decreases from 0.8747 to 0.8722
#from the model_25, Companyplymouth has high p value of 0.015678,
#lets remove the Companyplymouth from the previous model
model_26 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companydodge + Companymitsubishi + Companynissan + Companytoyota , data = price_car)
summary(model_26)
vif(model_26)
#after removing the Companyplymouth from the model_25, adjusted rsquare little decreases from 0.8722 to 0.8689
#from the model_26, Companydodge has high p value of 0.018463,
#lets remove the Companydodge from the previous model
model_27 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companymitsubishi + Companynissan + Companytoyota , data = price_car)
summary(model_27)
vif(model_27)
#after removing the Companydodge from the model_26, adjusted rsquare little decreases from 0.8689 to 0.8657
#from the model_27, Companymitsubishi has high p value of 0.002619,
#lets remove the Companymitsubishi from the previous model
model_28 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companynissan + Companytoyota , data = price_car)
summary(model_28)
vif(model_28)
#after removing the Companymitsubishi from the model_27, adjusted rsquare little decreases from 0.8657 to 0.86
#from the model_28, Companytoyota has high p value of 0.001206,
#lets remove the Companytoyota from the previous model
model_29 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
Companynissan, data = price_car)
summary(model_29)
vif(model_29)
#after removing the Companytoyota from the model_28, adjusted rsquare little decreases from 0.86 to 0.853
#from the model_29, Companynissan has a high p value (the 0.001206 figure in
#the original comment duplicates the previous step's value — verify against
#summary(model_29)),
#lets remove the Companynissan from the previous model
model_30 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
enginetypeohcf + enginetyperotor + Companybmw + Companybuick, data = price_car)
summary(model_30)
vif(model_30)
#after removing the Companynissan from the model_29, adjusted rsquare little decreases from 0.853 to 0.8463
#Model 30 has low vif and p value and which shows strong model compare to all other models.
#predicting the results in test dataset
#NOTE(review): test[,-20] drops a column by position (presumably price);
#predict.lm ignores extra columns in newdata, so predict(model_30, test)
#would be equivalent and safer — confirm column 20 is price before relying
#on the positional drop.
Predict_1 <- predict(model_30,test[,-20])
test$test_price <- Predict_1
# Now, we need to test the r square between actual and predicted price.
r <- cor(test$price,test$test_price)
rsquared <- cor(test$price,test$test_price)^2
rsquared
#variables are removed based on the high VIF and p value
#In the training dataset, Adjusted r square value of final model 30 is 0.8463
#(NOTE(review): model_30 was actually fit on the full price_car data, not
#the training split — see the note above; the label "training" is inaccurate)
#In the Test dataset, Adjusted r square value is 0.80626
#By comparing actual vs predicted, model predicted 80% authentication of actual price.
#Variables which contributed to the model are
#aspiration
#enginelocation
#cylindernumber
#boreratio
#enginetypeohcf
#enginetyperotor
#companybmw
#companybuick
| /car_price_lm.R | no_license | raghavan-ds/linear-regression-R | R | false | false | 25,948 | r | #upload the data file into R
car <- read.csv("CarPrice_Assignment.csv", stringsAsFactors = F, header = T)
library(dplyr)
library(stringr)
library(MASS)
library(car)
#check the na values in the file
na_values <- car %>% summarise_all(funs(sum(is.na(.)/n())))
View(na_values)
#No na values
# --- Extract the manufacturer from CarName and encode categorical columns ----
#To add the Car company name as a independent vector, need to split the company name from the car name
name <- car$CarName
lname <- str_split_fixed(name, " ", 2)
colnames(lname) <- c("First_Name", "Last_Name")
#since lname is an atomic vector, we cannot pass the column from lname to car dataset. have to convert the
#atomic vector into dataframe
lname <- data.frame(lname)
car$Company <- lname$First_Name
#summary of car$company, if you see that there will lot of spelling mistakes which makes more variables
#In order to clean the data to make sure in correct spelling to eradicate the redundant
# Normalise misspelled manufacturer names so each company maps to a single level
car$Company <- gsub("vw","volkswagen", car$Company)
car$Company <- gsub("vokswagen","volkswagen", car$Company)
car$Company <- gsub("toyouta","toyota", car$Company)
car$Company <- gsub("porcshce","porsche", car$Company)
car$Company <- gsub("Nissan","nissan", car$Company)
car$Company <- gsub("maxda","mazda", car$Company)
# NOTE(review): summary() on a character vector only reports length/class;
# it would be informative only after the as.factor() conversion below
summary(car$Company)
#convert company into factors
car$Company <- as.factor(car$Company)
#dummy variable creation
#convert the gas type into numeric variable by assigning 1 for gas and diesel for 0
car$fueltype <- ifelse(car$fueltype == "gas",1,0)
car$fueltype <- as.numeric(car$fueltype)
#convert the aspiration into numeric variable
car$aspiration <- ifelse(car$aspiration == "std",1,0)
car$aspiration <- as.numeric(car$aspiration)
#convert the door into numeric variable
car$doornumber <- ifelse(car$doornumber == "two",1,0)
car$doornumber <- as.numeric(car$doornumber)
#convert the engine location into numeric
car$enginelocation <- ifelse(car$enginelocation == "front",1,0)
car$enginelocation <- as.numeric(car$enginelocation)
#convert the cylindernumber into numeric
# Map the spelled-out cylinder count to its integer value; any level not
# matched explicitly (i.e. "twelve") falls through to 12
car$cylindernumber <- ifelse(car$cylindernumber == "two",2,ifelse(car$cylindernumber == "three",3,ifelse(car$cylindernumber == "four",4,ifelse(car$cylindernumber == "five",5,ifelse(car$cylindernumber == "six",6,ifelse(car$cylindernumber == "eight",8,12))))))
car$cylindernumber <- as.numeric(car$cylindernumber)
# --- One-hot encode the remaining multi-level factors via model.matrix() -----
# In each step the first model.matrix column (intercept) is dropped with [,-1],
# and the original factor column is removed from the data set by position.
# NOTE(review): the positional indices (-7, -13, -15, -23) are fragile — they
# silently break if the column order of the input ever changes.
# Create the dummy variable for carbody variable
dummy_1 <- data.frame(model.matrix( ~carbody, data = car))
View(dummy_1)
dummy_1 <- dummy_1[,-1]
# Combine the dummy variables and the numeric columns of car dataset, in a new dataset called car_1
car_1 <- cbind(car[,-7], dummy_1)
# Create the dummy variable for drivewheel variable
dummy_2 <- data.frame(model.matrix(~drivewheel, data = car_1))
View(dummy_2)
dummy_2 <- dummy_2[,-1]
# Combine the dummy variables and the numeric columns of car dataset, in a new dataset called car_2
car_2 <- cbind(car_1[,-7], dummy_2)
# Create the dummy variable for enginetype variable
dummy_3 <- data.frame(model.matrix(~enginetype, data = car_2))
View(dummy_3)
dummy_3 <- dummy_3[,-1]
# Combine the dummy variables and the numeric columns of car dataset, in a new dataset called car_3
car_3 <- cbind(car_2[,-13], dummy_3)
# Create the dummy variable for fuelsystem variable
dummy_4 <- data.frame(model.matrix(~fuelsystem, data = car_3))
View(dummy_4)
dummy_4 <- dummy_4[,-1]
# Combine the dummy variables and the numeric columns of car dataset, in a new dataset called car_4
car_4 <- cbind(car_3[,-15], dummy_4)
# Create the dummy variable for company variable
dummy_5 <- data.frame(model.matrix(~Company, data = car_4))
View(dummy_5)
dummy_5 <- dummy_5[,-1]
# Combine the dummy variables and the numeric columns of car dataset, in a new dataset called car_5
car_5 <- cbind(car_4[,-23], dummy_5)
#drop the irrelevant variables(car_ID and carName) from the dataset
# NOTE(review): drop() removes the dim attribute of arrays — on a data.frame it
# is effectively a no-op; the real work is done by the negative column index.
# After removing column 1, the indices shift, so [,-2] removes what was
# originally the third column (CarName).
car_5 <- drop(car_5[,-1])
car_5 <- drop(car_5[,-2])
View(car_5)
#derived metrics
#compare the enginesize with citympg
car_5$engine_city <- car_5$enginesize/car_5$citympg
#compare the cylindernumber with citympg
car_5$city_cyl <- car_5$citympg/car_5$cylindernumber
#conveninence store the car_5 in other variable name
price_car <- car_5
#seperate training and testing dataset
set.seed(100)
trainindices= sample(1:nrow(price_car), 0.7*nrow(price_car))
train = price_car[trainindices,]
test = price_car[-trainindices,]
#build the model with all variables
model1 <- lm(price~., data = train)
summary(model1)
#execute the stepAIC to obtain the most relevant variables
step <- stepAIC(model1, direction="both")
#based on the stepAIC, Create the model2
# NOTE(review): model2 (and every subsequent model) is fitted with
# data = price_car, i.e. the FULL data set, not the training split — the later
# "test set" evaluation is therefore not a true out-of-sample check.
# Presumably data = train was intended here; verify before reuse.
model2 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
             cylindernumber + boreratio + compressionratio + horsepower +
             peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
             carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
             fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen + engine_city, data = price_car)
summary(model2)
#To find the multicollinearity, using VIF
vif(model2)
# --- Backward elimination driven by VIF (multicollinearity) ------------------
# At each step the predictor with the highest VIF (and a non-significant
# p-value) is dropped and the model refitted.
# NOTE(review): all models below are still fitted on the full data set
# (data = price_car) rather than the training split — see note at model2.
#Horsepower has the largest vif of 25.55 with max p value of 0.619
#lets build the model by removing the horsepower from the model2
model3 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
             cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
             carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
             fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen + engine_city, data = price_car)
vif(model3)
#Curbweight has the largest VIF of 23.77 and 0.002 p value
#lets build the model by removing the Curbweight from the model3
model4 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
             carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
             fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen + engine_city, data = price_car)
summary(model4)
vif(model4)
#Engine_City has the largest VIF of 17.192
#lets build the model by removing the Engine_city from the model4
model5 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodysedan +
             carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
             fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen, data = price_car)
summary(model5)
vif(model5)
#CarbodySedan has the largest VIF of 12.166
#lets build the model by removing the carbodysedan from the model5
model6 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl +
             fuelsystemmpfi + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen, data = price_car)
summary(model6)
vif(model6)
#fuelsystemmpfi has the largest VIF of 7.07
#lets build the model by removing the fuelsystemmpfi from the previous model
model7 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + citympg + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen, data = price_car)
summary(model7)
vif(model7)
#citympg has the largest VIF of 6.227
#lets build the model by removing the citympg from the previous model
model8 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohc + enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen, data = price_car)
summary(model8)
vif(model8)
#enginetypeohc has the largest VIF of 4.9889
#lets build the model by removing the enginetypeohc from the previous model
model9 <- lm(price ~ aspiration + enginelocation + carwidth + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
             enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
             Companychevrolet + Companydodge + Companyisuzu + Companymazda +
             Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
             Companytoyota + Companyvolkswagen, data = price_car)
summary(model9)
vif(model9)
#enginetypeohc has the largest VIF of 4.9889
#lets build the model by removing the enginetypeohc from the previous model
# NOTE(review): the two comment lines above are stale copy-paste —
# model_10 actually drops carwidth from model9, not enginetypeohc.
model_10 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
               enginetypeohcf + enginetyperotor + fuelsystem2bbl + fuelsystemspdi + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_10)
vif(model_10)
#fuelsystem2bbl has the largest VIF of 2.9141 with high pvalue
#lets build the model by removing the fuelsystem2bbl from the previous model
model_11 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + drivewheelfwd + enginetypedohcv + enginetypel +
               enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_11)
vif(model_11)
#drivewheelfwd has the largest VIF of 2.5889 with high pvalue
#lets build the model by removing the fuelsystem2bbl from the previous model
# NOTE(review): stale comment — model_12 removes drivewheelfwd, not fuelsystem2bbl.
model_12 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + peakrpm + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
               enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_12)
vif(model_12)
#peakrpm has the largest VIF of 2.404 with high pvalue
#lets build the model by removing the peakrpm from the previous model
model_13 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
               enginetypeohcf + enginetyperotor + fuelsystemspdi + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_13)
vif(model_13)
#fuelsystemspdi has the largest VIF of 2.302 with high pvalue
#lets build the model by removing the fuelsystemspdi from the previous model
model_14 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypedohcv + enginetypel +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_14)
vif(model_14)
# --- Backward elimination driven by p-values (models 15-23) ------------------
# Multicollinearity is resolved; from here the least significant predictor is
# dropped at each step and the change in adjusted R-squared is tracked.
#from model_14, there will be no high high vif value and p value variables - This is one of the final model
#all multicollinearity was deducted and removed with the help of vif function.
#Till now we considered the variable which has high vif and p values are removed,
#now consider only p value to remove the insignificant variables.
#from the model_14, enginetypedohcv has high p value of 0.688541,
#lets remove the enginetypedohcv from the previous model
model_15 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + carbodywagon + enginetypel +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_15)
vif(model_15)
#after removing the enginetypedohcv from the model_14, adjusted rsquare increases from 0.8793 to 0.8798
#from the model_15, carbodywagon has high p value of 0.618468,
#lets remove the carbodywagon from the previous model
model_16 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhardtop + carbodyhatchback + enginetypel +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_16)
vif(model_16)
#after removing the carbodywagon from the model_15, adjusted rsquare increases from 0.8798 to 0.8803
#from the model_16, carbodyhardtop has high p value of 0.544754,
#lets remove the carbodyhardtop from the previous model
model_17 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhatchback + enginetypel +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_17)
vif(model_17)
#after removing the carbodyhardtop from the model_16, adjusted rsquare increases from 0.8803 to 0.8807
#from the model_17, enginetypel has high p value of 0.356290,
#lets remove the enginetypel from the previous model
model_18 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + compressionratio + carbodyhatchback +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_18)
vif(model_18)
#after removing the enginetypel from the model_17, adjusted rsquare increases from 0.8807 to 0.8808
#from the model_18, compressionratio has high p value of 0.200736,
#lets remove the compressionratio from the previous model
model_19 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio + carbodyhatchback +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_19)
vif(model_19)
#after removing the compressionratio from the model_18, adjusted rsquare little decreases from 0.8808 to 0.8804
#from the model_19, carbodyhatchback has high p value of 0.217082,
#lets remove the carbodyhatchback from the previous model
model_20 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companychevrolet + Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_20)
vif(model_20)
#after removing the carbodyhatchback from the model_19, adjusted rsquare little decreases from 0.8804 to 0.8801
#from the model_20, Companychevrolet has high p value of 0.106923,
#lets remove the Companychevrolet from the previous model
model_21 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota + Companyvolkswagen, data = price_car)
summary(model_21)
vif(model_21)
#after removing the Companychevrolet from the model_20, adjusted rsquare little decreases from 0.8801 to 0.879
#from the model_21, Companyvolkswagen has high p value of 0.078943,
#lets remove the Companyvolkswagen from the previous model
model_22 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companyisuzu + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota , data = price_car)
summary(model_22)
vif(model_22)
#after removing the Companyvolkswagen from the model_21, adjusted rsquare little decreases from 0.879 to
#from the model_22, Companyisuzu has high p value of 0.10312,
#lets remove the Companyisuzu from the previous model
model_23 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth + Companyrenault +
               Companytoyota , data = price_car)
summary(model_23)
vif(model_23)
#after removing the Companyisuzu from the model_22, adjusted rsquare little decreases from 0.8777 to 0.8766
# --- Final pruning of marginally significant predictors (models 24-30) -------
#Now, we get the model with only significant variables. In this significant variables some of the variables
#are less significant. In order to make the model to strong, lets remove the less significant variables
#from the model_23, Companyrenault has high p value of 0.049975,
#lets remove the Companyrenault from the previous model
model_24 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companymazda +
               Companymitsubishi + Companynissan + Companyplymouth +
               Companytoyota , data = price_car)
summary(model_24)
vif(model_24)
#after removing the Companyrenault from the model_23, adjusted rsquare little decreases from 0.8766 to 0.8747
#from the model_24, Companymazda has high p value of 0.029303,
#lets remove the Companymazda from the previous model
model_25 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companymitsubishi + Companynissan + Companyplymouth +
               Companytoyota , data = price_car)
summary(model_25)
vif(model_25)
#after removing the Companymazda from the model_24, adjusted rsquare little decreases from 0.8747 to 0.8722
#from the model_25, Companyplymouth has high p value of 0.015678,
#lets remove the Companyplymouth from the previous model
model_26 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companydodge + Companymitsubishi + Companynissan + Companytoyota , data = price_car)
summary(model_26)
vif(model_26)
#after removing the Companyplymouth from the model_25, adjusted rsquare little decreases from 0.8722 to 0.8689
#from the model_26, Companydodge has high p value of 0.018463,
#lets remove the Companydodge from the previous model
model_27 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companymitsubishi + Companynissan + Companytoyota , data = price_car)
summary(model_27)
vif(model_27)
#after removing the Companydodge from the model_26, adjusted rsquare little decreases from 0.8689 to 0.8657
#from the model_27, Companymitsubishi has high p value of 0.002619,
#lets remove the Companymitsubishi from the previous model
model_28 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companynissan + Companytoyota , data = price_car)
summary(model_28)
vif(model_28)
#after removing the Companymitsubishi from the model_27, adjusted rsquare little decreases from 0.8657 to 0.86
#from the model_28, Companytoyota has high p value of 0.001206,
#lets remove the Companytoyota from the previous model
model_29 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick +
               Companynissan, data = price_car)
summary(model_29)
vif(model_29)
#after removing the Companytoyota from the model_28, adjusted rsquare little decreases from 0.86 to 0.853
#from the model_29, Companynissan has high p value of 0.001206,
#lets remove the Companynissan from the previous model
model_30 <- lm(price ~ aspiration + enginelocation + cylindernumber + boreratio +
               enginetypeohcf + enginetyperotor + Companybmw + Companybuick, data = price_car)
summary(model_30)
vif(model_30)
#after removing the Companynissan from the model_29, adjusted rsquare little decreases from 0.853 to 0.8463
# Model 30: every remaining predictor has low VIF and a significant p-value,
# making it the strongest candidate among all fitted models.
# Predict prices for the hold-out test set.
# (column 20 is dropped before prediction — presumably the price column; verify)
Predict_1 <- predict(model_30, test[, -20])
test$test_price <- Predict_1
# Validation statistic: correlation between actual and predicted price,
# and its square (the share of variance explained on the test set)
r <- cor(test$price, test$test_price)
rsquared <- r^2
rsquared
#variables are removed based on the high VIF and p value
#In the training dataset, Adjusted r square value of final model 30 is 0.8463
#In the test dataset, the squared correlation between actual and predicted price is 0.80626,
#i.e. the model explains about 80% of the variance in the actual price.
#Variables which contributed to the model are
#aspiration
#enginelocation
#cylindernumber
#boreratio
#enginetypeohcf
#enginetyperotor
#companybmw
#companybuick
|
library(tidyverse)
library(modelr)
library(ggplot2)
library(lme4)
# Load the study metadata; keep strings as character
df <- read.csv("../Sawyer_Research_Data_Analysis/MetaData_exc.csv", stringsAsFactors = FALSE)
names(df)
glimpse(df)
# Separate the sex
# NOTE(review): MaleT/FemaleT are computed here but never used below
MaleT<- grep("^M", df$Sex)
FemaleT<- grep("^F", df$Sex)
# Remove Controls and blanks from group
df <- subset(df, Sex != "")
# NOTE(review): this compares against the literal string "NA"; if Subject holds
# real NA values the comparison yields NA and subset() drops those rows anyway,
# but !is.na(df$Subject) would state the intent directly
df <- subset(df, Subject != "NA")
glimpse(df)
#### My df is jack I need to chance it #####
# NOTE(review): 'Runs' is never defined in this script — everything below fails
# unless it already exists in the workspace; presumably it should be built from df
# Change categories
Runs$Fitness_Level <- as.factor(Runs$Fitness_Level)
Runs$ILP_Speaking <- as.numeric(Runs$ILP_Speaking)
Runs$ILP_Reading_and_writing <- as.numeric(Runs$ILP_Reading_and_writing)
Runs$ILP_time_management <- as.numeric(Runs$ILP_time_management)
# Quick Plots
SpeakingPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_Speaking)) +
  geom_bar(stat="identity")
SpeakingPlot
ReadingPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_Reading_and_writing)) +
  geom_bar(stat="identity")
ReadingPlot
ManagementPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_time_management)) +
  geom_bar(stat="identity")
ManagementPlot
# Test Fitness Level ~ ILP
# NOTE(review): summing three response variables fits a single lm on their sum;
# if separate effects are wanted, fit three models (or a multivariate lm)
RunsILP = lm(ILP_Speaking + ILP_Reading_and_writing + ILP_time_management ~ Fitness_Level, data = Runs)
summary(RunsILP)
## Check validity ##
# Residuals: QQ plot against the normal distribution
eps <- residuals(RunsILP)
qqnorm(eps)
qqline(eps)
# Homoscedasticity: residuals vs fitted values
yhat <- fitted(RunsILP)
plot(yhat,eps)
abline(h=0)
| /Memory.R | no_license | jzushi/RBFCCF | R | false | false | 1,398 | r | library(tidyverse)
library(modelr)
library(ggplot2)
library(lme4)
# Load the study metadata; keep strings as character
df <- read.csv("../Sawyer_Research_Data_Analysis/MetaData_exc.csv", stringsAsFactors = FALSE)
names(df)
glimpse(df)
# Separate the sex
# NOTE(review): MaleT/FemaleT are computed here but never used below
MaleT<- grep("^M", df$Sex)
FemaleT<- grep("^F", df$Sex)
# Remove Controls and blanks from group
df <- subset(df, Sex != "")
# NOTE(review): this compares against the literal string "NA"; if Subject holds
# real NA values the comparison yields NA and subset() drops those rows anyway,
# but !is.na(df$Subject) would state the intent directly
df <- subset(df, Subject != "NA")
glimpse(df)
#### My df is jack I need to chance it #####
# NOTE(review): 'Runs' is never defined in this script — everything below fails
# unless it already exists in the workspace; presumably it should be built from df
# Change categories
Runs$Fitness_Level <- as.factor(Runs$Fitness_Level)
Runs$ILP_Speaking <- as.numeric(Runs$ILP_Speaking)
Runs$ILP_Reading_and_writing <- as.numeric(Runs$ILP_Reading_and_writing)
Runs$ILP_time_management <- as.numeric(Runs$ILP_time_management)
# Quick Plots
SpeakingPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_Speaking)) +
  geom_bar(stat="identity")
SpeakingPlot
ReadingPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_Reading_and_writing)) +
  geom_bar(stat="identity")
ReadingPlot
ManagementPlot <- ggplot(data = Runs, aes(x = Fitness_Level, y = ILP_time_management)) +
  geom_bar(stat="identity")
ManagementPlot
# Test Fitness Level ~ ILP
# NOTE(review): summing three response variables fits a single lm on their sum;
# if separate effects are wanted, fit three models (or a multivariate lm)
RunsILP = lm(ILP_Speaking + ILP_Reading_and_writing + ILP_time_management ~ Fitness_Level, data = Runs)
summary(RunsILP)
## Check validity ##
# Residuals: QQ plot against the normal distribution
eps <- residuals(RunsILP)
qqnorm(eps)
qqline(eps)
# Homoscedasticity: residuals vs fitted values
yhat <- fitted(RunsILP)
plot(yhat,eps)
abline(h=0)
##This code is used to process the meteorological data and climate analysis for the tree-ring stable oxygen isotope extreme values
## The aims are:
### 1. Process the meteorological data and climate response.
### 2. Detect the signal of the tree-ring stable oxygen isotope extreme
### 3. Climate reconstruction and analysis
###
## Author: GB Xu, xgb234@lzb.ac.cn
## Date: 2019-6-14
## Part 0 Initial call the packages-----
library(openxlsx)
library(dplyr)
library(reshape2)
library(ggplot2)
library(treeclim)
library(grid)
library(dplR)
library(treeclim)
library(MASS)
library(yhat)
library(ggpubr)
## Part 1. Process the climate data --------
## 1.1 read and load the climate data-----
mdata<-read.table("E:/Rwork/Freezingrain/S201806051426312322400.txt",header = TRUE)
# Keep only records for station 57776
ny.mdata<-subset(mdata,mdata$V01000==57776)
# 32766 appears to be the missing-value sentinel of this data source — TODO confirm
ny.mdata[ny.mdata==32766]<-NA
ny.mymdata.full<-subset(ny.mdata,select = c(1:3,11,17,21,23,26,6,14,5))
varname<-c("month","station","year","pre.day",
           "tmean","water.pressure","rh","pre","tmin","tmax","ssd")
colnames(ny.mymdata.full)<-varname
# Monthly climatology over 1953 onwards
# NOTE(review): summarise_each()/funs() are deprecated dplyr idioms;
# summarise(across(everything(), ...)) is the modern equivalent
ny.mymdata.mean <- ny.mymdata.full %>%
  filter(year>1952)%>%
  group_by(month)%>%
  summarise_each(funs(mean(.,na.rm=TRUE)))
## 1.1.1 processing the missing data and write as ssd1 and evp.-----
library(mice)
# NOTE(review): 'ny.mymdata' is not defined anywhere above (only
# ny.mymdata.full exists) — this line fails in a fresh session; presumably a
# 'ny.mymdata <- ny.mymdata.full' (or similar) assignment was lost
imp<-mice(ny.mymdata[,c(1,3,11)],10)
fit<-with(imp,lm(ssd~month))
pooled<-pool(fit)
# Take the 3rd of the 10 imputed data sets for the gap-filled sunshine series
result4=complete(imp,action=3)
ny.mymdata$ssd1<-result4$ssd
evp<-read.xlsx("E:/Rwork/Freezingrain/evp57776.xlsx")
head(evp)
evp$V8[evp$V8 == 32766] <- NA
# Monthly mean evaporation (V5 = year, V6 = month, V8 = evaporation)
evp.mean<-evp %>% group_by(V5,V6)%>%
  summarise(mean.evp=mean(V8,na.rm=TRUE))
evp.mean<-data.frame(evp.mean)
colnames(evp.mean)<-c('year','month','evp')
head(evp.mean)
# Same mice imputation for the evaporation series
imp<-mice(evp.mean,10)
fit<-with(imp,lm(evp~month))
pooled<-pool(fit)
result4=complete(imp,action=3)
ny.mymdata$evp<-result4$evp[c(1:773)]
ny.mymdata<-subset(ny.mymdata,year>1952 & year<2015)
## precipitation, temperature and water vapor pressure unit is 0.1..
##### 1.1.2 calculate the VPD based on the temperature and RH----
# Magnus-type saturation vapour pressure (Bolton-style coefficients);
# temperature is stored in 0.1 degC, hence the *0.1 rescale; ea_o is in hPa
ea_o=6.112*exp(17.67*(ny.mymdata$tmean*0.1)/((ny.mymdata$tmean*0.1)+243.5))# The unit of tem should be degress, the unit of ea is hpa.
# VPD = saturation pressure times the saturation deficit (1 - RH/100)
vpd <- ea_o*(1-ny.mymdata$rh/100)
ny.mymdata$vpd <- vpd
#1.1.3 plot the climtagraph at month------
library(plotrix)
### calculate the ratio between y1 and y2
# Linear mapping so temperature can share the precipitation axis:
# d = slope (temperature per mm), c = offset of the secondary axis
preclim<-c(50,300)
tclim<-c(0.2,25)
d<-diff(tclim)/diff(preclim)
# NOTE(review): this offset uses *d while the analogous humidity offset below
# uses /s — check which form is intended (c also shadows base::c as a variable
# name, though function lookup still resolves c(...) correctly)
c<-preclim[1]-tclim[1]*d
# Convert stored 0.1-units to mm and degC
ny.mymdata.mean$pre1<-ny.mymdata.mean$pre/10
ny.mymdata.mean$tmean1<-ny.mymdata.mean$tmean/10
# Climatograph: precipitation bars + temperature line on a secondary axis
# (mythemeplot() is a custom theme defined elsewhere in the project)
clima<-ggplot(data=ny.mymdata.mean,aes(x=month))+
  geom_bar(aes(y=pre1),
           stat = "identity",position = "identity")+
  geom_line (aes(y=c+(tmean1)/d),col="red")+
  geom_point(aes(y=c+(tmean1)/d),col="red")+
  xlab("Month")+
  scale_y_continuous("Precipitation (mm)",
                     sec.axis = sec_axis( ~ (. - c)*d,
                                          name = "Temperature (โ)"),
                     expand=c(0.01,0.05))+
  scale_x_continuous("Month", breaks = 1:12,expand=c(0.01,0.05)) +
  mythemeplot()+
  theme(plot.title = element_text(hjust = 0.5))+
  theme(axis.line.y.right = element_line(color = "red"),
        axis.ticks.y.right = element_line(color = "red"),
        axis.text.y.right = element_text(color = "red"),
        axis.title.y.right = element_text(color = "red")) +
  theme(plot.margin = unit(c(0,-0.2,0,0),"lines"))
# NOTE(review): the secondary-axis label above looks mojibake-garbled;
# likely intended as the degree-Celsius sign "\u2103"
# Same two-axis trick for sunshine duration (bars) vs relative humidity (line)
ssdclim<-c(30,230)
rhclim <-c(70,95)
s<-diff(rhclim)/diff(ssdclim) #3 becareful the relationship, y2 and y1
r<-ssdclim[1]-rhclim[1]/s ## the relationship scale between rh and ssd.
climb<-ggplot(data=ny.mymdata.mean,aes(x=month))+
  geom_bar( aes(y=ssd/10),
            stat = "identity",position = "identity")+
  geom_line (aes(y=r+(rh)/s),col="blue")+
  geom_point(aes(y=r+(rh)/s),col="blue")+
  xlab("Month")+
  scale_y_continuous("SSD (h)",
                     #limits = c(50,400),
                     sec.axis = sec_axis(~ (. - r)*s,
                                         name = "Relative humidity (%)"),
                     expand=c(0.01,0.05) ) +
  scale_x_continuous("Month", breaks = 1:12,
                     expand=c(0.01,0.05)) +
  mythemeplot()+
  theme(plot.title = element_text(hjust = 0.5))+
  theme(axis.line.y.right = element_line(color = "blue"),
        axis.ticks.y.right = element_line(color = "blue"),
        axis.text.y.right = element_text(color = "blue"),
        axis.title.y.right = element_text(color = "blue")) +
  theme(plot.margin = unit(c(0,-0.1,0,0),"lines"))
## 1.1.4 load the scPDSI data from CRU---
crupdsi<-read.table("./cru/iscpdsi_112.5-112.7E_27.27-27.5N_n.dat",
                    header = FALSE)
# NOTE(review): 'mon' (the column-name vector, presumably year + 12 months) is
# not defined in this file section; the subset below relies on it providing a
# 'year' column — verify where mon is created
colnames(crupdsi)<-mon
crupdsi <- subset(crupdsi,year<2015)
# 1.2 compare the d18O data between ISOGSM model and observation-----
## The precipitation d18O data from ISOGSM model
## The precipitation data from the GNIP Changsha station
#### 1.2.1 Process the d18O data from Changsha Station-----
oxy.changsha <- read.xlsx("./rawdata/wiser_gnip-monthly-cn-gnipm.xlsx",
                          sheet = "Data",colNames = TRUE)
head(oxy.changsha)
# Keep year (SampleName), month, and the O18 measurement; rename to the
# Var1/Var2/value convention produced by reshape2::melt so both sources align
oxy.changsha.reshape <- subset(oxy.changsha,select=c(SampleName, month, O18))
colnames(oxy.changsha.reshape) <- c("Var1","Var2","value")
##split the data from GNIP
oxy.changsha.reshape.1999 <-subset(oxy.changsha.reshape,Var1>1999)
#### 1.2.2 Process the d18O data from ISOGSM data-----
#### a. for the precipitation -----
data <- read.delim("F:/IsoGSM/x061y062_ensda_monthly.dat",header = FALSE)
# Drop the two header rows and the first (index) column
data1<-data[c(-1,-2),c(-1)]
data1.ts<-ts(data1,start = c(1871,1),frequency = 12)
# delta18O of precipitation from the isotope ratio columns (V6/V5)
p.oxyts<-ts((data1$V6/data1$V5-1)*1000,start = c(1871,1),frequency = 12)
p.oxy<-(data1$V6/data1$V5-1)*1000
p.oxy[abs(p.oxy)>13]<-NA ## remove the outliers, set the threshold is abs(13), which is based on the mean value of multi-year observation.
p.rate<-matrix(data1$V5,ncol=12,byrow=TRUE)
p.rateoxy<-matrix(p.oxy,ncol=12,byrow=TRUE)## here, to calculate the oxygen according to original data!!
##where SMOW=[H18O]/[H2O] or [HDO]/[H2O] in Standard Mean Ocean Water.
# To calculate delta18o in precipitation, do followings:
# delta18O_p[permil]=(PRATE18O/PRATE-1)*1000
rownames(p.rateoxy)<-c(1871:2010)
p.tmp<-matrix(data1$V17,ncol=12,byrow=TRUE)
p.rh<-matrix(data1$V18,ncol=12,byrow=TRUE)
plot(data1.ts[,2])
lines(p.oxyts,col=2)
### b. process for the stable oxygen isotope of the water vapor at monthly scales-----
vp.oxy<-(data1$V15/data1$V14-1)*1000
vp.oxy[abs(vp.oxy)>30]<-NA ## remove the outliers, set the threshold is abs(30)
## reference: Xie Yulong, Zhang Xinping, et al., Monitoring and analysis of stable isotopes of the near surface water vapor in
## Changsha, Environmental Science, 2016,37(2):475-481
monthvp.oxy<-as.data.frame(matrix(vp.oxy,ncol=12,byrow=TRUE))
colnames(monthvp.oxy)<-c(1:12)
monthvp.oxy<-cbind(year=c(1871:2010),monthvp.oxy)
p.rateoxy.shape<-melt(p.rateoxy)
p.rateoxy.shape.1988 <-
  subset(p.rateoxy.shape,Var1 >1987 & Var1 <1993)
# p.rateoxy.shape.1988 <-
#   subset(p.rateoxy.shape,Var1 %in% oxy.changsha.reshape$Var1)
# NOTE(review): 'p.oxy' is reused here as a data frame, clobbering the monthly
# delta18O vector of the same name created above
p.oxy <- rbind(oxy.changsha.reshape.1999,
               p.rateoxy.shape.1988[order(p.rateoxy.shape.1988$Var1),])
p.oxy$type <- c(rep("Changsha",60),
                rep("Model",60))
p.oxy$date <- c(seq.Date(from = as.Date('1988-01-01'),by = 'month', length.out = 60),
                seq.Date(from = as.Date('1988-01-01'),by = 'month', length.out = 60))
# Monthly cycle of precipitation d18O: observations vs model, loess-smoothed
oxy.p <- ggplot(p.oxy,aes(x=Var2,y=value, na.rm=TRUE,color=type))+
  geom_point()+
  geom_smooth(method="loess",se=TRUE,lty=1,lwd=1.5,aes(fill =type))+
  xlab("Month")+ylab(expression(paste(delta ^"18","O (โฐ)")))+
  scale_x_continuous(limits = c(1,12),breaks=c(1:12),
                     labels = c(1:12))+
  mythemeplot()+
  theme(legend.position = c(0.2,0.15),legend.title = element_blank())+
  theme(plot.margin = unit(c(0,0,0,0),"lines"))
## Part 2 Tree-ring stable oxygen isotope data load and plot-----
## This part is show the stable oxygen isotope
##
## 2.1 plot the position of the extreme values------
## Load intra-ring positions (V4 = fractional position relative to the ring
## boundary) of annual max/min d18O, split by wood type (EW/LW), and draw
## overlapping histograms per wood type.
## NOTE(review): absolute paths make this non-portable — consider here::here().
stabe.all.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/omax.xlsx")
stabe.allEW.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/allEWomax.xlsx")
stabe.allLW.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/allLWomax.xlsx")
stabe.all.o.min <- read.xlsx("E:/Rwork/highresolution/rawdata/omin.xlsx")
## Histogram of the position of the annual MAX d18O (all rings).
max.oplot<- ggplot(stabe.all.o.max, aes(x=V4, color=wood)) +
  geom_histogram(fill="white", alpha=0.5,
                 position="identity",binwidth = 0.1)+
  mythemeplot()+
  xlab('Proportion to boundary')+ylab("count")+
  theme(legend.title = element_blank(),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.2,0,0,0),"lines"))+
  scale_x_continuous(
    labels = scales::number_format(accuracy = 0.1))+
  scale_color_manual(values=c(LW="darkgreen",
                              EW="green"))
## Same histogram restricted to earlywood maxima.
maxEW.oplot<- ggplot(stabe.allEW.o.max, aes(x=V4, color=wood)) +
  geom_histogram(fill="white", alpha=0.5,
                 position="identity",binwidth = 0.1)+
  #scale_color_manual(values=c("#00BFC4"))+
  mythemeplot()+
  xlab('Proportion to boundary')+ylab("count")+
  theme(legend.title = element_blank(),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.2,0,0,0),"lines"))+
  scale_color_manual(values=c(LW="darkgreen",
                              EW="green"))
## Same histogram restricted to latewood maxima.
maxLW.oplot<- ggplot(stabe.allLW.o.max, aes(x=V4, color=wood)) +
  geom_histogram(fill="white", alpha=0.5,
                 position="identity",binwidth = 0.1)+
  #scale_color_manual(values=c("#00BFC4"))+
  mythemeplot()+
  xlab('Proportion to boundary')+ylab("count")+
  theme(legend.title = element_blank(),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.2,0,0,0),"lines"))+
  scale_color_manual(values=c(LW="darkgreen",
                              EW="green"))
## Histogram of the position of the annual MIN d18O (keeps the y-axis title,
## since it sits leftmost in the combined figure).
min.oplot <-ggplot(stabe.all.o.min, aes(x=V4,color=wood)) +
  geom_histogram(fill="white", alpha=0.5,
                 position="identity",binwidth = 0.1)+
  xlab('Proportion to boundary')+ylab("count")+
  mythemeplot()+
  theme(legend.title = element_blank(),
        plot.margin = unit(c(0.2,0,0,0),"lines"))+
  scale_x_continuous(
    labels = scales::number_format(accuracy = 0.1))+
  scale_color_manual(values=c(LW="darkgreen",
                              EW="green"))
## Long-term (1900-2014) series of the annual d18O parameters (min, mean, max,
## EWmax, LWmax) with linear trend lines per parameter, followed by trend and
## inter-parameter correlation diagnostics.
stable.all.omin.mean.date <- read.xlsx("E:/Rwork/highresolution/rawdata/oxy_all.xlsx")
oxyplot<-ggplot(data=stable.all.omin.mean.date)+
  scale_x_date(expand = c(0.01,0.01))+
  geom_line(aes(x=date,y = min,col="Min"),lwd=1.0)+
  geom_smooth(aes(x=date,y = min),method = "lm",lty=2,se=FALSE)+
  geom_line(aes(x=date,y = mean,col="Mean"),lwd=1.0)+
  #geom_smooth(aes(x=date,y = mean),method = "lm",lty=2)+
  geom_line(aes(x=date,y = max,col="Max"),lwd=1.0)+
  geom_smooth(aes(x=date,y = max),method = "lm",
              col="DarkBlue",lty=2,se=FALSE)+
  geom_line(aes(x=date,y = LWmax,col="LWmax"),lwd=1.0)+
  geom_smooth(aes(x=date,y = LWmax),method = "lm",
              col="darkgreen",lty=2,se=FALSE)+
  geom_line(aes(x=date,y = EWmax,col="EWmax"),lwd=1.0)+
  geom_smooth(aes(x=date,y = EWmax),method = "lm",
              col="green",lty=2,se=FALSE)+
  ## Second scale_x_date overrides the first (ggplot keeps the last one).
  scale_x_date(name="Year",
               expand = c(0,0),
               breaks = "10 years",
               labels=date_format("%Y"),
               limits = as.Date(c("1900-01-01","2015-06-01")))+
  ylab(expression(paste(delta^"18","O (‰)")))+
  scale_color_manual(values=c(LWmax="darkgreen",
                              EWmax="green",
                              Max="DarkBlue",
                              Mean="DeepSkyBlue",Min="blue"))+
  mythemeplot()+
  guides(col = guide_legend(ncol = 1))+
  theme(legend.position = c(0.9,0.85))+
  theme(legend.background = element_rect(fill = "transparent"),
        legend.box.background = element_rect(fill = "transparent", colour = NA),
        legend.key = element_rect(fill = "transparent"),
        #legend.spacing = unit(2, "lines")
  )+
  theme(legend.title = element_blank())
#reposition_legend(pdsiplot1, 'left')
#
## Linear trends over the full period and pairwise correlations between the
## extreme-value parameters.
summary(lm(stable.all.omin.mean.date$EWmax~stable.all.omin.mean.date$year))
summary(lm(stable.all.omin.mean.date$LWmax~stable.all.omin.mean.date$year))
summary(lm(stable.all.omin.mean.date$max~stable.all.omin.mean.date$year))
cor.test(stable.all.omin.mean.date$EWmax,stable.all.omin.mean.date$LWmax)
cor.test(stable.all.omin.mean.date$EWmax,stable.all.omin.mean.date$max)
cor.test(stable.all.omin.mean.date$LWmax,stable.all.omin.mean.date$max)
### try to insert the mean value for parameters---
## Reshape the five d18O parameters to long format and draw a compact boxplot
## that is then embedded into oxyplot as an inset (annotation_custom grob).
oxygen.extremlong <- gather(data=stable.all.omin.mean.date,
                            key="para",
                            value = "d18",min,max,mean,EWmax,LWmax,-year)
oxy.meanbox <-
  # ggplot()+
  # geom_boxplot(data=extreme.predata.mean,
  #               aes(x=label,y=value,col=label))+
  ggboxplot(oxygen.extremlong, x = "para", y = "d18",
            color = "para",width = 0.4,bxp.errorbar.width = 0.1,outlier.shape = NA)+
  xlab("")+ylab("")+
  scale_y_continuous(breaks = c(25,30,35)) +
  scale_color_manual(values=c(LWmax="darkgreen", EWmax="green",max="DarkBlue",
                              mean="DeepSkyBlue",min="blue"))+
  theme_classic()+
  mytranstheme()+
  ## Strip the x axis entirely so the inset stays minimal.
  theme(legend.position = "none",axis.line.x = element_blank(),
        axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x= element_blank())
# called a "grop" in Grid terminology
oxymean_grob <- ggplotGrob(oxy.meanbox)
## Place the inset in the 1940-1970 / 32-35.5 permil region of the main plot.
oxyplot1 <- oxyplot + annotation_custom(grob = oxymean_grob, xmin = as.Date("1940-01-01"),xmax=as.Date("1970-06-01"), ymin = 32, ymax = 35.5)
## here using the ggmap to plot the sampling site
## the map data are too large and I did not upload them,
## If you need the map data, please contact me.
# source(file = "./code/sampling_map.R")
## Figure 1: sampling-site map (gg_combine, built elsewhere) over the three
## climate/seasonal panels (clima, climb, oxy.p).
tiff("./plot/Figure 1-Sampling site and climate diagram.tiff ",
     height = 16,width=18,units = "cm",res=800,compression = "lzw",
     family = "serif")
ggarrange(gg_combine,
          ggarrange(clima,climb,oxy.p,
                    ncol=3,labels=c("b","c","d"),
                    label.x = 0.2,
                    label.y = 0.99,
                    align = "hv",
                    font.label = list(size=22,family="serif")),
          ncol=1,nrow = 2,
          heights = c(2, 1.4),
          labels = c("a",""),
          label.x = 0.67,
          label.y = 0.99,
          align = "hv",
          font.label = list(size=22,family="serif"))
dev.off()
## Figure 2: four extreme-position histograms (a-d) over the long-term d18O
## series with its boxplot inset (e).
tiff("./plot/Figure 2-GRL distribution & variability of oxygen max and min.tiff ",
     height = 16,width=20,units = "cm",res=800,compression = "lzw",
     family = "serif")
ggarrange(
  # ggarrange(clima,climb,oxy.p,
  #           ncol=3,labels=c("a","b","c"),
  #           label.x = c(0.17,0.17,0.19),
  #           label.y = 1,
  #           font.label = list(size=22,family="serif")),
  ggarrange(min.oplot,max.oplot,maxEW.oplot,maxLW.oplot,
            ncol=4,labels = c("a","b","c","d"),
            #label.x = c(0.18,0.17,0.17,0.17),
            label.x =0.25,
            label.y = 1.0,
            align = "v",
            common.legend = TRUE, legend="right",
            font.label = list(size=22,family="serif")),
  oxyplot1,
  ncol=1,nrow = 2,
  heights = c(1.5,2),
  labels = c("","e"),
  label.x = 0.08,
  label.y = 0.99,
  font.label = list(size=22,family="serif"))
dev.off()
## Part 3. Climate response analysis----
## 3.3.1 load the chronology-----
## NOTE(review): iso.chron1 is NOT assigned by any live line below — exactly
## one of the commented alternatives must be uncommented before running this
## section, otherwise L"rownames(iso.chron1)" fails. The commented lines are
## the menu of candidate chronologies (min/max/mean, EW/LW variants).
#iso.chron1 <- as.data.frame(stable.all.omin.mean[2])
#iso.chron1 <- as.data.frame(stable.all.omax.mean[2])
#iso.chron1 <- as.data.frame(oc.mean[3])
## for the EW and LW min and max
# iso.chron1 <- as.data.frame(stable.allEW.omax.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omax.mean$mean)
# signal is weak for the EW omax
# # for the LW and EW max lag one year
# iso.chron1 <- as.data.frame(LWEWmax)
# rownames(iso.chron1) <- c(1900:2013)
#
# iso.chron1 <- as.data.frame(stable.allEW.omin.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omin.mean$mean)
# PDSI (JJA) is the strongest correlation -0.6 for the min oxygen
#
## treeclim expects years as rownames of the chronology data frame.
rownames(iso.chron1) <- c(1900:2014)
head(iso.chron1)
#iso.chron1 <- c.min.dis ## this format for carbon
#iso.chron1 <- pin[2]
### 3.3.2 climate response-----
###
### NOTE: This part and heatmap plot should be looped using the different chronologies(max,min, maxLw....),
###
## Monthly correlation-function analysis (treeclim::dcc) between the selected
## chronology and each climate variable. ny.mymdata columns: 3 and 1 appear to
## be the year/month keys, the third index selects the variable — TODO confirm
## against where ny.mymdata is built. Selections cover previous-Oct through
## current-Dec months plus JJA / JAS / Aug-Nov seasonal means (sums for
## precipitation-like variables).
tmean.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,5)]),
                 var_names =c("tem"), method = "correlation",
                 selection=.range("tem",-10:12)+.mean("tem",6:8)+.mean("tem",7:9)+.mean("tem",8:11))
evp.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,13)]),
               var_names =c("evp"), method = "correlation",
               selection=.range("evp",-10:12)+.mean("evp",6:8)+.mean("evp",7:9)+.mean("evp",8:11))
dtr.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,15)]),
               var_names =c("dtr"), method = "correlation",
               selection=.range("dtr",-10:12)+.mean("dtr",6:8)+.mean("dtr",7:9)+.mean("dtr",8:11))
tmax.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,10)]),
                var_names =c("tmax"), method = "correlation",
                selection=.range("tmax",-10:12)+.mean("tmax",6:8)+.mean("tmax",7:9)+.mean("tmax",8:11))
tmin.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,9)]),
                var_names =c("tmin"), method = "correlation",
                selection=.range("tmin",-10:12)+.mean("tmin",6:8)+.mean("tmin",7:9)+.mean("tmin",8:11))
rh.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,7)]),
              var_names =c("rh"), method = "correlation",
              selection=.range("rh",-10:12)+.mean("rh",6:8)
              +.mean("rh",7:9)+.mean("rh",8:11))
ssd.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,12)]),
               var_names =c("ssd"), method = "correlation",
               selection=.range("ssd",-10:12)+.mean("ssd",6:8)+.mean("ssd",7:9)+.mean("ssd",8:11))
vpd.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,14)]),
               var_names =c("vpd"), method = "correlation",
               selection=.range("vpd",-10:12)+.mean("vpd",6:8)+.mean("vpd",7:9)+.mean("vpd",8:11))
pre.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,8)]),
               var_names =c("pre"), method = "correlation",
               selection=.range("pre",-10:12)+.sum("pre",6:8)+.sum("pre",4:8)+.sum("pre",8:11))
presure.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,6)]),
                   var_names =c("presure"), method = "correlation",
                   selection=.range("presure",-10:12)+.sum("presure",6:8)+.sum("presure",7:9)+.sum("presure",8:11))
pre.day.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,4)]),
                   var_names =c("pre.day"), method = "correlation",
                   selection=.range("pre.day",-10:12)+.mean("pre.day",6:8)+.mean("pre.day",7:9)+.mean("pre.day",8:11))
## here the pdsi data are from CRU data from 1901-2017;
## read and load PDSI data excuted in detectVariabilityCRU.R [current WD]
pdsi.res<- dcc(iso.chron1,data.frame(crupdsi),
               timespan = c(1953,2013),
               var_names =c("pdsi"), method = "correlation",
               selection =.range("pdsi",-10:12)+.mean("pdsi",6:8)
               +.mean("pdsi",7:9)+.mean("pdsi",8:11))
## Quick-look plots kept for interactive use.
# plot(tmean.res)
# plot(pre.day.res)
# plot(tmax.res)
# plot(tmin.res)
# plot(rh.res)
# plot(ssd.res)
# plot(pre.res)
# plot(presure.res)
# plot(evp.res)
# plot(vpd.res)
# plot(pdsi.res)
# plot(dtr.res)
## Moving-window PDSI correlation tile plot for the min-d18O chronology.
## NOTE(review): pdsi.movingres is consumed here but its dcc(..., dynamic =
## "moving") call is not visible in this chunk — presumably defined earlier;
## pdsi.movingres1 below is a second variant with different seasonal windows.
omin.miving<-plot(pdsi.movingres)+
  scale_y_continuous(breaks = seq(0.5,2.5,1),labels=c("JJA","A-O","A-N"),
                     expand = c(0.005,0.005))+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white")+
  scale_x_continuous(breaks = seq(0.5,
                                  by = 1, length.out = ncol(pdsi.movingres$coef$coef)),
                     labels = names(pdsi.movingres$coef$coef),
                     expand = c(0.001,0.001))+
  mythemeplot()+xlab("")+ylab("")+
  theme(axis.ticks = element_line(),
        axis.text.x = element_blank())+
  theme(plot.margin = unit(c(0.5,0,-0.3,0),"lines"))+
  annotate("text", x=1, y=2.5, label="a",size=10,family="serif")
## 30-year moving correlations against three PDSI seasonal means.
pdsi.movingres1<- dcc(iso.chron1,data.frame(crupdsi),
                      #timespan = c(1953,2014),
                      var_names =c("pdsi"), method = "correlation",
                      selection =.mean("pdsi",6:8)
                      +.mean("pdsi",4:10)+.mean("pdsi",8:11),
                      dynamic = "moving", win_size = 30,sb = FALSE)
## heatmap plot----
## Assemble one long table of dcc coefficients across all climate variables
## for the monthly-response heatmap. Month labels: lowercase = previous year,
## plus three seasonal aggregates (18 entries per variable).
month <- c("o","n","d","J","F","M","A","M","J","J","A","S","O","N","D","JJA","JAS","A-N")
corr.mon <- rbind(tmean.res$coef,tmax.res$coef,tmin.res$coef,pre.res$coef,rh.res$coef,pdsi.res$coef,
                  vpd.res$coef,evp.res$coef,ssd.res$coef)
corr.mon$coef.raw <-corr.mon$coef
clim.vars.name <- c("TEM","TMAX","TMIN","PRE","RH","scPDSI","VPD","EVP","SSD")
climgroup <- getgroup(18,clim.vars.name) ## produce the group name for different chronology!!
mongroup <- rep(month,length(clim.vars.name))
corr.mon$climgroup <- climgroup
corr.mon$mongroup <- mongroup
## select the significant correlations
##corr.mon$coef[which(corr.mon$significant=="FALSE")]<-NA
## Mask non-significant cells; 0.247 is presumably the p<0.05 critical r for
## the sample size used — TODO confirm (depends on n of overlap years).
corr.mon$coef[which(abs(corr.mon$coef)<0.247)]<-NA
## plot the climate response at monthly scale
## Base heatmap of coefficient vs. month x climate variable; the named copies
## below (c.min / c.max / o.*) are per-chronology snapshots — this section is
## meant to be re-run per chronology (see the NOTE in 3.3.2), so each copy
## captures the heatmap for the chronology currently in iso.chron1.
response.heatmap <-
  ggplot(data=corr.mon,mapping = aes(x=id,
                                     y=factor(climgroup,levels=clim.vars.name),
                                     fill=coef))+
  geom_tile()+xlab(label = "Month")+ylab(label = "Climatic variable")+
  #scale_fill_gradient2(limits=c(-0.6,0.6),low = "steelblue", mid="white",high = "DarkOrange",na.value="grey70")+
  #scale_fill_gradientn(limits=c(-1,1),colors = Col.5,na.value = #"white")+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white")+
  scale_x_continuous(breaks=c(1:18),expand = c(0.03,0.01),labels=month)+
  mythemeplot()+
  theme(axis.text.x = element_text( family = "serif", vjust = 0.5, hjust = 0.5, angle = 90))
c.min.heatmap <- response.heatmap+
  annotate("text", x=1, y=9, label="c",size=10,family="serif")
c.max.heatmap <- response.heatmap+
  annotate("text", x=1, y=9, label="d",size=10,family="serif")
o.max.heatmap <- response.heatmap
o.max.heatmap1<-o.max.heatmap+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(legend.position = "none")
# axis.text.y = element_blank(),
# axis.title.y = element_blank())
o.min.heatmap <- response.heatmap
o.min.heatmap1 <- o.min.heatmap+
  theme(legend.position = "none")+
  theme(plot.margin = unit(c(0,0,-0.8,0),"lines"))
o.mean.heatmap <-response.heatmap
o.mean.heatmap1 <-o.mean.heatmap+
  theme(legend.position = "none")+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(legend.position = "none",
        axis.text.y = element_blank(),
        axis.title.y = element_blank())
o.LWmax.heatmap <- response.heatmap
o.LWmax.heatmap1 <-o.LWmax.heatmap+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))
o.LWEWmax.heatmap <- response.heatmap
o.LWEWmax.heatmap1 <-o.LWEWmax.heatmap+
  theme(legend.position = "none",
        plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(axis.text.y = element_blank(),
        axis.title.y = element_blank())
## Separate copy with a horizontal colorbar, harvested for a shared legend.
legendmap <-o.LWEWmax.heatmap+
  theme(legend.position = "bottom")+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white",guide = guide_colorbar(direction = "horizontal",label.vjust = 0,label.theme = element_text(size = 12,family = "serif"),barwidth = 10,title = "correlation",title.position = "bottom",title.hjust = 0.5,title.theme = element_text(size=14,family = "serif"),frame.colour ="gray50"))
leg <- get_legend(legendmap)
# Convert to a ggplot and print
leg1 <-as_ggplot(leg)+theme(plot.margin = unit(c(0.8,0.1,0.5,0.3),"lines"))
tiff("./plot/LWEW-correlation-response-monthly.tiff",
     width = 10,height =8 ,units = "cm",compression = "lzw",bg="white",family = "serif",res=500)
print(o.LWEWmax.heatmap)
dev.off()
source("./code/climateresponse of EWmax.R")
### 3.3.3 output the figure for the moving correaltions-----
tiff(file="./plot/Figure 3.3.1 climate Response.tiff",width = 16,
height = 20,units ="cm",compression="lzw",bg="white",res=800)
ggarrange(
ggarrange(o.min.heatmap1,o.mean.heatmap1,
o.max.heatmap1,o.EWmax.heatmap1,
o.LWmax.heatmap1,o.LWEWmax.heatmap1,
nrow = 3,ncol = 2,widths = c(1.2, 1),
labels = c("a","b","c","d","e","f"),
label.x = c(0.2,0.015),
label.y = c(1,1,1.02,1.02,1.02,1.02),
font.label = list(size=24,face="bold",family="serif"),
legend="none"),
leg1,
nrow = 2,ncol = 1,
align = "hv",heights = c(1, 0.1),
widths = c(3.5,1))
dev.off()
### Part 4. seascorr analysis for the chronologies-----
### 4.1 seascorr correlation -----
## Reshape the CRU scPDSI table to long format, order it chronologically, and
## append the post-1952 values as a new column of the monthly climate table so
## seascorr() can use scPDSI alongside precipitation.
crupdsi.long <- gather(crupdsi,key="month",
                       value = "scpdsi",-year)
crupdsi.long1<-crupdsi.long %>%
  arrange(year, match(month,month.abb))
ny.mymdata$scpdsi <- subset(crupdsi.long1,year>1952)$scpdsi
head(ny.mymdata)
## Seasonal-correlation analysis with scPDSI primary / precipitation secondary
## (columns 8 = pre, 16 = scpdsi; season lengths 1, 3, 4 and 5 months).
pdsiresp.season <- seascorr(iso.chron1,
                            climate=data.frame(ny.mymdata[,c(3,1,8,16)]),
                            complete=11,
                            season_lengths = c(1,3,4,5),
                            primary = 2,secondary = 1,
                            #var_names = c("pre","scpdsi")
)
plot(pdsiresp.season)
## Same analysis with the default primary/secondary ordering for comparison.
pdsiresp.season1 <- seascorr(iso.chron1,
                             climate=data.frame(ny.mymdata[,c(3,1,8,16)]),
                             complete=11,
                             season_lengths = c(1,3,4,5),
                             #primary = 2,secondary = 1,
                             #var_names = c("pre","scpdsi")
)
plot(pdsiresp.season1)
## Split-period calibration/verification skill for reconstructing JAS scPDSI.
recon <- skills(object = pdsiresp.season,
                target = .mean("scpdsi",7:9),
                calibration = "-50%")
# set 50% is for 1983-2014 as calibration
plot(recon)
recon
recon$cal.model
recon$full.model
recon$cal.years
## here, 1 - (Residual Deviance/Null Deviance) will give the R2.
## Transfer-function diagnostics (R2, AIC/BIC, RMSE of residuals).
fit<-lm(x~y,data=recon$full)
summary(fit)
BIC(fit)
AIC(fit)
sqrt(mean(fit$residuals^2))# calculate RMSE
## Shared panel theme (faint grid lines, transparent strip background) applied
## to every seascorr panel plot in this section.
paneltheme<-theme(panel.grid.major =element_line(colour = "gray80",size=0.5,inherit.blank = TRUE),panel.grid.minor =element_line(colour = "gray90",size = 0.2),strip.background=element_rect(fill=NA))
## seascorr panel for the scPDSI-primary analysis, with month labels
## (lowercase = previous year) on the x axis.
## FIX: the original chain ended after theme(strip.text.y = ...) with no "+",
## so the trailing "paneltheme" line was a standalone expression that merely
## auto-printed the theme instead of adding it to the plot. Every sibling plot
## (minpre1, LWmaxrh1, LWmaxpdsi1, LWEWrh1, LWEWpdsi1) ends "theme_pubr()+
## paneltheme"; this one now does too.
minpdsi1<-plot(pdsiresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  xlab("")+
  theme(plot.margin = unit(c(0,1,0,1), "lines"))+
  theme_pubr()+theme(strip.text.y = element_text())+
  paneltheme
## seascorr panel for the default-order analysis (precipitation primary),
## restyled with theme_pubr plus the shared paneltheme.
minpre1<-plot(pdsiresp.season1)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  xlab("")+
  theme_pubr()+
  paneltheme
## Seasonal-correlation analysis against relative humidity (column 7) with
## scPDSI (column 16) as the other variable — once with RH primary (default
## order) and once with scPDSI primary.
rhresp.season <- seascorr(iso.chron1,
                          climate=data.frame(ny.mymdata[,c(3,1,7,16)]),
                          complete=11,
                          season_lengths = c(1,3,4,5),
                          #primary = 2,secondary = 1,
                          #var_names = c("pre","scpdsi")
)
plot(rhresp.season)
rhresp.season2 <- seascorr(iso.chron1,
                           climate=data.frame(ny.mymdata[,c(3,1,7,16)]),
                           complete=11,
                           season_lengths = c(1,3,4,5),
                           primary = 2,secondary = 1,
                           #var_names = c("pre","scpdsi")
)
plot(rhresp.season2)
## Styled panels; the LWmax* and LWEW* pairs are identical constructions —
## this section is re-run per chronology, so each name captures the plot for
## the chronology active in iso.chron1 at the time.
LWmaxrh1<-plot(rhresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme(plot.margin = unit(c(-1,1,0.2,1), "lines"))+
  theme_pubr()+
  paneltheme
LWmaxpdsi1<-plot(rhresp.season2)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme_pubr()+
  paneltheme
LWEWrh1<-plot(rhresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme(plot.margin = unit(c(-1,1,0.2,1), "lines"))+
  theme_pubr()+
  paneltheme
LWEWpdsi1<-plot(rhresp.season2)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme_pubr()+
  paneltheme
## 4.2 output the figures--------------
## Figure 3 (main text) and Figure S4: paired seascorr panels stacked
## vertically with a common top legend.
tiff(file="./plot/Figure 3. seacorr for LWEW-max & min pdsi=2 for min.tiff",
     width = 21,height = 18,units ="cm",compression="lzw",
     bg="white",res=800, family = "serif")
ggarrange(minpre1,LWEWpdsi1,
          ncol=1,nrow = 2,
          labels = c("a","b"),
          label.x = 0.05,
          label.y = 1.,
          font.label = list(size=20,family="serif"),
          common.legend = TRUE,legend = "top" )
dev.off()
tiff(file="./plot/Figure S4. seacorr for LWEW-max & min.tiff",
     width = 21,height = 18,units ="cm",compression="lzw",
     bg="white",res=800, family = "serif")
ggarrange(minpdsi1,LWEWrh1,
          ncol=1,nrow = 2,
          labels = c("a","b"),
          label.x = 0.05,
          label.y = 1.,
          font.label = list(size=20,family="serif"),
          common.legend = TRUE,legend = "top" )
dev.off()
##
## ## calculate the correlations between different parameters--
## Pairwise Pearson correlations among the five d18O parameters for the full
## period and two sub-periods (rows 1:51 ~ 1900-1950, 52:115 ~ 1951-2014,
## given the 1900 start — TODO confirm the intended split years).
maxmin.data<-stable.all.omin.mean.date %>%
  select(min,max,mean,LWmax,EWmax) %>%
  as.data.frame()
## Coerce to a plain numeric 115x5 matrix as required by Hmisc::rcorr.
maxmin.data <-array(as.numeric(unlist(maxmin.data)), dim=c(115, 5))
colnames(maxmin.data)<-c("min","max","mean","LWmax","EWmax")
cc.proxy<-rcorr(maxmin.data,
                type="pearson")
cc.proxy1<-rcorr(maxmin.data[c(1:51),],
                 type="pearson")
cc.proxy2<-rcorr(maxmin.data[c(52:115),],
                 type="pearson")
head(maxmin.data)
## correlation between LW and lag1 EW
cor(maxmin.data[-1,5],maxmin.data[-115,4])
## combine EWmax and LW max
summary(maxmin.data[-1,5])
summary(maxmin.data[-115,4])
## Composite chronology: average of current-year EWmax (shifted by one year)
## and previous-year LWmax; length 114 (1900-2013).
LWEWmax<-(maxmin.data[-1,5]+maxmin.data[-115,4])/2
## Part 5. multiple variable and common analysis-------------
## ##5.1 using the nlm model----
## Stepwise (AIC, both directions) multiple regression of each chronology on
## candidate climate predictors, followed by commonality analysis (yhat
## package: regr/commonality/aps) to partition explained variance.
## NOTE(review): semmodeldata / semmodeldata2 / semmodeldata3 are built
## elsewhere — confirm their column meanings (mean.value, ssd1, ...) there.
model.1=lm(mean~ mean.vpd+mean.rh+mean.pre+mean.ssd+mean.value+scpdsi+mean.tmean+mean.evp,data=semmodeldata)
step1 <- stepAIC(model.1, direction="both")
step1$anova # display results
model.2=lm(mean~ mean.ssd+mean.value+scpdsi+mean.pre+mean.rh,
           data=semmodeldata)
model.3=lm(mean~ mean.ssd+mean.value+scpdsi,data=semmodeldata)
LWmodel.1=lm(mean~ mean.rh+mean.pre+mean.ssd+mean.value+scpdsi+mean.vpd, data=semmodeldata2)
LWmodel.2=lm(mean~ mean.vpd+mean.rh, data=semmodeldata2)
LWmodel.3=lm(mean~ mean.rh+mean.pre, data=semmodeldata2)
step <- stepAIC(LWmodel.1, direction="both")
step$anova # display results
head(semmodeldata3)
LWEWmodel.1=lm(LWEWmax ~ rh+pre+ssd1+mean.value+scpdsi+vpd+evp, data=semmodeldata3)
LWEWmodel.2=lm(LWEWmax~ vpd+rh, data=semmodeldata3)
LWEWmodel.3=lm(LWEWmax~ rh+pre, data=semmodeldata3)
LWEWmodel.4=lm(LWEWmax~ rh, data=semmodeldata3)
step1 <- stepAIC(LWEWmodel.1, direction="both")
step1$anova # display results
#commonality assessment--
regr(model.1)
regr(model.2)## this depend the beta weight!!
regr(model.3) ##$Commonality_Data $Commonality_Data$`CC shpw the cntributions
commonality(model.1)
## All-possible-subsets regression
apsOut=aps(semmodeldata,"mean",list("scpdsi", "mean.value","mean.ssd"))
## Commonality analysis
commonality(apsOut)
regr(LWmodel.1)
regr(LWmodel.2)
regr(LWmodel.3)
commonality(model.1)
regr(LWEWmodel.1)
regr(LWEWmodel.2)
regr(LWEWmodel.3)
regr(LWEWmodel.4)
## Part 6. Climate reconstruction and comparison---------
## 6.1 reconstruction test-----
##
## subset the chronology
## Dedicated chronologies for reconstruction: annual-min d18O (1900-2014) and
## the LW+EW(lag1) composite (1900-2013, one year shorter by construction).
Amin.chron1 <- as.data.frame(stable.all.omin.mean[2])
#iso.chron1 <- as.data.frame(stable.all.omax.mean[2])
#iso.chron1 <- as.data.frame(oc.mean[3])
rownames(Amin.chron1) <- c(1900:2014)
head(Amin.chron1)
## for the EW and LW min and max
# iso.chron1 <- as.data.frame(stable.allEW.omax.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omax.mean$mean)
#
# # for the LW and EW max lag one year
LWEW.chron1 <- as.data.frame(LWEWmax)
rownames(LWEW.chron1) <- c(1900:2013)
# PDSI (JAS) is the strongest correlation -0.667 for the min oxygen
#
## Response functions used to pick reconstruction targets: scPDSI vs the min
## chronology, RH vs the composite and vs the chronology in iso.chron1.
pdsi.res1<- dcc(Amin.chron1,data.frame(subset(crupdsi,year>1952)),
                timespan = c(1953,2014),
                var_names =c("pdsi"), method = "correlation",
                selection =.range("pdsi",-10:12)+.mean("pdsi",6:8)
                +.mean("pdsi",7:9)+.mean("pdsi",8:11))
plot(pdsi.res1)
rhLWEWmax.res1<- dcc(LWEW.chron1,
                     data.frame(ny.mymdata[,c(3,1,7)]),
                     #timespan = c(1953,2014),
                     var_names =c("rh"), method = "correlation",
                     selection =.range("rh",10)+.mean("rh",6:8)
                     +.mean("rh",4:10)+.mean("rh",8:11))
plot(rhLWEWmax.res1)
rhLWmax.res1<- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,7)]),
                   timespan = c(1953,2014),
                   var_names =c("rh"), method = "correlation",
                   selection =.mean("rh",6:8)
                   +.mean("rh",4:10)+.mean("rh",8:11))
plot(rhLWmax.res1)
## Split-period calibration/verification of the JAS-scPDSI transfer function
## from the min-d18O chronology: "-50%" calibrates on the late half,
## "49%" on the early half; RE/CE are the reduction-of-error and
## coefficient-of-efficiency verification statistics.
sk.pdsimin<-skills(object = pdsi.res1,
                   target =.mean("pdsi",7:9),
                   calibration = "-50%",
                   model="ols")
ggplotly(plot(sk.pdsimin))
sk.pdsimin$full.model$rsquare
summary(sk.pdsimin$full.model$call)
sk.pdsimin$RE#
sk.pdsimin$CE
sk.pdsimin$cal.model
## Complementary split: calibrate on the early 49% of years.
sk.pdsimin.2<-skills(object = pdsi.res1,
                     target =.mean("pdsi",4:10),
                     calibration = "49%",
                     model="ols")
ggplotly(plot(sk.pdsimin.2))
sk.pdsimin.2$full.model$rsquare
summary(sk.pdsimin.2$coef.full)
sk.pdsimin.2$RE#
sk.pdsimin.2$CE
sk.pdsimin.2$cal.model
## Calibration/verification of the October-RH transfer function from the
## LW+EW(lag1) composite chronology (late-half calibration).
sk.rhLWEWmax<-skills(object = rhLWEWmax.res1,
                     target =.range("rh",10),
                     calibration = "-51%",
                     timespan = c(1953,2013),
                     model="ols")
## FIX: the original plotted "sk.rhEWmax", a stale name from an earlier EWmax
## run that is never assigned in this section; the object created just above
## and inspected on every following line is sk.rhLWEWmax.
ggplotly(plot(sk.rhLWEWmax))
sk.rhLWEWmax$full.model
sk.rhLWEWmax$cal.model
sk.rhLWEWmax$DW
sk.rhLWEWmax$RE
sk.rhLWEWmax$CE
sk.rhLWEWmax$cal.years
## Aug-Nov RH transfer function from the chronology in iso.chron1, with both
## calibration splits, fit diagnostics, and RE/CE summary tables.
sk.rhLWmax<-skills(object =rhLWmax.res1,
                   target =.mean("rh",8:11),
                   calibration = "-51%",
                   timespan = c(1953,2014),
                   model="ols")
ggplotly(plot(sk.rhLWmax))
sk.rhLWmax$full.model
sk.rhLWmax$cal.model
sk.rhLWmax$DW
sk.rhLWmax$RE
sk.rhLWmax$CE
## Complementary early-period calibration.
sk.rhLWmax.2<-skills(object =rhLWmax.res1,
                     target =.mean("rh",8:11),
                     calibration = "49%",
                     model="ols")
ggplotly(plot(sk.rhLWmax.2))
sk.rhLWmax.2$cal.years
sk.rhLWmax.2$full.model
sk.rhLWmax.2$cal.model
sk.rhLWmax.2$DW
sk.rhLWmax.2$RE
sk.rhLWmax.2$CE
fit<-lm(x~y,data=sk.rhLWmax$full)
summary(fit)
BIC(fit)
AIC(fit)
sqrt(mean(fit$residuals^2))# calculate RMSE
## RE/CE tables for the two calibration/verification splits of each target.
title<-cbind(Calibration=c("1953-1983","1984-2014"),
             Verification=c("1984-2014","1953-1983"))
REtable.PDSI<-cbind(title,
                    cbind(RE=c(sk.pdsimin.2$RE,sk.pdsimin$RE),
                          CE=c(sk.pdsimin.2$CE,sk.pdsimin$CE)))
REtable.rh<-cbind(title,
                  cbind(RE=c(sk.rhLWmax.2$RE,sk.rhLWmax$RE),
                        CE=c(sk.rhLWmax.2$CE,sk.rhLWmax$CE)))
## Assemble observed target (y) and proxy (x) pairs from both transfer
## functions; the RH pair is one year shorter, hence the trailing NA padding.
regree.data<-data.frame(cbind(sk.pdsimin$full.model$y,sk.pdsimin$full.model$x,
                              c(sk.rhLWEWmax$full.model$y,NA),
                              c(sk.rhLWEWmax$full.model$x,NA)))
colnames(regree.data)<-c("scpdsi","amino18","rh10","LWEWmaxo18")
## Drop the NA-padded last row before correlating the two targets.
cor(regree.data$scpdsi[-62],
    regree.data$rh10[-62])
# mulfit1<-lm(regree.data$rh8.11[-(59:62)]~
#               regree.data$LWmaxo18[-(59:62)]+
#               sk.rhEWmax$full.model$x[-(59:61)])
# summary(mulfit1)
## Calibration scatter plots with fitted-equation labels (lm_eqn defined
## elsewhere): panel (a) scPDSI vs annual-min d18O ...
m1<-lm(regree.data$scpdsi~regree.data$amino18)
reg1<-ggplot(regree.data,aes(x=amino18,y=scpdsi)) +
  geom_point(shape=1,col=4) +
  geom_smooth(method=lm, lty=2, color=4, se=TRUE)+
  ylab("July-September scPDSI")+
  xlab(expression(paste("Annual minimum tree-ring"," ",~delta^18,"O")))+
  geom_text(y = 4.5, x = 26, label = lm_eqn(m1),
            parse = TRUE,
            colour="black",family="TN",size=3.5)+
  mythemeplot()
## ... and panel (b) October RH vs the LW+EW(lag1) composite.
m2<-lm(regree.data$rh10~regree.data$LWEWmaxo18)
reg2<-ggplot(regree.data,aes(x=LWEWmaxo18,y=rh10)) +
  geom_point(shape=1,col="darkgreen") +
  geom_smooth(method=lm , lty=2, color="darkgreen", se=TRUE)+
  ylab("October RH (%)")+
  xlab(expression(paste("LW + EW(lag1) maximum tree-ring"," ",~delta^18,"O")))+
  geom_text(x = 29.5, y = 70, label = lm_eqn(m2),
            parse = TRUE,
            colour="black",family="TN",size=3.5)+
  mythemeplot()
## 6.2 climate reconstruction------
## Reconstruction data--
## Apply the OLS transfer functions over the full chronology length.
## NOTE(review): the intercept/slope values below are hard-coded from the
## printed full.model output — they must be re-copied if the models change.
sk.pdsimin$full.model
pdsi.recon<-32.0118-1.209782*stable.all.omin.mean[2]
sk.rhLWEWmax$full.model
rh.recon <- 197.4765-3.896857*LWEWmax
## Combine reconstructions (RH padded with NA for 2014), observations
## (1953-2014), and the pre-1954 CRU scPDSI mean of columns 8:10
## (presumably Aug-Oct — TODO confirm column/month mapping of crupdsi).
recondata<-cbind(pdsi.recon,c(rh.recon,NA))
colnames(recondata)<-c("scpdsi","rh10")
recondata$year<-1900:2014
obs<-subset(regree.data,select = c("scpdsi","rh10"))
obs$year<-1953:2014
CRU<-subset(crupdsi,select = c(8:10),year<1954)%>%
  mutate(pdsi=rowMeans(.))
CRU<-cbind(CRU[,4],NA,year=(1901:1953))
colnames(CRU)<-c("scpdsi","rh10","year")
reconcomdata<-rbind(obs,recondata,CRU)
reconcomdata$type<-c(rep("observation (CRU)",62),
                     rep("reconstruction",115),
                     rep("CRU before 1953",53))
## detect the slope for different period
summary(lm(data=subset(reconcomdata, type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata,year>1952 & type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata,year<1953 & type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="observation (CRU)"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="CRU before 1953"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="reconstruction"),
           rh10~year))
summary(lm(data=subset(reconcomdata,year>1952 & type=="reconstruction"),
           rh10~year))
summary(lm(data=subset(reconcomdata,year<1953 & type=="reconstruction"),
           rh10~year))
write.csv(reconcomdata,"reconstruction.csv")
## Pre-1953 agreement between the reconstruction and the early CRU series,
## plus cross-period correlations between the two reconstructed targets and
## against observations for the calibration/verification halves.
x=subset(reconcomdata,year< 1953 & type=="reconstruction"|
           year<1953 & type=="CRU before 1953",
         select =c("scpdsi","year","type") )
## First half of x is the reconstruction rows, second half the CRU rows —
## the 1:52 / 53:104 split assumes equal lengths; verify the row counts.
cor.test(x$scpdsi[1:52],x$scpdsi[53:104])
cor(x=subset(reconcomdata,year>1952 & type=="reconstruction",
             select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
cor(x=subset(reconcomdata,year<1953 & type=="reconstruction",
             select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
cor(subset(reconcomdata,year>1983 & type=="reconstruction",
           select =c("scpdsi","rh10") ),
    subset(reconcomdata,year>1983 & type=="observation (CRU)",
           select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
cor(subset(reconcomdata,year>1953 &year <1984 & type=="reconstruction",
           select =c("scpdsi","rh10") ),
    subset(reconcomdata,year>1953 &year <1984 & type=="observation (CRU)",
           select =c("scpdsi","rh10")),
    use="complete.obs",
    method = "pearson")
## here, comparison between different filter functions!!
## (Retained experiments: spline vs Butterworth low-pass smoothing.)
# spline.pdsi1<-smooth.spline(recondata$year,recondata$scpdsi,n = 10)
# spline.pdsi2<- pass.filt(recondata$scpdsi, W=10, type="low", method="Butterworth")## for 10 year low pass
# spline.pdsi2 <- as.data.frame(cbind(x=spline.pdsi$x, y=spline.pdsi2))
#
# spline.pdsi<-smooth.spline(recondata$year,recondata$scpdsi,spar = 0.2)##
# spline.pdsi <- as.data.frame(cbind(x=spline.pdsi$x, y=spline.pdsi$y))
# plot(spline.pdsi$x, spline.pdsi$y, type="l",col=2)
# par(new=TRUE)
# plot(spline.pdsi1$x, spline.pdsi1$y, type="l")
# par(new=TRUE)
# plot(spline.pdsi2$x, spline.pdsi2$y, type="l",col=4)
## reconstruction and 20-year loess smooother
## scPDSI reconstruction time series: raw lines per series type, a tight
## (span=0.2) and a broad (span=0.75) loess of the reconstruction, and a
## broad loess of the full CRU record (CRU.all defined elsewhere).
pdsireconplot<-ggplot(reconcomdata,aes(x=year,y=scpdsi)) +
  geom_line(aes(colour= type))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=scpdsi),
              method = "loess",span=0.2,se=FALSE,lwd=1.5,col=4)+
  #geom_line(data=spline.pdsi,aes(x=x,y=y))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=scpdsi),method = "loess",span=0.75,se=TRUE,col=c("blue"))+#col=c("#00BFC4"))
  geom_smooth(data = CRU.all,aes(x=year,y=scpdsi),
              method = "loess",span=0.75,se=TRUE,
              col=c("Darkorange"))+
  xlab("")+ylab("July-September scPDSI")+
  scale_x_continuous(expand = c(0.01,0.01))+
  mythemeplot()+
  theme(legend.position = c(0.85,0.87),
        legend.title = element_blank())+
  ## Vertical guide at the 1984 calibration/verification split.
  geom_vline(xintercept=1984,lty=2,col="gray70")+
  theme(plot.margin = unit(c(-0.2,0.3,0,0.3),"lines"))+#+
  # geom_line(data=subset(crupdsi.4.10.date,year<1954),
  #           aes(x=year(date),y=growing,col="Darkorange"),
  #           lwd=0.2)+
  scale_color_manual(values=c("observation (CRU)" = "#F8766D",
                              "reconstruction" = "blue",
                              "CRU before 1953"="Darkorange"),
                     labels=c("CRU before 1953", "observation (CRU)", "reconstruction"))+
  annotate("text", x = 1984, y = -3.2,
           label = expression(paste("Verification: ", italic(r), "= 0.75; Calibration: ", italic(r)," = 0.49")),
           family="serif")#+
#annotate("text", x = 1984, y = -3.5, label = "RE = 0.526, CE = 0.473",family="serif")
## October-RH reconstruction counterpart (no pre-1953 CRU series for RH,
## hence the NA colour entry).
rhreconplot<-ggplot(reconcomdata,aes(x=year,y=rh10)) +
  geom_line(aes(colour= type))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=rh10),
              method = "loess",span=0.2,se=FALSE,lwd=1.5,col=c("darkgreen"))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=rh10),method = "loess",span=0.75,se=TRUE,col=c("darkgreen"))+
  xlab("Year")+ylab("October RH (%)")+
  scale_x_continuous(expand = c(0.01,0.01))+
  mythemeplot()+
  theme(legend.position = c(0.2,0.2),
        legend.title = element_blank())+
  geom_vline(xintercept=1984,lty=2,col="gray70")+
  theme(plot.margin = unit(c(-0.5,0.3,0.3,0.6),"lines"))+
  scale_color_manual(values=c("observation (CRU)" = "#00BFC4",
                              "reconstruction" = "darkgreen",
                              "CRU before 1953"=NA),
                     labels=c("", "observation", "reconstruction"))+
  annotate("text", x = 1984, y = 60,
           label = expression(paste("Verification: ", italic(r), "= 0.77; Calibration: ", italic(r)," = 0.61")),
           family="serif")#+
#annotate("text", x = 1984, y = 72, label = "RE = 0.464, CE = 0.461", family="serif")
tiff(file="./plot/Figure 7.1.1 reconstruction1.tiff",
width = 16,height = 18,
units ="cm",compression="lzw",bg="white",res=800)
ggarrange(
ggarrange(reg1,reg2,ncol=2,labels = c("a","b"),
label.x = 0.87,
label.y = c(1,0.99),
font.label = list(size=20,family="serif")),
ggarrange(pdsireconplot,rhreconplot,
nrow = 2,
labels = c("c","d"),
label.x = 0.1,
label.y = c(1,1.04),
align = "v",
font.label = list(size=20,family="serif")),
nrow = 2,align = "v",heights = c(0.6,1),
# labels = c("","c"),
# label.x = 0.1,
# label.y = 1.04,
font.label = list(size=20,family="serif"))
dev.off()
### Part 7. Supplementary figure plot--------
###
### 7.3. Figure S3----------
## here, the max value have a lag significant correlation, it means the significant old carbon reuse?? McCaroll et al., 2017
## Figure S3: correlation matrix of the five d18O parameters — upper triangle
## as colored circles with significance crosses, lower triangle as numbers.
tiff("./plot/Figure S3 oxygen parameter correlation 1900-2014.tiff",width=8,height = 8,units = "cm",
     compression = "lzw",bg="white",res = 300)
windowsFonts(TN = windowsFont("Times New Roman"))
par(mgp=c(2.0,0.5,0),family="TN",ps=8)
# par(mfrow=c(1,3),mgp=c(1.0,0.5,0),family="TN",ps=13)
#par(mar=c(0, 0, 0.0, 0) + 0.1)
par(oma=c(0,0,0.02,0.02))
corrplot(corr = cc.proxy$r,type="upper",
         col=brewer.pal(n=10, name="PuOr"),cl.lim = c(0, 1),
         tl.pos="d",tl.col = 1,tl.cex=1.2,
         p.mat = cc.proxy$P, sig.level = 0.05,insig ="pch",
         pch.cex = 3,pch.col = rgb(255, 0, 0,100, maxColorValue=255))
corrplot(corr=cc.proxy$r,add=TRUE,type="lower",method = "number", number.cex = 1,number.font=2,col=1,
         diag=FALSE,tl.pos="n", cl.pos="n",p.mat = cc.proxy$P,
         sig.level = 0.05,insig ="pch",pch.cex = 3,
         pch.col = rgb(255, 0, 0, 100, maxColorValue=255))
dev.off()
### 7.4 Figure S4--------------
## Figure S4 has already been exported in part 4.2
## 7.5 Figure S5. correlation between chrongologies and ISOGSM data-----
## detect the climatic signal of the GNIP data (precipitation oxygen data )
## the aim of this part is to detect the climate response in the tree-ring d18O and d18O in precipitation
### 7.5.1 d18O precipitation response to maximum and minimum tree-ring -----
## Build annual time series (1900 onward) for each chronology variant:
## whole-ring min/max, earlywood (EW) max, latewood (LW) max, and the
## composite LW+EW chronology. The source objects are created earlier.
omin.mean <- as.data.frame(stable.all.omin.mean[2])
omax.mean <- as.data.frame(stable.all.omax.mean[2])
omin.mean.ts <- ts(omin.mean, start = 1900,frequency = 1)
omax.mean.ts <- ts(omax.mean, start = 1900,frequency = 1)
EWomax.mean.ts <- ts(stable.allEW.omax.mean$mean,
start = 1900,frequency = 1)
LWomax.mean.ts <- ts(stable.allLW.omax.mean$mean,
start = 1900,frequency = 1)
LWEW.mean.ts <- ts(LWEW.chron1,start = 1900,frequency = 1)
## here call the function runningclimate from E:/Rwork/myfunction/basic dplR and beyond.R
## the basic idea is used the runningclimate to detect the pearson's correlation
## call for the data @ oxygen from precipitation, @@p.rateoxy.clim
## Climateplot() is a project helper (not in SOURCE); it appears to return a
## month x window-length matrix of running correlations against the
## precipitation-d18O climate table p.rateoxy.clim over 1950-2010 with no
## detrending -- TODO confirm its exact return shape against the helper file.
omin.mean.p <- Climateplot (omin.mean.ts,
Climatesite = p.rateoxy.clim,
fyr=1950,lyr=2010,
detrended=c("No"),
spline.length=0)
omax.mean.p <- Climateplot(omax.mean.ts,
Climatesite = p.rateoxy.clim,
fyr=1950,lyr=2010,
detrended=c("No"),
spline.length=0)
EWomax.mean.p <- Climateplot(EWomax.mean.ts,
Climatesite = p.rateoxy.clim,
fyr=1950,lyr=2010,
detrended=c("No"),
spline.length=0)
LWomax.mean.p <- Climateplot(LWomax.mean.ts,
Climatesite = p.rateoxy.clim,
fyr=1950,lyr=2010,
detrended=c("No"),
spline.length=0)
LWEWomax.mean.p <- Climateplot(LWEW.mean.ts,
Climatesite = p.rateoxy.clim,
fyr=1950,lyr=2010,
detrended=c("No"),
spline.length=0)
# Adapt these to your needs:
#parSettings <- list(layout.widths=list(left.padding=1))
## Lattice contour panels of the running correlations (x = month,
## y = window length), shared color scale from -0.8 to 0.8 in 0.05 steps.
## Panel letters are drawn via latticeExtra::layer.
## NOTE(review): region=T uses the reassignable T shortcut; prefer TRUE.
omin.p <- contourplot(t(omin.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
at=c(seq(-0.8,0.8,0.05)),xlab="",ylab="Window length",main=NA)+
latticeExtra::layer(panel.text(x=3, y=11.5, label="a min",family="serif",font=2,cex=1.5))
# omax.p<-contourplot(t(omax.mean.p),region=T,lwd=0.3,lty=2,
# col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
# at=c(seq(-0.8,0.8,0.05)),xlab=" ",ylab="Window length",main=title)
EWomax.p <-contourplot(t(EWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length")+
latticeExtra::layer(panel.text(x=3, y=11.5, label="b EW-max",family="serif",font=2,cex=1.5))
LWomax.p <-contourplot(t(LWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length",main=NA)+
latticeExtra::layer(panel.text(x=3, y=11.5, label="c LW-max",family="serif",font=2,cex=1.5))
LWEWomax.p<-contourplot(t(LWEWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length",main=NA)+
latticeExtra::layer(panel.text(x=4.5, y=11.5, label="d Composite max",family="serif",font=2,cex=1.5))
## 7.5.2 output the correlation analysis-------
tiff("./plot/omin-EW,LWomax-precipitation-oxy-2.tiff",width = 20,height = 27,
units = "cm",pointsize = 12,compression = "lzw",res = 300,bg="white",family = "serif")
# Combine lattice charts into one
#c(omin.p, EWomax.p)
c(LWEWomax.p,LWomax.p,EWomax.p,omin.p,
merge.legends = TRUE,layout=c(1,4))
dev.off()
### 7.6. Variability of the cloud cover from CRU dataset----
### # read cloud cover data from CRU
## Read the CRU TS cloud-cover series for the grid cell
## (112.5-112.7E, 27.27-27.5N); columns are year + months 1-12.
crucld <- read.table("./cru/icru4_cld_112.5-112.7E_27.27-27.5N_n.dat",
                     header = FALSE)
head(crucld)
colnames(crucld) <- c("year", 1:12)
crucld <- subset(crucld, year > 1952 & year < 2015)
## Per-month trend p-values: p-value of the year slope in lm(cloud ~ year).
## Fixed: the original seeded p.vals with NA and grew it via cbind() in a
## loop (spurious leading NA, O(n^2) growth, '=' assignment); vapply
## preallocates and the result is a named numeric vector instead.
p.vals <- vapply(2:13, function(i) {
  coef(summary(lm(crucld[, i] ~ crucld[, 1])))[2, 4]
}, numeric(1))
names(p.vals) <- colnames(crucld)[2:13]
## Long format (year, month, cld) for ggplot facetting.
## NOTE: tidyr::gather() is superseded by pivot_longer(); kept for
## compatibility with the tidyr version this script was written against.
crucldlong <- gather(crucld, key = "month", value = cld, -year)
## Y-axis break helper for the cloud-cover facets: a panel whose data dip
## below 50% gets wide breaks (30..90 by 20); otherwise tighter breaks
## (60..90 by 15). ggplot calls this once per facet with the panel limits.
my_breaks <- function(x) {
  wide_panel <- min(x) < 50
  if (wide_panel) {
    seq(30, 90, by = 20)
  } else {
    seq(60, 90, by = 15)
  }
}
## Monthly cloud-cover time series, one facet per month.
## crucld.longplot: full record; crucld.plot: 1953-2014 only.
## NOTE(review): only crucld.longplot is exported below -- crucld.plot is
## built but never printed; confirm whether it is still needed.
crucld.longplot<-ggplot(
data=subset(crucldlong, year<2015),
aes(year,cld,group=month,col=factor(month,levels=c(1:12))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels=c(1:12))~., scales="free")+
xlab(label = "Year")+
ylab(label = c("Cloud cover (%)"))+
scale_x_continuous(expand = c(0.005,0.005))+
scale_y_continuous( breaks = my_breaks)+
guides(col=guide_legend(title="Month"))
crucld.plot<-ggplot(
data=subset(crucldlong,year>1952 & year <2015), aes(year,cld,group=month,col=factor(month,levels=c(1:12))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels=c(1:12))~.,scales = "free")+
#facet_grid(factor(crucldlong$month,levels=c(1:12))~., scales="free")+
xlab(label = "Year")+
ylab(label = c("Cloud cover (%)"))+
scale_x_continuous(expand = c(0.01,0.01))+
scale_y_continuous( breaks = my_breaks)+
guides(col=guide_legend(title="Month"))
## Export Figure S6 (full-period plot only).
tiff(file="./plot/Figure S6. Cloud cover for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(crucld.longplot)
dev.off()
### 7.7 Variability of d18O of precipitation------
## plot and for the seasonal oxygen isotpe in precipitation from ISOGSM model
## Feb-Nov precipitation d18O from the ISOGSM model (p.rateoxy.shape is
## the melted year x month matrix: Var1 = year, Var2 = month):
## calibration window 1950-2010 plus the full 1900-2010 record.
pre.oxy2.11 <- subset(p.rateoxy.shape,
                      Var2 %in% 2:11 & Var1 > 1949 & Var1 < 2011)
pre.oxy2.11long <- subset(p.rateoxy.shape, Var2 %in% 2:11 & Var1 > 1899)
## Monthly trend p-values via the project helper pvaluecal()
## (assumed to return one numeric p-value per month -- TODO confirm).
## Fixed: replaced the NA-seeded cbind() grow loop (spurious leading NA,
## '=' assignment) with a preallocated vapply over the months actually
## present in the data, returning a named numeric vector.
months2.11 <- unique(pre.oxy2.11long$Var2)
p.vals <- vapply(months2.11,
                 function(m) pvaluecal(m, group = 2, data = pre.oxy2.11),
                 numeric(1))
names(p.vals) <- months2.11
## Feb-Nov precipitation d18O, one facet per month, 1950-2010.
## Fixed: facet on the mapped variable Var2 rather than the external vector
## pre.oxy2.11$Var2 -- indexing the source data frame inside facet_grid()
## silently misaligns as soon as the plot data are filtered differently,
## and it is inconsistent with the sibling plot below (which facets on
## as.factor(Var2)). The inner subset() is kept although it duplicates the
## 1950-2010 filter already applied when pre.oxy2.11 was created.
pre.oxy2.11.plot<-ggplot(subset(pre.oxy2.11,Var1>1949 & Var1<2011),aes(Var1,value,group=Var2,col=as.factor(Var2)))+
geom_line()+geom_point()+
facet_grid(as.factor(Var2)~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
xlab(label = "Year")+ylab(label = expression(paste("Precipitation ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
## Same monthly d18O facets for the full 1900-2010 record, then export
## both the 1950- and 1900- versions to TIFF.
pre.oxy2.11.longplot<-ggplot(pre.oxy2.11long,aes(Var1,value,group=Var2,col=as.factor(Var2)))+
geom_line()+geom_point()+
facet_grid(as.factor(Var2)~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
scale_x_continuous(expand = c(0.01,0.01))+
xlab(label = "Year")+ylab(label = expression(paste("Precipitation ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
## NOTE(review): "preciptation" in the output filenames is a typo, but it is
## kept because downstream material may reference these exact file names.
tiff(file="./plot/stable oxygen in Feb-Nov preciptation for 1950-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(pre.oxy2.11.plot)
dev.off()
tiff(file="./plot/stable oxygen in Feb-Nov preciptation for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(pre.oxy2.11.longplot)
dev.off()
## Annual means/SD of seasonal precipitation-d18O subsets.
## NOTE(review): pre.oxy5.8, pre.oxy2.4 and pre.oxy2.10 (May-Aug, Feb-Apr,
## Feb-Oct month subsets, presumably) are created elsewhere in the file --
## confirm their definitions before relying on these summaries.
pre.oxy5.8.mean<-pre.oxy5.8 %>% group_by(Var1)%>%
summarise(mean.value=mean(value,na.rm=TRUE))
pre.oxy2.4.mean<-pre.oxy2.4 %>% group_by(Var1)%>%
summarise(mean.value=mean(value,na.rm=TRUE))
pre.oxy2.10.sd<-pre.oxy2.10 %>% group_by(Var1)%>%
summarise(sd.value=sd(value,na.rm=TRUE))
## Absolute seasonal difference in precipitation d18O, 1900-2014
## (pre.diff and the regression fit.abs are computed earlier).
tiff(file="./plot/diff in preciptation for 1900-now.tiff",width = 12,height = 8,units ="cm",compression="lzw",bg="white",res=600)
## Fixed partial argument matching: 'xli=' silently matched xlim and the
## bare "l" relied on positional matching of type; both are now explicit.
plot(pre.diff[,1],
abs(pre.diff[,2]),type="l",xlim=c(1900,2014),
xlab="year",ylab="Difference in absolute")
abline(fit.abs,lty=2)
## Fixed: text()'s argument is 'labels' (the original 'label=' only worked
## through partial matching). The annotated statistics are hard-coded --
## keep them in sync with summary(fit.abs).
text(1950,1.5,
labels=expression(paste(italic(slope),'=-0.0093, ',italic(R)^2, '= 0.08, ', italic(p),'= 0.003')))
dev.off()
## 7.8.plot the trend of vapor d18O-----
###
## Water-vapour d18O (ISOGSM): reshape to long form, then Feb-Nov subsets
## for 1950-2010 and the full 1900-2010 record. Note gather() makes the
## 'month' column character; %in% against 2:11 still matches via coercion.
monthvp.oxylong <- gather(monthvp.oxy, key = "month", value = "d18O", -year)
monthvp.oxy2.11 <- subset(monthvp.oxylong,
                          month %in% 2:11 & year > 1949 & year < 2011)
monthvp.oxy2.11long <- subset(monthvp.oxylong, month %in% 2:11 & year > 1899)
## Monthly trend p-values via the project helper pvaluecal() (assumed to
## return one numeric p-value -- TODO confirm).
## Fixed: replaced the NA-seeded cbind() grow loop (spurious leading NA,
## '=' assignment) with a preallocated vapply, matching the precipitation
## p-value computation above.
vp.months <- unique(monthvp.oxy2.11long$month)
p.vals <- vapply(vp.months,
                 function(m) pvaluecal(m, group = 2, data = monthvp.oxy2.11),
                 numeric(1))
names(p.vals) <- vp.months
## Vapour d18O facet plots (1950-2010 and 1900-2010); only the long
## version is exported as Figure S8.
monthvp.oxy2.11.plot<-ggplot(monthvp.oxy2.11,aes(year,d18O,group=month,col=factor(month,levels=c(2:11))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels = c(2:11))~., scales="free")+
scale_x_continuous(expand = c(0.01,0.01))+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
xlab(label = "Year")+ylab(label = expression(paste(" Water vapour ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
monthvp.oxy2.11.longplot<-ggplot(monthvp.oxy2.11long,aes(year,d18O,group=month,col=factor(month,levels=c(2:11))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels=c(2:11))~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
scale_x_continuous(expand = c(0.01,0.01))+
xlab(label = "Year")+ylab(label = expression(paste("Water vapour ",delta^"18","O in precipitation (โฐ)")))+
guides(col=guide_legend(title="Month"))
tiff(file="./plot/Figure S8. stable oxygen in Feb-Nov vapour for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(monthvp.oxy2.11.longplot)
dev.off()
## 7.9 Variability of seasonal mean climate-------
## Seasonal climate means per year: July-September and March-June.
## NOTE(review): this assumes ny.mymdata already carries an 'scpdsi'
## column (merged elsewhere) -- confirm before running this section alone.
clim.july_sept1<-subset(ny.mymdata,month %in% c(7,8,9))%>%
group_by(year)%>%
summarise(mean.preday=mean(pre.day,na.rm=TRUE),mean.tmean=mean(tmean,na.rm=TRUE),
mean.presure=mean(water.pressure,na.rm=TRUE),mean.rh=mean(rh,na.rm=TRUE),
mean.pre=mean(pre,na.rm=TRUE),mean.tmin=mean(tmin,na.rm=TRUE),
mean.tmax=mean(tmax,na.rm=TRUE),mean.ssd=mean(ssd,na.rm=TRUE),
mean.vpd=mean(vpd,na.rm=TRUE),mean.evp=mean(evp,na.rm=TRUE),
mean.pdsi=mean(scpdsi,na.rm = TRUE))
clim.mar_jun <- subset(ny.mymdata,month %in% c(3:6))%>%
group_by(year)%>%
summarise(mean.3.6preday=mean(pre.day,na.rm=TRUE),mean.3.6tmean=mean(tmean,na.rm=TRUE),
mean.3.6presure=mean(water.pressure,na.rm=TRUE),mean.3.6rh=mean(rh,na.rm=TRUE),
mean.3.6pre=mean(pre,na.rm=TRUE),mean.3.6tmin=mean(tmin,na.rm=TRUE),
mean.3.6tmax=mean(tmax,na.rm=TRUE),mean.3.6ssd=mean(ssd,na.rm=TRUE),
mean.3.6vpd=mean(vpd,na.rm=TRUE),mean.3.6evp=mean(evp,na.rm=TRUE),
mean.3.6pdsi=mean(scpdsi,na.rm = TRUE))
## NOTE(review): cbind assumes both summaries cover exactly the same years
## in the same order -- a join on year would be safer.
clim.july_sept <- cbind(clim.july_sept1,clim.mar_jun[-1])
clim.oct<-subset(ny.mymdata,month %in% c(10))  # October monthly values, unaveraged
head(clim.july_sept)
head(clim.oct)
head(clim.mar_jun)
## Relative-humidity trend panels: October (rh.10) and the two seasonal
## means (rh.7.9: Jul-Sep in blue, Mar-Jun in green).
## NOTE(review): the annotated slope/R2/p values are hard-coded text; they
## must be kept in sync with the summary(lm(...)) outputs printed below.
rh.10<-ggplot(clim.oct,aes(year,rh))+
geom_line()+geom_point()+
#stat_smooth(method=lm,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "loess",span=0.2,se=F,col=1,lwd=1.2,lty=1)+
xlab(label = "Year")+ylab(label = "Relative humidity (%)")+
# annotate("text",x=1988,y=(min(clim.oct$rh,na.rm = TRUE))*1.02,
# label=expression(paste(italic(slope),'= 0.038, ',italic(R)^2, '= 0.06, ', italic(p),'= 0.05')))+
mythemeplot()+
theme(axis.title.x=element_blank())
## Trend diagnostics; [1:51] restricts to the first 51 years (1953-2003).
summary(lm( clim.oct$rh[1:51]~clim.oct$year[1:51]))
summary(lm( clim.oct$rh~clim.oct$year))
rh.7.9<-ggplot(clim.july_sept,aes(year,mean.rh))+
geom_line(col=4)+geom_point(col=4)+
stat_smooth(method=lm,lty=2,lwd=1.0,col=4)+
geom_smooth(method = "loess",span=0.2,se=F,lty=1,lwd=1.2)+
geom_line(aes(year,mean.3.6rh),col=3)+geom_point(aes(year,mean.3.6rh),col=3)+
stat_smooth(aes(year,mean.3.6rh),method=lm,lty=2,lwd=1.0,col=3)+
geom_smooth(aes(year,mean.3.6rh),method = "loess",span=0.2,se=F,col=3,lwd=1.2,lty=1)+
xlab(label = "Year")+ylab(label = "Relative humidity (%)")+
annotate("text",x=1980,y=(min(clim.july_sept$mean.rh,na.rm = TRUE))*1.02,col=4,
label=expression(paste(italic(slope),'= 0.037, ',italic(R)^2, '= 0.04, ', italic(p),'= 0.07')))+
annotate("text",x=1980,y=(min(clim.july_sept$mean.rh,na.rm = TRUE))*1.04,col=3,
label=expression(paste(italic(slope),'= -0.038, ',italic(R)^2, '= 0.06, ', italic(p),'= 0.03')))+
mythemeplot()+
theme(axis.title.x=element_blank())
summary(lm( clim.july_sept$mean.rh~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6rh~clim.july_sept$year))
## Mean-temperature trend panels. Temperatures are stored in 0.1 deg C
## (hence the "(0.1 degree)" axis label and the /10 in the lm below).
tmean.10 <- ggplot(clim.oct,aes(year,tmean,col="Oct"))+
geom_line()+geom_point()+
#stat_smooth(method=loess,span=0.02,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "lm",lty=2)+
geom_smooth(method = "loess",span=0.2,se = FALSE,lty=1)+
xlab(label = "Year")+ylab(label = "Temperature (0.1 degree)")+
annotate("text",x=1990,y=(min(clim.oct$tmean))*1.02,
label=expression(paste(italic(slope),'= 0.212, ',italic(R)^2, '= 0.09, ', italic(p),'= 0.012')))+
scale_colour_manual(name="Season",
values=c("Oct" = 1))+
mythemeplot()+
theme(legend.position = "top",axis.title.x=element_blank())
summary(lm( clim.oct$tmean/10~clim.oct$year))
## Seasonal means: Jul-Sep (blue, col=4) and Mar-Jun (green, col=3).
tmean.7.9 <- ggplot(clim.july_sept,aes(year,mean.tmean,col="July-Sept"))+
geom_line(aes(col="July-Sept"))+geom_point()+
#stat_smooth(method=loess,se=FALSE,lty=2,lwd=1.0)+
#geom_smooth(method = "lm",col="black",lty=2)+
geom_smooth(method = "loess",span=0.2,col=4,se = F,lty=1)+
geom_line(aes(year,mean.3.6tmean,col="Mar-June"))+geom_point(aes(year,mean.3.6tmean,col="Mar-June"))+
geom_smooth(aes(year,mean.3.6tmean,col="Mar-June"),method = "loess",span=0.2,se=F,col=3,lwd=1.5,lty=1)+
geom_smooth(aes(year,mean.3.6tmean),
method = "lm",col=3,lty=2)+
scale_colour_manual(name="Season",
values=c("Mar-June" = 3, "July-Sept"=4))+
xlab(label = "Year")+ylab(label = "Temperature (0.1 degree)")+
annotate("text",x=1980,y=(max(clim.july_sept$mean.tmean,na.rm = TRUE))*0.8,col=3,
label=expression(paste(italic(slope),'= 0.203, ',italic(R)^2, '= 0.23, ', italic(p),'< 0.001')))+
mythemeplot()+
theme(legend.position = "top",
axis.title.x=element_blank())
summary(lm( clim.july_sept$mean.tmean~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6tmean~clim.july_sept$year))
## scPDSI trend panels: October and the two seasonal means.
## NOTE(review): pdsi.10 adds both stat_smooth(method=lm) and
## geom_smooth(method="lm") -- two overlapping linear smoothers in
## different colors. Probably one was meant to be removed; left as-is
## because dropping one changes the rendered figure.
pdsi.10 <- ggplot(clim.oct,aes(year,scpdsi))+
geom_line()+geom_point()+
stat_smooth(method=lm,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "lm",col="black",lty=2)+
geom_smooth(method = "loess",span=0.2,col=1,lwd=1.2,se=F,lty=1)+
xlab(label = "Year")+ylab(label = "scPDSI")+
annotate("text",x=1990,y=(max(clim.oct$scpdsi))*0.95,
label=expression(paste(italic(slope),'= -0.025, ',italic(R)^2, '= 0.03, ', italic(p),'= 0.09')))+
mythemeplot()
summary(lm( clim.oct$scpdsi~clim.oct$year))
pdsi.7.9 <- ggplot(clim.july_sept,aes(year,mean.pdsi))+
geom_line(col=4)+geom_point(col=4)+
stat_smooth(method=lm,col=4,lty=2,lwd=1.0)+
geom_smooth(method = "loess",span=0.2,col=4,se=F,lty=1,lwd=1.2)+
geom_line(aes(year,mean.3.6pdsi),col=3)+
geom_point(aes(year,mean.3.6pdsi),col=3)+
stat_smooth(aes(year,mean.3.6pdsi),method=lm,col=3,lty=2,lwd=1.0)+
geom_smooth(aes(year,mean.3.6pdsi),method = "loess",span=0.2,col=3,se=F,lty=1,lwd=1.2)+
xlab(label = "Year")+ylab(label = "scPDSI")+
annotate("text",x=1980,y=(max(clim.july_sept$mean.pdsi,na.rm = TRUE))*0.95,col=4,
label=expression(paste(italic(slope),'= -0.022, ',italic(R)^2, '= 0.03, ', italic(p),'< 0.09')))+
annotate("text",x=1980,y=(max(clim.july_sept$mean.pdsi,na.rm = TRUE))*0.80,col=3,
label=expression(paste(italic(slope),'= -0.033, ',italic(R)^2, '= 0.12, ', italic(p),'= 0.003')))+
mythemeplot()
summary(lm( clim.july_sept$mean.pdsi~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6pdsi~clim.july_sept$year))
## Figure S9: 3x2 grid of the seasonal (left) and October (right) trend
## panels for temperature, RH and scPDSI.
tiff("./plot/Figure S9 climate variability.tiff", width = 20, height = 16,
units = "cm",res = 400,bg = "transparent",compression = "lzw",
family = "serif")
ggarrange(tmean.7.9,tmean.10,
rh.7.9,rh.10,
pdsi.7.9,pdsi.10,
labels = c("a","a1","b","b1",
"c","c1"),
nrow = 3,ncol=2,
label.x = 0.1,
label.y = c(0.95,0.95,1.15,1.15,1.15,1.15),
heights = c(0.55,0.45,0.5),
align = "hv",
#common.legend = TRUE,
font.label = list(size=24,family="serif"))
dev.off()
| /2020-02 updated code for extreme stable oxygen data.R | no_license | GuobaoXu/Tree-ring-max-and-min-oxygen | R | false | false | 64,994 | r | ##This code is used to process the meteorological data and climate analysis for the tree-ring stable oxygen isotope extreme values
## The aims are:
### 1. Process the meteorological data and climate response.
### 2. Detect the signal of the tree-ring stable oxygen isotope extreme
### 3. Climate reconstruction and analysis
###
## Author: GB Xu, xgb234@lzb.ac.cn
## Date: 2019-6-14
## Part 0 Initial call the packages-----
library(openxlsx)
library(dplyr)
library(reshape2)
library(ggplot2)
library(treeclim)
library(grid)
library(dplR)
library(treeclim)
library(MASS)
library(yhat)
library(ggpubr)
## Part 1. Process the climate data --------
## 1.1 read and load the climate data-----
## Read the raw meteorological export and keep station 57776 only.
## 32766 is the provider's missing-value flag -- TODO confirm against the
## data dictionary of the source file.
mdata <- read.table("E:/Rwork/Freezingrain/S201806051426312322400.txt", header = TRUE)
ny.mdata <- subset(mdata, mdata$V01000 == 57776)
ny.mdata[ny.mdata == 32766] <- NA
## Keep month/station/year plus the climate variables, then rename.
## NOTE: per later comments, precipitation/temperature/vapour pressure are
## stored in 0.1 units.
ny.mymdata.full <- subset(ny.mdata, select = c(1:3, 11, 17, 21, 23, 26, 6, 14, 5))
varname <- c("month", "station", "year", "pre.day",
             "tmean", "water.pressure", "rh", "pre", "tmin", "tmax", "ssd")
colnames(ny.mymdata.full) <- varname
## Monthly climatology for 1953 onward.
## Fixed: summarise_each()/funs() are deprecated in dplyr; summarise_all()
## with na.rm forwarded is the drop-in replacement with identical output.
ny.mymdata.mean <- ny.mymdata.full %>%
  filter(year > 1952) %>%
  group_by(month) %>%
  summarise_all(mean, na.rm = TRUE)
## 1.1.1 processing the missing data and write as ssd1 and evp.-----
## Impute missing sunshine-duration (ssd) and evaporation (evp) values
## with multiple imputation (mice), keeping the 3rd completed dataset.
## NOTE(review): no set.seed() before mice(), so the imputed values are not
## reproducible; 'pooled' is computed but never used; and ny.mymdata is
## referenced before any visible definition -- presumably created earlier
## in the workspace (possibly it should be ny.mymdata.full -- TODO confirm).
library(mice)
imp<-mice(ny.mymdata[,c(1,3,11)],10)
fit<-with(imp,lm(ssd~month))
pooled<-pool(fit)
result4=complete(imp,action=3)
ny.mymdata$ssd1<-result4$ssd
## Evaporation for station 57776: monthly means, 32766 = missing flag.
evp<-read.xlsx("E:/Rwork/Freezingrain/evp57776.xlsx")
head(evp)
evp$V8[evp$V8 == 32766] <- NA
evp.mean<-evp %>% group_by(V5,V6)%>%
summarise(mean.evp=mean(V8,na.rm=TRUE))
evp.mean<-data.frame(evp.mean)
colnames(evp.mean)<-c('year','month','evp')
head(evp.mean)
imp<-mice(evp.mean,10)
fit<-with(imp,lm(evp~month))
pooled<-pool(fit)
result4=complete(imp,action=3)
## NOTE(review): the hard-coded 1:773 row count must match nrow(ny.mymdata).
ny.mymdata$evp<-result4$evp[c(1:773)]
ny.mymdata<-subset(ny.mymdata,year>1952 & year<2015)
## precipitation, temperature and water vapor pressure unit is 0.1..
##### 1.1.2 calculate the VPD based on the temperature and RH----
## Saturation vapour pressure (hPa) from the Magnus-type formula
## (Bolton 1980): e_s = 6.112 * exp(17.67*T / (T + 243.5)), with T in
## degrees C. tmean is stored in 0.1 deg C, hence the *0.1 rescaling.
## Fixed: '<-' instead of '=' for top-level assignment, and the repeated
## tmean conversion is hoisted into one variable.
tmean_degc <- ny.mymdata$tmean * 0.1
ea_o <- 6.112 * exp(17.67 * tmean_degc / (tmean_degc + 243.5))
## VPD = e_s * (1 - RH/100); rh is in percent.
vpd <- ea_o * (1 - ny.mymdata$rh / 100)
ny.mymdata$vpd <- vpd
#1.1.3 plot the climtagraph at month------
## Climate diagrams with a rescaled secondary axis.
## NOTE(review): the locals 'c', 'd', 'r', 's' shadow base functions; and the
## offset is computed as preclim[1] - tclim[1]*d while the plotted series
## uses c + tmean1/d -- an exact anchoring would use tclim[1]/d. The error
## is tiny for these ranges, but confirm the intended mapping.
library(plotrix)
### calculate the ratio between y1 and y2
preclim<-c(50,300)
tclim<-c(0.2,25)
d<-diff(tclim)/diff(preclim)
c<-preclim[1]-tclim[1]*d
## pre/tmean are stored in 0.1 units -> convert to mm and deg C.
ny.mymdata.mean$pre1<-ny.mymdata.mean$pre/10
ny.mymdata.mean$tmean1<-ny.mymdata.mean$tmean/10
## Panel b of Figure 1: precipitation bars + temperature line (red, right axis).
clima<-ggplot(data=ny.mymdata.mean,aes(x=month))+
geom_bar(aes(y=pre1),
stat = "identity",position = "identity")+
geom_line (aes(y=c+(tmean1)/d),col="red")+
geom_point(aes(y=c+(tmean1)/d),col="red")+
xlab("Month")+
scale_y_continuous("Precipitation (mm)",
sec.axis = sec_axis( ~ (. - c)*d,
name = "Temperature (โ)"),
expand=c(0.01,0.05))+
scale_x_continuous("Month", breaks = 1:12,expand=c(0.01,0.05)) +
mythemeplot()+
theme(plot.title = element_text(hjust = 0.5))+
theme(axis.line.y.right = element_line(color = "red"),
axis.ticks.y.right = element_line(color = "red"),
axis.text.y.right = element_text(color = "red"),
axis.title.y.right = element_text(color = "red")) +
theme(plot.margin = unit(c(0,-0.2,0,0),"lines"))
## Panel c: sunshine-duration bars + relative humidity (blue, right axis).
ssdclim<-c(30,230)
rhclim <-c(70,95)
s<-diff(rhclim)/diff(ssdclim) #3 becareful the relationship, y2 and y1
r<-ssdclim[1]-rhclim[1]/s ## the relationship scale between rh and ssd.
climb<-ggplot(data=ny.mymdata.mean,aes(x=month))+
geom_bar( aes(y=ssd/10),
stat = "identity",position = "identity")+
geom_line (aes(y=r+(rh)/s),col="blue")+
geom_point(aes(y=r+(rh)/s),col="blue")+
xlab("Month")+
scale_y_continuous("SSD (h)",
#limits = c(50,400),
sec.axis = sec_axis(~ (. - r)*s,
name = "Relative humidity (%)"),
expand=c(0.01,0.05) ) +
scale_x_continuous("Month", breaks = 1:12,
expand=c(0.01,0.05)) +
mythemeplot()+
theme(plot.title = element_text(hjust = 0.5))+
theme(axis.line.y.right = element_line(color = "blue"),
axis.ticks.y.right = element_line(color = "blue"),
axis.text.y.right = element_text(color = "blue"),
axis.title.y.right = element_text(color = "blue")) +
theme(plot.margin = unit(c(0,-0.1,0,0),"lines"))
## 1.1.4 load the scPDSI data from CRU---
## Read CRU scPDSI for the study grid cell.
## NOTE(review): 'mon' is defined elsewhere; it must be a character vector
## containing "year" plus the month names for the subset below to work --
## TODO confirm its definition.
crupdsi<-read.table("./cru/iscpdsi_112.5-112.7E_27.27-27.5N_n.dat",
header = FALSE)
colnames(crupdsi)<-mon
crupdsi <- subset(crupdsi,year<2015)
# 1.2 compare the d18O data between ISOGSM model and observation-----
## The precipitation d18O data from ISOGSM model
## The precipitation data from the GNIP Changsha station
#### 1.2.1 Process the d18O data from Changsha Station-----
## GNIP Changsha monthly precipitation-d18O observations; reshape to the
## same (Var1 = year, Var2 = month, value) layout as the melted model data.
oxy.changsha <- read.xlsx("./rawdata/wiser_gnip-monthly-cn-gnipm.xlsx",
sheet = "Data",colNames = TRUE)
head(oxy.changsha)
oxy.changsha.reshape <- subset(oxy.changsha,select=c(SampleName, month, O18))
colnames(oxy.changsha.reshape) <- c("Var1","Var2","value")
##split the data from GNIP
## Keep the post-1999 observation period (years 2000+).
oxy.changsha.reshape.1999 <-subset(oxy.changsha.reshape,Var1>1999)
#### a. for the precipitation -----
## ISOGSM model output for grid point x061y062 (monthly, from 1871):
## drop the two header rows and the first column, then derive precipitation
## and vapour d18O as (isotope rate / total rate - 1) * 1000 (permil).
## Column meanings (V5 = PRATE, V6 = PRATE18O, V14/V15 = vapour analogues,
## V17 = temperature, V18 = RH) come from the model's output layout --
## TODO confirm against the IsoGSM documentation.
data <- read.delim("F:/IsoGSM/x061y062_ensda_monthly.dat",header = FALSE)
data1<-data[c(-1,-2),c(-1)]
data1.ts<-ts(data1,start = c(1871,1),frequency = 12)
p.oxyts<-ts((data1$V6/data1$V5-1)*1000,start = c(1871,1),frequency = 12)
p.oxy<-(data1$V6/data1$V5-1)*1000
p.oxy[abs(p.oxy)>13]<-NA ## remove the outliers, set the threshold is abs(13), which is based on the mean value of multi-year observation.
p.rate<-matrix(data1$V5,ncol=12,byrow=TRUE)
p.rateoxy<-matrix(p.oxy,ncol=12,byrow=TRUE)## here, to calculate the oxygen according to original data!!
##where SMOW=[H18O]/[H2O] or [HDO]/[H2O] in Standard Mean Ocean Water.
# To calculate delta18o in precipitation, do followings:
# delta18O_p[permil]=(PRATE18O/PRATE-1)*1000
rownames(p.rateoxy)<-c(1871:2010)
p.tmp<-matrix(data1$V17,ncol=12,byrow=TRUE)
p.rh<-matrix(data1$V18,ncol=12,byrow=TRUE)
## Quick visual check of the raw series vs the derived d18O.
plot(data1.ts[,2])
lines(p.oxyts,col=2)
### b. process for the stable oxygen isotope of the water vapor at monthly scales-----
vp.oxy<-(data1$V15/data1$V14-1)*1000
vp.oxy[abs(vp.oxy)>30]<-NA ## remove the outliers, set the threshold is abs(30)
## reference: Xie Yulong, Zhang Xinping, et al., Monitoring and analysis of stable isotopes of the near surface water vapor in
## Changsha, Environmental Science, 2016,37(2):475-481
monthvp.oxy<-as.data.frame(matrix(vp.oxy,ncol=12,byrow=TRUE))
colnames(monthvp.oxy)<-c(1:12)
monthvp.oxy<-cbind(year=c(1871:2010),monthvp.oxy)
## Compare GNIP observations with the ISOGSM model by monthly cycle.
## NOTE(review): this overwrites the vector p.oxy defined above with a data
## frame of the same name; the rep(...,60) labels assume exactly 60 rows in
## each subset; and both groups are assigned dates starting 1988-01-01 even
## though the GNIP rows are from years > 1999 -- the date column is unused
## in the plot (x = Var2, month), but confirm before using it elsewhere.
p.rateoxy.shape<-melt(p.rateoxy)
p.rateoxy.shape.1988 <-
subset(p.rateoxy.shape,Var1 >1987 & Var1 <1993)
# p.rateoxy.shape.1988 <-
# subset(p.rateoxy.shape,Var1 %in% oxy.changsha.reshape$Var1)
p.oxy <- rbind(oxy.changsha.reshape.1999,
p.rateoxy.shape.1988[order(p.rateoxy.shape.1988$Var1),])
p.oxy$type <- c(rep("Changsha",60),
rep("Model",60))
p.oxy$date <- c(seq.Date(from = as.Date('1988-01-01'),by = 'month', length.out = 60),
seq.Date(from = as.Date('1988-01-01'),by = 'month', length.out = 60))
## Panel d of Figure 1: seasonal cycle of d18O, observation vs model.
oxy.p <- ggplot(p.oxy,aes(x=Var2,y=value, na.rm=TRUE,color=type))+
geom_point()+
geom_smooth(method="loess",se=TRUE,lty=1,lwd=1.5,aes(fill =type))+
xlab("Month")+ylab(expression(paste(delta ^"18","O (โฐ)")))+
scale_x_continuous(limits = c(1,12),breaks=c(1:12),
labels = c(1:12))+
mythemeplot()+
theme(legend.position = c(0.2,0.15),legend.title = element_blank())+
theme(plot.margin = unit(c(0,0,0,0),"lines"))
## Part 2 Tree-ring stable oxygen isotope data load and plot-----
## This part is show the stable oxygen isotope
##
## 2.1 plot the position of the extreme values------
## Intra-ring position of the d18O extremes (V4 = relative position to the
## ring boundary), split by earlywood/latewood ('wood' column).
## NOTE: "stabe" is a typo for "stable" but the names are used consistently
## below, so renaming here would break nothing visible yet is avoided.
stabe.all.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/omax.xlsx")
stabe.allEW.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/allEWomax.xlsx")
stabe.allLW.o.max <- read.xlsx("E:/Rwork/highresolution/rawdata/allLWomax.xlsx")
stabe.all.o.min <- read.xlsx("E:/Rwork/highresolution/rawdata/omin.xlsx")
## Histograms of extreme positions (bin width 0.1), EW green / LW dark green.
max.oplot<- ggplot(stabe.all.o.max, aes(x=V4, color=wood)) +
geom_histogram(fill="white", alpha=0.5,
position="identity",binwidth = 0.1)+
mythemeplot()+
xlab('Proportion to boundary')+ylab("count")+
theme(legend.title = element_blank(),
axis.title.y = element_blank(),
plot.margin = unit(c(0.2,0,0,0),"lines"))+
scale_x_continuous(
labels = scales::number_format(accuracy = 0.1))+
scale_color_manual(values=c(LW="darkgreen",
EW="green"))
maxEW.oplot<- ggplot(stabe.allEW.o.max, aes(x=V4, color=wood)) +
geom_histogram(fill="white", alpha=0.5,
position="identity",binwidth = 0.1)+
#scale_color_manual(values=c("#00BFC4"))+
mythemeplot()+
xlab('Proportion to boundary')+ylab("count")+
theme(legend.title = element_blank(),
axis.title.y = element_blank(),
plot.margin = unit(c(0.2,0,0,0),"lines"))+
scale_color_manual(values=c(LW="darkgreen",
EW="green"))
maxLW.oplot<- ggplot(stabe.allLW.o.max, aes(x=V4, color=wood)) +
geom_histogram(fill="white", alpha=0.5,
position="identity",binwidth = 0.1)+
#scale_color_manual(values=c("#00BFC4"))+
mythemeplot()+
xlab('Proportion to boundary')+ylab("count")+
theme(legend.title = element_blank(),
axis.title.y = element_blank(),
plot.margin = unit(c(0.2,0,0,0),"lines"))+
scale_color_manual(values=c(LW="darkgreen",
EW="green"))
min.oplot <-ggplot(stabe.all.o.min, aes(x=V4,color=wood)) +
geom_histogram(fill="white", alpha=0.5,
position="identity",binwidth = 0.1)+
xlab('Proportion to boundary')+ylab("count")+
mythemeplot()+
theme(legend.title = element_blank(),
plot.margin = unit(c(0.2,0,0,0),"lines"))+
scale_x_continuous(
labels = scales::number_format(accuracy = 0.1))+
scale_color_manual(values=c(LW="darkgreen",
EW="green"))
## Time-series panel of the five chronologies (min/mean/max/EWmax/LWmax)
## with linear trends for the significant series.
## NOTE(review): scale_x_date is added twice; the second call (with name,
## breaks, limits) replaces the first, so ggplot prints a "scale replaced"
## message -- the first call is redundant.
stable.all.omin.mean.date <- read.xlsx("E:/Rwork/highresolution/rawdata/oxy_all.xlsx")
oxyplot<-ggplot(data=stable.all.omin.mean.date)+
scale_x_date(expand = c(0.01,0.01))+
geom_line(aes(x=date,y = min,col="Min"),lwd=1.0)+
geom_smooth(aes(x=date,y = min),method = "lm",lty=2,se=FALSE)+
geom_line(aes(x=date,y = mean,col="Mean"),lwd=1.0)+
#geom_smooth(aes(x=date,y = mean),method = "lm",lty=2)+
geom_line(aes(x=date,y = max,col="Max"),lwd=1.0)+
geom_smooth(aes(x=date,y = max),method = "lm",
col="DarkBlue",lty=2,se=FALSE)+
geom_line(aes(x=date,y = LWmax,col="LWmax"),lwd=1.0)+
geom_smooth(aes(x=date,y = LWmax),method = "lm",
col="darkgreen",lty=2,se=FALSE)+
geom_line(aes(x=date,y = EWmax,col="EWmax"),lwd=1.0)+
geom_smooth(aes(x=date,y = EWmax),method = "lm",
col="green",lty=2,se=FALSE)+
scale_x_date(name="Year",
expand = c(0,0),
breaks = "10 years",
labels=date_format("%Y"),
limits = as.Date(c("1900-01-01","2015-06-01")))+
ylab(expression(paste(delta^"18","O (โฐ)")))+
scale_color_manual(values=c(LWmax="darkgreen",
EWmax="green",
Max="DarkBlue",
Mean="DeepSkyBlue",Min="blue"))+
mythemeplot()+
guides(col = guide_legend(ncol = 1))+
theme(legend.position = c(0.9,0.85))+
theme(legend.background = element_rect(fill = "transparent"),
legend.box.background = element_rect(fill = "transparent", colour = NA),
legend.key = element_rect(fill = "transparent"),
#legend.spacing = unit(2, "lines")
)+
theme(legend.title = element_blank())
#reposition_legend(pdsiplot1, 'left')
#
## Linear trends of the three max chronologies and their pairwise
## Pearson correlations (printed to console; values cited in the text).
summary(lm(stable.all.omin.mean.date$EWmax~stable.all.omin.mean.date$year))
summary(lm(stable.all.omin.mean.date$LWmax~stable.all.omin.mean.date$year))
summary(lm(stable.all.omin.mean.date$max~stable.all.omin.mean.date$year))
cor.test(stable.all.omin.mean.date$EWmax,stable.all.omin.mean.date$LWmax)
cor.test(stable.all.omin.mean.date$EWmax,stable.all.omin.mean.date$max)
cor.test(stable.all.omin.mean.date$LWmax,stable.all.omin.mean.date$max)
### try to insert the mean value for parameters---
## Inset boxplot of the five d18O series, to be overlaid on oxyplot.
oxygen.extremlong <- gather(data=stable.all.omin.mean.date,
key="para",
value = "d18",min,max,mean,EWmax,LWmax,-year)
oxy.meanbox <-
# ggplot()+
# geom_boxplot(data=extreme.predata.mean,
# aes(x=label,y=value,col=label))+
ggboxplot(oxygen.extremlong, x = "para", y = "d18",
color = "para",width = 0.4,bxp.errorbar.width = 0.1,outlier.shape = NA)+
xlab("")+ylab("")+
scale_y_continuous(breaks = c(25,30,35)) +
scale_color_manual(values=c(LWmax="darkgreen", EWmax="green",max="DarkBlue",
mean="DeepSkyBlue",min="blue"))+
theme_classic()+
mytranstheme()+
theme(legend.position = "none",axis.line.x = element_blank(),
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x= element_blank())
# called a "grop" in Grid terminology
## Convert the inset to a grob and pin it inside the main time-series plot.
oxymean_grob <- ggplotGrob(oxy.meanbox)
oxyplot1 <- oxyplot + annotation_custom(grob = oxymean_grob, xmin = as.Date("1940-01-01"),xmax=as.Date("1970-06-01"), ymin = 32, ymax = 35.5)
## here using the ggmap to plot the samplign site
## the map data are too large and I did not upload them,
## If you need the map data, please contact me.
# source(file = "./code/sampling_map.R")
## Export Figure 1 (map + climate diagrams + model/obs comparison) and
## Figure 2 (extreme-position histograms + chronology panel).
## NOTE(review): both output filenames end with a trailing space before
## ".tiff"... actually before the quote -- "...tiff " -- which produces
## awkward file names on disk; confirm whether that is intentional.
tiff("./plot/Figure 1-Sampling site and climate diagram.tiff ",
height = 16,width=18,units = "cm",res=800,compression = "lzw",
family = "serif")
ggarrange(gg_combine,
ggarrange(clima,climb,oxy.p,
ncol=3,labels=c("b","c","d"),
label.x = 0.2,
label.y = 0.99,
align = "hv",
font.label = list(size=22,family="serif")),
ncol=1,nrow = 2,
heights = c(2, 1.4),
labels = c("a",""),
label.x = 0.67,
label.y = 0.99,
align = "hv",
font.label = list(size=22,family="serif"))
dev.off()
tiff("./plot/Figure 2-GRL distribution & variability of oxygen max and min.tiff ",
height = 16,width=20,units = "cm",res=800,compression = "lzw",
family = "serif")
ggarrange(
# ggarrange(clima,climb,oxy.p,
# ncol=3,labels=c("a","b","c"),
# label.x = c(0.17,0.17,0.19),
# label.y = 1,
# font.label = list(size=22,family="serif")),
ggarrange(min.oplot,max.oplot,maxEW.oplot,maxLW.oplot,
ncol=4,labels = c("a","b","c","d"),
#label.x = c(0.18,0.17,0.17,0.17),
label.x =0.25,
label.y = 1.0,
align = "v",
common.legend = TRUE, legend="right",
font.label = list(size=22,family="serif")),
oxyplot1,
ncol=1,nrow = 2,
heights = c(1.5,2),
labels = c("","e"),
label.x = 0.08,
label.y = 0.99,
font.label = list(size=22,family="serif"))
dev.off()
## Part 3. Climate response analysis----
## 3.3.1 load the chronology-----
#iso.chron1 <- as.data.frame(stable.all.omin.mean[2])
#iso.chron1 <- as.data.frame(stable.all.omax.mean[2])
#iso.chron1 <- as.data.frame(oc.mean[3])
## for the EW and LW min and max
# iso.chron1 <- as.data.frame(stable.allEW.omax.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omax.mean$mean)
# signal is weak for the EW omax
# # for the LW and EW max lag one year
# iso.chron1 <- as.data.frame(LWEWmax)
# rownames(iso.chron1) <- c(1900:2013)
#
# iso.chron1 <- as.data.frame(stable.allEW.omin.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omin.mean$mean)
# PDSI (JJA) is the strongest correlation -0.6 for the min oxygen
#
## NOTE(review): iso.chron1 must be assigned by manually uncommenting one
## of the chronology options listed above before this line runs -- the
## script relies on interactive selection of the chronology to analyse.
rownames(iso.chron1) <- c(1900:2014)
head(iso.chron1)
#iso.chron1 <- c.min.dis ## this format for carbon
#iso.chron1 <- pin[2]
### 3.3.2 climate response-----
###
### NOTE: This part and heatmap plot should be looped using the different chronologies(max,min, maxLw....),
###
## Monthly/seasonal bootstrapped correlations (treeclim::dcc) between the
## active chronology and each climate variable.  Every call selects
## ny.mymdata columns c(3,1,k): presumably year, month, and the variable in
## column k -- TODO confirm the column layout of ny.mymdata against its
## construction.  The selection window .range(var,-10:12) spans previous-year
## October through current December; the .mean()/.sum() terms append the JJA,
## JAS (7:9) and Aug-Nov (8:11) seasonal composites (precipitation-type
## variables are summed, state variables averaged; note `pre` uses 4:8
## instead of 7:9 -- apparently deliberate, so do not "regularize" it).
tmean.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,5)]),
                 var_names =c("tem"), method = "correlation",
                 selection=.range("tem",-10:12)+.mean("tem",6:8)+.mean("tem",7:9)+.mean("tem",8:11))
evp.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,13)]),
               var_names =c("evp"), method = "correlation",
               selection=.range("evp",-10:12)+.mean("evp",6:8)+.mean("evp",7:9)+.mean("evp",8:11))
dtr.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,15)]),
               var_names =c("dtr"), method = "correlation",
               selection=.range("dtr",-10:12)+.mean("dtr",6:8)+.mean("dtr",7:9)+.mean("dtr",8:11))
tmax.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,10)]),
                var_names =c("tmax"), method = "correlation",
                selection=.range("tmax",-10:12)+.mean("tmax",6:8)+.mean("tmax",7:9)+.mean("tmax",8:11))
tmin.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,9)]),
                var_names =c("tmin"), method = "correlation",
                selection=.range("tmin",-10:12)+.mean("tmin",6:8)+.mean("tmin",7:9)+.mean("tmin",8:11))
rh.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,7)]),
              var_names =c("rh"), method = "correlation",
              selection=.range("rh",-10:12)+.mean("rh",6:8)
              +.mean("rh",7:9)+.mean("rh",8:11))
ssd.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,12)]),
               var_names =c("ssd"), method = "correlation",
               selection=.range("ssd",-10:12)+.mean("ssd",6:8)+.mean("ssd",7:9)+.mean("ssd",8:11))
vpd.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,14)]),
               var_names =c("vpd"), method = "correlation",
               selection=.range("vpd",-10:12)+.mean("vpd",6:8)+.mean("vpd",7:9)+.mean("vpd",8:11))
pre.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,8)]),
               var_names =c("pre"), method = "correlation",
               selection=.range("pre",-10:12)+.sum("pre",6:8)+.sum("pre",4:8)+.sum("pre",8:11))
presure.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,6)]),
                   var_names =c("presure"), method = "correlation",
                   selection=.range("presure",-10:12)+.sum("presure",6:8)+.sum("presure",7:9)+.sum("presure",8:11))
pre.day.res <- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,4)]),
                   var_names =c("pre.day"), method = "correlation",
                   selection=.range("pre.day",-10:12)+.mean("pre.day",6:8)+.mean("pre.day",7:9)+.mean("pre.day",8:11))
## here the pdsi data are from CRU data from 1901-2017;
## read and load PDSI data excuted in detectVariabilityCRU.R [current WD]
## scPDSI comes as its own wide table (crupdsi), hence the explicit timespan.
pdsi.res<- dcc(iso.chron1,data.frame(crupdsi),
               timespan = c(1953,2013),
               var_names =c("pdsi"), method = "correlation",
               selection =.range("pdsi",-10:12)+.mean("pdsi",6:8)
               +.mean("pdsi",7:9)+.mean("pdsi",8:11))
# plot(tmean.res)
# plot(pre.day.res)
# plot(tmax.res)
# plot(tmin.res)
# plot(rh.res)
# plot(ssd.res)
# plot(pre.res)
# plot(presure.res)
# plot(evp.res)
# plot(vpd.res)
# plot(pdsi.res)
# plot(dtr.res)
## Heat-tile rendering of the 30-yr moving correlations with scPDSI.
## NOTE(review): `pdsi.movingres` is not created in this section -- it is
## presumably built earlier (or in a sourced script) with dynamic="moving";
## confirm it exists before this line runs.  The name `omin.miving` looks
## like a typo for "moving" but is kept in case later code references it.
omin.miving<-plot(pdsi.movingres)+
  scale_y_continuous(breaks = seq(0.5,2.5,1),labels=c("JJA","A-O","A-N"),
                     expand = c(0.005,0.005))+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white")+
  scale_x_continuous(breaks = seq(0.5,
                                  by = 1, length.out = ncol(pdsi.movingres$coef$coef)),
                     labels = names(pdsi.movingres$coef$coef),
                     expand = c(0.001,0.001))+
  mythemeplot()+xlab("")+ylab("")+
  theme(axis.ticks = element_line(),
        axis.text.x = element_blank())+
  theme(plot.margin = unit(c(0.5,0,-0.3,0),"lines"))+
  annotate("text", x=1, y=2.5, label="a",size=10,family="serif")
## 30-yr moving-window correlations for three seasonal scPDSI composites
## (JJA, Apr-Oct, Aug-Nov); sb = FALSE disables the stationarity bootstrap.
pdsi.movingres1<- dcc(iso.chron1,data.frame(crupdsi),
                      #timespan = c(1953,2014),
                      var_names =c("pdsi"), method = "correlation",
                      selection =.mean("pdsi",6:8)
                      +.mean("pdsi",4:10)+.mean("pdsi",8:11),
                      dynamic = "moving", win_size = 30,sb = FALSE)
## heatmap plot----
## Assemble the per-month coefficients of all variables into one long table
## for the heat-map.  18 entries per variable: previous-year Oct-Dec (lower
## case "o","n","d"), current Jan-Dec, plus the three seasonal composites.
month <- c("o","n","d","J","F","M","A","M","J","J","A","S","O","N","D","JJA","JAS","A-N")
corr.mon <- rbind(tmean.res$coef,tmax.res$coef,tmin.res$coef,pre.res$coef,rh.res$coef,pdsi.res$coef,
                  vpd.res$coef,evp.res$coef,ssd.res$coef)
corr.mon$coef.raw <-corr.mon$coef
clim.vars.name <- c("TEM","TMAX","TMIN","PRE","RH","scPDSI","VPD","EVP","SSD")
## getgroup() is a project helper (defined elsewhere) that repeats each
## variable name 18 times to label the stacked rows.
climgroup <- getgroup(18,clim.vars.name) ## produce the group name for different chronology!!
mongroup <- rep(month,length(clim.vars.name))
corr.mon$climgroup <- climgroup
corr.mon$mongroup <- mongroup
## select the significant correlations
##corr.mon$coef[which(corr.mon$significant=="FALSE")]<-NA
## Blank out |r| < 0.247 -- presumably the p < 0.05 critical r for the
## ~62-yr overlap period (TODO confirm this threshold matches the sample
## size actually used; recompute if the calibration window changes).
corr.mon$coef[which(abs(corr.mon$coef)<0.247)]<-NA
## plot the climate response at monthly scale
## Tile heat-map of chronology-vs-climate correlations: x = month/season,
## y = climate variable (fixed order via clim.vars.name), fill = r (only
## values passing the significance screen above; NA tiles render white).
response.heatmap <-
  ggplot(data=corr.mon,mapping = aes(x=id,
                                     y=factor(climgroup,levels=clim.vars.name),
                                     fill=coef))+
  geom_tile()+xlab(label = "Month")+ylab(label = "Climatic variable")+
  #scale_fill_gradient2(limits=c(-0.6,0.6),low = "steelblue", mid="white",high = "DarkOrange",na.value="grey70")+
  #scale_fill_gradientn(limits=c(-1,1),colors = Col.5,na.value = #"white")+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white")+
  scale_x_continuous(breaks=c(1:18),expand = c(0.03,0.01),labels=month)+
  mythemeplot()+
  theme(axis.text.x = element_text( family = "serif", vjust = 0.5, hjust = 0.5, angle = 90))
## Per-chronology variants of the heat-map.  NOTE(review): these all copy
## the SAME `response.heatmap` object -- this section is apparently re-run
## once per chronology (see the NOTE at 3.3.2), with `response.heatmap`
## rebuilt between assignments; run in isolation the variants are identical.
c.min.heatmap <- response.heatmap+
  annotate("text", x=1, y=9, label="c",size=10,family="serif")
c.max.heatmap <- response.heatmap+
  annotate("text", x=1, y=9, label="d",size=10,family="serif")
o.max.heatmap <- response.heatmap
o.max.heatmap1<-o.max.heatmap+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(legend.position = "none")
# axis.text.y = element_blank(),
# axis.title.y = element_blank())
o.min.heatmap <- response.heatmap
o.min.heatmap1 <- o.min.heatmap+
  theme(legend.position = "none")+
  theme(plot.margin = unit(c(0,0,-0.8,0),"lines"))
o.mean.heatmap <-response.heatmap
o.mean.heatmap1 <-o.mean.heatmap+
  theme(legend.position = "none")+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(legend.position = "none",
        axis.text.y = element_blank(),
        axis.title.y = element_blank())
o.LWmax.heatmap <- response.heatmap
o.LWmax.heatmap1 <-o.LWmax.heatmap+
  theme(plot.margin = unit(c(0,0,-0.8,0.1),"lines"))
o.LWEWmax.heatmap <- response.heatmap
o.LWEWmax.heatmap1 <-o.LWEWmax.heatmap+
  theme(legend.position = "none",
        plot.margin = unit(c(0,0,-0.8,0.1),"lines"))+
  theme(axis.text.y = element_blank(),
        axis.title.y = element_blank())
## A separate copy with a horizontal colour-bar whose legend grob is
## extracted (ggpubr::get_legend) and reused as a standalone panel below.
legendmap <-o.LWEWmax.heatmap+
  theme(legend.position = "bottom")+
  scale_fill_gradientn(limits=c(-0.8,0.8),colors = Col.5,na.value = "white",guide = guide_colorbar(direction = "horizontal",label.vjust = 0,label.theme = element_text(size = 12,family = "serif"),barwidth = 10,title = "correlation",title.position = "bottom",title.hjust = 0.5,title.theme = element_text(size=14,family = "serif"),frame.colour ="gray50"))
leg <- get_legend(legendmap)
# Convert to a ggplot and print
leg1 <-as_ggplot(leg)+theme(plot.margin = unit(c(0.8,0.1,0.5,0.3),"lines"))
tiff("./plot/LWEW-correlation-response-monthly.tiff",
width = 10,height =8 ,units = "cm",compression = "lzw",bg="white",family = "serif",res=500)
print(o.LWEWmax.heatmap)
dev.off()
source("./code/climateresponse of EWmax.R")
### 3.3.3 output the figure for the moving correaltions-----
tiff(file="./plot/Figure 3.3.1 climate Response.tiff",width = 16,
height = 20,units ="cm",compression="lzw",bg="white",res=800)
ggarrange(
ggarrange(o.min.heatmap1,o.mean.heatmap1,
o.max.heatmap1,o.EWmax.heatmap1,
o.LWmax.heatmap1,o.LWEWmax.heatmap1,
nrow = 3,ncol = 2,widths = c(1.2, 1),
labels = c("a","b","c","d","e","f"),
label.x = c(0.2,0.015),
label.y = c(1,1,1.02,1.02,1.02,1.02),
font.label = list(size=24,face="bold",family="serif"),
legend="none"),
leg1,
nrow = 2,ncol = 1,
align = "hv",heights = c(1, 0.1),
widths = c(3.5,1))
dev.off()
### Part 4. seascorr analysis for the chronologies-----
### 4.1 seascorr correlation -----
## Reshape the wide scPDSI table to long form and append it as column
## "scpdsi" of ny.mymdata so seascorr() can use it as a climate variable.
crupdsi.long <- gather(crupdsi,key="month",
                       value = "scpdsi",-year)
## order rows by year then calendar month (month names matched to month.abb)
crupdsi.long1<-crupdsi.long %>%
  arrange(year, match(month,month.abb))
ny.mymdata$scpdsi <- subset(crupdsi.long1,year>1952)$scpdsi
head(ny.mymdata)
## Seasonal-correlation analysis (treeclim::seascorr): primary = scpdsi
## (column 16 of ny.mymdata), secondary = precipitation (column 8), season
## windows of 1, 3, 4 and 5 months ending up to month 11.
pdsiresp.season <- seascorr(iso.chron1,
                            climate=data.frame(ny.mymdata[,c(3,1,8,16)]),
                            complete=11,
                            season_lengths = c(1,3,4,5),
                            primary = 2,secondary = 1,
                            #var_names = c("pre","scpdsi")
)
plot(pdsiresp.season)
## Same analysis with the default primary/secondary order (pre primary).
pdsiresp.season1 <- seascorr(iso.chron1,
                             climate=data.frame(ny.mymdata[,c(3,1,8,16)]),
                             complete=11,
                             season_lengths = c(1,3,4,5),
                             #primary = 2,secondary = 1,
                             #var_names = c("pre","scpdsi")
)
plot(pdsiresp.season1)
## Split-period reconstruction skill for Jul-Sep scPDSI; "-50%" calibrates
## on the later half of the record.
recon <- skills(object = pdsiresp.season,
                target = .mean("scpdsi",7:9),
                calibration = "-50%")
# set 50% is for 1983-2014 as calibration
plot(recon)
recon
recon$cal.model
recon$full.model
recon$cal.years
## here, 1 - (Residual Deviance/Null Deviance) will give the R2.
## NOTE(review): recon$full holds the treeclim full-period fit; x and y are
## its fitted/observed columns -- confirm which is which before quoting R2.
fit<-lm(x~y,data=recon$full)
summary(fit)
BIC(fit)
AIC(fit)
sqrt(mean(fit$residuals^2))# calculate RMSE
## Shared facet-panel theme: light major/minor grid, blank strip background.
## (element_line(size=) is deprecated in favour of linewidth= on ggplot2
## >= 3.4 but still works; left as-is to match the project's ggplot2 pin.)
paneltheme <- theme(
  panel.grid.major = element_line(colour = "gray80", size = 0.5, inherit.blank = TRUE),
  panel.grid.minor = element_line(colour = "gray90", size = 0.2),
  strip.background = element_rect(fill = NA)
)
## Seasonal-correlation plot for the scPDSI-primary analysis.  The month
## axis runs previous-year Oct ("o","n","d") through current November.
## FIX(review): in the original, `paneltheme` sat on its own line after the
## chain (the trailing "+" was missing), so the shared theme was merely
## auto-printed and never applied; every sibling plot below does attach it,
## so it is added to the chain here.  Note theme_pubr() is a complete theme
## and overrides the plot.margin set just before it (kept as the original).
minpdsi1 <- plot(pdsiresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  xlab("")+
  theme(plot.margin = unit(c(0,1,0,1), "lines"))+
  theme_pubr()+theme(strip.text.y = element_text())+
  paneltheme
## Same seasonal plot for the precipitation-primary analysis; here the
## shared paneltheme IS attached via the trailing "+" on the previous line.
minpre1<-plot(pdsiresp.season1)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  xlab("")+
  theme_pubr()+
  paneltheme
## Seasonal correlations with RH (column 7) and scPDSI (column 16):
## first with default variable order, then with scpdsi as primary.
rhresp.season <- seascorr(iso.chron1,
                          climate=data.frame(ny.mymdata[,c(3,1,7,16)]),
                          complete=11,
                          season_lengths = c(1,3,4,5),
                          #primary = 2,secondary = 1,
                          #var_names = c("pre","scpdsi")
)
plot(rhresp.season)
rhresp.season2 <- seascorr(iso.chron1,
                           climate=data.frame(ny.mymdata[,c(3,1,7,16)]),
                           complete=11,
                           season_lengths = c(1,3,4,5),
                           primary = 2,secondary = 1,
                           #var_names = c("pre","scpdsi")
)
plot(rhresp.season2)
## NOTE(review): LWmaxrh1/LWEWrh1 (and LWmaxpdsi1/LWEWpdsi1) are rendered
## from the SAME seascorr objects and differ only in margins -- like the
## heat-maps above, this section is apparently re-run per chronology with
## `iso.chron1` swapped between runs.
LWmaxrh1<-plot(rhresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme(plot.margin = unit(c(-1,1,0.2,1), "lines"))+
  theme_pubr()+
  paneltheme
LWmaxpdsi1<-plot(rhresp.season2)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme_pubr()+
  paneltheme
LWEWrh1<-plot(rhresp.season)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme(plot.margin = unit(c(-1,1,0.2,1), "lines"))+
  theme_pubr()+
  paneltheme
LWEWpdsi1<-plot(rhresp.season2)+
  scale_x_continuous(breaks = seq(1,
                                  by = 1, 14),
                     labels = c("o","n","d","J","F",
                                "M","A","M","J","J",
                                "A","S","O","N"),
                     expand = c(0.001,0.001))+
  theme_pubr()+
  paneltheme
## 4.2 output the figures--------------
## Figure 3: stacked seasonal-correlation panels (pre-primary pdsi plot over
## the composite-max pdsi plot), shared legend on top.
tiff(file="./plot/Figure 3. seacorr for LWEW-max & min pdsi=2 for min.tiff",
     width = 21,height = 18,units ="cm",compression="lzw",
     bg="white",res=800, family = "serif")
ggarrange(minpre1,LWEWpdsi1,
          ncol=1,nrow = 2,
          labels = c("a","b"),
          label.x = 0.05,
          label.y = 1.,
          font.label = list(size=20,family="serif"),
          common.legend = TRUE,legend = "top" )
dev.off()
## Figure S4: the RH-ordered counterparts of the same two analyses.
tiff(file="./plot/Figure S4. seacorr for LWEW-max & min.tiff",
     width = 21,height = 18,units ="cm",compression="lzw",
     bg="white",res=800, family = "serif")
ggarrange(minpdsi1,LWEWrh1,
          ncol=1,nrow = 2,
          labels = c("a","b"),
          label.x = 0.05,
          label.y = 1.,
          font.label = list(size=20,family="serif"),
          common.legend = TRUE,legend = "top" )
dev.off()
##
## ## calculate the correlations between different parameters--
## Pairwise Pearson correlations (Hmisc::rcorr) among the five d18O
## parameters -- annual min/max/mean, latewood max, earlywood max -- for the
## full record and for the two sub-periods (rows 1:51 ~ 1900-1950 and the
## remainder), then the composite LWEWmax chronology: the mean of each
## year's LW max with the FOLLOWING year's EW max.
maxmin.data<-stable.all.omin.mean.date %>%
  select(min,max,mean,LWmax,EWmax) %>%
  as.data.frame()
## Flatten (possibly list-) columns to a plain numeric matrix.
## FIX(review): dimensions are now taken from the data instead of the
## hard-coded c(115, 5); identical result for the present 1900-2014 record.
maxmin.data <- array(as.numeric(unlist(maxmin.data)),
                     dim = c(nrow(maxmin.data), ncol(maxmin.data)))
colnames(maxmin.data)<-c("min","max","mean","LWmax","EWmax")
cc.proxy<-rcorr(maxmin.data,
                type="pearson")
cc.proxy1<-rcorr(maxmin.data[c(1:51),],
                 type="pearson")
cc.proxy2<-rcorr(maxmin.data[c(52:nrow(maxmin.data)),],
                 type="pearson")
head(maxmin.data)
## correlation between LW and lag1 EW
## (pairs EWmax of year t+1 with LWmax of year t)
cor(maxmin.data[-1,5],maxmin.data[-nrow(maxmin.data),4])
## combine EWmax and LW max
summary(maxmin.data[-1,5])
summary(maxmin.data[-nrow(maxmin.data),4])
LWEWmax<-(maxmin.data[-1,5]+maxmin.data[-nrow(maxmin.data),4])/2
## Part 5. multiple variable and common analysis-------------
## ##5.1 using the nlm model----
## Stepwise (MASS::stepAIC) variable selection of linear models predicting
## the chronology from the candidate climate drivers, followed by
## commonality analysis (yhat::regr / aps / commonality) to partition the
## explained variance.  semmodeldata / semmodeldata2 / semmodeldata3 are
## built elsewhere for the annual-min, LW-max and composite-max series
## respectively -- TODO confirm that mapping.
model.1=lm(mean~ mean.vpd+mean.rh+mean.pre+mean.ssd+mean.value+scpdsi+mean.tmean+mean.evp,data=semmodeldata)
step1 <- stepAIC(model.1, direction="both")
step1$anova # display results
model.2=lm(mean~ mean.ssd+mean.value+scpdsi+mean.pre+mean.rh,
           data=semmodeldata)
model.3=lm(mean~ mean.ssd+mean.value+scpdsi,data=semmodeldata)
LWmodel.1=lm(mean~ mean.rh+mean.pre+mean.ssd+mean.value+scpdsi+mean.vpd, data=semmodeldata2)
LWmodel.2=lm(mean~ mean.vpd+mean.rh, data=semmodeldata2)
LWmodel.3=lm(mean~ mean.rh+mean.pre, data=semmodeldata2)
step <- stepAIC(LWmodel.1, direction="both")
step$anova # display results
head(semmodeldata3)
LWEWmodel.1=lm(LWEWmax ~ rh+pre+ssd1+mean.value+scpdsi+vpd+evp, data=semmodeldata3)
LWEWmodel.2=lm(LWEWmax~ vpd+rh, data=semmodeldata3)
LWEWmodel.3=lm(LWEWmax~ rh+pre, data=semmodeldata3)
LWEWmodel.4=lm(LWEWmax~ rh, data=semmodeldata3)
step1 <- stepAIC(LWEWmodel.1, direction="both")
step1$anova # display results
#commonality assessment--
regr(model.1)
regr(model.2)## this depend the beta weight!!
regr(model.3) ##$Commonality_Data $Commonality_Data$`CC shpw the cntributions
commonality(model.1)
## All-possible-subsets regression
apsOut=aps(semmodeldata,"mean",list("scpdsi", "mean.value","mean.ssd"))
## Commonality analysis
commonality(apsOut)
regr(LWmodel.1)
regr(LWmodel.2)
regr(LWmodel.3)
commonality(model.1)
regr(LWEWmodel.1)
regr(LWEWmodel.2)
regr(LWEWmodel.3)
regr(LWEWmodel.4)
## Part 6. Climate reconstruction and comparison---------
## 6.1 reconstruction test-----
##
## subset the chronology
## Explicitly-named chronologies for the reconstruction: annual minimum
## (Amin, 1900-2014) and the lag-1 composite max (LWEW, 1900-2013).
Amin.chron1 <- as.data.frame(stable.all.omin.mean[2])
#iso.chron1 <- as.data.frame(stable.all.omax.mean[2])
#iso.chron1 <- as.data.frame(oc.mean[3])
rownames(Amin.chron1) <- c(1900:2014)
head(Amin.chron1)
## for the EW and LW min and max
# iso.chron1 <- as.data.frame(stable.allEW.omax.mean$mean)
# iso.chron1 <- as.data.frame(stable.allLW.omax.mean$mean)
#
# # for the LW and EW max lag one year
LWEW.chron1 <- as.data.frame(LWEWmax)
rownames(LWEW.chron1) <- c(1900:2013)
# PDSI (JAS) is the strongest correlation -0.667 for the min oxygen
#
pdsi.res1<- dcc(Amin.chron1,data.frame(subset(crupdsi,year>1952)),
                timespan = c(1953,2014),
                var_names =c("pdsi"), method = "correlation",
                selection =.range("pdsi",-10:12)+.mean("pdsi",6:8)
                +.mean("pdsi",7:9)+.mean("pdsi",8:11))
plot(pdsi.res1)
## Composite-max vs RH: single-month October (.range("rh",10)) plus the
## usual seasonal composites.
rhLWEWmax.res1<- dcc(LWEW.chron1,
                     data.frame(ny.mymdata[,c(3,1,7)]),
                     #timespan = c(1953,2014),
                     var_names =c("rh"), method = "correlation",
                     selection =.range("rh",10)+.mean("rh",6:8)
                     +.mean("rh",4:10)+.mean("rh",8:11))
plot(rhLWEWmax.res1)
## NOTE(review): this call still uses the manually-toggled `iso.chron1`
## (expected to hold the LW-max series here) -- confirm before running.
rhLWmax.res1<- dcc(iso.chron1,data.frame(ny.mymdata[,c(3,1,7)]),
                   timespan = c(1953,2014),
                   var_names =c("rh"), method = "correlation",
                   selection =.mean("rh",6:8)
                   +.mean("rh",4:10)+.mean("rh",8:11))
plot(rhLWmax.res1)
## Split-period calibration/verification (treeclim::skills, OLS transfer)
## for the annual-min chronology vs Jul-Sep scPDSI: "-50%" = calibrate on
## the LATER half, then the complementary "49%" run calibrates on the
## EARLIER portion, giving the usual swapped calibration/verification pair.
sk.pdsimin<-skills(object = pdsi.res1,
                   target =.mean("pdsi",7:9),
                   calibration = "-50%",
                   model="ols")
ggplotly(plot(sk.pdsimin))
sk.pdsimin$full.model$rsquare
summary(sk.pdsimin$full.model$call)
sk.pdsimin$RE#
sk.pdsimin$CE
sk.pdsimin$cal.model
## Complementary run; note the different seasonal target (Apr-Oct mean) --
## presumably intentional, but confirm against the paper's methods.
sk.pdsimin.2<-skills(object = pdsi.res1,
                     target =.mean("pdsi",4:10),
                     calibration = "49%",
                     model="ols")
ggplotly(plot(sk.pdsimin.2))
sk.pdsimin.2$full.model$rsquare
summary(sk.pdsimin.2$coef.full)
sk.pdsimin.2$RE#
sk.pdsimin.2$CE
sk.pdsimin.2$cal.model
## Split-period verification skills for the composite (LW + lag-1 EW) max
## chronology against October RH, calibrating on the later ~half.
sk.rhLWEWmax<-skills(object = rhLWEWmax.res1,
                     target =.range("rh",10),
                     calibration = "-51%",
                     timespan = c(1953,2013),
                     model="ols")
## FIX(review): the original plotted `sk.rhEWmax`, an object never created
## in this section -- almost certainly a copy-paste slip for the
## `sk.rhLWEWmax` object built immediately above and inspected below.
ggplotly(plot(sk.rhLWEWmax))
sk.rhLWEWmax$full.model
sk.rhLWEWmax$cal.model
sk.rhLWEWmax$DW
sk.rhLWEWmax$RE
sk.rhLWEWmax$CE
sk.rhLWEWmax$cal.years
## Same calibration/verification pair for the LW-max chronology vs Aug-Nov
## mean RH (later-half then earlier-half calibration).
sk.rhLWmax<-skills(object =rhLWmax.res1,
                   target =.mean("rh",8:11),
                   calibration = "-51%",
                   timespan = c(1953,2014),
                   model="ols")
ggplotly(plot(sk.rhLWmax))
sk.rhLWmax$full.model
sk.rhLWmax$cal.model
sk.rhLWmax$DW
sk.rhLWmax$RE
sk.rhLWmax$CE
sk.rhLWmax.2<-skills(object =rhLWmax.res1,
                     target =.mean("rh",8:11),
                     calibration = "49%",
                     model="ols")
ggplotly(plot(sk.rhLWmax.2))
sk.rhLWmax.2$cal.years
sk.rhLWmax.2$full.model
sk.rhLWmax.2$cal.model
sk.rhLWmax.2$DW
sk.rhLWmax.2$RE
sk.rhLWmax.2$CE
## Goodness-of-fit of the full-period transfer (see the x~y caveat noted at
## the `recon` block above: confirm which of x/y is observed vs predicted).
fit<-lm(x~y,data=sk.rhLWmax$full)
summary(fit)
BIC(fit)
AIC(fit)
sqrt(mean(fit$residuals^2))# calculate RMSE
## RE/CE summary tables for the two split-period runs (rows = the two
## calibration windows), then observed-vs-chronology scatter plots with
## fitted OLS lines; lm_eqn() is a project helper that formats the equation
## label.
title<-cbind(Calibration=c("1953-1983","1984-2014"),
             Verification=c("1984-2014","1953-1983"))
REtable.PDSI<-cbind(title,
                    cbind(RE=c(sk.pdsimin.2$RE,sk.pdsimin$RE),
                          CE=c(sk.pdsimin.2$CE,sk.pdsimin$CE)))
REtable.rh<-cbind(title,
                  cbind(RE=c(sk.rhLWmax.2$RE,sk.rhLWmax$RE),
                        CE=c(sk.rhLWmax.2$CE,sk.rhLWmax$CE)))
## Combined data frame of predictor/predictand pairs; the composite-max
## series is one year shorter, hence the NA padding.
regree.data<-data.frame(cbind(sk.pdsimin$full.model$y,sk.pdsimin$full.model$x,
                              c(sk.rhLWEWmax$full.model$y,NA),
                              c(sk.rhLWEWmax$full.model$x,NA)))
colnames(regree.data)<-c("scpdsi","amino18","rh10","LWEWmaxo18")
cor(regree.data$scpdsi[-62],
    regree.data$rh10[-62])
# mulfit1<-lm(regree.data$rh8.11[-(59:62)]~
#               regree.data$LWmaxo18[-(59:62)]+
#               sk.rhEWmax$full.model$x[-(59:61)])
# summary(mulfit1)
m1<-lm(regree.data$scpdsi~regree.data$amino18)
reg1<-ggplot(regree.data,aes(x=amino18,y=scpdsi)) +
  geom_point(shape=1,col=4) +
  geom_smooth(method=lm, lty=2, color=4, se=TRUE)+
  ylab("July-September scPDSI")+
  xlab(expression(paste("Annual minimum tree-ring"," ",~delta^18,"O")))+
  geom_text(y = 4.5, x = 26, label = lm_eqn(m1),
            parse = TRUE,
            colour="black",family="TN",size=3.5)+
  mythemeplot()
## NOTE(review): family="TN" relies on the windowsFonts("TN") registration
## performed later in this script (Windows-only) -- confirm it has run, or
## register the font before these plots are drawn.
m2<-lm(regree.data$rh10~regree.data$LWEWmaxo18)
reg2<-ggplot(regree.data,aes(x=LWEWmaxo18,y=rh10)) +
  geom_point(shape=1,col="darkgreen") +
  geom_smooth(method=lm , lty=2, color="darkgreen", se=TRUE)+
  ylab("October RH (%)")+
  xlab(expression(paste("LW + EW(lag1) maximum tree-ring"," ",~delta^18,"O")))+
  geom_text(x = 29.5, y = 70, label = lm_eqn(m2),
            parse = TRUE,
            colour="black",family="TN",size=3.5)+
  mythemeplot()
## 6.2 climate reconstruction------
## Reconstruction data--
sk.pdsimin$full.model
## NOTE(review): the intercept/slope below are transcribed by hand from the
## full.model output printed above -- they will silently go stale if the
## chronologies or calibration change; TODO read them from the model object
## (e.g. its coefficients) instead of hard-coding.
pdsi.recon<-32.0118-1.209782*stable.all.omin.mean[2]
sk.rhLWEWmax$full.model
rh.recon <- 197.4765-3.896857*LWEWmax
## The rh reconstruction is one year shorter (lag-1 composite), NA-padded.
recondata<-cbind(pdsi.recon,c(rh.recon,NA))
colnames(recondata)<-c("scpdsi","rh10")
recondata$year<-1900:2014
obs<-subset(regree.data,select = c("scpdsi","rh10"))
obs$year<-1953:2014
## CRU Jul-Sep mean scPDSI before the instrumental overlap (1901-1953).
CRU<-subset(crupdsi,select = c(8:10),year<1954)%>%
  mutate(pdsi=rowMeans(.))
CRU<-cbind(CRU[,4],NA,year=(1901:1953))
colnames(CRU)<-c("scpdsi","rh10","year")
## Stack observation / reconstruction / early-CRU into one labelled frame.
reconcomdata<-rbind(obs,recondata,CRU)
reconcomdata$type<-c(rep("observation (CRU)",62),
                     rep("reconstruction",115),
                     rep("CRU before 1953",53))
## detect the slope for different period
summary(lm(data=subset(reconcomdata, type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata,year>1952 & type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata,year<1953 & type=="reconstruction"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="observation (CRU)"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="CRU before 1953"),
           scpdsi~year))
summary(lm(data=subset(reconcomdata, type=="reconstruction"),
           rh10~year))
summary(lm(data=subset(reconcomdata,year>1952 & type=="reconstruction"),
           rh10~year))
summary(lm(data=subset(reconcomdata,year<1953 & type=="reconstruction"),
           rh10~year))
write.csv(reconcomdata,"reconstruction.csv")
## Pre-1953 agreement: correlate the reconstruction against early CRU
## (52 pre-1953 reconstruction rows stacked above 52 CRU rows).
x=subset(reconcomdata,year< 1953 & type=="reconstruction"|
           year<1953 & type=="CRU before 1953",
         select =c("scpdsi","year","type") )
cor.test(x$scpdsi[1:52],x$scpdsi[53:104])
## Cross-correlation of the two reconstructed series within each period.
cor(x=subset(reconcomdata,year>1952 & type=="reconstruction",
             select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
cor(x=subset(reconcomdata,year<1953 & type=="reconstruction",
             select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
## Reconstruction vs observation over the verification (post-1983) and
## calibration (1954-1983) windows.
cor(subset(reconcomdata,year>1983 & type=="reconstruction",
           select =c("scpdsi","rh10") ),
    subset(reconcomdata,year>1983 & type=="observation (CRU)",
           select =c("scpdsi","rh10") ),
    use="complete.obs",
    method = "pearson")
cor(subset(reconcomdata,year>1953 &year <1984 & type=="reconstruction",
           select =c("scpdsi","rh10") ),
    subset(reconcomdata,year>1953 &year <1984 & type=="observation (CRU)",
           select =c("scpdsi","rh10")),
    use="complete.obs",
    method = "pearson")
## here, comparison between different filter functions!!
# spline.pdsi1<-smooth.spline(recondata$year,recondata$scpdsi,n = 10)
# spline.pdsi2<- pass.filt(recondata$scpdsi, W=10, type="low", method="Butterworth")## for 10 year low pass
# spline.pdsi2 <- as.data.frame(cbind(x=spline.pdsi$x, y=spline.pdsi2))
#
# spline.pdsi<-smooth.spline(recondata$year,recondata$scpdsi,spar = 0.2)##
# spline.pdsi <- as.data.frame(cbind(x=spline.pdsi$x, y=spline.pdsi$y))
# plot(spline.pdsi$x, spline.pdsi$y, type="l",col=2)
# par(new=TRUE)
# plot(spline.pdsi1$x, spline.pdsi1$y, type="l")
# par(new=TRUE)
# plot(spline.pdsi2$x, spline.pdsi2$y, type="l",col=4)
## reconstruction and 20-year loess smooother
## scPDSI reconstruction series plot: raw lines by type plus two loess
## smooths of the reconstruction (span 0.2 and 0.75) and a long-term smooth
## of the full CRU record (CRU.all, built elsewhere).  The r values in the
## annotation are transcribed by hand -- TODO confirm they match the
## correlations computed above.
pdsireconplot<-ggplot(reconcomdata,aes(x=year,y=scpdsi)) +
  geom_line(aes(colour= type))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=scpdsi),
              method = "loess",span=0.2,se=FALSE,lwd=1.5,col=4)+
  #geom_line(data=spline.pdsi,aes(x=x,y=y))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=scpdsi),method = "loess",span=0.75,se=TRUE,col=c("blue"))+#col=c("#00BFC4"))
  geom_smooth(data = CRU.all,aes(x=year,y=scpdsi),
              method = "loess",span=0.75,se=TRUE,
              col=c("Darkorange"))+
  xlab("")+ylab("July-September scPDSI")+
  scale_x_continuous(expand = c(0.01,0.01))+
  mythemeplot()+
  theme(legend.position = c(0.85,0.87),
        legend.title = element_blank())+
  geom_vline(xintercept=1984,lty=2,col="gray70")+
  theme(plot.margin = unit(c(-0.2,0.3,0,0.3),"lines"))+#+
  # geom_line(data=subset(crupdsi.4.10.date,year<1954),
  #           aes(x=year(date),y=growing,col="Darkorange"),
  #           lwd=0.2)+
  scale_color_manual(values=c("observation (CRU)" = "#F8766D",
                              "reconstruction" = "blue",
                              "CRU before 1953"="Darkorange"),
                     labels=c("CRU before 1953", "observation (CRU)", "reconstruction"))+
  annotate("text", x = 1984, y = -3.2,
           label = expression(paste("Verification: ", italic(r), "= 0.75; Calibration: ", italic(r)," = 0.49")),
           family="serif")#+
#annotate("text", x = 1984, y = -3.5, label = "RE = 0.526, CE = 0.473",family="serif")
## October-RH reconstruction panel (same layout as the scPDSI panel).
## NOTE(review): "CRU before 1953" is mapped to colour NA so that stratum is
## effectively hidden (rh has no pre-1953 observations) -- presumably
## intentional, but it still occupies a legend slot; confirm the legend
## labels line up with the three type levels.
rhreconplot<-ggplot(reconcomdata,aes(x=year,y=rh10)) +
  geom_line(aes(colour= type))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=rh10),
              method = "loess",span=0.2,se=FALSE,lwd=1.5,col=c("darkgreen"))+
  geom_smooth(data=subset(reconcomdata,type=="reconstruction"),aes(x=year,y=rh10),method = "loess",span=0.75,se=TRUE,col=c("darkgreen"))+
  xlab("Year")+ylab("October RH (%)")+
  scale_x_continuous(expand = c(0.01,0.01))+
  mythemeplot()+
  theme(legend.position = c(0.2,0.2),
        legend.title = element_blank())+
  geom_vline(xintercept=1984,lty=2,col="gray70")+
  theme(plot.margin = unit(c(-0.5,0.3,0.3,0.6),"lines"))+
  scale_color_manual(values=c("observation (CRU)" = "#00BFC4",
                              "reconstruction" = "darkgreen",
                              "CRU before 1953"=NA),
                     labels=c("", "observation", "reconstruction"))+
  annotate("text", x = 1984, y = 60,
           label = expression(paste("Verification: ", italic(r), "= 0.77; Calibration: ", italic(r)," = 0.61")),
           family="serif")#+
#annotate("text", x = 1984, y = 72, label = "RE = 0.464, CE = 0.461", family="serif")
tiff(file="./plot/Figure 7.1.1 reconstruction1.tiff",
width = 16,height = 18,
units ="cm",compression="lzw",bg="white",res=800)
ggarrange(
ggarrange(reg1,reg2,ncol=2,labels = c("a","b"),
label.x = 0.87,
label.y = c(1,0.99),
font.label = list(size=20,family="serif")),
ggarrange(pdsireconplot,rhreconplot,
nrow = 2,
labels = c("c","d"),
label.x = 0.1,
label.y = c(1,1.04),
align = "v",
font.label = list(size=20,family="serif")),
nrow = 2,align = "v",heights = c(0.6,1),
# labels = c("","c"),
# label.x = 0.1,
# label.y = 1.04,
font.label = list(size=20,family="serif"))
dev.off()
### Part 7. Supplementary figure plot--------
###
### 7.3. Figure S3----------
## here, the max value have a lag significant correlation, it means the significant old carbon reuse?? McCaroll et al., 2017
## Correlation matrix of the five d18O parameters (cc.proxy from Part 5):
## upper triangle coloured tiles, lower triangle numeric r; non-significant
## cells (p >= 0.05) are over-plotted with a translucent red X.
## NOTE(review): windowsFonts()/windowsFont() are Windows-only -- this block
## fails on other platforms; cl.lim=c(0,1) assumes all correlations are
## non-negative (and is renamed col.lim in corrplot >= 0.90) -- confirm
## against the installed corrplot version.
tiff("./plot/Figure S3 oxygen parameter correlation 1900-2014.tiff",width=8,height = 8,units = "cm",
     compression = "lzw",bg="white",res = 300)
windowsFonts(TN = windowsFont("Times New Roman"))
par(mgp=c(2.0,0.5,0),family="TN",ps=8)
# par(mfrow=c(1,3),mgp=c(1.0,0.5,0),family="TN",ps=13)
#par(mar=c(0, 0, 0.0, 0) + 0.1)
par(oma=c(0,0,0.02,0.02))
corrplot(corr = cc.proxy$r,type="upper",
         col=brewer.pal(n=10, name="PuOr"),cl.lim = c(0, 1),
         tl.pos="d",tl.col = 1,tl.cex=1.2,
         p.mat = cc.proxy$P, sig.level = 0.05,insig ="pch",
         pch.cex = 3,pch.col = rgb(255, 0, 0,100, maxColorValue=255))
corrplot(corr=cc.proxy$r,add=TRUE,type="lower",method = "number", number.cex = 1,number.font=2,col=1,
         diag=FALSE,tl.pos="n", cl.pos="n",p.mat = cc.proxy$P,
         sig.level = 0.05,insig ="pch",pch.cex = 3,
         pch.col = rgb(255, 0, 0, 100, maxColorValue=255))
dev.off()
### 7.4 Figure S4--------------
## Figure S4 has been ouputed in the part 4.2
## 7.5 Figure S5. correlation between chrongologies and ISOGSM data-----
## detect the climatic signal of the GNIP data (precipitation oxygen data )
## the aim of this part is to detect the climate response in the tree-ring d18O and d18O in precipitation
### 7.5.1 d18O precipitation response to maximum and minimum tree-ring -----
## Convert each chronology to an annual ts and run the project helper
## Climateplot() (sourced from "basic dplR and beyond.R", see note below)
## against the precipitation-d18O climate table p.rateoxy.clim over
## 1950-2010, with no detrending or spline pre-filter.
omin.mean <- as.data.frame(stable.all.omin.mean[2])
omax.mean <- as.data.frame(stable.all.omax.mean[2])
omin.mean.ts <- ts(omin.mean, start = 1900,frequency = 1)
omax.mean.ts <- ts(omax.mean, start = 1900,frequency = 1)
EWomax.mean.ts <- ts(stable.allEW.omax.mean$mean,
                     start = 1900,frequency = 1)
LWomax.mean.ts <- ts(stable.allLW.omax.mean$mean,
                     start = 1900,frequency = 1)
LWEW.mean.ts <- ts(LWEW.chron1,start = 1900,frequency = 1)
## here call the function runningclimate from E:/Rwork/myfunction/basic dplR and beyond.R
## the basic idea is used the runningclimate to detect the pearson's correlation
## call for the data @ oxygen from precipitation, @@p.rateoxy.clim
omin.mean.p <- Climateplot (omin.mean.ts,
                            Climatesite = p.rateoxy.clim,
                            fyr=1950,lyr=2010,
                            detrended=c("No"),
                            spline.length=0)
omax.mean.p <- Climateplot(omax.mean.ts,
                           Climatesite = p.rateoxy.clim,
                           fyr=1950,lyr=2010,
                           detrended=c("No"),
                           spline.length=0)
EWomax.mean.p <- Climateplot(EWomax.mean.ts,
                             Climatesite = p.rateoxy.clim,
                             fyr=1950,lyr=2010,
                             detrended=c("No"),
                             spline.length=0)
LWomax.mean.p <- Climateplot(LWomax.mean.ts,
                             Climatesite = p.rateoxy.clim,
                             fyr=1950,lyr=2010,
                             detrended=c("No"),
                             spline.length=0)
LWEWomax.mean.p <- Climateplot(LWEW.mean.ts,
                               Climatesite = p.rateoxy.clim,
                               fyr=1950,lyr=2010,
                               detrended=c("No"),
                               spline.length=0)
# Adapt these to your needs:
#parSettings <- list(layout.widths=list(left.padding=1))
## lattice contour maps of the moving-window correlations (month x window
## length), one per chronology, with an in-panel letter label added via
## latticeExtra::layer().
omin.p <- contourplot(t(omin.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
                      col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
                      at=c(seq(-0.8,0.8,0.05)),xlab="",ylab="Window length",main=NA)+
  latticeExtra::layer(panel.text(x=3, y=11.5, label="a min",family="serif",font=2,cex=1.5))
# omax.p<-contourplot(t(omax.mean.p),region=T,lwd=0.3,lty=2,
#                     col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
#                     at=c(seq(-0.8,0.8,0.05)),xlab=" ",ylab="Window length",main=title)
EWomax.p <-contourplot(t(EWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
                       col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
                       at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length")+
  latticeExtra::layer(panel.text(x=3, y=11.5, label="b EW-max",family="serif",font=2,cex=1.5))
LWomax.p <-contourplot(t(LWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
                       col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
                       at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length",main=NA)+
  latticeExtra::layer(panel.text(x=3, y=11.5, label="c LW-max",family="serif",font=2,cex=1.5))
LWEWomax.p<-contourplot(t(LWEWomax.mean.p),region=T,lwd=0.3,lty=2,aspect=0.4,
                        col.regions=colorRampPalette(c("red","yellow","white","green3","blue")),
                        at=c(seq(-0.8,0.8,0.05)),xlab="Months",ylab="Window length",main=NA)+
  latticeExtra::layer(panel.text(x=4.5, y=11.5, label="d Composite max",family="serif",font=2,cex=1.5))
## 7.5.2 output the correlation analysis-------
## Stack the four panels in one column; c() on lattice objects merges them
## (latticeExtra method) with a shared legend.
tiff("./plot/omin-EW,LWomax-precipitation-oxy-2.tiff",width = 20,height = 27,
     units = "cm",pointsize = 12,compression = "lzw",res = 300,bg="white",family = "serif")
# Combine lattice charts into one
#c(omin.p, EWomax.p)
c(LWEWomax.p,LWomax.p,EWomax.p,omin.p,
  merge.legends = TRUE,layout=c(1,4))
dev.off()
### 7.6. Variability of the cloud cover from CRU dataset----
### # read cloud cover data from CRU
## Wide table: column 1 = year, columns 2:13 = monthly cloud cover.
crucld<-read.table("./cru/icru4_cld_112.5-112.7E_27.27-27.5N_n.dat",
                   header = FALSE)
head(crucld)
colnames(crucld)<-c("year",1:12)
crucld <- subset(crucld,year>1952 & year<2015)
# Determine p-values of regression
#
## Per-month trend p-values (slope coefficient of cld ~ year).
## NOTE(review): p.vals is grown with cbind() from an initial NA, so the
## result is a 1-row matrix with a leading NA column -- a vapply() over
## 2:13 would be cleaner, but the exact shape is kept in case later
## (unseen) code indexes it; it is only inspected interactively here.
p.vals <-NA
for(i in 2:13 ) {
  cldslope=coef(summary(lm(crucld[,i]~crucld[,1])))[2,4]
  p.vals <- cbind(p.vals,cldslope)
}
## Long form (year, month, cld) for the ggplot facets below.
crucldlong <- gather(crucld,key="month",value=cld,-year)
## Y-axis break generator for the per-month cloud-cover facets: panels whose
## values dip below 50% get a wide 20-unit grid starting at 30, otherwise a
## finer 15-unit grid from 60.
## FIX(review): min() now ignores NAs so a facet range containing NA cannot
## make the `if` condition error out; same breaks as before for NA-free input.
my_breaks <- function(x) {
  if (min(x, na.rm = TRUE) < 50) {
    seq(30, 90, 20)
  } else {
    seq(60, 90, 15)
  }
}
## Faceted monthly cloud-cover series; `crucld.longplot` covers the full
## table (already subset to 1953-2014 above) and `crucld.plot` repeats the
## year filter explicitly -- the two are effectively the same view.
crucld.longplot<-ggplot(
  data=subset(crucldlong, year<2015),
  aes(year,cld,group=month,col=factor(month,levels=c(1:12))))+
  geom_line()+geom_point()+
  facet_grid(factor(month,levels=c(1:12))~., scales="free")+
  xlab(label = "Year")+
  ylab(label = c("Cloud cover (%)"))+
  scale_x_continuous(expand = c(0.005,0.005))+
  scale_y_continuous( breaks = my_breaks)+
  guides(col=guide_legend(title="Month"))
crucld.plot<-ggplot(
  data=subset(crucldlong,year>1952 & year <2015), aes(year,cld,group=month,col=factor(month,levels=c(1:12))))+
  geom_line()+geom_point()+
  facet_grid(factor(month,levels=c(1:12))~.,scales = "free")+
  #facet_grid(factor(crucldlong$month,levels=c(1:12))~., scales="free")+
  xlab(label = "Year")+
  ylab(label = c("Cloud cover (%)"))+
  scale_x_continuous(expand = c(0.01,0.01))+
  scale_y_continuous( breaks = my_breaks)+
  guides(col=guide_legend(title="Month"))
tiff(file="./plot/Figure S6. Cloud cover for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(crucld.longplot)
dev.off()
### 7.7 Variability of d18O of precipitation------
## plot and for the seasonal oxygen isotpe in precipitation from ISOGSM model
pre.oxy2.11<-subset(p.rateoxy.shape,Var2 %in% c(2,3,4,5,6,7,8,9,10,11)& Var1>1949 & Var1<2011)
pre.oxy2.11long<-subset(p.rateoxy.shape,Var2 %in% c(2,3,4,5,6,7,8,9,10,11)& Var1>1899)
# Determine p-values of regression
#
p.vals <-NA
for(i in 1:10 ) {
pslope=pvaluecal(unique(pre.oxy2.11long$Var2)[i],
group=2,data=pre.oxy2.11)
p.vals <- cbind(p.vals,pslope)
}
pre.oxy2.11.plot<-ggplot(subset(pre.oxy2.11,Var1>1949 & Var1<2011),aes(Var1,value,group=Var2,col=as.factor(Var2)))+
geom_line()+geom_point()+
facet_grid(pre.oxy2.11$Var2~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
xlab(label = "Year")+ylab(label = expression(paste("Precipitation ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
pre.oxy2.11.longplot<-ggplot(pre.oxy2.11long,aes(Var1,value,group=Var2,col=as.factor(Var2)))+
geom_line()+geom_point()+
facet_grid(as.factor(Var2)~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
scale_x_continuous(expand = c(0.01,0.01))+
xlab(label = "Year")+ylab(label = expression(paste("Precipitation ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
tiff(file="./plot/stable oxygen in Feb-Nov preciptation for 1950-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(pre.oxy2.11.plot)
dev.off()
tiff(file="./plot/stable oxygen in Feb-Nov preciptation for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(pre.oxy2.11.longplot)
dev.off()
pre.oxy5.8.mean<-pre.oxy5.8 %>% group_by(Var1)%>%
summarise(mean.value=mean(value,na.rm=TRUE))
pre.oxy2.4.mean<-pre.oxy2.4 %>% group_by(Var1)%>%
summarise(mean.value=mean(value,na.rm=TRUE))
pre.oxy2.10.sd<-pre.oxy2.10 %>% group_by(Var1)%>%
summarise(sd.value=sd(value,na.rm=TRUE))
# Plot the absolute year-to-year precipitation difference with its linear
# trend (pre.diff and fit.abs are computed earlier in the script).
tiff(file="./plot/diff in preciptation for 1900-now.tiff",width = 12,height = 8,units ="cm",compression="lzw",bg="white",res=600)
plot(pre.diff[, 1],
     abs(pre.diff[, 2]), type = "l", xlim = c(1900, 2014),  # was xli= / positional "l": spell out args instead of relying on partial matching
     xlab = "year", ylab = "Difference in absolute")
abline(fit.abs, lty = 2)  # fitted linear trend
text(1950, 1.5,  # was label=: the text() argument is 'labels'
     labels = expression(paste(italic(slope),'=-0.0093, ',italic(R)^2, '= 0.08, ', italic(p),'= 0.003')))
dev.off()
## 7.8.plot the trend of vapor d18O-----
###
monthvp.oxylong<-gather(monthvp.oxy,key="month",value = "d18O",-year)
monthvp.oxy2.11<-subset(monthvp.oxylong,month %in% c(2,3,4,5,6,7,8,9,10,11)& year>1949 & year<2011)
monthvp.oxy2.11long<-subset(monthvp.oxylong,month %in% c(2,3,4,5,6,7,8,9,10,11)& year>1899)
# Determine p-values of regression
#
p.vals <-NA
for(i in 1:10 ) {
pslope=pvaluecal(unique(monthvp.oxy2.11long$month)[i],
group=2,data=monthvp.oxy2.11)
p.vals <- cbind(p.vals,pslope)
}
monthvp.oxy2.11.plot<-ggplot(monthvp.oxy2.11,aes(year,d18O,group=month,col=factor(month,levels=c(2:11))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels = c(2:11))~., scales="free")+
scale_x_continuous(expand = c(0.01,0.01))+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
xlab(label = "Year")+ylab(label = expression(paste(" Water vapour ",delta^"18","O (โฐ)")))+
guides(col=guide_legend(title="Month"))
monthvp.oxy2.11.longplot<-ggplot(monthvp.oxy2.11long,aes(year,d18O,group=month,col=factor(month,levels=c(2:11))))+
geom_line()+geom_point()+
facet_grid(factor(month,levels=c(2:11))~., scales="free")+
# stat_smooth(method=lm,se=FALSE,lty=2,
# lwd=1.0,level = 0.95)+
#geom_smooth(method = "lm",col="black",lty=2)+
scale_x_continuous(expand = c(0.01,0.01))+
xlab(label = "Year")+ylab(label = expression(paste("Water vapour ",delta^"18","O in precipitation (โฐ)")))+
guides(col=guide_legend(title="Month"))
tiff(file="./plot/Figure S8. stable oxygen in Feb-Nov vapour for 1900-now.tiff",width = 16,height = 14,units ="cm",compression="lzw",bg="white",family = "serif",res=600)
print(monthvp.oxy2.11.longplot)
dev.off()
## 7.9 Variability of seasonal mean climate-------
clim.july_sept1<-subset(ny.mymdata,month %in% c(7,8,9))%>%
group_by(year)%>%
summarise(mean.preday=mean(pre.day,na.rm=TRUE),mean.tmean=mean(tmean,na.rm=TRUE),
mean.presure=mean(water.pressure,na.rm=TRUE),mean.rh=mean(rh,na.rm=TRUE),
mean.pre=mean(pre,na.rm=TRUE),mean.tmin=mean(tmin,na.rm=TRUE),
mean.tmax=mean(tmax,na.rm=TRUE),mean.ssd=mean(ssd,na.rm=TRUE),
mean.vpd=mean(vpd,na.rm=TRUE),mean.evp=mean(evp,na.rm=TRUE),
mean.pdsi=mean(scpdsi,na.rm = TRUE))
clim.mar_jun <- subset(ny.mymdata,month %in% c(3:6))%>%
group_by(year)%>%
summarise(mean.3.6preday=mean(pre.day,na.rm=TRUE),mean.3.6tmean=mean(tmean,na.rm=TRUE),
mean.3.6presure=mean(water.pressure,na.rm=TRUE),mean.3.6rh=mean(rh,na.rm=TRUE),
mean.3.6pre=mean(pre,na.rm=TRUE),mean.3.6tmin=mean(tmin,na.rm=TRUE),
mean.3.6tmax=mean(tmax,na.rm=TRUE),mean.3.6ssd=mean(ssd,na.rm=TRUE),
mean.3.6vpd=mean(vpd,na.rm=TRUE),mean.3.6evp=mean(evp,na.rm=TRUE),
mean.3.6pdsi=mean(scpdsi,na.rm = TRUE))
clim.july_sept <- cbind(clim.july_sept1,clim.mar_jun[-1])
clim.oct<-subset(ny.mymdata,month %in% c(10))
head(clim.july_sept)
head(clim.oct)
head(clim.mar_jun)
rh.10<-ggplot(clim.oct,aes(year,rh))+
geom_line()+geom_point()+
#stat_smooth(method=lm,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "loess",span=0.2,se=F,col=1,lwd=1.2,lty=1)+
xlab(label = "Year")+ylab(label = "Relative humidity (%)")+
# annotate("text",x=1988,y=(min(clim.oct$rh,na.rm = TRUE))*1.02,
# label=expression(paste(italic(slope),'= 0.038, ',italic(R)^2, '= 0.06, ', italic(p),'= 0.05')))+
mythemeplot()+
theme(axis.title.x=element_blank())
summary(lm( clim.oct$rh[1:51]~clim.oct$year[1:51]))
summary(lm( clim.oct$rh~clim.oct$year))
rh.7.9<-ggplot(clim.july_sept,aes(year,mean.rh))+
geom_line(col=4)+geom_point(col=4)+
stat_smooth(method=lm,lty=2,lwd=1.0,col=4)+
geom_smooth(method = "loess",span=0.2,se=F,lty=1,lwd=1.2)+
geom_line(aes(year,mean.3.6rh),col=3)+geom_point(aes(year,mean.3.6rh),col=3)+
stat_smooth(aes(year,mean.3.6rh),method=lm,lty=2,lwd=1.0,col=3)+
geom_smooth(aes(year,mean.3.6rh),method = "loess",span=0.2,se=F,col=3,lwd=1.2,lty=1)+
xlab(label = "Year")+ylab(label = "Relative humidity (%)")+
annotate("text",x=1980,y=(min(clim.july_sept$mean.rh,na.rm = TRUE))*1.02,col=4,
label=expression(paste(italic(slope),'= 0.037, ',italic(R)^2, '= 0.04, ', italic(p),'= 0.07')))+
annotate("text",x=1980,y=(min(clim.july_sept$mean.rh,na.rm = TRUE))*1.04,col=3,
label=expression(paste(italic(slope),'= -0.038, ',italic(R)^2, '= 0.06, ', italic(p),'= 0.03')))+
mythemeplot()+
theme(axis.title.x=element_blank())
summary(lm( clim.july_sept$mean.rh~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6rh~clim.july_sept$year))
tmean.10 <- ggplot(clim.oct,aes(year,tmean,col="Oct"))+
geom_line()+geom_point()+
#stat_smooth(method=loess,span=0.02,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "lm",lty=2)+
geom_smooth(method = "loess",span=0.2,se = FALSE,lty=1)+
xlab(label = "Year")+ylab(label = "Temperature (0.1 degree)")+
annotate("text",x=1990,y=(min(clim.oct$tmean))*1.02,
label=expression(paste(italic(slope),'= 0.212, ',italic(R)^2, '= 0.09, ', italic(p),'= 0.012')))+
scale_colour_manual(name="Season",
values=c("Oct" = 1))+
mythemeplot()+
theme(legend.position = "top",axis.title.x=element_blank())
summary(lm( clim.oct$tmean/10~clim.oct$year))
tmean.7.9 <- ggplot(clim.july_sept,aes(year,mean.tmean,col="July-Sept"))+
geom_line(aes(col="July-Sept"))+geom_point()+
#stat_smooth(method=loess,se=FALSE,lty=2,lwd=1.0)+
#geom_smooth(method = "lm",col="black",lty=2)+
geom_smooth(method = "loess",span=0.2,col=4,se = F,lty=1)+
geom_line(aes(year,mean.3.6tmean,col="Mar-June"))+geom_point(aes(year,mean.3.6tmean,col="Mar-June"))+
geom_smooth(aes(year,mean.3.6tmean,col="Mar-June"),method = "loess",span=0.2,se=F,col=3,lwd=1.5,lty=1)+
geom_smooth(aes(year,mean.3.6tmean),
method = "lm",col=3,lty=2)+
scale_colour_manual(name="Season",
values=c("Mar-June" = 3, "July-Sept"=4))+
xlab(label = "Year")+ylab(label = "Temperature (0.1 degree)")+
annotate("text",x=1980,y=(max(clim.july_sept$mean.tmean,na.rm = TRUE))*0.8,col=3,
label=expression(paste(italic(slope),'= 0.203, ',italic(R)^2, '= 0.23, ', italic(p),'< 0.001')))+
mythemeplot()+
theme(legend.position = "top",
axis.title.x=element_blank())
summary(lm( clim.july_sept$mean.tmean~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6tmean~clim.july_sept$year))
pdsi.10 <- ggplot(clim.oct,aes(year,scpdsi))+
geom_line()+geom_point()+
stat_smooth(method=lm,se=FALSE,lty=2,lwd=1.0)+
geom_smooth(method = "lm",col="black",lty=2)+
geom_smooth(method = "loess",span=0.2,col=1,lwd=1.2,se=F,lty=1)+
xlab(label = "Year")+ylab(label = "scPDSI")+
annotate("text",x=1990,y=(max(clim.oct$scpdsi))*0.95,
label=expression(paste(italic(slope),'= -0.025, ',italic(R)^2, '= 0.03, ', italic(p),'= 0.09')))+
mythemeplot()
summary(lm( clim.oct$scpdsi~clim.oct$year))
pdsi.7.9 <- ggplot(clim.july_sept,aes(year,mean.pdsi))+
geom_line(col=4)+geom_point(col=4)+
stat_smooth(method=lm,col=4,lty=2,lwd=1.0)+
geom_smooth(method = "loess",span=0.2,col=4,se=F,lty=1,lwd=1.2)+
geom_line(aes(year,mean.3.6pdsi),col=3)+
geom_point(aes(year,mean.3.6pdsi),col=3)+
stat_smooth(aes(year,mean.3.6pdsi),method=lm,col=3,lty=2,lwd=1.0)+
geom_smooth(aes(year,mean.3.6pdsi),method = "loess",span=0.2,col=3,se=F,lty=1,lwd=1.2)+
xlab(label = "Year")+ylab(label = "scPDSI")+
annotate("text",x=1980,y=(max(clim.july_sept$mean.pdsi,na.rm = TRUE))*0.95,col=4,
label=expression(paste(italic(slope),'= -0.022, ',italic(R)^2, '= 0.03, ', italic(p),'< 0.09')))+
annotate("text",x=1980,y=(max(clim.july_sept$mean.pdsi,na.rm = TRUE))*0.80,col=3,
label=expression(paste(italic(slope),'= -0.033, ',italic(R)^2, '= 0.12, ', italic(p),'= 0.003')))+
mythemeplot()
summary(lm( clim.july_sept$mean.pdsi~clim.july_sept$year))
summary(lm( clim.july_sept$mean.3.6pdsi~clim.july_sept$year))
tiff("./plot/Figure S9 climate variability.tiff", width = 20, height = 16,
units = "cm",res = 400,bg = "transparent",compression = "lzw",
family = "serif")
ggarrange(tmean.7.9,tmean.10,
rh.7.9,rh.10,
pdsi.7.9,pdsi.10,
labels = c("a","a1","b","b1",
"c","c1"),
nrow = 3,ncol=2,
label.x = 0.1,
label.y = c(0.95,0.95,1.15,1.15,1.15,1.15),
heights = c(0.55,0.45,0.5),
align = "hv",
#common.legend = TRUE,
font.label = list(size=24,family="serif"))
dev.off()
|
#' Aggregate Predictions
#'
#' Aggregate prediction results by averaging (for \code{regr}, and \code{classif} with prob) or mode ( \code{classif} with response).
#' (works for regr, classif, multiclass)
#'
#' @param pred.list [list of \code{Predictions}]\cr
#'   Predictions to aggregate. All elements must stem from the same task and
#'   share the same predict type.
#' @param spt [\code{character(1)}]\cr
#'   Predict type of the aggregated prediction, one of "prob" or "response".
#'   Default is \code{NULL}, which keeps the predict type of the inputs.
#' @return [\code{Prediction}] The aggregated prediction.
#' @export
aggregatePredictions = function(pred.list, spt = NULL) {
  # return pred if list only contains one pred
  if (length(pred.list) == 1) {
    messagef("'pred.list' has only one prediction and returns that one unlisted. Argument 'spt' will not be applied.")
    return(pred.list[[1]])
  }
  # Check that all predictions stem from the same task and share one predict
  # type. NB: all.equal() returns a character vector (not FALSE) on mismatch,
  # so it must be wrapped in isTRUE() before negation; the bare !all.equal()
  # used previously errored exactly when the descriptions differed.
  x = lapply(pred.list, function(p) getTaskDescription(p))
  task.unequal = vapply(2:length(x), function(i) !isTRUE(all.equal(x[[1]], x[[i]])), logical(1L))
  # + 1L: element j of task.unequal corresponds to prediction j + 1
  if (any(task.unequal)) stopf("Task descriptions in prediction '1' and '%s' differ. This is not possible!", which(task.unequal)[1] + 1L)
  x = lapply(pred.list, function(p) p$predict.type)
  pts.unequal = vapply(2:length(x), function(i) !isTRUE(all.equal(x[[1]], x[[i]])), logical(1L))
  if (any(pts.unequal)) stopf("Predict type in prediction '1' and '%s' differ. This is not possible!", which(pts.unequal)[1] + 1L)
  # Drop predictions with missing or broken responses.
  broken = unlist(lapply(pred.list, function(p) checkIfNullOrAnyNA(p$data$response)))
  if (any(broken)) messagef("Prediction '%s' is broken and will be removed.", which(broken))
  pred.list = pred.list[!broken]
  if (length(pred.list) == 0L) stopf("All predictions in 'pred.list' are broken. Nothing left to aggregate.")
  # Meta data is taken from the first remaining prediction (all are equal).
  pred1 = pred.list[[1]]
  type = getTaskType(pred1)
  td = getTaskDescription(pred1)
  rn = row.names(pred1$data)
  id = pred1$data$id
  tr = pred1$data$truth
  pt = pred1$predict.type
  if (is.null(spt)) spt = pt
  assertChoice(spt, choices = c("prob", "response"))
  ti = NA_real_
  pred.length = length(pred.list)
  # Reduce results
  if (type == "classif") {
    if (pt == "prob") {
      # Average the class probabilities; for spt = "response" reduce the
      # averaged probabilities to the most likely class afterwards.
      preds = lapply(pred.list, getPredictionProbabilities, cl = td$class.levels)
      y = Reduce("+", preds) / pred.length
      if (spt == "response") {
        # Index into class.levels instead of factor(..., labels = ...):
        # the latter errors when some class never attains the maximum.
        y = factor(td$class.levels[max.col(y)], levels = td$class.levels)
      }
    } else {
      if (spt == "response") {
        # Majority vote over the factor responses.
        preds = as.data.frame(lapply(pred.list, getPredictionResponse))
        y = factor(apply(preds, 1L, computeMode), td$class.levels)
      } else {
        # rowiseRatio copied from Tong He (he said it's not the best solution).
        # This method should be rarely used, because pt = "response",
        # spt = "prob" should perform worse than setting pt = "prob" (due to
        # information loss when converting probs to factors)
        preds = as.data.frame(lapply(pred.list, function(p) p$data$response))
        y = rowiseRatio(preds, td$class.levels, model.weight = NULL)
      }
    }
  } else {
    # Regression: average the numeric responses.
    preds = lapply(pred.list, getPredictionResponse)
    y = Reduce("+", preds) / pred.length
  }
  return(makePrediction(task.desc = td, rn, id = id, truth = tr, predict.type = spt, predict.threshold = NULL, y, time = ti))
}
#' Expand Predictions according to frequency argument
#'
#' @param pred.list [\code{list} of \code{Predictions}]\cr
#'   List of Predictions which should be expanded.
#' @param freq [\code{named numeric}]\cr
#'   Named vector containing the frequency of the chosen predictions.
#'   Vector names must be set to the model names.
#' @return [\code{list} of \code{Predictions}] Each prediction repeated
#'   according to its frequency; names are the model name suffixed with a
#'   running index ("model_1", "model_2", ...).
#' @export
expandPredList = function(pred.list, freq) {
  assertClass(pred.list, "list")
  assertClass(freq, "numeric")
  # Every element must be a Prediction. Using all() over a logical vector
  # avoids the length > 1 condition that the previous unique()-based check
  # produced for mixed lists (an error in if() since R 4.2).
  is.pred = vapply(pred.list, inherits, logical(1L), what = "Prediction")
  if (!all(is.pred)) stopf("List elements in 'pred.list' are not all of class 'Prediction'")
  # Drop models with zero frequency.
  keep = names(which(freq > 0))
  freq.kept = freq[keep]
  preds.kept = pred.list[keep]
  # Model names repeated according to their frequency.
  expand = rep(names(freq.kept), freq.kept)
  res = vector("list", length(expand))
  names(res) = paste(expand, seq_along(expand), sep = "_")
  for (i in seq_along(expand)) {
    # Single-bracket assignment keeps the Prediction wrapped in a list slot.
    res[i] = preds.kept[expand[i]]
  }
  res
}
| /R/aggregatePredictions.R | no_license | philippstats/mlr | R | false | false | 4,320 | r | #' Aggregate Predictions
#'
#' Aggregate prediction results by averaging (for \code{regr}, and \code{classif} with prob) or mode ( \code{classif} with response).
#' (works for regr, classif, multiclass)
#'
#' @param pred.list [list of \code{Predictions}]\cr
#'   Predictions to aggregate; all must stem from the same task and share
#'   the same predict type.
#' @param spt [\code{character(1)}]\cr
#'   Predict type of the aggregated prediction ("prob" or "response").
#'   \code{NULL} (default) keeps the predict type of the inputs.
#' @return [\code{Prediction}] The aggregated prediction.
#' @export
aggregatePredictions = function(pred.list, spt = NULL) {
  # return pred if list only contains one pred
  if (length(pred.list) == 1) {
    messagef("'pred.list' has only one prediction and returns that one unlisted. Argument 'spt' will not be applied.")
    return(pred.list[[1]])
  }
  # Check if "equal": all predictions must come from the same task and
  # share one predict type.
  # NOTE(review): all.equal() returns a character vector on mismatch, so
  # !all.equal(...) errors exactly when the objects differ — this should be
  # !isTRUE(all.equal(...)). TODO confirm and fix.
  x = lapply(pred.list, function(x) getTaskDescription(x))
  task.unequal = unlist(lapply(2:length(x), function(i) !all.equal(x[[1]], x[[i]])))
  if (any(task.unequal)) stopf("Task descriptions in prediction '1' and '%s' differ. This is not possible!", which(task.unequal)[1])
  x = lapply(pred.list, function(x) x$predict.type)
  pts.unequal = unlist(lapply(2:length(x), function(i) !all.equal(x[[1]], x[[i]])))
  if (any(pts.unequal)) stopf("Predict type in prediction '1' and '%s' differ. This is not possible!", which(pts.unequal)[1])
  # Drop predictions whose response is NULL or contains NA.
  x = unlist(lapply(pred.list, function(x) checkIfNullOrAnyNA(x$data$response)))
  if (any(x)) messagef("Prediction '%s' is broken and will be removed.", which(x))
  pred.list = pred.list[!x]
  # Body: meta data is taken from the first remaining prediction.
  pred1 = pred.list[[1]]
  type = getTaskType(pred1)
  td = getTaskDescription(pred1)
  rn = row.names(pred1$data)
  id = pred1$data$id
  tr = pred1$data$truth
  pt = pred1$predict.type
  if (is.null(spt)) spt = pt
  assertChoice(spt, choices = c("prob", "response"))
  ti = NA_real_
  pred.length = length(pred.list)
  # Reduce results
  # type = "classif"
  if (type == "classif") {
    # pt = "prob": average class probabilities; optionally reduce to the
    # most likely class when spt = "response".
    if (pt == "prob") {
      # same method for spt response and prob
      preds = lapply(pred.list, getPredictionProbabilities, cl = td$class.levels)
      y = Reduce("+", preds) / pred.length
      if (spt == "response") {
        y = factor(max.col(y), labels = td$class.levels)
      }
    # pt = "response": majority vote (or ratio-based probabilities)
    } else {
      if (spt == "response") {
        preds = as.data.frame(lapply(pred.list, getPredictionResponse))
        y = factor(apply(preds, 1L, computeMode), td$class.levels)
      } else {
        # rowiseRatio copied from Tong He (he said it's not the best solution).
        # This method should be rarely used, because pt = "response",
        # spt = "prob" should perform worse than setting pt = "prob" (due to
        # information loss when converting probs to factors)
        preds = as.data.frame(lapply(pred.list, function(x) x$data$response))
        y = rowiseRatio(preds, td$class.levels, model.weight = NULL)
      }
    }
  # type = "regr": average the numeric responses
  } else {
    preds = lapply(pred.list, getPredictionResponse)
    y = Reduce("+", preds)/pred.length
  }
  return(makePrediction(task.desc = td, rn, id = id, truth = tr, predict.type = spt, predict.threshold = NULL, y, time = ti))
}
# FIXME: clean up naming
#' Expand Predictions according to frequency argument
#'
#' @param pred.list [\code{list} of \code{Predictions}]\cr
#' List of Predictions which should be expanded.
#' @param freq [\code{named vector}]\cr
#' Named vector containing the frequency of the chosen predictions.
#' Vector names must be set to the model names.
#' @export
expandPredList = function(pred.list, freq) {
  assertClass(pred.list, "list")
  assertClass(freq, "numeric")
  # NOTE(review): unique() over the per-element checks can yield c(TRUE, FALSE)
  # for mixed lists, making the if() condition length 2 (an error in R >= 4.2);
  # all(vapply(pred.list, inherits, logical(1), "Prediction")) would be safer.
  only.preds = unique(unlist(lapply(pred.list, function(x) any(class(x) == "Prediction"))))
  if (!only.preds) stopf("List elements in 'pred.list' are not all of class 'Prediction'")
  # remove 0s: drop models that were never selected
  keep = names(which(freq > 0))
  freq1 = freq[keep]
  pred.list1 = pred.list[keep]
  # create grid for loop: one row per kept model with its frequency
  grid = data.frame(model = names(freq1), freq1, row.names = NULL)
  #expand_ = data.frame(model = rep(grid$model, grid$freq1)) %>% as.matrix %>% as.vector()
  expand = as.character(rep(grid$model, grid$freq1))
  # Preallocate the expanded list; names are "model_index".
  # NOTE(review): 1:length(expand) misbehaves for empty 'expand'; seq_along() is safer.
  pred.list2 = vector("list", length(expand))
  names(pred.list2) = paste(expand, 1:length(expand), sep = "_")
  for (i in seq_along(expand)) {
    #pred.list[i] %>% print
    use = expand[i]
    #messagef("This is nr %s, %s", i, use)
    pred.list2[i] = pred.list1[use]
    #message("---------------------------------------------------")
  }
  pred.list2
}
|
testlist <- list(kern = c(0.00753173901853728, 0, 0, 0, 0), val = c(9.34349497625167e-275, 5.91668024075023e-257, -2.75802740282252e-28, -8.6077086674033e-26, -8.63673874871544e-26, -3.41266294396796e+38, 7.66727900074612e-180, 4.79805310736869e-23, -7.45992038897424e-239, 1.02719694514458e+281, -1.30631276493468e+45, 2.88304142118581e+220, -2.01059993568949e+22, 9.55051474399118e+135, -1.28311027722622e+98, 1.0120585531904e+288, 1.82179125027559e+84, -1.25549243848819e-226, 5.14003647355047e-105, 8.45657704107198e+67, 2.91770969973858e+182))
result <- do.call(lowpassFilter:::convolve,testlist)
str(result) | /lowpassFilter/inst/testfiles/convolve/AFL_convolve/convolve_valgrind_files/1616007487-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 620 | r | testlist <- list(kern = c(0.00753173901853728, 0, 0, 0, 0), val = c(9.34349497625167e-275, 5.91668024075023e-257, -2.75802740282252e-28, -8.6077086674033e-26, -8.63673874871544e-26, -3.41266294396796e+38, 7.66727900074612e-180, 4.79805310736869e-23, -7.45992038897424e-239, 1.02719694514458e+281, -1.30631276493468e+45, 2.88304142118581e+220, -2.01059993568949e+22, 9.55051474399118e+135, -1.28311027722622e+98, 1.0120585531904e+288, 1.82179125027559e+84, -1.25549243848819e-226, 5.14003647355047e-105, 8.45657704107198e+67, 2.91770969973858e+182))
result <- do.call(lowpassFilter:::convolve,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{maxNoisyGenes}
\alias{maxNoisyGenes}
\title{Function for extracting genes maximal variability}
\usage{
maxNoisyGenes(noise, cl = NULL, set = NULL)
}
\arguments{
\item{noise}{List object with the background noise model and a variability matrix, returned by the \code{compNoise} function.}
\item{cl}{List object with Louvain clustering information, returned by the \code{graphCluster} function. Default is \code{NULL}.}
\item{set}{Positive integer number or vector of integers corresponding to valid cluster numbers. Noise levels are computed across all cells in this subset of clusters. Default is \code{NULL} and noise levels are computed across all cells.}
}
\value{
Vector with average gene expression variability in decreasing order, computed across all cells or only cells in a set of clusters (if \code{cl} and
\code{set} are given).
}
\description{
This function extracts genes with maximal variability in a cluster or in the entire data set.
}
\examples{
res <- pruneKnn(intestinalDataSmall,metric="pearson",knn=10,alpha=1,no_cores=1,FSelect=FALSE)
noise <- compNoise(intestinalDataSmall,res,pvalue=0.01,genes = NULL,no_cores=1)
mgenes <- maxNoisyGenes(noise)
}
| /RaceID/man/maxNoisyGenes.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,271 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{maxNoisyGenes}
\alias{maxNoisyGenes}
\title{Function for extracting genes maximal variability}
\usage{
maxNoisyGenes(noise, cl = NULL, set = NULL)
}
\arguments{
\item{noise}{List object with the background noise model and a variability matrix, returned by the \code{compNoise} function.}
\item{cl}{List object with Louvain clustering information, returned by the \code{graphCluster} function. Default is \code{NULL}.}
\item{set}{Positive integer number or vector of integers corresponding to valid cluster numbers. Noise levels are computed across all cells in this subset of clusters. Default is \code{NULL} and noise levels are computed across all cells.}
}
\value{
Vector with average gene expression variability in decreasing order, computed across all cells or only cells in a set of clusters (if \code{cl} and
\code{set} are given).
}
\description{
This function extracts genes with maximal variability in a cluster or in the entire data set.
}
\examples{
res <- pruneKnn(intestinalDataSmall,metric="pearson",knn=10,alpha=1,no_cores=1,FSelect=FALSE)
noise <- compNoise(intestinalDataSmall,res,pvalue=0.01,genes = NULL,no_cores=1)
mgenes <- maxNoisyGenes(noise)
}
|
# Moment parameters of the combined two-kernel statistic for fixed rho values.
# For each rho in 'rho2' the two kernels spanned by z1.1 and z2.1 are mixed
# with weights (1 - rho) and rho; the four rows of the returned matrix hold
# the first four trace-based moment terms (one column per rho value).
SKAT_2Kernel_Ortho_Optimal_Get_Params_each_r_FixedRho1 <-
function(z1.1, z2.1, rho2){
  w1 <- 1 - rho2  # weight on the first kernel
  w2 <- rho2      # weight on the second kernel
  # Gram matrices, their squares, and the cross products between the kernels.
  K1 <- crossprod(z1.1)           # t(z1.1) %*% z1.1
  K2 <- crossprod(z2.1)           # t(z2.1) %*% z2.1
  K1sq <- K1 %*% K1
  K2sq <- K2 %*% K2
  C12 <- crossprod(z1.1, z2.1)    # t(z1.1) %*% z2.1
  M1 <- tcrossprod(C12)           # C12 %*% t(C12)
  M2 <- crossprod(C12)            # t(C12) %*% C12
  M3 <- t(C12) %*% K1 %*% C12
  out <- matrix(0, nrow = 4, ncol = length(rho2))
  out[1, ] <- sum(z1.1^2) * w1 + sum(z2.1^2) * w2
  out[2, ] <- sum(K1^2) * w1^2 + sum(K2^2) * w2^2 + 2 * sum(C12^2) * w1 * w2
  out[3, ] <- sum(K1sq * K1) * w1^3 + sum(K2sq * K2) * w2^3 +
    3 * sum(M1 * K1) * w1^2 * w2 + 3 * sum(K2 * M2) * w1 * w2^2
  out[4, ] <- sum(K1sq * K1sq) * w1^4 + sum(K2sq * K2sq) * w2^4 +
    4 * sum(M1 * K1sq) * w1^3 * w2 + 4 * sum(K2sq * M2) * w1 * w2^3 +
    4 * sum(K2 * M3) * w1^2 * w2^2 + 2 * sum(M2 * M2) * w1^2 * w2^2
  out
}
| /TransMetaRare/R/SKAT_2Kernel_Ortho_Optimal_Get_Params_each_r_FixedRho1.R | no_license | shijingc/TransMetaRare | R | false | false | 894 | r | SKAT_2Kernel_Ortho_Optimal_Get_Params_each_r_FixedRho1 <-
function(z1.1, z2.1, rho2){
# Output: 4 moment terms (rows) per rho value (columns).
c1<-matrix(rep(0,4* length(rho2)), ncol=length(rho2))
# Gram matrices of the two kernel bases and their squares.
A1<-t(z1.1) %*% z1.1
B1<-t(z2.1) %*% z2.1
A2<-A1 %*% A1
B2<-B1 %*% B1
# Cross products between the two kernel bases.
A11<-t(z1.1) %*% z2.1
A22<-A11 %*% t(A11)
B22<-t(A11) %*% A11
B333<-t(A11) %*% A1 %*% A11
#####################################
# Trace-based moment terms: each row combines kernel 1 (weight 1 - rho2)
# and kernel 2 (weight rho2), with cross terms from the binomial expansion.
c1[1,]<-sum(z1.1^2) * (1-rho2) + sum(z2.1^2) * rho2
c1[2,]<-sum(A1^2) * (1-rho2)^2 + sum(B1^2) * (rho2)^2 + sum(A11^2) * 2 * (1-rho2) * rho2
c1[3,]<-sum(A2 * A1) * (1-rho2)^3 + sum(B2 * B1) * (rho2)^3 + sum(A22 * A1) * 3 * (1-rho2)^2 * rho2 + sum(B1 * B22) * 3 * (1-rho2) * rho2^2
c1[4,]<-sum(A2 * A2) * (1-rho2)^4 + sum(B2 * B2) * (rho2)^4 + sum(A22 * A2) * 4 * (1-rho2)^3 * rho2 + sum(B2 * B22) * 4 * (1-rho2) * rho2^3 + sum(B1 * B333) * 4 * (1-rho2)^2 * rho2^2 + sum(B22 * B22) * 2 * (1-rho2)^2 * rho2^2
return(c1)
}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{stat_fivenumber}
\alias{stat_fivenumber}
\title{Calculate components of a five-number summary}
\usage{
stat_fivenumber(mapping = NULL, data = NULL, geom = "boxplot",
position = "dodge", na.rm = FALSE, ...)
}
\arguments{
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{mapping}{The aesthetic mapping, usually constructed
with \code{\link{aes}} or \code{\link{aes_string}}. Only
needs to be set at the layer level if you are overriding
the plot defaults.}
\item{data}{A layer specific dataset - only needed if you
want to override the plot defaults.}
\item{geom}{The geometric object to use to display the data}
\item{position}{The position adjustment to use for
overlapping points on this layer}
\item{...}{other arguments passed on to
\code{\link{layer}}. This can include aesthetics whose
values you want to set, not map. See \code{\link{layer}}
for more details.}
}
\value{
A data frame with additional columns:
\item{width}{width of boxplot}
\item{ymin}{minimum}
\item{lower}{lower hinge, 25\% quantile}
\item{notchlower}{lower edge of notch = median - 1.58 * IQR / sqrt(n)}
\item{middle}{median, 50\% quantile}
\item{notchupper}{upper edge of notch = median + 1.58 * IQR / sqrt(n)}
\item{upper}{upper hinge, 75\% quantile}
\item{ymax}{maximum}
}
\description{
The five number summary of a sample is the minimum, first quartile,
median, third quartile, and maximum.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggthemes:::rd_aesthetics("stat_fivenumber", ggthemes:::StatFivenumber)}
}
\seealso{
\code{\link{stat_boxplot}}
}
| /man/stat_fivenumber.Rd | no_license | daroczig/ggthemes | R | false | false | 1,726 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{stat_fivenumber}
\alias{stat_fivenumber}
\title{Calculate components of a five-number summary}
\usage{
stat_fivenumber(mapping = NULL, data = NULL, geom = "boxplot",
position = "dodge", na.rm = FALSE, ...)
}
\arguments{
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{mapping}{The aesthetic mapping, usually constructed
with \code{\link{aes}} or \code{\link{aes_string}}. Only
needs to be set at the layer level if you are overriding
the plot defaults.}
\item{data}{A layer specific dataset - only needed if you
want to override the plot defaults.}
\item{geom}{The geometric object to use to display the data}
\item{position}{The position adjustment to use for
overlapping points on this layer}
\item{...}{other arguments passed on to
\code{\link{layer}}. This can include aesthetics whose
values you want to set, not map. See \code{\link{layer}}
for more details.}
}
\value{
A data frame with additional columns:
\item{width}{width of boxplot}
\item{ymin}{minimum}
\item{lower}{lower hinge, 25\% quantile}
\item{notchlower}{lower edge of notch = median - 1.58 * IQR / sqrt(n)}
\item{middle}{median, 50\% quantile}
\item{notchupper}{upper edge of notch = median + 1.58 * IQR / sqrt(n)}
\item{upper}{upper hinge, 75\% quantile}
\item{ymax}{maximum}
}
\description{
The five number summary of a sample is the minimum, first quartile,
median, third quartile, and maximum.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggthemes:::rd_aesthetics("stat_fivenumber", ggthemes:::StatFivenumber)}
}
\seealso{
\code{\link{stat_boxplot}}
}
|
context("Comprehensive Test for Disparate Impact Remover Algorithm")
test_that("running dataset test", {
  # Build a binary-label dataset from the example CSV shipped with the package.
  ds <- aif360::aif_dataset(
    data_path = system.file("extdata", "data.csv", package = "aif360"),
    favor_label = 0,
    unfavor_label = 1,
    unprivileged_protected_attribute = 0,
    privileged_protected_attribute = 1,
    target_column = "income",
    protected_attribute = "sex")
  expect_equal(ds$favorable_label, 0)
  expect_equal(ds$unfavorable_label, 1)
  # Disparate impact of the raw dataset.
  metric_before <- binary_label_dataset_metric(ds, list('sex', 1), list('sex', 0))
  expect_equal(metric_before$disparate_impact(), 1.28, tolerance = 0.00296)
  # Apply the remover at full repair level and re-measure; the label-based
  # disparate impact should be unchanged (the remover repairs features only).
  remover <- disparate_impact_remover(repair_level = 1.0, sensitive_attribute = 'sex')
  repaired <- remover$fit_transform(ds)
  metric_after <- binary_label_dataset_metric(repaired, list('sex', 1), list('sex', 0))
  expect_equal(metric_after$disparate_impact(), 1.28, tolerance = 0.00296)
})
| /aif360/aif360-r/tests/testthat/test-disparate-impact-remover.R | permissive | SumaiyaSaima05/AIF360 | R | false | false | 860 | r | context("Comprehensive Test for Disparate Impact Remover Algorithm")
test_that("running dataset test", {
  # Build a binary-label dataset from the example CSV shipped with the package.
  dd <- aif360::aif_dataset(
    data_path = system.file("extdata", "data.csv", package="aif360"),
    favor_label=0,
    unfavor_label=1,
    unprivileged_protected_attribute=0,
    privileged_protected_attribute=1,
    target_column="income",
    protected_attribute="sex")
  expect_equal(dd$favorable_label, 0)
  expect_equal(dd$unfavorable_label, 1)
  # Disparate impact of the raw dataset (privileged group: sex == 1).
  bm <- binary_label_dataset_metric(dd, list('sex', 1), list('sex',0))
  expect_equal(bm$disparate_impact(), 1.28, tolerance=0.00296)
  # Apply the remover at full repair level and re-measure; the label-based
  # disparate impact stays the same since only features are repaired.
  dr <- disparate_impact_remover(repair_level=1.0, sensitive_attribute='sex')
  new_dd <- dr$fit_transform(dd)
  new_bm <- binary_label_dataset_metric(new_dd, list('sex', 1), list('sex',0))
  expect_equal(new_bm$disparate_impact(), 1.28, tolerance=0.00296)
})
|
########
#Coded by Cristina Robinson
#Last Modified 5-1-2019
########
#SET UP!!!
rm(list = objects())
#setwd()##set your directory here
dir <- getwd()
source("Source_OCEvolution.R")#Libraries and scripts
#load trees and data (this takes a while)
ConsensusTree <- LoadPrettyTree("HacketTrees.nex") #note that the variable Path is also generated here
ConsensusTree$tip.label[which(ConsensusTree$tip.label == "Philesturnus_carunculatus")] <- "Philesturnus_rufusater"
birbs <- Loadbirds("MainDataset.csv", ConsensusTree)
OCVariants <- LoadOtherOC("StabilityDataDiTriCont.csv", birbs)
remove <- which(is.na(OCVariants$tri))
VariantTree <- drop.tip(ConsensusTree, remove)
#Set variables and filestructure to output data
OpenClose <- birbs$O.C
OpenClose <- factor(OpenClose, labels = c("closed","open"))
names(OpenClose) <- birbs$BirdtreeFormat
OpenCloseTri <- OCVariants$tri[-remove]
OpenCloseTri <- factor(OpenCloseTri, labels = c("closed","delayed-closed","open"))
names(OpenCloseTri) <- OCVariants$BirdtreeFormat[-remove]
#Loop variables for main analysis:
call <- names(birbs)[1:(length(birbs))]
call <- call[!call %in% c("O.C", "BirdtreeFormat", "Family", "FormerFamily")]#remove non-independant vars
mod <- rep("log", length(call))
mod[c(1,19)] <- "linear"
FullRate <- RateMatx(ace(x=OpenClose, phy=ConsensusTree, type="discrete", model="ER"))
FullRateTri <- RateMatx(ace(x=OpenCloseTri, phy=VariantTree, type="discrete", model="ER"))
#Loop variables for the Jacknife; based on significnace from the main analysis
mod2 <- mod[c(2, 5, 8, 11, 14, 18)]
call2 <- call[c(2, 5, 8, 11, 14, 18)]
#Create folder Structure
MakeFolderStructure(dir)
#Parsimony Analyis
#Get the number of transitions. This takes while!
ParseTrees <- make.simmap(ConsensusTree, OpenClose, "ER", 10000)
OCRoots <- table(unlist(sapply(1:10000, function(x) names(ParseTrees[[x]]$maps[[1]][1]))))
save <- countSimmap(ParseTrees)
colMeans(save$Tr)
min(save$Tr[,1])
####FIGURES and TABLE DATA:
#Figure 1 V2: make a histogram and boxplot
pdf("Figure 1.pdf")
par(mfrow=c(2,2), mar=c(3,3,1,1), mgp=c(1.5,.5,0))
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final,birbs$Song.rep.final),
OC=OpenClose,title=c("Syllable","Song"), DOIT=FALSE)
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final[-remove]),
OC=OpenCloseTri,
labels=c("Song-Stable", "longer Learning", "Song-Plastic"),
title=c("Syllable"), DOIT=FALSE)
plot(OCVariants$cont[-remove],birbs$Syllable.rep.final[-remove], col=rgb(1,0,1),
xlab="Years Spent Learning", ylab="Syllable Repertoire",
log='y', font.lab=2)
linmodel <- lm(x~y, list(x=OCVariants$cont[-remove], y=log(birbs$Syllable.rep.final[-remove])))
summary(linmodel)
abline(linmodel,
lwd=2)
dev.off()
#Figure 2 V2:
pdf("Figure 2.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenClose, birbs, ConsensusTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="leftwards")
dev.off()
#old fig 1
#QuickScatterBox(vari=cbind(birbs$Syllable.rep.final,birbs$Song.rep.final),
# OC=OpenClose,title=c("Syllable","Song"))
#Figure 2AB
#creates RainbowPlots, runs phylANOVA, outputs text, runs Brownielite, plots it
pdf("DoublePlot.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenClose, birbs, ConsensusTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenClose, birbs, ConsensusTree, call[8], mod=mod[8], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="leftwards")
dev.off()
#Table 1 and 2 data: per-variable rainbow plots, phylANOVA, and Brownie
#rate plots for the two-state (open/closed) classification.
setwd(file.path(dir, "DataWarehouse"))
ANOVAData <- as.list(1:(length(call)-2))
for(i in 18:2){
  #get data and rainbow plots
  DataExtraction(OpenClose, birbs, ConsensusTree, call[i], mod=mod[i], fullrate=FullRate)
  ANOVAData[[i-1]] <- ANOVARun #created by DataExtraction() as a side effect
  #plotting brownie data (CSVs written by DataExtraction above)
  dataset <- read.csv(paste0(call[i],".csv"))
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  pdf(paste0(call[i], ".Brownie.pdf"))
  par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
  #Append the printed rate summaries to a running text log.
  sink(file = "Brownie.txt", append = TRUE, split = FALSE)
  BrowniePlotRates(dataset, paste0(call[i]), Group=c("Stable", "Plastic"))
  BrowniePlotRates(datasetFULL, paste0(call[i],"FULL"), Group=c("Stable", "Plastic"))
  sink(file=NULL)
  dev.off()
}
#get Anova results
ANOVAResults(ANOVAData)
#Figures 3 & 7 combined: Brownie full-rate plots for all song variables
#under the two-state (open/closed) classification.
pdf("BrownieFullRatesAlltypes.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5,11,14,17,18)){#full rates
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  #Build a readable panel title from the variable name,
  #e.g. "Syllable.rep.final" -> "Syllable Rep ".
  title <- unlist(strsplit(call[i],"[.]"))
  title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                 sep="", collapse=" ")
  title <- gsub("Final", "", title)
  #Upper x-axis limit from the largest finite rate (the original kept the
  #whole NA-stripped vector here and relied on max() below to collapse it;
  #computing the max directly is equivalent).
  MAX <- max(datasetFULL$ARDRate1, na.rm = TRUE)
  #"Group=" (not "Groups=") matches every other BrowniePlotRates() call;
  #"Groups" cannot partially match the "Group" argument.
  BrowniePlotRates(datasetFULL, title, Group=c("Stable", "Plastic"),
                   Xlim=c(0, max(.2, MAX)))
}
dev.off()
#tristates: repeat the main analysis with the three-state classification
#(closed / delayed-closed / open) on the reduced tree.
setwd(file.path(dir, "DataWarehouse/Tri"))
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final[-remove], birbs$Song.rep.final[-remove]),
                OC=OpenCloseTri,
                labels=c("Song-Stable", "longer Learning", "Song-Plastic"),
                title=c("Syllable","Song"))
#Side-by-side three-state plots for syllable (call[2]) and song (call[8]).
pdf("DoublePlotTri.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRateTri,
               BROWNIE = FALSE, ANOVA = TRUE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[8], mod=mod[8], fullrate=FullRateTri,
               BROWNIE = FALSE, ANOVA = TRUE,DP=TRUE,Flip="leftwards")
dev.off()
#Per-variable extraction and Brownie plots, three-state version.
ANOVAData <- as.list(1:(length(call)-2))
for(i in 18:2){
  #get data and rainbow plots
  DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[i], mod=mod[i], fullrate=FullRateTri)
  ANOVAData[[i-1]] <- ANOVARun #created by DataExtraction() as a side effect
  #plotting brownie data (CSVs written by DataExtraction above)
  dataset <- read.csv(paste0(call[i],".csv"))
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  pdf(paste0(call[i], "Tri.Brownie.pdf"))
  par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
  sink(file = "TriBrownie.txt", append = TRUE, split = FALSE)
  BrowniePlotRates(dataset, paste0(call[i], "Tri"),
                   col = c('blue', 'purple', 'red'),
                   Group=c("Stable", "Longer-Learning", "Plastic"))
  BrowniePlotRates(datasetFULL, paste0(call[i],"FULL-Tri"),
                   col = c('blue', 'purple', 'red'),
                   Group=c("Stable", "Longer-Learning", "Plastic"))
  sink(file=NULL)
  dev.off()
}
#get Anova results
ANOVAResults(ANOVAData)
#Figures 3 & 7 combined (three-state version): Brownie full-rate plots.
pdf("BrownieFullRatesAlltypes.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5,11,14,17,18)){#full rates
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  #Readable panel title from the variable name.
  title <- unlist(strsplit(call[i],"[.]"))
  title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                 sep="", collapse=" ")
  title <- gsub("Final", "", title)
  #x-axis limit from the largest finite rate in ANY of the three state
  #classes (the original's NA branch considered only ARDRate1 and skipped
  #max(), which could clip the axis when rates 0/2 were larger).
  MAX <- max(c(datasetFULL$ARDRate0, datasetFULL$ARDRate1,
               datasetFULL$ARDRate2), na.rm = TRUE)
  BrowniePlotRates(datasetFULL, title,
                   Xlim=c(0, max(.2, MAX)),
                   col=c('blue', 'purple', 'red'),
                   Group=c("Stable", "Delayed", "Plastic"))
}
dev.off()
#Ancestral-node plots for every variable/classification combination.
MakeAllNodePlots(birbs, call, OCVariants, ConsensusTree)
#Compare two-rate vs three-rate Brownie models with a likelihood-ratio test.
#First regenerate two-state results on the reduced (tri-state) tree so the
#log-likelihoods are computed on the same species set.
setwd(file.path(dir, "DataWarehouse/Tri/Di"))
for(i in c(2,5,8)){
  DataExtraction(OpenClose[-remove], birbs[-remove,], VariantTree, call[i], mod=mod[i], fullrate=FullRate,
                 RAIN = FALSE, ANOVA = FALSE)
}
setwd(file.path(dir, "DataWarehouse"))
sink(file = "BrownieTriDi.txt", append = TRUE, split = FALSE)
for(i in c(2,5,8)){
  datasetDi <- read.csv(paste0("Tri/Di/",call[i],"FULL.csv"))
  datasetTri <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
  Mean2 <- mean(datasetDi$ARDloglik)
  Mean3 <- mean(datasetTri$ARDloglik)
  #Likelihood-ratio test on the mean log-likelihoods (df = 1 as in the
  #original analysis -- confirm this matches the parameter difference).
  pval <- round(pchisq(2*(Mean3 - Mean2),1,lower.tail=FALSE),digits=3)
  #Format the p-value ("<0.001" when it rounds to zero). Plain if/else
  #replaces the original scalar ifelse() used only for side effects.
  pval <- if (pval == 0) "<0.001" else paste0("=", pval)
  writeLines(call[i])
  writeLines(paste0("TwoRate=", round(Mean2, digits = 4)))
  writeLines(paste0("ThreeRates=", round(Mean3, digits = 4)))
  writeLines(paste0("pVal", pval))
  writeLines(paste("",sep="\n\n"))
  writeLines(paste("",sep="\n\n"))
}
sink(NULL)
#Alternative two-state split: recode "delayed-closed" species as "open"
#(shorter vs longer learning) and rerun the extraction on the reduced tree.
setwd(file.path(dir, "DataWarehouse/Tri/newDi"))
OpenCloseTriSwitch <- OpenCloseTri
OpenCloseTriSwitch[which(OpenCloseTriSwitch=="delayed-closed")] <- "open"
OpenCloseTriSwitch <- droplevels(OpenCloseTriSwitch)
#NOTE(review): only call[2] is regenerated here, but the loop below reads
#newDi files for call[2] AND call[8] -- confirm call[8] exists from a prior run.
DataExtraction(OpenCloseTriSwitch, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRate,
               RAIN = FALSE, ANOVA = FALSE)
sink(file = "BrownieTrinewDi.txt", append = TRUE, split = FALSE)
for(i in c(2,8)){
  datasetDi <- read.csv(paste0("Tri/newDi/",call[i],"FULL.csv"))
  datasetTri <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
  Mean2 <- mean(datasetDi$ARDloglik)
  Mean3 <- mean(datasetTri$ARDloglik)
  #Likelihood-ratio test on the mean log-likelihoods (df = 1, as above).
  pval <- round(pchisq(2*(Mean3 - Mean2),1,lower.tail=FALSE),digits=3)
  #Plain if/else instead of the original side-effect-only scalar ifelse().
  pval <- if (pval == 0) "<0.001" else paste0("=", pval)
  writeLines(call[i])
  writeLines(paste0("TwoRate=", round(Mean2, digits = 4)))
  writeLines(paste0("ThreeRates=", round(Mean3, digits = 4)))
  writeLines(paste0("pVal", pval))
  writeLines(paste("",sep="\n\n"))
  writeLines(paste("",sep="\n\n"))
}
sink(NULL)
setwd(file.path(dir, "DataWarehouse"))
#Figure 3: for each of three variables, plot the two-state, three-state,
#and (except call[5]) recoded two-state ("newDi") Brownie rate panels.
pdf("Figure 3.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5)){#full rates
  #Panel 1: two-state rates.
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  #Readable panel title from the variable name.
  title <- unlist(strsplit(call[i],"[.]"))
  title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                 sep="", collapse=" ")
  title <- gsub("Final", "", title)
  #NOTE(review): when NAs exist, MAX is the whole NA-stripped vector rather
  #than its maximum; max(.2, MAX) below collapses it, so the limit is still
  #correct, but max(..., na.rm=TRUE) would be clearer.
  NAind <- which(is.na(datasetFULL$ARDRate1))
  if(length(NAind)>0){
    MAX <- datasetFULL$ARDRate1[-NAind]
  }else{ MAX <- max(datasetFULL$ARDRate1)}
  #NOTE(review): "Groups=" does not match the "Group=" argument used by the
  #other BrowniePlotRates() calls -- confirm the labels are applied here.
  BrowniePlotRates(datasetFULL, title, Groups=c("Stable", "Plastic"),
                   Xlim=c(0, max(.2, MAX)))
  #Panel 2: three-state rates for the same variable.
  datasetFULL <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
  title <- unlist(strsplit(call[i],"[.]"))
  title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                 sep="", collapse=" ")
  title <- gsub("Final", "", title)
  NAind <- which(is.na(datasetFULL$ARDRate1))
  if(length(NAind)>0){
    MAX <- datasetFULL$ARDRate1[-NAind]
  }else{ MAX <- max(c(datasetFULL$ARDRate1, datasetFULL$ARDRate0, datasetFULL$ARDRate2))}
  BrowniePlotRates(datasetFULL, title,
                   Xlim=c(0, max(.2, MAX)),
                   col=c('blue', 'purple', 'red'),
                   Group=c("Stable", "Delayed", "Plastic"))
  #Panel 3: recoded shorter/longer split (no newDi data exists for call[5]).
  if(i != 5){
    sink(file = "BrownienewDi.txt", append = TRUE, split = FALSE)
    datasetFULL <- read.csv(paste0("Tri/newDi/", call[i],"FULL.csv"))
    title <- unlist(strsplit(call[i],"[.]"))
    title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                   sep="", collapse=" ")
    title <- gsub("Final", "", title)
    NAind <- which(is.na(datasetFULL$ARDRate1))
    if(length(NAind)>0){
      MAX <- datasetFULL$ARDRate1[-NAind]
    }else{ MAX <- max(datasetFULL$ARDRate1)}
    BrowniePlotRates(datasetFULL, title, Groups=c("Shorter", "Longer"),
                     Xlim=c(0, max(.2, MAX)))
    sink(NULL)
  }
}
dev.off()
#Figure 4: Brownie full-rate plots for the remaining song variables
#(two-state classification).
pdf("Figure 4.pdf")
par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(11,14,17,18)){#full rates
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  #Readable panel title from the variable name.
  title <- unlist(strsplit(call[i],"[.]"))
  title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
                 sep="", collapse=" ")
  title <- gsub("Final", "", title)
  #Largest finite rate sets the x-axis limit (floor of 0.2). Equivalent to
  #the original's NA-strip-then-max(.2, vector) construction.
  MAX <- max(datasetFULL$ARDRate1, na.rm = TRUE)
  #"Group=" (not "Groups=") for consistency with other BrowniePlotRates() calls.
  BrowniePlotRates(datasetFULL, title, Group=c("Stable", "Plastic"),
                   Xlim=c(0, max(.2, MAX)))
}
dev.off()
#3) Jackknife runs: drop each family (with >= 4 species) in turn and rerun
#the extraction; generates trees not shown in the paper plus table data.
#Because Acrocephalidae is paraphyletic, we merged the Locustellidae with Acrocephalidae
birbs$Family[which(birbs$BirdtreeFormat == "Locustella_naevia")] <- "Acrocephalidae"
#Create a list of row indices belonging to each family, then keep only the
#families with at least 4 species for the jackknife.
Families <- replicate(length(levels(birbs$Family)),NULL)
names(Families) <- levels(birbs$Family)
for(i in 1:length(Families)){Families[[i]]<-which(birbs$Family == names(Families)[i])}
Remove <- names(which(sapply(Families,length)>=4))
Type <- c("", "FULL")
#first loop (i) enters the folder for each song variable and sets up ANOVA data
#second loop (j) cuts out each family in turn and runs the DataExtraction protocol
#third loop (k) runs loop 4 with full and partial rates
#fourth loop (l) generates and plots the Brownie data
#loop 1
for(i in seq_along(call2)){
  setwd(file.path(dir, "DataWarehouse/Jackknife",call2[i]))
  ANOVAData <- as.list(1:length(Remove))
  #loop 2: Jackknife using the ACE values from the tree created after species with NAs for
  #a song variable were removed and those which were removed by the jackknife procedure itself
  for(j in 1:length(Remove)){
    ConseJack <- drop.tip(ConsensusTree, Families[[Remove[j]]], root.edge = 0)
    Jacks <- birbs[-Families[[Remove[j]]],]
    OC <- OpenClose[-Families[[Remove[j]]]]
    DataExtraction(OC, Jacks, ConseJack, vari=call2[i], RAIN=FALSE,
                   mod=mod2[i], cotitle=paste0("No", Remove[j]), fullrate=FullRate)
    ANOVAData[[j]] <- ANOVARun
  }
  #loop 3: runs full and partial rates
  sink(file = "Brownie.txt", append = TRUE, split = FALSE)
  for(k in 1:2){
    pdf(paste0(call2[i], Type[k],"Jackknife.pdf"))
    par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
    #Plot the original (all-species) brownie run for comparison.
    setwd(file.path(dir, "DataWarehouse"))
    dataset <- read.csv(paste0(call2[i],Type[k],".csv"))
    BrowniePlotRates(dataset,paste0(call2[i], " All"))
    setwd(file.path(dir, "DataWarehouse/Jackknife",call2[i]))
    #loop 4: Brownie-plot each jackknife run; JackLoss counts the dropped
    #species that actually had data for this variable.
    for(l in 1:length(Remove)){
      dataset <- read.csv(paste0(call2[i],"No",Remove[l],Type[k],".csv"))
      JackLoss <- length(which(is.na(birbs[,call2[i]][Families[[Remove[l]]]])==FALSE))
      BrowniePlotRates(dataset,paste0(call2[i]," No ",Remove[l], "(", JackLoss, ")"))
    }
    dev.off()
  }
  sink(file = NULL)
  #NOTE(review): CritAlpha is not defined in this file -- presumably created
  #by Source_OCEvolution.R or an earlier section; confirm before running.
  ANOVAPrinter(ANOVAData, CritAlpha[which(call == call2[i])-1])
}
#4) Jackknife with individual Mimids:
#based on the data from above, we decided to repeat the Brownie analysis
#with each Mimid removed in turn for Syll.song.
setwd(file.path(dir, "DataWarehouse/MimidJackknife"))
speciesIndex <- Families$Mimidae
NAind <- which(is.na(birbs$Syll.song.final)==TRUE)
SylSong <- birbs$Syll.song.final
names(SylSong) <- birbs$BirdtreeFormat
#Drop one Mimid (plus all NA species) per iteration and regenerate the
#Brownie data (1300 simulations per run).
for(i in 1:length(speciesIndex)){
  ConseMime <- drop.tip(ConsensusTree, c(speciesIndex[i],NAind), root.edge = 0)
  #NOTE(review): Mime is assigned but never used below -- likely vestigial.
  Mime <- birbs[-c(speciesIndex[i], NAind),]
  OCmime <- OpenClose[-c(speciesIndex[i],NAind)]
  sySo <- log(SylSong[-c(speciesIndex[i],NAind)])
  BrownieDataGen(ConseMime, OCmime, sySo, nsim=1300,title=paste(birbs$BirdtreeFormat[speciesIndex[i]]), FullRate)
}
#Plot each leave-one-out run (output filename typo "Jackkinfe" is kept so
#downstream references to the existing file still resolve).
pdf("MimidJackkinfe.pdf")
par(mfrow=c(2,2))
sink(file = "Brownie.txt", append = TRUE, split = FALSE)
for(i in 1:length(speciesIndex)){
  dataset <- read.csv(paste0(birbs$BirdtreeFormat[speciesIndex[i]], ".csv"))
  BrowniePlotRates(dataset, paste("Syl.Song","No",birbs$BirdtreeFormat[speciesIndex[i]], sep=" "))
}
sink(file = NULL)
dev.off()
#6) Sensitivity test: rerun everything with Melospiza lincolnii recoded
#as a closed-ended learner.
setwd(file.path(dir, "DataWarehouse/ClosedLink"))
OClink <- OpenClose
OClink[which(birbs$BirdtreeFormat == "Melospiza_lincolnii")] <- "closed"
#creates RainbowPlots, runs phylANOVA, outputs text, runs Brownielite, plots it
ANOVAData <- as.list(1:length(call))
for(i in rev(seq_along(call))){
  DataExtraction(OClink, birbs, ConsensusTree, call[i], mod=mod[i], fullrate=FullRate, RAIN=FALSE)
  ANOVAData[[i]] <- ANOVARun
  #plotting data (CSVs written by DataExtraction above)
  sink(file = "Brownie.txt", append = TRUE, split = FALSE)
  dataset <- read.csv(paste0(call[i],".csv"))
  datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
  pdf(paste0(call[i], ".Brownie.pdf"))
  par(mfrow=c(2,2))
  BrowniePlotRates(dataset, paste0(call[i]))
  BrowniePlotRates(datasetFULL ,paste0(call[i],"FULL"))
  dev.off()
  sink(file=NULL)
}
ANOVAResults(ANOVAData)
setwd(file.path(dir, "DataWarehouse/"))
#Transition plot "bones": bare consensus tree with open/closed tip symbols
#(filled circle = closed, open circle = open) for manual figure assembly.
pdf("TransitionBones.pdf", width=8.5, height=11)
plot(ConsensusTree, edge.width=2.5, cex=.7,
     label.offset = 1)
tiplabels(pch=ifelse(OpenClose == "closed", 19, 1),
          offset=.5)
dev.off()
| /Final Code CMR/things you need to run/OCEvolution.R | no_license | NeuroBio/2018-OCEvolution | R | false | false | 17,198 | r | ########
#Coded by Cristina Robinson
#Last Modified 5-1-2019
########
#SET UP: clear the workspace, load helper functions, trees, and data.
rm(list = objects())
#setwd()##set your directory here
dir <- getwd()
source("Source_OCEvolution.R")#Libraries and helper scripts
#Load trees and data (this takes a while).
ConsensusTree <- LoadPrettyTree("HacketTrees.nex") #note that the variable Path is also generated here
#Rename the saddleback tip to its current accepted binomial.
ConsensusTree$tip.label[which(ConsensusTree$tip.label == "Philesturnus_carunculatus")] <- "Philesturnus_rufusater"
birbs <- Loadbirds("MainDataset.csv", ConsensusTree)
OCVariants <- LoadOtherOC("StabilityDataDiTriCont.csv", birbs)
#Species lacking a three-state classification are dropped from the tri tree.
#NOTE(review): "remove" shadows base::remove() for the rest of the script.
remove <- which(is.na(OCVariants$tri))
VariantTree <- drop.tip(ConsensusTree, remove)
#Set trait factors (named by species) used throughout the analyses.
OpenClose <- birbs$O.C
OpenClose <- factor(OpenClose, labels = c("closed","open"))
names(OpenClose) <- birbs$BirdtreeFormat
OpenCloseTri <- OCVariants$tri[-remove]
OpenCloseTri <- factor(OpenCloseTri, labels = c("closed","delayed-closed","open"))
names(OpenCloseTri) <- OCVariants$BirdtreeFormat[-remove]
#Loop variables for the main analysis: every column except identifiers and
#the trait itself. NOTE(review): "call" shadows base::call().
call <- names(birbs)[1:(length(birbs))]
call <- call[!call %in% c("O.C", "BirdtreeFormat", "Family", "FormerFamily")]#remove non-independent vars
mod <- rep("log", length(call))
mod[c(1,19)] <- "linear"
#Equal-rates transition matrices from ancestral character estimation.
FullRate <- RateMatx(ace(x=OpenClose, phy=ConsensusTree, type="discrete", model="ER"))
FullRateTri <- RateMatx(ace(x=OpenCloseTri, phy=VariantTree, type="discrete", model="ER"))
#Loop variables for the jackknife; chosen from significance in the main analysis.
mod2 <- mod[c(2, 5, 8, 11, 14, 18)]
call2 <- call[c(2, 5, 8, 11, 14, 18)]
#Create the output folder structure.
MakeFolderStructure(dir)
#Parsimony Analyis
#Get the number of transitions. This takes while!
ParseTrees <- make.simmap(ConsensusTree, OpenClose, "ER", 10000)
OCRoots <- table(unlist(sapply(1:10000, function(x) names(ParseTrees[[x]]$maps[[1]][1]))))
save <- countSimmap(ParseTrees)
colMeans(save$Tr)
min(save$Tr[,1])
####FIGURES and TABLE DATA:
#Figure 1 V2: make a histogram and boxplot
pdf("Figure 1.pdf")
par(mfrow=c(2,2), mar=c(3,3,1,1), mgp=c(1.5,.5,0))
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final,birbs$Song.rep.final),
OC=OpenClose,title=c("Syllable","Song"), DOIT=FALSE)
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final[-remove]),
OC=OpenCloseTri,
labels=c("Song-Stable", "longer Learning", "Song-Plastic"),
title=c("Syllable"), DOIT=FALSE)
plot(OCVariants$cont[-remove],birbs$Syllable.rep.final[-remove], col=rgb(1,0,1),
xlab="Years Spent Learning", ylab="Syllable Repertoire",
log='y', font.lab=2)
linmodel <- lm(x~y, list(x=OCVariants$cont[-remove], y=log(birbs$Syllable.rep.final[-remove])))
summary(linmodel)
abline(linmodel,
lwd=2)
dev.off()
#Figure 2 V2:
pdf("Figure 2.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenClose, birbs, ConsensusTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="leftwards")
dev.off()
#old fig 1
#QuickScatterBox(vari=cbind(birbs$Syllable.rep.final,birbs$Song.rep.final),
# OC=OpenClose,title=c("Syllable","Song"))
#Figure 2AB
#creates RainbowPlots, runs phylANOVA, outputs text, runs Brownielite, plots it
pdf("DoublePlot.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenClose, birbs, ConsensusTree, call[2], mod=mod[2], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenClose, birbs, ConsensusTree, call[8], mod=mod[8], fullrate=FullRate,
BROWNIE = FALSE, ANOVA = FALSE,DP=TRUE,Flip="leftwards")
dev.off()
#Table 1 and 2 data
setwd(file.path(dir, "DataWarehouse"))
ANOVAData <- as.list(1:(length(call)-2))
for(i in 18:2){
#get data and rainbow plots
DataExtraction(OpenClose, birbs, ConsensusTree, call[i], mod=mod[i], fullrate=FullRate)
ANOVAData[[i-1]] <- ANOVARun #created by DataExtraction()
#plotting brownie data
dataset <- read.csv(paste0(call[i],".csv"))
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
pdf(paste0(call[i], ".Brownie.pdf"))
par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
sink(file = "Brownie.txt", append = TRUE, split = FALSE)
BrowniePlotRates(dataset, paste0(call[i]), Group=c("Stable", "Plastic"))
BrowniePlotRates(datasetFULL, paste0(call[i],"FULL"), Group=c("Stable", "Plastic"))
sink(file=NULL)
dev.off()
}
#get Anova results
ANOVAResults(ANOVAData)
#Figure 3, 7 in one brownie :)
pdf("BrownieFullRatesAlltypes.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5,11,14,17,18)){#FUll rates
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(datasetFULL$ARDRate1)}
BrowniePlotRates(datasetFULL, title, Groups=c("Stable", "Plastic"),
Xlim=c(0, max(.2, MAX)))
}
dev.off()
#tristates
setwd(file.path(dir, "DataWarehouse/Tri"))
QuickScatterBox(vari=cbind(birbs$Syllable.rep.final[-remove], birbs$Song.rep.final[-remove]),
OC=OpenCloseTri,
labels=c("Song-Stable", "longer Learning", "Song-Plastic"),
title=c("Syllable","Song"))
pdf("DoublePlotTri.pdf")
par(mfrow=c(1,2), mar=c(.1,.1,.1,.1))
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRateTri,
BROWNIE = FALSE, ANOVA = TRUE,DP=TRUE,Flip="rightwards")
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[8], mod=mod[8], fullrate=FullRateTri,
BROWNIE = FALSE, ANOVA = TRUE,DP=TRUE,Flip="leftwards")
dev.off()
ANOVAData <- as.list(1:(length(call)-2))
for(i in 18:2){
#get data and rainbow plots
DataExtraction(OpenCloseTri, birbs[-remove,], VariantTree, call[i], mod=mod[i], fullrate=FullRateTri)
ANOVAData[[i-1]] <- ANOVARun #created by DataExtraction()
#plotting brownie data
dataset <- read.csv(paste0(call[i],".csv"))
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
pdf(paste0(call[i], "Tri.Brownie.pdf"))
par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
sink(file = "TriBrownie.txt", append = TRUE, split = FALSE)
BrowniePlotRates(dataset, paste0(call[i], "Tri"),
col = c('blue', 'purple', 'red'),
Group=c("Stable", "Longer-Learning", "Plastic"))
BrowniePlotRates(datasetFULL, paste0(call[i],"FULL-Tri"),
col = c('blue', 'purple', 'red'),
Group=c("Stable", "Longer-Learning", "Plastic"))
sink(file=NULL)
dev.off()
}
#get Anova results
ANOVAResults(ANOVAData)
#Figure 3, 7 in one brownie :)
pdf("BrownieFullRatesAlltypes.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5,11,14,17,18)){#FUll rates
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(c(datasetFULL$ARDRate1, datasetFULL$ARDRate0, datasetFULL$ARDRate2))}
BrowniePlotRates(datasetFULL, title,
Xlim=c(0, max(.2, MAX)),
col=c('blue', 'purple', 'red'),
Group=c("Stable", "Delayed", "Plastic"))
}
dev.off()
MakeAllNodePlots(birbs, call, OCVariants, ConsensusTree)
#compare 2 to 3 rates
setwd(file.path(dir, "DataWarehouse/Tri/Di"))
for(i in c(2,5,8)){
DataExtraction(OpenClose[-remove], birbs[-remove,], VariantTree, call[i], mod=mod[i], fullrate=FullRate,
RAIN = FALSE, ANOVA = FALSE)
}
setwd(file.path(dir, "DataWarehouse"))
sink(file = "BrownieTriDi.txt", append = TRUE, split = FALSE)
for(i in c(2,5,8)){
datasetDi <- read.csv(paste0("Tri/Di/",call[i],"FULL.csv"))
datasetTri <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
Mean2 <- mean(datasetDi$ARDloglik)
Mean3 <- mean(datasetTri$ARDloglik)
pval <- round(pchisq(2*(Mean3 - Mean2),1,lower.tail=FALSE),digits=3)
ifelse(pval == 0,pval <- "<0.001", pval <- paste0("=",pval))
writeLines(call[i])
writeLines(paste0("TwoRate=", round(Mean2, digits = 4)))
writeLines(paste0("ThreeRates=", round(Mean3, digits = 4)))
writeLines(paste0("pVal", pval))
writeLines(paste("",sep="\n\n"))
writeLines(paste("",sep="\n\n"))
}
sink(NULL)
setwd(file.path(dir, "DataWarehouse/Tri/newDi"))
OpenCloseTriSwitch <- OpenCloseTri
OpenCloseTriSwitch[which(OpenCloseTriSwitch=="delayed-closed")] <- "open"
OpenCloseTriSwitch <- droplevels(OpenCloseTriSwitch)
DataExtraction(OpenCloseTriSwitch, birbs[-remove,], VariantTree, call[2], mod=mod[2], fullrate=FullRate,
RAIN = FALSE, ANOVA = FALSE)
sink(file = "BrownieTrinewDi.txt", append = TRUE, split = FALSE)
for(i in c(2,8)){
datasetDi <- read.csv(paste0("Tri/newDi/",call[i],"FULL.csv"))
datasetTri <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
Mean2 <- mean(datasetDi$ARDloglik)
Mean3 <- mean(datasetTri$ARDloglik)
pval <- round(pchisq(2*(Mean3 - Mean2),1,lower.tail=FALSE),digits=3)
ifelse(pval == 0,pval <- "<0.001", pval <- paste0("=",pval))
writeLines(call[i])
writeLines(paste0("TwoRate=", round(Mean2, digits = 4)))
writeLines(paste0("ThreeRates=", round(Mean3, digits = 4)))
writeLines(paste0("pVal", pval))
writeLines(paste("",sep="\n\n"))
writeLines(paste("",sep="\n\n"))
}
sink(NULL)
setwd(file.path(dir, "DataWarehouse"))
#Figure 3:
pdf("Figure 3.pdf")
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(2,8,5)){#FUll rates
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(datasetFULL$ARDRate1)}
BrowniePlotRates(datasetFULL, title, Groups=c("Stable", "Plastic"),
Xlim=c(0, max(.2, MAX)))
datasetFULL <- read.csv(paste0("Tri/",call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(c(datasetFULL$ARDRate1, datasetFULL$ARDRate0, datasetFULL$ARDRate2))}
BrowniePlotRates(datasetFULL, title,
Xlim=c(0, max(.2, MAX)),
col=c('blue', 'purple', 'red'),
Group=c("Stable", "Delayed", "Plastic"))
if(i != 5){
sink(file = "BrownienewDi.txt", append = TRUE, split = FALSE)
datasetFULL <- read.csv(paste0("Tri/newDi/", call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(datasetFULL$ARDRate1)}
BrowniePlotRates(datasetFULL, title, Groups=c("Shorter", "Longer"),
Xlim=c(0, max(.2, MAX)))
sink(NULL)
}
}
dev.off()
pdf("Figure 4.pdf")
par(mfrow=c(2,2), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
for(i in c(11,14,17,18)){#FUll rates
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
title <- unlist(strsplit(call[i],"[.]"))
title <- paste(toupper(substring(title, 1,1)), substring(title, 2),
sep="", collapse=" ")
title <- gsub("Final", "", title)
NAind <- which(is.na(datasetFULL$ARDRate1))
if(length(NAind)>0){
MAX <- datasetFULL$ARDRate1[-NAind]
}else{ MAX <- max(datasetFULL$ARDRate1)}
BrowniePlotRates(datasetFULL, title, Groups=c("Stable", "Plastic"),
Xlim=c(0, max(.2, MAX)))
}
dev.off()
#3)Jackknife runs, generates trees we did not show and the table data
#Because Acrocephalidae is paraphyletic, we merged the Lucustellidae with Acrocephalidae
birbs$Family[which(birbs$BirdtreeFormat == "Locustella_naevia")] <- "Acrocephalidae"
#create a list of indicies belonging to each familiy, get indicied and figure out which have 4+ species
Families <- replicate(length(levels(birbs$Family)),NULL)
names(Families) <- levels(birbs$Family)
for(i in 1:length(Families)){Families[[i]]<-which(birbs$Family == names(Families)[i])}
Remove <- names(which(sapply(Families,length)>=4))
Type <- c("", "FULL")
#first loop (i) enters folder for song variable and sets up ANOVA data
#second loop (j) cuts out each of the families in turn and runs the dataextraction protocol
#Third loop (k) run loop 4 with full and partial rates
#Fourth loop (l) generate and plot brownie data
#loop 1
for(i in seq_along(call2)){
setwd(file.path(dir, "DataWarehouse/Jackknife",call2[i]))
ANOVAData <- as.list(1:length(Remove))
#loop 2: Jackknife using the ACE values from the tree created after species with NAs for
#a song variable were removed and those which were removed by the jacknife procedure itself
for(j in 1:length(Remove)){
ConseJack <- drop.tip(ConsensusTree, Families[[Remove[j]]], root.edge = 0)
Jacks <- birbs[-Families[[Remove[j]]],]
OC <- OpenClose[-Families[[Remove[j]]]]
DataExtraction(OC, Jacks, ConseJack, vari=call2[i], RAIN=FALSE,
mod=mod2[i], cotitle=paste0("No", Remove[j]), fullrate=FullRate)
ANOVAData[[j]] <- ANOVARun
}
#loop 3: runs full and partial rates
sink(file = "Brownie.txt", append = TRUE, split = FALSE)
for(k in 1:2){
pdf(paste0(call2[i], Type[k],"Jackknife.pdf"))
par(mfrow=c(3,3), mgp=c(1.5,.5,0), mar=c(3,3,2,1))
#Plot the original brownie run
setwd(file.path(dir, "DataWarehouse"))
dataset <- read.csv(paste0(call2[i],Type[k],".csv"))
BrowniePlotRates(dataset,paste0(call2[i], " All"))
setwd(file.path(dir, "DataWarehouse/Jackknife",call2[i]))
#loop 4 Brownie plot the jacknife Runs
for(l in 1:length(Remove)){
dataset <- read.csv(paste0(call2[i],"No",Remove[l],Type[k],".csv"))
JackLoss <- length(which(is.na(birbs[,call2[i]][Families[[Remove[l]]]])==FALSE))
BrowniePlotRates(dataset,paste0(call2[i]," No ",Remove[l], "(", JackLoss, ")"))
}
dev.off()
}
sink(file = NULL)
ANOVAPrinter(ANOVAData, CritAlpha[which(call == call2[i])-1])
}
#4) Jacknife with individual Mimids:
#based on the data from above, we decided to repeat the Brownie Analysis
#with each Mimid removed in turn for syll.song
setwd(file.path(dir, "DataWarehouse/MimidJackknife"))
speciesIndex <- Families$Mimidae
NAind <- which(is.na(birbs$Syll.song.final)==TRUE)
SylSong <- birbs$Syll.song.final
names(SylSong) <- birbs$BirdtreeFormat
for(i in 1:length(speciesIndex)){
ConseMime <- drop.tip(ConsensusTree, c(speciesIndex[i],NAind), root.edge = 0)
Mime <- birbs[-c(speciesIndex[i], NAind),]
OCmime <- OpenClose[-c(speciesIndex[i],NAind)]
sySo <- log(SylSong[-c(speciesIndex[i],NAind)])
BrownieDataGen(ConseMime, OCmime, sySo, nsim=1300,title=paste(birbs$BirdtreeFormat[speciesIndex[i]]), FullRate)
}
pdf("MimidJackkinfe.pdf")
par(mfrow=c(2,2))
sink(file = "Brownie.txt", append = TRUE, split = FALSE)
for(i in 1:length(speciesIndex)){
dataset <- read.csv(paste0(birbs$BirdtreeFormat[speciesIndex[i]], ".csv"))
BrowniePlotRates(dataset, paste("Syl.Song","No",birbs$BirdtreeFormat[speciesIndex[i]], sep=" "))
}
sink(file = NULL)
dev.off()
#6) Test with Lincolnii Closed
setwd(file.path(dir, "DataWarehouse/ClosedLink"))
OClink <- OpenClose
OClink[which(birbs$BirdtreeFormat == "Melospiza_lincolnii")] <- "closed"
#creates RainbowPlots, runs phylANOVA, outputs text, runs Brownielite, plots it
ANOVAData <- as.list(1:length(call))
for(i in rev(seq_along(call))){
DataExtraction(OClink, birbs, ConsensusTree, call[i], mod=mod[i], fullrate=FullRate, RAIN=FALSE)
ANOVAData[[i]] <- ANOVARun
#plotting data
sink(file = "Brownie.txt", append = TRUE, split = FALSE)
dataset <- read.csv(paste0(call[i],".csv"))
datasetFULL <- read.csv(paste0(call[i],"FULL.csv"))
pdf(paste0(call[i], ".Brownie.pdf"))
par(mfrow=c(2,2))
BrowniePlotRates(dataset, paste0(call[i]))
BrowniePlotRates(datasetFULL ,paste0(call[i],"FULL"))
dev.off()
sink(file=NULL)
}
ANOVAResults(ANOVAData)
setwd(file.path(dir, "DataWarehouse/"))
#Transition Plot Bones
pdf("TransitionBones.pdf", width=8.5, height=11)
plot(ConsensusTree, edge.width=2.5, cex=.7,
label.offset = 1)
tiplabels(pch=ifelse(OpenClose == "closed", 19, 1),
offset=.5)
dev.off()
|
#Read and observe the FARS (Fatality Analysis Reporting System) data.
accident <- read.csv("~/Documents/IDV/technology/final project/clean data/accident_data/accident.csv", header=TRUE)
vehicle <- read.csv("~/Documents/IDV/technology/final project/clean data/accident_data/vehicle.csv", header=TRUE)
#NOTE(review): read.csv() already returns data frames; these wraps are no-ops.
accident <- data.frame(accident)
vehicle <- data.frame(vehicle)
#Keep only these columns.
to.keep.a <- c("ST_CASE", "COUNTY", "CITY", "DAY", "MONTH",
               "YEAR", "DAY_WEEK", "HOUR", "MINUTE", "LATITUDE",
               "LONGITUD", "WEATHER", "FATALS", "DRUNK_DR")
to.keep.v <- c("ST_CASE", "BODY_TYP", "MOD_YEAR")
accident.short <- accident[to.keep.a]
vehicle.short <- vehicle[to.keep.v]
str(accident.short)
#Drop rows with unknown or missing values, using the FARS code-book's
#sentinel codes for "not applicable / not reported / unknown".
#Ref https://crashstats.nhtsa.dot.gov/Api/Public/ViewPublication/812315
accident.short <- subset(accident.short, !(COUNTY == 000 | COUNTY == 997 | COUNTY == 998 | COUNTY == 999))
#NOTE(review): confirm 9898 is the intended CITY sentinel (vs 9998) in the codebook.
accident.short <- subset(accident.short, !(CITY == 0000 | CITY == 9997 | CITY == 9898 | CITY == 9999))
#NOTE(review): the "--" comparisons assume character columns; if read.csv
#parsed these as numeric, the comparisons never match -- verify column types.
accident.short <- subset(accident.short, !(DAY == "--"))
accident.short <- subset(accident.short, !(MONTH == "--"))
accident.short <- subset(accident.short, !(YEAR == "--" | YEAR == 99))
accident.short <- subset(accident.short, !(DAY_WEEK == "--"))
accident.short <- subset(accident.short, !(HOUR == "--" | HOUR == 99))
accident.short <- subset(accident.short, !(MINUTE == "--" | MINUTE == 99))
#Coordinate sentinels per the FARS manual (77.7777 etc. = not reported/unknown).
accident.short <- subset(accident.short, !(LATITUDE == 77.7777 | LATITUDE == 88.8888 | LATITUDE == 99.9999))
accident.short <- subset(accident.short, !(LONGITUD == 77.7777 | LONGITUD == 88.8888 | LONGITUD == 99.9999))
accident.short <- subset(accident.short, !(WEATHER == 98 | WEATHER == 99))
str(accident.short)
write.csv(accident.short, file = "fars.csv", row.names = F) | /fars_cleanup.R | no_license | jotasolano/fars_cleanup | R | false | false | 1,766 | r | #Read and observe data
accident <- read.csv("~/Documents/IDV/technology/final project/clean data/accident_data/accident.csv", header=TRUE)
vehicle <- read.csv("~/Documents/IDV/technology/final project/clean data/accident_data/vehicle.csv", header=TRUE)
accident <- data.frame(accident)
vehicle <- data.frame(vehicle)
#Keep only these columns
to.keep.a <- c("ST_CASE", "COUNTY", "CITY", "DAY", "MONTH",
"YEAR", "DAY_WEEK", "HOUR", "MINUTE", "LATITUDE",
"LONGITUD", "WEATHER", "FATALS", "DRUNK_DR")
to.keep.v <- c("ST_CASE", "BODY_TYP", "MOD_YEAR")
accident.short <- accident[to.keep.a]
vehicle.short <- vehicle[to.keep.v]
str(accident.short)
#Get rid of uknown or missing values
#Ref https://crashstats.nhtsa.dot.gov/Api/Public/ViewPublication/812315
accident.short <- subset(accident.short, !(COUNTY == 000 | COUNTY == 997 | COUNTY == 998 | COUNTY == 999))
accident.short <- subset(accident.short, !(CITY == 0000 | CITY == 9997 | CITY == 9898 | CITY == 9999))
accident.short <- subset(accident.short, !(DAY == "--"))
accident.short <- subset(accident.short, !(MONTH == "--"))
accident.short <- subset(accident.short, !(YEAR == "--" | YEAR == 99))
accident.short <- subset(accident.short, !(DAY_WEEK == "--"))
accident.short <- subset(accident.short, !(HOUR == "--" | HOUR == 99))
accident.short <- subset(accident.short, !(MINUTE == "--" | MINUTE == 99))
accident.short <- subset(accident.short, !(LATITUDE == 77.7777 | LATITUDE == 88.8888 | LATITUDE == 99.9999))
accident.short <- subset(accident.short, !(LONGITUD == 77.7777 | LONGITUD == 88.8888 | LONGITUD == 99.9999))
accident.short <- subset(accident.short, !(WEATHER == 98 | WEATHER == 99))
str(accident.short)
write.csv(accident.short, file = "fars.csv", row.names = F) |
#Grid Template sourced from https://github.com/Appsilon/shiny.semantic-hackathon-2020/tree/master/polluter-alert
# Two-layout responsive grid for shiny.semantic::grid():
# - default (desktop): a fixed 400px left column holding title / summary /
#   user areas, with the map filling the remaining width and full height.
# - mobile: the four areas stacked vertically in a single full-width column.
# NOTE(review): the result shadows the shiny.semantic::grid_template()
# function name in this session -- confirm this is intentional.
grid_template <- grid_template(
  default = list(
    areas = rbind(
      c("title", "map"),
      c("summary", "map"),
      c("user", "map")
    ),
    cols_width = c("400px", "1fr"),
    rows_height = c("50px", "auto", "200px")
  ),
  mobile = list(
    areas = rbind(
      "title",
      "map",
      "user",
      "summary"
    ),
    rows_height = c("60px", "400px", "200px", "auto"),
    cols_width = c("100%")
  )
)
# Top-level UI: a semantic page with a custom stylesheet and the grid layout
# defined by `grid_template` above (title bar, sidebar summary, user inputs,
# and a leaflet map).
# Bug fix: removed trailing commas after tags$link(...) and after the `map`
# argument -- in R a trailing comma passes an empty (missing) argument, which
# errors with plain `list(...)`-style dots handling.
ui <- semanticPage(
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
  ),
  grid(
    grid_template,
    title = div(div(style="display:inline-block; padding-left: 10px; color: white; padding-top: 5px", h1("Vessel Tracker")),
                div(style="display:inline-block; padding-right: 10px; padding-top: 7px; float: right",
                    actionButton("show_info", label = "info"))),
    summary = uiOutput("sidebar"),
    user = user_inputsUI("user_inputs_1"),
    map = leaflet::leafletOutput("map")
  )
)
| /ui.R | no_license | Jamesohare1/marineApp | R | false | false | 1,108 | r | #Grid Template sourced from https://github.com/Appsilon/shiny.semantic-hackathon-2020/tree/master/polluter-alert
grid_template <- grid_template(
default = list(
areas = rbind(
c("title", "map"),
c("summary", "map"),
c("user", "map")
),
cols_width = c("400px", "1fr"),
rows_height = c("50px", "auto", "200px")
),
mobile = list(
areas = rbind(
"title",
"map",
"user",
"summary"
),
rows_height = c("60px", "400px", "200px", "auto"),
cols_width = c("100%")
)
)
ui = semanticPage(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
),
grid(
grid_template,
title = div(div(style="display:inline-block; padding-left: 10px; color: white; padding-top: 5px", h1("Vessel Tracker")),
div(style="display:inline-block; padding-right: 10px; padding-top: 7px; float: right",
actionButton("show_info", label = "info"))),
summary = uiOutput("sidebar"),
user = user_inputsUI("user_inputs_1"),
map = leaflet::leafletOutput("map"),
)
)
|
#Use libraries
library(shiny)
library(plotly)
#Begin Shiny UI
shinyUI(
#Navbar Page
navbarPage(
#App Title
title = 'Iris Species Data Visualization',
#App theme
theme = "styles.css",
#Scatterplot Tabpanel
tabPanel('Scatterplot',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Scatterplot",
#Select species type
selectInput("scatter", label = h3("Choose species"),
choices = list("All" = 'all_data',
"Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "All"),
#Select scatterplot color
radioButtons("color", label = h3("Color"),
choices = list('Set1', 'Set2'),
selected = 'Set1')
),
#Main Panel
mainPanel(
#Plotly output for scatter plot
plotlyOutput("scatter_plot")
)
)
),
#Petal Data Tabpanel
tabPanel('Petal Data',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Petal Lengths and Widths",
#Select species type
selectInput("petals", label = h3("Choose species"),
choices = list("Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "Setosa")
),
#Main panel
mainPanel(
#Plotly output for petal box plots
plotlyOutput("petal_length"),
plotlyOutput("petal_width")
)
)
),
#Sepal Tabpanel
tabPanel('Sepal Data',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Sepal Lengths and Widths",
#Choose Species Type
selectInput("sepals", label = h3("Choose species"),
choices = list("Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "Setosa")
),
#Main panel
mainPanel(
#Plotly output for sepal box plots
plotlyOutput("sepal_length"),
plotlyOutput("sepal_width")
)
)
)
)) | /ui.R | no_license | rhazvita/Shiny-Web-App | R | false | false | 3,393 | r | #Use libraries
library(shiny)
library(plotly)
#Begin Shiny UI
shinyUI(
#Navbar Page
navbarPage(
#App Title
title = 'Iris Species Data Visualization',
#App theme
theme = "styles.css",
#Scatterplot Tabpanel
tabPanel('Scatterplot',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Scatterplot",
#Select species type
selectInput("scatter", label = h3("Choose species"),
choices = list("All" = 'all_data',
"Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "All"),
#Select scatterplot color
radioButtons("color", label = h3("Color"),
choices = list('Set1', 'Set2'),
selected = 'Set1')
),
#Main Panel
mainPanel(
#Plotly output for scatter plot
plotlyOutput("scatter_plot")
)
)
),
#Petal Data Tabpanel
tabPanel('Petal Data',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Petal Lengths and Widths",
#Select species type
selectInput("petals", label = h3("Choose species"),
choices = list("Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "Setosa")
),
#Main panel
mainPanel(
#Plotly output for petal box plots
plotlyOutput("petal_length"),
plotlyOutput("petal_width")
)
)
),
#Sepal Tabpanel
tabPanel('Sepal Data',
sidebarLayout(
#Sidebar
sidebarPanel( "Species Sepal Lengths and Widths",
#Choose Species Type
selectInput("sepals", label = h3("Choose species"),
choices = list("Setosa" = 'setosa_data',
"Versicolor" = 'versicolor_data',
"Virginica" = 'virginica_data'),
selected = "Setosa")
),
#Main panel
mainPanel(
#Plotly output for sepal box plots
plotlyOutput("sepal_length"),
plotlyOutput("sepal_width")
)
)
)
)) |
library(devtools)
library(ggmap)
library(xlsx)

# Load the collected Hyundai department store records for Seoul.
dep_data <- read.csv("../Data/collectData/N_20200421_서울시현대백화점.csv")
dep_data

# Extract the lot-number ("지번") addresses as character strings.
dep_code <- as.character(dep_data$지번)
dep_code

# Geocode the addresses via the Google Maps API.
# SECURITY(review): the API key is hard-coded; move it to an environment
# variable or a local config file before sharing this script.
googleAPIkey <- "AIzaSyDxB5P_GoIqF7KUzM4cRh9KUZbEYjbVfX4"
register_google(googleAPIkey)
dep_code <- geocode(dep_code)

# Combine the original records with the geocoded lon/lat columns and save.
# Bug fix: the original wrote `dep_data2`, which was never defined.
dep_data2 <- cbind(dep_data, dep_code)
write.csv(dep_data2, "../Data/preprocessingData/Y_department_hyundai.csv", row.names = FALSE)
| /Data_Preprocessing_R/Y_20200421_department_hyundai.R | no_license | h0n9670/ApartmentPrice | R | false | false | 496 | r | library(devtools)
library(ggmap)
library(xlsx)
# ํ์ผ ๋ถ๋ฌ์ค๊ธฐ
dep_data <- read.csv("../Data/collectData/N_20200421_์์ธ์ ํ๋๋ฐฑํ์ .csv")
dep_data
# ์ฃผ์ ์ถ์ถ
dep_code <- as.character(dep_data$์ง๋ฒ)
dep_code
# ๊ตฌ๊ธ APIํ์ฉ ์ขํ ์ถ๋ ฅ
googleAPIkey <- "AIzaSyDxB5P_GoIqF7KUzM4cRh9KUZbEYjbVfX4"
register_google(googleAPIkey)
dep_code <- geocode(dep_code)
# CSV๋ก ์ ์ฅ
write.csv(dep_data2, "../Data/preprocessingData/Y_department_hyundai.csv", row.names = FALSE)
|
## Question 1: compare the two groups of team scores with a Wilcoxon
## rank-sum test (nonparametric two-sample comparison; no normality assumed).
northern.teams <- c(86, 74, 69, 59, 59, 45, 39)
southern.teams <- c(67, 66, 65, 62, 61, 55, 45, 44, 43, 41, 28, 26, 23)
wilcox.test(northern.teams, southern.teams)

## Question 4
setwd("C:/Users/qwert/Downloads")
# import the data (interactive file picker)
rut2000_data <- read.csv(file.choose())
# transform columns
rut2000_data$Date <- as.Date(rut2000_data$Date)
# We will use the closing price for analysis
rut2000_data$Close <- as.numeric(rut2000_data$Close)
# remove NAs
rut2000_data <- na.omit(rut2000_data)
# Calculate the log returns from consecutive closing prices
prices <- rut2000_data$Close
prices
n <- length(prices)
log_returns <- log(prices[-1] / prices[-n])
log_returns
# Demean the returns before fitting the GARCH model
res <- log_returns - mean(log_returns)

library("fGarch")
# Fitting the (default) GARCH(1, 1) model to the demeaned returns
GARCH_rut2000 <- garchFit(data = res, trace = FALSE)
summary(GARCH_rut2000)

## Question 2
# save the table as table.txt file in a relative location
df <- read.delim(file.choose())
# Data Cleaning
df <- read.csv(file.choose())
# some data cleaning
df$X <- NULL
df$X.1 <- NULL
years <- as.character(1981:1990)
years
colnames(df) <- c("Months", years)
row.names(df) <- df$Months
df$Months <- NULL
# Regress the 1990 column on the 1981-1989 columns.
# (The original formula listed df$"1983" twice; lm() deduplicates terms,
# so the redundant entry has been dropped.)
model <- lm(df$"1990" ~ df$"1981" + df$"1982" + df$"1983" + df$"1984" + df$"1985" + df$"1986" + df$"1987" + df$"1988" + df$"1989", data = df)
summary(model)
# NOTE(review): df has no "2000" column, so df$"2000" is NULL and predict()
# falls back to returning the fitted values -- confirm the intended newdata.
predict(model, newdata = df$"2000")
month <- rep(c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"), 10)
# Bug fix: `year` was printed before it was assigned in the original.
year <- rep(c(1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990), c(12, 12, 12, 12, 12, 12, 12, 12, 12, 12))
year
# Durbin-Watson test for autocorrelated residuals
library("car")
durbinWatsonTest(model)

## Question 3
# NOTE(review): this assignment shadows base::abs() for the session.
abs <- rep(c(4, 3, 2, 1, "unrated"), c(239, 554, 97, 18, 67))
abs <- factor(abs)
REF4 <- rep(c(4, 3, 2, 1), c(94, 95, 47, 3))
REF3 <- rep(c(4, 3, 2, 1), c(80, 296, 150, 28))
REF2 <- rep(c(4, 3, 2, 1), c(4, 29, 54, 10))
REF1 <- rep(c(4, 3, 2, 1), c(2, 1, 9, 6))
REFunrated <- rep(c(4, 3, 2, 1), c(3, 6, 37, 21))
R <- c(REF4, REF3, REF2, REF1, REFunrated)
# Map ratings 1..4 onto proportions 0..1 for the binomial GLM
y <- (R - 1) / 3
# Bug fix: the original call was missing the comma before `family`
# (a parse error). NOTE(review): the REF* predictors have different
# lengths from y, so this call will still fail at runtime; the intended
# design matrix needs to be revisited.
model <- glm(y ~ REF4 + REF3 + REF2 + REF1 + REFunrated, family = "binomial")
summary(model)
| /financemetheds.R | no_license | TheAlchemistNerd/R-Programming | R | false | false | 2,086 | r | nothern.teams <-c(86, 74, 69, 59, 59, 45, 39)
southern.teams <-c(67, 66, 65, 62, 61, 55, 45, 44, 43, 41, 28, 26, 23)
wilcox.test(nothern.teams, southern.teams)
## Question 4
setwd("C:/Users/qwert/Downloads")
# import the data
rut2000_data <- read.csv(file.choose())
# transform columns
rut2000_data$Date <- as.Date(rut2000_data$Date)
# We will use the closing price for analysis
rut2000_data$Close <- as.numeric(rut2000_data$Close)
# remove NAs
rut2000_data <- na.omit(rut2000_data)
# Calculating the log returns
prices <- rut2000_data$Close
prices
n <- length(prices)
log_returns <- log(prices[-1]/prices[-n])
log_returns
res <- log_returns - mean(log_returns)
library("fGarch")
# Fitting the GARCH(1, 1) model
GARCH_rut2000 <- garchFit(data = res, trace = F)
summary(GARCH_rut2000)
## Question 2
# save the table as table.txt file in a relative location
df <- read.delim(file.choose())
# Data Cleaning
df <- read.csv(file.choose())
# some data cleaning
df$X <- NULL
df$X.1 <- NULL
years <- as.character(1981:1990)
years
colnames(df) <- c("Months", years)
row.names(df) <- df$Months
df$Months <- NULL
model <- lm(df$"1990" ~ df$"1981"+df$"1982"+df$"1983"+df$"1983"+df$"1984"+df$"1985"+df$"1986"+df$"1987"+df$"1988"+df$"1989", data = df)
summary(model)
predict(model , newdata = df$"2000")
month<-rep(c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"), 10)
year
year<-rep(c(1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990), c(12, 12, 12, 12, 12, 12, 12, 12, 12, 12))
# Durbin-Watson test
library("car")
durbinWatsonTest(model)
## Question 3
abs<-rep(c(4, 3, 2, 1, "unrated"), c(239, 554, 97, 18, 67))
abs<-factor(abs)
REF4<-rep(c(4, 3, 2, 1), c(94, 95, 47, 3))
REF3<-rep(c(4, 3, 2, 1), c(80, 296, 150, 28))
REF2<-rep(c(4, 3, 2, 1), c(4, 29, 54, 10))
REF1<-rep(c(4, 3, 2, 1), c(2, 1, 9, 6))
REFunrated<-rep(c(4, 3, 2, 1), c(3, 6, 37, 21))
R<-c(REF4, REF3, REF2, REF1, REFunrated)
y<-(R-1)/3
model <- glm(y ~ REF4 + REF3 + REF2 + REF1 + REFunrated family = "binomial")
summary(model)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deriv.R
\name{derivicefiles}
\alias{derivicefiles}
\title{Load metadata and location of files of derived sea ice data products.}
\usage{
derivicefiles(product = "time_since_melt", ...)
}
\arguments{
\item{product}{which derived product}
\item{...}{reserved for future use, currently ignored}
}
\value{
data.frame of \code{file} and \code{date}
}
\description{
This function loads the latest cache of stored files for
ice products, currently only daily NSIDC southern hemisphere is available.
}
| /man/derivicefiles.Rd | no_license | AustralianAntarcticDivision/raadtools | R | false | true | 573 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deriv.R
\name{derivicefiles}
\alias{derivicefiles}
\title{Load metadata and location of files of derived sea ice data products.}
\usage{
derivicefiles(product = "time_since_melt", ...)
}
\arguments{
\item{product}{which derived product}
\item{...}{reserved for future use, currently ignored}
}
\value{
data.frame of \code{file} and \code{date}
}
\description{
This function loads the latest cache of stored files for
ice products, currently only daily NSIDC southern hemisphere is available.
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Build a cache-aware wrapper around a matrix. The returned list exposes
# getters/setters for the matrix itself and for its (lazily computed)
# inverse; replacing the matrix via set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    # Store the new matrix and drop the stale inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Write a short comment describing this function
# Return the inverse of the matrix held in the cache object `x`
# (as produced by makeCacheMatrix). The inverse is computed with
# solve() on the first call and served from the cache afterwards;
# extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, and return the fresh inverse.
    computed <- solve(x$get(), ...)
    x$setinverse(computed)
    return(computed)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | paolobroglio/ProgrammingAssignment2 | R | false | false | 785 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(
set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse
)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getinverse()
if (!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
inverse
}
|
#' Generate the panel organization UI
#'
#' Generates the user interface to control the organization of the panels, specifically their sizes.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#'
#' @return
#' A HTML tag object containing the UI elements for panel sizing.
#'
#' @details
#' This function will create a series of UI elements for all active panels, specifying the width or height of the panels.
#' We use a select element for the width as this is very discrete, and we use a slider for the height.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_panel_organization
#' @seealso
#' \code{\link{iSEE}},
#' \code{\link{.panel_generation}},
#' \code{\link{.setup_initial}}
#'
#' @importFrom shiny tagList selectInput sliderInput
#' @importFrom shinydashboard box
.panel_organization <- function(active_panels) {
    # One shinydashboard box per active panel, each holding a width
    # selector and a height slider whose input IDs are namespaced by
    # the panel's encoded name (e.g. "redDimPlot1_").
    N <- nrow(active_panels)
    collected <- vector("list", N)
    # (Removed an unused `counter` local that was never read.)
    for (i in seq_len(N)) {
        mode <- active_panels$Type[i]
        id <- active_panels$ID[i]
        prefix <- paste0(mode, id, "_")

        ctrl_panel <- box(
            selectInput(paste0(prefix, .organizationWidth), label="Width",
                choices=seq(width_limits[1], width_limits[2]), selected=active_panels$Width[i]),
            sliderInput(paste0(prefix, .organizationHeight), label="Height",
                min=height_limits[1], max=height_limits[2], value=active_panels$Height[i], step=10),
            title=.decode_panel_name(mode, id), status="danger", width=NULL, solidHeader=TRUE
        )

        # Coercing to a different box status ('danger' is a placeholder, above).
        collected[[i]] <- .coerce_box_status(ctrl_panel, mode)
    }
    do.call(tagList, collected)
}
#' Show and hide panels in the User Interface
#'
#' @param mode Panel mode. See \code{\link{panelCodes}}.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#' @param width Grid width of the new panel (must be between 1 and 12).
#' @param height Height of the new panel (in pixels).
#'
#' @return A data.frame specifying the new set of active panels.
#' @rdname INTERNAL_show_panel
#'
#' @author Kevin Rue-Albrecht
.showPanel <- function(mode, id, active_panels, width=4L, height=500L) {
    # Append a one-row entry describing the newly shown panel and
    # return the extended table.
    new_entry <- DataFrame(Type=mode, ID=id, Width=width, Height=height)
    rbind(active_panels, new_entry)
}
#' @param pObjects An environment containing \code{table_links}, a graph produced by \code{\link{.spawn_table_links}};
#' and \code{memory}, a list of DataFrames containing parameters for each panel of each type.
#' @rdname INTERNAL_show_panel
#' @author Kevin Rue-Albrecht
.hidePanel <- function(mode, id, active_panels, pObjects) {
    # Remove the panel (identified by its mode/type and ID) from the table
    # of active panels, tearing down any selection/table links it holds in
    # the shared `pObjects` environment as a side effect.
    current_type <- active_panels$Type == mode
    panel_name <- paste0(mode, id)

    # Destroying links for point selection or tables.
    # NOTE: these helpers mutate `pObjects` in place (environment semantics),
    # so the teardown persists beyond this call.
    .destroy_selection_panel(pObjects, panel_name)
    if (mode %in% linked_table_types) {
        .destroy_table(pObjects, panel_name)
    } else if (mode %in% point_plot_types) {
        .delete_table_links(mode, id, pObjects)
    }

    # Triggering re-rendering of the UI via change to active_panels.
    index <- which(current_type & active_panels$ID == id)
    active_panels <- active_panels[-index, ]

    # Return the updated table of active panels
    active_panels
}
#' Generate the panels in the app body
#'
#' Constructs the active panels in the main body of the app to show the plotting results and tables.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#' @param memory A list of DataFrames, where each DataFrame corresponds to a panel type and contains the initial settings for each individual panel of that type.
#' @param se A SingleCellExperiment object.
#'
#' @return
#' A HTML tag object containing the UI elements for the main body of the app.
#' This includes the output plots/tables as well as UI elements to control them.
#'
#' @details
#' This function generates the various panels in the main body of the app, taking into account their variable widths to dynamically assign them to particular rows.
#' It will try to assign as many panels to the same row until the row is filled, at which point it will start on the next row.
#'
#' Each panel contains the actual endpoint element (i.e., the plot or table to display) as well as a number of control elements to set the parameters.
#' All control elements lie within \code{\link{collapseBox}} elements to avoid cluttering the interface.
#' The open/closed status of these boxes are retrieved from memory, and are generally closed by default.
#'
#' Construction of each panel is done by retrieving all of the memorized parameters and using them to set the initial values of various control elements.
#' This ensures that the plots are not reset during re-rendering.
#' The exception is that of the Shiny brush, which cannot be fully restored in the current version - instead, only the bounding box is shown.
#'
#' Note that control of the tables lies within \code{\link{iSEE}} itself.
#' Also, feature name selections will open up a \code{selectizeInput} where the values are filled on the server-side, rather than being sent to the client.
#' This avoids long start-up times during re-rendering.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_panel_generation
#' @seealso
#' \code{\link{iSEE}}
#'
#' @importFrom SummarizedExperiment colData rowData assayNames
#' @importFrom BiocGenerics rownames
#' @importFrom SingleCellExperiment reducedDimNames reducedDim
#' @importFrom shiny actionButton fluidRow selectInput plotOutput uiOutput
#' sliderInput tagList column radioButtons tags hr brushOpts
#' selectizeInput checkboxGroupInput textAreaInput
.panel_generation <- function(active_panels, memory, se) {
collected <- list()
counter <- 1L
cumulative.width <- 0L
cur.row <- list()
row.counter <- 1L
# Extracting useful fields from the SE object.
column_covariates <- colnames(colData(se))
row_covariates <- colnames(rowData(se))
all_assays <- .get_internal_info(se, "all_assays")
red_dim_names <- .get_internal_info(se, "red_dim_names")
sample_names <- .get_internal_info(se, "sample_names")
custom_data_fun <- .get_internal_info(se, "custom_data_fun")
custom_data_funnames <- c(.noSelection, names(custom_data_fun))
custom_stat_fun <- .get_internal_info(se, "custom_stat_fun")
custom_stat_funnames <- c(.noSelection, names(custom_stat_fun))
# Defining all transmitting tables and plots for linking.
link_sources <- .define_link_sources(active_panels)
tab_by_row <- c(.noSelection, link_sources$row_tab)
tab_by_col <- c(.noSelection, link_sources$col_tab)
row_selectable <- c(.noSelection, link_sources$row_plot)
col_selectable <- c(.noSelection, link_sources$col_plot)
heatmap_sources <- c(.noSelection, link_sources$row_plot, link_sources$row_tab)
for (i in seq_len(nrow(active_panels))) {
mode <- active_panels$Type[i]
id <- active_panels$ID[i]
panel_name <- paste0(mode, id)
panel_width <- active_panels$Width[i]
param_choices <- memory[[mode]][id,]
.input_FUN <- function(field) { paste0(panel_name, "_", field) }
# Checking what to do with plot-specific parameters (e.g., brushing, clicking, plot height).
if (! mode %in% c(linked_table_types, "customStatTable")) {
brush.opts <- brushOpts(.input_FUN(.brushField), resetOnNew=FALSE,
direction=ifelse(mode=="heatMapPlot", "y", "xy"),
fill=brush_fill_color[mode], stroke=brush_stroke_color[mode],
opacity=.brushFillOpacity)
dblclick <- .input_FUN(.zoomClick)
clickopt <- .input_FUN(.lassoClick)
panel_height <- paste0(active_panels$Height[i], "px")
}
# Creating the plot fields.
if (mode == "redDimPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
cur_reddim <- param_choices[[.redDimType]]
max_dim <- ncol(reducedDim(se, cur_reddim))
choices <- seq_len(max_dim)
names(choices) <- choices
plot.param <- list(
selectInput(.input_FUN(.redDimType), label="Type",
choices=red_dim_names, selected=cur_reddim),
selectInput(.input_FUN(.redDimXAxis), label="Dimension 1",
choices=choices, selected=param_choices[[.redDimXAxis]]),
selectInput(.input_FUN(.redDimYAxis), label="Dimension 2",
choices=choices, selected=param_choices[[.redDimYAxis]])
)
} else if (mode == "colDataPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
plot.param <- list(
selectInput(.input_FUN(.colDataYAxis),
label="Column of interest (Y-axis):",
choices=column_covariates, selected=param_choices[[.colDataYAxis]]),
radioButtons(.input_FUN(.colDataXAxis), label="X-axis:", inline=TRUE,
choices=c(.colDataXAxisNothingTitle, .colDataXAxisColDataTitle),
selected=param_choices[[.colDataXAxis]]),
.conditional_on_radio(.input_FUN(.colDataXAxis),
.colDataXAxisColDataTitle,
selectInput(.input_FUN(.colDataXAxisColData),
label="Column of interest (X-axis):",
choices=column_covariates, selected=param_choices[[.colDataXAxisColData]]))
)
} else if (mode == "featAssayPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
xaxis_choices <- c(.featAssayXAxisNothingTitle)
if (length(column_covariates)) { # As it is possible for thsi plot to be _feasible_ but for no column data to exist.
xaxis_choices <- c(xaxis_choices, .featAssayXAxisColDataTitle)
}
xaxis_choices <- c(xaxis_choices, .featAssayXAxisFeatNameTitle)
plot.param <- list(
selectizeInput(.input_FUN(.featAssayYAxisFeatName),
label="Y-axis feature:", choices=NULL, selected=NULL, multiple=FALSE),
selectInput(.input_FUN(.featAssayYAxisRowTable), label=NULL, choices=tab_by_row,
selected=.choose_link(param_choices[[.featAssayYAxisRowTable]], tab_by_row, force_default=TRUE)),
selectInput(.input_FUN(.featAssayAssay), label=NULL,
choices=all_assays, selected=param_choices[[.featAssayAssay]]),
radioButtons(.input_FUN(.featAssayXAxis), label="X-axis:", inline=TRUE,
choices=xaxis_choices, selected=param_choices[[.featAssayXAxis]]),
.conditional_on_radio(.input_FUN(.featAssayXAxis),
.featAssayXAxisColDataTitle,
selectInput(.input_FUN(.featAssayXAxisColData),
label="X-axis column data:",
choices=column_covariates, selected=param_choices[[.featAssayXAxisColData]])),
.conditional_on_radio(.input_FUN(.featAssayXAxis),
.featAssayXAxisFeatNameTitle,
selectizeInput(.input_FUN(.featAssayXAxisFeatName),
label="X-axis feature:", choices=NULL, selected=NULL, multiple=FALSE),
selectInput(.input_FUN(.featAssayXAxisRowTable), label=NULL,
choices=tab_by_row, selected=param_choices[[.featAssayXAxisRowTable]]))
)
} else if (mode == "rowStatTable") {
obj <- tagList(dataTableOutput(panel_name), uiOutput(.input_FUN("annotation")))
} else if (mode == "colStatTable") {
obj <- dataTableOutput(panel_name)
} else if (mode == "customStatTable" || mode == "customDataPlot") {
if (mode == "customDataPlot") {
obj <- plotOutput(panel_name, height=panel_height)
fun_choices <- custom_data_funnames
} else {
obj <- dataTableOutput(panel_name)
fun_choices <- custom_stat_funnames
}
argsUpToDate <- param_choices[[.customArgs]] == param_choices[[.customVisibleArgs]]
if (is.na(argsUpToDate) || argsUpToDate) {
button_label <- .buttonUpToDateLabel
} else {
button_label <- .buttonUpdateLabel
}
plot.param <- list(
selectInput(
.input_FUN(.customFun), label="Custom function:",
choices=fun_choices, selected=param_choices[[.customFun]]),
textAreaInput(
.input_FUN(.customVisibleArgs), label="Custom arguments:", rows=5,
value=param_choices[[.customVisibleArgs]]),
actionButton(.input_FUN(.customSubmit), button_label)
)
} else if (mode == "rowDataPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
plot.param <- list(
selectInput(.input_FUN(.rowDataYAxis),
label="Column of interest (Y-axis):",
choices=row_covariates, selected=param_choices[[.rowDataYAxis]]),
radioButtons(.input_FUN(.rowDataXAxis), label="X-axis:", inline=TRUE,
choices=c(.rowDataXAxisNothingTitle, .rowDataXAxisRowDataTitle),
selected=param_choices[[.rowDataXAxis]]),
.conditional_on_radio(.input_FUN(.rowDataXAxis),
.rowDataXAxisRowDataTitle,
selectInput(.input_FUN(.rowDataXAxisRowData),
label="Column of interest (X-axis):",
choices=row_covariates, selected=param_choices[[.rowDataXAxisRowData]]))
)
} else if (mode == "sampAssayPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
xaxis_choices <- c(.sampAssayXAxisNothingTitle)
if (length(row_covariates)) { # As it is possible for this plot to be _feasible_ but for no row data to exist.
xaxis_choices <- c(xaxis_choices, .sampAssayXAxisRowDataTitle)
}
xaxis_choices <- c(xaxis_choices, .sampAssayXAxisSampNameTitle)
plot.param <- list(
selectInput(
.input_FUN(.sampAssayYAxisSampName),
label="Sample of interest (Y-axis):",
choices=sample_names, selected=param_choices[[.sampAssayYAxisSampName]]),
selectInput(
.input_FUN(.sampAssayYAxisColTable), label=NULL, choices=tab_by_col,
selected=.choose_link(param_choices[[.sampAssayYAxisColTable]], tab_by_col, force_default=TRUE)),
selectInput(
.input_FUN(.sampAssayAssay), label=NULL,
choices=all_assays, selected=param_choices[[.sampAssayAssay]]),
radioButtons(
.input_FUN(.sampAssayXAxis), label="X-axis:", inline=TRUE,
choices=xaxis_choices, selected=param_choices[[.sampAssayXAxis]]),
.conditional_on_radio(
.input_FUN(.sampAssayXAxis),
.sampAssayXAxisRowDataTitle,
selectInput(
.input_FUN(.sampAssayXAxisRowData),
label="Row data of interest (X-axis):",
choices=row_covariates, selected=param_choices[[.sampAssayXAxisRowData]])),
.conditional_on_radio(
.input_FUN(.sampAssayXAxis),
.sampAssayXAxisSampNameTitle,
selectInput(
.input_FUN(.sampAssayXAxisSampName),
label="Sample of interest (X-axis):",
choices=sample_names, selected=param_choices[[.sampAssayXAxisSampName]]),
selectInput(.input_FUN(.sampAssayXAxisColTable), label=NULL,
choices=tab_by_col, selected=param_choices[[.sampAssayXAxisColTable]]))
)
} else if (mode == "heatMapPlot") {
obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, height=panel_height)
plot.param <- list(
collapseBox(
id=.input_FUN(.heatMapFeatNameBoxOpen),
title="Feature parameters",
open=param_choices[[.heatMapFeatNameBoxOpen]],
selectInput(
.input_FUN(.heatMapImportSource), label="Import from", choices=heatmap_sources,
selected=.choose_link(param_choices[[.heatMapImportSource]], heatmap_sources, force_default=TRUE)),
actionButton(.input_FUN(.heatMapImportFeatures), "Import features"),
actionButton(.input_FUN(.heatMapCluster), "Cluster features"),
actionButton(.input_FUN(.heatMapClearFeatures), "Clear features"),
selectizeInput(
.input_FUN(.heatMapFeatName),
label="Features:",
choices=NULL, selected=NULL, multiple=TRUE,
options=list(plugins=list('remove_button', 'drag_drop'))),
selectInput(
.input_FUN(.heatMapAssay), label=NULL,
choices=all_assays, selected=param_choices[[.heatMapAssay]]),
hr(),
checkboxGroupInput(
.input_FUN(.heatMapCenterScale), label="Expression values are:",
selected=param_choices[[.heatMapCenterScale]][[1]],
choices=c(.heatMapCenterTitle, .heatMapScaleTitle), inline=TRUE),
numericInput(
.input_FUN(.heatMapLower), label="Lower bound:",
value=param_choices[[.heatMapLower]]),
numericInput(
.input_FUN(.heatMapUpper), label="Upper bound:",
value=param_choices[[.heatMapUpper]]),
.conditional_on_check_group(
.input_FUN(.heatMapCenterScale), .heatMapCenterTitle,
selectInput(
.input_FUN(.heatMapCenteredColors), label="Color scale:",
choices=c("purple-black-yellow", "blue-white-orange"),
selected=param_choices[[.heatMapCenteredColors]]))
),
collapseBox(
id=.input_FUN(.heatMapColDataBoxOpen),
title="Column data parameters",
open=param_choices[[.heatMapColDataBoxOpen]],
selectizeInput(
.input_FUN(.heatMapColData),
label="Column data:",
choices=column_covariates,
multiple=TRUE,
selected=param_choices[[.heatMapColData]][[1]],
options=list(plugins=list('remove_button', 'drag_drop'))),
plotOutput(.input_FUN(.heatMapLegend))
)
)
} else {
stop(sprintf("'%s' is not a recognized panel mode", mode))
}
# Adding graphical parameters if we're plotting.
if (mode %in% linked_table_types) {
if (mode %in% "rowStatTable") {
source_type <- "row"
selectable <- row_selectable
} else {
source_type <- "column"
selectable <- col_selectable
}
param <- list(hr(),
tags$div(class="panel-group", role="tablist",
.create_selection_param_box_define_box(mode, id, param_choices,
.create_selection_param_box_define_choices(mode, id, param_choices, .selectByPlot, selectable, source_type)
)
)
)
} else if (mode=="heatMapPlot") {
param <- list(do.call(tags$div, c(list(class="panel-group", role="tablist"),
plot.param,
.create_selection_param_box(mode, id, param_choices, col_selectable, "column")
)))
} else {
# Options for fundamental plot parameters.
data_box <- do.call(collapseBox, c(list(id=.input_FUN(.dataParamBoxOpen),
title="Data parameters", open=param_choices[[.dataParamBoxOpen]]), plot.param))
if (mode %in% custom_panel_types) {
param <- list(
tags$div(class="panel-group", role="tablist",
data_box,
.create_selection_param_box_define_box(
mode, id, param_choices,
.create_selection_param_box_define_choices(mode, id, param_choices, .customRowSource, row_selectable, "row"),
.create_selection_param_box_define_choices(mode, id, param_choices, .customColSource, col_selectable, "column")
)
)
)
} else {
if (mode %in% row_point_plot_types) {
select_choices <- row_selectable
create_FUN <- .create_visual_box_for_row_plots
source_type <- "row"
} else {
select_choices <- col_selectable
create_FUN <- .create_visual_box_for_column_plots
source_type <- "column"
}
param <- list(
tags$div(class="panel-group", role="tablist",
data_box,
create_FUN(mode, id, param_choices, tab_by_row, tab_by_col, se), # Options for visual parameters.
.create_selection_param_box(mode, id, param_choices, select_choices, source_type) # Options for point selection parameters.
)
)
}
}
# Deciding whether to continue on the current row, or start a new row.
extra <- cumulative.width + panel_width
if (extra > 12L) {
collected[[counter]] <- do.call(fluidRow, cur.row)
counter <- counter + 1L
collected[[counter]] <- hr()
counter <- counter + 1L
cur.row <- list()
row.counter <- 1L
cumulative.width <- 0L
}
# Aggregating together everything into a box, and then into a column.
cur_box <- do.call(box, c(
list(obj),
param,
list(uiOutput(.input_FUN(.panelGeneralInfo)), uiOutput(.input_FUN(.panelLinkInfo))),
list(title=.decode_panel_name(mode, id), solidHeader=TRUE, width=NULL, status="danger")))
cur_box <- .coerce_box_status(cur_box, mode)
cur.row[[row.counter]] <- column(width=panel_width, cur_box, style='padding:3px;')
row.counter <- row.counter + 1L
cumulative.width <- cumulative.width + panel_width
}
# Cleaning up the leftovers.
collected[[counter]] <- do.call(fluidRow, cur.row)
counter <- counter + 1L
collected[[counter]] <- hr()
# Convert the list to a tagList - this is necessary for the list of items to display properly.
do.call(tagList, collected)
}
#' Define link sources
#'
#' Define all possible sources of links between active panels, i.e., feature selections from row statistics tables or point selections from plots.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#'
#' @return
#' A list containing:
#' \describe{
#' \item{\code{row_tab}:}{A character vector of decoded names for all active row statistics tables.}
#' \item{\code{col_tab}:}{A character vector of decoded names for all active column statistics tables.}
#' \item{\code{row_plot}:}{A character vector of decoded names for all active feature-based plots, i.e., where each point is a feature.}
#' \item{\code{col_plot}:}{A character vector of decoded names for all active sample-based plots, i.e., where each point is a sample.}
#' }
#'
#' @details
#' Decoded names are returned as the output values are intended to be displayed to the user.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_link_sources
#' @seealso
#' \code{\link{.sanitize_memory}},
#' \code{\link{.panel_generation}}
.define_link_sources <- function(active_panels) {
    # Decoded names are shown to the user, so translate every encoded
    # panel type/ID pair up-front in a single vectorized call.
    panel_types <- active_panels$Type
    decoded <- .decode_panel_name(panel_types, active_panels$ID)
    # Partition the decoded names by the kind of panel that can transmit
    # a selection to other panels.
    list(
        row_tab=decoded[panel_types == "rowStatTable"],
        col_tab=decoded[panel_types == "colStatTable"],
        row_plot=decoded[panel_types %in% row_point_plot_types],
        col_plot=decoded[panel_types %in% col_point_plot_types]
    )
}
#' Choose a linked panel
#'
#' Chooses a linked panel from those available, forcing a valid choice if required.
#'
#' @param chosen String specifying the proposed choice, usually a decoded panel name.
#' @param available Character vector containing the valid choices, usually decoded panel names.
#' @param force_default Logical scalar indicating whether a non-empty default should be returned if \code{chosen} is not valid.
#'
#' @return A string containing a valid choice, or an empty string.
#'
#' @details
#' If \code{chosen} is in \code{available}, it will be directly returned.
#' If not, and if \code{force_default=TRUE} and \code{available} is not empty, the first element of \code{available} is returned.
#' Otherwise, an empty string is returned.
#'
#' Setting \code{force_default=TRUE} is required for panels linking to row statistics tables, where an empty choice would result in an invalid plot.
#' However, a default choice is not necessary for point selection transmission, where no selection is perfectly valid.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_choose_link
#' @seealso
#' \code{\link{.panel_generation}}
.choose_link <- function(chosen, available, force_default=FALSE) {
    # A proposed choice that is still valid is returned untouched.
    if (chosen %in% available) {
        return(chosen)
    }
    # Otherwise fall back to the first valid option when a non-empty
    # default is required and one exists; an empty string otherwise.
    if (force_default && length(available)) {
        return(available[1])
    }
    ""
}
#' Add a visual parameter box for column plots
#'
#' Create a visual parameter box for column-based plots, i.e., where each sample is a point.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param active_row_tab A character vector of decoded names for available row statistics tables.
#' @param active_col_tab A character vector of decoded names for available column statistics tables.
#' @param se A SingleCellExperiment object with precomputed UI information from \code{\link{.precompute_UI_info}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for column-based plots.
#'
#' @details
#' Column-based plots can be coloured by nothing, by column metadata or by the expression of certain features.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for font size, point size and opacity, and legend placement.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to colour by column metadata will open up a \code{selectInput} to specify the metadata field to use.
#' Choosing to colour by feature name will open up a \code{selectizeInput}.
#' However, the values are filled on the server-side, rather than being sent to the client; this avoids long start times during re-rendering.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no column metadata fields, users will not be allowed to colour by column metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical column metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_visual_box_for_column_plots
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput
#' checkboxGroupInput
#' @importFrom colourpicker colourInput
.create_visual_box_for_column_plots <- function(mode, id, param_choices, active_row_tab, active_col_tab, se) {
    # Extract choices precomputed by .precompute_UI_info() and stored in 'se'.
    covariates <- colnames(colData(se))
    discrete_covariates <- .get_internal_info(se, "column_groupable")
    numeric_covariates <- .get_internal_info(se, "column_numeric")
    all_assays <- .get_internal_info(se, "all_assays")
    # All input identifiers are namespaced by the encoded panel name (mode + id).
    colorby_field <- paste0(mode, id, "_", .colorByField)
    shapeby_field <- paste0(mode, id, "_", .shapeByField)
    sizeby_field <- paste0(mode, id, "_", .sizeByField)
    pchoice_field <- paste0(mode, id, "_", .visualParamChoice)
    collapseBox(
        id=paste0(mode, id, "_", .visualParamBoxOpen),
        title="Visual parameters",
        open=param_choices[[.visualParamBoxOpen]],
        # Check boxes that control which visual parameter groups are shown.
        checkboxGroupInput(
            inputId=pchoice_field, label=NULL, inline=TRUE,
            selected=param_choices[[.visualParamChoice]][[1]],
            choices=.define_visual_options(discrete_covariates, numeric_covariates)),
        # Colour parameters: each radio choice reveals its own nested inputs.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceColorTitle,
            hr(),
            radioButtons(
                colorby_field, label="Color by:", inline=TRUE,
                choices=.define_color_options_for_column_plots(se),
                selected=param_choices[[.colorByField]]
            ),
            .conditional_on_radio(
                colorby_field, .colorByNothingTitle,
                colourInput(paste0(mode, id, "_", .colorByDefaultColor), label=NULL,
                    value=param_choices[[.colorByDefaultColor]])
            ),
            .conditional_on_radio(
                colorby_field, .colorByColDataTitle,
                selectInput(paste0(mode, id, "_", .colorByColData), label=NULL,
                    choices=covariates, selected=param_choices[[.colorByColData]])
            ),
            # Colour by feature expression; the selectize choices are filled
            # server-side (hence choices=NULL) to avoid slow re-rendering.
            .conditional_on_radio(
                colorby_field, .colorByFeatNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorByFeatName), label=NULL, choices=NULL, selected=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorByFeatNameAssay), label=NULL,
                        choices=all_assays, selected=param_choices[[.colorByFeatNameAssay]])),
                # Linked row table; a non-empty default is forced as an empty
                # choice would make the plot invalid.
                selectInput(
                    paste0(mode, id, "_", .colorByRowTable), label=NULL, choices=active_row_tab,
                    selected=.choose_link(param_choices[[.colorByRowTable]], active_row_tab, force_default=TRUE))
            ),
            # Highlight a single sample, optionally linked to a column table.
            .conditional_on_radio(colorby_field, .colorBySampNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorBySampName), label=NULL, selected=NULL, choices=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorByColTable), label=NULL, choices=active_col_tab,
                        selected=.choose_link(param_choices[[.colorByColTable]], active_col_tab, force_default=TRUE)),
                    colourInput(
                        paste0(mode, id, "_", .colorBySampNameColor), label=NULL,
                        value=param_choices[[.colorBySampNameColor]]))
            )
        ),
        # Shape parameters; only categorical covariates are offered.
        .conditional_on_check_group(pchoice_field, .visualParamChoiceShapeTitle,
            hr(),
            radioButtons(
                shapeby_field, label="Shape by:", inline=TRUE,
                choices=.define_shape_options_for_column_plots(se),
                selected=param_choices[[.shapeByField]]
            ),
            .conditional_on_radio(
                shapeby_field, .shapeByColDataTitle,
                selectInput(
                    paste0(mode, id, "_", .shapeByColData), label=NULL,
                    choices=discrete_covariates, selected=param_choices[[.shapeByColData]])
            )
        ),
        # Faceting parameters (row/column faceting by categorical covariates).
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceFacetTitle,
            hr(), .add_facet_UI_elements_for_column_plots(mode, id, param_choices, discrete_covariates)),
        # Point aesthetics: size (fixed or by numeric covariate), plus the
        # opacity/downsampling controls from .add_point_UI_elements().
        .conditional_on_check_group(
            pchoice_field, .visualParamChoicePointTitle,
            hr(),
            radioButtons(
                sizeby_field, label="Size by:", inline=TRUE,
                choices=.define_size_options_for_column_plots(se),
                selected=param_choices[[.sizeByField]]
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByNothingTitle,
                numericInput(
                    paste0(mode, id, "_", .plotPointSize), label="Point size:",
                    min=0, value=param_choices[,.plotPointSize])
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByColDataTitle,
                selectInput(paste0(mode, id, "_", .sizeByColData), label=NULL,
                    choices=numeric_covariates, selected=param_choices[[.sizeByColData]])
            ),
            .add_point_UI_elements(mode, id, param_choices)),
        # Other parameters: contour overlay, font size and legend position.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceOtherTitle,
            hr(),
            checkboxInput(
                inputId=paste0(mode, id, "_", .contourAddTitle),
                label="Add contour (scatter only)",
                value=FALSE),
            .conditional_on_check_solo(
                paste0(mode, id, "_", .contourAddTitle),
                on_select=TRUE,
                colourInput(
                    paste0(mode, id, "_", .contourColor), label=NULL,
                    value=param_choices[[.contourColor]])),
            .add_other_UI_elements(mode, id, param_choices))
    )
}
#' Define colouring options
#'
#' Define the available colouring options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Colouring by column data is not available if no column data exists in \code{se} - same for the row data.
#' Colouring by feature names is not available if there are no features in \code{se}.
#' For column plots, we have an additional requirement that there must also be assays in \code{se} to colour by features.
#'
#' @return A character vector of available colouring modes, i.e., nothing, by column/row data or by feature name.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_color_options
.define_color_options_for_column_plots <- function(se) {
    # A single fixed colour is always offered.
    choices <- .colorByNothingTitle
    # Colouring by column metadata needs at least one colData field.
    if (ncol(colData(se)) > 0L) {
        choices <- c(choices, .colorByColDataTitle)
    }
    # Colouring by feature expression needs both features and assay values.
    if (nrow(se) > 0L && length(assayNames(se)) > 0L) {
        choices <- c(choices, .colorByFeatNameTitle)
    }
    # Highlighting a sample needs at least one sample.
    if (ncol(se) > 0L) {
        choices <- c(choices, .colorBySampNameTitle)
    }
    choices
}
#' Define shaping options
#'
#' Define the available shaping options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Shaping by column data is not available if no categorical column metadata exists in \code{se} - same for the row data.
#'
#' @return A character vector of available shaping modes, i.e., nothing or by column/row data
#'
#' @author Kevin Rue-Albrecht
#' @rdname INTERNAL_define_shape_options
.define_shape_options_for_column_plots <- function(se) {
    # Shaping is only possible with categorical (groupable) colData fields.
    out <- .shapeByNothingTitle
    if (length(.get_internal_info(se, "column_groupable")) > 0L) {
        out <- c(out, .shapeByColDataTitle)
    }
    out
}
#' Define sizing options
#'
#' Define the available sizing options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Sizing by column data is not available if no numeric column metadata exists in \code{se} - same for the row data.
#'
#' @return A character vector of available sizing modes, i.e., nothing or by column/row data
#'
#' @author Kevin Rue-Albrecht, Charlotte Soneson
#' @rdname INTERNAL_define_size_options
.define_size_options_for_column_plots <- function(se) {
    # Sizing is only possible with numeric colData fields.
    out <- .sizeByNothingTitle
    if (length(.get_internal_info(se, "column_numeric")) > 0L) {
        out <- c(out, .sizeByColDataTitle)
    }
    out
}
#' Define visual parameter check options
#'
#' Define the available visual parameter check boxes that can be ticked.
#'
#' @param discrete_covariates A character vector of names of categorical covariates.
#' @param numeric_covariates A character vector of names of numeric covariates.
#'
#' @details
#' Currently, the only special case is when there are no categorical covariates, in which case the shaping and faceting check boxes will not be available.
#' The check boxes for showing the colouring, point aesthetics and other options are always available.
#'
#' @return A character vector of check boxes that can be clicked in the UI.
#'
#' @author Aaron Lun, Kevin Rue-Albrecht
#' @rdname INTERNAL_define_visual_options
.define_visual_options <- function(discrete_covariates, numeric_covariates) {
    has_discrete <- length(discrete_covariates) > 0L
    # The colour check box is always available.
    out <- .visualParamChoiceColorTitle
    # Shaping requires at least one categorical covariate.
    if (has_discrete) {
        out <- c(out, .visualParamChoiceShapeTitle)
    }
    # Point aesthetics always follow the shape entry, when the latter exists.
    out <- c(out, .visualParamChoicePointTitle)
    # Faceting also requires a categorical covariate.
    if (has_discrete) {
        out <- c(out, .visualParamChoiceFacetTitle)
    }
    # NOTE(review): 'numeric_covariates' is currently unused here — confirm
    # whether any check box was meant to depend on it.
    c(out, .visualParamChoiceOtherTitle)
}
#' Visual parameter box for row plots
#'
#' Create a visual parameter box for row-based plots, i.e., where each feature is a point.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param active_row_tab A character vector of decoded names for available row statistics tables.
#' @param active_col_tab A character vector of decoded names for available column statistics tables.
#' @param se A SingleCellExperiment object with precomputed UI information from \code{\link{.precompute_UI_info}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for row-based plots.
#'
#' @details
#' This is similar to \code{\link{.create_visual_box_for_column_plots}}, with some differences.
#' Row-based plots can be coloured by nothing, by row metadata or by the \emph{selection} of certain features.
#' That is, the single chosen feature will be highlighted on the plot; its expression values are ignored.
#' Options are provided to choose the colour with which the highlighting is performed.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no row metadata fields, users will not be allowed to colour by row metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical row metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_visual_box_for_row_plots
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}}
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput
#' checkboxGroupInput
#' @importFrom colourpicker colourInput
.create_visual_box_for_row_plots <- function(mode, id, param_choices, active_row_tab, active_col_tab, se) {
    # Extract choices precomputed by .precompute_UI_info() and stored in 'se'.
    covariates <- colnames(rowData(se))
    discrete_covariates <- .get_internal_info(se, "row_groupable")
    numeric_covariates <- .get_internal_info(se, "row_numeric")
    all_assays <- .get_internal_info(se, "all_assays")
    # All input identifiers are namespaced by the encoded panel name (mode + id).
    colorby_field <- paste0(mode, id, "_", .colorByField)
    shapeby_field <- paste0(mode, id, "_", .shapeByField)
    sizeby_field <- paste0(mode, id, "_", .sizeByField)
    pchoice_field <- paste0(mode, id, "_", .visualParamChoice)
    collapseBox(
        id=paste0(mode, id, "_", .visualParamBoxOpen),
        title="Visual parameters",
        open=param_choices[[.visualParamBoxOpen]],
        # Check boxes that control which visual parameter groups are shown.
        checkboxGroupInput(
            inputId=pchoice_field, label=NULL, inline=TRUE,
            selected=param_choices[[.visualParamChoice]][[1]],
            choices=.define_visual_options(discrete_covariates, numeric_covariates)),
        # Colour parameters: each radio choice reveals its own nested inputs.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceColorTitle,
            radioButtons(
                colorby_field, label="Color by:", inline=TRUE,
                choices=.define_color_options_for_row_plots(se),
                selected=param_choices[[.colorByField]]
            ),
            .conditional_on_radio(
                colorby_field, .colorByNothingTitle,
                colourInput(
                    paste0(mode, id, "_", .colorByDefaultColor), label=NULL,
                    value=param_choices[[.colorByDefaultColor]])
            ),
            .conditional_on_radio(
                colorby_field, .colorByRowDataTitle,
                selectInput(
                    paste0(mode, id, "_", .colorByRowData), label=NULL,
                    choices=covariates, selected=param_choices[[.colorByRowData]])
            ),
            # Highlight a single feature; the selectize choices are filled
            # server-side (hence choices=NULL) to avoid slow re-rendering.
            .conditional_on_radio(colorby_field, .colorByFeatNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorByFeatName), label=NULL, selected=NULL, choices=NULL, multiple=FALSE),
                    # Linked row table; a non-empty default is forced as an
                    # empty choice would make the plot invalid.
                    selectInput(
                        paste0(mode, id, "_", .colorByRowTable), label=NULL, choices=active_row_tab,
                        selected=.choose_link(param_choices[[.colorByRowTable]], active_row_tab, force_default=TRUE)),
                    colourInput(paste0(mode, id, "_", .colorByFeatNameColor), label=NULL,
                        value=param_choices[[.colorByFeatNameColor]]))
            ),
            # Colour by a sample's assay values across all features.
            .conditional_on_radio(colorby_field, .colorBySampNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorBySampName), label=NULL, choices=NULL, selected=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorBySampNameAssay), label=NULL,
                        choices=all_assays, selected=param_choices[[.colorBySampNameAssay]])),
                selectInput(
                    paste0(mode, id, "_", .colorByColTable), label=NULL, choices=active_col_tab,
                    selected=.choose_link(param_choices[[.colorByColTable]], active_col_tab, force_default=TRUE))
            )
        ),
        # Shape parameters; only categorical covariates are offered.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceShapeTitle,
            hr(),
            radioButtons(
                shapeby_field, label="Shape by:", inline=TRUE,
                choices=.define_shape_options_for_row_plots(se),
                selected=param_choices[[.shapeByField]]
            ),
            .conditional_on_radio(
                shapeby_field, .shapeByRowDataTitle,
                selectInput(
                    paste0(mode, id, "_", .shapeByRowData), label=NULL,
                    choices=discrete_covariates, selected=param_choices[[.shapeByRowData]])
            )
        ),
        # Faceting parameters (row/column faceting by categorical covariates).
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceFacetTitle,
            hr(), .add_facet_UI_elements_for_row_plots(mode, id, param_choices, discrete_covariates)),
        # Point aesthetics: size (fixed or by numeric covariate), plus the
        # opacity/downsampling controls from .add_point_UI_elements().
        # BUGFIX: the point group was previously emitted a second time below,
        # duplicating the .add_point_UI_elements() input IDs (invalid HTML and
        # broken Shiny bindings); the duplicate has been removed.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoicePointTitle,
            hr(),
            radioButtons(
                sizeby_field, label="Size by:", inline=TRUE,
                choices=.define_size_options_for_row_plots(se),
                selected=param_choices[[.sizeByField]]
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByNothingTitle,
                numericInput(
                    paste0(mode, id, "_", .plotPointSize), label="Point size:",
                    min=0, value=param_choices[,.plotPointSize])
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByRowDataTitle,
                selectInput(paste0(mode, id, "_", .sizeByRowData), label=NULL,
                    choices=numeric_covariates, selected=param_choices[[.sizeByRowData]])
            ),
            .add_point_UI_elements(mode, id, param_choices)),
        # Other parameters: font size and legend position.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceOtherTitle,
            hr(), .add_other_UI_elements(mode, id, param_choices))
    )
}
#' @rdname INTERNAL_define_color_options
.define_color_options_for_row_plots <- function(se) {
color_choices <- .colorByNothingTitle
if (ncol(rowData(se))) {
color_choices <- c(color_choices, .colorByRowDataTitle)
}
if (nrow(se)) {
color_choices <- c(color_choices, .colorByFeatNameTitle)
}
if (ncol(se) && length(assayNames(se))) {
color_choices <- c(color_choices, .colorBySampNameTitle)
}
return(color_choices)
}
#' @rdname INTERNAL_define_shape_options
.define_shape_options_for_row_plots <- function(se) {
shape_choices <- .shapeByNothingTitle
row_groupable <- .get_internal_info(se, "row_groupable")
if (length(row_groupable)) {
shape_choices <- c(shape_choices, .shapeByRowDataTitle)
}
return(shape_choices)
}
#' @rdname INTERNAL_define_size_options
.define_size_options_for_row_plots <- function(se) {
size_choices <- .sizeByNothingTitle
row_numeric <- .get_internal_info(se, "row_numeric")
if (length(row_numeric)) {
size_choices <- c(size_choices, .sizeByRowDataTitle)
}
return(size_choices)
}
#' Faceting visual parameters
#'
#' Create UI elements for selection of faceting visual parameters.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param covariates Character vector listing available covariates from the \code{colData} or \code{rowData} slot, respectively.
#'
#' @return
#' A HTML tag object containing faceting parameter inputs.
#'
#' @details
#' This creates UI elements to choose the row and column faceting covariates.
#'
#' @author Kevin Rue-Albrecht
#' @rdname INTERNAL_add_facet_UI_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny tagList selectInput
.add_facet_UI_elements_for_column_plots <- function(mode, id, param_choices, covariates) {
    # Inputs are namespaced by the encoded panel name.
    prefix <- paste0(mode, id, "_")
    row_check <- paste0(prefix, .facetByRow)
    col_check <- paste0(prefix, .facetByColumn)
    tagList(
        # Ticking a box reveals the covariate chooser for that facet dimension.
        checkboxInput(row_check, label="Facet by row",
            value=param_choices[[.facetByRow]]),
        .conditional_on_check_solo(
            row_check, on_select=TRUE,
            selectInput(paste0(prefix, .facetRowsByColData), label=NULL,
                choices=covariates, selected=param_choices[[.facetRowsByColData]])
        ),
        checkboxInput(col_check, label="Facet by column",
            value=param_choices[[.facetByColumn]]),
        .conditional_on_check_solo(
            col_check, on_select=TRUE,
            selectInput(paste0(prefix, .facetColumnsByColData), label=NULL,
                choices=covariates, selected=param_choices[[.facetColumnsByColData]])
        )
    )
}
#' @rdname INTERNAL_add_facet_UI_elements
.add_facet_UI_elements_for_row_plots <- function(mode, id, param_choices, covariates) {
rowId <- paste0(mode, id, "_", .facetByRow)
columnId <- paste0(mode, id, "_", .facetByColumn)
tagList(
checkboxInput(
rowId, label="Facet by row",
value=param_choices[, .facetByRow]),
.conditional_on_check_solo(
rowId, on_select=TRUE,
selectInput(
paste0(mode, id, "_", .facetRowsByRowData), label=NULL,
choices=covariates, selected=param_choices[[.facetRowsByRowData]])
),
checkboxInput(
columnId, label="Facet by column",
value=param_choices[, .facetByColumn]),
.conditional_on_check_solo(
columnId, on_select=TRUE,
selectInput(paste0(mode, id, "_", .facetColumnsByRowData), label=NULL,
choices=covariates, selected=param_choices[[.facetColumnsByRowData]])
)
)
}
#' General visual parameters
#'
#' Create UI elements for selection of general visual parameters.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#'
#' @return
#' A HTML tag object containing visual parameter inputs.
#'
#' @details
#' This creates UI elements to choose the font size, point size and opacity, and legend placement.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_add_visual_UI_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny tagList numericInput sliderInput hr checkboxInput
.add_point_UI_elements <- function(mode, id, param_choices) {
    # Inputs are namespaced by the encoded panel name.
    prefix <- paste0(mode, id, "_")
    downsample_id <- paste0(prefix, .plotPointDownsample)
    tagList(
        # Point opacity slider.
        sliderInput(
            paste0(prefix, .plotPointAlpha), label="Point opacity",
            min=0.1, max=1, value=param_choices[[.plotPointAlpha]]),
        hr(),
        # Optional downsampling for rendering speed.
        checkboxInput(
            downsample_id, label="Downsample points for speed",
            value=param_choices[[.plotPointDownsample]]),
        # The resolution input only appears while downsampling is enabled.
        .conditional_on_check_solo(
            downsample_id, on_select=TRUE,
            numericInput(
                paste0(prefix, .plotPointSampleRes), label="Sampling resolution:",
                min=1, value=param_choices[[.plotPointSampleRes]])
        )
    )
}
#' @rdname INTERNAL_add_visual_UI_elements
#' @importFrom shiny tagList radioButtons numericInput
.add_other_UI_elements <- function(mode, id, param_choices) {
tagList(
numericInput(
paste0(mode, id, "_", .plotFontSize), label="Font size:",
min=0, value=param_choices[,.plotFontSize]),
radioButtons(
paste0(mode, id, "_", .plotLegendPosition), label="Legend position:", inline=TRUE,
choices=c(.plotLegendBottomTitle, .plotLegendRightTitle),
selected=param_choices[,.plotLegendPosition])
)
}
#' Point selection parameter box
#'
#' Create a point selection parameter box for all point-based plots.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param selectable A character vector of decoded names for available transmitting panels.
#' @param source_type Type of the panel that is source of the selection. Either \code{"row"} or \code{"column"}.
#' @param ... Additional arguments passed to \code{\link{collapseBox}}.
#' @param field Column name in the DataFrame of parameters choices for the current plot.
#'
#' @return
#' For \code{.create_selection_param_box} and \code{.create_selection_param_box_define_box},
#' a HTML tag object containing a \code{\link{collapseBox}} with UI elements for changing point selection parameters.
#'
#' For \code{.create_selection_param_box_define_choices}, a HTML tag object containing a \code{selectInput} for choosing the transmitting panels.
#'
#' @details
#' The \code{.create_selection_param_box} function creates a collapsible box that contains point selection options, initialized with the choices in \code{memory}.
#' Options include the choice of transmitting plot and the type of selection effect.
#' Each effect option, once selected, may yield a further subset of nested options.
#' For example, choosing to colour on the selected points will open up a choice of colour to use.
#'
#' The other two functions are helper functions that avoid re-writing related code in the \code{\link{.panel_generation}} function.
#' This is mostly for other panel types that take selections but do not follow the exact structure produced by \code{.create_selection_param_box}.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_selection_param_box
#' @seealso
#' \code{\link{.panel_generation}}
#'
#' @importFrom shiny sliderInput radioButtons selectInput
#' @importFrom colourpicker colourInput
.create_selection_param_box <- function(mode, id, param_choices, selectable, source_type=c("row", "column")) {
    source_type <- match.arg(source_type)
    prefix <- paste0(mode, id, "_")
    effect_field <- paste0(prefix, .selectEffect)
    .create_selection_param_box_define_box(
        mode, id, param_choices,
        # Choice of transmitting panel.
        .create_selection_param_box_define_choices(mode, id, param_choices,
            field=.selectByPlot, selectable=selectable, source_type),
        # Effect of the received selection on this plot.
        radioButtons(
            effect_field, label="Selection effect:", inline=TRUE,
            choices=c(.selectRestrictTitle, .selectColorTitle, .selectTransTitle),
            selected=param_choices[[.selectEffect]]),
        # The colour choice only matters for the colouring effect...
        .conditional_on_radio(
            effect_field, .selectColorTitle,
            colourInput(
                paste0(prefix, .selectColor), label=NULL,
                value=param_choices[[.selectColor]])
        ),
        # ...and the alpha slider only for the transparency effect.
        .conditional_on_radio(
            effect_field, .selectTransTitle,
            sliderInput(
                paste0(prefix, .selectTransAlpha), label=NULL,
                min=0, max=1, value=param_choices[[.selectTransAlpha]])
        )
    )
}
#' @rdname INTERNAL_create_selection_param_box
.create_selection_param_box_define_box <- function(mode, id, param_choices, ...) {
collapseBox(
id=paste0(mode, id, "_", .selectParamBoxOpen),
title="Selection parameters",
open=param_choices[[.selectParamBoxOpen]],
...)
}
#' @rdname INTERNAL_create_selection_param_box
.create_selection_param_box_define_choices <- function(mode, id, param_choices, field, selectable, source_type=c("row", "column")) {
selectInput(
paste0(mode, id, "_", field),
label=sprintf("Receive %s selection from:", source_type),
choices=selectable,
selected=.choose_link(param_choices[[field]], selectable))
}
#' Conditional elements on radio or checkbox selection
#'
#' Creates a conditional UI element that appears upon a certain choice in a radio button or checkbox group selection.
#'
#' @param id String containing the id of the UI element for the radio buttons or checkbox group.
#' @param choice String containing the choice on which to show the conditional elements.
#' @param on_select Logical scalar specifying whether the conditional element should be shown upon selection in a check box, or upon de-selection (if \code{FALSE}).
#' @param ... UI elements to show conditionally.
#'
#' @return
#' A HTML object containing elements that only appear when \code{choice} is selected in the UI element for \code{id}.
#'
#' @details
#' This function is useful for hiding options that are irrelevant when a different radio button is selected, or when the corresponding checkbox element is unselected.
#' In this manner, we can avoid cluttering the UI.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_conditional_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_selection_param_box}},
#' \code{\link{.create_visual_box_for_row_plots}},
#' \code{\link{.create_visual_box_for_column_plots}}
#'
#' @importFrom shiny conditionalPanel
.conditional_on_radio <- function(id, choice, ...) {
    # Show the elements in '...' only while radio input 'id' equals 'choice'.
    js_condition <- sprintf('(input["%s"] == "%s")', id, choice)
    conditionalPanel(condition=js_condition, ...)
}
#' @rdname INTERNAL_conditional_elements
#' @importFrom shiny conditionalPanel
.conditional_on_check_solo <- function(id, on_select=TRUE, ...) {
choice <- ifelse(on_select, 'true', 'false')
conditionalPanel(condition=sprintf('(input["%s"] == %s)', id, choice), ...)
}
#' @rdname INTERNAL_conditional_elements
#' @importFrom shiny conditionalPanel
.conditional_on_check_group <- function(id, choice, ...) {
conditionalPanel(condition=sprintf('(input["%s"].includes("%s"))', id, choice), ...)
}
#' Coerce box status to custom classes
#'
#' Coerce the status of a \code{shinydashboard::box} to use a custom \pkg{iSEE} class.
#'
#' @param in_box A HTML tag object corresponding to a \code{box} object from the \pkg{shinydashboard} package.
#' @param mode String specifying the encoded panel type of the current plot.
#' @param old_status String specifying the current status of the \code{box}, to be replaced by \code{mode}.
#'
#' @return A modified \code{in_box} where the status is changed from \code{old_status} to \code{mode}.
#'
#' @details
#' The \code{\link[shinydashboard]{box}} function does not allow use of custom statuses.
#' As a result, we generate the box using the \code{"danger"} status, and replace it afterwards with our custom status.
#' This gives us full control over the box colours, necessary for proper colour-coding of each panel type.
#'
#' Note that the boxes from \pkg{shinydashboard} are used to enclose each plot/table panel in the \code{iSEE} app.
#' They do \emph{not} represent the parameter boxes, which are instead enclosed in Bootstrap panels (see \code{\link{collapseBox}}).
#'
#' @author Aaron Lun
#' @rdname INTERNAL_coerce_box_status
#' @seealso
#' \code{\link{.panel_organization}},
#' \code{\link{.panel_generation}}
.coerce_box_status <- function(in_box, mode, old_status="danger") {
    # shinydashboard::box() only accepts stock statuses, so the box is built
    # with 'old_status' and its CSS class is swapped for the panel type here.
    header <- in_box$children[[1]]
    header$attribs$class <- sub(
        paste0("box-", old_status),
        paste0("box-", tolower(mode)),
        header$attribs$class)
    in_box$children[[1]] <- header
    in_box
}
# Shared inline CSS for action buttons: white text on the Bioconductor
# teal (#0092AC), applied wherever buttons are created in the app.
.actionbutton_biocstyle <- "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
#' Precompute UI information
#'
#' Precompute values repeatedly needed by the UI and stash them in the
#' internal metadata of a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#' @param data_fun_list A named list of custom plotting functions.
#' @param stat_fun_list A named list of custom statistics functions.
#'
#' @details
#' The precomputed values cover:
#' \itemize{
#' \item Unique-ified selectize choices, protecting against duplicated or unnamed assays, samples and reduced dimension results.
#' \item The names of discrete (and numeric) metadata fields, used to restrict the choices for faceting and other aesthetics.
#' \item The list of custom data plot functions supplied to the \code{\link{iSEE}} function.
#' \item The list of custom statistics table functions supplied to the \code{\link{iSEE}} function.
#' }
#'
#' Keeping everything in the internal metadata means that a single \code{se} argument
#' can be passed around, with each UI function extracting the fields it needs.
#' This avoids creating functions with many different arguments, which would be difficult to maintain.
#'
#' @author Aaron Lun
#'
#' @return A SingleCellExperiment with values stored in an \code{iSEE} field in the internal metadata.
#'
#' @seealso
#' \code{\link{.which_groupable}},
#' \code{\link{.sanitize_names}},
#' \code{\link{.get_internal_info}}
#' @rdname INTERNAL_precompute_UI_info
#' @importFrom SingleCellExperiment int_metadata
.precompute_UI_info <- function(se, data_fun_list, stat_fun_list) {
    cdata <- colData(se)
    rdata <- rowData(se)
    precomputed <- list(
        column_groupable=colnames(cdata)[.which_groupable(cdata)],
        row_groupable=colnames(rdata)[.which_groupable(rdata)],
        column_numeric=colnames(cdata)[.which_numeric(cdata)],
        row_numeric=colnames(rdata)[.which_numeric(rdata)],
        all_assays=.sanitize_names(assayNames(se)),
        red_dim_names=.sanitize_names(reducedDimNames(se)),
        sample_names=.sanitize_names(colnames(se)),
        custom_data_fun=data_fun_list,
        custom_stat_fun=stat_fun_list
    )
    # Unnamed columns get placeholder display names instead of sanitized indices.
    if (is.null(colnames(se))) {
        precomputed$sample_names <- sprintf("Sample %i", seq_len(ncol(se)))
    }
    int_metadata(se)$iSEE <- precomputed
    se
}
#' Sanitize names
#'
#' Convert a vector of names into a named integer vector of indices.
#'
#' @param raw_names A character vector of names.
#'
#' @return
#' An integer vector of \code{seq_along(raw_names)}, with names based on \code{raw_names}.
#'
#' @details
#' Non-unique names are guarded against by returning integer indices, which are
#' safe to use for indexing within the app.
#' For display to the user, each name is made unique by prefixing it with \code{(<index>)}.
#'
#' @author Kevin Rue-Albrecht, Aaron Lun
#' @rdname INTERNAL_sanitize_names
#' @seealso
#' \code{\link{.panel_generation}}
.sanitize_names <- function(raw_names) {
    positions <- seq_along(raw_names)
    stats::setNames(positions, sprintf("(%i) %s", positions, raw_names))
}
#' Extract internal information
#'
#' Extracts the requested fields from the internal metadata field of a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment.
#' @param field A string specifying the field to extract.
#' @param empty_fail Logical scalar indicating whether an error should be raised when no internal info is present.
#'
#' @details This function is only safe to run \emph{after} \code{\link{.precompute_UI_info}} has been called.
#' As such, \code{empty_fail} is set to \code{TRUE} to catch any possible instances of unsafe execution.
#' If you turn this off, you should ensure that the surrounding code will recompute any fields when the returned value is \code{NULL}.
#'
#' @return The value of \code{field} in the internal metadata of \code{se}.
#'
#' @author Aaron Lun
#'
#' @seealso \code{\link{.precompute_UI_info}}
#' @rdname INTERNAL_get_internal_info
#' @importFrom SingleCellExperiment int_metadata
.get_internal_info <- function(se, field, empty_fail=TRUE) {
    info <- int_metadata(se)$iSEE
    # Hard error (not a warning): downstream UI code cannot proceed without
    # the precomputed fields.
    if (is.null(info) && empty_fail) {
        stop("no internal metadata in 'se'")
    }
    # NULL 'info' with empty_fail=FALSE falls through here and yields NULL.
    info[[field]]
}
| /R/dynamicUI.R | permissive | sorjuela/iSEE | R | false | false | 64,326 | r | #' Generate the panel organization UI
#' Generate the panel organization UI
#'
#' Generates the user interface to control the organization of the panels, specifically their sizes.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#'
#' @return
#' A HTML tag object containing the UI elements for panel sizing.
#'
#' @details
#' This function will create a series of UI elements for all active panels, specifying the width or height of the panels.
#' We use a select element for the width as this is very discrete, and we use a slider for the height.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_panel_organization
#' @seealso
#' \code{\link{iSEE}},
#' \code{\link{.panel_generation}},
#' \code{\link{.setup_initial}}
#'
#' @importFrom shiny tagList selectInput sliderInput
#' @importFrom shinydashboard box
.panel_organization <- function(active_panels) {
    N <- nrow(active_panels)
    collected <- vector("list", N)
    for (i in seq_len(N)) {
        mode <- active_panels$Type[i]
        id <- active_panels$ID[i]
        prefix <- paste0(mode, id, "_")
        ctrl_panel <- box(
            selectInput(paste0(prefix, .organizationWidth), label="Width",
                choices=seq(width_limits[1], width_limits[2]), selected=active_panels$Width[i]),
            sliderInput(paste0(prefix, .organizationHeight), label="Height",
                min=height_limits[1], max=height_limits[2], value=active_panels$Height[i], step=10),
            title=.decode_panel_name(mode, id), status="danger", width=NULL, solidHeader=TRUE
        )
        # Coercing to a different box status ('danger' is a placeholder, above).
        collected[[i]] <- .coerce_box_status(ctrl_panel, mode)
    }
    do.call(tagList, collected)
}
#' Show and hide panels in the User Interface
#'
#' @param mode Panel mode. See \code{\link{panelCodes}}.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#' @param width Grid width of the new panel (must be between 1 and 12).
#' @param height Height of the new panel (in pixels).
#'
#' @return A data.frame specifying the new set of active panels.
#' @rdname INTERNAL_show_panel
#'
#' @author Kevin Rue-Albrecht
.showPanel <- function(mode, id, active_panels, width=4L, height=500L) {
    # Append one row describing the newly shown panel.
    new_entry <- DataFrame(Type=mode, ID=id, Width=width, Height=height)
    rbind(active_panels, new_entry)
}
#' @param pObjects An environment containing \code{table_links}, a graph produced by \code{\link{.spawn_table_links}};
#' and \code{memory}, a list of DataFrames containing parameters for each panel of each type.
#' @rdname INTERNAL_show_panel
#' @author Kevin Rue-Albrecht
.hidePanel <- function(mode, id, active_panels, pObjects) {
    current_type <- active_panels$Type == mode
    panel_name <- paste0(mode, id)
    # Destroying links for point selection or tables.
    .destroy_selection_panel(pObjects, panel_name)
    if (mode %in% linked_table_types) {
        .destroy_table(pObjects, panel_name)
    } else if (mode %in% point_plot_types) {
        .delete_table_links(mode, id, pObjects)
    }
    # Triggering re-rendering of the UI via change to active_panels.
    # NOTE(review): if no row matches, 'index' is integer(0) and negative
    # subsetting with it drops ALL rows; presumably callers only ever hide
    # panels that exist - confirm before relying on this with arbitrary input.
    index <- which(current_type & active_panels$ID == id)
    active_panels <- active_panels[-index, ]
    # Return the updated table of active panels
    active_panels
}
#' Generate the panels in the app body
#'
#' Constructs the active panels in the main body of the app to show the plotting results and tables.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#' @param memory A list of DataFrames, where each DataFrame corresponds to a panel type and contains the initial settings for each individual panel of that type.
#' @param se A SingleCellExperiment object.
#'
#' @return
#' A HTML tag object containing the UI elements for the main body of the app.
#' This includes the output plots/tables as well as UI elements to control them.
#'
#' @details
#' This function generates the various panels in the main body of the app, taking into account their variable widths to dynamically assign them to particular rows.
#' It will try to assign as many panels to the same row until the row is filled, at which point it will start on the next row.
#'
#' Each panel contains the actual endpoint element (i.e., the plot or table to display) as well as a number of control elements to set the parameters.
#' All control elements lie within \code{\link{collapseBox}} elements to avoid cluttering the interface.
#' The open/closed status of these boxes are retrieved from memory, and are generally closed by default.
#'
#' Construction of each panel is done by retrieving all of the memorized parameters and using them to set the initial values of various control elements.
#' This ensures that the plots are not reset during re-rendering.
#' The exception is that of the Shiny brush, which cannot be fully restored in the current version - instead, only the bounding box is shown.
#'
#' Note that control of the tables lies within \code{\link{iSEE}} itself.
#' Also, feature name selections will open up a \code{selectizeInput} where the values are filled on the server-side, rather than being sent to the client.
#' This avoids long start-up times during re-rendering.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_panel_generation
#' @seealso
#' \code{\link{iSEE}}
#'
#' @importFrom SummarizedExperiment colData rowData assayNames
#' @importFrom BiocGenerics rownames
#' @importFrom SingleCellExperiment reducedDimNames reducedDim
#' @importFrom shiny actionButton fluidRow selectInput plotOutput uiOutput
#' sliderInput tagList column radioButtons tags hr brushOpts
#' selectizeInput checkboxGroupInput textAreaInput
.panel_generation <- function(active_panels, memory, se) {
    # Accumulators for the overall tag list and for the current fluidRow.
    collected <- list()
    counter <- 1L
    cumulative.width <- 0L
    cur.row <- list()
    row.counter <- 1L
    # Extracting useful fields from the SE object.
    column_covariates <- colnames(colData(se))
    row_covariates <- colnames(rowData(se))
    all_assays <- .get_internal_info(se, "all_assays")
    red_dim_names <- .get_internal_info(se, "red_dim_names")
    sample_names <- .get_internal_info(se, "sample_names")
    custom_data_fun <- .get_internal_info(se, "custom_data_fun")
    custom_data_funnames <- c(.noSelection, names(custom_data_fun))
    custom_stat_fun <- .get_internal_info(se, "custom_stat_fun")
    custom_stat_funnames <- c(.noSelection, names(custom_stat_fun))
    # Defining all transmitting tables and plots for linking.
    link_sources <- .define_link_sources(active_panels)
    tab_by_row <- c(.noSelection, link_sources$row_tab)
    tab_by_col <- c(.noSelection, link_sources$col_tab)
    row_selectable <- c(.noSelection, link_sources$row_plot)
    col_selectable <- c(.noSelection, link_sources$col_plot)
    heatmap_sources <- c(.noSelection, link_sources$row_plot, link_sources$row_tab)
    for (i in seq_len(nrow(active_panels))) {
        mode <- active_panels$Type[i]
        id <- active_panels$ID[i]
        panel_name <- paste0(mode, id)
        panel_width <- active_panels$Width[i]
        param_choices <- memory[[mode]][id,]
        # Namespaces an input ID with the current panel's name.
        .input_FUN <- function(field) { paste0(panel_name, "_", field) }
        # Checking what to do with plot-specific parameters (e.g., brushing, clicking, plot height).
        if (! mode %in% c(linked_table_types, "customStatTable")) {
            # Heat maps are only brushed along the y-axis; all other plots use both.
            brush.opts <- brushOpts(.input_FUN(.brushField), resetOnNew=FALSE,
                direction=ifelse(mode=="heatMapPlot", "y", "xy"),
                fill=brush_fill_color[mode], stroke=brush_stroke_color[mode],
                opacity=.brushFillOpacity)
            dblclick <- .input_FUN(.zoomClick)
            clickopt <- .input_FUN(.lassoClick)
            panel_height <- paste0(active_panels$Height[i], "px")
        }
        # Creating the plot fields.
        if (mode == "redDimPlot") {
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
            # Dimension choices are restricted to those of the currently selected result.
            cur_reddim <- param_choices[[.redDimType]]
            max_dim <- ncol(reducedDim(se, cur_reddim))
            choices <- seq_len(max_dim)
            names(choices) <- choices
            plot.param <- list(
                selectInput(.input_FUN(.redDimType), label="Type",
                    choices=red_dim_names, selected=cur_reddim),
                selectInput(.input_FUN(.redDimXAxis), label="Dimension 1",
                    choices=choices, selected=param_choices[[.redDimXAxis]]),
                selectInput(.input_FUN(.redDimYAxis), label="Dimension 2",
                    choices=choices, selected=param_choices[[.redDimYAxis]])
            )
        } else if (mode == "colDataPlot") {
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
            plot.param <- list(
                selectInput(.input_FUN(.colDataYAxis),
                    label="Column of interest (Y-axis):",
                    choices=column_covariates, selected=param_choices[[.colDataYAxis]]),
                radioButtons(.input_FUN(.colDataXAxis), label="X-axis:", inline=TRUE,
                    choices=c(.colDataXAxisNothingTitle, .colDataXAxisColDataTitle),
                    selected=param_choices[[.colDataXAxis]]),
                .conditional_on_radio(.input_FUN(.colDataXAxis),
                    .colDataXAxisColDataTitle,
                    selectInput(.input_FUN(.colDataXAxisColData),
                        label="Column of interest (X-axis):",
                        choices=column_covariates, selected=param_choices[[.colDataXAxisColData]]))
            )
        } else if (mode == "featAssayPlot") {
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
            xaxis_choices <- c(.featAssayXAxisNothingTitle)
            if (length(column_covariates)) { # As it is possible for this plot to be _feasible_ but for no column data to exist.
                xaxis_choices <- c(xaxis_choices, .featAssayXAxisColDataTitle)
            }
            xaxis_choices <- c(xaxis_choices, .featAssayXAxisFeatNameTitle)
            # Feature-name selectize choices are filled server-side (choices=NULL here)
            # to avoid sending all feature names to the client on re-rendering.
            plot.param <- list(
                selectizeInput(.input_FUN(.featAssayYAxisFeatName),
                    label="Y-axis feature:", choices=NULL, selected=NULL, multiple=FALSE),
                selectInput(.input_FUN(.featAssayYAxisRowTable), label=NULL, choices=tab_by_row,
                    selected=.choose_link(param_choices[[.featAssayYAxisRowTable]], tab_by_row, force_default=TRUE)),
                selectInput(.input_FUN(.featAssayAssay), label=NULL,
                    choices=all_assays, selected=param_choices[[.featAssayAssay]]),
                radioButtons(.input_FUN(.featAssayXAxis), label="X-axis:", inline=TRUE,
                    choices=xaxis_choices, selected=param_choices[[.featAssayXAxis]]),
                .conditional_on_radio(.input_FUN(.featAssayXAxis),
                    .featAssayXAxisColDataTitle,
                    selectInput(.input_FUN(.featAssayXAxisColData),
                        label="X-axis column data:",
                        choices=column_covariates, selected=param_choices[[.featAssayXAxisColData]])),
                .conditional_on_radio(.input_FUN(.featAssayXAxis),
                    .featAssayXAxisFeatNameTitle,
                    selectizeInput(.input_FUN(.featAssayXAxisFeatName),
                        label="X-axis feature:", choices=NULL, selected=NULL, multiple=FALSE),
                    selectInput(.input_FUN(.featAssayXAxisRowTable), label=NULL,
                        choices=tab_by_row, selected=param_choices[[.featAssayXAxisRowTable]]))
            )
        } else if (mode == "rowStatTable") {
            obj <- tagList(dataTableOutput(panel_name), uiOutput(.input_FUN("annotation")))
        } else if (mode == "colStatTable") {
            obj <- dataTableOutput(panel_name)
        } else if (mode == "customStatTable" || mode == "customDataPlot") {
            if (mode == "customDataPlot") {
                obj <- plotOutput(panel_name, height=panel_height)
                fun_choices <- custom_data_funnames
            } else {
                obj <- dataTableOutput(panel_name)
                fun_choices <- custom_stat_funnames
            }
            # Label the submit button according to whether the visible arguments
            # differ from those last applied; an NA comparison (e.g., uninitialized
            # arguments) is treated as up to date.
            argsUpToDate <- param_choices[[.customArgs]] == param_choices[[.customVisibleArgs]]
            if (is.na(argsUpToDate) || argsUpToDate) {
                button_label <- .buttonUpToDateLabel
            } else {
                button_label <- .buttonUpdateLabel
            }
            plot.param <- list(
                selectInput(
                    .input_FUN(.customFun), label="Custom function:",
                    choices=fun_choices, selected=param_choices[[.customFun]]),
                textAreaInput(
                    .input_FUN(.customVisibleArgs), label="Custom arguments:", rows=5,
                    value=param_choices[[.customVisibleArgs]]),
                actionButton(.input_FUN(.customSubmit), button_label)
            )
        } else if (mode == "rowDataPlot") {
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
            plot.param <- list(
                selectInput(.input_FUN(.rowDataYAxis),
                    label="Column of interest (Y-axis):",
                    choices=row_covariates, selected=param_choices[[.rowDataYAxis]]),
                radioButtons(.input_FUN(.rowDataXAxis), label="X-axis:", inline=TRUE,
                    choices=c(.rowDataXAxisNothingTitle, .rowDataXAxisRowDataTitle),
                    selected=param_choices[[.rowDataXAxis]]),
                .conditional_on_radio(.input_FUN(.rowDataXAxis),
                    .rowDataXAxisRowDataTitle,
                    selectInput(.input_FUN(.rowDataXAxisRowData),
                        label="Column of interest (X-axis):",
                        choices=row_covariates, selected=param_choices[[.rowDataXAxisRowData]]))
            )
        } else if (mode == "sampAssayPlot") {
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, click=clickopt, height=panel_height)
            xaxis_choices <- c(.sampAssayXAxisNothingTitle)
            if (length(row_covariates)) { # As it is possible for this plot to be _feasible_ but for no row data to exist.
                xaxis_choices <- c(xaxis_choices, .sampAssayXAxisRowDataTitle)
            }
            xaxis_choices <- c(xaxis_choices, .sampAssayXAxisSampNameTitle)
            plot.param <- list(
                selectInput(
                    .input_FUN(.sampAssayYAxisSampName),
                    label="Sample of interest (Y-axis):",
                    choices=sample_names, selected=param_choices[[.sampAssayYAxisSampName]]),
                selectInput(
                    .input_FUN(.sampAssayYAxisColTable), label=NULL, choices=tab_by_col,
                    selected=.choose_link(param_choices[[.sampAssayYAxisColTable]], tab_by_col, force_default=TRUE)),
                selectInput(
                    .input_FUN(.sampAssayAssay), label=NULL,
                    choices=all_assays, selected=param_choices[[.sampAssayAssay]]),
                radioButtons(
                    .input_FUN(.sampAssayXAxis), label="X-axis:", inline=TRUE,
                    choices=xaxis_choices, selected=param_choices[[.sampAssayXAxis]]),
                .conditional_on_radio(
                    .input_FUN(.sampAssayXAxis),
                    .sampAssayXAxisRowDataTitle,
                    selectInput(
                        .input_FUN(.sampAssayXAxisRowData),
                        label="Row data of interest (X-axis):",
                        choices=row_covariates, selected=param_choices[[.sampAssayXAxisRowData]])),
                .conditional_on_radio(
                    .input_FUN(.sampAssayXAxis),
                    .sampAssayXAxisSampNameTitle,
                    selectInput(
                        .input_FUN(.sampAssayXAxisSampName),
                        label="Sample of interest (X-axis):",
                        choices=sample_names, selected=param_choices[[.sampAssayXAxisSampName]]),
                    selectInput(.input_FUN(.sampAssayXAxisColTable), label=NULL,
                        choices=tab_by_col, selected=param_choices[[.sampAssayXAxisColTable]]))
            )
        } else if (mode == "heatMapPlot") {
            # No 'click' here: lasso selection does not apply to heat maps.
            obj <- plotOutput(panel_name, brush=brush.opts, dblclick=dblclick, height=panel_height)
            plot.param <- list(
                collapseBox(
                    id=.input_FUN(.heatMapFeatNameBoxOpen),
                    title="Feature parameters",
                    open=param_choices[[.heatMapFeatNameBoxOpen]],
                    selectInput(
                        .input_FUN(.heatMapImportSource), label="Import from", choices=heatmap_sources,
                        selected=.choose_link(param_choices[[.heatMapImportSource]], heatmap_sources, force_default=TRUE)),
                    actionButton(.input_FUN(.heatMapImportFeatures), "Import features"),
                    actionButton(.input_FUN(.heatMapCluster), "Cluster features"),
                    actionButton(.input_FUN(.heatMapClearFeatures), "Clear features"),
                    selectizeInput(
                        .input_FUN(.heatMapFeatName),
                        label="Features:",
                        choices=NULL, selected=NULL, multiple=TRUE,
                        options=list(plugins=list('remove_button', 'drag_drop'))),
                    selectInput(
                        .input_FUN(.heatMapAssay), label=NULL,
                        choices=all_assays, selected=param_choices[[.heatMapAssay]]),
                    hr(),
                    checkboxGroupInput(
                        .input_FUN(.heatMapCenterScale), label="Expression values are:",
                        selected=param_choices[[.heatMapCenterScale]][[1]],
                        choices=c(.heatMapCenterTitle, .heatMapScaleTitle), inline=TRUE),
                    numericInput(
                        .input_FUN(.heatMapLower), label="Lower bound:",
                        value=param_choices[[.heatMapLower]]),
                    numericInput(
                        .input_FUN(.heatMapUpper), label="Upper bound:",
                        value=param_choices[[.heatMapUpper]]),
                    .conditional_on_check_group(
                        .input_FUN(.heatMapCenterScale), .heatMapCenterTitle,
                        selectInput(
                            .input_FUN(.heatMapCenteredColors), label="Color scale:",
                            choices=c("purple-black-yellow", "blue-white-orange"),
                            selected=param_choices[[.heatMapCenteredColors]]))
                ),
                collapseBox(
                    id=.input_FUN(.heatMapColDataBoxOpen),
                    title="Column data parameters",
                    open=param_choices[[.heatMapColDataBoxOpen]],
                    selectizeInput(
                        .input_FUN(.heatMapColData),
                        label="Column data:",
                        choices=column_covariates,
                        multiple=TRUE,
                        selected=param_choices[[.heatMapColData]][[1]],
                        options=list(plugins=list('remove_button', 'drag_drop'))),
                    plotOutput(.input_FUN(.heatMapLegend))
                )
            )
        } else {
            stop(sprintf("'%s' is not a recognized panel mode", mode))
        }
        # Adding graphical parameters if we're plotting.
        if (mode %in% linked_table_types) {
            if (mode %in% "rowStatTable") {
                source_type <- "row"
                selectable <- row_selectable
            } else {
                source_type <- "column"
                selectable <- col_selectable
            }
            param <- list(hr(),
                tags$div(class="panel-group", role="tablist",
                    .create_selection_param_box_define_box(mode, id, param_choices,
                        .create_selection_param_box_define_choices(mode, id, param_choices, .selectByPlot, selectable, source_type)
                    )
                )
            )
        } else if (mode=="heatMapPlot") {
            param <- list(do.call(tags$div, c(list(class="panel-group", role="tablist"),
                plot.param,
                .create_selection_param_box(mode, id, param_choices, col_selectable, "column")
            )))
        } else {
            # Options for fundamental plot parameters.
            data_box <- do.call(collapseBox, c(list(id=.input_FUN(.dataParamBoxOpen),
                title="Data parameters", open=param_choices[[.dataParamBoxOpen]]), plot.param))
            if (mode %in% custom_panel_types) {
                # Custom panels can receive both row and column selections.
                param <- list(
                    tags$div(class="panel-group", role="tablist",
                        data_box,
                        .create_selection_param_box_define_box(
                            mode, id, param_choices,
                            .create_selection_param_box_define_choices(mode, id, param_choices, .customRowSource, row_selectable, "row"),
                            .create_selection_param_box_define_choices(mode, id, param_choices, .customColSource, col_selectable, "column")
                        )
                    )
                )
            } else {
                if (mode %in% row_point_plot_types) {
                    select_choices <- row_selectable
                    create_FUN <- .create_visual_box_for_row_plots
                    source_type <- "row"
                } else {
                    select_choices <- col_selectable
                    create_FUN <- .create_visual_box_for_column_plots
                    source_type <- "column"
                }
                param <- list(
                    tags$div(class="panel-group", role="tablist",
                        data_box,
                        create_FUN(mode, id, param_choices, tab_by_row, tab_by_col, se), # Options for visual parameters.
                        .create_selection_param_box(mode, id, param_choices, select_choices, source_type) # Options for point selection parameters.
                    )
                )
            }
        }
        # Deciding whether to continue on the current row, or start a new row.
        extra <- cumulative.width + panel_width
        if (extra > 12L) {
            collected[[counter]] <- do.call(fluidRow, cur.row)
            counter <- counter + 1L
            collected[[counter]] <- hr()
            counter <- counter + 1L
            cur.row <- list()
            row.counter <- 1L
            cumulative.width <- 0L
        }
        # Aggregating together everything into a box, and then into a column.
        # 'danger' is a placeholder status, replaced by the panel-specific one below.
        cur_box <- do.call(box, c(
            list(obj),
            param,
            list(uiOutput(.input_FUN(.panelGeneralInfo)), uiOutput(.input_FUN(.panelLinkInfo))),
            list(title=.decode_panel_name(mode, id), solidHeader=TRUE, width=NULL, status="danger")))
        cur_box <- .coerce_box_status(cur_box, mode)
        cur.row[[row.counter]] <- column(width=panel_width, cur_box, style='padding:3px;')
        row.counter <- row.counter + 1L
        cumulative.width <- cumulative.width + panel_width
    }
    # Cleaning up the leftovers.
    collected[[counter]] <- do.call(fluidRow, cur.row)
    counter <- counter + 1L
    collected[[counter]] <- hr()
    # Convert the list to a tagList - this is necessary for the list of items to display properly.
    do.call(tagList, collected)
}
#' Define link sources
#'
#' Define all possible sources of links between active panels, i.e., feature selections from row statistics tables or point selections from plots.
#'
#' @param active_panels A data.frame specifying the currently active panels, see the output of \code{\link{.setup_initial}}.
#'
#' @return
#' A list containing:
#' \describe{
#' \item{\code{row_tab}:}{A character vector of decoded names for all active row statistics tables.}
#' \item{\code{col_tab}:}{A character vector of decoded names for all active column statistics tables.}
#' \item{\code{row_plot}:}{A character vector of decoded names for all active row-based point plots.}
#' \item{\code{col_plot}:}{A character vector of decoded names for all active column-based point plots, i.e., where each point is a sample.}
#' }
#'
#' @details
#' Decoded names are returned as the output values are intended to be displayed to the user.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_link_sources
#' @seealso
#' \code{\link{.sanitize_memory}},
#' \code{\link{.panel_generation}}
.define_link_sources <- function(active_panels) {
    all_names <- .decode_panel_name(active_panels$Type, active_panels$ID)
    list(
        row_tab=all_names[active_panels$Type == "rowStatTable"],
        col_tab=all_names[active_panels$Type == "colStatTable"],
        row_plot=all_names[active_panels$Type %in% row_point_plot_types],
        col_plot=all_names[active_panels$Type %in% col_point_plot_types]
    )
}
#' Choose a linked panel
#'
#' Pick a valid linked panel from those available, optionally falling back to
#' a default when the proposed choice is invalid.
#'
#' @param chosen String specifying the proposed choice, usually a decoded panel name.
#' @param available Character vector containing the valid choices, usually decoded panel names.
#' @param force_default Logical scalar indicating whether a non-empty default should be returned if \code{chosen} is not valid.
#'
#' @return A string containing a valid choice, or an empty string.
#'
#' @details
#' A \code{chosen} value present in \code{available} is returned directly.
#' Otherwise, the first element of \code{available} is returned when
#' \code{force_default=TRUE} and \code{available} is non-empty;
#' in all other cases, an empty string is returned.
#'
#' Setting \code{force_default=TRUE} is required for panels linking to row statistics tables, where an empty choice would result in an invalid plot.
#' However, a default choice is not necessary for point selection transmission, where no selection is perfectly valid.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_choose_link
#' @seealso
#' \code{\link{.panel_generation}}
.choose_link <- function(chosen, available, force_default=FALSE) {
    if (chosen %in% available) {
        return(chosen)
    }
    if (force_default && length(available) > 0L) {
        available[1]
    } else {
        ""
    }
}
#' Add a visual parameter box for column plots
#'
#' Create a visual parameter box for column-based plots, i.e., where each sample is a point.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param active_row_tab A character vector of decoded names for available row statistics tables.
#' @param active_col_tab A character vector of decoded names for available column statistics tables.
#' @param se A SingleCellExperiment object with precomputed UI information from \code{\link{.precompute_UI_info}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for column-based plots.
#'
#' @details
#' Column-based plots can be coloured by nothing, by column metadata or by the expression of certain features.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for font size, point size and opacity, and legend placement.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to colour by column metadata will open up a \code{selectInput} to specify the metadata field to use.
#' Choosing to colour by feature name will open up a \code{selectizeInput}.
#' However, the values are filled on the server-side, rather than being sent to the client; this avoids long start times during re-rendering.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no column metadata fields, users will not be allowed to colour by column metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical column metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_visual_box_for_column_plots
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput
#' checkboxGroupInput
#' @importFrom colourpicker colourInput
.create_visual_box_for_column_plots <- function(mode, id, param_choices, active_row_tab, active_col_tab, se) {
    # Precomputed choice sets for the various selectize/select elements.
    covariates <- colnames(colData(se))
    discrete_covariates <- .get_internal_info(se, "column_groupable")
    numeric_covariates <- .get_internal_info(se, "column_numeric")
    all_assays <- .get_internal_info(se, "all_assays")
    # Input IDs for the radio groups controlling each aesthetic.
    colorby_field <- paste0(mode, id, "_", .colorByField)
    shapeby_field <- paste0(mode, id, "_", .shapeByField)
    sizeby_field <- paste0(mode, id, "_", .sizeByField)
    pchoice_field <- paste0(mode, id, "_", .visualParamChoice)
    collapseBox(
        id=paste0(mode, id, "_", .visualParamBoxOpen),
        title="Visual parameters",
        open=param_choices[[.visualParamBoxOpen]],
        # Master checkbox group: each ticked value reveals the matching subpanel below.
        checkboxGroupInput(
            inputId=pchoice_field, label=NULL, inline=TRUE,
            selected=param_choices[[.visualParamChoice]][[1]],
            choices=.define_visual_options(discrete_covariates, numeric_covariates)),
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceColorTitle,
            hr(),
            radioButtons(
                colorby_field, label="Color by:", inline=TRUE,
                choices=.define_color_options_for_column_plots(se),
                selected=param_choices[[.colorByField]]
            ),
            .conditional_on_radio(
                colorby_field, .colorByNothingTitle,
                colourInput(paste0(mode, id, "_", .colorByDefaultColor), label=NULL,
                    value=param_choices[[.colorByDefaultColor]])
            ),
            .conditional_on_radio(
                colorby_field, .colorByColDataTitle,
                selectInput(paste0(mode, id, "_", .colorByColData), label=NULL,
                    choices=covariates, selected=param_choices[[.colorByColData]])
            ),
            # Feature/sample-name selectize inputs have choices=NULL; values are
            # filled server-side to avoid sending all names to the client.
            .conditional_on_radio(
                colorby_field, .colorByFeatNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorByFeatName), label=NULL, choices=NULL, selected=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorByFeatNameAssay), label=NULL,
                        choices=all_assays, selected=param_choices[[.colorByFeatNameAssay]])),
                selectInput(
                    paste0(mode, id, "_", .colorByRowTable), label=NULL, choices=active_row_tab,
                    selected=.choose_link(param_choices[[.colorByRowTable]], active_row_tab, force_default=TRUE))
            ),
            .conditional_on_radio(colorby_field, .colorBySampNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorBySampName), label=NULL, selected=NULL, choices=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorByColTable), label=NULL, choices=active_col_tab,
                        selected=.choose_link(param_choices[[.colorByColTable]], active_col_tab, force_default=TRUE)),
                    colourInput(
                        paste0(mode, id, "_", .colorBySampNameColor), label=NULL,
                        value=param_choices[[.colorBySampNameColor]]))
            )
        ),
        .conditional_on_check_group(pchoice_field, .visualParamChoiceShapeTitle,
            hr(),
            radioButtons(
                shapeby_field, label="Shape by:", inline=TRUE,
                choices=.define_shape_options_for_column_plots(se),
                selected=param_choices[[.shapeByField]]
            ),
            .conditional_on_radio(
                shapeby_field, .shapeByColDataTitle,
                # Shape must be discrete, hence the restriction to groupable covariates.
                selectInput(
                    paste0(mode, id, "_", .shapeByColData), label=NULL,
                    choices=discrete_covariates, selected=param_choices[[.shapeByColData]])
            )
        ),
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceFacetTitle,
            hr(), .add_facet_UI_elements_for_column_plots(mode, id, param_choices, discrete_covariates)),
        .conditional_on_check_group(
            pchoice_field, .visualParamChoicePointTitle,
            hr(),
            radioButtons(
                sizeby_field, label="Size by:", inline=TRUE,
                choices=.define_size_options_for_column_plots(se),
                selected=param_choices[[.sizeByField]]
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByNothingTitle,
                # NOTE(review): uses 'param_choices[,.plotPointSize]' while the rest of
                # this function uses '[['; same value here, but inconsistent style.
                numericInput(
                    paste0(mode, id, "_", .plotPointSize), label="Point size:",
                    min=0, value=param_choices[,.plotPointSize])
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByColDataTitle,
                selectInput(paste0(mode, id, "_", .sizeByColData), label=NULL,
                    choices=numeric_covariates, selected=param_choices[[.sizeByColData]])
            ),
            .add_point_UI_elements(mode, id, param_choices)),
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceOtherTitle,
            hr(),
            # NOTE(review): the contour checkbox is initialized to FALSE rather than
            # from 'param_choices', so its state is not restored on re-render - confirm intended.
            checkboxInput(
                inputId=paste0(mode, id, "_", .contourAddTitle),
                label="Add contour (scatter only)",
                value=FALSE),
            .conditional_on_check_solo(
                paste0(mode, id, "_", .contourAddTitle),
                on_select=TRUE,
                colourInput(
                    paste0(mode, id, "_", .contourColor), label=NULL,
                    value=param_choices[[.contourColor]])),
            .add_other_UI_elements(mode, id, param_choices))
    )
}
#' Define colouring options
#'
#' Define the available colouring options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Colouring by column data is not available if no column data exists in \code{se} - same for the row data.
#' Colouring by feature names is not available if there are no features in \code{se}.
#' For column plots, we have an additional requirement that there must also be assays in \code{se} to colour by features.
#'
#' @return A character vector of available colouring modes, i.e., nothing, by column/row data or by feature name.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_color_options
.define_color_options_for_column_plots <- function(se) {
    # "No colouring" is always offered.
    choices <- .colorByNothingTitle

    # Column metadata colouring requires at least one colData field.
    if (ncol(colData(se)) > 0L) {
        choices <- c(choices, .colorByColDataTitle)
    }

    # Colouring by feature requires features AND assay values to colour with.
    if (nrow(se) > 0L && length(assayNames(se)) > 0L) {
        choices <- c(choices, .colorByFeatNameTitle)
    }

    # Sample-based colouring simply requires samples to exist.
    if (ncol(se) > 0L) {
        choices <- c(choices, .colorBySampNameTitle)
    }

    choices
}
#' Define shaping options
#'
#' Define the available shaping options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Shaping by column data is not available if no column data exists in \code{se} - same for the row data.
#' For column plots, we have an additional requirement that there must also be assays in \code{se} to shape by features.
#'
#' @return A character vector of available shaping modes, i.e., nothing or by column/row data
#'
#' @author Kevin Rue-Albrecht
#' @rdname INTERNAL_define_shape_options
.define_shape_options_for_column_plots <- function(se) {
    choices <- .shapeByNothingTitle
    # Shaping requires at least one categorical (groupable) colData field,
    # as precomputed by .precompute_UI_info.
    if (length(.get_internal_info(se, "column_groupable")) > 0L) {
        choices <- c(choices, .shapeByColDataTitle)
    }
    choices
}
#' Define sizing options
#'
#' Define the available sizing options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#'
#' @details
#' Sizing by column data is not available if no column data exists in \code{se} - same for the row data.
#' For column plots, we have an additional requirement that there must also be assays in \code{se} to size by features.
#'
#' @return A character vector of available sizing modes, i.e., nothing or by column/row data
#'
#' @author Kevin Rue-Albrecht, Charlotte Soneson
#' @rdname INTERNAL_define_size_options
.define_size_options_for_column_plots <- function(se) {
    choices <- .sizeByNothingTitle
    # Sizing requires at least one numeric colData field,
    # as precomputed by .precompute_UI_info.
    if (length(.get_internal_info(se, "column_numeric")) > 0L) {
        choices <- c(choices, .sizeByColDataTitle)
    }
    choices
}
#' Define visual parameter check options
#'
#' Define the available visual parameter check boxes that can be ticked.
#'
#' @param discrete_covariates A character vector of names of categorical covariates.
#' @param numeric_covariates A character vector of names of numeric covariates.
#'
#' @details
#' Currently, the only special case is when there are no categorical covariates, in which case the shaping and faceting check boxes will not be available.
#' The check boxes for showing the colouring, point aesthetics and other options are always available.
#'
#' @return A character vector of check boxes that can be clicked in the UI.
#'
#' @author Aaron Lun, Kevin Rue-Albrecht
#' @rdname INTERNAL_define_visual_options
.define_visual_options <- function(discrete_covariates, numeric_covariates) {
    # NOTE(review): 'numeric_covariates' is currently unused here; it is
    # retained so the signature stays consistent with the calling code.
    has_discrete <- length(discrete_covariates) > 0L
    # A NULL produced by a FALSE 'if' is silently dropped by c(),
    # so the point choice naturally lands right after the shape aesthetic.
    c(
        .visualParamChoiceColorTitle,
        if (has_discrete) .visualParamChoiceShapeTitle,
        .visualParamChoicePointTitle,
        if (has_discrete) .visualParamChoiceFacetTitle,
        .visualParamChoiceOtherTitle
    )
}
#' Visual parameter box for row plots
#'
#' Create a visual parameter box for row-based plots, i.e., where each feature is a point.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param active_row_tab A character vector of decoded names for available row statistics tables.
#' @param active_col_tab A character vector of decoded names for available column statistics tables.
#' @param se A SingleCellExperiment object with precomputed UI information from \code{\link{.precompute_UI_info}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for row-based plots.
#'
#' @details
#' This is similar to \code{\link{.create_visual_box_for_column_plots}}, with some differences.
#' Row-based plots can be coloured by nothing, by row metadata or by the \emph{selection} of certain features.
#' That is, the single chosen feature will be highlighted on the plot; its expression values are ignored.
#' Options are provided to choose the colour with which the highlighting is performed.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no row metadata fields, users will not be allowed to colour by row metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical column metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_visual_box_for_row_plots
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}}
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput
#' checkboxGroupInput
#' @importFrom colourpicker colourInput
.create_visual_box_for_row_plots <- function(mode, id, param_choices, active_row_tab, active_col_tab, se) {
    # Precomputed UI information (see .precompute_UI_info).
    covariates <- colnames(rowData(se))
    discrete_covariates <- .get_internal_info(se, "row_groupable")
    numeric_covariates <- .get_internal_info(se, "row_numeric")
    all_assays <- .get_internal_info(se, "all_assays")
    # Input IDs are namespaced by panel type and index.
    colorby_field <- paste0(mode, id, "_", .colorByField)
    shapeby_field <- paste0(mode, id, "_", .shapeByField)
    sizeby_field <- paste0(mode, id, "_", .sizeByField)
    pchoice_field <- paste0(mode, id, "_", .visualParamChoice)
    collapseBox(
        id=paste0(mode, id, "_", .visualParamBoxOpen),
        title="Visual parameters",
        open=param_choices[[.visualParamBoxOpen]],
        checkboxGroupInput(
            inputId=pchoice_field, label=NULL, inline=TRUE,
            selected=param_choices[[.visualParamChoice]][[1]],
            choices=.define_visual_options(discrete_covariates, numeric_covariates)),
        # Colour parameters: each radio choice reveals its own sub-options.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceColorTitle,
            radioButtons(
                colorby_field, label="Color by:", inline=TRUE,
                choices=.define_color_options_for_row_plots(se),
                selected=param_choices[[.colorByField]]
            ),
            .conditional_on_radio(
                colorby_field, .colorByNothingTitle,
                colourInput(
                    paste0(mode, id, "_", .colorByDefaultColor), label=NULL,
                    value=param_choices[[.colorByDefaultColor]])
            ),
            .conditional_on_radio(
                colorby_field, .colorByRowDataTitle,
                selectInput(
                    paste0(mode, id, "_", .colorByRowData), label=NULL,
                    choices=covariates, selected=param_choices[[.colorByRowData]])
            ),
            # Feature-name highlighting: selectize choices are filled in on
            # the server side, hence choices=NULL here.
            .conditional_on_radio(colorby_field, .colorByFeatNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorByFeatName), label=NULL, selected=NULL, choices=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorByRowTable), label=NULL, choices=active_row_tab,
                        selected=.choose_link(param_choices[[.colorByRowTable]], active_row_tab, force_default=TRUE)),
                    colourInput(paste0(mode, id, "_", .colorByFeatNameColor), label=NULL,
                        value=param_choices[[.colorByFeatNameColor]]))
            ),
            .conditional_on_radio(colorby_field, .colorBySampNameTitle,
                tagList(
                    selectizeInput(paste0(mode, id, "_", .colorBySampName), label=NULL, choices=NULL, selected=NULL, multiple=FALSE),
                    selectInput(
                        paste0(mode, id, "_", .colorBySampNameAssay), label=NULL,
                        choices=all_assays, selected=param_choices[[.colorBySampNameAssay]])),
                selectInput(
                    paste0(mode, id, "_", .colorByColTable), label=NULL, choices=active_col_tab,
                    selected=.choose_link(param_choices[[.colorByColTable]], active_col_tab, force_default=TRUE))
            )
        ),
        # Shape parameters, only shown when categorical row data exists.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceShapeTitle,
            hr(),
            radioButtons(
                shapeby_field, label="Shape by:", inline=TRUE,
                choices=.define_shape_options_for_row_plots(se),
                selected=param_choices[[.shapeByField]]
            ),
            .conditional_on_radio(
                shapeby_field, .shapeByRowDataTitle,
                selectInput(
                    paste0(mode, id, "_", .shapeByRowData), label=NULL,
                    choices=discrete_covariates, selected=param_choices[[.shapeByRowData]])
            )
        ),
        # Faceting parameters.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceFacetTitle,
            hr(), .add_facet_UI_elements_for_row_plots(mode, id, param_choices, discrete_covariates)),
        # Point parameters: sizing options plus the shared opacity/downsampling
        # elements. This group was previously emitted twice (once here and once
        # in a second conditional panel on the same title), which duplicated the
        # Shiny input IDs; the redundant copy has been removed.
        .conditional_on_check_group(
            pchoice_field, .visualParamChoicePointTitle,
            hr(),
            radioButtons(
                sizeby_field, label="Size by:", inline=TRUE,
                choices=.define_size_options_for_row_plots(se),
                selected=param_choices[[.sizeByField]]
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByNothingTitle,
                numericInput(
                    paste0(mode, id, "_", .plotPointSize), label="Point size:",
                    min=0, value=param_choices[,.plotPointSize])
            ),
            .conditional_on_radio(
                sizeby_field, .sizeByRowDataTitle,
                selectInput(paste0(mode, id, "_", .sizeByRowData), label=NULL,
                    choices=numeric_covariates, selected=param_choices[[.sizeByRowData]])
            ),
            .add_point_UI_elements(mode, id, param_choices)),
        # Other parameters (font size, legend position).
        .conditional_on_check_group(
            pchoice_field, .visualParamChoiceOtherTitle,
            hr(), .add_other_UI_elements(mode, id, param_choices))
    )
}
#' @rdname INTERNAL_define_color_options
.define_color_options_for_row_plots <- function(se) {
    choices <- .colorByNothingTitle
    # Row metadata colouring requires at least one rowData field.
    if (ncol(rowData(se)) > 0L) {
        choices <- c(choices, .colorByRowDataTitle)
    }
    # Feature-name highlighting only requires features to exist.
    if (nrow(se) > 0L) {
        choices <- c(choices, .colorByFeatNameTitle)
    }
    # Sample-based colouring requires samples AND assay values.
    if (ncol(se) > 0L && length(assayNames(se)) > 0L) {
        choices <- c(choices, .colorBySampNameTitle)
    }
    choices
}
#' @rdname INTERNAL_define_shape_options
.define_shape_options_for_row_plots <- function(se) {
    choices <- .shapeByNothingTitle
    # Shaping requires at least one categorical (groupable) rowData field.
    if (length(.get_internal_info(se, "row_groupable")) > 0L) {
        choices <- c(choices, .shapeByRowDataTitle)
    }
    choices
}
#' @rdname INTERNAL_define_size_options
.define_size_options_for_row_plots <- function(se) {
    choices <- .sizeByNothingTitle
    # Sizing requires at least one numeric rowData field.
    if (length(.get_internal_info(se, "row_numeric")) > 0L) {
        choices <- c(choices, .sizeByRowDataTitle)
    }
    choices
}
#' Faceting visual parameters
#'
#' Create UI elements for selection of faceting visual parameters.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param covariates Character vector listing available covariates from the \code{colData} or \code{rowData} slot, respectively.
#'
#' @return
#' A HTML tag object containing faceting parameter inputs.
#'
#' @details
#' This creates UI elements to choose the row and column faceting covariates.
#' Each covariate selector only appears when its corresponding check box is ticked.
#'
#' @author Kevin Rue-Albrecht
#' @rdname INTERNAL_add_facet_UI_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny tagList selectInput
.add_facet_UI_elements_for_column_plots <- function(mode, id, param_choices, covariates) {
    facet_row_id <- paste0(mode, id, "_", .facetByRow)
    facet_col_id <- paste0(mode, id, "_", .facetByColumn)
    tagList(
        checkboxInput(
            facet_row_id, label="Facet by row",
            value=param_choices[, .facetByRow]),
        .conditional_on_check_solo(
            facet_row_id, on_select=TRUE,
            selectInput(
                paste0(mode, id, "_", .facetRowsByColData), label=NULL,
                choices=covariates, selected=param_choices[[.facetRowsByColData]])),
        checkboxInput(
            facet_col_id, label="Facet by column",
            value=param_choices[, .facetByColumn]),
        .conditional_on_check_solo(
            facet_col_id, on_select=TRUE,
            selectInput(
                paste0(mode, id, "_", .facetColumnsByColData), label=NULL,
                choices=covariates, selected=param_choices[[.facetColumnsByColData]])))
}
#' @rdname INTERNAL_add_facet_UI_elements
.add_facet_UI_elements_for_row_plots <- function(mode, id, param_choices, covariates) {
    facet_row_id <- paste0(mode, id, "_", .facetByRow)
    facet_col_id <- paste0(mode, id, "_", .facetByColumn)
    tagList(
        checkboxInput(
            facet_row_id, label="Facet by row",
            value=param_choices[, .facetByRow]),
        .conditional_on_check_solo(
            facet_row_id, on_select=TRUE,
            selectInput(
                paste0(mode, id, "_", .facetRowsByRowData), label=NULL,
                choices=covariates, selected=param_choices[[.facetRowsByRowData]])),
        checkboxInput(
            facet_col_id, label="Facet by column",
            value=param_choices[, .facetByColumn]),
        .conditional_on_check_solo(
            facet_col_id, on_select=TRUE,
            selectInput(
                paste0(mode, id, "_", .facetColumnsByRowData), label=NULL,
                choices=covariates, selected=param_choices[[.facetColumnsByRowData]])))
}
#' General visual parameters
#'
#' Create UI elements for selection of general visual parameters.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#'
#' @return
#' A HTML tag object containing visual parameter inputs.
#'
#' @details
#' This creates UI elements to choose the point opacity and the downsampling behaviour,
#' with the sampling resolution only shown when downsampling is enabled.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_add_visual_UI_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_visual_box_for_column_plots}},
#' \code{\link{.create_visual_box_for_row_plots}}
#'
#' @importFrom shiny tagList numericInput sliderInput hr checkboxInput
.add_point_UI_elements <- function(mode, id, param_choices) {
    downsample_id <- paste0(mode, id, "_", .plotPointDownsample)
    tagList(
        sliderInput(
            paste0(mode, id, "_", .plotPointAlpha), label="Point opacity",
            min=0.1, max=1, value=param_choices[, .plotPointAlpha]),
        hr(),
        checkboxInput(
            downsample_id, label="Downsample points for speed",
            value=param_choices[, .plotPointDownsample]),
        # Resolution is only meaningful when downsampling is switched on.
        .conditional_on_check_solo(
            downsample_id, on_select=TRUE,
            numericInput(
                paste0(mode, id, "_", .plotPointSampleRes), label="Sampling resolution:",
                min=1, value=param_choices[, .plotPointSampleRes])))
}
#' @rdname INTERNAL_add_visual_UI_elements
#' @importFrom shiny tagList radioButtons numericInput
.add_other_UI_elements <- function(mode, id, param_choices) {
    font_id <- paste0(mode, id, "_", .plotFontSize)
    legend_id <- paste0(mode, id, "_", .plotLegendPosition)
    tagList(
        numericInput(
            font_id, label="Font size:",
            min=0, value=param_choices[, .plotFontSize]),
        radioButtons(
            legend_id, label="Legend position:", inline=TRUE,
            choices=c(.plotLegendBottomTitle, .plotLegendRightTitle),
            selected=param_choices[, .plotLegendPosition]))
}
#' Point selection parameter box
#'
#' Create a point selection parameter box for all point-based plots.
#'
#' @param mode String specifying the encoded panel type of the current plot.
#' @param id Integer scalar specifying the index of a panel of the specified type, for the current plot.
#' @param param_choices A DataFrame with one row, containing the parameter choices for the current plot.
#' @param selectable A character vector of decoded names for available transmitting panels.
#' @param source_type Type of the panel that is source of the selection. Either \code{"row"} or \code{"column"}.
#' @param ... Additional arguments passed to \code{\link{collapseBox}}.
#' @param field Column name in the DataFrame of parameters choices for the current plot.
#'
#' @return
#' For \code{.create_selection_param_box} and \code{.create_selection_param_box_define_box},
#' a HTML tag object containing a \code{\link{collapseBox}} with UI elements for changing point selection parameters.
#'
#' For \code{.create_selection_param_box_define_choices}, a HTML tag object containing a \code{selectInput} for choosing the transmitting panels.
#'
#' @details
#' The \code{.create_selection_param_box} function creates a collapsible box that contains point selection options, initialized with the choices in \code{memory}.
#' Options include the choice of transmitting plot and the type of selection effect.
#' Each effect option, once selected, may yield a further subset of nested options.
#' For example, choosing to colour on the selected points will open up a choice of colour to use.
#'
#' The other two functions are helper functions that avoid re-writing related code in the \code{\link{.panel_generation}} function.
#' This is mostly for other panel types that take selections but do not follow the exact structure produced by \code{.create_selection_param_box}.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_create_selection_param_box
#' @seealso
#' \code{\link{.panel_generation}}
#'
#' @importFrom shiny sliderInput radioButtons selectInput
#' @importFrom colourpicker colourInput
.create_selection_param_box <- function(mode, id, param_choices, selectable, source_type=c("row", "column")) {
    source_type <- match.arg(source_type)
    effect_id <- paste0(mode, id, "_", .selectEffect)

    # Selector for the transmitting panel.
    transmitter_ui <- .create_selection_param_box_define_choices(
        mode, id, param_choices, field=.selectByPlot, selectable=selectable, source_type)

    .create_selection_param_box_define_box(
        mode, id, param_choices,
        transmitter_ui,
        radioButtons(
            effect_id, label="Selection effect:", inline=TRUE,
            choices=c(.selectRestrictTitle, .selectColorTitle, .selectTransTitle),
            selected=param_choices[[.selectEffect]]),
        # A colour picker only appears for the 'colour' effect...
        .conditional_on_radio(
            effect_id, .selectColorTitle,
            colourInput(
                paste0(mode, id, "_", .selectColor), label=NULL,
                value=param_choices[[.selectColor]])),
        # ...and a transparency slider only for the 'transparency' effect.
        .conditional_on_radio(
            effect_id, .selectTransTitle,
            sliderInput(
                paste0(mode, id, "_", .selectTransAlpha), label=NULL,
                min=0, max=1, value=param_choices[[.selectTransAlpha]])))
}
#' @rdname INTERNAL_create_selection_param_box
.create_selection_param_box_define_box <- function(mode, id, param_choices, ...) {
    # Wrap the supplied UI elements in a collapsible 'Selection parameters' box.
    box_id <- paste0(mode, id, "_", .selectParamBoxOpen)
    collapseBox(
        id=box_id,
        title="Selection parameters",
        open=param_choices[[.selectParamBoxOpen]],
        ...)
}
#' @rdname INTERNAL_create_selection_param_box
.create_selection_param_box_define_choices <- function(mode, id, param_choices, field, selectable, source_type=c("row", "column")) {
    # Build the drop-down of transmitting panels, defaulting to the stored
    # choice (or the sensible default chosen by .choose_link).
    input_id <- paste0(mode, id, "_", field)
    selectInput(
        input_id,
        label=sprintf("Receive %s selection from:", source_type),
        choices=selectable,
        selected=.choose_link(param_choices[[field]], selectable))
}
#' Conditional elements on radio or checkbox selection
#'
#' Creates a conditional UI element that appears upon a certain choice in a radio button or checkbox group selection.
#'
#' @param id String containing the id of the UI element for the radio buttons or checkbox group.
#' @param choice String containing the choice on which to show the conditional elements.
#' @param on_select Logical scalar specifying whether the conditional element should be shown upon selection in a check box, or upon de-selection (if \code{FALSE}).
#' @param ... UI elements to show conditionally.
#'
#' @return
#' A HTML object containing elements that only appear when \code{choice} is selected in the UI element for \code{id}.
#'
#' @details
#' This function is useful for hiding options that are irrelevant when a different radio button is selected, or when the corresponding checkbox element is unselected.
#' In this manner, we can avoid cluttering the UI.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_conditional_elements
#' @seealso
#' \code{\link{.panel_generation}},
#' \code{\link{.create_selection_param_box}},
#' \code{\link{.create_visual_box_for_row_plots}},
#' \code{\link{.create_visual_box_for_column_plots}}
#'
#' @importFrom shiny conditionalPanel
.conditional_on_radio <- function(id, choice, ...) {
    # The condition is a JavaScript expression evaluated client-side.
    js_condition <- sprintf('(input["%s"] == "%s")', id, choice)
    conditionalPanel(condition=js_condition, ...)
}
#' @rdname INTERNAL_conditional_elements
#' @importFrom shiny conditionalPanel
.conditional_on_check_solo <- function(id, on_select=TRUE, ...) {
    # 'ifelse()' is vectorized and inappropriate for a scalar flag;
    # a plain if/else picks the JavaScript boolean literal to compare against.
    choice <- if (on_select) 'true' else 'false'
    conditionalPanel(condition=sprintf('(input["%s"] == %s)', id, choice), ...)
}
#' @rdname INTERNAL_conditional_elements
#' @importFrom shiny conditionalPanel
.conditional_on_check_group <- function(id, choice, ...) {
    # Show the elements whenever 'choice' is among the ticked boxes
    # (JavaScript Array.includes on the checkbox group's value).
    js_condition <- sprintf('(input["%s"].includes("%s"))', id, choice)
    conditionalPanel(condition=js_condition, ...)
}
#' Coerce box status to custom classes
#'
#' Coerce the status of a \code{shinydashboard::box} to use a custom \pkg{iSEE} class.
#'
#' @param in_box A HTML tag object corresponding to a \code{box} object from the \pkg{shinydashboard} package.
#' @param mode String specifying the encoded panel type of the current plot.
#' @param old_status String specifying the current status of the \code{box}, to be replaced by \code{mode}.
#'
#' @return A modified \code{in_box} where the status is changed from \code{old_status} to \code{mode}.
#'
#' @details
#' The \code{\link[shinydashboard]{box}} function does not allow use of custom statuses.
#' As a result, we generate the box using the \code{"danger"} status, and replace it afterwards with our custom status.
#' This gives us full control over the box colours, necessary for proper colour-coding of each panel type.
#'
#' Note that the boxes from \pkg{shinydashboard} are used to enclose each plot/table panel in the \code{iSEE} app.
#' They do \emph{not} represent the parameter boxes, which are instead enclosed in Bootstrap panels (see \code{\link{collapseBox}}).
#'
#' @author Aaron Lun
#' @rdname INTERNAL_coerce_box_status
#' @seealso
#' \code{\link{.panel_organization}},
#' \code{\link{.panel_generation}}
.coerce_box_status <- function(in_box, mode, old_status="danger") {
    # Swap the 'box-<old_status>' CSS class on the box's outer tag
    # for the lower-cased panel mode.
    current_class <- in_box$children[[1]]$attribs$class
    in_box$children[[1]]$attribs$class <- sub(
        paste0("box-", old_status),
        paste0("box-", tolower(mode)),
        current_class)
    in_box
}
# Inline CSS applied to action buttons: white text on a teal background
# (matching the Bioconductor palette), with a blue border.
.actionbutton_biocstyle <- "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
#' Precompute UI information
#'
#' Precompute information to be shown in the UI and store it in the internal metadata of a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment object.
#' @param data_fun_list A named list of custom plotting functions.
#' @param stat_fun_list A named list of custom statistics functions.
#'
#' @details
#' Precomputed information includes:
#' \itemize{
#' \item Unique-ified selectize choices, to avoid problems with selecting between different unnamed assays, samples or reduced dimension results.
#' \item The names of discrete metadata fields, for use in restricting choices for faceting.
#' \item A list of the custom data plot functions supplied to the \code{\link{iSEE}} function.
#' \item A list of the custom statistics table functions supplied to the \code{\link{iSEE}} function.
#' }
#'
#' Storage in the internal metadata allows us to pass a single argument to various UI functions and for them to extract out the relevant fields.
#' This avoids creating functions with many different arguments, which would be difficult to maintain.
#'
#' @author Aaron Lun
#'
#' @return A SingleCellExperiment with values stored in an \code{iSEE} field in the internal metadata.
#'
#' @seealso
#' \code{\link{.which_groupable}},
#' \code{\link{.sanitize_names}},
#' \code{\link{.get_internal_info}}
#' @rdname INTERNAL_precompute_UI_info
#' @importFrom SingleCellExperiment int_metadata
.precompute_UI_info <- function(se, data_fun_list, stat_fun_list) {
    out <- list(
        # Names of categorical metadata fields, used to restrict shape/facet choices.
        column_groupable=colnames(colData(se))[.which_groupable(colData(se))],
        row_groupable=colnames(rowData(se))[.which_groupable(rowData(se))],
        # Names of numeric metadata fields, used to restrict sizing choices.
        column_numeric=colnames(colData(se))[.which_numeric(colData(se))],
        row_numeric=colnames(rowData(se))[.which_numeric(rowData(se))],
        # Unique-ified selectize choices: named integer indices (see .sanitize_names).
        all_assays=.sanitize_names(assayNames(se)),
        red_dim_names=.sanitize_names(reducedDimNames(se)),
        sample_names=.sanitize_names(colnames(se)),
        custom_data_fun=data_fun_list,
        custom_stat_fun=stat_fun_list
    )
    # Fallback labels for unnamed samples.
    # NOTE(review): this branch yields a plain character vector, unlike the
    # named integer vector produced by .sanitize_names() above — confirm that
    # downstream consumers of 'sample_names' handle both representations.
    if (is.null(colnames(se))) {
        out$sample_names <- sprintf("Sample %i", seq_len(ncol(se)))
    }
    int_metadata(se)$iSEE <- out
    return(se)
}
#' Sanitize names
#'
#' Convert a vector of names into a named integer vector of indices.
#'
#' @param raw_names A character vector of names.
#'
#' @return
#' An integer vector of \code{seq_along(raw_names)}, with names based on \code{raw_names}.
#'
#' @details
#' This function protects against non-unique names by converting them to integer indices, which can be used for indexing within the function.
#' The names are also made unique for display to the user by prefixing them with \code{(<index>)}.
#'
#' @author Kevin Rue-Albrecht, Aaron Lun
#' @rdname INTERNAL_sanitize_names
#' @seealso
#' \code{\link{.panel_generation}}
.sanitize_names <- function(raw_names) {
    # Index positions, displayed with a "(<index>)" prefix for uniqueness.
    idx <- seq_along(raw_names)
    setNames(idx, sprintf("(%i) %s", idx, raw_names))
}
#' Extract internal information
#'
#' Extracts the requested fields from the internal metadata field of a SingleCellExperiment object.
#'
#' @param se A SingleCellExperiment.
#' @param field A string specifying the field to extract.
#' @param empty_fail Logical scalar indicating whether an error should be raised when no internal info is present.
#'
#' @details This function is only safe to run \emph{after} \code{\link{.precompute_UI_info}} has been called.
#' As such, \code{empty_fail} is set to \code{TRUE} to catch any possible instances of unsafe execution.
#' If you turn this off, you should ensure that the surrounding code will recompute any fields when the returned value is \code{NULL}.
#'
#' @return The value of \code{field} in the internal metadata of \code{se}.
#'
#' @author Aaron Lun
#'
#' @seealso \code{\link{.precompute_UI_info}}
#' @rdname INTERNAL_get_internal_info
#' @importFrom SingleCellExperiment int_metadata
.get_internal_info <- function(se, field, empty_fail=TRUE) {
    cached <- int_metadata(se)$iSEE
    # Guard against use before .precompute_UI_info has populated the cache.
    if (empty_fail && is.null(cached)) {
        stop("no internal metadata in 'se'")
    }
    cached[[field]]
}
|
####################################################
# METAPOPULATION MODEL FOR INFECTION DYNAMICS
# WITH ANNUAL BIRTH PULSES
# DENSITY-DEPENDENT DEATH RATE
# AND MATERNAL TRANSFER OF ANTIBODIES
# -------------------------------------------
# ANALYSIS OF RESULTS FROM SIMULATION SERIES 3
####################################################
# In this series, simulations were started with 5 infected cases and R0=4, hence a low probability of initial fade-out
source('Models/Metapop DDD MSIR adaptivetau.R')
load('Outputs/DDD_MSIR_Stoch_Series_3.RData')
library(dplyr)
library(tidyr)
library(ggplot2)
# --------------- Analyse extinctions ----------------------------
# Record summary statistics. Each column of the simulation matrix is one
# parameter set; finite entries are extinction times of individual replicates.
n.sim <- nrow(DDD.MSIR.stoch.series.3)
# Proportion of replicates that went extinct (finite extinction time).
p.ext <- apply(DDD.MSIR.stoch.series.3, 2, function(x) mean(is.finite(x)))
# Mean/median extinction time per parameter set.
# (na.rm=TRUE, not the reassignable shorthand T.)
t.ext.mean <- apply(DDD.MSIR.stoch.series.3, 2, mean, na.rm=TRUE)
t.ext.median <- apply(DDD.MSIR.stoch.series.3, 2, median, na.rm=TRUE)
range(p.ext)
range(DDD.MSIR.stoch.series.3, na.rm=TRUE)
# Density plot of time to extinction from first series
plot(density(DDD.MSIR.stoch.series.3[, 1], na.rm=TRUE, kernel="biweight"))
# Complete dataframe: parameter grid plus the per-column extinction summaries,
# with MP.m the duration of MAb protection expressed in months.
MSIR.series.ext <- cbind(
    par.try,
    p.ext=p.ext, t.ext.mean=t.ext.mean, t.ext.median=t.ext.median)
MSIR.series.ext <- MSIR.series.ext %>% mutate(MP.m=round(12*rho*MP))
# Heatmap for P(ext) - MP vs K
MSIR.series.ext %>%
    ggplot(aes(factor(MP.m), factor(K))) +
    geom_tile(aes(fill=p.ext)) +
    facet_grid(~s, labeller=label_both) +
    labs(fill='P(extinction)') +
    scale_fill_gradient2(low=rgb(1,1,1), mid=rgb(1,1,0), high=rgb(1,0,0), midpoint=0.5) +
    xlab("Duration of MAb protection (months)") +
    ylab("Population size") +
    theme(text=element_text(size=18))
# s vs K
MSIR.series.ext %>%
    ggplot(aes(factor(s), factor(K))) +
    geom_tile(aes(fill=p.ext)) +
    facet_grid(~MP.m, labeller=label_both) +
    labs(fill='P(extinction)') +
    scale_fill_gradient2(low=rgb(1,1,1), mid=rgb(1,1,0), high=rgb(1,0,0), midpoint=0.5) +
    xlab("Tightness of birth pulse") +
    ylab("Population size") +
    theme(text=element_text(size=18))
# Contour plot
MSIR.series.ext %>%
    ggplot(aes(log10(1+s), log10(K))) +
    geom_contour(aes(z=p.ext)) +
    facet_grid(~MP.m, labeller=label_both) +
    labs(fill='P(extinction)') +
    xlab("Tightness of birth pulse") +
    ylab("Population size") +
    theme(text=element_text(size=18))
# Heatmap for <T(ext)> - MP vs K
MSIR.series.ext %>%
    ggplot(aes(factor(MP.m), factor(K))) +
    geom_tile(aes(fill=t.ext.mean)) +
    facet_grid(~s, labeller=label_both) +
    labs(fill='T(extinction)') +
    scale_fill_continuous(low=rgb(0.9,0,0.5), high=rgb(0,0.9,0.5)) +
    xlab("Duration of MAb protection (months)") +
    ylab("Population size") +
    theme(text=element_text(size=18))
# Bubble plot combining P(ext) and <T(ext)> - s vs K
MSIR.series.ext %>%
    ggplot(aes(factor(s), factor(K))) +
    geom_point(aes(col=t.ext.mean, size=p.ext)) +
    facet_grid(~MP.m, labeller=label_both) +
    labs(fill='T(extinction)') +
    scale_color_continuous(low=rgb(1,1,0.2), high=rgb(0,0,0.8)) +
    xlab("Tightness of birth pulse") +
    ylab("Population size") +
    theme(text=element_text(size=18)) +
    scale_size(range = c(2,10))
# ----------- Estimate CCS --------------
library(MASS)
# Fit a binomial glm of persistence ~ log10(K) within each (s, MP.m) series,
# then invert it with MASS::dose.p() to estimate the population size at which
# half of the simulations persist (the CCS_50).
MSIR.series.CCS <- MSIR.series.ext %>%
    group_by(s, MP.m) %>%
    mutate(N.ext = round(p.ext*n.sim), N.per = round((1-p.ext)*n.sim)) %>%
    summarise(CCS = as.numeric(10^dose.p(glm(cbind(N.per,N.ext)~log10(K), binomial))))
MSIR.series.CCS %>%
    ggplot(aes(factor(s), factor(MP.m))) +
    geom_tile(aes(fill=CCS)) +
    scale_fill_gradient(high=rgb(1,1,0.5), low=rgb(0.5,0,0))
# ================================================ DETERMINISTIC DYNAMICS ===============================
# load('Outputs/DDD_MSIR_Series_3_ODE.RData')
# Plot four representative ODE trajectories on a 2x2 grid, contrasting
# pulsed (s=100) vs continuous (s=0) births, with and without MAb transfer (rho).
par(mfrow=c(2,2))
sel.1 <- which(ode.par.try$s==0 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[1] & ode.par.try$rho==0)
# Fix: a stray trailing 'x' on this call was a syntax error and has been removed.
matplot(ode.DDD.MSIR.series.3[[sel.1]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.1,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.2 <- which(ode.par.try$s==100 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[1] & ode.par.try$rho==0)
matplot(ode.DDD.MSIR.series.3[[sel.2]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.2,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.3 <- which(ode.par.try$s==0 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[3] & ode.par.try$rho==1)
matplot(ode.DDD.MSIR.series.3[[sel.3]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.3,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.4 <- which(ode.par.try$s==100 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[3] & ode.par.try$rho==1)
matplot(ode.DDD.MSIR.series.3[[sel.4]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.4,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
| /MSIR/Outputs/DDD MSIR Series 3 stoch Analysis.R | no_license | orestif/metapopulation | R | false | false | 5,041 | r | ####################################################
# METAPOPULATION MODEL FOR INFECTION DYNAMICS
# WITH ANNUAL BIRTH PULSES
# DENSITY-DEPENDENT DEATH RATE
# AND MATERNAL TRANSFER OF ANTIBODIES
# -------------------------------------------
# ANALYSIS OF RESULTS FROM SIMULATION SERIES 3
####################################################
# In this series, simulations were started with 5 infected case and R0=4, hence a low probability of initial fade-out
source('Models/Metapop DDD MSIR adaptivetau.R')
load('Outputs/DDD_MSIR_Stoch_Series_3.RData')
library(dplyr)
library(tidyr)
library(ggplot2)
# --------------- Analyse extinctions ----------------------------
# Record summary statistics.  Each column of DDD.MSIR.stoch.series.3 holds the
# extinction times of one parameter set (NA/Inf = no extinction observed).
n.sim <- nrow(DDD.MSIR.stoch.series.3)
p.ext <- apply(DDD.MSIR.stoch.series.3,2, function(x) length(which(is.finite(x)))/n.sim)
# FIX: spell out TRUE instead of T (T is an ordinary reassignable binding).
t.ext.mean <- apply(DDD.MSIR.stoch.series.3,2, mean, na.rm=TRUE)
t.ext.median <- apply(DDD.MSIR.stoch.series.3,2, median, na.rm=TRUE)
range(p.ext)
range(DDD.MSIR.stoch.series.3,na.rm = TRUE)
# Density plot of time to extinction from first series
plot(density(DDD.MSIR.stoch.series.3[,1],na.rm=TRUE,kernel = "biweight"))
# Complete Dataframe
MSIR.series.ext <- cbind(par.try,p.ext=p.ext,t.ext.mean=t.ext.mean,t.ext.median=t.ext.median)
MSIR.series.ext <- MSIR.series.ext %>% mutate(MP.m=round(12*rho*MP))
# Heatmap for P(ext) - MP vs K
MSIR.series.ext %>% ggplot(aes(factor(MP.m),factor(K))) + geom_tile(aes(fill=p.ext)) + facet_grid(~s, labeller=label_both) + labs(fill='P(extinction)') + scale_fill_gradient2(low=rgb(1,1,1),mid=rgb(1,1,0),high=rgb(1,0,0),midpoint=0.5) + xlab("Duration of MAb protection (months)") + ylab("Population size") + theme(text=element_text(size=18))
# s vs K
MSIR.series.ext %>% ggplot(aes(factor(s),factor(K))) + geom_tile(aes(fill=p.ext)) + facet_grid(~MP.m, labeller=label_both) + labs(fill='P(extinction)') + scale_fill_gradient2(low=rgb(1,1,1),mid=rgb(1,1,0),high=rgb(1,0,0),midpoint=0.5) + xlab("Tightness of birth pulse") + ylab("Population size") + theme(text=element_text(size=18))
# Contour plot
MSIR.series.ext %>% ggplot(aes(log10(1+s),log10(K))) + geom_contour(aes(z=p.ext)) + facet_grid(~MP.m, labeller=label_both) + labs(fill='P(extinction)') + xlab("Tightness of birth pulse") + ylab("Population size") + theme(text=element_text(size=18))
# Heatmap for <T(ext)> - MP vs K
MSIR.series.ext %>% ggplot(aes(factor(MP.m),factor(K))) + geom_tile(aes(fill=t.ext.mean)) + facet_grid(~s, labeller=label_both) + labs(fill='T(extinction)') + scale_fill_continuous(low=rgb(0.9,0,0.5),high=rgb(0,0.9,0.5)) + xlab("Duration of MAb protection (months)") + ylab("Population size") + theme(text=element_text(size=18))
# Bubble plot combining P(ext) and <T(ext)> - s vs K
MSIR.series.ext %>% ggplot(aes(factor(s),factor(K))) + geom_point(aes(col=t.ext.mean,size=p.ext)) + facet_grid(~MP.m, labeller=label_both) + labs(fill='T(extinction)') + scale_color_continuous(low=rgb(1,1,0.2),high=rgb(0,0,0.8)) + xlab("Tightness of birth pulse") + ylab("Population size") + theme(text=element_text(size=18)) + scale_size(range = c(2,10))
# ----------- Estimate CCS --------------
library(MASS)
# Fit a binomial glm to extinctions ~ log10(K) to each series, and use MASS::dose.p() to estimate the corresponding CCS_50.
MSIR.series.CCS <- MSIR.series.ext %>% group_by(s,MP.m) %>% mutate(N.ext = round(p.ext*n.sim), N.per = round((1-p.ext)*n.sim)) %>% summarise(CCS = as.numeric(10^dose.p(glm(cbind(N.per,N.ext)~log10(K),binomial))))
MSIR.series.CCS %>% ggplot(aes(factor(s),factor(MP.m))) + geom_tile(aes(fill=CCS)) + scale_fill_gradient(high=rgb(1,1,0.5),low=rgb(0.5,0,0))
# ================================================ DETERMINISTIC DYNAMICS ===============================
# load('Outputs/DDD_MSIR_Series_3_ODE.RData')
par(mfrow=c(2,2))
sel.1 <- which(ode.par.try$s==0 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[1] & ode.par.try$rho==0)
# FIX: removed stray trailing "x" after the matplot() call below - it was a syntax error that stopped the script here.
matplot(ode.DDD.MSIR.series.3[[sel.1]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.1,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.2 <- which(ode.par.try$s==100 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[1] & ode.par.try$rho==0)
matplot(ode.DDD.MSIR.series.3[[sel.2]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.2,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.3 <- which(ode.par.try$s==0 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[3] & ode.par.try$rho==1)
matplot(ode.DDD.MSIR.series.3[[sel.3]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.3,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
sel.4 <- which(ode.par.try$s==100 & ode.par.try$IP==ode.par.list$IP[1] & ode.par.try$MP==ode.par.list$MP[3] & ode.par.try$rho==1)
matplot(ode.DDD.MSIR.series.3[[sel.4]],lwd=2,main=paste(names(ode.par.try),round(as.numeric(ode.par.try[sel.4,]),3),sep="=",collapse=", "),log="y",ylim=c(1,1000),xlim=c(0,10))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_list_workflow_step_executions}
\alias{imagebuilder_list_workflow_step_executions}
\title{Shows runtime data for each step in a runtime instance of the workflow
that you specify in the request}
\usage{
imagebuilder_list_workflow_step_executions(
maxResults = NULL,
nextToken = NULL,
workflowExecutionId
)
}
\arguments{
\item{maxResults}{The maximum items to return in a request.}
\item{nextToken}{A token to specify where to start paginating. This is the NextToken from
a previously truncated response.}
\item{workflowExecutionId}{[required] The unique identifier that Image Builder assigned to keep track of
runtime details when it ran the workflow.}
}
\description{
Shows runtime data for each step in a runtime instance of the workflow that you specify in the request.
See \url{https://www.paws-r-sdk.com/docs/imagebuilder_list_workflow_step_executions/} for full documentation.
}
\keyword{internal}
| /cran/paws.compute/man/imagebuilder_list_workflow_step_executions.Rd | permissive | paws-r/paws | R | false | true | 1,034 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_list_workflow_step_executions}
\alias{imagebuilder_list_workflow_step_executions}
\title{Shows runtime data for each step in a runtime instance of the workflow
that you specify in the request}
\usage{
imagebuilder_list_workflow_step_executions(
maxResults = NULL,
nextToken = NULL,
workflowExecutionId
)
}
\arguments{
\item{maxResults}{The maximum items to return in a request.}
\item{nextToken}{A token to specify where to start paginating. This is the NextToken from
a previously truncated response.}
\item{workflowExecutionId}{[required] The unique identifier that Image Builder assigned to keep track of
runtime details when it ran the workflow.}
}
\description{
Shows runtime data for each step in a runtime instance of the workflow that you specify in the request.
See \url{https://www.paws-r-sdk.com/docs/imagebuilder_list_workflow_step_executions/} for full documentation.
}
\keyword{internal}
|
# ---------------------------------------------------------------------------
# cv.coxsplsDR: k-fold cross-validation of sparse-PLS deviance-residual Cox
# models fitted by coxsplsDR(), evaluated for 0..nt components.
#
# Arguments:
#   data        list with components x (covariate matrix), time, status.
#   method      tie handling passed to logplik()/coxph()/cph():
#               "efron" (default) or "breslow".
#   nfold       number of CV folds.
#   nt          maximum number of PLS components tried (rows 0..nt).
#   eta         sparsity parameter forwarded to coxsplsDR().
#   plot.it     plot each CV criterion vs. number of components.
#   se          add +/- 1 standard-error bars to the plots.
#   givefold    optional pre-made list of fold index vectors; random folds
#               are drawn when missing.
#   scaleX, scaleY  scaling flags (NOTE(review): the per-fold coxsplsDR()
#               call below hard-codes scaleX=TRUE, scaleY=FALSE, so these
#               two arguments are currently ignored - confirm intent).
#   folddetails also return the per-fold error matrices.
#   allCVcrit   compute all 14 criteria; otherwise only criterion 10
#               (iAUC_survivalROC.test).
#   details     additionally return the indicator set of the last fold.
#   namedataset tag used to build per-fold cache file names.
#   save        cache each fold's fit to "<tag>_folds_<i>_eta_<eta>.RData".
#   verbose     print a progress message per fold.
#
# Value (invisible list): per criterion K the mean CV curve cv.errorK, its
# standard error cv.seK, the best component count lambda.minK and the
# 1-standard-error choice lambda.1seK, plus folds and nzb (cumulative count
# of variables selected per component).
#
# Criteria 1-2 are (minus) cross-validated log-partial-likelihoods, 3-10 are
# integrated AUCs, 11-14 integrated prediction errors; signCVerror flips the
# sign so that getmin2() can always minimise.
# ---------------------------------------------------------------------------
cv.coxsplsDR =
function (data, method = c("efron", "breslow"), nfold = 5, nt = 10, eta = .5, plot.it = TRUE,
se = TRUE, givefold, scaleX = TRUE, scaleY = FALSE, folddetails = FALSE, allCVcrit=FALSE,
details=FALSE, namedataset="data", save=FALSE, verbose=TRUE,...)
{
  # Attach survival and rms namespaces; rms is detached again on exit.
  try(attachNamespace("survival"),silent=TRUE)
  #on.exit(try(unloadNamespace("survival"),silent=TRUE))
  try(attachNamespace("rms"),silent=TRUE)
  on.exit(try(unloadNamespace("rms"),silent=TRUE))
  # One result holder per criterion (filled later through assign()/get()).
  cv.error1<-NULL;cv.error2<-NULL;cv.error3<-NULL;cv.error4<-NULL;cv.error5<-NULL;cv.error6<-NULL;cv.error7<-NULL;cv.error8<-NULL;cv.error9<-NULL;cv.error10<-NULL;cv.error11<-NULL;cv.error12<-NULL;cv.error13<-NULL;cv.error14<-NULL
  cv.se1<-NULL;cv.se2<-NULL;cv.se3<-NULL;cv.se4<-NULL;cv.se5<-NULL;cv.se6<-NULL;cv.se7<-NULL;cv.se8<-NULL;cv.se9<-NULL;cv.se10<-NULL;cv.se11<-NULL;cv.se12<-NULL;cv.se13<-NULL;cv.se14<-NULL
  lamin1<-NULL;lamin2<-NULL;lamin3<-NULL;lamin4<-NULL;lamin5<-NULL;lamin6<-NULL;lamin7<-NULL;lamin8<-NULL;lamin9<-NULL;lamin10<-NULL;lamin11<-NULL;lamin12<-NULL;lamin13<-NULL;lamin14<-NULL
  completed.cv1<-NULL;completed.cv2<-NULL;completed.cv3<-NULL;completed.cv4<-NULL;completed.cv5<-NULL;completed.cv6<-NULL;completed.cv7<-NULL;completed.cv8<-NULL;completed.cv9<-NULL;completed.cv10<-NULL;completed.cv11<-NULL;completed.cv12<-NULL;completed.cv13<-NULL;completed.cv14<-NULL
  method <- match.arg(method)
  x <- data$x
  time <- data$time
  status <- data$status
  n <- length(time)
  # Build (or accept) the fold partition of the n observations.
  if(missing(givefold)){
  folds <- split(sample(seq(n)), rep(1:nfold, length = n))} else
  {
  folds <- givefold
  }
  # Labels / orientation for the 14 CV criteria.
  number_ind = 14
  titlesCV = c("Cross-validated log-partial-likelihood","van Houwelingen Cross-validated log-partial-likelihood","iAUC_CD","iAUC_hc","iAUC_sh","iAUC_Uno","iAUC_hz.train","iAUC_hz.test","iAUC_survivalROC.train","iAUC_survivalROC.test","iBrierScore unw","iSchmidScore (robust BS) unw","iBrierScore w","iSchmidScore (robust BS) w")
  ylabsCV = c(rep("Minus log-partial-likelihood",2),rep("iAUC",8),rep("Prediction Error",4))
  xlabsCV = c(rep("nbr of components",14))
  signCVerror = c(rep(1,2),rep(-1,8),rep(1,4))
  show_nbr_var = TRUE
  # errormatK: (nt+1) x nfold matrix, rows = 0..nt components, cols = folds.
  for(ind in 1:number_ind) {
  assign(paste("errormat",ind,sep=""),matrix(NA, nt+1, nfold))
  }
  # ----------------------- Cross-validation loop --------------------------
  for (i in seq(nfold)) {
  # predK: per-criterion vector over 0..nt components for this fold.
  for(ind in 1:number_ind) {
  assign(paste("pred",ind,sep=""),rep(NA, nt+1))
  }
  omit <- folds[[i]]
  trdata <- list(x = x[-omit, ], time = time[-omit], status = status[-omit])
  tsdata <- list(x = x[omit, ], time = time[omit], status = status[omit])
  # Fit the sPLS-DR model on the training fold, or reload it from the
  # per-fold .RData cache if one already exists in the working directory.
  if(!file.exists(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))){
  assign(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""),coxsplsDR(Xplan=trdata$x, time=trdata$time, event=trdata$status, ncomp=nt, eta=eta, allres=TRUE, scaleX=TRUE, scaleY=FALSE, verbose=verbose, ...))
  if(save){save(list=c(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep="")),file=paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))}
  } else {
  load(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))
  }
  # Cox coefficients per component count, and cumulative number of
  # selected variables (nzb) shown on the plots' top axis.
  coeffit <- get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$CoeffCFull
  nzb <- cumsum(c(0,sapply(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_mod$new2As,length)))
  # ------------------ Loop over the number of components jj ---------------
  for(jj in 1:nt){
  Avalues <- get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_mod$A
  # Component scores of the test fold (newxdata), the training fold
  # (oldxdata) and the whole data set (allxdata), restricted to the
  # selected variables and rescaled with the training centring/scaling.
  newxdata=(predict.pls.cox(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_modplsr,
  newdata=scale((tsdata$x)[,Avalues],
  (get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanCent)[Avalues],
  (get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanScal)[Avalues]),
  scale.X=FALSE,scale.Y=FALSE)$variates)[,1:jj,drop=FALSE]
  oldxdata=(as.matrix(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$tt_splsDR))[,1:jj,drop=FALSE]
  allxdata=(predict.pls.cox(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_modplsr,
  newdata=scale(x[,Avalues],
  (get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanCent)[Avalues],
  (get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanScal)[Avalues]),
  scale.X=FALSE,scale.Y=FALSE)$variates)[,1:jj,drop=FALSE]
  # On the first pass also score the null model (0 components, b = 0):
  # fills row 1 (index [1]) of every predK vector.
  if(jj==1){
  pred1[1] <- logplik(x=newxdata[,1,drop=FALSE], time=tsdata$time, status=tsdata$status, b=matrix(0), method = method,return.all = FALSE) #"efron" match with loglik of coxph
  plfull <- logplik(x=allxdata[,1,drop=FALSE], time=time, status=status, b=matrix(0), method = method,return.all = FALSE) #"efron"
  plminusk <- logplik(x=oldxdata[,1,drop=FALSE], time=trdata$time, status=trdata$status, b=matrix(0), method = method,return.all = FALSE) #"efron"
  # van Houwelingen criterion: full-data likelihood minus training one.
  pred2[1] = plfull - plminusk
  # Null linear predictor for all observations.
  Xlp <- rep(0,length(time))
  assign(paste("dataset_",namedataset,"_",0,sep=""),as.data.frame(cbind(time=time,status=status,Xlp=Xlp)))
  TR <- get(paste("dataset_",namedataset,"_",0,sep=""))[-omit,]
  TE <- get(paste("dataset_",namedataset,"_",0,sep=""))[omit,]
  survival.time <- get(paste("dataset_",namedataset,"_",0,sep=""))[,"time"]
  survival.status <- get(paste("dataset_",namedataset,"_",0,sep=""))[,"status"]
  tr.survival.time <- trdata$time
  tr.survival.status <- trdata$status
  te.survival.time <- tsdata$time
  te.survival.status <- tsdata$status
  #require(survival)
  Surv.rsp <- Surv(tr.survival.time, tr.survival.status)
  Surv.rsp.new <- Surv(te.survival.time, te.survival.status)
  # iter.max=0, init=1 freezes the coefficient at 1 so Xlp is used as-is
  # (an offset-like trick rather than a refitted predictor).
  train.fit <- coxph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1)
  #library(rms)
  train.fit.cph <- cph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1)
  lp <- predict(train.fit)
  lpnew <- predict(train.fit, newdata=TE)
  # getIndicCV / getIndicCViAUCSurvROCTest compute the time-dependent AUC
  # and prediction-error criteria (helpers defined elsewhere in plsRcox).
  if(allCVcrit){
  AUCs <- getIndicCV(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
  pred3[1] = AUCs$AUC_CD$iauc
  pred4[1] = AUCs$AUC_hc$iauc
  pred5[1] = AUCs$AUC_sh$iauc
  pred6[1] = AUCs$AUC_Uno$iauc
  pred7[1] = AUCs$AUC_hz.train$iauc
  pred8[1] = AUCs$AUC_hz.test$iauc
  pred9[1] = AUCs$AUC_survivalROC.train$iauc
  pred10[1] = AUCs$AUC_survivalROC.test$iauc
  pred11[1] = AUCs$prederr$brier.unw$ierror
  pred12[1] = AUCs$prederr$robust.unw$ierror
  pred13[1] = AUCs$prederr$brier.w$ierror
  pred14[1] = AUCs$prederr$robust.w$ierror
  } else {
  # Fast path: only iAUC_survivalROC.test (criterion 10) is computed.
  AUCs <- getIndicCViAUCSurvROCTest(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
  pred3[1] = NA
  pred4[1] = NA
  pred5[1] = NA
  pred6[1] = NA
  pred7[1] = NA
  pred8[1] = NA
  pred9[1] = NA
  pred10[1] = AUCs$AUC_survivalROC.test$iauc
  pred11[1] = NA
  pred12[1] = NA
  pred13[1] = NA
  pred14[1] = NA
  }
  }
  # Criteria for the jj-component model (row jj+1 of each predK).
  pred1[jj+1] <- logplik(x=newxdata[,1:jj,drop=FALSE], time=tsdata$time, status=tsdata$status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron" match with loglik of coxph
  plfull <- logplik(x=allxdata[,1:jj,drop=FALSE], time=time, status=status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron"
  plminusk <- logplik(x=oldxdata[,1:jj,drop=FALSE], time=trdata$time, status=trdata$status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron"
  pred2[jj+1] = plfull - plminusk
  # Linear predictors from the jj-component coefficients for training
  # and test observations, merged into a full-length Xlp vector.
  predict.trainvectjj <- oldxdata%*%(coeffit[1:jj,jj,drop=FALSE])
  predictvectjj <- newxdata%*%(coeffit[1:jj,jj,drop=FALSE])
  Xlp <- rep(NA,length(time))
  Xlp[-omit] <- predict.trainvectjj
  Xlp[omit] <- predictvectjj
  assign(paste("dataset_",namedataset,"_",jj,sep=""),as.data.frame(cbind(time=time,status=status,Xlp=Xlp)))
  TR <- get(paste("dataset_",namedataset,"_",jj,sep=""))[-omit,]
  TE <- get(paste("dataset_",namedataset,"_",jj,sep=""))[omit,]
  survival.time <- get(paste("dataset_",namedataset,"_",jj,sep=""))[,"time"]
  survival.status <- get(paste("dataset_",namedataset,"_",jj,sep=""))[,"status"]
  tr.survival.time <- trdata$time
  tr.survival.status <- trdata$status
  te.survival.time <- tsdata$time
  te.survival.status <- tsdata$status
  #require(survival)
  Surv.rsp <- Surv(tr.survival.time, tr.survival.status)
  Surv.rsp.new <- Surv(te.survival.time, te.survival.status)
  train.fit <- coxph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1) #offset
  #library(rms)
  train.fit.cph <- cph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1) #offset
  lp <- predict(train.fit)
  lpnew <- predict(train.fit, newdata=TE)
  if(allCVcrit){
  AUCs <- getIndicCV(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
  pred3[jj+1] = AUCs$AUC_CD$iauc
  pred4[jj+1] = AUCs$AUC_hc$iauc
  pred5[jj+1] = AUCs$AUC_sh$iauc
  pred6[jj+1] = AUCs$AUC_Uno$iauc
  pred7[jj+1] = AUCs$AUC_hz.train$iauc
  pred8[jj+1] = AUCs$AUC_hz.test$iauc
  pred9[jj+1] = AUCs$AUC_survivalROC.train$iauc
  pred10[jj+1] = AUCs$AUC_survivalROC.test$iauc
  pred11[jj+1] = AUCs$prederr$brier.unw$ierror
  pred12[jj+1] = AUCs$prederr$robust.unw$ierror
  pred13[jj+1] = AUCs$prederr$brier.w$ierror
  pred14[jj+1] = AUCs$prederr$robust.w$ierror
  } else {
  AUCs <- getIndicCViAUCSurvROCTest(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
  pred3[jj+1] = NA
  pred4[jj+1] = NA
  pred5[jj+1] = NA
  pred6[jj+1] = NA
  pred7[jj+1] = NA
  pred8[jj+1] = NA
  pred9[jj+1] = NA
  pred10[jj+1] = AUCs$AUC_survivalROC.test$iauc
  pred11[jj+1] = NA
  pred12[jj+1] = NA
  pred13[jj+1] = NA
  pred14[jj+1] = NA
  }
  }
  # Null model's iAUC defaults to 0.5 (random discrimination) if undefined.
  # if(allCVcrit){
  if(is.na(pred10[1])){pred10[1]<-.5}
  # }
  # A single-observation fold collapses predK to a scalar; keep it a matrix.
  if (length(omit) == 1){
  for(ind in 1:number_ind) {
  assign(paste("pred",ind,sep=""),matrix(get(paste("pred",ind,sep="")), nrow = 1))
  }
  }
  #if(any(is.na(pred10))){save(list=c("pred10"),file=paste(Predspath,"/failed.fold.cv.",typemodel,"_",namedataset,"_folds_",i,".RData",sep=""))}
  # Store this fold's column: likelihood criteria are negated and averaged
  # per test observation; non-finite values become NA.
  if(allCVcrit){
  errormat1[, i] <- ifelse(is.finite(pred1),-pred1/length(omit),NA)
  errormat2[, i] <- ifelse(is.finite(pred2),-pred2/length(omit),NA)
  errormat3[, i] <- ifelse(is.finite(pred3),pred3,NA)
  errormat4[, i] <- ifelse(is.finite(pred4),pred4,NA)
  errormat5[, i] <- ifelse(is.finite(pred5),pred5,NA)
  errormat6[, i] <- ifelse(is.finite(pred6),pred6,NA)
  errormat7[, i] <- ifelse(is.finite(pred7),pred7,NA)
  errormat8[, i] <- ifelse(is.finite(pred8),pred8,NA)
  errormat9[, i] <- ifelse(is.finite(pred9),pred9,NA)
  errormat10[, i] <- ifelse(is.finite(pred10),pred10,NA)
  errormat11[, i] <- ifelse(is.finite(pred11),pred11,NA)
  errormat12[, i] <- ifelse(is.finite(pred12),pred12,NA)
  errormat13[, i] <- ifelse(is.finite(pred13),pred13,NA)
  errormat14[, i] <- ifelse(is.finite(pred14),pred14,NA)
  } else {
  errormat10[, i] <- ifelse(is.finite(pred10),pred10,NA)
  }
  if(verbose){cat("CV Fold", i, "\n")}
  # Drop the (possibly large) per-fold fit object before the next fold.
  rm(list=c(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep="")))
  }
  # --------------- Aggregate folds: mean curve, SE, optimum ---------------
  # getmin2 (helper defined elsewhere in the package) presumably returns the
  # minimising component count and the 1-SE-rule choice - signCVerror flips
  # AUC-type criteria so that larger-is-better becomes a minimisation.
  if(allCVcrit){
  for(ind in 1:number_ind) {
  assign(paste("cv.error",ind,sep=""),apply(get(paste("errormat",ind,sep="")), 1, mean, na.rm=TRUE))
  assign(paste("completed.cv",ind,sep=""),is.finite(get(paste("errormat",ind,sep=""))))
  assign(paste("cv.se",ind,sep=""),sqrt(apply(get(paste("errormat",ind,sep="")), 1, var, na.rm=TRUE))/nfold)
  assign(paste("lamin",ind,sep=""),getmin2(0:nt,signCVerror[ind]*get(paste("cv.error",ind,sep="")),get(paste("cv.se",ind,sep=""))))
  }} else {
  ind=10
  assign(paste("cv.error",ind,sep=""),apply(get(paste("errormat",ind,sep="")), 1, mean, na.rm=TRUE))
  assign(paste("completed.cv",ind,sep=""),is.finite(get(paste("errormat",ind,sep=""))))
  assign(paste("cv.se",ind,sep=""),sqrt(apply(get(paste("errormat",ind,sep="")), 1, var, na.rm=TRUE))/nfold)
  assign(paste("lamin",ind,sep=""),getmin2(0:nt,signCVerror[ind]*get(paste("cv.error",ind,sep="")),get(paste("cv.se",ind,sep=""))))
  }
  sign.lambda=1
  # ----------------------- Assemble the return value ----------------------
  if(allCVcrit){
  object <- list(nt=nt, cv.error1 = cv.error1, cv.error2 = cv.error2, cv.error3 = cv.error3, cv.error4 = cv.error4, cv.error5 = cv.error5, cv.error6 = cv.error6, cv.error7 = cv.error7, cv.error8 = cv.error8, cv.error9 = cv.error9, cv.error10 = cv.error10, cv.error11 = cv.error11, cv.error12 = cv.error12, cv.error13 = cv.error13, cv.error14 = cv.error14,
  cv.se1 = cv.se1, cv.se2 = cv.se2, cv.se3 = cv.se3, cv.se4 = cv.se4, cv.se5 = cv.se5, cv.se6 = cv.se6, cv.se7 = cv.se7, cv.se8 = cv.se8, cv.se9 = cv.se9, cv.se10 = cv.se10, cv.se11 = cv.se11, cv.se12 = cv.se12, cv.se13 = cv.se13, cv.se14 = cv.se14,
  folds = folds,
  lambda.min1 = lamin1[[1]], lambda.1se1 = lamin1[[2]], lambda.min2 = lamin2[[1]], lambda.1se2 = lamin2[[2]], lambda.min3 = lamin3[[1]], lambda.1se3 = lamin3[[2]], lambda.min4 = lamin4[[1]], lambda.1se4 = lamin4[[2]],
  lambda.min5 = lamin5[[1]], lambda.1se5 = lamin5[[2]], lambda.min6 = lamin6[[1]], lambda.1se6 = lamin6[[2]], lambda.min7 = lamin7[[1]], lambda.1se7 = lamin7[[2]], lambda.min8 = lamin8[[1]], lambda.1se8 = lamin8[[2]],
  lambda.min9 = lamin9[[1]], lambda.1se9 = lamin9[[2]], lambda.min10 = lamin10[[1]], lambda.1se10 = lamin10[[2]], lambda.min11 = lamin11[[1]], lambda.1se11 = lamin11[[2]], lambda.min12 = lamin12[[1]], lambda.1se12 = lamin12[[2]],
  lambda.min13 = lamin13[[1]], lambda.1se13 = lamin13[[2]], lambda.min14 = lamin14[[1]], lambda.1se14 = lamin14[[2]],
  nzb=nzb)#sign.lambda=sign.lambda
  if(folddetails){object <- c(object,list(errormat1 = errormat1, errormat2 = errormat2, errormat3 = errormat3, errormat4 = errormat4, errormat5 = errormat5, errormat6 = errormat6, errormat7 = errormat7, errormat8 = errormat8, errormat9 = errormat9, errormat10 = errormat10, errormat11 = errormat11, errormat12 = errormat12, errormat13 = errormat13, errormat14 = errormat14,
  completed.cv1 = completed.cv1, completed.cv2 = completed.cv2, completed.cv3 = completed.cv3, completed.cv4 = completed.cv4, completed.cv5 = completed.cv5, completed.cv6 = completed.cv6, completed.cv7 = completed.cv7,
  completed.cv8 = completed.cv8, completed.cv9 = completed.cv9, completed.cv10 = completed.cv10, completed.cv11 = completed.cv11, completed.cv12 = completed.cv12, completed.cv13 = completed.cv13, completed.cv14 = completed.cv14))}
  if(details){object <- c(object,list(All_indics=AUCs))}
  } else {
  object <- list(nt=nt,cv.error10=cv.error10,cv.se10=cv.se10,folds=folds,lambda.min10=lamin10[[1]],lambda.1se10=lamin10[[2]],nzb=nzb)
  if(folddetails){object <- c(object,list(errormat10 = errormat10, completed.cv10 = completed.cv10))}
  }
  # --------------------------- Optional plotting --------------------------
  # One panel per criterion (4 per device); dotted lines mark the optimal
  # (black) and 1-SE (red) component counts; top axis shows nzb.
  if (plot.it) {
  if(allCVcrit){
  for(ind in 1:number_ind) {
  if((ind%% 4)==1){dev.new();layout(matrix(1:4,nrow=2))}
  plot((sign.lambda*(0:nt))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], type = "l", xlim=c(0,nt), ylim = range(c(get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] -
  get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] + get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])), xlab = xlabsCV[ind], ylab = ylabsCV[ind],
  main = titlesCV[ind]
  )
  abline(v = sign.lambda*getElement(object,paste("lambda.min",ind,sep="")), lty = 3)
  abline(v = sign.lambda*getElement(object,paste("lambda.1se",ind,sep="")), lty = 3, col="red")
  if(show_nbr_var){axis(side = 3, at = sign.lambda*(0:nt), labels = paste(object$nzb), tick = FALSE, line = -1)}
  if (se)
  segments(sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] - get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] +
  get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])
  }
  layout(1)
  } else {
  ind=10
  plot((sign.lambda*(0:nt))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], type = "l", xlim=c(0,nt), ylim = range(c(get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] -
  get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] + get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])), xlab = xlabsCV[ind], ylab = ylabsCV[ind],
  main = titlesCV[ind]
  )
  abline(v = sign.lambda*getElement(object,paste("lambda.min",ind,sep="")), lty = 3)
  abline(v = sign.lambda*getElement(object,paste("lambda.1se",ind,sep="")), lty = 3, col="red")
  if(show_nbr_var){axis(side = 3, at = sign.lambda*(0:nt), labels = paste(object$nzb), tick = FALSE, line = -1)}
  if (se)
  segments(sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] - get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] +
  get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])
  }
  }
  invisible(object)
}
| /plsRcox/R/cv.coxsplsDR.R | no_license | ingted/R-Examples | R | false | false | 20,097 | r | cv.coxsplsDR =
function (data, method = c("efron", "breslow"), nfold = 5, nt = 10, eta = .5, plot.it = TRUE,
se = TRUE, givefold, scaleX = TRUE, scaleY = FALSE, folddetails = FALSE, allCVcrit=FALSE,
details=FALSE, namedataset="data", save=FALSE, verbose=TRUE,...)
{
try(attachNamespace("survival"),silent=TRUE)
#on.exit(try(unloadNamespace("survival"),silent=TRUE))
try(attachNamespace("rms"),silent=TRUE)
on.exit(try(unloadNamespace("rms"),silent=TRUE))
cv.error1<-NULL;cv.error2<-NULL;cv.error3<-NULL;cv.error4<-NULL;cv.error5<-NULL;cv.error6<-NULL;cv.error7<-NULL;cv.error8<-NULL;cv.error9<-NULL;cv.error10<-NULL;cv.error11<-NULL;cv.error12<-NULL;cv.error13<-NULL;cv.error14<-NULL
cv.se1<-NULL;cv.se2<-NULL;cv.se3<-NULL;cv.se4<-NULL;cv.se5<-NULL;cv.se6<-NULL;cv.se7<-NULL;cv.se8<-NULL;cv.se9<-NULL;cv.se10<-NULL;cv.se11<-NULL;cv.se12<-NULL;cv.se13<-NULL;cv.se14<-NULL
lamin1<-NULL;lamin2<-NULL;lamin3<-NULL;lamin4<-NULL;lamin5<-NULL;lamin6<-NULL;lamin7<-NULL;lamin8<-NULL;lamin9<-NULL;lamin10<-NULL;lamin11<-NULL;lamin12<-NULL;lamin13<-NULL;lamin14<-NULL
completed.cv1<-NULL;completed.cv2<-NULL;completed.cv3<-NULL;completed.cv4<-NULL;completed.cv5<-NULL;completed.cv6<-NULL;completed.cv7<-NULL;completed.cv8<-NULL;completed.cv9<-NULL;completed.cv10<-NULL;completed.cv11<-NULL;completed.cv12<-NULL;completed.cv13<-NULL;completed.cv14<-NULL
method <- match.arg(method)
x <- data$x
time <- data$time
status <- data$status
n <- length(time)
if(missing(givefold)){
folds <- split(sample(seq(n)), rep(1:nfold, length = n))} else
{
folds <- givefold
}
number_ind = 14
titlesCV = c("Cross-validated log-partial-likelihood","van Houwelingen Cross-validated log-partial-likelihood","iAUC_CD","iAUC_hc","iAUC_sh","iAUC_Uno","iAUC_hz.train","iAUC_hz.test","iAUC_survivalROC.train","iAUC_survivalROC.test","iBrierScore unw","iSchmidScore (robust BS) unw","iBrierScore w","iSchmidScore (robust BS) w")
ylabsCV = c(rep("Minus log-partial-likelihood",2),rep("iAUC",8),rep("Prediction Error",4))
xlabsCV = c(rep("nbr of components",14))
signCVerror = c(rep(1,2),rep(-1,8),rep(1,4))
show_nbr_var = TRUE
for(ind in 1:number_ind) {
assign(paste("errormat",ind,sep=""),matrix(NA, nt+1, nfold))
}
for (i in seq(nfold)) {
for(ind in 1:number_ind) {
assign(paste("pred",ind,sep=""),rep(NA, nt+1))
}
omit <- folds[[i]]
trdata <- list(x = x[-omit, ], time = time[-omit], status = status[-omit])
tsdata <- list(x = x[omit, ], time = time[omit], status = status[omit])
if(!file.exists(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))){
assign(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""),coxsplsDR(Xplan=trdata$x, time=trdata$time, event=trdata$status, ncomp=nt, eta=eta, allres=TRUE, scaleX=TRUE, scaleY=FALSE, verbose=verbose, ...))
if(save){save(list=c(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep="")),file=paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))}
} else {
load(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,".RData",sep=""))
}
coeffit <- get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$CoeffCFull
nzb <- cumsum(c(0,sapply(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_mod$new2As,length)))
for(jj in 1:nt){
Avalues <- get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_mod$A
newxdata=(predict.pls.cox(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_modplsr,
newdata=scale((tsdata$x)[,Avalues],
(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanCent)[Avalues],
(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanScal)[Avalues]),
scale.X=FALSE,scale.Y=FALSE)$variates)[,1:jj,drop=FALSE]
oldxdata=(as.matrix(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$tt_splsDR))[,1:jj,drop=FALSE]
allxdata=(predict.pls.cox(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$splsDR_modplsr,
newdata=scale(x[,Avalues],
(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanCent)[Avalues],
(get(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep=""))$XplanScal)[Avalues]),
scale.X=FALSE,scale.Y=FALSE)$variates)[,1:jj,drop=FALSE]
if(jj==1){
pred1[1] <- logplik(x=newxdata[,1,drop=FALSE], time=tsdata$time, status=tsdata$status, b=matrix(0), method = method,return.all = FALSE) #"efron" match with loglik of coxph
plfull <- logplik(x=allxdata[,1,drop=FALSE], time=time, status=status, b=matrix(0), method = method,return.all = FALSE) #"efron"
plminusk <- logplik(x=oldxdata[,1,drop=FALSE], time=trdata$time, status=trdata$status, b=matrix(0), method = method,return.all = FALSE) #"efron"
pred2[1] = plfull - plminusk
Xlp <- rep(0,length(time))
assign(paste("dataset_",namedataset,"_",0,sep=""),as.data.frame(cbind(time=time,status=status,Xlp=Xlp)))
TR <- get(paste("dataset_",namedataset,"_",0,sep=""))[-omit,]
TE <- get(paste("dataset_",namedataset,"_",0,sep=""))[omit,]
survival.time <- get(paste("dataset_",namedataset,"_",0,sep=""))[,"time"]
survival.status <- get(paste("dataset_",namedataset,"_",0,sep=""))[,"status"]
tr.survival.time <- trdata$time
tr.survival.status <- trdata$status
te.survival.time <- tsdata$time
te.survival.status <- tsdata$status
#require(survival)
Surv.rsp <- Surv(tr.survival.time, tr.survival.status)
Surv.rsp.new <- Surv(te.survival.time, te.survival.status)
train.fit <- coxph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1)
#library(rms)
train.fit.cph <- cph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1)
lp <- predict(train.fit)
lpnew <- predict(train.fit, newdata=TE)
if(allCVcrit){
AUCs <- getIndicCV(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
pred3[1] = AUCs$AUC_CD$iauc
pred4[1] = AUCs$AUC_hc$iauc
pred5[1] = AUCs$AUC_sh$iauc
pred6[1] = AUCs$AUC_Uno$iauc
pred7[1] = AUCs$AUC_hz.train$iauc
pred8[1] = AUCs$AUC_hz.test$iauc
pred9[1] = AUCs$AUC_survivalROC.train$iauc
pred10[1] = AUCs$AUC_survivalROC.test$iauc
pred11[1] = AUCs$prederr$brier.unw$ierror
pred12[1] = AUCs$prederr$robust.unw$ierror
pred13[1] = AUCs$prederr$brier.w$ierror
pred14[1] = AUCs$prederr$robust.w$ierror
} else {
AUCs <- getIndicCViAUCSurvROCTest(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
pred3[1] = NA
pred4[1] = NA
pred5[1] = NA
pred6[1] = NA
pred7[1] = NA
pred8[1] = NA
pred9[1] = NA
pred10[1] = AUCs$AUC_survivalROC.test$iauc
pred11[1] = NA
pred12[1] = NA
pred13[1] = NA
pred14[1] = NA
}
}
pred1[jj+1] <- logplik(x=newxdata[,1:jj,drop=FALSE], time=tsdata$time, status=tsdata$status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron" match with loglik of coxph
plfull <- logplik(x=allxdata[,1:jj,drop=FALSE], time=time, status=status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron"
plminusk <- logplik(x=oldxdata[,1:jj,drop=FALSE], time=trdata$time, status=trdata$status, b=coeffit[1:jj,jj,drop=FALSE], method = method,return.all = FALSE) #"efron"
pred2[jj+1] = plfull - plminusk
predict.trainvectjj <- oldxdata%*%(coeffit[1:jj,jj,drop=FALSE])
predictvectjj <- newxdata%*%(coeffit[1:jj,jj,drop=FALSE])
Xlp <- rep(NA,length(time))
Xlp[-omit] <- predict.trainvectjj
Xlp[omit] <- predictvectjj
assign(paste("dataset_",namedataset,"_",jj,sep=""),as.data.frame(cbind(time=time,status=status,Xlp=Xlp)))
TR <- get(paste("dataset_",namedataset,"_",jj,sep=""))[-omit,]
TE <- get(paste("dataset_",namedataset,"_",jj,sep=""))[omit,]
survival.time <- get(paste("dataset_",namedataset,"_",jj,sep=""))[,"time"]
survival.status <- get(paste("dataset_",namedataset,"_",jj,sep=""))[,"status"]
tr.survival.time <- trdata$time
tr.survival.status <- trdata$status
te.survival.time <- tsdata$time
te.survival.status <- tsdata$status
#require(survival)
Surv.rsp <- Surv(tr.survival.time, tr.survival.status)
Surv.rsp.new <- Surv(te.survival.time, te.survival.status)
train.fit <- coxph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1) #offset
#library(rms)
train.fit.cph <- cph(Surv(time,status) ~ Xlp, x=TRUE, y=TRUE, method=method, data=TR, iter.max=0, init=1) #offset
lp <- predict(train.fit)
lpnew <- predict(train.fit, newdata=TE)
if(allCVcrit){
AUCs <- getIndicCV(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
pred3[jj+1] = AUCs$AUC_CD$iauc
pred4[jj+1] = AUCs$AUC_hc$iauc
pred5[jj+1] = AUCs$AUC_sh$iauc
pred6[jj+1] = AUCs$AUC_Uno$iauc
pred7[jj+1] = AUCs$AUC_hz.train$iauc
pred8[jj+1] = AUCs$AUC_hz.test$iauc
pred9[jj+1] = AUCs$AUC_survivalROC.train$iauc
pred10[jj+1] = AUCs$AUC_survivalROC.test$iauc
pred11[jj+1] = AUCs$prederr$brier.unw$ierror
pred12[jj+1] = AUCs$prederr$robust.unw$ierror
pred13[jj+1] = AUCs$prederr$brier.w$ierror
pred14[jj+1] = AUCs$prederr$robust.w$ierror
} else {
AUCs <- getIndicCViAUCSurvROCTest(lp,lpnew,Surv.rsp,Surv.rsp.new,times.auc=seq(0,max(time),length.out=1000),times.prederr=seq(0,max(time),length.out=1000)[-(990:1000)],train.fit,plot.it=FALSE)
pred3[jj+1] = NA
pred4[jj+1] = NA
pred5[jj+1] = NA
pred6[jj+1] = NA
pred7[jj+1] = NA
pred8[jj+1] = NA
pred9[jj+1] = NA
pred10[jj+1] = AUCs$AUC_survivalROC.test$iauc
pred11[jj+1] = NA
pred12[jj+1] = NA
pred13[jj+1] = NA
pred14[jj+1] = NA
}
}
# if(allCVcrit){
if(is.na(pred10[1])){pred10[1]<-.5}
# }
if (length(omit) == 1){
for(ind in 1:number_ind) {
assign(paste("pred",ind,sep=""),matrix(get(paste("pred",ind,sep="")), nrow = 1))
}
}
#if(any(is.na(pred10))){save(list=c("pred10"),file=paste(Predspath,"/failed.fold.cv.",typemodel,"_",namedataset,"_folds_",i,".RData",sep=""))}
if(allCVcrit){
errormat1[, i] <- ifelse(is.finite(pred1),-pred1/length(omit),NA)
errormat2[, i] <- ifelse(is.finite(pred2),-pred2/length(omit),NA)
errormat3[, i] <- ifelse(is.finite(pred3),pred3,NA)
errormat4[, i] <- ifelse(is.finite(pred4),pred4,NA)
errormat5[, i] <- ifelse(is.finite(pred5),pred5,NA)
errormat6[, i] <- ifelse(is.finite(pred6),pred6,NA)
errormat7[, i] <- ifelse(is.finite(pred7),pred7,NA)
errormat8[, i] <- ifelse(is.finite(pred8),pred8,NA)
errormat9[, i] <- ifelse(is.finite(pred9),pred9,NA)
errormat10[, i] <- ifelse(is.finite(pred10),pred10,NA)
errormat11[, i] <- ifelse(is.finite(pred11),pred11,NA)
errormat12[, i] <- ifelse(is.finite(pred12),pred12,NA)
errormat13[, i] <- ifelse(is.finite(pred13),pred13,NA)
errormat14[, i] <- ifelse(is.finite(pred14),pred14,NA)
} else {
errormat10[, i] <- ifelse(is.finite(pred10),pred10,NA)
}
if(verbose){cat("CV Fold", i, "\n")}
rm(list=c(paste("cv.coxsplsDR_",namedataset,"_folds_",i,"_eta_",eta,sep="")))
}
if(allCVcrit){
for(ind in 1:number_ind) {
assign(paste("cv.error",ind,sep=""),apply(get(paste("errormat",ind,sep="")), 1, mean, na.rm=TRUE))
assign(paste("completed.cv",ind,sep=""),is.finite(get(paste("errormat",ind,sep=""))))
assign(paste("cv.se",ind,sep=""),sqrt(apply(get(paste("errormat",ind,sep="")), 1, var, na.rm=TRUE))/nfold)
assign(paste("lamin",ind,sep=""),getmin2(0:nt,signCVerror[ind]*get(paste("cv.error",ind,sep="")),get(paste("cv.se",ind,sep=""))))
}} else {
ind=10
assign(paste("cv.error",ind,sep=""),apply(get(paste("errormat",ind,sep="")), 1, mean, na.rm=TRUE))
assign(paste("completed.cv",ind,sep=""),is.finite(get(paste("errormat",ind,sep=""))))
assign(paste("cv.se",ind,sep=""),sqrt(apply(get(paste("errormat",ind,sep="")), 1, var, na.rm=TRUE))/nfold)
assign(paste("lamin",ind,sep=""),getmin2(0:nt,signCVerror[ind]*get(paste("cv.error",ind,sep="")),get(paste("cv.se",ind,sep=""))))
}
sign.lambda=1
if(allCVcrit){
object <- list(nt=nt, cv.error1 = cv.error1, cv.error2 = cv.error2, cv.error3 = cv.error3, cv.error4 = cv.error4, cv.error5 = cv.error5, cv.error6 = cv.error6, cv.error7 = cv.error7, cv.error8 = cv.error8, cv.error9 = cv.error9, cv.error10 = cv.error10, cv.error11 = cv.error11, cv.error12 = cv.error12, cv.error13 = cv.error13, cv.error14 = cv.error14,
cv.se1 = cv.se1, cv.se2 = cv.se2, cv.se3 = cv.se3, cv.se4 = cv.se4, cv.se5 = cv.se5, cv.se6 = cv.se6, cv.se7 = cv.se7, cv.se8 = cv.se8, cv.se9 = cv.se9, cv.se10 = cv.se10, cv.se11 = cv.se11, cv.se12 = cv.se12, cv.se13 = cv.se13, cv.se14 = cv.se14,
folds = folds,
lambda.min1 = lamin1[[1]], lambda.1se1 = lamin1[[2]], lambda.min2 = lamin2[[1]], lambda.1se2 = lamin2[[2]], lambda.min3 = lamin3[[1]], lambda.1se3 = lamin3[[2]], lambda.min4 = lamin4[[1]], lambda.1se4 = lamin4[[2]],
lambda.min5 = lamin5[[1]], lambda.1se5 = lamin5[[2]], lambda.min6 = lamin6[[1]], lambda.1se6 = lamin6[[2]], lambda.min7 = lamin7[[1]], lambda.1se7 = lamin7[[2]], lambda.min8 = lamin8[[1]], lambda.1se8 = lamin8[[2]],
lambda.min9 = lamin9[[1]], lambda.1se9 = lamin9[[2]], lambda.min10 = lamin10[[1]], lambda.1se10 = lamin10[[2]], lambda.min11 = lamin11[[1]], lambda.1se11 = lamin11[[2]], lambda.min12 = lamin12[[1]], lambda.1se12 = lamin12[[2]],
lambda.min13 = lamin13[[1]], lambda.1se13 = lamin13[[2]], lambda.min14 = lamin14[[1]], lambda.1se14 = lamin14[[2]],
nzb=nzb)#sign.lambda=sign.lambda
if(folddetails){object <- c(object,list(errormat1 = errormat1, errormat2 = errormat2, errormat3 = errormat3, errormat4 = errormat4, errormat5 = errormat5, errormat6 = errormat6, errormat7 = errormat7, errormat8 = errormat8, errormat9 = errormat9, errormat10 = errormat10, errormat11 = errormat11, errormat12 = errormat12, errormat13 = errormat13, errormat14 = errormat14,
completed.cv1 = completed.cv1, completed.cv2 = completed.cv2, completed.cv3 = completed.cv3, completed.cv4 = completed.cv4, completed.cv5 = completed.cv5, completed.cv6 = completed.cv6, completed.cv7 = completed.cv7,
completed.cv8 = completed.cv8, completed.cv9 = completed.cv9, completed.cv10 = completed.cv10, completed.cv11 = completed.cv11, completed.cv12 = completed.cv12, completed.cv13 = completed.cv13, completed.cv14 = completed.cv14))}
if(details){object <- c(object,list(All_indics=AUCs))}
} else {
object <- list(nt=nt,cv.error10=cv.error10,cv.se10=cv.se10,folds=folds,lambda.min10=lamin10[[1]],lambda.1se10=lamin10[[2]],nzb=nzb)
if(folddetails){object <- c(object,list(errormat10 = errormat10, completed.cv10 = completed.cv10))}
}
if (plot.it) {
if(allCVcrit){
for(ind in 1:number_ind) {
if((ind%% 4)==1){dev.new();layout(matrix(1:4,nrow=2))}
plot((sign.lambda*(0:nt))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], type = "l", xlim=c(0,nt), ylim = range(c(get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] -
get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] + get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])), xlab = xlabsCV[ind], ylab = ylabsCV[ind],
main = titlesCV[ind]
)
abline(v = sign.lambda*getElement(object,paste("lambda.min",ind,sep="")), lty = 3)
abline(v = sign.lambda*getElement(object,paste("lambda.1se",ind,sep="")), lty = 3, col="red")
if(show_nbr_var){axis(side = 3, at = sign.lambda*(0:nt), labels = paste(object$nzb), tick = FALSE, line = -1)}
if (se)
segments(sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] - get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] +
get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])
}
layout(1)
} else {
ind=10
plot((sign.lambda*(0:nt))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], type = "l", xlim=c(0,nt), ylim = range(c(get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] -
get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] + get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])), xlab = xlabsCV[ind], ylab = ylabsCV[ind],
main = titlesCV[ind]
)
abline(v = sign.lambda*getElement(object,paste("lambda.min",ind,sep="")), lty = 3)
abline(v = sign.lambda*getElement(object,paste("lambda.1se",ind,sep="")), lty = 3, col="red")
if(show_nbr_var){axis(side = 3, at = sign.lambda*(0:nt), labels = paste(object$nzb), tick = FALSE, line = -1)}
if (se)
segments(sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] - get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))], sign.lambda*((0:nt)[!is.nan(get(paste("cv.error",ind,sep="")))]), get(paste("cv.error",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))] +
get(paste("cv.se",ind,sep=""))[!is.nan(get(paste("cv.error",ind,sep="")))])
}
}
invisible(object)
}
|
# Load libraries
library(tidyverse)
library(psych)
# Load Floyd week data
con = DBI::dbConnect(RMariaDB::MariaDB(), dbname = 'household_pulse',
host = '127.0.0.1', user = '', password = '')
pulse_floyd = tbl(con, 'pulse2020_puf_05') %>%
select(SCRAM, gad2_sum, phq2_sum, is_black, is_white,
is_hispanic, is_asian, WEEK, state, PWEIGHT,
INCOME, is_female, age_in_years, EEDUC) %>%
as_tibble()
# Load pre-Floyd data
pulse_base = tibble()
for (i in seq(4)) {
df = tbl(con, paste0('pulse2020_puf_0', i)) %>%
select(SCRAM, gad2_sum, phq2_sum, is_black, is_white,
is_hispanic, is_asian, WEEK, state, PWEIGHT,
INCOME, is_female, age_in_years, EEDUC) %>%
as_tibble()
pulse_base = rbind(pulse_base, df)
}
DBI::dbDisconnect(con)
# Combine Floyd and pre-Floyd data with identifier
pulse = pulse_floyd %>%
bind_rows(pulse_base) %>%
mutate(floyd_weekOrNot = if_else(WEEK == 5, 1, 0),
PWEIGHT = as.numeric(PWEIGHT))
#' Weighted Cohen's d
#'
#' Computes a Cohen's d effect size using survey weights to calculate the weighted mean and variance.
#'
#' @param x The data vector, numeric.
#' @param groups A vector containing two groups, characters or factors, of the same size as \code{x}.
#' @param w A weight vector, numeric, of the same size as \code{x}.
#' @return A tibble containing the weighted Cohen's d estimate, the weighted standard error, and the size of each group.
weighted_cohens_d = function(x, groups, w, na.rm = T) {
df = tibble(x = x, g = groups, w = w)
d = df %>%
group_by(g) %>%
summarize(m = weighted.mean(x, w, na.rm = {{ na.rm }}),
v = Hmisc::wtd.var(x, w, na.rm = {{ na.rm }}),
sd = sqrt(v)) %>%
ungroup() %>%
summarize(m = diff(m),
sd = sqrt(sum(sd^2) / length(sd))) %>%
summarize(d = m / sd) %>%
.$d
groups = unique(groups)
g1 = groups[1]
g2 = groups[2]
n1 = df %>%
drop_na() %>%
filter(g == {{ g1 }}) %>%
summarize(n = n()) %>%
.$n
n2 = df %>%
drop_na() %>%
filter(g == {{ g2 }}) %>%
summarize(n = n()) %>%
.$n
se = cohen.d.ci(d, n1, n2)
se = (se[2] - se[1]) / 1.96
return(tibble(d = d, se = se, n1 = n1, n2 = n2))
} | /setup_census.R | no_license | gtsherman/floyd-mental-health | R | false | false | 2,263 | r | # Load libraries
library(tidyverse)
library(psych)

# Load Floyd week data (survey week 5 -- the "Floyd week" per this analysis).
# NOTE(review): assumes a local MariaDB instance holding the Household Pulse
# weekly PUF tables, reachable with empty credentials -- confirm before running.
con <- DBI::dbConnect(RMariaDB::MariaDB(), dbname = 'household_pulse',
                      host = '127.0.0.1', user = '', password = '')
pulse_floyd <- tbl(con, 'pulse2020_puf_05') %>%
  select(SCRAM, gad2_sum, phq2_sum, is_black, is_white,
         is_hispanic, is_asian, WEEK, state, PWEIGHT,
         INCOME, is_female, age_in_years, EEDUC) %>%
  as_tibble()

# Load pre-Floyd data (weeks 1-4). Collect every week first, then bind once:
# this avoids growing a data frame with rbind() inside a loop.
pulse_base <- do.call(rbind, lapply(seq_len(4), function(week) {
  tbl(con, paste0('pulse2020_puf_0', week)) %>%
    select(SCRAM, gad2_sum, phq2_sum, is_black, is_white,
           is_hispanic, is_asian, WEEK, state, PWEIGHT,
           INCOME, is_female, age_in_years, EEDUC) %>%
    as_tibble()
}))
DBI::dbDisconnect(con)

# Combine Floyd and pre-Floyd data with an indicator for the Floyd week, and
# coerce the survey weight to numeric for downstream weighted statistics.
pulse <- pulse_floyd %>%
  bind_rows(pulse_base) %>%
  mutate(floyd_weekOrNot = if_else(WEEK == 5, 1, 0),
         PWEIGHT = as.numeric(PWEIGHT))
#' Weighted Cohen's d
#'
#' Computes a Cohen's d effect size using survey weights to calculate the
#' weighted mean and variance within each group.
#'
#' @param x The data vector, numeric.
#' @param groups A vector containing two groups, characters or factors, of the same size as \code{x}.
#' @param w A weight vector, numeric, of the same size as \code{x}.
#' @param na.rm Drop missing values before computing the weighted moments?
#' @return A tibble containing the weighted Cohen's d estimate, the weighted
#'   standard error, and the size of each group.
weighted_cohens_d <- function(x, groups, w, na.rm = TRUE) {
  df <- tibble(x = x, g = groups, w = w)

  # d = (difference in weighted group means) / pooled SD, where the pooled SD
  # averages the two groups' weighted variances.
  d <- df %>%
    group_by(g) %>%
    summarize(m = weighted.mean(x, w, na.rm = {{ na.rm }}),
              v = Hmisc::wtd.var(x, w, na.rm = {{ na.rm }}),
              sd = sqrt(v)) %>%
    ungroup() %>%
    summarize(m = diff(m),
              sd = sqrt(sum(sd^2) / length(sd))) %>%
    summarize(d = m / sd) %>%
    .$d

  # Complete-case sizes of the two groups feed the CI for d.
  groups <- unique(groups)
  g1 <- groups[1]
  g2 <- groups[2]
  n1 <- df %>%
    drop_na() %>%
    filter(g == {{ g1 }}) %>%
    summarize(n = n()) %>%
    .$n
  n2 <- df %>%
    drop_na() %>%
    filter(g == {{ g2 }}) %>%
    summarize(n = n()) %>%
    .$n

  # cohen.d.ci() returns c(lower, effect, upper); for the default 95% interval
  # (effect - lower) is approximately 1.96 * SE, so divide to recover the SE.
  se <- cohen.d.ci(d, n1, n2)
  se <- (se[2] - se[1]) / 1.96
  return(tibble(d = d, se = se, n1 = n1, n2 = n2))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acdatabase_dbtest.R
\name{agdb.checkid}
\alias{agdb.checkid}
\title{Check an id}
\usage{
agdb.checkid(id, null_permitted = F)
}
\arguments{
\item{id}{char: A proposed id}
\item{null_permitted}{bool: whether NULL should return T or F}
}
\value{
character
}
\description{
Checks if a proposed id follows the formatting rules.
}
\details{
The following rules are currently checked:
\enumerate{
\item id must be of character type
\item id has length 6
\item all characters are capital letters or numerals 0:9
}
}
| /man/agdb.checkid.Rd | permissive | SamT123/acutilsLite | R | false | true | 583 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acdatabase_dbtest.R
\name{agdb.checkid}
\alias{agdb.checkid}
\title{Check an id}
\usage{
agdb.checkid(id, null_permitted = F)
}
\arguments{
\item{id}{char: A proposed id}
\item{null_permitted}{bool: whether NULL should return T or F}
}
\value{
character
}
\description{
Checks if a proposed id follows the formatting rules.
}
\details{
The following rules are currently checked:
\enumerate{
\item id must be of character type
\item id has length 6
\item all characters are capital letters or numerals 0:9
}
}
|
# This function gets slope and adj_R from lm
# This might not be useful as of 2017-10-05
lm_slope_adj_R <- function(formula){
regresion <- lm( formula )
ss <- summary(regresion)
return(c( coef(regresion)[2] , ss$adj.r.squared))
} | /lm_slope_adj_R.R | no_license | matiasandina/useful_functions | R | false | false | 237 | r | # This function gets slope and adj_R from lm
# This might not be useful as of 2017-10-05
# lm_slope_adj_R: fit lm(formula) and return the slope together with the
# model's adjusted R-squared.
#
# Args:
#   formula: a model formula handed straight to lm(); its variables must be
#            resolvable from the formula's environment.
# Returns: numeric vector c(slope, adj.r.squared), where the slope is
#   coef(...)[2], i.e. the first non-intercept coefficient (assumes a
#   single-predictor model -- TODO confirm intended use).
lm_slope_adj_R <- function(formula){
  regresion <- lm( formula )
  # summary() carries adj.r.squared, which coef() alone does not expose.
  ss <- summary(regresion)
  return(c( coef(regresion)[2] , ss$adj.r.squared))
} |
## Builds plot3.png: total PM2.5 emissions in Baltimore City, faceted by
## source type.
## NOTE(review): install.packages() on every run plus hard-coded absolute
## paths make this a machine-specific, one-off assignment script.
## 1. Install packages
install.packages("dplyr")
install.packages("tidyr")
install.packages("tidyverse")
## 2. Load libraries
library(dplyr)
library(tidyr)
library(tidyverse)
## 3. Set Working Directory
setwd("/Users/ussadethlo/Desktop/DSTR/EDA_Project_2/EDA_Project_2")
## 4. Download and Extract file
zip_file <- download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip', 'data_for_peer_assessment.zip', mode = 'wb')
unzip(zipfile = "data_for_peer_assessment.zip", exdir = "/Users/ussadethlo/Desktop/DSTR/EDA_Project_2/EDA_Project_2")
## 5. Read RDS Files
# NEI holds the per-record emissions observations; SCC (the source
# classification lookup) is read but not referenced by this plot.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## 6. Question 3. Make a Plot using GGPlot2 to show emissions in Baltimore City by Type
png(filename = 'plot3.png')
# fips "24510" is Baltimore City (per the question above); sum emissions per
# year within each source type.
NEI_blt_type <- NEI %>%
  subset(fips == "24510") %>%
  group_by(year, type) %>%
  summarize(Emissions = sum(Emissions))
# One facet per source type; free scales keep each trend readable.
ggplot(NEI_blt_type, aes(factor(year), Emissions, fill=type)) +
  geom_bar(stat = "identity") +
  facet_grid(.~type, scales = "free", space = "free") +
  labs(x = "year", y = "Total Emissions (Tons)", title = "Emissions in Baltimore City by Type")
dev.off() | /plot3.R | no_license | sdethloff/EDA_Project_2 | R | false | false | 1,179 | r | ## 1. Install packages
install.packages("dplyr")
install.packages("tidyr")
install.packages("tidyverse")
## 2. Load libraries
library(dplyr)
library(tidyr)
library(tidyverse)
## 3. Set Working Directory
setwd("/Users/ussadethlo/Desktop/DSTR/EDA_Project_2/EDA_Project_2")
## 4. Download and Extract file
zip_file <- download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip', 'data_for_peer_assessment.zip', mode = 'wb')
unzip(zipfile = "data_for_peer_assessment.zip", exdir = "/Users/ussadethlo/Desktop/DSTR/EDA_Project_2/EDA_Project_2")
## 5. Read RDS Files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## 6. Question 3. Make a Plot using GGPlot2 to show emissions in Baltimore City by Type
png(filename = 'plot3.png')
NEI_blt_type <- NEI %>%
subset(fips == "24510") %>%
group_by(year, type) %>%
summarize(Emissions = sum(Emissions))
ggplot(NEI_blt_type, aes(factor(year), Emissions, fill=type)) +
geom_bar(stat = "identity") +
facet_grid(.~type, scales = "free", space = "free") +
labs(x = "year", y = "Total Emissions (Tons)", title = "Emissions in Baltimore City by Type")
dev.off() |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{AverageDrawdown}
\alias{AverageDrawdown}
\alias{AverageRecovery}
\title{Calculates the average of the observed drawdowns.}
\usage{
AverageDrawdown(R, ...)
AverageRecovery(R, ...)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{\dots}{any other passthru parameters}
}
\description{
ADD = abs(sum[j=1,2,...,d](D_j/d)) where
D_j = jth drawdown over the entire period
d = total number of drawdowns in the entire period
}
| /man/AverageDrawdown.Rd | no_license | ecjbosu/PerformanceAnalytics | R | false | false | 558 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{AverageDrawdown}
\alias{AverageDrawdown}
\alias{AverageRecovery}
\title{Calculates the average of the observed drawdowns.}
\usage{
AverageDrawdown(R, ...)
AverageRecovery(R, ...)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{\dots}{any other passthru parameters}
}
\description{
ADD = abs(sum[j=1,2,...,d](D_j/d)) where
D_j = jth drawdown over the entire period
d = total number of drawdowns in the entire period
}
|
##The cacheSolve function can be used to return the inverse of a matrix.
##Efficiency is provided by caching any inverse using the makeCacheMatrix:
## An efficient approach is used in that once the matrix inverse is calculated
## it is cached by the makeCacheMatrix method. Future access to the
## matrix inverse then comes from the cached version avoiding duplication of
## possibly expensive matrix inverse calculations.
##makeCacheMatrix: Input parameter, x, an invertible matrix.
## Returns: List of functions providing access cached matrix inverse
## Construct a caching wrapper around matrix `x`.
## The returned list exposes four closures over a shared environment:
##   set(y)           -- replace the matrix and invalidate the cache
##   get()            -- retrieve the stored matrix
##   setinverse(minv) -- store a computed inverse
##   getinverse()     -- retrieve the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    set <- function(y) {
        x <<- y
        cached_inverse <<- NULL  # a new matrix invalidates any old inverse
    }
    get <- function() x
    setinverse <- function(minv) cached_inverse <<- minv
    getinverse <- function() cached_inverse
    list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
##cacheSolve: Input parameter, x, a cache object built by makeCacheMatrix
##            wrapping an invertible matrix; any extra arguments in ... are
##            forwarded to solve() (e.g. a tolerance).
## Returns: the inverse of the wrapped matrix, computed at most once and
##          served from the cache on subsequent calls.
cacheSolve <- function(x, ...) {
  # Fast path: an inverse was already computed and cached.
  m <- x$getinverse()
  if (!is.null(m)) {
    return(m)
  }
  # Cache miss: invert the stored matrix and remember the result.
  data <- x$get()
  # Fix: forward ... to solve(); previously the argument was accepted but
  # silently ignored.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
| /cachematrix.R | no_license | lhcrsa/ProgrammingAssignment2 | R | false | false | 1,482 | r | ##The cacheSolve function can be used to return the inverse of a matrix.
##Efficiency is provided by caching any inverse using the makeCacheMatrix:
## An efficient approach is used in that once the matrix inverse is calculated
## it is cached by the makeCacheMatrix method. Future access to the
## matrix inverse then comes from the cached version avoiding duplication of
## possibly expensive matrix inverse calculations.
##makeCacheMatrix: Input parameter, x, an invertible matrix.
## Returns: List of functions providing access cached matrix inverse
## makeCacheMatrix: wrap matrix `x` in a closure-based cache.
## Returns a list of four accessor functions sharing one environment:
## set(y), get(), setinverse(minv), getinverse(). Storing a new matrix via
## set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL  # stale inverse must not survive a new matrix
    },
    get = function() x,
    setinverse = function(minv) inv_cache <<- minv,
    getinverse = function() inv_cache
  )
}
##cacheSolve: Input parameter, x, an invertible matrix
## Returns: the inverse of the matrix x.
## cacheSolve: return the inverse of the matrix wrapped by `x` (a cache
## object from makeCacheMatrix), computing it at most once; later calls are
## served from the cache.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Nothing cached yet: invert the stored matrix and remember the result.
    cached <- solve(x$get())
    x$setinverse(cached)
  }
  cached
}
|
## Builds a 2x2 panel of household power plots for 2007-02-01 and 2007-02-02.
# skip=1 drops the header row, so the nine column names are reassigned below.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target days (dates are still d/m/Y strings here).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
# Re-stamp times with their calendar date: rows 1:1440 -> Feb 1, 1441:2880 -> Feb 2.
# NOTE(review): assumes exactly 1440 one-minute readings per day and rows in
# chronological order -- confirm against the raw file.
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# 2x2 grid of base-graphics plots.
par(mfrow=c(2,2))
with(subpower,{
  # as.numeric(as.character(...)) converts columns that read.table imported
  # as factors/strings into plottable numerics.
  plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l", xlab="",ylab="Global Active Power")
  plot(subpower$Time,as.numeric(as.character(subpower$Voltage)), type="l",xlab="datetime",ylab="Voltage")
  # Empty frame first, then one line per sub-metering channel.
  plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
  legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
  plot(subpower$Time,as.numeric(as.character(subpower$Global_reactive_power)),type="l",xlab="datetime",ylab="Global_reactive_power")
})
| /plot4.R | no_license | Sahil54/ExData_Plotting1 | R | false | false | 1,460 | r | power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
par(mfrow=c(2,2))
with(subpower,{
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l", xlab="",ylab="Global Active Power")
plot(subpower$Time,as.numeric(as.character(subpower$Voltage)), type="l",xlab="datetime",ylab="Voltage")
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
plot(subpower$Time,as.numeric(as.character(subpower$Global_reactive_power)),type="l",xlab="datetime",ylab="Global_reactive_power")
})
|
##########################################
# Zero-inflated Negative Binomial (ZINB) #
##########################################
#####################################
# Install or Load Required Packages #
#####################################
if(! require("pacman")) install.packages("pacman", repos='http://cran.us.r-project.org')
suppressPackageStartupMessages(library("pacman"))
pacman::p_load('dplyr', 'pbapply', 'pscl', 'glmmTMB')
#########################
# Fit ZINB To A Dataset #
#########################
#' Fit a zero-inflated negative binomial (ZINB) model to every feature.
#'
#' For each column of `features`, regresses the feature on all metadata
#' columns: glmmTMB with a per-subject random intercept when IDs repeat,
#' pscl::zeroinfl otherwise. Per-feature coefficient tables are stacked and
#' multiplicity-adjusted.
#'
#' @param features samples x features table of counts.
#' @param metadata samples x covariates data frame; every column enters the model.
#' @param libSize per-sample library sizes.
#' @param ID per-sample subject IDs; repeats trigger the random-intercept model.
#' @param transformation must be 'NONE' (ZINB models counts directly).
#' @param MultTestCorrection p.adjust() method used for the q-values.
#' @return data frame with columns feature, metadata, coef, stderr, pval, qval,
#'   ordered by ascending qval; failed fits contribute NA rows.
fit.ZINB <- function(features,
                     metadata,
                     libSize,
                     ID,
                     transformation,
                     MultTestCorrection){
  #########################
  # Transformation if any #
  #########################
  if (transformation != 'NONE') stop('Transformation currently not supported for a default ZINB model. Use NONE.')
  #####################
  # Per-feature model #
  #####################
  # seq_len() guards the zero-feature edge case (1:0 would yield c(1, 0));
  # pbsapply(simplify = FALSE) behaves like lapply but shows a progress bar.
  paras <- pbapply::pbsapply(seq_len(ncol(features)), simplify = FALSE, function(x){
    ###############################
    # Extract features one by one #
    ###############################
    featuresVector <- features[, x]
    #################################
    # Create per-feature input data #
    #################################
    dat_sub <- data.frame(expr = as.numeric(featuresVector), metadata, libSize, ID)
    formula <- as.formula(paste("expr ~ ", paste(colnames(metadata), collapse = "+")))
    ##############################################
    # Automatic library size adjustment for GLMs #
    ##############################################
    # NOTE(review): '. ~ . - offset(log(libSize))' REMOVES a term the formula
    # never contained, so this update looks like a no-op; was
    # '+ offset(log(libSize))' intended? Confirm against the benchmark design.
    if(length(unique(libSize)) > 1){ # To prevent offsetting with TSS-normalized data
      formula <- update(formula, . ~ . - offset(log(libSize)))
    }
    if(length(ID) != length(unique(ID))){
      #######################
      # Random effect model #
      #######################
      formula <- update(formula, . ~ . + (1|ID))
      fit <- tryCatch({
        fit1 <- glmmTMB::glmmTMB(formula = formula,
                                 data = dat_sub,
                                 family = nbinom2,
                                 ziformula = ~1)
      }, error = function(err){
        # Retry once inside try() so a persistent failure yields a
        # "try-error" object instead of aborting the whole sweep.
        fit1 <- try({glmmTMB::glmmTMB(formula = formula,
                                      data = dat_sub,
                                      family = nbinom2,
                                      ziformula = ~1)})
        return(fit1)
      })
      ###################################
      # Summarize Coefficient Estimates #
      ###################################
      # Drop the intercept row and the z-value column from the
      # conditional-model coefficient table.
      if (!inherits(fit, "try-error")){
        para <- as.data.frame(coef(summary(fit))$cond)[-1, -3]
      } else{
        print(paste("Fitting problem for feature", x, "returning NA"))
        para <- as.data.frame(matrix(NA, nrow = ncol(metadata), ncol = 3))
      }
      colnames(para) <- c('coef', 'stderr', 'pval')
      para$metadata <- colnames(metadata)
      para$feature <- colnames(features)[x]
    }
    else{
      #######################
      # Fixed effects model #
      #######################
      fit <- tryCatch({
        fit1 <- pscl::zeroinfl(formula,
                               data = dat_sub,
                               dist = "negbin")
      }, error = function(err){
        fit1 <- try({pscl::zeroinfl(formula,
                                    data = dat_sub,
                                    dist = "negbin")})
        return(fit1)
      })
      ###################################
      # Summarize Coefficient Estimates #
      ###################################
      # Drop row 1 (intercept) and row ncol(metadata)+2 (presumably the
      # Log(theta) dispersion row in zeroinfl's negbin output -- confirm),
      # plus the z-value column.
      if (!inherits(fit, "try-error")){
        para <- as.data.frame(summary(fit)$coefficients$count)[-c(1, (ncol(metadata)+2)), -3]
      } else{
        print(paste("Fitting problem for feature", x, "returning NA"))
        para <- as.data.frame(matrix(NA, nrow = ncol(metadata), ncol = 3))
      }
      colnames(para) <- c('coef', 'stderr', 'pval')
      para$metadata <- colnames(metadata)
      para$feature <- colnames(features)[x]
    }
    return(para)
  })
  #################
  # Return output #
  #################
  # Stack per-feature tables, adjust p-values, and sort by ascending qval.
  paras <- do.call(rbind, paras)
  paras$qval <- as.numeric(p.adjust(paras$pval, method = MultTestCorrection))
  paras <- paras[order(paras$qval, decreasing = FALSE), ]
  paras <- dplyr::select(paras, c('feature', 'metadata'), everything())
  rownames(paras) <- NULL
  return(paras)
}
##################################
# Fit ZINB To A List of Datasets #
##################################
#' Apply fit.ZINB to a list of datasets in parallel.
#'
#' Each element of `physeq` must carry $features, $metadata, $libSize and $ID.
#' Datasets whose fit errors are dropped (.errorhandling = "remove").
#'
#' @param physeq list of dataset objects.
#' @param transformation forwarded to fit.ZINB (must be 'NONE').
#' @param MultTestCorrection p.adjust() method forwarded to fit.ZINB.
#' @return list of per-dataset association tables, each with a
#'   pairwiseAssociation label, a "_TP" tag on simulated true positives, and
#'   the wall-clock fitting time in minutes.
list.ZINB <- function(physeq, transformation = 'NONE', MultTestCorrection = "BH"){
  foreach(physeq = physeq,
          .export = c("fit.ZINB"),
          .packages = c("dplyr", "pbapply", "pscl", "glmmTMB"),
          .errorhandling = "remove") %dopar%
    {
      start.time <- Sys.time()
      features <- physeq$features
      metadata <- physeq$metadata
      libSize <- physeq$libSize
      ID <- physeq$ID
      DD <- fit.ZINB(features, metadata, libSize, ID, transformation, MultTestCorrection)
      # seq_len() (not 1:nrow) keeps the zero-row edge case well-defined;
      # paste0 replaces paste(..., sep = '').
      DD$pairwiseAssociation <- paste0('pairwiseAssociation', seq_len(nrow(DD)))
      # Tag rows where BOTH the metadata and the feature names end in "_TP"
      # (the simulation's true-positive markers).
      wh.TP <- intersect(grep("[[:print:]]+\\_TP$", DD$metadata), grep("[[:print:]]+\\_TP$", DD$feature))
      DD$pairwiseAssociation[wh.TP] <- paste0(DD$pairwiseAssociation[wh.TP], "_TP")
      DD <- dplyr::select(DD, c('pairwiseAssociation', 'feature', 'metadata'), everything())
      stop.time <- Sys.time()
      # Wall-clock fitting time in minutes, rounded to 3 decimals.
      time <- as.numeric(round(difftime(stop.time, start.time, units = "min"), 3), units = "mins")
      DD$time <- time
      return(DD)
    }
}
| /Library/run_ZINB.R | no_license | biobakery/maaslin2_benchmark | R | false | false | 5,650 | r | ##########################################
# Zero-inflated Negative Binomial (ZINB) #
##########################################
#####################################
# Install or Load Required Packages #
#####################################
if(! require("pacman")) install.packages("pacman", repos='http://cran.us.r-project.org')
suppressPackageStartupMessages(library("pacman"))
pacman::p_load('dplyr', 'pbapply', 'pscl', 'glmmTMB')
#########################
# Fit ZINB To A Dataset #
#########################
fit.ZINB <- function(features,
metadata,
libSize,
ID,
transformation,
MultTestCorrection){
#########################
# Transformation if any #
#########################
if (transformation!='NONE') stop ('Transformation currently not supported for a default ZINB model. Use NONE.')
#####################
# Per-feature model #
#####################
paras <- pbapply::pbsapply(1:ncol(features), simplify=FALSE, function(x){
###############################
# Extract features one by one #
###############################
featuresVector <- features[, x]
#################################
# Create per-feature input data #
#################################
dat_sub <- data.frame(expr = as.numeric(featuresVector), metadata, libSize, ID)
formula<-as.formula(paste("expr ~ ", paste(colnames(metadata), collapse= "+")))
##############################################
# Automatic library size adjustment for GLMs #
##############################################
if(length(unique(libSize)) > 1){ # To prevent offsetting with TSS-normalized data
formula<-update(formula, . ~ . - offset(log(libSize)))
}
#######################
# Random effect model #
#######################
if(!length(ID) == length(unique(ID))){
formula<-update(formula, . ~ . + (1|ID))
fit <- tryCatch({
fit1 <- glmmTMB::glmmTMB(formula = formula,
data = dat_sub,
family = nbinom2,
ziformula = ~1)
}, error=function(err){
fit1 <- try({glmmTMB::glmmTMB(formula = formula,
data = dat_sub,
family = nbinom2,
ziformula = ~1)})
return(fit1)
})
###################################
# Summarize Coefficient Estimates #
###################################
if (class(fit) != "try-error"){
para<-as.data.frame(coef(summary(fit))$cond)[-1,-3]
} else{
print(paste("Fitting problem for feature", x, "returning NA"))
para<- as.data.frame(matrix(NA, nrow=ncol(metadata), ncol=3))
}
colnames(para)<-c('coef', 'stderr', 'pval')
para$metadata<-colnames(metadata)
para$feature<-colnames(features)[x]
}
#######################
# Fixed effects model #
#######################
else{
fit <- tryCatch({
fit1 <- pscl::zeroinfl(formula,
data = dat_sub,
dist = "negbin")
}, error=function(err){
fit1 <- try({pscl::zeroinfl(formula,
data = dat_sub,
dist = "negbin")})
return(fit1)
})
###################################
# Summarize Coefficient Estimates #
###################################
if (class(fit) != "try-error"){
para<-as.data.frame(summary(fit)$coefficients$count)[-c(1, (ncol(metadata)+2)),-3]
} else{
print(paste("Fitting problem for feature", x, "returning NA"))
para<- as.data.frame(matrix(NA, nrow=ncol(metadata), ncol=3))
}
colnames(para)<-c('coef', 'stderr', 'pval')
para$metadata<-colnames(metadata)
para$feature<-colnames(features)[x]
}
return(para)
})
#################
# Return output #
#################
paras<-do.call(rbind, paras)
paras$qval<-as.numeric(p.adjust(paras$pval, method = MultTestCorrection))
paras<-paras[order(paras$qval, decreasing=FALSE),]
paras<-dplyr::select(paras, c('feature', 'metadata'), everything())
rownames(paras)<-NULL
return(paras)
}
##################################
# Fit ZINB To A List of Datasets #
##################################
## Run fit.ZINB over a list of simulated datasets in parallel (one foreach
## task per dataset); tasks that error are silently removed from the result.
## Returns a list of per-dataset association tables labelled with
## pairwiseAssociation ids, "_TP" markers, and fitting time in minutes.
list.ZINB <- function(physeq, transformation = 'NONE', MultTestCorrection = "BH"){
  foreach(physeq = physeq,
          .export = c("fit.ZINB"),
          .packages = c("dplyr", "pbapply", "pscl", "glmmTMB"),
          .errorhandling = "remove") %dopar%
    {
      t0 <- Sys.time()
      # Fit every feature of this dataset against its metadata.
      res <- fit.ZINB(physeq$features, physeq$metadata, physeq$libSize,
                      physeq$ID, transformation, MultTestCorrection)
      # Label each row, then append "_TP" where both the metadata and the
      # feature names carry the simulation's true-positive suffix.
      res$pairwiseAssociation <- paste('pairwiseAssociation', 1:nrow(res), sep = '')
      tp_rows <- intersect(grep("[[:print:]]+\\_TP$", res$metadata),
                           grep("[[:print:]]+\\_TP$", res$feature))
      res$pairwiseAssociation[tp_rows] <- paste0(res$pairwiseAssociation[tp_rows], "_TP")
      res <- dplyr::select(res, c('pairwiseAssociation', 'feature', 'metadata'), everything())
      # Record wall-clock fitting time in minutes (3 decimals).
      res$time <- as.numeric(round(difftime(Sys.time(), t0, units = "min"), 3), units = "mins")
      return(res)
    }
}
|
# Select up to three candidate models and write their predictions to CSV.
# NOTE(review): relies on globals defined earlier in the pipeline
# (m_list, chosen_min_test_err_idx, v_auc, x, out_file, demog_unlbl_x_hdl)
# and on helpers pred_model() / flta_prettify() -- confirm they are in scope.
#user can manually enter candidates_idx_array
# choose top 3 models
# default: the minimal-test-error models, ordered by decreasing AUC
if ( ! exists("candidates_idx_array", envir=.GlobalEnv) ) {
candidates_idx_array <- chosen_min_test_err_idx[order( v_auc[chosen_min_test_err_idx], decreasing=TRUE ) ]
}
for (chosen_index in candidates_idx_array[1:min(length(candidates_idx_array),3)]) { # iteration over chosen indices
e_chosen <- m_list[[chosen_index]]
m_chosen <- e_chosen$model
#diagnostics
tr_str <- paste(paste( format(Sys.time(), "%H:%M:%S"),"e_chosen:",chosen_index),
paste("chosen:test_err ", flta_prettify(e_chosen$test.pred.err)),
paste("pred:test_tpr ", flta_prettify(e_chosen$test.pred.tpr)),
paste("pred:test_fpr ", flta_prettify(e_chosen$test.pred.fpr)),
paste("pred:test_tpr_by_fpr ", flta_prettify(e_chosen$test.pred.tpr/e_chosen$test.pred.fpr)),
paste("pred:test_auc", flta_prettify(e_chosen$test.pred.auc)),
sep="\n"
)
# log the diagnostics to screen and file
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
# score the unlabeled data with the chosen model
p_df <- pred_model(m_chosen, x)
# done with prediction
tr_str <- paste( format(Sys.time(), "%H:%M:%S"), "done with modeling .. ")
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
# create an intermediate data-frame, aggregate by uid (on FUN=mean)
f_df <- data.frame(uid=demog_unlbl_x_hdl$uid, pred0=p_df$pred.raw[,1], pred1=p_df$pred.raw[,2])
final_df <- aggregate( f_df, by=list( f_df$uid), FUN="mean")
#
# file name encodes PID, model index, and AUC (scaled by 1000)
predfile <- paste("pred_", Sys.getpid(), "_", chosen_index, "_a", flta_prettify((e_chosen$test.pred.auc)*1000), ".csv", sep="")
write.csv( final_df[,which(colnames(final_df) %in% c("uid", "pred1")) ], file=predfile, row.names=FALSE )
tr_str <- paste("writing out predictions to file ", predfile)
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
} # iteration over chosen indices
| /kaggle_rapleaf_hackerdojo/naive_bayes/res_pred_and_save.r | no_license | sonalranjan/analytics_machine_learning | R | false | false | 2,061 | r |
# Select up to three candidate models and write their predictions to CSV.
# NOTE(review): relies on globals defined earlier in the pipeline
# (m_list, chosen_min_test_err_idx, v_auc, x, out_file, demog_unlbl_x_hdl)
# and on helpers pred_model() / flta_prettify() -- confirm they are in scope.
#user can manually enter candidates_idx_array
# choose top 3 models
# default: the minimal-test-error models, ordered by decreasing AUC
if ( ! exists("candidates_idx_array", envir=.GlobalEnv) ) {
candidates_idx_array <- chosen_min_test_err_idx[order( v_auc[chosen_min_test_err_idx], decreasing=TRUE ) ]
}
for (chosen_index in candidates_idx_array[1:min(length(candidates_idx_array),3)]) { # iteration over chosen indices
e_chosen <- m_list[[chosen_index]]
m_chosen <- e_chosen$model
#diagnostics
tr_str <- paste(paste( format(Sys.time(), "%H:%M:%S"),"e_chosen:",chosen_index),
paste("chosen:test_err ", flta_prettify(e_chosen$test.pred.err)),
paste("pred:test_tpr ", flta_prettify(e_chosen$test.pred.tpr)),
paste("pred:test_fpr ", flta_prettify(e_chosen$test.pred.fpr)),
paste("pred:test_tpr_by_fpr ", flta_prettify(e_chosen$test.pred.tpr/e_chosen$test.pred.fpr)),
paste("pred:test_auc", flta_prettify(e_chosen$test.pred.auc)),
sep="\n"
)
# log the diagnostics to screen and file
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
# score the unlabeled data with the chosen model
p_df <- pred_model(m_chosen, x)
# done with prediction
tr_str <- paste( format(Sys.time(), "%H:%M:%S"), "done with modeling .. ")
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
# create an intermediate data-frame, aggregate by uid (on FUN=mean)
f_df <- data.frame(uid=demog_unlbl_x_hdl$uid, pred0=p_df$pred.raw[,1], pred1=p_df$pred.raw[,2])
final_df <- aggregate( f_df, by=list( f_df$uid), FUN="mean")
#
# file name encodes PID, model index, and AUC (scaled by 1000)
predfile <- paste("pred_", Sys.getpid(), "_", chosen_index, "_a", flta_prettify((e_chosen$test.pred.auc)*1000), ".csv", sep="")
write.csv( final_df[,which(colnames(final_df) %in% c("uid", "pred1")) ], file=predfile, row.names=FALSE )
tr_str <- paste("writing out predictions to file ", predfile)
cat( tr_str, sep="\n")
cat( tr_str, sep="\n", file=out_file, append=TRUE)
} # iteration over chosen indices
|
# check if data is whitened or of class "coords"
# Ensure the input data is whitened (identity covariance).
#
# x: either a jvcoords "coords" object (possibly already whitened) or a plain
#    data matrix.
# verbose: if TRUE, report when whitening has to be (re)computed.
# Returns: list(xw = the "coords" object, z = the whitened score matrix).
whitenCheck <- function(x, verbose = FALSE) {
  whitened <- FALSE
  # 1) if x is a 'coords' object, test whether its scores are actually
  #    whitened: cov(z) must equal the identity up to a small tolerance.
  #    inherits() is used instead of class(x) == "coords", which fails
  #    (condition of length > 1) when the object carries multiple classes.
  if (inherits(x, "coords")) {
    xw <- x
    z <- x$y
    zCov <- cov(z)
    zCovZero <- zCov - diag(nrow(zCov))
    covDelta <- 1e-10
    if (all(abs(as.vector(zCovZero)) < covDelta)) {
      whitened <- TRUE
    }
  }
  if (!whitened) {
    if (inherits(x, "coords")) {
      # 2) 'coords' object, but scores are not white: re-whiten the scores
      if (verbose) {
        cat("Data 'x' of type 'coords' but not whitened: whitening using jvcoords\n")
      }
      xw <- jvcoords::whiten(x$y, compute.scores = TRUE)
      z <- xw$y
    } else {
      # 3) plain matrix: whiten it from scratch
      if (verbose) {
        cat("Data 'x' not whitened: whitening using jvcoords\n")
      }
      xw <- jvcoords::whiten(x, compute.scores = TRUE)
      z <- xw$y
    }
  }
  list(xw = xw, z = z)
}
# fastICA initialisation
# find a w using the fastICA objective function
# use this alongside random directions
# Seed direction via the fastICA log-cosh contrast.
#
# Draws one random unit vector in the (p - k + 1)-dimensional search space and
# maximises the squared difference between the empirical log-cosh moment of
# the projected data and that of the Gaussian reference sample norm.sampl.
# The optimised direction is returned together with its m-spacing entropy.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# m: spacing parameter for mSpacingEntropy
# k: index of the loading currently being sought
# norm.sampl: sample drawn from a standard normal (Gaussian reference)
# Returns: list(dir = unit direction in the search space,
#               entropy = its m-spacing entropy)
fastICAInitialisation <- function(z, IC, m, k, norm.sampl) {
  p <- ncol(z)
  searchDim <- p - k + 1
  # the Gaussian reference moment is constant in w, so compute it once
  refMoment <- mean(log(cosh(norm.sampl)))
  # negated squared fastICA contrast, as a function of the search direction
  contrast <- function(w) {
    w <- w / sqrt(sum(w^2))
    projDir <- IC %*% c(rep(0, k - 1), w)
    dataMoment <- mean(log(cosh(z %*% projDir)))
    -(dataMoment - refMoment)^2
  }
  # random unit starting point
  w0 <- rnorm(searchDim)
  w0 <- w0 / sqrt(sum(w0^2))
  opt <- optim(w0, contrast, method = "BFGS")
  best <- opt$par / sqrt(sum(opt$par^2))
  # entropy of the data projected onto the optimised direction
  # (mSpacingEntropy expects one trial per column, hence the transpose)
  projDir <- IC %*% c(rep(0, k - 1), best)
  ent <- mSpacingEntropy(t(z %*% projDir), m = m)
  list(dir = best, entropy = ent)
}
# produce random directions, and choose the 'out' best directions
# best directions are those that minimise entropy
# Generate random unit directions in the search space and keep the best ones.
#
# Directions are scored by the m-spacing entropy of the data projected onto
# them; lower entropy is better (more non-Gaussian).
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being sought (search space has dimension p - k + 1)
# m: spacing parameter for mSpacingEntropy
# iter: number of random directions to draw
# out: (optional) number of best directions to return; capped at iter
# Returns: list(entropy = sorted entropies,
#               dirs = matrix with one direction per row, named dir1, dir2, ...)
randomSearch <- function(z, IC, k, m, iter, out) {
  p <- ncol(IC)
  r <- p - k + 1 # the dimension of the search space
  trialsMat <- matrix(rnorm(r * iter), iter, r)
  trialsMat <- trialsMat / sqrt(rowSums(trialsMat^2))
  trialsOrigSpace <- trialsMat %*% t(IC[, k:p, drop = FALSE])
  # each column corresponds to a trial s.t.
  # mSpacingEntropy function input is correct
  trialsProj <- trialsOrigSpace %*% t(z[, 1:p, drop = FALSE])
  entr <- mSpacingEntropy(trialsProj, m = m)
  dirTable <- cbind(entr, trialsMat)
  # arrange in increasing order of entropy; drop = FALSE keeps the matrix
  # shape when only a single row remains (iter == 1 or out == 1), which
  # otherwise broke the column indexing below
  dirTable <- dirTable[order(dirTable[, 1]), , drop = FALSE]
  namesW <- paste0("dir", seq_len(iter))
  if (!missing(out)) {
    if (out > iter) {
      warning("out > iter: have set out = iter")
      out <- iter
    }
    dirTable <- dirTable[seq_len(out), , drop = FALSE]
    namesW <- paste0("dir", seq_len(out))
  }
  entropy <- dirTable[, 1]
  dirs <- dirTable[, -1, drop = FALSE]
  rownames(dirs) <- namesW
  colnames(dirs) <- NULL
  output <- list()
  output$entropy <- entropy
  output$dirs <- dirs
  output
}
# put random directions into clusters
# uses divisive kmeans clustering from clusterProjDivisive
# out: best direction from each cluster
# Pick the best (lowest-entropy) direction from each cluster of random
# starting directions.
#
# z: whitened data matrix -- NOTE(review): accepted but unused in this body
# IC: current orthogonal basis -- only ncol(IC) is read, and p itself is unused
# k, m: loading index / spacing parameter (passed for interface symmetry)
# dirs: list(entropy, dirs) as returned by randomSearch
# kmean.tol, kmean.iter: tolerance and iteration cap for clusterProjDivisive
# Returns: list(entropy = best entropy per cluster,
#               directions = matrix with one best direction per column)
clusterRandomSearch <- function(z, IC, k, m, dirs, kmean.tol,
kmean.iter) {
p <- ncol(IC)
entropy <- dirs$entropy
dirs <- dirs$dirs
# K-Means Cluster Analysis: Divisive
c <- clusterProjDivisive(X=dirs, tol=kmean.tol, iter.max=kmean.iter)
clusters <- max(c$c)
# append cluster assignment & put into list
res <- list(entropy = numeric(0))
# column 1: cluster id, column 2: entropy, remaining columns: the direction
dirsClusterAppend <- cbind(c$c, entropy, dirs)
for(i in 1:clusters) {
whichCluster <- which(dirsClusterAppend[,1] == i)
entropyCluster <- dirsClusterAppend[whichCluster, 2]
entropyMin <- which.min(entropyCluster)
res$entropy <- c(res$entropy, entropyCluster[entropyMin])
#res[[i]]$entropy <- entropyMin[entropyMin]
directionsCluster <- dirsClusterAppend[whichCluster, c(-1, -2),
drop=FALSE]
# cbind(NULL, x) is valid, so res$directions grows one column per cluster
res$directions <- cbind(res$directions, directionsCluster[entropyMin, ])
#res[[i]]$direction <- directionsCluster[entropyMin,]
}
res
}
# optimise each direction
# here dir is a single direction (vector)
# cluster arg only used for cat() in clusterICA
# Minimise the m-spacing entropy of z projected onto a single direction.
#
# The direction w lives in the (p - k + 1)-dimensional search space spanned by
# the last p - k + 1 columns of IC; it is mapped back to the original space as
# IC %*% c(0, ..., 0, w) before projecting z.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being optimised
# m: spacing parameter for mSpacingEntropy
# dirs: starting direction (vector in the search space)
# maxit: maximum number of optimiser iterations
# cluster: cluster index, used only in warning messages (NA when called
#          from ensureOrder)
# opt.method: optim() method; "L-BFGS-B" additionally applies box bounds
# Returns: list(entr = minimised entropy, dirs = optimised unit direction)
.optimiseDirection <- function(z, IC, k, m, dirs, maxit=1000,
                               cluster, opt.method="BFGS") {
  # objective: entropy of z projected onto the (normalised) direction
  entropyFn <- function(w) {
    w <- w / sqrt(sum(w^2))
    wOrigSpace <- IC %*% c(rep(0, k-1), w)
    zProj <- t(z %*% wOrigSpace)
    mSpacingEntropy(zProj, m = m)
  }
  # gradient of the objective, mapped into the search space via the chain rule
  entropyGr <- function(w) {
    w <- w / sqrt(sum(w^2))
    wOrigSpace <- IC %*% c(rep(0, k-1), w)
    zProj <- t(z %*% wOrigSpace)
    entropyGradOrigSpace <- optimEntropyDeriv(xProj=zProj, x=z, m=m)
    #TODO: is this correct?
    if(k > 1) {
      # use chain rule to obtain \delta w \in \R^r
      r <- length(w)
      zeroMatrixTop <- matrix(0, nrow = (k-1), ncol = r)
      paddedI <- rbind(zeroMatrixTop, diag(r))
      # with u = IC %*% (rep(0, k-1), w), want du/dw
      dudw <- IC %*% paddedI
      entropyGradOrigSpace %*% dudw
    } else {
      entropyGradOrigSpace
    }
  }
  # The two branches previously duplicated the fn/gr closures verbatim;
  # only the box bounds differ.
  # NOTE(review): for "L-BFGS-B" the upper bound 0.5 * (log(2 * pi) + 1)
  # (the maximum entropy of a standardised variable) is applied to the
  # *parameters*, not the objective; preserved as-is, but confirm intent.
  if(opt.method == "L-BFGS-B") {
    opt <- optim(par = dirs, fn = entropyFn, gr = entropyGr,
                 lower=-Inf, upper=(0.5 * (log(2 * pi) + 1)),
                 method = opt.method, control = list(maxit = maxit, trace=0))
  } else {
    opt <- optim(par = dirs, fn = entropyFn, gr = entropyGr,
                 method = opt.method, control = list(maxit = maxit, trace=0))
  }
  # report optimiser convergence problems without aborting
  if (opt$convergence == 1) {
    if (is.na(cluster)) { # cluster = NA when ensureOrder run
      warning("In loading ", k,
              " optimisation did not converge, consider increasing maxit \n")
    } else {
      warning("In loading ", k, ", cluster ", cluster,
              " optimisation did not converge, consider increasing maxit \n")
    }
  } else if (opt$convergence != 0) {
    if (is.na(cluster)) { # cluster = NA when ensureOrder run
      warning("In loading ", k,
              " optimisation did not converge (error ", opt$convergence, ") \n")
    } else {
      warning("In loading ", k, ", cluster ", cluster,
              " optimisation did not converge (error ", opt$convergence, ") \n")
    }
  }
  entrTmp <- opt$value
  dirTmp <- opt$par
  dirTmp <- dirTmp / sqrt(sum(dirTmp^2))
  # output
  output <- list()
  output$entr <- entrTmp
  output$dirs <- dirTmp
  output
}
# create a single ICA loading from clustered random projections
# input is from clusterRandomSearch
# Optimise every clustered starting direction and return the best result.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being sought
# m: spacing parameter for mSpacingEntropy
# clustered.dirs: matrix with one starting direction per column (a bare
#                 vector is treated as a single column)
# maxit, opt.method: passed through to .optimiseDirection
# verbose: if TRUE, report progress
# Returns: list(clusterNum = index of the winning cluster,
#               optimumEntropy = its minimised entropy,
#               optimumDirection = its optimised direction)
optimiseAll <- function(z, IC, k, m, clustered.dirs, maxit=1000,
                        opt.method="BFGS", verbose=FALSE) {
  p <- ncol(IC)
  if (is.vector(clustered.dirs)) {
    clustered.dirs <- matrix(clustered.dirs, ncol = 1)
  }
  clusters <- ncol(clustered.dirs)
  if (verbose) {
    cat("//// Optimising direction of projection on ",
        clusters, " clusters \n")
  }
  # one row per cluster: first column entropy, remaining columns the direction
  # (unused locals n and dirOptMany removed)
  dirOpt <- matrix(nrow = clusters, ncol = (p - k + 1 + 1))
  for (i in seq_len(clusters)) {
    if (verbose) {
      cat("//// Optimising cluster ", i, "\n")
    }
    dirOptTmp <- .optimiseDirection(z = z, IC = IC, dirs = clustered.dirs[, i],
                                    k = k, m = m, maxit = maxit,
                                    cluster = i, opt.method = opt.method)
    dirOpt[i, ] <- c(dirOptTmp$entr, dirOptTmp$dirs)
  }
  clusterNum <- which.min(dirOpt[, 1])
  output <- list()
  output$clusterNum <- clusterNum
  output$optimumEntropy <- dirOpt[clusterNum, 1]
  output$optimumDirection <- dirOpt[clusterNum, -1]
  output
}
# After optimising a new loading, check whether it beats an earlier loading
# (has lower entropy than entr[k] for some k); if so, re-optimise it within
# that earlier, larger search space so the loadings stay ordered by entropy.
#
# z, IC, p, m: data, basis, total dimension, spacing parameter
# best.dir / best.entr: newly found direction and its entropy
# entr: entropies of the loadings found so far
# maxit, opt.method, verbose: passed through to .optimiseDirection
# Returns: list(newDir, newEntr, entr (truncated to 1:k), newK, newR)
ensureOrder <- function(z, IC, p, m, best.dir, best.entr, entr,
maxit, opt.method, verbose) {
# first position whose entropy the new direction beats
k.check <- min(which(best.entr < entr))
counter <- 0
while(TRUE) {
k <- k.check
verboseFunction(which.one = 5, verbose = verbose, k = k)
lenBestDir <- length(best.dir)
r <- (p - k + 1)
# pad the direction with leading zeros so it lives in the k-th search space
bestDirOrigSpace <- c(rep(0, times = (r - lenBestDir)), best.dir)
# NOTE(review): trialsOrigSpace is computed but never used -- dead code?
trialsOrigSpace <- bestDirOrigSpace %*% t(IC[,k:p])
icaLoading <- .optimiseDirection(z = z, IC = IC, dirs = bestDirOrigSpace,
k = k, m = m, maxit = maxit,
cluster = NA, opt.method = opt.method)
newDir <- icaLoading$dirs
newEntr <- icaLoading$entr
# re-evaluate which position the re-optimised direction belongs to
k.check <- min(which(newEntr < entr))
if (k.check == k) break
if (k.check > k) {
# allow one retry at a shifted position before giving up
# NOTE(review): the k.check + 1 bump and one-shot counter are
# undocumented -- confirm this is the intended behaviour
if (counter == 1) break
k.check <- k.check + 1
counter <- 1
}
}
entr <- entr[1:k]
res <- list(newDir = newDir, newEntr = newEntr,
entr = entr, newK = k, newR = r)
res
}
# Update the basis IC with a Householder reflection that maps the first
# canonical basis vector of the search space onto best.dir.  Only the last
# p - k + 1 columns of IC (the current search space) are rotated.
#
# IC: orthogonal basis (p x p)
# best.dir: unit direction in the search space (length r)
# r: dimension of the search space (p - k + 1)
# k: index of the current loading
# p: total dimension
# Returns: the updated IC matrix.
householderTransform <- function(IC, best.dir, r, k, p) {
  firstBasis <- c(1, rep(0, r - 1))
  # reflect towards +/- e1 according to the sign of the leading coordinate
  # (standard sign choice when forming the Householder vector)
  reflector <- best.dir - sign(best.dir[1]) * firstBasis
  reflector <- reflector / sqrt(sum(reflector^2))
  reflection <- diag(r) - 2 * tcrossprod(reflector)
  IC[, k:p] <- IC[, k:p, drop = FALSE] %*% reflection
  IC
}
# Centralised progress reporting for the clusterICA pipeline.
#
# which.one selects the message template (1-6); the remaining arguments
# supply whatever values the selected template reads.  Prints nothing
# unless verbose is TRUE.
verboseFunction <- function(which.one, verbose, k=NA, rand.iter=NA, p.ica=NA,
dir=NA, clustered.dirs=NA, loading=NA) {
if (verbose == TRUE) {
if (which.one == 1) {
# start of the search for loading k
cat("optimising direction", k, "out of", p.ica, "\n")
cat("// Finding ", rand.iter, "random starting points", "\n")
}
if (which.one == 2) {
# random search finished
cat("/// Found ", length(dir$entropy), " starting directions", "\n",
sep="")
cat("/// Sorting these into clusters \n")
}
if (which.one == 3) {
# clustering finished
numClusters <- length(clustered.dirs$entropy)
cat("//// Sorted into ", numClusters,
" clusters", "\n", sep="")
entrPreOptim <- clustered.dirs$entropy
cat("//// Best pre-optim entropy = ", min(entrPreOptim), "\n", sep="")
cat("//// Optimising ", numClusters, " clusters", "\n", sep="")
}
if (which.one == 4) {
# per-loading optimisation finished
cat("//// Optimised direction has entropy ",
loading$optimumEntropy, "\n", sep="")
}
if (which.one == 5) {
# a new direction displaces an earlier loading (see ensureOrder)
cat("///// Current projection better than ", k,
"th projection", "\n")
cat("///// Replacing ", k, "th projection", "\n")
}
if (which.one == 6) {
cat("///// Householder reflection\n\n")
}
}
}
# Print method for "clusterICA" objects: reports how many loadings were
# found, their length, and the entropy of the best projection.
# Returns its argument invisibly, as print methods should.
#' @export
print.clusterICA <- function(x, ...) {
  n_loadings <- ncol(x$r)
  # renamed from 'length' to avoid shadowing base::length
  loading_len <- nrow(x$r)
  entr1 <- round(x$entropy[1], digits = 5)
  cat("Cluster ICA: ", n_loadings, " loading(s) found of length ", loading_len,
      ". Best projection has entropy ", entr1, ".\n", sep = "")
  invisible(x)
}
| /R/clusterICA_util.R | no_license | pws3141/clusterICA | R | false | false | 14,315 | r | # check if data is whitened or of class "coords"
# Ensure the input data is whitened (identity covariance).
#
# x: either a jvcoords "coords" object (possibly already whitened) or a plain
#    data matrix.
# verbose: if TRUE, report when whitening has to be (re)computed.
# Returns: list(xw = the "coords" object, z = the whitened score matrix).
whitenCheck <- function(x, verbose = FALSE) {
  whitened <- FALSE
  # 1) if x is a 'coords' object, test whether its scores are actually
  #    whitened: cov(z) must equal the identity up to a small tolerance.
  #    inherits() is used instead of class(x) == "coords", which fails
  #    (condition of length > 1) when the object carries multiple classes.
  if (inherits(x, "coords")) {
    xw <- x
    z <- x$y
    zCov <- cov(z)
    zCovZero <- zCov - diag(nrow(zCov))
    covDelta <- 1e-10
    if (all(abs(as.vector(zCovZero)) < covDelta)) {
      whitened <- TRUE
    }
  }
  if (!whitened) {
    if (inherits(x, "coords")) {
      # 2) 'coords' object, but scores are not white: re-whiten the scores
      if (verbose) {
        cat("Data 'x' of type 'coords' but not whitened: whitening using jvcoords\n")
      }
      xw <- jvcoords::whiten(x$y, compute.scores = TRUE)
      z <- xw$y
    } else {
      # 3) plain matrix: whiten it from scratch
      if (verbose) {
        cat("Data 'x' not whitened: whitening using jvcoords\n")
      }
      xw <- jvcoords::whiten(x, compute.scores = TRUE)
      z <- xw$y
    }
  }
  list(xw = xw, z = z)
}
# fastICA initialisation
# find a w using the fastICA objective function
# use this alongside random directions
# Seed direction via the fastICA log-cosh contrast.
#
# Draws one random unit vector in the (p - k + 1)-dimensional search space and
# maximises the squared difference between the empirical log-cosh moment of
# the projected data and that of the Gaussian reference sample norm.sampl.
# The optimised direction is returned together with its m-spacing entropy.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# m: spacing parameter for mSpacingEntropy
# k: index of the loading currently being sought
# norm.sampl: sample drawn from a standard normal (Gaussian reference)
# Returns: list(dir = unit direction in the search space,
#               entropy = its m-spacing entropy)
fastICAInitialisation <- function(z, IC, m, k, norm.sampl) {
  p <- ncol(z)
  searchDim <- p - k + 1
  # the Gaussian reference moment is constant in w, so compute it once
  refMoment <- mean(log(cosh(norm.sampl)))
  # negated squared fastICA contrast, as a function of the search direction
  contrast <- function(w) {
    w <- w / sqrt(sum(w^2))
    projDir <- IC %*% c(rep(0, k - 1), w)
    dataMoment <- mean(log(cosh(z %*% projDir)))
    -(dataMoment - refMoment)^2
  }
  # random unit starting point
  w0 <- rnorm(searchDim)
  w0 <- w0 / sqrt(sum(w0^2))
  opt <- optim(w0, contrast, method = "BFGS")
  best <- opt$par / sqrt(sum(opt$par^2))
  # entropy of the data projected onto the optimised direction
  # (mSpacingEntropy expects one trial per column, hence the transpose)
  projDir <- IC %*% c(rep(0, k - 1), best)
  ent <- mSpacingEntropy(t(z %*% projDir), m = m)
  list(dir = best, entropy = ent)
}
# produce random directions, and choose the 'out' best directions
# best directions are those that minimise entropy
# Generate random unit directions in the search space and keep the best ones.
#
# Directions are scored by the m-spacing entropy of the data projected onto
# them; lower entropy is better (more non-Gaussian).
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being sought (search space has dimension p - k + 1)
# m: spacing parameter for mSpacingEntropy
# iter: number of random directions to draw
# out: (optional) number of best directions to return; capped at iter
# Returns: list(entropy = sorted entropies,
#               dirs = matrix with one direction per row, named dir1, dir2, ...)
randomSearch <- function(z, IC, k, m, iter, out) {
  p <- ncol(IC)
  r <- p - k + 1 # the dimension of the search space
  trialsMat <- matrix(rnorm(r * iter), iter, r)
  trialsMat <- trialsMat / sqrt(rowSums(trialsMat^2))
  trialsOrigSpace <- trialsMat %*% t(IC[, k:p, drop = FALSE])
  # each column corresponds to a trial s.t.
  # mSpacingEntropy function input is correct
  trialsProj <- trialsOrigSpace %*% t(z[, 1:p, drop = FALSE])
  entr <- mSpacingEntropy(trialsProj, m = m)
  dirTable <- cbind(entr, trialsMat)
  # arrange in increasing order of entropy; drop = FALSE keeps the matrix
  # shape when only a single row remains (iter == 1 or out == 1), which
  # otherwise broke the column indexing below
  dirTable <- dirTable[order(dirTable[, 1]), , drop = FALSE]
  namesW <- paste0("dir", seq_len(iter))
  if (!missing(out)) {
    if (out > iter) {
      warning("out > iter: have set out = iter")
      out <- iter
    }
    dirTable <- dirTable[seq_len(out), , drop = FALSE]
    namesW <- paste0("dir", seq_len(out))
  }
  entropy <- dirTable[, 1]
  dirs <- dirTable[, -1, drop = FALSE]
  rownames(dirs) <- namesW
  colnames(dirs) <- NULL
  output <- list()
  output$entropy <- entropy
  output$dirs <- dirs
  output
}
# put random directions into clusters
# uses divisive kmeans clustering from clusterProjDivisive
# out: best direction from each cluster
# Pick the best (lowest-entropy) direction from each cluster of random
# starting directions.
#
# z: whitened data matrix -- NOTE(review): accepted but unused in this body
# IC: current orthogonal basis -- only ncol(IC) is read, and p itself is unused
# k, m: loading index / spacing parameter (passed for interface symmetry)
# dirs: list(entropy, dirs) as returned by randomSearch
# kmean.tol, kmean.iter: tolerance and iteration cap for clusterProjDivisive
# Returns: list(entropy = best entropy per cluster,
#               directions = matrix with one best direction per column)
clusterRandomSearch <- function(z, IC, k, m, dirs, kmean.tol,
kmean.iter) {
p <- ncol(IC)
entropy <- dirs$entropy
dirs <- dirs$dirs
# K-Means Cluster Analysis: Divisive
c <- clusterProjDivisive(X=dirs, tol=kmean.tol, iter.max=kmean.iter)
clusters <- max(c$c)
# append cluster assignment & put into list
res <- list(entropy = numeric(0))
# column 1: cluster id, column 2: entropy, remaining columns: the direction
dirsClusterAppend <- cbind(c$c, entropy, dirs)
for(i in 1:clusters) {
whichCluster <- which(dirsClusterAppend[,1] == i)
entropyCluster <- dirsClusterAppend[whichCluster, 2]
entropyMin <- which.min(entropyCluster)
res$entropy <- c(res$entropy, entropyCluster[entropyMin])
#res[[i]]$entropy <- entropyMin[entropyMin]
directionsCluster <- dirsClusterAppend[whichCluster, c(-1, -2),
drop=FALSE]
# cbind(NULL, x) is valid, so res$directions grows one column per cluster
res$directions <- cbind(res$directions, directionsCluster[entropyMin, ])
#res[[i]]$direction <- directionsCluster[entropyMin,]
}
res
}
# optimise each direction
# here dir is a single direction (vector)
# cluster arg only used for cat() in clusterICA
# Minimise the m-spacing entropy of z projected onto a single direction.
#
# The direction w lives in the (p - k + 1)-dimensional search space spanned by
# the last p - k + 1 columns of IC; it is mapped back to the original space as
# IC %*% c(0, ..., 0, w) before projecting z.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being optimised
# m: spacing parameter for mSpacingEntropy
# dirs: starting direction (vector in the search space)
# maxit: maximum number of optimiser iterations
# cluster: cluster index, used only in warning messages (NA when called
#          from ensureOrder)
# opt.method: optim() method; "L-BFGS-B" additionally applies box bounds
# Returns: list(entr = minimised entropy, dirs = optimised unit direction)
.optimiseDirection <- function(z, IC, k, m, dirs, maxit=1000,
                               cluster, opt.method="BFGS") {
  # objective: entropy of z projected onto the (normalised) direction
  entropyFn <- function(w) {
    w <- w / sqrt(sum(w^2))
    wOrigSpace <- IC %*% c(rep(0, k-1), w)
    zProj <- t(z %*% wOrigSpace)
    mSpacingEntropy(zProj, m = m)
  }
  # gradient of the objective, mapped into the search space via the chain rule
  entropyGr <- function(w) {
    w <- w / sqrt(sum(w^2))
    wOrigSpace <- IC %*% c(rep(0, k-1), w)
    zProj <- t(z %*% wOrigSpace)
    entropyGradOrigSpace <- optimEntropyDeriv(xProj=zProj, x=z, m=m)
    #TODO: is this correct?
    if(k > 1) {
      # use chain rule to obtain \delta w \in \R^r
      r <- length(w)
      zeroMatrixTop <- matrix(0, nrow = (k-1), ncol = r)
      paddedI <- rbind(zeroMatrixTop, diag(r))
      # with u = IC %*% (rep(0, k-1), w), want du/dw
      dudw <- IC %*% paddedI
      entropyGradOrigSpace %*% dudw
    } else {
      entropyGradOrigSpace
    }
  }
  # The two branches previously duplicated the fn/gr closures verbatim;
  # only the box bounds differ.
  # NOTE(review): for "L-BFGS-B" the upper bound 0.5 * (log(2 * pi) + 1)
  # (the maximum entropy of a standardised variable) is applied to the
  # *parameters*, not the objective; preserved as-is, but confirm intent.
  if(opt.method == "L-BFGS-B") {
    opt <- optim(par = dirs, fn = entropyFn, gr = entropyGr,
                 lower=-Inf, upper=(0.5 * (log(2 * pi) + 1)),
                 method = opt.method, control = list(maxit = maxit, trace=0))
  } else {
    opt <- optim(par = dirs, fn = entropyFn, gr = entropyGr,
                 method = opt.method, control = list(maxit = maxit, trace=0))
  }
  # report optimiser convergence problems without aborting
  if (opt$convergence == 1) {
    if (is.na(cluster)) { # cluster = NA when ensureOrder run
      warning("In loading ", k,
              " optimisation did not converge, consider increasing maxit \n")
    } else {
      warning("In loading ", k, ", cluster ", cluster,
              " optimisation did not converge, consider increasing maxit \n")
    }
  } else if (opt$convergence != 0) {
    if (is.na(cluster)) { # cluster = NA when ensureOrder run
      warning("In loading ", k,
              " optimisation did not converge (error ", opt$convergence, ") \n")
    } else {
      warning("In loading ", k, ", cluster ", cluster,
              " optimisation did not converge (error ", opt$convergence, ") \n")
    }
  }
  entrTmp <- opt$value
  dirTmp <- opt$par
  dirTmp <- dirTmp / sqrt(sum(dirTmp^2))
  # output
  output <- list()
  output$entr <- entrTmp
  output$dirs <- dirTmp
  output
}
# create a single ICA loading from clustered random projections
# input is from clusterRandomSearch
# Optimise every clustered starting direction and return the best result.
#
# z: whitened data matrix (n x p)
# IC: current orthogonal basis (p x p)
# k: index of the loading being sought
# m: spacing parameter for mSpacingEntropy
# clustered.dirs: matrix with one starting direction per column (a bare
#                 vector is treated as a single column)
# maxit, opt.method: passed through to .optimiseDirection
# verbose: if TRUE, report progress
# Returns: list(clusterNum = index of the winning cluster,
#               optimumEntropy = its minimised entropy,
#               optimumDirection = its optimised direction)
optimiseAll <- function(z, IC, k, m, clustered.dirs, maxit=1000,
                        opt.method="BFGS", verbose=FALSE) {
  p <- ncol(IC)
  if (is.vector(clustered.dirs)) {
    clustered.dirs <- matrix(clustered.dirs, ncol = 1)
  }
  clusters <- ncol(clustered.dirs)
  if (verbose) {
    cat("//// Optimising direction of projection on ",
        clusters, " clusters \n")
  }
  # one row per cluster: first column entropy, remaining columns the direction
  # (unused locals n and dirOptMany removed)
  dirOpt <- matrix(nrow = clusters, ncol = (p - k + 1 + 1))
  for (i in seq_len(clusters)) {
    if (verbose) {
      cat("//// Optimising cluster ", i, "\n")
    }
    dirOptTmp <- .optimiseDirection(z = z, IC = IC, dirs = clustered.dirs[, i],
                                    k = k, m = m, maxit = maxit,
                                    cluster = i, opt.method = opt.method)
    dirOpt[i, ] <- c(dirOptTmp$entr, dirOptTmp$dirs)
  }
  clusterNum <- which.min(dirOpt[, 1])
  output <- list()
  output$clusterNum <- clusterNum
  output$optimumEntropy <- dirOpt[clusterNum, 1]
  output$optimumDirection <- dirOpt[clusterNum, -1]
  output
}
# After optimising a new loading, check whether it beats an earlier loading
# (has lower entropy than entr[k] for some k); if so, re-optimise it within
# that earlier, larger search space so the loadings stay ordered by entropy.
#
# z, IC, p, m: data, basis, total dimension, spacing parameter
# best.dir / best.entr: newly found direction and its entropy
# entr: entropies of the loadings found so far
# maxit, opt.method, verbose: passed through to .optimiseDirection
# Returns: list(newDir, newEntr, entr (truncated to 1:k), newK, newR)
ensureOrder <- function(z, IC, p, m, best.dir, best.entr, entr,
maxit, opt.method, verbose) {
# first position whose entropy the new direction beats
k.check <- min(which(best.entr < entr))
counter <- 0
while(TRUE) {
k <- k.check
verboseFunction(which.one = 5, verbose = verbose, k = k)
lenBestDir <- length(best.dir)
r <- (p - k + 1)
# pad the direction with leading zeros so it lives in the k-th search space
bestDirOrigSpace <- c(rep(0, times = (r - lenBestDir)), best.dir)
# NOTE(review): trialsOrigSpace is computed but never used -- dead code?
trialsOrigSpace <- bestDirOrigSpace %*% t(IC[,k:p])
icaLoading <- .optimiseDirection(z = z, IC = IC, dirs = bestDirOrigSpace,
k = k, m = m, maxit = maxit,
cluster = NA, opt.method = opt.method)
newDir <- icaLoading$dirs
newEntr <- icaLoading$entr
# re-evaluate which position the re-optimised direction belongs to
k.check <- min(which(newEntr < entr))
if (k.check == k) break
if (k.check > k) {
# allow one retry at a shifted position before giving up
# NOTE(review): the k.check + 1 bump and one-shot counter are
# undocumented -- confirm this is the intended behaviour
if (counter == 1) break
k.check <- k.check + 1
counter <- 1
}
}
entr <- entr[1:k]
res <- list(newDir = newDir, newEntr = newEntr,
entr = entr, newK = k, newR = r)
res
}
# Update the basis IC with a Householder reflection that maps the first
# canonical basis vector of the search space onto best.dir.  Only the last
# p - k + 1 columns of IC (the current search space) are rotated.
#
# IC: orthogonal basis (p x p)
# best.dir: unit direction in the search space (length r)
# r: dimension of the search space (p - k + 1)
# k: index of the current loading
# p: total dimension
# Returns: the updated IC matrix.
householderTransform <- function(IC, best.dir, r, k, p) {
  firstBasis <- c(1, rep(0, r - 1))
  # reflect towards +/- e1 according to the sign of the leading coordinate
  # (standard sign choice when forming the Householder vector)
  reflector <- best.dir - sign(best.dir[1]) * firstBasis
  reflector <- reflector / sqrt(sum(reflector^2))
  reflection <- diag(r) - 2 * tcrossprod(reflector)
  IC[, k:p] <- IC[, k:p, drop = FALSE] %*% reflection
  IC
}
# Centralised progress reporting for the clusterICA pipeline.
#
# which.one selects the message template (1-6); the remaining arguments
# supply whatever values the selected template reads.  Prints nothing
# unless verbose is TRUE.
verboseFunction <- function(which.one, verbose, k=NA, rand.iter=NA, p.ica=NA,
dir=NA, clustered.dirs=NA, loading=NA) {
if (verbose == TRUE) {
if (which.one == 1) {
# start of the search for loading k
cat("optimising direction", k, "out of", p.ica, "\n")
cat("// Finding ", rand.iter, "random starting points", "\n")
}
if (which.one == 2) {
# random search finished
cat("/// Found ", length(dir$entropy), " starting directions", "\n",
sep="")
cat("/// Sorting these into clusters \n")
}
if (which.one == 3) {
# clustering finished
numClusters <- length(clustered.dirs$entropy)
cat("//// Sorted into ", numClusters,
" clusters", "\n", sep="")
entrPreOptim <- clustered.dirs$entropy
cat("//// Best pre-optim entropy = ", min(entrPreOptim), "\n", sep="")
cat("//// Optimising ", numClusters, " clusters", "\n", sep="")
}
if (which.one == 4) {
# per-loading optimisation finished
cat("//// Optimised direction has entropy ",
loading$optimumEntropy, "\n", sep="")
}
if (which.one == 5) {
# a new direction displaces an earlier loading (see ensureOrder)
cat("///// Current projection better than ", k,
"th projection", "\n")
cat("///// Replacing ", k, "th projection", "\n")
}
if (which.one == 6) {
cat("///// Householder reflection\n\n")
}
}
}
# Print method for "clusterICA" objects: reports how many loadings were
# found, their length, and the entropy of the best projection.
# Returns its argument invisibly, as print methods should.
#' @export
print.clusterICA <- function(x, ...) {
  n_loadings <- ncol(x$r)
  # renamed from 'length' to avoid shadowing base::length
  loading_len <- nrow(x$r)
  entr1 <- round(x$entropy[1], digits = 5)
  cat("Cluster ICA: ", n_loadings, " loading(s) found of length ", loading_len,
      ". Best projection has entropy ", entr1, ".\n", sep = "")
  invisible(x)
}
|
# One-time environment setup plus compilation of a small reference brms model
# that later runs update() with real data (see brms_matched_fun below).
# NOTE(review): install.packages() and the Makevars edits run on every
# execution; the Makevars append block below also appears twice, so the
# flags are duplicated in ~/.R/Makevars -- consider guarding both.
dir.create("~/psm/models", recursive = T)
dir.create("~/psm/psm_combined")
# configure C++14 flags so rstan/brms models compile on this node
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC",
"CXX14=g++", # or clang++ but you may need a version postfix
file = M, sep = "\n", append = TRUE)
install.packages("brms")
# setwd("/scratch/edbeck")
library(rstan)
library(brms)
# library(tidybayes)
library(plyr)
library(tidyverse)
sessionInfo()
# second copy of the Makevars setup (duplicate of the block above)
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC",
"CXX14=g++", # or clang++ but you may need a version postfix
file = M, sep = "\n", append = TRUE)
# jobid = as.integer(Sys.getenv("PBS_ARRAYID"))
# print(jobid)
# one row per model run: trait, outcome, moderator, chain index
args <- read.table("~/psm_matched_args_old.txt", header = F, stringsAsFactors = F)
# print(args)
# use the first row to build the reference model
trait <- args[1,1]; outcome <- args[1,2]; mod <- args[1,3]; chain <- args[1,4]
m <- if(mod == "SES") c("parEdu", "grsWages", "parOccPrstg") else mod
print(paste("m =", m))
d <- if(mod %in% c("reliability", "predInt")){"none"} else mod
print(paste("d =", d))
# load data & sample model
load(sprintf("~/psm/psm_combined/%s_%s_%s.RData", outcome, trait, d))
# clean data & keep only needed columns and a subset of the used variables
# (50 sampled rows per study/outcome cell keep the compile run small)
d1 <- df_l[[1]] %>%
group_by(study, o_value) %>%
nest() %>%
ungroup() %>%
mutate(data = map(data, ~(.) %>% filter(row_number() %in% sample(1:nrow(.), 50, replace = F)))) %>%
unnest(data) %>%
select(study, SID, p_value, o_value, one_of(d)) %>%
filter(complete.cases(.))
# set priors & model specifications (tiny iter/warmup: compile only)
Prior <- c(set_prior("cauchy(0,1)", class = "sd"),
set_prior("student_t(3, 0, 2)", class = "b"),
set_prior("student_t(3, 0, 5)", class = "Intercept"))
Iter <- 30; Warmup <- 21; treedepth <- 20
f <- formula(paste("o_value ~ p_value + ", paste("p_value*", m, collapse = " + "), "+ (", paste("p_value*", m, collapse = " + "), " | study)", sep = ""))
fit2 <- brm(formula = f
# , data = df_l
, data = d1
, prior = Prior
, iter = Iter
, warmup = Warmup
, family = bernoulli(link = "logit")
# , control = list(adapt_delta = 0.99, max_treedepth = treedepth)
, cores = 4)
# cache the compiled model for update() in brms_matched_fun
save(fit2, file = "~/matched_compiled_small.RData")
# brms_matched_fun <- function(trait, outcome, mod, chain, ncores){
# NOTE(review): this rm(list = ls()) deletes EVERYTHING defined above,
# including 'args', which brms_matched_fun and the final map() call both
# read from the global workspace -- the script fails with
# "object 'args' not found" unless args is re-read after this line. Confirm.
rm(list = ls())
# Fit one brms model for row i of the args table (trait, outcome, moderator,
# chain), by updating the pre-compiled reference model fit2, then save the
# fit to ~/psm/models.  Relies on globals: args, and the objects loaded
# below (df_l, fit2).
brms_matched_fun <- function(i){
trait <- args[i,1]; outcome <- args[i,2]; mod <- args[i,3]; chain <- args[i,4]
print(sprintf("outcome = %s, trait = %s, mod = %s, chain = %s", outcome, trait, mod, chain))
# setup
m <- if(mod == "SES") c("parEdu", "grsWages", "parOccPrstg") else mod
print(paste("m =", m))
d <- if(mod %in% c("reliability", "predInt")){"none"} else mod
print(paste("d =", d))
# load data & sample model
# (load() injects df_l and fit2 into this function's environment)
load(sprintf("~/psm/psm_combined/%s_%s_%s.RData", outcome, trait, d))
load(sprintf("~/matched_compiled_small.RData"))
# clean data to keep only needed columns
df_l <- map(df_l, ~(.) %>%
# comment out here
# group_by(study, o_value) %>%
# nest() %>%
# ungroup() %>%
# mutate(data = map(data, ~(.) %>% filter(row_number() %in% sample(1:nrow(.), 50, replace = F)))) %>%
# unnest(data) %>%
# to here
select(study, SID, p_value, o_value, one_of(m)) %>%
filter(complete.cases(.)))
# keep only the dataset for this chain (single-element list)
d1 <- df_l[chain]
# formula
if(mod == "none"){f <- formula(o_value ~ p_value + (p_value | study))}
else {f <- formula(paste("o_value ~ p_value + ", paste("p_value*", m, collapse = " + "), "+ (", paste("p_value*", m, collapse = " + "), " | study)", sep = ""))}
# NOTE(review): Prior and treedepth are defined but not passed to update()
# below -- confirm whether they should be.
Prior <- c(set_prior("cauchy(0,1)", class = "sd"),
set_prior("student_t(3, 0, 2)", class = "b"),
set_prior("student_t(3, 0, 5)", class = "Intercept"))
Iter <- 2000; Warmup <- 1000; treedepth <- 20
# run the models using update and previously compiled C++ stan code
start.tmp <- Sys.time()
# plan(multiprocess)
# fit <- future_map(df_l, function(x){
fit <- #map(df_l, function(x){
# tmp <-
update(fit2
, formula = f
, newdata = d1#x
, iter = Iter
, warmup = Warmup
, cores = 4
)
# class(fit) <- c("brmsfit_multiple", class(fit))
# return(tmp)
# })
# }, .progress = T)
# log elapsed fitting time (assignment inside print is deliberate)
print(end.tmp <- Sys.time() - start.tmp)
# combine models
# rhats <- map_df(fit, function(x)data.frame(as.list(rhat(x))))
# fit <- combine_models(mlist = fit, check_data = FALSE)
# fit$data.name <- "df_l"
# fit$rhats <- rhats
# class(fit) <- c("brmsfit_multiple", class(fit))
# fit2 <- brm_multiple(formula = f
# # , data = df_l
# , data = d1
# , prior = Prior
# , iter = Iter
# , warmup = Warmup
# , family = bernoulli(link = "logit")
# , control = list(adapt_delta = 0.99, max_treedepth = treedepth))
# # , cores = 4)
# extract key parameters
# fixed effects
# fx <- fixef(fit, probs = c(0.055, 0.945)) %>% data.frame %>%
#   rownames_to_column("names") %>%
#   mutate_at(vars(-names), lst(OR = inv_logit_scaled)) %>%
#   tbl_df
# # random effects
# rx <- ranef(fit, probs = c(0.055, 0.945))[[1]] %>% array_tree(3) %>%
#   tibble(names = names(.), data = .) %>%
#   mutate(data = map(data, ~(.) %>% data.frame %>%
#   rownames_to_column("study"))) %>%
#   unnest(data) %>%
#   mutate_at(vars(-names, -study), lst(OR = inv_logit_scaled)) %>%
#   tbl_df
#
# # samples
# fx.draws <- fit %>% tidy_draws() %>%
#   select(.chain:.draw, matches("^b_"), matches("p_value]$")) %>%
#   mutate_at(vars(matches("p_value]$")), ~(.) + b_p_value) %>%
#   gather(key = item, value = s_value, -(.chain:.draw))
#
# tau.draws <- fit %>% tidy_draws() %>%
#   select(.chain:.draw, matches("^sd"), matches("^cor"))
#
# if(mod != "none"){
#   pred.fx <- fx_pred_fun(fit, m)
#   pred.rx <- rx_pred_fun(fit, m)
#   save(pred.fx, pred.rx, file = sprintf("/scratch/edbeck/psm/matched/predicted/matched_pred_%s_%s_%s", trait, outcome, mod))
#   rm(c("pred.fx", "pred.rx"))
# }
#
save(fit, file = sprintf("~/psm/models/matched_%s_%s_%s_%s.RData", trait, outcome, mod, chain))
# save(fx, rx, file = sprintf("/scratch/edbeck/psm/matched/summary/matched_%s_%s_%s", trait, outcome, mod))
# save(fx.draws, tau.draws, file = sprintf("/scratch/edbeck/psm/matched/draws/matched_%s_%s_%s", trait, outcome, mod))
# rm(c("fit", "fx", "rx", "fx.draws", "rx.draws", "df"))
# free the large fit objects before the next row is processed
rm(list = c("fit", "fit2", "df_l", "d1"))
gc()
}
# fit every row of the args table
# NOTE(review): rm(list = ls()) above removed 'args', so this call fails
# with "object 'args' not found" unless args is re-read first -- confirm.
map(1:nrow(args), brms_matched_fun)
# brms_matched_fun(args[,1], args[,2], args[,3], args[,4], args[,5]) | /scripts/psm_cluster/psm_matched_run_do.R | no_license | emoriebeck/big-five-prediction | R | false | false | 7,155 | r | dir.create("~/psm/models", recursive = T)
dir.create("~/psm/psm_combined")
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC",
"CXX14=g++", # or clang++ but you may need a version postfix
file = M, sep = "\n", append = TRUE)
install.packages("brms")
# setwd("/scratch/edbeck")
library(rstan)
library(brms)
# library(tidybayes)
library(plyr)
library(tidyverse)
sessionInfo()
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC",
"CXX14=g++", # or clang++ but you may need a version postfix
file = M, sep = "\n", append = TRUE)
# jobid = as.integer(Sys.getenv("PBS_ARRAYID"))
# print(jobid)
args <- read.table("~/psm_matched_args_old.txt", header = F, stringsAsFactors = F)
# print(args)
trait <- args[1,1]; outcome <- args[1,2]; mod <- args[1,3]; chain <- args[1,4]
m <- if(mod == "SES") c("parEdu", "grsWages", "parOccPrstg") else mod
print(paste("m =", m))
d <- if(mod %in% c("reliability", "predInt")){"none"} else mod
print(paste("d =", d))
# load data & sample model
load(sprintf("~/psm/psm_combined/%s_%s_%s.RData", outcome, trait, d))
# clean data & keep only needed columns and a subset of the used variables
d1 <- df_l[[1]] %>%
group_by(study, o_value) %>%
nest() %>%
ungroup() %>%
mutate(data = map(data, ~(.) %>% filter(row_number() %in% sample(1:nrow(.), 50, replace = F)))) %>%
unnest(data) %>%
select(study, SID, p_value, o_value, one_of(d)) %>%
filter(complete.cases(.))
# set priors & model specifications
Prior <- c(set_prior("cauchy(0,1)", class = "sd"),
set_prior("student_t(3, 0, 2)", class = "b"),
set_prior("student_t(3, 0, 5)", class = "Intercept"))
Iter <- 30; Warmup <- 21; treedepth <- 20
f <- formula(paste("o_value ~ p_value + ", paste("p_value*", m, collapse = " + "), "+ (", paste("p_value*", m, collapse = " + "), " | study)", sep = ""))
fit2 <- brm(formula = f
# , data = df_l
, data = d1
, prior = Prior
, iter = Iter
, warmup = Warmup
, family = bernoulli(link = "logit")
# , control = list(adapt_delta = 0.99, max_treedepth = treedepth)
, cores = 4)
save(fit2, file = "~/matched_compiled_small.RData")
# brms_matched_fun <- function(trait, outcome, mod, chain, ncores){
rm(list = ls())
brms_matched_fun <- function(i){
trait <- args[i,1]; outcome <- args[i,2]; mod <- args[i,3]; chain <- args[i,4]
print(sprintf("outcome = %s, trait = %s, mod = %s, chain = %s", outcome, trait, mod, chain))
# setup
m <- if(mod == "SES") c("parEdu", "grsWages", "parOccPrstg") else mod
print(paste("m =", m))
d <- if(mod %in% c("reliability", "predInt")){"none"} else mod
print(paste("d =", d))
# load data & sample model
load(sprintf("~/psm/psm_combined/%s_%s_%s.RData", outcome, trait, d))
load(sprintf("~/matched_compiled_small.RData"))
# clean data to keep only needed columns
df_l <- map(df_l, ~(.) %>%
# comment out here
# group_by(study, o_value) %>%
# nest() %>%
# ungroup() %>%
# mutate(data = map(data, ~(.) %>% filter(row_number() %in% sample(1:nrow(.), 50, replace = F)))) %>%
# unnest(data) %>%
# to here
select(study, SID, p_value, o_value, one_of(m)) %>%
filter(complete.cases(.)))
d1 <- df_l[chain]
# formula
if(mod == "none"){f <- formula(o_value ~ p_value + (p_value | study))}
else {f <- formula(paste("o_value ~ p_value + ", paste("p_value*", m, collapse = " + "), "+ (", paste("p_value*", m, collapse = " + "), " | study)", sep = ""))}
Prior <- c(set_prior("cauchy(0,1)", class = "sd"),
set_prior("student_t(3, 0, 2)", class = "b"),
set_prior("student_t(3, 0, 5)", class = "Intercept"))
Iter <- 2000; Warmup <- 1000; treedepth <- 20
# run the models using update and previously compiled C++ stan code
start.tmp <- Sys.time()
# plan(multiprocess)
# fit <- future_map(df_l, function(x){
fit <- #map(df_l, function(x){
# tmp <-
update(fit2
, formula = f
, newdata = d1#x
, iter = Iter
, warmup = Warmup
, cores = 4
)
# class(fit) <- c("brmsfit_multiple", class(fit))
# return(tmp)
# })
# }, .progress = T)
print(end.tmp <- Sys.time() - start.tmp)
# combine models
# rhats <- map_df(fit, function(x)data.frame(as.list(rhat(x))))
# fit <- combine_models(mlist = fit, check_data = FALSE)
# fit$data.name <- "df_l"
# fit$rhats <- rhats
# class(fit) <- c("brmsfit_multiple", class(fit))
# fit2 <- brm_multiple(formula = f
# # , data = df_l
# , data = d1
# , prior = Prior
# , iter = Iter
# , warmup = Warmup
# , family = bernoulli(link = "logit")
# , control = list(adapt_delta = 0.99, max_treedepth = treedepth))
# # , cores = 4)
# extract key parameters
# fixed effects
# fx <- fixef(fit, probs = c(0.055, 0.945)) %>% data.frame %>%
# rownames_to_column("names") %>%
# mutate_at(vars(-names), lst(OR = inv_logit_scaled)) %>%
# tbl_df
# # random effects
# rx <- ranef(fit, probs = c(0.055, 0.945))[[1]] %>% array_tree(3) %>%
# tibble(names = names(.), data = .) %>%
# mutate(data = map(data, ~(.) %>% data.frame %>%
# rownames_to_column("study"))) %>%
# unnest(data) %>%
# mutate_at(vars(-names, -study), lst(OR = inv_logit_scaled)) %>%
# tbl_df
#
# # samples
# fx.draws <- fit %>% tidy_draws() %>%
# select(.chain:.draw, matches("^b_"), matches("p_value]$")) %>%
# mutate_at(vars(matches("p_value]$")), ~(.) + b_p_value) %>%
# gather(key = item, value = s_value, -(.chain:.draw))
#
# tau.draws <- fit %>% tidy_draws() %>%
# select(.chain:.draw, matches("^sd"), matches("^cor"))
#
# if(mod != "none"){
# pred.fx <- fx_pred_fun(fit, m)
# pred.rx <- rx_pred_fun(fit, m)
# save(pred.fx, pred.rx, file = sprintf("/scratch/edbeck/psm/matched/predicted/matched_pred_%s_%s_%s", trait, outcome, mod))
# rm(c("pred.fx", "pred.rx"))
# }
#
save(fit, file = sprintf("~/psm/models/matched_%s_%s_%s_%s.RData", trait, outcome, mod, chain))
# save(fx, rx, file = sprintf("/scratch/edbeck/psm/matched/summary/matched_%s_%s_%s", trait, outcome, mod))
# save(fx.draws, tau.draws, file = sprintf("/scratch/edbeck/psm/matched/draws/matched_%s_%s_%s", trait, outcome, mod))
# rm(c("fit", "fx", "rx", "fx.draws", "rx.draws", "df"))
rm(list = c("fit", "fit2", "df_l", "d1"))
gc()
}
map(1:nrow(args), brms_matched_fun)
# brms_matched_fun(args[,1], args[,2], args[,3], args[,4], args[,5]) |
## Enter names of counts and interviews data files (either SAS or CSV files)
CNTS_FILE <- "ashcnts.sas7bdat"
INTS_FILE <- "ashints.sas7bdat"
## Enter creel location must be "ash","byf","cpw","lsb","rdc","sax","sup", "wsh"
LOCATION <- "ash"
## Enter start and end dates (must be two digits mon/day and four-digit year)
START_DATE <- "05/16/2014"
END_DATE <- "09/30/2014"
## Enter day length for each month
DAY_LENGTH <- c(Jan=00,Feb=00,Mar=00,Apr=00,May=14,Jun=14,
Jul=14,Aug=14,Sep=13,Oct=00,Nov=00,Dec=00) | /zzzOgleOnly/zzzOld/LS_OPEN_2014_BEFORE ACCESS/data/ash_2014_info.R | no_license | droglenc/WiDNR_Creel | R | false | false | 522 | r | ## Enter names of counts and interviews data files (either SAS or CSV files)
CNTS_FILE <- "ashcnts.sas7bdat"
INTS_FILE <- "ashints.sas7bdat"
## Enter creel location must be "ash","byf","cpw","lsb","rdc","sax","sup", "wsh"
LOCATION <- "ash"
## Enter start and end dates (must be two digits mon/day and four-digit year)
START_DATE <- "05/16/2014"
END_DATE <- "09/30/2014"
## Enter day length for each month
DAY_LENGTH <- c(Jan=00,Feb=00,Mar=00,Apr=00,May=14,Jun=14,
Jul=14,Aug=14,Sep=13,Oct=00,Nov=00,Dec=00) |
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818016386547e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613108187-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r | testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818016386547e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
grid::grid.newpage()
grid::pushViewport(grid::viewport(
height = unit(0.8, "npc"),
layout = grid::grid.layout(nrow = 3, ncol = 1,
heights = unit.c(unit(0.2, "npc"),
unit(2.5, "lines"),
unit(1, "null")))
))
grid::pushViewport(grid::viewport(layout.pos.row = 1))
grid::grid.raster(img)
grid::upViewport()
grid::pushViewport(grid::viewport(layout.pos.row = 2))
grid::grid.text("Version 3.2",
x = unit(0.8, "npc"), y = unit(0.75, "npc"),
just = 'right')
grid::grid.text("Release date: 23 Jan 2018",
x = unit(0.8, "npc"), y = unit(0.25, "npc"),
just = 'right', gp = gpar(fontsize = 9))
| /tests/splash.R | no_license | iNZightVIT/dev | R | false | false | 782 | r | grid::grid.newpage()
grid::pushViewport(grid::viewport(
height = unit(0.8, "npc"),
layout = grid::grid.layout(nrow = 3, ncol = 1,
heights = unit.c(unit(0.2, "npc"),
unit(2.5, "lines"),
unit(1, "null")))
))
grid::pushViewport(grid::viewport(layout.pos.row = 1))
grid::grid.raster(img)
grid::upViewport()
grid::pushViewport(grid::viewport(layout.pos.row = 2))
grid::grid.text("Version 3.2",
x = unit(0.8, "npc"), y = unit(0.75, "npc"),
just = 'right')
grid::grid.text("Release date: 23 Jan 2018",
x = unit(0.8, "npc"), y = unit(0.25, "npc"),
just = 'right', gp = gpar(fontsize = 9))
|
# Exploratory Data Analysis - Assignment 2
#3.Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
#which of these four sources have seen decreases in emissions from 1999-2008 for Baltimore City?
#Which have seen increases in emissions from 1999-2008?
#Use the ggplot2 plotting system to make a plot answer this question
library(ggplot2)
# Loading Datasets from working directory
summary <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Subset out Baltimore
Baltimore <- subset(summary, fips == 24510)
#Aggregate the data
aggBalt <- aggregate(Baltimore$Emissions, by = list(Baltimore$year, Baltimore$type), FUN = sum)
# Generate the graph in the same directory as the source code
png(filename="plot3.png", width = 1200, height=500, units='px')
ggplot(aggBalt, aes(Group.1, x)) + geom_line() + facet_grid(. ~ Group.2) + labs(x = "Year", y = expression("PM" [2.5] ~ "Emitted (tons)"))
dev.off() | /Plot3.R | no_license | azpmerrill/ExData_Project2 | R | false | false | 983 | r | # Exploratory Data Analysis - Assignment 2
#3.Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
#which of these four sources have seen decreases in emissions from 1999-2008 for Baltimore City?
#Which have seen increases in emissions from 1999-2008?
#Use the ggplot2 plotting system to make a plot answer this question
library(ggplot2)
# Loading Datasets from working directory
summary <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Subset out Baltimore
Baltimore <- subset(summary, fips == 24510)
#Aggregate the data
aggBalt <- aggregate(Baltimore$Emissions, by = list(Baltimore$year, Baltimore$type), FUN = sum)
# Generate the graph in the same directory as the source code
png(filename="plot3.png", width = 1200, height=500, units='px')
ggplot(aggBalt, aes(Group.1, x)) + geom_line() + facet_grid(. ~ Group.2) + labs(x = "Year", y = expression("PM" [2.5] ~ "Emitted (tons)"))
dev.off() |
#' @importFrom tidyr pivot_wider
#' @importFrom tidyr separate
#' @importFrom dplyr select
#' @importFrom tibble tibble
#' @import SummarizedExperiment
#' @importFrom magrittr %>%
## Returns tidy data to SE or DF
.tidyReturn <- function(tidyData, compVars, sampleVars, metaData = NULL, toSE) {
## Return data to wide format data frame
rtn <- tidyData %>%
pivot_wider(id_cols = compVars,
names_from = sampleVars,
values_from = "abundance")
## If selected, convert data to SummarizedExperiment
if(toSE) {
## Get row data
seRowData <- select(rtn, compVars)
## Get assay data
seAssay <- select(rtn, -compVars) %>%
as.matrix()
## Get column data
seColumnData <- tibble("samples" = colnames(seAssay)) %>%
separate(col = "samples", into = sampleVars, sep = "_")
## Build SummarizedExperiment
rtn <- SummarizedExperiment(assays = list(abundance = seAssay),
colData = seColumnData,
rowData = seRowData,
metadata = metaData)
}
return(rtn)
}
#' @importFrom tidyr pivot_wider
#' @importFrom tidyr separate
#' @importFrom dplyr select
#' @importFrom tibble tibble
#' @importFrom magrittr %>%
#' @import SummarizedExperiment
## Converts DF to SE
.dfToSE <- function(DF, compVars, sampleVars, separator, colExtraText = NULL,
metaData = NULL) {
## Get column data
sampleCols <- select(DF, -compVars)
seColumnData <- tibble("samples" = colnames(sampleCols))
## Remove colExtraText text if present
if(!is.null(colExtraText)){
seColumnData <- mutate(seColumnData, "samples" =
str_replace_all(.data$samples,
colExtraText, ""))
}
seColumnData <- separate(seColumnData, col = "samples", into = sampleVars,
sep = separator)
## Get row data
seRowData <- select(DF, compVars)
## Get abundance data
seAssay <- select(DF, -compVars) %>%
as.matrix()
## Build SummarizedExperiment
rtn <- SummarizedExperiment(assays = list(abundance = seAssay),
colData = seColumnData,
rowData = seRowData,
metadata = metaData)
}
#' @import SummarizedExperiment
## Converts SE to DF
## Note: Currently metadata is lost in conversion
.seToDF <- function(SE, colExtraText = NULL, seperator = "_") {
## Get row data, compound variables, and technical variables
rowData <- as_tibble(rowData(SE))
colData <- as_tibble(colData(SE))
abundanceData <- as.data.frame(assay(SE))
if (!is.null(colExtraText)) {
colData <- tibble::add_column(colData,
"colExtraText" =
rep(colExtraText, nrow(colData)),
.before = 1)
}
columnNames <- apply(colData, 1, paste, collapse=seperator)
colnames(abundanceData) <- columnNames
cbind(rowData, abundanceData)
} | /R/return.R | no_license | tuh8888/MSPrep | R | false | false | 3,302 | r | #' @importFrom tidyr pivot_wider
#' @importFrom tidyr separate
#' @importFrom dplyr select
#' @importFrom tibble tibble
#' @import SummarizedExperiment
#' @importFrom magrittr %>%
## Returns tidy data to SE or DF
.tidyReturn <- function(tidyData, compVars, sampleVars, metaData = NULL, toSE) {
## Return data to wide format data frame
rtn <- tidyData %>%
pivot_wider(id_cols = compVars,
names_from = sampleVars,
values_from = "abundance")
## If selected, convert data to SummarizedExperiment
if(toSE) {
## Get row data
seRowData <- select(rtn, compVars)
## Get assay data
seAssay <- select(rtn, -compVars) %>%
as.matrix()
## Get column data
seColumnData <- tibble("samples" = colnames(seAssay)) %>%
separate(col = "samples", into = sampleVars, sep = "_")
## Build SummarizedExperiment
rtn <- SummarizedExperiment(assays = list(abundance = seAssay),
colData = seColumnData,
rowData = seRowData,
metadata = metaData)
}
return(rtn)
}
#' @importFrom tidyr pivot_wider
#' @importFrom tidyr separate
#' @importFrom dplyr select
#' @importFrom tibble tibble
#' @importFrom magrittr %>%
#' @import SummarizedExperiment
## Converts DF to SE
.dfToSE <- function(DF, compVars, sampleVars, separator, colExtraText = NULL,
metaData = NULL) {
## Get column data
sampleCols <- select(DF, -compVars)
seColumnData <- tibble("samples" = colnames(sampleCols))
## Remove colExtraText text if present
if(!is.null(colExtraText)){
seColumnData <- mutate(seColumnData, "samples" =
str_replace_all(.data$samples,
colExtraText, ""))
}
seColumnData <- separate(seColumnData, col = "samples", into = sampleVars,
sep = separator)
## Get row data
seRowData <- select(DF, compVars)
## Get abundance data
seAssay <- select(DF, -compVars) %>%
as.matrix()
## Build SummarizedExperiment
rtn <- SummarizedExperiment(assays = list(abundance = seAssay),
colData = seColumnData,
rowData = seRowData,
metadata = metaData)
}
#' @import SummarizedExperiment
## Converts SE to DF
## Note: Currently metadata is lost in conversion
.seToDF <- function(SE, colExtraText = NULL, seperator = "_") {
## Get row data, compound variables, and technical variables
rowData <- as_tibble(rowData(SE))
colData <- as_tibble(colData(SE))
abundanceData <- as.data.frame(assay(SE))
if (!is.null(colExtraText)) {
colData <- tibble::add_column(colData,
"colExtraText" =
rep(colExtraText, nrow(colData)),
.before = 1)
}
columnNames <- apply(colData, 1, paste, collapse=seperator)
colnames(abundanceData) <- columnNames
cbind(rowData, abundanceData)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{Ctry.info}
\alias{Ctry.info}
\title{Ctry.info}
\usage{
Ctry.info(x, y)
}
\arguments{
\item{x}{= a vector of country codes}
\item{y}{= a column in the classif-ctry file}
}
\description{
handles the addition and removal of countries from an index
}
\examples{
Ctry.info("PK", "CtryNm")
}
\seealso{
Other Ctry: \code{\link{Ctry.msci.index.changes}},
\code{\link{Ctry.msci.members.rng}},
\code{\link{Ctry.msci.members}},
\code{\link{Ctry.msci.sql}}, \code{\link{Ctry.msci}},
\code{\link{Ctry.to.CtryGrp}}
}
\keyword{Ctry.info}
| /man/Ctry.info.Rd | no_license | Turnado-dx/EPFR | R | false | true | 623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{Ctry.info}
\alias{Ctry.info}
\title{Ctry.info}
\usage{
Ctry.info(x, y)
}
\arguments{
\item{x}{= a vector of country codes}
\item{y}{= a column in the classif-ctry file}
}
\description{
handles the addition and removal of countries from an index
}
\examples{
Ctry.info("PK", "CtryNm")
}
\seealso{
Other Ctry: \code{\link{Ctry.msci.index.changes}},
\code{\link{Ctry.msci.members.rng}},
\code{\link{Ctry.msci.members}},
\code{\link{Ctry.msci.sql}}, \code{\link{Ctry.msci}},
\code{\link{Ctry.to.CtryGrp}}
}
\keyword{Ctry.info}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/debug.R
\name{debug_seedHash}
\alias{debug_seedHash}
\title{Seed hash of the block}
\usage{
debug_seedHash(number)
}
\arguments{
\item{number}{Integer - Number of the block.}
}
\value{
Data - Seed hash of the block by number.
}
\description{
\code{debug_seedHash} fetches and retrieves the seed hash of the block by
number.
}
\examples{
\donttest{
debug_seedHash(29)
}
}
\seealso{
Other debug functions: \code{\link{debug_backtraceAt}},
\code{\link{debug_blockProfile}},
\code{\link{debug_cpuProfile}},
\code{\link{debug_dumpBlock}},
\code{\link{debug_gcStats}},
\code{\link{debug_getBlockRlp}},
\code{\link{debug_goTrace}},
\code{\link{debug_memStats}},
\code{\link{debug_setBlockProfileRate}},
\code{\link{debug_setHead}}, \code{\link{debug_stacks}},
\code{\link{debug_startCPUProfile}},
\code{\link{debug_startGoTrace}},
\code{\link{debug_stopCPUProfile}},
\code{\link{debug_stopGoTrace}},
\code{\link{debug_traceBlockByHash}},
\code{\link{debug_traceBlockByNumber}},
\code{\link{debug_traceBlockFromFile}},
\code{\link{debug_traceBlock}},
\code{\link{debug_traceTransaction}},
\code{\link{debug_verbosity}},
\code{\link{debug_vmodule}},
\code{\link{debug_writeBlockProfile}},
\code{\link{debug_writeMemProfile}}, \code{\link{gethr}}
}
\concept{debug functions}
| /man/debug_seedHash.Rd | no_license | cran/gethr | R | false | true | 1,438 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/debug.R
\name{debug_seedHash}
\alias{debug_seedHash}
\title{Seed hash of the block}
\usage{
debug_seedHash(number)
}
\arguments{
\item{number}{Integer - Number of the block.}
}
\value{
Data - Seed hash of the block by number.
}
\description{
\code{debug_seedHash} fetches and retrieves the seed hash of the block by
number.
}
\examples{
\donttest{
debug_seedHash(29)
}
}
\seealso{
Other debug functions: \code{\link{debug_backtraceAt}},
\code{\link{debug_blockProfile}},
\code{\link{debug_cpuProfile}},
\code{\link{debug_dumpBlock}},
\code{\link{debug_gcStats}},
\code{\link{debug_getBlockRlp}},
\code{\link{debug_goTrace}},
\code{\link{debug_memStats}},
\code{\link{debug_setBlockProfileRate}},
\code{\link{debug_setHead}}, \code{\link{debug_stacks}},
\code{\link{debug_startCPUProfile}},
\code{\link{debug_startGoTrace}},
\code{\link{debug_stopCPUProfile}},
\code{\link{debug_stopGoTrace}},
\code{\link{debug_traceBlockByHash}},
\code{\link{debug_traceBlockByNumber}},
\code{\link{debug_traceBlockFromFile}},
\code{\link{debug_traceBlock}},
\code{\link{debug_traceTransaction}},
\code{\link{debug_verbosity}},
\code{\link{debug_vmodule}},
\code{\link{debug_writeBlockProfile}},
\code{\link{debug_writeMemProfile}}, \code{\link{gethr}}
}
\concept{debug functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vapour_input_geometry.R
\name{vapour_read_geometry}
\alias{vapour_read_geometry}
\alias{vapour_read_geometry_text}
\alias{vapour_read_extent}
\alias{vapour_read_type}
\title{Read GDAL feature geometry}
\usage{
vapour_read_geometry(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_geometry_text(
dsource,
layer = 0L,
sql = "",
textformat = "json",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_extent(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_type(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
}
\arguments{
\item{dsource}{data source name (path to file, connection string, URL)}
\item{layer}{integer of layer to work with, defaults to the first (0) or the name of the layer}
\item{sql}{if not empty this is executed against the data source (layer will be ignored)}
\item{limit_n}{an arbitrary limit to the number of features scanned}
\item{skip_n}{an arbitrary number of features to skip}
\item{extent}{apply an arbitrary extent, only when 'sql' used (must be 'ex = c(xmin, xmax, ymin, ymax)' but sp bbox, sf bbox, and raster extent also accepted)}
\item{textformat}{indicate text output format, available are "json" (default), "gml", "kml", "wkt"}
}
\description{
Read GDAL geometry as binary blob, text, or numeric extent.
}
\details{
\code{vapour_read_geometry} will read features as binary WKB, \code{vapour_read_geometry_text} as various text formats (geo-json, wkt, kml, gml),
\code{vapour_read_extent} a numeric extent which is the native bounding box, the four numbers (in this order) \verb{xmin, xmax, ymin, ymax}.
For each function an optional SQL string will be evaluated against the data source before reading.
\code{vapour_read_geometry_cpp} will read a feature for each of the ways listed above and is used by those functions. It's recommended
to use the more specialist functions rather than this more general one.
\code{vapour_read_type} will read the (wkb) type of the geometry as an integer. These are
\code{0} unknown, \code{1} Point, \code{2} LineString, \code{3} Polygon, \code{4} MultiPoint, \code{5} MultiLineString,
\code{6} MultiPolygon, \code{7} GeometryCollection, and the other more exotic types listed in "api/vector_c_api.html" from the
GDAL home page (as at October 2020).
Note that \code{limit_n} and \code{skip_n} interact with the affect of \code{sql}, first the query is executed on the data source, then
while looping through available features \code{skip_n} features are ignored, and then a feature-count begins and the loop
is stopped if \code{limit_n} is reached.
Note that \code{extent} applies to the 'SpatialFilter' of 'ExecuteSQL': https://gdal.org/user/ogr_sql_dialect.html#executesql.
}
\examples{
file <- "list_locality_postcode_meander_valley.tab"
## A MapInfo TAB file with polygons
mvfile <- system.file(file.path("extdata/tab", file), package="vapour")
## A shapefile with points
pfile <- system.file("extdata/point.shp", package = "vapour")
## raw binary WKB points in a list
ptgeom <- vapour_read_geometry(pfile)
## create a filter query to ensure data read is small
SQL <- "SELECT FID FROM list_locality_postcode_meander_valley WHERE FID < 3"
## polygons in raw binary (WKB)
plgeom <- vapour_read_geometry_text(mvfile, sql = SQL)
## polygons in raw text (GeoJSON)
txtjson <- vapour_read_geometry_text(mvfile, sql = SQL)
## polygon extents in a list xmin, xmax, ymin, ymax
exgeom <- vapour_read_extent(mvfile)
## points in raw text (GeoJSON)
txtpointjson <- vapour_read_geometry_text(pfile)
## points in raw text (WKT)
txtpointwkt <- vapour_read_geometry_text(pfile, textformat = "wkt")
}
| /man/vapour_read_geometry.Rd | no_license | jsta/vapour | R | false | true | 3,802 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vapour_input_geometry.R
\name{vapour_read_geometry}
\alias{vapour_read_geometry}
\alias{vapour_read_geometry_text}
\alias{vapour_read_extent}
\alias{vapour_read_type}
\title{Read GDAL feature geometry}
\usage{
vapour_read_geometry(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_geometry_text(
dsource,
layer = 0L,
sql = "",
textformat = "json",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_extent(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
vapour_read_type(
dsource,
layer = 0L,
sql = "",
limit_n = NULL,
skip_n = 0,
extent = NA
)
}
\arguments{
\item{dsource}{data source name (path to file, connection string, URL)}
\item{layer}{integer of layer to work with, defaults to the first (0) or the name of the layer}
\item{sql}{if not empty this is executed against the data source (layer will be ignored)}
\item{limit_n}{an arbitrary limit to the number of features scanned}
\item{skip_n}{an arbitrary number of features to skip}
\item{extent}{apply an arbitrary extent, only when 'sql' used (must be 'ex = c(xmin, xmax, ymin, ymax)' but sp bbox, sf bbox, and raster extent also accepted)}
\item{textformat}{indicate text output format, available are "json" (default), "gml", "kml", "wkt"}
}
\description{
Read GDAL geometry as binary blob, text, or numeric extent.
}
\details{
\code{vapour_read_geometry} will read features as binary WKB, \code{vapour_read_geometry_text} as various text formats (geo-json, wkt, kml, gml),
\code{vapour_read_extent} a numeric extent which is the native bounding box, the four numbers (in this order) \verb{xmin, xmax, ymin, ymax}.
For each function an optional SQL string will be evaluated against the data source before reading.
\code{vapour_read_geometry_cpp} will read a feature for each of the ways listed above and is used by those functions. It's recommended
to use the more specialist functions rather than this more general one.
\code{vapour_read_type} will read the (wkb) type of the geometry as an integer. These are
\code{0} unknown, \code{1} Point, \code{2} LineString, \code{3} Polygon, \code{4} MultiPoint, \code{5} MultiLineString,
\code{6} MultiPolygon, \code{7} GeometryCollection, and the other more exotic types listed in "api/vector_c_api.html" from the
GDAL home page (as at October 2020).
Note that \code{limit_n} and \code{skip_n} interact with the affect of \code{sql}, first the query is executed on the data source, then
while looping through available features \code{skip_n} features are ignored, and then a feature-count begins and the loop
is stopped if \code{limit_n} is reached.
Note that \code{extent} applies to the 'SpatialFilter' of 'ExecuteSQL': https://gdal.org/user/ogr_sql_dialect.html#executesql.
}
\examples{
file <- "list_locality_postcode_meander_valley.tab"
## A MapInfo TAB file with polygons
mvfile <- system.file(file.path("extdata/tab", file), package="vapour")
## A shapefile with points
pfile <- system.file("extdata/point.shp", package = "vapour")
## raw binary WKB points in a list
ptgeom <- vapour_read_geometry(pfile)
## create a filter query to ensure data read is small
SQL <- "SELECT FID FROM list_locality_postcode_meander_valley WHERE FID < 3"
## polygons in raw binary (WKB)
plgeom <- vapour_read_geometry_text(mvfile, sql = SQL)
## polygons in raw text (GeoJSON)
txtjson <- vapour_read_geometry_text(mvfile, sql = SQL)
## polygon extents in a list xmin, xmax, ymin, ymax
exgeom <- vapour_read_extent(mvfile)
## points in raw text (GeoJSON)
txtpointjson <- vapour_read_geometry_text(pfile)
## points in raw text (WKT)
txtpointwkt <- vapour_read_geometry_text(pfile, textformat = "wkt")
}
|
## Put comments here that give an overall description of what your
## functions do
## The overall objective is to compute the inverse of a matrix and
## store ot locally to save computation time if it needs to be accessed repeatedly
##Assumption in this case is the matrix is always invertible
## Concept is there are 2 divisions existing, get and set, using get we would be able to
## retrieve the stored value and set would be used to set the inverse value to the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set,get = get,setinv = setinv,getinv = getinv)
}
## Write a short comment describing this function
## Function to check if the inverse has already been computed, if yes, then it directly can return the value,
## else it calls from set. Here the function solve is used to get the inverse of the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if (!is.null(inv)) {
message("get cached data")
return(inv)
}
val <- x$get()
inv <- solve(val, ...)
x$setinv(inv)
inv
}
| /cachematrix.R | no_license | harshithbiotech/ProgrammingAssignment2 | R | false | false | 1,254 | r | ## Put comments here that give an overall description of what your
## functions do
## The overall objective is to compute the inverse of a matrix and
## store ot locally to save computation time if it needs to be accessed repeatedly
##Assumption in this case is the matrix is always invertible
## Concept is there are 2 divisions existing, get and set, using get we would be able to
## retrieve the stored value and set would be used to set the inverse value to the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set,get = get,setinv = setinv,getinv = getinv)
}
## Write a short comment describing this function
## Function to check if the inverse has already been computed, if yes, then it directly can return the value,
## else it calls from set. Here the function solve is used to get the inverse of the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if (!is.null(inv)) {
message("get cached data")
return(inv)
}
val <- x$get()
inv <- solve(val, ...)
x$setinv(inv)
inv
}
|
ui <- fixedPage(theme = shinythemes::shinytheme("lumen"), # paper lumen cosmo
tags$head(includeCSS(paste0("./www/styles.css"))),
div(headerPanel(app_title), style = 'width:1560px;'),
div(tabsetPanel(
# ================ #
tabPanel("Welcome!", fluid = TRUE,
mainPanel(class = "welcome",
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, tags$br()),
column(12, align = "center",
uiOutput("plot.uiDatFeatPlotH1"), tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, align = "center",
tags$b("Select Analysis"),
column(12, tags$br()),
pickerInput("Analysis", label = "",
choices = list(Combined = names(file_list)),
selected = "all she-pos. cells", width = "50%")
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, tags$hr()),
fluidRow(tags$br()),
fluidRow(tags$br()),
# Instructions + "ambiguous gene names" note shown on the Welcome tab.
# Fixes to user-facing text: "quantfied" -> "quantified",
# "which are identify" -> "which identify", "folowing" -> "following",
# and the quoted button label now matches the actual "Genes in Data Set"
# download button defined below.
column(10, align = "center", offset = 1,
column(12, align = "left", tags$b("Instructions")),
column(12, align = "left",
'All genes available for plotting can be downloaded in the
Excel spreadsheet below labeled "Genes in Data Set", using either
Ensembl IDs or common names from the',
tags$b("Gene.name.unique"),'column as input. You cannot, however,
mix common names with Ensembl IDs in the same query. Groups of
genes can be directly copied/pasted from the spreadsheet into
the app input field and will have the necessary spacing
by default. Please note that this data set was produced with the
Ensembl 91 gene annotation in zebrafish (genome version 10).
We therefore recommend using Ensembl gene IDs as input,
as common gene names can change with annotation updates.',
'Cluster markers and related figures can be downloaded in',
tags$a(href = "http://bioinfo/n/projects/ddiaz/Analysis/Scripts/sb2191-regen/regen-summary/site/IntegratedData/",
tags$b("this notebook")),
'. All genes used for this dataset can be downloaded below:'),
fluidRow(tags$br()),
fluidRow(tags$br()),
# Spreadsheet of every gene available for plotting (server downloadHandler).
column(12, align = "center", offset = 0,
downloadButton("downloadDatasetGenes", "Genes in Data Set",
style = "padding:8px; font-size:80%")),
fluidRow(tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, align = "left", tags$b("An important note on ambiguous gene names")),
column(12, align = "left",
'Gene expression in this data set is quantified by the number of
deduplicated UMIs (unique molecular index) that map to Ensembl
gene IDs, which identify unique coding sequences (CDS) in the
zebrafish genome. In some cases different Ensembl IDs
will have the same common gene name. This may occur
when there is no consensus over which CDS represents
the common name, or because the product of a particular
CDS has not been characterized in detail. These repeated
common names are denoted by an asterisk followed by an
integer value (e.g. sox4a*1). The asterisk is not a part of the
common name by default; it was added to signify that the name
is repeated in this data set and needs further clarification.
The integer after the asterisk highlights which occurrence
of the repeat you are viewing, but does not carry any
functional significance. For example, sox4a has two
different Ensembl IDs in version 91 of the annotation,
ENSDARG00000096389 - sox4, and ENSDARG00000004588 - sox4a*1, but
only ENSDARG00000004588 - sox4a*1 is expressed in this data set.',
fluidRow(tags$br()),
fluidRow(tags$br()),
'The most important item to consider when referencing the
nucleotide sequence of a gene is',
tags$b("which Ensembl ID corresponds to
your expression pattern of interest."),
'This ensures that you are targeting the same CDS that reads
are mapping to for this particular data set. You can easily
check if a gene name is ambiguous by copying any portion
of the common name into the Gene Database section of the app
(sox4a is shown as an example by default). Details on
how Ensembl IDs are curated can be found at the following link:',
tags$a(
href = "http://www.ensembl.org/info/genome/genebuild/genome_annotation.html",
"http://www.ensembl.org/info/genome/genebuild/genome_annotation.html"),
'. Additionally, there are two spreadsheets below with all
of the repeated common names in both this data set, and the
Ensembl 91 zebrafish annotation.')),
fluidRow(tags$br()),
fluidRow(tags$br())
)
),
# ================ #
tabPanel("Gene Database", fluid = TRUE,
sidebarLayout(
sidebarPanel(
textInput("dbGenes", "Insert gene name or ensembl ID:",
value = "gadd45gb.1 slc1a3a znf185 si:ch73-261i21.5"),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV6"), align = "center"),
fluidRow(tags$br())
),
mainPanel(fluidRow(
column(11, tags$br()),
uiOutput("GeneDB")
)
)
)
),
# ================ #
tabPanel("Feature Plots", fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(width = 4,
column(12, align = "left",
textInput("featureGenes", "Insert gene name or ensembl ID:",
value = smpl_genes_sm)),
column(12, align = "center",
actionButton("runFeatPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center",
downloadButton("downloadFeaturePlotF", "Download png",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectFeat")),
column(12, tags$br()),
column(12,
column(6, textInput("CellBackCol",
"Cell background color:", value = "azure3")),
column(6, textInput("CellForeCol",
"Cell foreground color:", value = "blue3"))
),
column(12, tags$br()),
column(12, align = "center",
column(6, align = "left",
numericInput("featDPI", "Download quality (DPI):",
value = 200, min = 50, step = 25, max = 400, width = "100%")),
column(6, align = "left",
numericInput("ptSizeFeature", "Input cell size:", value = 0.50,
min = 0.25, step = 0.25, max = 2.00, width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV1"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8,uiOutput("notInFeat")),
column(8, tags$hr()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiFeaturePlotF")
)
)
)
)
),
# ================ #
tabPanel("Violin Plots", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, textInput("vlnGenes", width = "100%",
"Insert gene name or ensembl ID:",
value = smpl_genes_sm)),
column(12, align = "center",
actionButton("runVlnPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadVlnPlot", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectVln")), # New
column(12, tags$br()),
column(12, align = "center",
column(6,
radioGroupButtons("selectGrpVln",
"Group cells by:", choices = list(Time = "data.set",
Cluster = "cell.type.ident"), width = "100%")),
column(6,
numericInput("ptSizeVln", "Input cell size:", value = 0.25,
min = 0.00, step = 0.75, max = 2.00, width = "80%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV2"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Gene mismatches"), "(if present)", tags$b(":")),
column(8,uiOutput("notInVln")),
column(8, tags$hr()),
# column(8, tags$b(uiOutput("SelectedDataVln"))),
column(12, uiOutput("plot.uiVlnPlotF")
)
)
)
)
),
# ================ #
# tabPanel("Ridge Plots", #fluid = FALSE,
# sidebarLayout(fluid = TRUE,
# sidebarPanel(fluid = FALSE, width = 4,
# column(12, textInput("rdgGenes", width = "100%",
# "Insert gene name or ensembl ID:",
# value = smpl_genes_sm)),
# column(12, align = "center",
# actionButton("runRdgPlot", "Generate Plots",
# style = 'padding:5px; font-size:80%')),
# column(12, tags$hr(width = "50%"), align = "center"),
# column(12, align = "center", downloadButton(
# "downloadRdgPlot", "Download pdf",
# style = 'padding:5px; font-size:80%')),
# column(12, tags$br()),
# column(12, align = "center", uiOutput("cellSelectRdg")), # New
# column(12, tags$br()),
# column(12, align = "center",
# column(6,
# radioGroupButtons("selectGrpRdg",
# "Group cells by:", choices = list(Time = "data.set",
# Cluster = "cell.type.ident"), width = "100%")),
# column(6,
# numericInput("ptSizeRdg", "Input cell size:", value = 0.25,
# min = 0.00, step = 0.75, max = 2.00, width = "80%"))
# ),
# fluidRow(tags$br()),
# fluidRow(tags$br()),
# column(12, uiOutput("plot.uiDatFeatPlotV3"), align = "center"),
# fluidRow(tags$br()),
# fluidRow(tags$br())
# ),
# mainPanel(
# fluidRow(
# column(8, tags$br()),
# column(8, tags$b("Gene mismatches"), "(if present)", tags$b(":")),
# column(8,uiOutput("notInRdg")),
# column(8, tags$hr()),
# # column(8, tags$b(uiOutput("SelectedDataRdg"))),
# column(12, uiOutput("plot.uiRdgPlotF")
# )
# )
# )
# )
# ),
# ================ #
tabPanel("Dot Plot", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("dotGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("dPlotClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runDotPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadDotPlot", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectDot")), # New
column(12, tags$br()),
column(12, align = "center",
column(6,
radioGroupButtons("selectGrpDot",
"Group cells by:", choices = list(
Combined = "cell.type.ident.by.data.set",
Time = "data.set",Cluster = "cell.type.ident"), width = "100%",size = "xs")),
column(6,
numericInput("dotScale", "Dot diameter:", value = 10, min = 4,
step = 1, max = 20, width = "60%"), align = "center")
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV4"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInDot")),
column(8, tags$hr()),
column(8, align = "left",
# column(4, align = "center", "Manual figure adjustment:",
# column(11, style = "padding-top: 8px;",
# switchInput("manAdjustDot", value = FALSE))),
column(3, align = "left", numericInput(
"manAdjustDotW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustDotH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDotPlotF"))
)
)
)
),
# ================ # ggplot groupedheatmap
tabPanel("Heat Map", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("PhmapGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("pHmapClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runPhmap", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadhmap", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectHmap")), # New
column(12, tags$br()),
column(12, align = "center",
column(12,
radioGroupButtons("selectGrpHmap",
"Group cells by:",
choices = list(Combined = "cell.type.ident.by.data.set",
Time = "data.set",
Cluster = "cell.type.ident"),
width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV7"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInPhmap")),
column(8, tags$hr()),
column(8, align = "left",
column(3, align = "left", numericInput(
"manAdjustHmapW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustHmapH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiPheatmapF"))
)
)
)
),
#================ # ggplot Indv. Cell heatmap
tabPanel("Indv. Cell Heatmap", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("IndvPhmapGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("IndvpHmapClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runIndvPhmap", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadIndvhmap", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectIndvHmap")), # New
column(12, tags$br()),
column(12, align = "center", uiOutput("SelectDownSamplePropIndvHmap")), #downsample drop down
column(12, tags$br()),
column(12, align = "center",
column(12,
radioGroupButtons("selectGrpIndvHmap",
"Group cells by:",
choices = list(
Cluster = "cell.type.ident",
Time = "data.set",
Combined = "cell.type.ident.by.data.set"),
width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV8"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInIndvPhmap")),
column(8, tags$hr()),
column(8, align = "left",
column(3, align = "left", numericInput(
"manAdjustIndvHmapW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustIndvHmapH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiIndvpHeatmapF"),style = "overflow-y: scroll;overflow-x: scroll;")
)
)
)
),
# ================ #
tabPanel("Differential Expression", fluid = TRUE,
sidebarLayout(
sidebarPanel(
uiOutput("idents"),
column(12, align = "center",
uiOutput("diffOut1"),
fluidRow(tags$br()),
uiOutput("diffOut2")),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectDiff")),
column(12, tags$hr(width = "50%"), align = "center"),
fluidRow(tags$br()),
column(12, align = "center",
pickerInput("statSelectDiff", label = "Select statistical test:",
multiple = FALSE, selected = "wilcox", width = "210px",
choices = list(wilcox = "wilcox", bimodal = "bimod", ROC = "roc",
t = "t", negbinom = "negbinom", poisson = "poisson", LR = "LR",
MAST = "MAST", DESeq2 = "DESeq2"))),
fluidRow(tags$br()),
column(12, align = "center", numericInput("pValCutoff",
"Input adjusted p-value cutoff:", value = 0.05, min = 0.00,
step = 0.001, max = 1.00, width = "210px")),
fluidRow(tags$br()),
column(12, align = "center",
actionButton("runDiffExp", "Run Differential Expression",
style = "padding:5px; font-size:80%")),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center",
downloadButton("downloadDiffExp",
"Download Results",
style = 'padding:5px; font-size:80%')),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV5"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
# column(8, tags$b(uiOutput("SelectedDataDiff "))),
column(12, align = "left", class = "diffExpMain",
uiOutput("diffTable"),
column(12, align = "center",
fluidRow(tags$br()),
tags$b('Click "Run Differential Expression"'))
)
)
)
)
)
), style = 'width:1500px;')#,
# shinyDebuggingPanel::withDebuggingPanel()
)
| /regeneration_additional_timepoints/app_ui.R | no_license | diazdc/shiny-apps-main | R | false | false | 19,081 | r | ui <- fixedPage(theme = shinythemes::shinytheme("lumen"), # paper lumen cosmo
tags$head(includeCSS(paste0("./www/styles.css"))),
div(headerPanel(app_title), style = 'width:1560px;'),
div(tabsetPanel(
# ================ #
tabPanel("Welcome!", fluid = TRUE,
mainPanel(class = "welcome",
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, tags$br()),
column(12, align = "center",
uiOutput("plot.uiDatFeatPlotH1"), tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, align = "center",
tags$b("Select Analysis"),
column(12, tags$br()),
pickerInput("Analysis", label = "",
choices = list(Combined = names(file_list)),
selected = "all she-pos. cells", width = "50%")
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, tags$hr()),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(10, align = "center", offset = 1,
column(12, align = "left", tags$b("Instructions")),
column(12, align = "left",
'All genes available for plotting can be downloaded in the
Excel spreadsheet below labeled "genes in dataset", using either
Ensembl IDs or common names from the',
tags$b("Gene.name.unique"),'column as input. You cannot, however,
mix common names with Ensembl IDs in the same query. Groups of
genes can be directly copied/pasted from the spreadsheet into
the app input field and will have the necessary spacing
by default. Please note that this data set was produced with the
Ensembl 91 gene annotation in zebrafish (genome version 10).
We therefore recommend using Ensembl gene IDs as input,
as common gene names can change with annotation updates.',
'Cluster markers and related figures can be downloaded in',
tags$a(href = "http://bioinfo/n/projects/ddiaz/Analysis/Scripts/sb2191-regen/regen-summary/site/IntegratedData/",
tags$b("this notebook")),
'. All genes used for this dataset can be downloaded below:'),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, align = "center", offset = 0,
downloadButton("downloadDatasetGenes", "Genes in Data Set",
style = "padding:8px; font-size:80%")),
fluidRow(tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, align = "left", tags$b("An important note on ambiguous gene names")),
column(12, align = "left",
'Gene expression in this data set is quantfied by the number of
deduplicated UMIs (unique molecular index) that map to Ensembl
gene IDs, which are identify unique coding sequences (CDS) in the
zebrafish genome. In some cases different Ensembl IDs
will have the same common gene name. This may occur
when there is no consensus over which CDS represents
the common name, or because the product of a particular
CDS has not been characterized in detail. These repeated
common names are denoted by an asterisk followed by an
integer value (e.g. sox4a*1). The asterisk is not a part of the
common name by default; it was added to signify that the name
is repeated in this data set and needs further clarification.
The integer after the asterisk highlights which occurrence
of the repeat you are viewing, but does not carry any
functional significance. For example, sox4a has two
different Ensembl IDs in version 91 of the annotation,
ENSDARG00000096389 - sox4, and ENSDARG00000004588 - sox4a*1, but
only ENSDARG00000004588 - sox4a*1 is expressed in this data set.',
fluidRow(tags$br()),
fluidRow(tags$br()),
'The most important item to consider when referencing the
nucleotide sequence of a gene is',
tags$b("which Ensembl ID corresponds to
your expression pattern of interest."),
'This ensures that you are targeting the same CDS that reads
are mapping to for this particular data set. You can easily
check if a gene name is ambiguous by copying any portion
of the common name into the Gene Database section of the app
(sox4a is shown as an example by default). Details on
how Ensembl IDs are curated can be found at the folowing link:',
tags$a(
href = "http://www.ensembl.org/info/genome/genebuild/genome_annotation.html",
"http://www.ensembl.org/info/genome/genebuild/genome_annotation.html"),
'. Additionally, there are two spreadsheets below with all
of the repeated common names in both this data set, and the
Ensembl 91 zebrafish annotation.')),
fluidRow(tags$br()),
fluidRow(tags$br())
)
),
# ================ #
tabPanel("Gene Database", fluid = TRUE,
sidebarLayout(
sidebarPanel(
textInput("dbGenes", "Insert gene name or ensembl ID:",
value = "gadd45gb.1 slc1a3a znf185 si:ch73-261i21.5"),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV6"), align = "center"),
fluidRow(tags$br())
),
mainPanel(fluidRow(
column(11, tags$br()),
uiOutput("GeneDB")
)
)
)
),
# ================ #
tabPanel("Feature Plots", fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(width = 4,
column(12, align = "left",
textInput("featureGenes", "Insert gene name or ensembl ID:",
value = smpl_genes_sm)),
column(12, align = "center",
actionButton("runFeatPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center",
downloadButton("downloadFeaturePlotF", "Download png",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectFeat")),
column(12, tags$br()),
column(12,
column(6, textInput("CellBackCol",
"Cell background color:", value = "azure3")),
column(6, textInput("CellForeCol",
"Cell foreground color:", value = "blue3"))
),
column(12, tags$br()),
column(12, align = "center",
column(6, align = "left",
numericInput("featDPI", "Download quality (DPI):",
value = 200, min = 50, step = 25, max = 400, width = "100%")),
column(6, align = "left",
numericInput("ptSizeFeature", "Input cell size:", value = 0.50,
min = 0.25, step = 0.25, max = 2.00, width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV1"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8,uiOutput("notInFeat")),
column(8, tags$hr()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiFeaturePlotF")
)
)
)
)
),
# ================ #
tabPanel("Violin Plots", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, textInput("vlnGenes", width = "100%",
"Insert gene name or ensembl ID:",
value = smpl_genes_sm)),
column(12, align = "center",
actionButton("runVlnPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadVlnPlot", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectVln")), # New
column(12, tags$br()),
column(12, align = "center",
column(6,
radioGroupButtons("selectGrpVln",
"Group cells by:", choices = list(Time = "data.set",
Cluster = "cell.type.ident"), width = "100%")),
column(6,
numericInput("ptSizeVln", "Input cell size:", value = 0.25,
min = 0.00, step = 0.75, max = 2.00, width = "80%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV2"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Gene mismatches"), "(if present)", tags$b(":")),
column(8,uiOutput("notInVln")),
column(8, tags$hr()),
# column(8, tags$b(uiOutput("SelectedDataVln"))),
column(12, uiOutput("plot.uiVlnPlotF")
)
)
)
)
),
# ================ #
# tabPanel("Ridge Plots", #fluid = FALSE,
# sidebarLayout(fluid = TRUE,
# sidebarPanel(fluid = FALSE, width = 4,
# column(12, textInput("rdgGenes", width = "100%",
# "Insert gene name or ensembl ID:",
# value = smpl_genes_sm)),
# column(12, align = "center",
# actionButton("runRdgPlot", "Generate Plots",
# style = 'padding:5px; font-size:80%')),
# column(12, tags$hr(width = "50%"), align = "center"),
# column(12, align = "center", downloadButton(
# "downloadRdgPlot", "Download pdf",
# style = 'padding:5px; font-size:80%')),
# column(12, tags$br()),
# column(12, align = "center", uiOutput("cellSelectRdg")), # New
# column(12, tags$br()),
# column(12, align = "center",
# column(6,
# radioGroupButtons("selectGrpRdg",
# "Group cells by:", choices = list(Time = "data.set",
# Cluster = "cell.type.ident"), width = "100%")),
# column(6,
# numericInput("ptSizeRdg", "Input cell size:", value = 0.25,
# min = 0.00, step = 0.75, max = 2.00, width = "80%"))
# ),
# fluidRow(tags$br()),
# fluidRow(tags$br()),
# column(12, uiOutput("plot.uiDatFeatPlotV3"), align = "center"),
# fluidRow(tags$br()),
# fluidRow(tags$br())
# ),
# mainPanel(
# fluidRow(
# column(8, tags$br()),
# column(8, tags$b("Gene mismatches"), "(if present)", tags$b(":")),
# column(8,uiOutput("notInRdg")),
# column(8, tags$hr()),
# # column(8, tags$b(uiOutput("SelectedDataRdg"))),
# column(12, uiOutput("plot.uiRdgPlotF")
# )
# )
# )
# )
# ),
# ================ #
tabPanel("Dot Plot", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("dotGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("dPlotClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runDotPlot", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadDotPlot", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectDot")), # New
column(12, tags$br()),
column(12, align = "center",
column(6,
radioGroupButtons("selectGrpDot",
"Group cells by:", choices = list(
Combined = "cell.type.ident.by.data.set",
Time = "data.set",Cluster = "cell.type.ident"), width = "100%",size = "xs")),
column(6,
numericInput("dotScale", "Dot diameter:", value = 10, min = 4,
step = 1, max = 20, width = "60%"), align = "center")
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV4"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInDot")),
column(8, tags$hr()),
column(8, align = "left",
# column(4, align = "center", "Manual figure adjustment:",
# column(11, style = "padding-top: 8px;",
# switchInput("manAdjustDot", value = FALSE))),
column(3, align = "left", numericInput(
"manAdjustDotW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustDotH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDotPlotF"))
)
)
)
),
# ================ # ggplot groupedheatmap
tabPanel("Heat Map", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("PhmapGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("pHmapClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runPhmap", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadhmap", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectHmap")), # New
column(12, tags$br()),
column(12, align = "center",
column(12,
radioGroupButtons("selectGrpHmap",
"Group cells by:",
choices = list(Combined = "cell.type.ident.by.data.set",
Time = "data.set",
Cluster = "cell.type.ident"),
width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV7"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInPhmap")),
column(8, tags$hr()),
column(8, align = "left",
column(3, align = "left", numericInput(
"manAdjustHmapW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustHmapH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiPheatmapF"))
)
)
)
),
#================ # ggplot Indv. Cell heatmap
tabPanel("Indv. Cell Heatmap", #fluid = FALSE,
sidebarLayout(fluid = TRUE,
sidebarPanel(fluid = FALSE, width = 4,
column(12, align = "left ",
textInput("IndvPhmapGenes",
"Insert gene name or ensembl ID:",
value = smpl_genes_lg),
checkboxInput("IndvpHmapClust",
label = "Check box to enable row clustering.", value = FALSE)),
column(12, align = "center",
actionButton("runIndvPhmap", "Generate Plots",
style = 'padding:5px; font-size:80%')),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center", downloadButton(
"downloadIndvhmap", "Download pdf",
style = 'padding:5px; font-size:80%')),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectIndvHmap")), # New
column(12, tags$br()),
column(12, align = "center", uiOutput("SelectDownSamplePropIndvHmap")), #downsample drop down
column(12, tags$br()),
column(12, align = "center",
column(12,
radioGroupButtons("selectGrpIndvHmap",
"Group cells by:",
choices = list(
Cluster = "cell.type.ident",
Time = "data.set",
Combined = "cell.type.ident.by.data.set"),
width = "100%"))
),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV8"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
column(8, tags$b("Mismatches or genes not present"),
"(if applicable)", tags$b(":")),
column(8, uiOutput("notInIndvPhmap")),
column(8, tags$hr()),
column(8, align = "left",
column(3, align = "left", numericInput(
"manAdjustIndvHmapW", label = "Width (pixels):", value = 2400, step = 50,
width = "100%")),
column(3, align = "left", numericInput(
"manAdjustIndvHmapH", label = "Height (pixels):", value = 900, step = 50,
width = "100%"))
),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiIndvpHeatmapF"),style = "overflow-y: scroll;overflow-x: scroll;")
)
)
)
),
# ================ #
tabPanel("Differential Expression", fluid = TRUE,
sidebarLayout(
sidebarPanel(
uiOutput("idents"),
column(12, align = "center",
uiOutput("diffOut1"),
fluidRow(tags$br()),
uiOutput("diffOut2")),
column(12, tags$br()),
column(12, align = "center", uiOutput("cellSelectDiff")),
column(12, tags$hr(width = "50%"), align = "center"),
fluidRow(tags$br()),
column(12, align = "center",
pickerInput("statSelectDiff", label = "Select statistical test:",
multiple = FALSE, selected = "wilcox", width = "210px",
choices = list(wilcox = "wilcox", bimodal = "bimod", ROC = "roc",
t = "t", negbinom = "negbinom", poisson = "poisson", LR = "LR",
MAST = "MAST", DESeq2 = "DESeq2"))),
fluidRow(tags$br()),
column(12, align = "center", numericInput("pValCutoff",
"Input adjusted p-value cutoff:", value = 0.05, min = 0.00,
step = 0.001, max = 1.00, width = "210px")),
fluidRow(tags$br()),
column(12, align = "center",
actionButton("runDiffExp", "Run Differential Expression",
style = "padding:5px; font-size:80%")),
column(12, tags$hr(width = "50%"), align = "center"),
column(12, align = "center",
downloadButton("downloadDiffExp",
"Download Results",
style = 'padding:5px; font-size:80%')),
fluidRow(tags$br()),
fluidRow(tags$br()),
column(12, uiOutput("plot.uiDatFeatPlotV5"), align = "center"),
fluidRow(tags$br()),
fluidRow(tags$br())
),
mainPanel(
fluidRow(
column(8, tags$br()),
# column(8, tags$b(uiOutput("SelectedDataDiff "))),
column(12, align = "left", class = "diffExpMain",
uiOutput("diffTable"),
column(12, align = "center",
fluidRow(tags$br()),
tags$b('Click "Run Differential Expression"'))
)
)
)
)
)
), style = 'width:1500px;')#,
# shinyDebuggingPanel::withDebuggingPanel()
)
|
# Location clustering ---------------------------------------------------------
# Exploratory pass over the Airbnb tables: read the raw csv files, then
# tabulate candidate location-related columns of `listings` to judge which
# ones could support clustering by location.

# Libraries --------------------------------------------------------------------
library(readr) # For reading in data
library(dplyr) # for data manipulation

# Read in data ------------------------------------------------------------------
your_data_file_path <- "IAA/Fall 3/clustering/data/"
calendar <- read_csv(paste0(your_data_file_path, "calendar.csv"))
listings <- read_csv(paste0(your_data_file_path, "listings.csv"))
reviews <- read_csv(paste0(your_data_file_path, "reviews.csv"))

# Explore ------------------------------------------------------------------------
names(listings)

# There are 39 "smart" locations; some appear to be duplicates that differ
# only in spacing (e.g. "Boston, MA" vs "Boston , MA").
unique(as.factor(listings$smart_location))
count(listings, smart_location, sort = TRUE)
# The vast majority (>95%) are listed simply as Boston, so this column does
# not look useful.

# experiences_offered is "none" for every row -- not a useful column.
count(listings, experiences_offered, sort = TRUE)

# Neighbourhoods separate the listings cleanly.
count(listings, neighbourhood, sort = TRUE)

# There are many distinct zip codes as well.
count(listings, zipcode, sort = TRUE)

# Free-text field describing whether there are nearby transit options.
count(listings, transit, sort = TRUE)

# Free-text neighborhood overview; also mentions many nearby attractions.
count(listings, neighborhood_overview, sort = TRUE)
| /clustering/HW1/location_clustering.R | no_license | sopheeli/F3-Blueteam12 | R | false | false | 1,611 | r | # location clustering
# Exploratory screening of location-related columns in the Boston Airbnb data,
# to decide which variables are worth using for location clustering.
# libraries ---------------------------------------------------------------
library(readr) # for reading in data
library(dplyr) # for data manipulation
# Read in Data ------------------------------------------------------------
your_data_file_path <- "IAA/Fall 3/clustering/data/"
calendar <- read_csv(paste0(your_data_file_path,"calendar.csv"))
listings <- read_csv(paste0(your_data_file_path,"listings.csv"))
reviews <- read_csv(paste0(your_data_file_path,"reviews.csv"))
# Explore -----------------------------------------------------------------
names(listings)
# There are 39 "Smart" locations; some of them look exactly the same (i.e. "Boston, MA" vs. "Boston , MA").
unique(as.factor(listings$smart_location))
listings %>%
  group_by(smart_location) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
# Most listings (>95%) are listed as just Boston; therefore this column doesn't seem useful.
# Experiences offered are ALL NONE!
# Not a useful column.
listings %>%
  group_by(experiences_offered) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
# Seems to be clear separation of neighborhoods.
listings %>%
  group_by(neighbourhood) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
# Lots of different zipcodes too.
listings %>%
  group_by(zipcode) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
# Free-text description of nearby transit options.
listings %>%
  group_by(transit) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
# Also gives a ton of attractions (free text).
listings %>%
  group_by(neighborhood_overview) %>%
  summarise(n=n()) %>%
  arrange(desc(n))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_dependency_matrix.R
\name{render_dependency_matrix}
\alias{render_dependency_matrix}
\title{Renders a dependency matrix as a dependency graph}
\usage{
render_dependency_matrix(
dependencies,
rankdir = "LR",
layout = "dot",
render = T
)
}
\arguments{
\item{dependencies}{A dependency matrix created by \code{\link{dependency_matrix}}}
\item{rankdir}{Rankdir to be used for DiagrammeR.}
\item{layout}{Layout to be used for DiagrammeR.}
\item{render}{Whether to directly render the DiagrammeR graph or simply return it.}
}
\value{
A DiagrammeR graph of the (filtered) dependency matrix.
}
\description{
Creates a dependency graph visualizing the contents of a dependency matrix.
}
\examples{
render_dependency_matrix(dependency_matrix(L_heur_1))
}
| /man/render_dependency_matrix.Rd | no_license | cran/heuristicsmineR | R | false | true | 872 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_dependency_matrix.R
\name{render_dependency_matrix}
\alias{render_dependency_matrix}
\title{Renders a dependency matrix as a dependency graph}
\usage{
render_dependency_matrix(
dependencies,
rankdir = "LR",
layout = "dot",
render = T
)
}
\arguments{
\item{dependencies}{A dependency matrix created by \code{\link{dependency_matrix}}}
\item{rankdir}{Rankdir to be used for DiagrammeR.}
\item{layout}{Layout to be used for DiagrammeR.}
\item{render}{Whether to directly render the DiagrammeR graph or simply return it.}
}
\value{
A DiagrammeR graph of the (filtered) dependency matrix.
}
\description{
Creates a dependency graph visualizing the contents of a dependency matrix.
}
\examples{
render_dependency_matrix(dependency_matrix(L_heur_1))
}
|
\name{party-methods}
\alias{party-methods}
\alias{length.party}
\alias{print.party}
\alias{print.simpleparty}
\alias{print.constparty}
\alias{[.party}
\alias{[[.party}
\alias{depth.party}
\alias{width.party}
\alias{getCall.party}
\alias{nodeprune}
\alias{nodeprune.party}
\title{ Methods for Party Objects }
\description{
Methods for computing on \code{party} objects.
}
\usage{
\method{print}{party}(x,
terminal_panel = function(node)
formatinfo_node(node, default = "*", prefix = ": "),
tp_args = list(),
inner_panel = function(node) "", ip_args = list(),
header_panel = function(party) "",
footer_panel = function(party) "",
digits = getOption("digits") - 2, \dots)
\method{print}{simpleparty}(x, digits = getOption("digits") - 4,
header = NULL, footer = TRUE, \dots)
\method{print}{constparty}(x, FUN = NULL, digits = getOption("digits") - 4,
header = NULL, footer = TRUE, \dots)
\method{length}{party}(x)
\method{[}{party}(x, i, \dots)
\method{[[}{party}(x, i, \dots)
\method{depth}{party}(x, root = FALSE, \dots)
\method{width}{party}(x, \dots)
\method{nodeprune}{party}(x, ids, ...)
}
\arguments{
\item{x}{ an object of class \code{\link{party}}.}
\item{i}{ an integer specifying the root of the subtree to extract.}
\item{terminal_panel}{ a panel function for printing terminal nodes.}
\item{tp_args}{ a list containing arguments to \code{terminal_panel}.}
\item{inner_panel}{ a panel function for printing inner nodes.}
\item{ip_args}{ a list containing arguments to \code{inner_panel}.}
\item{header_panel}{ a panel function for printing the header.}
\item{footer_panel}{ a panel function for printing the footer.}
\item{digits}{ number of digits to be printed.}
\item{header}{ header to be printed.}
\item{footer}{ footer to be printed.}
\item{FUN}{ a function to be applied to nodes.}
\item{root}{ a logical. Should the root count be counted in \code{depth}? }
\item{ids}{ a vector of node ids (or their names) to be pruned-off.}
\item{\dots}{ additional arguments.}
}
\details{
\code{length} gives the number of nodes in the tree (in contrast
to the \code{length} method for \code{\link{partynode}} objects which
returns the number of kid nodes in the root),
\code{depth} the depth of the tree and \code{width}
the number of terminal nodes. The subset methods extract subtrees
and the \code{print} method generates a textual representation
of the tree. \code{nodeprune} prunes-off nodes and makes sure that
the node ids of the resulting tree are in pre-order starting with root node id 1.
For \code{constparty} objects, the \code{fitted} slot is also changed.
}
\examples{
## a tree as flat list structure
nodelist <- list(
# root node
list(id = 1L, split = partysplit(varid = 4L, breaks = 1.9),
kids = 2:3),
# V4 <= 1.9, terminal node
list(id = 2L),
# V4 > 1.9
list(id = 3L, split = partysplit(varid = 5L, breaks = 1.7),
kids = c(4L, 7L)),
# V5 <= 1.7
list(id = 4L, split = partysplit(varid = 4L, breaks = 4.8),
kids = 5:6),
# V4 <= 4.8, terminal node
list(id = 5L),
# V4 > 4.8, terminal node
list(id = 6L),
# V5 > 1.7, terminal node
list(id = 7L)
)
## convert to a recursive structure
node <- as.partynode(nodelist)
## set up party object
data("iris")
tree <- party(node, data = iris,
fitted = data.frame("(fitted)" =
fitted_node(node, data = iris),
check.names = FALSE))
names(tree) <- paste("Node", nodeids(tree), sep = " ")
## number of nodes in the tree
length(tree)
## depth of tree
depth(tree)
## number of terminal nodes
width(tree)
## node number four
tree["Node 4"]
tree[["Node 4"]]
}
\keyword{tree}
| /man/party-methods.Rd | no_license | cran/partykit | R | false | false | 3,883 | rd | \name{party-methods}
\alias{party-methods}
\alias{length.party}
\alias{print.party}
\alias{print.simpleparty}
\alias{print.constparty}
\alias{[.party}
\alias{[[.party}
\alias{depth.party}
\alias{width.party}
\alias{getCall.party}
\alias{nodeprune}
\alias{nodeprune.party}
\title{ Methods for Party Objects }
\description{
Methods for computing on \code{party} objects.
}
\usage{
\method{print}{party}(x,
terminal_panel = function(node)
formatinfo_node(node, default = "*", prefix = ": "),
tp_args = list(),
inner_panel = function(node) "", ip_args = list(),
header_panel = function(party) "",
footer_panel = function(party) "",
digits = getOption("digits") - 2, \dots)
\method{print}{simpleparty}(x, digits = getOption("digits") - 4,
header = NULL, footer = TRUE, \dots)
\method{print}{constparty}(x, FUN = NULL, digits = getOption("digits") - 4,
header = NULL, footer = TRUE, \dots)
\method{length}{party}(x)
\method{[}{party}(x, i, \dots)
\method{[[}{party}(x, i, \dots)
\method{depth}{party}(x, root = FALSE, \dots)
\method{width}{party}(x, \dots)
\method{nodeprune}{party}(x, ids, ...)
}
\arguments{
\item{x}{ an object of class \code{\link{party}}.}
\item{i}{ an integer specifying the root of the subtree to extract.}
\item{terminal_panel}{ a panel function for printing terminal nodes.}
\item{tp_args}{ a list containing arguments to \code{terminal_panel}.}
\item{inner_panel}{ a panel function for printing inner nodes.}
\item{ip_args}{ a list containing arguments to \code{inner_panel}.}
\item{header_panel}{ a panel function for printing the header.}
\item{footer_panel}{ a panel function for printing the footer.}
\item{digits}{ number of digits to be printed.}
\item{header}{ header to be printed.}
\item{footer}{ footer to be printed.}
\item{FUN}{ a function to be applied to nodes.}
\item{root}{ a logical. Should the root count be counted in \code{depth}? }
\item{ids}{ a vector of node ids (or their names) to be pruned-off.}
\item{\dots}{ additional arguments.}
}
\details{
\code{length} gives the number of nodes in the tree (in contrast
to the \code{length} method for \code{\link{partynode}} objects which
returns the number of kid nodes in the root),
\code{depth} the depth of the tree and \code{width}
the number of terminal nodes. The subset methods extract subtrees
and the \code{print} method generates a textual representation
of the tree. \code{nodeprune} prunes-off nodes and makes sure that
the node ids of the resulting tree are in pre-order starting with root node id 1.
For \code{constparty} objects, the \code{fitted} slot is also changed.
}
\examples{
## a tree as flat list structure
nodelist <- list(
# root node
list(id = 1L, split = partysplit(varid = 4L, breaks = 1.9),
kids = 2:3),
# V4 <= 1.9, terminal node
list(id = 2L),
# V4 > 1.9
list(id = 3L, split = partysplit(varid = 5L, breaks = 1.7),
kids = c(4L, 7L)),
# V5 <= 1.7
list(id = 4L, split = partysplit(varid = 4L, breaks = 4.8),
kids = 5:6),
# V4 <= 4.8, terminal node
list(id = 5L),
# V4 > 4.8, terminal node
list(id = 6L),
# V5 > 1.7, terminal node
list(id = 7L)
)
## convert to a recursive structure
node <- as.partynode(nodelist)
## set up party object
data("iris")
tree <- party(node, data = iris,
fitted = data.frame("(fitted)" =
fitted_node(node, data = iris),
check.names = FALSE))
names(tree) <- paste("Node", nodeids(tree), sep = " ")
## number of nodes in the tree
length(tree)
## depth of tree
depth(tree)
## number of terminal nodes
width(tree)
## node number four
tree["Node 4"]
tree[["Node 4"]]
}
\keyword{tree}
|
#########################
## SNU Global Data Center
## 2019 March
## Sooahn Shin
#########################
## Environment setup: clear workspace, load packages, set a user-specific wd.
rm(list=ls()) # NOTE(review): clearing the global environment in a script is an anti-pattern; prefer running in a fresh R session
library(tidyverse)
library(tidytext)
library(SnowballC)
library(udpipe)
library(lattice)
library(wesanderson)
library(ggraph)
library(igraph)
## user specific working directory setup
## NOTE(review): setwd() ties the script to specific machines; prefer project-relative paths
if(Sys.getenv("LOGNAME") == "park"){
  setwd("~/Dropbox/BigDataDiplomacy/Code/2019/Analysis")
}else{
  setwd("~/Dropbox/GlobalDataCenter/Analysis")
}
### function
## Draw a co-occurrence network plot for public figures.
##
## mat    : square co-occurrence matrix (figures x figures). Off-diagonal
##          cells hold pairwise co-occurrence counts; the diagonal holds each
##          figure's own article count, used to size the vertices.
## lb     : lower bound -- only pairs with co-occurrence > lb are drawn.
## layout : ggraph layout name; "linear" is drawn on a circle.
##
## Returns a ggraph/ggplot object (not yet printed).
coocNetworkPlot <- function(mat, lb = 10, layout = "nicely") {
  Fig <- as.data.frame(as.table(mat))
  # Split the diagonal (per-figure frequency) from the off-diagonal pairs.
  Fig.freq <- Fig %>% filter(Var1 == Var2)
  Fig <- Fig %>% filter(Var1 != Var2)
  colnames(Fig) <- c("figure1", "figure2", "cooc")
  Fig <- Fig %>% filter(cooc > lb)
  Fig.graph <- Fig %>% graph_from_data_frame()
  # Vertex sizes: frequencies of the surviving figures, reordered to match
  # the graph's vertex order.
  size.vertex <- Fig.freq %>% filter(Var1 %in% V(Fig.graph)$name)
  size.vertex <- size.vertex[order(match(size.vertex$Var1, V(Fig.graph)$name)), ]
  size.vertex <- size.vertex %>% select(Freq) %>% unlist()
  # The original duplicated the entire plotting chain in both branches of an
  # if/else that differed only in the circular layout flag; build the layout
  # once instead (and pass a proper logical instead of the string "TRUE").
  if (layout == "linear") {
    base <- Fig.graph %>% ggraph(layout = layout, circular = TRUE)
  } else {
    base <- Fig.graph %>% ggraph(layout = layout)
  }
  base +
    geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "grey") +
    geom_node_point(aes(size = size.vertex), alpha = 0.4) +
    geom_node_text(aes(label = name)) +
    theme_graph(base_family = "sans") +
    theme(legend.position = "none") +
    scale_size(range = c(5, 15)) +
    scale_edge_width(range = c(0.5, 5)) +
    scale_edge_alpha(range = c(0.4, 0.6))
}
### News Data
load("keyplayers.RData")  # provides 'keyplayers' (used columns: regex, name) -- assumed; confirm
load("news_data.RData")   # provides 'news_data' (used columns: text_raw, id_row, month) -- assumed; confirm
# Article x figure indicator matrix: TRUE if the article text matches the
# figure's regex.
occur_matrix <- sapply(keyplayers$regex, function(x) grepl(x,news_data$text_raw))
rownames(occur_matrix) <- news_data$id_row
colnames(occur_matrix) <- keyplayers$name
### count = # of articles which mentioned the figure
keyplayers$count <- colSums(occur_matrix)
# Top-10 most-mentioned figures (printed, not stored).
keyplayers %>%
  arrange(-count) %>%
  top_n(10,wt=count)
# Monthly mention counts per figure, long format, sorted within month.
monthly_count <- occur_matrix %>%
  as.data.frame() %>%
  mutate(month = news_data$month) %>%
  aggregate(. ~ month, ., sum) %>%
  gather(name, count, -month) %>%
  arrange(month, -count)
monthly_top10 <- monthly_count %>%
  group_by(month) %>%
  top_n(10, wt = count)
# Co-occurrence matrix: t(X) %*% X counts, for each pair of figures, the
# articles mentioning both (diagonal = per-figure article counts).
cooc_matrix <- t(occur_matrix)%*%occur_matrix
dim(cooc_matrix)
coocNetworkPlot(cooc_matrix, lb=40)
| /Analysis/4. keyplayer.R | no_license | JuwonOh/GlobalDataCenter | R | false | false | 3,362 | r | #########################
## SNU Global Data Center
## 2019 March
## Sooahn Shin
#########################
## Environment setup: clear workspace, load packages, set a user-specific wd.
rm(list=ls()) # NOTE(review): clearing the global environment in a script is an anti-pattern; prefer running in a fresh R session
library(tidyverse)
library(tidytext)
library(SnowballC)
library(udpipe)
library(lattice)
library(wesanderson)
library(ggraph)
library(igraph)
## user specific working directory setup
## NOTE(review): setwd() ties the script to specific machines; prefer project-relative paths
if(Sys.getenv("LOGNAME") == "park"){
  setwd("~/Dropbox/BigDataDiplomacy/Code/2019/Analysis")
}else{
  setwd("~/Dropbox/GlobalDataCenter/Analysis")
}
### function
## Draw a co-occurrence network plot for public figures.
##
## mat    : square co-occurrence matrix (figures x figures). Off-diagonal
##          cells hold pairwise co-occurrence counts; the diagonal holds each
##          figure's own article count, used to size the vertices.
## lb     : lower bound -- only pairs with co-occurrence > lb are drawn.
## layout : ggraph layout name; "linear" is drawn on a circle.
##
## Returns a ggraph/ggplot object (not yet printed).
coocNetworkPlot <- function(mat, lb = 10, layout = "nicely") {
  Fig <- as.data.frame(as.table(mat))
  # Split the diagonal (per-figure frequency) from the off-diagonal pairs.
  Fig.freq <- Fig %>% filter(Var1 == Var2)
  Fig <- Fig %>% filter(Var1 != Var2)
  colnames(Fig) <- c("figure1", "figure2", "cooc")
  Fig <- Fig %>% filter(cooc > lb)
  Fig.graph <- Fig %>% graph_from_data_frame()
  # Vertex sizes: frequencies of the surviving figures, reordered to match
  # the graph's vertex order.
  size.vertex <- Fig.freq %>% filter(Var1 %in% V(Fig.graph)$name)
  size.vertex <- size.vertex[order(match(size.vertex$Var1, V(Fig.graph)$name)), ]
  size.vertex <- size.vertex %>% select(Freq) %>% unlist()
  # The original duplicated the entire plotting chain in both branches of an
  # if/else that differed only in the circular layout flag; build the layout
  # once instead (and pass a proper logical instead of the string "TRUE").
  if (layout == "linear") {
    base <- Fig.graph %>% ggraph(layout = layout, circular = TRUE)
  } else {
    base <- Fig.graph %>% ggraph(layout = layout)
  }
  base +
    geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "grey") +
    geom_node_point(aes(size = size.vertex), alpha = 0.4) +
    geom_node_text(aes(label = name)) +
    theme_graph(base_family = "sans") +
    theme(legend.position = "none") +
    scale_size(range = c(5, 15)) +
    scale_edge_width(range = c(0.5, 5)) +
    scale_edge_alpha(range = c(0.4, 0.6))
}
### News Data
load("keyplayers.RData")  # provides 'keyplayers' (used columns: regex, name) -- assumed; confirm
load("news_data.RData")   # provides 'news_data' (used columns: text_raw, id_row, month) -- assumed; confirm
# Article x figure indicator matrix: TRUE if the article text matches the
# figure's regex.
occur_matrix <- sapply(keyplayers$regex, function(x) grepl(x,news_data$text_raw))
rownames(occur_matrix) <- news_data$id_row
colnames(occur_matrix) <- keyplayers$name
### count = # of articles which mentioned the figure
keyplayers$count <- colSums(occur_matrix)
# Top-10 most-mentioned figures (printed, not stored).
keyplayers %>%
  arrange(-count) %>%
  top_n(10,wt=count)
# Monthly mention counts per figure, long format, sorted within month.
monthly_count <- occur_matrix %>%
  as.data.frame() %>%
  mutate(month = news_data$month) %>%
  aggregate(. ~ month, ., sum) %>%
  gather(name, count, -month) %>%
  arrange(month, -count)
monthly_top10 <- monthly_count %>%
  group_by(month) %>%
  top_n(10, wt = count)
# Co-occurrence matrix: t(X) %*% X counts, for each pair of figures, the
# articles mentioning both (diagonal = per-figure article counts).
cooc_matrix <- t(occur_matrix)%*%occur_matrix
dim(cooc_matrix)
coocNetworkPlot(cooc_matrix, lb=40)
|
# sanger_AKRvDBA_missense_genes function
#
# written by Brian Ritchey
#
# Query the Sanger Mouse SNP viewer for missense variants differing between
# the AKR/J and DBA/2J strains in a genomic region, group the dbSNP ids by
# gene, and hand them to missense_for_provean() (defined elsewhere).
#
#   chr  - chromosome identifier (e.g. 1 or "X")
#   pos1 - region start, in megabases
#   pos2 - region end, in megabases
#
# Returns whatever missense_for_provean() returns.
sanger_AKRvDBA_missense_genes <-
  function(chr, pos1, pos2){
    library(rvest)    # was require(); library() fails loudly when missing
    library(rentrez)
    # Positions are supplied in Mb; the Sanger URL expects base pairs.
    pos1 <- pos1 * 10^6
    pos2 <- pos2 * 10^6
    # Assemble the SNP-viewer query URL: <base><chr>%3A<start>-<end><filters>
    query_1 <- "https://www.sanger.ac.uk/sanger/Mouse_SnpViewer/rel-1211?gene=&context=0&loc="
    query_2 <- "%3A"
    query_3 <- "-"
    query_4 <- "&release=rel-1211&sn=missense_variant&st=akr_j&st=dba_2j"
    sanger_query <- paste0(query_1, chr, query_2, pos1, query_3, pos2, query_4)
    sanger_html <- read_html(sanger_query)
    gene_table <- sanger_html %>%
      html_node("#t_snps_0 > div.scrollable > table") %>%
      html_table(header = TRUE)
    # Exclude rows without a dbSNP id or where the AKR allele (column 6)
    # equals the DBA allele (column 7).
    exclude <- which(gene_table$dbSNP == "-" | gene_table[, 6] == gene_table[, 7])
    # BUG FIX: when 'exclude' is empty, gene_table[-integer(0), ] drops ALL
    # rows; only subset when there is something to remove.
    if (length(exclude) > 0) {
      gene_table <- gene_table[-exclude, , drop = FALSE]
    }
    rownames(gene_table) <- NULL
    # Collect the dbSNP ids per gene, preserving order of first appearance.
    genes <- unique(gene_table$Gene)
    rs_list <- lapply(genes, function(g) gene_table$dbSNP[gene_table$Gene == g])
    names(rs_list) <- genes
    # The code above filters the region and decides which gene rs ids to feed
    # to missense_for_provean(); its result is returned.
    missense_for_provean(rs_list = rs_list, gene_table = gene_table)
  }
| /sanger_AKRvDBA_missense_genes.R | no_license | BrianRitchey/qtl | R | false | false | 1,409 | r | # sanger_AKRvDBA_missense_genes function
#
# written by Brian Ritchey
#
# Query the Sanger Mouse SNP viewer for missense variants differing between
# the AKR/J and DBA/2J strains in a genomic region, group the dbSNP ids by
# gene, and hand them to missense_for_provean() (defined elsewhere).
#
#   chr  - chromosome identifier (e.g. 1 or "X")
#   pos1 - region start, in megabases
#   pos2 - region end, in megabases
#
# Returns whatever missense_for_provean() returns.
sanger_AKRvDBA_missense_genes <-
  function(chr, pos1, pos2){
    library(rvest)    # was require(); library() fails loudly when missing
    library(rentrez)
    # Positions are supplied in Mb; the Sanger URL expects base pairs.
    pos1 <- pos1 * 10^6
    pos2 <- pos2 * 10^6
    # Assemble the SNP-viewer query URL: <base><chr>%3A<start>-<end><filters>
    query_1 <- "https://www.sanger.ac.uk/sanger/Mouse_SnpViewer/rel-1211?gene=&context=0&loc="
    query_2 <- "%3A"
    query_3 <- "-"
    query_4 <- "&release=rel-1211&sn=missense_variant&st=akr_j&st=dba_2j"
    sanger_query <- paste0(query_1, chr, query_2, pos1, query_3, pos2, query_4)
    sanger_html <- read_html(sanger_query)
    gene_table <- sanger_html %>%
      html_node("#t_snps_0 > div.scrollable > table") %>%
      html_table(header = TRUE)
    # Exclude rows without a dbSNP id or where the AKR allele (column 6)
    # equals the DBA allele (column 7).
    exclude <- which(gene_table$dbSNP == "-" | gene_table[, 6] == gene_table[, 7])
    # BUG FIX: when 'exclude' is empty, gene_table[-integer(0), ] drops ALL
    # rows; only subset when there is something to remove.
    if (length(exclude) > 0) {
      gene_table <- gene_table[-exclude, , drop = FALSE]
    }
    rownames(gene_table) <- NULL
    # Collect the dbSNP ids per gene, preserving order of first appearance.
    genes <- unique(gene_table$Gene)
    rs_list <- lapply(genes, function(g) gene_table$dbSNP[gene_table$Gene == g])
    names(rs_list) <- genes
    # The code above filters the region and decides which gene rs ids to feed
    # to missense_for_provean(); its result is returned.
    missense_for_provean(rs_list = rs_list, gene_table = gene_table)
  }
|
# Predict orthoptera based on satellite observations
# Thomas Nauss
# Load the project environment (paths such as 'path_results', packages, etc.).
if(Sys.info()["sysname"] == "Windows"){
  source("D:/orthoptera/orthoptera_prediction/src/00_set_environment.R")
} else {
  source("/media/tnauss/myWork/analysis/orthoptera/orthoptera_prediction/src/00_set_environment.R")
}
compute <- TRUE
# Analyse model results --------------------------------------------------------
# Loads 'model_instances' (a list of fitted model wrappers).
load(paste0(path_results, "gpm_trainModel_model_instances_004.RData"))
# NOTE(review): only the _004 file is loaded, so i001..i004 all alias the same
# object; presumably _001.._003 should each be loaded first -- confirm.
i001 <- model_instances
i002 <- model_instances
i003 <- model_instances
i004 <- model_instances
# Variable importance per model instance, one data.frame each.
vi <- lapply(i003, function(i){
  vi <- caret::varImp(i$model)
  variables <- rownames(vi)
  data.frame(VARIABLE = variables,
             IMPORTANCE = vi$Overall)
})
vi_species <- do.call("rbind", vi)
# How often each variable appears, and its mean importance across models.
vi_count <- vi_species %>% count(VARIABLE)
vi_mean <- vi_species %>% group_by(VARIABLE) %>% summarise(mean = mean(IMPORTANCE))
vi <- merge(vi_count, vi_mean)
# NOTE(review): vi_species has no RESPONSE column above, so this assigns NULL
# (effectively a no-op); likely a leftover from another version -- confirm.
vi$RESPONSE <- vi_species$RESPONSE[1]
vi <- vi[order(vi$n, decreasing = TRUE), ,drop = FALSE]
# Number of predictors retained per final model.
vi_length <- lapply(i003, function(i){
  length(i$model$finalModel$importance)
})
summary(unlist(vi_length))
caret::varImp(i001[[100]]$model)
# var_imp <- compVarImp(models@model$rf_rfe, scale = FALSE)
#
# var_imp_scale <- compVarImp(models@model$rf_rfe, scale = TRUE)
#
# var_imp_plot <- plotVarImp(var_imp)
#
# var_imp_heat <- plotVarImpHeatmap(var_imp_scale, xlab = "Species", ylab = "Band")
#
# tstat <- compContTests(models@model$rf_rfe, mean = TRUE)
#
# tstat_mean <- merge(tstat[[1]], obsv_gpm[[prj]]@meta$input$MIN_OCCURENCE,
#                     by.x = "Response", by.y="names")
#
# tstat_mean[order(tstat_mean$Kappa_mean, decreasing = TRUE),]
#
# ggplot(data = tstat_mean, aes(x = mo_mean, y = Kappa_mean)) + geom_point() + geom_smooth()
| /src/05_gls_analysis.R | permissive | environmentalinformatics-marburg/orthoptera_prediction | R | false | false | 1,778 | r | # Predict orthoptera based on satellite observations
# Thomas Nauss
# Load the project environment (paths such as 'path_results', packages, etc.).
if(Sys.info()["sysname"] == "Windows"){
  source("D:/orthoptera/orthoptera_prediction/src/00_set_environment.R")
} else {
  source("/media/tnauss/myWork/analysis/orthoptera/orthoptera_prediction/src/00_set_environment.R")
}
compute <- TRUE
# Analyse model results --------------------------------------------------------
# Loads 'model_instances' (a list of fitted model wrappers).
load(paste0(path_results, "gpm_trainModel_model_instances_004.RData"))
# NOTE(review): only the _004 file is loaded, so i001..i004 all alias the same
# object; presumably _001.._003 should each be loaded first -- confirm.
i001 <- model_instances
i002 <- model_instances
i003 <- model_instances
i004 <- model_instances
# Variable importance per model instance, one data.frame each.
vi <- lapply(i003, function(i){
  vi <- caret::varImp(i$model)
  variables <- rownames(vi)
  data.frame(VARIABLE = variables,
             IMPORTANCE = vi$Overall)
})
vi_species <- do.call("rbind", vi)
# How often each variable appears, and its mean importance across models.
vi_count <- vi_species %>% count(VARIABLE)
vi_mean <- vi_species %>% group_by(VARIABLE) %>% summarise(mean = mean(IMPORTANCE))
vi <- merge(vi_count, vi_mean)
# NOTE(review): vi_species has no RESPONSE column above, so this assigns NULL
# (effectively a no-op); likely a leftover from another version -- confirm.
vi$RESPONSE <- vi_species$RESPONSE[1]
vi <- vi[order(vi$n, decreasing = TRUE), ,drop = FALSE]
# Number of predictors retained per final model.
vi_length <- lapply(i003, function(i){
  length(i$model$finalModel$importance)
})
summary(unlist(vi_length))
caret::varImp(i001[[100]]$model)
# var_imp <- compVarImp(models@model$rf_rfe, scale = FALSE)
#
# var_imp_scale <- compVarImp(models@model$rf_rfe, scale = TRUE)
#
# var_imp_plot <- plotVarImp(var_imp)
#
# var_imp_heat <- plotVarImpHeatmap(var_imp_scale, xlab = "Species", ylab = "Band")
#
# tstat <- compContTests(models@model$rf_rfe, mean = TRUE)
#
# tstat_mean <- merge(tstat[[1]], obsv_gpm[[prj]]@meta$input$MIN_OCCURENCE,
#                     by.x = "Response", by.y="names")
#
# tstat_mean[order(tstat_mean$Kappa_mean, decreasing = TRUE),]
#
# ggplot(data = tstat_mean, aes(x = mo_mean, y = Kappa_mean)) + geom_point() + geom_smooth()
|
#' @title Function to make a violin plot.
#'
#' @description
#' \code{plot.violins2} creates a violin plot from a list of numerical vectors.
#' Relies on helper functions \code{DoScale} and (when \code{invlogit=TRUE})
#' \code{inv.logit} being available in the calling environment.
#'
#' @param dat.list A list of numerical vectors. Each list item will be associated with its own violin, representing its distribution.
#' @param x A character or numerical vector indicating the labels associated with each item in dat.list.
#' @param at The location of each violin on the X axis.
#' @param violin.width Half-width of a violin, in units of the grouping axis.
#' @param invlogit If TRUE, density coordinates are transformed with inv.logit before plotting.
#' @param ylim Axis limits for the value axis; defaults to c(0,1) when invlogit=TRUE, else the data range.
#' @param col Colour used for the shaded interval fill.
#' @param cex Size of the white point marking the median (or mean).
#' @param add If the violins are to be added to an existing plot (TRUE) or if a new plot is to be generated (FALSE).
#' @param plot.ci Central probability mass to shade (e.g. 0.95); 1 shades the whole violin.
#' @param border.col Colour of the violin outlines.
#' @param alpha Transparency applied to the fill colour.
#' @param fromto Optional length-2 vector passed to density(from=, to=).
#' @param median If TRUE the white point marks the median, otherwise the mean.
#' @param scale.width If TRUE, each violin's width is scaled by its own density peak.
#' @param lwd.violins Line width of the violin outline.
#' @param lwd.CI Line width of the shaded-interval outline.
#'
#' @author Richard Bischof, \email{richard.bischof@@nmbu.no}
#' @backref R/plot.violins.R
#' @keywords simul
#'
#' @examples
#' # Generate a violin plot
#' plot.violins2(dat.list=lapply(1:5,function(x)rnorm(10000,x,5)),x=1:5,at=NULL,invlogit=FALSE,ylim=NULL,col="darkblue",cex=0.5,add=FALSE)
#'
plot.violins2<-function(dat.list
                        , x
                        , at = NULL
                        , violin.width = 0.20
                        , invlogit = FALSE
                        , ylim = NULL
                        , col = "darkblue"
                        , cex = 0.5
                        , add = FALSE
                        , plot.ci = 1
                        , border.col = "black"
                        , alpha = 0
                        , fromto = NULL
                        , median = TRUE
                        , scale.width = FALSE
                        , lwd.violins = 1
                        , lwd.CI = 1){
  # Default violin positions: one slot per element of x.
  if(is.null(at))at<-1:length(x)
  if(!add){
    # Open a fresh plot frame sized to hold all violins.
    # NOTE(review): this frame puts 'at' on the x axis, but the polygon/points
    # calls below swap coordinates (horizontal violins) -- looks mid-refactor;
    # confirm the intended orientation.
    if(invlogit & is.null(ylim))ylim<-c(0,1)#inv.logit(range(unlist(dat.list)))
    if(!invlogit & is.null(ylim))ylim<-range(unlist(dat.list))
    plot(1,1,type="n",ylim=ylim,xlim=c(min(at)-0.5,max(at)+0.5),axes=FALSE,xlab="",ylab="")
    axis(1,at=at)#, labels=x,lwd=0)
    axis(2)
  }
  i<-1  # leftover from interactive debugging; harmless (overwritten by the loop)
  #---GET THE VIOLIN-SPECIFIC SCALE IF SO INDICATED
  # amp<-unlist(lapply(1:length(x),function(i){
  #   max(density(dat.list[[i]])$y)
  # }))
  for(i in 1: length(x)){
    # Kernel density of this group's values (optionally restricted to fromto).
    temp<-density(dat.list[[i]])
    if(!is.null(fromto)) temp<-density(dat.list[[i]],from=fromto[1],to=fromto[2])
    if(invlogit){
      temp$x<-inv.logit(temp$x)
    }
    #violin.width<-0.20 #in units x (group variable)
    # Rescale the density heights into the violin half-width; DoScale is an
    # external helper -- assumed to map a vector onto [min, max]. TODO confirm.
    #if(scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width[1]*amp[i]/max(amp))[-1]#--scaled to a portion of the year for plotting
    if(scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width*max(temp$y))[-1]
    if(!scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width)[-1]#--scaled to a portion of the year for plotting
    #if(!scale.width & length(violin.width)==length(x)) scal.y<-DoScale(c(0,temp$y),0,violin.width[i])[-1]#--scaled to a portion of the year for plotting
    #if(!scale.width & length(violin.width)==1) scal.y<-DoScale(c(0,temp$y),0,violin.width)[-1]#--scaled to a portion of the year for plotting
    # Build the closed violin outline: left side then right side reversed.
    poly.x<-c(at[i]-scal.y,(at[i]+scal.y)[length(scal.y):1])
    poly.y<-c(temp$x,temp$x[length(temp$x):1])#---the number
    #points(poly.y~poly.x,type="l")
    yv<-poly.y
    xv<-poly.x
    rgb.col<-as.vector(col2rgb(col)/255)
    polygon.col<-adjustcolor(col,alpha=alpha)#rgb(rgb.col[1],rgb.col[2],rgb.col[3],alpha)
    # Outline of the full violin (note swapped coordinate order vs. the
    # commented-out original call above).
    # polygon(xv,yv,col=NA,border=border.col, lwd = lwd.violins)
    polygon(yv,xv,col=NA,border=border.col, lwd = lwd.violins)
    #if(!is.null(plot.ci)){
    # Shade the central plot.ci interval of the distribution.
    lci<-quantile(dat.list[[i]],(1-plot.ci)/2)
    uci<-quantile(dat.list[[i]],1-(1-plot.ci)/2)
    yvci<-yv[yv>=lci & yv<=uci]
    xvci<-xv[yv>=lci & yv<=uci]
    # polygon(xvci,yvci,col=polygon.col,border=border.col, lwd = lwd.CI)#polygon.col
    polygon(yvci,xvci,col=polygon.col,border=border.col, lwd = lwd.CI)#polygon.col
    # }
    # Mark the central tendency with a white dot (median or mean).
    if(median==TRUE){
      this.median<-ifelse(invlogit,inv.logit(median(dat.list[[i]])),median(dat.list[[i]]))
    }else{
      this.median<-ifelse(invlogit,inv.logit(mean(dat.list[[i]])),mean(dat.list[[i]]))
    }
    # points(this.median~at[i],pch=19,col="white",cex=cex)
    points(at[i]~this.median,pch=19,col="white",cex=cex)
  }#####
} | /Ch. 2-3/Ch. 3/Results/Functions/plot.violins2.r | no_license | anasanz/MyScripts | R | false | false | 4,869 | r | #' @title Function to make a violin plot.
#'
#' @description
#' \code{plot.violins2} creates a violin plot from a list of numerical vectors.
#' Relies on helper functions \code{DoScale} and (when \code{invlogit=TRUE})
#' \code{inv.logit} being available in the calling environment.
#'
#' @param dat.list A list of numerical vectors. Each list item will be associated with its own violin, representing its distribution.
#' @param x A character or numerical vector indicating the labels associated with each item in dat.list.
#' @param at The location of each violin on the X axis.
#' @param violin.width Half-width of a violin, in units of the grouping axis.
#' @param invlogit If TRUE, density coordinates are transformed with inv.logit before plotting.
#' @param ylim Axis limits for the value axis; defaults to c(0,1) when invlogit=TRUE, else the data range.
#' @param col Colour used for the shaded interval fill.
#' @param cex Size of the white point marking the median (or mean).
#' @param add If the violins are to be added to an existing plot (TRUE) or if a new plot is to be generated (FALSE).
#' @param plot.ci Central probability mass to shade (e.g. 0.95); 1 shades the whole violin.
#' @param border.col Colour of the violin outlines.
#' @param alpha Transparency applied to the fill colour.
#' @param fromto Optional length-2 vector passed to density(from=, to=).
#' @param median If TRUE the white point marks the median, otherwise the mean.
#' @param scale.width If TRUE, each violin's width is scaled by its own density peak.
#' @param lwd.violins Line width of the violin outline.
#' @param lwd.CI Line width of the shaded-interval outline.
#'
#' @author Richard Bischof, \email{richard.bischof@@nmbu.no}
#' @backref R/plot.violins.R
#' @keywords simul
#'
#' @examples
#' # Generate a violin plot
#' plot.violins2(dat.list=lapply(1:5,function(x)rnorm(10000,x,5)),x=1:5,at=NULL,invlogit=FALSE,ylim=NULL,col="darkblue",cex=0.5,add=FALSE)
#'
plot.violins2<-function(dat.list
                        , x
                        , at = NULL
                        , violin.width = 0.20
                        , invlogit = FALSE
                        , ylim = NULL
                        , col = "darkblue"
                        , cex = 0.5
                        , add = FALSE
                        , plot.ci = 1
                        , border.col = "black"
                        , alpha = 0
                        , fromto = NULL
                        , median = TRUE
                        , scale.width = FALSE
                        , lwd.violins = 1
                        , lwd.CI = 1){
  # Default violin positions: one slot per element of x.
  if(is.null(at))at<-1:length(x)
  if(!add){
    # Open a fresh plot frame sized to hold all violins.
    # NOTE(review): this frame puts 'at' on the x axis, but the polygon/points
    # calls below swap coordinates (horizontal violins) -- looks mid-refactor;
    # confirm the intended orientation.
    if(invlogit & is.null(ylim))ylim<-c(0,1)#inv.logit(range(unlist(dat.list)))
    if(!invlogit & is.null(ylim))ylim<-range(unlist(dat.list))
    plot(1,1,type="n",ylim=ylim,xlim=c(min(at)-0.5,max(at)+0.5),axes=FALSE,xlab="",ylab="")
    axis(1,at=at)#, labels=x,lwd=0)
    axis(2)
  }
  i<-1  # leftover from interactive debugging; harmless (overwritten by the loop)
  #---GET THE VIOLIN-SPECIFIC SCALE IF SO INDICATED
  # amp<-unlist(lapply(1:length(x),function(i){
  #   max(density(dat.list[[i]])$y)
  # }))
  for(i in 1: length(x)){
    # Kernel density of this group's values (optionally restricted to fromto).
    temp<-density(dat.list[[i]])
    if(!is.null(fromto)) temp<-density(dat.list[[i]],from=fromto[1],to=fromto[2])
    if(invlogit){
      temp$x<-inv.logit(temp$x)
    }
    #violin.width<-0.20 #in units x (group variable)
    # Rescale the density heights into the violin half-width; DoScale is an
    # external helper -- assumed to map a vector onto [min, max]. TODO confirm.
    #if(scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width[1]*amp[i]/max(amp))[-1]#--scaled to a portion of the year for plotting
    if(scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width*max(temp$y))[-1]
    if(!scale.width) scal.y<-DoScale(c(0,temp$y),0,violin.width)[-1]#--scaled to a portion of the year for plotting
    #if(!scale.width & length(violin.width)==length(x)) scal.y<-DoScale(c(0,temp$y),0,violin.width[i])[-1]#--scaled to a portion of the year for plotting
    #if(!scale.width & length(violin.width)==1) scal.y<-DoScale(c(0,temp$y),0,violin.width)[-1]#--scaled to a portion of the year for plotting
    # Build the closed violin outline: left side then right side reversed.
    poly.x<-c(at[i]-scal.y,(at[i]+scal.y)[length(scal.y):1])
    poly.y<-c(temp$x,temp$x[length(temp$x):1])#---the number
    #points(poly.y~poly.x,type="l")
    yv<-poly.y
    xv<-poly.x
    rgb.col<-as.vector(col2rgb(col)/255)
    polygon.col<-adjustcolor(col,alpha=alpha)#rgb(rgb.col[1],rgb.col[2],rgb.col[3],alpha)
    # Outline of the full violin (note swapped coordinate order vs. the
    # commented-out original call above).
    # polygon(xv,yv,col=NA,border=border.col, lwd = lwd.violins)
    polygon(yv,xv,col=NA,border=border.col, lwd = lwd.violins)
    #if(!is.null(plot.ci)){
    # Shade the central plot.ci interval of the distribution.
    lci<-quantile(dat.list[[i]],(1-plot.ci)/2)
    uci<-quantile(dat.list[[i]],1-(1-plot.ci)/2)
    yvci<-yv[yv>=lci & yv<=uci]
    xvci<-xv[yv>=lci & yv<=uci]
    # polygon(xvci,yvci,col=polygon.col,border=border.col, lwd = lwd.CI)#polygon.col
    polygon(yvci,xvci,col=polygon.col,border=border.col, lwd = lwd.CI)#polygon.col
    # }
    # Mark the central tendency with a white dot (median or mean).
    if(median==TRUE){
      this.median<-ifelse(invlogit,inv.logit(median(dat.list[[i]])),median(dat.list[[i]]))
    }else{
      this.median<-ifelse(invlogit,inv.logit(mean(dat.list[[i]])),mean(dat.list[[i]]))
    }
    # points(this.median~at[i],pch=19,col="white",cex=cex)
    points(at[i]~this.median,pch=19,col="white",cex=cex)
  }#####
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot.fcor.R
\name{boot.fcor}
\alias{boot.fcor}
\title{Fisher-transformed Pearson's correlation: Bootstrap-based Heterogeneity Test for Between-study Heterogeneity in Random- or Mixed- Effects Model}
\usage{
boot.fcor(
n,
z,
lambda = 0,
model = "random",
mods = NULL,
nrep = 10^4,
p_cut = 0.05,
boot.include = FALSE,
parallel = FALSE,
cores = 4,
verbose = FALSE
)
}
\arguments{
\item{n}{A vector of sample sizes in each of the included studies.}
\item{z}{A vector of Fisher-transformed Pearson's correlations.}
\item{lambda}{Size of the magnitude to be tested in the alternative hypothesis of the heterogeneity magnitude test. Default to 0.}
\item{model}{Choice of random- or mixed- effects models. Can only be set to \code{"random"}, or \code{"mixed"}.}
\item{mods}{Optional argument to include moderators in the model. \code{mods} is NULL for random-effects model and a dataframe of moderators for mixed-effects model. A single moderator can be given as a vector specifying the values of the moderator. Multiple moderators are specified by giving a matrix with as many columns as there are moderator variables. See \code{\link[metafor:rma.uni]{rma}} for more details.}
\item{nrep}{Number of replications used in bootstrap simulations. Default to 10^4.}
\item{p_cut}{Cutoff for p-value, which is the alpha level. Default to 0.05.}
\item{boot.include}{If true, bootstrap simulation results are included in the output.}
\item{parallel}{If true, parallel computing using 4 cores will be performed during bootstrapping stage. Otherwise, for loop is used.}
\item{cores}{The number of cores used in the parallel computing. Default to 4.}
\item{verbose}{If true, show the progress of bootstrapping.}
}
\value{
A dataframe that contains the test statistics ('stat'), p-values ('p_value'), and significances of effect size heterogeneity ("Heterogeneity").
}
\description{
\code{boot.fcor} returns the bootstrap-based tests of the residual heterogeneity in random- or mixed- effects model of Pearson's correlation coefficients transformed with Fisher's r-to-z transformation (z scores).
}
\details{
This function returns the test statistics as well as their p-value and significances using (1) Q-test and (2) Bootstrap-based Heterogeneity Test with Restricted Maximum Likelihood (REML).
The results of significances are classified as "sig" or "n.s" based on the cutoff p-value (i.e., alpha level). "sig" means that the between-study heterogeneity is significantly different from zero whereas "n.s" means the between-study heterogeneity is not significantly different from zero. The default alpha level is 0.05.
}
\examples{
# A meta-analysis of 13 studies studying the correlation
# between sensation-seeking scores and levels of monoamine oxidase (Zuckerman, 1994).
sensation <- boot.heterogeneity:::sensation
# n is a list of sample sizes
n <- sensation$n
# Pearson's correlation
r <- sensation$r
# Fisher's Transformation
z <- 1/2*log((1+r)/(1-r))
\dontrun{
boot.run <- boot.fcor(n, z, model = 'random', p_cut = 0.05)
}
}
\references{
Zuckerman, M. (1994). Behavioral expressions and biosocial bases of sensation-seeking. New York, NY: Cambridge University Press.
Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. Journal of Statistical Software, 36(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/
}
| /man/boot.fcor.Rd | no_license | cran/boot.heterogeneity | R | false | true | 3,445 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot.fcor.R
\name{boot.fcor}
\alias{boot.fcor}
\title{Fisher-transformed Pearson's correlation: Bootstrap-based Heterogeneity Test for Between-study Heterogeneity in Random- or Mixed- Effects Model}
\usage{
boot.fcor(
n,
z,
lambda = 0,
model = "random",
mods = NULL,
nrep = 10^4,
p_cut = 0.05,
boot.include = FALSE,
parallel = FALSE,
cores = 4,
verbose = FALSE
)
}
\arguments{
\item{n}{A vector of sample sizes in each of the included studies.}
\item{z}{A vector of Fisher-transformed Pearson's correlations.}
\item{lambda}{Size of the magnitude to be tested in the alternative hypothesis of the heterogeneity magnitude test. Default to 0.}
\item{model}{Choice of random- or mixed- effects models. Can only be set to \code{"random"}, or \code{"mixed"}.}
\item{mods}{Optional argument to include moderators in the model. \code{mods} is NULL for random-effects model and a dataframe of moderators for mixed-effects model. A single moderator can be given as a vector specifying the values of the moderator. Multiple moderators are specified by giving a matrix with as many columns as there are moderator variables. See \code{\link[metafor:rma.uni]{rma}} for more details.}
\item{nrep}{Number of replications used in bootstrap simulations. Default to 10^4.}
\item{p_cut}{Cutoff for p-value, which is the alpha level. Default to 0.05.}
\item{boot.include}{If true, bootstrap simulation results are included in the output.}
\item{parallel}{If true, parallel computing using 4 cores will be performed during bootstrapping stage. Otherwise, for loop is used.}
\item{cores}{The number of cores used in the parallel computing. Default to 4.}
\item{verbose}{If true, show the progress of bootstrapping.}
}
\value{
A dataframe that contains the test statistics ('stat'), p-values ('p_value'), and significances of effect size heterogeneity ("Heterogeneity").
}
\description{
\code{boot.fcor} returns the bootstrap-based tests of the residual heterogeneity in random- or mixed- effects model of Pearson's correlation coefficients transformed with Fisher's r-to-z transformation (z scores).
}
\details{
This function returns the test statistics as well as their p-value and significances using (1) Q-test and (2) Bootstrap-based Heterogeneity Test with Restricted Maximum Likelihood (REML).
The results of significances are classified as "sig" or "n.s" based on the cutoff p-value (i.e., alpha level). "sig" means that the between-study heterogeneity is significantly different from zero whereas "n.s" means the between-study heterogeneity is not significantly different from zero. The default alpha level is 0.05.
}
\examples{
# A meta-analysis of 13 studies studying the correlation
# between sensation-seeking scores and levels of monoamine oxidase (Zuckerman, 1994).
sensation <- boot.heterogeneity:::sensation
# n is a list of samples sizes
n <- sensation$n
# Pearson's correlation
r <- sensation$r
# Fisher's Transformation
z <- 1/2*log((1+r)/(1-r))
\dontrun{
boot.run <- boot.fcor(n, z, model = 'random', p_cut = 0.05)
}
}
\references{
Zuckerman, M. (1994). Behavioral expressions and biosocial bases of sensation-seeking. New York, NY: Cambridge University Press.
Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. Journal of Statistical Software, 36(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/
}
|
## Association-rule mining on the Titanic survival data (classic arules demo).
## Expands the 4-way Titanic contingency table into one row per passenger,
## mines association rules, and keeps those whose consequent is survival.
require(graphics)

# Visual / tabular overview of the contingency table.
mosaicplot(Titanic, main = "Survival on the Titanic")
apply(Titanic, c(3,4),sum)
apply(Titanic, c(2,4),sum)
str(Titanic)

# Flatten the table: one data-frame row per (Class, Sex, Age, Survived) cell.
df <- as.data.frame(Titanic)
head(df)

# Replicate every combination Freq times to recover per-passenger records.
expanded <- lapply(df[1:4], function(column) rep(as.character(column), df$Freq))
titanic.raw <- as.data.frame(do.call(cbind, expanded))
names(titanic.raw) <- names(df)[1:4]
dim(titanic.raw)
str(titanic.raw)
head(titanic.raw)
summary(titanic.raw)

# First pass: mine rules with apriori() defaults.
library(arules)
rules.all <- apriori(titanic.raw)
rules.all
inspect(rules.all)

# Second pass: restrict the consequent to survival outcomes, with tighter
# support/confidence thresholds, then rank the rules by lift.
rules <- apriori(titanic.raw, control = list(verbose=F), parameter = list(minlen=2, supp=0.005, conf=0.8),appearance = list(rhs=c("Survived=No", "Survived=Yes"), default="lhs"))
quality(rules) <- round(quality(rules), digit=3)
rules.sorted <- sort(rules, by="lift")
inspect(rules.sorted)

# Graph visualisation of the unrestricted rule set.
library(arulesViz)
plot(rules.all, method="graph", control=list(type="items"))
| /lab9/lab9.r | no_license | schnapple/csci2961-master | R | false | false | 884 | r | require(graphics)
mosaicplot(Titanic, main = "Survival on the Titanic")
apply(Titanic, c(3,4),sum)
apply(Titanic, c(2,4),sum)
str(Titanic)
df <- as.data.frame(Titanic)
head(df)
titanic.raw <- NULL
for(i in 1:4){titanic.raw <- cbind(titanic.raw, rep(as.character(df[,i]), df$Freq))}
titanic.raw <- as.data.frame(titanic.raw)
names(titanic.raw) <- names(df)[1:4]
dim(titanic.raw)
str(titanic.raw)
head(titanic.raw)
summary(titanic.raw)
library(arules)
rules.all <- apriori(titanic.raw)
rules.all
inspect(rules.all)
rules <- apriori(titanic.raw, control = list(verbose=F), parameter = list(minlen=2, supp=0.005, conf=0.8),appearance = list(rhs=c("Survived=No", "Survived=Yes"), default="lhs"))
quality(rules) <- round(quality(rules), digit=3)
rules.sorted <- sort(rules, by="lift")
inspect(rules.sorted)
library(arulesViz)
plot(rules.all, method="graph", control=list(type="items"))
|
## Plot 4 -- a 2x2 panel of time-series plots from the UCI household
## electric-power-consumption data, restricted to 2007-02-01 and 2007-02-02.
## Reading dataset (semicolon-separated; note the machine-specific path).
powerr <- read.csv('/home/sara/Work/Courses/Coursera/Data_Science_Specialization/Course4/Week1/Course_project/data/household_power_consumption.txt', header = TRUE, sep = ";")
## Extracting the desired date range (Date is stored as d/m/Y text).
powerr2 = subset(powerr, powerr$Date == "1/2/2007" | powerr$Date == "2/2/2007")
## The numeric columns were read as factors; convert via character so the
## level labels (not the internal level codes) become the numbers.
powerr2$Sub_metering_1 <- as.numeric(as.character(powerr2$Sub_metering_1))
powerr2$Sub_metering_2 <- as.numeric(as.character(powerr2$Sub_metering_2))
powerr2$Sub_metering_3 <- as.numeric(as.character(powerr2$Sub_metering_3))
## Date/Time as plain character, ready for strptime().
powerr2$Date3 <- as.character(powerr2$Date)
powerr2$Time3<- as.character(powerr2$Time)
## Combine date and time into a single POSIXlt timestamp.
powerr2$datetime <- strptime(paste(powerr2$Date3, powerr2$Time3), "%d/%m/%Y %H:%M:%S")
## Preparing the quadruple (2x2) panel layout.
par(mfrow = c(2,2))
par(mar = c(4,4,2,1))
# Panel 1: global active power over time.
plot (x= powerr2$datetime, y = powerr2$Global_active_power,
     ylab = 'Global Active power (kilowatts)',
     xlab =' ',
     type = "l")
# Panel 2: voltage over time.
plot (x= powerr2$datetime, y = powerr2$Voltage,
     ylab = 'Voltage',
     xlab ='datetime',
     type = "l")
# Panel 3: the three energy sub-metering series, overlaid, with legend.
plot (x= powerr2$datetime, y = powerr2$Sub_metering_1,
     ylab = 'Energy sub metering',
     xlab =' ',
     type = "l")
legend("topright" , bty = "n", lty = 1, col = c("black","red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
lines(x= powerr2$datetime, y = powerr2$Sub_metering_2, col = "red")
lines(x= powerr2$datetime, y = powerr2$Sub_metering_3, col = "blue")
# Panel 4: global reactive power over time.
plot (x= powerr2$datetime, y = as.numeric(powerr2$Global_reactive_power),
     ylab = 'Global_reactive_power',
     xlab ='datetime',
     #ylim = range(0,0.5),
     type = "l")
# NOTE(review): no file device (e.g. png("plot4.png", ...)) is opened above,
# so this dev.off() closes the interactive screen device -- confirm whether
# a png() call before par() was intended.
dev.off()
| /plot4.R | no_license | lightks/ProgrammingAssignment1_course4 | R | false | false | 1,818 | r | ##Plot 4 - the quadruple plot.
##Reading dataset
powerr <- read.csv('/home/sara/Work/Courses/Coursera/Data_Science_Specialization/Course4/Week1/Course_project/data/household_power_consumption.txt', header = TRUE, sep = ";")
##Extracting the desired date range.
powerr2 = subset(powerr, powerr$Date == "1/2/2007" | powerr$Date == "2/2/2007")
##assigning the dataset as numeric and stripping away factors
powerr2$Sub_metering_1 <- as.numeric(as.character(powerr2$Sub_metering_1))
powerr2$Sub_metering_2 <- as.numeric(as.character(powerr2$Sub_metering_2))
powerr2$Sub_metering_3 <- as.numeric(as.character(powerr2$Sub_metering_3))
##assigning the dataset as character and stripping away factors
powerr2$Date3 <- as.character(powerr2$Date)
powerr2$Time3<- as.character(powerr2$Time)
##setting the time
powerr2$datetime <- strptime(paste(powerr2$Date3, powerr2$Time3), "%d/%m/%Y %H:%M:%S")
##preparing quadrouple plot.
par(mfrow = c(2,2))
par(mar = c(4,4,2,1))
plot (x= powerr2$datetime, y = powerr2$Global_active_power,
ylab = 'Global Active power (kilowatts)',
xlab =' ',
type = "l")
plot (x= powerr2$datetime, y = powerr2$Voltage,
ylab = 'Voltage',
xlab ='datetime',
type = "l")
plot (x= powerr2$datetime, y = powerr2$Sub_metering_1,
ylab = 'Energy sub metering',
xlab =' ',
type = "l")
legend("topright" , bty = "n", lty = 1, col = c("black","red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
lines(x= powerr2$datetime, y = powerr2$Sub_metering_2, col = "red")
lines(x= powerr2$datetime, y = powerr2$Sub_metering_3, col = "blue")
plot (x= powerr2$datetime, y = as.numeric(powerr2$Global_reactive_power),
ylab = 'Global_reactive_power',
xlab ='datetime',
#ylim = range(0,0.5),
type = "l")
dev.off()
|
# Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02,
# from the UCI "Individual household electric power consumption" data set
# (~2,075,259 rows x 9 columns, one-minute sampling over almost 4 years;
# missing values are coded as "?"). Output is written to plot1.png.

# Columns 1-2 hold date and time as text; the remaining seven are numeric.
power <- read.csv("household_power_consumption.txt",
                  sep=";",
                  colClasses=c(rep("character",2),rep("numeric",7)),
                  na.strings="?")

# Fuse the date and time text columns into a single POSIXlt timestamp,
# then drop the now-redundant originals.
power$Timestamp <- strptime(paste(power$Date,power$Time),
                            format="%d/%m/%Y %H:%M:%S")
power$Date <- NULL
power$Time <- NULL

# Restrict to the two target days (which() drops any NA comparisons,
# matching subset() semantics).
obs_day <- as.Date(power$Timestamp)
keep <- which(obs_day >= "2007-02-01" & obs_day < "2007-02-03")
sub_power <- power[keep, ]

# Render the histogram straight to a transparent 480x480 PNG device.
png(filename="plot1.png", height=480, width=480, bg="transparent")
hist(sub_power$Global_active_power,
     col="red",
     main="Global Active Power",
     xlab="Global Active Power (kilowatts)",
     ylab="Frequency")
dev.off()
| /plot1.R | no_license | pachidam/ExData_Plotting1 | R | false | false | 1,487 | r | # Measurements of electric power consumption in one household
# with a one-minute sampling rate over a period of almost 4 years.
# Different electrical quantities and some sub-metering values are available.
# This data set has 2,075,259 rows and 9 columns
# The memory usage of this data set is 224131200 Bytes
# Read in the power consumption data set
# First two columns will be converted to date/time in next step
# In this data set missing values are coded as "?"
# Plot 1
# Histogram of Frequency vs Global Active Power
data <- read.csv("household_power_consumption.txt",
sep=";",
colClasses=c(rep("character",2),rep("numeric",7)),
na.strings="?")
# Combine the date and time columns into one timestapm
data$Timestamp <- strptime(paste(data$Date,data$Time),
format="%d/%m/%Y %H:%M:%S")
# Drop the now-unnecessary date and time cols
data$Date=NULL
data$Time=NULL
# Subset the data to only look at desired time span
# Here we'll be working with data from 2007-02-01 to 2007-02-02.
sub_data = subset(data,as.Date(data$Timestamp) >= "2007-02-01"
& as.Date(data$Timestamp) < "2007-02-03")
# Start the png device
png(filename="plot1.png", height=480, width=480, bg="transparent")
# Plot the histogram
hist(sub_data$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
# Save the figure
dev.off()
|
> Resp_Prod <- c(
+ 45,43,41,42,
+ 44,44,45,43,
+ 40,44,45,46,
+ 44,45,46,45
+ )
> Trt_Operadores <- rep( c("Operador1", "Operador2", "Operador3", "Operador4"), each = 4)
> Blq_Tecnica <- rep(c("Tecnica1", "Tecnica2", "Tecnica3", "Tecnica4"), times = 4)
> Datos<- data.frame(Resp_Prod, Trt_Operadores, Blq_Tecnica)
> head(Datos)
Resp_Prod Trt_Operadores Blq_Tecnica
1 45 Operador1 Tecnica1
2 43 Operador1 Tecnica2
3 41 Operador1 Tecnica3
4 42 Operador1 Tecnica4
5 44 Operador2 Tecnica1
6 44 Operador2 Tecnica2
> tail(Datos)
Resp_Prod Trt_Operadores Blq_Tecnica
11 45 Operador3 Tecnica3
12 46 Operador3 Tecnica4
13 44 Operador4 Tecnica1
14 45 Operador4 Tecnica2
15 46 Operador4 Tecnica3
16 45 Operador4 Tecnica4
> modelo <- aov(Resp_Prod~ Trt_Operadores+ Blq_Tecnica, data = Datos)
> summary(modelo)
Df Sum Sq Mean Sq F value Pr(>F)
Trt_Operadores 3 10.25 3.417 0.984 0.443
Blq_Tecnica 3 2.25 0.750 0.216 0.883
Residuals 9 31.25 3.472
> TukeyHSD(modelo)
Tukey multiple comparisons of means
95% family-wise confidence level
Fit: aov(formula = Resp_Prod ~ Trt_Operadores + Blq_Tecnica, data = Datos)
$Trt_Operadores
diff lwr upr p adj
Operador2-Operador1 1.25 -2.863331 5.363331 0.7804545
Operador3-Operador1 1.00 -3.113331 5.113331 0.8706964
Operador4-Operador1 2.25 -1.863331 6.363331 0.3738117
Operador3-Operador2 -0.25 -4.363331 3.863331 0.9973957
Operador4-Operador2 1.00 -3.113331 5.113331 0.8706964
Operador4-Operador3 1.25 -2.863331 5.363331 0.7804545
$Blq_Tecnica
diff lwr upr p adj
Tecnica2-Tecnica1 0.75 -3.363331 4.863331 0.9387968
Tecnica3-Tecnica1 1.00 -3.113331 5.113331 0.8706964
Tecnica4-Tecnica1 0.75 -3.363331 4.863331 0.9387968
Tecnica3-Tecnica2 0.25 -3.863331 4.363331 0.9973957
Tecnica4-Tecnica2 0.00 -4.113331 4.113331 1.0000000
Tecnica4-Tecnica3 -0.25 -4.363331 3.863331 0.9973957
> par(mar=c(6,11,3,1))
> plot(TukeyHSD(modelo,'Trt_Operadores'), las=1, col="brown")
> library(agricolae)
> Prueba <- HSD.test(modelo, "Trt_Operadores", group=TRUE)
> Prueba$groups
Resp_Prod groups
Operador4 45.00 a
Operador2 44.00 a
Operador3 43.75 a
Operador1 42.75 a
> qqnorm(modelo$residuals)
> qqline(modelo$residuals)
> shapiro.test(modelo$residuals)
Shapiro-Wilk normality test
data: modelo$residuals
W = 0.9566, p-value = 0.6008
> | /Tarea3_AOVRB/source/consoleRebeca3.r | no_license | CarlosRDGZ/Metodos_Estadisticos_2018 | R | false | false | 2,714 | r | > Resp_Prod <- c(
+ 45,43,41,42,
+ 44,44,45,43,
+ 40,44,45,46,
+ 44,45,46,45
+ )
> Trt_Operadores <- rep( c("Operador1", "Operador2", "Operador3", "Operador4"), each = 4)
> Blq_Tecnica <- rep(c("Tecnica1", "Tecnica2", "Tecnica3", "Tecnica4"), times = 4)
> Datos<- data.frame(Resp_Prod, Trt_Operadores, Blq_Tecnica)
> head(Datos)
Resp_Prod Trt_Operadores Blq_Tecnica
1 45 Operador1 Tecnica1
2 43 Operador1 Tecnica2
3 41 Operador1 Tecnica3
4 42 Operador1 Tecnica4
5 44 Operador2 Tecnica1
6 44 Operador2 Tecnica2
> tail(Datos)
Resp_Prod Trt_Operadores Blq_Tecnica
11 45 Operador3 Tecnica3
12 46 Operador3 Tecnica4
13 44 Operador4 Tecnica1
14 45 Operador4 Tecnica2
15 46 Operador4 Tecnica3
16 45 Operador4 Tecnica4
> modelo <- aov(Resp_Prod~ Trt_Operadores+ Blq_Tecnica, data = Datos)
> summary(modelo)
Df Sum Sq Mean Sq F value Pr(>F)
Trt_Operadores 3 10.25 3.417 0.984 0.443
Blq_Tecnica 3 2.25 0.750 0.216 0.883
Residuals 9 31.25 3.472
> TukeyHSD(modelo)
Tukey multiple comparisons of means
95% family-wise confidence level
Fit: aov(formula = Resp_Prod ~ Trt_Operadores + Blq_Tecnica, data = Datos)
$Trt_Operadores
diff lwr upr p adj
Operador2-Operador1 1.25 -2.863331 5.363331 0.7804545
Operador3-Operador1 1.00 -3.113331 5.113331 0.8706964
Operador4-Operador1 2.25 -1.863331 6.363331 0.3738117
Operador3-Operador2 -0.25 -4.363331 3.863331 0.9973957
Operador4-Operador2 1.00 -3.113331 5.113331 0.8706964
Operador4-Operador3 1.25 -2.863331 5.363331 0.7804545
$Blq_Tecnica
diff lwr upr p adj
Tecnica2-Tecnica1 0.75 -3.363331 4.863331 0.9387968
Tecnica3-Tecnica1 1.00 -3.113331 5.113331 0.8706964
Tecnica4-Tecnica1 0.75 -3.363331 4.863331 0.9387968
Tecnica3-Tecnica2 0.25 -3.863331 4.363331 0.9973957
Tecnica4-Tecnica2 0.00 -4.113331 4.113331 1.0000000
Tecnica4-Tecnica3 -0.25 -4.363331 3.863331 0.9973957
> par(mar=c(6,11,3,1))
> plot(TukeyHSD(modelo,'Trt_Operadores'), las=1, col="brown")
> library(agricolae)
> Prueba <- HSD.test(modelo, "Trt_Operadores", group=TRUE)
> Prueba$groups
Resp_Prod groups
Operador4 45.00 a
Operador2 44.00 a
Operador3 43.75 a
Operador1 42.75 a
> qqnorm(modelo$residuals)
> qqline(modelo$residuals)
> shapiro.test(modelo$residuals)
Shapiro-Wilk normality test
data: modelo$residuals
W = 0.9566, p-value = 0.6008
> |
#' Spark Data Types
#'
#' These functions support supplying a Spark read schema. This is particularly
#' useful when reading data with nested arrays when you are not interested in
#' several of the nested fields.
#'
#' @param sc A \code{spark_connection}
#' @param struct_fields A list of fields (or a single field) obtained from
#'   \code{struct_field()}
#' @return A Spark \code{StructType} Java object reference.
#' @importFrom sparklyr invoke_new
#' @importFrom sparklyr invoke
#' @export
struct_type <- function(sc, struct_fields) {
  struct <- invoke_new(sc,
                       class="org.apache.spark.sql.types.StructType")
  if (is.list(struct_fields)) {
    # Iterate over the fields themselves rather than `1:length(...)`:
    # this is also correct for an empty list (zero iterations instead of
    # the spurious i = 1, 0 sequence produced by 1:0).
    for (field in struct_fields) {
      struct <- invoke(struct, "add", field)
    }
  } else {
    struct <- invoke(struct, "add", struct_fields)
  }
  struct
}
#' @rdname struct_type
#' @param name A field name to use in the output struct type
#' @param data_type A (java) data type (e.g., \code{string_type()} or \code{double_type()})
#' @param nullable Logical. Describes whether field can be missing for some rows.
#' @importFrom sparklyr invoke_static
#' @importFrom sparklyr invoke_new
#' @export
struct_field <- function(sc, name, data_type, nullable=FALSE) {
  # StructField's JVM constructor requires a Metadata argument; fetch the
  # canonical empty-metadata singleton to fill it.
  empty_metadata <- invoke_static(sc,
                                  class="org.apache.spark.sql.types.Metadata",
                                  method="empty")
  invoke_new(sc,
             class="org.apache.spark.sql.types.StructField",
             name, data_type, nullable, empty_metadata)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_new
#' @export
array_type <- function(sc, data_type, nullable=FALSE) {
  # Wrap an element type in Spark's ArrayType container.
  jvm_class <- "org.apache.spark.sql.types.ArrayType"
  invoke_new(sc, class=jvm_class, data_type, nullable)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
binary_type <- function(sc) {
  # Resolve Spark's binary SQL type through sparklyr's SQLUtils helper.
  sql_name <- "binary"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
boolean_type <- function(sc) {
  # Resolve Spark's boolean SQL type through sparklyr's SQLUtils helper.
  sql_name <- "boolean"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
byte_type <- function(sc) {
  # Resolve Spark's byte SQL type through sparklyr's SQLUtils helper.
  sql_name <- "byte"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
date_type <- function(sc) {
  # Resolve Spark's date SQL type through sparklyr's SQLUtils helper.
  sql_name <- "date"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
double_type <- function(sc) {
  # Resolve Spark's double SQL type through sparklyr's SQLUtils helper.
  sql_name <- "double"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
float_type <- function(sc) {
  # Resolve Spark's float SQL type through sparklyr's SQLUtils helper.
  sql_name <- "float"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
integer_type <- function(sc) {
  # Resolve Spark's integer SQL type through sparklyr's SQLUtils helper.
  sql_name <- "integer"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
numeric_type <- function(sc) {
  # Resolve Spark's numeric SQL type through sparklyr's SQLUtils helper.
  sql_name <- "numeric"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @param key_type A (java) data type describing the map keys (usually \code{string_type()})
#' @param value_type A (java) data type describing the map values
#' @importFrom sparklyr invoke_new
#' @export
map_type <- function(sc, key_type, value_type, nullable=FALSE) {
  # Construct Spark's MapType from the key and value element types.
  jvm_class <- "org.apache.spark.sql.types.MapType"
  invoke_new(sc, class=jvm_class, key_type, value_type, nullable)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
string_type <- function(sc) {
  # Resolve Spark's string SQL type through sparklyr's SQLUtils helper.
  sql_name <- "string"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
character_type <- function(sc) {
  # Resolve Spark's character SQL type through sparklyr's SQLUtils helper.
  sql_name <- "character"
  invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", sql_name)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
timestamp_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "timestamp")
} | /R/data_types.R | permissive | kashenfelter/sparklyr.nested | R | false | false | 4,038 | r | #' Spark Data Types
#'
#' These functions support supplying a Spark read schema. This is particularly useful
#' when reading data with nested arrays when you are not interested in several of
#' the nested fields.
#'
#' @param sc A \code{spark_connection}
#' @param struct_fields A vector or fields obtained from \code{struct_field()}
#' @importFrom sparklyr invoke_new
#' @importFrom sparklyr invoke
#' @export
struct_type <- function(sc, struct_fields) {
  struct <- invoke_new(sc,
                       class="org.apache.spark.sql.types.StructType")
  if (is.list(struct_fields)) {
    # Iterate the fields directly; unlike `1:length(...)` this is safe
    # for an empty list (no spurious i = 1, 0 iterations).
    for (field in struct_fields) {
      struct <- invoke(struct, "add", field)
    }
  } else {
    struct <- invoke(struct, "add", struct_fields)
  }
  struct
}
#' @rdname struct_type
#' @param name A field name to use in the output struct type
#' @param data_type A (java) data type (e.g., \code{string_type()} or \code{double_type()})
#' @param nullable Logical. Describes whether field can be missing for some rows.
#' @importFrom sparklyr invoke_static
#' @importFrom sparklyr invoke_new
#' @export
struct_field <- function(sc, name, data_type, nullable=FALSE) {
metadata <- invoke_static(sc,
class="org.apache.spark.sql.types.Metadata",
method="empty")
invoke_new(sc,
class="org.apache.spark.sql.types.StructField",
name, data_type, nullable, metadata)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_new
#' @export
array_type <- function(sc, data_type, nullable=FALSE) {
invoke_new(sc,
class="org.apache.spark.sql.types.ArrayType",
data_type, nullable)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
binary_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "binary")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
boolean_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "boolean")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
byte_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "byte")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
date_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "date")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
double_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "double")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
float_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "float")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
integer_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "integer")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
numeric_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "numeric")
}
#' @rdname struct_type
#' @param key_type A (java) data type describing the map keys (usually \code{string_type()})
#' @param value_type A (java) data type describing the map values
#' @importFrom sparklyr invoke_new
#' @export
map_type <- function(sc, key_type, value_type, nullable=FALSE) {
invoke_new(sc,
class="org.apache.spark.sql.types.MapType",
key_type, value_type, nullable)
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
string_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "string")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
character_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "character")
}
#' @rdname struct_type
#' @importFrom sparklyr invoke_static
#' @export
timestamp_type <- function(sc) {
invoke_static(sc, "sparklyr.SQLUtils", "getSQLDataType", "timestamp")
} |
## `````````````````````````````````````````````
#### Read Me ####
## `````````````````````````````````````````````
# Debugging script: when reading the Citi Bike trip CSV, a line such as
#                  starttime start_station_id tripduration
#   1    8/1/2016 00:01:22              302          288
# had its start_time field populated as just "8":
#   8   302   288
# This file reproduces and fixes that, where "reduced trip data.csv"
# is a reduced 3-line input.
## `````````````````````````````````````````````
# File path. "reduced trip data.csv" was based on:
# "https://s3.amazonaws.com/tripdata/201409-citibike-tripdata.zip"
# (open it in Sublime and copy the top few rows).
c.csv.temp.file = "reduced trip data.csv"
c.csv.temp.file = file.path(c.home.dir,c.data.dir,c.csv.temp.file)
# First attempt (from fn_downloadZip): strip doubled quotes, then parse.
df.1 = read_file(c.csv.temp.file) %>%
  str_replace_all('"{2,3}', '"') %>%
  read_csv(col_names = TRUE)
## --> starttime is being read in as a character column:
# # A tibble: 5 x 15
#   tripduration         starttime          stoptime start_station_id       start_station_name start_station_latitude
#          <int>             <chr>             <chr>            <int>                    <chr>                  <dbl>
# 1          288 9/1/2015 00:00:00 9/1/2015 00:04:48              263 Elizabeth St & Hester St               40.71729
# which is a problem in some cases. Read it instead as a datetime column.
## readr help
# SRC:
# http://r4ds.had.co.nz/import.html
# "If these defaults don't work for your data you can supply your own
# date-time formats, built up of the pieces."
# parse_datetime("9/1/2015 00:00:00", "%d/%m/%y %H:%M:%S")
parse_datetime("9/1/2015 00:00:00", "%d/%m/%Y %H:%M:%S")
parse_datetime("9/1/2015 02:03:04", "%d/%m/%Y %H:%M:%S")
# NOTE(review): Citi Bike timestamps look like US month/day/year
# ("8/1/2016" in the header comment), but the format below parses
# day/month/year -- dates with day > 12 would fail to parse; confirm
# whether "%m/%d/%Y" was intended.
df.1 = read_file(c.csv.temp.file) %>%
  str_replace_all('"{2,3}', '"') %>%
  read_csv(col_names = TRUE, col_types=cols(starttime=col_datetime("%d/%m/%Y %H:%M:%S")))
# This read_csv is the magical line that needs to go back to v 0 4.
# 5. Fix column names for df.1 (spaces -> underscores).
names(df.1) = gsub(" ", "_", names(df.1))
# 6. Keep only the first and last rows.
# TODO: remove this, as we want complete data, not just two rows
df.1 <-
  df.1 %>%
  filter(row_number() %in% c(1, n()))
# 7. Keep only a few columns.
df.1 <-
  df.1 %>%
  select(starttime, start_station_id, tripduration)
# View the data frame written.
# TODO: comment this
#cat("head(df.1)", head(df.1))
print.data.frame(head(df.1))
| /2.Code/reduce data v 2.R | no_license | patternproject/r.nycBikeData2 | R | false | false | 2,356 | r | ## `````````````````````````````````````````````
#### Read Me ####
## `````````````````````````````````````````````
# From the following line in csv
# starttime start_station_id tripduration
# 1 8/1/2016 00:01:22 302 288
# The start_time field is populated as "8"
# 8 302 288
# This file is debugging it, where reduced trip data.csv
# is a reduced 3 line input
## `````````````````````````````````````````````
# file path
# reduced trip data.csv was based on: "https://s3.amazonaws.com/tripdata/201409-citibike-tripdata.zip"
# open it in sublime and copy top few rows
# file path to the 3-line debugging extract of the Citibike trip data
c.csv.temp.file = "reduced trip data.csv"
c.csv.temp.file = file.path(c.home.dir,c.data.dir,c.csv.temp.file)
# from fn_downloadZip: collapse the doubled/tripled quotes embedded in the
# raw Citibike CSV, then parse with readr's defaults
df.1 = read_file(c.csv.temp.file) %>%
  str_replace_all('"{2,3}', '"') %>%
  read_csv(col_names = TRUE)
## --> starttime is being read in as a character
# # A tibble: 5 x 15
#   tripduration starttime         stoptime          start_station_id start_station_name       start_station_latitude
#          <int> <chr>             <chr>                        <int> <chr>                                     <dbl>
# 1          288 9/1/2015 00:00:00 9/1/2015 00:04:48              263 Elizabeth St & Hester St               40.71729
# which is a problem in some cases. Read it instead as a datetime col.
## readr help
# SRC: http://r4ds.had.co.nz/import.html
# Citibike timestamps are US-style month/day/year ("8/1/2016 00:01:22" is
# August 1st), so the format must be %m/%d/%Y. With the previous %d/%m/%Y
# any day greater than 12 failed to parse (13 is not a month) and the
# parseable dates were silently transposed.
parse_datetime("9/1/2015 00:00:00", "%m/%d/%Y %H:%M:%S")
parse_datetime("9/1/2015 02:03:04", "%m/%d/%Y %H:%M:%S")
df.1 = read_file(c.csv.temp.file) %>%
  str_replace_all('"{2,3}', '"') %>%
  read_csv(col_names = TRUE, col_types=cols(starttime=col_datetime("%m/%d/%Y %H:%M:%S")))
# this read_csv is the magical line that needs to go back to v 0 4
# 5. fix col names for df.1: replace spaces with underscores
names(df.1) = gsub(" ", "_", names(df.1))
# 6. remove all but first and last rows (debugging aid only)
# TODO: remove this, as we want complete data, not just two rows
df.1 <-
  df.1 %>%
  filter(row_number() %in% c(1, n()))
# 7. keep only the columns under investigation
df.1 <-
  df.1 %>%
  select(starttime, start_station_id, tripduration)
# view the df written
# TODO: comment this
#cat("head(df.1)", head(df.1))
print.data.frame(head(df.1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{get_base_url}
\alias{get_base_url}
\title{get AlphaVantage base url}
\usage{
get_base_url()
}
\description{
get AlphaVantage base url
}
| /man/get_base_url.Rd | no_license | schardtbc/avR | R | false | true | 226 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{get_base_url}
\alias{get_base_url}
\title{get AlphaVantage base url}
\usage{
get_base_url()
}
\description{
get AlphaVantage base url
}
|
# TODO: Automatic compilation of documents
#
# Author: Miguel Alvarez
################################################################################
library(knitr)
# NOTE(review): hard-coded Windows workspace path -- this only runs on the
# author's machine; consider a relative path or here::here().
setwd("M:/WorkspaceEclipse/Guides")
## taxlist_firststeps ----------------------------------------------------------
## library(taxlist)
## File <- "taxlist_firststeps"
## knit(file.path("src", File, paste0(File, ".Rmd")))
## taxlist_syntax --------------------------------------------------------------
library(taxlist)
# Knit src/<File>/<File>.Rmd; output lands in the working directory.
File <- "taxlist_syntax"
knit(file.path("src", File, paste0(File, ".Rmd")))
| /src/Compile.R | no_license | kamapu/Guides | R | false | false | 592 | r | # TODO: Automatic compilation of documents
#
# Author: Miguel Alvarez
################################################################################
library(knitr)
setwd("M:/WorkspaceEclipse/Guides")
## taxlist_firststeps ----------------------------------------------------------
## library(taxlist)
## File <- "taxlist_firststeps"
## knit(file.path("src", File, paste0(File, ".Rmd")))
## taxlist_syntax --------------------------------------------------------------
library(taxlist)
File <- "taxlist_syntax"
knit(file.path("src", File, paste0(File, ".Rmd")))
|
#
# This is a template for creating a leaflet chroropleth map shiny application.
# This app uses mock data that resembles survey answers to several major & minor categories
# This app is based on rstudio's 'superzip' example authored by Joe Cheng.
#
# Author: Jasper Ginn
#
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
library(reshape2)
library(ggplot2)
library(plotly)
# Server
# Shiny server: draws a leaflet choropleth of mock survey data and two
# reactive bar charts (major / minor answer categories) for the country
# selected in the UI.
# NOTE(review): depends on objects created outside this file (presumably
# global.R): shp, countries, ISO3, cus.pal, pal.major, major.cats and
# theme_cfi_scientific -- confirm they exist before the app starts.
function(input, output, session) {
  # Put country shapedata in temporary var so the join below does not
  # mutate the shared `shp` object
  shp2 <- shp
  # Create mock data for testing purpose
  source("functions/mapfunctions/mockdata.R")
  mock.data.all <- mockData(countries = countries, ISO3.codes = ISO3)
  # Reshape major-category answers to long format
  # (one row per country x category)
  mock.data.main <- mock.data.all$data.major.cats %>%
    melt(., id.vars = c("country", "ISO3.codes"))
  # Pick the highest value for category
  # NOTE(review): ties on max(value) keep several rows per country and
  # unique() removes only exact duplicates -- verify this is intended.
  io <- mock.data.main %>%
    group_by(country) %>%
    filter(value == max(value)) %>%
    ungroup() %>%
    tbl_df() %>%  # NOTE(review): tbl_df() is deprecated in newer dplyr (use as_tibble())
    select(country, value, variable) %>%
    mutate(value = round(value, digits=2)) %>%
    unique()
  # Remove where country == NA
  #filter(!is.na(country))
  # Join mockdata to shapefile (shapefile "name" column <-> mock "country")
  shp2@data <- shp@data %>%
    left_join(., io, by=c("name"="country"))
  # HTML popup shown when a country polygon is clicked
  popup <- paste0("<strong>Country: </strong>",
                  shp2@data$name,
                  "<br><strong>Most important category: </strong>",
                  shp2@data$variable,
                  " (",
                  (shp2@data$value * 100), "%", ")")
  #
  # Leaflet map
  #
  output$map <- renderLeaflet({
    # Coropleth map
    leaflet(data = shp2) %>%
      # Add legend
      addLegend(colors = cus.pal, position = "bottomleft",
                labels = major.cats, opacity = 1, title = "Major Categories") %>%
      # Add polygons coloured by each country's top major category
      addPolygons(fillColor = ~pal.major(variable),
                  fillOpacity = 0.6,
                  color = "#BDBDC3",
                  weight = 1,
                  popup = popup) %>%
      # Set view on area between Europe & USA
      setView(lng = -27.5097656, lat = 29.0801758, zoom = 3)
  })
  #
  # Last update (this should come from a database)
  #
  output$lastUpdate <- renderText({
    # Currently just the render time, not a real data timestamp
    paste0("Last update: ", as.character(Sys.time()))
  })
  #
  # Create charts for side panel
  #
  # Reactive subset of the major-category data; returns NULL until a
  # country is chosen ("-" is the placeholder option in the UI)
  countryMajorData <- reactive({
    if(input$countries == "-") {
      return(NULL)
    } else {
      return(
        mock.data.main %>% filter(country == input$countries)
      )
    }
  })
  # Bar chart major categories
  output$majorCats <- renderPlot({
    d <- countryMajorData()
    if(is.null(d)) return(NULL)  # nothing selected yet -> empty panel
    d <- d %>%
      mutate(country = as.character(country))
    # Plot
    p <- ggplot(d, aes(x=variable, y=value, fill = variable)) +
      geom_bar(stat = "identity") +
      theme_cfi_scientific() +
      scale_fill_manual(values = rep("#2b8cbe", length(d$variable))) +
      scale_x_discrete(name="") +
      scale_y_continuous(name="", labels = percent) +
      theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 12),
            axis.text.y = element_text(size = 12),
            legend.position = "none")
    # To output
    print(p)
  })
  # Data for minor categories (needs both a country and a major
  # category to be selected)
  countryMinorData <- reactive({
    if(input$countries == "-") {
      return(NULL)
    } else if(input$majorCategories == "-") {
      return(NULL)
    } else {
      r <- mock.data.all$data.minor.cats[[input$majorCategories]] %>%
        filter(country == input$countries) %>%
        # Melt
        melt(., id.vars = c("country", "ISO3.codes"))
      # Strip the trailing " (...)" suffix from the category labels
      r$variable <- unname(sapply(as.character(r$variable), function(x) {
        stringr::str_split(x, " \\(")[[1]][1]
      } ))
      r
    }
  })
  # Bar chart minor categories
  output$minorCats <- renderPlot({
    d <- countryMinorData()
    if(is.null(d)) return(NULL)
    d <- d %>%
      mutate(country = as.character(country))
    # Plot
    p <- ggplot(d, aes(x=variable, y=value, fill = variable)) +
      geom_bar(stat = "identity") +
      theme_cfi_scientific() +
      scale_fill_manual(values = rep("#2b8cbe", length(d$variable))) +
      scale_x_discrete(name="", labels = abbreviate) +
      scale_y_continuous(name="", labels = percent) +
      theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 10),
            axis.text.y = element_text(size = 12),
            legend.position = "none")
    # To output
    print(p)
  })
}
| /server.R | no_license | JasperHG90/shiny-choropleth-map-example | R | false | false | 4,501 | r |
#
# This is a template for creating a leaflet chroropleth map shiny application.
# This app uses mock data that resembles survey answers to several major & minor categories
# This app is based on rstudio's 'superzip' example authored by Joe Cheng.
#
# Author: Jasper Ginn
#
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
library(reshape2)
library(ggplot2)
library(plotly)
# Server
function(input, output, session) {
# Put country shapedata in temporary var
shp2 <- shp
# Create mock data for testing purpose
source("functions/mapfunctions/mockdata.R")
mock.data.all <- mockData(countries = countries, ISO3.codes = ISO3)
# Mutate mock data for main categories
mock.data.main <- mock.data.all$data.major.cats %>%
melt(., id.vars = c("country", "ISO3.codes"))
# Pick the highest value for category
io <- mock.data.main %>%
group_by(country) %>%
filter(value == max(value)) %>%
ungroup() %>%
tbl_df() %>%
select(country, value, variable) %>%
mutate(value = round(value, digits=2)) %>%
unique()
# Remove where country == NA
#filter(!is.na(country))
# Join mockdata to shapefile
shp2@data <- shp@data %>%
left_join(., io, by=c("name"="country"))
# Popup
popup <- paste0("<strong>Country: </strong>",
shp2@data$name,
"<br><strong>Most important category: </strong>",
shp2@data$variable,
" (",
(shp2@data$value * 100), "%", ")")
#
# Leaflet map
#
output$map <- renderLeaflet({
# Coropleth map
leaflet(data = shp2) %>%
# Add legend
addLegend(colors = cus.pal, position = "bottomleft",
labels = major.cats, opacity = 1, title = "Major Categories") %>%
# Add polygons
addPolygons(fillColor = ~pal.major(variable),
fillOpacity = 0.6,
color = "#BDBDC3",
weight = 1,
popup = popup) %>%
# Set view on area between Europe & USA
setView(lng = -27.5097656, lat = 29.0801758, zoom = 3)
})
#
# Last update (this should come from a database)
#
output$lastUpdate <- renderText({
paste0("Last update: ", as.character(Sys.time()))
})
#
# Create charts for side panel
#
# Reactive function to subset data
countryMajorData <- reactive({
if(input$countries == "-") {
return(NULL)
} else {
return(
mock.data.main %>% filter(country == input$countries)
)
}
})
# Bar chart major categories
output$majorCats <- renderPlot({
d <- countryMajorData()
if(is.null(d)) return(NULL)
d <- d %>%
mutate(country = as.character(country))
# Plot
p <- ggplot(d, aes(x=variable, y=value, fill = variable)) +
geom_bar(stat = "identity") +
theme_cfi_scientific() +
scale_fill_manual(values = rep("#2b8cbe", length(d$variable))) +
scale_x_discrete(name="") +
scale_y_continuous(name="", labels = percent) +
theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 12),
axis.text.y = element_text(size = 12),
legend.position = "none")
# To output
print(p)
})
# Create data for minor categories
countryMinorData <- reactive({
if(input$countries == "-") {
return(NULL)
} else if(input$majorCategories == "-") {
return(NULL)
} else {
r <- mock.data.all$data.minor.cats[[input$majorCategories]] %>%
filter(country == input$countries) %>%
# Melt
melt(., id.vars = c("country", "ISO3.codes"))
r$variable <- unname(sapply(as.character(r$variable), function(x) {
stringr::str_split(x, " \\(")[[1]][1]
} ))
r
}
})
# Bar chart minor categories
output$minorCats <- renderPlot({
d <- countryMinorData()
if(is.null(d)) return(NULL)
d <- d %>%
mutate(country = as.character(country))
# Plot
p <- ggplot(d, aes(x=variable, y=value, fill = variable)) +
geom_bar(stat = "identity") +
theme_cfi_scientific() +
scale_fill_manual(values = rep("#2b8cbe", length(d$variable))) +
scale_x_discrete(name="", labels = abbreviate) +
scale_y_continuous(name="", labels = percent) +
theme(axis.text.x = element_text(angle = 60, hjust = 1, size = 10),
axis.text.y = element_text(size = 12),
legend.position = "none")
# To output
print(p)
})
}
|
## These functions create a matrix object given dimensions by the user
## For the given object cacheSolve calculates the inverse of the object
## and caches the result for reference. Operators on the object include
## "get," "set," "getinverse" and "setinverse"
## takes provided matrix and creates an object with functions allowing the
## user to cache the object and its inverse
## Construct a special "matrix" object that can cache its own inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse. Replacing the matrix
## via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(value) {
    x <<- value
    inv <<- NULL  # new matrix -> old inverse no longer valid
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv <<- inverse
  }
  getinverse <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## calculates the inverse on the matrix object and pushes the result to the
## object
## Return the inverse of the special "matrix" object produced by
## makeCacheMatrix. The inverse is computed with solve() on the first
## call and stored back into the object; later calls report
## "getting cached data" and return the stored copy. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: compute, store, and return the fresh inverse
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | kpeterse/ProgrammingAssignment2 | R | false | false | 1,116 | r | ## These functions create a matrix object given dimensions by the user
## For the given object cacheSolve calculates the inverse of the object
## and caches the result for reference. Operators on the object include
## "get," "set," "getinverse" and "setinverse"
## takes provided matrix and creates an object with functions allowing the
## user to cache the object and its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## calculates the inverse on the matrix object and pushes the result to the
## object
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
## preparations ####
LID.1548 = read.table("/Users/xhu/Documents/1548-links.txt")[,2]
library(solr);library(pracma);library(permute);library(data.table);library(caret);library(dummies);library(BBmisc)
url = 'http://localhost:8987/solr/LSH/afts'
# searching with LID, 15 of them cannot be found. The rest are all cases.
# Look up the Solr DBID for every LID. LIDs that are not indexed (the
# header comment says 15 of the 1548 cannot be found) are left as NA:
# assigning a zero-length search result would otherwise abort the loop
# with "replacement has length zero" -- the commented-out guard the
# author left behind shows that crash was hit.
L = length(LID.1548); DBID.1548 = rep(NA, L)
for (i in seq_len(L)) {
  Q1 = paste("LID:", shQuote(LID.1548[i]))
  search.i = solr_search(q = Q1, base = url, fl = '*,DBID', verbose = FALSE)$DBID
  # keep only the first hit; skip (leave NA) when nothing was returned
  if (!isempty(search.i)) DBID.1548[i] = search.i[1]
}
summary(DBID.1548)
query.need = " 244787 1442699 1958389 41817 43615 34584 53845 54060 1185590 2332739"
query = paste('TYPE:"{http://www.alfresco.com/model/actsearch2/salesforce/1.0}Case" AND -DBID:(',query.need,")")
result.search = solr_all(q= query, base=url, fl='*,[cached]', raw=TRUE, verbose=FALSE, rows=99999)
cachedInfo = solr_parse(result.search,'list')$response$docs
rm(result.search);gc()
### extracting features ####
#################################################################
rm(NER.LIST,DBID)
NER.LIST = vector('list',93434);DBID=rep(0,length(cachedInfo))
for(i in 1:length(cachedInfo)){
cachedInfo.i = cachedInfo[[i]]
cached.i.ner = cachedInfo.i$ner # several items in it
for (j in 1:length(cached.i.ner)) {
# originally 93434 cases, after removing cardinal, date and time, 13 cases have no entities, drop them.
if(strsplit(cached.i.ner[j],split = ":")[[1]][1] %in% c('CARDINAL','DATE','TIME')) cached.i.ner[j] <- NA
}
cached.i.ner = cached.i.ner[!is.na(cached.i.ner)]
NER.LIST[[i]]=cached.i.ner
DBID[i] = cachedInfo.i$DBID}
rm(cachedInfo);gc()
#NER.LIST.1 = NER.LIST[which(DBID %in% DBID.1548)]
NERwithDDBID = data.frame(DBID = rep(DBID,sapply(NER.LIST,function(x) length(x))),
entity = unlist(NER.LIST))
library(dplyr)
temp = NERwithDDBID %>% group_by(entity) %>% mutate(count.entity = n())
temp = temp %>% group_by(entity,DBID) %>% mutate(count.entity.dbid= n())
temp = temp %>% group_by(DBID) %>% mutate(count.dbid= n())
dbid.count = unique(temp[,c(1,5)])
NER.LIST = NER.LIST[sapply(NER.LIST, function(x) !isempty(x))]
# cases with no entity
dbid.no.entity.idx = which(sapply(NER.LIST, function(x) isempty(x))) # 13 is empty
dbid.notempty.entity = DBID[-dbid.no.entity.idx ]
entropy.LIST = sapply(NER.LIST, function(x) rep(0,length(x)))
ner.LIST = sapply(NER.LIST, function(x) character(length(x)))
for (i in 1: length(NER.LIST)){
NER_i = NER.LIST[[i]]
tf.table = data.frame(table(NER_i))
Nij = length(NER_i)
V_f_ij = tf.table$Freq / (Nij ^(tf.table$Freq))
entropy.LIST[[i]] = V_f_ij / sum(V_f_ij)
ner.LIST[[i]] = tf.table$NER_i
}
ner.term.frequency= sapply(NER.LIST, function(x) rep(0,length(x)))
for (i in 1: length(NER.LIST)){
NER_i = NER.LIST[[i]]
tf.table = data.frame(table(NER_i))
ner.term.frequency[[i]] = tf.table$Freq
}
ner.LIST = sapply(ner.LIST, function(x) as.character(x))
entities = unique(temp$entity)
entity = rep(0,length(entities))
for(j in 1:length(entities)){
w_j = entities[j]
for (i in 1: length(ner.LIST)){
if(w_j %in% ner.LIST[[i]]){
entity[j] = entity[j] - entropy.LIST[[i]][which(ner.LIST[[i]] == w_j)]/log(entropy.LIST[[i]][which(ner.LIST[[i]]==w_j)])
}
}
}
max(entity)*0.5
entity[which(entity>10)] # count = 98
usefulEntity = as.character(entities[which(entity > 10)]) # top 98 entities
NER.DF.NEW
newdf = data.frame(entity = entities,entropy = entity,count = unique(temp[,2:3]))
NERwithDDBID$count = temp$count
nrow(NERwithDDBID[which(NERwithDDBID$count<500),])
temp = NERwithDDBID[,2:3]
temp = unique(temp)
NER.LIST.1.unlist = unlist(NER.LIST.1)
length(unique(NER.LIST.1.unlist))
df = data.frame(docID = rep(1:length(NER.LIST.1), sapply(NER.LIST, function(x) length(x))), entityName = unlist(NER.LIST))
library(dplyr)
temp = NERwithDDBID %>% group_by(entity) %>% mutate(count = n())
NERwithDDBID$count = temp$count
df = df[which(df$count > 500),]
df$entityName <- as.factor(as.character(df$entityName))
df.2 = dummy(df$entityName)
df.2 = cbind(df$docID,df.2)
temp = df %>% group_by(entityName) %>% mutate(count = n())
df$count = temp$count
rownames(df.2) = df.2$docID
NERdfwithref = data.frame(NERref = 1:nrow(NER.DF.NEW.1),NER = NER.DF.NEW.1$NER,count =NER.DF.NEW.1$COUNT )
entitycounttable = data.frame(table(ENTITIES))
entitycounttable = entitycounttable[which(entitycounttable$Freq > 100),]
for(i in 1:length(NER.LIST)){
cached.i.ner = NER.LIST[[i]] # several items in it
for (j in cached.i.ner) {
if(!j %in% entitycounttable$ENTITIES) cached.i.ner[j] <- NA
}
cached.i.ner = cached.i.ner[!is.na(cached.i.ner)]
NER.LIST[[i]]=cached.i.ner
}
df = data.frame(docID = rep(1:length(NER.LIST), sapply(NER.LIST, function(x) length(x))), entityName = unlist(NER.LIST))
df.2 = dummy(df$entityName)
NER.LIST.1
rm(NER.LIST.REP)
rownames(NER.LIST.REP)
| /features/entity_selection.R | no_license | Ginny15/pulearning | R | false | false | 4,950 | r | ## preparations ####
LID.1548 = read.table("/Users/xhu/Documents/1548-links.txt")[,2]
library(solr);library(pracma);library(permute);library(data.table);library(caret);library(dummies);library(BBmisc)
url = 'http://localhost:8987/solr/LSH/afts'
# searching with LID, 15 of them cannot be found. The rest are all cases.
L= length(LID.1548); DBID.1548 = rep(NA,L)
for(i in 1:L){
Q1 = paste("LID:",shQuote(LID.1548[i]))
search.i = solr_search(q=Q1, base=url, fl='*,DBID', verbose=FALSE)$DBID
#DBID.1548[i] = ifelse(, NA, search.i)
#if(!isempty(search.i))
DBID.1548[i]=search.i
}
summary(DBID.1548)
query.need = " 244787 1442699 1958389 41817 43615 34584 53845 54060 1185590 2332739"
query = paste('TYPE:"{http://www.alfresco.com/model/actsearch2/salesforce/1.0}Case" AND -DBID:(',query.need,")")
result.search = solr_all(q= query, base=url, fl='*,[cached]', raw=TRUE, verbose=FALSE, rows=99999)
cachedInfo = solr_parse(result.search,'list')$response$docs
rm(result.search);gc()
### extracting features ####
#################################################################
rm(NER.LIST,DBID)
NER.LIST = vector('list',93434);DBID=rep(0,length(cachedInfo))
for(i in 1:length(cachedInfo)){
cachedInfo.i = cachedInfo[[i]]
cached.i.ner = cachedInfo.i$ner # several items in it
for (j in 1:length(cached.i.ner)) {
# originally 93434 cases, after removing cardinal, date and time, 13 cases have no entities, drop them.
if(strsplit(cached.i.ner[j],split = ":")[[1]][1] %in% c('CARDINAL','DATE','TIME')) cached.i.ner[j] <- NA
}
cached.i.ner = cached.i.ner[!is.na(cached.i.ner)]
NER.LIST[[i]]=cached.i.ner
DBID[i] = cachedInfo.i$DBID}
rm(cachedInfo);gc()
#NER.LIST.1 = NER.LIST[which(DBID %in% DBID.1548)]
NERwithDDBID = data.frame(DBID = rep(DBID,sapply(NER.LIST,function(x) length(x))),
entity = unlist(NER.LIST))
library(dplyr)
temp = NERwithDDBID %>% group_by(entity) %>% mutate(count.entity = n())
temp = temp %>% group_by(entity,DBID) %>% mutate(count.entity.dbid= n())
temp = temp %>% group_by(DBID) %>% mutate(count.dbid= n())
dbid.count = unique(temp[,c(1,5)])
NER.LIST = NER.LIST[sapply(NER.LIST, function(x) !isempty(x))]
# cases with no entity
dbid.no.entity.idx = which(sapply(NER.LIST, function(x) isempty(x))) # 13 is empty
dbid.notempty.entity = DBID[-dbid.no.entity.idx ]
entropy.LIST = sapply(NER.LIST, function(x) rep(0,length(x)))
ner.LIST = sapply(NER.LIST, function(x) character(length(x)))
for (i in 1: length(NER.LIST)){
NER_i = NER.LIST[[i]]
tf.table = data.frame(table(NER_i))
Nij = length(NER_i)
V_f_ij = tf.table$Freq / (Nij ^(tf.table$Freq))
entropy.LIST[[i]] = V_f_ij / sum(V_f_ij)
ner.LIST[[i]] = tf.table$NER_i
}
ner.term.frequency= sapply(NER.LIST, function(x) rep(0,length(x)))
for (i in 1: length(NER.LIST)){
NER_i = NER.LIST[[i]]
tf.table = data.frame(table(NER_i))
ner.term.frequency[[i]] = tf.table$Freq
}
ner.LIST = sapply(ner.LIST, function(x) as.character(x))
entities = unique(temp$entity)
entity = rep(0,length(entities))
for(j in 1:length(entities)){
w_j = entities[j]
for (i in 1: length(ner.LIST)){
if(w_j %in% ner.LIST[[i]]){
entity[j] = entity[j] - entropy.LIST[[i]][which(ner.LIST[[i]] == w_j)]/log(entropy.LIST[[i]][which(ner.LIST[[i]]==w_j)])
}
}
}
max(entity)*0.5
entity[which(entity>10)] # count = 98
usefulEntity = as.character(entities[which(entity > 10)]) # top 98 entities
NER.DF.NEW
newdf = data.frame(entity = entities,entropy = entity,count = unique(temp[,2:3]))
NERwithDDBID$count = temp$count
nrow(NERwithDDBID[which(NERwithDDBID$count<500),])
temp = NERwithDDBID[,2:3]
temp = unique(temp)
NER.LIST.1.unlist = unlist(NER.LIST.1)
length(unique(NER.LIST.1.unlist))
df = data.frame(docID = rep(1:length(NER.LIST.1), sapply(NER.LIST, function(x) length(x))), entityName = unlist(NER.LIST))
library(dplyr)
temp = NERwithDDBID %>% group_by(entity) %>% mutate(count = n())
NERwithDDBID$count = temp$count
df = df[which(df$count > 500),]
df$entityName <- as.factor(as.character(df$entityName))
df.2 = dummy(df$entityName)
df.2 = cbind(df$docID,df.2)
temp = df %>% group_by(entityName) %>% mutate(count = n())
df$count = temp$count
rownames(df.2) = df.2$docID
NERdfwithref = data.frame(NERref = 1:nrow(NER.DF.NEW.1),NER = NER.DF.NEW.1$NER,count =NER.DF.NEW.1$COUNT )
entitycounttable = data.frame(table(ENTITIES))
entitycounttable = entitycounttable[which(entitycounttable$Freq > 100),]
for(i in 1:length(NER.LIST)){
cached.i.ner = NER.LIST[[i]] # several items in it
for (j in cached.i.ner) {
if(!j %in% entitycounttable$ENTITIES) cached.i.ner[j] <- NA
}
cached.i.ner = cached.i.ner[!is.na(cached.i.ner)]
NER.LIST[[i]]=cached.i.ner
}
df = data.frame(docID = rep(1:length(NER.LIST), sapply(NER.LIST, function(x) length(x))), entityName = unlist(NER.LIST))
df.2 = dummy(df$entityName)
NER.LIST.1
rm(NER.LIST.REP)
rownames(NER.LIST.REP)
|
remove(list = ls())
#
# library(alluvial)
# library(ggalluvial)
# # library(vaersvax)
# data(vaccinations)
# levels(vaccinations$response) <- rev(levels(vaccinations$response))
# ggplot(vaccinations,
# aes(x = survey, stratum = response, alluvium = subject,
# y = freq,
# fill = response, label = response)) +
# scale_x_discrete(expand = c(.1, .1)) +
# geom_flow() +
# geom_stratum(alpha = .5) +
# geom_text(stat = "stratum", size = 3) +
# theme(legend.position = "none") +
# ggtitle("vaccination survey responses at three points in time")
# Libraries
library(tidyverse)
library(viridis)
library(patchwork)
library(hrbrthemes)
library(circlize)
# Load dataset from github
data <- read.table("https://raw.githubusercontent.com/holtzy/data_to_viz/master/Example_dataset/13_AdjacencyDirectedWeighted.csv", header=TRUE)
# Package
library(networkD3)
# I need a long format
data_long <- data %>%
rownames_to_column %>%
gather(key = 'key', value = 'value', -rowname) %>%
filter(value > 0)
colnames(data_long) <- c("source", "target", "value")
data_long$target <- paste(data_long$target, " ", sep="")
# From these flows we need to create a node data frame: it lists every entities involved in the flow
nodes <- data.frame(name=c(as.character(data_long$source), as.character(data_long$target)) %>% unique())
# With networkD3, connection must be provided using id, not using real name like in the links dataframe.. So we need to reformat it.
data_long$IDsource=match(data_long$source, nodes$name)-1
data_long$IDtarget=match(data_long$target, nodes$name)-1
# prepare colour scale
ColourScal ='d3.scaleOrdinal() .range(["#FDE725FF","#B4DE2CFF","#6DCD59FF","#35B779FF","#1F9E89FF","#26828EFF","#31688EFF","#3E4A89FF","#482878FF","#440154FF"])'
# Make the Network
sankeyNetwork(Links = data_long, Nodes = nodes,
Source = "IDsource", Target = "IDtarget",
Value = "value", NodeID = "name",
sinksRight=FALSE, colourScale=ColourScal, nodeWidth=40, fontSize=13, nodePadding=20)
# Load package
library(networkD3)
# Load energy projection data
URL <- "https://cdn.rawgit.com/christophergandrud/networkD3/master/JSONdata/energy.json"
Energy <- jsonlite::fromJSON(URL)
# Now we have 2 data frames: a 'links' data frame with 3 columns (from, to, value), and a 'nodes' data frame that gives the name of each node.
# Thus we can plot it
sankeyNetwork(Links = Energy$links, Nodes = Energy$nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh", fontSize = 12, nodeWidth = 30)
link <- Energy$links
node <- Energy$nodes
# 2 ------ INCIDENCE MATRIX
# Create an incidence matrix. Usually the flow goes from the row names to the column names.
# Remember that our connection are directed since we are working with a flow.
set.seed(1)
data=matrix(sample( seq(0,40), 49, replace=T ), 7, 7)
data[data < 35] = 0
colnames(data) = rownames(data) = c("group_A", "group_B", "group_C", "group_D", "group_E", "group_F", "group_G")
data
getwd()
write.csv(data, 'data.csv')
data <- read.csv('./data.csv') %>% as.data.frame()
rname <- c('forest1', 'water1', 'bareland1', 'forest2', 'water2', 'bareland2')
row.names(data) <-rname
data <- data[,-1]
# Transform it to connection data frame with tidyr from the tidyverse:
# Build the long-format edge list: one row per (source, target) pair with
# a non-zero flow. NOTE(review): gather() is superseded by pivot_longer()
# in current tidyr.
links = data %>%
  as.data.frame() %>%
  rownames_to_column(var="source") %>%
  gather(key="target", value="value", -1) %>%
  filter(value != 0)
# From these flows we need to create a node data frame: it lists every entity involved in the flow
nodes=data.frame(name=c(as.character(links$source), as.character(links$target)) %>% unique())
# With networkD3, connections must be given as ids, not names. The ids are
# zero-based (JavaScript convention), hence the "- 1".
links$IDsource=match(links$source, nodes$name)-1
links$IDtarget=match(links$target, nodes$name)-1
# Make the Network
sankeyNetwork(Links = links, Nodes = nodes,
              Source = "IDsource", Target = "IDtarget",
              Value = "value", NodeID = "name",
              sinksRight=FALSE)
| /_Sankey_Network/sankeyNetwork_v0.R | permissive | Yingjie4Science/R_code_cheatsheet | R | false | false | 4,195 | r |
remove(list = ls())
#
# library(alluvial)
# library(ggalluvial)
# # library(vaersvax)
# data(vaccinations)
# levels(vaccinations$response) <- rev(levels(vaccinations$response))
# ggplot(vaccinations,
# aes(x = survey, stratum = response, alluvium = subject,
# y = freq,
# fill = response, label = response)) +
# scale_x_discrete(expand = c(.1, .1)) +
# geom_flow() +
# geom_stratum(alpha = .5) +
# geom_text(stat = "stratum", size = 3) +
# theme(legend.position = "none") +
# ggtitle("vaccination survey responses at three points in time")
# Libraries
library(tidyverse)
library(viridis)
library(patchwork)
library(hrbrthemes)
library(circlize)
# Load dataset from github
data <- read.table("https://raw.githubusercontent.com/holtzy/data_to_viz/master/Example_dataset/13_AdjacencyDirectedWeighted.csv", header=TRUE)
# Package
library(networkD3)
# I need a long format
data_long <- data %>%
rownames_to_column %>%
gather(key = 'key', value = 'value', -rowname) %>%
filter(value > 0)
colnames(data_long) <- c("source", "target", "value")
data_long$target <- paste(data_long$target, " ", sep="")
# From these flows we need to create a node data frame: it lists every entities involved in the flow
nodes <- data.frame(name=c(as.character(data_long$source), as.character(data_long$target)) %>% unique())
# With networkD3, connection must be provided using id, not using real name like in the links dataframe.. So we need to reformat it.
data_long$IDsource=match(data_long$source, nodes$name)-1
data_long$IDtarget=match(data_long$target, nodes$name)-1
# prepare colour scale
ColourScal ='d3.scaleOrdinal() .range(["#FDE725FF","#B4DE2CFF","#6DCD59FF","#35B779FF","#1F9E89FF","#26828EFF","#31688EFF","#3E4A89FF","#482878FF","#440154FF"])'
# Make the Network
sankeyNetwork(Links = data_long, Nodes = nodes,
Source = "IDsource", Target = "IDtarget",
Value = "value", NodeID = "name",
sinksRight=FALSE, colourScale=ColourScal, nodeWidth=40, fontSize=13, nodePadding=20)
# Load package
library(networkD3)
# Load energy projection data
URL <- "https://cdn.rawgit.com/christophergandrud/networkD3/master/JSONdata/energy.json"
Energy <- jsonlite::fromJSON(URL)
# Now we have 2 data frames: a 'links' data frame with 3 columns (from, to, value), and a 'nodes' data frame that gives the name of each node.
# Thus we can plot it
sankeyNetwork(Links = Energy$links, Nodes = Energy$nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh", fontSize = 12, nodeWidth = 30)
link <- Energy$links
node <- Energy$nodes
# 2 ------ INCIDENCE MATRIX
# Create an incidence matrix. Usually the flow goes from the row names to the column names.
# Remember that our connection are directed since we are working with a flow.
set.seed(1)
data=matrix(sample( seq(0,40), 49, replace=T ), 7, 7)
data[data < 35] = 0
colnames(data) = rownames(data) = c("group_A", "group_B", "group_C", "group_D", "group_E", "group_F", "group_G")
data
getwd()
write.csv(data, 'data.csv')
data <- read.csv('./data.csv') %>% as.data.frame()
rname <- c('forest1', 'water1', 'bareland1', 'forest2', 'water2', 'bareland2')
row.names(data) <-rname
data <- data[,-1]
# Transform it to connection data frame with tidyr from the tidyverse:
links = data %>%
as.data.frame() %>%
rownames_to_column(var="source") %>%
gather(key="target", value="value", -1) %>%
filter(value != 0)
# From these flows we need to create a node data frame: it lists every entities involved in the flow
nodes=data.frame(name=c(as.character(links$source), as.character(links$target)) %>% unique())
# With networkD3, connection must be provided using id, not using real name like in the links dataframe.. So we need to reformat it.
links$IDsource=match(links$source, nodes$name)-1
links$IDtarget=match(links$target, nodes$name)-1
# Make the Network
sankeyNetwork(Links = links, Nodes = nodes,
Source = "IDsource", Target = "IDtarget",
Value = "value", NodeID = "name",
sinksRight=FALSE)
|
# Write allele-frequency bin counts for the EUR cohort (CYP gene set) to a
# text file by redirecting all print() output with sink().
sink("Variants_counts_EUR_cyp.txt")
print('CYP')
# NOTE(review): assumes column 6 of EUR_anc.frq is the frequency column --
# verify against the file header (plink .frq layouts differ).
data <- read.table('EUR_anc.frq', header=TRUE, row.names=NULL)
# For each cut-off f, report counts in the two bins (0, f] and (f, 1]
freq = c(0.01, 0.05, 0.25)
for(f in freq){
MIF <- data[,6]
MIF <- subset(MIF, MIF >0) #remove 0
# NOTE(review): `hist` and `colnames` shadow the base R functions of the
# same name for the rest of this script.
hist<- hist(MIF, plot =FALSE, breaks= c(0,f,1))
#colnames: build "lo-hi" labels, one per bin
colnames <- c()
for(x in 1:(length(hist$breaks)-1)){
interval<- paste(hist$breaks[x],'-',hist$breaks[x+1], sep='')
colnames <- c(colnames, interval)
}
#print result
print(colnames)
print(hist$counts)
}
#freq intermediaire: three bins 0-0.05 / 0.05-0.25 / 0.25-1 in one pass
MIF <- data[,6]
MIF <- subset(MIF, MIF >0) #remove 0
hist<- hist(MIF, plot =FALSE, breaks= c(0,0.05,0.25,1))
#colnames
colnames <- c()
for(x in 1:(length(hist$breaks)-1)){
interval<- paste(hist$breaks[x],'-',hist$breaks[x+1], sep='')
colnames <- c(colnames, interval)
}
#print result
print(colnames)
print(hist$counts)
# Middle (intermediate) bin vs the pooled extremes (rare + common)
print("0.05-0.25 0-0.05, 0.25-1")
a = c(hist$counts[2], hist$counts[3]+hist$counts[1])
print(a)
#close file
sink()
| /results/2017.05.30/SFS_SAMESIZE/get_histcounts_EUR.R | no_license | arsthilaire/PGX_DENOVO | R | false | false | 971 | r | sink("Variants_counts_EUR_cyp.txt")
print('CYP')
data <- read.table('EUR_anc.frq', header=TRUE, row.names=NULL)
freq = c(0.01, 0.05, 0.25)
for(f in freq){
MIF <- data[,6]
MIF <- subset(MIF, MIF >0) #remove 0
hist<- hist(MIF, plot =FALSE, breaks= c(0,f,1))
#colnames
colnames <- c()
for(x in 1:(length(hist$breaks)-1)){
interval<- paste(hist$breaks[x],'-',hist$breaks[x+1], sep='')
colnames <- c(colnames, interval)
}
#print result
print(colnames)
print(hist$counts)
}
#freq intermediaire
MIF <- data[,6]
MIF <- subset(MIF, MIF >0) #remove 0
hist<- hist(MIF, plot =FALSE, breaks= c(0,0.05,0.25,1))
#colnames
colnames <- c()
for(x in 1:(length(hist$breaks)-1)){
interval<- paste(hist$breaks[x],'-',hist$breaks[x+1], sep='')
colnames <- c(colnames, interval)
}
#print result
print(colnames)
print(hist$counts)
print("0.05-0.25 0-0.05, 0.25-1")
a = c(hist$counts[2], hist$counts[3]+hist$counts[1])
print(a)
#close file
sink()
|
# OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataCSVValidationDTO Class
#'
#' @field errors
#' @field dataErrors
#' @field sizeMax
#' @field validation_token
#' @field nb_lines_imported
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# DataCSVValidationDTO: result of validating/importing a data CSV file
# against the OpenSilex API.
#
# Fields (all NULL until set):
#   errors            - CSVValidationModel (R6) with file-level errors
#   dataErrors        - DataCSVValidationModel (R6) with data-level errors
#   sizeMax           - numeric scalar
#   validation_token  - character scalar
#   nb_lines_imported - numeric scalar
DataCSVValidationDTO <- R6::R6Class(
  'DataCSVValidationDTO',
  public = list(
    `errors` = NULL,
    `dataErrors` = NULL,
    `sizeMax` = NULL,
    `validation_token` = NULL,
    `nb_lines_imported` = NULL,

    # Construct a DTO; each argument is type-checked only when supplied.
    initialize = function(`errors`, `dataErrors`, `sizeMax`, `validation_token`, `nb_lines_imported`){
      if (!missing(`errors`)) {
        stopifnot(R6::is.R6(`errors`))
        self$`errors` <- `errors`
      }
      if (!missing(`dataErrors`)) {
        stopifnot(R6::is.R6(`dataErrors`))
        self$`dataErrors` <- `dataErrors`
      }
      if (!missing(`sizeMax`)) {
        stopifnot(is.numeric(`sizeMax`), length(`sizeMax`) == 1)
        self$`sizeMax` <- `sizeMax`
      }
      if (!missing(`validation_token`)) {
        stopifnot(is.character(`validation_token`), length(`validation_token`) == 1)
        self$`validation_token` <- `validation_token`
      }
      if (!missing(`nb_lines_imported`)) {
        stopifnot(is.numeric(`nb_lines_imported`), length(`nb_lines_imported`) == 1)
        self$`nb_lines_imported` <- `nb_lines_imported`
      }
    },

    # Serialise to a named list of JSON-ready values; nested models are
    # serialised through their own toJSON() and NULL fields are omitted.
    toJSON = function() {
      DataCSVValidationDTOObject <- list()
      if (!is.null(self$`errors`)) {
        DataCSVValidationDTOObject[['errors']] <- self$`errors`$toJSON()
      }
      if (!is.null(self$`dataErrors`)) {
        DataCSVValidationDTOObject[['dataErrors']] <- self$`dataErrors`$toJSON()
      }
      if (!is.null(self$`sizeMax`)) {
        DataCSVValidationDTOObject[['sizeMax']] <- self$`sizeMax`
      }
      if (!is.null(self$`validation_token`)) {
        DataCSVValidationDTOObject[['validation_token']] <- self$`validation_token`
      }
      if (!is.null(self$`nb_lines_imported`)) {
        DataCSVValidationDTOObject[['nb_lines_imported']] <- self$`nb_lines_imported`
      }
      DataCSVValidationDTOObject
    },

    # Populate fields from a JSON string; delegates to fromJSONObject()
    # (the two methods previously duplicated the same body verbatim).
    fromJSON = function(DataCSVValidationDTOJson) {
      self$fromJSONObject(jsonlite::fromJSON(DataCSVValidationDTOJson))
    },

    # Populate fields from an already-parsed list; absent keys leave the
    # corresponding field untouched.
    fromJSONObject = function(DataCSVValidationDTOObject) {
      if (!is.null(DataCSVValidationDTOObject$`errors`)) {
        errorsObject <- CSVValidationModel$new()
        errorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$errors, auto_unbox = TRUE, null = "null"))
        self$`errors` <- errorsObject
      }
      if (!is.null(DataCSVValidationDTOObject$`dataErrors`)) {
        dataErrorsObject <- DataCSVValidationModel$new()
        dataErrorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$dataErrors, auto_unbox = TRUE, null = "null"))
        self$`dataErrors` <- dataErrorsObject
      }
      if (!is.null(DataCSVValidationDTOObject$`sizeMax`)) {
        self$`sizeMax` <- DataCSVValidationDTOObject$`sizeMax`
      }
      if (!is.null(DataCSVValidationDTOObject$`validation_token`)) {
        self$`validation_token` <- DataCSVValidationDTOObject$`validation_token`
      }
      if (!is.null(DataCSVValidationDTOObject$`nb_lines_imported`)) {
        self$`nb_lines_imported` <- DataCSVValidationDTOObject$`nb_lines_imported`
      }
    },

    # Hand-built JSON string; NULL fields are now emitted as JSON null
    # (previously a NULL `errors`/`dataErrors` raised an error here because
    # `$toJSON()` was called unconditionally).
    toJSONString = function() {
      sprintf(
        '{
           "errors": %s,
           "dataErrors": %s,
           "sizeMax": %s,
           "validation_token": %s,
           "nb_lines_imported": %s
        }',
        if (is.null(self$`errors`)) "null" else jsonlite::toJSON(self$`errors`$toJSON(), auto_unbox = TRUE, null = "null"),
        if (is.null(self$`dataErrors`)) "null" else jsonlite::toJSON(self$`dataErrors`$toJSON(), auto_unbox = TRUE, null = "null"),
        ifelse(is.null(self$`sizeMax`), "null", as.numeric(jsonlite::toJSON(self$`sizeMax`, auto_unbox = TRUE, null = "null"))),
        ifelse(is.null(self$`validation_token`), "null", jsonlite::toJSON(self$`validation_token`, auto_unbox = TRUE, null = "null")),
        ifelse(is.null(self$`nb_lines_imported`), "null", as.numeric(jsonlite::toJSON(self$`nb_lines_imported`, auto_unbox = TRUE, null = "null")))
      )
    },

    # Populate fields from a JSON string.
    # FIX: the original assigned the (NULL-ish) *return value* of the nested
    # models' fromJSON() to `errors`/`dataErrors` instead of the populated
    # model objects themselves.
    fromJSONString = function(DataCSVValidationDTOJson) {
      DataCSVValidationDTOObject <- jsonlite::fromJSON(DataCSVValidationDTOJson)
      errorsObject <- CSVValidationModel$new()
      errorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$errors, auto_unbox = TRUE))
      self$`errors` <- errorsObject
      dataErrorsObject <- DataCSVValidationModel$new()
      dataErrorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$dataErrors, auto_unbox = TRUE))
      self$`dataErrors` <- dataErrorsObject
      self$`sizeMax` <- DataCSVValidationDTOObject$`sizeMax`
      self$`validation_token` <- DataCSVValidationDTOObject$`validation_token`
      self$`nb_lines_imported` <- DataCSVValidationDTOObject$`nb_lines_imported`
    }
  )
)
| /R/DataCSVValidationDTO.r | no_license | OpenSILEX/opensilexClientToolsR | R | false | false | 6,114 | r | # OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataCSVValidationDTO Class
#'
#' @field errors
#' @field dataErrors
#' @field sizeMax
#' @field validation_token
#' @field nb_lines_imported
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# DataCSVValidationDTO: result of validating/importing a data CSV file
# against the OpenSilex API.
#
# Fields (all NULL until set):
#   errors            - CSVValidationModel (R6) with file-level errors
#   dataErrors        - DataCSVValidationModel (R6) with data-level errors
#   sizeMax           - numeric scalar
#   validation_token  - character scalar
#   nb_lines_imported - numeric scalar
DataCSVValidationDTO <- R6::R6Class(
  'DataCSVValidationDTO',
  public = list(
    `errors` = NULL,
    `dataErrors` = NULL,
    `sizeMax` = NULL,
    `validation_token` = NULL,
    `nb_lines_imported` = NULL,

    # Construct a DTO; each argument is type-checked only when supplied.
    initialize = function(`errors`, `dataErrors`, `sizeMax`, `validation_token`, `nb_lines_imported`){
      if (!missing(`errors`)) {
        stopifnot(R6::is.R6(`errors`))
        self$`errors` <- `errors`
      }
      if (!missing(`dataErrors`)) {
        stopifnot(R6::is.R6(`dataErrors`))
        self$`dataErrors` <- `dataErrors`
      }
      if (!missing(`sizeMax`)) {
        stopifnot(is.numeric(`sizeMax`), length(`sizeMax`) == 1)
        self$`sizeMax` <- `sizeMax`
      }
      if (!missing(`validation_token`)) {
        stopifnot(is.character(`validation_token`), length(`validation_token`) == 1)
        self$`validation_token` <- `validation_token`
      }
      if (!missing(`nb_lines_imported`)) {
        stopifnot(is.numeric(`nb_lines_imported`), length(`nb_lines_imported`) == 1)
        self$`nb_lines_imported` <- `nb_lines_imported`
      }
    },

    # Serialise to a named list of JSON-ready values; nested models are
    # serialised through their own toJSON() and NULL fields are omitted.
    toJSON = function() {
      DataCSVValidationDTOObject <- list()
      if (!is.null(self$`errors`)) {
        DataCSVValidationDTOObject[['errors']] <- self$`errors`$toJSON()
      }
      if (!is.null(self$`dataErrors`)) {
        DataCSVValidationDTOObject[['dataErrors']] <- self$`dataErrors`$toJSON()
      }
      if (!is.null(self$`sizeMax`)) {
        DataCSVValidationDTOObject[['sizeMax']] <- self$`sizeMax`
      }
      if (!is.null(self$`validation_token`)) {
        DataCSVValidationDTOObject[['validation_token']] <- self$`validation_token`
      }
      if (!is.null(self$`nb_lines_imported`)) {
        DataCSVValidationDTOObject[['nb_lines_imported']] <- self$`nb_lines_imported`
      }
      DataCSVValidationDTOObject
    },

    # Populate fields from a JSON string; delegates to fromJSONObject()
    # (the two methods previously duplicated the same body verbatim).
    fromJSON = function(DataCSVValidationDTOJson) {
      self$fromJSONObject(jsonlite::fromJSON(DataCSVValidationDTOJson))
    },

    # Populate fields from an already-parsed list; absent keys leave the
    # corresponding field untouched.
    fromJSONObject = function(DataCSVValidationDTOObject) {
      if (!is.null(DataCSVValidationDTOObject$`errors`)) {
        errorsObject <- CSVValidationModel$new()
        errorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$errors, auto_unbox = TRUE, null = "null"))
        self$`errors` <- errorsObject
      }
      if (!is.null(DataCSVValidationDTOObject$`dataErrors`)) {
        dataErrorsObject <- DataCSVValidationModel$new()
        dataErrorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$dataErrors, auto_unbox = TRUE, null = "null"))
        self$`dataErrors` <- dataErrorsObject
      }
      if (!is.null(DataCSVValidationDTOObject$`sizeMax`)) {
        self$`sizeMax` <- DataCSVValidationDTOObject$`sizeMax`
      }
      if (!is.null(DataCSVValidationDTOObject$`validation_token`)) {
        self$`validation_token` <- DataCSVValidationDTOObject$`validation_token`
      }
      if (!is.null(DataCSVValidationDTOObject$`nb_lines_imported`)) {
        self$`nb_lines_imported` <- DataCSVValidationDTOObject$`nb_lines_imported`
      }
    },

    # Hand-built JSON string; NULL fields are now emitted as JSON null
    # (previously a NULL `errors`/`dataErrors` raised an error here because
    # `$toJSON()` was called unconditionally).
    toJSONString = function() {
      sprintf(
        '{
           "errors": %s,
           "dataErrors": %s,
           "sizeMax": %s,
           "validation_token": %s,
           "nb_lines_imported": %s
        }',
        if (is.null(self$`errors`)) "null" else jsonlite::toJSON(self$`errors`$toJSON(), auto_unbox = TRUE, null = "null"),
        if (is.null(self$`dataErrors`)) "null" else jsonlite::toJSON(self$`dataErrors`$toJSON(), auto_unbox = TRUE, null = "null"),
        ifelse(is.null(self$`sizeMax`), "null", as.numeric(jsonlite::toJSON(self$`sizeMax`, auto_unbox = TRUE, null = "null"))),
        ifelse(is.null(self$`validation_token`), "null", jsonlite::toJSON(self$`validation_token`, auto_unbox = TRUE, null = "null")),
        ifelse(is.null(self$`nb_lines_imported`), "null", as.numeric(jsonlite::toJSON(self$`nb_lines_imported`, auto_unbox = TRUE, null = "null")))
      )
    },

    # Populate fields from a JSON string.
    # FIX: the original assigned the (NULL-ish) *return value* of the nested
    # models' fromJSON() to `errors`/`dataErrors` instead of the populated
    # model objects themselves.
    fromJSONString = function(DataCSVValidationDTOJson) {
      DataCSVValidationDTOObject <- jsonlite::fromJSON(DataCSVValidationDTOJson)
      errorsObject <- CSVValidationModel$new()
      errorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$errors, auto_unbox = TRUE))
      self$`errors` <- errorsObject
      dataErrorsObject <- DataCSVValidationModel$new()
      dataErrorsObject$fromJSON(jsonlite::toJSON(DataCSVValidationDTOObject$dataErrors, auto_unbox = TRUE))
      self$`dataErrors` <- dataErrorsObject
      self$`sizeMax` <- DataCSVValidationDTOObject$`sizeMax`
      self$`validation_token` <- DataCSVValidationDTOObject$`validation_token`
      self$`nb_lines_imported` <- DataCSVValidationDTOObject$`nb_lines_imported`
    }
  )
)
|
context("fetchKSSL() -- requires internet connection")

## sample data (live requests against the KSSL web service)
x <- fetchKSSL(series = 'sierra')
x.morph <- fetchKSSL(series = 'sierra', returnMorphologicData = TRUE)

test_that("fetchKSSL() returns an SPC or list", {
  # standard request
  # inherits() is robust when class() has length > 1, unlike matching
  # against the raw class() vector
  expect_true(inherits(x, 'SoilProfileCollection'))
  # SPC + morphologic data
  expect_true(inherits(x.morph, 'list'))
  expect_true(inherits(x.morph$SPC, 'SoilProfileCollection'))
  expect_true(inherits(x.morph$morph, 'list'))
})

test_that("fetchKSSL() returns reasonable data", {
  # standard request: non-empty site/horizon tables, expected ID and
  # depth column names
  expect_true(nrow(site(x)) > 0)
  expect_true(nrow(horizons(x)) > 0)
  expect_equal(idname(x), 'pedon_key')
  expect_equal(horizonDepths(x), c("hzn_top", "hzn_bot"))
})

test_that("fetchKSSL() returns data associated with named series (sierra)", {
  # all of the results should contain the search term
  f <- grepl('sierra', x$taxonname, ignore.case = TRUE)
  expect_true(all(f))
})

test_that("fetchKSSL() returns NULL with bogus query", {
  # a message is printed and NULL returned when no results
  res <- suppressMessages(fetchKSSL(series = 'XXX'))
  expect_null(res)
})
| /tests/testthat/test-fetchKSSL.R | no_license | ewanoleghe/soilDB | R | false | false | 1,165 | r | context("fetchKSSL() -- requires internet connection")
## sample data (live requests against the KSSL web service)
x <- fetchKSSL(series = 'sierra')
x.morph <- fetchKSSL(series = 'sierra', returnMorphologicData = TRUE)

test_that("fetchKSSL() returns an SPC or list", {
  # standard request
  # inherits() is robust when class() has length > 1, unlike matching
  # against the raw class() vector
  expect_true(inherits(x, 'SoilProfileCollection'))
  # SPC + morphologic data
  expect_true(inherits(x.morph, 'list'))
  expect_true(inherits(x.morph$SPC, 'SoilProfileCollection'))
  expect_true(inherits(x.morph$morph, 'list'))
})

test_that("fetchKSSL() returns reasonable data", {
  # standard request: non-empty site/horizon tables, expected ID and
  # depth column names
  expect_true(nrow(site(x)) > 0)
  expect_true(nrow(horizons(x)) > 0)
  expect_equal(idname(x), 'pedon_key')
  expect_equal(horizonDepths(x), c("hzn_top", "hzn_bot"))
})

test_that("fetchKSSL() returns data associated with named series (sierra)", {
  # all of the results should contain the search term
  f <- grepl('sierra', x$taxonname, ignore.case = TRUE)
  expect_true(all(f))
})

test_that("fetchKSSL() returns NULL with bogus query", {
  # a message is printed and NULL returned when no results
  res <- suppressMessages(fetchKSSL(series = 'XXX'))
  expect_null(res)
})
context("mesh-sanity")
library(raster)
library(sf)
library(silicate)
library(dplyr)
# fixture: a 3x3 raster of diag(3) polygonised with dissolve = TRUE
# (presumably merging cells by value into two features — confirm), plus
# its constrained triangulation via sfdct
v <- raster(diag(3))
p <- st_as_sf(rasterToPolygons(v, dissolve = TRUE))
tp <- st_cast(sfdct::ct_triangulate(p), warn = FALSE)
# expected number of unique vertices shared by both representations
nverts <- 16
test_that("vertex de-duplication is sane", {
  # raw coordinates and anglr's vertex table must agree on the count
  expect_equal(sc_coord(p) %>% distinct() %>% nrow(),
               nverts)
  expect_equal(sc_coord(tp) %>% distinct() %>% nrow(),
               nverts)
  expect_equal(anglr(p)$v %>% nrow(),
               nverts)
  expect_equal(anglr(tp)$v %>% nrow(),
               nverts)
})
ntriangles <- nrow(gibble::gibble(tp))
test_that("triangle set is equivalent", {
  expect_equal(ntriangles, 18L)
  ## triangulating p here and below fails because of https://github.com/hypertidy/anglr/issues/54
  ## but it works for tp because those triangles already exist and the mesh comes out the same
  anglr(p)$t %>% nrow() %>% expect_equal(ntriangles)
  anglr(tp)$t %>% nrow() %>% expect_equal(ntriangles)
  ## we expect 18 because although the (constant) z value requires distinct features
  ## the number of triangles is the same, as one feature is in the gaps of the other
  anglr(p, z = "layer")$t %>% nrow() %>% expect_equal(ntriangles)
  anglr(tp, z = "layer")$t %>% nrow() %>% expect_equal(ntriangles)
})
| /tests/testthat/test-mesh-sanity.R | no_license | cuulee/anglr | R | false | false | 1,293 | r | context("mesh-sanity")
library(raster)
library(sf)
library(silicate)
library(dplyr)
# fixture: a 3x3 raster of diag(3) polygonised with dissolve = TRUE
# (presumably merging cells by value into two features — confirm), plus
# its constrained triangulation via sfdct
v <- raster(diag(3))
p <- st_as_sf(rasterToPolygons(v, dissolve = TRUE))
tp <- st_cast(sfdct::ct_triangulate(p), warn = FALSE)
# expected number of unique vertices shared by both representations
nverts <- 16
test_that("vertex de-duplication is sane", {
  # raw coordinates and anglr's vertex table must agree on the count
  expect_equal(sc_coord(p) %>% distinct() %>% nrow(),
               nverts)
  expect_equal(sc_coord(tp) %>% distinct() %>% nrow(),
               nverts)
  expect_equal(anglr(p)$v %>% nrow(),
               nverts)
  expect_equal(anglr(tp)$v %>% nrow(),
               nverts)
})
ntriangles <- nrow(gibble::gibble(tp))
test_that("triangle set is equivalent", {
  expect_equal(ntriangles, 18L)
  ## triangulating p here and below fails because of https://github.com/hypertidy/anglr/issues/54
  ## but it works for tp because those triangles already exist and the mesh comes out the same
  anglr(p)$t %>% nrow() %>% expect_equal(ntriangles)
  anglr(tp)$t %>% nrow() %>% expect_equal(ntriangles)
  ## we expect 18 because although the (constant) z value requires distinct features
  ## the number of triangles is the same, as one feature is in the gaps of the other
  anglr(p, z = "layer")$t %>% nrow() %>% expect_equal(ntriangles)
  anglr(tp, z = "layer")$t %>% nrow() %>% expect_equal(ntriangles)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EAP.R
\name{EAP}
\alias{EAP}
\title{Calculating Expected a Posteriori Value for Theta}
\usage{
EAP(raschObj, lower = -6, upper = 6)
}
\value{
A numeric representing the expected a posteriori (EAP) estimate of theta
\item{Top/Bottom}{the EAP estimate of theta, given as the ratio of two numerical integrals}
}
\description{
Calculating the expected a posteriori (EAP) value for theta over the interval from \code{lower} to \code{upper}
}
\section{Slots}{
\describe{
\item{\code{raschObj}}{An object of class Rasch}
\item{\code{lower}}{A numerical input the lower bound defaulted to -6}
\item{\code{upper}}{A numerical input the upper bound defaulted to 6}
}}
\examples{
myRasch <- newRasch("Emily", c(3, 4, 12), c(1, 1, 0))
EAP(myRasch, lower = -6, upper = 6)
}
\seealso{
newRasch
}
\author{
Emily Garner<\email{emily.garner@wustl.edu}>
}
| /easyRasch/man/EAP.Rd | no_license | emilyg95/Midterm | R | false | true | 826 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EAP.R
\name{EAP}
\alias{EAP}
\title{Calculating Expected a Posteriori Value for Theta}
\usage{
EAP(raschObj, lower = -6, upper = 6)
}
\value{
A numeric representing the expected a posteriori (EAP) estimate of theta
\item{Top/Bottom}{the EAP estimate of theta, given as the ratio of two numerical integrals}
}
\description{
Calculating the expected a posteriori (EAP) value for theta over the interval from \code{lower} to \code{upper}
}
\section{Slots}{
\describe{
\item{\code{raschObj}}{An object of class Rasch}
\item{\code{lower}}{A numerical input the lower bound defaulted to -6}
\item{\code{upper}}{A numerical input the upper bound defaulted to 6}
}}
\examples{
myRasch <- newRasch("Emily", c(3, 4, 12), c(1, 1, 0))
EAP(myRasch, lower = -6, upper = 6)
}
\seealso{
newRasch
}
\author{
Emily Garner<\email{emily.garner@wustl.edu}>
}
|
# Copyright 2015 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
context("utils")
test_that("detected", {
  # scalar cases: value must exceed the limit; an NA limit means the
  # value only needs to be positive; NA values propagate
  expect_identical(detected(1, 1), FALSE)
  expect_identical(detected(2, 1), TRUE)
  expect_identical(detected(NA, NA), NA)
  expect_identical(detected(NA, 1), NA)
  expect_identical(detected(0, NA), FALSE)
  expect_identical(detected(1, NA), TRUE)
  # vectorised cases, including recycling of a shorter limit vector
  expect_identical(detected(1:2, 1:2), c(FALSE, FALSE))
  expect_identical(detected(2:3, 1:2), c(TRUE, TRUE))
  expect_identical(detected(1:2, 1:1), c(FALSE, TRUE))
  expect_identical(detected(0:3, c(NA, NA, 2, 2)), c(FALSE, TRUE, FALSE, TRUE))
})
test_that("punctuate_strings", {
  # Oxford-free list punctuation with a configurable final conjunction
  expect_identical(punctuate_strings(c("x")), "x")
  expect_identical(punctuate_strings(c("x", "y")), "x or y")
  expect_identical(punctuate_strings(c("x", "y", "z")), "x, y or z")
  expect_identical(punctuate_strings(c("x", "y", "z", "a")), "x, y, z or a")
  expect_identical(punctuate_strings(c("x", "y", "z", "a"), "and"), "x, y, z and a")
})
test_that("add_missing_columns", {
  data(ccme)
  # non-data.frame input errors
  expect_error(add_missing_columns(1))
  x <- add_missing_columns(ccme, list(Test = NA_real_), messages = FALSE)
  # NOTE(review): expect_is() is deprecated in testthat 3e; consider
  # expect_s3_class() when the package moves to the 3rd edition
  expect_is(x, "data.frame")
  expect_equal(colnames(x), c(colnames(ccme), "Test"))
  expect_message(add_missing_columns(ccme, list(Test = NA_real_), messages = TRUE))
  # adding a column that already exists is a no-op
  expect_equal(ccme, add_missing_columns(ccme, list(Date = as.Date("2000-01-01")), messages = FALSE))
})
test_that("delete_rows_with_certain_values", {
  x <- data.frame(X = c(1, 2, NA, 4, NA), Y = c(1, NA, NA, 4, 5), Z = 1:5)
  expect_message(delete_rows_with_certain_values(x, list("X", "Y"), messages = TRUE))
  # separate list elements: drop rows where EITHER column is NA
  z <- delete_rows_with_certain_values(x, list("X", "Y"), messages = FALSE)
  expect_identical(x[!is.na(x$X) & !is.na(x$Y), , drop = FALSE], z)
  # combined element: drop rows where BOTH columns are NA
  z <- delete_rows_with_certain_values(x, list(c("X", "Y")), messages = FALSE)
  expect_identical(x[!(is.na(x$X) & is.na(x$Y)), , drop = FALSE], z)
})
test_that("is_color", {
  expect_true(is_color("black"))
  expect_false(is_color("Date"))
})
| /tests/testthat/test-utils.R | permissive | bcgov/wqbc | R | false | false | 2,539 | r | # Copyright 2015 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
context("utils")
test_that("detected", {
  # scalar cases: value must exceed the limit; an NA limit means the
  # value only needs to be positive; NA values propagate
  expect_identical(detected(1, 1), FALSE)
  expect_identical(detected(2, 1), TRUE)
  expect_identical(detected(NA, NA), NA)
  expect_identical(detected(NA, 1), NA)
  expect_identical(detected(0, NA), FALSE)
  expect_identical(detected(1, NA), TRUE)
  # vectorised cases, including recycling of a shorter limit vector
  expect_identical(detected(1:2, 1:2), c(FALSE, FALSE))
  expect_identical(detected(2:3, 1:2), c(TRUE, TRUE))
  expect_identical(detected(1:2, 1:1), c(FALSE, TRUE))
  expect_identical(detected(0:3, c(NA, NA, 2, 2)), c(FALSE, TRUE, FALSE, TRUE))
})
test_that("punctuate_strings", {
  # Oxford-free list punctuation with a configurable final conjunction
  expect_identical(punctuate_strings(c("x")), "x")
  expect_identical(punctuate_strings(c("x", "y")), "x or y")
  expect_identical(punctuate_strings(c("x", "y", "z")), "x, y or z")
  expect_identical(punctuate_strings(c("x", "y", "z", "a")), "x, y, z or a")
  expect_identical(punctuate_strings(c("x", "y", "z", "a"), "and"), "x, y, z and a")
})
test_that("add_missing_columns", {
  data(ccme)
  # non-data.frame input errors
  expect_error(add_missing_columns(1))
  x <- add_missing_columns(ccme, list(Test = NA_real_), messages = FALSE)
  # NOTE(review): expect_is() is deprecated in testthat 3e; consider
  # expect_s3_class() when the package moves to the 3rd edition
  expect_is(x, "data.frame")
  expect_equal(colnames(x), c(colnames(ccme), "Test"))
  expect_message(add_missing_columns(ccme, list(Test = NA_real_), messages = TRUE))
  # adding a column that already exists is a no-op
  expect_equal(ccme, add_missing_columns(ccme, list(Date = as.Date("2000-01-01")), messages = FALSE))
})
test_that("delete_rows_with_certain_values", {
  x <- data.frame(X = c(1, 2, NA, 4, NA), Y = c(1, NA, NA, 4, 5), Z = 1:5)
  expect_message(delete_rows_with_certain_values(x, list("X", "Y"), messages = TRUE))
  # separate list elements: drop rows where EITHER column is NA
  z <- delete_rows_with_certain_values(x, list("X", "Y"), messages = FALSE)
  expect_identical(x[!is.na(x$X) & !is.na(x$Y), , drop = FALSE], z)
  # combined element: drop rows where BOTH columns are NA
  z <- delete_rows_with_certain_values(x, list(c("X", "Y")), messages = FALSE)
  expect_identical(x[!(is.na(x$X) & is.na(x$Y)), , drop = FALSE], z)
})
test_that("is_color", {
  expect_true(is_color("black"))
  expect_false(is_color("Date"))
})
|
# data preparation for baby mental life study 2
library(tidyverse)
# data prep ----
# load in de-identified raw data
d0 <- read.csv("../data/deidentified/baby_mental_life_s2_data.csv") %>% select(-X)
# make question key
s2_question_key <- d0[1,] %>%
t() %>%
data.frame() %>%
rownames_to_column("question_qualtrics") %>%
rename("question_text" = X1) %>%
mutate(question = recode(question_qualtrics,
"Duration..in.seconds." = "Duration",
"Q2" = "Age",
"Q3" = "GenderSex",
"Q3_3_TEXT" = "GenderSex_fillIn",
"Q4" = "EnglishProf",
"Q5" = "FirstLang",
"Q5_2_TEXT" = "FirstLang_fillIn",
"Q40" = "RaceEthnicity",
"Q40_10_TEXT" = "RaceEthnicity_fillIn",
"Q41" = "Education",
"Q42" = "Income",
"Q43" = "MaritalStatus",
"Q43_6_TEXT" = "MaritalStatus_fillIn",
"Q44" = "HouseholdSize",
"Q45" = "Parent",
"Q47" = "ChildrenNumber",
"Q48" = "ChildrenYoungestAge",
"Q48_1_TEXT" = "ChildrenYoungestAge_fillIn1",
"Q48_2_TEXT" = "ChildrenYoungestAge_fillIn2",
"Q49" = "ChildrenOldestAge",
"Q49_1_TEXT" = "ChildrenOldestAge_fillIn1",
"Q49_2_TEXT" = "ChildrenOldestAge_fillIn2",
"Q50" = "Attention",
"Q51" = "Comments",
.default = question_qualtrics),
question = case_when(grepl("the following questions", question_text) ~
gsub("^.*extent is a ", "", question_text),
TRUE ~ question),
question = case_when(grepl("capable of...", question_text) ~
gsub("capable of... ", "", tolower(question)),
TRUE ~ question),
question = gsub(" ", "_", question),
question = gsub("'", "", question),
question = gsub("5-year-old_-_", "target60mo_", question),
question = gsub("4-year-old_-_", "target48mo_", question),
question = gsub("3-year-old_-_", "target36mo_", question),
question = gsub("2-year-old_-_", "target24mo_", question),
question = gsub("18-month-old_-_", "target18mo_", question),
question = gsub("12-month-old_-_", "target12mo_", question),
question = gsub("9-month-old_-_", "target09mo_", question),
question = gsub("6-month-old_-_", "target06mo_", question),
question = gsub("4-month-old_-_", "target04mo_", question),
question = gsub("3-month-old_-_", "target03mo_", question),
question = gsub("2-month-old_-_", "target02mo_", question),
question = gsub("1-month-old_-_", "target01mo_", question),
question = gsub("4-day-old_-_", "target0Xmo_", question),
question = gsub("newborn_-_", "target00mo_", question)) %>%
mutate(question = gsub("-", "_", question),
question = gsub(" \\(for_example,_smooth,_rough\\)", "", question))
# rename questions
d1 <- d0 %>%
# get rid of extra info in first two rows
filter(!is.na(as.numeric(as.character(Q2)))) %>%
gather(question_qualtrics, response, -c(ResponseId, duplicateGPS)) %>%
left_join(s2_question_key %>% select(question_qualtrics, question)) %>%
select(-question_qualtrics) %>%
spread(question, response)
# implement inclusion/exclusion criteria
d2 <- d1 %>%
filter(Age >= 18, Age <= 45,
EnglishProf %in% c("Advanced", "Superior"),
`target04mo_choose_seventy_one` == 71,
`target0Xmo_please_select_ninety_two` == 92,
`target12mo_set_this_answer_to_zero` == 0,
`target36mo_move_the_slider_to_fifty` == 50,
Attention == "Yes")
# remove people with another identical set of GPS coordinates among people who passed attention checks AS DESIRED
d3 <- d2 %>%
# filter(duplicateGPS == F) %>%
select(-duplicateGPS)
# recode variables & drop extraneous variables
# NOTE(review): funs() is deprecated in newer dplyr; the across() or
# list(~ .) forms are the modern equivalents.
d4 <- d3 %>%
  select(-c(EndDate, Finished,
            payment, Progress,
            RecordedDate, StartDate, Status,
            timeEstimate, UserLanguage)) %>%
  # convert all rating and numeric demographic columns from character
  mutate_at(vars(c(starts_with("target"), Age, ChildrenNumber,
                   ChildrenOldestAge_fillIn1, ChildrenOldestAge_fillIn2,
                   ChildrenYoungestAge_fillIn1, ChildrenYoungestAge_fillIn2,
                   Duration, HouseholdSize)),
            funs(as.numeric(.))) %>%
  # ordered factors for the demographic scales
  mutate(Education = factor(Education,
                            levels = c("No schooling completed",
                                       "Nursery school to 8th grade",
                                       "Some high school, no diploma",
                                       "High school graduate, diploma or equivalent (including GED)",
                                       "Some college credit, no degree",
                                       "Trade school, technical school, or vocational school",
                                       "Associate's degree (for example, AA, AS)",
                                       "Bachelor's degree (for example, BA, BS)",
                                       "Master's degree (for example, MA, MS)",
                                       "Doctor or professional degree (for example, PhD, JD, MD, MBA)")),
         Income = factor(Income,
                         levels = c("$5,001 - 15,000",
                                    "$15,001 - 30,000",
                                    "$30,001 - 60,000",
                                    "$60,001 - 90,000",
                                    "$90,001 - 150,000",
                                    "Greater than $150,000",
                                    "Prefer not to say")),
         Parent = factor(Parent,
                         levels = c("No", "Yes")))
# remove intermediate datasets
rm(d0, d1, d2, d3)
# make useful datasets ----
# final dataset with all measured variables
# NOTE(review): d2 is intentionally reused here as the name of the final
# dataset (the earlier d2 was removed above).
d2 <- d4 %>% distinct()
# remove intermediate datasets
rm(d4)
# demographic information
#
# Helper: map an age in years onto the coarse bins used for reporting.
# NA input stays NA (matches the original nested ifelse() behaviour).
bin_years <- function(y) {
  case_when(is.na(y) ~ NA_character_,
            y < 1 ~ "< 1 year",
            y < 3 ~ "1 - 3 years",
            y < 5 ~ "3 - 5 years",
            y < 10 ~ "5 - 10 years",
            y < 18 ~ "10 - 18 years",
            TRUE ~ "> 18 years")
}

# Helper: collapse the three-part child-age response (units choice +
# months fill-in + years fill-in) into a single label. Special non-numeric
# responses are passed through verbatim; any other unrecognised status
# becomes the literal string "NA" (as in the original code).
collapse_child_age <- function(status, months, years, special) {
  case_when(status %in% special ~ status,
            status == "In months:" ~ bin_years(as.numeric(months) / 12),
            status == "In years:" ~ bin_years(as.numeric(years)),
            TRUE ~ "NA")
}

# verbatim special responses for the oldest/youngest child questions
oldest_special <- c("My oldest child has not yet been born (I am/my partner is pregnant)",
                    "My oldest child is deceased",
                    "Prefer not to say")
youngest_special <- c("My youngest child has not yet been born (I am/my partner is pregnant)",
                      "My youngest child is deceased",
                      "Prefer not to say")
age_bin_levels <- c("< 1 year", "1 - 3 years", "3 - 5 years",
                    "5 - 10 years", "10 - 18 years", "> 18 years")

d2_demo <- d2 %>%
  select(ResponseId, Duration,
         Age, starts_with("GenderSex"), starts_with("RaceEthnicity"),
         starts_with("FirstLang"),
         Education, Income, HouseholdSize,
         starts_with("MaritalStatus"),
         Parent, starts_with("Children"),
         Comments) %>%
  # respondents who selected more than one race/ethnicity -> "Multiple"
  mutate(RaceEthnicity_collapse = ifelse(grepl(",([A-Za-z])", RaceEthnicity),
                                         "Multiple", RaceEthnicity)) %>%
  mutate(ChildrenOldestAge_collapse = collapse_child_age(
    ChildrenOldestAge,
    ChildrenOldestAge_fillIn1, ChildrenOldestAge_fillIn2,
    oldest_special)) %>%
  mutate(ChildrenOldestAge_collapse = factor(
    ChildrenOldestAge_collapse,
    levels = c(oldest_special[1], age_bin_levels,
               oldest_special[2], oldest_special[3]))) %>%
  mutate(ChildrenYoungestAge_collapse = collapse_child_age(
    ChildrenYoungestAge,
    ChildrenYoungestAge_fillIn1, ChildrenYoungestAge_fillIn2,
    youngest_special)) %>%
  # BUG FIX: the original factor levels read "My Youngest child ..."
  # (capital Y), which never matched the lowercase response strings and
  # silently converted those responses to NA.
  mutate(ChildrenYoungestAge_collapse = factor(
    ChildrenYoungestAge_collapse,
    levels = c(youngest_special[1], age_bin_levels,
               youngest_special[2], youngest_special[3])))
# all assessments of ALL TARGETS, ResponseId as rownames:
# wide matrix-like data frame with one row per (participant, target age)
# and one column per capacity rating; attention-check items are dropped.
# NOTE(review): gather/spread are superseded by pivot_longer/pivot_wider.
d2_all <- d2 %>%
  select(ResponseId, starts_with("target"),
         -c(contains("seventy"), contains("fifty"),
            contains("zero"), contains("ninety"), contains("please"))) %>%
  gather(question, response, -ResponseId) %>%
  # split "targetXXmo_capacity" into its target tag and capacity name
  mutate(target = gsub("_.*$", "", question),
         capacity = gsub("target..mo_", "", question),
         subid = paste(ResponseId, target, sep = "_")) %>%
  select(-ResponseId, -question, -target) %>%
  spread(capacity, response) %>%
  column_to_rownames("subid")
| /code/data_prep_s2.R | no_license | kgweisman/baby_mental_life_ms | R | false | false | 11,786 | r | # data preparation for baby mental life study 2
library(tidyverse)
# data prep ----
# load in de-identified raw data
d0 <- read.csv("../data/deidentified/baby_mental_life_s2_data.csv") %>% select(-X)
# make question key
# Build a lookup mapping each Qualtrics column name (e.g. "Q2") to a readable
# variable name. Row 1 of d0 holds Qualtrics' question text, so transpose that
# row into a two-column frame (question_qualtrics, question_text) and derive
# the readable names from it: demographics are hand-recoded, capacity items
# are parsed out of the question text and prefixed "target<AGE>mo_".
s2_question_key <- d0[1,] %>%
t() %>%
data.frame() %>%
rownames_to_column("question_qualtrics") %>%
rename("question_text" = X1) %>%
# Hand-written names for the demographic / housekeeping items.
mutate(question = recode(question_qualtrics,
"Duration..in.seconds." = "Duration",
"Q2" = "Age",
"Q3" = "GenderSex",
"Q3_3_TEXT" = "GenderSex_fillIn",
"Q4" = "EnglishProf",
"Q5" = "FirstLang",
"Q5_2_TEXT" = "FirstLang_fillIn",
"Q40" = "RaceEthnicity",
"Q40_10_TEXT" = "RaceEthnicity_fillIn",
"Q41" = "Education",
"Q42" = "Income",
"Q43" = "MaritalStatus",
"Q43_6_TEXT" = "MaritalStatus_fillIn",
"Q44" = "HouseholdSize",
"Q45" = "Parent",
"Q47" = "ChildrenNumber",
"Q48" = "ChildrenYoungestAge",
"Q48_1_TEXT" = "ChildrenYoungestAge_fillIn1",
"Q48_2_TEXT" = "ChildrenYoungestAge_fillIn2",
"Q49" = "ChildrenOldestAge",
"Q49_1_TEXT" = "ChildrenOldestAge_fillIn1",
"Q49_2_TEXT" = "ChildrenOldestAge_fillIn2",
"Q50" = "Attention",
"Q51" = "Comments",
.default = question_qualtrics),
# Capacity items: strip the instruction preamble, keep "<target> - <capacity>".
question = case_when(grepl("the following questions", question_text) ~
gsub("^.*extent is a ", "", question_text),
TRUE ~ question),
# NOTE(review): the dots in "capable of..." are regex wildcards, not literal
# periods -- this happens to match the intended text, but confirm.
question = case_when(grepl("capable of...", question_text) ~
gsub("capable of... ", "", tolower(question)),
TRUE ~ question),
question = gsub(" ", "_", question),
question = gsub("'", "", question),
# One substitution per target age; e.g. "3-year-old_-_X" -> "target36mo_X".
# The 4-day-old target gets the sentinel code "0Xmo".
question = gsub("5-year-old_-_", "target60mo_", question),
question = gsub("4-year-old_-_", "target48mo_", question),
question = gsub("3-year-old_-_", "target36mo_", question),
question = gsub("2-year-old_-_", "target24mo_", question),
question = gsub("18-month-old_-_", "target18mo_", question),
question = gsub("12-month-old_-_", "target12mo_", question),
question = gsub("9-month-old_-_", "target09mo_", question),
question = gsub("6-month-old_-_", "target06mo_", question),
question = gsub("4-month-old_-_", "target04mo_", question),
question = gsub("3-month-old_-_", "target03mo_", question),
question = gsub("2-month-old_-_", "target02mo_", question),
question = gsub("1-month-old_-_", "target01mo_", question),
question = gsub("4-day-old_-_", "target0Xmo_", question),
question = gsub("newborn_-_", "target00mo_", question)) %>%
# Final cleanup: remaining hyphens become underscores; drop a parenthetical
# example that survived in one capacity name.
mutate(question = gsub("-", "_", question),
question = gsub(" \\(for_example,_smooth,_rough\\)", "", question))
# rename questions
# Long-to-wide round trip that swaps the Qualtrics column names for the
# readable names built in s2_question_key above.
d1 <- d0 %>%
# get rid of extra info in first two rows
# (Qualtrics exports carry question text / import ids in the leading rows;
# real participant rows have a numeric age in Q2.)
filter(!is.na(as.numeric(as.character(Q2)))) %>%
gather(question_qualtrics, response, -c(ResponseId, duplicateGPS)) %>%
left_join(s2_question_key %>% select(question_qualtrics, question)) %>%
select(-question_qualtrics) %>%
spread(question, response)
# implement inclusion/exclusion criteria
# Keep adults 18-45 with advanced English proficiency who passed all four
# embedded slider checks and the final self-reported attention item.
d2 <- d1 %>%
filter(Age >= 18, Age <= 45,
EnglishProf %in% c("Advanced", "Superior"),
`target04mo_choose_seventy_one` == 71,
`target0Xmo_please_select_ninety_two` == 92,
`target12mo_set_this_answer_to_zero` == 0,
`target36mo_move_the_slider_to_fifty` == 50,
Attention == "Yes")
# remove people with another identical set of GPS coordinates among people who passed attention checks AS DESIRED
# NOTE(review): the duplicate-GPS exclusion is deliberately disabled (left
# commented out); only the helper column is dropped here.
d3 <- d2 %>%
# filter(duplicateGPS == F) %>%
select(-duplicateGPS)
# recode variables & drop extraneous variables
# Drop Qualtrics bookkeeping columns, coerce the numeric items (sliders, ages,
# counts, duration) from character to numeric, and give the ordered
# demographic items explicit factor levels.
d4 <- d3 %>%
  select(-c(EndDate, Finished,
            payment, Progress,
            RecordedDate, StartDate, Status,
            timeEstimate, UserLanguage)) %>%
  # FIX: funs() was deprecated in dplyr 0.8.0 and is now defunct; the
  # list(~ ...) lambda form is the drop-in replacement for mutate_at().
  mutate_at(vars(c(starts_with("target"), Age, ChildrenNumber,
                   ChildrenOldestAge_fillIn1, ChildrenOldestAge_fillIn2,
                   ChildrenYoungestAge_fillIn1, ChildrenYoungestAge_fillIn2,
                   Duration, HouseholdSize)),
            list(~ as.numeric(.))) %>%
  mutate(Education = factor(Education,
                            levels = c("No schooling completed",
                                       "Nursery school to 8th grade",
                                       "Some high school, no diploma",
                                       "High school graduate, diploma or equivalent (including GED)",
                                       "Some college credit, no degree",
                                       "Trade school, technical school, or vocational school",
                                       "Associate's degree (for example, AA, AS)",
                                       "Bachelor's degree (for example, BA, BS)",
                                       "Master's degree (for example, MA, MS)",
                                       "Doctor or professional degree (for example, PhD, JD, MD, MBA)")),
         Income = factor(Income,
                         levels = c("$5,001 - 15,000",
                                    "$15,001 - 30,000",
                                    "$30,001 - 60,000",
                                    "$60,001 - 90,000",
                                    "$90,001 - 150,000",
                                    "Greater than $150,000",
                                    "Prefer not to say")),
         Parent = factor(Parent,
                         levels = c("No", "Yes")))
# remove intermediate datasets
rm(d0, d1, d2, d3)
# make useful datasets ----
# final dataset with all measured variables
# distinct() guards against fully duplicated export rows. Note this re-uses
# the name d2, overwriting the inclusion-filtered frame above (intentional;
# d4 was derived from it).
d2 <- d4 %>% distinct()
# remove intermediate datasets
rm(d4)
# demographic information
# One row per participant with demographic variables plus three derived
# columns:
#  * RaceEthnicity_collapse: multi-select responses become "Multiple"
#  * Children{Oldest,Youngest}Age_collapse: free-text ages binned into ordered
#    brackets (fillIn1 holds months, fillIn2 holds years)
# NOTE(review): the case_when catch-alls produce the *string* "NA", not a real
# NA -- confirm downstream code expects that.
d2_demo <- d2 %>%
  select(ResponseId, Duration,
         Age, starts_with("GenderSex"), starts_with("RaceEthnicity"),
         starts_with("FirstLang"),
         Education, Income, HouseholdSize,
         starts_with("MaritalStatus"),
         Parent, starts_with("Children"),
         Comments) %>%
  # A comma followed by a letter means more than one box was checked.
  mutate(RaceEthnicity_collapse = ifelse(grepl(",([A-Za-z])", RaceEthnicity),
                                         "Multiple", RaceEthnicity)) %>%
  mutate(ChildrenOldestAge_collapse = case_when(
    ChildrenOldestAge %in% c("My oldest child has not yet been born (I am/my partner is pregnant)", "My oldest child is deceased", "Prefer not to say") ~ ChildrenOldestAge,
    ChildrenOldestAge == "In months:" ~
      ifelse(as.numeric(ChildrenOldestAge_fillIn1)/12 < 1,
             "< 1 year",
             ifelse(as.numeric(ChildrenOldestAge_fillIn1)/12 < 3,
                    "1 - 3 years",
                    ifelse(as.numeric(ChildrenOldestAge_fillIn1)/12 < 5,
                           "3 - 5 years",
                           ifelse(as.numeric(ChildrenOldestAge_fillIn1)/12 < 10,
                                  "5 - 10 years",
                                  ifelse(as.numeric(ChildrenOldestAge_fillIn1)/12 < 18,
                                         "10 - 18 years",
                                         "> 18 years"))))),
    ChildrenOldestAge == "In years:" ~
      ifelse(as.numeric(ChildrenOldestAge_fillIn2) < 1,
             "< 1 year",
             ifelse(as.numeric(ChildrenOldestAge_fillIn2) < 3,
                    "1 - 3 years",
                    ifelse(as.numeric(ChildrenOldestAge_fillIn2) < 5,
                           "3 - 5 years",
                           ifelse(as.numeric(ChildrenOldestAge_fillIn2) < 10,
                                  "5 - 10 years",
                                  ifelse(as.numeric(ChildrenOldestAge_fillIn2) < 18,
                                         "10 - 18 years",
                                         "> 18 years"))))),
    TRUE ~ "NA")) %>%
  mutate(ChildrenOldestAge_collapse =
           factor(ChildrenOldestAge_collapse,
                  levels = c("My oldest child has not yet been born (I am/my partner is pregnant)",
                             "< 1 year",
                             "1 - 3 years",
                             "3 - 5 years",
                             "5 - 10 years",
                             "10 - 18 years",
                             "> 18 years",
                             "My oldest child is deceased",
                             "Prefer not to say"))) %>%
  mutate(ChildrenYoungestAge_collapse = case_when(
    ChildrenYoungestAge %in% c("My youngest child has not yet been born (I am/my partner is pregnant)", "My youngest child is deceased", "Prefer not to say") ~ ChildrenYoungestAge,
    ChildrenYoungestAge == "In months:" ~
      ifelse(as.numeric(ChildrenYoungestAge_fillIn1)/12 < 1,
             "< 1 year",
             ifelse(as.numeric(ChildrenYoungestAge_fillIn1)/12 < 3,
                    "1 - 3 years",
                    ifelse(as.numeric(ChildrenYoungestAge_fillIn1)/12 < 5,
                           "3 - 5 years",
                           ifelse(as.numeric(ChildrenYoungestAge_fillIn1)/12 < 10,
                                  "5 - 10 years",
                                  ifelse(as.numeric(ChildrenYoungestAge_fillIn1)/12 < 18,
                                         "10 - 18 years",
                                         "> 18 years"))))),
    ChildrenYoungestAge == "In years:" ~
      ifelse(as.numeric(ChildrenYoungestAge_fillIn2) < 1,
             "< 1 year",
             ifelse(as.numeric(ChildrenYoungestAge_fillIn2) < 3,
                    "1 - 3 years",
                    ifelse(as.numeric(ChildrenYoungestAge_fillIn2) < 5,
                           "3 - 5 years",
                           ifelse(as.numeric(ChildrenYoungestAge_fillIn2) < 10,
                                  "5 - 10 years",
                                  ifelse(as.numeric(ChildrenYoungestAge_fillIn2) < 18,
                                         "10 - 18 years",
                                         "> 18 years"))))),
    TRUE ~ "NA")) %>%
  # FIX: the two verbatim levels previously read "My Youngest child ..."
  # (capital Y) while case_when() above produces "My youngest child ...";
  # the mismatched levels were silently converted to NA by factor().
  mutate(ChildrenYoungestAge_collapse =
           factor(ChildrenYoungestAge_collapse,
                  levels = c("My youngest child has not yet been born (I am/my partner is pregnant)",
                             "< 1 year",
                             "1 - 3 years",
                             "3 - 5 years",
                             "5 - 10 years",
                             "10 - 18 years",
                             "> 18 years",
                             "My youngest child is deceased",
                             "Prefer not to say")))
# all assessments of ALL TARGETS, ResponseId as rownames
# Wide matrix of capacity ratings: one row per participant x target age
# (rownames "<ResponseId>_target<AGE>mo"), one column per mental capacity.
# The contains() terms drop the four attention-check sliders.
d2_all <- d2 %>%
select(ResponseId, starts_with("target"),
-c(contains("seventy"), contains("fifty"),
contains("zero"), contains("ninety"), contains("please"))) %>%
gather(question, response, -ResponseId) %>%
# Question names look like "target09mo_<capacity>"; split prefix/suffix.
# The ".." in the regex matches the two age characters (including "0X").
mutate(target = gsub("_.*$", "", question),
capacity = gsub("target..mo_", "", question),
subid = paste(ResponseId, target, sep = "_")) %>%
select(-ResponseId, -question, -target) %>%
spread(capacity, response) %>%
column_to_rownames("subid")
|
#' Get Owens Lake areas polygons from database
pull_onlake_polygons <- function(){
  # Every vertex of each dust control area polygon, reprojected to UTM 11N
  # (EPSG:26911), returned as one (x, y) row per vertex (invisible, as before).
  sql_parts <- c("SELECT dca.dust_control_area_id AS objectid, dca.dca_name, ",
                 "dca.bacm_type, dca.phase, ",
                 "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(dca.geom)).geom, 26911)) AS x, ",
                 "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(dca.geom)).geom, 26911)) AS y ",
                 "FROM info.dust_control_areas dca;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_sfwcrft_polygons <- function(){
  # Vertices of the SFWCRFT test-plot polygons (native projection, no
  # reprojection), one (x, y) row per vertex (invisible, as before).
  sql_parts <- c("SELECT sf.gid AS objectid, sf.dca, sf.treatment, sf.phase, ",
                 "ST_X((ST_DUMPPOINTS(sf.geom)).geom) AS x, ",
                 "ST_Y((ST_DUMPPOINTS(sf.geom)).geom) AS y ",
                 "FROM info.sfwcrft sf ")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_offlake_polygons <- function(){
  # Vertices (UTM 11N) of lakebed areas that are NOT dust control areas,
  # restricted to the "Off Lake" areas plus Keeler Dunes.
  query <- paste0("SELECT lb.lakebed_area_id AS objectid, lb.area_name, ",
                  "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
                  "AS x, ",
                  "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
                  "AS y ",
                  "FROM info.lakebed_areas lb ",
                  "LEFT JOIN info.dust_control_areas dcas ",
                  "ON lb.area_name=dcas.dca_name ",
                  "WHERE dcas.dca_name IS NULL;")
  df1 <- query_db("owenslake", query)
  # FIX: qualify filter() with dplyr:: -- this file lives in a package
  # (R/gis_functions.R) and must not rely on dplyr being attached; this also
  # matches the explicit dplyr:: calls in point_in_dca() below.
  df2 <- dplyr::filter(df1, grepl("Off Lake", area_name) |
                         area_name=='Keeler Dunes')
}
pull_all_polygons <- function(){
  # Vertices (UTM 11N) of every lakebed area polygon, one (x, y) row per
  # vertex (invisible, as before).
  sql_parts <- c("SELECT lb.lakebed_area_id AS objectid, lb.area_name, ",
                 "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
                 "AS x, ",
                 "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
                 "AS y ",
                 "FROM info.lakebed_areas lb;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_highways <- function(){
  # Highway vertices (UTM 11N). Vertices are re-ordered so each highway draws
  # as a continuous polyline: route 395 runs roughly north-south (sort by y),
  # the others east-west (sort by x).
  query <- paste0("SELECT name, ",
                  "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
                  "AS x, ",
                  "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
                  "AS y ",
                  "FROM info.highways;")
  df1 <- query_db("owenslake", query)
  # FIX: qualify the bare arrange()/filter() calls with dplyr:: -- package
  # code must not rely on dplyr being attached (consistent with
  # point_in_dca() below).
  df2 <- rbind(dplyr::arrange(dplyr::filter(df1, name==395), y),
               dplyr::arrange(dplyr::filter(df1, name!=395), x))
}
pull_2kmbuffer <- function(){
  # Vertices (UTM 11N) of the 2 km buffer polygon (invisible, as before).
  sql_parts <- c("SELECT id, ",
                 "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
                 "AS x, ",
                 "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
                 "AS y ",
                 "FROM info.buffer;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
#' Get Owens Lake DCA labels from database
pull_dca_labels <- function(){
  # Polygon centroid (UTM 11N) per lakebed area, for plot labelling
  # (invisible, as before).
  sql_parts <- c("SELECT area_name AS label, ",
                 "ST_X(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS x, ",
                 "ST_Y(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS y ",
                 "FROM info.lakebed_areas;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_onlake_labels <- function(){
  # Polygon centroid (UTM 11N) per dust control area, plus its BACM type
  # (invisible, as before).
  sql_parts <- c("SELECT dca_name AS label, bacm_type, ",
                 "ST_X(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS x, ",
                 "ST_Y(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS y ",
                 "FROM info.dust_control_areas;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_sfwcrft_labels <- function(){
  # Centroid per SFWCRFT test plot (native projection, no reprojection),
  # with treatment and phase (invisible, as before).
  sql_parts <- c("SELECT dca, treatment, phase, ",
                 "ST_X(ST_CENTROID(geom::geometry)) AS x, ",
                 "ST_Y(ST_CENTROID(geom::geometry)) AS y ",
                 "FROM info.sfwcrft sf ")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
pull_highways_labels <- function(){
  # Centroid per highway (native projection), used as a label anchor.
  query <- paste0("SELECT name, ",
                  "ST_X(ST_CENTROID(geom::geometry)) AS x, ",
                  "ST_Y(ST_CENTROID(geom::geometry)) AS y ",
                  "FROM info.highways;")
  df1 <- query_db("owenslake", query)
  # Nudge the label anchors away from the line centroids so they do not
  # overplot the highways (offsets in native units -- presumably metres;
  # confirm against the map output).
  # FIX: compute each logical row mask once instead of re-subsetting the
  # frame five times via the fragile df[mask, ]$col <- ... pattern.
  is136 <- df1$name == 136
  is395 <- df1$name == 395
  df1$x[is136] <- df1$x[is136] + 3000
  df1$y[is136] <- df1$y[is136] + 2000
  df1$x[is395] <- df1$x[is395] - 2000
  df1
}
#' Get Owens Lake shoreline polygon from database
pull_shoreline_polygon <- function(){
  # Vertices (UTM 11N) of the shoreline polygon, one (x, y) row per vertex
  # (invisible, as before).
  sql_parts <- c("SELECT shr.source AS area_name, ",
                 "ST_X(ST_TRANSFORM((ST_DUMPPOINTS(shr.geom)).geom, 26911)) AS x, ",
                 "ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(shr.geom)).geom, 26911)) AS y ",
                 "FROM info.shoreline shr;")
  invisible(query_db("owenslake", paste0(sql_parts, collapse = "")))
}
#' Get polygon data from shapefile
#'
#' @param dsn String. Path to shapefile directory.
#' @param layer String. Name of shapefile.
#' @param proj_string String. CRS projection string in "proj4string" format.
#' @return Data frame with treatment area polygon data: the shapefile's
#'   attribute table plus two list-columns (label points and outer-ring
#'   coordinate matrices, one entry per feature).
shape_data <- function(dsn, layer, proj_string){
  # NOTE(review): rgdal was retired from CRAN in 2023; consider migrating to
  # sf::st_read() when this code is next touched.
  dsn <- path.expand(dsn)
  areas <- rgdal::readOGR(dsn=dsn, layer=layer, verbose=FALSE)
  areas <- sp::spTransform(areas, proj_string)
  dat <- areas@data
  # One label point and one outer-ring coordinate matrix per feature.
  # FIX: seq_along() instead of c(1:length(...)), which misbehaves on an
  # empty polygon list; also index directly with [[k]].
  labpnts <- lapply(seq_along(areas@polygons),
                    function(k) areas@polygons[[k]]@labpt)
  polypnts <- lapply(seq_along(areas@polygons),
                     function(k) areas@polygons[[k]]@Polygons[[1]]@coords)
  # I() keeps the list-columns intact inside the data frame.
  area_data <- cbind(dat, I(labpnts), I(polypnts))
  colnames(area_data) <- tolower(colnames(area_data))
  area_data
}
#' Get polygon plot points from shapefile
#'
#' Shapefile must have first attribute be a unique identifier for the area.
#'
#' @param dsn String. Path to shapefile directory.
#' @param layer String. Name of shapefile.
#' @param proj_string String. CRS projection string in "proj4string" format.
#' @return Data frame of vertex rows (x, y, dca, polyid), one polyid per ring.
extract_polygons <- function(dsn, layer, proj_string){
  dsn <- path.expand(dsn)
  areas <- rgdal::readOGR(dsn=dsn, layer=layer, verbose=FALSE)
  areas <- sp::spTransform(areas, proj_string)
  # FIX: accumulate the per-ring frames in a list and bind once at the end
  # instead of growing a data frame with rbind() inside the loop (quadratic
  # copying); also use seq_along() instead of 1:length().
  pieces <- list()
  polyid <- 1
  for (i in seq_along(areas@polygons)){
    # First attribute column is the area identifier.
    dca <- areas@data[[1]][i]
    rings <- areas@polygons[[i]]@Polygons
    for (j in seq_along(rings)){
      pnts <- as.data.frame(rings[[j]]@coords)
      names(pnts) <- c('x', 'y')
      pnts$dca <- dca
      pnts$polyid <- polyid            # globally unique ring id
      polyid <- polyid + 1
      pieces[[length(pieces) + 1]] <- pnts
    }
  }
  if (length(pieces) == 0) {
    # Same empty value the original seeded the loop with.
    return(data.frame(x=c(), y=c(), dca=c(), polyid=c()))
  }
  do.call(rbind, pieces)
}
#' Build data frame from multiple lists contained in a data frame.
#'
#' @param df_in Data frame.
#' @param list_ind Integer. Column index of lists to process.
#' @param id_ind Integer. Column index of object id to be associated with all
#'  elements of corresponding list.
#' @return Data frame with columns x, y, objectid (one row per coordinate
#'   pair; zero rows when \code{df_in} is empty).
lists2df <- function(df_in, list_ind, id_ind){
  # Each element of the list-column holds k x-values followed by k y-values
  # (matrix(..., ncol = 2) splits them); every resulting row is tagged with
  # that input row's id.
  # FIX: build all pieces first and rbind once -- the original grew the
  # result with rbind() inside the loop (quadratic copying) and used
  # 1:nrow(), which breaks on a zero-row input.
  pieces <- lapply(seq_len(nrow(df_in)), function(i) {
    piece <- data.frame(matrix(df_in[, list_ind][[i]], ncol = 2))
    colnames(piece)[1:2] <- c("x", "y")
    piece$objectid <- rep(df_in[i, id_ind], nrow(piece))
    piece
  })
  if (length(pieces) == 0) {
    return(data.frame(x = numeric(), y = numeric(), objectid = integer()))
  }
  do.call(rbind, pieces)
}
point_in_dca <- function(vec_in, poly_df, return_dca=TRUE){
  # Locate the polygon containing a point.
  #
  # vec_in: numeric length-2 vector c(x, y).
  # poly_df: polygon vertex frame (objectid, x, y, dca_name, ...), e.g. the
  #   output of pull_onlake_polygons().
  # return_dca: TRUE -> return the matching polygon's dca_name; FALSE -> its
  #   objectid.
  # Returns NA when no polygon strictly contains the point. Note only
  # point.in.polygon code 1 (strict interior) counts; edge/vertex hits
  # (codes 2/3) are treated as misses, exactly as before.
  for (j in unique(poly_df$objectid)){
    # FIX: filter once per polygon instead of twice per call, and qualify
    # the previously bare filter() that silently depended on dplyr being
    # attached.
    poly_j <- dplyr::filter(poly_df, objectid==j)
    polycheck <- sp::point.in.polygon(vec_in[1], vec_in[2],
                                      poly_j$x, poly_j$y)
    if (polycheck==1){
      # FIX: ifelse(cond, return(a), return(b)) abused the vectorized
      # ifelse() for control flow (it only worked through lazy argument
      # evaluation); a plain if/else is the explicit equivalent.
      if (return_dca) {
        return(poly_j$dca_name[1])
      } else {
        return(j)
      }
    }
  }
  return(NA)
}
| /R/gis_functions.R | no_license | jwbannister/aiRsci | R | false | false | 7,712 | r | #' Get Owens Lake areas polygons from database
pull_onlake_polygons <- function(){
query <- paste0("SELECT dca.dust_control_area_id AS objectid, dca.dca_name, ",
"dca.bacm_type, dca.phase, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(dca.geom)).geom, 26911)) AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(dca.geom)).geom, 26911)) AS y ",
"FROM info.dust_control_areas dca;")
df1 <- query_db("owenslake", query)
}
pull_sfwcrft_polygons <- function(){
query <- paste0("SELECT sf.gid AS objectid, sf.dca, sf.treatment, sf.phase, ",
"ST_X((ST_DUMPPOINTS(sf.geom)).geom) AS x, ",
"ST_Y((ST_DUMPPOINTS(sf.geom)).geom) AS y ",
"FROM info.sfwcrft sf ")
df1 <- query_db("owenslake", query)
}
pull_offlake_polygons <- function(){
query <- paste0("SELECT lb.lakebed_area_id AS objectid, lb.area_name, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
"AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
"AS y ",
"FROM info.lakebed_areas lb ",
"LEFT JOIN info.dust_control_areas dcas ",
"ON lb.area_name=dcas.dca_name ",
"WHERE dcas.dca_name IS NULL;")
df1 <- query_db("owenslake", query)
df2 <- df1 %>% filter(grepl("Off Lake", area_name) |
area_name=='Keeler Dunes')
}
pull_all_polygons <- function(){
query <- paste0("SELECT lb.lakebed_area_id AS objectid, lb.area_name, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
"AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(lb.geom)).geom, 26911)) ",
"AS y ",
"FROM info.lakebed_areas lb;")
df1 <- query_db("owenslake", query)
}
pull_highways <- function(){
query <- paste0("SELECT name, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
"AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
"AS y ",
"FROM info.highways;")
df1 <- query_db("owenslake", query)
df2 <- rbind(arrange(filter(df1, name==395), y),
arrange(filter(df1, name!=395), x))
}
pull_2kmbuffer <- function(){
query <- paste0("SELECT id, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
"AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(geom)).geom, 26911)) ",
"AS y ",
"FROM info.buffer;")
df1 <- query_db("owenslake", query)
}
#' Get Owens Lake DCA labels from database
pull_dca_labels <- function(){
query <- paste0("SELECT area_name AS label, ",
"ST_X(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS x, ",
"ST_Y(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS y ",
"FROM info.lakebed_areas;")
df1 <- query_db("owenslake", query)
}
pull_onlake_labels <- function(){
query <- paste0("SELECT dca_name AS label, bacm_type, ",
"ST_X(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS x, ",
"ST_Y(ST_CENTROID(ST_TRANSFORM(geom::geometry, 26911))) AS y ",
"FROM info.dust_control_areas;")
df1 <- query_db("owenslake", query)
}
pull_sfwcrft_labels <- function(){
query <- paste0("SELECT dca, treatment, phase, ",
"ST_X(ST_CENTROID(geom::geometry)) AS x, ",
"ST_Y(ST_CENTROID(geom::geometry)) AS y ",
"FROM info.sfwcrft sf ")
df1 <- query_db("owenslake", query)
}
pull_highways_labels <- function(){
query <- paste0("SELECT name, ",
"ST_X(ST_CENTROID(geom::geometry)) AS x, ",
"ST_Y(ST_CENTROID(geom::geometry)) AS y ",
"FROM info.highways;")
df1 <- query_db("owenslake", query)
df1[df1$name==136, ]$x <- df1[df1$name==136, ]$x + 3000
df1[df1$name==136, ]$y <- df1[df1$name==136, ]$y + 2000
df1[df1$name==395, ]$x <- df1[df1$name==395, ]$x - 2000
df1
}
#' Get Owens Lake shoreline polygon from database
pull_shoreline_polygon <- function(){
query <- paste0("SELECT shr.source AS area_name, ",
"ST_X(ST_TRANSFORM((ST_DUMPPOINTS(shr.geom)).geom, 26911)) AS x, ",
"ST_Y(ST_TRANSFORM((ST_DUMPPOINTS(shr.geom)).geom, 26911)) AS y ",
"FROM info.shoreline shr;")
df1 <- query_db("owenslake", query)
}
#' Get polygon data from shapefile
#'
#' @param dsn String. Path to shapefile directory.
#' @param layer String. Name of shapefile.
#' @param proj_string String. CRS projection string in "proj4string" format.
#' @return Data frame with treatment area polygon data.
shape_data <- function(dsn, layer, proj_string){
dsn <- path.expand(dsn)
areas <- rgdal::readOGR(dsn=dsn, layer=layer, verbose=FALSE)
areas <- sp::spTransform(areas, proj_string)
dat <- areas@data
labpnts <- lapply(c(1:length(areas@polygons)),
function(x) areas@polygons[[x]]@labpt)
polypnts <- lapply(c(1:length(areas@polygons)),
function(x) areas@polygons[x][[1]]@Polygons[[1]]@coords)
area_data <- cbind(dat, I(labpnts), I(polypnts))
colnames(area_data) <- tolower(colnames(area_data))
area_data
}
#' Get polygon plot points from shapefile
#'
#' Shapefile must have first attribute be a unique identifier for the area.
#'
#' @param dsn String. Path to shapefile directory.
#' @param layer String. Name of shapefile.
#' @param proj_string String. CRS projection string in "proj4string" format.
extract_polygons <- function(dsn, layer, proj_string){
dsn <- path.expand(dsn)
areas <- rgdal::readOGR(dsn=dsn, layer=layer, verbose=FALSE)
areas <- sp::spTransform(areas, proj_string)
polypnts <- data.frame(x=c(), y=c(), dca=c(), polyid=c())
polyid <- 1
for (i in 1:length(areas@polygons)){
dca <- areas@data[[1]][i]
for (j in 1:length(areas@polygons[[i]]@Polygons)){
pnts <- as.data.frame(areas@polygons[[i]]@Polygons[[j]]@coords)
names(pnts) <- c('x', 'y')
pnts$dca <- dca
pnts$polyid <- polyid
polyid <- polyid + 1
polypnts <- rbind(polypnts, pnts)
}
}
polypnts
}
#' Build data frame from multiple lists contained in a data frame.
#'
#' @param df_in Data frame.
#' @param list_ind Integer. Column index of lists to process.
#' @param id_ind Integer. Column index of object id to be associated with all
#' elements of corresponding list.
#' @return Data frame.
lists2df <- function(df_in, list_ind, id_ind){
df_out <- data.frame(x=numeric(), y=numeric(), objectid=integer())
for (i in 1:nrow(df_in)){
df1 <- data.frame(matrix(df_in[, list_ind][[i]], ncol=2))
df1$objectid <- rep(df_in[i, id_ind], nrow(df1))
colnames(df1)[1:2] <- c("x", "y")
df_out <- rbind(df_out, df1)
}
df_out
}
point_in_dca <- function(vec_in, poly_df, return_dca=T){
for (j in unique(poly_df$objectid)){
polycheck <- sp::point.in.polygon(vec_in[1], vec_in[2],
dplyr::filter(poly_df, objectid==j)$x,
dplyr::filter(poly_df, objectid==j)$y)
if (polycheck==1){
ifelse(return_dca, return(filter(poly_df, objectid==j)$dca_name[1]),
return(j))
}
}
return(NA)
}
|
## Logo home page
### om_skeleton home.R
### Tom Weishaar - Oct 2017 - v0.1
### Skeleton for multi-page, multi-user web site in Shiny, with user authentication
# Reset the page's action trigger whenever this page script is sourced.
# FIX: use <- for assignment (style consistency; `=` was used here and below).
rv$trigger <- 0
# Render the page body: logged-in users (user$sp) see their own user record;
# anonymous visitors see the clickable site logo linking back to ?home.
output$pageStub <- renderUI({
  # Touching rv$limn registers a reactive dependency so the page re-renders
  # whenever the app bumps rv$limn; the value itself is otherwise unused.
  x <- rv$limn
  if(page_debug_on) {
    cat(paste0("Rendering ", webpage$name, " v.", rv$limn, "\n"))
  }
  if(session$userData$user$sp) {
    tagList(
      HTML(paste0('<h4>You are logged in. This is your data:</h4>')),
      dataTableOutput("user")
    )
  } else {
    tagList(
      HTML(paste0("
<a href='?home'>
<img src='logo.png'>
</a>
"
      ))
    )
  }
})
# FIX: <- instead of = here as well.
output$user <- renderDataTable(session$userData$user)
| /Logo_page.R | permissive | Gresliebear/Vessel_Webapp | R | false | false | 724 | r | ## Logo home page
### om_skeleton home.R
### Tom Weishaar - Oct 2017 - v0.1
### Skeleton for multi-page, multi-user web site in Shiny, with user authentication
rv$trigger = 0
output$pageStub <- renderUI({
x = rv$limn
if(page_debug_on) {
cat(paste0("Rendering ", webpage$name, " v.", rv$limn, "\n"))
}
if(session$userData$user$sp) {
tagList(
HTML(paste0('<h4>You are logged in. This is your data:</h4>')),
dataTableOutput("user")
)
} else {
tagList(
HTML(paste0("
<a href='?home'>
<img src='logo.png'>
</a>
"
))
)
}
})
output$user = renderDataTable(session$userData$user)
|
\name{getWS}
\alias{getWS}
\title{Walk Score API Call}
\description{A function to perform the basic Walk Score API call.}
\usage{getWS(x, y, key)}
\arguments{
\item{x}{ longitude of query location (numeric) }
\item{y}{ latitude of query location (numeric) }
\item{key}{ your Walk Score API key (string), see Details below }
}
\details{Note that the call uses longitude and latitude coordinates and not addresses like the website interface. It is strongly recommended that Google Geolocation is used to convert addresses to coordinates because this is the method used by the Walk Score website, and will result in the same Walk Score as entering the address into the website interface. The function "geoloc" in this package is a tool for using the Google Geolocation API.}
\value{Returns an object of class \code{WalkScore}, essentially a list of the following elements:
\item{status}{ Status code of the request. Status of 1 indicates a successful call. See the Walk Score API page for interpretation of other codes. }
\item{walkscore}{ Walk Score of query location. }
\item{description}{ Qualitative description of location. }
\item{updated}{ Date and time of most recent update to this location's Walk Score. }
\item{snappedLong}{ grid point longitude to which the input was snapped to. }
\item{snappedLat}{ grid point latitude to which the input was snapped to. }
}
\references{ http://www.walkscore.com/professional/api.php }
\author{ John Whalen }
\note{ Visit www.walkscore.com for information on Walk Score and to obtain an API key }
\seealso{
\code{\link{geoloc}}}
\examples{
\dontrun{
getWS(-73.98496,40.74807,"your key")
}
} | /man/getWS.Rd | no_license | ajinkyaghorpade/walkscoreAPI | R | false | false | 1,720 | rd | \name{getWS}
\alias{getWS}
\title{Walk Score API Call}
\description{A function to perform the basic Walk Score API call.}
\usage{getWS(x, y, key)}
\arguments{
\item{x}{ longitude of query location (numeric) }
\item{y}{ latitude of query location (numeric) }
\item{key}{ your Walk Score API key (string), see Details below }
}
\details{Note that the call uses longitude and latitude coordintes and not addresses like the website interface. It is strongly recomended that Google Geolocation is used to convert addresses to coordinates because this is the method used by the Walk Score website, and will result in the same Walk Score as entering the address into the website interface. The function "geoloc" in this package is a tool for using the Google Geolocation API.}
\value{ Otherwise Returns an object of class \code{WalkScore}, basically a list of the following elements:
\item{status}{ Status code of the request. Status of 1 indicates a successful call. See the Walk Score API page for interpretation of other codes. }
\item{walkscore}{ Walk Score of query location. }
\item{description}{ Qualitative description of location. }
\item{updated}{ Date and time of most recent update to this location's Walk Score. }
\item{snappedLong}{ grid point longitude to which the input was snapped to. }
\item{snappedLat}{ grid point latitude to which the input was snapped to. }
}
\references{ http://www.walkscore.com/professional/api.php }
\author{ John Whalen }
\note{ Visit www.walkscore.com for information on Walk Score and to obtain an API key }
\seealso{
\code{\link{geoloc}}}
\examples{
\dontrun{
getWS(-73.98496,40.74807,"your key")
}
} |
library(shiny)
# UI definition: classic two-panel Shiny layout. The sidebar collects one
# numeric input ("id1"); the main panel shows its square and cube, which the
# matching server.R must render into outputs "oid1" and "oid2".
# NOTE(review): shinyUI()/pageWithSidebar() are legacy (pre-0.10) shiny APIs;
# fluidPage()/sidebarLayout() are the modern equivalents.
shinyUI(pageWithSidebar(
headerPanel("The application is really simple: just enter a numeric value to get its square and cube"),
sidebarPanel(
# Numeric input constrained to integers 0..10, default 0.
numericInput('id1', 'Your numeric input', 0, min = 0, max = 10, step = 1)
),
mainPanel(
h3('The square of what you entered!'),
verbatimTextOutput("oid1"),
h3('The cube of what you entered!'),
verbatimTextOutput("oid2")
)
))
| /ui.R | no_license | motazel/ExData_Plotting1 | R | false | false | 431 | r | library(shiny)
shinyUI(pageWithSidebar(
headerPanel("The application is really simple: just enter a numeric value to get its square and cube"),
sidebarPanel(
numericInput('id1', 'Your numeric input', 0, min = 0, max = 10, step = 1)
),
mainPanel(
h3('The square of what you entered!'),
verbatimTextOutput("oid1"),
h3('The cube of what you entered!'),
verbatimTextOutput("oid2")
)
))
|
library(saws)
### Name: clogistCalc
### Title: Conditional Logistic Regression fit
### Aliases: clogistCalc clogistInfo clogistLoglike
### Keywords: nonlinear
### ** Examples
# Fit a conditional logistic regression of tumour counts (NTUM of N) on two
# dietary covariates, clustered by litter, using the package's micefat data.
data(micefat)
cout<-clogistCalc(micefat$N,micefat$NTUM,micefat[,c("fatCal","totalCal")],micefat$cluster)
## usual model based variance
saws(cout,method="dm")
## sandwich based variance with small sample correction
s3<-saws(cout,method="d3")
s3
# print.default() shows the full list structure behind the saws print method.
print.default(s3)
| /data/genthat_extracted_code/saws/examples/clogistCalc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 445 | r | library(saws)
### Name: clogistCalc
### Title: Conditional Logistic Regression fit
### Aliases: clogistCalc clogistInfo clogistLoglike
### Keywords: nonlinear
### ** Examples
data(micefat)
cout<-clogistCalc(micefat$N,micefat$NTUM,micefat[,c("fatCal","totalCal")],micefat$cluster)
## usual model based variance
saws(cout,method="dm")
## sandwich based variance with small sample correction
s3<-saws(cout,method="d3")
s3
print.default(s3)
|
# Compression attempt for large sankey file.
# Exploratory script: greedy dictionary compression of a self-contained HTML
# widget -- frequent character n-grams get replaced by single spare bytes.
library(data.table)
library(ngram)
library(tm)
library(stylo)
# All single-byte characters (1-255) and the printable subset (33-126,
# 128-255) that can serve as substitution codes.
chars <- strsplit(rawToChar(as.raw(1:255)),"")[[1]]
chars2 <- strsplit(rawToChar(as.raw(c(33:126,128:255))),"")[[1]]
length(chars)
length(chars2)
dat <- readLines("ProviderNetworkFull.html")
dat2 <- paste(c(dat),collapse="\n")
# Remap spaces to "|".
# NOTE(review): this assumes "|" does not already occur in the file -- confirm.
dat2 <- gsub(" ","|",dat2)
# One character per vector element.
dat2e <- strsplit(dat2,"")[[1]]
unique(dat2e)
# Printable characters NOT used by the file: free codes for substitutions.
chars3 <- chars2[!chars2 %in% dat2e]
phrase <- paste(dat2e,collapse=" ")
phr <- substr(phrase,1,1e5)
this <- ngram(phr,2)
# State for the greedy replacement loop below.
newdat <- dat2e       # current character vector
newdatstr <- dat2     # current string form
size <- 11            # longest n-gram length to try
i <- 1                # index of next free substitution character
thresh <- 2e5         # minimum occurrence count worth replacing
# Empty substitution log ([0] drops the template row, keeping column types).
maptab <- data.table(String="",Char="",Count=0,Order=0)[0]
# Greedy dictionary compression: repeatedly find the most frequent n-gram
# (for n from `size` down to 2) and replace every occurrence with a single
# spare character from chars3, logging each substitution in maptab.
#
# FIX: the original expanded each n into a hand-written 10-arm switch() whose
# index arithmetic referenced `N` (the token count) -- but `N` was not defined
# until later in the script and was never refreshed as `newdat` shrank after
# each substitution. A single generic n-gram counter computes the same tables
# from the *current* token vector.
count_ngrams <- function(tokens, n) {
  len <- length(tokens)
  if (len < n) return(table(character(0)))
  starts <- seq_len(len - n + 1)
  grams <- tokens[starts]
  for (k in seq_len(n - 1)) {
    grams <- paste0(grams, tokens[starts + k])
  }
  table(grams)
}
while(size > 1) {
  ng <- count_ngrams(newdat, size)
  cat("----")
  if(max(ng) > thresh) {
    # Most frequent n-gram (ties broken by table/sort order).
    str <- tail(names(sort(ng)), 1)
    # FIX: fixed = TRUE -- the n-gram and its replacement are literal text,
    # and the data is full of regex metacharacters ("|", "{", "." ...).
    newdatstr <- gsub(str, chars3[i], newdatstr, fixed = TRUE)
    newdat <- strsplit(newdatstr, "")[[1]]
    maptab <- rbind(maptab, data.table(String=str, Char=chars3[i], Count=max(ng), Order=i))
    cat(paste0("Replaced ",max(ng)," occurrences of \"",str,"\" with ",chars3[i],". \n"))
    i <- i + 1
  } else {
    # Nothing frequent enough at this length; try shorter n-grams.
    size <- size - 1
    cat(paste0("No replacement. Only ",max(ng)," repeated ",size+1,"-grams. Reducing size to ",size,". \n"))
  }
}
# --- Exploratory n-gram scratch work below; several lines are broken. ---
ngrams <- list()
# NOTE(review): N is only defined HERE, after the loop above already used it.
N <- length(dat2e)
n1 <- table(dat2e)
n2 <- table(paste0(dat2e[-N],dat2e[-1]))
n3 <- table(paste0(head(dat2e,-2),dat2e[-c(1,N)],tail(dat2e,-2)))
n4 <- table(paste0(head(dat2e,-3),dat2e[-c(1,N-1,N)],dat2e[-c(1,2,N)],tail(dat2e,-3)))
# NOTE(review): `n` is never defined, so this loop errors if run; matrix()
# also just recycles dat2e into an arbitrary 6-column layout -- scratch code.
for (i in 2:n) {
mat <- matrix(dat2e,nrow=length(dat2e)+1,ncol=6)
table(paste0(mat[,1],mat[,2]))
ngrams[[i]] <- table(apply(mat,1,paste0))
}
n1 <- table(dat2e)
# NOTE(review): paste0() on a single vector is a no-op, so this simply
# redefines n2 as a plain character frequency table.
n2 <- table(paste0(dat2e))
# Need some sort of clustering, need to find common patterns that are repeated frequently (more frequent the better, longer is good too)
# savings equivalent to (length of segment - 1)*(number of occurrences)
| /Tools/TextFileCompression.R | no_license | chacemcneil/Personal | R | false | false | 5,220 | r | # Compression attempt for large sankey file.
library(data.table)
library(ngram)
library(tm)
library(stylo)
chars <- strsplit(rawToChar(as.raw(1:255)),"")[[1]]
chars2 <- strsplit(rawToChar(as.raw(c(33:126,128:255))),"")[[1]]
length(chars)
length(chars2)
dat <- readLines("ProviderNetworkFull.html")
dat2 <- paste(c(dat),collapse="\n")
dat2 <- gsub(" ","|",dat2)
dat2e <- strsplit(dat2,"")[[1]]
unique(dat2e)
chars3 <- chars2[!chars2 %in% dat2e]
phrase <- paste(dat2e,collapse=" ")
phr <- substr(phrase,1,1e5)
this <- ngram(phr,2)
newdat <- dat2e
newdatstr <- dat2
size <- 11
i <- 1
thresh <- 2e5
maptab <- data.table(String="",Char="",Count=0,Order=0)[0]
while(size > 1) {
ng <- switch(size-1,
table(paste0(newdat[-N],
newdat[-1])),
table(paste0(head(newdat,-2),
newdat[-c(1,N)],
tail(newdat,-2))),
table(paste0(head(newdat,-3),
newdat[-c(1,N-1,N)],
newdat[-c(1,2,N)],
tail(newdat,-3))),
table(paste0(head(newdat,-4),
newdat[-c(1,N-2,N-1,N)],
newdat[-c(1,2,N-1,N)],
newdat[-c(1:3,N)],
tail(newdat,-4))),
table(paste0(head(newdat,-5),
newdat[-c(1,(N-3):N)],
newdat[-c(1:2,(N-2):N)],
newdat[-c(1:3,N-1,N)],
newdat[-c(1:4,N)],
tail(newdat,-5))),
table(paste0(head(newdat,-6),
newdat[-c(1,(N-4):N)],
newdat[-c(1:2,(N-3):N)],
newdat[-c(1:3,(N-2):N)],
newdat[-c(1:4,N-1,N)],
newdat[-c(1:5,N)],
tail(newdat,-6))),
table(paste0(head(newdat,-7),
newdat[-c(1,(N-5):N)],
newdat[-c(1:2,(N-4):N)],
newdat[-c(1:3,(N-3):N)],
newdat[-c(1:4,(N-2):N)],
newdat[-c(1:5,N-1,N)],
newdat[-c(1:6,N)],
tail(newdat,-7))),
table(paste0(head(newdat,-8),
newdat[-c(1,(N-6):N)],
newdat[-c(1:2,(N-5):N)],
newdat[-c(1:3,(N-4):N)],
newdat[-c(1:4,(N-3):N)],
newdat[-c(1:5,(N-2):N)],
newdat[-c(1:6,N-1,N)],
newdat[-c(1:7,N)],
tail(newdat,-8))),
table(paste0(head(newdat,-9),
newdat[-c(1,(N-7):N)],
newdat[-c(1:2,(N-6):N)],
newdat[-c(1:3,(N-5):N)],
newdat[-c(1:4,(N-4):N)],
newdat[-c(1:5,(N-3):N)],
newdat[-c(1:6,(N-2):N)],
newdat[-c(1:7,N-1,N)],
newdat[-c(1:8,N)],
tail(newdat,-9))),
table(paste0(head(newdat,-10),
newdat[-c(1,(N-8):N)],
newdat[-c(1:2,(N-7):N)],
newdat[-c(1:3,(N-6):N)],
newdat[-c(1:4,(N-5):N)],
newdat[-c(1:5,(N-4):N)],
newdat[-c(1:6,(N-3):N)],
newdat[-c(1:7,(N-2):N)],
newdat[-c(1:8,N-1,N)],
newdat[-c(1:9,N)],
tail(newdat,-10))))
cat("----")
if(max(ng) > thresh) {
str <- tail(names(sort(ng)),1)
newdatstr <- gsub(str,chars3[i],newdatstr)
newdat <- strsplit(newdatstr,"")[[1]]
maptab <- rbind(maptab,data.table(String=str,Char=chars3[i],Count=max(ng),Order=i))
cat(paste0("Replaced ",max(ng)," occurrences of \"",str,"\" with ",chars3[i],". \n"))
i <- i+1
} else {
size <- size - 1
cat(paste0("No replacement. Only ",max(ng)," repeated ",size+1,"-grams. Reducing size to ",size,". \n"))
}
}
ngrams <- list()
N <- length(dat2e)
n1 <- table(dat2e)
n2 <- table(paste0(dat2e[-N],dat2e[-1]))
n3 <- table(paste0(head(dat2e,-2),dat2e[-c(1,N)],tail(dat2e,-2)))
n4 <- table(paste0(head(dat2e,-3),dat2e[-c(1,N-1,N)],dat2e[-c(1,2,N)],tail(dat2e,-3)))
for (i in 2:n) {
mat <- matrix(dat2e,nrow=length(dat2e)+1,ncol=6)
table(paste0(mat[,1],mat[,2]))
ngrams[[i]] <- table(apply(mat,1,paste0))
}
n1 <- table(dat2e)
n2 <- table(paste0(dat2e))
# Need some sort of clustering, need to find common patterns that are repeated frequently (more frequent the better, longer is good too)
# savings equivalent to (length of segment - 1)*(number of occurrences)
|
#' Balance Scale Dataset.
#'
#' This data set was generated to model psychological experimental results. Each
#' example is classified as having the balance scale tip to the right, tip to the left,
#' or be balanced. The attributes are the left weight, the left distance, the right
#' weight, and the right distance. The correct way to find the class is the greater of
#' (left-distance x left-weight) and (right-distance x right-weight). If they are equal,
#' it is balanced.
#'
#' @docType data
#'
#' @usage data(balance)
#'
#' @format A data frame with 625 rows and 4 variables:
#' \describe{
#' \item{Left-Weight}{Left-Weight, one of 1, 2, 3, 4, or 5}
#' \item{Left-Distance}{Left-Distance, one of 1, 2, 3, 4, or 5}
#' \item{Right-Weight}{Right-Weight, one of 1, 2, 3, 4, or 5}
#' \item{Right-Distance}{Right-Distance, one of 1, 2, 3, 4, or 5}
#'   \item{Class Name}{Class Name: one of L, B or R}
#' }
#' @source \url{https://archive.ics.uci.edu/ml/datasets/Balance+Scale}
"balance"
| /R/balance.R | no_license | cran/hhcartr | R | false | false | 996 | r | #' Balance Scale Dataset.
#'
#' This data set was generated to model psychological experimental results. Each
#' example is classified as having the balance scale tip to the right, tip to the left,
#' or be balanced. The attributes are the left weight, the left distance, the right
#' weight, and the right distance. The correct way to find the class is the greater of
#' (left-distance x left-weight) and (right-distance x right-weight). If they are equal,
#' it is balanced.
#'
#' @docType data
#'
#' @usage data(balance)
#'
#' @format A data frame with 625 rows and 4 variables:
#' \describe{
#' \item{Left-Weight}{Left-Weight, one of 1, 2, 3, 4, or 5}
#' \item{Left-Distance}{Left-Distance, one of 1, 2, 3, 4, or 5}
#' \item{Right-Weight}{Right-Weight, one of 1, 2, 3, 4, or 5}
#' \item{Right-Distance}{Right-Distance, one of 1, 2, 3, 4, or 5}
#'   \item{Class Name}{Class Name: one of L, B or R}
#' }
#' @source \url{https://archive.ics.uci.edu/ml/datasets/Balance+Scale}
"balance"
|
require("RColorBrewer")
require("gplots")
source("Colour_Scheme.R")
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Other_FS_functions.R")
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/Results_Git/Consistent_Setup.R")
# Benchmark the elapsed run time of eight feature-selection methods on one
# scRNA-seq dataset.
#
# Args:
#   counts: raw count matrix (genes x cells); coerced to matrix for NBumi.
#   norm:   normalised expression matrix (genes x cells).
#   fdr, name, suppress.plot: accepted for interface compatibility but never
#     referenced below (thresholds are hard-coded as 2) -- NOTE(review).
# Returns:
#   Numeric vector of 8 timings (seconds, from proc.time()[3] deltas) in the
#   order: M3Drop, HVG, NBDrop, NBDisp, Gini, Cor, PCA on PCs 2-3, PCA on PCs 1-3.
getFeatures <- function(counts, norm, fdr=0.01, name="Test", suppress.plot=TRUE){
# drop genes with zero total expression in either matrix
counts = counts[rowSums(counts) > 0,]
norm = norm[rowSums(norm) > 0,]
t = c(proc.time()[3]) #1
M3Drop_table = M3DropFeatureSelection(norm, mt_method="fdr", mt_threshold=2, suppress.plot=TRUE)
M3Drop_table[,1] = as.character(M3Drop_table[,1])
t = c(t, proc.time()[3]) #2
HVG_Deng = BrenneckeGetVariableGenes(norm, fdr=2, suppress.plot=TRUE)
t = c(t, proc.time()[3]) #3
counts <- as.matrix(counts)
fit <- NBumiFitModel(counts)
t = c(t, proc.time()[3]) #4
Dengfeatures <- NBumiFeatureSelectionCombinedDrop(fit)
t = c(t, proc.time()[3]) #5
Dengfeatures2 <- NBumiFeatureSelectionHighVar(fit)
t = c(t, proc.time()[3]) #6
Gini = Gini_FS(norm)
t = c(t, proc.time()[3]) #7
negcor = Cor_FS_neg(norm)
t = c(t, proc.time()[3]) #8
pca1 = Monocle2_pca_FS(counts, 1:length(counts[1,]), pcs=c(2,3))
t = c(t, proc.time()[3]) #9
pca2 = Monocle2_pca_FS(counts, 1:length(counts[1,]), pcs=c(1,2,3))
t = c(t, proc.time()[3]) #10
# elapsed-time deltas between the numbered checkpoints above; nb_t and nbv_t
# each include the shared NBumiFitModel time (t[4]-t[3]) on purpose, since
# both NBumi selections require the fitted model
m3d_t = t[2]-t[1]; hvg_t = t[3]-t[2];
nb_t = t[5]-t[3]; nbv_t = (t[6]-t[5])+(t[4]-t[3]);
gini_t = t[7]-t[6]; cor_t = t[8]-t[7];
pca1_t = t[9]-t[8]; pca2_t = t[10]-t[9];
return(c(m3d_t, hvg_t, nb_t, nbv_t, gini_t, cor_t, pca1_t, pca2_t))
}
# Round an expression matrix to integer counts and drop all-zero rows.
#
# Args:
#   mat: numeric matrix (or object coercible via as.matrix) of expression values.
# Returns:
#   An integer matrix with every all-zero row removed.
# BUG FIX: the original subsetting `mat[rowSums(mat) > 0,]` dropped
# dimensions, so when exactly one row survived a bare vector was returned
# instead of a 1-row matrix; `drop = FALSE` preserves the matrix shape.
convert_to_integer <- function(mat) {
	mat <- round(as.matrix(mat))
	storage.mode(mat) <- "integer"
	mat <- mat[rowSums(mat) > 0, , drop = FALSE]
	return(mat)
}
# Deng
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/Deng_embryo_clean.RData")
counts_list = normalize_data(Deng_embyro_list$data, is.counts = FALSE)
norm_list = normalize_data(Deng_embyro_list$data, is.counts = TRUE)
Deng <- getFeatures(counts_list$data, norm_list$data, name="Deng")
Deng_dim <- dim(counts_list$data);
# Zhong
zhong = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE57249_fpkm_ZHONG.txt", header=TRUE);
zhong = zhong[!duplicated(zhong[,1]),]
rownames(zhong) = zhong[,1]
zhong = zhong[,-1]
zhong = as.matrix(zhong);
ultralow = which(rowMeans(zhong) < 10^-5)
zhong = zhong[-ultralow,]
zhong_list = normalize_data(zhong, is.counts=FALSE)
zhong_count = convert_to_integer(zhong_list$data);
Biase <- getFeatures(zhong_count, zhong_list$data, name="Biase");
Biase_dim <- dim(zhong_count);
# Xue
Xue_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE44183_mouse_expression_mat.txt", header=TRUE)
Xue_list = normalize_data(Xue_data, is.counts=FALSE)
Xue_count = convert_to_integer(Xue_list$data);
Xue <- getFeatures(Xue_count, Xue_list$data, name="Xue");
Xue_dim <- dim(Xue_count)
# Fan
Fan_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE53386_matrix_fpkms.tsv", header=TRUE)
Fan_list = normalize_data(Fan_data, is.counts=FALSE)
Fan_count = convert_to_integer(Fan_list$data)
Fan <- getFeatures(Fan_count, Fan_list$data, name="Fan")
Fan_dim <- dim(Fan_count);
# Goolam
Goolam_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/Goolam_et_al_2015_count_table.tsv", header=T)
Goolam_list = normalize_data(Goolam_data, is.counts=TRUE)
Goolam_counts = normalize_data(Goolam_data, is.counts=FALSE)
Goo <- getFeatures(Goolam_counts$data, Goolam_list$data, name="Goolam")
Goo_dim <- dim(Goolam_counts$data)
#### Main-Text Ola & Blischak ####
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Load_Ola_SC.R")
Ola_count <- as.matrix(data[-spikes,]);
Ola_count <- Ola_count[rowSums(Ola_count) > 0,]
Ola_norm <- normalize_data(Ola_count, is.counts=TRUE)
Ola <- getFeatures(NBumiConvertToInteger(Ola_count), Ola_norm$data, name="Ola")
Ola_dim <- dim(Ola_norm$data);
source("../My_R_packages/M3D/R/NB_UMI.R"); require("matrixStats");
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Load_Blishcak_UMI.R")
counts <- counts[rowSums(counts) > 0,]
norm <- normalize_data(counts, is.counts=TRUE)
Blish <- getFeatures(counts, norm$data, name="Blish")
Blish_dim <- dim(norm$data);
# Shalek
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/ShalekKO_clean.RData")
ShalekKO_list$data <- ShalekKO_list$data[rowSums(ShalekKO_list$data) > 0,]
norm <- normalize_data(ShalekKO_list$data, is.counts=TRUE)
Shalek <- getFeatures(NBumiConvertToInteger(ShalekKO_list$data), norm$data, name="Sha")
Shalek_dim <- dim(norm$data);
rm(ShalekKO_list)
# Buettner
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/TeichCC_clean.RData")
TeichCC_list$data <- TeichCC_list$data[rowSums(TeichCC_list$data) > 0,]
norm <- normalize_data(TeichCC_list$data, is.counts=TRUE)
Teic <- getFeatures(NBumiConvertToInteger(TeichCC_list$data), norm$data, name="T")
Teic_dim <- dim(norm$data);
rm(TeichCC_list)
# Pollen
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/pollen.rda")
pollen <- pollen[rowSums(pollen) > 0,]
colnames(pollen) <- 1:length(pollen[1,])
norm <- normalize_data(pollen, is.counts=FALSE);
Pollen <- getFeatures(NBumiConvertToInteger(pollen), norm$data, name="pollen")
Pollen_dim <- dim(norm$data);
rm(pollen)
# Kirschner
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/kirschner.rda")
kirschner <- kirschner[rowSums(kirschner) > 0,]
colnames(kirschner) <- 1:length(kirschner[1,])
data_list = normalize_data(kirschner, labels = 1:length(kirschner[1,]), is.counts = FALSE)
Kir <- getFeatures(NBumiConvertToInteger(kirschner), data_list$data, name="kir")
Kir_dim <- dim(data_list$data)
rm(kirschner)
# Linnarsson
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/linnarsson.rda")
linnarsson <- linnarsson[rowSums(linnarsson) > 0,]
colnames(linnarsson) <- 1:length(linnarsson[1,])
data_list = normalize_data(linnarsson, labels = 1:length(linnarsson[1,]), is.counts = FALSE)
Lin <- getFeatures(NBumiConvertToInteger(linnarsson), data_list$data, name="lin")
Lin_dim <- dim(data_list$data)
rm(linnarsson)
# Usoskin
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/usoskin.rda")
usoskin <- usoskin[rowSums(usoskin) > 0,]
colnames(usoskin) <- 1:length(usoskin[1,])
data_list = normalize_data(usoskin, labels = 1:length(usoskin[1,]), is.counts = FALSE)
Uso <- getFeatures(NBumiConvertToInteger(usoskin), data_list$data, name="uso")
Uso_dim <- dim(data_list$data)
rm(usoskin)
# Macosko
mac_data <- read.table("/lustre/scratch117/cellgen/team218/MH/scRNASeqData/macosko.txt", header=T)
mac_data <- mac_data[rowSums(mac_data) > 0,]
colnames(mac_data) <- 1:length(mac_data[1,])
norm = t(t(mac_data)/colSums(mac_data)*1000000)
Mac <- getFeatures(NBumiConvertToInteger(mac_data), norm, name="mac")
Mac_dim <- dim(norm)
rm(mac_data); rm(norm);
#save.image(file="fsTime.RData")
return(c(m3d_t, hvg_t, nb_t, nbv_t, gini_t, cor_t, pca1_t, pca2_t))
# Make plot
source("Colour_Scheme.R")
TABLE = cbind(Deng, Biase, Xue, Fan, Goo, Ola, Blish, Shalek, Teic, Pollen, Kir, Lin, Uso, Mac)
rownames(TABLE) = c("M3D", "HVG", "NBDrop", "NBDisp", "Gini","Cor", "PCA (1-3)", "PCA (2-3)")
mat_size = rbind(Deng_dim, Biase_dim, Xue_dim, Fan_dim, Goo_dim, Ola_dim, Blish_dim, Shalek_dim, Teic_dim, Pollen_dim, Kir_dim, Lin_dim, Uso_dim, Mac_dim)
xes = apply(mat_size, 1, prod)
my_order = order(xes)
my_col = c(MM_col, hvg_1_col, Depth_col, NBVar_col, gini_col, cor_col, pca_1_col, pca_2_col)
png("FS_Time.png")
plot(1,1, col="white", xlim=c(min(xes), max(xes))/1000000, ylim=c(min(TABLE), max(TABLE)), xlab="ExprMat Size (millions)", ylab="Compute Time (s)")
for (i in 1:length(TABLE[,1])) {
lines(xes[my_order]/1000000, TABLE[i,my_order], col=my_col[i], lwd=3)
points(xes[my_order]/1000000, TABLE[i,my_order], col=my_col[i], pch=16, cex=1.75)
}
meth_order = order(-TABLE[,which(xes==max(xes))])
legend("topleft", rownames(TABLE)[meth_order], col=my_col[meth_order], lty=1, bty="n", lwd=2)
abline(h=60, col="grey35", lty=2)
text(15,60, "1 min", pos=3, col="grey50")
abline(h=60*30, col="grey35", lty=2)
text(175,60*30, "30 min", pos=1, col="grey50")
dev.off()
| /FS_Times.R | no_license | tallulandrews/Figures | R | false | false | 8,229 | r | require("RColorBrewer")
require("gplots")
source("Colour_Scheme.R")
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Other_FS_functions.R")
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/Results_Git/Consistent_Setup.R")
getFeatures <- function(counts, norm, fdr=0.01, name="Test", suppress.plot=TRUE){
counts = counts[rowSums(counts) > 0,]
norm = norm[rowSums(norm) > 0,]
t = c(proc.time()[3]) #1
M3Drop_table = M3DropFeatureSelection(norm, mt_method="fdr", mt_threshold=2, suppress.plot=TRUE)
M3Drop_table[,1] = as.character(M3Drop_table[,1])
t = c(t, proc.time()[3]) #2
HVG_Deng = BrenneckeGetVariableGenes(norm, fdr=2, suppress.plot=TRUE)
t = c(t, proc.time()[3]) #3
counts <- as.matrix(counts)
fit <- NBumiFitModel(counts)
t = c(t, proc.time()[3]) #4
Dengfeatures <- NBumiFeatureSelectionCombinedDrop(fit)
t = c(t, proc.time()[3]) #5
Dengfeatures2 <- NBumiFeatureSelectionHighVar(fit)
t = c(t, proc.time()[3]) #6
Gini = Gini_FS(norm)
t = c(t, proc.time()[3]) #7
negcor = Cor_FS_neg(norm)
t = c(t, proc.time()[3]) #8
pca1 = Monocle2_pca_FS(counts, 1:length(counts[1,]), pcs=c(2,3))
t = c(t, proc.time()[3]) #9
pca2 = Monocle2_pca_FS(counts, 1:length(counts[1,]), pcs=c(1,2,3))
t = c(t, proc.time()[3]) #10
m3d_t = t[2]-t[1]; hvg_t = t[3]-t[2];
nb_t = t[5]-t[3]; nbv_t = (t[6]-t[5])+(t[4]-t[3]);
gini_t = t[7]-t[6]; cor_t = t[8]-t[7];
pca1_t = t[9]-t[8]; pca2_t = t[10]-t[9];
return(c(m3d_t, hvg_t, nb_t, nbv_t, gini_t, cor_t, pca1_t, pca2_t))
}
# Round an expression matrix to integer counts and drop all-zero rows.
# NOTE(review): when exactly one row survives, `[` drops dimensions and a
# bare integer vector is returned instead of a 1-row matrix; callers that
# require a matrix should be aware (adding drop = FALSE would preserve it).
convert_to_integer <- function(mat) {
mat <- round(as.matrix(mat))
storage.mode(mat) <- "integer"
mat = mat[rowSums(mat) > 0,]
return(mat)
}
# Deng
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/Deng_embryo_clean.RData")
counts_list = normalize_data(Deng_embyro_list$data, is.counts = FALSE)
norm_list = normalize_data(Deng_embyro_list$data, is.counts = TRUE)
Deng <- getFeatures(counts_list$data, norm_list$data, name="Deng")
Deng_dim <- dim(counts_list$data);
# Zhong
zhong = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE57249_fpkm_ZHONG.txt", header=TRUE);
zhong = zhong[!duplicated(zhong[,1]),]
rownames(zhong) = zhong[,1]
zhong = zhong[,-1]
zhong = as.matrix(zhong);
ultralow = which(rowMeans(zhong) < 10^-5)
zhong = zhong[-ultralow,]
zhong_list = normalize_data(zhong, is.counts=FALSE)
zhong_count = convert_to_integer(zhong_list$data);
Biase <- getFeatures(zhong_count, zhong_list$data, name="Biase");
Biase_dim <- dim(zhong_count);
# Xue
Xue_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE44183_mouse_expression_mat.txt", header=TRUE)
Xue_list = normalize_data(Xue_data, is.counts=FALSE)
Xue_count = convert_to_integer(Xue_list$data);
Xue <- getFeatures(Xue_count, Xue_list$data, name="Xue");
Xue_dim <- dim(Xue_count)
# Fan
Fan_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/GSE53386_matrix_fpkms.tsv", header=TRUE)
Fan_list = normalize_data(Fan_data, is.counts=FALSE)
Fan_count = convert_to_integer(Fan_list$data)
Fan <- getFeatures(Fan_count, Fan_list$data, name="Fan")
Fan_dim <- dim(Fan_count);
# Goolam
Goolam_data = read.table("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/Goolam_et_al_2015_count_table.tsv", header=T)
Goolam_list = normalize_data(Goolam_data, is.counts=TRUE)
Goolam_counts = normalize_data(Goolam_data, is.counts=FALSE)
Goo <- getFeatures(Goolam_counts$data, Goolam_list$data, name="Goolam")
Goo_dim <- dim(Goolam_counts$data)
#### Main-Text Ola & Blischak ####
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Load_Ola_SC.R")
Ola_count <- as.matrix(data[-spikes,]);
Ola_count <- Ola_count[rowSums(Ola_count) > 0,]
Ola_norm <- normalize_data(Ola_count, is.counts=TRUE)
Ola <- getFeatures(NBumiConvertToInteger(Ola_count), Ola_norm$data, name="Ola")
Ola_dim <- dim(Ola_norm$data);
source("../My_R_packages/M3D/R/NB_UMI.R"); require("matrixStats");
source("/nfs/users/nfs_t/ta6/NetworkInferencePipeline/Dropouts/DE_vs_bulk/Load_Blishcak_UMI.R")
counts <- counts[rowSums(counts) > 0,]
norm <- normalize_data(counts, is.counts=TRUE)
Blish <- getFeatures(counts, norm$data, name="Blish")
Blish_dim <- dim(norm$data);
# Shalek
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/ShalekKO_clean.RData")
ShalekKO_list$data <- ShalekKO_list$data[rowSums(ShalekKO_list$data) > 0,]
norm <- normalize_data(ShalekKO_list$data, is.counts=TRUE)
Shalek <- getFeatures(NBumiConvertToInteger(ShalekKO_list$data), norm$data, name="Sha")
Shalek_dim <- dim(norm$data);
rm(ShalekKO_list)
# Buettner
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/TeichCC_clean.RData")
TeichCC_list$data <- TeichCC_list$data[rowSums(TeichCC_list$data) > 0,]
norm <- normalize_data(TeichCC_list$data, is.counts=TRUE)
Teic <- getFeatures(NBumiConvertToInteger(TeichCC_list$data), norm$data, name="T")
Teic_dim <- dim(norm$data);
rm(TeichCC_list)
# Pollen
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/pollen.rda")
pollen <- pollen[rowSums(pollen) > 0,]
colnames(pollen) <- 1:length(pollen[1,])
norm <- normalize_data(pollen, is.counts=FALSE);
Pollen <- getFeatures(NBumiConvertToInteger(pollen), norm$data, name="pollen")
Pollen_dim <- dim(norm$data);
rm(pollen)
# Kirschner
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/kirschner.rda")
kirschner <- kirschner[rowSums(kirschner) > 0,]
colnames(kirschner) <- 1:length(kirschner[1,])
data_list = normalize_data(kirschner, labels = 1:length(kirschner[1,]), is.counts = FALSE)
Kir <- getFeatures(NBumiConvertToInteger(kirschner), data_list$data, name="kir")
Kir_dim <- dim(data_list$data)
rm(kirschner)
# Linnarsson
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/linnarsson.rda")
linnarsson <- linnarsson[rowSums(linnarsson) > 0,]
colnames(linnarsson) <- 1:length(linnarsson[1,])
data_list = normalize_data(linnarsson, labels = 1:length(linnarsson[1,]), is.counts = FALSE)
Lin <- getFeatures(NBumiConvertToInteger(linnarsson), data_list$data, name="lin")
Lin_dim <- dim(data_list$data)
rm(linnarsson)
# Usoskin
load("/lustre/scratch117/cellgen/team218/TA/scRNASeqDatasets/clustools-master/data/usoskin.rda")
usoskin <- usoskin[rowSums(usoskin) > 0,]
colnames(usoskin) <- 1:length(usoskin[1,])
data_list = normalize_data(usoskin, labels = 1:length(usoskin[1,]), is.counts = FALSE)
Uso <- getFeatures(NBumiConvertToInteger(usoskin), data_list$data, name="uso")
Uso_dim <- dim(data_list$data)
rm(usoskin)
# Macosko
mac_data <- read.table("/lustre/scratch117/cellgen/team218/MH/scRNASeqData/macosko.txt", header=T)
mac_data <- mac_data[rowSums(mac_data) > 0,]
colnames(mac_data) <- 1:length(mac_data[1,])
norm = t(t(mac_data)/colSums(mac_data)*1000000)
Mac <- getFeatures(NBumiConvertToInteger(mac_data), norm, name="mac")
Mac_dim <- dim(norm)
rm(mac_data); rm(norm);
#save.image(file="fsTime.RData")
return(c(m3d_t, hvg_t, nb_t, nbv_t, gini_t, cor_t, pca1_t, pca2_t))
# Make plot
source("Colour_Scheme.R")
TABLE = cbind(Deng, Biase, Xue, Fan, Goo, Ola, Blish, Shalek, Teic, Pollen, Kir, Lin, Uso, Mac)
rownames(TABLE) = c("M3D", "HVG", "NBDrop", "NBDisp", "Gini","Cor", "PCA (1-3)", "PCA (2-3)")
mat_size = rbind(Deng_dim, Biase_dim, Xue_dim, Fan_dim, Goo_dim, Ola_dim, Blish_dim, Shalek_dim, Teic_dim, Pollen_dim, Kir_dim, Lin_dim, Uso_dim, Mac_dim)
xes = apply(mat_size, 1, prod)
my_order = order(xes)
my_col = c(MM_col, hvg_1_col, Depth_col, NBVar_col, gini_col, cor_col, pca_1_col, pca_2_col)
png("FS_Time.png")
plot(1,1, col="white", xlim=c(min(xes), max(xes))/1000000, ylim=c(min(TABLE), max(TABLE)), xlab="ExprMat Size (millions)", ylab="Compute Time (s)")
for (i in 1:length(TABLE[,1])) {
lines(xes[my_order]/1000000, TABLE[i,my_order], col=my_col[i], lwd=3)
points(xes[my_order]/1000000, TABLE[i,my_order], col=my_col[i], pch=16, cex=1.75)
}
meth_order = order(-TABLE[,which(xes==max(xes))])
legend("topleft", rownames(TABLE)[meth_order], col=my_col[meth_order], lty=1, bty="n", lwd=2)
abline(h=60, col="grey35", lty=2)
text(15,60, "1 min", pos=3, col="grey50")
abline(h=60*30, col="grey35", lty=2)
text(175,60*30, "30 min", pos=1, col="grey50")
dev.off()
|
# vcov() method for fitted 'gpcm' objects.
#
# Returns the (generalized) inverse of the stored Hessian via MASS::ginv();
# with robust = TRUE a sandwich estimator inv(H) %*% S %*% inv(H) is built,
# where S is the outer product of scores from scoregpcmSNW().  Row/column
# names follow the parameter layout implied by object$constraint: under
# "1PL" all items share one discrimination, so the per-item slope entries
# are dropped and a single "alpha" name is appended.
vcov.gpcm <-
function (object, robust = FALSE, ...) {
    if (!inherits(object, "gpcm")) {
        stop("Use only with 'gpcm' objects.\n")
    }
    bread <- ginv(object$hessian)
    vc <- if (robust) {
        meat <- scoregpcmSNW(object)
        bread %*% meat %*% bread
    } else {
        bread
    }
    drop_last <- function (x) x[-length(x)]
    nams <- switch(object$constraint,
        "gpcm" = names(unlist(object$coefficients)),
        "1PL" = c(names(unlist(lapply(object$coefficients, drop_last))), "alpha"),
        names(unlist(lapply(object$coefficients, drop_last))))
    dimnames(vc) <- list(nams, nams)
    vc
}
| /R/vcov.gpcm.R | no_license | gscriver/ltm | R | false | false | 762 | r | vcov.gpcm <-
function (object, robust = FALSE, ...) {
## vcov() method for 'gpcm' fits: (generalized) inverse of the stored
## Hessian; with robust = TRUE, the sandwich estimator
## inv(H) %*% outer.score %*% inv(H) built from scoregpcmSNW().
if (!inherits(object, "gpcm"))
stop("Use only with 'gpcm' objects.\n")
inv.Hessian <- if (robust) {
inv.H <- ginv(object$hessian)
outer.score <- scoregpcmSNW(object)
inv.H %*% outer.score %*% inv.H
} else
ginv(object$hessian)
## Parameter names depend on the identifiability constraint: under "1PL"
## all items share one discrimination, so each item's last coefficient is
## dropped and a single "alpha" name is appended.
nams <- if (object$constraint == "gpcm") {
names(unlist(object$coefficients))
} else if (object$constraint == "1PL") {
nm <- lapply(object$coefficients, function (x) x[-length(x)])
c(names(unlist(nm)), "alpha")
} else {
nm <- lapply(object$coefficients, function (x) x[-length(x)])
names(unlist(nm))
}
dimnames(inv.Hessian) <- list(nams, nams)
inv.Hessian
}
|
#! /usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# Require exactly seven positional arguments:
# dir1 samp1 dir2 samp2 dir3 samp3 savedir
if (length(args)!=7) {
  # BUG FIX: the condition rejects anything other than exactly 7 arguments,
  # but the old message said "At least seven", misleading callers who passed 8+.
  stop("Exactly seven arguments must be supplied: dir1 samp1 dir2 samp2 dir3 samp3 savedir", call.=FALSE)
}
#usage Rscript merge3_methbin_plot.R dir1 samp1 dir2 samp2 dir3 samp3 savedir
dir1=args[1]
samp1=args[2]
dir2=args[3]
samp2=args[4]
dir3=args[5]
samp3=args[6]
savedir=args[7]
library(data.table)
setwd(paste0(dir1))
a1=as.data.frame(fread(paste0("bis_",samp1,".binning.txt")))
b1=as.data.frame(fread(paste0("oxbis_",samp1,".binning.txt")))
c1=as.data.frame(fread(paste0("hmc_sigup_",samp1,".binning.txt")))
setwd(paste0(dir2))
a2=as.data.frame(fread(paste0("bis_",samp2,".binning.txt")))
b2=as.data.frame(fread(paste0("oxbis_",samp2,".binning.txt")))
c2=as.data.frame(fread(paste0("hmc_sigup_",samp2,".binning.txt")))
setwd(paste0(dir3))
a3=as.data.frame(fread(paste0("bis_",samp3,".binning.txt")))
b3=as.data.frame(fread(paste0("oxbis_",samp3,".binning.txt")))
c3=as.data.frame(fread(paste0("hmc_sigup_",samp3,".binning.txt")))
#pan =rgb(0,0.545098,0.545098,1/4)
#bla =rgb(0.729412,0.333333,0.827451,1/4)
#norm =rgb(0.560784,0.737255,0.560784,1/4)
setwd(paste0(savedir))
hist1=hist(a1$V7,breaks=20,xlim=c(0,100))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(a2$V7,breaks=20,xlim=c(0,100))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(a3$V7,breaks=20,xlim=c(0,100))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("bis_",samp1,samp2,samp3,"_bin.histall.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5mC & 5hmC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
hist1=hist(b1$V7,breaks=20,xlim=c(0,100))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(b2$V7,breaks=20,xlim=c(0,100))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(b3$V7,breaks=20,xlim=c(0,100))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("oxbis_",samp1,samp2,samp3,"_bin.histall.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5mC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
hist1=hist(c1$V4,breaks=20,xlim=c(0,1))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(c2$V4,breaks=20,xlim=c(0,1))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(c3$V4,breaks=20,xlim=c(0,1))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("hmc_sigup_",samp1,samp2,samp3,"_bin.hist.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5hmC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
| /merge3_methbin_plot.R | no_license | rachelGoldfeder/cfDNA | R | false | false | 4,058 | r | #! /usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# Require exactly seven positional arguments:
# dir1 samp1 dir2 samp2 dir3 samp3 savedir
if (length(args)!=7) {
  # BUG FIX: the condition rejects anything other than exactly 7 arguments,
  # but the old message said "At least seven", misleading callers who passed 8+.
  stop("Exactly seven arguments must be supplied: dir1 samp1 dir2 samp2 dir3 samp3 savedir", call.=FALSE)
}
#usage Rscript merge3_methbin_plot.R dir1 samp1 dir2 samp2 dir3 samp3 savedir
dir1=args[1]
samp1=args[2]
dir2=args[3]
samp2=args[4]
dir3=args[5]
samp3=args[6]
savedir=args[7]
library(data.table)
setwd(paste0(dir1))
a1=as.data.frame(fread(paste0("bis_",samp1,".binning.txt")))
b1=as.data.frame(fread(paste0("oxbis_",samp1,".binning.txt")))
c1=as.data.frame(fread(paste0("hmc_sigup_",samp1,".binning.txt")))
setwd(paste0(dir2))
a2=as.data.frame(fread(paste0("bis_",samp2,".binning.txt")))
b2=as.data.frame(fread(paste0("oxbis_",samp2,".binning.txt")))
c2=as.data.frame(fread(paste0("hmc_sigup_",samp2,".binning.txt")))
setwd(paste0(dir3))
a3=as.data.frame(fread(paste0("bis_",samp3,".binning.txt")))
b3=as.data.frame(fread(paste0("oxbis_",samp3,".binning.txt")))
c3=as.data.frame(fread(paste0("hmc_sigup_",samp3,".binning.txt")))
#pan =rgb(0,0.545098,0.545098,1/4)
#bla =rgb(0.729412,0.333333,0.827451,1/4)
#norm =rgb(0.560784,0.737255,0.560784,1/4)
setwd(paste0(savedir))
hist1=hist(a1$V7,breaks=20,xlim=c(0,100))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(a2$V7,breaks=20,xlim=c(0,100))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(a3$V7,breaks=20,xlim=c(0,100))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("bis_",samp1,samp2,samp3,"_bin.histall.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5mC & 5hmC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
hist1=hist(b1$V7,breaks=20,xlim=c(0,100))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(b2$V7,breaks=20,xlim=c(0,100))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(b3$V7,breaks=20,xlim=c(0,100))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("oxbis_",samp1,samp2,samp3,"_bin.histall.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5mC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
hist1=hist(c1$V4,breaks=20,xlim=c(0,1))
hist1$density = hist1$counts/sum(hist1$counts)*100
hist2=hist(c2$V4,breaks=20,xlim=c(0,1))
hist2$density = hist2$counts/sum(hist2$counts)*100
hist3=hist(c3$V4,breaks=20,xlim=c(0,1))
hist3$density = hist3$counts/sum(hist3$counts)*100
max=max(max(hist1$density),max(hist2$density),max(hist3$density))
pdf(paste0("hmc_sigup_",samp1,samp2,samp3,"_bin.hist.pdf"))
par(mar=c(8.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot(hist1,ylim=c(0,max),col=rgb(0.560784,0.737255,0.560784,1/4),main = "Histogram of % 5hmC CpG methylation",xlab="% mc & hmC per base",freq=FALSE)
plot(hist2,col=rgb(0,0.545098,0.545098,1/4),add=T,freq=FALSE)
plot(hist3,col=rgb(0.729412,0.333333,0.827451,1/4),add=T,freq=FALSE)
legend("bottom", xpd=TRUE,c(paste0(samp1),paste0(samp2),paste0(samp3)), fill=c(rgb(0.560784,0.737255,0.560784,1/4),
rgb(0,0.545098,0.545098,1/4),col=rgb(0.729412,0.333333,0.827451,1/4)), horiz=T,xjust = .5,yjust = 1,bty = "n",inset=-.3)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/advancedArg.R
\name{advancedArg}
\alias{advancedArg}
\title{List advanced arguments}
\usage{
advancedArg(fun, package = "derfinder", browse = interactive())
}
\arguments{
\item{fun}{The name of a function(s) that has advanced arguments in
\code{package}.}
\item{package}{The name of the package where the function is stored. Only
'derfinder', 'derfinderPlot', and 'regionReport' are accepted.}
\item{browse}{Whether to open the URLs in a browser.}
}
\value{
A vector of URLs with the GitHub search queries.
}
\description{
Find in GitHub the documentation for the advanced arguments of a given
function.
}
\details{
If you are interested on the default options used for functions
that run on multiple cores, check
https://github.com/lcolladotor/derfinder/blob/master/R/utils.R
Note that in general, \link[BiocParallel]{SnowParam} is more memory efficient
than link[BiocParallel]{MulticoreParam}. If you so desire, use your favorite
cluster type by specifying \code{BPPARAM}.
}
\examples{
## Open the advanced argument docs for loadCoverage()
if(interactive()) {
advancedArg('loadCoverage')
} else {
(advancedArg('loadCoverage', browse = FALSE))
}
}
\author{
Leonardo Collado-Torres
}
| /man/advancedArg.Rd | no_license | cyang-2014/derfinder | R | false | true | 1,275 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/advancedArg.R
\name{advancedArg}
\alias{advancedArg}
\title{List advanced arguments}
\usage{
advancedArg(fun, package = "derfinder", browse = interactive())
}
\arguments{
\item{fun}{The name of a function(s) that has advanced arguments in
\code{package}.}
\item{package}{The name of the package where the function is stored. Only
'derfinder', 'derfinderPlot', and 'regionReport' are accepted.}
\item{browse}{Whether to open the URLs in a browser.}
}
\value{
A vector of URLs with the GitHub search queries.
}
\description{
Find in GitHub the documentation for the advanced arguments of a given
function.
}
\details{
If you are interested on the default options used for functions
that run on multiple cores, check
https://github.com/lcolladotor/derfinder/blob/master/R/utils.R
Note that in general, \link[BiocParallel]{SnowParam} is more memory efficient
than link[BiocParallel]{MulticoreParam}. If you so desire, use your favorite
cluster type by specifying \code{BPPARAM}.
}
\examples{
## Open the advanced argument docs for loadCoverage()
if(interactive()) {
advancedArg('loadCoverage')
} else {
(advancedArg('loadCoverage', browse = FALSE))
}
}
\author{
Leonardo Collado-Torres
}
|
# Deduplicate the incoming leads, then drop every contact whose e-mail is
# already known (contacted / replied / meeting held) and every company that
# already has an associated deal.
no_duplicados <- unique(data_frame_inicio)
emails_conocidos <- c(contactados$Email, respuestas$Email, reunion_hecha$Email)
not_in_planilla <- no_duplicados[!(no_duplicados$email %in% emails_conocidos), ]
not_in_deal <- not_in_planilla[!(not_in_planilla$company %in% datos_deals$`Associated Company`), ]
no_duplicados_final <- not_in_deal
| /LIMPIADOR DE DATOS/eliminador_duplicados.R | no_license | rquintana-abstracta/data_cleaner2 | R | false | false | 295 | r | no_duplicados = unique(data_frame_inicio)
not_in_planilla = no_duplicados[!(no_duplicados$email %in% c(contactados$Email,respuestas$Email,reunion_hecha$Email)),]
not_in_deal = not_in_planilla[!(not_in_planilla$company %in% datos_deals$`Associated Company`),]
no_duplicados_final = not_in_deal
|
#### Import database
setwd("~/SELECT ERASMUS MUNDUS/ENERBYTE- Thesis&internship/THESIS - Enerbyte/Chapter 2. Case study Data analysis/R for Tesi/test_01_tesi")
rubi <- read.csv("~/SELECT ERASMUS MUNDUS/ENERBYTE- Thesis&internship/THESIS - Enerbyte/Chapter 2. Case study Data analysis/R for Tesi/test_01_tesi/data_id_selection.csv", sep=";", stringsAsFactors=FALSE)
#rubi<-rubi[(rubi$source == "CURRENTCOST"),]
rubi$source <-NULL
rubi$idmeter<-as.numeric(rubi$idmeter)
rubi$date<-as.POSIXct((rubi$date), format="%d/%m/%Y")
rubi1<-rubi[order(as.numeric(rubi$idmeter),rubi$date),]
str(rubi1)
a<-as.data.frame(unique(rubi1$idmeter))
###1. Get the sum of each row
rubi_cut<-rubi[3:26]
rubi_zero<-rubi[apply(rubi_cut==0, 1, sum)<=0,]
### 2. frozens: drop meters whose reading is stuck (identical consecutive hourly values)
x<-rubi_zero[3:26]
y<-x[-1]
# NOTE: `1:length(x)-1` parses as (1:length(x))-1 = 0:23; the 0 index is
# silently dropped by `[`, so this selects columns 1..23 and `diff` holds
# the consecutive hour-to-hour differences.
diff <- y-x[1:length(x)-1]
rubi_net<-rubi_zero[apply(diff==0,1,sum)<=2,] ## keep rows with at most 2 zero hour-to-hour differences (the old comment claiming "more than six 0 per row" did not match the <=2 condition)
names(rubi_net)<- c("idmeter","date","00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00","08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00","16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
str(rubi_net)
b<-as.data.frame(unique(rubi_net$idmeter))
####Separate Weekday and Weekends
library(xts)
rubi2<-as.xts(rubi_net,rubi_net$date)
## Weekdays
weekdays<-rubi2[.indexwday(rubi2) %in% 1:5] #labels=c("Monday","Tuesday","Wednesday", "Thursday", "Friday")
w_days<-as.data.frame(dates=index(weekdays), coredata(weekdays))
w_days$date<-NULL
names(w_days)<- c("idmeter",c("wd0","wd1","wd2","wd3","wd4","wd5","wd6","wd7","wd8","wd9","wd10","wd11","wd12","wd13","wd14","wd15","wd16","wd17","wd18","wd19","wd20","wd21","wd22","wd23"))
w_days[,c(2:25)] <- lapply(w_days[,c(2:25)], as.character)
w_days[,c(2:25)] <- lapply(w_days[,c(2:25)], as.numeric)
#w_days$date<-as.POSIXct(w_days$date)
w_days$idmeter<-as.numeric(as.character(w_days$idmeter))
w_days<-w_days[order(w_days$idmeter),]
str(w_days)
c<-as.data.frame(unique(w_days$idmeter))
#### PERCENTAGES (OPOWER) OPTION 2: CURVES ARCHETYPES
## RowSUms, Division, ColMeans --> MORE ACCURATE!
## Normalise each day's profile to shares of the daily total, then average
## the shares per meter, yielding one 24-value archetype row per meter.
###1. Get the sum of each row
rubi_sum<-w_days[2:25]
row_sum<-as.matrix(rowSums(rubi_sum))
# NOTE(review): names()<- on a matrix sets the `names` attribute, not
# colnames; this line has no visible effect downstream.
names(row_sum)<-"sum"
###2. Division to get the percentages per hour
# data.frame / (n x 1 matrix) recycles the row totals down each column,
# i.e. every hourly value is divided by its row's daily total.
division<-as.data.frame(rubi_sum/row_sum)
#division$idmeter<-NULL
division_id<-cbind(w_days$idmeter,division)
names(division_id)<-c("idmeter","00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00","08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00","16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
division_id<-as.data.frame(division_id)
#test<-as.matrix(rowSums(division_id[2:25]))
###3. Column means
# split() by meter, colMeans per group -> columns are meters; transpose so
# each row is one meter's mean hourly-share profile.
cast_99<-as.data.frame(lapply(split(division_id, division_id$idmeter),colMeans))
cast100<-as.data.frame(t(cast_99))
cast100$idmeter<-NULL   # drop the (averaged) id column; ids stay as rownames
hour_percent<-cast100   # clustering input: meters x 24 hourly shares
#perc_wd<-w_days[2:25]
#perc_wd<-perc_wd*100
#str(perc_wd)
#### K-means clustering
## no distances between points is needed to calculate, as kmeans is based centroid mininimu square
## (kmeans minimises within-cluster sum of squares around centroids)
set.seed(13)
fit <- kmeans(hour_percent, 7,iter.max=100,nstart=121, algorithm="MacQueen") # 7-cluster solution, 121 random starts
#fit <- kmeans(subi, 5,iter.max=100,nstart=100) # 5 cluster solution
fit
clus_num<-fit$cluster                 # cluster label per meter
p1<-as.data.frame(clus_num)
# Diagnostics printed for the console log
fit$tot.withinss
fit$size
fit$betweenss
fit$withinss
fit$tot.withinss   # NOTE(review): duplicate of the line four above
###plotting cluster line
# Attach the cluster label and rename hour columns to 0..23
p2<-as.data.frame(cbind(p1$clus_num,hour_percent))
names(p2)<-c("clus_num",c(0:23))
# One data frame per cluster (names cluster1..cluster7 are relied on below)
cluster1<-p2[p2$clus_num==1,]
cluster2<-p2[p2$clus_num==2,]
cluster3<-p2[p2$clus_num==3,]
cluster4<-p2[p2$clus_num==4,]
cluster5<-p2[p2$clus_num==5,]
cluster6<-p2[p2$clus_num==6,]
cluster7<-p2[p2$clus_num==7,]
## TESTING clusters by plotting
## One hourly-profile plot per cluster: each line is one meter's mean
## hourly share of daily consumption.
##
## FIX: ggplot() was called here before ggplot2 was attached -- the only
## library(ggplot2) in this script appeared much further down -- so the
## first plot failed on a fresh session. Attach it explicitly.
library(reshape2)
library(ggplot2)
hour<-c(0:23)
theme_set(theme_gray(base_size = 12))
## Replaces seven copy-pasted blocks. Each iteration reproduces the
## original steps exactly: transpose the cluster's 24 hourly columns,
## bind the hour index, melt to long format, draw one line per meter.
## The original per-cluster variable names (c_1..c_7, c_11..c_71,
## cp1..cp7) are preserved for interactive use and the multiplot() call.
for (k in 1:7) {
  wide <- as.data.frame(t(get(paste0("cluster", k))[, c(2:25)]))
  wide <- cbind(hour, wide)
  long <- melt(wide, id.vars = "hour")
  p <- ggplot(long, aes(hour, value)) +
    geom_line(aes(colour = variable)) +
    ggtitle(paste0("cluster", k)) +
    ylim(0, 0.15)
  assign(paste0("c_", k), wide)
  assign(paste0("c_", k, "1"), long)
  assign(paste0("cp", k), p)
}
source("multiplot_function.R")
multiplot(cp1, cp2, cp3, cp4, cp5, cp6, cp7, cols = 3)
## Plotting the clusters MEAN
## Compute the per-hour mean share for each cluster. A single loop replaces
## seven hand-copied blocks; the end state is identical:
##   c1_mean..c7_mean       -- 1-row frames (hours as columns), used later
##   cluster1_mean..cluster7_mean -- long frames (hour, mean, clus_num)
##   by_clus_mean           -- all seven stacked for the facet plot
for (k in 1:7) {
  hourly <- as.data.frame(colMeans(get(paste0("cluster", k))[2:25]))
  names(hourly) <- paste0("mean_clus", k)
  assign(paste0("c", k, "_mean"), as.data.frame(t(hourly)))
  hourly <- cbind(hour, hourly)
  hourly <- cbind(hourly, rep(c(k)))
  names(hourly) <- c("hour", "mean", "clus_num")
  assign(paste0("cluster", k, "_mean"), hourly)
}
by_clus_mean <- rbind(cluster1_mean, cluster2_mean, cluster3_mean,
                      cluster4_mean, cluster5_mean, cluster6_mean,
                      cluster7_mean)
##2. plot the 7 cluster mean profiles in wrap facets
library(ggplot2)
ggplot(by_clus_mean, aes(hour, mean)) +
  geom_line(aes(colour = clus_num)) +
  facet_wrap(~clus_num) +
  ylim(0, 0.13)
##residus per cluster
## Mean absolute deviation of each meter's profile from its cluster mean,
## summed over the 24 hours and averaged within the cluster.
##
## FIX: the original grew a data.frame with rbind() inside a per-row loop
## (O(n^2) copying) and repeated the whole block seven times. sweep()
## subtracts the centroid from every row at once; the resulting ax1_m..ax7_m
## and alls_m hold the same "dist" values as before.
all_means <- rbind(c1_mean, c2_mean, c3_mean, c4_mean, c5_mean, c6_mean, c7_mean)
for (k in 1:7) {
  members <- as.data.frame(get(paste0("cluster", k))[, 2:25])
  centre  <- as.numeric(get(paste0("c", k, "_mean")))
  # |profile - centroid| per hour, summed per meter, averaged per cluster
  abs_dev <- abs(sweep(members, 2, centre))
  assign(paste0("ax", k, "_m"), data.frame(dist = mean(rowSums(abs_dev))))
}
alls_m <- as.data.frame(rbind(ax1_m, ax2_m, ax3_m, ax4_m, ax5_m, ax6_m, ax7_m))
| /R scripts/script_44.2_kmeans_wd_k7fmacqueenclus.R | no_license | josepotal/master-thesis--clustering-energy-profiles | R | false | false | 9,992 | r |
#### Import database
setwd("~/SELECT ERASMUS MUNDUS/ENERBYTE- Thesis&internship/THESIS - Enerbyte/Chapter 2. Case study Data analysis/R for Tesi/test_01_tesi")
rubi <- read.csv("~/SELECT ERASMUS MUNDUS/ENERBYTE- Thesis&internship/THESIS - Enerbyte/Chapter 2. Case study Data analysis/R for Tesi/test_01_tesi/data_id_selection.csv", sep=";", stringsAsFactors=FALSE)
#rubi<-rubi[(rubi$source == "CURRENTCOST"),]
rubi$source <-NULL
rubi$idmeter<-as.numeric(rubi$idmeter)
rubi$date<-as.POSIXct((rubi$date), format="%d/%m/%Y")
rubi1<-rubi[order(as.numeric(rubi$idmeter),rubi$date),]
str(rubi1)
a<-as.data.frame(unique(rubi1$idmeter))
###1. Get the sum of each row
rubi_cut<-rubi[3:26]
rubi_zero<-rubi[apply(rubi_cut==0, 1, sum)<=0,]
### 2. frozens
x<-rubi_zero[3:26]
y<-x[-1]
diff <- y-x[1:length(x)-1]
rubi_net<-rubi_zero[apply(diff==0,1,sum)<=2,] ## more than six 0 per row, remove the row
names(rubi_net)<- c("idmeter","date","00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00","08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00","16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
str(rubi_net)
b<-as.data.frame(unique(rubi_net$idmeter))
####Separate Weekday and Weekends
library(xts)
rubi2<-as.xts(rubi_net,rubi_net$date)
## Weekdays
weekdays<-rubi2[.indexwday(rubi2) %in% 1:5] #labels=c("Monday","Tuesday","Wednesday", "Thursday", "Friday")
w_days<-as.data.frame(dates=index(weekdays), coredata(weekdays))
w_days$date<-NULL
names(w_days)<- c("idmeter",c("wd0","wd1","wd2","wd3","wd4","wd5","wd6","wd7","wd8","wd9","wd10","wd11","wd12","wd13","wd14","wd15","wd16","wd17","wd18","wd19","wd20","wd21","wd22","wd23"))
w_days[,c(2:25)] <- lapply(w_days[,c(2:25)], as.character)
w_days[,c(2:25)] <- lapply(w_days[,c(2:25)], as.numeric)
#w_days$date<-as.POSIXct(w_days$date)
w_days$idmeter<-as.numeric(as.character(w_days$idmeter))
w_days<-w_days[order(w_days$idmeter),]
str(w_days)
c<-as.data.frame(unique(w_days$idmeter))
#### PERCENTAGES (OPOWER) OPTION 2: CURVES ARCHETYPES
## RowSUms, Division, ColMeans --> MORE ACCURATE!
###1. Get the sum of each row
rubi_sum<-w_days[2:25]
row_sum<-as.matrix(rowSums(rubi_sum))
names(row_sum)<-"sum"
###2. Division to get the percentages per hour
division<-as.data.frame(rubi_sum/row_sum)
#division$idmeter<-NULL
division_id<-cbind(w_days$idmeter,division)
names(division_id)<-c("idmeter","00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00","08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00","16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
division_id<-as.data.frame(division_id)
#test<-as.matrix(rowSums(division_id[2:25]))
###3. Column means
cast_99<-as.data.frame(lapply(split(division_id, division_id$idmeter),colMeans))
cast100<-as.data.frame(t(cast_99))
cast100$idmeter<-NULL
hour_percent<-cast100
#perc_wd<-w_days[2:25]
#perc_wd<-perc_wd*100
#str(perc_wd)
#### K-means clustering
## no distances between points is needed to calculate, as kmeans is based centroid mininimu square
set.seed(13)
fit <- kmeans(hour_percent, 7,iter.max=100,nstart=121, algorithm="MacQueen") # 5 cluster solution
#fit <- kmeans(subi, 5,iter.max=100,nstart=100) # 5 cluster solution
fit
clus_num<-fit$cluster
p1<-as.data.frame(clus_num)
fit$tot.withinss
fit$size
fit$betweenss
fit$withinss
fit$tot.withinss
###plotting cluster line
p2<-as.data.frame(cbind(p1$clus_num,hour_percent))
names(p2)<-c("clus_num",c(0:23))
cluster1<-p2[p2$clus_num==1,]
cluster2<-p2[p2$clus_num==2,]
cluster3<-p2[p2$clus_num==3,]
cluster4<-p2[p2$clus_num==4,]
cluster5<-p2[p2$clus_num==5,]
cluster6<-p2[p2$clus_num==6,]
cluster7<-p2[p2$clus_num==7,]
## TESTING clusters by plotting
library(reshape2)
hour<-c(0:23)
#cluster1
c_1<-as.data.frame(t(cluster1[,c(2:25)]))
c_1<-cbind(hour,c_1)
c_11<-melt(c_1, id.vars="hour")
cp1<- ggplot(c_11, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster1")+ylim(0,0.15)
theme_set(theme_gray(base_size = 12))
#cluster2
c_2<-as.data.frame(t(cluster2[,c(2:25)]))
c_2<-cbind(hour,c_2)
c_21<-melt(c_2, id.vars="hour")
cp2<- ggplot(c_21, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster2")+ylim(0,0.15)
#cluster3
c_3<-as.data.frame(t(cluster3[,c(2:25)]))
c_3<-cbind(hour,c_3)
c_31<-melt(c_3, id.vars="hour")
cp3<- ggplot(c_31, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster3")+ylim(0,0.15)
#cluster4
c_4<-as.data.frame(t(cluster4[,c(2:25)]))
c_4<-cbind(hour,c_4)
c_41<-melt(c_4, id.vars="hour")
cp4<- ggplot(c_41, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster4") +ylim(0,0.15)
#cluster5
c_5<-as.data.frame(t(cluster5[,c(2:25)]))
c_5<-cbind(hour,c_5)
c_51<-melt(c_5, id.vars="hour")
cp5<- ggplot(c_51, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster5")+ylim(0,0.15)
#cluster6
c_6<-as.data.frame(t(cluster6[,c(2:25)]))
c_6<-cbind(hour,c_6)
c_61<-melt(c_6, id.vars="hour")
cp6<- ggplot(c_61, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster6")+ylim(0,0.15)
#cluster7
c_7<-as.data.frame(t(cluster7[,c(2:25)]))
c_7<-cbind(hour,c_7)
c_71<-melt(c_7, id.vars="hour")
cp7<- ggplot(c_71, aes(hour,value)) + geom_line(aes(colour = variable))+ggtitle("cluster7")+ylim(0,0.15)
source("multiplot_function.R")
multiplot(cp1, cp2, cp3, cp4,cp5,cp6,cp7, cols=3)
## Plotting the clusters MEAN
#cluster1
cluster1_mean<-as.data.frame(colMeans(cluster1[2:25]))
names(cluster1_mean)<-"mean_clus1"
c1_mean<-as.data.frame(t(cluster1_mean))
cluster1_mean<-cbind(hour,cluster1_mean)
#cluster2
cluster2_mean<-as.data.frame(colMeans(cluster2[2:25]))
names(cluster2_mean)<-"mean_clus2"
c2_mean<-as.data.frame(t(cluster2_mean))
cluster2_mean<-cbind(hour,cluster2_mean)
#cluster 3
cluster3_mean<-as.data.frame(colMeans(cluster3[2:25]))
names(cluster3_mean)<-"mean_clus3"
c3_mean<-as.data.frame(t(cluster3_mean))
cluster3_mean<-cbind(hour,cluster3_mean)
# cluster4
cluster4_mean<-as.data.frame(colMeans(cluster4[2:25]))
names(cluster4_mean)<-"mean_clus4"
c4_mean<-as.data.frame(t(cluster4_mean))
cluster4_mean<-cbind(hour,cluster4_mean)
# cluster5
cluster5_mean<-as.data.frame(colMeans(cluster5[2:25]))
names(cluster5_mean)<-"mean_clus5"
c5_mean<-as.data.frame(t(cluster5_mean))
cluster5_mean<-cbind(hour,cluster5_mean)
# cluster6
cluster6_mean<-as.data.frame(colMeans(cluster6[2:25]))
names(cluster6_mean)<-"mean_clus6"
c6_mean<-as.data.frame(t(cluster6_mean))
cluster6_mean<-cbind(hour,cluster6_mean)
# cluster7
cluster7_mean<-as.data.frame(colMeans(cluster7[2:25]))
names(cluster7_mean)<-"mean_clus7"
c7_mean<-as.data.frame(t(cluster7_mean))
cluster7_mean<-cbind(hour,cluster7_mean)
##merging cluster to the same dataframe
cluster1_mean<-cbind(cluster1_mean,rep(c(1)))
names(cluster1_mean)<-(c("hour","mean","clus_num"))
cluster2_mean<-cbind(cluster2_mean,rep(c(2)))
names(cluster2_mean)<-(c("hour","mean","clus_num"))
cluster3_mean<-cbind(cluster3_mean,rep(c(3)))
names(cluster3_mean)<-(c("hour","mean","clus_num"))
cluster4_mean<-cbind(cluster4_mean,rep(c(4)))
names(cluster4_mean)<-(c("hour","mean","clus_num"))
cluster5_mean<-cbind(cluster5_mean,rep(c(5)))
names(cluster5_mean)<-(c("hour","mean","clus_num"))
cluster6_mean<-cbind(cluster6_mean,rep(c(6)))
names(cluster6_mean)<-(c("hour","mean","clus_num"))
cluster7_mean<-cbind(cluster7_mean,rep(c(7)))
names(cluster7_mean)<-(c("hour","mean","clus_num"))
by_clus_mean<-rbind(cluster1_mean,cluster2_mean,cluster3_mean,cluster4_mean,cluster5_mean,cluster6_mean,cluster7_mean)
##2. plot the 5 different cluster in 5 wrap facets
library(ggplot2)
ggplot(by_clus_mean,aes(hour,mean))+geom_line(aes(colour=clus_num))+facet_wrap(~clus_num)+
ylim(0,0.13)
##residus per cluster
all_means<-rbind(c1_mean,c2_mean,c3_mean,c4_mean,c5_mean,c6_mean,c7_mean)
#1
c1_res<-as.data.frame(cluster1[,2:25])
ax1<-data.frame()
id1<-1:nrow(cluster1)
for (i in id1){
ax1 <- rbind(ax1,(c1_res[i,] - c1_mean))
}
ax1<-abs(ax1)
ax1_rs<-as.data.frame(rowSums(ax1))
ax1_m<-as.data.frame(colMeans(ax1_rs))
names(ax1_m)<-"dist"
#2
c2_res<-as.data.frame(cluster2[,2:25])
ax2<-data.frame()
id2<-1:nrow(cluster2)
for (i in id2){
ax2 <- rbind(ax2,(c2_res[i,] - c2_mean))
}
ax2<-abs(ax2)
ax2_rs<-as.data.frame(rowSums(ax2))
ax2_m<-as.data.frame(colMeans(ax2_rs))
names(ax2_m)<-"dist"
#3
c3_res<-as.data.frame(cluster3[,2:25])
ax3<-data.frame()
id3<-1:nrow(cluster3)
for (i in id3){
ax3 <- rbind(ax3,(c3_res[i,] - c3_mean))
}
ax3<-abs(ax3)
ax3_rs<-as.data.frame(rowSums(ax3))
ax3_m<-as.data.frame(colMeans(ax3_rs))
names(ax3_m)<-"dist"
#4
c4_res<-as.data.frame(cluster4[,2:25])
ax4<-data.frame()
id4<-1:nrow(cluster4)
for (i in id4){
ax4 <- rbind(ax4,(c4_res[i,] - c4_mean))
}
ax4<-abs(ax4)
ax4_rs<-as.data.frame(rowSums(ax4))
ax4_m<-as.data.frame(colMeans(ax4_rs))
names(ax4_m)<-"dist"
#5
c5_res<-as.data.frame(cluster5[,2:25])
ax5<-data.frame()
id5<-1:nrow(cluster5)
for (i in id5){
ax5 <- rbind(ax5,(c5_res[i,] - c5_mean))
}
ax5<-abs(ax5)
ax5_rs<-as.data.frame(rowSums(ax5))
ax5_m<-as.data.frame(colMeans(ax5_rs))
names(ax5_m)<-"dist"
#6
c6_res<-as.data.frame(cluster6[,2:25])
ax6<-data.frame()
id6<-1:nrow(cluster6)
for (i in id6){
ax6 <- rbind(ax6,(c6_res[i,] - c6_mean))
}
ax6<-abs(ax6)
ax6_rs<-as.data.frame(rowSums(ax6))
ax6_m<-as.data.frame(colMeans(ax6_rs))
names(ax6_m)<-"dist"
#7
c7_res<-as.data.frame(cluster7[,2:25])
ax7<-data.frame()
id7<-1:nrow(cluster7)
for (i in id7){
ax7 <- rbind(ax7,(c7_res[i,] - c7_mean))
}
ax7<-abs(ax7)
ax7_rs<-as.data.frame(rowSums(ax7))
ax7_m<-as.data.frame(colMeans(ax7_rs))
names(ax7_m)<-"dist"
alls_m<-as.data.frame(rbind(ax1_m,ax2_m,ax3_m,ax4_m,ax5_m,ax6_m,ax7_m))
|
test.simple1 <- function() {
  # Round-trip check: the cache wrapper returns the stored matrix, and
  # cacheSolve() yields the same inverse as a direct solve().
  src <- matrix(1:4, nrow = 2, ncol = 2)
  inv <- solve(src)
  cached <- makeCacheMatrix(src)
  checkEquals(src, cached$get())
  checkEquals(inv, cacheSolve(cached))
}
test.inverseNotCalculated <- function() {
  # Before cacheSolve() has ever run, no inverse should be cached.
  cached <- makeCacheMatrix(matrix(1:4, nrow = 2, ncol = 2))
  checkEquals(NULL, cached$getInverse())
}
test.cacheHits <- function() {
# Verifies that repeated cacheSolve() calls return the correct inverse and
# (ideally) that the expensive solve() line runs only once thanks to caching.
sourceMatrix <- matrix(1:4, nrow=2, ncol=2)
expected <- solve(sourceMatrix)
cm <- makeCacheMatrix(sourceMatrix)
# call a number of times keeping track of calls made
# NOTE(review): tracker()/inspect() appear to be RUnit's code-inspection
# tools -- confirm RUnit is attached when this suite runs.
track <- tracker()
track$init()
for (i in 1:10) {
res <- inspect(cacheSolve(cm), track = track)
checkEquals(expected, res)
}
resTrack <- track$getTrackInfo()
cacheSolveTrack <- resTrack$`R/cacheSolve`
# All 10 calls must be recorded as runs of cacheSolve
checkEquals(10, cacheSolveTrack$nrRuns)
# cacheSolveTrack$src[4] is the line that calculates the matrix inverse
# so it should be called only once even though we made many calls
# when i run this interactively on the console it works but it doesn't when
# i run using the test suite, all the src run counts are zero; since unit
# testing wasn't officially prescribed i will punt for now.
#checkEquals(1, cacheSolveTrack$run[4])
}
} | /runit.cachematrix.R | no_license | malaffoon/ProgrammingAssignment2 | R | false | false | 1,282 | r | test.simple1 <- function() {
sourceMatrix <- matrix(1:4, nrow=2, ncol=2)
expected <- solve(sourceMatrix)
cm <- makeCacheMatrix(sourceMatrix)
checkEquals(sourceMatrix, cm$get())
checkEquals(expected, cacheSolve(cm))
}
test.inverseNotCalculated <- function() {
cm <- makeCacheMatrix(matrix(1:4, nrow=2, ncol=2))
checkEquals(NULL, cm$getInverse())
}
test.cacheHits <- function() {
sourceMatrix <- matrix(1:4, nrow=2, ncol=2)
expected <- solve(sourceMatrix)
cm <- makeCacheMatrix(sourceMatrix)
# call a number of times keeping track of calls made
track <- tracker()
track$init()
for (i in 1:10) {
res <- inspect(cacheSolve(cm), track = track)
checkEquals(expected, res)
}
resTrack <- track$getTrackInfo()
cacheSolveTrack <- resTrack$`R/cacheSolve`
checkEquals(10, cacheSolveTrack$nrRuns)
# cacheSolveTrack$src[4] is the line that calculates the matrix inverse
# so it should be called only once even though we made many calls
# when i run this interactively on the console it works but it doesn't when
# i run using the test suite, all the src run counts are zero; since unit
# testing wasn't officially prescribed i will punt for now.
#checkEquals(1, cacheSolveTrack$run[4])
} |
# Importing data from Google Sheets with the gsheet package
#install.packages('gsheet')
library(gsheet)

# Two tabs of the same spreadsheet, addressed by their gid fragment
regr1 <- "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=107865534"
logr1 <- "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=560796239"
df1 <- as.data.frame(gsheet2tbl(regr1))
df1
df2 <- as.data.frame(gsheet2tbl(logr1))
# FIX: was str(df) -- `df` is only created further below, so this line
# errored (or inspected a stale object). The second download was meant.
str(df2)
df2
summary(df1)
summary(df2)

# Same download, but with the sheet URL assembled from its parts
docurl <- "https://docs.google.com/spreadsheets/d/"
sheeturl <- paste0("1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k","/edit#gid=")
sheetname <- "560796239"
fullurl <- paste0(docurl, sheeturl, sheetname)
fullurl
df <- as.data.frame(gsheet::gsheet2tbl(fullurl))
summary(df)
str(df)
| /05-dataIE/20a-importgg.R | no_license | DUanalytics/rAnalytics | R | false | false | 698 | r | #Importing from ggsheet
#install.packages('gsheet')
library(gsheet)
regr1 = "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=107865534"
logr1 = "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=560796239"
df1 = as.data.frame(gsheet2tbl(regr1))
df1
df2 = as.data.frame(gsheet2tbl(logr1))
str(df)
df2
summary(df1)
summary(df2)
#
docurl = "https://docs.google.com/spreadsheets/d/"
sheeturl = paste0("1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k","/edit#gid=")
sheetname = "560796239"
fullurl = paste0(docurl, sheeturl, sheetname)
fullurl
df = as.data.frame(gsheet::gsheet2tbl(fullurl))
summary(df)
str(df)
|
## Build a PLINK simulation parameter file and invoke plink with either
## --simulate (case/control) or --simulate-qt (quantitative trait).
##
## The first 13 arguments describe the simulated SNP sets and are written,
## one row per set, to a temporary parameter file. Each may be a scalar or
## a vector; all vector-valued arguments must share one length.
## Mode is chosen from `n`: if `n` is given, a quantitative trait of that
## sample size is simulated; otherwise `ncases`, `ncontrols` and
## `prevalence` are required for a case/control simulation.
## `tags`/`haps` (mutually exclusive) switch to the marker+causal-variant
## file layout and then require the four *.bound.causal/*.bound.marker
## frequency ranges. `cmd` text and `...` are forwarded to plink().
plink.simulate <- function(n.SNPs=NULL,
                           labels=paste0("SNP", 1:length(n.SNPs)),
                           lower.bound=0.01,
                           upper.bound=1,
                           OR.het=1, ## Note: over hom alt
                           OR.hom=1, ## Note: over hom alt
                           r2=0,
                           dominance=0,
                           lower.bound.causal=NULL,
                           upper.bound.causal=NULL,
                           lower.bound.marker=NULL,
                           upper.bound.marker=NULL,
                           ld=0,
                           n=NULL,
                           ncases=NULL,
                           ncontrols=NULL,
                           prevalence=NULL,
                           label=NULL,
                           missing=NULL,
                           tags=FALSE,
                           haps=FALSE,
                           acgt="", ### Either: "acgt", "1234", "12"
                           cmd="",
                           ...) { ### to pass to plink()
  stopifnot(!is.null(n.SNPs))
  sim.file.params <- list(n.SNPs,
                          labels,
                          lower.bound,
                          upper.bound,
                          OR.het,
                          OR.hom,
                          r2,
                          dominance,
                          lower.bound.causal,
                          upper.bound.causal,
                          lower.bound.marker,
                          upper.bound.marker,
                          ld)
  lengths <- unlist(lapply(sim.file.params, length))
  if(max(lengths) > 1) {
    ## FIX: the inner `if` was unbraced and covered only print(), so stop()
    ## fired whenever ANY argument was a vector -- even when all vector
    ## lengths agreed. Only error when the length>1 arguments disagree.
    if(length(unique(lengths[lengths > 1])) > 1) {
      print(sim.file.params)
      stop("Lengths of parameters for simulation parameter file not consistent")
    }
  }
  case.controls <- is.null(n)   # no sample size given => case/control mode
  if(!case.controls) { ### qt scenario
    stopifnot(!is.null(n))
    if(tags || haps) {
      stopifnot(!is.null(lower.bound.causal) &&
                !is.null(upper.bound.causal) &&
                !is.null(lower.bound.marker) &&
                !is.null(upper.bound.marker))
      if(tags && haps) stop("tags and haps cannot both be specified.")
      else if(tags) tagshaps <- "tags"
      else tagshaps <- "haps"
      simfile.data <- data.frame(n.SNPs,
                                 labels,
                                 lower.bound.causal,
                                 upper.bound.causal,
                                 lower.bound.marker,
                                 upper.bound.marker,
                                 ld,
                                 r2,
                                 dominance)
    }
    else {
      tagshaps <- ""
      simfile.data <- data.frame(n.SNPs,
                                 labels,
                                 lower.bound,
                                 upper.bound,
                                 r2,
                                 dominance)
    }
    write.table2(simfile.data, file=simfile <- tempfile(pattern="simfile"))
    cmd <- paste(cmd, "--simulate-qt", simfile, tagshaps, acgt)
    cmd <- paste(cmd, "--simulate-n", n)
  }
  else {
    stopifnot(!is.null(ncases) && !is.null(ncontrols))
    stopifnot(!is.null(prevalence))
    ## FIX: use scalar `||` for this scalar condition, matching the qt
    ## branch above (was element-wise `|`).
    if(tags || haps) {
      stopifnot(!is.null(lower.bound.causal) &&
                !is.null(upper.bound.causal) &&
                !is.null(lower.bound.marker) &&
                !is.null(upper.bound.marker))
      if(tags && haps) stop("tags and haps cannot both be specified.")
      else if(tags) tagshaps <- "tags"
      else tagshaps <- "haps"
      simfile.data <- data.frame(n.SNPs,
                                 labels,
                                 lower.bound.causal,
                                 upper.bound.causal,
                                 lower.bound.marker,
                                 upper.bound.marker,
                                 ld,
                                 OR.het,
                                 OR.hom)
    }
    else {
      tagshaps <- ""
      simfile.data <- data.frame(n.SNPs,
                                 labels,
                                 lower.bound,
                                 upper.bound,
                                 OR.het,
                                 OR.hom)
    }
    write.table2(simfile.data, file=simfile <- tempfile(pattern="simfile"))
    cmd <- paste(cmd, "--simulate-prevalence", prevalence)
    cmd <- paste(cmd, "--simulate", simfile, tagshaps, acgt)
    cmd <- paste(cmd, "--simulate-ncases", ncases, "--simulate-ncontrols", ncontrols)
  }
  ### Other optional plink switches
  if(!is.null(label)) cmd <- paste(cmd, "--simulate-label", label)
  if(!is.null(missing)) cmd <- paste(cmd, "--simulate-missing", missing)
  return(plink(cmd=cmd, bfile="", ...))
}
| /Rplink/R/plink.simulate.R | no_license | unfated/cross-population-PRS | R | false | false | 5,341 | r | plink.simulate <- function(n.SNPs=NULL,
labels=paste0("SNP", 1:length(n.SNPs)),
lower.bound=0.01,
upper.bound=1,
OR.het=1, ## Note: over hom alt
OR.hom=1, ## Note: over hom alt
r2=0,
dominance=0,
lower.bound.causal=NULL,
upper.bound.causal=NULL,
lower.bound.marker=NULL,
upper.bound.marker=NULL,
ld=0,
n=NULL,
ncases=NULL,
ncontrols=NULL,
prevalence=NULL,
label=NULL,
missing=NULL,
tags=F,
haps=F,
acgt="", ### Either: "acgt", "1234", "12"
cmd="",
...) { ### to pass to plink()
### Call plink's simulate function (includes simulate-qt)
### The first 11 parameters are for constructing the simulation parameter file.
### Each should be given a single value or be a vector
stopifnot(!is.null(n.SNPs))
sim.file.params <- list(n.SNPs,
labels,
lower.bound,
upper.bound,
OR.het,
OR.hom,
r2,
dominance,
lower.bound.causal,
upper.bound.causal,
lower.bound.marker,
upper.bound.marker,
ld)
lengths <- unlist(lapply(sim.file.params, length))
if(max(lengths) > 1) {
if(length(unique(lengths[lengths > 1])) > 1)
print(sim.file.params)
stop("Lengths of parameters for simulation parameter file not consistent")
}
if(is.null(n)) case.controls <- T
else case.controls <- F
if(!case.controls) { ### qt scenario
stopifnot(!is.null(n))
if(tags || haps) {
stopifnot(!is.null(lower.bound.causal) &&
!is.null(upper.bound.causal) &&
!is.null(lower.bound.marker) &&
!is.null(upper.bound.marker))
if(tags && haps) stop("tags and haps cannot both be specified.")
else if(tags) tagshaps <- "tags"
else tagshaps <- "haps"
simfile.data <- data.frame(n.SNPs,
labels,
lower.bound.causal,
upper.bound.causal,
lower.bound.marker,
upper.bound.marker,
ld,
r2,
dominance)
}
else {
tagshaps <- ""
simfile.data <- data.frame(n.SNPs,
labels,
lower.bound,
upper.bound,
r2,
dominance)
}
write.table2(simfile.data, file=simfile <- tempfile(pattern="simfile"))
cmd <- paste(cmd, "--simulate-qt", simfile, tagshaps, acgt)
cmd <- paste(cmd, "--simulate-n", n)
}
else {
stopifnot(!is.null(ncases) && !is.null(ncontrols))
stopifnot(!is.null(prevalence))
if(tags | haps) {
stopifnot(!is.null(lower.bound.causal) &&
!is.null(upper.bound.causal) &&
!is.null(lower.bound.marker) &&
!is.null(upper.bound.marker))
if(tags && haps) stop("tags and haps cannot both be specified.")
else if(tags) tagshaps <- "tags"
else tagshaps <- "haps"
simfile.data <- data.frame(n.SNPs,
labels,
lower.bound.causal,
upper.bound.causal,
lower.bound.marker,
upper.bound.marker,
ld,
OR.het,
OR.hom)
}
else {
tagshaps <- ""
simfile.data <- data.frame(n.SNPs,
labels,
lower.bound,
upper.bound,
OR.het,
OR.hom)
}
write.table2(simfile.data, file=simfile <- tempfile(pattern="simfile"))
cmd <- paste(cmd, "--simulate-prevalence", prevalence)
cmd <- paste(cmd, "--simulate", simfile, tagshaps, acgt)
cmd <- paste(cmd, "--simulate-ncases", ncases, "--simulate-ncontrols", ncontrols)
}
### Other options
if(!is.null(label)) cmd <- paste(cmd, "--simulate-label", label)
if(!is.null(missing)) cmd <- paste(cmd, "--simulate-missing", missing)
return(plink(cmd=cmd, bfile="", ...))
}
|
library(sdef)
### Name: extractFeatures.T
### Title: Extracting the lists of features of interest
### Aliases: extractFeatures.T
### ** Examples

# Simulate two experiments' p-values, then extract the features of
# interest at the threshold selected by ratio().
sim <- simulation(n = 500, GammaA = 1, GammaB = 1, r1 = 0.5, r2 = 0.8,
                  DEfirst = 300, DEsecond = 200, DEcommon = 100)
thresholds <- ratio(data = sim$Pval)
feat.names <- sim$names
feat.lists.T <- extractFeatures.T(output.ratio = thresholds,
                                  feat.names = feat.names)
| /data/genthat_extracted_code/sdef/examples/extractFeatures.T.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 374 | r | library(sdef)
### Name: extractFeatures.T
### Title: Extracting the lists of features of interest
### Aliases: extractFeatures.T
### ** Examples
data = simulation(n=500,GammaA=1,GammaB=1,r1=0.5,r2=0.8,
DEfirst=300,DEsecond=200,DEcommon=100)
Th<- ratio(data=data$Pval)
feat.names = data$names
feat.lists.T <- extractFeatures.T(output.ratio=Th,
feat.names=feat.names)
|
## Per-column summary of an abundance table.
## Usage: Rscript step1.3_generate_summary.R <input.tsv> <output.tsv>
## NOTE(review): assumes taxa in columns and samples in rows, with a
## "rootrank.Root" total-reads column -- confirm against the pipeline.
args <- commandArgs(trailingOnly = TRUE)   # renamed: `options` shadowed base::options
input <- args[1]
output <- args[2]
data <- read.table(input, header = TRUE, row.names = 1, sep = "\t")
summary.results <- data.frame(
  zeros    = colSums(data == 0),   # samples where the taxon is absent
  nonzeros = colSums(data > 0),    # samples where the taxon is observed
  # mean abundance over the samples where the taxon is present
  mean.ab.reads = apply(data, 2, function(x){mean(x[x > 0])}),
  # same, expressed as a share of each sample's total ("rootrank.Root") reads
  mean.ab.share = apply(data, 2, function(x){x = x/data[,"rootrank.Root"];mean(x[x > 0])}))
# FIX: write to the `output` variable instead of re-reading options[2]
write.table(summary.results, file = output, sep = "\t")
write.table(summary.results,file = options[2],sep="\t") | /software/step1.3_generate_summary.R | no_license | wyc9559/miQTL_cookbook | R | false | false | 518 | r | options = commandArgs(trailingOnly = TRUE)
input = options[1]
output = options[2]
data = read.table(input, header=T,row.names=1,sep="\t")
summary.results = data.frame(zeros = colSums(data==0),
nonzeros = colSums(data>0),
mean.ab.reads = apply(data,2,function(x){mean(x[x>0])}),
mean.ab.share = apply(data,2,function(x){x = x/data[,"rootrank.Root"];mean(x[x>0])}))
write.table(summary.results,file = options[2],sep="\t") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supporter.R
\name{null_or_character}
\alias{null_or_character}
\title{Check value NULL or character}
\usage{
null_or_character(x)
}
\arguments{
\item{x}{a value to be checked for character or NULL}
}
\description{
Check value NULL or character
}
\keyword{internal}
| /man/null_or_character.Rd | permissive | cparsania/phyloR | R | false | true | 343 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supporter.R
\name{null_or_character}
\alias{null_or_character}
\title{Check value NULL or character}
\usage{
null_or_character(x)
}
\arguments{
\item{x}{a value to be checked for character or NULL}
}
\description{
Check value NULL or character
}
\keyword{internal}
|
# Import the data set; keep annual income and spending score (columns 4:5)
data = read.csv('Mall_Customers.csv')
X <- data[4:5]

# Elbow method: within-cluster sum of squares for k = 1..10
# (same seed and the same sequence of kmeans() calls as before)
set.seed(6)
wcss <- sapply(1:10, function(k) sum(kmeans(X, k)$withinss))
plot(1:10, wcss, type = 'b', main = paste('elbow graph'),
     xlab = "no of clusters", ylab = "wcss")

# Fit k-means with the chosen number of clusters
kmeans = kmeans(X, 5, iter.max = 300, nstart = 10)
predictions = kmeans$cluster

# Visualise the clusters
#install.packages('cluster')
library(cluster)
clusplot(X,
         predictions,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = TRUE,
         span = FALSE,
         main = paste('Clusters'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
ylab = 'Spending Score') | /Kmeans/source/kmeansR.R | no_license | naveenanallamotu/MachineLearningWithR | R | false | false | 716 | r | #import data set
# Mall-customer segmentation with k-means clustering.
# Import data set: column 4 = Annual Income, column 5 = Spending Score.
data <- read.csv('Mall_Customers.csv')
X <- data[4:5]

# Choose the number of clusters with the elbow method:
# plot within-cluster sum of squares (WCSS) for k = 1..10.
set.seed(6)
wcss <- numeric(10)  # preallocate instead of growing in the loop
for (i in 1:10) {
  wcss[i] <- sum(kmeans(X, i)$withinss)
}
plot(1:10, wcss, type = 'b', main = paste('elbow graph'),
     xlab = "no of clusters", ylab = "wcss")

# Fit k-means with the chosen k = 5.
# Renamed result from `kmeans` to `km_fit` so stats::kmeans is not shadowed.
km_fit <- kmeans(X, 5, iter.max = 300, nstart = 10)
predictions <- km_fit$cluster

# Visualize the clusters
# install.packages('cluster')
library(cluster)
clusplot(X,
         predictions,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = TRUE,
         span = FALSE,
         main = paste('Clusters'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
# Load the SpaceTimeStructure model code and the prepared inputs.
source("SpaceTimeStructureMix.R")
load("human_sample_covariance.Robj")  # provides `sample.cov`
metadata <- read.table("human_sample_metadata.txt", header = TRUE,
                       stringsAsFactors = FALSE)

# Data passed to the MCMC: sampling coordinates/times, the sample
# covariance matrix, and the number of loci it was computed from.
sim.data <- list(geo.coords = cbind(metadata$lon, metadata$lat),
                 time.coords = metadata$time,
                 sample.covariance = sample.cov,
                 n.loci = 87158)

# Model configuration: K = 3 clusters, flat-earth distances,
# temporal sampling enabled, space-time covariance term kept.
model.options <- list(round.earth = FALSE,
                      n.clusters = 3,
                      temporal.sampling = TRUE,
                      no.st = FALSE)

# MCMC run settings: 5e7 generations, sampled every 5e4,
# progress printed every 1e3, state saved every 5e6 generations.
mcmc.options <- list(ngen = 5e7,
                     samplefreq = 5e4,
                     printfreq = 1e3,
                     savefreq = 5e6,
                     output.file.name = "haak_k3_st_output.Robj")

MCMC.gid(sim.data, model.options, mcmc.options, initial.parameters = NULL)
| /datasets/HumanData/analyses/old/temporal/spatial1/k_3/exe.spatialStructure.R | no_license | gbradburd/spatialStructure | R | false | false | 669 | r | source("SpaceTimeStructureMix.R")
load("human_sample_covariance.Robj")  # provides `sample.cov`
metadata <- read.table("human_sample_metadata.txt", header = TRUE,
                       stringsAsFactors = FALSE)

# Data passed to the MCMC: sampling coordinates/times, the sample
# covariance matrix, and the number of loci it was computed from.
sim.data <- list(geo.coords = cbind(metadata$lon, metadata$lat),
                 time.coords = metadata$time,
                 sample.covariance = sample.cov,
                 n.loci = 87158)

# Model configuration: K = 3 clusters, flat-earth distances,
# temporal sampling enabled, space-time covariance term kept.
model.options <- list(round.earth = FALSE,
                      n.clusters = 3,
                      temporal.sampling = TRUE,
                      no.st = FALSE)

# MCMC run settings: 5e7 generations, sampled every 5e4,
# progress printed every 1e3, state saved every 5e6 generations.
mcmc.options <- list(ngen = 5e7,
                     samplefreq = 5e4,
                     printfreq = 1e3,
                     savefreq = 5e6,
                     output.file.name = "haak_k3_st_output.Robj")

MCMC.gid(sim.data, model.options, mcmc.options, initial.parameters = NULL)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bertini-package.R, R/bertini.R
\docType{package}
\name{bertini}
\alias{bertini}
\alias{package-bertini}
\alias{bertini-package}
\alias{bertini}
\title{Evaluate Bertini Code}
\usage{
bertini(code, dir = tempdir(), quiet = TRUE)
}
\arguments{
\item{code}{Bertini code as either a character string or function; see
examples}
\item{dir}{directory to place the files in, without an ending /}
\item{quiet}{show bertini output}
}
\value{
an object of class bertini
}
\description{
Write a Bertini file, evaluate it through a back-end connection to Bertini,
and bring the output back into R.
}
\examples{
\dontrun{ # requires bertini
# where does the circle intersect the line y = x?
code <- "
INPUT
variable_group x, y;
function f, g;
f = x^2 + y^2 - 1;
g = y - x;
END;
"
bertini(code)
c(sqrt(2)/2, sqrt(2)/2)
# where do the surfaces
# x^2 - y^2 - z^2 - 1/2
# x^2 + y^2 + z^2 - 9
# x^2/4 + y^2/4 - z^2
# intersect?
#
code <- "
INPUT
variable_group x, y, z;
function f, g, h;
f = x^2 - y^2 - z^2 - 1/2;
g = x^2 + y^2 + z^2 - 9;
h = x^2/4 + y^2/4 - z^2;
END;
"
bertini(code)
# algebraic solution :
c(sqrt(19)/2, 7/(2*sqrt(5)), 3/sqrt(5)) # +/- each ordinate
# example from bertini manual
code <- "
INPUT
variable_group x, y;
function f, g;
f = x^2 - 1;
g = x + y - 1;
END;
"
out <- bertini(code)
str(out)
# non zero-dimensional example
code <- "
CONFIG
TRACKTYPE: 1;
END;
INPUT
variable_group x, y, z;
function f1, f2;
f1 = x^2-y;
f2 = x^3-z;
END;
"
out <- bertini(code)
bertini(code, quiet = FALSE) # print broken here
}
}
| /man/bertini.Rd | no_license | Csun1992/bertini | R | false | true | 1,650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bertini-package.R, R/bertini.R
\docType{package}
\name{bertini}
\alias{bertini}
\alias{package-bertini}
\alias{bertini-package}
\alias{bertini}
\title{Evaluate Bertini Code}
\usage{
bertini(code, dir = tempdir(), quiet = TRUE)
}
\arguments{
\item{code}{Bertini code as either a character string or function; see
examples}
\item{dir}{directory to place the files in, without an ending /}
\item{quiet}{show bertini output}
}
\value{
an object of class bertini
}
\description{
Write a Bertini file, evaluate it through a back-end connection to Bertini,
and bring the output back into R.
}
\examples{
\dontrun{ # requires bertini
# where does the circle intersect the line y = x?
code <- "
INPUT
variable_group x, y;
function f, g;
f = x^2 + y^2 - 1;
g = y - x;
END;
"
bertini(code)
c(sqrt(2)/2, sqrt(2)/2)
# where do the surfaces
# x^2 - y^2 - z^2 - 1/2
# x^2 + y^2 + z^2 - 9
# x^2/4 + y^2/4 - z^2
# intersect?
#
code <- "
INPUT
variable_group x, y, z;
function f, g, h;
f = x^2 - y^2 - z^2 - 1/2;
g = x^2 + y^2 + z^2 - 9;
h = x^2/4 + y^2/4 - z^2;
END;
"
bertini(code)
# algebraic solution :
c(sqrt(19)/2, 7/(2*sqrt(5)), 3/sqrt(5)) # +/- each ordinate
# example from bertini manual
code <- "
INPUT
variable_group x, y;
function f, g;
f = x^2 - 1;
g = x + y - 1;
END;
"
out <- bertini(code)
str(out)
# non zero-dimensional example
code <- "
CONFIG
TRACKTYPE: 1;
END;
INPUT
variable_group x, y, z;
function f1, f2;
f1 = x^2-y;
f2 = x^3-z;
END;
"
out <- bertini(code)
bertini(code, quiet = FALSE) # print broken here
}
}
|
###############################################################################
# Simulates position of birds by individual, season, year, and month.
# Incorporates migratory connectivity, movement within season, and dispersal
# between seasons.
# Does not incorporate births or deaths.
###############################################################################
#' Simulates position of birds by individual, season, year, and month.
#'
#' Incorporates migratory connectivity, movement within season, and dispersal
#' between seasons. Does not incorporate births or deaths.
#'
#' @param breedingAbund Vector with number of birds to simulate starting at
#' each breeding site.
#' @param breedingDist Distances between the breeding sites. Symmetric matrix.
#' @param winteringDist Distances between the wintering sites. Symmetric
#' matrix.
#' @param psi Transition probabilities between B origin and W target sites.
#' A matrix with B rows and W columns where rows sum to 1.
#' @param nYears Number of years to simulate movement.
#' @param nMonths Number of months per breeding and wintering season.
#' @param winMoveRate Within winter movement rate. Defaults to 0 (no
#' movement).
#' @param sumMoveRate Within summer movement rate. Defaults to 0 (no
#' movement).
#' @param winDispRate Between winter dispersal rate. Defaults to 0 (no
#' dispersal).
#' @param sumDispRate Between summer dispersal rate. Defaults to 0 (no
#' dispersal). Setting this to a value above 0 is equivalent to setting
#' both natal and breeding dispersal to that same value.
#' @param natalDispRate Natal dispersal rate. Controls the movement of
#' animals from their birthplace on their first return to the breeding
#' grounds. Defaults to 0 (return to the birthplace for all).
#' @param breedDispRate Breeding dispersal rate. Controls the movement of
#' animals between breeding sites on spring migrations after the first.
#' Defaults to 0 (return to the same breeding site each year).
#' @param verbose If set to a value > 0, informs the user on the passage
#' of years and seasons during the simulation. Defaults to 0 (no output
#' during simulation).
#'
#' @return \code{simMove} returns a list with elements:
#' \describe{
#' \item{\code{animalLoc}}{\code{sum(breedingAbund)} (number of animals)
#' by 2 by \code{nYears} by \code{nMonths} array with the simulated
#' locations of each animal in each month of each season (summer or
#' winter) of each year. Values of cells are 1...B (first column) and
#' 1...W (second column) where B is the number of breeding sites and W is
#' the number of wintering sites.}
#' \item{\code{breedDispMat}}{B by B matrix of probabilities of breeding
#' dispersal between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{natalDispMat}}{B by B matrix of probabilities of natal
#' dispersal between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{sumMoveMat}}{B by B matrix of probabilities of within season
#' movement between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{winDispMat}}{W by W matrix of probabilities of dispersal
#' between each pair of 1...W nonbreeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{winMoveMat}}{W by W matrix of probabilities of within season
#' movement between each pair of 1...W nonbreeding sites. Direction is
#' from row to column, so each row sums to 1.}
#' }
#'
#' @export
#' @example inst/examples/simMoveExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
simMove <- function(breedingAbund, breedingDist, winteringDist, psi,
                    nYears = 10, nMonths = 3, winMoveRate = 0,
                    sumMoveRate = 0, winDispRate = 0, sumDispRate = 0,
                    natalDispRate = 0, breedDispRate = 0, verbose = 0)
{
  nSeasons <- 2
  nBreeding <- length(breedingAbund)
  nWintering <- nrow(winteringDist)
  nAnimals <- sum(breedingAbund)  # hoisted: total animals, used in every loop
  # Input validation
  if (sumDispRate > 0 && (natalDispRate > 0 || breedDispRate > 0) &&
      (sumDispRate != natalDispRate || sumDispRate != breedDispRate))
    stop("Can't specify summer dispersal separately from breeding or natal dispersal")
  # BUG FIX: winDispRate was missing from the negativity check
  if (sumDispRate < 0 || natalDispRate < 0 || breedDispRate < 0 ||
      sumMoveRate < 0 || winMoveRate < 0 || winDispRate < 0)
    stop("Can't specify negative movement or dispersal rates")
  if (nrow(psi) != nBreeding || ncol(psi) != nWintering)
    stop("psi must have one row per breeding site and one column per wintering site")
  # Turn rate terms into transition-probability matrices (NULL when rate == 0)
  winMoveMat <- if (winMoveRate > 0)
    mlogitMat(1 / sqrt(winMoveRate), winteringDist) else NULL
  sumMoveMat <- if (sumMoveRate > 0)
    mlogitMat(1 / sqrt(sumMoveRate), breedingDist) else NULL
  winDispMat <- if (winDispRate > 0)
    mlogitMat(1 / sqrt(winDispRate), winteringDist) else NULL
  # A single summer dispersal rate applies to both natal and breeding dispersal
  if (sumDispRate > 0) {
    natalDispRate <- sumDispRate
    breedDispRate <- sumDispRate
  }
  natalDispMat <- if (natalDispRate > 0)
    mlogitMat(1 / sqrt(natalDispRate), breedingDist) else NULL
  breedDispMat <- if (breedDispRate > 0)
    mlogitMat(1 / sqrt(breedDispRate), breedingDist) else NULL
  # Draw one destination index from a row of transition probabilities
  drawDest <- function(probs) which(rmultinom(1, 1, probs) > 0)
  # Storage of locations: animal x season x year x month
  animalLoc <- array(NA, c(nAnimals, nSeasons, nYears, nMonths))
  animalLoc[, 1, 1, 1] <- rep(1:nBreeding, breedingAbund)
  # Run simulation
  for (y in 1:nYears) {
    if (verbose > 0)
      cat("Year", y, "Summer, ")
    # Within-summer movement for months 2..nMonths
    if (nMonths > 1)
      for (sm in 2:nMonths) {
        if (sumMoveRate == 0)
          animalLoc[, 1, y, sm] <- animalLoc[, 1, y, sm - 1]
        else
          for (i in seq_len(nAnimals))
            animalLoc[i, 1, y, sm] <- drawDest(sumMoveMat[animalLoc[i, 1, y, sm - 1], ])
      }
    if (verbose > 0)
      cat("Fall, ")
    # Fall migration: first year follows psi; later years use winter dispersal
    if (y == 1) {
      for (i in seq_len(nAnimals))
        animalLoc[i, 2, y, 1] <- drawDest(psi[animalLoc[i, 1, y, 1], ])
    }
    else if (winDispRate == 0)
      animalLoc[, 2, y, 1] <- animalLoc[, 2, y - 1, 1]
    else
      for (i in seq_len(nAnimals))
        animalLoc[i, 2, y, 1] <- drawDest(winDispMat[animalLoc[i, 2, y - 1, 1], ])
    if (verbose > 0)
      cat("Winter, ")
    # Within-winter movement for months 2..nMonths
    if (nMonths > 1)
      for (wm in 2:nMonths) {
        if (winMoveRate == 0)
          animalLoc[, 2, y, wm] <- animalLoc[, 2, y, wm - 1]
        else
          for (i in seq_len(nAnimals))
            animalLoc[i, 2, y, wm] <- drawDest(winMoveMat[animalLoc[i, 2, y, wm - 1], ])
      }
    if (verbose > 0)
      cat("Spring\n")
    # Spring migration: natal dispersal after first winter,
    # breeding dispersal in subsequent years (&& for scalar condition)
    if (y == 1 && nYears > 1) {
      if (natalDispRate == 0)
        animalLoc[, 1, y + 1, 1] <- animalLoc[, 1, y, 1]
      else
        for (i in seq_len(nAnimals))
          animalLoc[i, 1, y + 1, 1] <- drawDest(natalDispMat[animalLoc[i, 1, y, 1], ])
    }
    else if (y < nYears) {
      if (breedDispRate == 0)
        animalLoc[, 1, y + 1, 1] <- animalLoc[, 1, y, 1]
      else
        for (i in seq_len(nAnimals))
          animalLoc[i, 1, y + 1, 1] <- drawDest(breedDispMat[animalLoc[i, 1, y, 1], ])
    }
  }
  return(list(animalLoc = animalLoc, natalDispMat = natalDispMat,
              breedDispMat = breedDispMat, sumMoveMat = sumMoveMat,
              winDispMat = winDispMat, winMoveMat = winMoveMat))
}
###############################################################################
# Function for generating simulated count data
###############################################################################
#' Simulates Breeding Bird Survey-style count data
#'
#'
#' @param nPops Number of populations/regions
#' @param routePerPop Vector of length 1 or nPops containing the number of routes (i.e. counts) per population. If length(routePerPop) == 1, number of routes is identical for each population
#' @param nYears Number of years surveys were conducted
#' @param alphaPop Vector of length 1 or nPops containing the log expected number of individuals counted at each route for each population. If length(alphaPop) == 1, expected counts are identical for each population
#' @param beta Coefficient of linear year effect (default = 0)
#' @param sdRoute Standard deviation of random route-level variation
#' @param sdYear Standard deviation of random year-level variation
#'
#' @return \code{simCountData} returns a list containing:
#' \describe{
#' \item{\code{nPops}}{Number of populations/regions.}
#' \item{\code{nRoutes}}{Total number of routes.}
#' \item{\code{nYears}}{Number of years.}
#' \item{\code{routePerPop}}{Number of routes per population.}
#' \item{\code{year}}{Vector of length nYears with standardized year values.}
#' \item{\code{pop}}{Vector of length nRoutes indicating the population/region in which each route is located.}
#' \item{\code{alphaPop}}{log expected count for each populations.}
#' \item{\code{epsRoute}}{realized deviation from alphaPop for each route.}
#' \item{\code{epsYear}}{realized deviation from alphaPop for each year.}
#' \item{\code{beta}}{linear year effect.}
#' \item{\code{sdRoute}}{standard deviation of random route-level variation.}
#' \item{\code{sdYear}}{standard deviation of random year-level variation.}
#' \item{\code{expectedCount}}{nYears by nRoutes matrix containing deterministic expected counts.}
#' \item{\code{C}}{nYears by nRoutes matrix containing observed counts.}
#' }
#'
#'
#' @export
#' @example inst/examples/simCountExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
#'
#' Link, W. A. and J. R. Sauer. 2002. A hierarchical analysis of population
#' change with application to Cerulean Warblers. Ecology 83: 2832-2840.
simCountData <- function (nPops, routePerPop, nYears, alphaPop, beta = 0,
                          sdRoute, sdYear){
  # Total routes and the population index for each route
  if (length(routePerPop) == 1) {
    nRoutes <- nPops * routePerPop
    pop <- gl(nPops, routePerPop, nRoutes)
  } else {
    nRoutes <- sum(routePerPop)
    pop <- as.factor(rep(seq_len(nPops), routePerPop))
  }
  # Recycle a scalar log expected count across populations
  if (length(alphaPop) == 1) {
    alphaPop <- rep(alphaPop, nPops)
  }
  # Data structures for counts and log(lambda): nYears x nRoutes
  C <- log.expectedCount <- array(NA, dim = c(nYears, nRoutes))
  # Standardized year covariate, roughly on [-1, 1]
  year <- 1:nYears
  yr <- (year - (nYears/2))/(nYears/2)
  # Random route-level and year-level effects
  epsRoute <- rnorm(n = nRoutes, mean = 0, sd = sdRoute)
  epsYear <- rnorm(n = nYears, mean = 0, sd = sdYear)
  # Systematic part of the GLM plus Poisson observation noise, per route
  for (i in seq_len(nRoutes)) {
    log.expectedCount[, i] <- alphaPop[pop[i]] + beta*yr + epsRoute[i] +
      epsYear
    C[, i] <- rpois(n = nYears, lambda = exp(log.expectedCount[, i]))
  }
  # BUG FIX: return the full nYears x nRoutes matrix of expected counts;
  # previously only the last route's expected counts (a vector) were returned.
  expectedCount <- exp(log.expectedCount)
  return(list(nPops = nPops, nRoutes = nRoutes, nYears = nYears,
              routePerPop = routePerPop, year = yr, pop = pop,
              alphaPop = alphaPop, epsRoute = epsRoute,
              epsYear = epsYear, beta = beta,
              sdRoute = sdRoute, sdYear = sdYear,
              expectedCount = expectedCount, C = C))
}
###############################################################################
# Estimates population-level relative abundance from count data
###############################################################################
#' Estimates population-level relative abundance from count data
#'
#' Uses a Bayesian hierarchical model to estimate relative abundance of regional
#' populations from count-based data (e.g., Breeding Bird Survey)
#'
#' @param count_data List containing the following elements:
#' \describe{
#' \item{\code{C}}{nYears by nRoutes matrix containing the observed number of individuals counted at each route in each year.}
#' \item{\code{pop}}{Vector of length nRoutes indicating the population/region in which each route is located.}
#' \item{\code{routePerPop}}{Vector of length 1 or nPops containing the number of routes (i.e. counts) per population. If length(routePerPop) == 1, number of routes is identical for each population.}
#' }
#' @param ni Number of MCMC iterations. Default = 20000.
#' @param nt Thinning rate. Default = 5.
#' @param nb Number of MCMC iterations to discard as burn-in. Default = 5000.
#' @param nc Number of chains. Default = 3.
#'
#' @return \code{modelCountDataJAGS} returns an mcmc object containing posterior samples for each monitored parameter.
#
#'
#' @export
#' @example inst/examples/simCountExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
#'
#' Link, W. A. and J. R. Sauer. 2002. A hierarchical analysis of population
#' change with application to Cerulean Warblers. Ecology 83: 2832-2840.
modelCountDataJAGS <- function (count_data, ni = 20000, nt = 5, nb = 5000, nc = 3) {
  # Dimensions derived from the count matrix (nYears x nRoutes)
  nPops <- length(unique(count_data$pop))
  nRoutes <- dim(count_data$C)[2]
  nYears <- dim(count_data$C)[1]
  # Expand a scalar routePerPop to one entry per population
  if (length(count_data$routePerPop) == 1) {
    routePerPop <- rep(count_data$routePerPop, nPops)
  } else {
    routePerPop <- count_data$routePerPop
  }
  # Random initial values for each MCMC chain
  jags.inits <- function() {
    list(mu = runif(1, 0, 2), alpha = runif(nPops, -1, 1),
         beta1 = runif(1, -1, 1),
         tau.alpha = runif(1, 0, 0.1), tau.noise = runif(1, 0, 0.1),
         tau.rte = runif(1, 0, 0.1), route = runif(nRoutes, -1, 1))
  }
  # Parameters to monitor
  params <- c("mu", "alpha", "beta1", "sd.alpha", "sd.rte", "sd.noise",
              "totalN", "popN", "relN")
  # Data passed to JAGS; year is rescaled to [0, 1]
  jags.data <- list(C = count_data$C, nPops = nPops, nRoutes = nRoutes,
                    routePerPop = routePerPop,
                    year = seq(from = 0, to = 1, length.out = nYears),
                    nYears = nYears, pop = count_data$pop)
  # system.file() is the canonical way to locate files shipped with a package
  model.file <- system.file("JAGS", "sim_Poisson2.txt",
                            package = "MigConnectivity")
  out <- R2jags::jags(data = jags.data, inits = jags.inits, params,
                      model.file,
                      n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb,
                      progress.bar = 'none')
  return(coda::as.mcmc(out))
}
| /R/simConnectivity.R | no_license | eriqande/MigConnectivity | R | false | false | 14,856 | r | ###############################################################################
# Simulates position of birds by individual, season, year, and month.
# Incorporates migratory connectivity, movement within season, and dispersal
# between seasons.
# Does not incorporate births or deaths.
###############################################################################
#' Simulates position of birds by individual, season, year, and month.
#'
#' Incorporates migratory connectivity, movement within season, and dispersal
#' between seasons. Does not incorporate births or deaths.
#'
#' @param breedingAbund Vector with number of birds to simulate starting at
#' each breeding site.
#' @param breedingDist Distances between the breeding sites. Symmetric matrix.
#' @param winteringDist Distances between the wintering sites. Symmetric
#' matrix.
#' @param psi Transition probabilities between B origin and W target sites.
#' A matrix with B rows and W columns where rows sum to 1.
#' @param nYears Number of years to simulate movement.
#' @param nMonths Number of months per breeding and wintering season.
#' @param winMoveRate Within winter movement rate. Defaults to 0 (no
#' movement).
#' @param sumMoveRate Within summer movement rate. Defaults to 0 (no
#' movement).
#' @param winDispRate Between winter dispersal rate. Defaults to 0 (no
#' dispersal).
#' @param sumDispRate Between summer dispersal rate. Defaults to 0 (no
#' dispersal). Setting this to a value above 0 is equivalent to setting
#' both natal and breeding dispersal to that same value.
#' @param natalDispRate Natal dispersal rate. Controls the movement of
#' animals from their birthplace on their first return to the breeding
#' grounds. Defaults to 0 (return to the birthplace for all).
#' @param breedDispRate Breeding dispersal rate. Controls the movement of
#' animals between breeding sites on spring migrations after the first.
#' Defaults to 0 (return to the same breeding site each year).
#' @param verbose If set to a value > 0, informs the user on the passage
#' of years and seasons during the simulation. Defaults to 0 (no output
#' during simulation).
#'
#' @return \code{simMove} returns a list with elements:
#' \describe{
#' \item{\code{animalLoc}}{\code{sum(breedingAbund)} (number of animals)
#' by 2 by \code{nYears} by \code{nMonths} array with the simulated
#' locations of each animal in each month of each season (summer or
#' winter) of each year. Values of cells are 1...B (first column) and
#' 1...W (second column) where B is the number of breeding sites and W is
#' the number of wintering sites.}
#' \item{\code{breedDispMat}}{B by B matrix of probabilities of breeding
#' dispersal between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{natalDispMat}}{B by B matrix of probabilities of natal
#' dispersal between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{sumMoveMat}}{B by B matrix of probabilities of within season
#' movement between each pair of 1...B breeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{winDispMat}}{W by W matrix of probabilities of dispersal
#' between each pair of 1...W nonbreeding sites. Direction is from
#' row to column, so each row sums to 1.}
#' \item{\code{winMoveMat}}{W by W matrix of probabilities of within season
#' movement between each pair of 1...W nonbreeding sites. Direction is
#' from row to column, so each row sums to 1.}
#' }
#'
#' @export
#' @example inst/examples/simMoveExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
simMove <- function(breedingAbund, breedingDist, winteringDist, psi,
                    nYears = 10, nMonths = 3, winMoveRate = 0,
                    sumMoveRate = 0, winDispRate = 0, sumDispRate = 0,
                    natalDispRate = 0, breedDispRate = 0, verbose = 0)
{
  nSeasons <- 2
  nBreeding <- length(breedingAbund)
  nWintering <- nrow(winteringDist)
  nAnimals <- sum(breedingAbund)  # hoisted: total animals, used in every loop
  # Input validation
  if (sumDispRate > 0 && (natalDispRate > 0 || breedDispRate > 0) &&
      (sumDispRate != natalDispRate || sumDispRate != breedDispRate))
    stop("Can't specify summer dispersal separately from breeding or natal dispersal")
  # BUG FIX: winDispRate was missing from the negativity check
  if (sumDispRate < 0 || natalDispRate < 0 || breedDispRate < 0 ||
      sumMoveRate < 0 || winMoveRate < 0 || winDispRate < 0)
    stop("Can't specify negative movement or dispersal rates")
  if (nrow(psi) != nBreeding || ncol(psi) != nWintering)
    stop("psi must have one row per breeding site and one column per wintering site")
  # Turn rate terms into transition-probability matrices (NULL when rate == 0)
  winMoveMat <- if (winMoveRate > 0)
    mlogitMat(1 / sqrt(winMoveRate), winteringDist) else NULL
  sumMoveMat <- if (sumMoveRate > 0)
    mlogitMat(1 / sqrt(sumMoveRate), breedingDist) else NULL
  winDispMat <- if (winDispRate > 0)
    mlogitMat(1 / sqrt(winDispRate), winteringDist) else NULL
  # A single summer dispersal rate applies to both natal and breeding dispersal
  if (sumDispRate > 0) {
    natalDispRate <- sumDispRate
    breedDispRate <- sumDispRate
  }
  natalDispMat <- if (natalDispRate > 0)
    mlogitMat(1 / sqrt(natalDispRate), breedingDist) else NULL
  breedDispMat <- if (breedDispRate > 0)
    mlogitMat(1 / sqrt(breedDispRate), breedingDist) else NULL
  # Draw one destination index from a row of transition probabilities
  drawDest <- function(probs) which(rmultinom(1, 1, probs) > 0)
  # Storage of locations: animal x season x year x month
  animalLoc <- array(NA, c(nAnimals, nSeasons, nYears, nMonths))
  animalLoc[, 1, 1, 1] <- rep(1:nBreeding, breedingAbund)
  # Run simulation
  for (y in 1:nYears) {
    if (verbose > 0)
      cat("Year", y, "Summer, ")
    # Within-summer movement for months 2..nMonths
    if (nMonths > 1)
      for (sm in 2:nMonths) {
        if (sumMoveRate == 0)
          animalLoc[, 1, y, sm] <- animalLoc[, 1, y, sm - 1]
        else
          for (i in seq_len(nAnimals))
            animalLoc[i, 1, y, sm] <- drawDest(sumMoveMat[animalLoc[i, 1, y, sm - 1], ])
      }
    if (verbose > 0)
      cat("Fall, ")
    # Fall migration: first year follows psi; later years use winter dispersal
    if (y == 1) {
      for (i in seq_len(nAnimals))
        animalLoc[i, 2, y, 1] <- drawDest(psi[animalLoc[i, 1, y, 1], ])
    }
    else if (winDispRate == 0)
      animalLoc[, 2, y, 1] <- animalLoc[, 2, y - 1, 1]
    else
      for (i in seq_len(nAnimals))
        animalLoc[i, 2, y, 1] <- drawDest(winDispMat[animalLoc[i, 2, y - 1, 1], ])
    if (verbose > 0)
      cat("Winter, ")
    # Within-winter movement for months 2..nMonths
    if (nMonths > 1)
      for (wm in 2:nMonths) {
        if (winMoveRate == 0)
          animalLoc[, 2, y, wm] <- animalLoc[, 2, y, wm - 1]
        else
          for (i in seq_len(nAnimals))
            animalLoc[i, 2, y, wm] <- drawDest(winMoveMat[animalLoc[i, 2, y, wm - 1], ])
      }
    if (verbose > 0)
      cat("Spring\n")
    # Spring migration: natal dispersal after first winter,
    # breeding dispersal in subsequent years (&& for scalar condition)
    if (y == 1 && nYears > 1) {
      if (natalDispRate == 0)
        animalLoc[, 1, y + 1, 1] <- animalLoc[, 1, y, 1]
      else
        for (i in seq_len(nAnimals))
          animalLoc[i, 1, y + 1, 1] <- drawDest(natalDispMat[animalLoc[i, 1, y, 1], ])
    }
    else if (y < nYears) {
      if (breedDispRate == 0)
        animalLoc[, 1, y + 1, 1] <- animalLoc[, 1, y, 1]
      else
        for (i in seq_len(nAnimals))
          animalLoc[i, 1, y + 1, 1] <- drawDest(breedDispMat[animalLoc[i, 1, y, 1], ])
    }
  }
  return(list(animalLoc = animalLoc, natalDispMat = natalDispMat,
              breedDispMat = breedDispMat, sumMoveMat = sumMoveMat,
              winDispMat = winDispMat, winMoveMat = winMoveMat))
}
###############################################################################
# Function for generating simulated count data
###############################################################################
#' Simulates Breeding Bird Survey-style count data
#'
#'
#' @param nPops Number of populations/regions
#' @param routePerPop Vector of length 1 or nPops containing the number of routes (i.e. counts) per population. If length(routePerPop) == 1, number of routes is identical for each population
#' @param nYears Number of years surveys were conducted
#' @param alphaPop Vector of length 1 or nPops containing the log expected number of individuals counted at each route for each population. If length(alphaPop) == 1, expected counts are identical for each population
#' @param beta Coefficient of linear year effect (default = 0)
#' @param sdRoute Standard deviation of random route-level variation
#' @param sdYear Standard deviation of random year-level variation
#'
#' @return \code{simCountData} returns a list containing:
#' \describe{
#' \item{\code{nPops}}{Number of populations/regions.}
#' \item{\code{nRoutes}}{Total number of routes.}
#' \item{\code{nYears}}{Number of years.}
#' \item{\code{routePerPop}}{Number of routes per population.}
#' \item{\code{year}}{Vector of length nYears with standardized year values.}
#' \item{\code{pop}}{Vector of length nRoutes indicating the population/region in which each route is located.}
#' \item{\code{alphaPop}}{log expected count for each populations.}
#' \item{\code{epsRoute}}{realized deviation from alphaPop for each route.}
#' \item{\code{epsYear}}{realized deviation from alphaPop for each year.}
#' \item{\code{beta}}{linear year effect.}
#' \item{\code{sdRoute}}{standard deviation of random route-level variation.}
#' \item{\code{sdYear}}{standard deviation of random year-level variation.}
#' \item{\code{expectedCount}}{nYears by nRoutes matrix containing deterministic expected counts.}
#' \item{\code{C}}{nYears by nRoutes matrix containing observed counts.}
#' }
#'
#'
#' @export
#' @example inst/examples/simCountExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
#'
#' Link, W. A. and J. R. Sauer. 2002. A hierarchical analysis of population
#' change with application to Cerulean Warblers. Ecology 83: 2832-2840.
simCountData <- function (nPops, routePerPop, nYears, alphaPop, beta = 0,
                          sdRoute, sdYear){
  # Total routes and the population index for each route
  if (length(routePerPop) == 1) {
    nRoutes <- nPops * routePerPop
    pop <- gl(nPops, routePerPop, nRoutes)
  } else {
    nRoutes <- sum(routePerPop)
    pop <- as.factor(rep(seq_len(nPops), routePerPop))
  }
  # Recycle a scalar log expected count across populations
  if (length(alphaPop) == 1) {
    alphaPop <- rep(alphaPop, nPops)
  }
  # Data structures for counts and log(lambda): nYears x nRoutes
  C <- log.expectedCount <- array(NA, dim = c(nYears, nRoutes))
  # Standardized year covariate, roughly on [-1, 1]
  year <- 1:nYears
  yr <- (year - (nYears/2))/(nYears/2)
  # Random route-level and year-level effects
  epsRoute <- rnorm(n = nRoutes, mean = 0, sd = sdRoute)
  epsYear <- rnorm(n = nYears, mean = 0, sd = sdYear)
  # Systematic part of the GLM plus Poisson observation noise, per route
  for (i in seq_len(nRoutes)) {
    log.expectedCount[, i] <- alphaPop[pop[i]] + beta*yr + epsRoute[i] +
      epsYear
    C[, i] <- rpois(n = nYears, lambda = exp(log.expectedCount[, i]))
  }
  # BUG FIX: return the full nYears x nRoutes matrix of expected counts;
  # previously only the last route's expected counts (a vector) were returned.
  expectedCount <- exp(log.expectedCount)
  return(list(nPops = nPops, nRoutes = nRoutes, nYears = nYears,
              routePerPop = routePerPop, year = yr, pop = pop,
              alphaPop = alphaPop, epsRoute = epsRoute,
              epsYear = epsYear, beta = beta,
              sdRoute = sdRoute, sdYear = sdYear,
              expectedCount = expectedCount, C = C))
}
###############################################################################
# Estimates population-level relative abundance from count data
###############################################################################
#' Estimates population-level relative abundance from count data
#'
#' Uses a Bayesian hierarchical model to estimate relative abundance of regional
#' populations from count-based data (e.g., Breeding Bird Survey)
#'
#' @param count_data List containing the following elements:
#' \describe{
#' \item{\code{C}}{nYears by nRoutes matrix containing the observed number of individuals counted at each route in each year.}
#' \item{\code{pop}}{Vector of length nRoutes indicating the population/region in which each route is located.}
#' \item{\code{routePerPop}}{Vector of length 1 or nPops containing the number of routes (i.e. counts) per population. If length(routePerPop) == 1, number of routes is identical for each population.}
#' }
#' @param ni Number of MCMC iterations. Default = 20000.
#' @param nt Thinning rate. Default = 5.
#' @param nb Number of MCMC iterations to discard as burn-in. Default = 5000.
#' @param nc Number of chains. Default = 3.
#'
#' @return \code{modelCountDataJAGS} returns an mcmc object containing posterior samples for each monitored parameter.
#
#'
#' @export
#' @example inst/examples/simCountExamples.R
#' @references
#' Cohen, E. B., J. A. Hostetler, M. T. Hallworth, C. S. Rushing, T. S. Sillett,
#' and P. P. Marra. 2018. Quantifying the strength of migratory connectivity.
#' Methods in Ecology and Evolution 9: 513-524.
#' \href{http://doi.org/10.1111/2041-210X.12916}{doi:10.1111/2041-210X.12916}
#'
#' Link, W. A. and J. R. Sauer. 2002. A hierarchical analysis of population
#' change with application to Cerulean Warblers. Ecology 83: 2832-2840.
modelCountDataJAGS <- function (count_data, ni = 20000, nt = 5, nb = 5000, nc = 3) {
  # Dimensions derived from the count matrix (nYears x nRoutes)
  nPops <- length(unique(count_data$pop))
  nRoutes <- dim(count_data$C)[2]
  nYears <- dim(count_data$C)[1]
  # Expand a scalar routePerPop to one entry per population
  if (length(count_data$routePerPop) == 1) {
    routePerPop <- rep(count_data$routePerPop, nPops)
  } else {
    routePerPop <- count_data$routePerPop
  }
  # Random initial values for each MCMC chain
  jags.inits <- function() {
    list(mu = runif(1, 0, 2), alpha = runif(nPops, -1, 1),
         beta1 = runif(1, -1, 1),
         tau.alpha = runif(1, 0, 0.1), tau.noise = runif(1, 0, 0.1),
         tau.rte = runif(1, 0, 0.1), route = runif(nRoutes, -1, 1))
  }
  # Parameters to monitor
  params <- c("mu", "alpha", "beta1", "sd.alpha", "sd.rte", "sd.noise",
              "totalN", "popN", "relN")
  # Data passed to JAGS; year is rescaled to [0, 1]
  jags.data <- list(C = count_data$C, nPops = nPops, nRoutes = nRoutes,
                    routePerPop = routePerPop,
                    year = seq(from = 0, to = 1, length.out = nYears),
                    nYears = nYears, pop = count_data$pop)
  # system.file() is the canonical way to locate files shipped with a package
  model.file <- system.file("JAGS", "sim_Poisson2.txt",
                            package = "MigConnectivity")
  out <- R2jags::jags(data = jags.data, inits = jags.inits, params,
                      model.file,
                      n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb,
                      progress.bar = 'none')
  return(coda::as.mcmc(out))
}
|
# Example input for gregory_all(): per-plot tree counts and forest cover on
# four planets, planet-level covariate means, and the share of each planet
# covered by each ecological province.
planet_names <- c("Kashyyyk", "Forest Moon of Endor", "Dagobah", "Naboo")

# Plot-level observations: 20 plots, 5 per planet.
planet_plot_data <- data.frame(
  plot_number = 1:20,
  planet = rep(planet_names, each = 5),
  count_of_trees = c(204, 156, 240, 286, 263,
                     112, 167, 131, 25, 145,
                     141, 65, 127, 15, 98,
                     100, 12, 49, 94, 69),
  forest_cover = c(85, 74, 89, 95, 92,
                   70, 73, 69, 11, 68,
                   67, 30, 62, 15, 42,
                   59, 5, 17, 25, 22),
  eco_province = c("forest", "swamp", "forest", "forest", "forest",
                   "forest", "forest", "forest", "grassland", "forest",
                   "forest", "swamp", "swamp", "grassland", "swamp",
                   "forest", "grassland", "grassland", "swamp", "swamp")
)

# Planet-level mean of the forest_cover covariate.
planet_means <- data.frame(
  planet = planet_names,
  forest_cover = c(95, 85, 50, 30)
)

# Proportion of each planet's area falling in each ecological province.
planet_province_prop <- data.frame(
  planet = rep(planet_names, times = c(2, 2, 3, 3)),
  eco_province = c("forest", "swamp",
                   "forest", "grassland",
                   "forest", "grassland", "swamp",
                   "forest", "grassland", "swamp"),
  prop = c(0.8, 0.2,
           0.75, 0.25,
           0.1, 0.1, 0.8,
           0.2, 0.4, 0.4)
)

# Fit the GREG estimator at eco-province resolution, estimating per planet.
x1 <- gregory_all(plot_df = planet_plot_data,
                  resolution = "eco_province",
                  estimation = "planet",
                  pixel_estimation_means = planet_means,
                  proportions = planet_province_prop,
                  formula = count_of_trees ~ forest_cover,
                  prop = "prop")
x1 | /R/examples/gregory_all_example.R | no_license | cran/gregRy | R | false | false | 2,990 | r | #create plot data
planet_plot_data <- data.frame(plot_number = 1:20,
planet = c(rep("Kashyyyk", 5),
rep("Forest Moon of Endor", 5),
rep("Dagobah", 5),
rep("Naboo", 5)),
count_of_trees = c(204, 156, 240, 286, 263,
112, 167, 131, 25, 145,
141, 65, 127, 15, 98,
100, 12, 49, 94, 69),
forest_cover = c(85, 74, 89, 95, 92,
70, 73, 69, 11, 68,
67, 30, 62, 15, 42,
59, 5, 17, 25, 22),
eco_province = c("forest", "swamp", "forest", "forest", "forest",
"forest", "forest", "forest", "grassland", "forest",
"forest", "swamp", "swamp", "grassland", "swamp",
"forest", "grassland", "grassland",
"swamp", "swamp"))
#create mean data
planet_means <- data.frame(planet = c("Kashyyyk",
"Forest Moon of Endor",
"Dagobah",
"Naboo"),
forest_cover = c(95,
85,
50,
30))
#create proportion data
planet_province_prop <- data.frame(planet = c(rep("Kashyyyk", 2),
rep("Forest Moon of Endor", 2),
rep("Dagobah", 3),
rep("Naboo", 3)),
eco_province = c("forest", "swamp",
"forest", "grassland",
"forest", "grassland", "swamp",
"forest", "grassland", "swamp"),
prop = c(0.8, 0.2,
0.75, 0.25,
0.1, 0.1, 0.8,
0.2, 0.4, 0.4))
x1 <- gregory_all(plot_df = planet_plot_data,
resolution = "eco_province",
estimation = "planet",
pixel_estimation_means = planet_means,
proportions = planet_province_prop,
formula = count_of_trees ~ forest_cover,
prop = "prop")
x1 |
# Exploratory scratch script: fetch Korean tweets with rtweet, store them in a
# local MongoDB collection via mongolite, then experiment with deduplication,
# row hashing (digest), tidyr reshaping, dplyr window functions, and tidytext
# tokenisation.  Several Korean comments/strings below are mojibake-damaged by
# a later re-encoding; the runtime string literals are left exactly as-is.
library(mongolite)
library(rtweet)
library(lubridate)
library(tidyverse)
library(lobstr)
# Grab up to 1000 recent non-retweet tweets matching the (mojibaked) keyword.
rt <- search_tweets(
  "๋ง์คํฌ", n = 1000, include_rts = FALSE
)
rt %>% head
nrow(rt)
Sys.getenv()
# install.packages("mongolite")
# con$insert(rt)
# con$find()
# con$drop()
# Open (or lazily create) the mask_tweets collection in the local "test" db.
mask_tweets = mongo("mask_tweets",
                    db = "test",
                    url = "mongodb://localhost:27017")
Sys.getenv("MONGODB_CRUD_ID")
mask_tweets$insert(rt)
mask_tweets$count()
# rm(mask_tweets)
# Round-trip: read everything back and insert it again (creates duplicates,
# which the rn/group_by experiments below then look for).
aa = mask_tweets$find()
mask_tweets$insert(aa)
# Extract only the fields we need.  (Original comment was Korean, mojibaked.)
aa = mask_tweets$find(fields = '{ "user_id" : 1, "status_id" : 1, "created_at" : 1, "screen_name" : 1, "text" : 1}') %>% as_tibble()
aa
# Number the copies of each status_id (rn = 1, 2, ... within each status).
aa = aa %>% group_by(status_id) %>%
  mutate(rn = row_number()) %>%
  ungroup()
# Which status_ids occur more than once, i.e. were inserted twice?
aa %>% group_by(status_id) %>%
  summarise(cnt = n()) %>%
  filter(cnt > 1)
aa %>% filter(status_id == "1237259520780283904")
aa %>% filter(rn == 2)
aa %>% group_by(status_id) %>%
  mutate(rn = row_number()) %>%
  filter(rn == 2) %>%
  select(user_id, status_id, created_at)
# setdiff experiments (setdiff of a tibble with itself is empty).
aa %>% setdiff(aa %>% top_n(100000))
aa %>% top_n(100000)%>% setdiff(aa)
aa %>% setdiff(aa)
aa = mask_tweets$find(fields = '{ "user_id" : 1, "status_id" : 1, "created_at" : 1, "_id": 0}') %>% as_tibble()
lobstr::obj_size(aa)
aa
# Row-fingerprint experiments.
# NOTE(review): in the first pipeline digest() receives the whole `all`
# column, so one hash is recycled to every row; the map_chr() variant below
# hashes each row separately — presumably the intended behaviour.
aa %>% unite(all) %>%
  mutate(all_sha = digest::digest(object=all, algo="md5"))
aa %>% unite(all) %>%
  mutate(all_sha = map_chr(all, digest::digest, algo="md5"))
digest::digest(object="1222854300444921856_1237387137835651076_2020-03-10 23:37:37", algo="sha256")
data = c("1234", "5678")
data = c("1234")
data = "1234"
digest::digest(object=data, algo="sha256")
# Links and commented-out KoNLP (Korean NLP) experiments.  The bare multibyte
# line below is mojibake from a Korean sample sentence that was split by a
# re-encoding — kept byte-identical.
# install.packages('devtools')
# devtools::install_github('haven-jeon/KoNLP')
# https://brunch.co.kr/@mapthecity/9
# https://github.com/SKTBrain/KoBERT#why
# library(KoNLP)
#
# str = "ํ๊ต์ข
์ด๋ก๋ก๋ก์ด์๋ชจ์ด์์ ์๋์ดํ๊ต์์์ฐ๋ฆฌ๋ฅผ๊ธฐ๋ค๋ฆฌ์ ๋ค."
# str = aa$text[1]
# str
# useSejongDic()
#
#
# extractNoun(aa$text[1])
#
# MorphAnalyzer(str)
#
# SimplePos09(str)
#
# SimplePos22(str)
# tidyr/dplyr practice on a small simulated tibble.
df <- tibble(
  grp = rep(1:2, each = 5),
  x = c(rnorm(5, -0.25, 1), rnorm(5, 0, 1.5)),
  y = c(rnorm(5, 0.25, 1), rnorm(5, 0, 0.5)),
)
df
df %>%
  group_by(grp) %>%
  summarise(cnt = n())
# rng as a list-column vs. as a "min/max" string that is split back apart.
df2 = df %>%
  group_by(grp) %>%
  summarise(rng = list(range(x)), cnt = n())
df3 = df %>%
  group_by(grp) %>%
  summarise(rng = str_c(range(x), collapse = "/"), remarks = "min/max")
df3 %>% separate_rows(rng, remarks, sep = "/")
relig_income %>%
  pivot_longer(-religion, names_to = "income", values_to = "count")
# Compare two near-identical "family" tables cell by cell after pivoting long.
family1 <- tribble(
  ~family, ~dob_child1, ~dob_child2, ~gender_child1, ~gender_child2,
  1L, "1998-11-26", "2000-01-29", 1L, 2L,
  2L, "1996-06-22", NA, 2L, NA,
  3L, "2002-07-11", "2004-04-05", 2L, 2L,
  4L, "2004-10-10", "2009-08-27", 1L, 1L,
  5L, "2000-12-05", "2005-02-28", 2L, 1L,
)
family2 <- tribble(
  ~family, ~dob_child1, ~dob_child2, ~gender_child1, ~gender_child2,
  1L, "1998-11-26", "2000-01-29", 1L, 2L,
  2L, "1996-06-22", NA, 2L, NA,
  3L, "2001-07-11", "2004-04-05", 3L, 2L,
  4L, "2004-10-10", "2009-08-27", 1L, 1L,
  5L, "2000-12-05", "2005-02-28", 2L, 1L,
)
family1_longer = family1 %>% mutate_all(str_replace_na) %>%
  pivot_longer(cols = -family, names_to = "column", values_to = "dev_value", values_drop_na = F)
family1_longer
family2_longer = family2 %>% mutate_all(str_replace_na) %>%
  pivot_longer(cols = -family, names_to = "column", values_to = "prd_value")
# Rows where the two tables disagree.
family1_longer %>% bind_cols(family2_longer[,3]) %>%
  mutate(same = dev_value == prd_value) %>%
  mutate(nums = abs(as.numeric(dev_value) - as.numeric(prd_value))) %>%
  filter(same == F)
# ggplot2 experiment: a 1-row numeric "scale" strip.
# NOTE(review): theme_dviz_grid() comes from the dviz.supporting package,
# which is not loaded above — confirm before running this chunk.
df <- data.frame(x = c(1:4))
scale_num <- ggplot(df, aes(x)) +
  geom_point(size = 3, color = "#0072B2", y = 1) +
  scale_y_continuous(limits = c(0.8, 1.2), expand = c(0, 0), breaks = 1, label = "position ") +
  scale_x_continuous(limits = c(.7, 4.4), breaks = 1:5, labels = c("1", "2", "3", "4", "5"), name = NULL, position = "top") +
  theme_dviz_grid() +
  theme(axis.ticks.length = grid::unit(0, "pt"),
        axis.text = element_text(size = 14),
        axis.title.y = element_blank(),
        axis.ticks.y = element_blank())
# dplyr window/cumulative function tour over one small vector.
c(1, 3, 6, 8, 6, 5, 3, 1, 5, 2)
a = tibble(x = c(1, 3, 6, 8, 6, 5, 3, 1, 5, 2))
a %>% mutate(cummax_x = cummax(x),
             cummin_x = cummin(x),
             cumsum_x = cumsum(x),
             cumsum_x2 = cumsum(x %in% c(3,6,9)),
             cume_dist_x = cume_dist(x),
             dense_rank_x = dense_rank(x),
             min_rank_x = min_rank(x),
             ntile_x = ntile(x, 3),
             percent_rank_x = percent_rank(x),
             lead_x = lead(x),
             lag_x = lag(x)
             )
# Tokenise tweet text with tidytext and look at word frequencies.
aa = mask_tweets$find(fields = '{ "_id":0, "text" : 1}') %>% as_tibble()
aa
library(tidytext)
install.packages("tidytext")
# Drop tweets matching the (mojibaked) Korean filter pattern; the string
# literal spans two lines exactly as in the original.
aa1 = aa %>% mutate(line = row_number()) %>% select(line, text) %>%
  filter(!str_detect(text, "์ฌ๋ชจ๋|๋ฏธ๋
"))
aa1
aa2 = aa1 %>% unnest_tokens(word, text)
aa2 %>% count(word) %>% arrange(desc(n))
# Word frequencies plus the first / all line numbers each word appears on.
aa3 = aa2 %>% group_by(word) %>% summarise(n = n(), min_line = min(line)) %>% arrange(desc(n))
aa3 %>% head(50)
aa4 = aa2 %>% group_by(word) %>% summarise(n = n(), min_line = list(line)) %>% arrange(desc(n))
aa4 %>% head %>% view
aa1 %>% filter(line == 31)
| /R/docs/mongodb/mongolite_connect.R | no_license | emflant/sample | R | false | false | 5,593 | r | library(mongolite)
library(rtweet)
library(lubridate)
library(tidyverse)
library(lobstr)
rt <- search_tweets(
"๋ง์คํฌ", n = 1000, include_rts = FALSE
)
rt %>% head
nrow(rt)
Sys.getenv()
# install.packages("mongolite")
# con$insert(rt)
# con$find()
# con$drop()
mask_tweets = mongo("mask_tweets",
db = "test",
url = "mongodb://localhost:27017")
Sys.getenv("MONGODB_CRUD_ID")
mask_tweets$insert(rt)
mask_tweets$count()
# rm(mask_tweets)
aa = mask_tweets$find()
mask_tweets$insert(aa)
# ํ์ํ ํ๋๋ง ์ถ์ถ.
aa = mask_tweets$find(fields = '{ "user_id" : 1, "status_id" : 1, "created_at" : 1, "screen_name" : 1, "text" : 1}') %>% as_tibble()
aa
aa = aa %>% group_by(status_id) %>%
mutate(rn = row_number()) %>%
ungroup()
aa %>% group_by(status_id) %>%
summarise(cnt = n()) %>%
filter(cnt > 1)
aa %>% filter(status_id == "1237259520780283904")
aa %>% filter(rn == 2)
aa %>% group_by(status_id) %>%
mutate(rn = row_number()) %>%
filter(rn == 2) %>%
select(user_id, status_id, created_at)
aa %>% setdiff(aa %>% top_n(100000))
aa %>% top_n(100000)%>% setdiff(aa)
aa %>% setdiff(aa)
aa = mask_tweets$find(fields = '{ "user_id" : 1, "status_id" : 1, "created_at" : 1, "_id": 0}') %>% as_tibble()
lobstr::obj_size(aa)
aa
aa %>% unite(all) %>%
mutate(all_sha = digest::digest(object=all, algo="md5"))
aa %>% unite(all) %>%
mutate(all_sha = map_chr(all, digest::digest, algo="md5"))
digest::digest(object="1222854300444921856_1237387137835651076_2020-03-10 23:37:37", algo="sha256")
data = c("1234", "5678")
data = c("1234")
data = "1234"
digest::digest(object=data, algo="sha256")
# install.packages('devtools')
# devtools::install_github('haven-jeon/KoNLP')
# https://brunch.co.kr/@mapthecity/9
# https://github.com/SKTBrain/KoBERT#why
# library(KoNLP)
#
# str = "ํ๊ต์ข
์ด๋ก๋ก๋ก์ด์๋ชจ์ด์์ ์๋์ดํ๊ต์์์ฐ๋ฆฌ๋ฅผ๊ธฐ๋ค๋ฆฌ์ ๋ค."
# str = aa$text[1]
# str
# useSejongDic()
#
#
# extractNoun(aa$text[1])
#
# MorphAnalyzer(str)
#
# SimplePos09(str)
#
# SimplePos22(str)
df <- tibble(
grp = rep(1:2, each = 5),
x = c(rnorm(5, -0.25, 1), rnorm(5, 0, 1.5)),
y = c(rnorm(5, 0.25, 1), rnorm(5, 0, 0.5)),
)
df
df %>%
group_by(grp) %>%
summarise(cnt = n())
df2 = df %>%
group_by(grp) %>%
summarise(rng = list(range(x)), cnt = n())
df3 = df %>%
group_by(grp) %>%
summarise(rng = str_c(range(x), collapse = "/"), remarks = "min/max")
df3 %>% separate_rows(rng, remarks, sep = "/")
relig_income %>%
pivot_longer(-religion, names_to = "income", values_to = "count")
family1 <- tribble(
~family, ~dob_child1, ~dob_child2, ~gender_child1, ~gender_child2,
1L, "1998-11-26", "2000-01-29", 1L, 2L,
2L, "1996-06-22", NA, 2L, NA,
3L, "2002-07-11", "2004-04-05", 2L, 2L,
4L, "2004-10-10", "2009-08-27", 1L, 1L,
5L, "2000-12-05", "2005-02-28", 2L, 1L,
)
family2 <- tribble(
~family, ~dob_child1, ~dob_child2, ~gender_child1, ~gender_child2,
1L, "1998-11-26", "2000-01-29", 1L, 2L,
2L, "1996-06-22", NA, 2L, NA,
3L, "2001-07-11", "2004-04-05", 3L, 2L,
4L, "2004-10-10", "2009-08-27", 1L, 1L,
5L, "2000-12-05", "2005-02-28", 2L, 1L,
)
family1_longer = family1 %>% mutate_all(str_replace_na) %>%
pivot_longer(cols = -family, names_to = "column", values_to = "dev_value", values_drop_na = F)
family1_longer
family2_longer = family2 %>% mutate_all(str_replace_na) %>%
pivot_longer(cols = -family, names_to = "column", values_to = "prd_value")
family1_longer %>% bind_cols(family2_longer[,3]) %>%
mutate(same = dev_value == prd_value) %>%
mutate(nums = abs(as.numeric(dev_value) - as.numeric(prd_value))) %>%
filter(same == F)
df <- data.frame(x = c(1:4))
scale_num <- ggplot(df, aes(x)) +
geom_point(size = 3, color = "#0072B2", y = 1) +
scale_y_continuous(limits = c(0.8, 1.2), expand = c(0, 0), breaks = 1, label = "position ") +
scale_x_continuous(limits = c(.7, 4.4), breaks = 1:5, labels = c("1", "2", "3", "4", "5"), name = NULL, position = "top") +
theme_dviz_grid() +
theme(axis.ticks.length = grid::unit(0, "pt"),
axis.text = element_text(size = 14),
axis.title.y = element_blank(),
axis.ticks.y = element_blank())
c(1, 3, 6, 8, 6, 5, 3, 1, 5, 2)
a = tibble(x = c(1, 3, 6, 8, 6, 5, 3, 1, 5, 2))
a %>% mutate(cummax_x = cummax(x),
cummin_x = cummin(x),
cumsum_x = cumsum(x),
cumsum_x2 = cumsum(x %in% c(3,6,9)),
cume_dist_x = cume_dist(x),
dense_rank_x = dense_rank(x),
min_rank_x = min_rank(x),
ntile_x = ntile(x, 3),
percent_rank_x = percent_rank(x),
lead_x = lead(x),
lag_x = lag(x)
)
aa = mask_tweets$find(fields = '{ "_id":0, "text" : 1}') %>% as_tibble()
aa
library(tidytext)
install.packages("tidytext")
aa1 = aa %>% mutate(line = row_number()) %>% select(line, text) %>%
filter(!str_detect(text, "์ฌ๋ชจ๋|๋ฏธ๋
"))
aa1
aa2 = aa1 %>% unnest_tokens(word, text)
aa2 %>% count(word) %>% arrange(desc(n))
aa3 = aa2 %>% group_by(word) %>% summarise(n = n(), min_line = min(line)) %>% arrange(desc(n))
aa3 %>% head(50)
aa4 = aa2 %>% group_by(word) %>% summarise(n = n(), min_line = list(line)) %>% arrange(desc(n))
aa4 %>% head %>% view
aa1 %>% filter(line == 31)
|
# Overflow-safe BG/NBD P(X(t.start, t.end) = x).
#
# Probability of observing exactly x transactions in (t.start, t.end] under
# the BG/NBD model with params = c(r, alpha, a, b).  All gamma-ratio terms
# are evaluated in log space (lgamma/lbeta/lfactorial), so the result stays
# finite for x > 170, where gamma(x + 1) overflows to Inf.
#
# Fixes vs. the previous version:
#   * summation.term used ifelse(x < 170, ...) — a *vectorised* condition
#     inside a scalar per-i assignment, which warned and silently took the
#     branch chosen by x[1] for every i.  The log-space form is exact for
#     all x and needs no branching.
#   * the large-x fallback 1/(beta(r, x) * (x + 1)) differed from
#     gamma(r + x)/(gamma(r) * x!) by a factor of x/(x + 1).
bgnbd.pmf.General.fixed <- function (params, t.start, t.end, x) {
  max.length <- max(length(t.start), length(t.end), length(x))
  # Warn (as BTYD does) when the inputs do not recycle evenly.
  if (max.length %% length(t.start))
    warning("Maximum vector length not a multiple of the length of t.start")
  if (max.length %% length(t.end))
    warning("Maximum vector length not a multiple of the length of t.end")
  if (max.length %% length(x))
    warning("Maximum vector length not a multiple of the length of x")
  dc.check.model.params(c("r", "alpha", "a", "b"), params,
                        "bgnbd.pmf.General")
  if (any(t.start < 0) || !is.numeric(t.start))
    stop("t.start must be numeric and may not contain negative numbers.")
  if (any(t.end < 0) || !is.numeric(t.end))
    stop("t.end must be numeric and may not contain negative numbers.")
  if (any(x < 0) || !is.numeric(x))
    stop("x must be numeric and may not contain negative numbers.")
  # Recycle every input to the common length.
  t.start <- rep(t.start, length.out = max.length)
  t.end <- rep(t.end, length.out = max.length)
  x <- rep(x, length.out = max.length)
  if (any(t.start > t.end)) {
    stop("Error in bgnbd.pmf.General: t.start > t.end.")
  }
  r <- params[1]
  alpha <- params[2]
  a <- params[3]
  b <- params[4]
  t <- t.end - t.start
  # term1: customer stays alive through all x purchases.  The gamma/beta
  # ratios are computed via exp(l...) to avoid overflow; the power terms are
  # kept outside the exp so the 0^0 = 1 convention holds for x = 0, t = 0.
  term1 <- exp(lbeta(a, b + x) - lbeta(a, b) +
                 lgamma(r + x) - lgamma(r) - lfactorial(x)) *
    ((alpha/(alpha + t))^r) * ((t/(alpha + t))^x)
  # term3[i]: P(the purchase process makes >= x[i] purchases in t[i]),
  # i.e. 1 minus the sum of the first x[i] negative-binomial probabilities.
  term3 <- rep(0, max.length)
  for (i in seq_len(max.length)) {
    if (x[i] > 0) {
      ii <- 0:(x[i] - 1)
      summation.term <- sum(exp(lgamma(r + ii) - lgamma(r) - lfactorial(ii)) *
                              ((t[i]/(alpha + t[i]))^ii))
      term3[i] <- 1 - (((alpha/(alpha + t[i]))^r) * summation.term)
    }
  }
  # term2: customer drops out right after the x-th purchase (x > 0 only).
  term2 <- as.numeric(x > 0) * beta(a + 1, b + x - 1)/beta(a, b) * term3
  return(term1 + term2)
}
# Overflow-safe BG/NBD probability mass function P(X(t) = x).
#
# Validates its inputs, recycles t and x to a common length, and delegates
# to bgnbd.pmf.General.fixed() with the observation window starting at 0.
bgnbd.pmf.fixed <- function (params, t, x) {
  max.length <- max(length(t), length(x))
  # Warn (as BTYD does) when the inputs do not recycle evenly.
  if (max.length %% length(t) != 0)
    warning("Maximum vector length not a multiple of the length of t")
  if (max.length %% length(x) != 0)
    warning("Maximum vector length not a multiple of the length of x")
  dc.check.model.params(c("r", "alpha", "a", "b"), params,
                        "bgnbd.pmf")
  if (any(t < 0) || !is.numeric(t))
    stop("t must be numeric and may not contain negative numbers.")
  if (any(x < 0) || !is.numeric(x))
    stop("x must be numeric and may not contain negative numbers.")
  return(bgnbd.pmf.General.fixed(params,
                                 0,
                                 rep(t, length.out = max.length),
                                 rep(x, length.out = max.length)))
}
# BG/NBD probability mass function P(X(t) = x) — stock (gamma-space) version.
#
# Same input checks as bgnbd.pmf.fixed(); the actual computation happens in
# bgnbd.pmf.General() over the window (0, t].
bgnbd.pmf <- function (params, t, x) {
  n <- max(length(t), length(x))
  # Warn when t or x does not recycle evenly to the common length.
  if (n %% length(t) != 0)
    warning("Maximum vector length not a multiple of the length of t")
  if (n %% length(x) != 0)
    warning("Maximum vector length not a multiple of the length of x")
  dc.check.model.params(c("r", "alpha", "a", "b"), params,
                        "bgnbd.pmf")
  if (any(t < 0) || !is.numeric(t))
    stop("t must be numeric and may not contain negative numbers.")
  if (any(x < 0) || !is.numeric(x))
    stop("x must be numeric and may not contain negative numbers.")
  t <- rep(t, length.out = n)
  x <- rep(x, length.out = n)
  bgnbd.pmf.General(params, 0, t, x)
}
# Plot actual vs. BG/NBD-expected calibration-period frequencies.
#
# Compares the observed distribution of repeat-transaction counts in cal.cbs
# (columns "x" and "T.cal") against the distribution implied by the fitted
# parameters, pooling all frequencies above `censor` into one "censor+" bin.
# Frequencies above 170 use bgnbd.pmf.fixed() so factorial overflow is avoided.
# Returns the 2 x (censor + 1) actual/expected comparison matrix.
#
# Fixes vs. the previous version:
#   * x.labels was only built for plotZero = TRUE, so plotZero = FALSE failed
#     inside barplot() with "object 'x.labels' not found".
#   * the redundant re-reads x = cal.cbs$x / T.cal = cal.cbs$T.cal broke
#     matrix-typed CBS objects ($ is invalid on a matrix); the tryCatch
#     extraction above already assigned both, so they are dropped.
bgnbd.PlotFrequencyInCalibration.fixed <- function (params, cal.cbs, censor,
                                                    plotZero = TRUE,
                                                    xlab = "Calibration period transactions",
                                                    ylab = "Customers",
                                                    title = "Frequency of Repeat Transactions") {
  tryCatch(x <- cal.cbs[, "x"], error = function(e) stop("Error in bgnbd.PlotFrequencyInCalibration: cal.cbs must have a frequency column labelled \"x\""))
  tryCatch(T.cal <- cal.cbs[, "T.cal"], error = function(e) stop("Error in bgnbd.PlotFrequencyInCalibration: cal.cbs must have a column for length of time observed labelled \"T.cal\""))
  dc.check.model.params(c("r", "alpha", "a", "b"), params,
                        "bgnbd.PlotFrequencyInCalibration")
  if (censor > max(x))
    stop("censor too big (> max freq) in PlotFrequencyInCalibration.")
  # Actual distribution: customers with 0, 1, ..., max(x) repeat transactions,
  # then pool everything above `censor`.
  n.x <- rep(0, max(x) + 1)
  for (ii in unique(x)) {
    n.x[ii + 1] <- sum(ii == x)
  }
  n.x.censor <- sum(n.x[(censor + 1):length(n.x)])
  n.x.actual <- c(n.x[1:censor], n.x.censor)
  # Expected distribution: for each distinct observation length T.cal, weight
  # the model pmf by the number of customers observed that long.
  T.value.counts <- table(T.cal)
  T.values <- as.numeric(names(T.value.counts))
  n.T.values <- length(T.values)
  n.x.expected <- rep(0, length(n.x.actual))
  n.x.expected.all <- rep(0, max(x) + 1)
  for (ii in 0:max(x)) {
    this.x.expected <- 0
    # Skip frequencies where beta(a + 1, b + ii - 1) inside the pmf would be
    # undefined (second shape parameter <= 0).
    if ((params[4] + ii - 1) <= 0) next
    for (T.idx in 1:n.T.values) {
      Tx <- T.values[T.idx]
      if (Tx == 0)
        next
      n.T <- T.value.counts[T.idx]
      # Use the log-space pmf above 170 transactions, where factorial(x)
      # overflows in the stock implementation.
      if (ii > 170) {
        prob.of.this.x.for.this.T <- bgnbd.pmf.fixed(params, Tx, ii)
      } else {
        prob.of.this.x.for.this.T <- bgnbd.pmf(params, Tx, ii)
      }
      this.x.expected <- this.x.expected + n.T * prob.of.this.x.for.this.T
    }
    n.x.expected.all[ii + 1] <- this.x.expected
  }
  n.x.expected[1:censor] <- n.x.expected.all[1:censor]
  n.x.expected[censor + 1] <- sum(n.x.expected.all[(censor + 1):(max(x) + 1)])
  col.names <- paste("freq", 0:censor, sep = ".")
  col.names[censor + 1] <- paste(col.names[censor + 1], "+", sep = "")
  censored.freq.comparison <- rbind(n.x.actual, n.x.expected)
  colnames(censored.freq.comparison) <- col.names
  cfc.plot <- censored.freq.comparison
  if (plotZero == FALSE)
    cfc.plot <- cfc.plot[, -1]
  n.ticks <- ncol(cfc.plot)
  # Build axis labels for both plotZero settings; the last bin is "censor+".
  if (plotZero) {
    x.labels <- 0:(n.ticks - 1)
  } else {
    x.labels <- 1:n.ticks
  }
  x.labels[n.ticks] <- paste(x.labels[n.ticks], "+", sep = "")
  ylim <- c(0, ceiling(max(cfc.plot) * 1.1))
  barplot(cfc.plot, names.arg = x.labels, beside = TRUE, ylim = ylim,
          main = title, xlab = xlab, ylab = ylab, col = 1:2)
  legend("topright", legend = c("Actual", "Model"), col = 1:2,
         lwd = 2)
  return(censored.freq.comparison)
}
| /fun.fixed.R | no_license | Helen-R/shop_cluster | R | false | false | 7,106 | r | bgnbd.pmf.General.fixed <- function (params, t.start, t.end, x) {
max.length = max(length(t.start), length(t.end), length(x))
if (max.length%%length(t.start))
warning("Maximum vector length not a multiple of the length of t.start")
if (max.length%%length(t.end))
warning("Maximum vector length not a multiple of the length of t.end")
if (max.length%%length(x))
warning("Maximum vector length not a multiple of the length of x")
dc.check.model.params(c("r", "alpha", "a", "b"), params,
"bgnbd.pmf.General")
if (any(t.start < 0) || !is.numeric(t.start))
stop("t.start must be numeric and may not contain negative numbers.")
if (any(t.end < 0) || !is.numeric(t.end))
stop("t.end must be numeric and may not contain negative numbers.")
if (any(x < 0) || !is.numeric(x))
stop("x must be numeric and may not contain negative numbers.")
t.start = rep(t.start, length.out = max.length)
t.end = rep(t.end, length.out = max.length)
x = rep(x, length.out = max.length)
if (any(t.start > t.end)) {
stop("Error in bgnbd.pmf.General: t.start > t.end.")
}
r <- params[1]
alpha <- params[2]
a <- params[3]
b <- params[4]
equation.part.0 <- rep(0, max.length)
t = t.end - t.start
term3 = rep(0, max.length)
term1 = ifelse(x < 170, beta(a, b + x)/beta(a, b) * gamma(r + x)/gamma(r)/factorial(x) *
((alpha/(alpha + t))^r) * ((t/(alpha + t))^x),
beta(a, b + x)/beta(a, b) / beta(r, x) / (x + 1) *
((alpha/(alpha + t))^r) * ((t/(alpha + t))^x))
for (i in 1:max.length) {
if (x[i] > 0) {
ii = c(0:(x[i] - 1))
summation.term = ifelse(x < 170,
sum(gamma(r + ii)/gamma(r)/factorial(ii) *
((t[i]/(alpha + t[i]))^ii)),
sum(1 / beta(r, ii) / (ii + 1) *
((t[i]/(alpha + t[i]))^ii)))
term3[i] = 1 - (((alpha/(alpha + t[i]))^r) * summation.term)
}
}
term2 = as.numeric(x > 0) * beta(a + 1, b + x - 1)/beta(a,
b) * term3
return(term1 + term2)
}
bgnbd.pmf.fixed <- function (params, t, x) {
max.length <- max(length(t), length(x))
if (max.length%%length(t))
warning("Maximum vector length not a multiple of the length of t")
if (max.length%%length(x))
warning("Maximum vector length not a multiple of the length of x")
dc.check.model.params(c("r", "alpha", "a", "b"), params,
"bgnbd.pmf")
if (any(t < 0) || !is.numeric(t))
stop("t must be numeric and may not contain negative numbers.")
if (any(x < 0) || !is.numeric(x))
stop("x must be numeric and may not contain negative numbers.")
t <- rep(t, length.out = max.length)
x <- rep(x, length.out = max.length)
return(bgnbd.pmf.General.fixed(params, 0, t, x))
}
bgnbd.pmf <- function (params, t, x) {
max.length <- max(length(t), length(x))
if (max.length%%length(t))
warning("Maximum vector length not a multiple of the length of t")
if (max.length%%length(x))
warning("Maximum vector length not a multiple of the length of x")
dc.check.model.params(c("r", "alpha", "a", "b"), params,
"bgnbd.pmf")
if (any(t < 0) || !is.numeric(t))
stop("t must be numeric and may not contain negative numbers.")
if (any(x < 0) || !is.numeric(x))
stop("x must be numeric and may not contain negative numbers.")
t <- rep(t, length.out = max.length)
x <- rep(x, length.out = max.length)
return(bgnbd.pmf.General(params, 0, t, x))
}
bgnbd.PlotFrequencyInCalibration.fixed <- function (params, cal.cbs, censor,
plotZero = TRUE,
xlab = "Calibration period transactions",
ylab = "Customers",
title = "Frequency of Repeat Transactions") {
tryCatch(x <- cal.cbs[, "x"], error = function(e) stop("Error in bgnbd.PlotFrequencyInCalibration: cal.cbs must have a frequency column labelled \"x\""))
tryCatch(T.cal <- cal.cbs[, "T.cal"], error = function(e) stop("Error in bgnbd.PlotFrequencyInCalibration: cal.cbs must have a column for length of time observed labelled \"T.cal\""))
dc.check.model.params(c("r", "alpha", "a", "b"), params,
"bgnbd.PlotFrequencyInCalibration")
if (censor > max(x))
stop("censor too big (> max freq) in PlotFrequencyInCalibration.")
x = cal.cbs$x
T.cal = cal.cbs$T.cal
n.x <- rep(0, max(x) + 1)
ncusts = nrow(cal.cbs)
for (ii in unique(x)) {
n.x[ii + 1] <- sum(ii == x)
}
n.x.censor <- sum(n.x[(censor + 1):length(n.x)])
n.x.actual <- c(n.x[1:censor], n.x.censor)
T.value.counts <- table(T.cal)
T.values <- as.numeric(names(T.value.counts))
n.T.values <- length(T.values)
n.x.expected <- rep(0, length(n.x.actual))
n.x.expected.all <- rep(0, max(x) + 1)
for (ii in 0:max(x)) {
this.x.expected = 0
if ((params[4]+ii-1) <=0 ) next
for (T.idx in 1:n.T.values) {
Tx = T.values[T.idx]
if (Tx == 0)
next
n.T = T.value.counts[T.idx]
# print(c(ii, Tx))
# flush.console()
if (ii > 170) {
prob.of.this.x.for.this.T <- bgnbd.pmf.fixed(params, Tx, ii)
} else {
prob.of.this.x.for.this.T <- bgnbd.pmf(params, Tx, ii)
}
expected.given.x.and.T = n.T * prob.of.this.x.for.this.T
this.x.expected = this.x.expected + expected.given.x.and.T
}
n.x.expected.all[ii + 1] = this.x.expected
}
n.x.expected[1:censor] = n.x.expected.all[1:censor]
n.x.expected[censor + 1] = sum(n.x.expected.all[(censor +
1):(max(x) + 1)])
col.names <- paste(rep("freq", length(censor + 1)), (0:censor),
sep = ".")
col.names[censor + 1] <- paste(col.names[censor + 1], "+",
sep = "")
censored.freq.comparison <- rbind(n.x.actual, n.x.expected)
colnames(censored.freq.comparison) <- col.names
cfc.plot <- censored.freq.comparison
if (plotZero == FALSE)
cfc.plot <- cfc.plot[, -1]
n.ticks <- ncol(cfc.plot)
if (plotZero == TRUE) {
x.labels <- 0:(n.ticks - 1)
x.labels[n.ticks] <- paste(n.ticks - 1, "+", sep = "")
}
ylim <- c(0, ceiling(max(cfc.plot) * 1.1))
barplot(cfc.plot, names.arg = x.labels, beside = TRUE, ylim = ylim,
main = title, xlab = xlab, ylab = ylab, col = 1:2)
legend("topright", legend = c("Actual", "Model"), col = 1:2,
lwd = 2)
return(censored.freq.comparison)
}
|
#**************************************************************************
# 6. Compare emissions from motor vehicle sources in Baltimore City ####
# with emissions from motor vehicle sources in Los Angeles County, CA
# (fips=="06037"). Which has seen greater changes over time?
#**************************************************************************
# Read files as needed
if(!exists("nei")) {
nei <- readRDS("./summarySCC_PM25.rds")
names(nei) <- tolower(names(nei))
}
if(!exists("scc")) {
scc <- readRDS("./Source_Classification_Code.rds")
names(scc) <- tolower(names(scc))
}
suppressMessages(library(dplyr))
# Find scc codes for motor vehicle emission sources
vehicle.index <- with(scc, grepl("highway", scc.level.two, ignore.case =T))
vehicle.codes <- scc[vehicle.index, ] %>%
select(scc, scc.level.two)
# Merge vehicle source info with monitor data for Baltimore and LA County
nei.vehicle <- merge(nei, vehicle.codes, by="scc") %>%
filter(fips=="24510" | fips=="06037") %>%
filter(year==1999 | year==2008) %>%
rename(area=fips) %>%
mutate(area = replace(area, area=="06037", "LA County")) %>%
mutate(area = replace(area, area=="24510", "Baltimore"))
# Calculate total emissions by area for 1999 and 2008
vehicle.emissions <- nei.vehicle %>%
group_by(area, year) %>%
summarize_at("emissions", sum) %>%
mutate(delta = emissions - lag(emissions, default = 0)) %>%
mutate(delta = round(delta), digits=0)
# Plot emissions levels and annotate with magnitude of change using ggplot2
suppressMessages(library(ggplot2))
png("./plot6.png", width=480, height=480)
ggplot(data=vehicle.emissions, aes(x=year, y=emissions, fill=area)) +
geom_line(aes(color=area)) +
geom_point(aes(color=area)) +
labs(y = "Motor vehicle emissions (tons)", x = "Year") +
labs(title = "Baltimore vs. LA vehicle emissions 1999 & 2008",
subtitle = "(tons)") +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.title = element_blank()) +
geom_text(data = subset(vehicle.emissions, year=="2008"),
aes(x = 2008,
y = emissions *.9,
label = paste("change = ",delta),
hjust = .95, vjust = -1.5
)
)
dev.off() | /plot6.R | no_license | connectomania/ExData_Plotting2 | R | false | false | 2,629 | r |
#**************************************************************************
# 6. Compare emissions from motor vehicle sources in Baltimore City ####
# with emissions from motor vehicle sources in Los Angeles County, CA
# (fips=="06037"). Which has seen greater changes over time?
#**************************************************************************
# Read files as needed
if(!exists("nei")) {
nei <- readRDS("./summarySCC_PM25.rds")
names(nei) <- tolower(names(nei))
}
if(!exists("scc")) {
scc <- readRDS("./Source_Classification_Code.rds")
names(scc) <- tolower(names(scc))
}
suppressMessages(library(dplyr))
# Find scc codes for motor vehicle emission sources
vehicle.index <- with(scc, grepl("highway", scc.level.two, ignore.case =T))
vehicle.codes <- scc[vehicle.index, ] %>%
select(scc, scc.level.two)
# Merge vehicle source info with monitor data for Baltimore and LA County
nei.vehicle <- merge(nei, vehicle.codes, by="scc") %>%
filter(fips=="24510" | fips=="06037") %>%
filter(year==1999 | year==2008) %>%
rename(area=fips) %>%
mutate(area = replace(area, area=="06037", "LA County")) %>%
mutate(area = replace(area, area=="24510", "Baltimore"))
# Calculate total emissions by area for 1999 and 2008
vehicle.emissions <- nei.vehicle %>%
group_by(area, year) %>%
summarize_at("emissions", sum) %>%
mutate(delta = emissions - lag(emissions, default = 0)) %>%
mutate(delta = round(delta), digits=0)
# Plot emissions levels and annotate with magnitude of change using ggplot2
suppressMessages(library(ggplot2))
png("./plot6.png", width=480, height=480)
ggplot(data=vehicle.emissions, aes(x=year, y=emissions, fill=area)) +
geom_line(aes(color=area)) +
geom_point(aes(color=area)) +
labs(y = "Motor vehicle emissions (tons)", x = "Year") +
labs(title = "Baltimore vs. LA vehicle emissions 1999 & 2008",
subtitle = "(tons)") +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.title = element_blank()) +
geom_text(data = subset(vehicle.emissions, year=="2008"),
aes(x = 2008,
y = emissions *.9,
label = paste("change = ",delta),
hjust = .95, vjust = -1.5
)
)
dev.off() |
##-------------------------------------------
##
## Data Science PCE Interview Question
##
##-------------------------------------------
# Question: Two trains are slowly going towards each
# other at 4mph and 6mph and are 20 miles apart.
#
# There is a fly that can go 8 mph flying between
# each of them. When the fly arrives at the first
# train, it instantly turns around and flies back
# to the other train and so on.
#
# What is the total distance the fly goes until the
# two trains meet?
##-------- (1) -------------
# Solve this programmatically: simulate each leg of the fly's flight until
# the trains (closing at 4 + 6 = 10 mph) have effectively met.  The two
# original if/else branches differed only in the target train's speed, so
# they are collapsed into one body parameterised by `target`.
total_distance <- 0        # Miles the fly has flown so far
dist_between_trains <- 20  # Current gap between the trains (miles)
train_speed <- c(4, 6)     # Speeds of the two trains (mph)
fly_speed <- 8             # Fly speed (mph)
target <- 1                # Index of the train the fly is heading toward

while (dist_between_trains > 0.0001) {
  # Fly and target train close on each other at their combined speed.
  time_to_intercept <- dist_between_trains / (fly_speed + train_speed[target])
  total_distance <- total_distance + time_to_intercept * fly_speed
  # Meanwhile both trains keep shrinking the gap at 4 + 6 = 10 mph.
  dist_between_trains <- dist_between_trains - time_to_intercept * sum(train_speed)
  # Instantly turn around toward the other train (1 <-> 2).
  target <- 3 - target
}
print(paste('Fly traveled', round(total_distance, 3), 'miles'))
##-------- (2) -------------
# Solve this the short way: the trains meet after 20 / (4 + 6) = 2 hours,
# during which the fly covers 2 * 8 = 16 miles.
time_till_trains_meet <- 20 / (4 + 6)
fly_dist <- time_till_trains_meet * 8
| /9_NLP/prob_interview_question.R | no_license | siva2k16/DataScience350 | R | false | false | 2,077 | r | ##-------------------------------------------
##
## Data Science PCE Interview Question
##
##-------------------------------------------
# Question: Two trains are slowly going towards each
# other at 4mph and 6mph and are 20 miles apart.
#
# There is a fly that can go 8 mph flying between
# each of them. When the fly arrives at the first
# train, it instantly turns around and flies back
# to the other train and so on.
#
# What is the total distance the fly goes until the
# two trains meet?
##-------- (1) -------------
# Solve this programmatically
total_distance = 0 # Keep track of distance
# Now we need to keep track of which train the fly
# is headed towards, say 1 = 4mph train, 0 = 6mph train.
logical_train = 1
# Keep track of distance between:
dist_between_trains = 20
while (dist_between_trains>0.0001){
if (logical_train==1){
# Fly and 4mph train are headed towards each other.
# Combined speed of 4+8 = 12 mph
time_to_intercept = dist_between_trains/12
dist_fly_traveled = time_to_intercept * 8
total_distance = total_distance + dist_fly_traveled
# Need to compute distance train traveled:
dist_train_traveled = time_to_intercept * (4+6)
dist_between_trains = dist_between_trains - dist_train_traveled
# Turn around to other train
logical_train = 0
}else{
# Fly and 6mph train are headed towards each other.
# Combined speed of 6+8 = 14 mph
time_to_intercept = dist_between_trains/14
dist_fly_traveled = time_to_intercept * 8
total_distance = total_distance + dist_fly_traveled
# Need to compute distance train traveled:
dist_train_traveled = time_to_intercept * (4+6)
dist_between_trains = dist_between_trains - dist_train_traveled
# Turn around to other train
logical_train = 1
}
}
print(paste('Fly traveled', round(total_distance, 3), 'miles'))
##-------- (2) -------------
# Solve this the short way
time_till_trains_meet = 20/(4+6)
fly_dist = time_till_trains_meet * 8
|
##' Delaunay triangulation in N dimensions
##'
##' The Delaunay triangulation is a tessellation of the convex hull of
##' the points such that no \eqn{N}-sphere defined by the \eqn{N}-
##' triangles contains any other points from the set.
##'
##' @param p An \eqn{M}-by-\eqn{N} matrix whose rows represent \eqn{M}
##' points in \eqn{N}-dimensional space.
##'
##' @param options String containing extra control options for the
##' underlying Qhull command; see the Qhull documentation
##' (\url{../doc/qhull/html/qdelaun.html}) for the available
##' options.
##'
##' The \code{Qbb} option is always passed to Qhull. The remaining
##' default options are \code{Qcc Qc Qt Qz} for \eqn{N<4} and
##' \code{Qcc Qc Qt Qx} for \eqn{N>=4}. If neither of the \code{QJ}
##' or \code{Qt} options are supplied, the \code{Qt} option is
##' passed to Qhull. The \code{Qt} option ensures all Delaunay
##' regions are simplical (e.g., triangles in 2D). See
##' \url{../doc/qhull/html/qdelaun.html} for more details. Contrary
##' to the Qhull documentation, no degenerate (zero area) regions
##' are returned with the \code{Qt} option since the R function
##' removes them from the triangulation.
##'
##' \emph{If \code{options} is specified, the default options are
##' overridden.} It is recommended to use \code{output.options} for
##' options controlling the outputs.
##'
##' @param output.options String containing Qhull options to control
##' output. Currently \code{Fn} (neighbours) and \code{Fa} (areas)
##' are supported. Causes an object of return value for details. If
##' \code{output.options} is \code{TRUE}, select all supported
##' options.
##'
##' @param full Deprecated and will be removed in a future release.
##' Adds options \code{Fa} and \code{Fn}.
##'
##' @return If \code{output.options} is \code{NULL} (the default),
##' return the Delaunay triangulation as a matrix with \eqn{M} rows
##' and \eqn{N+1} columns in which each row contains a set of
##' indices to the input points \code{p}. Thus each row describes a
##' simplex of dimension \eqn{N}, e.g. a triangle in 2D or a
##' tetrahedron in 3D.
##'
##' If the \code{output.options} argument is \code{TRUE} or is a
##' string containing \code{Fn} or \code{Fa}, return a list with
##' class \code{delaunayn} comprising the named elements:
##' \describe{
##' \item{\code{tri}}{The Delaunay triangulation described above}
##' \item{\code{areas}}{If \code{TRUE} or if \code{Fa} is specified, an
##' \eqn{M}-dimensional vector containing the generalised area of
##' each simplex (e.g. in 2D the areas of triangles; in 3D the volumes
##' of tetrahedra). See \url{../doc/qhull/html/qh-optf.html#Fa}.}
##' \item{\code{neighbours}}{If \code{TRUE} or if \code{Fn} is specified,
##' a list of neighbours of each simplex.
##' See \url{../doc/qhull/html/qh-optf.html#Fn}}
##' }
##'
##' @note This function interfaces the Qhull library and is a port
##' from Octave (\url{http://www.octave.org}) to R. Qhull computes
##' convex hulls, Delaunay triangulations, halfspace intersections
##' about a point, Voronoi diagrams, furthest-site Delaunay
##' triangulations, and furthest-site Voronoi diagrams. It runs in
##' 2D, 3D, 4D, and higher dimensions. It implements the
##' Quickhull algorithm for computing the convex hull. Qhull handles
##' round-off errors from floating point arithmetic. It computes
##' volumes, surface areas, and approximations to the convex
##' hull. See the Qhull documentation included in this distribution
##' (the doc directory \url{../doc/qhull/index.html}).
##'
##' Qhull does not support constrained Delaunay triangulations, triangulation
##' of non-convex surfaces, mesh generation of non-convex objects, or
##' medium-sized inputs in 9D and higher. A rudimentary algorithm for mesh
##' generation in non-convex regions using Delaunay triangulation is
##' implemented in \link{distmesh2d} (currently only 2D).
##' @author Raoul Grasman and Robert B. Gramacy; based on the
##' corresponding Octave sources of Kai Habel.
##' @seealso \code{\link[tripack]{tri.mesh}}, \code{\link{convhulln}},
##' \code{\link{surf.tri}}, \code{\link{distmesh2d}}
##' @references \cite{Barber, C.B., Dobkin, D.P., and Huhdanpaa, H.T.,
##' \dQuote{The Quickhull algorithm for convex hulls,} \emph{ACM Trans. on
##' Mathematical Software,} Dec 1996.}
##'
##' \url{http://www.qhull.org}
##' @keywords math dplot graphs
##' @examples
##'
##' # example delaunayn
##' d <- c(-1,1)
##' pc <- as.matrix(rbind(expand.grid(d,d,d),0))
##' tc <- delaunayn(pc)
##'
##' # example tetramesh
##' \dontrun{
##' rgl::rgl.viewpoint(60)
##' rgl::rgl.light(120,60)
##' tetramesh(tc,pc, alpha=0.9)
##' }
##'
##' tc1 <- delaunayn(pc, output.options="Fa")
##' ## sum of generalised areas is total volume of cube
##' sum(tc1$areas)
##'
##' @export
##' @useDynLib geometry
delaunayn <-
function(p, options=NULL, output.options=NULL, full=FALSE) {
tmp_stdout <- tempfile("Rf")
tmp_stderr <- tempfile("Rf")
on.exit(unlink(c(tmp_stdout, tmp_stderr)))
## Coerce the input to be matrix
if (is.data.frame(p)) {
p <- as.matrix(p)
}
## Make sure we have real-valued input
storage.mode(p) <- "double"
## We need to check for NAs in the input, as these will crash the C
## code.
if (any(is.na(p))) {
stop("The first argument should not contain any NAs")
}
## Default options
if (is.null(options)) {
if (ncol(p) < 4) {
options <- "Qt Qc Qz"
} else {
options <- "Qt Qc Qx"
}
}
## Combine and check options
options <- tryCatch(qhull.options(options, output.options, supported_output.options <- c("Fa", "Fn"), full=full), error=function(e) {stop(e)})
## It is essential that delaunayn is called with either the QJ or Qt
## option. Otherwise it may return a non-triangulated structure, i.e
## one with more than dim+1 points per structure, where dim is the
## dimension in which the points p reside.
if (!grepl("Qt", options) & !grepl("QJ", options)) {
options <- paste(options, "Qt")
}
out <- .Call("C_delaunayn", p, as.character(options), tmp_stdout, tmp_stderr, PACKAGE="geometry")
# Remove NULL elements
out[which(sapply(out, is.null))] <- NULL
if (is.null(out$areas) & is.null(out$neighbours)) {
attr(out$tri, "delaunayn") <- attr(out$tri, "delaunayn")
return(out$tri)
}
class(out) <- "delaunayn"
out$p <- p
return(out)
}
## LocalWords: param Qhull Fn delaunayn Qbb Qcc Qc Qz Qx QJ itemize
## LocalWords: tri Voronoi Quickhull distmesh Grasman Gramacy Kai
## LocalWords: Habel seealso tripack convhulln Dobkin Huhdanpaa ACM
## LocalWords: dQuote emph dplot pc tc tetramesh dontrun useDynLib
| /fuzzedpackages/geometry/R/delaunayn.R | no_license | akhikolla/testpackages | R | false | false | 6,774 | r | ##' Delaunay triangulation in N dimensions
##'
##' The Delaunay triangulation is a tessellation of the convex hull of
##' the points such that no \eqn{N}-sphere defined by the \eqn{N}-
##' triangles contains any other points from the set.
##'
##' @param p An \eqn{M}-by-\eqn{N} matrix whose rows represent \eqn{M}
##' points in \eqn{N}-dimensional space.
##'
##' @param options String containing extra control options for the
##' underlying Qhull command; see the Qhull documentation
##' (\url{../doc/qhull/html/qdelaun.html}) for the available
##' options.
##'
##' The \code{Qbb} option is always passed to Qhull. The remaining
##' default options are \code{Qcc Qc Qt Qz} for \eqn{N<4} and
##' \code{Qcc Qc Qt Qx} for \eqn{N>=4}. If neither of the \code{QJ}
##' or \code{Qt} options are supplied, the \code{Qt} option is
##' passed to Qhull. The \code{Qt} option ensures all Delaunay
##' regions are simplical (e.g., triangles in 2D). See
##' \url{../doc/qhull/html/qdelaun.html} for more details. Contrary
##' to the Qhull documentation, no degenerate (zero area) regions
##' are returned with the \code{Qt} option since the R function
##' removes them from the triangulation.
##'
##' \emph{If \code{options} is specified, the default options are
##' overridden.} It is recommended to use \code{output.options} for
##' options controlling the outputs.
##'
##' @param output.options String containing Qhull options to control
##' output. Currently \code{Fn} (neighbours) and \code{Fa} (areas)
##' are supported. Causes an object of return value for details. If
##' \code{output.options} is \code{TRUE}, select all supported
##' options.
##'
##' @param full Deprecated and will be removed in a future release.
##' Adds options \code{Fa} and \code{Fn}.
##'
##' @return If \code{output.options} is \code{NULL} (the default),
##' return the Delaunay triangulation as a matrix with \eqn{M} rows
##' and \eqn{N+1} columns in which each row contains a set of
##' indices to the input points \code{p}. Thus each row describes a
##' simplex of dimension \eqn{N}, e.g. a triangle in 2D or a
##' tetrahedron in 3D.
##'
##' If the \code{output.options} argument is \code{TRUE} or is a
##' string containing \code{Fn} or \code{Fa}, return a list with
##' class \code{delaunayn} comprising the named elements:
##' \describe{
##' \item{\code{tri}}{The Delaunay triangulation described above}
##' \item{\code{areas}}{If \code{TRUE} or if \code{Fa} is specified, an
##' \eqn{M}-dimensional vector containing the generalised area of
##' each simplex (e.g. in 2D the areas of triangles; in 3D the volumes
##' of tetrahedra). See \url{../doc/qhull/html/qh-optf.html#Fa}.}
##' \item{\code{neighbours}}{If \code{TRUE} or if \code{Fn} is specified,
##' a list of neighbours of each simplex.
##' See \url{../doc/qhull/html/qh-optf.html#Fn}}
##' }
##'
##' @note This function interfaces the Qhull library and is a port
##' from Octave (\url{http://www.octave.org}) to R. Qhull computes
##' convex hulls, Delaunay triangulations, halfspace intersections
##' about a point, Voronoi diagrams, furthest-site Delaunay
##' triangulations, and furthest-site Voronoi diagrams. It runs in
##' 2D, 3D, 4D, and higher dimensions. It implements the
##' Quickhull algorithm for computing the convex hull. Qhull handles
##' round-off errors from floating point arithmetic. It computes
##' volumes, surface areas, and approximations to the convex
##' hull. See the Qhull documentation included in this distribution
##' (the doc directory \url{../doc/qhull/index.html}).
##'
##' Qhull does not support constrained Delaunay triangulations, triangulation
##' of non-convex surfaces, mesh generation of non-convex objects, or
##' medium-sized inputs in 9D and higher. A rudimentary algorithm for mesh
##' generation in non-convex regions using Delaunay triangulation is
##' implemented in \link{distmesh2d} (currently only 2D).
##' @author Raoul Grasman and Robert B. Gramacy; based on the
##' corresponding Octave sources of Kai Habel.
##' @seealso \code{\link[tripack]{tri.mesh}}, \code{\link{convhulln}},
##' \code{\link{surf.tri}}, \code{\link{distmesh2d}}
##' @references \cite{Barber, C.B., Dobkin, D.P., and Huhdanpaa, H.T.,
##' \dQuote{The Quickhull algorithm for convex hulls,} \emph{ACM Trans. on
##' Mathematical Software,} Dec 1996.}
##'
##' \url{http://www.qhull.org}
##' @keywords math dplot graphs
##' @examples
##'
##' # example delaunayn
##' d <- c(-1,1)
##' pc <- as.matrix(rbind(expand.grid(d,d,d),0))
##' tc <- delaunayn(pc)
##'
##' # example tetramesh
##' \dontrun{
##' rgl::rgl.viewpoint(60)
##' rgl::rgl.light(120,60)
##' tetramesh(tc,pc, alpha=0.9)
##' }
##'
##' tc1 <- delaunayn(pc, output.options="Fa")
##' ## sum of generalised areas is total volume of cube
##' sum(tc1$areas)
##'
##' @export
##' @useDynLib geometry
delaunayn <-
function(p, options=NULL, output.options=NULL, full=FALSE) {
tmp_stdout <- tempfile("Rf")
tmp_stderr <- tempfile("Rf")
on.exit(unlink(c(tmp_stdout, tmp_stderr)))
## Coerce the input to be matrix
if (is.data.frame(p)) {
p <- as.matrix(p)
}
## Make sure we have real-valued input
storage.mode(p) <- "double"
## We need to check for NAs in the input, as these will crash the C
## code.
if (any(is.na(p))) {
stop("The first argument should not contain any NAs")
}
## Default options
if (is.null(options)) {
if (ncol(p) < 4) {
options <- "Qt Qc Qz"
} else {
options <- "Qt Qc Qx"
}
}
## Combine and check options
options <- tryCatch(qhull.options(options, output.options, supported_output.options <- c("Fa", "Fn"), full=full), error=function(e) {stop(e)})
## It is essential that delaunayn is called with either the QJ or Qt
## option. Otherwise it may return a non-triangulated structure, i.e
## one with more than dim+1 points per structure, where dim is the
## dimension in which the points p reside.
if (!grepl("Qt", options) & !grepl("QJ", options)) {
options <- paste(options, "Qt")
}
out <- .Call("C_delaunayn", p, as.character(options), tmp_stdout, tmp_stderr, PACKAGE="geometry")
# Remove NULL elements
out[which(sapply(out, is.null))] <- NULL
if (is.null(out$areas) & is.null(out$neighbours)) {
attr(out$tri, "delaunayn") <- attr(out$tri, "delaunayn")
return(out$tri)
}
class(out) <- "delaunayn"
out$p <- p
return(out)
}
## LocalWords: param Qhull Fn delaunayn Qbb Qcc Qc Qz Qx QJ itemize
## LocalWords: tri Voronoi Quickhull distmesh Grasman Gramacy Kai
## LocalWords: Habel seealso tripack convhulln Dobkin Huhdanpaa ACM
## LocalWords: dQuote emph dplot pc tc tetramesh dontrun useDynLib
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.