content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# 2
# K-means clustering of the US crime data set:
# import -> EDA (missing values, skewness, outliers, normality)
# -> winsorize Rape outliers -> min-max normalization -> elbow curve
# -> k-means with k = 3 -> export the clustered data.
library(readr)

# Importing dataset (machine-specific path; adjust as needed)
crimedata <- read.csv("C:/Users/WIN10/Desktop/LEARNING/crime_data.csv")
View(crimedata)
summary(crimedata)

# ---- EDA: missing values ----
sum(is.na(crimedata))  # 0 -> there are no missing values

# ---- EDA: skewness via histograms ----
colnames(crimedata)
hist(crimedata$Murder, xlab = 'Murder', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)      # positively skewed
hist(crimedata$Assault, xlab = 'Assault', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)    # roughly symmetric
hist(crimedata$UrbanPop, xlab = 'Urban Pop', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15) # negatively skewed
hist(crimedata$Rape, xlab = 'Rape', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)          # negatively skewed

# ---- EDA: outliers via boxplots (label typo "Frequecy" fixed) ----
murderbox <- boxplot(crimedata$Murder, horizontal = TRUE, xlab = 'Murder', ylab = 'Frequency', main = "Murder vs Frequency", col = 'red', border = 'blue')
murderbox$out   # no outliers in Murder
assaultbox <- boxplot(crimedata$Assault, horizontal = TRUE, xlab = 'Assault', ylab = 'Frequency', main = 'Assault vs Frequency', col = 'red', border = 'blue')
assaultbox$out  # no outliers in Assault
urbanbox <- boxplot(crimedata$UrbanPop, horizontal = TRUE, xlab = 'Urban Pop', ylab = 'Frequency', main = 'Urban Pop vs Frequency', col = 'red', border = 'blue')
urbanbox$out    # no outliers in UrbanPop
rapebox <- boxplot(crimedata$Rape, horizontal = TRUE, xlab = 'Rape', ylab = 'Frequency', main = 'Rape vs Frequency', col = 'red', border = 'blue')
rapebox$out     # Rape has outliers -> winsorize them below

# ---- Winsorization of the Rape outliers ----
# Values outside the 1.5 * IQR fences are replaced by the 5th / 95th
# percentile respectively.
quant1 <- quantile(crimedata$Rape, probs = c(0.25, 0.75))  # quartiles
quant1
wins1 <- quantile(crimedata$Rape, probs = c(0.05, 0.95))   # replacement limits
wins1
a1 <- 1.5 * IQR(crimedata$Rape)  # whisker half-width
a1
b1 <- quant1[1] - a1             # lower fence
b1
c1 <- quant1[2] + a1             # upper fence
c1
# Replacing the outliers
crimedata$Rape[crimedata$Rape < b1] <- wins1[1]
crimedata$Rape[crimedata$Rape > c1] <- wins1[2]
# Re-check: no outliers should remain
d <- boxplot(crimedata$Rape)
d$out

# ---- Normality checks via Q-Q plots ----
# All four variables track the reference line reasonably well.
qqnorm(crimedata$Murder)
qqline(crimedata$Murder)
qqnorm(crimedata$Assault)
qqline(crimedata$Assault)
qqnorm(crimedata$UrbanPop)
qqline(crimedata$UrbanPop)
qqnorm(crimedata$Rape)
qqline(crimedata$Rape)

# Only continuous variables -> no discretization needed.
# ---- Zero-variance check ----
# Restrict to the numeric columns 2:5; column 1 appears to be the state
# identifier (TODO confirm), and var() is undefined on it.
apply(crimedata[, 2:5], 2, var)
which(apply(crimedata[, 2:5], 2, var) == 0)  # integer(0) -> every column varies

# ---- Min-max normalization to make the features scale/unit free ----
# NOTE: this masks base::norm() (matrix norm); the name is kept so the
# rest of the script keeps working.
norm <- function(x) {
  rng <- max(x) - min(x)
  if (rng == 0) return(rep(0, length(x)))  # guard: constant column would give 0/0 = NaN
  (x - min(x)) / rng
}
normdata <- as.data.frame(lapply(crimedata[2:5], norm))
summary(normdata)
table(is.na(normdata))  # sanity check: normalization introduced no NAs

# ---- Elbow curve: total within-cluster SS for k = 2..8 ----
set.seed(42)  # kmeans uses random starts; fix the seed for reproducibility
twss <- vapply(2:8, function(k) kmeans(normdata, centers = k)$tot.withinss, numeric(1))
twss
plot(2:8, twss, type = "b", xlab = "Number of clusters", ylab = "Within sum of square")
title("Kmeans clustering scree plot")

# ---- Final clustering at the elbow (k = 3) ----
fit <- kmeans(normdata, 3)
str(fit)
# BUG FIX: name the label column explicitly. data.frame(fit$cluster, ...)
# called it "fit.cluster", so final$cluster below was silently NULL and
# aggregate() failed.
final <- data.frame(cluster = fit$cluster, crimedata)
# Per-cluster means of the four crime variables
aggregate(crimedata[, 2:5], by = list(cluster = final$cluster), FUN = mean)

# Export the clustered data set (with its cluster column) to the working directory
write_csv(final, "Kmeanscrime.csv")
getwd()  # to show where the file was written
| /kmeanscrime.R | no_license | priyankaankireddypalli/Kmeans-Clustering | R | false | false | 3,962 | r | # 2
# K-means clustering of the US crime data set (second, duplicated copy of
# the script earlier in this dump):
# import -> EDA -> winsorize Rape outliers -> min-max normalization
# -> elbow curve -> k-means with k = 3 -> export the clustered data.
library(readr)

# Importing dataset (machine-specific path; adjust as needed)
crimedata <- read.csv("C:/Users/WIN10/Desktop/LEARNING/crime_data.csv")
View(crimedata)
summary(crimedata)

# ---- EDA: missing values ----
sum(is.na(crimedata))  # 0 -> there are no missing values

# ---- EDA: skewness via histograms ----
colnames(crimedata)
hist(crimedata$Murder, xlab = 'Murder', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)      # positively skewed
hist(crimedata$Assault, xlab = 'Assault', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)    # roughly symmetric
hist(crimedata$UrbanPop, xlab = 'Urban Pop', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15) # negatively skewed
hist(crimedata$Rape, xlab = 'Rape', ylab = 'Frequency', col = 'green', border = 'red', breaks = 15)          # negatively skewed

# ---- EDA: outliers via boxplots (label typo "Frequecy" fixed) ----
murderbox <- boxplot(crimedata$Murder, horizontal = TRUE, xlab = 'Murder', ylab = 'Frequency', main = "Murder vs Frequency", col = 'red', border = 'blue')
murderbox$out   # no outliers in Murder
assaultbox <- boxplot(crimedata$Assault, horizontal = TRUE, xlab = 'Assault', ylab = 'Frequency', main = 'Assault vs Frequency', col = 'red', border = 'blue')
assaultbox$out  # no outliers in Assault
urbanbox <- boxplot(crimedata$UrbanPop, horizontal = TRUE, xlab = 'Urban Pop', ylab = 'Frequency', main = 'Urban Pop vs Frequency', col = 'red', border = 'blue')
urbanbox$out    # no outliers in UrbanPop
rapebox <- boxplot(crimedata$Rape, horizontal = TRUE, xlab = 'Rape', ylab = 'Frequency', main = 'Rape vs Frequency', col = 'red', border = 'blue')
rapebox$out     # Rape has outliers -> winsorize them below

# ---- Winsorization of the Rape outliers ----
# Values outside the 1.5 * IQR fences are replaced by the 5th / 95th
# percentile respectively.
quant1 <- quantile(crimedata$Rape, probs = c(0.25, 0.75))  # quartiles
quant1
wins1 <- quantile(crimedata$Rape, probs = c(0.05, 0.95))   # replacement limits
wins1
a1 <- 1.5 * IQR(crimedata$Rape)  # whisker half-width
a1
b1 <- quant1[1] - a1             # lower fence
b1
c1 <- quant1[2] + a1             # upper fence
c1
# Replacing the outliers
crimedata$Rape[crimedata$Rape < b1] <- wins1[1]
crimedata$Rape[crimedata$Rape > c1] <- wins1[2]
# Re-check: no outliers should remain
d <- boxplot(crimedata$Rape)
d$out

# ---- Normality checks via Q-Q plots ----
# All four variables track the reference line reasonably well.
qqnorm(crimedata$Murder)
qqline(crimedata$Murder)
qqnorm(crimedata$Assault)
qqline(crimedata$Assault)
qqnorm(crimedata$UrbanPop)
qqline(crimedata$UrbanPop)
qqnorm(crimedata$Rape)
qqline(crimedata$Rape)

# Only continuous variables -> no discretization needed.
# ---- Zero-variance check ----
# Restrict to the numeric columns 2:5; column 1 appears to be the state
# identifier (TODO confirm), and var() is undefined on it.
apply(crimedata[, 2:5], 2, var)
which(apply(crimedata[, 2:5], 2, var) == 0)  # integer(0) -> every column varies

# ---- Min-max normalization to make the features scale/unit free ----
# NOTE: this masks base::norm() (matrix norm); the name is kept so the
# rest of the script keeps working.
norm <- function(x) {
  rng <- max(x) - min(x)
  if (rng == 0) return(rep(0, length(x)))  # guard: constant column would give 0/0 = NaN
  (x - min(x)) / rng
}
normdata <- as.data.frame(lapply(crimedata[2:5], norm))
summary(normdata)
table(is.na(normdata))  # sanity check: normalization introduced no NAs

# ---- Elbow curve: total within-cluster SS for k = 2..8 ----
set.seed(42)  # kmeans uses random starts; fix the seed for reproducibility
twss <- vapply(2:8, function(k) kmeans(normdata, centers = k)$tot.withinss, numeric(1))
twss
plot(2:8, twss, type = "b", xlab = "Number of clusters", ylab = "Within sum of square")
title("Kmeans clustering scree plot")

# ---- Final clustering at the elbow (k = 3) ----
fit <- kmeans(normdata, 3)
str(fit)
# BUG FIX: name the label column explicitly. data.frame(fit$cluster, ...)
# called it "fit.cluster", so final$cluster below was silently NULL and
# aggregate() failed.
final <- data.frame(cluster = fit$cluster, crimedata)
# Per-cluster means of the four crime variables
aggregate(crimedata[, 2:5], by = list(cluster = final$cluster), FUN = mean)

# Export the clustered data set (with its cluster column) to the working directory
write_csv(final, "Kmeanscrime.csv")
getwd()  # to show where the file was written
|
# compute the log density for sample data in each group
# including f_yz, f_z, and f_y
# C++ (RcppArmadillo) kernel, compiled below with inline::cxxfunction().
# Output: an m x D matrix whose (i, d) entry is the summed log density of
# all observations in group d under the i-th Monte-Carlo draw of the
# random effects (u for Z, b for Y).
# NOTE(review): `ub` is indexed (group, draw, component) here, whereas the
# *_posterior kernels later in the file index (draw, group, component);
# the callers build the arrays accordingly -- keep them in sync.
# The C++ text lives in a string literal and is left byte-identical.
groupdenscpp <-'
int m = Rcpp::as<int>(m0); // MC replicates
int D = Rcpp::as<int>(D0); // group size
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix); // generate random effect D * m * 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:5: H1,K1,H0,K0, M)
int n = Z.n_rows; // sample size
double gx;
double u;
double b;
double logdens;
arma::mat result = arma::zeros<arma::mat>(m, D); //result table ???
for(int i = 1; i<=m; i++)
{
for(int j = 1; j<=n; j++)
{
u = ub(id(j-1)-1, i-1, 0); // random effect for Z
b = ub(id(j-1)-1, i-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
logdens = Y(j-1) * gx -log(1+exp(gx));
if(subid(j-1) < 3) // H, K
{
logdens += -0.5 * log(2*PI*sigmaZ) - pow(Z(j-1) - delta(j-1) - u - Y(j-1)*sigmaZ*P, 2)/sigmaZ/2;
}
if(subid(j-1) == 2) // K1
{
logdens += log(1+exp(Z(j-1)*P + mu(j-1) + b));
}
result(i-1, id(j-1)-1) += logdens;
}
}
return Rcpp::wrap(result);
'
# Compile the kernel once; requires the `inline` package and a C++ toolchain.
# rcppgroupdens(m0, D0, P0, sigmaZ0, Z0, Y0, rand_matrix, delta_fix0,
#               mu_fix0, id0, subid0) -> m x D log-density matrix.
rcppgroupdens <- cxxfunction(signature(m0 = "int", D0 = "int", P0 = "numeric", sigmaZ0 = "numeric",
                                       Z0 = "numeric", Y0 = "numeric", rand_matrix = "numeric",
                                       delta_fix0 = "numeric", mu_fix0 = "numeric", id0 = "int", subid0 = "int"),
                             groupdenscpp,plugin="RcppArmadillo", verbose = TRUE)
# C++ kernel: weighted second-moment matrix of the random-effect draws,
#   result = sum_k weight_k * x_k x_k^T   (2 x 2),
# used by rcppSigmaUVfun() as the updated estimate of the (u, b)
# covariance matrix.  The C++ text lives in a string literal and is left
# byte-identical.
SigmaUVweightavgcpp <- '
arma::mat xrand = Rcpp::as<arma::mat>(rand_origin); // mD * 2
arma::vec xweight = Rcpp::as<arma::vec>(weight); // D*m
int n = xweight.n_rows;
arma::mat result = arma::zeros<arma::mat>(2,2);
for(int i = 1; i<=2; i++)
for(int j = 1; j<=2; j++)
for(int k = 1; k<=n; k++)
result(i-1,j-1) += xrand(k-1,i-1) * xrand(k-1,j-1) * xweight(k-1);
return Rcpp::wrap(result);
'
# Compile once: rcppSigmaUVweightavg(rand_origin, weight) -> 2 x 2 matrix.
rcppSigmaUVweightavg <- cxxfunction(signature(rand_origin="numeric", weight = "numeric"),
                                    SigmaUVweightavgcpp,plugin="RcppArmadillo", verbose = TRUE)
# Ydraw n * 3 (first column denote the label 0 or 1, second column denote the Yid 1 or 2, third column is condition mean of Z)
# C++ kernel: impute (Y, Z) for the unobserved cases.  For each row i it
# draws Y = 1 when the uniform draw rand_vec[i] falls below
# Yprob_new[i, 2], records the 0/1 label (col 1) and a 1/2 category id
# (col 2), and stores the conditional mean of Z,
#   delta_new[i] + Y * P_hat * SigmaZ_hat,
# in column 3.  The C++ text lives in a string literal and is left
# byte-identical.
YZdrawcpp <- '
arma::vec xdelta = Rcpp::as<arma::vec>(delta_new);
arma::vec xmu = Rcpp::as<arma::vec>(mu_new);
arma::mat xprob = Rcpp::as<arma::mat>(Yprob_new);
double xP = Rcpp::as<double>(P_hat);
double xsimgaZ = Rcpp::as<double>(SigmaZ_hat);
int n = xmu.n_rows;
arma::mat Ydraw = arma::zeros<arma::mat>(n,3);
arma::vec rand = Rcpp::as<arma::vec>(rand_vec);
for(int i = 1; i<=n; i++)
{
if(rand(i-1) < xprob(i-1, 1))
{
Ydraw(i-1,0) = 1;
Ydraw(i-1,1) = 2;
}
else
Ydraw(i-1,1) = 1;
Ydraw(i-1,2) = xdelta(i-1) + Ydraw(i-1,0) * xP * xsimgaZ;
}
return Rcpp::wrap(Ydraw);
'
# Compile once: rcppYZdraw(...) -> n x 3 matrix (label, category id, E[Z | Y]).
rcppYZdraw <- cxxfunction(signature(delta_new = "numeric", mu_new = "numeric",
                          Yprob_new = "numeric", P_hat = "numeric", SigmaZ_hat = "numeric", rand_vec = "numeric"),
                          YZdrawcpp, plugin = "RcppArmadillo", verbose = TRUE)
# C++ kernel: Monte-Carlo estimate of the EXPECTED NEGATIVE log-likelihood,
# averaging each observation's log density over the m posterior draws in
# rand_matrix_posterior (indexed draw x group x component).  Returns a
# single double; used by rcpploglikefun() as the optim() objective.
# The C++ text lives in a string literal and is left byte-identical.
groupdens_expectation_cpp <-'
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix_posterior); // m * D * 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:5: H1,K1,H0,K0, M)
int n = Z.n_rows; // sample size
int m = ub.n_rows; // MC replicates
double gx;
double u;
double b;
double logdens; //logdens for one sample
double result; // final output
result = 0;
for(int j = 1; j<=n; j++) // each sample
{
logdens = 0; // first define logdens (later random part)
for(int i = 1; i<=m; i++) //each MC draw
{
u = ub(i-1, id(j-1)-1, 0); // random effect for Z
b = ub(i-1, id(j-1)-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
logdens += -log(1+exp(gx)) + Y(j-1) * gx; // include first term random part
if(subid(j-1) < 3)
{
logdens += -0.5 * log(2*PI*sigmaZ) - pow(Z(j-1) - delta(j-1) - Y(j-1)*P*sigmaZ - u, 2)/sigmaZ/2; // random part
}
if(subid(j-1) == 2) // K
{
logdens += log(1 + exp(Z(j-1) * P + mu(j-1) + b));
}
}
result -= logdens/m; //expectation of negative loglikehood
}
return Rcpp::wrap(result);
'
# Compile once: rcppgroupdens_expectation_cpp(...) -> scalar expected
# negative log-likelihood.
rcppgroupdens_expectation_cpp <- cxxfunction(signature(P0 = "numeric", sigmaZ0 = "numeric", Z0 = "numeric", Y0 = "numeric",
                                                       rand_matrix_posterior = "numeric", delta_fix0 = "numeric",
                                                       mu_fix0 = "numeric", id0 = 'int', subid0 = "int"),
                                             groupdens_expectation_cpp,plugin="RcppArmadillo", verbose = TRUE)
# C++ kernel: gradient of the Monte-Carlo expected negative log-likelihood
# with respect to (betaz0, betaz1..., betay0, betay1, P, sigmaZ); returns
# a vector of length num_Zcoef + 5 matching the initparam layout used by
# rcppfulldevfun()/optim().
# BUG FIX: the per-sample accumulators (dbetaz0 ... dsigmaZ) were only
# initialized once, so on each outer iteration j the contributions of all
# previous samples were added to `result` again; they are now reset at the
# top of the sample loop (mirroring how the expectation kernel resets
# `logdens` per sample).
groupdens_dev1_cpp <-'
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
int num_Zcoef = Rcpp::as<int>(num_Zcoef0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::mat X = Rcpp::as<arma::mat>(X0); // matrix
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix_posterior); // m * D* 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:3: H, K, M)
int n = Z.n_rows; // sample size
int m = ub.n_rows; // MC replicates
double gx;
double u;
double b;
double dbetaz0 = 0;
arma::vec dbetaz1 = arma::zeros<arma::vec>(num_Zcoef); // vector
double dbetay0 = 0;
double dbetay1 = 0;
double dP = 0;
double dsigmaZ = 0;
double partA;
double partB;
arma::vec result = arma::zeros<arma::vec>(4+num_Zcoef+1); // betaz, betay, P, sigmaZ;
for(int j = 1; j<=n; j++) // each sample
{
dbetaz0 = 0; dbetay0 = 0; dbetay1 = 0; dP = 0; dsigmaZ = 0; // reset per-sample accumulators
dbetaz1.zeros();
for(int i = 1; i<=m; i++) //each MC draw
{
u = ub(i-1, id(j-1)-1, 0); // random effect for Z
b = ub(i-1, id(j-1)-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
partB = -exp(gx)/(1+exp(gx));
dbetaz0 += (Y(j-1) + partB) * P;
for(int k = 1; k <= num_Zcoef; k++)
{
dbetaz1(k-1) += (Y(j-1) + partB) * P * X(j-1, k-1);
}
dbetay0 += Y(j-1) + partB;
dbetay1 += (Y(j-1) + partB) * X(j-1, 0);
dP += (Y(j-1) + partB) * (delta(j-1) + u + sigmaZ* P); // add random part here
dsigmaZ += (Y(j-1) + partB) * pow(P, 2)/2;
if(subid(j-1) < 3)
{
partA = (Z(j-1) - delta(j-1) - u - Y(j-1) * P *sigmaZ)/sigmaZ;
dbetaz0 += partA;
for(int k = 1; k <= num_Zcoef; k++)
{
dbetaz1(k-1) += partA * X(j-1, k-1);
}
dP += partA * Y(j-1) * sigmaZ;
dsigmaZ += -0.5/sigmaZ + pow(partA, 2)/2 + partA * Y(j-1)*P;
}
if(subid(j-1) == 2) // K
{
partB = exp(Z(j-1) * P + mu(j-1) + b)/(1 + exp(Z(j-1) * P + mu(j-1) + b));
dbetay0 += partB;
dbetay1 += partB * X(j-1,0);
dP += partB * Z(j-1);
}
}
result(0) -= dbetaz0/m;
for(int k = 1; k<= num_Zcoef; k++)
{
result(k) -= dbetaz1(k-1)/m;
}
result(num_Zcoef+1) -= dbetay0/m;
result(num_Zcoef+2) -= dbetay1/m;
result(num_Zcoef+3) -= dP/m;
result(num_Zcoef+4) -= dsigmaZ/m; //derivative of negative loglikelihood
}
return Rcpp::wrap(result);
'
# Compile once: rcppgroupdens_dev1_cpp(...) -> gradient vector.
rcppgroupdens_dev1_cpp <- cxxfunction(signature(P0 = "numeric", sigmaZ0 = "numeric", num_Zcoef0 = 'int',
                                                Z0 = "numeric", Y0 = "numeric", X0 = "numeric", rand_matrix_posterior = "numeric",
                                                delta_fix0 = "numeric", mu_fix0 = "numeric", id0 = "int", subid0 = "int"),
                                      groupdens_dev1_cpp ,plugin="RcppArmadillo", verbose = TRUE)
# Analytic gradient of the Monte-Carlo expected negative log-likelihood,
# evaluated at the candidate vector `initparam` = c(Bz (1+num_Zcoef),
# By (1+num_Ycoef), P, SigmaZ); passed to optim() as `gr`.
# Relies on the globals num_Zcoef / num_Ycoef defined elsewhere.
rcppfulldevfun <- function(data, initparam, rand_matrix_posterior)
{
  bZinit <- initparam[1:(1 + num_Zcoef)]
  bYinit <- initparam[(2 + num_Zcoef):(2 + num_Zcoef + num_Ycoef)]
  Pinit <- initparam[3 + num_Zcoef + num_Ycoef]
  SigmaZinit <- initparam[4 + num_Zcoef + num_Ycoef]
  delta_fix <- as.matrix(cbind(1, data[, 2:(1 + num_Zcoef)])) %*% bZinit
  mu_fix <- cbind(1, data$X2) %*% bYinit
  X0 <- data[, 2:(1 + num_Zcoef)] %>% as.matrix()
  # BUG FIX: evaluate the gradient kernel at the candidate parameters
  # Pinit / SigmaZinit (the original passed the globals P / SigmaZ, so
  # the gradient ignored the point optim() was evaluating and Pinit /
  # SigmaZinit were computed but unused), and supply the mandatory
  # num_Zcoef0 argument that was missing from the call.
  rcppgroupdens_dev1_cpp(P0 = Pinit, sigmaZ0 = SigmaZinit, num_Zcoef0 = num_Zcoef,
                         Z0 = data$Z, Y0 = data$Y, X0 = X0,
                         rand_matrix_posterior = rand_matrix_posterior,
                         delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                         id0 = data$group, subid0 = data$subgroupid)
}
# Monte-Carlo expected negative log-likelihood at the candidate parameter
# vector `initparam` = c(Bz (1+num_Zcoef), By (1+num_Ycoef), P, SigmaZ),
# averaged over the posterior random-effect draws.  Passed to optim() as
# the objective; relies on the globals num_Zcoef / num_Ycoef.
rcpploglikefun <- function(data, initparam, rand_matrix_posterior)
{
  n_bz <- 1 + num_Zcoef                           # size of the Z-coefficient block
  beta_z    <- initparam[1:n_bz]
  beta_y    <- initparam[(n_bz + 1):(n_bz + 1 + num_Ycoef)]
  p_cand    <- initparam[n_bz + num_Ycoef + 2]
  sigz_cand <- initparam[n_bz + num_Ycoef + 3]
  # fixed-effect linear predictors for Z and Y
  delta_fix <- as.matrix(cbind(1, data[, 2:(1 + num_Zcoef)])) %*% beta_z
  mu_fix    <- cbind(1, data$X2) %*% beta_y
  rcppgroupdens_expectation_cpp(P0 = p_cand, sigmaZ0 = sigz_cand,
                                Z0 = data$Z, Y0 = data$Y,
                                rand_matrix_posterior = rand_matrix_posterior,
                                delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                                id0 = data$group, subid0 = data$subgroupid)
}
# One M-step update of the fixed parameters (Bz, By, P, SigmaZ):
#  1. draw m Monte-Carlo random effects per group from N(0, SigmaUV_hat),
#  2. weight each draw by its group-wise posterior density (stabilized by
#     subtracting the 70% quantile before exponentiating),
#  3. resample the draws by those weights to obtain posterior random
#     effects (m x D x 2),
#  4. minimize the expected negative log-likelihood with optim()
#     (BFGS with the analytic gradient when the global `usedev` is TRUE).
# Relies on globals: D, num_Zcoef, usedev (and rmvnorm from mvtnorm).
# Returns the updated parameter vector c(Bz, By, P, SigmaZ).
rcppupdatefix <- function(data, Bz_hat, By_hat, SigmaZ_hat, P_hat, SigmaUV_hat, maxiter, m)
{
rand_matrix <- rmvnorm(m * D, mean = rep(0, 2), sigma = SigmaUV_hat) %>% array(., dim = c(D, m, 2)) # D x m x 2 random-effect draws
delta_fix = as.matrix(cbind(1, data[, 2:(1+num_Zcoef)])) %*% Bz_hat
mu_fix = cbind(1, data$X2) %*% By_hat
# compute the posterior weight of each draw within each group
logdens <- rcppgroupdens(m0 = m, D0 = D, P0 = P_hat, sigmaZ0 = SigmaZ_hat, Z0 = data$Z, Y0 = data$Y,
                         rand_matrix = rand_matrix, delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                         id0 = data$group, subid0 = data$subgroupid)
# subtract a high per-group quantile before exp() to avoid overflow
quant_logdens = apply(logdens, MARGIN = 2, FUN = function(x) quantile(x, 0.7))
dens = sweep(logdens, MARGIN = 2, FUN = "-", STATS = quant_logdens) %>% exp()
dens[is.infinite(dens)] <- 0  # clamp any residual overflow
weight = sweep(dens, STATS= colSums(dens), FUN = "/" , MARGIN = 2) # posterior weight
# next generate posterior samples from posterior distribution
# (posterior_ID: (group, draw-index) pairs used to index rand_matrix)
posterior_ID <- sapply(1:D, FUN = function(x) base::sample(1:dim(weight)[1], dim(weight)[1], replace = TRUE, prob = weight[,x])) %>%
  as.vector() %>% cbind(rep(1:D, each = dim(weight)[1]), .)
rand_matrix_posterior <- c(rand_matrix[,,1][posterior_ID], rand_matrix[,,2][posterior_ID]) %>% array(., dim = c(m, D, 2)) # m x D x 2
initparam <- c(Bz_hat, By_hat, P_hat, SigmaZ_hat)
if(usedev == TRUE)
o1 <- optim(par = initparam, fn = rcpploglikefun, data = data, method = "BFGS", gr = rcppfulldevfun,
            rand_matrix_posterior = rand_matrix_posterior, control = list(maxit = maxiter))
if(usedev == FALSE)
o1 <- optim(par = initparam, fn = rcpploglikefun, data = data,
            rand_matrix_posterior = rand_matrix_posterior, control = list(maxit = maxiter))
return(o1$par) #(bz, by, P, sigmaZ)
}
# (Identical re-definition of the SigmaUVweightavg kernel defined earlier
# in this file; redundant but harmless -- it recompiles the same code.)
# result = sum_k weight_k * x_k x_k^T, the 2 x 2 weighted second-moment
# matrix of the random-effect draws.
SigmaUVweightavgcpp <- '
arma::mat xrand = Rcpp::as<arma::mat>(rand_origin);
arma::vec xweight = Rcpp::as<arma::vec>(weight);
int n = xweight.n_rows;
arma::mat result = arma::zeros<arma::mat>(2,2);
for(int i = 1; i<=2; i++)
for(int j = 1; j<=2; j++)
for(int k = 1; k<=n; k++)
result(i-1,j-1) += xrand(k-1,i-1) * xrand(k-1,j-1) * xweight(k-1);
return Rcpp::wrap(result);
'
# Recompile of the same kernel.
rcppSigmaUVweightavg <- cxxfunction(signature(rand_origin="numeric", weight = "numeric"),
                                    SigmaUVweightavgcpp,plugin="RcppArmadillo", verbose = TRUE)
# this function uses two rcpp functions: rcppgroupdens and rcppSigmaUVweightavg
# M-step update of the 2 x 2 random-effect covariance matrix Sigma_UV:
# draws m Monte-Carlo samples of (u, b) per group, weights them by the
# stabilized posterior group densities, and returns the weighted
# second-moment matrix.  Relies on globals D and num_Zcoef (and rmvnorm
# from mvtnorm).
rcppSigmaUVfun <- function(data, SigmaZ_hat, P_hat, Bz_hat, By_hat, SigmaUV_hat, m)
{
  rand_origin = rmvnorm(n = m*D, mean = rep(0, 2), sigma = SigmaUV_hat)
  rand_matrix = rand_origin %>% array(., dim = c(D, m, 2)) # D x m x 2
  delta_fix = as.matrix(cbind(1, data[, 2:(1+num_Zcoef)])) %*% Bz_hat
  mu_fix = cbind(1, data$X2) %*% By_hat # now is a vector
  # BUG FIX: evaluate the density at the current estimate P_hat; the
  # original passed the global `P`, silently ignoring this argument.
  rcppgroupdens(m0 = m, D0 = D, P0 = P_hat, sigmaZ0 = SigmaZ_hat, Z0 = data$Z, Y0 = data$Y,
                rand_matrix = rand_matrix, delta_fix0 = delta_fix,
                mu_fix0 = mu_fix, id0 = data$group, subid0 = data$subgroupid) -> logdens # m x D
  rm(rand_matrix)  # free the large array before the weight computation
  # Stabilized weights: subtract a per-group quantile before exp() to
  # avoid overflow, then normalize within each group and divide by D.
  quant_logdens = apply(logdens, MARGIN = 2, FUN = function(x) quantile(x, 0.7))
  dens = sweep(logdens, MARGIN = 2, FUN = "-", STATS = quant_logdens) %>% exp()
  dens[is.infinite(dens)] <- 0
  weight = t(sweep(dens, STATS= colSums(dens), FUN = "/" , MARGIN = 2)/D) # D x m
  return(rcppSigmaUVweightavg(rand_origin, as.vector(weight)))
}
# Ydraw n * 3 (first column denote the label 0 or 1, second column denote the Yid 1 or 2, third column is condition mean of Z)
# (Identical re-definition of the YZdrawcpp kernel defined earlier in this
# file; the matching cxxfunction() compile call follows on the next lines.)
# Row i draws Y = 1 when rand_vec[i] < Yprob_new[i, 2], records the label
# and a 1/2 category id, and stores delta_new[i] + Y * P_hat * SigmaZ_hat
# (the conditional mean of Z) in column 3.
YZdrawcpp <- '
arma::vec xdelta = Rcpp::as<arma::vec>(delta_new);
arma::vec xmu = Rcpp::as<arma::vec>(mu_new);
arma::mat xprob = Rcpp::as<arma::mat>(Yprob_new);
double xP = Rcpp::as<double>(P_hat);
double xsimgaZ = Rcpp::as<double>(SigmaZ_hat);
int n = xmu.n_rows;
arma::mat Ydraw = arma::zeros<arma::mat>(n,3);
arma::vec rand = Rcpp::as<arma::vec>(rand_vec);
for(int i = 1; i<=n; i++)
{
if(rand(i-1) < xprob(i-1, 1))
{
Ydraw(i-1,0) = 1;
Ydraw(i-1,1) = 2;
}
else
Ydraw(i-1,1) = 1;
Ydraw(i-1,2) = xdelta(i-1) + Ydraw(i-1,0) * xP * xsimgaZ;
}
return Rcpp::wrap(Ydraw);
'
rcppYZdraw <- cxxfunction(signature(delta_new = "numeric", mu_new = "numeric",
Yprob_new = "numeric", P_hat = "numeric", SigmaZ_hat = "numeric", rand_vec = "numeric"),
YZdrawcpp, plugin = "RcppArmadillo", verbose = TRUE) | /cpp.R | no_license | EugeneHao/BivarateModel | R | false | false | 14,624 | r | # compute the log density for sample data in each group
# including f_yz, f_z, and f_y
# (Duplicated copy of the groupdenscpp kernel defined earlier in this
# dump.)  Computes, for each Monte-Carlo draw of the random effects, the
# summed log density of every observation in each group; returns an
# m x D matrix.  NOTE(review): `ub` is indexed (group, draw, component)
# here, unlike the *_posterior kernels which use (draw, group, component).
# The C++ text lives in a string literal and is left byte-identical.
groupdenscpp <-'
int m = Rcpp::as<int>(m0); // MC replicates
int D = Rcpp::as<int>(D0); // group size
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix); // generate random effect D * m * 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:5: H1,K1,H0,K0, M)
int n = Z.n_rows; // sample size
double gx;
double u;
double b;
double logdens;
arma::mat result = arma::zeros<arma::mat>(m, D); //result table ???
for(int i = 1; i<=m; i++)
{
for(int j = 1; j<=n; j++)
{
u = ub(id(j-1)-1, i-1, 0); // random effect for Z
b = ub(id(j-1)-1, i-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
logdens = Y(j-1) * gx -log(1+exp(gx));
if(subid(j-1) < 3) // H, K
{
logdens += -0.5 * log(2*PI*sigmaZ) - pow(Z(j-1) - delta(j-1) - u - Y(j-1)*sigmaZ*P, 2)/sigmaZ/2;
}
if(subid(j-1) == 2) // K1
{
logdens += log(1+exp(Z(j-1)*P + mu(j-1) + b));
}
result(i-1, id(j-1)-1) += logdens;
}
}
return Rcpp::wrap(result);
'
# Recompile of the same kernel (redundant but harmless).
rcppgroupdens <- cxxfunction(signature(m0 = "int", D0 = "int", P0 = "numeric", sigmaZ0 = "numeric",
                                       Z0 = "numeric", Y0 = "numeric", rand_matrix = "numeric",
                                       delta_fix0 = "numeric", mu_fix0 = "numeric", id0 = "int", subid0 = "int"),
                             groupdenscpp,plugin="RcppArmadillo", verbose = TRUE)
# (Duplicated copy of the SigmaUVweightavg kernel from earlier in this
# dump.)  result = sum_k weight_k * x_k x_k^T, a 2 x 2 weighted
# second-moment matrix of the random-effect draws; used as the Sigma_UV
# update.  The C++ text lives in a string literal and is left byte-identical.
SigmaUVweightavgcpp <- '
arma::mat xrand = Rcpp::as<arma::mat>(rand_origin); // mD * 2
arma::vec xweight = Rcpp::as<arma::vec>(weight); // D*m
int n = xweight.n_rows;
arma::mat result = arma::zeros<arma::mat>(2,2);
for(int i = 1; i<=2; i++)
for(int j = 1; j<=2; j++)
for(int k = 1; k<=n; k++)
result(i-1,j-1) += xrand(k-1,i-1) * xrand(k-1,j-1) * xweight(k-1);
return Rcpp::wrap(result);
'
# Compile: rcppSigmaUVweightavg(rand_origin, weight) -> 2 x 2 matrix.
rcppSigmaUVweightavg <- cxxfunction(signature(rand_origin="numeric", weight = "numeric"),
                                    SigmaUVweightavgcpp,plugin="RcppArmadillo", verbose = TRUE)
# Ydraw n * 3 (first column denote the label 0 or 1, second column denote the Yid 1 or 2, third column is condition mean of Z)
# (Duplicated copy of the YZdraw kernel from earlier in this dump.)
# Row i draws Y = 1 when the uniform draw rand_vec[i] falls below
# Yprob_new[i, 2], records the 0/1 label (col 1) and a 1/2 category id
# (col 2), and stores delta_new[i] + Y * P_hat * SigmaZ_hat (the
# conditional mean of Z) in column 3.
YZdrawcpp <- '
arma::vec xdelta = Rcpp::as<arma::vec>(delta_new);
arma::vec xmu = Rcpp::as<arma::vec>(mu_new);
arma::mat xprob = Rcpp::as<arma::mat>(Yprob_new);
double xP = Rcpp::as<double>(P_hat);
double xsimgaZ = Rcpp::as<double>(SigmaZ_hat);
int n = xmu.n_rows;
arma::mat Ydraw = arma::zeros<arma::mat>(n,3);
arma::vec rand = Rcpp::as<arma::vec>(rand_vec);
for(int i = 1; i<=n; i++)
{
if(rand(i-1) < xprob(i-1, 1))
{
Ydraw(i-1,0) = 1;
Ydraw(i-1,1) = 2;
}
else
Ydraw(i-1,1) = 1;
Ydraw(i-1,2) = xdelta(i-1) + Ydraw(i-1,0) * xP * xsimgaZ;
}
return Rcpp::wrap(Ydraw);
'
# Compile: rcppYZdraw(...) -> n x 3 matrix (label, category id, E[Z | Y]).
rcppYZdraw <- cxxfunction(signature(delta_new = "numeric", mu_new = "numeric",
                          Yprob_new = "numeric", P_hat = "numeric", SigmaZ_hat = "numeric", rand_vec = "numeric"),
                          YZdrawcpp, plugin = "RcppArmadillo", verbose = TRUE)
# (Duplicated copy of the expectation kernel from earlier in this dump.)
# Monte-Carlo estimate of the EXPECTED NEGATIVE log-likelihood, averaging
# each observation's log density over the m posterior draws in
# rand_matrix_posterior (indexed draw x group x component).  Returns a
# single double; used by rcpploglikefun() as the optim() objective.
groupdens_expectation_cpp <-'
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix_posterior); // m * D * 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:5: H1,K1,H0,K0, M)
int n = Z.n_rows; // sample size
int m = ub.n_rows; // MC replicates
double gx;
double u;
double b;
double logdens; //logdens for one sample
double result; // final output
result = 0;
for(int j = 1; j<=n; j++) // each sample
{
logdens = 0; // first define logdens (later random part)
for(int i = 1; i<=m; i++) //each MC draw
{
u = ub(i-1, id(j-1)-1, 0); // random effect for Z
b = ub(i-1, id(j-1)-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
logdens += -log(1+exp(gx)) + Y(j-1) * gx; // include first term random part
if(subid(j-1) < 3)
{
logdens += -0.5 * log(2*PI*sigmaZ) - pow(Z(j-1) - delta(j-1) - Y(j-1)*P*sigmaZ - u, 2)/sigmaZ/2; // random part
}
if(subid(j-1) == 2) // K
{
logdens += log(1 + exp(Z(j-1) * P + mu(j-1) + b));
}
}
result -= logdens/m; //expectation of negative loglikehood
}
return Rcpp::wrap(result);
'
# Compile: rcppgroupdens_expectation_cpp(...) -> scalar.
rcppgroupdens_expectation_cpp <- cxxfunction(signature(P0 = "numeric", sigmaZ0 = "numeric", Z0 = "numeric", Y0 = "numeric",
                                                       rand_matrix_posterior = "numeric", delta_fix0 = "numeric",
                                                       mu_fix0 = "numeric", id0 = 'int', subid0 = "int"),
                                             groupdens_expectation_cpp,plugin="RcppArmadillo", verbose = TRUE)
# (Duplicated copy of the gradient kernel from earlier in this dump.)
# Gradient of the Monte-Carlo expected negative log-likelihood w.r.t.
# (betaz0, betaz1..., betay0, betay1, P, sigmaZ); returns a vector of
# length num_Zcoef + 5 matching the initparam layout used by optim().
# BUG FIX: the per-sample accumulators (dbetaz0 ... dsigmaZ) were only
# initialized once, so on each outer iteration j the contributions of all
# previous samples were added to `result` again; they are now reset at the
# top of the sample loop (mirroring how the expectation kernel resets
# `logdens` per sample).
groupdens_dev1_cpp <-'
double P = Rcpp::as<double>(P0);
double sigmaZ = Rcpp::as<double>(sigmaZ0);
int num_Zcoef = Rcpp::as<int>(num_Zcoef0);
arma::vec Z = Rcpp::as<arma::vec>(Z0);
arma::vec Y = Rcpp::as<arma::vec>(Y0);
arma::mat X = Rcpp::as<arma::mat>(X0); // matrix
arma::cube ub = Rcpp::as<arma::cube>(rand_matrix_posterior); // m * D* 2
arma::vec delta = Rcpp::as<arma::vec>(delta_fix0); // x * beta_z
arma::vec mu = Rcpp::as<arma::vec>(mu_fix0); // x * beta_y
arma::vec id = Rcpp::as<arma::vec>(id0); // group id
arma::vec subid = Rcpp::as<arma::vec>(subid0); // subgroup id (1:3: H, K, M)
int n = Z.n_rows; // sample size
int m = ub.n_rows; // MC replicates
double gx;
double u;
double b;
double dbetaz0 = 0;
arma::vec dbetaz1 = arma::zeros<arma::vec>(num_Zcoef); // vector
double dbetay0 = 0;
double dbetay1 = 0;
double dP = 0;
double dsigmaZ = 0;
double partA;
double partB;
arma::vec result = arma::zeros<arma::vec>(4+num_Zcoef+1); // betaz, betay, P, sigmaZ;
for(int j = 1; j<=n; j++) // each sample
{
dbetaz0 = 0; dbetay0 = 0; dbetay1 = 0; dP = 0; dsigmaZ = 0; // reset per-sample accumulators
dbetaz1.zeros();
for(int i = 1; i<=m; i++) //each MC draw
{
u = ub(i-1, id(j-1)-1, 0); // random effect for Z
b = ub(i-1, id(j-1)-1, 1); // random effect for Y
gx = P * (delta(j-1) + u) + mu(j-1) + b + sigmaZ * pow(P, 2)/2; // g(x)
partB = -exp(gx)/(1+exp(gx));
dbetaz0 += (Y(j-1) + partB) * P;
for(int k = 1; k <= num_Zcoef; k++)
{
dbetaz1(k-1) += (Y(j-1) + partB) * P * X(j-1, k-1);
}
dbetay0 += Y(j-1) + partB;
dbetay1 += (Y(j-1) + partB) * X(j-1, 0);
dP += (Y(j-1) + partB) * (delta(j-1) + u + sigmaZ* P); // add random part here
dsigmaZ += (Y(j-1) + partB) * pow(P, 2)/2;
if(subid(j-1) < 3)
{
partA = (Z(j-1) - delta(j-1) - u - Y(j-1) * P *sigmaZ)/sigmaZ;
dbetaz0 += partA;
for(int k = 1; k <= num_Zcoef; k++)
{
dbetaz1(k-1) += partA * X(j-1, k-1);
}
dP += partA * Y(j-1) * sigmaZ;
dsigmaZ += -0.5/sigmaZ + pow(partA, 2)/2 + partA * Y(j-1)*P;
}
if(subid(j-1) == 2) // K
{
partB = exp(Z(j-1) * P + mu(j-1) + b)/(1 + exp(Z(j-1) * P + mu(j-1) + b));
dbetay0 += partB;
dbetay1 += partB * X(j-1,0);
dP += partB * Z(j-1);
}
}
result(0) -= dbetaz0/m;
for(int k = 1; k<= num_Zcoef; k++)
{
result(k) -= dbetaz1(k-1)/m;
}
result(num_Zcoef+1) -= dbetay0/m;
result(num_Zcoef+2) -= dbetay1/m;
result(num_Zcoef+3) -= dP/m;
result(num_Zcoef+4) -= dsigmaZ/m; //derivative of negative loglikelihood
}
return Rcpp::wrap(result);
'
# Compile: rcppgroupdens_dev1_cpp(...) -> gradient vector.
rcppgroupdens_dev1_cpp <- cxxfunction(signature(P0 = "numeric", sigmaZ0 = "numeric", num_Zcoef0 = 'int',
                                                Z0 = "numeric", Y0 = "numeric", X0 = "numeric", rand_matrix_posterior = "numeric",
                                                delta_fix0 = "numeric", mu_fix0 = "numeric", id0 = "int", subid0 = "int"),
                                      groupdens_dev1_cpp ,plugin="RcppArmadillo", verbose = TRUE)
# Analytic gradient of the Monte-Carlo expected negative log-likelihood,
# evaluated at the candidate vector `initparam` = c(Bz (1+num_Zcoef),
# By (1+num_Ycoef), P, SigmaZ); passed to optim() as `gr`.
# Relies on the globals num_Zcoef / num_Ycoef defined elsewhere.
rcppfulldevfun <- function(data, initparam, rand_matrix_posterior)
{
  bZinit <- initparam[1:(1 + num_Zcoef)]
  bYinit <- initparam[(2 + num_Zcoef):(2 + num_Zcoef + num_Ycoef)]
  Pinit <- initparam[3 + num_Zcoef + num_Ycoef]
  SigmaZinit <- initparam[4 + num_Zcoef + num_Ycoef]
  delta_fix <- as.matrix(cbind(1, data[, 2:(1 + num_Zcoef)])) %*% bZinit
  mu_fix <- cbind(1, data$X2) %*% bYinit
  X0 <- data[, 2:(1 + num_Zcoef)] %>% as.matrix()
  # BUG FIX: evaluate the gradient kernel at the candidate parameters
  # Pinit / SigmaZinit (the original passed the globals P / SigmaZ, so
  # the gradient ignored the point optim() was evaluating and Pinit /
  # SigmaZinit were computed but unused), and supply the mandatory
  # num_Zcoef0 argument that was missing from the call.
  rcppgroupdens_dev1_cpp(P0 = Pinit, sigmaZ0 = SigmaZinit, num_Zcoef0 = num_Zcoef,
                         Z0 = data$Z, Y0 = data$Y, X0 = X0,
                         rand_matrix_posterior = rand_matrix_posterior,
                         delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                         id0 = data$group, subid0 = data$subgroupid)
}
# Monte-Carlo expected negative log-likelihood at the candidate parameter
# vector `initparam` = c(Bz (1+num_Zcoef), By (1+num_Ycoef), P, SigmaZ),
# averaged over the posterior random-effect draws.  Passed to optim() as
# the objective; relies on the globals num_Zcoef / num_Ycoef.
rcpploglikefun <- function(data, initparam, rand_matrix_posterior)
{
  n_bz <- 1 + num_Zcoef                           # size of the Z-coefficient block
  beta_z    <- initparam[1:n_bz]
  beta_y    <- initparam[(n_bz + 1):(n_bz + 1 + num_Ycoef)]
  p_cand    <- initparam[n_bz + num_Ycoef + 2]
  sigz_cand <- initparam[n_bz + num_Ycoef + 3]
  # fixed-effect linear predictors for Z and Y
  delta_fix <- as.matrix(cbind(1, data[, 2:(1 + num_Zcoef)])) %*% beta_z
  mu_fix    <- cbind(1, data$X2) %*% beta_y
  rcppgroupdens_expectation_cpp(P0 = p_cand, sigmaZ0 = sigz_cand,
                                Z0 = data$Z, Y0 = data$Y,
                                rand_matrix_posterior = rand_matrix_posterior,
                                delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                                id0 = data$group, subid0 = data$subgroupid)
}
# One M-step update of the fixed parameters (Bz, By, P, SigmaZ):
#  1. draw m Monte-Carlo random effects per group from N(0, SigmaUV_hat),
#  2. weight each draw by its group-wise posterior density (stabilized by
#     subtracting the 70% quantile before exponentiating),
#  3. resample the draws by those weights to obtain posterior random
#     effects (m x D x 2),
#  4. minimize the expected negative log-likelihood with optim()
#     (BFGS with the analytic gradient when the global `usedev` is TRUE).
# Relies on globals: D, num_Zcoef, usedev (and rmvnorm from mvtnorm).
# Returns the updated parameter vector c(Bz, By, P, SigmaZ).
rcppupdatefix <- function(data, Bz_hat, By_hat, SigmaZ_hat, P_hat, SigmaUV_hat, maxiter, m)
{
rand_matrix <- rmvnorm(m * D, mean = rep(0, 2), sigma = SigmaUV_hat) %>% array(., dim = c(D, m, 2)) # D x m x 2 random-effect draws
delta_fix = as.matrix(cbind(1, data[, 2:(1+num_Zcoef)])) %*% Bz_hat
mu_fix = cbind(1, data$X2) %*% By_hat
# compute the posterior weight of each draw within each group
logdens <- rcppgroupdens(m0 = m, D0 = D, P0 = P_hat, sigmaZ0 = SigmaZ_hat, Z0 = data$Z, Y0 = data$Y,
                         rand_matrix = rand_matrix, delta_fix0 = delta_fix, mu_fix0 = mu_fix,
                         id0 = data$group, subid0 = data$subgroupid)
# subtract a high per-group quantile before exp() to avoid overflow
quant_logdens = apply(logdens, MARGIN = 2, FUN = function(x) quantile(x, 0.7))
dens = sweep(logdens, MARGIN = 2, FUN = "-", STATS = quant_logdens) %>% exp()
dens[is.infinite(dens)] <- 0  # clamp any residual overflow
weight = sweep(dens, STATS= colSums(dens), FUN = "/" , MARGIN = 2) # posterior weight
# next generate posterior samples from posterior distribution
# (posterior_ID: (group, draw-index) pairs used to index rand_matrix)
posterior_ID <- sapply(1:D, FUN = function(x) base::sample(1:dim(weight)[1], dim(weight)[1], replace = TRUE, prob = weight[,x])) %>%
  as.vector() %>% cbind(rep(1:D, each = dim(weight)[1]), .)
rand_matrix_posterior <- c(rand_matrix[,,1][posterior_ID], rand_matrix[,,2][posterior_ID]) %>% array(., dim = c(m, D, 2)) # m x D x 2
initparam <- c(Bz_hat, By_hat, P_hat, SigmaZ_hat)
if(usedev == TRUE)
o1 <- optim(par = initparam, fn = rcpploglikefun, data = data, method = "BFGS", gr = rcppfulldevfun,
            rand_matrix_posterior = rand_matrix_posterior, control = list(maxit = maxiter))
if(usedev == FALSE)
o1 <- optim(par = initparam, fn = rcpploglikefun, data = data,
            rand_matrix_posterior = rand_matrix_posterior, control = list(maxit = maxiter))
return(o1$par) #(bz, by, P, sigmaZ)
}
# (Identical re-definition of the SigmaUVweightavg kernel defined earlier
# in this file; redundant but harmless -- it recompiles the same code.)
# result = sum_k weight_k * x_k x_k^T, the 2 x 2 weighted second-moment
# matrix of the random-effect draws.
SigmaUVweightavgcpp <- '
arma::mat xrand = Rcpp::as<arma::mat>(rand_origin);
arma::vec xweight = Rcpp::as<arma::vec>(weight);
int n = xweight.n_rows;
arma::mat result = arma::zeros<arma::mat>(2,2);
for(int i = 1; i<=2; i++)
for(int j = 1; j<=2; j++)
for(int k = 1; k<=n; k++)
result(i-1,j-1) += xrand(k-1,i-1) * xrand(k-1,j-1) * xweight(k-1);
return Rcpp::wrap(result);
'
# Recompile of the same kernel.
rcppSigmaUVweightavg <- cxxfunction(signature(rand_origin="numeric", weight = "numeric"),
                                    SigmaUVweightavgcpp,plugin="RcppArmadillo", verbose = TRUE)
# this function uses two rcpp functions: rcppgroupdens() and
# rcppSigmaUVweightavg(), both compiled elsewhere in this script.
rcppSigmaUVfun <- function(data, SigmaZ_hat, P_hat, Bz_hat, By_hat, SigmaUV_hat, m)
{
  # M-step update of SigmaUV: the importance-weighted second moment of fresh
  # random-effect draws, with weights proportional to each draw's posterior
  # density given the data.
  #
  # Returns: a 2x2 matrix (the updated SigmaUV estimate).
  #
  # NOTE(review): relies on globals D and num_Zcoef defined in the enclosing
  # script; rmvnorm() comes from the mvtnorm package.
  rand_origin <- rmvnorm(n = m * D, mean = rep(0, 2), sigma = SigmaUV_hat)
  rand_matrix <- rand_origin %>% array(., dim = c(D, m, 2))  # D x m x 2
  delta_fix <- as.matrix(cbind(1, data[, 2:(1 + num_Zcoef)])) %*% Bz_hat
  mu_fix <- cbind(1, data$X2) %*% By_hat  # fixed-effect means for Y (vector)
  # BUG FIX: the original passed the *global* `P` here (P0 = P) instead of the
  # `P_hat` argument, so the caller-supplied correlation estimate was silently
  # ignored and P_hat was never used.
  logdens <- rcppgroupdens(m0 = m, D0 = D, P0 = P_hat, sigmaZ0 = SigmaZ_hat,
                           Z0 = data$Z, Y0 = data$Y,
                           rand_matrix = rand_matrix, delta_fix0 = delta_fix,
                           mu_fix0 = mu_fix, id0 = data$group,
                           subid0 = data$subgroupid)  # m x D (draws x groups)
  rm(rand_matrix)
  # Stabilised exponentiation (subtract each column's 70% quantile), then
  # column-normalise and scale by 1/D so the weights sum to 1 overall.
  quant_logdens <- apply(logdens, MARGIN = 2, FUN = function(x) quantile(x, 0.7))
  dens <- sweep(logdens, MARGIN = 2, FUN = "-", STATS = quant_logdens) %>% exp()
  dens[is.infinite(dens)] <- 0
  weight <- t(sweep(dens, STATS = colSums(dens), FUN = "/", MARGIN = 2) / D)  # D x m
  # as.vector(weight) runs group-fastest, matching the row order of rand_origin
  # as reshaped into the D x m x 2 array above.
  return(rcppSigmaUVweightavg(rand_origin, as.vector(weight)))
}
# Ydraw n * 3 (first column denote the label 0 or 1, second column denote the Yid 1 or 2, third column is condition mean of Z)
# Inline C++ (RcppArmadillo) source: for each observation i, compare the
# pre-drawn uniform variate rand_vec[i] against column 2 of Yprob_new to draw
# the binary label, record the category id (2 when the label fires, else 1),
# and compute the conditional mean of Z as
#   delta_new[i] + label * P_hat * SigmaZ_hat.
# NOTE(review): `xsimgaZ` is a typo for `xsigmaZ` inside the C++ string; it is
# used consistently, so behaviour is unaffected.
# NOTE(review): mu_new is converted but only its length (n) is used in the
# loop -- confirm whether it was meant to enter the conditional mean.
YZdrawcpp <- '
arma::vec xdelta = Rcpp::as<arma::vec>(delta_new);
arma::vec xmu = Rcpp::as<arma::vec>(mu_new);
arma::mat xprob = Rcpp::as<arma::mat>(Yprob_new);
double xP = Rcpp::as<double>(P_hat);
double xsimgaZ = Rcpp::as<double>(SigmaZ_hat);
int n = xmu.n_rows;
arma::mat Ydraw = arma::zeros<arma::mat>(n,3);
arma::vec rand = Rcpp::as<arma::vec>(rand_vec);
for(int i = 1; i<=n; i++)
{
if(rand(i-1) < xprob(i-1, 1))
{
Ydraw(i-1,0) = 1;
Ydraw(i-1,1) = 2;
}
else
Ydraw(i-1,1) = 1;
Ydraw(i-1,2) = xdelta(i-1) + Ydraw(i-1,0) * xP * xsimgaZ;
}
return Rcpp::wrap(Ydraw);
'
rcppYZdraw <- cxxfunction(signature(delta_new = "numeric", mu_new = "numeric",
Yprob_new = "numeric", P_hat = "numeric", SigmaZ_hat = "numeric", rand_vec = "numeric"),
YZdrawcpp, plugin = "RcppArmadillo", verbose = TRUE) |
# plot3: total PM2.5 emissions in Baltimore City (fips 24510) by source type
# for 1999/2002/2005/2008, written to plot3.png.
# The exists() guards cache the (slow-to-read) RDS files in the global
# environment across repeated runs of the script.
if(!exists("emission")){
emission <- readRDS("C:/Users/ehoff_000/Documents/GitHub/data_analysis_final_project/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("C:/Users/ehoff_000/Documents/GitHub/data_analysis_final_project/Source_Classification_Code.rds")
}
library(ggplot2)
# Restrict to Baltimore City, then sum emissions per year and source type.
subset_emission <- emission[emission$fips=="24510", ]
total_year_type <- aggregate(Emissions ~ year + type, subset_emission, sum)
# Render one line per source type to a PNG device.
png("plot3.png", width=640, height=480)
g <- ggplot(total_year_type, aes(year, Emissions, color = type))
g <- g + geom_line() +
  xlab("year") +
  ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle('Total Emissions in Baltimore City, Maryland for 1999, 2002, 2005 and 2008')
print(g)
dev.off()
| /plot3.R | no_license | erichoffman1217/data_analysis_final_project | R | false | false | 758 | r | if(!exists("emission")){
emission <- readRDS("C:/Users/ehoff_000/Documents/GitHub/data_analysis_final_project/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("C:/Users/ehoff_000/Documents/GitHub/data_analysis_final_project/Source_Classification_Code.rds")
}
library(ggplot2)
subset_emission <- emission[emission$fips=="24510", ]
total_year_type <- aggregate(Emissions ~ year + type, subset_emission, sum)
png("plot3.png", width=640, height=480)
g <- ggplot(total_year_type, aes(year, Emissions, color = type))
g <- g + geom_line() +
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions in Baltimore City, Maryland for 1999, 2002, 2005 and 2008')
print(g)
dev.off()
|
## ----setup, include=FALSE-----------------------------------------------------
# knitr chunk defaults for this (purled) vignette: echo code and tidy it to a
# 65-character width.
knitr::opts_chunk$set(
echo = TRUE,
tidy.opts = list(width.cutoff = 65),
tidy = TRUE)
# Fix the RNG seed so any sampled output in the vignette is reproducible.
set.seed(12314159)
# Directory holding the pre-rendered figures embedded via include_graphics().
imageDirectory <- "./images/introduction"
# Join two path components with a separator (default "/").
path_concat <- function(path1, path2, sep = "/") {
  paste0(path1, sep, path2)
}
## ----library, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 4, out.width = "75%", warning=FALSE, message=FALSE----
# library(rasterly)
# library(data.table)
# library(lubridate)
# library(grid)
# library(plotly)
## ----data, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# # Load data
# ridesRaw_1 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data1.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesRaw_2 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data2.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesRaw_3 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data3.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesDf <- list(ridesRaw_1, ridesRaw_2, ridesRaw_3) %>%
# data.table::rbindlist()
#
# # Extract hour of trip taken
# time <- lubridate::ymd_hms(ridesDf$`Date/Time`)
# ridesDf <- ridesDf[, 'Date/Time':=NULL][, list(Lat,
# Lon,
# hour = lubridate::hour(time),
# month = lubridate::month(time),
# day = lubridate::day(time))]
## ----basic, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# start_time <- Sys.time()
# p <- ridesDf %>%
# rasterly(mapping = aes(x = Lat, y = Lon)) %>%
# rasterly_points()
# p
# end_time <- Sys.time()
# end_time - start_time
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberBasic.png"))
## ----image2Data---------------------------------------------------------------
# Build a tiny 2x2 raster with four grey levels to demonstrate
# rasterly::image2data(), which converts a raster image into a data frame of
# pixel coordinates and colours.
image <- as.raster(matrix((1:4)/4, nrow = 2))
image
# mapping this image onto a 1 <= x <= 2 and 2 <= y <= 5 plane
rasterly::image2data(image, x_range = c(1,2), y_range = c(2,5))
## ----basic plot, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# imageData <- rasterly::image2data(p)
# # basic graphics
# # It is slow but still much faster than drawing the huge data directly)
# plot(x = imageData$x, y = imageData$y, col = imageData$color)
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "image2data.png"))
## ----subsetting, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# p["background"]
# # $rasterly_env
# # [1] "white"
#
# # $rasterlyPoints1
# # [1] "white"
# ########### Replace the background in child layer `rasterly_points()`
# p["background", level = 2] <- "black"
# p["background"]
# # $rasterly_env
# # [1] "white"
#
# # $rasterlyPoints1
# # [1] "black"
# ########## Colors in both `rasterly()` and `rasterly_points()` are replaced
# ## fire is a vector of colors (as character strings) with length 256
# ## see `rasterly::fire`
# p["color", level = 1:2] <- fire_map
# p
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "darkBg.png"))
## ----rasterly_build, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# build <- rasterly_build(p)
# str(build)
## ----add_rasterly_heatmap, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# plotly::plot_ly(ridesDf, x = ~Lat, y = ~Lon) %>%
# add_rasterly_heatmap() %>%
# layout(
# title = "Uber drives",
# xaxis = list(
# title = "Lat"
# ),
# yaxis = list(
# title = "Lon"
# )
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "add_rasterizer.gif"))
## ----plotly_rasterly, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# # if as_image is FALSE, the image will be transformed to a data.frame
# plotRasterly(ridesDf,
# mapping = aes(x = Lat, y = Lon),
# as_image = TRUE)
## ----ggRasterly, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# ggRasterly(data = ridesDf,
# mapping = aes(x = Lat, y = Lon, color = hour),
# color = hourColors_map) +
# labs(title = "New York Uber",
# subtitle = "Apr to Sept, 2014",
# caption = "Data from https://raw.githubusercontent.com/plotly/datasets/master")
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "ggUber.png"))
## ----API, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r <- rasterly(data = ridesDf,
# mapping = aes(x = Lat, y = Lon))
## ----set color, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(color = hour),
# color = hourColors_map,
# background = "black"
# ) -> g
# g
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberColor.png"))
## ----set color cover, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(color = hour),
# color = hourColors_map,
# background = "black",
# layout = "cover"
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberColorCover.png"))
## ----set on, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# # take the "mean" reduction function
# # more details are in section 'Reduction function'
# reduction_func = "mean",
# mapping = aes(on = -Lat)
# )
## ----set size, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(size = month),
# max_size = 4
# )
## ----reduction on mean, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# reduction_func = "mean", # process the data points using the mean reduction function
# background = "black", # change background to "black" from right to left (from dark to light)
# color = fire_map # provide a custom color_map
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "meanAgg.png"))
## ----reduction on any, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# reduction_func = "any",
# color = c("white", "black")
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "anyAgg.png"))
| /vignettes/introduction.R | no_license | donrv/rasterly | R | false | false | 8,387 | r | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(
echo = TRUE,
tidy.opts = list(width.cutoff = 65),
tidy = TRUE)
set.seed(12314159)
imageDirectory <- "./images/introduction"
path_concat <- function(path1, path2, sep="/") {paste(path1, path2, sep = sep)}
## ----library, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 4, out.width = "75%", warning=FALSE, message=FALSE----
# library(rasterly)
# library(data.table)
# library(lubridate)
# library(grid)
# library(plotly)
## ----data, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# # Load data
# ridesRaw_1 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data1.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesRaw_2 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data2.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesRaw_3 <- "https://raw.githubusercontent.com/plotly/datasets/master/uber-rides-data3.csv" %>%
# data.table::fread(stringsAsFactors = FALSE)
# ridesDf <- list(ridesRaw_1, ridesRaw_2, ridesRaw_3) %>%
# data.table::rbindlist()
#
# # Extract hour of trip taken
# time <- lubridate::ymd_hms(ridesDf$`Date/Time`)
# ridesDf <- ridesDf[, 'Date/Time':=NULL][, list(Lat,
# Lon,
# hour = lubridate::hour(time),
# month = lubridate::month(time),
# day = lubridate::day(time))]
## ----basic, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# start_time <- Sys.time()
# p <- ridesDf %>%
# rasterly(mapping = aes(x = Lat, y = Lon)) %>%
# rasterly_points()
# p
# end_time <- Sys.time()
# end_time - start_time
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberBasic.png"))
## ----image2Data---------------------------------------------------------------
image <- as.raster(matrix((1:4)/4, nrow = 2))
image
# mapping this image onto a 1 <= x <= 2 and 2 <= y <= 5 plane
rasterly::image2data(image, x_range = c(1,2), y_range = c(2,5))
## ----basic plot, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# imageData <- rasterly::image2data(p)
# # basic graphics
# # It is slow but still much faster than drawing the huge data directly)
# plot(x = imageData$x, y = imageData$y, col = imageData$color)
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "image2data.png"))
## ----subsetting, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# p["background"]
# # $rasterly_env
# # [1] "white"
#
# # $rasterlyPoints1
# # [1] "white"
# ########### Replace the background in child layer `rasterly_points()`
# p["background", level = 2] <- "black"
# p["background"]
# # $rasterly_env
# # [1] "white"
#
# # $rasterlyPoints1
# # [1] "black"
# ########## Colors in both `rasterly()` and `rasterly_points()` are replaced
# ## fire is a vector of colors (as character strings) with length 256
# ## see `rasterly::fire`
# p["color", level = 1:2] <- fire_map
# p
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "darkBg.png"))
## ----rasterly_build, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# build <- rasterly_build(p)
# str(build)
## ----add_rasterly_heatmap, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# plotly::plot_ly(ridesDf, x = ~Lat, y = ~Lon) %>%
# add_rasterly_heatmap() %>%
# layout(
# title = "Uber drives",
# xaxis = list(
# title = "Lat"
# ),
# yaxis = list(
# title = "Lon"
# )
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "add_rasterizer.gif"))
## ----plotly_rasterly, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# # if as_image is FALSE, the image will be transformed to a data.frame
# plotRasterly(ridesDf,
# mapping = aes(x = Lat, y = Lon),
# as_image = TRUE)
## ----ggRasterly, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# ggRasterly(data = ridesDf,
# mapping = aes(x = Lat, y = Lon, color = hour),
# color = hourColors_map) +
# labs(title = "New York Uber",
# subtitle = "Apr to Sept, 2014",
# caption = "Data from https://raw.githubusercontent.com/plotly/datasets/master")
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "ggUber.png"))
## ----API, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r <- rasterly(data = ridesDf,
# mapping = aes(x = Lat, y = Lon))
## ----set color, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(color = hour),
# color = hourColors_map,
# background = "black"
# ) -> g
# g
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberColor.png"))
## ----set color cover, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(color = hour),
# color = hourColors_map,
# background = "black",
# layout = "cover"
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "uberColorCover.png"))
## ----set on, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# # take the "mean" reduction function
# # more details are in section 'Reduction function'
# reduction_func = "mean",
# mapping = aes(on = -Lat)
# )
## ----set size, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# mapping = aes(size = month),
# max_size = 4
# )
## ----reduction on mean, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# reduction_func = "mean", # process the data points using the mean reduction function
# background = "black", # change background to "black" from right to left (from dark to light)
# color = fire_map # provide a custom color_map
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "meanAgg.png"))
## ----reduction on any, eval = FALSE, echo = TRUE, fig.align="center", fig.width = 6, fig.height = 5, out.width = "75%", warning=FALSE, message=FALSE, tidy=FALSE----
# r %>%
# rasterly_points(
# reduction_func = "any",
# color = c("white", "black")
# )
## ---- out.width= "60%", fig.align="center", echo=FALSE------------------------
knitr::include_graphics(path_concat(imageDirectory, "anyAgg.png"))
|
# SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.
#
# UNIT TEST: checkEmpty
#
# checkEmpty checks whether or not the input data frame is empty
# and stops with an error if it is
# Note that by empty, we mean no columns. Data frames with no columns
# fail, but data frames with columns but no rows pass
# loading testing library
library(testthat)
context('checkEmpty')
# Testing that Empty DFs give errors
# checkEmpty() must reject data frames with zero columns, but accept data
# frames that have columns -- even when they contain zero rows.
test_that("Empty DFs give errors", {
  # Zero rows but three typed columns: should be accepted.
  zero_row_df <- data.frame(Car = character(),
                            Date = as.Date(character()),
                            Model = character(),
                            stringsAsFactors = FALSE)
  # Zero columns: should be rejected.
  zero_col_df <- data.frame()
  # An ordinary populated data frame: should be accepted.
  populated_df <- iris

  expect_silent(checkEmpty(zero_row_df))
  expect_silent(checkEmpty(populated_df))
  expect_error(checkEmpty(zero_col_df))
})
| /dataCompareR/tests/testthat/testCheckEmpty.R | permissive | Lextuga007/dataCompareR | R | false | false | 1,501 | r | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.
#
# UNIT TEST: checkEmpty
#
# checkEmpty checks whether or not the input data frame is empty
# and stops with an error if it is
# Note that by empty, we mean no columns. Data frames with no columns
# fail, but data frames with columns but no rows pass
# loading testing library
library(testthat)
context('checkEmpty')
# Testing that Empty DFs give errors
test_that("Empty DFs give errors", {
# create empty dataframe
emptydf <- data.frame(Car = character(),
Date = as.Date(character()),
Model = character(),
stringsAsFactors = FALSE)
veryEmptydf <- data.frame()
# create populated dataframe
fulldf <- iris
# no error with cols
expect_silent( checkEmpty(emptydf))
# get no error with no empty df
expect_silent(checkEmpty(fulldf))
# get error with empty df
expect_error(checkEmpty(veryEmptydf))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samples.R
\name{fetch_samples.FacileDataSet}
\alias{fetch_samples.FacileDataSet}
\title{Fetches a sample descriptor that matches the filter criterion}
\usage{
\method{fetch_samples}{FacileDataSet}(x, samples = NULL, assay = "rnaseq", ...)
}
\arguments{
\item{x}{A \code{FacileDataRepository}}
\item{...}{the NSE boolean filter criteria}
}
\value{
a facile sample descriptor
}
\description{
Use \code{...} as if this is a dplyr::filter call, and our
sample_covariate_tbl was "wide".
}
\details{
This is experimental, so each "term" in the filter criteria should be
just one boolean operation. Multiple terms passed into \code{...} will be
"AND"ed together.
}
\seealso{
Other API:
\code{\link{fetch_assay_score.FacileDataSet}()},
\code{\link{fetch_custom_sample_covariates.FacileDataSet}()},
\code{\link{fetch_sample_covariates}()},
\code{\link{fetch_sample_statistics.FacileDataSet}()},
\code{\link{filter_features.FacileDataSet}()},
\code{\link{filter_samples.FacileDataSet}()},
\code{\link{organism.FacileDataSet}()},
\code{\link{samples.FacileDataSet}()}
}
\concept{API}
| /man/fetch_samples.FacileDataSet.Rd | permissive | jonocarroll/FacileData | R | false | true | 1,153 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samples.R
\name{fetch_samples.FacileDataSet}
\alias{fetch_samples.FacileDataSet}
\title{Fetches a sample descriptor that matches the filter criterion}
\usage{
\method{fetch_samples}{FacileDataSet}(x, samples = NULL, assay = "rnaseq", ...)
}
\arguments{
\item{x}{A \code{FacileDataRepository}}
\item{...}{the NSE boolean filter criteria}
}
\value{
a facile sample descriptor
}
\description{
Use \code{...} as if this is a dplyr::filter call, and our
sample_covariate_tbl was "wide".
}
\details{
This is experimental, so each "term" in the filter criteria should be
just one boolean operation. Multiple terms passed into \code{...} will be
"AND"ed together.
}
\seealso{
Other API:
\code{\link{fetch_assay_score.FacileDataSet}()},
\code{\link{fetch_custom_sample_covariates.FacileDataSet}()},
\code{\link{fetch_sample_covariates}()},
\code{\link{fetch_sample_statistics.FacileDataSet}()},
\code{\link{filter_features.FacileDataSet}()},
\code{\link{filter_samples.FacileDataSet}()},
\code{\link{organism.FacileDataSet}()},
\code{\link{samples.FacileDataSet}()}
}
\concept{API}
|
library(tidyverse)
library(ggtext)
library(USAboundaries)
library(sf)
library(patchwork)
library(nationalparkcolors)
library(extrafont)
fonttable()
# Colour scheme for the plots: base palette from the nationalparkcolors
# "Denali" set, plus a custom red used to highlight the lowest-MMR county.
pal <- park_palette("Denali")
#text_col <- pal[3]
background <- pal[2]   # fill for non-highlighted counties
fill_col <- "#ed1944"  # highlight red for the minimum-MMR county
line_col <- pal[1]     # county border colour
# Load the TidyTuesday measles data (school-level MMR vaccination rates).
measles <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-25/measles.csv')
# County boundary polygons (sf), renamed to match the measles columns.
county_df <- us_counties(resolution = "high") %>%
  rename(state = state_name,
         county = name)
# Mean MMR rate per county (dropping non-reported rates <= 0), flag each
# state's minimum county, then attach county geometries.
measles_sum <- measles %>%
  filter(mmr > 0) %>%
  group_by(state, county) %>%
  summarize(mmr = mean(mmr, na.rm = TRUE)) %>%
  ungroup() %>%
  group_by(state) %>%
  # "Y" marks the county (or counties) with the lowest mean MMR in its state.
  mutate(min_county = if_else(is.element(mmr, min(mmr)), "Y", NA_character_)) %>%
  ungroup() %>%
  # NOTE(review): inside this left_join, `.` is the piped summary table, so
  # county_df is filtered to states present in the measles data (excluding
  # AK/HI/territories/DC) and becomes the x side, keeping all counties.
  left_join(x = county_df %>%
              filter(is.element(state, .$state),
                     str_detect(state, "Alaska|Haw|Island|Puerto|Samoa|Guam|District", negate = TRUE)),
            y = .,
            by = c("state", "county"))
# One row per state: its lowest-MMR county, with an HTML (ggtext) panel title.
use_states <- measles_sum %>%
  filter(min_county == "Y") %>%
  distinct(state, .keep_all = TRUE) %>%
  mutate(title = paste0("<span style='font-size:16pt'>", state, "</span><br>", "<span style='font-size:11pt'>", county, ": ", round(mmr, digits = 1), "%</span>")) %>%
  select(state, title)
# Parallel vectors consumed pairwise by purrr::map2() below.
states <- use_states$state
titles <- use_states$title
# Build one choropleth per state: counties filled with the background colour,
# the minimum-MMR county highlighted in red, titled with the HTML label built
# above. Plots are stored in a list-column so they can be laid out together.
p <- tibble(
  state = states,
  plots = purrr::map2(states, titles,
                      function(x, y) {
                        ggplot() +
                          geom_sf(data = measles_sum %>%
                                    filter(state == x),
                                  aes(fill = min_county), color = line_col) +
                          theme_void() +
                          labs(
                            title = y
                          ) +
                          coord_sf() +
                          scale_fill_manual(
                            values = c("Y" = fill_col),
                            na.value = background
                          ) +
                          theme(
                            line = element_blank(),
                            text = element_text(family = "Bahnschrift"),
                            legend.position = "none",
                            plot.title = element_markdown(hjust = .5)
                          )
                      })
) %>%
  arrange(state)
# Assemble all state maps into one figure (patchwork) with markdown-styled
# title/subtitle/caption, then save it as a JPEG.
wrap_plots(p$plots) +
  plot_annotation(title = "Which Counties have the <span style='color:#ed1944'><b>Lowest</b></span> MMR Vaccination Rates?",
                  subtitle = "Data shows the average measles, mumps, and rubella (mmr) vaccination rate for schools in 18 states<br>during the 2017-2018 or 2018-2019 school year",
                  caption = "Data: The Wall Street Journal | Viz: Eric Ekholm (@ekholm_e)",
                  theme = theme(
                    text = element_text(family = "Bahnschrift"),
                    plot.title = element_markdown(size = 22, hjust = .5),
                    plot.subtitle = element_markdown(size = 12, hjust = .5, face = "italic"),
                    plot.margin = margin(.5, .5, .5, .5, unit = "cm")
                  ))
ggsave(here::here("2020 - 9 - measles/measles_plot.jpeg"), device = "jpeg", width = 10, height = 8.5)
| /2020 - 9 - measles/measles script.R | no_license | ekholme/TidyTuesday | R | false | false | 3,172 | r |
# Packages: plotting (tidyverse/ggtext/patchwork), geospatial (USAboundaries/sf),
# and theming (nationalparkcolors/extrafont).
library(tidyverse)
library(ggtext)
library(USAboundaries)
library(sf)
library(patchwork)
library(nationalparkcolors)
library(extrafont)
# Side effect only: lists registered fonts -- presumably a sanity check that
# "Bahnschrift" (used in themes below) is available.
fonttable()
# Color scheme built from the Denali national-park palette.
pal <- park_palette("Denali")
#text_col <- pal[3]
background <- pal[2]
fill_col <- "#ed1944"   # highlight red for the lowest-MMR county
line_col <- pal[1]
# TidyTuesday 2020-02-25: school-level MMR vaccination rates.
measles <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-25/measles.csv')
# County polygons, renamed so join keys match the measles data.
county_df <- us_counties(resolution = "high") %>%
rename(state = state_name,
county = name)
# Per-county average MMR rate, with the lowest-rate county in each state
# flagged via min_county == "Y"; result is joined onto county geometry.
measles_sum <- measles %>%
filter(mmr > 0) %>%   # drop schools with missing/zero-coded mmr
group_by(state, county) %>%
summarize(mmr = mean(mmr, na.rm = TRUE)) %>%
ungroup() %>%
group_by(state) %>%
# "Y" for the county (or ties) at the state minimum, NA otherwise
mutate(min_county = if_else(is.element(mmr, min(mmr)), "Y", NA_character_)) %>%
ungroup() %>%
# `.` is the summarized data piped in above; it becomes the join's y side.
# Non-contiguous states/territories are excluded by name pattern.
left_join(x = county_df %>%
filter(is.element(state, .$state),
str_detect(state, "Alaska|Haw|Island|Puerto|Samoa|Guam|District", negate = TRUE)),
y = .,
by = c("state", "county"))
# One row per state with a flagged minimum county, plus an HTML-styled
# panel title ("State<br>County: rate%") rendered later by ggtext.
use_states <- measles_sum %>%
filter(min_county == "Y") %>%
distinct(state, .keep_all = TRUE) %>%   # keep a single county if several tie
mutate(title = paste0("<span style='font-size:16pt'>", state, "</span><br>", "<span style='font-size:11pt'>", county, ": ", round(mmr, digits = 1), "%</span>")) %>%
select(state, title)
states <- use_states$state
titles <- use_states$title
# One choropleth per state: counties filled with the background color, the
# minimum-MMR county filled red; the HTML title is rendered via ggtext.
p <- tibble(
state = states,
plots = purrr::map2(states, titles,
function(x, y) {
ggplot() +
geom_sf(data = measles_sum %>%
filter(state == x),
aes(fill = min_county), color = line_col) +
theme_void() +
labs(
title = y
) +
coord_sf() +
scale_fill_manual(
values = c("Y" = fill_col),   # only the flagged county is red
na.value = background         # all other counties: palette background
) +
theme(
line = element_blank(),
text = element_text(family = "Bahnschrift"),
legend.position = "none",
plot.title = element_markdown(hjust = .5)   # ggtext: renders HTML title
)
})
) %>%
arrange(state)
# Assemble all state panels into one patchwork figure with overall titles.
wrap_plots(p$plots) +
plot_annotation(title = "Which Counties have the <span style='color:#ed1944'><b>Lowest</b></span> MMR Vaccination Rates?",
subtitle = "Data shows the average measles, mumps, and rubella (mmr) vaccination rate for schools in 18 states<br>during the 2017-2018 or 2018-2019 school year",
caption = "Data: The Wall Street Journal | Viz: Eric Ekholm (@ekholm_e)",
theme = theme(
text = element_text(family = "Bahnschrift"),
plot.title = element_markdown(size = 22, hjust = .5),
plot.subtitle = element_markdown(size = 12, hjust = .5, face = "italic"),
plot.margin = margin(.5, .5, .5, .5, unit = "cm")
))
# ggsave() with no plot argument saves the last plot displayed (the patchwork above).
ggsave(here::here("2020 - 9 - measles/measles_plot.jpeg"), device = "jpeg", width = 10, height = 8.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shifting_plots.R
\name{plot_shift_rank}
\alias{plot_shift_rank}
\title{Shifting Rank Plot}
\usage{
plot_shift_rank(
experiment,
samples = "all",
score_order = "descending",
ncol = 3
)
}
\arguments{
\item{experiment}{TSRexploreR object.}
\item{samples}{A vector of sample names to analyze.}
\item{score_order}{Either "descending" or "ascending", giving the order in which scores are ranked.}
\item{ncol}{Integer specifying the number of columns to arrange multiple plots.}
}
\description{
Shifting Rank Plot
}
| /man/plot_shift_rank.Rd | no_license | jtourig/TSRexploreR | R | false | true | 548 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shifting_plots.R
\name{plot_shift_rank}
\alias{plot_shift_rank}
\title{Shifting Rank Plot}
\usage{
plot_shift_rank(
experiment,
samples = "all",
score_order = "descending",
ncol = 3
)
}
\arguments{
\item{experiment}{TSRexploreR object.}
\item{samples}{A vector of sample names to analyze.}
\item{score_order}{Either descending or ascending}
\item{ncol}{Integer specifying the number of columns to arrange multiple plots.}
}
\description{
Shifting Rank Plot
}
|
#' Create caterpillar plots from rstan's stanfit objects
#'
#' @param obj a \code{stanfit} object
#' @param pars scalar or vector regular expressions for parameter
#' labels that you would like to plot as declared in \code{model_code} from the
#' \code{\link{stan}} call.
#' @param pars_labels vector of parameter labels for the output plot. Important:
#' they must be in the same order as in the \code{stanfit} object when
#' \code{as.data.frame(obj)} is called.
#' @param hpd logical. If \code{TRUE} then the 90\% and 95\% highest probability
#' density intervals (HDI) are found. If \code{FALSE} then the corresponding
#' central intervals are found.
#' @param order_medians logical. Whether or not to order the points by their
#' medians.
#' @param horizontal logical. Whether or not you would like the lines to be
#' horizontal
#' @param alpha_bounds numeric. Alpha transparency value for the uncertainty
#' bounds.
#'
#' @details Points plot the simulations' medians, thin lines represent the 95\%
#' HPD/central intervals, and thick lines represent the 90\% HPD/central
#' intervals.
#'
#' @examples
#' \dontrun{
#' # Create Stan model
#' library(rstan)
#' scode <- "
#' parameters {
#' real y[2];
#' }
#' model {
#' y[1] ~ normal(0, 1);
#' y[2] ~ double_exponential(0, 2);
#' }
#' "
#'
#' # Run
#' fit <- stan(model_code = scode, iter = 10000, verbose = FALSE)
#'
#' # Plot y[1] and y[2] parameters
#' stan_caterpillar(fit, pars = 'y\\[.*\\]')
#' }
#'
#' @seealso \link{rstan}, \code{\link{stan}}, \code{ggmcmc}
#'
#' @import rstan
#' @importFrom tidyr gather
#' @importFrom dplyr group_by summarise inner_join %>%
#' @import ggplot2
#' @importFrom stats median quantile reorder
#'
#' @export
stan_caterpillar <- function(obj,
                             pars,
                             pars_labels = NULL,
                             hpd = TRUE,
                             order_medians = TRUE,
                             horizontal = TRUE,
                             alpha_bounds = 0.3) {
  # Silence R CMD check NOTEs about non-standard-evaluation column names
  variable <- value <- NULL
  # Extract all simulations (one column per parameter, one row per draw)
  sims <- as.data.frame(obj)
  # Keep only parameters whose names match the `pars` regular expression
  names <- names(sims)
  sims_subset <- sims[, names %in% grep(pattern = pars, x = names,
                                        value = TRUE)] %>% data.frame()
  if (ncol(sims_subset) == 0) {
    stop("No parameters selected. \n", call. = FALSE)
  }
  # Long format: one row per (parameter, draw) pair
  gathered <- gather(sims_subset, variable, value)
  # Optionally relabel parameters for the plot axis
  if (!is.null(pars_labels)) {
    message("\nEnsure that your parameter labels are in the same order as the parameters.\n")
    if (length(pars_labels) != length(unique(gathered$variable))) {
      stop("pars_labels must equal the number of plotted parameters.",
           call. = FALSE)
    }
    gathered$variable <- factor(gathered$variable, labels = pars_labels)
  }
  # All interval summaries are computed per parameter; group once here
  # rather than separately in each branch.
  gathered <- group_by(gathered, variable)
  if (isTRUE(hpd)) {
    # Highest probability density intervals (HPD() is an internal helper)
    lower95 <- summarise(gathered, HPD(value, prob = 0.95, side = 'lower'))
    lower90 <- summarise(gathered, HPD(value, prob = 0.9, side = 'lower'))
    upper90 <- summarise(gathered, HPD(value, prob = 0.9, side = 'upper'))
    upper95 <- summarise(gathered, HPD(value, prob = 0.95, side = 'upper'))
  } else {
    # Central (equal-tailed) intervals
    lower95 <- summarise(gathered, quantile(value, 0.025))
    lower90 <- summarise(gathered, quantile(value, 0.05))
    upper90 <- summarise(gathered, quantile(value, 0.95))
    upper95 <- summarise(gathered, quantile(value, 0.975))
  }
  # Per-parameter medians (plotted as the points)
  medians <- summarise(gathered, median(value))
  # Merge the five per-parameter summaries. The only shared column is
  # `variable`, so join on it explicitly instead of relying on the implicit
  # join (which required suppressMessages() to stay quiet).
  comb <- inner_join(lower95, lower90, by = "variable")
  comb <- inner_join(comb, medians, by = "variable")
  comb <- inner_join(comb, upper90, by = "variable")
  comb <- inner_join(comb, upper95, by = "variable")
  names(comb) <- c('pars', 'lower95', 'lower90', 'medians', 'upper90',
                   'upper95')
  # Thin segment = 95% interval, thick segment = 90% interval, point = median
  if (isTRUE(order_medians)) {
    pp <- ggplot(comb, aes(x = medians, y = reorder(pars, medians),
                           xmin = lower95,
                           xmax = upper95)) +
      geom_point(size = 3) +
      geom_segment(aes(x = lower95, xend = upper95,
                       yend = reorder(pars, medians)), size = 0.5,
                   alpha = alpha_bounds) +
      geom_segment(aes(x = lower90, xend = upper90,
                       yend = reorder(pars, medians)), size = 1.5,
                   alpha = alpha_bounds) +
      xlab('') + ylab('') +
      theme_bw()
  } else {
    pp <- ggplot(comb, aes(x = medians, y = pars,
                           xmin = lower95,
                           xmax = upper95)) +
      geom_point(size = 3) +
      geom_segment(aes(x = lower95, xend = upper95, yend = pars),
                   size = 0.5, alpha = alpha_bounds) +
      geom_segment(aes(x = lower90, xend = upper90,
                       yend = pars), size = 1.5, alpha = alpha_bounds) +
      xlab('') + ylab('') +
      theme_bw()
  }
  # Flip axes when a vertical layout is requested
  if (!isTRUE(horizontal)) pp <- pp + coord_flip()
  return(pp)
}
| /R/stan_caterpillar.R | no_license | christophergandrud/StanCat | R | false | false | 5,448 | r | #' Create caterpillar plots from rstan's stanfit objects
#'
#' @param obj a \code{stanfit} object
#' @param pars scalar or vector regular expressions for parameter
#' labels that you would like to plot as declared in \code{model_code} from the
#' \code{\link{stan}} call.
#' @param pars_labels vector of parameter labels for the output plot. Important:
#' they must be in the same order as in the \code{stanfit} object when
#' \code{as.data.frame(obj)} is called.
#' @param hpd logical. If \code{TRUE} then the 90\% and 95\% highest probability
#' density intervals (HDI) are found. If \code{FALSE} then the corresponding
#' central intervals are found.
#' @param order_medians logical. Whether or not to order the points by their
#' medians.
#' @param horizontal logical. Whether or not you would like the lines to be
#' horizontal
#' @param alpha_bounds numeric. Alpha transparency value for the uncertainty
#' bounds.
#'
#' @details Points plot the simulations' medians, thin lines represent the 95\%
#' HPD/central intervals, and thick lines represent the 90\% HPD/central
#' intervals.
#'
#' @examples
#' \dontrun{
#' # Create Stan model
#' library(rstan)
#' scode <- "
#' parameters {
#' real y[2];
#' }
#' model {
#' y[1] ~ normal(0, 1);
#' y[2] ~ double_exponential(0, 2);
#' }
#' "
#'
#' # Run
#' fit <- stan(model_code = scode, iter = 10000, verbose = FALSE)
#'
#' # Plot y[1] and y[2] parameters
#' stan_caterpillar(fit, pars = 'y\\[.*\\]')
#' }
#'
#' @seealso \link{rstan}, \code{\link{stan}}, \code{ggmcmc}
#'
#' @import rstan
#' @importFrom tidyr gather
#' @importFrom dplyr group_by summarise inner_join %>%
#' @import ggplot2
#' @importFrom stats median quantile reorder
#'
#' @export
stan_caterpillar <- function(obj,
pars,
pars_labels = NULL,
hpd = TRUE,
order_medians = TRUE,
horizontal = TRUE,
alpha_bounds = 0.3)
{
variable <- value <- NULL
# Extract all simulations
sims <- as.data.frame(obj)
# Extract only desired parameters
names <- names(sims)
sims_subset <- sims[, names %in% grep(pattern = pars, x = names,
value = TRUE)] %>% data.frame()
if (ncol(sims_subset) == 0) {
stop("No parameters selected. \n", call. = FALSE)
}
# Gather for plotting
gathered <- gather(sims_subset, variable, value)
# Add labels
if (!is.null(pars_labels)) {
message("\nEnsure that your parameter labels are in the same order as the parameters.\n")
if (length(pars_labels) !=
length(unique(gathered$variable))) {
stop("pars_labels must equal the number of plotted parameters.",
call. = FALSE)
}
gathered$variable <- factor(gathered$variable,
labels = pars_labels)
}
if (isTRUE(hpd)) {
gathered <- group_by(gathered, variable)
lower95 <- summarise(gathered, HPD(value, prob = 0.95, side = 'lower'))
lower90 <- summarise(gathered, HPD(value, prob = 0.9, side = 'lower'))
upper90 <- summarise(gathered, HPD(value, prob = 0.9, side = 'upper'))
upper95 <- summarise(gathered, HPD(value, prob = 0.95, side = 'upper'))
}
else if (!isTRUE(hpd)){
# Find central interval
gathered <- group_by(gathered, variable)
lower95 <- summarise(gathered, quantile(value, 0.025))
lower90 <- summarise(gathered, quantile(value, 0.05))
upper90 <- summarise(gathered, quantile(value, 0.95))
upper95 <- summarise(gathered, quantile(value, 0.975))
}
# Find medians
medians <- summarise(gathered, median(value))
# Merge
comb <- suppressMessages(inner_join(lower95, lower90))
comb <- suppressMessages(inner_join(comb, medians))
comb <- suppressMessages(inner_join(comb, upper90))
comb <- suppressMessages(inner_join(comb, upper95))
names(comb) <- c('pars', 'lower95', 'lower90', 'medians', 'upper90',
'upper95')
# Plot
if (isTRUE(order_medians)){
pp <- ggplot(comb, aes(x = medians, y = reorder(pars, medians),
xmin = lower95,
xmax = upper95)) +
geom_point(size = 3) +
geom_segment(aes(x = lower95, xend = upper95,
yend = reorder(pars, medians)), size = 0.5,
alpha = alpha_bounds) +
geom_segment(aes(x = lower90, xend = upper90,
yend = reorder(pars, medians)), size = 1.5,
alpha = alpha_bounds) +
xlab('') + ylab('') +
theme_bw()
}
else {
pp <- ggplot(comb, aes(x = medians, y = pars,
xmin = lower95,
xmax = upper95)) +
geom_point(size = 3) +
geom_segment(aes(x = lower95, xend = upper95, yend = pars),
size = 0.5, alpha = alpha_bounds) +
geom_segment(aes(x = lower90, xend = upper90,
yend = pars), size = 1.5, alpha = alpha_bounds) +
xlab('') + ylab('') +
theme_bw()
}
if (!isTRUE(horizontal)) pp <- pp + coord_flip()
return(pp)
}
|
# Define variables
# Build the searchable corpus: one row per tweet, with a numeric row index
# used later to map scored documents back to their tweets.
tweet_search <- totaltweet %>% dplyr::mutate(rowIndex=
as.numeric(row.names(.))) %>%
dplyr::select(retweetCount,rowIndex,hashtag)
# `tottxt` is defined elsewhere -- presumably the cleaned tweet text; confirm
tweet_search$text <- tottxt
docList <- as.list(tweet_search$text)
N.docs <- length(docList)   # number of documents (query excluded)
# Function for searching documents for where words are used in Tweets
# QrySearch: ranks tweets against a query string under SMART "ltc"
# (log-tf, idf, cosine-normalized) weighting. The query is appended to the
# corpus as document N.docs + 1, so doc and query rows can be split by index.
QrySearch <- function(queryTerm) {
my.docs <- VectorSource(c(docList, queryTerm))
# Standard cleanup: stem, strip numbers, lowercase, drop stopwords
my.corpus <- VCorpus(my.docs) %>%
tm_map(stemDocument) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords,stopwords("en")) %>%
tm_map(stripWhitespace)
term.doc.matrix.stm <- TermDocumentMatrix(my.corpus,
control=list(weighting=function(x) weightSMART(x,spec="ltc"),
wordLengths=c(1,Inf)))
# Normalize each document's term vector to unit length so that the sum of
# termScore products below is a cosine similarity.
term.doc.matrix <- tidy(term.doc.matrix.stm) %>%
dplyr::group_by(document) %>%
dplyr::mutate(vtrLen=sqrt(sum(count^2))) %>%
dplyr::mutate(count=count/vtrLen) %>%
ungroup() %>%
dplyr::select(term:count)
# Documents 1..N.docs are tweets; anything above is the query
docMatrix <- term.doc.matrix %>%
dplyr::mutate(document=as.numeric(document)) %>%
dplyr::filter(document<N.docs+1)
qryMatrix <- term.doc.matrix %>%
dplyr::mutate(document=as.numeric(document)) %>%
dplyr::filter(document>=N.docs+1)
# Score = cosine similarity between each tweet and the query; keep top 10
searchRes <- docMatrix %>%
inner_join(qryMatrix,by=c("term"="term"),
suffix=c(".doc",".query")) %>%
dplyr::mutate(termScore=round(count.doc*count.query,4)) %>%
dplyr::group_by(document.query,document.doc) %>%
dplyr::summarise(Score=sum(termScore)) %>%
filter(row_number(desc(Score))<=10) %>%
dplyr::arrange(desc(Score)) %>%
# Map scored documents back to the original tweet text and metadata
left_join(tweet_search,by=c("document.doc"="rowIndex")) %>%
ungroup() %>%
rename(Result=text) %>%
dplyr::select(Result,Score,retweetCount,hashtag) %>%
data.frame()
return(searchRes)
} | /Analysis/Search.R | permissive | kylehommes/TwitterMentalHealth | R | false | false | 1,817 | r | # Define variables
tweet_search <- totaltweet %>% dplyr::mutate(rowIndex=
as.numeric(row.names(.))) %>%
dplyr::select(retweetCount,rowIndex,hashtag)
tweet_search$text <- tottxt
docList <- as.list(tweet_search$text)
N.docs <- length(docList)
# Function for searching documents for where words are used in Tweets
QrySearch <- function(queryTerm) {
my.docs <- VectorSource(c(docList, queryTerm))
my.corpus <- VCorpus(my.docs) %>%
tm_map(stemDocument) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords,stopwords("en")) %>%
tm_map(stripWhitespace)
term.doc.matrix.stm <- TermDocumentMatrix(my.corpus,
control=list(weighting=function(x) weightSMART(x,spec="ltc"),
wordLengths=c(1,Inf)))
term.doc.matrix <- tidy(term.doc.matrix.stm) %>%
dplyr::group_by(document) %>%
dplyr::mutate(vtrLen=sqrt(sum(count^2))) %>%
dplyr::mutate(count=count/vtrLen) %>%
ungroup() %>%
dplyr::select(term:count)
docMatrix <- term.doc.matrix %>%
dplyr::mutate(document=as.numeric(document)) %>%
dplyr::filter(document<N.docs+1)
qryMatrix <- term.doc.matrix %>%
dplyr::mutate(document=as.numeric(document)) %>%
dplyr::filter(document>=N.docs+1)
searchRes <- docMatrix %>%
inner_join(qryMatrix,by=c("term"="term"),
suffix=c(".doc",".query")) %>%
dplyr::mutate(termScore=round(count.doc*count.query,4)) %>%
dplyr::group_by(document.query,document.doc) %>%
dplyr::summarise(Score=sum(termScore)) %>%
filter(row_number(desc(Score))<=10) %>%
dplyr::arrange(desc(Score)) %>%
left_join(tweet_search,by=c("document.doc"="rowIndex")) %>%
ungroup() %>%
rename(Result=text) %>%
dplyr::select(Result,Score,retweetCount,hashtag) %>%
data.frame()
return(searchRes)
} |
# Allowed text colors: the shared theme colors plus Bootstrap-style extras.
text_color_ <- c(
theme_colors,
`black-50` = "black-50",
`white-50` = "white-50",
white = "white",
muted = "muted",
body = "body",
reset = "reset"
)
# pick() (defined elsewhere) validates `color` against the allowed set;
# a NULL argument presumably passes through -- confirm against pick().
text_color <- function(color) {
pick(color, from = text_color_)
}
# Allowed horizontal alignments.
text_align_ <- c(
left = "left",
right = "right",
center = "center"
)
# Alignment is wrapped in responsive() (defined elsewhere), so it appears
# to accept per-breakpoint values as well as plain ones.
text_align <- function(align) {
responsive(pick(align, from = text_align_))
}
# Allowed line-spacing values; long and short names map to the same suffix.
text_spacing_ <- c(
sm = "sm",
small = "sm",
md = "md",
medium = "md",
lg = "lg",
large = "lg"
)
# it's line height
text_spacing <- function(spacing) {
compose("height", pick(spacing, from = text_spacing_))
}
# Allowed text decorations.
text_decoration_ <- c(
none = "none",
underline = "underline",
strikethrough = "strikethrough"
)
text_decoration <- function(decoration) {
compose("decoration", pick(decoration, from = text_decoration_))
}
# Map the logical `wrap` flag onto its class suffix: TRUE -> "wrap",
# FALSE -> "nowrap". A NULL flag means "not specified" and passes through.
text_wrap <- function(wrap) {
  if (!is.null(wrap)) {
    ifelse(wrap, "wrap", "nowrap")
  } else {
    NULL
  }
}
# Allowed user-select behaviors (select all on click, or none).
text_select_ <- c(
all = "all",
none = "none"
)
text_select <- function(select) {
compose("select", pick(select, from = text_select_))
}
#' Text
#'
#' The `text()` function adjusts the text color, alignment, line spacing, line
#' wrapping, line height, and decoration of a tag element.
#'
#' @inheritParams background
#'
#' @param color One of `r rd_list(names(text_color_))` specifying the text
#' color, defaults to `NULL`, in which case the argument is ignored.
#'
#' @param align One of `r rd_list(names(text_align_))` specifying the alignment
#' of the text within the element, defaults to `NULL`, in which case the
#' argument is ignored.
#'
#' @param spacing One of `r rd_list(names(text_spacing_))` specifying the text
#' line spacing, defaults to `NULL`, in which case the argument is ignored.
#'
#' @param decoration One of `r rd_list(names(text_decoration_))` specifying how
#' the text is decorated, defaults to `NULL`, in which case the argument is
#' ignored.
#'
#' @param wrap One of `TRUE` or `FALSE` specifying if an element's text should
#' wrap onto new lines, defaults to `NULL`, in which case the argument
#' is ignored.
#'
#' @param select One of `r rd_list(text_select_)` specifying how the element's
#' text is selected when the user clicks on the element, defaults to `NULL`,
#' in which case the argument is ignored.
#'
#' @includeRmd man/roxygen/text.Rmd
#'
#' @export
#' @examples
#'
#' library(htmltools)
#'
#' div(
#' .style %>%
#' text(spacing = "small"),
#' "Nam vestibulum accumsan nisl.",
#' "Fusce commodo."
#' )
#'
#' div(
#' .style %>%
#' text(spacing = "large"),
#' "Suspendisse potenti.",
#' "Pellentesque tristique imperdiet tortor."
#' )
#'
#' tags$button(
#' .style %>%
#' text(wrap = FALSE),
#' "Aliquam feugiat tellus ut neque."
#' )
#'
text <- function(x, color = NULL, align = NULL, spacing = NULL,
decoration = NULL, wrap = NULL, select = NULL) {
assert_subject(x)
# Each helper above turns a NULL argument into no class, so unspecified
# aspects contribute nothing; prefix() (defined elsewhere) presumably
# namespaces the resulting classes under "text".
classes <- prefix(
"text",
text_color(color),
text_align(align),
text_spacing(spacing),
text_decoration(decoration),
text_wrap(wrap),
text_select(select)
)
add_class(x, classes)
}
| /R/text.R | no_license | cran/cascadess | R | false | false | 3,146 | r | text_color_ <- c(
theme_colors,
`black-50` = "black-50",
`white-50` = "white-50",
white = "white",
muted = "muted",
body = "body",
reset = "reset"
)
text_color <- function(color) {
pick(color, from = text_color_)
}
text_align_ <- c(
left = "left",
right = "right",
center = "center"
)
text_align <- function(align) {
responsive(pick(align, from = text_align_))
}
text_spacing_ <- c(
sm = "sm",
small = "sm",
md = "md",
medium = "md",
lg = "lg",
large = "lg"
)
# it's line height
text_spacing <- function(spacing) {
compose("height", pick(spacing, from = text_spacing_))
}
text_decoration_ <- c(
none = "none",
underline = "underline",
strikethrough = "strikethrough"
)
text_decoration <- function(decoration) {
compose("decoration", pick(decoration, from = text_decoration_))
}
text_wrap <- function(wrap) {
if (is.null(wrap)) {
return(NULL)
}
ifelse(wrap, "wrap", "nowrap")
}
text_select_ <- c(
all = "all",
none = "none"
)
text_select <- function(select) {
compose("select", pick(select, from = text_select_))
}
#' Text
#'
#' The `text()` function adjusts the text color, alignment, line spacing, line
#' wrapping, line height, and decoration of a tag element.
#'
#' @inheritParams background
#'
#' @param color One of `r rd_list(names(text_color_))` specifying the text
#' color, defaults to `NULL`, in which case the argument is ignored.
#'
#' @param align One of `r rd_list(names(text_align_))` specifying the alignment
#' of the text within the element, defaults to `NULL`, in which case the
#' argument is ignored.
#'
#' @param spacing One of `r rd_list(names(text_spacing_))` specifying the text
#' line spacing, defaults to `NULL`, in which case the argument is ignored.
#'
#' @param decoration One of `r rd_list(names(text_decoration_))` specifying how
#' the text is decorated, defaults to `NULL`, in which case the argument is
#' ignored.
#'
#' @param wrap One of `TRUE` or `FALSE` specifying if an element's text should
#' wrap onto new lines, defaults to `NULL`, in which case the argument
#' is ignored.
#'
#' @param select One of `r rd_list(text_select_)` specifying how the element's
#' text is selected when the user clicks on the element, defaults to `NULL`,
#' in which case the argument is ignored.
#'
#' @includeRmd man/roxygen/text.Rmd
#'
#' @export
#' @examples
#'
#' library(htmltools)
#'
#' div(
#' .style %>%
#' text(spacing = "small"),
#' "Nam vestibulum accumsan nisl.",
#' "Fusce commodo."
#' )
#'
#' div(
#' .style %>%
#' text(spacing = "large"),
#' "Suspendisse potenti.",
#' "Pellentesque tristique imperdiet tortor."
#' )
#'
#' tags$button(
#' .style %>%
#' text(wrap = FALSE),
#' "Aliquam feugiat tellus ut neque."
#' )
#'
text <- function(x, color = NULL, align = NULL, spacing = NULL,
decoration = NULL, wrap = NULL, select = NULL) {
assert_subject(x)
classes <- prefix(
"text",
text_color(color),
text_align(align),
text_spacing(spacing),
text_decoration(decoration),
text_wrap(wrap),
text_select(select)
)
add_class(x, classes)
}
|
library(twitteR)
library(ROAuth)
# API credentials live in a local semicolon-separated CSV with columns
# consumer_key, consumer_secret, access_token, access_token_secret.
# NOTE(review): keep api_key.csv out of version control.
api_keys <- read.csv2("api_key.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Authenticate this R session against the Twitter API.
setup_twitter_oauth(
consumer_key = api_keys$consumer_key,
consumer_secret = api_keys$consumer_secret,
access_token = api_keys$access_token,
access_secret = api_keys$access_token_secret )
| /RScripts/authentification.R | no_license | kimbox12/fimecho | R | false | false | 319 | r | library(twitteR)
library(ROAuth)
api_keys <- read.csv2("api_key.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
setup_twitter_oauth(
consumer_key = api_keys$consumer_key,
consumer_secret = api_keys$consumer_secret,
access_token = api_keys$access_token,
access_secret = api_keys$access_token_secret )
|
## class defined in aaaClasses for inheritance
## constructor
## Builds the RGtk2 single-line text entry. `initial.msg` is shown grayed-out
## until the entry first receives focus; `coerce.with` is stashed as a tag for
## later use by svalue().
setMethod(".gedit",
signature(toolkit="guiWidgetsToolkitRGtk2"),
function(toolkit,
text="", width=25,
coerce.with = NULL,
initial.msg = "",
handler=NULL, action=NULL,
container=NULL,
...
) {
force(toolkit)
entry <- gtkEntryNew()
obj <- as.gWidgetsRGtk2(entry)
tag(obj, "coerce.with") <- coerce.with
## this adds completion fields to this widget. To *add* to the list
## of values that can be completed use gEditobject[]<- values
## entry$setMaxLength(max(width,length(unlist(strsplit(text,"")))))
svalue(obj) <- text
tag(obj,"completion") <- NULL # a completion object if set via [<-
## process initial message if applicable
tag(obj, "init_msg_flag") <- FALSE
tag(obj, "init_msg") <- initial.msg
## Show the placeholder in gray; a one-shot focus-in handler clears it,
## restores black text, and disconnects itself.
if(nchar(text) == 0 && nchar(initial.msg) > 0) {
entry$modifyText(GtkStateType[1], "gray")
entry$setText(initial.msg)
id <- gSignalConnect(entry, "focus-in-event", function(...) {
entry$setText("")
entry$modifyText(GtkStateType[1], "black")
gSignalHandlerDisconnect(entry,id)
tag(obj, "init_msg_flag") <- FALSE
})
tag(obj, "init_msg_flag") <- TRUE
tag(obj, "init_msg_id") <- id
}
## width -- this sets the minimum -- it may expand to fill space
if(!is.null(width))
entry$setWidthChars(as.numeric(width))
## container == TRUE means "create a toplevel window for me"
if (!is.null(container)) {
if(is.logical(container) && container == TRUE)
container <- gwindow()
add(container, obj,...)
}
if (!is.null(handler))
tag(obj, "handler.id") <- addhandlerchanged(obj,handler,action)
invisible(obj)
})
## Wrap a raw GtkEntry in a gEditRGtk gWidgets object; the same GTK widget
## serves as both the block and the widget slot.
as.gWidgetsRGtk2.GtkEntry <- function(widget, ...) {
obj = new("gEditRGtk",block=widget, widget=widget,
toolkit=guiToolkit("RGtk2"))
return(obj)
}
## code to add completion to the entry
## only do so if set via [<-
## Attaches a GtkEntryCompletion backed by a fixed-size (1000 row)
## rGtkDataFrame model; the completion object is also stored as a tag so
## [<- can detect that it already exists.
.setCompletion <- function(obj,...) {
completion = gtkEntryCompletionNew()
## set model
## this caps out at 1000 -- is this a speed issue?
model <- rGtkDataFrame(data.frame(character(1000),stringsAsFactors=FALSE))
completion$SetModel(model)
completion$SetTextColumn(0) # Columns count from 0 -- not 1
## set properties; gtktry() silently ignores failures -- presumably these
## properties are unavailable in older GTK versions (confirm).
gtktry({completion['inline-completion'] <- TRUE}, silent = TRUE)
gtktry({completion['inline-selection'] <- TRUE}, silent = TRUE)
## set completion
tag(obj,"completion") <- completion
## get entry from obj
entry <- obj@widget
entry$SetCompletion(completion)
}
## methods
## Convenience: route svalue() on a bare GtkEntry through the RGtk2 toolkit
## dispatch so both gWidgets objects and raw entries behave alike.
setMethod("svalue", signature(obj="GtkEntry"),
function(obj, index=NULL, drop=NULL, ...) {
.svalue(obj,guiToolkit("RGtk2"), index, drop, ...)
})
setMethod(".svalue",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj, toolkit, index=NULL, drop=NULL, ...) {
val <- obj@widget$getText()
init_msg <- tag(obj, "init_msg")
if(!is.null(init_msg) && val == init_msg)
val <- ""
return(val)
})
## trouble here -- no coerce.with info available in obj
## svalue() for a bare GtkEntry: raw text only; no tags are available, so
## neither placeholder handling nor coercion can be applied here.
setMethod(".svalue",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="GtkEntry"),
function(obj, toolkit, index=NULL, drop=NULL, ...) {
val <- obj$getText()
return(val)
})
## svalue<-
## Setting the value first clears any pending placeholder state (restores
## black text and disconnects the one-shot focus-in handler), then writes
## the text and emits "activate" so change handlers fire.
setReplaceMethod(".svalue",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj, toolkit, index=NULL, ..., value) {
if(is.null(value))
return(obj) ## o/w we get a crash
widget <- getWidget(obj)
## initial message, clear
flag <- tag(obj, "init_msg_flag")
if(!is.null(flag) && flag) {
widget$modifyText(GtkStateType[1], "black")
gSignalHandlerDisconnect(widget, tag(obj, "init_msg_id"))
tag(obj, "init_msg_flag") <- FALSE
}
widget$setText(value)
widget$activate()
tag(obj, "value") <- value
return(obj)
})
## want to replace "value" but can't
setReplaceMethod(".svalue",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="GtkEntry"),
function(obj, toolkit, index=NULL, ..., value) {
obj$setText(value)
obj$activate()
return(obj)
})
setMethod(".leftBracket",
signature(toolkit="guiWidgetsToolkitRGtk2",x="gEditRGtk"),
function(x, toolkit, i, j, ..., drop=TRUE) {
obj <- x
if(!is.null(tag(obj,"completion"))) {
store <- obj@widget$GetCompletion()$GetModel()
nrows <- dim(store)[1]
if(missing(i))
i <- 1:nrows
return(store[i , ])
} else {
return(c())
}
})
setMethod("[",
signature(x="gEditRGtk"),
function(x, i, j, ..., drop=TRUE) {
if(missing(i))
.leftBracket(x,x@toolkit, ...)
else
.leftBracket(x,x@toolkit, i, ...)
})
setReplaceMethod(".leftBracket",
signature(toolkit="guiWidgetsToolkitRGtk2",x="gEditRGtk"),
function(x, toolkit, i, j, ..., value) {
obj <- x
if(is.null(tag(obj,"completion")))
.setCompletion(obj)
store <- obj@widget$GetCompletion()$GetModel()
nrows <- dim(store)[1]
n <- length(value)
if(n > nrows)
values <- values[1:nrows] # truncate
if(missing(i))
i <- 1:n
store[i , ] <- value
## all done
return(obj)
})
setReplaceMethod("[",
signature(x="gEditRGtk"),
function(x, i, j,..., value) {
.leftBracket(x, x@toolkit, i, j, ...) <- value
return(x)
})
##' visible<- if FALSE, for password usage
## visible(obj) <- FALSE masks typed characters with "*" (password entry).
setReplaceMethod(".visible",
signature(toolkit="guiWidgetsToolkitRGtk2", obj="gEditRGtk"),
function(obj, toolkit, ..., value) {
widget <- getWidget(obj)
widget$setInvisibleChar(42L) # asterisk
widget$setVisibility(as.logical(value))
return(obj)
})
##################################################
## handlers
### doesn't work -- double writes
## Drag-source support is already built into GtkEntry, so this is a no-op.
setMethod(".adddropsource",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj, toolkit, targetType="text", handler=NULL, action=NULL, ...) {
## do nothing, already in gedit widget
})
setMethod(".adddroptarget",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj, toolkit, targetType="text", handler=NULL, action=NULL, ...) {
## gwCat("drop target for gedit uses default only")
## issue is if using after=FALSE, the default drophandler is called. (Can't stop signal emission)
## if using after=TRUE, the dropped value is put into widget's value in similar way, a
## again we don't want this
## so we store the pre-value then set after as a hack
predrophandler <- function(h,...) {
tag(h$obj,"..predropvalue") <- svalue(h$obj)
}
gSignalConnect(getWidget(obj), "drag-data-received", f= predrophandler,
data=list(obj=obj), user.data.first=TRUE,
after=FALSE)
postdropHandler <- function(h,w, ctxt, x, y, selection, ...) {
svalue(h$obj) <- tag(h$obj,"..predropvalue") # complete the hack
tag(h$obj, "..predropvalue") <- NULL
dropdata <- selection$GetText()
if(is.integer(dropdata))
dropdata <- Paste(intToChar(dropdata))
else
dropdata <- rawToChar(dropdata)
dropdata <- gsub(Paste("^",.gWidgetDropTargetListKey),"", dropdata)
h$dropdata <- dropdata
handler(h, widget=w, context=ctxt, x=x, y=y, selection=selection, ...)
}
id <- gSignalConnect(getWidget(obj), "drag-data-received", f=postdropHandler,
data=list(obj=obj, action=action),
after=TRUE, user.data.first=TRUE)
})
setMethod(".addhandlerchanged",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj, toolkit, handler, action=NULL, ...) {
f <- function(h,widget,event,...) {
keyval <- event$GetKeyval()
if(keyval == GDK_Return) {
handler(h,widget,event,...)
return(TRUE)
} else {
return(FALSE)
}
}
id <- addhandler(obj, signal="activate", handler=handler, action=action)
return(id)
})
setMethod(".addhandlerkeystroke",
signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
function(obj,toolkit, handler=NULL, action=NULL,...) {
widget <- getWidget(obj)
ID <-
gSignalConnect(widget,signal = "key-release-event",
f = function(d,widget,event,...) {
h <- list(obj=d$obj,action=d$action)
key <- event$GetString()
h$key <- key
if(!is.null(d$handler) &&
is.function(d$handler))
d$handler(h,...)
return(FALSE) # propogate
},
user.data.first = TRUE,
data = list(obj=obj,handler=handler, action=action)
)
invisible(ID)
})
| /R/gedit.R | no_license | gwidgets3/gWidgetsRGtk2 | R | false | false | 10,896 | r | ## class defined in aaaClasses for inheritance
## constructor
setMethod(".gedit",
signature(toolkit="guiWidgetsToolkitRGtk2"),
function(toolkit,
text="", width=25,
coerce.with = NULL,
initial.msg = "",
handler=NULL, action=NULL,
container=NULL,
...
) {
force(toolkit)
entry <- gtkEntryNew()
obj <- as.gWidgetsRGtk2(entry)
tag(obj, "coerce.with") <- coerce.with
## this adds completion fields to this widget. To *add* to the list
## of values that can be completed use gEditobject[]<- values
## entry$setMaxLength(max(width,length(unlist(strsplit(text,"")))))
svalue(obj) <- text
tag(obj,"completion") <- NULL # a completion object if set via [<-
## process initial message if applicable
tag(obj, "init_msg_flag") <- FALSE
tag(obj, "init_msg") <- initial.msg
if(nchar(text) == 0 && nchar(initial.msg) > 0) {
entry$modifyText(GtkStateType[1], "gray")
entry$setText(initial.msg)
id <- gSignalConnect(entry, "focus-in-event", function(...) {
entry$setText("")
entry$modifyText(GtkStateType[1], "black")
gSignalHandlerDisconnect(entry,id)
tag(obj, "init_msg_flag") <- FALSE
})
tag(obj, "init_msg_flag") <- TRUE
tag(obj, "init_msg_id") <- id
}
## width -- ths sets minimum -- it ay expand to fill space
if(!is.null(width))
entry$setWidthChars(as.numeric(width))
if (!is.null(container)) {
if(is.logical(container) && container == TRUE)
container <- gwindow()
add(container, obj,...)
}
if (!is.null(handler))
tag(obj, "handler.id") <- addhandlerchanged(obj,handler,action)
invisible(obj)
})
as.gWidgetsRGtk2.GtkEntry <- function(widget, ...) {
obj = new("gEditRGtk",block=widget, widget=widget,
toolkit=guiToolkit("RGtk2"))
return(obj)
}
## code to add completion to the entry
## only do so if set via [<-
## Attach a GtkEntryCompletion to the entry of `obj`.
## Called lazily, the first time completion values are assigned via `[<-`,
## so plain gedit widgets never pay the cost of a completion model.
.setCompletion <- function(obj,...) {
  completion = gtkEntryCompletionNew()
  ## set model -- a one-column character store backing the completion
  ## this caps out at 1000 -- is this a speed issue?
  model <- rGtkDataFrame(data.frame(character(1000),stringsAsFactors=FALSE))
  completion$SetModel(model)
  completion$SetTextColumn(0) # Columns count from 0 -- not 1
  ## set properties; gtktry keeps failures silent (these properties are
  ## presumably unavailable in some GTK versions -- hence the try)
  gtktry({completion['inline-completion'] <- TRUE}, silent = TRUE)
  gtktry({completion['inline-selection'] <- TRUE}, silent = TRUE)
  ## remember the completion object so `[` / `[<-` can find it
  tag(obj,"completion") <- completion
  ## get entry from obj and install the completion on it
  entry <- obj@widget
  entry$SetCompletion(completion)
}
## methods
## Generic `svalue` for raw GtkEntry widgets: delegate to the
## toolkit-specific .svalue method.
setMethod("svalue", signature(obj="GtkEntry"),
          function(obj, index=NULL, drop=NULL, ...) {
            .svalue(obj,guiToolkit("RGtk2"), index, drop, ...)
          })
## svalue for gEdit objects: the entry text, except that the grayed-out
## initial placeholder message is reported as the empty string.
setMethod(".svalue",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
          function(obj, toolkit, index=NULL, drop=NULL, ...) {
            val <- obj@widget$getText()
            init_msg <- tag(obj, "init_msg")
            if(!is.null(init_msg) && val == init_msg)
              val <- ""
            return(val)
          })
## trouble here -- no coerce.with info available in obj
## svalue for a bare GtkEntry: just the raw text (no placeholder handling,
## no coercion -- see the note above).
setMethod(".svalue",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="GtkEntry"),
          function(obj, toolkit, index=NULL, drop=NULL, ...) {
            val <- obj$getText()
            return(val)
          })
## svalue<-
## svalue<- for gEdit objects: set the entry text.  If the grayed-out
## initial message is still showing, clear its state first (restore the
## normal text colour and disconnect the one-shot focus-in handler).
setReplaceMethod(".svalue",
                 signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
                 function(obj, toolkit, index=NULL, ..., value) {
                   if(is.null(value))
                     return(obj) ## o/w we get a crash
                   widget <- getWidget(obj)
                   ## initial message, clear
                   flag <- tag(obj, "init_msg_flag")
                   if(!is.null(flag) && flag) {
                     widget$modifyText(GtkStateType[1], "black")
                     gSignalHandlerDisconnect(widget, tag(obj, "init_msg_id"))
                     tag(obj, "init_msg_flag") <- FALSE
                   }
                   widget$setText(value)
                   ## emit "activate" so handlers added via
                   ## .addhandlerchanged (connected to "activate") fire
                   widget$activate()
                   tag(obj, "value") <- value
                   return(obj)
                 })
## svalue<- for a bare GtkEntry.
## want to replace "value" but can't
setReplaceMethod(".svalue",
                 signature(toolkit="guiWidgetsToolkitRGtk2",obj="GtkEntry"),
                 function(obj, toolkit, index=NULL, ..., value) {
                   obj$setText(value)
                   obj$activate()
                   return(obj)
                 })
## `[` for gEdit: return the completion values stored in the completion
## model, or NULL if no completion has been set (via `[<-`).
setMethod(".leftBracket",
          signature(toolkit="guiWidgetsToolkitRGtk2",x="gEditRGtk"),
          function(x, toolkit, i, j, ..., drop=TRUE) {
            obj <- x
            if(!is.null(tag(obj,"completion"))) {
              store <- obj@widget$GetCompletion()$GetModel()
              nrows <- dim(store)[1]
              if(missing(i))
                i <- 1:nrows       # default: every row of the model
              return(store[i , ])
            } else {
              return(c())          # no completion set: NULL
            }
          })
## Public `[` method, forwarding to .leftBracket with the object's toolkit.
setMethod("[",
          signature(x="gEditRGtk"),
          function(x, i, j, ..., drop=TRUE) {
            if(missing(i))
              .leftBracket(x,x@toolkit, ...)
            else
              .leftBracket(x,x@toolkit, i, ...)
          })
## `[<-` for gEdit: store `value` as the widget's completion values.
## Lazily creates the completion model on first use.  The backing model
## has a fixed number of rows, so longer inputs are truncated to fit.
setReplaceMethod(".leftBracket",
                 signature(toolkit="guiWidgetsToolkitRGtk2",x="gEditRGtk"),
                 function(x, toolkit, i, j, ..., value) {
                   obj <- x
                   if(is.null(tag(obj,"completion")))
                     .setCompletion(obj)
                   store <- obj@widget$GetCompletion()$GetModel()
                   nrows <- dim(store)[1]
                   ## truncate to the model's capacity
                   ## (BUGFIX: was `values <- values[1:nrows]`, which
                   ## referenced an undefined name and so errored whenever
                   ## more values than rows were supplied)
                   if(length(value) > nrows)
                     value <- value[seq_len(nrows)]
                   n <- length(value)
                   if(missing(i))
                     i <- seq_len(n)   # default index now matches the
                                       # (possibly truncated) value length
                   store[i , ] <- value
                   ## all done
                   return(obj)
                 })
## Public `[<-` method: delegate to .leftBracket<- with the object's toolkit.
setReplaceMethod("[",
                 signature(x="gEditRGtk"),
                 function(x, i, j,..., value) {
                   .leftBracket(x, x@toolkit, i, j, ...) <- value
                   return(x)
                 })
##' visible<- if FALSE, for password usage
## When value is FALSE the entry masks typed characters with '*'
## (invisible char code 42).
setReplaceMethod(".visible",
                 signature(toolkit="guiWidgetsToolkitRGtk2", obj="gEditRGtk"),
                 function(obj, toolkit, ..., value) {
                   widget <- getWidget(obj)
                   widget$setInvisibleChar(42L) # asterisk
                   widget$setVisibility(as.logical(value))
                   return(obj)
                 })
##################################################
## handlers
### doesn't work -- double writes
## Drop source: intentionally a no-op.  GtkEntry is already a drag source;
## adding another handler caused double writes (see note above).
setMethod(".adddropsource",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
          function(obj, toolkit, targetType="text", handler=NULL, action=NULL, ...) {
            ## do nothing, already in gedit widget
          })
## Drop target: GtkEntry's built-in drop handler inserts dropped text and
## its signal emission cannot be stopped.  Workaround: snapshot the
## widget's value before the default handler runs (after=FALSE), then
## restore it and invoke the user handler afterwards (after=TRUE).
setMethod(".adddroptarget",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
          function(obj, toolkit, targetType="text", handler=NULL, action=NULL, ...) {
            ## gwCat("drop target for gedit uses default only")
            ## issue is if using after=FALSE, the default drophandler is called. (Can't stop signal emission)
            ## if using after=TRUE, the dropped value is put into widget's value in similar way, a
            ## again we don't want this
            ## so we store the pre-value then set after as a hack
            predrophandler <- function(h,...) {
              tag(h$obj,"..predropvalue") <- svalue(h$obj)
            }
            gSignalConnect(getWidget(obj), "drag-data-received", f= predrophandler,
                           data=list(obj=obj), user.data.first=TRUE,
                           after=FALSE)
            postdropHandler <- function(h,w, ctxt, x, y, selection, ...) {
              svalue(h$obj) <- tag(h$obj,"..predropvalue") # complete the hack
              tag(h$obj, "..predropvalue") <- NULL
              ## dropped data may arrive as integer code points or raw bytes
              dropdata <- selection$GetText()
              if(is.integer(dropdata))
                dropdata <- Paste(intToChar(dropdata))
              else
                dropdata <- rawToChar(dropdata)
              ## strip the internal prefix marking gWidgets drop payloads
              dropdata <- gsub(Paste("^",.gWidgetDropTargetListKey),"", dropdata)
              h$dropdata <- dropdata
              handler(h, widget=w, context=ctxt, x=x, y=y, selection=selection, ...)
            }
            id <- gSignalConnect(getWidget(obj), "drag-data-received", f=postdropHandler,
                                 data=list(obj=obj, action=action),
                                 after=TRUE, user.data.first=TRUE)
          })
## Register `handler` to run when the user commits the entry (GtkEntry's
## "activate" signal).  Returns the handler id so the caller can later
## block or remove the handler.
setMethod(".addhandlerchanged",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
          function(obj, toolkit, handler, action=NULL, ...) {
            ## NOTE: a key-press filter on GDK_Return was previously
            ## defined here but never used (dead code, now removed);
            ## the handler is connected directly to "activate".
            id <- addhandler(obj, signal="activate", handler=handler, action=action)
            return(id)
          })
## Call `handler` on every key release in the entry.  The released key's
## string representation is passed to the handler as h$key.  The callback
## returns FALSE so the event continues to propagate to other handlers.
setMethod(".addhandlerkeystroke",
          signature(toolkit="guiWidgetsToolkitRGtk2",obj="gEditRGtk"),
          function(obj,toolkit, handler=NULL, action=NULL,...) {
            widget <- getWidget(obj)
            ID <-
              gSignalConnect(widget,signal = "key-release-event",
                             f = function(d,widget,event,...) {
                               h <- list(obj=d$obj,action=d$action)
                               key <- event$GetString()
                               h$key <- key
                               if(!is.null(d$handler) &&
                                  is.function(d$handler))
                                 d$handler(h,...)
                               return(FALSE) # propagate the event
                             },
                             user.data.first = TRUE,
                             data = list(obj=obj,handler=handler, action=action)
                             )
            invisible(ID)
          })
|
# Number of individuals in each of the two patches for run 2328.
numPerPatch2328 <- c(2555, 2445)
| /NatureEE-data-archive/Run203021/JAFSdata/JAFSnumPerPatch2328.R | no_license | flaxmans/NatureEE2017 | R | false | false | 32 | r | numPerPatch2328 <- c(2555,2445)
|
## nothing
## nothing
##mothing |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/downgradeShiraishiSignatures.R
\name{downgradeShiraishiSignatures}
\alias{downgradeShiraishiSignatures}
\title{Downgrade Shiraishi-type signatures.}
\usage{
downgradeShiraishiSignatures(signatures, numBases=NULL,
removeTrDir=FALSE)
}
\arguments{
\item{signatures}{(Mandatory) A list of Shiraishi signatures that need to be
downgraded/trimmed.}
\item{numBases}{(Conditionally optional) The total number of bases
(mutated base plus flanking bases around the mutated base) that should
be kept. All further flanking bases farther away from the mutated bases
are dropped. If specified, \code{numBases} must be odd and smaller than
the current number of bases of the \code{signatures}. If \code{NULL}, no
flanking bases will be dropped. At least one of \code{numBases} or
\code{removeTrDir} must be specified.}
\item{removeTrDir}{(Conditionally optional) Logical value that specifies
whether information on the transcript direction should be dropped (if
present at all). At least one of \code{numBases} or \code{removeTrDir}
must be specified.}
}
\value{
A list of Shiraishi signatures that have been accordingly downgraded.
}
\description{
`downgradeShiraishiSignatures()` downgrades/trims signatures of the
Shiraishi type by discarding flanking bases (reducing the length of the
sequence pattern) and/or the transcription direction. The downgrade doesn't
pose a problem because the flanking bases and the transcription direction
are considered as independent features according to the Shiraishi model of
mutational signatures.
}
\examples{
### Load 15 Shiraishi signatures obtained from 435 tumor genomes from
### Alexandrov et al. (number of bases: 5, transcription direction: yes)
sfile <- system.file("extdata",
"Alexandrov_PMID_23945592_435_tumors-pmsignature-15sig.Rdata",
package="decompTumor2Sig")
load(sfile)
### downgrade the signatures to include only 3 bases and drop the
### transcription direction
downgradeShiraishiSignatures(signatures, numBases=3, removeTrDir=TRUE)
}
\references{
\url{http://rmpiro.net/decompTumor2Sig/}\cr
Krueger, Piro (2018) decompTumor2Sig: Identification of mutational
signatures active in individual tumors. BMC Bioinformatics (accepted for
publication).\cr
Krueger, Piro (2017) Identification of Mutational Signatures Active in
Individual Tumors. NETTAB 2017 - Methods, Tools & Platforms for
Personalized Medicine in the Big Data Era, October 16-18, 2017, Palermo,
Italy. PeerJ Preprints 5:e3257v1, 2017.
}
\seealso{
\code{\link{decompTumor2Sig}}
}
\author{
Rosario M. Piro\cr Freie Universitaet Berlin\cr Maintainer: Rosario
M. Piro\cr E-Mail: <rmpiro@gmail.com> or <r.piro@fu-berlin.de>
}
| /man/downgradeShiraishiSignatures.Rd | no_license | zhiiiyang/decompTumor2Sig | R | false | true | 2,731 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/downgradeShiraishiSignatures.R
\name{downgradeShiraishiSignatures}
\alias{downgradeShiraishiSignatures}
\title{Downgrade Shiraishi-type signatures.}
\usage{
downgradeShiraishiSignatures(signatures, numBases=NULL,
removeTrDir=FALSE)
}
\arguments{
\item{signatures}{(Mandatory) A list of Shiraishi signatures that need to be
downgraded/trimmed.}
\item{numBases}{(Conditionally optional) The total number of bases
(mutated base plus flanking bases around the mutated base) that should
be kept. All further flanking bases farther away from the mutated bases
are dropped. If specified, \code{numBases} must be odd and smaller than
the current number of bases of the \code{signatures}. If \code{NULL}, no
flanking bases will be dropped. At least one of \code{numBases} or
\code{removeTrDir} must be specified.}
\item{removeTrDir}{(Conditionally optional) Logical value that specifies
whether information on the transcript direction should be dropped (if
present at all). At least one of \code{numBases} or \code{removeTrDir}
must be specified.}
}
\value{
A list of Shiraishi signatures that have been accordingly downgraded.
}
\description{
`downgradeShiraishiSignatures()` downgrades/trims signatures of the
Shiraishi type by discarding flanking bases (reducing the length of the
sequence pattern) and/or the transcription direction. The downgrade doesn't
pose a problem because the flanking bases and the transcription direction
are considered as independent features according to the Shiraishi model of
mutational signatures.
}
\examples{
### Load 15 Shiraishi signatures obtained from 435 tumor genomes from
### Alexandrov et al. (number of bases: 5, transcription direction: yes)
sfile <- system.file("extdata",
"Alexandrov_PMID_23945592_435_tumors-pmsignature-15sig.Rdata",
package="decompTumor2Sig")
load(sfile)
### downgrade the signatures to include only 3 bases and drop the
### transcription direction
downgradeShiraishiSignatures(signatures, numBases=3, removeTrDir=TRUE)
}
\references{
\url{http://rmpiro.net/decompTumor2Sig/}\cr
Krueger, Piro (2018) decompTumor2Sig: Identification of mutational
signatures active in individual tumors. BMC Bioinformatics (accepted for
publication).\cr
Krueger, Piro (2017) Identification of Mutational Signatures Active in
Individual Tumors. NETTAB 2017 - Methods, Tools & Platforms for
Personalized Medicine in the Big Data Era, October 16-18, 2017, Palermo,
Italy. PeerJ Preprints 5:e3257v1, 2017.
}
\seealso{
\code{\link{decompTumor2Sig}}
}
\author{
Rosario M. Piro\cr Freie Universitaet Berlin\cr Maintainer: Rosario
M. Piro\cr E-Mail: <rmpiro@gmail.com> or <r.piro@fu-berlin.de>
}
|
"
Name : c9_22_reinforcementLearning_state_same_as_next.R
Book : Hands-on Data Science with Anaconda )
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 4/6/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"
## Keep only transitions whose current state equals the next state.
x<-subset(data,data$State==data$NextState)
## Inspect the first few matching rows and the distinct rewards they yield.
head(x)
unique(x$Reward)
| /Chapter09/c9_22_reinforcementLearning_state_same_as_nextState.R | permissive | andrewjcoxon/Hands-On-Data-Science-with-Anaconda | R | false | false | 348 | r |
"
Name : c9_22_reinforcementLearning_state_same_as_next.R
Book : Hands-on Data Science with Anaconda )
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 4/6/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"
x<-subset(data,data$State==data$NextState)
head(x)
unique(x$Reward)
|
###################################################
### code chunk number 30: Cs27_CIs-pboot
###################################################
## Parametric-bootstrap confidence intervals for the fitted MARSS model
## `kem`; nboot = 10 is small, presumably to keep this example fast.
kem.w.boot.CIs <- MARSSparamCIs(kem, method = "parametric", nboot = 10)
print(kem.w.boot.CIs)
| /inst/userguide/figures/QE--Cs27_CIs-pboot.R | permissive | nwfsc-timeseries/MARSS | R | false | false | 241 | r | ###################################################
### code chunk number 30: Cs27_CIs-pboot
###################################################
kem.w.boot.CIs <- MARSSparamCIs(kem, method = "parametric", nboot = 10)
print(kem.w.boot.CIs)
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0
  ## Returns a numeric vector of correlations (length 0 if no monitor
  ## exceeds the threshold).

  ## Correlation of nitrate vs sulfate for one monitor id, using only
  ## rows with no missing values.  Monitor files are zero-padded:
  ## "001.csv", "002.csv", ...
  getCorr <- function(id) {
    filename <- file.path(directory, sprintf("%03d.csv", id))
    data <- read.csv(filename)
    good <- data[complete.cases(data), ]
    cor(good$nitrate, good$sulfate)
  }

  ## complete() (defined elsewhere in this project) returns one row per
  ## monitor with columns `id` and `nobs` (count of complete cases).
  rowCounts <- complete(directory)
  overThreshold <- rowCounts$id[rowCounts$nobs > threshold]
  ## vapply guarantees a numeric vector even when no monitor passes the
  ## threshold (sapply would return an empty list in that case).
  vapply(overThreshold, getCorr, numeric(1))
}
| /RProgramming/corr.R | no_license | rupertbates/datasciencecoursera | R | false | false | 1,028 | r | corr <- function(directory, threshold = 0) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
# getComplete <- function(filename){
# path <- paste(directory, "/", filename, sep="")
# data <- read.csv(path)
# #data[1:2,]
# data[complete.cases(data),]
# }
#
# valid <- function(df){
# nrow(df) > threshold
# }
#
getCorr <- function(id){
filename <- paste(directory, "/", formatC(id, width=3, flag="0"), ".csv", sep="")
data <- read.csv(filename)
good <- data[complete.cases(data),]
cor(good$nitrate, good$sulfate)
}
rowCounts <- complete(directory)
overThreshold <- rowCounts$id[rowCounts$nobs > threshold]
sapply(overThreshold, getCorr)
}
|
## S3 print method for "deviation" objects: a formatted console summary of
## a standard-deviation diversity measure (the call, a preview of the data,
## its range, and the raw / maximum / normalized standard deviations).
## Returns its argument invisibly, as print methods should.
print.deviation <- function(x, digits = max(4, getOption("digits") - 4), ...) {
  cat("\n")
  cat("Diversity as separation: Standard Deviation")
  cat("\n\n Call: \n")
  cat("", deparse(x$call), "\n\n")
  ## Echo at most the first five data values, marking any truncation.
  shown <- if (length(x$data) >= 5) 1:5 else 1:length(x$data)
  cat(" Data: ")
  cat(x$data[shown])
  cat(if (length(x$data) <= 5) "" else " ...", "\n")
  cat(" Minimum: ", x$min, "\n")
  cat(" Maximum: ", x$max, "\n")
  ## The three sd-based summaries, formatted to `digits` digits.
  for (field in c("std.dev", "std.dev.max", "std.dev.norm")) {
    label <- switch(field,
                    std.dev      = " Standard Deviation (sd): ",
                    std.dev.max  = " Maximum value of Standard Deviation (sdmax): ",
                    std.dev.norm = " Normalized value of Standard Deviation (sdnorm): ")
    cat(label)
    cat(formatC(x[[field]], digits = digits), "\n")
  }
  cat("\n\n")
  invisible(x)
}
| /R/print.deviation.R | no_license | DLEIVA/diversity | R | false | false | 765 | r | print.deviation <- function(x,digits=max(4,getOption("digits")-4),...)
{
cat("\n")
cat("Diversity as separation: Standard Deviation")
cat("\n\n Call: \n")
cat("",deparse(x$call), "\n\n")
cat(" Data: ")
cat( if (length(x$data) >= 5) x$data[1:5] else x$data[1:length(x$data)])
cat(if (length(x$data) <= 5) "" else " ...", "\n")
cat(" Minimum: ", x$min,"\n")
cat(" Maximum: ", x$max,"\n")
cat(" Standard Deviation (sd): ")
cat(formatC(x$std.dev, digits = digits), "\n")
cat(" Maximum value of Standard Deviation (sdmax): ")
cat(formatC(x$std.dev.max, digits = digits), "\n")
cat(" Normalized value of Standard Deviation (sdnorm): ")
cat(formatC(x$std.dev.norm, digits = digits), "\n")
cat("\n\n")
invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_H2test}
\alias{data_H2test}
\title{Data with a very simple structure}
\format{An object of class \code{list} of length 4.}
\usage{
data_H2test
}
\description{
Data randomly generated for simple H2 test
}
\keyword{datasets}
| /man/data_H2test.Rd | no_license | virgile-baudrot/trophicR | R | false | true | 327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_H2test}
\alias{data_H2test}
\title{Data very simple structure}
\format{An object of class \code{list} of length 4.}
\usage{
data_H2test
}
\description{
Data randomly generated for simple H2 test
}
\keyword{datasets}
|
#' Create a D3 JavaScript force directed network graph.
#'
#' @param Links a data frame object with the links between the nodes. It should
#' include the \code{Source} and \code{Target} for each link. These should be
#' numbered starting from 0. An optional \code{Value} variable can be included
#' to specify how close the nodes are to one another.
#' @param Nodes a data frame containing the node id and properties of the nodes.
#' If no ID is specified then the nodes must be in the same order as the Source
#' variable column in the \code{Links} data frame. Currently only a grouping
#' variable is allowed.
#' @param Source character string naming the network source variable in the
#' \code{Links} data frame.
#' @param Target character string naming the network target variable in the
#' \code{Links} data frame.
#' @param Value character string naming the variable in the \code{Links} data
#' frame for how wide the links are.
#' @param NodeID character string specifying the node IDs in the \code{Nodes}
#' data frame.
#' @param Nodesize character string specifying the a column in the \code{Nodes}
#' data frame with some value to vary the node radius's with. See also
#' \code{radiusCalculation}.
#' @param Group character string specifying the group of each node in the
#' \code{Nodes} data frame.
#' @param height numeric height for the network graph's frame area in pixels.
#' @param width numeric width for the network graph's frame area in pixels.
#' @param colourScale character string specifying the categorical colour
#' scale for the nodes. See
#' \url{https://github.com/mbostock/d3/wiki/Ordinal-Scales}.
#' @param fontSize numeric font size in pixels for the node text labels.
#' @param fontFamily font family for the node text labels.
#' @param linkDistance numeric or character string. Either numberic fixed
#' distance between the links in pixels (actually arbitrary relative to the
#' diagram's size). Or a JavaScript function, possibly to weight by
#' \code{Value}. For example:
#' \code{linkDistance = JS("function(d){return d.value * 10}")}.
#' @param linkWidth numeric or character string. Can be a numeric fixed width in
#' pixels (arbitrary relative to the diagram's size). Or a JavaScript function,
#' possibly to weight by \code{Value}. The default is
#' \code{linkWidth = JS("function(d) { return Math.sqrt(d.value); }")}.
#' @param radiusCalculation character string. A javascript mathematical
#' expression, to weight the radius by \code{Nodesize}. The default value is
#' \code{radiusCalculation = JS("Math.sqrt(d.nodesize)+6")}.
#' @param charge numeric value indicating either the strength of the node
#' repulsion (negative value) or attraction (positive value).
#' @param linkColour character vector specifying the colour(s) you want the link
#' lines to be. Multiple formats supported (e.g. hexadecimal).
#' @param opacity numeric value of the proportion opaque you would like the
#' graph elements to be.
#' @param zoom logical value to enable (\code{TRUE}) or disable (\code{FALSE})
#' zooming.
#' @param legend logical value to enable node colour legends.
#' @param bounded logical value to enable (\code{TRUE}) or disable
#' (\code{FALSE}) the bounding box limiting the graph's extent. See
#' \url{http://bl.ocks.org/mbostock/1129492}.
#' @param opacityNoHover numeric value of the opacity proportion for node labels
#' text when the mouse is not hovering over them.
#' @param clickAction character string with a JavaScript expression to evaluate
#' when a node is clicked.
#'
#' @examples
#' # Load data
#' data(MisLinks)
#' data(MisNodes)
#' # Create graph
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, zoom = TRUE)
#'
#' # Create graph with legend and varying node radius
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Nodesize = "size",
#' radiusCalculation = "Math.sqrt(d.nodesize)+6",
#' Group = "group", opacity = 0.4, legend = TRUE)
#'
#' \dontrun{
#' #### JSON Data Example
#' # Load data JSON formated data into two R data frames
#' # Create URL. paste0 used purely to keep within line width.
#' URL <- paste0("https://cdn.rawgit.com/christophergandrud/networkD3/",
#' "master/JSONdata/miserables.json")
#'
#' MisJson <- jsonlite::fromJSON(URL)
#'
#' # Create graph
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4)
#'
#' # Create graph with zooming
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, zoom = TRUE)
#'
#'
#' # Create a bounded graph
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, bounded = TRUE)
#'
#' # Create graph with node text faintly visible when no hovering
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, bounded = TRUE,
#' opacityNoHover = TRUE)
#'
#' ## Specify colours for specific edges
#' # Find links to Valjean (11)
#' which(MisNodes == "Valjean", arr = TRUE)[1] - 1
#' ValjeanInds = which(MisLinks == 11, arr = TRUE)[, 1]
#'
#' # Create a colour vector
#' ValjeanCols = ifelse(1:nrow(MisLinks) %in% ValjeanInds, "#bf3eff", "#666")
#'
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.8, linkColour = ValjeanCols)
#'
#'
#' ## Create graph with alert pop-up when a node is clicked. You're
#' # unlikely to want to do exactly this, but you might use
#' # Shiny.onInputChange() to allocate d.XXX to an element of input
#' # for use in a Shiny app.
#'
#' MyClickScript <- 'alert("You clicked " + d.name + " which is in row " +
#' (d.index + 1) + " of your original R data frame");'
#'
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 1, zoom = FALSE,
#' bounded = TRUE, clickAction = MyClickScript)
#' }
#'
#' @source
#' D3.js was created by Michael Bostock. See \url{http://d3js.org/} and, more
#' specifically for force directed networks
#' \url{https://github.com/mbostock/d3/wiki/Force-Layout}.
#' @seealso \code{\link{JS}}.
#'
#' @export
forceNetwork <- function(Links,
                         Nodes,
                         Source,
                         Target,
                         Value,
                         NodeID,
                         Nodesize,
                         Group,
                         height = NULL,
                         width = NULL,
                         colourScale = JS("d3.scale.category20()"),
                         fontSize = 7,
                         fontFamily = "serif",
                         linkDistance = 50,
                         linkWidth = JS("function(d) { return Math.sqrt(d.value); }"),
                         radiusCalculation = JS(" Math.sqrt(d.nodesize)+6"),
                         charge = -120,
                         linkColour = "#666",
                         opacity = 0.6,
                         zoom = FALSE,
                         legend = FALSE,
                         bounded = FALSE,
                         opacityNoHover = 0,
                         clickAction = NULL)
{
  # Check if data is zero indexed (required by the d3.js force layout)
  check_zero(Links[, Source], Links[, Target])
  # If tbl_df convert to plain data.frame
  Links <- tbl_df_strip(Links)
  Nodes <- tbl_df_strip(Nodes)
  # Hack for UI consistency: serialize the JS() arguments as plain strings.
  # Think of improving.
  colourScale <- as.character(colourScale)
  linkWidth <- as.character(linkWidth)
  radiusCalculation <- as.character(radiusCalculation)
  # Validate inputs before subsetting data frames for the network graph
  if (!is.data.frame(Links)) {
    stop("Links must be a data frame class object.")
  }
  if (!is.data.frame(Nodes)) {
    stop("Nodes must be a data frame class object.")
  }
  # Build the link data frame; the `value` (link weight) column is optional.
  # (The original `else if (!missing(Value))` was always TRUE in the else
  # branch, so a plain `else` is equivalent.)
  if (missing(Value)) {
    LinksDF <- data.frame(Links[, Source], Links[, Target])
    names(LinksDF) <- c("source", "target")
  } else {
    LinksDF <- data.frame(Links[, Source], Links[, Target], Links[, Value])
    names(LinksDF) <- c("source", "target", "value")
  }
  # Build the node data frame; `nodesize` is optional and its presence is
  # signalled to the JS binding via the `nodesize` option flag below.
  if (!missing(Nodesize)) {
    NodesDF <- data.frame(Nodes[, NodeID], Nodes[, Group], Nodes[, Nodesize])
    names(NodesDF) <- c("name", "group", "nodesize")
    nodesize <- TRUE
  } else {
    NodesDF <- data.frame(Nodes[, NodeID], Nodes[, Group])
    names(NodesDF) <- c("name", "group")
    nodesize <- FALSE
  }
  # Per-link colours (a single colour is recycled across all links)
  LinksDF <- data.frame(LinksDF, colour = linkColour)
  LinksDF$colour <- as.character(LinksDF$colour)
  # create options passed through to the JavaScript binding
  options <- list(
    NodeID = NodeID,
    Group = Group,
    colourScale = colourScale,
    fontSize = fontSize,
    fontFamily = fontFamily,
    clickTextSize = fontSize * 2.5,
    linkDistance = linkDistance,
    linkWidth = linkWidth,
    charge = charge,
    # linkColour = linkColour,
    opacity = opacity,
    zoom = zoom,
    legend = legend,
    nodesize = nodesize,
    radiusCalculation = radiusCalculation,
    bounded = bounded,
    opacityNoHover = opacityNoHover,
    clickAction = clickAction
  )
  # create widget
  htmlwidgets::createWidget(
    name = "forceNetwork",
    x = list(links = LinksDF, nodes = NodesDF, options = options),
    width = width,
    height = height,
    htmlwidgets::sizingPolicy(padding = 10, browser.fill = TRUE),
    package = "networkD3"
  )
}
#' @rdname networkD3-shiny
#' @export
forceNetworkOutput <- function(outputId, width = "100%", height = "500px") {
  # UI-side scaffold for a forceNetwork widget; pair with
  # renderForceNetwork() in the server function.
  shinyWidgetOutput(
    outputId, "forceNetwork", width, height,
    package = "networkD3"
  )
}
#' @rdname networkD3-shiny
#' @export
renderForceNetwork <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it,
  # then hand off to the htmlwidgets render helper as a quoted expression.
  if (!quoted) {
    expr <- substitute(expr)
  }
  shinyRenderWidget(expr, forceNetworkOutput, env, quoted = TRUE)
}
| /R/forceNetwork.R | no_license | fbreitwieser/networkD3 | R | false | false | 10,913 | r | #' Create a D3 JavaScript force directed network graph.
#'
#' @param Links a data frame object with the links between the nodes. It should
#' include the \code{Source} and \code{Target} for each link. These should be
#' numbered starting from 0. An optional \code{Value} variable can be included
#' to specify how close the nodes are to one another.
#' @param Nodes a data frame containing the node id and properties of the nodes.
#' If no ID is specified then the nodes must be in the same order as the Source
#' variable column in the \code{Links} data frame. Currently only a grouping
#' variable is allowed.
#' @param Source character string naming the network source variable in the
#' \code{Links} data frame.
#' @param Target character string naming the network target variable in the
#' \code{Links} data frame.
#' @param Value character string naming the variable in the \code{Links} data
#' frame for how wide the links are.
#' @param NodeID character string specifying the node IDs in the \code{Nodes}
#' data frame.
#' @param Nodesize character string specifying a column in the \code{Nodes}
#' data frame whose values are used to vary the node radii. See also
#' \code{radiusCalculation}.
#' @param Group character string specifying the group of each node in the
#' \code{Nodes} data frame.
#' @param height numeric height for the network graph's frame area in pixels.
#' @param width numeric width for the network graph's frame area in pixels.
#' @param colourScale character string specifying the categorical colour
#' scale for the nodes. See
#' \url{https://github.com/mbostock/d3/wiki/Ordinal-Scales}.
#' @param fontSize numeric font size in pixels for the node text labels.
#' @param fontFamily font family for the node text labels.
#' @param linkDistance numeric or character string. Either a numeric fixed
#' distance between the links in pixels (actually arbitrary relative to the
#' diagram's size). Or a JavaScript function, possibly to weight by
#' \code{Value}. For example:
#' \code{linkDistance = JS("function(d){return d.value * 10}")}.
#' @param linkWidth numeric or character string. Can be a numeric fixed width in
#' pixels (arbitrary relative to the diagram's size). Or a JavaScript function,
#' possibly to weight by \code{Value}. The default is
#' \code{linkWidth = JS("function(d) { return Math.sqrt(d.value); }")}.
#' @param radiusCalculation character string. A javascript mathematical
#' expression, to weight the radius by \code{Nodesize}. The default value is
#' \code{radiusCalculation = JS("Math.sqrt(d.nodesize)+6")}.
#' @param charge numeric value indicating either the strength of the node
#' repulsion (negative value) or attraction (positive value).
#' @param linkColour character vector specifying the colour(s) you want the link
#' lines to be. Multiple formats supported (e.g. hexadecimal).
#' @param opacity numeric value of the proportion opaque you would like the
#' graph elements to be.
#' @param zoom logical value to enable (\code{TRUE}) or disable (\code{FALSE})
#' zooming.
#' @param legend logical value to enable node colour legends.
#' @param bounded logical value to enable (\code{TRUE}) or disable
#' (\code{FALSE}) the bounding box limiting the graph's extent. See
#' \url{http://bl.ocks.org/mbostock/1129492}.
#' @param opacityNoHover numeric value of the opacity proportion for node labels
#' text when the mouse is not hovering over them.
#' @param clickAction character string with a JavaScript expression to evaluate
#' when a node is clicked.
#'
#' @examples
#' # Load data
#' data(MisLinks)
#' data(MisNodes)
#' # Create graph
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, zoom = TRUE)
#'
#' # Create graph with legend and varying node radius
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Nodesize = "size",
#' radiusCalculation = "Math.sqrt(d.nodesize)+6",
#' Group = "group", opacity = 0.4, legend = TRUE)
#'
#' \dontrun{
#' #### JSON Data Example
#' # Load JSON formatted data into two R data frames
#' # Create URL. paste0 used purely to keep within line width.
#' URL <- paste0("https://cdn.rawgit.com/christophergandrud/networkD3/",
#' "master/JSONdata/miserables.json")
#'
#' MisJson <- jsonlite::fromJSON(URL)
#'
#' # Create graph
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4)
#'
#' # Create graph with zooming
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, zoom = TRUE)
#'
#'
#' # Create a bounded graph
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, bounded = TRUE)
#'
#' # Create graph with node text faintly visible when no hovering
#' forceNetwork(Links = MisJson$links, Nodes = MisJson$nodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.4, bounded = TRUE,
#' opacityNoHover = TRUE)
#'
#' ## Specify colours for specific edges
#' # Find links to Valjean (11)
#' which(MisNodes == "Valjean", arr = TRUE)[1] - 1
#' ValjeanInds = which(MisLinks == 11, arr = TRUE)[, 1]
#'
#' # Create a colour vector
#' ValjeanCols = ifelse(1:nrow(MisLinks) %in% ValjeanInds, "#bf3eff", "#666")
#'
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 0.8, linkColour = ValjeanCols)
#'
#'
#' ## Create graph with alert pop-up when a node is clicked. You're
#' # unlikely to want to do exactly this, but you might use
#' # Shiny.onInputChange() to allocate d.XXX to an element of input
#' # for use in a Shiny app.
#'
#' MyClickScript <- 'alert("You clicked " + d.name + " which is in row " +
#' (d.index + 1) + " of your original R data frame");'
#'
#' forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
#' Target = "target", Value = "value", NodeID = "name",
#' Group = "group", opacity = 1, zoom = FALSE,
#' bounded = TRUE, clickAction = MyClickScript)
#' }
#'
#' @source
#' D3.js was created by Michael Bostock. See \url{http://d3js.org/} and, more
#' specifically for force directed networks
#' \url{https://github.com/mbostock/d3/wiki/Force-Layout}.
#' @seealso \code{\link{JS}}.
#'
#' @export
forceNetwork <- function(Links,
                         Nodes,
                         Source,
                         Target,
                         Value,
                         NodeID,
                         Nodesize,
                         Group,
                         height = NULL,
                         width = NULL,
                         colourScale = JS("d3.scale.category20()"),
                         fontSize = 7,
                         fontFamily = "serif",
                         linkDistance = 50,
                         linkWidth = JS("function(d) { return Math.sqrt(d.value); }"),
                         radiusCalculation = JS(" Math.sqrt(d.nodesize)+6"),
                         charge = -120,
                         linkColour = "#666",
                         opacity = 0.6,
                         zoom = FALSE,
                         legend = FALSE,
                         bounded = FALSE,
                         opacityNoHover = 0,
                         clickAction = NULL)
{
  # Validate the inputs before touching them: previously these class checks
  # ran only after check_zero()/tbl_df_strip() had already indexed into the
  # objects, which could fail with a confusing error for bad input.
  if (!is.data.frame(Links)) {
    stop("Links must be a data frame class object.")
  }
  if (!is.data.frame(Nodes)) {
    stop("Nodes must be a data frame class object.")
  }
  # Check that the link source/target ids are zero indexed (D3 requirement)
  check_zero(Links[, Source], Links[, Target])
  # If tbl_df, convert to plain data.frame so `[` returns vectors below
  Links <- tbl_df_strip(Links)
  Nodes <- tbl_df_strip(Nodes)
  # Hack for UI consistency: serialise the JS() wrappers to plain strings
  # so they survive the round trip to the JavaScript binding.
  colourScale <- as.character(colourScale)
  linkWidth <- as.character(linkWidth)
  radiusCalculation <- as.character(radiusCalculation)
  # Subset the link data frame; the `value` column is optional
  if (missing(Value)) {
    LinksDF <- data.frame(Links[, Source], Links[, Target])
    names(LinksDF) <- c("source", "target")
  } else {
    LinksDF <- data.frame(Links[, Source], Links[, Target], Links[, Value])
    names(LinksDF) <- c("source", "target", "value")
  }
  # Subset the node data frame; `nodesize` is optional and its presence is
  # signalled to the JS binding via the logical `nodesize` option below
  if (!missing(Nodesize)) {
    NodesDF <- data.frame(Nodes[, NodeID], Nodes[, Group], Nodes[, Nodesize])
    names(NodesDF) <- c("name", "group", "nodesize")
    nodesize <- TRUE
  } else {
    NodesDF <- data.frame(Nodes[, NodeID], Nodes[, Group])
    names(NodesDF) <- c("name", "group")
    nodesize <- FALSE
  }
  # Per-link colour column (a single colour is recycled across all links)
  LinksDF <- data.frame(LinksDF, colour = linkColour)
  LinksDF$colour <- as.character(LinksDF$colour)
  # Options forwarded verbatim to the JavaScript binding
  options <- list(
    NodeID = NodeID,
    Group = Group,
    colourScale = colourScale,
    fontSize = fontSize,
    fontFamily = fontFamily,
    clickTextSize = fontSize * 2.5,
    linkDistance = linkDistance,
    linkWidth = linkWidth,
    charge = charge,
    opacity = opacity,
    zoom = zoom,
    legend = legend,
    nodesize = nodesize,
    radiusCalculation = radiusCalculation,
    bounded = bounded,
    opacityNoHover = opacityNoHover,
    clickAction = clickAction
  )
  # Create the htmlwidget; name the sizingPolicy argument instead of relying
  # on positional matching after the named width/height arguments.
  htmlwidgets::createWidget(
    name = "forceNetwork",
    x = list(links = LinksDF, nodes = NodesDF, options = options),
    width = width,
    height = height,
    sizingPolicy = htmlwidgets::sizingPolicy(padding = 10, browser.fill = TRUE),
    package = "networkD3"
  )
}
#' @rdname networkD3-shiny
#' @export
forceNetworkOutput <- function(outputId, width = "100%", height = "500px") {
  # UI-side scaffold for a forceNetwork widget; pair with
  # renderForceNetwork() in the server function.
  shinyWidgetOutput(
    outputId, "forceNetwork", width, height,
    package = "networkD3"
  )
}
#' @rdname networkD3-shiny
#' @export
renderForceNetwork <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it,
  # then hand off to the htmlwidgets render helper as a quoted expression.
  if (!quoted) {
    expr <- substitute(expr)
  }
  shinyRenderWidget(expr, forceNetworkOutput, env, quoted = TRUE)
}
|
# Monthly petroleum product consumption statistics published by CORES (Spain)
path <- 'http://www.cores.es/sites/default/files/archivos/estadisticas/consumos-pp.xlsx'

# Download the CORES consumption workbook and tidy one sheet.
#   path: URL (or local path) of the xlsx file.
#   hoja: sheet name to read, e.g. 'Gasolinas'.
# Returns a data.table whose first column (`Año`) is a zoo::yearmon index,
# followed by the measurement columns coerced to numeric.
get_core <- function(path, hoja){
  # Fail fast if a dependency is missing (require() would only warn and
  # return FALSE, deferring the failure to a confusing later error).
  library(zoo)
  library(lubridate)
  library(data.table)
  library(readxl)
  # Download to a temp file; mode = 'wb' is required for binary xlsx content
  temp <- tempfile()
  download.file(path, temp, mode = 'wb')
  data <- data.table(read_xlsx(temp, sheet = hoja))
  # The real header row is the one whose first cell is 'Año' (year);
  # everything above it is workbook boilerplate.
  m <- which(data[[1]] == 'Año')
  dt <- data[!1:m]
  colnames(dt) <- as.character(unlist(data[m, ]))
  # Drop footer rows (NA year) and the yearly 'total' summary rows
  dt <- dt[!is.na(dt[[1]]) & dt[[2]] != 'total']
  # Build a yearmon index from the year and month-name columns.
  # NOTE(review): dym() parsing of "<year>-<month name>" strings should be
  # confirmed against the current workbook layout and locale.
  dt$Año <- as.yearmon(dym(paste0(dt[[1]], '-', dt[[2]])))
  dt$Mes <- NULL
  dt <- as.data.table(dt)
  # Coerce every measurement column to numeric. The original `.SD =` only
  # worked via partial argument matching; the data.table argument is .SDcols.
  dt[, names(dt)[-1] := lapply(.SD, function(x){as.numeric(as.character(x))}),
     .SDcols = names(dt)[-1]]
  return(dt)
}

dt <- get_core(path, 'Gasolinas')
| /Core.R | no_license | puigjos/GetData | R | false | false | 752 | r |
# Monthly petroleum product consumption statistics published by CORES (Spain)
path <- 'http://www.cores.es/sites/default/files/archivos/estadisticas/consumos-pp.xlsx'

# Download the CORES consumption workbook and tidy one sheet.
#   path: URL (or local path) of the xlsx file.
#   hoja: sheet name to read, e.g. 'Gasolinas'.
# Returns a data.table whose first column (`Año`) is a zoo::yearmon index,
# followed by the measurement columns coerced to numeric.
get_core <- function(path, hoja){
  # Fail fast if a dependency is missing (require() would only warn and
  # return FALSE, deferring the failure to a confusing later error).
  library(zoo)
  library(lubridate)
  library(data.table)
  library(readxl)
  # Download to a temp file; mode = 'wb' is required for binary xlsx content
  temp <- tempfile()
  download.file(path, temp, mode = 'wb')
  data <- data.table(read_xlsx(temp, sheet = hoja))
  # The real header row is the one whose first cell is 'Año' (year);
  # everything above it is workbook boilerplate.
  m <- which(data[[1]] == 'Año')
  dt <- data[!1:m]
  colnames(dt) <- as.character(unlist(data[m, ]))
  # Drop footer rows (NA year) and the yearly 'total' summary rows
  dt <- dt[!is.na(dt[[1]]) & dt[[2]] != 'total']
  # Build a yearmon index from the year and month-name columns.
  # NOTE(review): dym() parsing of "<year>-<month name>" strings should be
  # confirmed against the current workbook layout and locale.
  dt$Año <- as.yearmon(dym(paste0(dt[[1]], '-', dt[[2]])))
  dt$Mes <- NULL
  dt <- as.data.table(dt)
  # Coerce every measurement column to numeric. The original `.SD =` only
  # worked via partial argument matching; the data.table argument is .SDcols.
  dt[, names(dt)[-1] := lapply(.SD, function(x){as.numeric(as.character(x))}),
     .SDcols = names(dt)[-1]]
  return(dt)
}

dt <- get_core(path, 'Gasolinas')
|
# UI definition for the Sanitation Hub data-collection Shiny app.
# Builds a three-tab navbarPage: upload/submit, data overview, data analysis.
library(shiny)
library(dplyr)
library(shinyWidgets)
library(shinythemes)
library(shinyjs)
library(shinyalert)
library(plotly)
# NOTE(review): absolute, machine-specific paths plus setwd() make this
# script non-portable; prefer project-relative paths.
setwd('/Users/gather3/Documents/Sanitation-Hub')
source('ui_modules.R')
setwd('/Users/gather3/Documents/General Coding/Database')
# NOTE(review): 'colection' is a typo, but it appears in runtime titles/ids
# that server code may reference -- confirm before renaming.
ui <- tagList(
useShinyalert(),
# Custom client-side JavaScript served from the app's www/ directory
tags$head(tags$script(type="text/javascript", src = "code.js")),
navbarPage(title = 'Sanitation Hub - Data Colection', id = 'nav', theme = 'style.css',
# Tab 0: choose a CSV/Excel file, preview the database, submit the data
tabPanel(title = 'Data colection', id = 'colection', value = 0,
fluidRow(
column(4, offset = 4,
'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
br(),
br(),
fileInput('csv', width = '100%',
label = 'Choose the file to update',
buttonLabel = 'Browse',
accept = c("text/csv", ".csv", ".xls", ".xlsx"),
placeholder = 'No file selected'),
"Case you want to see our current database, click on the button below",
br(),
br(),
fluidRow(
column( width = 4 , offset = 3, actionButton('loadDB', label = 'Load Database', icon = icon("upload"), width = '200px'))
),
br(),
br(),
"If your file follows all the instructions above, you file may be available to upload on the Gather's Sanitation Hub! Just press the button and the system will make all the verifications to
send you data",
br(),
br(),
fluidRow(
column(width = 4, offset = 3, actionButton('Submit',"Send to the database", icon("database"), width = '200px'))
)
)
)
),
# Tab 1: summary tables for numeric and categorical columns
tabPanel(title = 'Data overview', id = 'overview', value = 1,
textOutput("data_structure"),
br(),
textOutput("numeric_title"),
br(),
br(),
fluidRow(
column(offset = 3, width = 6, tableOutput("data_stats"))
),
br(),
br(),
textOutput("categorical_title"),
br(),
br(),
fluidRow(
column(offset = 3, width = 6, tableOutput("cat_stats"))
)
),
# Tab 2: plots and maps, one sub-tab per visualisation
tabPanel(title = 'Data analysis', id = 'analysis', value = 2,
tabsetPanel(id = 'analysis-selection', type = 'tabs',
tabPanel('Type of Toilets', plotOutput('tot', height = '600px'),
br(),
br(),
plotlyOutput('tot2', height = '600px')),
tabPanel('Fill Level', leafletOutput('Fill', height = '600px')),
tabPanel('Maps Visualisations', leafletOutput('Map')))
)
)
) | /Legacy/ui.R | no_license | blueriver212/Sanition_hub | R | false | false | 4,222 | r | library(shiny)
# UI definition for the Sanitation Hub data-collection Shiny app.
# Builds a three-tab navbarPage: upload/submit, data overview, data analysis.
library(dplyr)
library(shinyWidgets)
library(shinythemes)
library(shinyjs)
library(shinyalert)
library(plotly)
# NOTE(review): absolute, machine-specific paths plus setwd() make this
# script non-portable; prefer project-relative paths.
setwd('/Users/gather3/Documents/Sanitation-Hub')
source('ui_modules.R')
setwd('/Users/gather3/Documents/General Coding/Database')
# NOTE(review): 'colection' is a typo, but it appears in runtime titles/ids
# that server code may reference -- confirm before renaming.
ui <- tagList(
useShinyalert(),
# Custom client-side JavaScript served from the app's www/ directory
tags$head(tags$script(type="text/javascript", src = "code.js")),
navbarPage(title = 'Sanitation Hub - Data Colection', id = 'nav', theme = 'style.css',
# Tab 0: choose a CSV/Excel file, preview the database, submit the data
tabPanel(title = 'Data colection', id = 'colection', value = 0,
fluidRow(
column(4, offset = 4,
'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
br(),
br(),
fileInput('csv', width = '100%',
label = 'Choose the file to update',
buttonLabel = 'Browse',
accept = c("text/csv", ".csv", ".xls", ".xlsx"),
placeholder = 'No file selected'),
"Case you want to see our current database, click on the button below",
br(),
br(),
fluidRow(
column( width = 4 , offset = 3, actionButton('loadDB', label = 'Load Database', icon = icon("upload"), width = '200px'))
),
br(),
br(),
"If your file follows all the instructions above, you file may be available to upload on the Gather's Sanitation Hub! Just press the button and the system will make all the verifications to
send you data",
br(),
br(),
fluidRow(
column(width = 4, offset = 3, actionButton('Submit',"Send to the database", icon("database"), width = '200px'))
)
)
)
),
# Tab 1: summary tables for numeric and categorical columns
tabPanel(title = 'Data overview', id = 'overview', value = 1,
textOutput("data_structure"),
br(),
textOutput("numeric_title"),
br(),
br(),
fluidRow(
column(offset = 3, width = 6, tableOutput("data_stats"))
),
br(),
br(),
textOutput("categorical_title"),
br(),
br(),
fluidRow(
column(offset = 3, width = 6, tableOutput("cat_stats"))
)
),
# Tab 2: plots and maps, one sub-tab per visualisation
tabPanel(title = 'Data analysis', id = 'analysis', value = 2,
tabsetPanel(id = 'analysis-selection', type = 'tabs',
tabPanel('Type of Toilets', plotOutput('tot', height = '600px'),
br(),
br(),
plotlyOutput('tot2', height = '600px')),
tabPanel('Fill Level', leafletOutput('Fill', height = '600px')),
tabPanel('Maps Visualisations', leafletOutput('Map')))
)
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_fix_brlen.R
\name{tree_fix_brlen}
\alias{tree_fix_brlen}
\title{Takes a tree and fixes negative or zero length branches in several ways}
\usage{
tree_fix_brlen(tree = NULL, fixing_criterion = "negative",
fixing_method = 0, ultrametric = TRUE)
}
\arguments{
\item{tree}{A tree either as a newick character string or as a phylo object}
\item{fixing_criterion}{A character vector specifying the type of branch length to be fixed: "negative" or "zero"}
\item{fixing_method}{A character vector specifying the method to fix branch lengths: "bladj", "mrbayes" or a number to be assigned to all branches meeting fixing_criterion}
\item{ultrametric}{Boolean indicating whether to force ultrametric or not.}
}
\value{
A phylo object with fixed branch lengths
}
\description{
Takes a tree and fixes negative or zero length branches in several ways
}
| /man/tree_fix_brlen.Rd | no_license | KlausVigo/datelife | R | false | true | 927 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_fix_brlen.R
\name{tree_fix_brlen}
\alias{tree_fix_brlen}
\title{Takes a tree and fixes negative or zero length branches in several ways}
\usage{
tree_fix_brlen(tree = NULL, fixing_criterion = "negative",
fixing_method = 0, ultrametric = TRUE)
}
\arguments{
\item{tree}{A tree either as a newick character string or as a phylo object}
\item{fixing_criterion}{A character vector specifying the type of branch length to be fixed: "negative" or "zero"}
\item{fixing_method}{A character vector specifying the method to fix branch lengths: "bladj", "mrbayes" or a number to be assigned to all branches meeting fixing_criterion}
\item{ultrametric}{Boolean indicating whether to force ultrametric or not.}
}
\value{
A phylo object with fixed branch lengths
}
\description{
Takes a tree and fixes negative or zero length branches in several ways
}
|
# Drug-synergy pipeline driver: wires CEMiTool / coseq / LINCS outputs for
# one patient into the three numbered scripts under code/drug_synergy/.
# NOTE(review): relies on `patient_dir` and `sample_info` being defined by
# the calling environment before this script is sourced.
# directories
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
data_dir <- file.path(root_dir, "data")
module_dir <- file.path(root_dir, "code", "drug_synergy")
output_dir <- file.path(patient_dir, "output", "drug_synergy")
# NOTE(review): prefer TRUE/FALSE over the reassignable shorthands T/F.
dir.create(output_dir, showWarnings = F, recursive = T)
# references: ChEMBL v29 SQLite database
chembldb_path <- file.path(data_dir, "chembl", "chembl_29_sqlite", "chembl_29.db")
# inputs from patient's cemitools directory
cemitools_dir <- file.path(patient_dir, "output", "drug_recommendations", "CEMiTools")
interaction <- file.path(cemitools_dir, "interactions.tsv")
enrichment_nes <- file.path(cemitools_dir, "enrichment_nes.tsv")
# inputs from patient's coseq directory
coseq_dir <- file.path(patient_dir, "output", "coseq_detect", "pediatric")
cluster <- file.path(coseq_dir, "cancer_group_of_interest_nb_cluster_assigned.tsv")
# inputs from patient's output directory (lincs connectivity analysis output)
normal_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_normals_qSig_output.txt")
pediatric_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_pediatric_qSig_output.txt")
adult_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_adult_qSig_output.txt")
# subnetwork file and drug mapped subnetwork qSig files (written by step 1)
subnetwork <- file.path(output_dir, "subnetwork_genes.tsv")
subnetwork_mapped <- file.path(output_dir, "subnetwork_gene_drug_map.tsv")
normal_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_normal_qSig_subnetwork_drug_gene_map.tsv")
pediatric_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_pediatric_qSig_subnetwork_drug_gene_map.tsv")
adult_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_adult_qSig_subnetwork_drug_gene_map.tsv")
# synergy score outputs for all comparisons (written by step 2)
output_normal <- file.path(output_dir, "patient_vs_normal_qSig_synergy_score.tsv")
output_pediatric <- file.path(output_dir, "patient_vs_pediatric_qSig_synergy_score.tsv")
output_adult <- file.path(output_dir, "patient_vs_adult_qSig_synergy_score.tsv")
output_combined <- file.path(output_dir, "combined_qSig_synergy_score.tsv")
combined_plot_file <- file.path(output_dir, "combined_qSig_synergy_score_top10.pdf")
# patient of interest: the RNA-Seq biospecimen id from the sample sheet
patient_of_interest <- sample_info %>%
filter(experimental_strategy == "RNA-Seq") %>%
pull(Kids_First_Biospecimen_ID)
# Step 1: map drugs to every gene in the CEMiTool subnetwork.
# This script will only generate outputs if there are any positively
# correlated modules identified via CEMiTool.
# NOTE(review): paths are interpolated into shell commands unquoted;
# consider shQuote() so paths containing spaces do not break the calls.
subnetwork_qSig_gene_drug_map <- file.path(module_dir, "01-subnetwork_qSig_gene_drug_map.R")
cmd <- paste('Rscript', subnetwork_qSig_gene_drug_map,
'--interaction', interaction,
'--enrichment_nes', enrichment_nes,
'--cluster', cluster,
'--patient_of_interest', patient_of_interest,
'--chemblDb_path', chembldb_path,
'--normal_qSig', normal_qSig,
'--pediatric_qSig', pediatric_qSig,
'--adult_qSig', adult_qSig,
'--subnetwork', subnetwork,
'--subnetwork_mapped', subnetwork_mapped,
'--normal_mapped', normal_subnet_qSig_mapped,
'--pediatric_mapped', pediatric_subnet_qSig_mapped,
'--adult_mapped', adult_subnet_qSig_mapped)
system(cmd)
# Step 2: score drugs that are both in qSig and the subnetwork.
# Only run if the subnetwork output file was generated by step 1.
if(file.exists(subnetwork)){
drug_synergy_score_calc <- file.path(module_dir, "02-drug_synergy_score_calc.R")
cmd <- paste('Rscript', drug_synergy_score_calc,
'--subnetwork', subnetwork,
'--subnetwork_mapped', subnetwork_mapped,
'--normal_mapped', normal_subnet_qSig_mapped,
'--pediatric_mapped', pediatric_subnet_qSig_mapped,
'--adult_mapped', adult_subnet_qSig_mapped,
'--output_normal', output_normal,
'--output_pediatric', output_pediatric,
'--output_adult', output_adult,
'--output_combined', output_combined)
system(cmd)
}
# Step 3: create bubble plots from the output of 02-drug_synergy_score_calc.R.
# Only run if the combined output was generated by step 2.
if(file.exists(output_combined)){
create_bubble_plot <- file.path(module_dir, "03-create_bubble_plot.R")
cmd <- paste('Rscript', create_bubble_plot,
'--combined_synergy', output_combined,
'--output_file', combined_plot_file)
system(cmd)
}
| /code/drug_synergy/run_synergy.R | no_license | d3b-center/OMPARE | R | false | false | 4,592 | r | # directories
# Drug-synergy pipeline driver: wires CEMiTool / coseq / LINCS outputs for
# one patient into the three numbered scripts under code/drug_synergy/.
# NOTE(review): relies on `patient_dir` and `sample_info` being defined by
# the calling environment before this script is sourced.
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
data_dir <- file.path(root_dir, "data")
module_dir <- file.path(root_dir, "code", "drug_synergy")
output_dir <- file.path(patient_dir, "output", "drug_synergy")
# NOTE(review): prefer TRUE/FALSE over the reassignable shorthands T/F.
dir.create(output_dir, showWarnings = F, recursive = T)
# references: ChEMBL v29 SQLite database
chembldb_path <- file.path(data_dir, "chembl", "chembl_29_sqlite", "chembl_29.db")
# inputs from patient's cemitools directory
cemitools_dir <- file.path(patient_dir, "output", "drug_recommendations", "CEMiTools")
interaction <- file.path(cemitools_dir, "interactions.tsv")
enrichment_nes <- file.path(cemitools_dir, "enrichment_nes.tsv")
# inputs from patient's coseq directory
coseq_dir <- file.path(patient_dir, "output", "coseq_detect", "pediatric")
cluster <- file.path(coseq_dir, "cancer_group_of_interest_nb_cluster_assigned.tsv")
# inputs from patient's output directory (lincs connectivity analysis output)
normal_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_normals_qSig_output.txt")
pediatric_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_pediatric_qSig_output.txt")
adult_qSig <- file.path(patient_dir, "output", "drug_recommendations", "patient_vs_adult_qSig_output.txt")
# subnetwork file and drug mapped subnetwork qSig files (written by step 1)
subnetwork <- file.path(output_dir, "subnetwork_genes.tsv")
subnetwork_mapped <- file.path(output_dir, "subnetwork_gene_drug_map.tsv")
normal_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_normal_qSig_subnetwork_drug_gene_map.tsv")
pediatric_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_pediatric_qSig_subnetwork_drug_gene_map.tsv")
adult_subnet_qSig_mapped <- file.path(output_dir, "patient_vs_adult_qSig_subnetwork_drug_gene_map.tsv")
# synergy score outputs for all comparisons (written by step 2)
output_normal <- file.path(output_dir, "patient_vs_normal_qSig_synergy_score.tsv")
output_pediatric <- file.path(output_dir, "patient_vs_pediatric_qSig_synergy_score.tsv")
output_adult <- file.path(output_dir, "patient_vs_adult_qSig_synergy_score.tsv")
output_combined <- file.path(output_dir, "combined_qSig_synergy_score.tsv")
combined_plot_file <- file.path(output_dir, "combined_qSig_synergy_score_top10.pdf")
# patient of interest: the RNA-Seq biospecimen id from the sample sheet
patient_of_interest <- sample_info %>%
filter(experimental_strategy == "RNA-Seq") %>%
pull(Kids_First_Biospecimen_ID)
# Step 1: map drugs to every gene in the CEMiTool subnetwork.
# This script will only generate outputs if there are any positively
# correlated modules identified via CEMiTool.
# NOTE(review): paths are interpolated into shell commands unquoted;
# consider shQuote() so paths containing spaces do not break the calls.
subnetwork_qSig_gene_drug_map <- file.path(module_dir, "01-subnetwork_qSig_gene_drug_map.R")
cmd <- paste('Rscript', subnetwork_qSig_gene_drug_map,
'--interaction', interaction,
'--enrichment_nes', enrichment_nes,
'--cluster', cluster,
'--patient_of_interest', patient_of_interest,
'--chemblDb_path', chembldb_path,
'--normal_qSig', normal_qSig,
'--pediatric_qSig', pediatric_qSig,
'--adult_qSig', adult_qSig,
'--subnetwork', subnetwork,
'--subnetwork_mapped', subnetwork_mapped,
'--normal_mapped', normal_subnet_qSig_mapped,
'--pediatric_mapped', pediatric_subnet_qSig_mapped,
'--adult_mapped', adult_subnet_qSig_mapped)
system(cmd)
# Step 2: score drugs that are both in qSig and the subnetwork.
# Only run if the subnetwork output file was generated by step 1.
if(file.exists(subnetwork)){
drug_synergy_score_calc <- file.path(module_dir, "02-drug_synergy_score_calc.R")
cmd <- paste('Rscript', drug_synergy_score_calc,
'--subnetwork', subnetwork,
'--subnetwork_mapped', subnetwork_mapped,
'--normal_mapped', normal_subnet_qSig_mapped,
'--pediatric_mapped', pediatric_subnet_qSig_mapped,
'--adult_mapped', adult_subnet_qSig_mapped,
'--output_normal', output_normal,
'--output_pediatric', output_pediatric,
'--output_adult', output_adult,
'--output_combined', output_combined)
system(cmd)
}
# Step 3: create bubble plots from the output of 02-drug_synergy_score_calc.R.
# Only run if the combined output was generated by step 2.
if(file.exists(output_combined)){
create_bubble_plot <- file.path(module_dir, "03-create_bubble_plot.R")
cmd <- paste('Rscript', create_bubble_plot,
'--combined_synergy', output_combined,
'--output_file', combined_plot_file)
system(cmd)
}
|
# Coursera Getting and cleaning data class
# Filename: run_analysis.R
# Author: Tejash Panchal
#
# This script:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the mean and standard deviation measurements.
# 3. Uses descriptive activity names to name the activities.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates an independent tidy data set with the average of each variable
#    for each activity and each subject.
library(plyr)
# Dataset root directory. Spaces need no escaping inside an R string; the
# previous "UCI\ HAR\ Dataset" form is an unrecognized-escape parse error.
data_dir <- "UCI HAR Dataset"
# Reference files: feature names and activity labels
feature_file <- file.path(data_dir, "features.txt")
activity_labels_file <- file.path(data_dir, "activity_labels.txt")
# Training set files
x_train_file <- file.path(data_dir, "train", "X_train.txt")
y_train_file <- file.path(data_dir, "train", "y_train.txt")
subject_train_file <- file.path(data_dir, "train", "subject_train.txt")
# Test set files
x_test_file <- file.path(data_dir, "test", "X_test.txt")
y_test_file <- file.path(data_dir, "test", "y_test.txt")
subject_test_file <- file.path(data_dir, "test", "subject_test.txt")
# Load all train data
x_train <- read.table(x_train_file)
y_train <- read.table(y_train_file)
subject_train <- read.table(subject_train_file)
# Load all test data
x_test <- read.table(x_test_file)
y_test <- read.table(y_test_file)
subject_test <- read.table(subject_test_file)
# Load feature names and activity labels
features <- read.table(feature_file, colClasses = c("character"))
activity_labels <- read.table(activity_labels_file, col.names = c("ActivityId", "Activity"))
##################################################################
# Part 1. Merge the training and the test sets into one data set.
##################################################################
trainData <- cbind(x_train, subject_train, y_train)
testData <- cbind(x_test, subject_test, y_test)
Data <- rbind(trainData, testData)
# Assign labels to each column: 561 feature names + Subject + ActivityId
colLabels <- rbind(rbind(features, c(562, "Subject")), c(563, "ActivityId"))[, 2]
names(Data) <- colLabels
# Write the merged data set
write.csv(Data, file = "tidydata.csv", row.names = TRUE)
############################################################################
# Part 2. Extract only the mean and standard deviation measurements
# (the Subject and ActivityId id columns are kept as well).
############################################################################
Data <- Data[, grepl("mean|std|Subject|ActivityId", names(Data))]
###########################################################################
# Part 3. Use descriptive activity names to name the activities.
###########################################################################
Data <- join(Data, activity_labels, by = "ActivityId", match = "first")
# Drop the numeric ActivityId now that the descriptive Activity column is
# attached. (The previous `Data[, -1]` removed the *first feature* column
# instead, since plyr::join preserves the column order of its x argument.)
Data <- Data[, names(Data) != "ActivityId"]
##############################################################
# Part 4. Appropriately label the data set with descriptive names.
##############################################################
# Remove parentheses, then expand the abbreviated feature-name components
names(Data) <- gsub('\\(|\\)', "", names(Data), perl = TRUE)
names(Data) <- make.names(names(Data))
names(Data) <- gsub('^t', "Time", names(Data))
names(Data) <- gsub('^f', "Frequency", names(Data))
names(Data) <- gsub('Acc', "Accelerometer", names(Data))
names(Data) <- gsub('GyroJerk', "AngularAcceleration", names(Data))
names(Data) <- gsub('Gyro', "AngularSpeed", names(Data))
names(Data) <- gsub('Mag', "Magnitude", names(Data))
names(Data) <- gsub('\\.mean', ".Mean", names(Data))
names(Data) <- gsub('\\.std', ".StandardDeviation", names(Data))
names(Data) <- gsub('Freq\\.', "Frequency.", names(Data))
names(Data) <- gsub('Freq$', "Frequency", names(Data))
names(Data) <- gsub("BodyBody", "Body", names(Data))
######################################################################################
# Part 5. Create an independent tidy data set with the average of each
# variable for each activity and each subject.
######################################################################################
Data_avg_var_sub <- ddply(Data, c("Subject", "Activity"), numcolwise(mean))
# Write the tidy summary (spell out row.names; `row.name=` only worked via
# partial argument matching)
# write.csv(Data_avg_var_sub, file = "Data_avg_var_sub.csv", row.names=TRUE)
write.table(Data_avg_var_sub, file = "Data_avg_var_sub.txt", row.names = FALSE)
| /run_analysis.R | no_license | tpanchal68/Getting-and-cleaning-data-course-project | R | false | false | 4,898 | r | # Coursera Getting and cleaning data class
# Filename: run_analysis.R
# Author: Tejash Panchal
#
# You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
library(plyr)

# Root directory of the UCI HAR dataset.
# BUG FIX: the original string was "UCI\ HAR\ Dataset"; '\ ' is an
# unrecognized escape sequence and is a parse error in R -- spaces need no
# escaping inside an R string literal.
data_dir <- "UCI HAR Dataset"

# Build all input file paths with file.path() (idiomatic and portable)
# instead of paste(dir, "/file", sep = "").
# Mean and standard deviation variable names reside in features.txt.
feature_file <- file.path(data_dir, "features.txt")
activity_labels_file <- file.path(data_dir, "activity_labels.txt")
# Training set files.
x_train_file <- file.path(data_dir, "train", "X_train.txt")
y_train_file <- file.path(data_dir, "train", "y_train.txt")
subject_train_file <- file.path(data_dir, "train", "subject_train.txt")
# Test set files.
x_test_file <- file.path(data_dir, "test", "X_test.txt")
y_test_file <- file.path(data_dir, "test", "y_test.txt")
subject_test_file <- file.path(data_dir, "test", "subject_test.txt")

# Load all train data.
x_train <- read.table(x_train_file)
y_train <- read.table(y_train_file)
subject_train <- read.table(subject_train_file)
# Load all test data.
x_test <- read.table(x_test_file)
y_test <- read.table(y_test_file)
subject_test <- read.table(subject_test_file)
# Load feature names and activity labels.
features <- read.table(feature_file, colClasses = c("character"))
activity_labels <- read.table(activity_labels_file, col.names = c("ActivityId", "Activity"))

##################################################################
# Part 1. Merges the training and the test sets to create one data set.
##################################################################
# Bind measurements, subject id, and activity id side by side, then stack
# the train and test halves.
trainData <- cbind(cbind(x_train, subject_train), y_train)
testData <- cbind(cbind(x_test, subject_test), y_test)
Data <- rbind(trainData, testData)
# Column labels: the feature names plus Subject (562) and ActivityId (563).
colLabels <- rbind(rbind(features, c(562, "Subject")), c(563, "ActivityId"))[, 2]
names(Data) <- colLabels
# Generate .csv file and output.
write.csv(Data, file = "tidydata.csv", row.names = TRUE)

############################################################################################
# Part 2. Extracts only the measurements on the mean and standard deviation for each measurement.
############################################################################################
# Keep mean/std feature columns plus the Subject and ActivityId key columns.
Data <- Data[, grepl("mean|std|Subject|ActivityId", names(Data))]

###########################################################################
# Part 3. Uses descriptive activity names to name the activities in the data set.
###########################################################################
Data <- join(Data, activity_labels, by = "ActivityId", match = "first")
# NOTE(review): this drops the FIRST column of Data; after the join,
# ActivityId is NOT in position 1 (the first mean feature is) -- verify the
# intent; 'Data$ActivityId <- NULL' was probably meant. Kept as-is to
# preserve the original output.
Data <- Data[, -1]

##############################################################
# Part 4. Appropriately labels the data set with descriptive names.
##############################################################
names(Data) <- gsub('\\(|\\)', "", names(Data), perl = TRUE)  # remove parentheses
names(Data) <- make.names(names(Data))                        # syntactically valid names
names(Data) <- gsub('^t', "Time", names(Data))
names(Data) <- gsub('^f', "Frequency", names(Data))
names(Data) <- gsub('Acc', "Accelerometer", names(Data))
names(Data) <- gsub('GyroJerk', "AngularAcceleration", names(Data))
names(Data) <- gsub('Gyro', "AngularSpeed", names(Data))
names(Data) <- gsub('Mag', "Magnitude", names(Data))
names(Data) <- gsub('\\.mean', ".Mean", names(Data))
names(Data) <- gsub('\\.std', ".StandardDeviation", names(Data))
names(Data) <- gsub('Freq\\.', "Frequency.", names(Data))
names(Data) <- gsub('Freq$', "Frequency", names(Data))
names(Data) <- gsub("BodyBody", "Body", names(Data))

######################################################################################################################
# Part 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
######################################################################################################################
Data_avg_var_sub <- ddply(Data, c("Subject", "Activity"), numcolwise(mean))
# Generate .txt file and output.
# write.csv(Data_avg_var_sub, file = "Data_avg_var_sub.csv", row.names=TRUE)
# BUG FIX: the original used the partially matched argument 'row.name=';
# spell out 'row.names=' instead of relying on partial argument matching.
write.table(Data_avg_var_sub, file = "Data_avg_var_sub.txt", row.names = FALSE)
|
#' Barplot UI
#'
#' Builds the Shiny UI for the barplot module: a title box, an explanatory
#' message box, an optional feature-class selector, the plotly barplot
#' (with a loading spinner), and an optional drilldown scatterplot.
#'
#' @param id Module ID
#' @param title A string
#' @param barplot_html A string that is HTML
#' @param ... Arguments passed to drilldown_scatterplot_ui
#'
#' @export
barplot_ui <- function(
  id,
  title = "",
  barplot_html = htmltools::includeMarkdown(get_markdown_path("barchart1")),
  ...
){
  ns <- shiny::NS(id)

  # Optional selector row, shown only when the module server toggles
  # output.display_feature_class_selection_ui.
  class_selection_panel <- shiny::conditionalPanel(
    condition = "output.display_feature_class_selection_ui",
    ns = ns,
    shiny::fluidRow(
      optionsBox(
        width = 12,
        shiny::column(
          width = 12,
          shiny::uiOutput(ns("feature_class_selection_ui"))
        )
      )
    )
  )

  # Main plot area: spinner-wrapped plotly output plus shared plotly controls.
  barplot_panel <- shiny::fluidRow(
    plotBox(
      width = 12,
      shinycssloaders::withSpinner(plotly::plotlyOutput(ns("barplot"))),
      plotly_ui(ns("barplot"))
    )
  )

  # Optional drilldown scatterplot, toggled by output.display_drilldown_ui.
  drilldown_panel <- shiny::conditionalPanel(
    condition = "output.display_drilldown_ui",
    ns = ns,
    drilldown_scatterplot_ui(ns("scatterplot"), ...)
  )

  shiny::tagList(
    titleBox(title),
    messageBox(
      width = 12,
      barplot_html
    ),
    class_selection_panel,
    barplot_panel,
    drilldown_panel
  )
}
| /R/barplot_ui.R | permissive | Gibbsdavidl/iatlas.modules | R | false | false | 1,145 | r |
#' Barplot UI
#'
#' Assembles the user interface for the barplot Shiny module: a title box,
#' a message box with explanatory HTML, an optional feature-class selector,
#' the plotly barplot itself (wrapped in a loading spinner), and an optional
#' drilldown scatterplot.
#'
#' @param id Module ID
#' @param title A string
#' @param barplot_html A string that is HTML
#' @param ... Arguments passed to drilldown_scatterplot_ui
#'
#' @return A \code{shiny::tagList} with the module's UI elements.
#'
#' @export
barplot_ui <- function(
  id,
  title = "",
  barplot_html = htmltools::includeMarkdown(get_markdown_path("barchart1")),
  ...
){
  # Namespacing function: scopes input/output IDs to this module instance.
  ns <- shiny::NS(id)
  shiny::tagList(
    titleBox(title),
    messageBox(
      width = 12,
      barplot_html
    ),
    # Selector panel: rendered only when the server side sets the flag
    # output.display_feature_class_selection_ui.
    shiny::conditionalPanel(
      condition = "output.display_feature_class_selection_ui",
      ns = ns,
      shiny::fluidRow(
        optionsBox(
          width = 12,
          shiny::column(
            width = 12,
            shiny::uiOutput(ns("feature_class_selection_ui"))
          )
        )
      )
    ),
    # Main plot: "barplot" is namespaced, turned into a plotly output, and
    # wrapped in a spinner; plotly_ui adds the shared plotly controls.
    shiny::fluidRow(
      plotBox(
        width = 12,
        "barplot" %>%
          ns() %>%
          plotly::plotlyOutput(.) %>%
          shinycssloaders::withSpinner(.),
        plotly_ui(ns("barplot"))
      )
    ),
    # Drilldown scatterplot, shown when output.display_drilldown_ui is set.
    shiny::conditionalPanel(
      condition = "output.display_drilldown_ui",
      ns = ns,
      drilldown_scatterplot_ui(ns("scatterplot"), ...)
    )
  )
}
|
## ----setup, include=FALSE, cache=FALSE--------------------------------------------------
# R code extracted (purled) from the nCal package vignette; chunks must be
# run in order because later examples reuse objects from earlier ones.
# set global chunk options
library(knitr)
opts_chunk$set(fig.path='figure/minimal-', fig.align='center', fig.show='hold',dev='pdf',
warning=FALSE,dev.args=list(family="Palatino"), tidy.opts=list(blank=FALSE, width.cutoff=60))
options(formatR.arrow=TRUE,width=90)
#render_listings()
## ----loadnCal, include=FALSE, cache=FALSE, tidy=TRUE, echo=TRUE-------------------------
library(nCal)
## ----Example1data, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE----------------------
# Simulate a 10-point standard curve: 3-fold serial dilutions down from 1e4,
# 2 replicates per concentration, noise sd 0.2, using the first row of the
# packaged p.eotaxin parameters.
set.seed(1)
log.conc=log(1e4)-log(3)*9:0
n.replicate=2
fi=simulate1curve (p.eotaxin[1,], rep(log.conc,each=n.replicate), sd.e=0.2)
dat.std=data.frame(fi, expected_conc=exp(rep(log.conc,each=n.replicate)),
analyte="Test", assay_id="Run 1", sample_id=NA, well_role="Standard",
dilution=rep(3**(9:0), each=n.replicate))
# add unknown
# Four unknown samples (expected_conc = NA) whose concentrations are to be
# read back off the fitted curve; sample 4 carries a 10-fold dilution.
dat.unk=rbind(
data.frame(fi=exp(6.75), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=1, well_role="Unknown", dilution=1)
, data.frame(fi=exp(6.70), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=2, well_role="Unknown", dilution=1)
, data.frame(fi=exp(3), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=3, well_role="Unknown", dilution=1)
, data.frame(fi=exp(4.4), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=4, well_role="Unknown", dilution=10)
)
dat=rbind(dat.std, dat.unk)
## ----Example1drm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, drm fit."----
# Frequentist calibration fit via drm; return.fits keeps the fitted model
# object(s) in the "fits" attribute of the result.
res.drm = ncal(log(fi)~expected_conc, dat, return.fits = TRUE)
## ----Example1drmres, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, drm fit."----
res.drm
## ----Example1resultsfit, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE----------------
fit.drm=attr(res.drm, "fits")[[1]]
## ----Example1bcrm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, bcrm fit."----
# Bayesian fit (bcrm) of the same data via JAGS, 5e3 MCMC iterations.
res.bcrm = ncal(log(fi)~expected_conc, dat, bcrm.fit=T, return.fits = TRUE, bcrm.model="norm", control.jags=list(n.iter=5e3))
fit.bcrm=attr(res.bcrm, "fits")
## ----Example1bcrmres, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE--------
res.bcrm
## ----Example1results, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE-------------------
# Compare point estimates (first rbind) and standard errors (second rbind)
# between the two fits; cla2gh presumably converts the classical
# parameterisation to the g-h one used by bcrm -- verify in the nCal docs.
rbind(cla2gh(coef(fit.drm)), coef(fit.bcrm))
rbind(sqrt(diag(vcov(fit.drm))), sqrt(diag(vcov(fit.bcrm, type="classical"))))
## ----newunknown, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE------------------------
# Estimate concentrations for two new readouts (on the log(fi) scale, since
# the model was fitted as log(fi) ~ expected_conc).
getConc(fit.bcrm, c(5.7,6.3))
## ----Example2bcrm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE-----------
# Hierarchical-model example: jointly fit 4 runs with a t4 error model.
dat=subset(hier.model.ex.2, assay_id %in% paste("Run",1:4))
fit.bcrm=bcrm(log(fi)~expected_conc, dat, error.model="t4", prior="cytokine", n.iter=1e4)
## ----Example2, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="Comparing bcrm fit with drm and Prism fits."----
# parameters from Prism fits
# 5PL parameters exported from GraphPad Prism, rearranged into the
# (c,d,f,b,e) vector form expected by FivePL.x below.
prism.1 = c("c"=1.596,"d"=10.28,"f"=0.7202,"b"=-0.8815,"e"=10^((1.597+1/0.8815*log10(2**(1/0.7202)-1))) )
prism.2 = c("c"=1.350,"d"=11.32,"f"=8.640e+010,"b"=-0.3452,"e"=10^((1.485+1/0.3452*log10(2**(1/8.640e+010)-1))) )
prism.3 = c("c"=1.333,"d"=10.23,"f"=0.7366,"b"=-0.8502,"e"=10^((1.526+1/0.8502*log10(2**(1/0.7366)-1))) )
prism.4 = c("c"=1.580,"d"=10.37,"f"=1.694,"b"=-0.6639,"e"=10^((1.530+1/0.6639*log10(2**(1/1.694)-1))) )
prism=rbind(prism.1,prism.2,prism.3,prism.4)
# start plotting
# One panel per run: drm fit (dashed black), the per-run slice of the joint
# bcrm fit (solid black), and the Prism fit (solid gray).
par(mfrow=c(2,2))
for (i in 1:4) {
assay.id=paste("Run", i)
# fit drm model
fit.drm = drm.fit(log(fi)~expected_conc, data=dat[dat$assay_id==assay.id,], robust="median")
plot(fit.drm, type="all", col="black", main=assay.id, lty=2)
plot(get.single.fit(fit.bcrm, assay.id), add=T, log="x", col=1)
# plot Prism fit
xx=exp(seq(log(0.51),log(1e4),length=100))
lines(xx, FivePL.x(xx,prism[i,]), type="l", lty=1, col="darkgray")
legend(x="bottomright",legend=c("Prism, robust","drm, median","bcrm, t4"),lty=c(1,2,1),col=c("darkgray",1,1),bty="n")
}
| /nCal/inst/doc/nCal-vignette.R | no_license | ingted/R-Examples | R | false | false | 4,309 | r | ## ----setup, include=FALSE, cache=FALSE--------------------------------------------------
# set global chunk options
# (R code purled from the nCal vignette; run the chunks in order, since
# later examples reuse objects created by earlier ones.)
library(knitr)
opts_chunk$set(fig.path='figure/minimal-', fig.align='center', fig.show='hold',dev='pdf',
warning=FALSE,dev.args=list(family="Palatino"), tidy.opts=list(blank=FALSE, width.cutoff=60))
options(formatR.arrow=TRUE,width=90)
#render_listings()
## ----loadnCal, include=FALSE, cache=FALSE, tidy=TRUE, echo=TRUE-------------------------
library(nCal)
## ----Example1data, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE----------------------
# Simulate a 10-point standard curve (3-fold dilutions from 1e4, 2
# replicates each, noise sd 0.2) from the first row of p.eotaxin.
set.seed(1)
log.conc=log(1e4)-log(3)*9:0
n.replicate=2
fi=simulate1curve (p.eotaxin[1,], rep(log.conc,each=n.replicate), sd.e=0.2)
dat.std=data.frame(fi, expected_conc=exp(rep(log.conc,each=n.replicate)),
analyte="Test", assay_id="Run 1", sample_id=NA, well_role="Standard",
dilution=rep(3**(9:0), each=n.replicate))
# add unknown
# Four unknown samples (expected_conc = NA); sample 4 has dilution 10.
dat.unk=rbind(
data.frame(fi=exp(6.75), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=1, well_role="Unknown", dilution=1)
, data.frame(fi=exp(6.70), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=2, well_role="Unknown", dilution=1)
, data.frame(fi=exp(3), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=3, well_role="Unknown", dilution=1)
, data.frame(fi=exp(4.4), expected_conc=NA, analyte="Test", assay_id="Run 1", sample_id=4, well_role="Unknown", dilution=10)
)
dat=rbind(dat.std, dat.unk)
## ----Example1drm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, drm fit."----
# Frequentist calibration fit; return.fits keeps the fitted model objects.
res.drm = ncal(log(fi)~expected_conc, dat, return.fits = TRUE)
## ----Example1drmres, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, drm fit."----
res.drm
## ----Example1resultsfit, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE----------------
fit.drm=attr(res.drm, "fits")[[1]]
## ----Example1bcrm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="ncal graphical output, bcrm fit."----
# Bayesian fit of the same data via JAGS (5e3 MCMC iterations).
res.bcrm = ncal(log(fi)~expected_conc, dat, bcrm.fit=T, return.fits = TRUE, bcrm.model="norm", control.jags=list(n.iter=5e3))
fit.bcrm=attr(res.bcrm, "fits")
## ----Example1bcrmres, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE--------
res.bcrm
## ----Example1results, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE-------------------
# Point estimates (first rbind) and standard errors (second) side by side.
rbind(cla2gh(coef(fit.drm)), coef(fit.bcrm))
rbind(sqrt(diag(vcov(fit.drm))), sqrt(diag(vcov(fit.bcrm, type="classical"))))
## ----newunknown, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE------------------------
# Concentration estimates for two new readouts (log(fi) scale).
getConc(fit.bcrm, c(5.7,6.3))
## ----Example2bcrm, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE-----------
# Hierarchical example: jointly fit 4 runs with a t4 error model.
dat=subset(hier.model.ex.2, assay_id %in% paste("Run",1:4))
fit.bcrm=bcrm(log(fi)~expected_conc, dat, error.model="t4", prior="cytokine", n.iter=1e4)
## ----Example2, include=TRUE, cache=FALSE, tidy=TRUE, echo=TRUE, eval=TRUE, fig.width=8, fig.height=8.5, fig.cap="Comparing bcrm fit with drm and Prism fits."----
# parameters from Prism fits
prism.1 = c("c"=1.596,"d"=10.28,"f"=0.7202,"b"=-0.8815,"e"=10^((1.597+1/0.8815*log10(2**(1/0.7202)-1))) )
prism.2 = c("c"=1.350,"d"=11.32,"f"=8.640e+010,"b"=-0.3452,"e"=10^((1.485+1/0.3452*log10(2**(1/8.640e+010)-1))) )
prism.3 = c("c"=1.333,"d"=10.23,"f"=0.7366,"b"=-0.8502,"e"=10^((1.526+1/0.8502*log10(2**(1/0.7366)-1))) )
prism.4 = c("c"=1.580,"d"=10.37,"f"=1.694,"b"=-0.6639,"e"=10^((1.530+1/0.6639*log10(2**(1/1.694)-1))) )
prism=rbind(prism.1,prism.2,prism.3,prism.4)
# start plotting
# One panel per run: drm (dashed), per-run bcrm slice (solid), Prism (gray).
par(mfrow=c(2,2))
for (i in 1:4) {
assay.id=paste("Run", i)
# fit drm model
fit.drm = drm.fit(log(fi)~expected_conc, data=dat[dat$assay_id==assay.id,], robust="median")
plot(fit.drm, type="all", col="black", main=assay.id, lty=2)
plot(get.single.fit(fit.bcrm, assay.id), add=T, log="x", col=1)
# plot Prism fit
xx=exp(seq(log(0.51),log(1e4),length=100))
lines(xx, FivePL.x(xx,prism[i,]), type="l", lty=1, col="darkgray")
legend(x="bottomright",legend=c("Prism, robust","drm, median","bcrm, t4"),lty=c(1,2,1),col=c("darkgray",1,1),bty="n")
}
|
# Define matrix samplemat by replicating the sequence 1:5 four times and
# reshaping into a 5 x 4 matrix (column-wise fill, so each column is 1:5),
# then sum over rows and columns.
# (Fixed: the original comment called the variable 'mymat'.)
samplemat <- matrix(rep(1:5, 4), ncol = 4)
# rowSums()/colSums() are the idiomatic (and faster) equivalents of
# apply(x, 1, sum) / apply(x, 2, sum); they print the same values.
rowSums(samplemat)  # 4 8 12 16 20
colSums(samplemat)  # 15 15 15 15
| /Assignment_3.3.R | no_license | 123aruna/Assignment-3.3CORRECTED | R | false | false | 218 | r | #Define matrix mymat by replicating the sequence 1:5 for 4 times and
#transforming into a matrix, sum over rows and columns.
# (rep(1:5, 4) fills the 5 x 4 matrix column-wise, so each column is 1:5.)
samplemat<-matrix(rep(1:5,4),ncol=4)
# Row sums, auto-printed at top level: 4 8 12 16 20.
apply(samplemat, 1, sum)
# Column sums, auto-printed: 15 for each of the 4 columns.
apply(samplemat, 2, sum)
|
library(NISTunits)
### Name: NISTcubMeterPerSecTOcubYardPerMin
### Title: Convert cubic meter per second to cubic yard per minute
### Aliases: NISTcubMeterPerSecTOcubYardPerMin
### Keywords: programming
### ** Examples
# Convert a volumetric flow rate of 10 cubic metres per second into cubic
# yards per minute; the result is auto-printed at the top level.
NISTcubMeterPerSecTOcubYardPerMin(10)
| /data/genthat_extracted_code/NISTunits/examples/NISTcubMeterPerSecTOcubYardPerMin.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 264 | r | library(NISTunits)
### Name: NISTcubMeterPerSecTOcubYardPerMin
### Title: Convert cubic meter per second to cubic yard per minute
### Aliases: NISTcubMeterPerSecTOcubYardPerMin
### Keywords: programming
### ** Examples
# Convert a flow of 10 m^3/s to yd^3/min (result auto-printed).
NISTcubMeterPerSecTOcubYardPerMin(10)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_linelist.R
\name{validate_linelist}
\alias{validate_linelist}
\title{Checks the content of a linelist object}
\usage{
validate_linelist(x, allow_extra = FALSE, ref_types = tags_types())
}
\arguments{
\item{x}{a \code{linelist} object}
\item{allow_extra}{a \code{logical} indicating if additional data tags not
currently recognized by \code{linelist} should be allowed; if \code{FALSE}, unknown
tags will trigger an error}
\item{ref_types}{a \code{list} providing allowed types for all tags, as returned
by \code{\link[=tags_types]{tags_types()}}}
}
\value{
If checks pass, a \code{linelist} object; otherwise issues an error.
}
\description{
This function evaluates the validity of a \code{linelist} object by checking the
object class, its tags, and the types of the tagged variables. It combines
validations checks made by \code{\link[=validate_types]{validate_types()}} and \code{\link[=validate_tags]{validate_tags()}}. See
'Details' section for more information on the checks performed.
}
\details{
The following checks are performed:
\itemize{
\item \code{x} is a \code{linelist} object
\item \code{x} has a well-formed \code{tags} attribute
\item all default tags are present (even if \code{NULL})
\item all tagged variables correspond to existing columns
\item all tagged variables have an acceptable class
\item (optional) \code{x} has no extra tag beyond the default tags
}
}
\examples{
if (require(outbreaks) && require(dplyr) && require(magrittr)) {
## create a valid linelist
x <- measles_hagelloch_1861 \%>\%
tibble() \%>\%
make_linelist(
id = "case_ID",
date_onset = "date_of_prodrome",
age = "age",
gender = "gender"
)
x
## validation
validate_linelist(x)
## create an invalid linelist - onset date is a factor
x <- measles_hagelloch_1861 \%>\%
tibble() \%>\%
make_linelist(
id = "case_ID",
date_onset = "gender",
age = "age"
)
x
## the below issues an error
## note: tryCatch is only used to avoid a genuine error in the example
tryCatch(validate_linelist(x), error = paste)
}
}
\seealso{
\itemize{
\item \code{\link[=tags_types]{tags_types()}} to change allowed types
\item \code{\link[=validate_types]{validate_types()}} to check if tagged variables have the right classes
\item \code{\link[=validate_tags]{validate_tags()}} to perform a series of checks on the tags
}
}
\author{
Thibaut Jombart \email{thibaut@data.org}
}
| /man/validate_linelist.Rd | permissive | epiverse-trace/linelist | R | false | true | 2,524 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_linelist.R
\name{validate_linelist}
\alias{validate_linelist}
\title{Checks the content of a linelist object}
\usage{
validate_linelist(x, allow_extra = FALSE, ref_types = tags_types())
}
\arguments{
\item{x}{a \code{linelist} object}
\item{allow_extra}{a \code{logical} indicating if additional data tags not
currently recognized by \code{linelist} should be allowed; if \code{FALSE}, unknown
tags will trigger an error}
\item{ref_types}{a \code{list} providing allowed types for all tags, as returned
by \code{\link[=tags_types]{tags_types()}}}
}
\value{
If checks pass, a \code{linelist} object; otherwise issues an error.
}
\description{
This function evaluates the validity of a \code{linelist} object by checking the
object class, its tags, and the types of the tagged variables. It combines
validations checks made by \code{\link[=validate_types]{validate_types()}} and \code{\link[=validate_tags]{validate_tags()}}. See
'Details' section for more information on the checks performed.
}
\details{
The following checks are performed:
\itemize{
\item \code{x} is a \code{linelist} object
\item \code{x} has a well-formed \code{tags} attribute
\item all default tags are present (even if \code{NULL})
\item all tagged variables correspond to existing columns
\item all tagged variables have an acceptable class
\item (optional) \code{x} has no extra tag beyond the default tags
}
}
\examples{
if (require(outbreaks) && require(dplyr) && require(magrittr)) {
## create a valid linelist
x <- measles_hagelloch_1861 \%>\%
tibble() \%>\%
make_linelist(
id = "case_ID",
date_onset = "date_of_prodrome",
age = "age",
gender = "gender"
)
x
## validation
validate_linelist(x)
## create an invalid linelist - onset date is a factor
x <- measles_hagelloch_1861 \%>\%
tibble() \%>\%
make_linelist(
id = "case_ID",
date_onset = "gender",
age = "age"
)
x
## the below issues an error
## note: tryCatch is only used to avoid a genuine error in the example
tryCatch(validate_linelist(x), error = paste)
}
}
\seealso{
\itemize{
\item \code{\link[=tags_types]{tags_types()}} to change allowed types
\item \code{\link[=validate_types]{validate_types()}} to check if tagged variables have the right classes
\item \code{\link[=validate_tags]{validate_tags()}} to perform a series of checks on the tags
}
}
\author{
Thibaut Jombart \email{thibaut@data.org}
}
|
#' @importFrom stats runif
solqp <- function(Q, A, b, c, x, verbose=FALSE, toler=1e-5, beta=0.8){
  # Solve a quadratic program in standard form (called by cecm):
  #
  #   minimize   0.5 * x'Qx + c'x
  #   subject to A x = b,  x >= 0.
  #
  # Arguments
  #   Q       : sparse symmetric objective matrix (n x n).
  #   A       : sparse constraint left-hand matrix (m x n).
  #   b       : constraint right-hand vector. NOTE: b is never read here --
  #             the starting point x is assumed to already satisfy A x = b.
  #   c       : objective column vector.
  #   x       : strictly positive, feasible starting point.
  #   verbose : if TRUE, print a message when a (local) optimum is found.
  #   toler   : relative stopping tolerance on the objective value.
  #   beta    : step size, 0 < beta < 1.
  # Value
  #   A list with components x ((local) optimal solution), y (dual solution,
  #   i.e. Lagrange multipliers) and obhis (objective-value history per
  #   iteration); NULL (with a printed diagnostic) for degenerate or
  #   unbounded problems, as in the original implementation.
  #
  # Method: interior ellipsoidal trust region and barrier function algorithm
  # with dual solution updating. References:
  #   Y. Ye, in Progress in Mathematical Programming (N. Megiddo ed.),
  #     Springer-Verlag, NY (1989) 49-63.
  #   Y. Ye, Math. Programming 56 (1992) 285-300.
  # Each iteration solves a linear KKT-like system
  #   ( Q + mu X^{-2}  A^T ) (dx)   (c')
  #   ( A              0   ) (dy) = (0 )
  # where X = diag(x) is a positive diagonal matrix.
  m <- nrow(A)
  n <- ncol(A)
  eps <- .Machine$double.eps
  alpha <- 0.9  # fraction of the step towards the positivity boundary

  # Push the starting point into the interior: project a random direction
  # onto the null space of A and take a small positive step along it.
  comp <- runif(n)
  comp <- solve(rbind(cbind(Diagonal(n), t(A)), cbind(A, Matrix(0, m, m))),
                rbind(comp, Matrix(0, m, 1)))
  comp <- comp[1:n]
  nora <- min(comp / x)
  if (nora < 0) {
    nora <- -.01 / nora
  } else {
    nora <- max(comp / x)
    if (nora == 0) {
      print('The problem has a unique feasible point')
      return()
    }
    nora <- .01 / nora
  }
  x <- x + nora * comp
  obvalue <- as.numeric(t(x) %*% (Q %*% x) / 2 + c %*% x)
  obhis <- obvalue  # objective history
  lower <- -Inf     # best dual lower bound seen so far
  zhis <- lower     # history of lower bounds
  gap <- 1
  lamda <- max(1, abs(obvalue) / sqrt(sqrt(n)))  # regularisation weight
  iter <- 0
  while (gap >= toler) {
    iter <- iter + 1
    # ---- phase 2: ellipsoid-constrained QP subproblem ----
    lamda <- (1 - beta) * lamda
    go <- 0
    gg <- Q %*% x + c      # gradient at the current iterate
    XX <- Diagonal(x = x)
    AA <- A %*% XX         # scaled constraint matrix
    XX <- XX %*% Q %*% XX  # scaled Hessian
    # Repeatedly solve the linear KKT system, doubling lamda, until the
    # scaled step keeps the iterate strictly positive and the objective
    # does not increase.
    while (go <= 0) {
      u <- solve(rbind(cbind(XX + lamda * Diagonal(n), t(AA)),
                       cbind(AA, Matrix(0, m, m))),
                 rbind(-x * gg, Matrix(0, m, 1)))
      xx <- x + x * u[1:n]
      go <- min(xx)
      if (go > 0) {
        ob <- as.numeric(t(xx) %*% Q %*% xx / 2 + c %*% xx)
        go <- min(c(go, obvalue - ob + eps))
      }
      lamda <- 2 * lamda
      if (lamda >= (1 + abs(obvalue)) / toler) {
        # The problem seems unbounded; bail out (returns NULL, as before).
        # BUG FIX: the original read 'y=-u(n+1:n+m)', which calls the
        # numeric vector u as a function (a runtime error) and mis-parses
        # the index as n + (1:n) + m.
        y <- -u[(n + 1):(n + m)]
        return()
      }
    }
    y <- -u[(n + 1):(n + m)]  # dual estimate from the KKT solve
    u <- u[1:n]
    # Step length: fraction alpha of the distance to the nearest bound...
    nora <- min(u)
    if (nora < 0) {
      nora <- -alpha / nora
    } else if (nora == 0) {
      nora <- alpha
    } else {
      nora <- Inf
    }
    # ...capped by the exact 1-D minimiser along the search direction.
    u <- x * u
    w1 <- as.numeric(t(u) %*% Q %*% u)
    w2 <- as.numeric(-t(u) %*% gg)
    if (w1 > 0) nora <- min(w2 / w1, nora)
    if (nora == Inf) {
      ob <- -Inf
    } else {
      x <- x + nora * u
      ob <- as.numeric(t(x) %*% Q %*% x / 2 + c %*% x)
    }
    if (ob == -Inf) {
      gap <- 0
      print('The problem is unbounded.')
      return()
    } else {
      obhis <- c(obhis, ob)
      comp <- Q %*% x + c - t(A) %*% y  # dual slack (reduced gradient)
      if (min(comp) >= 0) {
        # Dual feasible: update the lower bound and the duality gap.
        zhis <- c(zhis, ob - t(x) %*% comp)
        lower <- zhis[iter + 1]
        gap <- (ob - lower) / (1 + abs(ob))
        obvalue <- ob
      } else {
        # Dual infeasible: keep the previous bound; use relative progress.
        zhis <- c(zhis, zhis[iter])
        lower <- zhis[iter + 1]
        gap <- (obvalue - ob) / (1 + abs(ob))
        obvalue <- ob
      }
    }
    if (iter > 200) {
      # Diagnostic only -- the loop is not terminated here (as in the
      # original); slow convergence prints the current gap and tolerance.
      print(c(gap, toler))
    }
  }
  if (verbose) print('A (local) optimal solution is found.')
  return(list(x = x, y = y, obhis = obhis))
}
| /R/solqp.R | no_license | cran/evclust | R | false | false | 4,021 | r | #' @importFrom stats runif
solqp<- function(Q,A,b,c,x,verbose=FALSE,toler=1e-5,beta=0.8){
# Function called by cecm.
# This function solves quadratic program in standard form:
#
# minimize 0.5*(x'*Q*x)+c'*x
# subject to A*x=b, x>=0.
# Input
# Q: Sparse symmetric objective matrix.
# A: Sparse constraint left-hand matrix
# b: constraint right-hand column vector
#    NOTE(review): b is never referenced in the body; the starting point x
#    is assumed to already satisfy A*x=b and to be strictly positive.
# c: objective column vector
# x: strictly feasible interior starting point
# verbose: if TRUE, print a message when a (local) optimum is found
# toler: relative stopping tolerance: the objective value close to
# the local optimal one in the range of tolerance.
# beta : step size: 0 < beta < 1.
# Output
# x: (local) optimal solution
# y: optimal dual solution (Lagrangien multiplier)
# obhis : objective value history vs iterations
# (Degenerate/unbounded problems print a diagnostic and return NULL.)
# This program is the implementation of the interior ellipsoidal trust
# region and barrier function algorithm with dual solution updating
# technique in the standard QP form.
# Technical Reference
# Y. Ye, "An extension of Karmarkar's algorithm and the trust region method
# for convex quadratic programming," in Progress in Mathematical
# Programming (N. Megiddo ed.), Springer-Verlag, NY (1989) 49-63.
# Y. Ye, "On affine-scaling algorithm for nonconvex quadratic programming,"
# Math. Programming 56 (1992) 285-300.
# Comment: Each iteration we solve a linear KKT system like
# ( Q+mu X^{-2} A^T )(dx) = c'
# ( A 0 )(dy) = 0
# where X = diag(x) which is a positive diagonal matrix.
m<-nrow(A)
n<-ncol(A)
eps<-.Machine$double.eps
# (This initial objective value is dead code: ob is reassigned below before
# it is first read.)
ob=0.5*(t(x)%*% Q %*% x)+c%*%x
alpha <- 0.9
# Move the start point into the interior: random direction projected onto
# the null space of A, then a small positive step along it.
comp<- runif(n)
comp<-solve(rbind(cbind(Diagonal(n),t(A)),cbind(A,Matrix(0,m,m))),
rbind(comp,Matrix(0,m,1)))
comp<-comp[1:n]
nora<-min(comp/x)
if(nora < 0) nora <- -.01/nora else{
nora <- max(comp/x)
if(nora == 0){
print('The problem has a unique feasible point')
return()
}
nora <- .01/nora
}
x <- x + nora*comp
obvalue<- as.numeric(t(x) %*% (Q%*%x)/2+c%*%x)
obhis<-obvalue
lower <- -Inf
zhis <- lower
gap<-1
# Barrier / regularisation weight for the trust-region subproblem.
lamda<-max(1, abs(obvalue)/sqrt(sqrt(n)))
iter<-0
while(gap >= toler){
iter<-iter+1
#-------------------------
# spphase2
lamda<-(1-beta)*lamda
go<-0
gg <- Q%*%x+c
XX <- Diagonal(x=x)
AA <- A%*%XX
XX <- XX%*%Q%*%XX
# Repeatly solve an ellipsoid constrained QP problem by solving a linear
# system equation until find a positive solution.
while(go <= 0){
u<-solve(rbind(cbind(XX+lamda*Diagonal(n),t(AA)),cbind(AA,Matrix(0,m,m))),
rbind(-x*gg,Matrix(0,m,1)))
xx<-x+x*u[1:n]
go<-min(xx)
if(go > 0){
ob<-as.numeric(t(xx)%*%Q%*%xx/2+c%*%xx)
go <- min(c(go,obvalue-ob+eps))
}
lamda<-2*lamda
if(lamda >= (1+abs(obvalue))/toler){
#disp('The problem seems unbounded.')
# NOTE(review): BUG -- 'u(n+1:n+m)' calls the numeric vector u as a
# function (runtime error if this branch is reached) and the index
# parses as n+(1:n)+m; almost certainly u[(n+1):(n+m)] was intended,
# as in the assignment to y just below the inner loop. Confirm against
# the original MATLAB SPSOLQP source.
y=-u(n+1:n+m)
return()
}
}
y<- -u[(n+1):(n+m)]
u<-u[1:n]
# Step length: fraction alpha of the distance to the nearest bound, capped
# by the exact 1-D minimiser along the search direction (w2/w1).
nora <- min(u);
if(nora < 0) nora=-alpha/nora else{
if(nora == 0) nora=alpha else nora=Inf
}
u <- x*u
w1 <- as.numeric(t(u)%*%Q%*%u)
w2 <- as.numeric(-t(u)%*%gg)
if(w1 > 0) nora=min(w2/w1,nora)
if(nora == Inf) ob <- -Inf else{
x <-x+nora*u
ob=as.numeric(t(x)%*%Q%*%x/2+c%*%x)
}
# This is the Phase 2 procedure called by SPSOLQP.
if(ob == -Inf){
gap <- 0
print('The problem is unbounded.')
return()
}else{
obhis<-c(obhis,ob)
# Dual slack (reduced gradient); nonnegative slack means dual feasible,
# yielding a valid lower bound and hence a true duality gap.
comp<-Q%*%x+c-t(A)%*%y
if(min(comp)>=0){
zhis=c(zhis,ob-t(x)%*%comp)
lower<-zhis[iter+1]
gap<-(ob-lower)/(1+abs(ob))
obvalue<-ob
}else{
zhis=c(zhis,zhis[iter])
lower=zhis[iter+1]
gap=(obvalue-ob)/(1+abs(ob))
obvalue<-ob
}
}
if(iter>200){
# Diagnostic only: prints the current gap and tolerance; the loop is
# NOT terminated here.
print(c(gap,toler))
}
}
if(verbose) print('A (local) optimal solution is found.')
return(list(x=x,y=y,obhis=obhis))
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, -2.02729040070858e-05, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835835-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, -2.02729040070858e-05, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
## Creates a tidy data frame from the test and training files of the UCI
## HAR Dataset: one row per (activity, subject) pair containing the mean of
## every "mean"/"std" feature.
##
## The R working directory must be set to the dataset root directory (the
## folder containing activity_labels.txt and the "test"/"train"
## subdirectories), e.g. setwd("C:/dataCleaningProject/UCI HAR Dataset")
##
## Returns: data.frame with columns Activity.Label, Subject.Id, and the
## averaged feature columns.
run_analysis <- function()
{
  library(readr)
  library(plyr)

  #---- read activity labels (line i gives the name of activity id i) ----
  activities <- readLines("activity_labels.txt")

  #--- MERGE subject ids from train/test data (char, range: 1..30) ----
  # file.path() is used instead of "\\" so the code also runs on non-Windows
  # platforms (backslash is not a path separator on Unix filesystems)
  trainSubjects <- readLines(file.path("train", "subject_train.txt"))
  testSubjects <- readLines(file.path("test", "subject_test.txt"))
  subjects <- c(trainSubjects, testSubjects)

  #--- MERGE activity label ids from train/test data ----
  trainLabels <- readLines(file.path("train", "y_train.txt"))
  testLabels <- readLines(file.path("test", "y_test.txt"))
  labels <- c(trainLabels, testLabels)

  #--- MERGE feature data from train/test data (561 fixed-width columns) ----
  # The upper-case "X_train.txt"/"X_test.txt" matches the actual file names
  # in the UCI HAR Dataset; the previous lower-case "x_" only worked on
  # case-insensitive filesystems such as Windows.
  cw <- rep(16, 561)
  trainFeatures <- read_fwf(file = file.path("train", "X_train.txt"), fwf_widths(cw))
  testFeatures <- read_fwf(file = file.path("test", "X_test.txt"), fwf_widths(cw))
  features <- rbind(trainFeatures, testFeatures)

  #--- translate label ids to activity names (vectorized lookup) ----
  labelStrings <- activities[as.numeric(labels)]

  #---- read feature names and use them to label the columns of "features" ----
  # NOTE(review): each line of features.txt is "<index> <name>", so the column
  # names keep the numeric prefix; grep("std|mean") below matches either way.
  allFeatureNames <- readLines("features.txt")
  colnames(features) <- allFeatureNames

  #---- extract feature columns with "std" or "mean" in their name ----
  featureNames <- allFeatureNames[grep("std|mean", allFeatureNames)]
  finalFeatures <- features[, featureNames]

  #---- create the data frame from finalFeatures, subjects, and labels ----
  df <- data.frame(subjectIds = subjects, activityLabels = labelStrings, finalFeatures)

  #--- group by (subject, activity) and compute the mean of every feature ----
  # 3:ncol(df) generalizes the previously hard-coded 3:81 so the code keeps
  # working if the number of matched feature columns ever changes.
  df2 <- aggregate(df[, 3:ncol(df)], list(df[, 1], df[, 2]), mean)

  #---- put activity first, subject second, and give them readable names ----
  # Swapping the VALUES (not the names) of the first two columns, then
  # renaming, yields Activity.Label followed by Subject.Id.
  df2[, 1:2] <- df2[, 2:1]
  df2 <- rename(df2, c("Group.2" = "Subject.Id", "Group.1" = "Activity.Label"))
  return(df2)
}
| /run_analysis.R | no_license | GitRoland/UCI-HAR-Processing | R | false | false | 2,187 | r |
## Reads the UCI HAR train/test data and returns one tidy data frame holding
## the mean of every "mean"/"std" feature for each (activity, subject) pair.
## The R working directory must be the dataset root directory (containing
## activity_labels.txt and the "test"/"train" subdirectories).
##
## on the author's computer: setwd("C:/dataCleaningProject/UCI HAR Dataset")
run_analysis <- function()
{
library(readr)
library(plyr)
#---- read activity labels (line i gives the name of activity id i) ----
activities = readLines("activity_labels.txt")
#--- MERGE subject ids from train/test data (char, range: 1..30) ----
# NOTE(review): "\\" in paths is Windows-only; file.path("train", ...) would
# also work on Unix -- confirm the intended target platform.
trainSubjects = readLines("train\\subject_train.txt")
testSubjects = readLines("test\\subject_test.txt")
subjects = c(trainSubjects, testSubjects)
#--- MERGE activity label ids from train/test data ----
trainLabels = readLines("train\\y_train.txt")
testLabels = readLines("test\\y_test.txt")
labels = c(trainLabels, testLabels)
#--- MERGE feature data from train/test data (561 fixed-width 16-char cols) ----
cw = rep(16, 561)
trainFeatures = read_fwf(file="train\\x_train.txt", fwf_widths(cw))
testFeatures = read_fwf(file="test\\x_test.txt", fwf_widths(cw))
features = rbind(trainFeatures, testFeatures)
#--- translate labels from activity ids to strings ----
labelStrings = sapply(labels, function(d) activities[as.numeric(d)], USE.NAMES=FALSE)
#---- read feature names and use them to label the columns of "features" ----
# NOTE(review): features.txt lines presumably carry an "<index> <name>"
# prefix; the grep below matches "std"/"mean" either way -- confirm.
allFeatureNames = readLines("features.txt")
colnames(features) = allFeatureNames
#---- extract feature columns with "std" or "mean" in their name ---
featureNames = allFeatureNames[grep("std|mean", allFeatureNames)]
finalFeatures = features[,featureNames]
#---- create the DATA FRAME from finalFeatures, subjects, and labels ----
df = data.frame(subjectIds=subjects, activityLabels=labelStrings, finalFeatures)
#--- group by activity, subject and calc mean for all numeric cols ----
# NOTE(review): 3:81 hard-codes the feature count; 3:ncol(df) would be safer.
df2 = aggregate(df[,3:81], list(df[,1], df[,2]), mean)
#---- swap the VALUES of the first 2 cols, then give them readable names ----
df2[,1:2] = df2[,2:1]
df2 = rename(df2, c("Group.2"="Subject.Id", "Group.1"="Activity.Label"))
return(df2)
}
|
##' Make plot of likelihood contributions by fleet
##'
##' This style of plot was officially named a "Piner Plot" at the
##' CAPAM Selectivity Workshop, La Jolla March 2013. This is in
##' honor of Kevin Piner's contributions to interpreting likelihood
##' profiles. He's surely not the first person to make such a plot
##' but the name seems to have stuck.
##' @param summaryoutput List created by the function
##' [SSsummarize()].
##' @param plot Plot to active plot device?
##' @param print Print to PNG files?
##' @param component Which likelihood component to plot. Default is "Length_like".
##' @param main Title for plot. Should match component.
##' @param models Optional subset of the models described in
##' `summaryoutput`. Either "all" or a vector of numbers indicating
##' columns in summary tables.
##' @param fleets Optional vector of fleet numbers to include.
##' @param fleetnames Optional character vector of names for each fleet.
##' @param profile.string Character string used to find parameter over which the
##' profile was conducted. If `exact=FALSE`, this can be a substring of
##' one of the SS parameter labels found in the Report.sso file.
##' For instance, the default input 'R0'
##' matches the parameter 'SR_LN(R0)'. If `exact=TRUE`, then
##' profile.string needs to be an exact match to the parameter label.
##' @param profile.label Label for x-axis describing the parameter over which
##' the profile was conducted.
##' @param exact Should the `profile.string` have to match the parameter
##' label exactly, or is a substring OK.
##' @param ylab Label for y-axis. Default is "Change in -log-likelihood".
##' @param col Optional vector of colors for each line.
##' @param pch Optional vector of plot characters for the points.
##' @param lty Line type for the likelihood components.
##' @param lty.total Line type for the total likelihood.
##' @param lwd Line width for the likelihood components.
##' @param lwd.total Line width for the total likelihood.
##' @param cex Character expansion for the points representing the likelihood
##' components.
##' @param cex.total Character expansion for the points representing the total
##' likelihood.
##' @param xlim Range for x-axis. Change in likelihood is calculated relative to
##' values within this range.
##' @param ymax Maximum y-value. Default is 10\% greater than largest value
##' plotted.
##' @param xaxs The style of axis interval calculation to be used for the x-axis
##' (see ?par for more info)
##' @param yaxs The style of axis interval calculation to be used for the y-axis
##' (see ?par for more info).
##' @param type Line type (see ?plot for more info).
##' @param legend Include legend?
##' @param legendloc Location of legend (see ?legend for more info).
##' @param pwidth Width of plot
##' @param pheight Height of plot
##' @param punits Units for PNG file
##' @template res
##' @param ptsize Point size for PNG file
##' @param cex.main Character expansion for plot titles
##' @param plotdir Directory where PNG files will be written. by default it will
##' be the directory where the model was run.
##' @param add_cutoff Add dashed line at ~1.92 to indicate 95\% confidence interval
##' based on common cutoff of half of chi-squared of p=.95 with 1 degree of
##' freedom: `0.5*qchisq(p=cutoff_prob, df=1)`. The probability value
##' can be adjusted using the `cutoff_prob` below.
##' @param cutoff_prob Probability associated with `add_cutoff` above.
##' @param verbose Return updates of function progress to the R GUI? (Doesn't do
##' anything yet.)
##' @param fleetgroups Optional character vector, with length equal to
##' the number of declared fleets, where fleets with the same value are
##' aggregated
##' @param likelihood_type choice of "raw" or "raw_times_lambda" (the default)
##' determines whether or not likelihoods plotted are adjusted by lambdas
##' (likelihood weights)
##' @param minfraction Minimum change in likelihood (over range considered) as a
##' fraction of change in total likelihood for a component to be included in the
##' figure.
##' @references Kevin Piner says that he's not the originator of this idea so
##' Athol Whitten is going to add a reference here.
##' @author Ian Taylor, Kevin Piner, Jim Thorson
##' @export
PinerPlot <-
  function(summaryoutput,
           plot = TRUE, print = FALSE,
           component = "Length_like",
           main = "Changes in length-composition likelihoods by fleet",
           models = "all",
           fleets = "all",
           fleetnames = "default",
           profile.string = "R0",
           profile.label = expression(log(italic(R)[0])),
           exact = FALSE,
           ylab = "Change in -log-likelihood",
           col = "default",
           pch = "default",
           lty = 1, lty.total = 1,
           lwd = 2, lwd.total = 3,
           cex = 1, cex.total = 1.5,
           xlim = "default",
           ymax = "default",
           xaxs = "r", yaxs = "r",
           type = "o",
           legend = TRUE, legendloc = "topright",
           pwidth = 6.5, pheight = 5.0, punits = "in", res = 300, ptsize = 10, cex.main = 1,
           plotdir = NULL,
           add_cutoff = FALSE,
           cutoff_prob = 0.95,
           verbose = TRUE,
           fleetgroups = NULL,
           likelihood_type = "raw_times_lambda",
           minfraction = 0.01) {
    # This function is very similar to SSplotProfile, but shows fleet-specific
    # likelihoods for a single component rather than multiple components
    # aggregated across fleets.  Parameters are documented in the roxygen
    # block above.

    # subfunction to write png files into 'plotdir'
    pngfun <- function(file) {
      png(
        filename = file.path(plotdir, file), width = pwidth, height = pheight,
        units = punits, res = res, pointsize = ptsize
      )
    }
    # scalar test, so use short-circuiting && rather than elementwise &
    if (print && is.null(plotdir)) {
      stop("to print PNG files, you must supply a directory as 'plotdir'")
    }
    # get stuff from summary output into shorter variable names
    n <- summaryoutput[["n"]]
    lbf <- summaryoutput[["likelihoods_by_fleet"]]
    lbtg <- summaryoutput[["likelihoods_by_tag_group"]]
    if (is.null(lbf)) {
      stop(
        "Input 'summaryoutput' needs to be a list output from SSsummarize\n",
        "and have an element named 'likelihoods_by_fleet'."
      )
    }
    # count of fleets (first 3 columns of lbf are model, Label, and ALL)
    nfleets <- ncol(lbf) - 3
    pars <- summaryoutput[["pars"]]
    # names of fleets
    FleetNames <- summaryoutput[["FleetNames"]][[1]]
    # stop if lengths don't match
    if (length(FleetNames) != nfleets) {
      stop(
        "problem with FleetNames: length!= ", nfleets, "\n",
        paste(FleetNames, collapse = "\n")
      )
    }
    # stop if component input isn't found in table
    # grepl() is used instead of x[-grep(...)]: with no "_lambda" matches,
    # -integer(0) would have selected nothing and silently dropped all labels
    component_options <- c(
      unique(lbf[["Label"]][!grepl("_lambda", lbf[["Label"]])]),
      unique(lbtg[["Label"]][!grepl("_lambda", lbtg[["Label"]])])
    )
    if (!component %in% component_options) {
      stop(
        "input 'component' needs to be one of the following\n",
        paste(" ", component_options, "\n")
      )
    }
    # note lower-case 'fleetnames' is the one used below (either equal to the
    # vector from the model output, or input by the user)
    if (fleetnames[1] == "default") fleetnames <- FleetNames
    # check number of models to be plotted
    if (models[1] == "all") {
      models <- 1:n
    } else {
      if (!all(models %in% 1:n)) {
        stop("Input 'models' should be a vector of values from 1 to n=", n, " (for your inputs).\n")
      }
    }
    # check number of fleets to be plotted
    if (fleets[1] == "all") {
      fleets <- 1:nfleets
    } else {
      if (!all(fleets %in% 1:nfleets)) {
        stop("Input 'fleets' should be a vector of values from 1 to nfleets=", nfleets, " (for your inputs).\n")
      }
    }
    # find the parameter that the profile was over
    if (exact) {
      parnumber <- match(profile.string, pars[["Label"]])
    } else {
      parnumber <- grep(profile.string, pars[["Label"]])
    }
    if (length(parnumber) <= 0) {
      # note: stop() has no 'sep' argument, so the previous sep = "" was
      # simply pasted onto the message; arguments are concatenated as-is
      stop("No parameters matching profile.string='", profile.string, "'")
    }
    parlabel <- pars[["Label"]][parnumber]
    if (length(parlabel) > 1) {
      stop(
        "Multiple parameters matching profile.string='", profile.string, "':\n",
        paste(parlabel, collapse = ", "),
        "\nYou may need to use 'exact=TRUE'."
      )
    }
    # parameter values across the profiled models
    parvec <- as.numeric(pars[pars[["Label"]] == parlabel, models])
    cat("Parameter matching profile.string='", profile.string, "': '", parlabel, "'\n", sep = "")
    cat("Parameter values (after subsetting based on input 'models'):\n")
    print(parvec)
    # "default" acts as a sentinel; a numeric xlim never equals the string
    if (xlim[1] == "default") xlim <- range(parvec)
    # rearrange likelihoods to be in columns by type
    if (likelihood_type == "raw") {
      prof.table <- lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component), ]
    }
    if (likelihood_type == "raw_times_lambda") {
      prof.table <- lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component), ]
      # multiply raw likelihoods by the lambda rows, which are assumed to sit
      # directly above the corresponding likelihood rows in lbf (the -1 row
      # offset) -- TODO(review): confirm this layout holds for all SS versions
      prof.table[, -c(1:3)] <- prof.table[, -c(1:3)] * lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component) - 1, ][, -c(1:3)]
    }
    # Aggregate by input fleetgroups (a character vector, where two fleets
    # with the same value are aggregated)
    if (!is.null(fleetgroups)) {
      if (length(fleetgroups) != nfleets) {
        stop("fleetgroups, if specified, must have length equal to the number of declared fleets")
      }
      FleetNames <- unique(fleetgroups)
      prof.table_new <- data.frame(matrix(
        nrow = nrow(prof.table),
        ncol = 3 + length(unique(fleetgroups)),
        dimnames = list(
          rownames(prof.table),
          c(
            colnames(prof.table)[1:3],
            unique(fleetgroups)
          )
        )
      ))
      prof.table_new[, 1:3] <- prof.table[, 1:3]
      # sum likelihood columns within each fleet group, preserving the order
      # in which the groups first appear in 'fleetgroups'
      # (seq_len protects against the degenerate zero-row case)
      for (rowI in seq_len(nrow(prof.table))) {
        prof.table_new[rowI, -c(1:3)] <- tapply(as.numeric(prof.table[rowI, -c(1:3)]),
          FUN = sum,
          INDEX = as.numeric(factor(fleetgroups,
            levels = unique(fleetgroups)
          ))
        )
      }
      prof.table <- prof.table_new
      nfleets <- ncol(prof.table) - 3
    }
    # subtract minimum value from each likelihood component
    # (over requested parameter range)
    subset <- parvec >= xlim[1] & parvec <= xlim[2]
    for (icol in 3:ncol(prof.table)) {
      prof.table[, icol] <- prof.table[, icol] -
        min(prof.table[subset, icol], na.rm = TRUE)
    }
    # remove columns that have change less than minfraction change relative to total
    column.max <- apply(data.frame(prof.table[, -c(1:3)]), 2, max, na.rm = TRUE)
    change.fraction <- column.max / max(prof.table[, 3], na.rm = TRUE)
    include <- change.fraction >= minfraction
    cat("\nFleets-specific likelihoods showing max change as fraction of total change.\n",
      "To change which components are included, change input 'minfraction'.\n\n",
      sep = ""
    )
    print(data.frame(frac_change = round(change.fraction, 4), include = include))
    # subset values and reorder values
    # Note: first 3 columns are "model", "Label", and "ALL", and
    # are excluded from subsetting process
    # a future option to exclude the "ALL" column is possible if requested
    prof.table <- prof.table[order(parvec), ]
    prof.table <- prof.table[, c(1:3, 3 + intersect(
      (1:nfleets)[fleets],
      (1:nfleets)[include]
    ))]
    nfleets <- ncol(prof.table) - 3
    # replace column names with fleetnames unless "fleetgroup" is used
    if (is.null(fleetgroups)) {
      for (icol in 4:ncol(prof.table)) {
        if (names(prof.table)[icol] %in% FleetNames) {
          names(prof.table)[icol] <- fleetnames[which(FleetNames == names(prof.table)[icol])]
        }
        # data.frame() may have prefixed numeric fleet names with "X"
        if (names(prof.table)[icol] %in% paste0("X", FleetNames)) {
          names(prof.table)[icol] <- fleetnames[which(paste0("X", FleetNames) == names(prof.table)[icol])]
        }
      }
    }
    # set default y-limits ("default" sentinel again)
    if (ymax == "default") ymax <- 1.1 * max(prof.table[subset, -(1:2)], na.rm = TRUE)
    ylim <- c(0, ymax)
    parvec <- parvec[order(parvec)]
    # default colors and plot characters; the first entry of each style
    # vector is reserved for the total likelihood line
    nlines <- ncol(prof.table) - 2
    if (col[1] == "default") col <- rich.colors.short(nlines)
    if (pch[1] == "default") pch <- 1:nlines
    lwd <- c(lwd.total, rep(lwd, nlines - 1))
    cex <- c(cex.total, rep(cex, nlines - 1))
    lty <- c(lty.total, rep(lty, nlines - 1))
    # subfunction to draw the plot (called for the active device and/or PNG)
    plotprofile <- function() {
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = profile.label, ylab = ylab,
        yaxs = yaxs, xaxs = xaxs, main = main
      )
      abline(h = 0, col = "grey")
      # optionally add horizontal line at ~1.92 (or other value depending
      # on chosen probability)
      if (add_cutoff) {
        abline(h = 0.5 * qchisq(p = cutoff_prob, df = 1), lty = 2)
      }
      matplot(parvec, prof.table[, -(1:2)],
        type = type,
        pch = pch, col = col,
        cex = cex, lty = lty, lwd = lwd, add = TRUE
      )
      if (legend) {
        legend(legendloc,
          bty = "n", legend = names(prof.table)[-(1:2)],
          lwd = lwd, pt.cex = cex, lty = lty, pch = pch, col = col
        )
      }
      box()
    }
    if (plot) {
      plotprofile()
    }
    if (print) {
      pngfun("profile_plot_likelihood.png")
      plotprofile()
      dev.off()
    }
    # invisibly return the parameter values alongside the plotted table
    out <- data.frame(parvec = parvec, prof.table)
    names(out)[1] <- parlabel
    return(invisible(out))
  }
| /R/PinerPlot.R | no_license | cgrandin/r4ss | R | false | false | 13,466 | r | ##' Make plot of likelihood contributions by fleet
##'
##' This style of plot was officially named a "Piner Plot" at the
##' CAPAM Selectivity Workshop, La Jolla March 2013. This is in
##' honor of Kevin Piner's contributions to interpreting likelihood
##' profiles. He's surely not the first person to make such a plot
##' but the name seems to have stuck.
##' @param summaryoutput List created by the function
##' [SSsummarize()].
##' @param plot Plot to active plot device?
##' @param print Print to PNG files?
##' @param component Which likelihood component to plot. Default is "Length_like".
##' @param main Title for plot. Should match component.
##' @param models Optional subset of the models described in
##' `summaryoutput`. Either "all" or a vector of numbers indicating
##' columns in summary tables.
##' @param fleets Optional vector of fleet numbers to include.
##' @param fleetnames Optional character vector of names for each fleet.
##' @param profile.string Character string used to find parameter over which the
##' profile was conducted. If `exact=FALSE`, this can be a substring of
##' one of the SS parameter labels found in the Report.sso file.
##' For instance, the default input 'R0'
##' matches the parameter 'SR_LN(R0)'. If `exact=TRUE`, then
##' profile.string needs to be an exact match to the parameter label.
##' @param profile.label Label for x-axis describing the parameter over which
##' the profile was conducted.
##' @param exact Should the `profile.string` have to match the parameter
##' label exactly, or is a substring OK.
##' @param ylab Label for y-axis. Default is "Change in -log-likelihood".
##' @param col Optional vector of colors for each line.
##' @param pch Optional vector of plot characters for the points.
##' @param lty Line type for the likelihood components.
##' @param lty.total Line type for the total likelihood.
##' @param lwd Line width for the likelihood components.
##' @param lwd.total Line width for the total likelihood.
##' @param cex Character expansion for the points representing the likelihood
##' components.
##' @param cex.total Character expansion for the points representing the total
##' likelihood.
##' @param xlim Range for x-axis. Change in likelihood is calculated relative to
##' values within this range.
##' @param ymax Maximum y-value. Default is 10\% greater than largest value
##' plotted.
##' @param xaxs The style of axis interval calculation to be used for the x-axis
##' (see ?par for more info)
##' @param yaxs The style of axis interval calculation to be used for the y-axis
##' (see ?par for more info).
##' @param type Line type (see ?plot for more info).
##' @param legend Include legend?
##' @param legendloc Location of legend (see ?legend for more info).
##' @param pwidth Width of plot
##' @param pheight Height of plot
##' @param punits Units for PNG file
##' @template res
##' @param ptsize Point size for PNG file
##' @param cex.main Character expansion for plot titles
##' @param plotdir Directory where PNG files will be written. by default it will
##' be the directory where the model was run.
##' @param add_cutoff Add dashed line at ~1.92 to indicate 95\% confidence interval
##' based on common cutoff of half of chi-squared of p=.95 with 1 degree of
##' freedom: `0.5*qchisq(p=cutoff_prob, df=1)`. The probability value
##' can be adjusted using the `cutoff_prob` below.
##' @param cutoff_prob Probability associated with `add_cutoff` above.
##' @param verbose Return updates of function progress to the R GUI? (Doesn't do
##' anything yet.)
##' @param fleetgroups Optional character vector, with length equal to
##' the number of declared fleets, where fleets with the same value are
##' aggregated
##' @param likelihood_type choice of "raw" or "raw_times_lambda" (the default)
##' determines whether or not likelihoods plotted are adjusted by lambdas
##' (likelihood weights)
##' @param minfraction Minimum change in likelihood (over range considered) as a
##' fraction of change in total likelihood for a component to be included in the
##' figure.
##' @references Kevin Piner says that he's not the originator of this idea so
##' Athol Whitten is going to add a reference here.
##' @author Ian Taylor, Kevin Piner, Jim Thorson
##' @export
PinerPlot <-
function(summaryoutput,
plot = TRUE, print = FALSE,
component = "Length_like",
main = "Changes in length-composition likelihoods by fleet",
models = "all",
fleets = "all",
fleetnames = "default",
profile.string = "R0",
profile.label = expression(log(italic(R)[0])),
exact = FALSE,
ylab = "Change in -log-likelihood",
col = "default",
pch = "default",
lty = 1, lty.total = 1,
lwd = 2, lwd.total = 3,
cex = 1, cex.total = 1.5,
xlim = "default",
ymax = "default",
xaxs = "r", yaxs = "r",
type = "o",
legend = TRUE, legendloc = "topright",
pwidth = 6.5, pheight = 5.0, punits = "in", res = 300, ptsize = 10, cex.main = 1,
plotdir = NULL,
add_cutoff = FALSE,
cutoff_prob = 0.95,
verbose = TRUE,
fleetgroups = NULL,
likelihood_type = "raw_times_lambda",
minfraction = 0.01) {
# Fleet-specific likelihood-profile ("Piner") plot; parameters are documented
# in the roxygen block above.
# this function is very similar to SSplotProfile, but shows fleet-specific likelihoods
# for a single component rather than multiple components aggregated across fleets
# subfunction to write png files into 'plotdir'
pngfun <- function(file) {
png(
filename = file.path(plotdir, file), width = pwidth, height = pheight,
units = punits, res = res, pointsize = ptsize
)
}
# NOTE(review): scalar condition -- '&&' would be more idiomatic than '&'
if (print & is.null(plotdir)) stop("to print PNG files, you must supply a directory as 'plotdir'")
# get stuff from summary output into shorter variable names
n <- summaryoutput[["n"]]
lbf <- summaryoutput[["likelihoods_by_fleet"]]
lbtg <- summaryoutput[["likelihoods_by_tag_group"]]
if (is.null(lbf)) {
stop(
"Input 'summaryoutput' needs to be a list output from SSsummarize\n",
"and have an element named 'likelihoods_by_fleet'."
)
}
# count of fleets (first 3 columns of lbf are model, Label, ALL)
nfleets <- ncol(lbf) - 3
pars <- summaryoutput[["pars"]]
# names of fleets (taken from the first model in the summary)
FleetNames <- summaryoutput[["FleetNames"]][[1]]
# stop if lengths don't match
if (length(FleetNames) != nfleets) {
stop(
"problem with FleetNames: length!= ", nfleets, "\n",
paste(FleetNames, collapse = "\n")
)
}
# stop if component input isn't found in table
# NOTE(review): x[-grep(...)] selects NOTHING when there are no "_lambda"
# matches (negative indexing with integer(0)); grepl() would be safer --
# confirm lambda rows are always present in SSsummarize output
component_options <- c(
unique(lbf[["Label"]][-grep("_lambda", lbf[["Label"]])]),
unique(lbtg[["Label"]][-grep("_lambda", lbtg[["Label"]])])
)
if (!component %in% component_options) {
stop(
"input 'component' needs to be one of the following\n",
paste(" ", component_options, "\n")
)
}
if (fleetnames[1] == "default") fleetnames <- FleetNames # note lower-case value is the one used below (either equal to vector from replist, or input by user)
# check number of models to be plotted
if (models[1] == "all") {
models <- 1:n
} else {
if (!all(models %in% 1:n)) {
stop("Input 'models' should be a vector of values from 1 to n=", n, " (for your inputs).\n")
}
}
# check number of fleets to be plotted
if (fleets[1] == "all") {
fleets <- 1:nfleets
} else {
if (!all(fleets %in% 1:nfleets)) {
stop("Input 'fleets' should be a vector of values from 1 to nfleets=", nfleets, " (for your inputs).\n")
}
}
# find the parameter that the profile was over
if (exact) {
parnumber <- match(profile.string, pars[["Label"]])
} else {
parnumber <- grep(profile.string, pars[["Label"]])
}
if (length(parnumber) <= 0) {
# NOTE(review): stop() has no 'sep' argument; the sep = "" below is simply
# pasted onto the end of the message (harmlessly, since it is empty)
stop("No parameters matching profile.string='", profile.string, "'", sep = "")
}
parlabel <- pars[["Label"]][parnumber]
if (length(parlabel) > 1) {
stop("Multiple parameters matching profile.string='", profile.string, "':\n",
paste(parlabel, collapse = ", "),
"\nYou may need to use 'exact=TRUE'.",
sep = ""
)
}
# parameter values across the profiled models
parvec <- as.numeric(pars[pars[["Label"]] == parlabel, models])
cat("Parameter matching profile.string='", profile.string, "': '", parlabel, "'\n", sep = "")
cat("Parameter values (after subsetting based on input 'models'):\n")
print(parvec)
# "default" acts as a sentinel; a numeric xlim never equals the string
if (xlim[1] == "default") xlim <- range(parvec)
# rearrange likelihoods to be in columns by type
if (likelihood_type == "raw") prof.table <- lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component), ]
if (likelihood_type == "raw_times_lambda") {
prof.table <- lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component), ]
# multiplies by the lambda rows, assumed to sit directly above the
# corresponding likelihood rows (the -1 offset) -- TODO(review): confirm
prof.table[, -c(1:3)] <- prof.table[, -c(1:3)] * lbf[which(lbf[["model"]] %in% models & lbf[["Label"]] == component) - 1, ][, -c(1:3)]
}
# Aggregate by input fleetgroups (a character vector, where two fleets with the same value are aggregated)
if (!is.null(fleetgroups)) {
if (length(fleetgroups) != nfleets) stop("fleetgroups, if specified, must have length equal to the number of declared fleets")
FleetNames <- unique(fleetgroups)
prof.table_new <- data.frame(matrix(
nrow = nrow(prof.table),
ncol = 3 + length(unique(fleetgroups)),
dimnames = list(
rownames(prof.table),
c(
colnames(prof.table)[1:3],
unique(fleetgroups)
)
)
))
prof.table_new[, 1:3] <- prof.table[, 1:3]
# sum likelihood columns within each fleet group, preserving the order in
# which the groups first appear in 'fleetgroups'
# NOTE(review): prefer seq_len(nrow(prof.table)) in case of zero rows
for (rowI in 1:nrow(prof.table)) {
prof.table_new[rowI, -c(1:3)] <- tapply(as.numeric(prof.table[rowI, -c(1:3)]),
FUN = sum,
INDEX = as.numeric(factor(fleetgroups,
levels = unique(fleetgroups)
))
)
}
prof.table <- prof.table_new
nfleets <- ncol(prof.table) - 3
}
# subtract minimum value from each likelihood component (over requested parameter range)
subset <- parvec >= xlim[1] & parvec <= xlim[2]
for (icol in 3:ncol(prof.table)) {
prof.table[, icol] <- prof.table[, icol] -
min(prof.table[subset, icol], na.rm = TRUE)
}
# remove columns that have change less than minfraction change relative to total
column.max <- apply(data.frame(prof.table[, -c(1:3)]), 2, max, na.rm = TRUE)
change.fraction <- column.max / max(prof.table[, 3], na.rm = TRUE)
include <- change.fraction >= minfraction
cat("\nFleets-specific likelihoods showing max change as fraction of total change.\n",
"To change which components are included, change input 'minfraction'.\n\n",
sep = ""
)
print(data.frame(frac_change = round(change.fraction, 4), include = include))
# subset values and reorder values
# Note: first 3 columns are "model", "Label", and "ALL", and
# are excluded from subsetting process
# a future option to exclude the "ALL" column is possible if requested
prof.table <- prof.table[order(parvec), ]
prof.table <- prof.table[, c(1:3, 3 + intersect(
(1:nfleets)[fleets],
(1:nfleets)[include]
))]
nfleets <- ncol(prof.table) - 3
# replace column names with fleetnames unless "fleetgroup" is used
if (is.null(fleetgroups)) {
for (icol in 4:ncol(prof.table)) {
if (names(prof.table)[icol] %in% FleetNames) {
names(prof.table)[icol] <- fleetnames[which(FleetNames == names(prof.table)[icol])]
}
# data.frame() may have prefixed numeric fleet names with "X"
if (names(prof.table)[icol] %in% paste("X", FleetNames, sep = "")) {
names(prof.table)[icol] <- fleetnames[which(paste("X", FleetNames, sep = "") == names(prof.table)[icol])]
}
}
}
# set default y-limits ("default" sentinel again)
if (ymax == "default") ymax <- 1.1 * max(prof.table[subset, -(1:2)], na.rm = TRUE)
ylim <- c(0, ymax)
parvec <- parvec[order(parvec)]
# default colors and plot characters; the first entry of each style vector
# is reserved for the total likelihood line
nlines <- ncol(prof.table) - 2
if (col[1] == "default") col <- rich.colors.short(nlines)
if (pch[1] == "default") pch <- 1:nlines
lwd <- c(lwd.total, rep(lwd, nlines - 1))
cex <- c(cex.total, rep(cex, nlines - 1))
lty <- c(lty.total, rep(lty, nlines - 1))
# return(prof.table)
# subfunction to draw the plot (called for the active device and/or PNG)
plotprofile <- function() {
plot(0,
type = "n", xlim = xlim, ylim = ylim, xlab = profile.label, ylab = ylab,
yaxs = yaxs, xaxs = xaxs, main = main
)
abline(h = 0, col = "grey")
# optionally add horizontal line at ~1.92 (or other value depending
# on chosen probability)
if (add_cutoff) {
abline(h = 0.5 * qchisq(p = cutoff_prob, df = 1), lty = 2)
}
matplot(parvec, prof.table[, -(1:2)],
type = type,
pch = pch, col = col,
cex = cex, lty = lty, lwd = lwd, add = TRUE
)
if (legend) {
legend(legendloc,
bty = "n", legend = names(prof.table)[-(1:2)],
lwd = lwd, pt.cex = cex, lty = lty, pch = pch, col = col
)
}
box()
}
if (plot) {
plotprofile()
}
if (print) {
pngfun("profile_plot_likelihood.png")
plotprofile()
dev.off()
}
# invisibly return the parameter values alongside the plotted table
out <- data.frame(parvec = parvec, prof.table)
names(out)[1] <- parlabel
return(invisible(out))
}
|
library('ggplot2')
library('RColorBrewer')
library('dplyr')
library('readr')
library('data.table')
library('tibble')
library('ggrepel')
library('ggridges')
library('ggExtra')
library('ggforce')
library('viridis')
library('magrittr')
library('lubridate')
library('timeDate')
library('tseries')
library('forecast')
# tibble is used to read the .csv file into the database
air_visits <-as.tibble(read.csv(file="air_visit_data.csv",head=TRUE,sep=","))
air_reserve <- as.tibble(read.csv(file="air_reserve.csv",head=TRUE,sep=","))
hpg_reserve <-as.tibble(read.csv(file="hpg_reserve.csv",head=TRUE,sep=","))
air_store <-as.tibble( read.csv(file="air_store_info.csv",head=TRUE,sep=","))
hpg_store <- as.tibble(read.csv(file="hpg_store_info.csv",head=TRUE,sep=","))
holidays <- as.tibble(read.csv(file="date_info.csv",head=TRUE,sep=","))
store_ids <-as.tibble( read.csv(file="store_id_relation.csv",head=TRUE,sep=","))
test <-as.tibble( read.csv(file="sample_submission.csv",head=TRUE,sep=","))
#using mutate to append the date in year month and day format
air_visits <- mutate(air_visits,visit_date = ymd(visit_date))
air_reserve <- dplyr::mutate(air_reserve,visit_datetime = ymd_hms(visit_datetime),
reserve_datetime = ymd_hms(reserve_datetime))
hpg_reserve <-dplyr::mutate(hpg_reserve,visit_datetime = ymd_hms(visit_datetime),
reserve_datetime = ymd_hms(reserve_datetime))
air_store <-dplyr::mutate(air_store , air_genre_name = as.factor(air_genre_name),
air_area_name = as.factor(air_area_name))
hpg_store <-dplyr::mutate( hpg_store, hpg_genre_name = as.factor(hpg_genre_name),
hpg_area_name = as.factor(hpg_area_name))
holidays <-dplyr::mutate(holidays, holiday_flg = as.logical(holiday_flg),
date = ymd(calendar_date))
#AirVisits
summary(air_visits)
glimpse(air_visits)
# Number of distinct restaurants in the visit data.
# Bug fix: the original nrow( distinct(air_store_id), air_visits) was broken --
# distinct() takes the data frame first and nrow() takes a single argument.
nrow(distinct(air_visits, air_store_id))
#AirReserve
summary(air_reserve)
glimpse(air_reserve)
nrow(distinct(air_reserve, air_store_id))
# HPG Reserve
summary(hpg_reserve)
glimpse(hpg_reserve)
nrow(distinct(hpg_reserve, hpg_store_id))
#Analysis_AirVisits
# p1: total visitors per day across all restaurants (time series).
p1 <- air_visits %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(visitors)) %>%
ggplot(aes(visit_date,all_visitors)) +
geom_line(col = "Orange") +
labs(y = "Total Number of Visitors", x = "Date: Jan 2016 - April 2017")
plot (p1)
# p2: distribution of per-record visitor counts on a log10 x-axis;
# the vertical line marks 20 visitors.
p2 <- air_visits %>%
ggplot(aes(visitors)) +
geom_vline(xintercept = 20, color = "black") +
geom_histogram(fill = "blue", bins = 30) +
scale_x_log10()
plot (p2)
# p3: mean visitors by day of the week.
p3 <- air_visits %>%
mutate(wday = lubridate::wday(visit_date, label = TRUE,abbr = FALSE)) %>%
group_by(wday) %>%
summarise(visits = mean(visitors)) %>%
ggplot(aes(wday, visits, fill = wday)) +
geom_col() +
theme(legend.position = "none", axis.text.x = element_text(angle=45, hjust=1, vjust=0.9)) +
labs(x = "Day of the week", y = "Mean visitors")
plot (p3)
# p4: mean visitors by calendar month.
p4 <- air_visits %>%
mutate(month = lubridate::month(visit_date, label = TRUE)) %>%
group_by(month) %>%
summarise(visits = mean(visitors)) %>%
ggplot(aes(month, visits, fill = month)) +
geom_col() +
theme(legend.position = "none") +
labs(x = "Month", y = "Average visitors")
plot (p4)
#Analysis_AirReserve
# Derive date/hour/weekday components plus the reservation-to-visit lead time
# (diff_hour / diff_day) from the two datetime columns.
res <- air_reserve %>%
mutate(reserve_date = date(reserve_datetime),
reserve_hour = hour(reserve_datetime),
reserve_wday = wday(reserve_datetime, label = TRUE),
visit_date = date(visit_datetime),
visit_hour = hour(visit_datetime),
visit_wday = wday(visit_datetime, label = TRUE),
diff_hour = time_length(visit_datetime - reserve_datetime, unit = "hour"),
diff_day = time_length(visit_datetime - reserve_datetime, unit = "day")
)
# p5: total reserved visitors per visit date.
p5 <- res %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_date, all_visitors)) +
geom_line(color="orange") +
labs(x = "'air' visit date", y= "number of reservation")
plot(p5)
# p6: total reserved visitors by hour of the visit.
p6 <- res %>%
group_by(visit_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_hour, all_visitors)) +
geom_col(fill = "red")+
labs(x = "Visit hours",y = "number of reservation")
plot(p6)
# p7: lead-time distribution, restricted to reservations made < 5 days ahead.
p7 <- res %>%
filter(diff_hour < 24*5) %>%
group_by(diff_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(diff_hour, all_visitors)) +
geom_col(fill = "red") +
labs(x = "Time from reservation to visit [hours]",y = "number of reservation")
plot(p7)
# Inspect the 5 reservations with the longest lead times.
res %>%
arrange(desc(diff_day)) %>%
select(reserve_datetime, visit_datetime, diff_day, air_store_id) %>%
head(5)
#Analysis_HPGReserve
# Derive date/hour components plus the reservation-to-visit lead time.
Hres<- hpg_reserve %>%
mutate(reserve_date = date(reserve_datetime),
reserve_hour = hour(reserve_datetime),
visit_date = date(visit_datetime),
visit_hour = hour(visit_datetime),
diff_hour = time_length(visit_datetime - reserve_datetime, unit = "hour"),
diff_day = time_length(visit_datetime - reserve_datetime, unit = "day")
)
# p8: total reserved visitors per visit date.
# Fix: the original chained two labs(x = ...) calls; the first one was
# silently overridden by the second, so it has been removed.
p8 <- Hres %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_date, all_visitors)) +
geom_line(color="orange") +
labs(x = "'HPG' visit date", y= "number of reservation")
plot(p8)
# p9: total reserved visitors by hour of the visit.
p9 <- Hres %>%
group_by(visit_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_hour, all_visitors)) +
geom_col(fill = "red")+
labs(x = "Visit hours",y = "number of reservation")
plot(p9)
# p10: lead-time distribution, restricted to reservations made < 5 days ahead.
p10 <- Hres %>%
filter(diff_hour < 24*5) %>%
group_by(diff_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(diff_hour, all_visitors)) +
geom_col(fill = "red") +
labs(x = "Time from reservation to visit" ,y = "number of reservation")
plot(p10)
## Reservations vs Visits
# NOTE(review): 'all_reserve' is never defined in this script; presumably it is
# the air/hpg reservations joined with air_visits -- confirm it is built upstream.
# Fix: the plot is now assigned to p11 so the ggMarginal() call below, which
# previously referenced an undefined p11, actually works.
p11 <- all_reserve %>%
filter(reserve_visitors < 120) %>%
ggplot(aes(reserve_visitors, visitors)) +
geom_point(color = "black", alpha = 0.5) +
geom_abline(slope = 1, intercept = 0, color = "red") +
geom_smooth(method = "lm", color = "blue")
ggMarginal(p11, type="histogram", fill = "#FF9999", bins=50)
| /1. Data Visualization and analysis.r | no_license | collectionofcells/Forecasting-Visits | R | false | false | 6,336 | r | library('ggplot2')
library('RColorBrewer')
library('dplyr')
library('readr')
library('data.table')
library('tibble')
library('ggrepel')
library('ggridges')
library('ggExtra')
library('ggforce')
library('viridis')
library('magrittr')
library('lubridate')
library('timeDate')
library('tseries')
library('forecast')
# Read each .csv into a tibble.
# as_tibble() replaces the deprecated as.tibble(), and 'header' is spelled out
# in full (the original 'head =' relied on partial argument matching).
air_visits <- as_tibble(read.csv(file = "air_visit_data.csv", header = TRUE, sep = ","))
air_reserve <- as_tibble(read.csv(file = "air_reserve.csv", header = TRUE, sep = ","))
hpg_reserve <- as_tibble(read.csv(file = "hpg_reserve.csv", header = TRUE, sep = ","))
air_store <- as_tibble(read.csv(file = "air_store_info.csv", header = TRUE, sep = ","))
hpg_store <- as_tibble(read.csv(file = "hpg_store_info.csv", header = TRUE, sep = ","))
holidays <- as_tibble(read.csv(file = "date_info.csv", header = TRUE, sep = ","))
store_ids <- as_tibble(read.csv(file = "store_id_relation.csv", header = TRUE, sep = ","))
test <- as_tibble(read.csv(file = "sample_submission.csv", header = TRUE, sep = ","))
#using mutate to append the date in year month and day format
air_visits <- mutate(air_visits,visit_date = ymd(visit_date))
air_reserve <- dplyr::mutate(air_reserve,visit_datetime = ymd_hms(visit_datetime),
reserve_datetime = ymd_hms(reserve_datetime))
hpg_reserve <-dplyr::mutate(hpg_reserve,visit_datetime = ymd_hms(visit_datetime),
reserve_datetime = ymd_hms(reserve_datetime))
air_store <-dplyr::mutate(air_store , air_genre_name = as.factor(air_genre_name),
air_area_name = as.factor(air_area_name))
hpg_store <-dplyr::mutate( hpg_store, hpg_genre_name = as.factor(hpg_genre_name),
hpg_area_name = as.factor(hpg_area_name))
holidays <-dplyr::mutate(holidays, holiday_flg = as.logical(holiday_flg),
date = ymd(calendar_date))
#AirVisits
summary(air_visits)
glimpse(air_visits)
# Number of distinct restaurants in the visit data.
# Bug fix: the original nrow( distinct(air_store_id), air_visits) was broken --
# distinct() takes the data frame first and nrow() takes a single argument.
nrow(distinct(air_visits, air_store_id))
#AirReserve
summary(air_reserve)
glimpse(air_reserve)
nrow(distinct(air_reserve, air_store_id))
# HPG Reserve
summary(hpg_reserve)
glimpse(hpg_reserve)
nrow(distinct(hpg_reserve, hpg_store_id))
#Analysis_AirVisits
p1 <- air_visits %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(visitors)) %>%
ggplot(aes(visit_date,all_visitors)) +
geom_line(col = "Orange") +
labs(y = "Total Number of Visitors", x = "Date: Jan 2016 - April 2017")
plot (p1)
p2 <- air_visits %>%
ggplot(aes(visitors)) +
geom_vline(xintercept = 20, color = "black") +
geom_histogram(fill = "blue", bins = 30) +
scale_x_log10()
plot (p2)
p3 <- air_visits %>%
mutate(wday = lubridate::wday(visit_date, label = TRUE,abbr = FALSE)) %>%
group_by(wday) %>%
summarise(visits = mean(visitors)) %>%
ggplot(aes(wday, visits, fill = wday)) +
geom_col() +
theme(legend.position = "none", axis.text.x = element_text(angle=45, hjust=1, vjust=0.9)) +
labs(x = "Day of the week", y = "Mean visitors")
plot (p3)
p4 <- air_visits %>%
mutate(month = lubridate::month(visit_date, label = TRUE)) %>%
group_by(month) %>%
summarise(visits = mean(visitors)) %>%
ggplot(aes(month, visits, fill = month)) +
geom_col() +
theme(legend.position = "none") +
labs(x = "Month", y = "Average visitors")
plot (p4)
#Analysis_AirReserve
res <- air_reserve %>%
mutate(reserve_date = date(reserve_datetime),
reserve_hour = hour(reserve_datetime),
reserve_wday = wday(reserve_datetime, label = TRUE),
visit_date = date(visit_datetime),
visit_hour = hour(visit_datetime),
visit_wday = wday(visit_datetime, label = TRUE),
diff_hour = time_length(visit_datetime - reserve_datetime, unit = "hour"),
diff_day = time_length(visit_datetime - reserve_datetime, unit = "day")
)
p5 <- res %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_date, all_visitors)) +
geom_line(color="orange") +
labs(x = "'air' visit date", y= "number of reservation")
plot(p5)
p6 <- res %>%
group_by(visit_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_hour, all_visitors)) +
geom_col(fill = "red")+
labs(x = "Visit hours",y = "number of reservation")
plot(p6)
p7 <- res %>%
filter(diff_hour < 24*5) %>%
group_by(diff_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(diff_hour, all_visitors)) +
geom_col(fill = "red") +
labs(x = "Time from reservation to visit [hours]",y = "number of reservation")
plot(p7)
res %>%
arrange(desc(diff_day)) %>%
select(reserve_datetime, visit_datetime, diff_day, air_store_id) %>%
head(5)
#Analysis_HPGReserve
Hres<- hpg_reserve %>%
mutate(reserve_date = date(reserve_datetime),
reserve_hour = hour(reserve_datetime),
visit_date = date(visit_datetime),
visit_hour = hour(visit_datetime),
diff_hour = time_length(visit_datetime - reserve_datetime, unit = "hour"),
diff_day = time_length(visit_datetime - reserve_datetime, unit = "day")
)
# p8: total reserved visitors per visit date.
# Fix: the original chained two labs(x = ...) calls; the first one was
# silently overridden by the second, so it has been removed.
p8 <- Hres %>%
group_by(visit_date) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_date, all_visitors)) +
geom_line(color="orange") +
labs(x = "'HPG' visit date", y= "number of reservation")
plot(p8)
p9 <- Hres %>%
group_by(visit_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(visit_hour, all_visitors)) +
geom_col(fill = "red")+
labs(x = "Visit hours",y = "number of reservation")
plot(p9)
p10 <- Hres %>%
filter(diff_hour < 24*5) %>%
group_by(diff_hour) %>%
summarise(all_visitors = sum(reserve_visitors)) %>%
ggplot(aes(diff_hour, all_visitors)) +
geom_col(fill = "red") +
labs(x = "Time from reservation to visit" ,y = "number of reservation")
plot(p10)
## Reservations vs Visits
# NOTE(review): 'all_reserve' is never defined in this script; presumably it is
# the air/hpg reservations joined with air_visits -- confirm it is built upstream.
# Fix: the plot is now assigned to p11 so the ggMarginal() call below, which
# previously referenced an undefined p11, actually works.
p11 <- all_reserve %>%
filter(reserve_visitors < 120) %>%
ggplot(aes(reserve_visitors, visitors)) +
geom_point(color = "black", alpha = 0.5) +
geom_abline(slope = 1, intercept = 0, color = "red") +
geom_smooth(method = "lm", color = "blue")
ggMarginal(p11, type="histogram", fill = "#FF9999", bins=50)
|
# tar_exist_script() must report FALSE before any target script exists and
# TRUE once tar_script() has written the default _targets.R file.
# tar_test() runs the body inside a fresh temporary directory.
tar_test("tar_exist_script()", {
expect_false(tar_exist_script())
tar_script()
expect_true(tar_exist_script())
})
# A custom 'script' argument should be honored by tar_exist_script() and
# tar_script() without touching the _targets.yaml configuration or creating
# the default script/store paths; tar_config_set() is what writes the yaml.
tar_test("custom script and store args", {
skip_cran()
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
expect_false(tar_exist_script(script = "example/script.R"))
tar_script(tar_target(x, 1), script = "example/script.R")
expect_true(tar_exist_script(script = "example/script.R"))
# Using a custom path must not persist any configuration...
expect_false(file.exists("_targets.yaml"))
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
# ...and must not create the default script/store locations.
expect_false(file.exists(path_script_default()))
expect_false(file.exists(path_store_default()))
expect_true(file.exists("example/script.R"))
# Only an explicit tar_config_set() writes _targets.yaml.
tar_config_set(script = "x")
expect_equal(tar_config_get("script"), "x")
expect_true(file.exists("_targets.yaml"))
})
| /tests/testthat/test-tar_exist_script.R | permissive | ropensci/targets | R | false | false | 930 | r | tar_test("tar_exist_script()", {
expect_false(tar_exist_script())
tar_script()
expect_true(tar_exist_script())
})
tar_test("custom script and store args", {
skip_cran()
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
expect_false(tar_exist_script(script = "example/script.R"))
tar_script(tar_target(x, 1), script = "example/script.R")
expect_true(tar_exist_script(script = "example/script.R"))
expect_false(file.exists("_targets.yaml"))
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
expect_false(file.exists(path_script_default()))
expect_false(file.exists(path_store_default()))
expect_true(file.exists("example/script.R"))
tar_config_set(script = "x")
expect_equal(tar_config_get("script"), "x")
expect_true(file.exists("_targets.yaml"))
})
|
#' This function is used to read in data from the initial conditions file.
#'
#' @param init Character string giving the connection of the initial conditions netcdf file.
#' The filename usually contains \code{init} and ends in \code{.nc}.
#' @param vars Vector of character strings giving the variables to extract from the
#' netcdf file.
#'
#' @family load functions
#' @export
#' @return A list of dataframes with columns atoutput, polygon and layer (if present).
#'
#' @author Alexander Keth
#' @examples
#' d <- system.file("extdata", "setas-model-new-trunk", package = "atlantistools")
#' init <- file.path(d, "INIT_VMPA_Jan2015.nc")
#'
#' load_init(init, vars = "Planktiv_S_Fish1_Nums")
#' load_init(init, vars = c("Planktiv_S_Fish2_ResN", "Planktiv_S_Fish3_ResN"))
#' load_init(init, vars = "Megazoobenthos_N")
#'
#' \dontrun{
#' dir <- "C:/Users/siebo/Documents/Atlantis/BalticAtlantis/run_files_73days_Nej"
#' init <- file.path(dir, "new_init_Baltic_05Dec2015_v2.nc")
#' vars <- "Sprat1_ResN"
#' load_init(init = init, vars = vars)
#' }
load_init <- function(init, vars) {
  # Open the initial-conditions file and guarantee it is closed again.
  read_nc <- RNetCDF::open.nc(con = init)
  on.exit(RNetCDF::close.nc(read_nc))
  # Extract ncdf dimensions and sanity-check that this really is an
  # initial-conditions file (exactly one timestep).
  n_timesteps <- RNetCDF::dim.inq.nc(read_nc, "t")$length
  if (n_timesteps != 1) stop("More than 1 timestep! init was not an initial conditions file.")
  n_boxes <- RNetCDF::dim.inq.nc(read_nc, "b")$length
  n_layers <- RNetCDF::dim.inq.nc(read_nc, "z")$length
  # Layer structure: water-column layers per box and the resulting layer ids
  # (NA for non-existing layers, maximum id = sediment layer).
  num_layers <- get_layers(init = init)
  num_layers[is.na(num_layers)] <- 0
  layerid <- get_layerid(num_layers = num_layers, max_layer = n_layers, n_boxes = n_boxes)
  # Validate the requested variable names against those present in the file.
  # vapply (instead of sapply) guarantees a character vector even for 0 vars.
  var_names_ncdf <- vapply(seq_len(RNetCDF::file.inq.nc(read_nc)$nvars - 1),
                           function(x) RNetCDF::var.inq.nc(read_nc, x)$name,
                           character(1))
  wrong_var <- vars[!vars %in% var_names_ncdf]
  if (length(wrong_var) >= 1) stop(paste("Variable", paste(wrong_var, collapse = " "), "not found in init file."))
  at_data <- lapply(vars, RNetCDF::var.get.nc, ncfile = read_nc)
  # 2d variables: one value per box and layer.
  convert2d <- function(mat, layerid, n_boxes) {
    if (!(is.matrix(mat) & length(dim(mat)) == 2)) {
      stop("Wrong data format. Variable is not stored as 2d data in initial file.")
    }
    data.frame(atoutput = as.vector(mat),
               polygon = rep(0:(n_boxes - 1), each = length(layerid) / n_boxes),
               layer = layerid, stringsAsFactors = FALSE)
  }
  # 1d variables: one value per box.
  convert1d <- function(vec, n_boxes) {
    if (!(is.array(vec) & length(vec) == n_boxes)) {
      stop("Wrong data format. Variable is not stored as 1d vector in initial file.")
    }
    data.frame(atoutput = as.vector(vec),
               polygon = 0:(n_boxes - 1), stringsAsFactors = FALSE)
  }
  at_dim <- vapply(at_data, function(x) length(dim(x)), integer(1))
  # All requested variables must share the same dimensionality.
  if (all(at_dim == 2)) df_list <- lapply(at_data, convert2d, layerid, n_boxes)
  if (all(at_dim == 1)) df_list <- lapply(at_data, convert1d, n_boxes)
  if (length(unique(at_dim)) > 1) stop("Vars are stored in different dimensions. Please, either pick only 2d or 1d data.")
  # Data extracted for every variable? (Typo "Starnge" in the message fixed.)
  if (length(vars) != length(df_list)) stop("Strange ncdf extraction. Please contact package development Team.")
  return(df_list)
}
get_layers <- function(init) {
  # Read the per-box number of water-column layers from the init file.
  read_nc <- RNetCDF::open.nc(con = init)
  on.exit(RNetCDF::close.nc(read_nc))
  num_layers <- RNetCDF::var.get.nc(ncfile = read_nc, variable = "numlayers")
  if (length(dim(num_layers)) == 2) {
    # Bug fix: the original tested length(unique) -- the length of the
    # *function object* `unique`, which is always 1 -- so the sanity check
    # could never fail. Each row (box) must hold a single unique layer count
    # before the matrix is collapsed to its first column.
    if (all(apply(num_layers, MARGIN = 1, FUN = function(x) length(unique(x))) == 1)) {
      num_layers <- num_layers[, 1]
    } else {
      stop("Different numbers of layers per Box. This nc-structure is not supported.")
    }
  }
  return(num_layers)
}
# Utility function used to extract the layerids from the number of layers per box.
# Layer ids are given in 0:num_layers-1 with 0 being the layer closest to the sediment.
# The largest id per box is the surface layer. Non existing layers (per box) are NA.
# The sediment layer id is the maximum number of layers. E.g. in case there are 5 water
# column layers the sediment layer has the id 6.
get_layerid <- function(num_layers, max_layer, n_boxes) {
  # For each box build the water-column ids 0 .. (num_layers - 1) -- 0 is the
  # layer closest to the sediment -- then pad with NA up to max_layer - 1
  # entries so every box occupies the same number of slots.
  wc <- lapply(num_layers, function(nl) {
    c(cumsum(rep(1, times = nl)) - 1, rep(NA, times = max_layer - nl - 1))
  })
  if (length(unique(sapply(wc, length))) != 1) stop("Layers inconsistent. Contact package development Team.")
  # Every box gets the sediment layer id (max_layer - 1) appended last.
  unlist(lapply(wc, function(x) c(x, max_layer - 1)))
}
# Remove min pools (0 and almost 0) from a datframe.
# Drops rows whose 'col' value equals one of the near-zero sentinel values in
# 'min_pools'. Uses standard-evaluation dplyr::filter_() with a lazyeval
# expression so the column can be supplied as a string.
# NOTE(review): filter_() is soft-deprecated in newer dplyr -- confirm the
# package's dplyr version before modernising to filter(.data[[col]] ...).
remove_min_pools <- function(df, col = "atoutput", min_pools = c(0, 1e-08, 1e-16)) {
expr <- lazyeval::interp(quote(!(x %in% y)), x = as.name(col), y = min_pools)
df %>% dplyr::filter_(expr)
}
# Remove boundary boxes from a dataframe. bboxes is the vector of box ids (starting with 0)
remove_bboxes <- function(df, bboxes) {
  # A polygon column is required to know which box each row belongs to.
  if (!("polygon" %in% names(df))) stop("No column polygon in df. Cannot remove boundary boxes.")
  # Keep only rows whose box id is not a boundary box.
  dplyr::filter_(df, ~!(polygon %in% bboxes))
}
| /R/load-init.R | no_license | jporobicg/atlantistools | R | false | false | 5,251 | r | #' This function is used to read in data from the initial conditions file.
#'
#' @param init Character string giving the connection of the initial conditions netcdf file.
#' The filename usually contains \code{init} and ends in \code{.nc}.
#' @param vars Vector of character strings giving the variables to extract from the
#' netcdf file.
#'
#' @family load functions
#' @export
#' @return A list of dataframes with columns atoutput, polygon and layer (if present).
#'
#' @author Alexander Keth
#' @examples
#' d <- system.file("extdata", "setas-model-new-trunk", package = "atlantistools")
#' init <- file.path(d, "INIT_VMPA_Jan2015.nc")
#'
#' load_init(init, vars = "Planktiv_S_Fish1_Nums")
#' load_init(init, vars = c("Planktiv_S_Fish2_ResN", "Planktiv_S_Fish3_ResN"))
#' load_init(init, vars = "Megazoobenthos_N")
#'
#' \dontrun{
#' dir <- "C:/Users/siebo/Documents/Atlantis/BalticAtlantis/run_files_73days_Nej"
#' init <- file.path(dir, "new_init_Baltic_05Dec2015_v2.nc")
#' vars <- "Sprat1_ResN"
#' load_init(init = init, vars = vars)
#' }
load_init <- function(init, vars) {
# dummy
read_nc <- RNetCDF::open.nc(con = init)
on.exit(RNetCDF::close.nc(read_nc))
# Extract ncdf dimensions!
n_timesteps <- RNetCDF::dim.inq.nc(read_nc, 't')$length
if (n_timesteps != 1) stop("More than 1 timestep! init was not an initial conditions file.")
n_boxes <- RNetCDF::dim.inq.nc(read_nc, 'b')$length
n_layers <- RNetCDF::dim.inq.nc(read_nc, 'z')$length
num_layers <- get_layers(init = init)
num_layers[is.na(num_layers)] <- 0
layerid <- get_layerid(num_layers = num_layers, max_layer = n_layers, n_boxes = n_boxes)
var_names_ncdf <- sapply(seq_len(RNetCDF::file.inq.nc(read_nc)$nvars - 1),
function(x) RNetCDF::var.inq.nc(read_nc, x)$name)
wrong_var <- vars[!vars %in% var_names_ncdf]
if (length(wrong_var) >= 1) stop(paste("Variable", paste(wrong_var, collapse = " "), "not found in init file."))
at_data <- lapply(vars, RNetCDF::var.get.nc, ncfile = read_nc)
# Box and layer!
convert2d <- function(mat, layerid, n_boxes) {
if (!(is.matrix(mat) & length(dim(mat)) == 2)) {
stop("Wrong data format. Variable is not stored as 2d data in initial file.")
}
data.frame(atoutput = as.vector(mat),
polygon = rep(0:(n_boxes - 1), each = length(layerid) / n_boxes),
layer = layerid, stringsAsFactors = FALSE)
}
# Only Box data!
convert1d <- function(vec, n_boxes) {
if (!(is.array(vec) & length(vec) == n_boxes)) {
stop("Wrong data format. Variable is not stored as 1d vector in initial file.")
}
data.frame(atoutput = as.vector(vec),
polygon = 0:(n_boxes - 1), stringsAsFactors = FALSE)
}
at_dim <- vapply(at_data, function(x) length(dim(x)), integer(1))
# Check cases and apply formulas!
if (all(at_dim == 2)) df_list <- lapply(at_data, convert2d, layerid, n_boxes)
if (all(at_dim == 1)) df_list <- lapply(at_data, convert1d, n_boxes)
if (length(unique(at_dim)) > 1) stop("Vars are stored in different dimensions. Please, either pick only 2d or 1d data.")
# Data extracted for every variable?
if (length(vars) != length(df_list)) stop("Starnge ncdf extraction. Please contact package development Team.")
return(df_list)
}
get_layers <- function(init) {
  # Read the per-box number of water-column layers from the init file.
  read_nc <- RNetCDF::open.nc(con = init)
  on.exit(RNetCDF::close.nc(read_nc))
  num_layers <- RNetCDF::var.get.nc(ncfile = read_nc, variable = "numlayers")
  if (length(dim(num_layers)) == 2) {
    # Bug fix: the original tested length(unique) -- the length of the
    # *function object* `unique`, which is always 1 -- so the sanity check
    # could never fail. Each row (box) must hold a single unique layer count
    # before the matrix is collapsed to its first column.
    if (all(apply(num_layers, MARGIN = 1, FUN = function(x) length(unique(x))) == 1)) {
      num_layers <- num_layers[, 1]
    } else {
      stop("Different numbers of layers per Box. This nc-structure is not supported.")
    }
  }
  return(num_layers)
}
# Utility function used to extract the layerids from the number of layers per box.
# Layer ids are given in 0:num_layers-1 with 0 being the layer closest to the sediment.
# The largest id per box is the surface layer. Non existing layers (per box) are NA.
# The sediment layer id is the maximum number of layers. E.g. in case there are 5 water
# column layers the sediment layer has the id 6.
get_layerid <- function(num_layers, max_layer, n_boxes) {
wc_id <- lapply(num_layers, function(x) rep(1, times = x))
wc_id <- lapply(wc_id, function(x) cumsum(x) - 1) # ids are NOT in reverse order in the inital cond. nc file
wc_fill <- lapply(num_layers, function(x) rep(NA, times = max_layer - x - 1))
wc <- Map(f = c, wc_id, wc_fill)
if (length(unique(sapply(wc, length))) != 1) stop("Layers inconsistent. Contact package development Team.")
wc <- lapply(wc, function(x) c(x, max_layer - 1)) # add sediment layer
unlist(wc)
}
# Remove min pools (0 and almost 0) from a datframe.
remove_min_pools <- function(df, col = "atoutput", min_pools = c(0, 1e-08, 1e-16)) {
expr <- lazyeval::interp(quote(!(x %in% y)), x = as.name(col), y = min_pools)
df %>% dplyr::filter_(expr)
}
# Remove boundary boxes from a dataframe. bboxes is the vector of box ids (starting with 0)
remove_bboxes <- function(df, bboxes) {
if (!any(names(df) == "polygon")) stop("No column polygon in df. Cannot remove boundary boxes.")
df %>% dplyr::filter_(~!(polygon %in% bboxes))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_functions.R
\name{max_eigen_ratio}
\alias{max_eigen_ratio}
\title{Eigenvalue ratio}
\usage{
max_eigen_ratio(x, y)
}
\arguments{
\item{x}{square matrix with the same dimensions as y}
\item{y}{square matrix with the same dimensions as x}
}
\description{
Take the ratio of the max eigenvalues of two matrices
}
| /man/max_eigen_ratio.Rd | no_license | jarvisc1/CoMix | R | false | true | 394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_functions.R
\name{max_eigen_ratio}
\alias{max_eigen_ratio}
\title{Eigenvalue ratio}
\usage{
max_eigen_ratio(x, y)
}
\arguments{
\item{x}{square matrix with the same dimensions as y}
\item{y}{square matrix with the same dimensions as x}
}
\description{
Take the ratio of the max eigenvalues of two matrices
}
|
37dd804c2aac5a320cf507b116204dbb tentrup_simple_arbiter_system_2.dqdimacs 113 332 | /code/dcnf-ankit-optimized/Results/DQBF-TRACK-2018/A1/Database/tentrup_simple_arbiter_system_2/tentrup_simple_arbiter_system_2.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 81 | r | 37dd804c2aac5a320cf507b116204dbb tentrup_simple_arbiter_system_2.dqdimacs 113 332 |
library(data.table)
### Name: foverlaps
### Title: Fast overlap joins
### Aliases: foverlaps
### Keywords: data
### ** Examples
require(data.table)
## simple example:
# x holds query intervals, y holds subject intervals; y must be keyed on
# its interval columns before calling foverlaps().
x = data.table(start=c(5,31,22,16), end=c(8,50,25,18), val2 = 7:10)
y = data.table(start=c(10, 20, 30), end=c(15, 35, 45), val1 = 1:3)
setkey(y, start, end)
foverlaps(x, y, type="any", which=TRUE) ## return overlap indices
foverlaps(x, y, type="any") ## return overlap join
foverlaps(x, y, type="any", mult="first") ## returns only first match
foverlaps(x, y, type="within") ## matches iff 'x' is within 'y'
## with extra identifiers (ex: in genomics)
# Extra grouping columns (chr) before the interval columns restrict matches
# to rows sharing the same identifier.
x = data.table(chr=c("Chr1", "Chr1", "Chr2", "Chr2", "Chr2"),
start=c(5,10, 1, 25, 50), end=c(11,20,4,52,60))
y = data.table(chr=c("Chr1", "Chr1", "Chr2"), start=c(1, 15,1),
end=c(4, 18, 55), geneid=letters[1:3])
setkey(y, chr, start, end)
foverlaps(x, y, type="any", which=TRUE)
foverlaps(x, y, type="any")
foverlaps(x, y, type="any", nomatch=NULL)
foverlaps(x, y, type="within", which=TRUE)
foverlaps(x, y, type="within")
foverlaps(x, y, type="start")
## x and y have different column names - specify by.x
x = data.table(seq=c("Chr1", "Chr1", "Chr2", "Chr2", "Chr2"),
start=c(5,10, 1, 25, 50), end=c(11,20,4,52,60))
y = data.table(chr=c("Chr1", "Chr1", "Chr2"), start=c(1, 15,1),
end=c(4, 18, 55), geneid=letters[1:3])
setkey(y, chr, start, end)
foverlaps(x, y, by.x=c("seq", "start", "end"),
type="any", which=TRUE)
| /data/genthat_extracted_code/data.table/examples/foverlaps.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,519 | r | library(data.table)
### Name: foverlaps
### Title: Fast overlap joins
### Aliases: foverlaps
### Keywords: data
### ** Examples
require(data.table)
## simple example:
x = data.table(start=c(5,31,22,16), end=c(8,50,25,18), val2 = 7:10)
y = data.table(start=c(10, 20, 30), end=c(15, 35, 45), val1 = 1:3)
setkey(y, start, end)
foverlaps(x, y, type="any", which=TRUE) ## return overlap indices
foverlaps(x, y, type="any") ## return overlap join
foverlaps(x, y, type="any", mult="first") ## returns only first match
foverlaps(x, y, type="within") ## matches iff 'x' is within 'y'
## with extra identifiers (ex: in genomics)
x = data.table(chr=c("Chr1", "Chr1", "Chr2", "Chr2", "Chr2"),
start=c(5,10, 1, 25, 50), end=c(11,20,4,52,60))
y = data.table(chr=c("Chr1", "Chr1", "Chr2"), start=c(1, 15,1),
end=c(4, 18, 55), geneid=letters[1:3])
setkey(y, chr, start, end)
foverlaps(x, y, type="any", which=TRUE)
foverlaps(x, y, type="any")
foverlaps(x, y, type="any", nomatch=NULL)
foverlaps(x, y, type="within", which=TRUE)
foverlaps(x, y, type="within")
foverlaps(x, y, type="start")
## x and y have different column names - specify by.x
x = data.table(seq=c("Chr1", "Chr1", "Chr2", "Chr2", "Chr2"),
start=c(5,10, 1, 25, 50), end=c(11,20,4,52,60))
y = data.table(chr=c("Chr1", "Chr1", "Chr2"), start=c(1, 15,1),
end=c(4, 18, 55), geneid=letters[1:3])
setkey(y, chr, start, end)
foverlaps(x, y, by.x=c("seq", "start", "end"),
type="any", which=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ordnorm.R
\name{ordnorm}
\alias{ordnorm}
\title{Calculate Intermediate MVN Correlation to Generate Variables Treated as Ordinal}
\usage{
ordnorm(marginal, rho, support = list(), epsilon = 0.001, maxit = 1000)
}
\arguments{
\item{marginal}{a list of length equal to the number of variables; the i-th element is a vector of the cumulative
probabilities defining the marginal distribution of the i-th variable;
if the variable can take r values, the vector will contain r - 1 probabilities (the r-th is assumed to be 1)}
\item{rho}{the target correlation matrix}
\item{support}{a list of length equal to the number of variables; the i-th element is a vector of containing the r
ordered support values; if not provided (i.e. support = list()), the default is for the i-th element to be the vector 1, ..., r}
\item{epsilon}{the maximum acceptable error between the final and target correlation matrices (default = 0.001);
smaller epsilons take more time}
\item{maxit}{the maximum number of iterations to use (default = 1000) to find the intermediate correlation; the
correction loop stops when either the iteration number passes maxit or epsilon is reached}
}
\value{
A list with the following components:
\code{SigmaC} the intermediate MVN correlation matrix
\code{rho0} the calculated final correlation matrix generated from \code{SigmaC}
\code{rho} the target final correlation matrix
\code{niter} a matrix containing the number of iterations required for each variable pair
\code{maxerr} the maximum final error between the final and target correlation matrices
}
\description{
This function calculates the intermediate MVN correlation needed to generate a variable described by
a discrete marginal distribution and associated finite support. This includes ordinal (r >= 2 categories) variables
or variables that are treated as ordinal (i.e. count variables in the Barbiero & Ferrari, 2015 method used in
\code{\link[SimMultiCorrData]{rcorrvar2}}, \doi{10.1002/asmb.2072}). The function is a modification of Barbiero & Ferrari's
\code{\link[GenOrd]{ordcont}}
function in \code{\link[GenOrd]{GenOrd-package}}. It works by setting the intermediate MVN correlation equal to the target
correlation and updating each intermediate pairwise correlation until the final pairwise correlation is within epsilon of the
target correlation or the maximum number of iterations has been reached. This function uses \code{\link[GenOrd]{contord}}
to calculate the ordinal correlation obtained from discretizing the normal variables generated from the intermediate
correlation matrix. The \code{\link[GenOrd]{ordcont}} has been modified in the following ways:
1) the initial correlation check has been removed because it is assumed the user has done this before simulation using
\code{\link[SimMultiCorrData]{valid_corr}} or \code{\link[SimMultiCorrData]{valid_corr2}}
2) the final positive-definite check has been removed
3) the intermediate correlation update function was changed to accommodate more situations, and
4) a final "fail-safe" check was added at the end of the iteration loop where if the absolute
error between the final and target pairwise correlation is still > 0.1, the intermediate correlation is set
equal to the target correlation.
This function would not ordinarily be called by the user. Note that this will return a matrix that is NOT positive-definite
because this is corrected for in the
simulation functions \code{\link[SimMultiCorrData]{rcorrvar}} and \code{\link[SimMultiCorrData]{rcorrvar2}}
using the method of Higham (2002) and the \code{\link[Matrix]{nearPD}} function.
}
\references{
Barbiero A, Ferrari PA (2015). Simulation of correlated Poisson variables. Applied Stochastic Models
in Business and Industry, 31: 669-80. \doi{10.1002/asmb.2072}.
Barbiero A, Ferrari PA (2015). GenOrd: Simulation of Discrete Random Variables with Given
Correlation Matrix and Marginal Distributions. R package version 1.4.0.
\url{https://CRAN.R-project.org/package=GenOrd}
Ferrari PA, Barbiero A (2012). Simulating ordinal data, Multivariate Behavioral Research, 47(4): 566-589. \doi{10.1080/00273171.2012.692630}.
}
\seealso{
\code{\link[GenOrd]{ordcont}}, \code{\link[SimMultiCorrData]{rcorrvar}}, \code{\link[SimMultiCorrData]{rcorrvar2}},
\code{\link[SimMultiCorrData]{findintercorr}}, \code{\link[SimMultiCorrData]{findintercorr2}}
}
\keyword{correlation,}
\keyword{count}
\keyword{intermediate,}
\keyword{ordinal,}
| /man/ordnorm.Rd | no_license | cran/SimMultiCorrData | R | false | true | 4,706 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ordnorm.R
\name{ordnorm}
\alias{ordnorm}
\title{Calculate Intermediate MVN Correlation to Generate Variables Treated as Ordinal}
\usage{
ordnorm(marginal, rho, support = list(), epsilon = 0.001, maxit = 1000)
}
\arguments{
\item{marginal}{a list of length equal to the number of variables; the i-th element is a vector of the cumulative
probabilities defining the marginal distribution of the i-th variable;
if the variable can take r values, the vector will contain r - 1 probabilities (the r-th is assumed to be 1)}
\item{rho}{the target correlation matrix}
\item{support}{a list of length equal to the number of variables; the i-th element is a vector containing the r
ordered support values; if not provided (i.e. support = list()), the default is for the i-th element to be the vector 1, ..., r}
\item{epsilon}{the maximum acceptable error between the final and target correlation matrices (default = 0.001);
smaller epsilons take more time}
\item{maxit}{the maximum number of iterations to use (default = 1000) to find the intermediate correlation; the
correction loop stops when either the iteration number passes maxit or epsilon is reached}
}
\value{
A list with the following components:
\code{SigmaC} the intermediate MVN correlation matrix
\code{rho0} the calculated final correlation matrix generated from \code{SigmaC}
\code{rho} the target final correlation matrix
\code{niter} a matrix containing the number of iterations required for each variable pair
\code{maxerr} the maximum final error between the final and target correlation matrices
}
\description{
This function calculates the intermediate MVN correlation needed to generate a variable described by
a discrete marginal distribution and associated finite support. This includes ordinal (r >= 2 categories) variables
or variables that are treated as ordinal (i.e. count variables in the Barbiero & Ferrari, 2015 method used in
\code{\link[SimMultiCorrData]{rcorrvar2}}, \doi{10.1002/asmb.2072}). The function is a modification of Barbiero & Ferrari's
\code{\link[GenOrd]{ordcont}}
function in \code{\link[GenOrd]{GenOrd-package}}. It works by setting the intermediate MVN correlation equal to the target
correlation and updating each intermediate pairwise correlation until the final pairwise correlation is within epsilon of the
target correlation or the maximum number of iterations has been reached. This function uses \code{\link[GenOrd]{contord}}
to calculate the ordinal correlation obtained from discretizing the normal variables generated from the intermediate
correlation matrix. The \code{\link[GenOrd]{ordcont}} has been modified in the following ways:
1) the initial correlation check has been removed because it is assumed the user has done this before simulation using
\code{\link[SimMultiCorrData]{valid_corr}} or \code{\link[SimMultiCorrData]{valid_corr2}}
2) the final positive-definite check has been removed
3) the intermediate correlation update function was changed to accommodate more situations, and
4) a final "fail-safe" check was added at the end of the iteration loop where if the absolute
error between the final and target pairwise correlation is still > 0.1, the intermediate correlation is set
equal to the target correlation.
This function would not ordinarily be called by the user. Note that this will return a matrix that is NOT positive-definite
because this is corrected for in the
simulation functions \code{\link[SimMultiCorrData]{rcorrvar}} and \code{\link[SimMultiCorrData]{rcorrvar2}}
using the method of Higham (2002) and the \code{\link[Matrix]{nearPD}} function.
}
\references{
Barbiero A, Ferrari PA (2015). Simulation of correlated Poisson variables. Applied Stochastic Models
in Business and Industry, 31: 669-80. \doi{10.1002/asmb.2072}.
Barbiero A, Ferrari PA (2015). GenOrd: Simulation of Discrete Random Variables with Given
Correlation Matrix and Marginal Distributions. R package version 1.4.0.
\url{https://CRAN.R-project.org/package=GenOrd}
Ferrari PA, Barbiero A (2012). Simulating ordinal data, Multivariate Behavioral Research, 47(4): 566-589. \doi{10.1080/00273171.2012.692630}.
}
\seealso{
\code{\link[GenOrd]{ordcont}}, \code{\link[SimMultiCorrData]{rcorrvar}}, \code{\link[SimMultiCorrData]{rcorrvar2}},
\code{\link[SimMultiCorrData]{findintercorr}}, \code{\link[SimMultiCorrData]{findintercorr2}}
}
\keyword{correlation,}
\keyword{count}
\keyword{intermediate,}
\keyword{ordinal,}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_vpc_endpoint_connection_notifications}
\alias{ec2_describe_vpc_endpoint_connection_notifications}
\title{Describes the connection notifications for VPC endpoints and VPC
endpoint services}
\usage{
ec2_describe_vpc_endpoint_connection_notifications(DryRun,
ConnectionNotificationId, Filters, MaxResults, NextToken)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{ConnectionNotificationId}{The ID of the notification.}
\item{Filters}{One or more filters.
\itemize{
\item \code{connection-notification-arn} - The ARN of the SNS topic for the
notification.
\item \code{connection-notification-id} - The ID of the notification.
\item \code{connection-notification-state} - The state of the notification
(\code{Enabled} \\| \code{Disabled}).
\item \code{connection-notification-type} - The type of notification (\code{Topic}).
\item \code{service-id} - The ID of the endpoint service.
\item \code{vpc-endpoint-id} - The ID of the VPC endpoint.
}}
\item{MaxResults}{The maximum number of results to return in a single call. To retrieve
the remaining results, make another request with the returned
\code{NextToken} value.}
\item{NextToken}{The token to request the next page of results.}
}
\description{
Describes the connection notifications for VPC endpoints and VPC
endpoint services.
}
\section{Request syntax}{
\preformatted{svc$describe_vpc_endpoint_connection_notifications(
DryRun = TRUE|FALSE,
ConnectionNotificationId = "string",
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
| /paws/man/ec2_describe_vpc_endpoint_connection_notifications.Rd | permissive | johnnytommy/paws | R | false | true | 1,986 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_vpc_endpoint_connection_notifications}
\alias{ec2_describe_vpc_endpoint_connection_notifications}
\title{Describes the connection notifications for VPC endpoints and VPC
endpoint services}
\usage{
ec2_describe_vpc_endpoint_connection_notifications(DryRun,
ConnectionNotificationId, Filters, MaxResults, NextToken)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{ConnectionNotificationId}{The ID of the notification.}
\item{Filters}{One or more filters.
\itemize{
\item \code{connection-notification-arn} - The ARN of the SNS topic for the
notification.
\item \code{connection-notification-id} - The ID of the notification.
\item \code{connection-notification-state} - The state of the notification
(\code{Enabled} \\| \code{Disabled}).
\item \code{connection-notification-type} - The type of notification (\code{Topic}).
\item \code{service-id} - The ID of the endpoint service.
\item \code{vpc-endpoint-id} - The ID of the VPC endpoint.
}}
\item{MaxResults}{The maximum number of results to return in a single call. To retrieve
the remaining results, make another request with the returned
\code{NextToken} value.}
\item{NextToken}{The token to request the next page of results.}
}
\description{
Describes the connection notifications for VPC endpoints and VPC
endpoint services.
}
\section{Request syntax}{
\preformatted{svc$describe_vpc_endpoint_connection_notifications(
DryRun = TRUE|FALSE,
ConnectionNotificationId = "string",
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
library(testthat)
library(SimBM)
test_check("SimBM")
| /tests/testthat.R | no_license | Jianchang-HU/826_package | R | false | false | 54 | r | library(testthat)
library(SimBM)
test_check("SimBM")
|
library(combinat)
# Build factors from character vectors and inspect their levels.
nums <- factor(c("One", "Two", "Three"))
factors <- factor(nums)
factors[1]
levels2 <- levels(nums)
col <- factor(c("Red", "Green"))
own <- factor(c("English", "Swedish"))
# Enumerate every permutation of each factor's level set.
col_p <- permn(levels(col))
# Bug fix: own_p previously permuted levels(col) (copy-paste error); it should permute levels(own).
own_p <- permn(levels(own)) | /week1/combin.R | no_license | nalbarr/coursera-johnhopkins-datascience-course3 | R | false | false | 248 | r | library(combinat)
nums <- factor(c("One", "Two", "Three"))
factors <- factor(nums)
factors[1]
levels2 <- levels(nums)
col <- factor(c("Red", "Green"))
own <- factor(c("English", "Swedish"))
col_p <- permn(levels(col))
own_p <- permn(levels(col)) |
# TODO: Write a column type auto-detectors
# http://www.r-bloggers.com/package-wide-variablescache-in-r-packages/
# Package-wide registry of question types: name -> list(detector, processor).
question_types <- new.env(parent = emptyenv())

#' New question type.
#'
#' Adds a new type of question to the survey question auto-detector.
#'
#' @param name Text string containing the name of this question type
#' @param detector Function that takes a single argument -- a vector of data -- and
#' returns True or False -- whether this data is of the correct type
#' @param processor Function that takes a single argument -- a vector of data --
#' processes it, and returns the processed data; alternatively the string
#' "remove" (case-insensitive), meaning columns of this type should be dropped
#' @export
add_question_type <- function(name, detector, processor) {
  # Registering twice warns but still overwrites the existing entry.
  if (exists(name, envir = question_types)) {
    warning("Question Type already known")
  }
  detector <- match.fun(detector)
  # Normalize the "remove" sentinel; base tolower() suffices for this
  # ASCII comparison (drops the stringr dependency).
  if (is.character(processor) && tolower(processor) == "remove") {
    processor <- "remove"
  } else {
    processor <- match.fun(processor)
  }
  assign(name, list(detector, processor), envir = question_types)
}
#' Remove a question type from the auto-detector
#'
#' @param name Character string name of the question type to remove
#' @export
remove_question_type <- function(name) {
  # Unknown names are ignored silently.
  if (exists(name, envir = question_types)) {
    rm(list = name, envir = question_types)
  }
}
replace_nas <- function(v) {
  # Blank strings ("") represent missing survey answers; recode them as NA.
  if (is.factor(v)) {
    # For factors, recode at the level set so element order is untouched.
    lv <- levels(v)
    levels(v) <- ifelse(lv == "", NA, lv)
    return(v)
  }
  ifelse(v == "", NA, v)
}
#' Question Auto-Detect
#'
#' Automatically detect what type of question this column of data came from, and process it
#'
#' @param column A single vector of data from a survey
#' @param col_name (optional) The name of the column
#'
#' @return A vector of processed data; NULL when the matching question type's
#'   processor is the "remove" sentinel; the unmodified column (with a warning)
#'   when no registered detector matches
#' @export
detect.question <- function(column, col_name="") {
  # Normalize empty strings to NA, and stash the column name as an attribute
  # so detectors/processors can inspect it.
  column <- replace_nas(column)
  attr(column, "name") <- col_name
  # Try every registered question type until one detector claims the column.
  types <- ls(envir=question_types)
  for (type in types) {
    qtype <- get(type, envir=question_types)
    detector <- qtype[[1]]
    if (detector(column)) {
      # cat(paste("Found", type, "\n"))
      processor <- qtype[[2]]
      # "remove" sentinel: the column should be dropped entirely.
      if (is.character(processor) && processor == "remove") {
        return(NULL)
      } else {
        return(processor(column))
      }
    }
  }
  # No detector matched: warn (naming the column when we can) and hand the
  # column back unprocessed, without the temporary "name" attribute.
  warn_msg <- "Column of unknown type"
  if (col_name != "") { warn_msg <- paste0("Column ", col_name, " of unknown type")}
  warning(warn_msg)
  attr(column, "name") <- NULL
  return(column)
}
#' Survey Cleaning Auto-detect
#'
#' Automatically clean a survey by iterating through a data frame and automatically detecting
#' each question's type and processing it
#'
#' @param frame The data.frame or tibble containing the survey
#'
#' @return A new tibble containing the processed survey; columns whose processor
#'   returned NULL are dropped, and processors returning a tibble contribute
#'   multiple columns prefixed with the source column's name
#' @export
detect.survey <- function(frame) {
  n_rows <- dim(frame)[1]
  # Run the auto-detector on every column; each result is either a vector,
  # a tibble (multi-column expansion), or NULL (column flagged for removal).
  out <- lapply(names(frame), function(n) { detect.question(frame[[n]], n)} )
  names(out) <- names(frame)
  # Drop NULL entries; names(out) is evaluated once up front, so shrinking
  # the list inside the loop is safe.
  for (i in names(out)) {
    if (is.null(out[[i]])) {
      out[[i]] <- NULL # Actually remove the column if it is all NULL
    }
  }
  # Rebuild the survey as a tibble with the original number of rows.
  out_tibble <- tibble::tibble(.rows = n_rows)
  for (i in names(out)) {
    if (tibble::is_tibble(out[[i]])) {
      # Multi-column result: prefix each sub-column with the source column name.
      names(out[[i]]) <- stringr::str_c(i, "_", names(out[[i]]))
      out_tibble <- dplyr::bind_cols(out_tibble, out[[i]])
    } else {
      out_tibble <- tibble::add_column(out_tibble, !!i := out[[i]])
    }
  }
  # name_fix <- function(x) {stringr::str_remove(x, "\\$value")}
  # out <- tibble::as_tibble(out, .name_repair = ~name_fix)
  return(out_tibble)
}
#' Load a survey dataset from a file
#'
#' Loads a survey dataset from a file, auto-detects all of the columns, and then processes the
#' dataset
#'
#' @param file Name of the file to load
#' @param ... Additional arguments forwarded to \code{readr::read_csv}
#'
#' @return A tibble containing the processed dataset, with the original question
#'   text (the file's first data row) attached as the "question.text" attribute
#' @export
load_survey <- function(file, ...) {
  f <- readr::read_csv(file, ...)
  # Qualtrics-style exports: row 1 holds the question text, row 2 metadata;
  # keep the question text and strip both rows before processing.
  question_text <- f[1, ]
  f <- f[-1:-2, ]
  out <- detect.survey(f)
  # Bug fix: this attribute used to be set on `f` before subsetting and
  # reprocessing, which discarded it; attach it to the returned tibble so
  # callers can actually retrieve it.
  attr(out, "question.text") <- question_text
  return(out)
}
#' Resets the surveys package
#'
#' Removes all known question types and answer types, and resets the surveys package
#' back to where it is when it is first loaded
#'
#' @export
reset_surveys <- function() {
  # Empty every package-level cache environment (these environments, and the
  # load_* helpers below, are defined elsewhere in the package).
  rm(list=ls(envir=question_types), envir=question_types)
  rm(list=ls(envir=multiple_choices), envir=multiple_choices)
  rm(list=ls(envir=multiple_answer_choices), envir=multiple_answer_choices)
  rm(list=ls(envir=attention_checks), envir=attention_checks)
  rm(list=ls(envir=ignore_questions), envir=ignore_questions)
  rm(list=ls(envir=known_questions), envir=known_questions)
  # Re-register the built-in defaults.
  load_question_types()
  load_multiple_choice_options()
}
# TODO: Should I add a priority ordering?
| /R/detector.R | permissive | rwash/surveys | R | false | false | 4,627 | r |
# TODO: Write a column type auto-detectors
# http://www.r-bloggers.com/package-wide-variablescache-in-r-packages/
# Package-wide registry of question types: name -> list(detector, processor).
question_types <- new.env(parent = emptyenv())
#' New question type.
#'
#' Adds a new type of question to the survey question auto-detector.
#'
#' @param name Text string containing the name of this question type
#' @param detector Function that takes a single argument -- a vector of data -- and
#' returns True or False -- whether this data is of the correct type
#' @param processor Function that takes a single argument -- a vector of data --
#' processes it, and returns the processed data
#' @export
add_question_type <- function(name, detector, processor) {
if (exists(name, envir=question_types)) {
warning("Question Type already known")
}
detector <- match.fun(detector)
if (is.character(processor) && stringr::str_to_lower(processor) == "remove") {
processor <- "remove"
} else {
processor <- match.fun(processor)
}
assign(name, list(detector, processor), envir=question_types)
}
#' Remove a question type from the auto-detector
#'
#' @param name Character string name of the question type to remove
#' @export
remove_question_type <- function(name) {
if (exists(name, envir=question_types))
rm(list=name, envir=question_types)
}
replace_nas <- function(v) {
  # Recode blank strings ("") as NA; they represent unanswered questions.
  if (is.factor(v)) {
    # Recode at the level set so factor element order is preserved.
    new_levels <- levels(v)
    new_levels[new_levels == ""] <- NA
    levels(v) <- new_levels
    return(v)
  }
  ifelse(v == "", NA, v)
}
#' Question Auto-Detect
#'
#' Automatically detect what type of question this column of data came from, and process it
#'
#' @param column A single vector of data from a survey
#' @param col_name (optional) The name of the column
#'
#' @return A vector of processed data
#' @export
detect.question <- function(column, col_name="") {
column <- replace_nas(column)
attr(column, "name") <- col_name
types <- ls(envir=question_types)
for (type in types) {
qtype <- get(type, envir=question_types)
detector <- qtype[[1]]
if (detector(column)) {
# cat(paste("Found", type, "\n"))
processor <- qtype[[2]]
if (is.character(processor) && processor == "remove") {
return(NULL)
} else {
return(processor(column))
}
}
}
warn_msg <- "Column of unknown type"
if (col_name != "") { warn_msg <- paste0("Column ", col_name, " of unknown type")}
warning(warn_msg)
attr(column, "name") <- NULL
return(column)
}
#' Survey Cleaning Auto-detect
#'
#' Automatically clean a survey by iterating through a data frame and automatically detecting
#' each question's type and processing it
#'
#' @param frame The data.frame or tibble containing the survey
#'
#' @return A new tibble containing the processed survey
#' @export
detect.survey <- function(frame) {
n_rows <- dim(frame)[1]
out <- lapply(names(frame), function(n) { detect.question(frame[[n]], n)} )
names(out) <- names(frame)
for (i in names(out)) {
if (is.null(out[[i]])) {
out[[i]] <- NULL # Actually remove the column if it is all NULL
}
}
out_tibble <- tibble::tibble(.rows = n_rows)
for (i in names(out)) {
if (tibble::is_tibble(out[[i]])) {
names(out[[i]]) <- stringr::str_c(i, "_", names(out[[i]]))
out_tibble <- dplyr::bind_cols(out_tibble, out[[i]])
} else {
out_tibble <- tibble::add_column(out_tibble, !!i := out[[i]])
}
}
# name_fix <- function(x) {stringr::str_remove(x, "\\$value")}
# out <- tibble::as_tibble(out, .name_repair = ~name_fix)
return(out_tibble)
}
#' Load a survey dataset from a file
#'
#' Loads a survey dataset from a file, auto-detects all of the columns, and then processes the
#' dataset
#'
#' @param file Name of the file to load
#' @param ... Additional arguments forwarded to \code{readr::read_csv}
#'
#' @return A tibble containing the processed dataset, with the original question
#'   text (the file's first data row) attached as the "question.text" attribute
#' @export
load_survey <- function(file, ...) {
  f <- readr::read_csv(file, ...)
  # Qualtrics-style exports: row 1 holds the question text, row 2 metadata;
  # keep the question text and strip both rows before processing.
  question_text <- f[1, ]
  f <- f[-1:-2, ]
  out <- detect.survey(f)
  # Bug fix: this attribute used to be set on `f` before subsetting and
  # reprocessing, which discarded it; attach it to the returned tibble so
  # callers can actually retrieve it.
  attr(out, "question.text") <- question_text
  return(out)
}
#' Resets the surveys package
#'
#' Removes all known question types and answer types, and resets the surveys packag
#' back to where it is when it is first loaded
#'
#' @export
reset_surveys <- function() {
rm(list=ls(envir=question_types), envir=question_types)
rm(list=ls(envir=multiple_choices), envir=multiple_choices)
rm(list=ls(envir=multiple_answer_choices), envir=multiple_answer_choices)
rm(list=ls(envir=attention_checks), envir=attention_checks)
rm(list=ls(envir=ignore_questions), envir=ignore_questions)
rm(list=ls(envir=known_questions), envir=known_questions)
load_question_types()
load_multiple_choice_options()
}
# TODO: Should I add a priority ordering?
|
# Bruno Passarelli
# 16 September 2015
# this script is for Alfaro's in class exercises
# it follows the "R basics and flow control" slideshow by Alfaro
# when using help, you can copy examples to the script and send it to console to see
# how a certain function works
source("source.example.R")
source
all.I.know.about.life.I.learned.in.grad.school()
library(ape)
# read in tree
tt <- read.tree("tree.tre")
tt
attributes(tt)
head(tt$tip.label)
# read in data
# header indicates that there is a header line in the data (TRUE or T works)
dd <- read.table("data.txt", header = T, as.is = T, sep = "\t")
str(dd) # this shows the structure of an object
attributes(dd)
head(dd) # this shows the first six elements of the object
dim(dd) # this returns the dimensions of an object (92 rows by 2 columns in this example)
dim(dd)[1] # dimensions of the rows
dflength <- dim(dd)[1] # this assigns the dimensions of rows to the object dflength
dim(dd)[2]
runif(1) # generates random uniform distribution
# generate some random size data
size <- runif(dflength) # get 92 random variables (dflength = 92), generate random size data
# use cbind to add this column to the exisitng data frame
head(dd) # use this to check data frame; it will return the first 6 values
head(cbind(dd, size)) # this will add a column "size" to the data frame
# make a new variable and give it a name
newdd <- cbind(dd, size) # use this if you want to keep the original variable dd unaltered
# if you want to overwrite dd to include a variable with a new column, use this:
dd <- cbind(dd, size)
head(dd) # check new variable, you will see the size column added
# acessing data frame elements
names(dd) # this gives you the names of the columns
dd$species # this returns all species
head(dd$species) # check the species column first six values
tail(dd$species) # check the species column last six values
head(dd$size) # check the size column first six values
# use [] after a data frame to access specific cells, rows, and columns
dd[1,] # look at row 1, all columns
dd[,1] # look at all rows, column 1
dd[1:10,] # look at rows 1-10, all columns
dd[5:10,] # rows5-10, all columns
dd[1:10,2] # rows 1-10, column 2
dd[,1] # all rows, column 1
dd[,2] # all rows, column 2
# the which() function does queries with your data
# this one is telling you to look at the mode column and find values equal to MPF
which(dd$mode == 'MPF') # this will list every row number that has MPF in the mode column
# two equal signs (==) is asking if two things are equal
dd$mode == "MPF"
# the above returns T or F values for each value (the ones that equal MPF are TRUE)
dd[which(dd$mode == 'MPF'),]
# the above is more complete, it shows all the data for rows with MPF
# next, we assign the above to a variable called just_mpf
just_mpf <- dd[which(dd$mode == 'MPF'),]
head(just_mpf) # check the data
# R challenge
# make a new data frame with large species only (let's define "large" as > 0.8 )
which(dd$size > 0.8) # this will tell you which rows have the large species
large.spp <- dd[which(dd$size > 0.8),] # this makes the new data frame
head(large.spp) # check the data, it worked!
# checking for NAs
head(dd) # we can see that there is an NA in row 2, column 2
# removing NAs
# one way to get only complete cases
cleaned_1 <- dd[complete.cases(dd),]
head(cleaned_1) # you can see the row 2 was removed
# another method
cleaned_2 <- na.omit(dd)
head(cleaned_2) # same effect as above
dd <- cleaned_1 # reassigned dd to only have complete cases
# renaming data frame entries and matching data objects
# using setdiff
setdiff(dd$species, tt$tip.label) # shows which species in dataset don't match the ones in the tree
# this returned 17 species that didn't match
# sometimes this happens because of misspellings
# here is one way to fix this
dd$species[which(dd$species == "Chaetodon_plebius")]<-"Chaetodon_plebeius"
# if we rerun setdiff only 16 species won't match this time
# matching rest of data to tree
del_from_data <- setdiff(dd$species, tt$tip.label)
match(del_from_data, rownames(dd)) # this will show the row numbers in dd that don't match in tt
# * this returned the values (all NA), instead of row numbers in slide
# create new variable dd.prunned
dd.prunned <- dd[-match(del_from_data, rownames(dd)),] # now let's check again for overlap
setdiff(dd.prunned$species, tt$tip.label) # this should return 0 (or NA) mismatches
# matching tree to data
# use setdiff again but switch the arguments
not.in.dd <- setdiff(tt$tip.label, dd.prunned$species)
length(not.in.dd) # will return a large number *this was different than example in slide
head(not.in.dd) # at least head matches
# now need to use drop.tip()
?drop.tip
# always assign short, very descriptive names to variables
# \n tells R that there is a new line
# Common control statements
# "for" commands
# whatever is inside the braces will be performed for each of the elements
# in the parentheses
# below is an example from slide show
for(ii in 1:5){
cat("\nthe number is", ii)
}
# you can also loop over all items in a vector
notfish <- c("bat", "dolphin", "toad", "soldier")
for(animal in notfish){
cat(animal, "fish\n", sep="")
}
# cat puts together whatever element is in our loop plus the word fish
for(animal in notfish){
cat(animal, "fish", " are tasty\n", sep="")
}
# while statements - same general structures as "for" statements
# as long as the condition is true it keeps doing something
xx <- 1
while(xx < 5){
xx <- xx+1;
cat("value of xx", xx, "\n")
}
print(xx)
xx <- 1
while(xx < 5){
xx <- xx+1;
cat("value of xx", xx, "\n")
if( xx == 3){
break; }
}
print(xx)
# "if" statements
# allow your code to diverge depending on conditions
# if(condition is true){do something}
for(ii in 1:6){
if(ii %% 2){
cat(ii, "is odd\n")
}
else{
cat(ii, "is even\n")
}
}
# a few more examples
xx <- 1
while(xx < 10){
cat("value of xx", xx, "\n")
xx <- xx+1;
if(xx == 7){
cat("lucky number", xx, "\n")}
else
{cat("not excited about the number", xx, "\n")
}
}
print(xx)
# if you want the number 1 to be included, place xx <- xx+1; under else
xx <- 1
while(xx < 10){
cat("value of xx", xx, "\n")
if( xx == 7){
cat("lucky number" , xx, "\n")}
else
{cat("not excited about the number", xx, "\n")}
xx <- xx+1;
}
print(xx)
# Pseudocode
# plan out structure and flow of your program
# syntax is not important, think about VARIABLES AND CONTROL STRUCTURE
# can be translated across languages
# pseudocode example from slide
# script that prints a number and its square over a given range of integers and then
# sum all squares
# 1. set upper and lower range values
# 2. setsquaresum to 0
# 3. loop over the range and for each value print currentvalue and currentvalueˆ2 and
# add currentvalueˆ2 to squaresum
# print "here is the sum of it all"m squaresum
lower <- 1
upper <- 5
squaresum <- 0
for(ii in lower:upper){
cat(ii, ii^2, "\n")
squaresum <- squaresum + ii^2
}
cat("the sum of it all is", squaresum)
# Functions
# self-contained bit of code that performs a task
# example from Alfaro's slide show
# Doubles a numeric value, narrating the operation to the console.
doubler <- function(num){
  result <- num * 2
  cat("witness the awesome power of the doubler\n")
  cat("I changed", num, "to", result, "\n")
  cat("you're welcome!\n")
  result
}
doubler(3)
doubler(8)
# another example
# Prints "Hello, <name>" to the console; returns cat()'s invisible NULL.
greeter <- function(name){
  cat("Hello,", name, "\n")
}
greeter("Bob")
greeter() # if you forget the name, you get an error message
# if you type the name of the function without parenthesis you get a description of the function
# let's try another function
# Prints three motivational reminders addressed to `name`.
reminder <- function(name){
  cat("Keep coding,", name, "\n")
  cat("Don't stop until you are done,", name, "\n")
  cat("It's worth the trouble,", name, "!\n")
}
reminder("Bruno")
| /Alfaro/In_class_script_Alfaro.R | no_license | bpassarelli/eeb201 | R | false | false | 7,800 | r | # Bruno Passarelli
# 16 September 2015
# this script is for Alfaro's in class exercises
# it follows the "R basics and flow control" slideshow by Alfaro
# when using help, you can copy examples to the script and send it to console to see
# how a certain function works
source("source.example.R")
source
all.I.know.about.life.I.learned.in.grad.school()
library(ape)
# read in tree
tt <- read.tree("tree.tre")
tt
attributes(tt)
head(tt$tip.label)
# read in data
# header indicates that there is a header line in the data (TRUE or T works)
dd <- read.table("data.txt", header = T, as.is = T, sep = "\t")
str(dd) # this shows the structure of an object
attributes(dd)
head(dd) # this shows the first six elements of the object
dim(dd) # this returns the dimensions of an object (92 rows by 2 columns in this example)
dim(dd)[1] # dimensions of the rows
dflength <- dim(dd)[1] # this assigns the dimensions of rows to the object dflength
dim(dd)[2]
runif(1) # generates random uniform distribution
# generate some random size data
size <- runif(dflength) # get 92 random variables (dflength = 92), generate random size data
# use cbind to add this column to the exisitng data frame
head(dd) # use this to check data frame; it will return the first 6 values
head(cbind(dd, size)) # this will add a column "size" to the data frame
# make a new variable and give it a name
newdd <- cbind(dd, size) # use this if you want to keep the original variable dd unaltered
# if you want to overwrite dd to include a variable with a new column, use this:
dd <- cbind(dd, size)
head(dd) # check new variable, you will see the size column added
# acessing data frame elements
names(dd) # this gives you the names of the columns
dd$species # this returns all species
head(dd$species) # check the species column first six values
tail(dd$species) # check the species column last six values
head(dd$size) # check the size column first six values
# use [] after a data frame to access specific cells, rows, and columns
dd[1,] # look at row 1, all columns
dd[,1] # look at all rows, column 1
dd[1:10,] # look at rows 1-10, all columns
dd[5:10,] # rows5-10, all columns
dd[1:10,2] # rows 1-10, column 2
dd[,1] # all rows, column 1
dd[,2] # all rows, column 2
# the which() function does queries with your data
# this one is telling you to look at the mode column and find values equal to MPF
which(dd$mode == 'MPF') # this will list every row number that has MPF in the mode column
# two equal signs (==) is asking if two things are equal
dd$mode == "MPF"
# the above returns T or F values for each value (the ones that equal MPF are TRUE)
dd[which(dd$mode == 'MPF'),]
# the above is more complete, it shows all the data for rows with MPF
# next, we assign the above to a variable called just_mpf
just_mpf <- dd[which(dd$mode == 'MPF'),]
head(just_mpf) # check the data
# R challenge
# make a new data frame with large species only (let's define "large" as > 0.8 )
which(dd$size > 0.8) # this will tell you which rows have the large species
large.spp <- dd[which(dd$size > 0.8),] # this makes the new data frame
head(large.spp) # check the data, it worked!
# checking for NAs
head(dd) # we can see that there is an NA in row 2, column 2
# removing NAs
# one way to get only complete cases
cleaned_1 <- dd[complete.cases(dd),]
head(cleaned_1) # you can see the row 2 was removed
# another method
cleaned_2 <- na.omit(dd)
head(cleaned_2) # same effect as above
dd <- cleaned_1 # reassigned dd to only have complete cases
# renaming data frame entries and matching data objects
# using setdiff
setdiff(dd$species, tt$tip.label) # shows which species in dataset don't match the ones in the tree
# this returned 17 species that didn't match
# sometimes this happens because of mispellings
# here is one way to fix this
dd$species[which(dd$species == "Chaetodon_plebius")]<-"Chaetodon_plebeius"
# if we rerun setdiff only 16 species won't match this time
# matching rest of data to tree
del_from_data <- setdiff(dd$species, tt$tip.label)
match(del_from_data, rownames(dd)) # this will show the row numbers in dd that don't match in tt
# * this returned the values (all NA), instead of row numbers in slide
# create new variable dd.prunned
dd.prunned <- dd[-match(del_from_data, rownames(dd)),] # now let's check again for overlap
setdiff(dd.prunned$species, tt$tip.label) # this should return 0 (or NA) mismatches
# matching tree to data
# use setdiff again but switch the arguments
not.in.dd <- setdiff(tt$tip.label, dd.prunned$species)
length(not.in.dd) # will return a large number *this was different than example in slide
head(not.in.dd) # at least head matches
# now need to use drop.tip()
?drop.tip
# always assign short, very descriptive names to variables
# \n tells R that there is a new line
# Common control statements
# "for" commands
# whatever is inside the braces will be performed for each of the elements
# in the parentheses
# below is an example from slide show
for(ii in 1:5){
cat("\nthe number is", ii)
}
# you can also loop over all items in a vector
notfish <- c("bat", "dolphin", "toad", "soldier")
for(animal in notfish){
cat(animal, "fish\n", sep="")
}
# cat puts together whatever element is in our loop plus the word fish
for(animal in notfish){
cat(animal, "fish", " are tasty\n", sep="")
}
# while statements - same general structures as "for" statements
# as long as the condition is true it keeps doing something
xx <- 1
while(xx < 5){
xx <- xx+1;
cat("value of xx", xx, "\n")
}
print(xx)
xx <- 1
while(xx < 5){
xx <- xx+1;
cat("value of xx", xx, "\n")
if( xx == 3){
break; }
}
print(xx)
# "if" statements
# allow your code to diverge depending on conditions
# if(condition is true){do something}
for(ii in 1:6){
if(ii %% 2){
cat(ii, "is odd\n")
}
else{
cat(ii, "is even\n")
}
}
# a few more examples
xx <- 1
while(xx < 10){
cat("value of xx", xx, "\n")
xx <- xx+1;
if(xx == 7){
cat("lucky number", xx, "\n")}
else
{cat("not excited about the number", xx, "\n")
}
}
print(xx)
# if you want the number 1 to be included, place xx <- xx+1; under else
xx <- 1
while(xx < 10){
cat("value of xx", xx, "\n")
if( xx == 7){
cat("lucky number" , xx, "\n")}
else
{cat("not excited about the number", xx, "\n")}
xx <- xx+1;
}
print(xx)
# Pseudocode
# plan out structure and flow of your program
# syntax is not important, think about VARIABLES AND CONTROL STRUCTURE
# can be translated across languages
# pseudocode example from slide
# script that prints a number and its square over a given range of integers and then
# sum all squares
# 1. set upper and lower range values
# 2. setsquaresum to 0
# 3. loop over the range and for each value print currentvalue and currentvalueˆ2 and
# add currentvalueˆ2 to squaresum
# print "here is the sum of it all"m squaresum
lower <- 1
upper <- 5
squaresum <- 0
for(ii in lower:upper){
cat(ii, ii^2, "\n")
squaresum <- squaresum + ii^2
}
cat("the sum of it all is", squaresum)
# Functions
# self-contained bit of code that performs a task
# example from Alfaro's slide show
doubler <- function(num){
doubled <- 2 * num
cat("witness the awesome power of the doubler\n")
cat("I changed", num, "to", doubled, "\n")
cat("you're welcome!\n")
return(doubled)
}
doubler(3)
doubler(8)
# another example
greeter <- function(name){
cat("Hello,", name, "\n")
}
greeter("Bob")
greeter() # if you forget the name, you get an error message
# if you type the name of the function without parenthesis you get a description of the function
# let's try another function
reminder <- function(name){
cat("Keep coding,", name, "\n")
cat("Don't stop until you are done,", name, "\n")
cat("It's worth the trouble,", name, "!\n")
}
reminder("Bruno")
|
library(MASS)
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
#function that fits two Gaussian process regressions,
#one on the left side of the discontinuity, and one to the right.
#Requires the stan file twoGPsOnlyCovarianceParamsWithSlope.stan
#ARGUMENTS:
# data: a data.frame that has the running variable denoted as "x"
# and an outcome denoted as "y"
# boundary: the value of x that denotes where the discontinuity is; default is 0.
# iters: number of iterations/draws for each chain in the MCMC for the Bayesian model
# (which is run in rstan).
# default is iters = 1000 for demonstration, but for actual application
# this should be much higher, say 50,000 or 100,000.
# chains: number of chains to run in the MCMC for the Bayesian model. default is 2.
# Note: The total number of posterior draws this provides is (iters*chains)/2;
# it's divided by 2 because half of the draws from each chain are thrown
# out after burn-in.
#Fit two Gaussian process regressions, one on each side of the discontinuity,
#by sampling the Stan model twoGPsOnlyCovarianceParamsWithSlope.stan.
#See the argument documentation in the comment block above.
#Returns the stanfit object produced by rstan::stan().
fitTwoGPs = function(data, boundary = 0, iters = 1000, chains = 2){
  #observations strictly left of the boundary form side 1; the rest, side 2
  left <- which(data$x < boundary)
  right <- which(data$x >= boundary)
  stanData <- list(x1 = data$x[left],  y1 = data$y[left],
                   x2 = data$x[right], y2 = data$y[right],
                   N1 = length(left),  N2 = length(right))
  stan(file = "twoGPsOnlyCovarianceParamsWithSlope.stan",
       data = stanData,
       iter = iters,
       chains = chains)
}
#After the Bayesian model using fitTwoGPs() is run,
#this function performs kriging (or extrapolation) to obtain
#posterior draws for the treatment effect at the boundary in an RDD.
# n: for each draw of the posterior, the number of draws from the predictive
# posterior distribution of the Gaussian process that are obtained. default is 1,
# which is fine for inference.
# data: a data.frame that has the running variable denoted as "x"
# and an outcome denoted as "y"
# stanFit: the object returned by fitTwoGPs()
# boundary: the value of x that denotes where the discontinuity is; default is 0.
# length: the number of points specified for kriging on either side of the boundary.
# "length" does not affect the precision of inference for the average treatment effect,
# but making length larger can be useful for visualizations of the mean response functions.
# "length" is only used in the getPredX1() and getPredX2() functions;
# more details about these functions are below.
# Note: this function returns a matrix with (n*iters*chains/2)-many rows and
# (Nt + Nc + 2*length)-many columns,
#where Nt and Nc are the number of units in treatment and control, respectively.
# Importantly, the (Nc + length)-th column denotes the posterior draws for the response
# at the boundary on the left-hand side (assuming the left-hand side is the control),
# and the (Nc + length + 1)-th column denotes the posterior draws for the response
# at the boundary on the right-hand side.
#After the Bayesian model using fitTwoGPs() is run, perform kriging
#(extrapolation) of the two fitted GPs; see the argument documentation above.
#Returns cbind(krigingLeft, krigingRight): the first length(x1) + length
#columns are the left-side (control) predictions and the remaining
#length + length(x2) columns are the right-side predictions, so column
#Nc + length is the left-side value at the boundary and column
#Nc + length + 1 is the right-side value at the boundary.
performKrigingSameParams = function(n = 1, data, stanFit, boundary = 0, length = 10){
#split the data at the boundary (left of the boundary vs. at/right of it)
x1 = data$x[which(data$x < boundary)]
y1 = data$y[which(data$x < boundary)]
x2 = data$x[which(data$x >= boundary)]
y2 = data$y[which(data$x >= boundary)]
#prediction locations: the observed xs plus `length` points up to the boundary
predX1 = getPredX1(data = data, boundary = boundary, length = length)
predX2 = getPredX2(data = data, boundary = boundary, length = length)
#extract the variables from the stanFit object
#(note: this rebinds stanFit from a stanfit object to a named list of draws)
stanFit = extract(stanFit)
#first, perform kriging on the left side; sigmasq/phi/etasq are shared
#between the two sides ("SameParams"), while mu/beta are side-specific
krigingLeft = drawPostPredY(n = n,
mu = stanFit$mu1,
beta = stanFit$beta1,
sigmasq = stanFit$sigmasq,
phi = stanFit$phi,
etasq = stanFit$etasq,
X = x1,
Y = y1,
predX = predX1)
#now perform kriging on the right side
krigingRight = drawPostPredY(n = n,
mu = stanFit$mu2,
beta = stanFit$beta2,
sigmasq = stanFit$sigmasq,
phi = stanFit$phi,
etasq = stanFit$etasq,
X = x2,
Y = y2,
predX = predX2)
#krigingLeft and krigingRight are matrices; cbind them so the left-side
#columns come first, followed by the right-side columns
krigingMatrix = cbind(krigingLeft, krigingRight)
return(krigingMatrix)
}
#Draw from the posterior predictive distribution of the Gaussian process.
#Details about this process can be found in the Branson et al. RDD paper.
#ARGUMENTS:
# n: number of predictive draws per posterior parameter draw.
# mu, beta: posterior draws of the GP mean intercept and slope (one per draw).
# sigmasq, phi, etasq: posterior draws of the covariance parameters (vectors
#   of equal length; one entry per posterior draw).
# X, Y: observed running variable and outcome on one side of the boundary.
# predX: locations at which to krige (extrapolate) the GP.
#RETURNS a matrix with n*length(sigmasq) rows and length(predX) columns.
drawPostPredY = function(n = 1, mu, sigmasq, phi, etasq, X, Y, predX, beta){
#pairwise differences, vectorized with outer() instead of double loops
distances = outer(X, X, "-")
#differences among the *new* X values
predDistances = outer(predX, predX, "-")
#differences between each observed X (rows) and each new X (columns)
predXDistances = outer(X, predX, "-")
#sigmasq and etasq are parameters whose posterior we already drew from
#using the Stan model. For each posterior draw of the parameters, draw
#from the posterior predictive distribution at each predX, giving
#length(sigmasq)-many draws in total.
nDraws = length(sigmasq)
#preallocate the list of draws
#(the previous code used list(length = nDraws), which is a one-element
#list named "length", not a preallocation)
posteriorPredictiveDraws = vector("list", nDraws)
for(m in seq_len(nDraws)){
#the covariance matrix for the Xs (squared-exponential kernel + nugget)
estCovMat = sigmasq[m]*exp(-phi[m]*distances^2) + diag(etasq[m], length(X))
#the covariance matrix for the new Xs
predCovMat = sigmasq[m]*exp(-phi[m]*predDistances^2)
#the cross-covariance matrix between the Xs and new Xs
predCrossCovMat = sigmasq[m]*exp(-phi[m]*predXDistances^2)
#solve the linear system once instead of inverting estCovMat twice
crossSolve = t(predCrossCovMat)%*%solve(estCovMat)
#using conditional MVN theory: mean of p(predY | X, Y, theta)
predYMean = (mu[m] + predX*beta[m]) + crossSolve%*%(Y - mu[m] - X*beta[m])
#and the covariance matrix of this distribution
predYCovMat = predCovMat - crossSolve%*%predCrossCovMat
#draw from the posterior predictive distribution for each predX
posteriorPredictiveDraws[[m]] = mvrnorm(n = n, mu = predYMean, Sigma = predYCovMat)
}
#collapse the list of per-draw matrices into one big matrix
do.call(rbind, posteriorPredictiveDraws)
}
#function that obtains data points to be used for kriging
#Essentially, this function chooses "length" many points between
#the right-most point on the left-hand-side of the boundary,
#as well as "length" many points between
#the left-most point on the right-hand-side of the boundary.
#This then returns Nt + length + Nc + length points,
#where Nt and Nc are the number of units in treatment and control, respectively.
#Prediction locations on the left of the boundary: the observed left-side x
#values followed by `length` evenly spaced points running from the right-most
#left-side observation up to the boundary itself.
getPredX1 = function(data, boundary, length = 10){
  leftX <- data$x[which(data$x < boundary)]
  c(leftX, seq(max(leftX), boundary, length = length))
}
#Prediction locations on the right of the boundary: `length` evenly spaced
#points running from the boundary to the left-most right-side observation,
#followed by the observed right-side x values.
getPredX2 = function(data, boundary, length = 10){
  rightX <- data$x[which(data$x >= boundary)]
  c(seq(boundary, min(rightX), length = length), rightX)
}
#Worked example reproducing the Minutes Played RDD analysis from the paper.
pickMeans.mp = readRDS("pickMeans.mp.rds")
#in this example, the boundary is 30.5
boundary = 30.5
#the number of treatment and control units is
#NOTE(review): the split here uses <= / > while fitTwoGPs() splits with
#< / >= ; the two agree only when no x equals the boundary (true for a
#half-integer boundary against integer draft picks — confirm for other data).
Nc = length(which(pickMeans.mp$x <= boundary))
Nt = length(which(pickMeans.mp$x > boundary))
#number of kriging points on each side of the boundary
#(this shadows base::length as a value, but calls like length(x) still
#resolve to the function)
length = 10
#obtain the Bayesian model fit from rstan
fit.test = fitTwoGPs(data = pickMeans.mp, iters = 1000, boundary = boundary)
#perform kriging (i.e., extrapolation of the Gaussian process regressions to the boundary)
kriging.test = performKrigingSameParams(n = 1, data = pickMeans.mp, stanFit = fit.test, length = length, boundary = boundary)
#finally, obtain posterior draws of the treatment effect.
#This just corresponds to the difference in posterior draws for
# the response at the boundary on the right-hand side and
# the response at the boundary on the left-hand side
treatmentEffect.test = kriging.test[,Nc + length + 1] - kriging.test[,Nc + length]
#the corresponding point estimate and confidence interval should be close to
#what is reported for Minutes Played in Table 1 in the paper:
#(note that in the paper we set iters = 50000, which will take a bit more time to run.)
mean(treatmentEffect.test)
quantile(treatmentEffect.test, probs = c(0.025, 0.975))
| /branson2019RDDCode.R | no_license | zjbranson/Branson-2019-Regression-Discontinuity-Replication-Files | R | false | false | 9,089 | r | library(MASS)
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
#function that fits two Gaussian process regressions,
#one on the left side of the discontinuity, and one to the right.
#Requires the stan file twoGPsOnlyCovarianceParamsWithSlope.stan
#ARGUMENTS:
# data: a data.frame that has the running variable denoted as "x"
# and an outcome denoted as "y"
# boundary: the value of x that denotes where the discontinuity is; default is 0.
# iters: number of iterations/draws for each chain in the MCMC for the Bayesian model
# (which is run in rstan).
# default is iters = 1000 for demonstration, but for actual application
# this should be much higher, say 50,000 or 100,000.
# chains: number of chains to run in the MCMC for the Bayesian model. default is 2.
# Note: The total number of posterior draws this provides is (iters*chains)/2;
# it's divided by 2 because half of the draws from each chain are thrown
# out after burn-in.
#Fit two Gaussian process regressions, one on each side of the discontinuity,
#by sampling the Stan model twoGPsOnlyCovarianceParamsWithSlope.stan.
#See the argument documentation in the comment block above.
#Returns the stanfit object produced by rstan::stan().
fitTwoGPs = function(data, boundary = 0, iters = 1000, chains = 2){
  #observations strictly left of the boundary form side 1; the rest, side 2
  left <- which(data$x < boundary)
  right <- which(data$x >= boundary)
  stanData <- list(x1 = data$x[left],  y1 = data$y[left],
                   x2 = data$x[right], y2 = data$y[right],
                   N1 = length(left),  N2 = length(right))
  stan(file = "twoGPsOnlyCovarianceParamsWithSlope.stan",
       data = stanData,
       iter = iters,
       chains = chains)
}
#After the Bayesian model using fitTwoGPs() is run,
#this function performs kriging (or extrapolation) to obtain
#posterior draws for the treatment effect at the boundary in an RDD.
# n: for each draw of the posterior, the number of draws from the predictive
# posterior distribution of the Gaussian process that are obtained. default is 1,
# which is fine for inference.
# data: a data.frame that has the running variable denoted as "x"
# and an outcome denoted as "y"
# stanFit: the object returned by fitTwoGPs()
# boundary: the value of x that denotes where the discontinuity is; default is 0.
# length: the number of points specified for kriging on either side of the boundary.
# "length" does not affect the precision of inference for the average treatment effect,
# but making length larger can be useful for visualizations of the mean response functions.
# "length" is only used in the getPredX1() and getPredX2() functions;
# more details about these functions are below.
# Note: this function returns a matrix with (n*iters*chains/2)-many rows and
# (Nt + Nc + 2*length)-many columns,
#where Nt and Nc are the number of units in treatment and control, respectively.
# Importantly, the (Nc + length)-th column denotes the posterior draws for the response
# at the boundary on the left-hand side (assuming the left-hand side is the control),
# and the (Nc + length + 1)-th column denotes the posterior draws for the response
# at the boundary on the right-hand side.
performKrigingSameParams = function(n = 1, data, stanFit, boundary = 0, length = 10){
x1 = data$x[which(data$x < boundary)]
y1 = data$y[which(data$x < boundary)]
x2 = data$x[which(data$x >= boundary)]
y2 = data$y[which(data$x >= boundary)]
predX1 = getPredX1(data = data, boundary = boundary, length = length)
predX2 = getPredX2(data = data, boundary = boundary, length = length)
#extract the variables from the stanFit object
stanFit = extract(stanFit)
#first, perform kriging on the left side
krigingLeft = drawPostPredY(n = n,
mu = stanFit$mu1,
beta = stanFit$beta1,
sigmasq = stanFit$sigmasq,
phi = stanFit$phi,
etasq = stanFit$etasq,
X = x1,
Y = y1,
predX = predX1)
#now perform kriging on the right side
krigingRight = drawPostPredY(n = n,
mu = stanFit$mu2,
beta = stanFit$beta2,
sigmasq = stanFit$sigmasq,
phi = stanFit$phi,
etasq = stanFit$etasq,
X = x2,
Y = y2,
predX = predX2)
#krigingLeft and krigingRight are matrices
#We'll cbind these matrices, so that
#The first 100 columns are for the left,
#the next 100 columns for the right.
krigingMatrix = cbind(krigingLeft, krigingRight)
return(krigingMatrix)
}
#Draw from the posterior predictive distribution of the Gaussian process.
#Details about this process can be found in the Branson et al. RDD paper.
#ARGUMENTS:
# n: number of predictive draws per posterior parameter draw.
# mu, beta: posterior draws of the GP mean intercept and slope (one per draw).
# sigmasq, phi, etasq: posterior draws of the covariance parameters (vectors
#   of equal length; one entry per posterior draw).
# X, Y: observed running variable and outcome on one side of the boundary.
# predX: locations at which to krige (extrapolate) the GP.
#RETURNS a matrix with n*length(sigmasq) rows and length(predX) columns.
drawPostPredY = function(n = 1, mu, sigmasq, phi, etasq, X, Y, predX, beta){
#pairwise differences, vectorized with outer() instead of double loops
distances = outer(X, X, "-")
#differences among the *new* X values
predDistances = outer(predX, predX, "-")
#differences between each observed X (rows) and each new X (columns)
predXDistances = outer(X, predX, "-")
#sigmasq and etasq are parameters whose posterior we already drew from
#using the Stan model. For each posterior draw of the parameters, draw
#from the posterior predictive distribution at each predX, giving
#length(sigmasq)-many draws in total.
nDraws = length(sigmasq)
#preallocate the list of draws
#(the previous code used list(length = nDraws), which is a one-element
#list named "length", not a preallocation)
posteriorPredictiveDraws = vector("list", nDraws)
for(m in seq_len(nDraws)){
#the covariance matrix for the Xs (squared-exponential kernel + nugget)
estCovMat = sigmasq[m]*exp(-phi[m]*distances^2) + diag(etasq[m], length(X))
#the covariance matrix for the new Xs
predCovMat = sigmasq[m]*exp(-phi[m]*predDistances^2)
#the cross-covariance matrix between the Xs and new Xs
predCrossCovMat = sigmasq[m]*exp(-phi[m]*predXDistances^2)
#solve the linear system once instead of inverting estCovMat twice
crossSolve = t(predCrossCovMat)%*%solve(estCovMat)
#using conditional MVN theory: mean of p(predY | X, Y, theta)
predYMean = (mu[m] + predX*beta[m]) + crossSolve%*%(Y - mu[m] - X*beta[m])
#and the covariance matrix of this distribution
predYCovMat = predCovMat - crossSolve%*%predCrossCovMat
#draw from the posterior predictive distribution for each predX
posteriorPredictiveDraws[[m]] = mvrnorm(n = n, mu = predYMean, Sigma = predYCovMat)
}
#collapse the list of per-draw matrices into one big matrix
do.call(rbind, posteriorPredictiveDraws)
}
#function that obtains data points to be used for kriging
#Essentially, this function chooses "length" many points between
#the right-most point on the left-hand-side of the boundary,
#as well as "length" many points between
#the left-most point on the right-hand-side of the boundary.
#This then returns Nt + length + Nc + length points,
#where Nt and Nc are the number of units in treatment and control, respectively.
#Prediction locations on the left of the boundary: the observed left-side x
#values followed by `length` evenly spaced points running from the right-most
#left-side observation up to the boundary itself.
getPredX1 = function(data, boundary, length = 10){
  leftX <- data$x[which(data$x < boundary)]
  c(leftX, seq(max(leftX), boundary, length = length))
}
#Prediction locations on the right of the boundary: `length` evenly spaced
#points running from the boundary to the left-most right-side observation,
#followed by the observed right-side x values.
getPredX2 = function(data, boundary, length = 10){
  rightX <- data$x[which(data$x >= boundary)]
  c(seq(boundary, min(rightX), length = length), rightX)
}
pickMeans.mp = readRDS("pickMeans.mp.rds")
#in this example, the boundary is 30.5
boundary = 30.5
#the number of treatment and control units is
Nc = length(which(pickMeans.mp$x <= boundary))
Nt = length(which(pickMeans.mp$x > boundary))
length = 10
#obtain the Bayesian model fit from rstan
fit.test = fitTwoGPs(data = pickMeans.mp, iters = 1000, boundary = boundary)
#perform kriging (i.e., extrapolation of the Gaussian process regressions to the boundary)
kriging.test = performKrigingSameParams(n = 1, data = pickMeans.mp, stanFit = fit.test, length = length, boundary = boundary)
#finally, obtain posterior draws of the treatment effect.
#This just corresponds to the difference in posterior draws for
# the response at the boundary on the right-hand side and
# the response at the boundary on the left-hand side
treatmentEffect.test = kriging.test[,Nc + length + 1] - kriging.test[,Nc + length]
#the corresponding point estimate and confidence interval should be close to
#what is reported for Minutes Played in Table 1 in the paper:
#(note that in the paper we set iters = 50000, which will take a bit more time to run.)
mean(treatmentEffect.test)
quantile(treatmentEffect.test, probs = c(0.025, 0.975))
|
\name{spwb_sensitivity}
\alias{spwb_sensitivity}
\title{
Sensitivity analysis for soil plant water balance simulations
}
\description{
Performs a set of calls to \code{\link{spwb}} with the aim to determine the sensitivity to particular parameters.
}
\usage{
spwb_sensitivity(x, soil, meteo,
paramType = "above", paramName = "LAI_live", cohort = NA,
p_change = c(-80,-40,-20,0,20,40,80),
summary.fun = NULL, simplify=TRUE,...)
}
\arguments{
\item{x}{An object of class \code{\link{spwbInput}}.}
\item{soil}{A list containing the description of the soil (see \code{\link{soil}}).}
\item{meteo}{A data frame with daily meteorological data series (see \code{\link{spwb}}).}
\item{paramType}{Data frame of \code{x} to modify.}
\item{paramName}{Name of the parameter to modify.}
\item{cohort}{Integer with the cohort to modify (if \code{NA}, parameter values of all cohorts are modified).}
\item{p_change}{Numerical vector with percentages of change.}
\item{summary.fun}{Summary function to be applied to the results of each simulation. }
\item{simplify}{Whether the result of \code{summary.fun} should be simplified (see \code{\link{sapply}}). }
\item{...}{Additional parameters to function \code{\link{spwb}}.}
}
\details{
Due to parameter dependence, modifying some parameters affects others:
\itemize{
\item{Setting \code{paramName = "Z50/Z95"} affects \code{below$V}, \code{below$VCroot_kmax} and \code{below$VGrhizo_kmax}.}
\item{Modifying \code{LAI_live} also affects \code{LAI_expanded}.}
\item{Modifying \code{VCroot_kmax} from \code{paramsTranspiration} affects both \code{VCroot_kmax} and \code{below$VCroot_kmax}.}
\item{Modifying \code{WaterStorage} affects simultaneously \code{Vleaf} and \code{Vsapwood} from \code{paramsWaterStorage}.}
\item{Modifying \code{c} from \code{paramsTranspiration} affects simultaneously \code{VCleaf_c}, \code{VCstem_c} and \code{VCroot_c}.}
\item{Modifying \code{d} from \code{paramsTranspiration} affects simultaneously \code{VCleaf_d}, \code{VCstem_d} and \code{VCroot_d}.}
\item{Modifying \code{Plant_kmax} from \code{paramsTranspiration} affects \code{VCleaf_kmax}, \code{VCstem_kmax}, \code{VCroot_kmax} and \code{below$VCroot_kmax}.}
\item{Modifying \code{Al2As} from \code{paramsAnatomy} affects \code{Vsapwood} in \code{paramsWaterStorage}, \code{VCstem_kmax} and \code{VCroot_kmax} of \code{paramsTranspiration} and \code{below$VCroot_kmax}.}
\item{Setting \code{paramName = "Vmax298/Jmax298"} affects both \code{Vmax298} and \code{Jmax298} from \code{paramsTranspiration}.}
}
}
\value{
If \code{summary.fun = NULL} the function returns a list whose elements are the result of calling \code{\link{spwb}}. Otherwise, the function applies \code{summary.fun} to each simulation result and returns these summaries (actually, a call to \code{\link{sapply}} is done).
}
\author{
Miquel De \enc{Cáceres}{Caceres} Ainsa, CTFC
}
\seealso{
\code{\link{spwb}}, \code{\link{summary.spwb}}
}
\examples{
\dontrun{
#Load example data and species parameters
data(examplemeteo)
data(exampleforestMED)
data(SpParamsMED)
#Initialize input
examplesoil = soil(defaultSoilParams(2))
control = defaultControl()
x = forest2spwbInput(exampleforestMED,examplesoil, SpParamsMED, control)
#Perform sensitivity analysis
res = spwb_sensitivity(x, examplesoil, examplemeteo, latitude = 41, elevation = 100)
}
}
| /man/spwb_sensitivity.Rd | no_license | fdbesanto2/medfate | R | false | false | 3,424 | rd | \name{spwb_sensitivity}
\alias{spwb_sensitivity}
\title{
Sensitivity analysis for soil plant water balance simulations
}
\description{
Performs a set of calls to \code{\link{spwb}} with the aim to determine the sensitivity to particular parameters.
}
\usage{
spwb_sensitivity(x, soil, meteo,
paramType = "above", paramName = "LAI_live", cohort = NA,
p_change = c(-80,-40,-20,0,20,40,80),
summary.fun = NULL, simplify=TRUE,...)
}
\arguments{
\item{x}{An object of class \code{\link{spwbInput}}.}
\item{soil}{A list containing the description of the soil (see \code{\link{soil}}).}
\item{meteo}{A data frame with daily meteorological data series (see \code{\link{spwb}}).}
\item{paramType}{Data frame of \code{x} to modify.}
\item{paramName}{Name of the parameter to modify.}
\item{cohort}{Integer with the cohort to modify (if \code{NA} parameter values of all cohort are modified).}
\item{p_change}{Numerical vector with percentages of change.}
\item{summary.fun}{Summary function to be applied to the results of each simulation. }
\item{simplify}{Whether the result of \code{summary.fun} should be simplified (see \code{\link{sapply}}). }
\item{...}{Additional parameters to function \code{\link{spwb}}.}
}
\details{
Due to parameter dependence, modifying some parameters affects others:
\itemize{
\item{Setting \code{paramName = "Z50/Z95"} affects \code{below$V}, \code{below$VCroot_kmax} and \code{below$VGrhizo_kmax}.}
\item{Modifying \code{LAI_live} also affects \code{LAI_expanded}.}
\item{Modifying \code{VCroot_kmax} from \code{paramsTranspiration} affects both \code{VCroot_kmax} and \code{below$VCroot_kmax}.}
\item{Modifying \code{WaterStorage} affects simultaneously \code{Vleaf} and \code{Vsapwood} from \code{paramsWaterStorage}.}
\item{Modifying \code{c} from \code{paramsTranspiration} affects simultaneously \code{VCleaf_c}, \code{VCstem_c} and \code{VCroot_c}.}
\item{Modifying \code{d} from \code{paramsTranspiration} affects simultaneously \code{VCleaf_d}, \code{VCstem_d} and \code{VCroot_d}.}
\item{Modifying \code{Plant_kmax} from \code{paramsTranspiration} affects \code{VCleaf_kmax}, \code{VCstem_kmax}, \code{VCroot_kmax} and \code{below$VCroot_kmax}.}
\item{Modifying \code{Al2As} from \code{paramsAnatomy} affects \code{Vsapwood} in \code{paramsWaterStorage}, \code{VCstem_kmax} and \code{VCroot_kmax} of \code{paramsTranspiration} and \code{below$VCroot_kmax}.}
\item{Setting \code{paramName = "Vmax298/Jmax298"} affects both \code{Vmax298} and \code{Jmax298} from \code{paramsTranspiration}.}
}
}
\value{
If \code{summary.fun = NULL} the function returns a list whose elements are the result of calling \code{\link{spwb}}. Otherwise, the function applies \code{summary.fun} to each simulation result and returns these summaries (actually, a call to \code{\link{sapply}} is done).
}
\author{
Miquel De \enc{Cáceres}{Caceres} Ainsa, CTFC
}
\seealso{
\code{\link{spwb}}, \code{\link{summary.spwb}}
}
\examples{
\dontrun{
#Load example data and species parameters
data(examplemeteo)
data(exampleforestMED)
data(SpParamsMED)
#Initialize input
examplesoil = soil(defaultSoilParams(2))
control = defaultControl()
x = forest2spwbInput(exampleforestMED,examplesoil, SpParamsMED, control)
#Perform sensitivity analysis
res = spwb_sensitivity(x, examplesoil, examplemeteo, latitude = 41, elevation = 100)
}
}
|
### Functions
# Return the positions at which runs of (arithmetically) sequential values end.
#
# Walks `vector` and records every index `iter` at which vector[iter + 1] is
# NOT vector[iter] + diff, i.e. the last index of each maximal run with common
# step `diff`. The final index is always included, since the last run
# necessarily ends there.
#
# Args:
#   vector: a numeric vector to scan.
#   diff:   the expected step between consecutive elements (default 1).
# Returns:
#   a numeric vector of run-end positions.
is.sequential <- function(vector, diff = 1){
  i <- seq_len(length(vector))
  gap <- numeric()
  for (iter in i[-length(i)]){
    if (vector[iter + 1] != vector[iter] + diff){
      # run breaks after position `iter`: record it.
      # (The original code computed c(gap, iter) but discarded the result,
      # so every break was lost and only the final index was returned.)
      gap <- c(gap, iter)
    }
  }
  c(gap, i[length(i)])
}
# For each GRanges object in the input list: compute the gaps between its
# regions, drop the first two columns of the data.frame representation,
# discard the artificial gap starting at position 1, and return the remaining
# gaps as a sorted GRanges object. Returns a list parallel to the input.
gaps_Grange <- function(regionMat_region_Grange){
  future_map(regionMat_region_Grange, ~ {
    gapDf <- as.data.frame(gaps(.x))[-c(1, 2)]
    gapGr <- makeGRangesFromDataFrame(gapDf[gapDf$start > 1, ], keep.extra.columns = TRUE)
    sort.GenomicRanges(gapGr)
  })
}
# Expand each (FollowerRegion, LeaderRegion) row into the full run of region
# indices it spans, and return the unique indices across all rows. These are
# the regions that were merged into a compiled region and must be excluded.
exclude_compiled_regions <- function(PrecedingRegion_each_Chr){
  spans <- map(seq_len(nrow(PrecedingRegion_each_Chr)), ~ {
    seq(as.numeric(PrecedingRegion_each_Chr[.x, "FollowerRegion"]),
        as.numeric(PrecedingRegion_each_Chr[.x, "LeaderRegion"]))
  })
  unique(unlist(spans))
}
# This is the core function.
#
# NewRegions(): for one (MCC, MRG) cell of the parameter grid, merges expressed
# regions separated by gaps of at most MRG base pairs into new compiled
# regions, saves the result, optionally computes "Delta" agreement statistics
# against a set of non-overlapped exons, and finally advances the on-disk
# MRG / RegionMat_Path state files to the next grid cell.
#
# ARGUMENTS:
#   MCC_MRG_Grid: two-column grid of (MCC, MRG) parameter combinations.
#   fullCov_Path: path to an .RData file holding full base-pair coverage.
#   DBDir_Path: directory holding the state files (RegionMat_Path.RData,
#               MRG.RData, Deltas.RData, ...).
#   CalclulateDelta: if TRUE, also compute the Delta statistics. (The
#               parameter name keeps its original spelling for caller
#               compatibility.)
#
# NOTE(review): this function has several apparent defects, flagged inline
# below; as written it does not parse/run.
NewRegions <- function(MCC_MRG_Grid, fullCov_Path, DBDir_Path, CalclulateDelta = FALSE ){
# NOTE(review): `file.exist` is not an R function (base has `file.exists`),
# and the leading `!` makes this abort when the file DOES exist;
# `stopifnot(file.exists(...))` was presumably intended here and below.
stopifnot(!file.exist(file.path(paste0(DBDir_Path,"RegionMat_Path.RData"))))
load( file = file.path(paste0(DBDir_Path,"RegionMat_Path.RData"))) ### This will load the variable "RegionMat_Path" containing the path to RegionMat_MCC file
stopifnot(file.exist(file.path(paste0(DBDir_Path,"MRG.RData"))))
load( file = file.path(paste0(DBDir_Path,"MRG.RData")))### This will load the variable "MRG"
MRG_RegionSet <- vector(mode = 'list',length = 2)
stopifnot(file.exist(fullCov_Path))
# NOTE(review): load() returns the *names* of the loaded objects (a character
# vector), not the objects themselves, so fullCov is a string here; the
# coverage object itself is loaded into this frame under its saved name.
fullCov <- load(file = file.path(fullCov_Path))
Deltas_file_path <- file.path(paste0(DBDir_Path,"Deltas.RData"))
SaveDir <- paste0(dirname(RegionMat_Path),"/")
RegionMat <- load(file = file.path(RegionMat_Path))
# NOTE(review): `.subset2(x, i) <- value` is not valid R (there is no
# `.subset2<-` replacement function); `MRG_RegionSet[[1]] <- ...` was likely
# intended here and on the next line.
.subset2(MRG_RegionSet,1) <- future_map(RegionMat, ~ .subset2(.x, 1)) #### The Structure of RegionMat is RegionMat$Chr$regions
.subset2(MRG_RegionSet,2) <- future_map(.subset2(MRG_RegionSet, 1), ~ gaps_Grange(.x))
gc()
# Only (re)build the merged regions if the output file does not exist yet.
if (!file.exist(file.path(paste0(SaveDir, paste(basename(SaveDir), as.character(MRG), sep = "_" ), ".RData" )) )) {
## Filtering gaps: keep only inter-region gaps no wider than MRG bp
gap_filtered <- future_map(.subset2(MRG_RegionSet,2), ~ .x[which(width(.x) <= as.integer(MRG)),])
## Finding preceding regions: for each retained gap, the region it precedes
preceding_region <- future_map2(gap_filtered, .subset2(MRG_RegionSet,1), ~ tibble(Leader_Region = precede(..1, ..2 )) %>% drop_na() )
# preceding_region is a list of 27(number of chromosomes) elements
###
Leader_Follower_Index <- future_map(preceding_region, ~ tibble(LeaderIndex = is.sequential(.subset2(.x,1))) %>%
mutate(FollowerIndex = dplyr::lag(LeaderIndex + 1, default = 1)) ) #??? What about the last one in preceding_region
### Retrieving corresponding regions using indices from preceding regions vector (list of 27)
Leader_Follower_PrecedingRegion <- future_map2(preceding_region, Leader_Follower_Index,
~tibble(LeaderRegion = ..1[.subset2(..2, 1)]+1 ), FollowerRegion = ..1[.subset2(..2,2)])
# NOTE(review): `FollowerRegion = ...` above sits *outside* the tibble() call
# (it is passed as an extra argument to future_map2), so the tibble only gets
# the LeaderRegion column; the closing parenthesis is probably misplaced.
names(Leader_Follower_PrecedingRegion) <- paste0("Chr", as.character(c(1:26,"X")))
### Constituting new regions
New_Regions_Seqnames <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~ seqnames(..2[.subset2(..1, 2),]))
New_Regions_Strand <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~strand(..2[.subset2(..1, 2),]))
New_Regions_Ranges <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~IRanges(start = start(..2[[.subset2(..1, 2)]]), end = end(..2[.subset2(..1, 1),])))
# NOTE(review): purrr::pmap() expects a single list of parallel lists as its
# first argument, i.e. pmap(list(a, b, c), f); passing three lists
# positionally as done here is an error.
New_Regions_GRange <- pmap(New_Regions_Seqnames, New_Regions_Strand, New_Regions_Ranges,
~ GRanges(seqnames = ..1, ranges = ..3, strand = ..2))
### Removing compiled regions from the list of regions
# NOTE(review): the formula below returns the function object itself, not its
# value; `~ exclude_compiled_regions(.x)` is presumably what was meant.
excluding_regions_allChrs <- future_map(Leader_Follower_PrecedingRegion, ~ exclude_compiled_regions)
compiled_regions_excluded_tibble <- future_map2(.subset2(MRG_RegionSet,1) , excluding_regions_allChrs,
~ as_tibble(..1[-c(..2),c("seqnames","ranges","strand")])) ### Might incur problems due to nature of regions as a Grange (not subsettable)
RegionMat_MCC_MRG <- future_map2(compiled_regions_excluded_tibble, New_Regions_GRange, ### filtering for regions longer than 3 bp to exclude microexons
~ list(regions = sort.GenomicRanges(makeGRangesFromDataFrame(dplyr::filter(bind_rows(..1,..2), width(ranges) > 3) ) %>% 'Seqinfo<-'(OarSeqinfo)), #### Retrieving regionCoverage for
bpCoverage = getRegionCoverage(fullCov, sort.GenomicRanges(makeGRangesFromDataFrame(dplyr::filter(bind_rows(..1,..2), width(ranges) > 3) ))) ) )
#### Doing all at once may put such a heavy burden on the RAM (pay attention to above)
# NOTE(review): OarSeqinfo is not defined in this function or visible in this
# file; it is assumed to be a global Seqinfo object — confirm.
names(RegionMat_MCC_MRG) <- seqnames(OarSeqinfo)
# Saving NewRegions
save(RegionMat_MCC_MRG,
file = file.path(paste0(SaveDir, paste(basename(SaveDir), as.character(MRG), sep = "_" ), ".RData" )))
}
### Updating MRG.RData and RegionMat_Path.RData
# Recover the MCC value from the RegionMat file name (non-alphabetic run,
# with the trailing character stripped).
MCC <- as.numeric(stringr::str_remove(stringr::str_extract(string = basename(RegionMat_Path),
pattern = "[^[[:alpha:]]]+"), ".$"))
if (CalclulateDelta){
# NOTE(review): ls() inside a function lists the function's *local* frame,
# so this checks for a local NonOverlappedExons, not a global one.
if (!c("NonOverlappedExons") %in% ls() ){
warning(paste0("NonOverlappedExons was not loaded trying to load it from the", as.character(DBDir_Path),"!"),
call. = FALSE, immediate. = TRUE)
if (!file.exist(paste0(DBDir_Path,"NonOverlappedExons.RData") )) {
warning(paste0("NonOverlappedExons.RData does not exist in the following directory:",
as.character(DBDir_Path)) ,call. = FALSE, immediate. = TRUE)
} else {
load(paste0(DBDir_Path,"NonOverlappedExons.RData"))
}
} else {
if (!file.exist(Deltas_file_path)) {
warning(paste0("Deltas dataframe file was not found in the following directory: ",
as.character(DBDir_Path), ". Attemping to create one."), immediate. = TRUE)
Deltas_names <- tidyr::unite(MCC_MRG_Grid, "MCC_MRG", sep= "_", remove = TRUE )
Deltas <- vector(mode = "list", length = nrow(MCC_MRG_Grid))
names(Deltas) <- as.character(Deltas_names[["MCC_MRG"]])
## Filtering for regions longer than 3 bp has already been done above (width(ranges) > 3)
LaidPairs <- future_map(seq_len(length(NonOverlappedExons)),
~findOverlapPairs(granges(RegionMat_MCC_MRG[[.x]][['regions']]),
granges(NonOverlappedExons[[.x]])), .id = "Chr")
### Filtering ERs laid on multiple exons
# NOTE(review): the next three lines do not parse — `remove = TRUE]` has a
# stray bracket and the pipeline dangles into `%>% see###...`; this Delta
# branch is unfinished work-in-progress.
Ers_without_multi_Exons_Index <- future_map(LaidPairs,~ unite(data.frame(.x@first), col = "Region", sep="-", remove = TRUE]) %>%
table() %>% as_tibble() %>% dplyr::filter(n == 1) %>% dplyr::separate(col = "Region",
into = c("seqnames", "start", "end", "width", "strand"),sep = "-", remove = TRUE) %>% see####################
LaidPairs <- future_map2(LaidPairs, Ers_without_multi_Exons_Index, ~ dplyr::semi_join(x = ..1, y = ..2))
############################################### Yet to be done ####################################################
### We need to prune the ERs laying on multiple exons.Temporarily, we omit these steps due to uncertainty regarding
### applying width() function on a output of findOverlapPairs().
###################################################################################################################
# NOTE(review): the right-hand side of this assignment is commented out, so
# (once parseable) the element would receive the value of the save() call on
# the following line.
Deltas[[paste0(as.character(MCC),"_",as.character(MRG))]] <-
# %>% transmute( DeltaVal = abs(first.start - second.start) + abs(first.end - second.end))
save(Deltas, file = Deltas_file_path)
} else {
load(Deltas_file_path)
# Delta = total absolute disagreement between an ER's endpoints and the
# endpoints of the exon it overlaps.
Deltas[[paste0(as.character(MCC),"_",as.character(MRG))]] <- future_map_dfr(seq_len(length(NonOverlappedExons)),
~as_tibble(findOverlapPairs(ranges(NonOverlappedExons[[..1]]), # We may need to change the query and subject
ranges(RegionMat_MCC_MRG[[..1]][['regions']]))), .id = "Chr") %>%
transmute( DeltaVal = abs(first.start - second.start) + abs(first.end - second.end))
save(Deltas, file = Deltas_file_path)
}
}
}
# Advance the persisted state to the next (MCC, MRG) cell of the grid.
# NOTE(review): `MCC_MRG_Grid[1,]` compares MCC against the first *row*;
# `MCC_MRG_Grid[,1]` (the first column) appears to be intended.
NextMCC_MRG <- MCC_MRG_Grid[which(MCC_MRG_Grid[,2] == MRG & MCC_MRG_Grid[1,] == MCC) + 1 , ]
if (is.na(NextMCC_MRG[1,2])) {
stop(call. = FALSE, "Out of MCC_MRG_Grid range!")
} else {
MRG <- as.numeric(NextMCC_MRG[1,2])
save( MRG , file = file.path(paste0(DBDir_Path,"MRG.RData")))
# NOTE(review): RegionMatsPath is not defined in this function; it must exist
# in the calling environment — confirm.
RegionMat_Path <- stringr::str_subset(list.files(RegionMatsPath, recursive=TRUE, full.names=TRUE),
pattern = paste0("RegionMats_",as.numeric(NextMCC_MRG[1,1]),".RData"))
if (length(RegionMat_Path) != 1) {
#### Because MCC iteration is slower than MRG
stop(paste0(" The initial ", paste0("RegionMats_",as.numeric(NextMCC_MRG[1,1]),".RData"),
" was not found or appears more than once!!!"), call. = FALSE)
}else{
save(RegionMat_Path, file = file.path(paste0(DBDir_Path,"RegionMat_Path.RData")))
}
}
}
| /New Approach/HDD/NewRegionsMCCMRG-Deltas/Functions.R | permissive | MPourjam/ShallSangsari-RNA-Seq | R | false | false | 8,805 | r | ### Functions
# Return the positions at which runs of (arithmetically) sequential values end.
#
# Walks `vector` and records every index `iter` at which vector[iter + 1] is
# NOT vector[iter] + diff, i.e. the last index of each maximal run with common
# step `diff`. The final index is always included, since the last run
# necessarily ends there.
#
# Args:
#   vector: a numeric vector to scan.
#   diff:   the expected step between consecutive elements (default 1).
# Returns:
#   a numeric vector of run-end positions.
is.sequential <- function(vector, diff = 1){
  i <- seq_len(length(vector))
  gap <- numeric()
  for (iter in i[-length(i)]){
    if (vector[iter + 1] != vector[iter] + diff){
      # run breaks after position `iter`: record it.
      # (The original code computed c(gap, iter) but discarded the result,
      # so every break was lost and only the final index was returned.)
      gap <- c(gap, iter)
    }
  }
  c(gap, i[length(i)])
}
# For each GRanges object in the input list: compute the gaps between its
# regions, drop the first two columns of the data.frame representation,
# discard the artificial gap starting at position 1, and return the remaining
# gaps as a sorted GRanges object. Returns a list parallel to the input.
gaps_Grange <- function(regionMat_region_Grange){
  future_map(regionMat_region_Grange, ~ {
    gapDf <- as.data.frame(gaps(.x))[-c(1, 2)]
    gapGr <- makeGRangesFromDataFrame(gapDf[gapDf$start > 1, ], keep.extra.columns = TRUE)
    sort.GenomicRanges(gapGr)
  })
}
# Expand each (FollowerRegion, LeaderRegion) row of the input table into the
# full integer span seq(Follower, Leader) and return the unique union of all
# spans: these are the region indices that were compiled into merged regions
# and must be excluded from the original region list.
#
# PrecedingRegion_each_Chr: data.frame/tibble with numeric "FollowerRegion"
#   and "LeaderRegion" columns (one row per merged run).
# Returns a numeric vector of unique region indices (NULL for zero rows).
#
# Changes: fixed the misspelled local name ('exluding_regions') and replaced
# the purrr map()/formula idiom with base lapply(), removing an unnecessary
# dependency while preserving behaviour.
exclude_compiled_regions <- function(PrecedingRegion_each_Chr) {
  rows <- nrow(PrecedingRegion_each_Chr)
  spans <- lapply(seq_len(rows), function(r) {
    seq(as.numeric(PrecedingRegion_each_Chr[r, "FollowerRegion"]),
        as.numeric(PrecedingRegion_each_Chr[r, "LeaderRegion"]))
  })
  unique(unlist(spans))
}
# This is the core function
# NewRegions(): one step of an iteration over MCC_MRG_Grid.  It restores
# persisted state from DBDir_Path (the current RegionMat path and MRG value),
# merges expressed regions separated by gaps of width <= MRG, optionally
# computes "Delta" distances against NonOverlappedExons, and finally advances
# MRG / RegionMat_Path to the next row of the grid and saves them back.
# NOTE(review): the parameter name 'CalclulateDelta' is misspelled, but it is
# part of the public interface so it is left unchanged here.
NewRegions <- function(MCC_MRG_Grid, fullCov_Path, DBDir_Path, CalclulateDelta = FALSE ){
# NOTE(review): base R provides file.exists(), not file.exist(); every
# file.exist() call in this function would error at run time.  The negation
# here also looks inverted: the very next line load()s this file, so the
# assertion should presumably be that it *does* exist -- TODO confirm.
stopifnot(!file.exist(file.path(paste0(DBDir_Path,"RegionMat_Path.RData"))))
load( file = file.path(paste0(DBDir_Path,"RegionMat_Path.RData"))) ### This will load the variable "RegionMat_Path" containing the path to RegionMat_MCC file
stopifnot(file.exist(file.path(paste0(DBDir_Path,"MRG.RData"))))
load( file = file.path(paste0(DBDir_Path,"MRG.RData")))### This will load the variable "MRG"
# Slot 1 will hold the per-chromosome region sets, slot 2 their gaps.
MRG_RegionSet <- vector(mode = 'list',length = 2)
stopifnot(file.exist(fullCov_Path))
# NOTE(review): load() returns the *names* of the restored objects, so
# 'fullCov' and 'RegionMat' below are character strings, not the objects
# themselves; the data are restored under their originally saved names.
fullCov <- load(file = file.path(fullCov_Path))
Deltas_file_path <- file.path(paste0(DBDir_Path,"Deltas.RData"))
SaveDir <- paste0(dirname(RegionMat_Path),"/")
RegionMat <- load(file = file.path(RegionMat_Path))
# NOTE(review): '.subset2<-' does not exist (.subset2 has no replacement
# form), so the next two assignments will fail; MRG_RegionSet[[1]] <- ... is
# presumably what was intended -- TODO confirm.
.subset2(MRG_RegionSet,1) <- future_map(RegionMat, ~ .subset2(.x, 1)) #### The Structure of RegionMat is RegionMat$Chr$regions
.subset2(MRG_RegionSet,2) <- future_map(.subset2(MRG_RegionSet, 1), ~ gaps_Grange(.x))
gc()
# Only (re)build and save the merged region set if it is not already on disk.
if (!file.exist(file.path(paste0(SaveDir, paste(basename(SaveDir), as.character(MRG), sep = "_" ), ".RData" )) )) {
## Filtering gaps
# Keep only gaps narrow enough (width <= MRG) to be bridged by merging.
gap_filtered <- future_map(.subset2(MRG_RegionSet,2), ~ .x[which(width(.x) <= as.integer(MRG)),])
## Finding receding regions
preceding_region <- future_map2(gap_filtered, .subset2(MRG_RegionSet,1), ~ tibble(Leader_Region = precede(..1, ..2 )) %>% drop_na() )
# preceding_region is a list of 27(number of chromosomes) elements
###
Leader_Follower_Index <- future_map(preceding_region, ~ tibble(LeaderIndex = is.sequential(.subset2(.x,1))) %>%
mutate(FollowerIndex = dplyr::lag(LeaderIndex + 1, default = 1)) ) #??? What about the last one in preceding_region
### Retrieving corresponding regoins using indices from preceding regions vector (list of 27)
# NOTE(review): 'FollowerRegion = ..1[...]' below sits outside the tibble()
# call and outside any lambda, so it is passed as an argument to
# future_map2() itself where '..1' is undefined; the closing parenthesis
# placement looks wrong -- TODO confirm.
Leader_Follower_PrecedingRegion <- future_map2(preceding_region, Leader_Follower_Index,
~tibble(LeaderRegion = ..1[.subset2(..2, 1)]+1 ), FollowerRegion = ..1[.subset2(..2,2)])
names(Leader_Follower_PrecedingRegion) <- paste0("Chr", as.character(c(1:26,"X")))
### Constituting new regioins
New_Regions_Seqnames <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~ seqnames(..2[.subset2(..1, 2),]))
New_Regions_Strand <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~strand(..2[.subset2(..1, 2),]))
New_Regions_Ranges <- future_map2(Leader_Follower_PrecedingRegion, .subset2(MRG_RegionSet, 1),
~IRanges(start = start(..2[[.subset2(..1, 2)]]), end = end(..2[.subset2(..1, 1),])))
New_Regions_GRange <- pmap(New_Regions_Seqnames, New_Regions_Strand, New_Regions_Ranges,
~ GRanges(seqnames = ..1, ranges = ..3, strand = ..2))
### Removing compiled regions from the list of regions
# NOTE(review): '~ exclude_compiled_regions' yields the function object
# itself rather than its result; '~ exclude_compiled_regions(.x)' is
# presumably intended -- TODO confirm.
excluding_regions_allChrs <- future_map(Leader_Follower_PrecedingRegion, ~ exclude_compiled_regions)
compiled_regions_excluded_tibble <- future_map2(.subset2(MRG_RegionSet,1) , excluding_regions_allChrs,
~ as_tibble(..1[-c(..2),c("seqnames","ranges","strand")])) ### Might incur problems due to nature of regions as a Grange (not subsettable)
RegionMat_MCC_MRG <- future_map2(compiled_regions_excluded_tibble, New_Regions_GRange, ### fitering for regions longer than 3 to exclude microexons
~ list(regions = sort.GenomicRanges(makeGRangesFromDataFrame(dplyr::filter(bind_rows(..1,..2), width(ranges) > 3) ) %>% 'Seqinfo<-'(OarSeqinfo)), #### Retrieving regionCoverage for
bpCoverage = getRegionCoverage(fullCov, sort.GenomicRanges(makeGRangesFromDataFrame(dplyr::filter(bind_rows(..1,..2), width(ranges) > 3) ))) ) )
#### Doing all at once may put such a heavy burden on the RAM (pay attention to above)
names(RegionMat_MCC_MRG) <- seqnames(OarSeqinfo)
# Saving NewRegions
save(RegionMat_MCC_MRG,
file = file.path(paste0(SaveDir, paste(basename(SaveDir), as.character(MRG), sep = "_" ), ".RData" )))
}
### Updating MRG.RData and RegionMat_Path.RData
# Recover the numeric MCC from the RegionMat file name (non-alpha run,
# trailing character stripped).
MCC <- as.numeric(stringr::str_remove(stringr::str_extract(string = basename(RegionMat_Path),
pattern = "[^[[:alpha:]]]+"), ".$"))
if (CalclulateDelta){
# Make sure NonOverlappedExons is available before computing Deltas.
if (!c("NonOverlappedExons") %in% ls() ){
warning(paste0("NonOverlappedExons was not loaded trying to load it from the", as.character(DBDir_Path),"!"),
call. = FALSE, immediate. = TRUE)
if (!file.exist(paste0(DBDir_Path,"NonOverlappedExons.RData") )) {
warning(paste0("NonOverlappedExons.RData does not exist in the following directory:",
as.character(DBDir_Path)) ,call. = FALSE, immediate. = TRUE)
} else {
load(paste0(DBDir_Path,"NonOverlappedExons.RData"))
}
} else {
if (!file.exist(Deltas_file_path)) {
warning(paste0("Deltas dataframe file was not found in the following directory: ",
as.character(DBDir_Path), ". Attemping to create one."), immediate. = TRUE)
Deltas_names <- tidyr::unite(MCC_MRG_Grid, "MCC_MRG", sep= "_", remove = TRUE )
Deltas <- vector(mode = "list", length = nrow(MCC_MRG_Grid))
names(Deltas) <- as.character(Deltas_names[["MCC_MRG"]])
## Filtering for regions loonger than 3bp-long has been done on line 80
LaidPairs <- future_map(seq_len(length(NonOverlappedExons)),
~findOverlapPairs(granges(RegionMat_MCC_MRG[[.x]][['regions']]),
granges(NonOverlappedExons[[.x]])), .id = "Chr")
### Filtering Er's laid on multiple Exons
# NOTE(review): the next three lines are syntactically broken (a stray ']'
# after 'remove = TRUE' and the dangling 'see####...' tail); this branch
# cannot parse as written and was evidently left mid-edit.
Ers_without_multi_Exons_Index <- future_map(LaidPairs,~ unite(data.frame(.x@first), col = "Region", sep="-", remove = TRUE]) %>%
table() %>% as_tibble() %>% dplyr::filter(n == 1) %>% dplyr::separate(col = "Region",
into = c("seqnames", "start", "end", "width", "strand"),sep = "-", remove = TRUE) %>% see####################
LaidPairs <- future_map2(LaidPairs, Ers_without_multi_Exons_Index, ~ dplyr::semi_join(x = ..1, y = ..2))
############################################### Yet to be done ####################################################
### We need to prune the ERs laying on multiple exons.Temporarily, we omit these steps due to uncertainty regarding
### applying width() function on a output of findOverlapPairs().
###################################################################################################################
# NOTE(review): the assignment below has no right-hand side (the pipeline
# that should produce it is commented out on the next line); this branch is
# incomplete, consistent with the 'Yet to be done' banner above.
Deltas[[paste0(as.character(MCC),"_",as.character(MRG))]] <-
# %>% transmute( DeltaVal = abs(first.start - second.start) + abs(first.end - second.end))
save(Deltas, file = Deltas_file_path)
} else {
load(Deltas_file_path)
# Per-chromosome overlap pairs between exon ranges and the merged regions;
# DeltaVal = summed absolute start and end offsets of each paired range.
Deltas[[paste0(as.character(MCC),"_",as.character(MRG))]] <- future_map_dfr(seq_len(length(NonOverlappedExons)),
~as_tibble(findOverlapPairs(ranges(NonOverlappedExons[[..1]]), # We may need to change the query and subject
ranges(RegionMat_MCC_MRG[[..1]][['regions']]))), .id = "Chr") %>%
transmute( DeltaVal = abs(first.start - second.start) + abs(first.end - second.end))
save(Deltas, file = Deltas_file_path)
}
}
}
# Advance to the next (MCC, MRG) pair in the grid and persist the new state.
# NOTE(review): 'MCC_MRG_Grid[1,] == MCC' compares the first *row* against
# MCC; 'MCC_MRG_Grid[,1] == MCC' (first column) is presumably intended --
# TODO confirm.
NextMCC_MRG <- MCC_MRG_Grid[which(MCC_MRG_Grid[,2] == MRG & MCC_MRG_Grid[1,] == MCC) + 1 , ]
if (is.na(NextMCC_MRG[1,2])) {
stop(call. = FALSE, "Out of MCC_MRG_Grid range!")
} else {
MRG <- as.numeric(NextMCC_MRG[1,2])
save( MRG , file = file.path(paste0(DBDir_Path,"MRG.RData")))
# NOTE(review): 'RegionMatsPath' is neither an argument nor defined locally;
# it must come from the calling/global environment -- confirm.
RegionMat_Path <- stringr::str_subset(list.files(RegionMatsPath, recursive=TRUE, full.names=TRUE),
pattern = paste0("RegionMats_",as.numeric(NextMCC_MRG[1,1]),".RData"))
if (length(RegionMat_Path) != 1) {
#### Because MCC iteration is slower than MRG
stop(paste0(" The initial ", paste0("RegionMats_",as.numeric(NextMCC_MRG[1,1]),".RData"),
" was not found or appears more than once!!!"), call. = FALSE)
}else{
save(RegionMat_Path, file = file.path(paste0(DBDir_Path,"RegionMat_Path.RData")))
}
}
}
|
# Purled R code from the 'afpt' package vignette on its aerodynamic model:
# builds an example Bird, computes a flapping-flight power curve over a range
# of airspeeds, and plots drag/power components and factors.  Lines starting
# '## ----' are the original knitr chunk separators; the fig.cap= text in
# them documents each figure.
## -----------------------------------------------------------------------------
library('afpt')
## -----------------------------------------------------------------------------
# Example bird morphology (presumably SI units: kg, m, m^2 -- per the
# afpt::Bird interface; confirm against the afpt documentation).
myBird <- Bird(
massTotal = 0.215,
wingSpan = 0.67,
wingArea = 0.0652,
name = 'Jackdaw',
name.scientific = 'Corvus monedula',
type = 'passerine',
source = 'KleinHeerenbrink M, Warfvinge K and Hedenström A 2016 J.Exp.Biol. 219: 10, 1572--1581'
)
## -----------------------------------------------------------------------------
speed <- seq(6,18,length.out=6) # airspeed in m/s
powercurve <- computeFlappingPower(myBird,speed)
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Drag components** -- Black: total drag; red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag; yellow triangles: parasitic drag."----
# Plot total non-flapping drag (sum of the Dnf.* columns) plus each component.
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
with(powercurve , plot( speed, Dnf.ind+Dnf.pro0+Dnf.pro2+Dnf.par,
type='b', pch=15, col='grey20',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(0,0.39)))
with(powercurve , lines( speed, Dnf.ind, type='b', pch=21, col='red3'))
with(powercurve , lines( speed, Dnf.pro0, type='b', pch=22, col='green3'))
with(powercurve , lines( speed, Dnf.pro2, type='b', pch=23, col='blue3'))
with(powercurve , lines( speed, Dnf.par, type='b', pch=24, col='yellow3'))
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Drag components (N)')
## -----------------------------------------------------------------------------
# Two equivalent ways to specify the body-drag product CDb*Sb: the product in
# the coefficient with unit area, or the coefficient relative to wing area.
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
myBird$coef.bodyDragCoefficient <- 0.001004007 # the product CDb*Sb
myBird$bodyFrontalArea <- 1 # unit area
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
myBird$coef.bodyDragCoefficient <- 0.01539888 # CDb relative to wing reference area
myBird$bodyFrontalArea <- myBird$wingArea # unit area
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
powercurve[c('speed','amplitude','strokeplane','frequency')]
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Drag factors** -- red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag."----
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
kf <- 2*pi*myBird$wingSpan*myBird$wingbeatFrequency / speed # reduced frequency
phi <- powercurve$strokeplane*pi/180 # strokeplane angle in radians (optimized)
fD <- data.frame(
ind = fD.ind(kf,phi), # induced drag
pro0 = fD.pro0(kf,phi), # zero lift profile drag
pro2 = fD.pro2(kf,phi), # lift dep. profile drag
par = 0 # parasitic drag is wingbeat independent
)
plot( speed, fD$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(-1,6.6))
lines( speed, fD$pro0, type='b', pch=22, col='green3')
lines( speed, fD$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Drag factors fD (-)')
## -----------------------------------------------------------------------------
# Thrust ratio: total non-flapping drag (row-wise sum of the 'Dnf*' columns)
# divided by lift minus the fD-weighted drag components.
thrustratio <- apply(powercurve[,grep('Dnf',names(powercurve))],1,sum) /
(powercurve$L - apply(fD*powercurve[,grep('Dnf',names(powercurve))],1,sum))
## -----------------------------------------------------------------------------
fP <- data.frame(
ind = fP.ind(kf,phi), # induced power
pro0 = fP.pro0(kf,phi), # zero lift profile power
pro2 = fP.pro2(kf,phi), # lift dep. profile power
par = 0 # parasitic power is wingbeat independent
)
## -----------------------------------------------------------------------------
# kP: power factors combining the fP factors with the thrust ratio.
kP <- 1 + fP*thrustratio
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Power factors** -- red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag."----
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
plot( speed, fP$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(-0.4,8))
lines( speed, fP$pro0, type='b', pch=22, col='green3')
lines( speed, fP$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Power factors fP (-)')
plot( speed, kP$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(0.8,2.3))
lines( speed, kP$pro0, type='b', pch=22, col='green3')
lines( speed, kP$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Power factors kP (-)')
## -----------------------------------------------------------------------------
# Columns whose names contain 'flags' (presumably solver status/validity
# flags per speed -- confirm in the afpt documentation).
powercurve[,grep('flags',names(powercurve))]
| /inst/doc/afpt-aerodynamic-model.R | no_license | cran/afpt | R | false | false | 4,942 | r | ## -----------------------------------------------------------------------------
# Duplicate copy (from the dataset dump) of the purled 'afpt' aerodynamic
# model vignette script that also appears earlier in this file: builds an
# example Bird, computes a flapping-flight power curve, and plots drag/power
# components and factors.  '## ----' lines are knitr chunk separators.
library('afpt')
## -----------------------------------------------------------------------------
myBird <- Bird(
massTotal = 0.215,
wingSpan = 0.67,
wingArea = 0.0652,
name = 'Jackdaw',
name.scientific = 'Corvus monedula',
type = 'passerine',
source = 'KleinHeerenbrink M, Warfvinge K and Hedenström A 2016 J.Exp.Biol. 219: 10, 1572--1581'
)
## -----------------------------------------------------------------------------
speed <- seq(6,18,length.out=6) # airspeed in m/s
powercurve <- computeFlappingPower(myBird,speed)
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Drag components** -- Black: total drag; red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag; yellow triangles: parasitic drag."----
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
with(powercurve , plot( speed, Dnf.ind+Dnf.pro0+Dnf.pro2+Dnf.par,
type='b', pch=15, col='grey20',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(0,0.39)))
with(powercurve , lines( speed, Dnf.ind, type='b', pch=21, col='red3'))
with(powercurve , lines( speed, Dnf.pro0, type='b', pch=22, col='green3'))
with(powercurve , lines( speed, Dnf.pro2, type='b', pch=23, col='blue3'))
with(powercurve , lines( speed, Dnf.par, type='b', pch=24, col='yellow3'))
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Drag components (N)')
## -----------------------------------------------------------------------------
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
myBird$coef.bodyDragCoefficient <- 0.001004007 # the product CDb*Sb
myBird$bodyFrontalArea <- 1 # unit area
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
myBird$coef.bodyDragCoefficient <- 0.01539888 # CDb relative to wing reference area
myBird$bodyFrontalArea <- myBird$wingArea # unit area
with(myBird,coef.bodyDragCoefficient*bodyFrontalArea)
## -----------------------------------------------------------------------------
powercurve[c('speed','amplitude','strokeplane','frequency')]
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Drag factors** -- red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag."----
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
kf <- 2*pi*myBird$wingSpan*myBird$wingbeatFrequency / speed # reduced frequency
phi <- powercurve$strokeplane*pi/180 # strokeplane angle in radians (optimized)
fD <- data.frame(
ind = fD.ind(kf,phi), # induced drag
pro0 = fD.pro0(kf,phi), # zero lift profile drag
pro2 = fD.pro2(kf,phi), # lift dep. profile drag
par = 0 # parasitic drag is wingbeat independent
)
plot( speed, fD$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(-1,6.6))
lines( speed, fD$pro0, type='b', pch=22, col='green3')
lines( speed, fD$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Drag factors fD (-)')
## -----------------------------------------------------------------------------
thrustratio <- apply(powercurve[,grep('Dnf',names(powercurve))],1,sum) /
(powercurve$L - apply(fD*powercurve[,grep('Dnf',names(powercurve))],1,sum))
## -----------------------------------------------------------------------------
fP <- data.frame(
ind = fP.ind(kf,phi), # induced power
pro0 = fP.pro0(kf,phi), # zero lift profile power
pro2 = fP.pro2(kf,phi), # lift dep. profile power
par = 0 # parasitic power is wingbeat independent
)
## -----------------------------------------------------------------------------
kP <- 1 + fP*thrustratio
## ---- fig.show='hold', fig.width=3.45, fig.cap="**Power factors** -- red circles: induced drag; green squares: zero-lift profile drag; blue diamonds: lift-dep. profile drag."----
par(mar=c(3.1,3.1,0.4,1.1),mgp=c(1.9,0.7,0.0),cex=0.75)
plot( speed, fP$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(-0.4,8))
lines( speed, fP$pro0, type='b', pch=22, col='green3')
lines( speed, fP$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Power factors fP (-)')
plot( speed, kP$ind, type='b', pch=21, col='red3',
xlab=NA, ylab=NA, xlim=c(0,20), ylim=c(0.8,2.3))
lines( speed, kP$pro0, type='b', pch=22, col='green3')
lines( speed, kP$pro2, type='b', pch=23, col='blue3')
mtext(side = 1, line = 2,'Airspeed (m/s)')
mtext(side = 2, line = 2,'Power factors kP (-)')
## -----------------------------------------------------------------------------
powercurve[,grep('flags',names(powercurve))]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saedb.R
\name{saedb}
\alias{saedb}
\title{EBLUPs under Univariate Fay Herriot Model with Difference Benchmarking}
\usage{
saedb(
formula,
vardir,
weight,
samevar = FALSE,
MAXITER = 100,
PRECISION = 1e-04,
data
)
}
\arguments{
\item{formula}{List of formula that describe the fitted model}
\item{vardir}{Sampling variances of the direct estimations. If it is included in the data frame, it is the vector with the names of the sampling variances; if it is not, it is a data frame of sampling variances in the order: \code{var1, cov12,.,cov1r,var2,cov23,.,cov2r,.,cov(r-1)(r),var(r)}}
\item{weight}{Known proportion of units in small areas, where \eqn{\sum_{d=1}^{D}}{sum from d=1 to D of} \eqn{W_{rd}}{Wrd} = 1 . \code{d = 1...D} is the number of small areas, and \code{r = 1...R} is the number of response variables}
\item{samevar}{Whether the variances of the data are same or not. Logical input with default \code{FALSE}}
\item{MAXITER}{Maximum number of iteration in Fisher-scoring algorithm with default \code{100}}
\item{PRECISION}{Limit of Fisher-scoring convergence tolerance with default \code{1e-4}}
\item{data}{The data frame}
}
\value{
This function returns a list of the following objects:
\item{SAE_Eblup}{A dataframe with the values of the EBLUPs estimators}
\item{MSE_Eblup}{A dataframe with the values of estimated mean square errors of EBLUPs estimators}
\item{fit}{A list containing the following objects:}
\itemize{
\item method : The fitting method (this function is using "REML")
\item convergence : The convergence result of Fisher-scoring algorithm (Logical Value)
\item iterations : The number of Fisher-Scoring algorithm iterations
\item estcoef : A dataframe with the estimated model coefficient, standard error,t statistics, p-values of the significance of each coefficient
\item refvar : A dataframe with estimated random effect variances
\item informationFisher : A matrix of information fisher from Fisher-scoring algorithm
}
\item{difference_benchmarking}{a list containing the following objects:}
\itemize{
\item Estimation : A dataframe with the value of Benchmarked EBLUPs estimators
\item Aggregation : The aggregation of benchmarked EBLUPs estimators, EBLUPs estimators and direct estimations
\item MSE_DB : A dataframe with the values of estimated mean square errors of benchmarked EBLUPs estimators
\item g.4a : First component of g4 in difference benchmarking MSE estimation formula
\item g.4b : Second component of g4 in difference benchmarking MSE estimation formula
}
}
\description{
This function produces EBLUPs, MSE, and aggregation of Univariate SAE with Difference Benchmarking
}
\examples{
##load dataset
data(datamsaeDB)
#Compute Fitted model for Y1, Y2, and Y3
#Y1 ~ X1 + X2
#Y2 ~ X2
#Y3 ~ X1
##Using parameter 'data'
formula = list(f1 = Y1~X1+X2,
f2 = Y2~X2,
f3 = Y3~X1)
vardir = c("v1","v12","v13","v2","v23","v3")
#Note : in real data for univariate SAE, if you do not have the values of covariances,
# set covariances to zero in the dataframe
weight = c("w1","w2","w3")
saeDB <- saedb(formula, vardir, weight, data=datamsaeDB)
##Do not use parameter 'data'
formula = list(f1 = datamsaeDB$Y1~datamsaeDB$X1+datamsaeDB$X2,
f2 = datamsaeDB$Y2~datamsaeDB$X2,
f3 = datamsaeDB$Y3~datamsaeDB$X1)
vardir = datamsaeDB[,c("v1","v12","v13","v2","v23","v3")]
#Note : in real data for univariate SAE, if you do not have the values of covariances,
# set covariances to zero in the dataframe
weight = datamsaeDB[,c("w1","w2","w3")]
saeDB_d <- saedb(formula, vardir, weight = weight)
saeDB$SAE_Eblup #to see EBLUP Estimators
saeDB$MSE_Eblup #to see estimated MSE of EBLUP estimators
saeDB$difference_benchmarking$Estimation #to see Benchmarked EBLUP Estimators
saeDB$difference_benchmarking$MSE_DB #to see estimated MSE of Benchmarked EBLUP Estimators
saeDB$difference_benchmarking$Aggregation #to see the aggregation of benchmarked EBLUP, EBLUP, and direct estimators
}
| /man/saedb.Rd | no_license | zazaperwira/msaeDB | R | false | true | 4,171 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saedb.R
\name{saedb}
\alias{saedb}
\title{EBLUPs under Univariate Fay Herriot Model with Difference Benchmarking}
\usage{
saedb(
formula,
vardir,
weight,
samevar = FALSE,
MAXITER = 100,
PRECISION = 1e-04,
data
)
}
\arguments{
\item{formula}{List of formula that describe the fitted model}
\item{vardir}{Sampling variances of direct estimations,if it is included in data frame so it is the vector with the name of sampling variances.if it is not, it is a data frame of sampling variance in order : \code{var1, cov12,.,cov1r,var2,cov23,.,cov2r,.,cov(r-1)(r),var(r)}}
\item{weight}{Known proportion of units in small areas, where \eqn{\sum_{d=1}^{D}}{sum from d=1 to D of} \eqn{W_{rd}}{Wrd} = 1 . \code{d = 1...D} is the number of small areas, and \code{r = 1...R} is the number of response variables}
\item{samevar}{Whether the variances of the data are same or not. Logical input with default \code{FALSE}}
\item{MAXITER}{Maximum number of iteration in Fisher-scoring algorithm with default \code{100}}
\item{PRECISION}{Limit of Fisher-scoring convergence tolerance with default \code{1e-4}}
\item{data}{The data frame}
}
\value{
This function returns a list of the following objects:
\item{SAE_Eblup}{A dataframe with the values of the EBLUPs estimators}
\item{MSE_Eblup}{A dataframe with the values of estimated mean square errors of EBLUPs estimators}
\item{fit}{A list containing the following objects:}
\itemize{
\item method : The fitting method (this function is using "REML")
\item convergence : The convergence result of Fisher-scoring algorithm (Logical Value)
\item iterations : The number of Fisher-Scoring algorithm iterations
\item estcoef : A dataframe with the estimated model coefficient, standard error,t statistics, p-values of the significance of each coefficient
\item refvar : A dataframe with estimated random effect variances
\item informationFisher : A matrix of information fisher from Fisher-scoring algorithm
}
\item{difference_benchmarking}{a list containing the following objects:}
\itemize{
\item Estimation : A dataframe with the value of Benchmarked EBLUPs estimators
\item Aggregation : The aggregation of benchmarked EBLUPs estimators, EBLUPs estimators and direct estimations
\item MSE_DB : A dataframe with the values of estimated mean square errors of benchmarked EBLUPs estimators
\item g.4a : First component of g4 in difference benchmarking MSE estimation formula
\item g.4b : Second component of g4 in difference benchmarking MSE estimation formula
}
}
\description{
This function produces EBLUPs, MSE, and aggregation of Univariate SAE with Difference Benchmarking
}
\examples{
##load dataset
data(datamsaeDB)
#Compute Fitted model for Y1, Y2, and Y3
#Y1 ~ X1 + X2
#Y2 ~ X2
#Y3 ~ X1
##Using parameter 'data'
formula = list(f1 = Y1~X1+X2,
f2 = Y2~X2,
f3 = Y3~X1)
vardir = c("v1","v12","v13","v2","v23","v3")
#Note : in real data for univariate SAE, if you do not have the values of covariances,
# set covariances to zero in the dataframe
weight = c("w1","w2","w3")
saeDB <- saedb(formula, vardir, weight, data=datamsaeDB)
##Do not use parameter 'data'
formula = list(f1 = datamsaeDB$Y1~datamsaeDB$X1+datamsaeDB$X2,
f2 = datamsaeDB$Y2~datamsaeDB$X2,
f3 = datamsaeDB$Y3~datamsaeDB$X1)
vardir = datamsaeDB[,c("v1","v12","v13","v2","v23","v3")]
#Note : in real data for univariate SAE, if you do not have the values of covariances,
# set covariances to zero in the dataframe
weight = datamsaeDB[,c("w1","w2","w3")]
saeDB_d <- saedb(formula, vardir, weight = weight)
saeDB$SAE_Eblup #to see EBLUP Estimators
saeDB$MSE_Eblup #to see estimated MSE of EBLUP estimators
saeDB$difference_benchmarking$Estimation #to see Benchmarked EBLUP Estimators
saeDB$difference_benchmarking$MSE_DB #to see estimated MSE of Benchmarked EBLUP Estimators
saeDB$difference_benchmarking$Aggregation #to see the aggregation of benchmarked EBLUP, EBLUP, and direct estimators
}
|
## Coercion methods
as.data.frame.SoilProfile <- function(x, ...) {
  # Flatten a SoilProfile into a data.frame with one row per horizon by
  # column-binding: profile id, depths, (optional) replicated coordinates,
  # (optional) horizon data, and (optional) replicated site data.
  n_hz <- nrow(x)  # number of horizons; hoisted so seq_len() is safe when 0
  # ID (always defined): repeat the single profile id for every horizon
  df_id <- data.frame(rep(x@id, times = n_hz))
  names(df_id) <- names(x@id)
  # Depths (always defined)
  df_depths <- data.frame(x@depths)
  # Spatial point: a single coordinate pair, replicated for each horizon
  if (.hasSpatialPoint(x)) {
    df_sp <- as.data.frame(coordinates(x@sp))
    # was ldply(1:nrow(x), function(x) ...): 1:0 == c(1, 0) would duplicate
    # rows when there are no horizons, and the lambda argument shadowed 'x'
    df_sp <- ldply(seq_len(n_hz), function(i) df_sp)
  } else {
    df_sp <- NULL
  }
  # Horizon data (may be absent)
  if (nrow(horizons(x)) > 0) {
    df_hz <- horizons(x)
  } else {
    df_hz <- NULL
  }
  # Site data (may be absent): replicated for each horizon
  if (nrow(site(x)) > 0) {
    df_site <- site(x)
    df_site <- ldply(seq_len(n_hz), function(i) df_site)
  } else {
    df_site <- NULL
  }
  l_df <- list(df_id, df_depths, df_sp, df_hz, df_site)
  # Drop the absent (NULL) pieces before binding
  l_df <- Filter(Negate(is.null), l_df)
  do.call('cbind', c(l_df, row.names = NULL))
}
# Register coerce methods so as(x, "data.frame") works for both classes.
setAs("SoilProfile", "data.frame", function(from) {
  as.data.frame.SoilProfile(from)
})
# Convert every profile to a data.frame, stack them row-wise, drop row names.
as.data.frame.SoilProfileCollection <- function(x, ...) {
  per_profile <- lapply(profiles(x), as.data.frame)
  stacked <- do.call('rbind', per_profile)
  data.frame(stacked, row.names = NULL)
}
setAs("SoilProfileCollection", "data.frame", function(from) {
  as.data.frame.SoilProfileCollection(from)
})
## Get SpatialPoints
##
# A profile's spatial point, or NULL when none was recorded.
setAs("SoilProfile", "SpatialPoints", function(from) {
  if (.hasSpatialPoint(from)) from@sp else NULL
})
# Stack the spatial points of all profiles in a collection.
setAs("SoilProfileCollection", "SpatialPoints", function(from) {
  pts <- lapply(profiles(from), function(p) as(p, "SpatialPoints"))
  do.call("rbind", pts)
})
# Attach the site data to the point(s); NULL when there is no spatial point.
setAs("SoilProfile", "SpatialPointsDataFrame", function(from) {
  pts <- as(from, "SpatialPoints")
  if (is.null(pts)) {
    NULL
  } else {
    SpatialPointsDataFrame(pts, data = site(from))
  }
})
setAs("SoilProfileCollection", "SpatialPointsDataFrame", function(from) {
  pts <- as(from, "SpatialPoints")
  if (is.null(pts)) {
    NULL
  } else {
    SpatialPointsDataFrame(pts, data = site(from))
  }
})
| /sandbox/aqp2/R/SoilProfileCollection-coercion.R | no_license | dtbinh/aqp | R | false | false | 2,214 | r | ## Coercition methods
as.data.frame.SoilProfile <- function(x, ...) {
  # Flatten a SoilProfile into a data.frame with one row per horizon by
  # column-binding: profile id, depths, (optional) replicated coordinates,
  # (optional) horizon data, and (optional) replicated site data.
  n_hz <- nrow(x)  # number of horizons; hoisted so seq_len() is safe when 0
  # ID (always defined): repeat the single profile id for every horizon
  df_id <- data.frame(rep(x@id, times = n_hz))
  names(df_id) <- names(x@id)
  # Depths (always defined)
  df_depths <- data.frame(x@depths)
  # Spatial point: a single coordinate pair, replicated for each horizon
  if (.hasSpatialPoint(x)) {
    df_sp <- as.data.frame(coordinates(x@sp))
    # was ldply(1:nrow(x), function(x) ...): 1:0 == c(1, 0) would duplicate
    # rows when there are no horizons, and the lambda argument shadowed 'x'
    df_sp <- ldply(seq_len(n_hz), function(i) df_sp)
  } else {
    df_sp <- NULL
  }
  # Horizon data (may be absent)
  if (nrow(horizons(x)) > 0) {
    df_hz <- horizons(x)
  } else {
    df_hz <- NULL
  }
  # Site data (may be absent): replicated for each horizon
  if (nrow(site(x)) > 0) {
    df_site <- site(x)
    df_site <- ldply(seq_len(n_hz), function(i) df_site)
  } else {
    df_site <- NULL
  }
  l_df <- list(df_id, df_depths, df_sp, df_hz, df_site)
  # Drop the absent (NULL) pieces before binding
  l_df <- Filter(Negate(is.null), l_df)
  do.call('cbind', c(l_df, row.names = NULL))
}
# Register coerce methods so as(x, "data.frame") works for both classes.
setAs("SoilProfile", "data.frame", function(from) {
  as.data.frame.SoilProfile(from)
})
# Convert every profile to a data.frame, stack them row-wise, drop row names.
as.data.frame.SoilProfileCollection <- function(x, ...) {
  per_profile <- lapply(profiles(x), as.data.frame)
  stacked <- do.call('rbind', per_profile)
  data.frame(stacked, row.names = NULL)
}
setAs("SoilProfileCollection", "data.frame", function(from) {
  as.data.frame.SoilProfileCollection(from)
})
## Get SpatialPoints
##
# A profile's spatial point, or NULL when none was recorded.
setAs("SoilProfile", "SpatialPoints", function(from) {
  if (.hasSpatialPoint(from)) from@sp else NULL
})
# Stack the spatial points of all profiles in a collection.
setAs("SoilProfileCollection", "SpatialPoints", function(from) {
  pts <- lapply(profiles(from), function(p) as(p, "SpatialPoints"))
  do.call("rbind", pts)
})
# Attach the site data to the point(s); NULL when there is no spatial point.
setAs("SoilProfile", "SpatialPointsDataFrame", function(from) {
  pts <- as(from, "SpatialPoints")
  if (is.null(pts)) {
    NULL
  } else {
    SpatialPointsDataFrame(pts, data = site(from))
  }
})
setAs("SoilProfileCollection", "SpatialPointsDataFrame", function(from) {
  pts <- as(from, "SpatialPoints")
  if (is.null(pts)) {
    NULL
  } else {
    SpatialPointsDataFrame(pts, data = site(from))
  }
})
|
# Extracted example for annotate::filterGOByOntology.
library(annotate)
### Name: filterGOByOntology
### Title: Filter GO terms by a specified GO ontology
### Aliases: filterGOByOntology
### Keywords: manip
### ** Examples
# Load GO.db quietly; require() returns FALSE instead of erroring when the
# package is missing, so the example can be skipped gracefully.
haveGO <- suppressWarnings(require("GO.db"))
if (haveGO) {
# The example asserts these two ids classify as "BP" terms and not "MF".
ids <- c("GO:0001838", "GO:0001839")
stopifnot(all(filterGOByOntology(ids, "BP")))
stopifnot(!any(filterGOByOntology(ids, "MF")))
} else cat("Sorry, this example requires the GO package\n")
| /data/genthat_extracted_code/annotate/examples/filterGOByOntology.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 438 | r | library(annotate)
# Duplicate copy (from the dataset dump) of the annotate::filterGOByOntology
# example that also appears earlier in this file.
### Name: filterGOByOntology
### Title: Filter GO terms by a specified GO ontology
### Aliases: filterGOByOntology
### Keywords: manip
### ** Examples
# require() returns FALSE instead of erroring when GO.db is missing.
haveGO <- suppressWarnings(require("GO.db"))
if (haveGO) {
ids <- c("GO:0001838", "GO:0001839")
stopifnot(all(filterGOByOntology(ids, "BP")))
stopifnot(!any(filterGOByOntology(ids, "MF")))
} else cat("Sorry, this example requires the GO package\n")
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic for a BMI calculator: renders two external reference links and
# computes BMI from the weight/height inputs.  (The stock scaffold comment
# about "drawing a histogram" did not match this app.)
shinyServer(function(input, output) {
# Link to a BMI interpretation chart.
output$interpretation_link <- renderUI({
a("Show me what my result means", href=paste("http://www.nscclinics.co.uk/slimming/wp-content/uploads/sites/3/2016/05/bmi-chart.png"))
})
# Link to background material on the health impact of overweight.
output$health_impact_link <- renderUI({
a("Show my the impact on my health of overweight from John Hopkins Medicine", href=paste("http://www.hopkinsmedicine.org/healthlibrary/conditions/diabetes/overview_of_obesity_85,P07855/"))
})
# BMI = weight / (height in m)^2; input$height is divided by 100, so the
# height input is presumably entered in centimetres.
output$bmi <- renderText({
# NOTE(review): the reactive is re-created on every render; it could be
# hoisted outside renderText without changing behaviour.
calc.bmi <- reactive({
input$weight / (input$height/100)^2
})
calc.bmi()
})
})
| /server.R | no_license | ramon-schildknecht/1st-shiny-app | R | false | false | 1,019 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic for a BMI calculator: renders two external reference links and
# computes BMI from the weight/height inputs.  (The stock scaffold comment
# about "drawing a histogram" did not match this app.)
shinyServer(function(input, output) {
# Link to a BMI interpretation chart.
output$interpretation_link <- renderUI({
a("Show me what my result means", href=paste("http://www.nscclinics.co.uk/slimming/wp-content/uploads/sites/3/2016/05/bmi-chart.png"))
})
# Link to background material on the health impact of overweight.
output$health_impact_link <- renderUI({
a("Show my the impact on my health of overweight from John Hopkins Medicine", href=paste("http://www.hopkinsmedicine.org/healthlibrary/conditions/diabetes/overview_of_obesity_85,P07855/"))
})
# BMI = weight / (height in m)^2; input$height is divided by 100, so the
# height input is presumably entered in centimetres.
output$bmi <- renderText({
# NOTE(review): the reactive is re-created on every render; it could be
# hoisted outside renderText without changing behaviour.
calc.bmi <- reactive({
input$weight / (input$height/100)^2
})
calc.bmi()
})
})
|
#' Tune Midwater Trawl Selectivity
#'
#' Interactive visualization of midwater trawl selectivity curve for aid in
#' tuning selectivity parameters used in \code{\link{AcMtEst}}.
#'
#' @details
#' Interactive sliders are provided for the four parameters of interest which
#' are required for defining the midwater trawl selectivity
#' of the mesh panel zones as a double logistic curve:
#' \itemize{
#' \item \code{MtL50Small} the length (in mm) at which small fish have a
#' 50\% probability of being captured by the trawl
#' \item \code{MtSlopeSmall} the slope at which small fish probability of
#' capture increases with length
#' \item \code{MtL50Large} the length (in mm) at which large fish have a
#' 50\% probability of being captured by the trawl
#' \item \code{MtSlopeLarge} the (absolute value of the) slope at which
#' large fish probability of capture decreases with length
#' }
#'
#' Note that the sliders pop up in a separate R window, which may be hidden if
#' you click on another window.
#'
#' @export
#' @import
#' rpanel
#' @seealso
#' \code{\link{AcMtEst}}, \code{\link{logit2}}
#' @examples
#'
#' \dontrun{
#' TuneSelec()
#' }
TuneSelec <- function() {
  # rpanel has to be attached for the rp.* controls to work
  # (loading the namespace alone is not enough)
  library(rpanel)
  # Slider callback: draw the double-logistic selectivity curve for the
  # current parameter values held in `panel`, then hand the panel back to
  # rpanel (action functions return the panel object).
  double.draw <- function(panel) {
    y <- logit2(panel$x, panel$L501, panel$SR1, panel$L502, -panel$SR2)
    # restrict the x-axis to lengths with non-negligible selectivity
    plot(0, 0, type = "n", las = 1,
         xlim = range(panel$x[y > 0.001]), ylim = 0:1,
         xlab = "Fish length (mm)", ylab = "Midwater trawl selectivity")
    # reference lines at the two L50s and at selectivity 0, 0.5, 1
    abline(v = c(panel$L501, panel$L502), col = "gray", lwd = 2)
    abline(h = c(0, 0.5, 1), col = "gray", lwd = 2)
    lines(spline(panel$x, y, 1000), lwd = 3)
    panel
  }
  # set up the plotting device and the control panel with starting values
  par(mar = c(4, 4, 1, 1))
  plot(1, 1)
  panel <- rp.control(x = 1:2000, L501 = 10.1, SR1 = 10.1, L502 = 200.1, SR2 = 20.1)
  # initial draw so the user is not staring at an empty window
  double.draw(panel)
  # one slider per tunable parameter of the double logistic curve
  rp.slider(panel, L501, 0.1, 1000, resolution = 0.1, showvalue = TRUE,
            action = double.draw,
            title = "MtL50Small: length at 50% for small fish ")
  rp.slider(panel, SR1, 0.1, 200, resolution = 0.1, showvalue = TRUE,
            action = double.draw,
            title = "MtSlopeSmall: slope for small fish")
  rp.slider(panel, L502, 0.1, 1000, resolution = 0.1, showvalue = TRUE,
            action = double.draw,
            title = "MtL50Large: length at 50% for large fish")
  rp.slider(panel, SR2, 0.1, 200, resolution = 0.1, showvalue = TRUE,
            action = double.draw,
            title = "MtSlopeLarge: slope for large fish")
}
| /R/TuneSelec.R | no_license | JVAdams/artiFISHal | R | false | false | 2,770 | r | #' Tune Midwater Trawl Selectivity
#'
#' Interactive visualization of midwater trawl selectivity curve for aid in
#' tuning selectivity parameters used in \code{\link{AcMtEst}}.
#'
#' @details
#' Interactive sliders are provided for the four parameters of interest which
#' are required for defining the midwater trawl selectivity
#' of the mesh panel zones as a double logistic curve:
#' \itemize{
#' \item \code{MtL50Small} the length (in mm) at which small fish have a
#' 50\% probability of being captured by the trawl
#' \item \code{MtSlopeSmall} the slope at which small fish probability of
#' capture increases with length
#' \item \code{MtL50Large} the length (in mm) at which large fish have a
#' 50\% probability of being captured by the trawl
#' \item \code{MtSlopeLarge} the (absolute value of the) slope at which
#' large fish probability of capture decreases with length
#' }
#'
#' Note that the sliders pop up in a separate R window, which may be hidden if
#' you click on another window.
#'
#' @export
#' @import
#' rpanel
#' @seealso
#' \code{\link{AcMtEst}}, \code{\link{logit2}}
#' @examples
#'
#' \dontrun{
#' TuneSelec()
#' }
TuneSelec <- function() {
  # rpanel needs to be attached (not merely loaded) for the rp.* widgets
  library(rpanel)
  # Callback invoked by every slider: redraws the double-logistic
  # selectivity curve from the parameter values stored in the panel object,
  # and returns the panel so rpanel keeps the control state.
  redraw.curve <- function(panel) {
    sel <- logit2(panel$x, panel$L501, panel$SR1, panel$L502, -panel$SR2)
    visible <- panel$x[sel > 0.001]
    plot(0, 0, type = "n", las = 1, xlim = range(visible), ylim = 0:1,
         xlab = "Fish length (mm)", ylab = "Midwater trawl selectivity")
    abline(v = c(panel$L501, panel$L502), col = "gray", lwd = 2)
    abline(h = c(0, 0.5, 1), col = "gray", lwd = 2)
    lines(spline(panel$x, sel, 1000), lwd = 3)
    panel
  }
  par(mar = c(4, 4, 1, 1))
  plot(1, 1)
  # control window holding the four tunable parameters (starting values)
  panel <- rp.control(x = 1:2000, L501 = 10.1, SR1 = 10.1,
                      L502 = 200.1, SR2 = 20.1)
  # initial draw so the user is not looking at an empty device
  redraw.curve(panel)
  rp.slider(panel, L501, 0.1, 1000, resolution = 0.1, showvalue = TRUE,
            action = redraw.curve,
            title = "MtL50Small: length at 50% for small fish ")
  rp.slider(panel, SR1, 0.1, 200, resolution = 0.1, showvalue = TRUE,
            action = redraw.curve,
            title = "MtSlopeSmall: slope for small fish")
  rp.slider(panel, L502, 0.1, 1000, resolution = 0.1, showvalue = TRUE,
            action = redraw.curve,
            title = "MtL50Large: length at 50% for large fish")
  rp.slider(panel, SR2, 0.1, 200, resolution = 0.1, showvalue = TRUE,
            action = redraw.curve,
            title = "MtSlopeLarge: slope for large fish")
}
|
# IN THIS SCRIPT:
# DNORM - normal density function
# PNORM - normal cumulative distribution function (probabilities)
# QNORM - normal quantile function (inverse CDF; gives z-scores)
# RNORM - random draws from a normal distribution
# use help() to find out more about each
# Grid of values for a normal variable with mean = 3, sd = 2 (variance = 4).
# (fixed typo: `by-0.1` -> `by = 0.1`)
x <- seq(-5, 10, by = 0.1)
dnorm(x, mean = 3, sd = 2)
pnorm(x, mean = 3, sd = 2)
qnorm(x, mean = 3, sd = 2) # NOTE: qnorm expects probabilities; x outside [0,1] gives NaN
rnorm(x, mean = 3, sd = 2) # a vector first argument is used only for its length
#----------------------------------------------------------
# DNORM:
# dnorm: gets values on the density function
# (fixed: stray trailing ':' removed; sd = 2, since the argument is the
#  standard deviation, not the variance, so sd = 2^2 was wrong)
dnorm(x, mean = 3, sd = 2)
dnorm(0, 0, 1) # out: 0.3989423
# DNORM to overlay a normal density curve on a histogram of sample means
set.seed(124)
means <- replicate(1000, mean(sample(Nile, 100, replace = TRUE)))
str(means)
mm <- mean(means)
x <- seq(min(means), max(means), 1)
std <- sd(means)
hist(means, freq = FALSE,
     xlab = "Nile Flow",
     col = 'blue',
     main = 'Nile Flow Means \n1000 samples at n=100')
curve(dnorm(x, mean = mm, sd = std),
      col = "orange",
      add = TRUE,
      lwd = 2)
abline(v = mm,
       col = 'red',
       lwd = 2,
       lty = 2)
#----------------------------------------------------------
# PNORM:
# pnorm: cumulative distribution function (gives probability)
# Data above and below the mean ----------------------------
pnorm(-2, 0, 1, lower.tail = TRUE)  # % of data below 2 standard deviations from mean
pnorm(3, 0, 1, lower.tail = FALSE)  # % of data above 3 standard deviations from mean
pnorm(-2, 0, 1, lower.tail = TRUE) +
  pnorm(3, 0, 1, lower.tail = FALSE) # % of data below -2 or above 3 sds from mean
# distribution function at x = 2.5, mean = 2, variance = 0.25 ---
pnorm(2.5, mean = 2, sd = sqrt(0.25))
# probability that X is between 1 and 3 -----------------------
pnorm(3, mean = 2, sd = sqrt(0.25)) - pnorm(1, mean = 2, sd = sqrt(0.25))
# where 9 = value of interest, 8.25 = population mean, 0.75 = sd
# (using the z-score formula this is z = 1.0; fixed: missing comma)
pnorm(9, 8.25, 0.75, lower.tail = TRUE)
# probability a randomly selected observation falls between 3.6 and 5.0
# for a N(4.43, 1.32^2) distribution
pnorm(5.0, 4.43, 1.32) - pnorm(3.6, 4.43, 1.32)
# probability that a standard normal variable is > 1, via the upper tail
pnorm(1, 0, 1, lower.tail = FALSE) # out: 0.1586553
# same probability via the lower tail, taking 1 - pnorm
1 - pnorm(1, 0, 1, lower.tail = TRUE) # out: 0.1586553
# Using the z-score formula ----------------------------------------
# mean 114.8, sd 13.1, n = 36 samples
z <- (110 - 114.8) / (13.1 / sqrt(36))
pnorm(z, 0, 1, lower.tail = TRUE) # out: 0.0139577
# example 2
z1 <- (5.0 - 4.43) / 1.32 # calculate z score
z2 <- (3.6 - 4.43) / 1.32 # calculate z score
pnorm(z1, 0, 1) - pnorm(z2, 0, 1) # use pnorm to get the probability
#----------------------------------------------------------
# QNORM:
# qnorm (quantile function of the normal distribution), gives quantiles
qnorm(0.95, mean = 2, sd = sqrt(0.25))
# returns the z-score when the probability (shaded area under the curve)
# is given along with the mean and SD; here the area is in the lower tail
qnorm(0.4013, 0, 1, lower.tail = TRUE) # out: -0.2499836
# z-scores for several lower-tail probabilities at once
# (fixed: `norm` -> `qnorm`)
qnorm(c(0.25, 0.5, 0.75), # out: -0.6744898, 0.000000, 0.6744898
      mean = 0, sd = 1,
      lower.tail = TRUE)
# z for a two-tailed test at a given alpha: qnorm(1 - alpha/2)
# (for alpha = 0.10 this is 1.645, not 1.96 as previously claimed)
qnorm(1 - (0.10 / 2))
# z for a one-tailed test at alpha = 0.1 (i.e. the 90th percentile)
qnorm(1 - 0.1)
# z when a percentile is given directly instead of 1 - alpha
qnorm(0.9); qnorm(0.95)
# both z-scores (negative and positive) for a two-tailed test at alpha = 0.05
qnorm(c(0.025, 0.975)) # output: -1.959964, 1.959964
# QNORM: -----------------------------
# qnorm and pnorm are inverses of each other:
qnorm(c(0.25, 0.5, 0.75), # out: -0.6744898, 0.000000, 0.6744898
      mean = 0,
      sd = 1,
      lower.tail = TRUE)
pnorm(c(-0.6744898, 0.000000, 0.6744898), 0, 1, # out: 0.25, 0.5, 0.75
      lower.tail = TRUE)
# QNORM:
# find the z-score for a given probability
# (fixed: `p` was never defined; the intended value is 0.75, whose
#  z-score is 0.6744898)
p <- 0.75
qnorm(p, 0, 1, lower.tail = TRUE) # gives the z-score
# shade the region up to the 75th percentile under the standard normal density
cord.x <- c(-3, seq(-3, 0.67, 0.01), 0.67)
cord.y <- c(0, dnorm(seq(-3, 0.67, 0.01), 0, 1), 0)
curve(dnorm(x, 0, 1),
      xlim = c(-3, 3),
      main = "Standard Normal Density",
      ylab = "density")
polygon(cord.x, cord.y, col = "skyblue")
#----------------------------------------------------------
# RNORM: generate random observations
# simulate 100 random observations with mean = 3, sd = 2
rnorm(100, 3, 2)
# upper-tail probabilities of the quartile z-scores (fixed: `prnorm` -> `pnorm`)
pnorm(c(-0.6744898, 0.000000, 0.6744898), 0, 1, lower.tail = FALSE)
| /Stats_and_Math/Distributions/Normal_Gaussian.R | no_license | yangboyubyron/DS_Recipes | R | false | false | 5,031 | r |
# IN THIS SCRIPT:
# DNORM - normal density function
# PNORM - normal cumulative distribution function (probabilities)
# QNORM - normal quantile function (inverse CDF; gives z-scores)
# RNORM - random draws from a normal distribution
# use help() to find out more about each
# Grid of values for a normal variable with mean = 3, sd = 2 (variance = 4).
# (fixed typo: `by-0.1` -> `by = 0.1`)
x <- seq(-5, 10, by = 0.1)
dnorm(x, mean = 3, sd = 2)
pnorm(x, mean = 3, sd = 2)
qnorm(x, mean = 3, sd = 2) # NOTE: qnorm expects probabilities; x outside [0,1] gives NaN
rnorm(x, mean = 3, sd = 2) # a vector first argument is used only for its length
#----------------------------------------------------------
# DNORM:
# dnorm: gets values on the density function
# (fixed: stray trailing ':' removed; sd = 2, since the argument is the
#  standard deviation, not the variance, so sd = 2^2 was wrong)
dnorm(x, mean = 3, sd = 2)
dnorm(0, 0, 1) # out: 0.3989423
# DNORM to overlay a normal density curve on a histogram of sample means
set.seed(124)
means <- replicate(1000, mean(sample(Nile, 100, replace = TRUE)))
str(means)
mm <- mean(means)
x <- seq(min(means), max(means), 1)
std <- sd(means)
hist(means, freq = FALSE,
     xlab = "Nile Flow",
     col = 'blue',
     main = 'Nile Flow Means \n1000 samples at n=100')
curve(dnorm(x, mean = mm, sd = std),
      col = "orange",
      add = TRUE,
      lwd = 2)
abline(v = mm,
       col = 'red',
       lwd = 2,
       lty = 2)
#----------------------------------------------------------
# PNORM:
# pnorm: cumulative distribution function (gives probability)
# Data above and below the mean ----------------------------
pnorm(-2, 0, 1, lower.tail = TRUE)  # % of data below 2 standard deviations from mean
pnorm(3, 0, 1, lower.tail = FALSE)  # % of data above 3 standard deviations from mean
pnorm(-2, 0, 1, lower.tail = TRUE) +
  pnorm(3, 0, 1, lower.tail = FALSE) # % of data below -2 or above 3 sds from mean
# distribution function at x = 2.5, mean = 2, variance = 0.25 ---
pnorm(2.5, mean = 2, sd = sqrt(0.25))
# probability that X is between 1 and 3 -----------------------
pnorm(3, mean = 2, sd = sqrt(0.25)) - pnorm(1, mean = 2, sd = sqrt(0.25))
# where 9 = value of interest, 8.25 = population mean, 0.75 = sd
# (using the z-score formula this is z = 1.0; fixed: missing comma)
pnorm(9, 8.25, 0.75, lower.tail = TRUE)
# probability a randomly selected observation falls between 3.6 and 5.0
# for a N(4.43, 1.32^2) distribution
pnorm(5.0, 4.43, 1.32) - pnorm(3.6, 4.43, 1.32)
# probability that a standard normal variable is > 1, via the upper tail
pnorm(1, 0, 1, lower.tail = FALSE) # out: 0.1586553
# same probability via the lower tail, taking 1 - pnorm
1 - pnorm(1, 0, 1, lower.tail = TRUE) # out: 0.1586553
# Using the z-score formula ----------------------------------------
# mean 114.8, sd 13.1, n = 36 samples
z <- (110 - 114.8) / (13.1 / sqrt(36))
pnorm(z, 0, 1, lower.tail = TRUE) # out: 0.0139577
# example 2
z1 <- (5.0 - 4.43) / 1.32 # calculate z score
z2 <- (3.6 - 4.43) / 1.32 # calculate z score
pnorm(z1, 0, 1) - pnorm(z2, 0, 1) # use pnorm to get the probability
#----------------------------------------------------------
# QNORM:
# qnorm (quantile function of the normal distribution), gives quantiles
qnorm(0.95, mean = 2, sd = sqrt(0.25))
# returns the z-score when the probability (shaded area under the curve)
# is given along with the mean and SD; here the area is in the lower tail
qnorm(0.4013, 0, 1, lower.tail = TRUE) # out: -0.2499836
# z-scores for several lower-tail probabilities at once
# (fixed: `norm` -> `qnorm`)
qnorm(c(0.25, 0.5, 0.75), # out: -0.6744898, 0.000000, 0.6744898
      mean = 0, sd = 1,
      lower.tail = TRUE)
# z for a two-tailed test at a given alpha: qnorm(1 - alpha/2)
# (for alpha = 0.10 this is 1.645, not 1.96 as previously claimed)
qnorm(1 - (0.10 / 2))
# z for a one-tailed test at alpha = 0.1 (i.e. the 90th percentile)
qnorm(1 - 0.1)
# z when a percentile is given directly instead of 1 - alpha
qnorm(0.9); qnorm(0.95)
# both z-scores (negative and positive) for a two-tailed test at alpha = 0.05
qnorm(c(0.025, 0.975)) # output: -1.959964, 1.959964
# QNORM: -----------------------------
# qnorm and pnorm are inverses of each other:
qnorm(c(0.25, 0.5, 0.75), # out: -0.6744898, 0.000000, 0.6744898
      mean = 0,
      sd = 1,
      lower.tail = TRUE)
pnorm(c(-0.6744898, 0.000000, 0.6744898), 0, 1, # out: 0.25, 0.5, 0.75
      lower.tail = TRUE)
# QNORM:
# find the z-score for a given probability
# (fixed: `p` was never defined; the intended value is 0.75, whose
#  z-score is 0.6744898)
p <- 0.75
qnorm(p, 0, 1, lower.tail = TRUE) # gives the z-score
# shade the region up to the 75th percentile under the standard normal density
cord.x <- c(-3, seq(-3, 0.67, 0.01), 0.67)
cord.y <- c(0, dnorm(seq(-3, 0.67, 0.01), 0, 1), 0)
curve(dnorm(x, 0, 1),
      xlim = c(-3, 3),
      main = "Standard Normal Density",
      ylab = "density")
polygon(cord.x, cord.y, col = "skyblue")
#----------------------------------------------------------
# RNORM: generate random observations
# simulate 100 random observations with mean = 3, sd = 2
rnorm(100, 3, 2)
# upper-tail probabilities of the quartile z-scores (fixed: `prnorm` -> `pnorm`)
pnorm(c(-0.6744898, 0.000000, 0.6744898), 0, 1, lower.tail = FALSE)
|
# EnsSprErr.R Spread to Error Ratio
#
# Copyright (C) 2016 MeteoSwiss
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#' Spread to Error Ratio
#'
#' Computes the spread to error ratio (SPR) for probabilistic forecasts - not
#' unlike the functions in SpecsVerification. SPR > 1 indicates overdispersion
#' (underconfidence), whereas SPR < indicates overconfidence in the forecasts.
#'
#' @param ens n x k matrix of n forecasts for k ensemble members
#' @param obs vector with n verifying observations
#'
#' @details Here we define the spread-error rate as the square root of the ratio
#' of mean ensemble variance to the mean squared error of the ensemble mean with
#' the verifying observations
#'
#' @seealso \code{\link{veriApply}}, \code{\link{FairSprErr}}
#'
#' @examples
#' tm <- toymodel()
#' EnsSprErr(tm$fcst, tm$obs)
#'
#' ## compute spread to error ratio using veriApply
#' veriApply('EnsSprErr', fcst=tm$fcst, obs=tm$obs)
#'
#' @export
EnsSprErr <- function(ens, obs) {
  # Spread to error ratio: sqrt of the mean ensemble variance over the mean
  # squared error of the ensemble mean (see the roxygen block above).
  #
  # ens: n x k matrix of n forecasts with k ensemble members
  # obs: vector of n verifying observations
  # returns: a single numeric value; > 1 indicates overdispersion,
  #          < 1 indicates overconfidence
  stopifnot(is.matrix(ens), is.vector(obs), nrow(ens) == length(obs))
  # keep rows with at least one non-missing member and a non-missing obs
  xmask <- apply(!is.na(ens), 1, any) & !is.na(obs)
  # mean ensemble variance over the retained rows
  spread <- mean(apply(ens[xmask, , drop = FALSE], 1, sd, na.rm = TRUE)^2,
                 na.rm = TRUE)
  # Fix: compute the error over the same rows as the spread, and take the
  # ensemble mean with na.rm = TRUE so rows with some (but not all) missing
  # members are no longer silently dropped from the error term while still
  # contributing to the spread term.
  ensmean <- rowMeans(ens[xmask, , drop = FALSE], na.rm = TRUE)
  error <- mean((obs[xmask] - ensmean)^2, na.rm = TRUE)
  sqrt(spread / error)
}
#
# Copyright (C) 2016 MeteoSwiss
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#' Spread to Error Ratio
#'
#' Computes the spread to error ratio (SPR) for probabilistic forecasts - not
#' unlike the functions in SpecsVerification. SPR > 1 indicates overdispersion
#' (underconfidence), whereas SPR < indicates overconfidence in the forecasts.
#'
#' @param ens n x k matrix of n forecasts for k ensemble members
#' @param obs vector with n verifying observations
#'
#' @details Here we define the spread-error rate as the square root of the ratio
#' of mean ensemble variance to the mean squared error of the ensemble mean with
#' the verifying observations
#'
#' @seealso \code{\link{veriApply}}, \code{\link{FairSprErr}}
#'
#' @examples
#' tm <- toymodel()
#' EnsSprErr(tm$fcst, tm$obs)
#'
#' ## compute spread to error ratio using veriApply
#' veriApply('EnsSprErr', fcst=tm$fcst, obs=tm$obs)
#'
#' @export
EnsSprErr <- function(ens, obs){
  # Spread to error ratio: sqrt(mean ensemble variance / MSE of the
  # ensemble mean). Values above 1 indicate overdispersion.
  #
  # ens: n x k matrix of n forecasts with k ensemble members
  # obs: vector of n verifying observations
  stopifnot(is.matrix(ens), is.vector(obs), nrow(ens) == length(obs))
  # rows with at least one non-missing member and a non-missing observation
  keep <- apply(!is.na(ens), 1, any) & !is.na(obs)
  member_var <- apply(ens[keep, , drop = FALSE], 1, sd, na.rm = TRUE)^2
  avg_spread <- mean(member_var, na.rm = TRUE)
  avg_error <- mean((obs - rowMeans(ens))^2, na.rm = TRUE)
  sqrt(avg_spread / avg_error)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSplotSPR.R
\name{SSplotSPR}
\alias{SSplotSPR}
\title{Plot Spawning Potential Ratio (SPR) quantities.}
\usage{
SSplotSPR(
replist,
add = FALSE,
plot = TRUE,
print = FALSE,
uncertainty = TRUE,
subplots = 1:4,
forecastplot = FALSE,
col1 = "black",
col2 = "blue",
col3 = "green3",
col4 = "red",
sprtarg = "default",
btarg = "default",
labels = c("Year", "SPR", "1-SPR", "Relative fishing intensity",
"Relative spawning output"),
pwidth = 6.5,
pheight = 5,
punits = "in",
res = 300,
ptsize = 10,
cex.main = 1,
plotdir = "default",
verbose = TRUE
)
}
\arguments{
\item{replist}{A list object created by \code{\link{SS_output}()}.}
\item{add}{add to existing plot (not yet implemented)}
\item{plot}{plot to active plot device?}
\item{print}{print to PNG files?}
\item{uncertainty}{include plots showing uncertainty?}
\item{subplots}{vector controlling which subplots to create
Numbering of subplots is as follows:
\enumerate{
\item timeseries of SPR,
\item timeseries of 1 - SPR,
\item timeseries of SPR ratio (as specified in the starter file), and
\item phase plot of Biomass ratio vs SPR ratio (as specified in the
starter file).
}}
\item{forecastplot}{Include forecast years in plot?}
\item{col1}{first color used}
\item{col2}{second color used}
\item{col3}{third color used}
\item{col4}{fourth color used}
\item{sprtarg}{F/SPR proxy target. "default" chooses based on model output,
where models which have SPR_report_basis = 0 or 1 specified in the starter
file will use the SPR target specified in the forecast file. Models which
have SPR_report_basis = 2 will use SPR at MSY for the SPR target
and models which have the SPR_report_basis = 3 will use
SPR at Btarget for the SPR target in these plots. Zero or negative values of
sprtarg input here will cause no horizontal line to be plotted.}
\item{btarg}{target depletion to be used in plots showing depletion. May be
omitted by setting to NA. "default" chooses based on model output.}
\item{labels}{vector of labels for plots (titles and axis labels)}
\item{pwidth}{width of plot}
\item{pheight}{height of plot}
\item{punits}{units for PNG file}
\item{res}{Resolution of plots printed to files.
The default is \code{res = 300}.}
\item{ptsize}{point size for PNG file}
\item{cex.main}{character expansion for plot titles}
\item{plotdir}{directory where PNG files will be written. by default it will
be the directory where the model was run.}
\item{verbose}{report progress to R GUI?}
}
\description{
Plot time series of SPR, 1-SPR, the chosen SPR ratio and the phase plot.
}
\seealso{
\code{\link[=SS_plots]{SS_plots()}}, \code{\link[=SS_output]{SS_output()}}
}
\author{
Ian Stewart, Ian Taylor
}
| /man/SSplotSPR.Rd | no_license | cgrandin/r4ss | R | false | true | 2,795 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSplotSPR.R
\name{SSplotSPR}
\alias{SSplotSPR}
\title{Plot Spawning Potential Ratio (SPR) quantities.}
\usage{
SSplotSPR(
replist,
add = FALSE,
plot = TRUE,
print = FALSE,
uncertainty = TRUE,
subplots = 1:4,
forecastplot = FALSE,
col1 = "black",
col2 = "blue",
col3 = "green3",
col4 = "red",
sprtarg = "default",
btarg = "default",
labels = c("Year", "SPR", "1-SPR", "Relative fishing intensity",
"Relative spawning output"),
pwidth = 6.5,
pheight = 5,
punits = "in",
res = 300,
ptsize = 10,
cex.main = 1,
plotdir = "default",
verbose = TRUE
)
}
\arguments{
\item{replist}{A list object created by \code{\link{SS_output}()}.}
\item{add}{add to existing plot (not yet implemented)}
\item{plot}{plot to active plot device?}
\item{print}{print to PNG files?}
\item{uncertainty}{include plots showing uncertainty?}
\item{subplots}{vector controlling which subplots to create
Numbering of subplots is as follows:
\enumerate{
\item timeseries of SPR,
\item timeseries of 1 - SPR,
\item timeseries of SPR ratio (as specified in the starter file), and
\item phase plot of Biomass ratio vs SPR ratio (as specified in the
starter file).
}}
\item{forecastplot}{Include forecast years in plot?}
\item{col1}{first color used}
\item{col2}{second color used}
\item{col3}{third color used}
\item{col4}{fourth color used}
\item{sprtarg}{F/SPR proxy target. "default" chooses based on model output,
where models which have SPR_report_basis = 0 or 1 specified in the starter
file will use the SPR target specified in the forecast file. Models which
have SPR_report_basis = 2 will use SPR at MSY for the SPR target
and models which have the SPR_report_basis = 3 will use
SPR at Btarget for the SPR target in these plots. Zero or negative values of
sprtarg input here will cause no horizontal line to be plotted.}
\item{btarg}{target depletion to be used in plots showing depletion. May be
omitted by setting to NA. "default" chooses based on model output.}
\item{labels}{vector of labels for plots (titles and axis labels)}
\item{pwidth}{width of plot}
\item{pheight}{height of plot}
\item{punits}{units for PNG file}
\item{res}{Resolution of plots printed to files.
The default is \code{res = 300}.}
\item{ptsize}{point size for PNG file}
\item{cex.main}{character expansion for plot titles}
\item{plotdir}{directory where PNG files will be written. by default it will
be the directory where the model was run.}
\item{verbose}{report progress to R GUI?}
}
\description{
Plot time series of SPR, 1-SPR, the chosen SPR ratio and the phase plot.
}
\seealso{
\code{\link[=SS_plots]{SS_plots()}}, \code{\link[=SS_output]{SS_output()}}
}
\author{
Ian Stewart, Ian Taylor
}
|
# Growth-curve analysis: mixed models (lmer) and GAMMs (mgcv) for weight
# and developmental day as functions of post-conception age.

## load libraries
.libPaths("O:/paths") # set library path (machine-specific; adjust as needed)
library(nlme)
library(lme4)
library(mgcv)
library(tidyverse)

## load data
df1 <- read_csv("weight.csv")

## tidy data (same transformation order as before)
df1 <- df1 %>%
  mutate(weight = as.numeric(weight),
         litter_size = as.numeric(litter_size),
         dev = as.numeric(dev)) %>%                 # measurement columns to numeric
  arrange(dev) %>%                                  # order by developmental day
  mutate(group = factor(group), ID = factor(ID),
         litter = factor(litter), sex = factor(sex)) %>% # grouping vars to factor
  # Fix: the previous `na.omit(weight)` ignored its argument and dropped
  # rows with NA in ANY column; the stated intent was to drop only rows
  # with missing weight.
  filter(!is.na(weight))

## visualisation of raw data
# weight with age
ggplot(df1, aes(x = age, y = weight)) +
  geom_point(aes(colour = group))
# dev day with age
ggplot(df1, aes(x = age, y = dev)) +
  geom_point(aes(colour = group))

############################################################
# GLMM: linear mixed model for weight
model2 <- lmer(weight ~ age + group +       # fixed effects: age and group
                 (age | litter/ID) +        # random slope + intercept: ID nested in litter
                 (age | litter_size),       # random slope + intercept: litter size
               data = df1)
# residual diagnostics
sresid2 <- resid(model2, type = "pearson")  # standardised residuals
hist(sresid2)                               # normality check - looks good
qqnorm(sresid2); qqline(sresid2)            # alternative normality check - also good
plot(sresid2)                               # poor: model not accounting for all variability
AIC(model2)
drop1(model2, ~., test = "Chisq")
summary(model2)
df1 <- df1 %>%
  mutate(pred = fitted(model2))             # predicted values from the model
# visualise model prediction
ggplot(df1, aes(x = age)) +
  geom_point(aes(y = weight)) +
  geom_line(aes(y = pred, colour = ID))     # predictions are linear but data are not: poor fit
# not improved by removing effects

############################################################
## GAMM to account for the non-linearity of the growth data
modeladd2 <- gam(weight ~ s(age, by = group) + group +       # smooth of age per group
                   s(litter, bs = 're') + s(ID, bs = 're'),  # random effects as penalised terms
                 family = gaussian(link = 'identity'),
                 na.action = na.omit, data = df1)
summary(modeladd2)
a <- aov(weight ~ age + group, data = df1, na.action = na.omit)
summary(a)
# refit as a GAMM with explicit random intercepts for ID (repeated
# measures) and litter
modeladd2 <- gamm(weight ~ s(age, by = group) + group,
                  random = list(ID = ~1, litter = ~1),
                  family = gaussian(link = 'identity'),
                  na.action = na.omit, data = df1)
windows(6, 6); par(mfrow = c(2, 2)) # NOTE(review): windows() is Windows-only
plot(modeladd2$gam)                 # shape of the fitted smooths
# residual diagnostics
sresid1 <- modeladd2$gam$residuals  # standardised residuals
hist(sresid1)                       # normality check - looks good
qqnorm(sresid1); qqline(sresid1)    # normality check - looks good
fitted.glmm2 <- modeladd2$gam$fitted
plot(sresid1 ~ fitted.glmm2)        # homoscedasticity check - looks ok
plot(predict(modeladd2$gam) ~ na.omit(df1$weight))
wei <- summary(modeladd2$gam)       # summary of the GAM results
capture.output(wei, file = 'weight-summary.txt')
summary(modeladd2$gam)
summary(modeladd2$lme)

# predictions from the GAMM over the observed age range
pdat <- expand.grid(age = seq(20, 39, 1), group = c('preterm', 'vehicle'))
pred <- predict(modeladd2$gam, newdata = pdat, na.rm = TRUE,
                type = 'response', se.fit = TRUE)
pred.frame <- data.frame(pdat, weight = pred$fit, se = pred$se.fit)
pred.frame$ci <- pred.frame$se * 1.96 # ~95% confidence band
# plot the model over the data
ggplot(NULL, aes(x = age, y = weight, colour = group)) +
  geom_point(data = df1) +
  theme(panel.background = element_rect(fill = "white", colour = 'black'),
        axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
        axis.title = element_text(size = 24, face = 'bold', colour = 'black'),
        axis.text = element_text(size = 20, face = 'bold', colour = 'black'),
        legend.position = 'none') +
  xlim(20, 40) +
  geom_line(data = pred.frame, size = 1) +
  geom_ribbon(data = pred.frame,
              aes(ymin = weight + ci, ymax = weight - ci, fill = group),
              alpha = 0.2, colour = NA) +
  labs(x = 'Age (post-conception day)', y = 'Weight (g)')
ggsave("growth-curve.png", width = 15, height = 15, units = "cm")

############################################################
# Developmental-day modelling
model3 <- gam(dev ~ s(age, by = group) + group,
              # NOTE(review): identity link with a Poisson family can fail
              # for fitted values <= 0 - confirm this is intended
              family = poisson(link = 'identity'),
              na.action = na.omit, data = df1)
summary(model3)
modeladd3 <- gamm(dev ~ s(age, by = group) + group,
                  random = list(ID = ~1), # repeated measures on ID
                  family = poisson(link = 'sqrt'),
                  na.action = na.omit, data = df1)
windows(6, 6); par(mfrow = c(2, 2)) # NOTE(review): windows() is Windows-only
plot(modeladd3$gam)                 # shape of the fitted smooths
sresid3 <- modeladd3$gam$residuals  # standardised residuals
hist(sresid3)                       # normality check
qqnorm(sresid3); qqline(sresid3)    # normality check
fitted.glmm3 <- modeladd3$gam$fitted
plot(sresid3 ~ fitted.glmm3)        # homoscedasticity check - looks ok
plot(predict(modeladd3$gam) ~ na.omit(df1$dev))
d <- summary(modeladd3$gam)         # summary of the GAM results
capture.output(d, file = 'dev-day-summary.txt')
summary(modeladd3$gam)
# predictions from the GAMM
pdatdev <- expand.grid(age = seq(20, 39, 1), group = c('preterm', 'vehicle'))
preddev <- predict(modeladd3$gam, newdata = pdatdev, na.rm = TRUE,
                   type = 'response', se.fit = TRUE)
pred.frame.dev <- data.frame(pdatdev, dev = preddev$fit, se = preddev$se.fit)
pred.frame.dev$ci <- pred.frame.dev$se * 1.96
# plot the model over the data
ggplot(NULL, aes(x = age, y = dev, colour = group)) +
  geom_point(data = df1) +
  theme(panel.background = element_rect(fill = "white", colour = 'black'),
        axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
        axis.title = element_text(size = 24, face = 'bold', colour = 'black'),
        axis.text = element_text(size = 20, face = 'bold', colour = 'black'),
        legend.position = 'none') +
  geom_line(data = pred.frame.dev, size = 1) +
  geom_ribbon(data = pred.frame.dev,
              aes(ymin = dev + ci, ymax = dev - ci, fill = group),
              alpha = 0.2, colour = NA) +
  labs(x = 'Age (post-conception day)', y = 'Developmental day') +
  xlim(20, 40)
ggsave("dev-curve.png", width = 15, height = 15, units = "cm")

############################################################
# Sensitivity check: truncate developmental day at PC35
df1 <- df1 %>%
  mutate(dev_short = dev) %>%
  mutate(dev_short = replace(dev_short, which(age > 35), NA))
modeladd3 <- gamm(dev_short ~ s(age, by = group) + group,
                  random = list(ID = ~1), # repeated measures on ID
                  family = poisson(link = 'sqrt'),
                  na.action = na.omit, data = df1)
summary(modeladd3$gam)
pdatdev <- expand.grid(age = seq(20, 35, 1), group = c('preterm', 'vehicle'))
preddev <- predict(modeladd3$gam, newdata = pdatdev, na.rm = TRUE,
                   type = 'response', se.fit = TRUE)
pred.frame.dev <- data.frame(pdatdev, dev_short = preddev$fit, se = preddev$se.fit)
pred.frame.dev$ci <- pred.frame.dev$se * 1.96
# plot the truncated model over the data
ggplot(NULL, aes(x = age, y = dev_short, colour = group)) +
  geom_point(data = df1) +
  theme(panel.background = element_rect(fill = "white", colour = 'black'),
        axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
        axis.title = element_text(size = 24, face = 'bold', colour = 'black'),
        axis.text = element_text(size = 20, face = 'bold', colour = 'black'),
        legend.position = 'none') +
  geom_line(data = pred.frame.dev, size = 1) +
  geom_ribbon(data = pred.frame.dev,
              aes(ymin = dev_short + ci, ymax = dev_short - ci, fill = group),
              alpha = 0.2, colour = NA) +
  labs(x = 'Age (post-conception day)', y = 'Developmental day') +
  xlim(20, 35)
ggsave("dev-curve-short.png", width = 15, height = 15, units = "cm")
| /weights.R | no_license | cx749/Thesis-statistic-in-R | R | false | false | 8,612 | r | ## load libraries
.libPaths("O:/paths") #set library path
library(nlme) # loading up the nlme library
library(lme4)
library(mgcv)
library(tidyverse)
##load data
df1<-read_csv("weight.csv") #load weight.csv
## tidy data
df1 <- df1%>%
mutate(weight = as.numeric(weight), litter_size = as.numeric(litter_size), dev = as.numeric(dev)) # weight and litter size to numeric
df1 <- df1 %>% arrange(dev) # arrange in order of dev column
df1 <- df1%>%
mutate(group = factor(group), ID = factor(ID), litter = factor(litter),
sex = factor(sex)) # variables to factor
df1 <- df1 %>%
na.omit(weight) # get rid of rows with na in weight column
## visualisation of raw data
# Raw weight against post-conception age, coloured by treatment group
ggplot(df1, aes(x = age,y = weight)) +
geom_point(aes(colour = group))
# Raw developmental day against post-conception age
ggplot(df1, aes(x = age,y = dev)) +
geom_point(aes(colour = group))
#############
# Linear mixed model: fixed effects of age and group, random age slopes for
# ID nested in litter and for litter size
model2<- lmer(weight ~ age + group + # fixed effects of group and age
(age|litter/ID) + (age|litter_size), # random slope+intercept for ID nested in litter, and for litter size
data = df1)
sresid2 <- resid(model2, type = "pearson") # extract the standardised residuals
hist(sresid2) # check residuals for normality - look good
qqnorm(sresid2);qqline(sresid2) # alternative normality check - also looks good
plot(sresid2) # check residuals - poor, model not accounting for all of the variability
AIC(model2)
drop1(model2, ~., test="Chisq") # likelihood-ratio tests for dropping each term
summary(model2)
# NOTE(review): fitted(model2) must align row-for-row with df1; this assumes no
# rows were dropped by lmer's NA handling at this point - confirm.
df1 <- df1 %>%
mutate(pred = fitted(model2)) # attach the model's predicted values
#visualise model prediction
ggplot(df1, aes(x = age)) +
geom_point(aes(y=weight)) +
geom_line(aes(y = pred, colour=ID)) # predictions are linear but the data is not - model is a poor fit
#not improved by removing effects
#########
## GAMM to account for the non-linearity of weight over age
# First pass: gam() with the random effects fitted as ridge-penalty terms (bs = 're')
modeladd2 <- gam(weight~ s(age, by = group) + group + # smooth of age per group, plus a group offset
s(litter, bs = 're') + s(ID, bs = 're'), # random intercepts for litter and ID
family = gaussian (link = 'identity'),
na.action=na.omit, data = df1)
summary(modeladd2)
# Simple ANOVA for comparison with the additive fit
a <- aov(weight ~ age + group, data = df1, na.action = na.omit)
summary(a)
# Final model: gamm() with explicit random intercepts (ID = repeated measures, litter)
modeladd2 <- gamm(weight ~ s(age, by = group) + group, #fixed effect age (with default spline) and group
random = list(ID = ~1, litter = ~1), #random effects (ID (repeated measures))
family = gaussian (link = 'identity'),
na.action=na.omit, data = df1)
windows (6,6); par (mfrow = c (2,2)) ## all diagnostic panels in one window (Windows-only device)
plot(modeladd2$gam) #plot shape of model
sresid1 <- modeladd2$gam$residuals # extract the standardised residuals
hist(sresid1)
qqnorm(sresid1);qqline(sresid1) # check normality - looks good
fitted.glmm2 <- modeladd2$gam$fitted # extract the fitted (predicted) values
plot(sresid1 ~ fitted.glmm2) # check for homoscedasticity - looks ok
plot(predict(modeladd2$gam) ~ na.omit(df1$weight))
wei <- summary(modeladd2$gam) # summary of GAM results
capture.output(wei, file = 'weight-summary.txt')
summary(modeladd2$gam)
summary(modeladd2$lme)
# Predictions from the GAMM over a regular age grid for both groups
pdat <- expand.grid(age=seq(20,39,1), group=c('preterm', 'vehicle'))
pred <- predict(modeladd2$gam, newdata = pdat, na.rm = T , type = 'response', se.fit = TRUE)
pred.frame <- data.frame(pdat, weight = pred$fit, se = pred$se.fit)
pred.frame$ci <- pred.frame$se*1.96 # 95% confidence half-width
#plot model over data
ggplot(NULL, aes(x = age, y = weight, colour = group)) +
geom_point(data = df1) +
theme(panel.background = element_rect(fill = "white", colour = 'black'),
axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
axis.title = element_text(size=24, face='bold', colour = 'black'),
axis.text = element_text(size=20, face='bold', colour = 'black'), legend.position = 'none') +
xlim(20, 40) +
geom_line(data = pred.frame, size = 1) +
# Ribbon spans fit - ci to fit + ci (the original had ymin/ymax swapped).
geom_ribbon (data = pred.frame, aes(ymin = weight - ci, ymax = weight + ci,
fill = group), alpha = 0.2, colour = NA) +
labs(x = 'Age (post-conception day)', y = 'Weight (g)')
ggsave("growth-curve.png", width = 15, height = 15, units = "cm")
#DEV modelling
# First attempt: plain gam() for developmental day (no random effects)
model3 <- gam(dev ~ s(age, by = group) + group #fixed effect age (with default spline) and group
, #random factors (all included, then sig checked and removed if over 0.05)
family = poisson(link = 'identity'), # count-like response, bounded at 30
na.action=na.omit, data = df1)
summary(model3)
# Final model: gamm() with a random intercept for ID (repeated measures)
modeladd3 <- gamm(dev ~ s(age, by = group) + group, #fixed effect age (with default spline) and group
random = list(ID = ~1), #random effect of ID (repeated measures)
family = poisson (link = 'sqrt'),
na.action = na.omit, data = df1)
windows (6,6); par (mfrow = c (2,2)) ## all diagnostic panels in one window (Windows-only device)
plot(modeladd3$gam) #plot shape of model
sresid3 <- modeladd3$gam$residuals # extract the standardised residuals
hist(sresid3) #check normality
qqnorm(sresid3);qqline(sresid3) #check normality
fitted.glmm3 <- modeladd3$gam$fitted # extract the fitted (predicted) values
plot(sresid3 ~ fitted.glmm3) # check for homoscedasticity - looks ok
plot(predict(modeladd3$gam) ~ na.omit(df1$dev))
d <- summary(modeladd3$gam) #summary of GAM results
capture.output(d, file = 'dev-day-summary.txt')
summary(modeladd3$gam)
# Predictions from the GAMM over a regular age grid for both groups
pdatdev <- expand.grid(age=seq(20,39,1), group=c('preterm', 'vehicle'))
preddev <- predict(modeladd3$gam, newdata = pdatdev, na.rm = T , type = 'response', se.fit = TRUE)
pred.frame.dev <- data.frame(pdatdev, dev = preddev$fit, se = preddev$se.fit)
pred.frame.dev$ci <- pred.frame.dev$se*1.96 # 95% confidence half-width
#plot model over data
ggplot(NULL, aes(x = age, y = dev, colour = group)) +
geom_point(data = df1) +
theme(panel.background = element_rect(fill = "white", colour = 'black'),
axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
axis.title = element_text(size=24, face='bold', colour = 'black'),
axis.text = element_text(size=20, face='bold', colour = 'black'), legend.position = 'none') +
geom_line(data = pred.frame.dev, size = 1) +
# Ribbon spans fit - ci to fit + ci (the original had ymin/ymax swapped).
geom_ribbon (data = pred.frame.dev, aes(ymin = dev - ci, ymax = dev + ci,
fill = group), alpha = 0.2, colour = NA) +
labs(x = 'Age (post-conception day)', y = 'Developmental day') +
xlim(20, 40)
ggsave("dev-curve.png", width = 15, height = 15, units = "cm")
#stop at PC35d? - refit the developmental model using only data up to PC day 35
df1 <- df1 %>%
mutate(dev_short = dev) %>%
mutate(dev_short = replace(dev_short, which (age > 35), NA)) # blank out observations past day 35
modeladd3 <- gamm(dev_short ~ s(age, by = group) + group, #fixed effect age (with default spline) and group
random = list(ID = ~1), #random effect of ID (repeated measures)
family = poisson (link = 'sqrt'),
na.action = na.omit, data = df1)
summary(modeladd3$gam)
# Predictions over the truncated age grid for both groups
pdatdev <- expand.grid(age=seq(20,35,1), group=c('preterm', 'vehicle'))
preddev <- predict(modeladd3$gam, newdata = pdatdev, na.rm = T , type = 'response', se.fit = TRUE)
pred.frame.dev <- data.frame(pdatdev, dev_short = preddev$fit, se = preddev$se.fit)
pred.frame.dev$ci <- pred.frame.dev$se*1.96 # 95% confidence half-width
#plot model over data
ggplot(NULL, aes(x = age, y = dev_short, colour = group)) +
geom_point(data = df1) +
theme(panel.background = element_rect(fill = "white", colour = 'black'),
axis.line = element_line(size = 0.5, linetype = 'solid', colour = "black"),
axis.title = element_text(size=24, face='bold', colour = 'black'),
axis.text = element_text(size=20, face='bold', colour = 'black'), legend.position = 'none') +
geom_line(data = pred.frame.dev, size = 1) +
# Ribbon spans fit - ci to fit + ci (the original had ymin/ymax swapped).
geom_ribbon (data = pred.frame.dev, aes(ymin = dev_short - ci, ymax = dev_short + ci,
fill = group), alpha = 0.2, colour = NA) +
labs(x = 'Age (post-conception day)', y = 'Developmental day') +
xlim(20, 35)
ggsave("dev-curve-short.png", width = 15, height = 15, units = "cm")
|
#### Replication of LIBERATION TECHNOLOGY: MOBILE PHONES AND POLITICAL MOBILIZATION IN AFRICA for the Public Econ and Dev course (Fall 2020) | /code_1.R | no_license | akankshavardani/Public-Eco-and-Dev-Replication | R | false | false | 140 | r | #### Replication of LIBERATION TECHNOLOGY: MOBILE PHONES AND POLITICAL MOBILIZATION IN AFRICA for the Public Econ and Dev course (Fall 2020) |
#PAGE=448 - median of price relatives (Schaum's Statistics, Ex 19.19.14)
y  <- c(1980, 1981, 1985)     # years
y1 <- c(29.11, 306.5, 343.6)  # base-period prices
y2 <- c(30.69, 325.6, 367.8)  # second price series
y3 <- c(28.38, 310.4, 356.4)  # given-period prices
y4 <- c(58410, 520, 1082)     # quantity series (part of the exercise data; unused below)
y5 <- c(60360, 558, 1211)
y6 <- c(65320, 567, 1297)
# (a) Median of the price relatives y3/y1, in percent rounded to 1 decimal.
# Vectorised replacement of the original element-by-element computation.
a <- median(round(y3 / y1 * 100, digits = 1))
a
# (b) Median of the price relatives using the mean of y1 and y2 as base.
a <- median(round(y3 / ((y1 + y2) / 2) * 100, digits = 1))
a
| /Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH19/EX19.19.14/Ex19_19_14.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 606 | r | #PAGE=448
y=c(1980,1981,1985)
y1=c(29.11,306.5,343.6)
y2=c(30.69,325.6,367.8)
y3=c(28.38,310.4,356.4)
y4=c(58410,520,1082)
y5=c(60360,558,1211)
y6=c(65320,567,1297)
a1=y3[1]/y1[1]
a1=a1*100
a1=round(a1,digits = 1)
a2=y3[2]/y1[2]
a2=a2*100
a2=round(a2,digits = 1)
a3=y3[3]/y1[3]
a3=a3*100
a3=round(a3,digits = 1)
a=c(a1,a2,a3)
a=median(a)
a
a1=(y1[1]+y2[1])/2
a1=y3[1]/a1
a1=a1*100
a1=round(a1,digits = 1)
a2=(y1[2]+y2[2])/2
a2=y3[2]/a2
a2=a2*100
a2=round(a2,digits = 1)
a3=(y1[3]+y2[3])/2
a3=y3[3]/a3
a3=a3*100
a3=round(a3,digits = 1)
a=c(a1,a2,a3)
a=median(a)
a
|
################################################# Generalised template for the data preprocessing of Input files ##########################################################
################################################# Business Sources and the KPI list for the script ##########################################################################
#There are total 9 internal data source - Primary Sales,Secondary Sales,Secondary Stock,DRO,Penetration,TTS,BMI,Itrust,Sellout
# The base KPI list for Primary Sales {GSV,NIV,Sales Quantity(In PCS)}
# The base KPI list for Secondary Sales {Sec sales value,Sec sales Qty(PC)}
# The base KPI list for Secondary Stock {Sec stock value,Sec stock Qty(PC)}
# The base KPI list for DROO {Orginal order qty,Final expected Order Qty,Dispatch qty}
# The base KPI list for Penetration {Penetration_Category,Penetration}
# The base KPI list for TTS {TTS,BBT,BBT-Place,BBT-Place on invoice,BBT-Place off invoice,BBT-Price,CPP on invoice,CPP off invoice,BBT-Product,BBT-Pack,BBT-Proposition,BBT-Promotion,EOT}
# The base KPI list for BMI {Brand & Marketing Investment,Brand & Marketing Investment Trade,Brand & Marketing Investment Consumer,Promotion Team Cost Trade,Promotion Team Cost Consumer}-set1
# The base KPI list for BMI {Promotion Team Cost Promotion Packaging Material Cost Trade Promotion Packaging Material Cost Consumer,Promotion Communication Material Cost Trade,Promotion Communication Material Cost Consumer}-set2
# The base KPI list for BMI {Promotion Communication Material Cost ,Promo Agency Remuneration Fees & Commissions Consumer,Promotional Expenses,Promotion Packaging Material Cost,Promotion Repacking Cost,Promotion Repacking Cost Trade}-set3
# The base KPI list for BMI {Promotion Repacking Cost Consumer,Promo Samples Gifts and Incentive Costs,Promo SamplesGifts and Incentive Costs,Consumer,Promo Agency Remun Fees & Commissions,Promo Agency Remuneration Fees & Commissions Trade} -set
################################################## Start point of the generalised template script ##########################################################################
# The output values should not be in scientific notation; use format(, scientific = F)
# Record the script start time so the total run time of the generalised template can be reported back to the user
# Load the required packages into the R session; any package not present in the R environment will be installed and then loaded
# Record start time; disable scientific notation and widen printed precision.
start.time <- Sys.time()
options(scipen=999)
options(digits=22)
packages.required <- c("dplyr","readxl","writexl","reshape","zoo","tidyr","stringr","tibble","data.table","bit64","readr","hablar","magrittr")
packages.diff <- setdiff(packages.required,rownames(installed.packages()))
# Install anything missing. The original condition was length(packages.diff>1),
# which measured the length of a comparison vector; test the count directly.
if(length(packages.diff) > 0){
install.packages(packages.diff,dependencies = T)
}
invisible(sapply(packages.required,library,character.only=T))
######## initiate the mapping path and the directory path for the script and load the input files ###############################
# Hard-coded local paths for the input extracts, the TTS workbooks, the working
# directory, the output folder and the universal mapping workbook.
# NOTE(review): machine-specific paths - adjust when running elsewhere.
input.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Input_files"
input.path.tts <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Input_files/TTS_xlsx"
directory.path <- "C:/Users/goura/OneDrive/Desktop/UL_R_Projects/ID_Jarvis_Bango_1.1.0/ID_Jarvis_Bango_1.1.0"
output.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Jarvis_R/Output_Folder"
mapping.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Mapping file/Jarvis ID- Universal Mapping.xlsx"
# Load each sheet of the universal mapping workbook into its own tibble.
product.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Material",col_names = T,guess_max = 100))
customer.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Customer",col_names = T,guess_max = 100))
conversion.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet ="Conversion",col_names = T,guess_max = 100 ))
banner.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Banner",col_names = T))
# Banners described as "Not assigned" get the placeholder code "#"; codes kept as character.
banner.mapping[which((banner.mapping$`Banner Desc`=="Not assigned")),"Banner code"] <- "#";rownames(banner.mapping) <- NULL
banner.mapping$`Banner code` <- as.character(banner.mapping$`Banner code`)
week.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Week",col_names = T,guess_max = 100))
month.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Month",col_names = T,guess_max = 100))
unmapped.tts.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Unmapped TTS",col_names = T,guess_max = 100))
unmapped.bmi.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Unmapped BMI",col_names = T,guess_max = 100))
penetration.customer.region.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Penetration Customer mapping",col_names = T,guess_max = 100))
psales.material.mt.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Material_PS_Sellout",col_names = T,guess_max = 100))
sellout.material.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sellout Material",col_names = T,guess_max = 100))
sellout.customer.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sell out Customer",col_names = T,guess_max = 100))
subrand.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sub brands",col_names = T,guess_max = 100))
###################################################################### Data Preprocessing User Defined Functions ##################################################
# Clean one raw BW extract: drop the four leading filler columns, promote the
# first data row to column headers, and print the structure and dimension.
input_clean <- function(extracted_df){
extracted_df <- extracted_df[,5:ncol(extracted_df)]
# First remaining row holds the real column headers.
colnames(extracted_df) <- as.character(unlist(extracted_df[1,]))
clean_df <- extracted_df[-1,]
print(str(clean_df))
# The original evaluated dim() without printing it; print it so the promised
# "structure and dimension" diagnostics both reach the console.
print(dim(clean_df))
return(clean_df)
}
# Left-join the business mapping table onto an input extract: every input row
# is kept, with NA in the mapping columns for unmapped keys.
mapping <- function(input,map,map_key1,map_key2){
merge.data.frame(x = input, y = map, by.x = map_key1, by.y = map_key2, all.x = TRUE)
}
# Reshape a wide penetration table (one column per month) into long format:
# the returned frame keeps the original first column's name but carries the
# month label, paired with its penetration value.
penetration_transpose <- function(input_df){
long_df <- tidyr::gather(input_df,Month_Year,penetration_value,2:ncol(input_df))
# Overwrite the original id column with the month label, then keep label + value.
long_df[[1]] <- long_df[[2]]
long_df[c(1, 3)]
}
# Keep DT rows with a non-zero required packgroup and a non-zero region, then
# drop the mapping/helper columns, leaving only the packgroup data of interest.
packgroup_selectdf <- function(input_packgroup_df){
input_packgroup_df %>%
filter(`REQUIRED PACKGROUP` != 0,
str_detect(`Account/Region`, "DT"),
`Account/Region` != 0) %>%
select(-c(`Material Desc.y`:`BASEPACK NO`, `PACKGROUP NAME`:`Sub channel`,
National:UMREN, DIVCD:ncol(.)))
}
# Pre-process the Other Islands (OI) penetration sheet: promote the first row
# to headers, keep only the first data row, tag it as region "DT OI" and
# reshape to long (Month, Penetration_Region).
penetration_oi_format <- function(input_penetration_oi){
colnames(input_penetration_oi) <- input_penetration_oi[1,]
input_penetration_oi <- input_penetration_oi[-1,]
input_penetration_oi <- slice(input_penetration_oi, 1)
# Tag the region, drop the original leading column, and put the tag first.
input_penetration_oi$`Account/Region` <- "DT OI"
input_penetration_oi <- input_penetration_oi[2:ncol(input_penetration_oi)]
input_penetration_oi <- select(input_penetration_oi, c(`Account/Region`, everything()))
gather(input_penetration_oi, key = "Month", "Penetration_Region", 2:ncol(input_penetration_oi))
}
# Pre-process the three-region penetration data (Central & East Java, Sumatera,
# West Java): attach the Account/Region label via the customer-region lookup,
# drop unassigned rows, and reshape to long (Month, Penetration_Region).
penetration_3regions_format <- function(input_penetration_3region){
merged <- merge.data.frame(input_penetration_3region,penetration.customer.region.mapping,by = "Local Sales Force 2(m.d.)\\Calendar Year/Month")
merged <- merged[which(merged$`Account/Region` != 0), ]
# Drop the join key and lead with the region label.
merged <- select(merged, -c(`Local Sales Force 2(m.d.)\\Calendar Year/Month`))
merged <- select(merged, c(`Account/Region`, everything()))
gather(merged, key = "Month", "Penetration_Region", 2:ncol(merged))
}
# Compute the penetration percentage: the region count as a share of the
# category count, after coercing both columns to numeric.
penetration_percentage <- function(penetration_final_df){
penetration_final_df <- hablar::convert(penetration_final_df, num(Penetration_Region, Penetration_Category))
mutate(penetration_final_df,
penetration_percentage = Penetration_Region / Penetration_Category * 100)
}
# Final penetration output: replace the raw Sec-stock/DROO/Penetration period
# label with the canonical Month from month.mapping, keeping region and
# percentage columns.
penetration_packgroup_output <- function(penetration_final_df){
period_to_month <- select(month.mapping, c(`Sec stock_DROO_Penetration`, Month))
penetration_final_df <- merge.data.frame(penetration_final_df, period_to_month,
by.x = "Month", by.y = "Sec stock_DROO_Penetration")
# The canonical month arrives suffixed as Month.y; swap it in for the raw label.
penetration_final_df <- select(penetration_final_df, -Month)
penetration_final_df <- select(penetration_final_df,
c(Month.y, `Account/Region`:penetration_percentage))
dplyr::rename(penetration_final_df, Month = Month.y)
}
# Pre-process the sell-out extract: data1 carries a two-row header block,
# data2 carries the identifier columns plus the monthly value columns.
# Returns a long frame of monthly "WITHOUT"(-tax) sell-out values per mapped customer.
# NOTE(review): the defaults mapping_set1/mapping_Set are never referenced; the
# body reads the globals month.mapping and sellout.customer.mapping directly - confirm intent.
sellout_preprocessing <- function(input_sellout_data1,input_sellout_data2,mapping_set1=month.mapping,mapping_Set=sellout.customer.mapping){
# Build a combined "type-month" header row by pasting header rows 1 and 2.
input_sellout_data1[3,]<-NA
for(i in 1:ncol(input_sellout_data1)){
input_sellout_data1[3,i]<-paste(input_sellout_data1[1,i], input_sellout_data1[2,i],sep="-")
}
input_sellout_data1 <- input_sellout_data1[3,]
# First five columns are identifiers; promote their first row to column names.
input_sellout_data2.split1 <- input_sellout_data2[,1:5]
colnames(input_sellout_data2.split1) <- as.character(unlist(input_sellout_data2.split1[1,]))
# Remaining columns are monthly values; label them with the combined headers.
input_sellout_data2.split2 <- input_sellout_data2[6:ncol(input_sellout_data2)]
colnames(input_sellout_data2.split2) <- input_sellout_data1
output_sellout_data <- cbind(input_sellout_data2.split1,input_sellout_data2.split2)
output_sellout_data <- output_sellout_data[-1,]
# Drop columns whose pasted header contains "NA" (empty header cells).
output_sellout_data <- output_sellout_data[, -grep("NA", colnames(output_sellout_data))]
# Wide -> long: one row per identifier x monthly value column.
output_sellout_data <- output_sellout_data%>%
tidyr::gather(.,Sellout_Type,Sellout_Value,6:ncol(.))
# Keep only the WITHOUT(-tax) measure.
output_sellout_data <- output_sellout_data%>%
filter(str_detect(Sellout_Type,"WITHOUT"))
# Split the pasted "type-month" header back into its two parts.
output_sellout_data <- output_sellout_data%>%
tidyr::separate(.,col = Sellout_Type,into = c("Sellout_Type","Month"),sep = "-",remove = T)
# Month arrives as an Excel date serial; 1899-12-30 is Excel's day-zero origin.
output_sellout_data$New_Date <-as.Date(as.numeric(output_sellout_data$Month),origin="1899-12-30")
output_sellout_data <- output_sellout_data%>%
hablar::convert(chr(New_Date))
output_sellout_data$month <- as.integer(substr(output_sellout_data$New_Date,6,7))
output_sellout_data$year <- as.integer(substr(output_sellout_data$New_Date,1,4))
# Re-encode the period as "mm.yyyy" to match month.mapping's Sellout key.
output_sellout_data$Sellout <- format(as.Date(paste0(output_sellout_data$month,"-1-",output_sellout_data$year), "%m-%d-%Y"),"%m.%Y")
output_sellout_data <- output_sellout_data%>%
select(-c(Month,New_Date:year))
# Map the period onto the canonical Month and keep only the needed columns.
output_sellout_data_mapped <- merge.data.frame(output_sellout_data,month.mapping,by.x = "Sellout",by.y = "Sellout")
output_sellout_data_mapped <- output_sellout_data_mapped%>%
select(-c(Sellout,`Primary sales`:TTS_BMI))
output_sellout_data_mapped%<>%select(c(Month,MAP:Sellout_Value))
# Replace the raw customer with its mapped customer name.
output_sellout_data_mapped <-merge.data.frame(x = output_sellout_data_mapped,y = sellout.customer.mapping,by.x = "Customer",by.y = "Customer")
output_sellout_data_mapped <- output_sellout_data_mapped%>%
select(-Customer)
output_sellout_data_mapped <- output_sellout_data_mapped%>%
select(c(Month,Customer = `Customer Mapped`,MAP:Sellout_Value))
return(output_sellout_data_mapped)
}
################################################################ Data Preprocessing ( Input psales) ######################################################
# Fetch all primary-sales extracts, clean each sheet, and stack them into one frame.
psales.files <- list.files(path = input.path,pattern = "^ID_Food_Primary Sales GSV(.*)xlsx|XLSX$")
psales.list <- invisible(lapply(paste0(input.path,"/",psales.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(psales.list) <- basename(psales.files)
cleaned.psales.list <- invisible(lapply(psales.list, input_clean))
cleaned.psales.dfs.list <- lapply(cleaned.psales.list,data.frame)
input.psales.df <- bind_rows(cleaned.psales.dfs.list)
psales.col.headers <- c("Fiscal year/period","Sales Organization","Business Type","Category(m.d.) from SKU","Market(m.d.) from SKU",
"Sector(m.d.) from SKU","Brand(PH)(m.d.) from SKU","Material","Material Desc","Customer","Customer Desc",
"Gross Sales Value (GSV)","Sales Quantity","NIV")
colnames(input.psales.df) <- psales.col.headers
# Strip embedded carriage returns/newlines from the headers.
id.psales.header <- gsub("[\r\n]","",colnames(input.psales.df))
colnames(input.psales.df) <- id.psales.header
# Convert the KPI columns from character to numeric for calculations.
input.psales.df%<>%convert(num(`Gross Sales Value (GSV)`,`Sales Quantity`,NIV))
# Rename the default Material and Customer columns.
input.psales.df <- input.psales.df%<>%dplyr::rename(Material_Code=Material,Customer_Code=Customer)
# Extract the 8-digit customer code (fourth "/"-separated token).
input.psales.df <- separate(data = input.psales.df,"Customer_Code",c("a","b","c","Customer_Code_8D"),sep = "/",remove = F)
input.psales.df%<>%dplyr::select(-c("a","b","c"))
# Keep only Bango-brand observations.
brand.bango <- c("BANGO","Bango")
id.psales.bango <- input.psales.df%>%
filter(`Brand(PH)(m.d.) from SKU`%in% brand.bango)
# Replace NA KPI values with 0, as confirmed by the business.
psales.kpi.list <- c("Gross Sales Value (GSV)","Sales Quantity","NIV")
for (kpi in psales.kpi.list) {
id.psales.bango[which(is.na(id.psales.bango[,kpi])),kpi] <- 0
}
# Product, customer and conversion lookups.
bango.ps.pm <- mapping(id.psales.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.ps.cm <- mapping(id.psales.bango,customer.mapping,map_key1 = "Customer_Code_8D",map_key2 = "Customer Code")
bango.ps.convm <- mapping(id.psales.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Unmapped observations. The customer lookup previously indexed bango.ps.pm
# with row positions taken from bango.ps.cm - two differently ordered merge
# results - so the reported codes could be wrong; index bango.ps.cm consistently.
bango.ps.npm <- unique(bango.ps.pm[which(is.na(bango.ps.pm$`Mapped Product Name`)),"Material_Code"])
bango.ps.ncm <- unique(bango.ps.cm[which(is.na(bango.ps.cm$Channel)),c("Customer_Code","Customer Desc")])
bango.ps.nconvm <- unique(bango.ps.convm[which(is.na(bango.ps.convm$Conversion)),"Material_Code"])
if(length(bango.ps.npm)>0){
write.csv(bango.ps.npm,file = paste0(output.path,"/psmaterial_unmapped.csv"),row.names = F)
}
# bango.ps.ncm is a data frame, so length() would count its columns (always 2);
# nrow() counts the actual unmapped customers.
if(nrow(bango.ps.ncm)>0){
write.csv(bango.ps.ncm,file = paste0(output.path,"/pscustomer_unmapped.csv"),row.names = F)
}
if(length(bango.ps.nconvm)>0){
write.csv(bango.ps.nconvm,file = paste0(output.path,"/psconversion_unmapped.csv"),row.names = F)
}
############################################################## Data Preprocessing ( Input ssales) #####################################################
# Fetch all secondary-sales extracts, clean each sheet, and stack them.
ssales.files <- list.files(path = input.path,pattern = "^ID_Food_Secondary Sales IP(.*)xlsx|XLSX$")
ssales.list <- invisible(lapply(paste0(input.path,"/",ssales.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(ssales.list) <- basename(ssales.files)
cleaned.ssales.list <- invisible(lapply(ssales.list, input_clean))
cleaned.ssales.dfs.list <- lapply(cleaned.ssales.list,data.frame)
input.ssales.df <- bind_rows(cleaned.ssales.dfs.list)
ssales.col.headers <- c("Calendar Year/Month","Sales Organization","Business Type","Category(m.d.)","Market(m.d.)","Sector(m.d.)",
"Brand(PH)(m.d.)","Material","Material Desc","Distributor","Distributor Desc","Gross Sec Sales (TUR)","Sec Volume")
colnames(input.ssales.df) <- ssales.col.headers
# Strip embedded carriage returns/newlines from the headers.
id.ssales.header <- gsub("[\r\n]","",colnames(input.ssales.df))
colnames(input.ssales.df) <- id.ssales.header
# Convert the KPI columns from character to numeric for calculations.
input.ssales.df%<>%convert(num(`Gross Sec Sales (TUR)`,`Sec Volume`))
# Rename the default Material and Distributor columns.
input.ssales.df <- input.ssales.df%<>%dplyr::rename(Material_Code=Material,Distributor_Code=Distributor)
# Extract the 8-digit distributor code (fourth "/"-separated token).
input.ssales.df <- separate(data = input.ssales.df,"Distributor_Code",c("a","b","c","Distributor_Code_8D"),sep = "/",remove = F)
input.ssales.df%<>%dplyr::select(-c("a","b","c"))
# Keep only Bango-brand observations.
brand.bango <- c("BANGO","Bango")
id.ssales.bango <- input.ssales.df%>%
filter(`Brand(PH)(m.d.)`%in% brand.bango)
# Replace NA KPI values with 0.
ssales.kpi.list <- c("Gross Sec Sales (TUR)","Sec Volume")
for (kpi in ssales.kpi.list) {
id.ssales.bango[which(is.na(id.ssales.bango[,kpi])),kpi] <- 0
}
# Product, customer and conversion lookups.
bango.ssales.pm <- mapping(id.ssales.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.ssales.cm <- mapping(id.ssales.bango,customer.mapping,map_key1 = "Distributor_Code_8D",map_key2 = "Customer Code")
bango.ssales.convm <- mapping(id.ssales.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Unmapped observations for each lookup.
bango.ss.npm <- unique(bango.ssales.pm[which(is.na(bango.ssales.pm$`Mapped Product Name`)),"Material_Code"])
bango.ss.ncm <- unique(bango.ssales.cm[which(is.na(bango.ssales.cm$Channel)),c("Distributor_Code","Distributor Desc")])
bango.ss.nconvm <- unique(bango.ssales.convm[which(is.na(bango.ssales.convm$Conversion)),"Material_Code"])
if(length(bango.ss.npm)>0){
write.csv(bango.ss.npm,file = paste0(output.path,"/ssmaterial_unmapped.csv"),row.names = F)
}
# bango.ss.ncm is a data frame, so length() would count its columns (always 2);
# nrow() counts the actual unmapped distributors.
if(nrow(bango.ss.ncm)>0){
write.csv(bango.ss.ncm,file = paste0(output.path,"/sscustomer_unmapped.csv"),row.names = F)
}
if(length(bango.ss.nconvm)>0){
write.csv(bango.ss.nconvm,file = paste0(output.path,"/ssconversion_unmapped.csv"),row.names = F)
}
############################################################## Data Preprocessing (Input SStock) #################################################################
# Fetch all secondary-stock extracts, clean each sheet, and stack them.
sstock.files <- list.files(path = input.path,pattern = "^ID_Food_Secondary Stock IP(.*)xlsx|XLSX$")
sstock.list <- invisible(lapply(paste0(input.path,"/",sstock.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(sstock.list) <- basename(sstock.files)
cleaned.sstock.list <- invisible(lapply(sstock.list, input_clean))
cleaned.sstock.dfs.list <- lapply(cleaned.sstock.list,data.frame)
input.sstock.df <- bind_rows(cleaned.sstock.dfs.list)
sstock.col.headers <- c("Calendar Year/Month","Sales Organization","Category(m.d.)","Market(m.d.)","Sector(m.d.)","Brand(PH)(m.d.)","Material","Material Desc","Sold-to party","STP Desc","Secondary Stock Volume","Secondary Stock Value [@ DT Rate.]")
colnames(input.sstock.df) <- sstock.col.headers
# Strip embedded carriage returns/newlines from the headers.
id.sstock.header <- gsub("[\r\n]","",colnames(input.sstock.df))
colnames(input.sstock.df) <- id.sstock.header
# Convert the KPI columns from character to numeric for calculations.
input.sstock.df%<>%convert(num(`Secondary Stock Volume`,`Secondary Stock Value [@ DT Rate.]`))
# Rename the default Material and sold-to-party columns.
input.sstock.df <- input.sstock.df%<>%dplyr::rename(Material_Code=Material,STP_Code=`Sold-to party`)
# Keep only Bango-brand observations.
brand.bango <- c("BANGO","Bango")
id.sstock.bango <- input.sstock.df%>%
filter(`Brand(PH)(m.d.)`%in% brand.bango)
# Replace NA KPI values with 0.
sstock.kpi.list <- c("Secondary Stock Volume","Secondary Stock Value [@ DT Rate.]")
for (kpi in sstock.kpi.list) {
id.sstock.bango[which(is.na(id.sstock.bango[,kpi])),kpi] <- 0
}
# Product, customer and conversion lookups.
bango.sstock.pm <- mapping(id.sstock.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.sstock.cm <- mapping(id.sstock.bango,customer.mapping,map_key1 = "STP_Code",map_key2 = "Customer Code")
bango.sstock.convm <- mapping(id.sstock.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Unmapped observations for each lookup.
bango.sstock.npm <- unique(bango.sstock.pm[which(is.na(bango.sstock.pm$`Mapped Product Name`)),"Material_Code"])
bango.sstock.ncm <- unique(bango.sstock.cm[which(is.na(bango.sstock.cm$Channel)),c("STP_Code","STP Desc")])
bango.sstock.nconvm <- unique(bango.sstock.convm[which(is.na(bango.sstock.convm$Conversion)),"Material_Code"])
if(length(bango.sstock.npm)>0){
write.csv(bango.sstock.npm,file = paste0(output.path,"/sstock_material_unmapped.csv"),row.names = F)
}
# bango.sstock.ncm is a data frame, so length() would count its columns (always
# 2); nrow() counts the actual unmapped sold-to parties.
if(nrow(bango.sstock.ncm)>0){
write.csv(bango.sstock.ncm,file = paste0(output.path,"/sstock_customer_unmapped.csv"),row.names = F)
}
if(length(bango.sstock.nconvm)>0){
write.csv(bango.sstock.nconvm,file = paste0(output.path,"/sstock_conversion_unmapped.csv"),row.names = F)
}
# Attach all three mappings. These merges use merge()'s default inner join
# (all = FALSE), so any row without a product, customer AND conversion match
# is dropped from sstock.bango.
sstock.bango <- merge.data.frame(x = id.sstock.bango,y =product.mapping,by.x ="Material_Code",by.y = "Material Code")%>%
merge.data.frame(x = .,y = customer.mapping,by.x = "STP_Code",by.y ="Customer Code")%>%
merge.data.frame(x = .,y = conversion.mapping,by.x = "Material_Code",by.y = "MATNR")
######################################################################## Data Preprocessing (Input DROO) ##########################################################333
# Fetch all DROO extracts, clean each sheet, and stack them.
droo.files <- list.files(path = input.path,pattern = "^ID_Food_DROO IP_(.*)xlsx|XLSX$")
droo.list <- invisible(lapply(paste0(input.path,"/",droo.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(droo.list) <- basename(droo.files)
cleaned.droo.list <- invisible(lapply(droo.list, input_clean))
cleaned.droo.dfs.list <- lapply(cleaned.droo.list,data.frame)
input.droo.df <- bind_rows(cleaned.droo.dfs.list)
droo.col.headers <- c("Calendar Year/Month","Sales Organization","Business Type","Category(m.d.)","Market(m.d.)","Sector(m.d.)","Brand(PH)(m.d.)","Material","Material Desc","Sold-to party","STP Desc","OriginalOrder Qty","Final Customer Expected Order Qty","Dispatched Qty")
# Assign the clean headers first, then scrub any carriage returns/newlines
# (the original scrubbed the raw headers and immediately overwrote the result).
colnames(input.droo.df) <- droo.col.headers
id.droo.header <- gsub("[\r\n]","",colnames(input.droo.df))
colnames(input.droo.df) <- id.droo.header
# Convert the KPI columns from character to numeric for calculations.
input.droo.df%<>%convert(num(`OriginalOrder Qty`,`Final Customer Expected Order Qty`,`Dispatched Qty`))
# Rename the default Material and sold-to-party columns.
input.droo.df <- input.droo.df%<>%dplyr::rename(Material_Code=Material,STP_Code=`Sold-to party`)
# Keep only Bango-brand observations.
brand.bango <- c("BANGO","Bango")
id.droo.bango <- input.droo.df%>%
filter(`Brand(PH)(m.d.)`%in% brand.bango)
# Replace NA KPI values with 0.
droo.kpi.list <- c("OriginalOrder Qty","Final Customer Expected Order Qty","Dispatched Qty")
for (kpi in droo.kpi.list) {
id.droo.bango[which(is.na(id.droo.bango[,kpi])),kpi] <- 0
}
# Product, customer and conversion lookups.
bango.droo.pm <- mapping(id.droo.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.droo.cm <- mapping(id.droo.bango,customer.mapping,map_key1 = "STP_Code",map_key2 = "Customer Code")
bango.droo.convm <- mapping(id.droo.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Unmapped observations for each lookup.
bango.droo.npm <- unique(bango.droo.pm[which(is.na(bango.droo.pm$`Mapped Product Name`)),"Material_Code"])
bango.droo.ncm <- unique(bango.droo.cm[which(is.na(bango.droo.cm$Channel)),c("STP_Code","STP Desc")])
bango.droo.nconvm <- unique(bango.droo.convm[which(is.na(bango.droo.convm$Conversion)),"Material_Code"])
if(length(bango.droo.npm)>0){
write.csv(bango.droo.npm,file = paste0(output.path,"/droo_material_unmapped.csv"),row.names = F)
}
# Fixed copy-paste bug: the condition previously tested bango.sstock.ncm while
# writing bango.droo.ncm; also use nrow() since ncm is a data frame.
if(nrow(bango.droo.ncm)>0){
write.csv(bango.droo.ncm,file = paste0(output.path,"/droo_customer_unmapped.csv"),row.names = F)
}
if(length(bango.droo.nconvm)>0){
write.csv(bango.droo.nconvm,file = paste0(output.path,"/droo_conversion_unmapped.csv"),row.names = F)
}
#perform the mapping for the observations from the extracted files with the product , customer and conversion mapping
droo.bango <- merge.data.frame(x = id.droo.bango,y =product.mapping,by.x ="Material_Code",by.y = "Material Code")%>%
merge.data.frame(x = .,y = customer.mapping,by.x = "STP_Code",by.y ="Customer Code")%>%
merge.data.frame(x = .,y = conversion.mapping,by.x = "Material_Code",by.y = "MATNR")
############################################################# (Itrust) ######################################################
# Itrust: single CSV extract with weekly stock-vs-CRR service levels.
input.itrust.path <- paste0(input.path, "/Itrust_2018 till wk 48.csv")
itrust.input <- tibble::as_tibble(read.csv(input.itrust.path, header = TRUE))
# derive the YEAR from the file name when the extract does not carry it
if (!"YEAR" %in% colnames(itrust.input)) {
  itrust.input[, "YEAR"] <- substr(basename(input.itrust.path), 8, 12)
}
# ITRUST_LINE: 1 when stock covers the final CRR (and CRR > 0), else 0.
# ITRUST_TOTAL: 1 for every line with a non-zero CRR.
# The ifelse() calls assign the full column, so the original's separate
# partial zero-fill for FINAL_CRR == 0 rows was dead code and is removed.
itrust.input[, "ITRUST_LINE"] <- NA
itrust.input[, "ITRUST_TOTAL"] <- NA
itrust.input$ITRUST_LINE <- ifelse(itrust.input$FINAL_CRR == 0, 0, ifelse(itrust.input$STOCK_CS > itrust.input$FINAL_CRR, 1, 0))
itrust.input$ITRUST_TOTAL <- ifelse(itrust.input$FINAL_CRR == 0, 0, 1)
# build the "WK.YEAR" key used to join with the week mapping later on
itrust.input <- itrust.input %>%
  tidyr::unite(col = "Week_Concat", c("WK", "YEAR"), sep = ".", remove = FALSE)
itrust.input$Week_Concat <- stringr::str_trim(itrust.input$Week_Concat, side = "both")
# product and customer mapping for the itrust observations
itrust.bango.pm <- mapping(itrust.input, product.mapping, map_key1 = "PROD_CODE", map_key2 = "Material Code")
itrust.bango.cm <- mapping(itrust.input, customer.mapping, map_key1 = "CUST_CODE", map_key2 = "Customer Code")
itrust.bango.npm <- unique(itrust.bango.pm[which(is.na(itrust.bango.pm$BRAND)), "PROD_CODE"])
itrust.bango.ncm <- unique(itrust.bango.cm[which(is.na(itrust.bango.cm$Channel)), "CUST_CODE"])
if (length(itrust.bango.npm) > 0) {
  write.csv(itrust.bango.npm, file = paste0(output.path, "/itrustmaterial_unmapped.csv"), row.names = FALSE)
}
if (length(itrust.bango.ncm) > 0) {
  write.csv(itrust.bango.ncm, file = paste0(output.path, "/itrustcustomer_unmapped.csv"), row.names = FALSE)
}
# (the original repeated str_trim(Week_Concat) here; the column was already
# trimmed above, so the duplicate call is removed)
#################################################################### TTS (Mapped)###############################################
# TTS (mapped): read every "ID_Food_Primary Sales TTS IP*" workbook.
# NOTE(review): the alternation "...(.*)xlsx|XLSX$" also matches ANY file
# ending in "XLSX" regardless of prefix — confirm the folder only holds TTS files.
tts.files <- list.files(path = input.path.tts,pattern = "^ID_Food_Primary Sales TTS IP(.*)xlsx|XLSX$")
tts.list <- invisible(lapply(paste0(input.path.tts,"/",tts.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(tts.list) <- basename(tts.files)
cleaned.tts.list <- invisible(lapply(tts.list, input_clean))
cleaned.tts.dfs.list <- lapply(cleaned.tts.list,data.frame)
input.tts.df <- bind_rows(cleaned.tts.dfs.list)
# canonical header list; the NA entries are placeholder columns renamed below
tts.col.headers <- c("Fiscal year/period","Sales Organization",NA,"Category(m.d.) from SKU","Sector(m.d.) from SKU","Market(m.d.) from SKU",
"Brand(m.d.)","Material",NA,"Banner(m.d.)",NA,"Banner",NA,"TTS","BBT","BBT - Place","BBT - Place on invoice","BBT - Place off invoice",
"BBT - Price","CPP on invoice","CPP off invoice","BBT - Product","BBT - Pack","BBT - Proposition","BBT - Promotion","EOT")
colnames(input.tts.df) <- tts.col.headers
# strip embedded carriage returns / newlines (no-op here since the clean
# headers were just assigned, but kept for consistency with other sections)
id.tts.header <- gsub("[\r\n]","",colnames(input.tts.df))
colnames(input.tts.df) <- id.tts.header
# give the four NA placeholder columns their real names — replacement order matters
input.tts.headers <- colnames(input.tts.df)
input.tts.headers[(which(is.na(input.tts.headers)))] <- c("Business_Type","Material_Description","Banner(m.d)_Description","Banner_Description")
colnames(input.tts.df) <- input.tts.headers
# map the materials to the product master to obtain the brand
mapped.tts.material <- mapping(input = input.tts.df,map = product.mapping,map_key1 ="Material",map_key2 = "Material Code" )
# keep only the BANGO brand observations
id.tts.mapped.bango <- mapped.tts.material%>%
filter(BRAND =="BANGO")
# convert the KPI columns (14:26) from character to numeric
id.tts.mapped.bango[,14:26] <- sapply(id.tts.mapped.bango[,14:26],as.numeric)
# drop the mapping columns that are not required downstream
# NOTE(review): position-based — assumes the merge appended the product
# mapping at columns 27+; re-check if the mapping sheet changes shape
id.tts.mapped.bango1 <-id.tts.mapped.bango%>%
select(-c(27:32,34:ncol(id.tts.mapped.bango)))
# banner mapping fetches the channel for each mapped banner
id.tts.channel.mapping <- mapping(input=id.tts.mapped.bango1,map =banner.mapping, map_key1 = "Banner",map_key2 = "Banner code")
# banners with no channel after the join (candidates for the mapping file)
id.ttschannel.na <- unique(id.tts.channel.mapping[which(is.na(id.tts.channel.mapping$Channel)),"Banner"])
# reorder: descriptive columns first, then the KPI block
id.tts.channel.mapping <- id.tts.channel.mapping%>%
select(1:13,27:32,14:26)
# NA KPI values become 0, as agreed with the business
for (i in 20:ncol(id.tts.channel.mapping)){
id.tts.channel.mapping[which(is.na(id.tts.channel.mapping[,i])),i] <- 0
}
########################################################################### TTS (Unmapped)########################################################
# TTS (unmapped): single workbook of TTS lines that failed the standard mapping.
input.tts.unmapped.path <- paste0(input.path, "/TTS Bango-Unmapped.xlsx")
tts.unmapped.input <- as_tibble(read_xlsx(paste0(input.tts.unmapped.path), sheet = 1, guess_max = 1000, skip = 12))
unmapped.tts.input <- input_clean(tts.unmapped.input)
# strip embedded carriage returns / newlines from the headers
input.unmapped.tts.headers <- gsub("[\r\n]", "", colnames(unmapped.tts.input))
colnames(unmapped.tts.input) <- input.unmapped.tts.headers
# name the unnamed (NA) columns — the replacement vector order matters
colnames(unmapped.tts.input)[which(is.na(colnames(unmapped.tts.input)))] <- "Unnammed_cols"
colnames(unmapped.tts.input)[which(colnames(unmapped.tts.input) == "Unnammed_cols")] <- c("Business_Type", "Material_Description", "Banner_Name", "TFL_Channel", "LSF2_Description", "LSF3_Description")
# material mapping to fetch the Brands (all.y keeps the full product master)
unmapped.material.tts.mapping <- merge.data.frame(x = unmapped.tts.input, y = product.mapping, by.x = "Material", by.y = "Material Code", all.y = TRUE)
unmapped.material.tts.mapping1 <- unmapped.material.tts.mapping %>%
  select(-c(32:37, 39:42))
# move column 32 (the mapped brand) to the front.
# BUGFIX: the original index was c(32, 1:ncol(.) - 1), i.e. c(32, 0:(n-1));
# it only selected the right columns because R silently drops the 0 index.
# The parenthesised form below picks the same columns and states the intent.
unmapped.material.tts.mapping1 <- unmapped.material.tts.mapping1[, c(32, 1:(ncol(unmapped.material.tts.mapping1) - 1))]
id.tts.unmapped.df <- unmapped.material.tts.mapping1
# build the composite key for the channel mapping and join the channels on it
id.tts.unmapped.df <- id.tts.unmapped.df %>%
  unite(col = "key", c("Banner", "Trade Format Level 2", "Local Sales Force 2(m.d.)", "Local Sales Force 3(m.d.)", "Key Customer Level3"), sep = "", remove = FALSE)
id.tts.unmapped.channel.mapping <- merge.data.frame(id.tts.unmapped.df, unmapped.tts.mapping, by.x = "key", by.y = "Key")
id.tts.unmapped.bango <- id.tts.unmapped.channel.mapping[, -c(1, 37:41)]
# export the materials that still have no channel after the join
x <- unique(id.tts.unmapped.bango[which(is.na(id.tts.unmapped.bango$Channel)), "Material"])
write.csv(x = x, file = paste0(output.path, "/unmapped.tts.nochannel.csv"))
# keep the rows with a channel, reorder the columns and coerce the KPIs
id.tts.unmapped.rna <- id.tts.unmapped.bango[which(!is.na(id.tts.unmapped.bango$Channel)), ]
id.tts.unmapped.rna1 <- id.tts.unmapped.rna[, c(3:9, 2, 10, 1, 11:19, 33:35, 20:32)]
id.tts.unmapped.rna1[23:35] <- sapply(id.tts.unmapped.rna1[23:35], as.numeric)
#################################################################################### BMI (Mapped)###########################################################
# BMI (mapped): read every "ID_Food_Primary Sales BMI*" workbook and stack them.
# The (...) group makes the prefix apply to both extensions; the original
# pattern "...(.*)xlsx|XLSX$" matched ANY file ending in "XLSX".
bmi.files <- list.files(path = input.path, pattern = "^ID_Food_Primary Sales BMI(.*)(xlsx|XLSX)$")
bmi.list <- invisible(lapply(paste0(input.path, "/", bmi.files), FUN = read_xlsx, sheet = 1, skip = 13, guess_max = 1000))
names(bmi.list) <- basename(bmi.files)
cleaned.bmi.list <- invisible(lapply(bmi.list, input_clean))
cleaned.bmi.dfs.list <- lapply(cleaned.bmi.list, data.frame)
input.bmi.df <- bind_rows(cleaned.bmi.dfs.list)
# the first 20 columns carry their headers in row 1; the KPI columns (21+)
# already have usable names, so the halves are fixed separately and re-joined
id.bmi.mapped.df1 <- input.bmi.df[, 1:20]
id.bmi.mapped.df2 <- input.bmi.df[, 21:ncol(input.bmi.df)]
colnames(id.bmi.mapped.df1) <- as.character(unlist(id.bmi.mapped.df1[1, ]))
id.bmi.mapped.df1 <- id.bmi.mapped.df1[-1, ]
id.bmi.mapped.df2 <- id.bmi.mapped.df2[-1, ]
id.bmi.mapped.df <- cbind(id.bmi.mapped.df1, id.bmi.mapped.df2)
rownames(id.bmi.mapped.df) <- NULL
# convert the KPI columns from character to numeric
id.bmi.mapped.df[, 21:ncol(id.bmi.mapped.df)] <- sapply(id.bmi.mapped.df[, 21:ncol(id.bmi.mapped.df)], as.numeric)
colnames(id.bmi.mapped.df)[which(is.na(colnames(id.bmi.mapped.df)))] <- "Unnammed_cols"
bmi.bango.mapped.headers <- gsub("[\r\n]", "", colnames(id.bmi.mapped.df))
colnames(id.bmi.mapped.df) <- bmi.bango.mapped.headers
# NA KPI values become 0, as agreed with the business
for (i in 21:ncol(id.bmi.mapped.df)) {
  id.bmi.mapped.df[which(is.na(id.bmi.mapped.df[, i])), i] <- 0
}
# rename the headers as per the convention
colnames(id.bmi.mapped.df)[colnames(id.bmi.mapped.df) == "Brands"] <- "Brand_Code"
names(id.bmi.mapped.df)[16] <- "Brand"
bango.names <- c("Bango", "BANGO")
# BUGFIX: filter both brand spellings via bango.names, which the original
# defined but never used — its subset matched only "Bango" and silently
# dropped rows labelled "BANGO", unlike every other section of this script.
bmi.mapped.bango <- subset(x = id.bmi.mapped.df, subset = Brand %in% bango.names)
bmi.bango.mapped <- merge.data.frame(x = bmi.mapped.bango, y = banner.mapping, by.x = "Banner", by.y = "Banner code", all.x = TRUE)
# Banner codes whose Channel is still missing ("NA") after the banner mapping
bmi.mapped.bango.channel.na <- unique(bmi.bango.mapped[which(is.na(bmi.bango.mapped$Channel)), "Banner"])
############################################################### BMI (Unmapped)##############################################################################
# BMI (unmapped): single workbook of BMI lines that failed the standard mapping.
input.bmi.unmapped.path <- paste0(input.path,"/BMI Bango-Unmapped.xlsx")
bmi.unmapped.input <- as_tibble(read_xlsx(paste0(input.bmi.unmapped.path),sheet = 1,guess_max = 1000,skip = 12))
#bmi.headers <- bmi.input[2,]
id.bmi.unmapped.df <- input_clean(bmi.unmapped.input)
# keep a copy of the original (KPI) headers before row 1 is promoted below
bmi.unmapped.headers1 <- colnames(id.bmi.unmapped.df)
#id.bmi.unmapped.df1 <- id.bmi.unmapped.df[,c(1:20,42:43)]
#id.bmi.unmapped.df2 <- id.bmi.unmapped.df[,21:41]
bmi.unmapped.headers1 <- gsub("[\r\n]","",colnames(id.bmi.unmapped.df))
colnames(id.bmi.unmapped.df) <- as.character(unlist(id.bmi.unmapped.df[1,]))
id.bmi.unmapped.df <- id.bmi.unmapped.df[-1,]
# restore the KPI headers (columns 21:41) from the saved header copy
names(id.bmi.unmapped.df)[21:41] <- bmi.unmapped.headers1[21:41]
# name the unnamed (NA) columns — the replacement vector order matters
colnames(id.bmi.unmapped.df)[which(is.na(colnames(id.bmi.unmapped.df)))] <- "Unnammed_cols"
colnames(id.bmi.unmapped.df)[which(colnames(id.bmi.unmapped.df)=="Unnammed_cols")] <-c ("a","Brand_Name","b","TFL_Channel","c","d","e")
# keep only the Brand_Name == "Bango" rows
# NOTE(review): only one spelling is matched here, while other sections of
# the script also accept "BANGO" — confirm which is intended for this extract
id.bmi.unmapped.df <- subset.data.frame(x = id.bmi.unmapped.df,subset = Brand_Name=="Bango")
# NA KPI values (columns 21:41) become 0, as agreed with the business
for (i in 21:41) {
id.bmi.unmapped.df[which(is.na(id.bmi.unmapped.df[,i])),i] <- 0
}
# build the composite key used to fetch the mapped channels
id.bmi.unmapped.df1 <- id.bmi.unmapped.df%>%
unite(col = "key",c("Banner","Trade Format Level 2","Local Sales Force 2","Local Sales Force 3","Key Customer Level3","ConsumpOccassClass05","Local Sales Force 1"),sep="",remove = F)
# NOTE(review): position-based drop of column 43 — re-check if the extract shape changes
id.bmi.unmapped.df1 <- id.bmi.unmapped.df1[,-43]
# join on the key column and fetch the channels from the Unmapped BMI sheet
unmapped.bmi.mapping1 <- unmapped.bmi.mapping[,c(1,2)]
id.bmi.unmapped.join <- merge.data.frame(x = id.bmi.unmapped.df1,y = unmapped.bmi.mapping1,by.x = "key",by.y ="Key")
# keep the mapped channel (Channel.y) and drop the original Channel.x column
setnames(x = id.bmi.unmapped.join,old = "Channel.y",new = "Channel")
id.bmi.unmapped.join <- id.bmi.unmapped.join%>%
select(-c(Channel.x))
# convert the KPI columns from character to numeric
id.bmi.unmapped.join[22:42] <- sapply(id.bmi.unmapped.join[22:42],as.numeric)
# Calculate and report the script run time for benchmarking.
end.time <- Sys.time()
# difftime() with explicit units avoids the auto-unit behaviour of `-`,
# which reports seconds, minutes or hours depending on elapsed time and
# makes run-to-run benchmark numbers incomparable.
script.time <- round(difftime(end.time, start.time, units = "secs"))
print(script.time)
| /ID_Jarvis_Generalised_template_1.1.0.r | no_license | gourabchanda/ID_Jarvis_Bango | R | false | false | 138,189 | r |
################################################# Generalised template for the data preprocessing of Input files ##########################################################
################################################# Business Sources and the KPI list for the script ##########################################################################
#There are total 9 internal data source - Primary Sales,Secondary Sales,Secondary Stock,DRO,Penetration,TTS,BMI,Itrust,Sellout
# The base KPI list for Primary Sales {GSV,NIV,Sales Quantity(In PCS)}
# The base KPI list for Secondary Sales {Sec sales value,Sec sales Qty(PC)}
# The base KPI list for Secondary Stock {Sec stock value,Sec stock Qty(PC)}
# The base KPI list for DROO {Orginal order qty,Final expected Order Qty,Dispatch qty}
# The base KPI list for Penetration {Penetration_Category,Penetration}
# The base KPI list for TTS {TTS,BBT,BBT-Place,BBT-Place on invoice,BBT-Place off invoice,BBT-Price,CPP on invoice,CPP off invoice,BBT-Product,BBT-Pack,BBT-Proposition,BBT-Promotion,EOT}
# The base KPI list for BMI {Brand & Marketing Investment,Brand & Marketing Investment Trade,Brand & Marketing Investment Consumer,Promotion Team Cost Trade,Promotion Team Cost Consumer}-set1
# The base KPI list for BMI {Promotion Team Cost Promotion Packaging Material Cost Trade Promotion Packaging Material Cost Consumer,Promotion Communication Material Cost Trade,Promotion Communication Material Cost Consumer}-set2
# The base KPI list for BMI {Promotion Communication Material Cost ,Promo Agency Remuneration Fees & Commissions Consumer,Promotional Expenses,Promotion Packaging Material Cost,Promotion Repacking Cost,Promotion Repacking Cost Trade}-set3
# The base KPI list for BMI {Promotion Repacking Cost Consumer,Promo Samples Gifts and Incentive Costs,Promo SamplesGifts and Incentive Costs,Consumer,Promo Agency Remun Fees & Commissions,Promo Agency Remuneration Fees & Commissions Trade} -set
################################################## Start point of the generalised template script ##########################################################################
# The output values should not in scientific notation use the format(,scientific=F)
# Initiate the script time and calculate the script time for the generalised template , display it back to the user
# load the required packages into the r script , if the package is not present in the r-env it will install aand load the package into the environment
# Record the start time for the benchmark, force plain (non-scientific)
# number formatting, then install any missing packages and attach them all.
start.time <- Sys.time()
options(scipen = 999)
options(digits = 22)
packages.required <- c("dplyr","readxl","writexl","reshape","zoo","tidyr","stringr","tibble","data.table","bit64","readr","hablar","magrittr")
packages.diff <- setdiff(packages.required, rownames(installed.packages()))
# BUGFIX: the original condition was length(packages.diff > 1), i.e. the
# length of a logical vector — it only worked by accident. Test the number
# of missing packages explicitly.
if (length(packages.diff) > 0) {
  install.packages(packages.diff, dependencies = TRUE)
}
invisible(sapply(packages.required, library, character.only = TRUE))
######## initiate the mapping path and the directory path for the script and load the input files ###############################
# NOTE(review): absolute Windows paths — parameterise or move these to a
# config file before running on another machine.
input.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Input_files"
input.path.tts <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Input_files/TTS_xlsx"
directory.path <- "C:/Users/goura/OneDrive/Desktop/UL_R_Projects/ID_Jarvis_Bango_1.1.0/ID_Jarvis_Bango_1.1.0"
output.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Jarvis_R/Output_Folder"
mapping.path <- "C:/Users/goura/OneDrive/Desktop/ID_Jarvis/Python ID KT/Mapping file/Jarvis ID- Universal Mapping.xlsx"
# load each sheet of the universal mapping workbook into its own tibble
product.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Material",col_names = T,guess_max = 100))
customer.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Customer",col_names = T,guess_max = 100))
conversion.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet ="Conversion",col_names = T,guess_max = 100 ))
banner.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Banner",col_names = T))
# "Not assigned" banners carry "#" as their code in the source extracts,
# so align the mapping sheet with that convention before joining on it
banner.mapping[which((banner.mapping$`Banner Desc`=="Not assigned")),"Banner code"] <- "#";rownames(banner.mapping) <- NULL
banner.mapping$`Banner code` <- as.character(banner.mapping$`Banner code`)
week.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Week",col_names = T,guess_max = 100))
month.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Month",col_names = T,guess_max = 100))
unmapped.tts.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Unmapped TTS",col_names = T,guess_max = 100))
unmapped.bmi.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Unmapped BMI",col_names = T,guess_max = 100))
penetration.customer.region.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Penetration Customer mapping",col_names = T,guess_max = 100))
psales.material.mt.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Material_PS_Sellout",col_names = T,guess_max = 100))
sellout.material.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sellout Material",col_names = T,guess_max = 100))
sellout.customer.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sell out Customer",col_names = T,guess_max = 100))
subrand.mapping <- tibble::as_tibble(read_xlsx(paste0(mapping.path),sheet = "Sub brands",col_names = T,guess_max = 100))
###################################################################### Data Preprocessing User Defined Functions ##################################################
# Clean a raw workbook extract: drop the four leading filler columns,
# promote the first remaining data row to the header, and return the
# resulting table. The structure is printed as a side effect.
input_clean <- function(extracted_df) {
  trimmed <- extracted_df[, 5:ncol(extracted_df)]
  header_row <- as.character(unlist(trimmed[1, ]))
  colnames(trimmed) <- header_row
  clean_df <- trimmed[-1, ]
  print(str(clean_df))
  dim(clean_df)
  return(clean_df)
}
# Left-join an input table against a mapping table on the given key columns;
# every input row is kept (all.x = TRUE), unmatched lookups yield NA.
mapping <- function(input, map, map_key1, map_key2) {
  merge.data.frame(x = input, y = map, by.x = map_key1, by.y = map_key2, all.x = TRUE)
}
# Reshape a wide penetration sheet into two columns: the month label (taken
# from the gathered Month_Year key) and its penetration value.
penetration_transpose <- function(input_df) {
  long_form <- tidyr::gather(input_df, Month_Year, penetration_value, 2:ncol(input_df))
  # overwrite the original first column with the month key, then keep only
  # that column and the value column
  long_form[, 1] <- long_form[, 2]
  long_form[c(1, 3)]
}
# Return the required packgroup rows: keep DT-channel observations with a
# non-zero REQUIRED PACKGROUP and drop the mapping helper columns.
# NOTE(review): the select() removes columns by name ranges that depend on
# the exact column order produced upstream — verify if the mapping sheets
# or the merge order change.
packgroup_selectdf <- function(input_packgroup_df){
#filter out the observations for which the required packgroup is != 0 and the
#Account/Region belongs to the DT (distributor trade) channel
output_packgroup_df <- input_packgroup_df%>%
filter(`REQUIRED PACKGROUP`!=0)%>%
filter(str_detect(`Account/Region`,"DT"))%>%
filter(`Account/Region`!=0)%>%
select(-c(`Material Desc.y`:`BASEPACK NO`,`PACKGROUP NAME`:`Sub channel`,National:UMREN,DIVCD:ncol(.)))
return(output_packgroup_df)
}
# Format the Other Island (OI) penetration extract: promote the first row to
# headers, keep the single data row, tag it as region "DT OI", and melt the
# month columns into Month / Penetration_Region pairs.
penetration_oi_format <- function(input_penetration_oi) {
  oi <- input_penetration_oi
  colnames(oi) <- oi[1, ]
  oi <- oi[-1, ]
  oi <- slice(oi, 1)
  oi$"Account/Region" <- "DT OI"
  # drop the original first column, then put the region tag in front
  oi <- oi[2:ncol(oi)]
  oi <- select(oi, c(`Account/Region`, everything()))
  gather(oi, key = "Month", "Penetration_Region", 2:ncol(oi))
}
# Format the three-region penetration extract (Central & East Java, Sumatera,
# West Java): join the region lookup, drop zero regions, put the region tag
# first, and melt the month columns into Month / Penetration_Region pairs.
penetration_3regions_format <- function(input_penetration_3region) {
  regions <- merge.data.frame(input_penetration_3region,
                              penetration.customer.region.mapping,
                              by = "Local Sales Force 2(m.d.)\\Calendar Year/Month")
  regions <- regions[which(regions$`Account/Region` != 0), ]
  regions <- select(regions, -c(`Local Sales Force 2(m.d.)\\Calendar Year/Month`))
  regions <- select(regions, c(`Account/Region`, everything()))
  gather(regions, key = "Month", "Penetration_Region", 2:ncol(regions))
}
# Compute the penetration percentage: region penetration as a share of the
# category penetration, after coercing both columns to numeric.
penetration_percentage <- function(penetration_final_df) {
  converted <- hablar::convert(penetration_final_df,
                               num(Penetration_Region, Penetration_Category))
  mutate(converted,
         penetration_percentage = Penetration_Region / Penetration_Category * 100)
}
# Build the final penetration output: translate the source month label into
# the reporting Month via the global month.mapping sheet and reorder the
# columns so Month comes first.
penetration_packgroup_output <- function(penetration_final_df) {
  month_lookup <- select(month.mapping, c(`Sec stock_DROO_Penetration`, Month))
  out <- merge.data.frame(penetration_final_df, month_lookup,
                          by.x = "Month", by.y = "Sec stock_DROO_Penetration")
  # the merge keeps both month columns; retain the mapped one (Month.y)
  out <- select(out, -Month)
  out <- select(out, c(Month.y, `Account/Region`:penetration_percentage))
  dplyr::rename(out, Month = Month.y)
}
# Pre-process a raw sellout extract: build composite "row1-row2" headers from
# `input_sellout_data1`, keep only the "WITHOUT" sellout series, translate the
# Excel serial-date header into an MM.YYYY month, and map it to the reporting
# Month / Customer via the supplied mapping tables.
#
# mapping_set1: month mapping table (defaults to the global month.mapping)
# mapping_Set : sellout customer mapping (defaults to sellout.customer.mapping)
# BUGFIX: both mapping parameters were declared but the body hard-coded the
# globals; the merges now use the parameters. Defaults are unchanged, so
# existing callers behave identically.
sellout_preprocessing <- function(input_sellout_data1, input_sellout_data2,
                                  mapping_set1 = month.mapping,
                                  mapping_Set = sellout.customer.mapping) {
  # row 3 receives a "row1-row2" composite header for every column
  input_sellout_data1[3, ] <- NA
  for (i in seq_len(ncol(input_sellout_data1))) {
    input_sellout_data1[3, i] <- paste(input_sellout_data1[1, i], input_sellout_data1[2, i], sep = "-")
  }
  input_sellout_data1 <- input_sellout_data1[3, ]
  # the first five columns keep their own row-1 headers; the rest take the
  # composite headers built above
  input_sellout_data2.split1 <- input_sellout_data2[, 1:5]
  colnames(input_sellout_data2.split1) <- as.character(unlist(input_sellout_data2.split1[1, ]))
  input_sellout_data2.split2 <- input_sellout_data2[6:ncol(input_sellout_data2)]
  colnames(input_sellout_data2.split2) <- input_sellout_data1
  output_sellout_data <- cbind(input_sellout_data2.split1, input_sellout_data2.split2)
  output_sellout_data <- output_sellout_data[-1, ]
  # drop the "NA" filler columns produced by the header build
  output_sellout_data <- output_sellout_data[, -grep("NA", colnames(output_sellout_data))]
  output_sellout_data <- output_sellout_data %>%
    tidyr::gather(., Sellout_Type, Sellout_Value, 6:ncol(.))
  # the business only tracks the "WITHOUT" sellout series
  output_sellout_data <- output_sellout_data %>%
    filter(str_detect(Sellout_Type, "WITHOUT"))
  output_sellout_data <- output_sellout_data %>%
    tidyr::separate(., col = Sellout_Type, into = c("Sellout_Type", "Month"), sep = "-", remove = TRUE)
  # the Month part is an Excel serial day number; 1899-12-30 is Excel's origin
  output_sellout_data$New_Date <- as.Date(as.numeric(output_sellout_data$Month), origin = "1899-12-30")
  output_sellout_data <- output_sellout_data %>%
    hablar::convert(chr(New_Date))
  output_sellout_data$month <- as.integer(substr(output_sellout_data$New_Date, 6, 7))
  output_sellout_data$year <- as.integer(substr(output_sellout_data$New_Date, 1, 4))
  output_sellout_data$Sellout <- format(as.Date(paste0(output_sellout_data$month, "-1-", output_sellout_data$year), "%m-%d-%Y"), "%m.%Y")
  output_sellout_data <- output_sellout_data %>%
    select(-c(Month, New_Date:year))
  # month mapping: translate the MM.YYYY key into the reporting Month
  output_sellout_data_mapped <- merge.data.frame(output_sellout_data, mapping_set1, by.x = "Sellout", by.y = "Sellout")
  output_sellout_data_mapped <- output_sellout_data_mapped %>%
    select(-c(Sellout, `Primary sales`:TTS_BMI))
  output_sellout_data_mapped %<>% select(c(Month, MAP:Sellout_Value))
  # customer mapping: replace the raw customer with its mapped name
  output_sellout_data_mapped <- merge.data.frame(x = output_sellout_data_mapped, y = mapping_Set, by.x = "Customer", by.y = "Customer")
  output_sellout_data_mapped <- output_sellout_data_mapped %>%
    select(-Customer)
  output_sellout_data_mapped <- output_sellout_data_mapped %>%
    select(c(Month, Customer = `Customer Mapped`, MAP:Sellout_Value))
  return(output_sellout_data_mapped)
}
################################################################ Data Preprocessing ( Input psales) ######################################################
#fetch the list of all psales files and apply the functions to convert it into tidy dataset
# Primary Sales: read every "ID_Food_Primary Sales GSV*" workbook and stack
# the cleaned sheets into one data frame.
# The (...) group makes the prefix apply to both extensions; the original
# pattern "...(.*)xlsx|XLSX$" matched ANY file ending in "XLSX".
psales.files <- list.files(path = input.path, pattern = "^ID_Food_Primary Sales GSV(.*)(xlsx|XLSX)$")
psales.list <- invisible(lapply(paste0(input.path, "/", psales.files), FUN = read_xlsx, sheet = 1, skip = 13, guess_max = 1000))
names(psales.list) <- basename(psales.files)
cleaned.psales.list <- invisible(lapply(psales.list, input_clean))
cleaned.psales.dfs.list <- lapply(cleaned.psales.list, data.frame)
input.psales.df <- bind_rows(cleaned.psales.dfs.list)
psales.col.headers <- c("Fiscal year/period","Sales Organization","Business Type","Category(m.d.) from SKU","Market(m.d.) from SKU",
"Sector(m.d.) from SKU","Brand(PH)(m.d.) from SKU","Material","Material Desc","Customer","Customer Desc",
"Gross Sales Value (GSV)","Sales Quantity","NIV")
colnames(input.psales.df) <- psales.col.headers
# strip embedded carriage returns / newlines from the headers
id.psales.header <- gsub("[\r\n]", "", colnames(input.psales.df))
colnames(input.psales.df) <- id.psales.header
# convert the KPI columns from character to numeric for the calculations
input.psales.df %<>% convert(num(`Gross Sales Value (GSV)`, `Sales Quantity`, NIV))
# rename the default Material / Customer columns; %<>% already assigns in
# place, so the redundant extra "<-" of the original is dropped
input.psales.df %<>% dplyr::rename(Material_Code = Material, Customer_Code = Customer)
# the customer code has the shape "a/b/c/<8-digit code>"; keep the 8-digit part
input.psales.df <- separate(data = input.psales.df, "Customer_Code", c("a", "b", "c", "Customer_Code_8D"), sep = "/", remove = FALSE)
input.psales.df %<>% dplyr::select(-c("a", "b", "c"))
# keep only the Bango brand observations (both spellings)
brand.bango <- c("BANGO", "Bango")
id.psales.bango <- input.psales.df %>%
  filter(`Brand(PH)(m.d.) from SKU` %in% brand.bango)
# replace NA KPI values with 0, as confirmed by the business
psales.kpi.list <- c("Gross Sales Value (GSV)", "Sales Quantity", "NIV")
for (kpi in psales.kpi.list) {
  id.psales.bango[which(is.na(id.psales.bango[, kpi])), kpi] <- 0
}
# product / customer / conversion mapping (left joins via mapping())
bango.ps.pm <- mapping(id.psales.bango, product.mapping, map_key1 = "Material_Code", map_key2 = "Material Code")
bango.ps.cm <- mapping(id.psales.bango, customer.mapping, map_key1 = "Customer_Code_8D", map_key2 = "Customer Code")
bango.ps.convm <- mapping(id.psales.bango, conversion.mapping, map_key1 = "Material_Code", map_key2 = "MATNR")
# collect the unmapped observations for each lookup.
# BUGFIX: the customer rows were previously taken from bango.ps.pm with a
# mask built on bango.ps.cm — two different merges whose row orders need not
# agree; both the mask and the subset must come from bango.ps.cm.
bango.ps.npm <- unique(bango.ps.pm[which(is.na(bango.ps.pm$`Mapped Product Name`)), "Material_Code"])
bango.ps.ncm <- unique(bango.ps.cm[which(is.na(bango.ps.cm$Channel)), c("Customer_Code", "Customer Desc")])
bango.ps.nconvm <- unique(bango.ps.convm[which(is.na(bango.ps.convm$Conversion)), "Material_Code"])
if (length(bango.ps.npm) > 0) {
  write.csv(bango.ps.npm, file = paste0(output.path, "/psmaterial_unmapped.csv"), row.names = FALSE)
}
# ncm is a two-column data.frame: test nrow(), not length() (always 2)
if (nrow(bango.ps.ncm) > 0) {
  write.csv(bango.ps.ncm, file = paste0(output.path, "/pscustomer_unmapped.csv"), row.names = FALSE)
}
if (length(bango.ps.nconvm) > 0) {
  write.csv(bango.ps.nconvm, file = paste0(output.path, "/psconversion_unmapped.csv"), row.names = FALSE)
}
############################################################## Data Preprocessing ( Input ssales) #####################################################
# Secondary Sales: read every "ID_Food_Secondary Sales IP*" workbook.
# The (...) group makes the prefix apply to both extensions; the original
# pattern "...(.*)xlsx|XLSX$" matched ANY file ending in "XLSX".
ssales.files <- list.files(path = input.path, pattern = "^ID_Food_Secondary Sales IP(.*)(xlsx|XLSX)$")
ssales.list <- invisible(lapply(paste0(input.path, "/", ssales.files), FUN = read_xlsx, sheet = 1, skip = 13, guess_max = 1000))
names(ssales.list) <- basename(ssales.files)
cleaned.ssales.list <- invisible(lapply(ssales.list, input_clean))
cleaned.ssales.dfs.list <- lapply(cleaned.ssales.list, data.frame)
input.ssales.df <- bind_rows(cleaned.ssales.dfs.list)
ssales.col.headers <- c("Calendar Year/Month","Sales Organization","Business Type","Category(m.d.)","Market(m.d.)","Sector(m.d.)",
"Brand(PH)(m.d.)","Material","Material Desc","Distributor","Distributor Desc","Gross Sec Sales (TUR)","Sec Volume")
colnames(input.ssales.df) <- ssales.col.headers
# strip embedded carriage returns / newlines from the headers
id.ssales.header <- gsub("[\r\n]", "", colnames(input.ssales.df))
colnames(input.ssales.df) <- id.ssales.header
# convert the KPI columns from character to numeric for the calculations
input.ssales.df %<>% convert(num(`Gross Sec Sales (TUR)`, `Sec Volume`))
# rename the default Material / Distributor columns; %<>% already assigns in
# place, so the redundant extra "<-" of the original is dropped
input.ssales.df %<>% dplyr::rename(Material_Code = Material, Distributor_Code = Distributor)
# the distributor code has the shape "a/b/c/<8-digit code>"; keep the 8-digit part
input.ssales.df <- separate(data = input.ssales.df, "Distributor_Code", c("a", "b", "c", "Distributor_Code_8D"), sep = "/", remove = FALSE)
input.ssales.df %<>% dplyr::select(-c("a", "b", "c"))
# keep only the Bango brand observations (both spellings)
brand.bango <- c("BANGO", "Bango")
id.ssales.bango <- input.ssales.df %>%
  filter(`Brand(PH)(m.d.)` %in% brand.bango)
# replace NA KPI values with 0, as agreed with the business
ssales.kpi.list <- c("Gross Sec Sales (TUR)", "Sec Volume")
for (kpi in ssales.kpi.list) {
  id.ssales.bango[which(is.na(id.ssales.bango[, kpi])), kpi] <- 0
}
# product / customer / conversion mapping (left joins via mapping())
bango.ssales.pm <- mapping(id.ssales.bango, product.mapping, map_key1 = "Material_Code", map_key2 = "Material Code")
bango.ssales.cm <- mapping(id.ssales.bango, customer.mapping, map_key1 = "Distributor_Code_8D", map_key2 = "Customer Code")
bango.ssales.convm <- mapping(id.ssales.bango, conversion.mapping, map_key1 = "Material_Code", map_key2 = "MATNR")
# collect the unmapped observations for each lookup
bango.ss.npm <- unique(bango.ssales.pm[which(is.na(bango.ssales.pm$`Mapped Product Name`)), "Material_Code"])
bango.ss.ncm <- unique(bango.ssales.cm[which(is.na(bango.ssales.cm$Channel)), c("Distributor_Code", "Distributor Desc")])
bango.ss.nconvm <- unique(bango.ssales.convm[which(is.na(bango.ssales.convm$Conversion)), "Material_Code"])
if (length(bango.ss.npm) > 0) {
  write.csv(bango.ss.npm, file = paste0(output.path, "/ssmaterial_unmapped.csv"), row.names = FALSE)
}
# ncm is a two-column data.frame: test nrow(), not length() (always 2, which
# made the original write an empty file even with nothing unmapped)
if (nrow(bango.ss.ncm) > 0) {
  write.csv(bango.ss.ncm, file = paste0(output.path, "/sscustomer_unmapped.csv"), row.names = FALSE)
}
if (length(bango.ss.nconvm) > 0) {
  write.csv(bango.ss.nconvm, file = paste0(output.path, "/ssconversion_unmapped.csv"), row.names = FALSE)
}
############################################################## Data Preprocessing (Input SStock) #################################################################
# ---- Secondary Stock (SStock): same pipeline shape as SSales above ----
# NOTE(review): in this regex the alternation binds the whole pattern, so it
# matches "^ID_Food_...xlsx" OR anything ending in "XLSX" -- confirm intent.
sstock.files <- list.files(path = input.path,pattern = "^ID_Food_Secondary Stock IP(.*)xlsx|XLSX$")
# Read sheet 1 of every matching workbook, skipping the 13 report-header rows.
sstock.list <- invisible(lapply(paste0(input.path,"/",sstock.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(sstock.list) <- basename(sstock.files)
cleaned.sstock.list <- invisible(lapply(sstock.list, input_clean))
cleaned.sstock.dfs.list <- lapply(cleaned.sstock.list,data.frame)
input.sstock.df <- bind_rows(cleaned.sstock.dfs.list)
# Canonical 12-column layout for the combined SStock extract.
sstock.col.headers <- c("Calendar Year/Month","Sales Organization","Category(m.d.)","Market(m.d.)","Sector(m.d.)","Brand(PH)(m.d.)","Material","Material Desc","Sold-to party","STP Desc","Secondary Stock Volume","Secondary Stock Value [@ DT Rate.]")
colnames(input.sstock.df) <- sstock.col.headers
# Strip embedded carriage returns / newlines from the headers.
id.sstock.header <- gsub("[\r\n]","",colnames(input.sstock.df))
colnames(input.sstock.df) <- id.sstock.header
# Convert the KPI columns from character to numeric for calculation purposes.
input.sstock.df%<>%convert(num(`Secondary Stock Volume`,`Secondary Stock Value [@ DT Rate.]`))
# Rename the default Material and Sold-to-party columns to *_Code names.
# NOTE(review): combining `<-` with `%<>%` double-assigns; `%<>%` alone suffices.
input.sstock.df <- input.sstock.df%<>%dplyr::rename(Material_Code=Material,STP_Code=`Sold-to party`)
# Filter the observations down to the brand Bango (either capitalization).
brand.bango <- c("BANGO","Bango")
id.sstock.bango <- input.sstock.df%>%
filter(`Brand(PH)(m.d.)`%in% brand.bango)
# KPI columns whose missing values are, per business rule, treated as zero.
sstock.kpi.list <- c("Secondary Stock Volume","Secondary Stock Value [@ DT Rate.]")
for (kpi in sstock.kpi.list) {
id.sstock.bango[which(is.na(id.sstock.bango[,kpi])),kpi] <- 0
}
# Run the product, customer and conversion mapping helpers to annotate rows.
bango.sstock.pm <- mapping(id.sstock.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.sstock.cm <- mapping(id.sstock.bango,customer.mapping,map_key1 = "STP_Code",map_key2 = "Customer Code")
bango.sstock.convm <- mapping(id.sstock.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Collect the codes that failed each mapping (NA in the mapped-in column).
bango.sstock.npm <- unique(bango.sstock.pm[which(is.na(bango.sstock.pm$`Mapped Product Name`)),"Material_Code"])
bango.sstock.ncm <- unique(bango.sstock.cm[which(is.na(bango.sstock.cm$Channel)),c("STP_Code","STP Desc")])
bango.sstock.nconvm <- unique(bango.sstock.convm[which(is.na(bango.sstock.convm$Conversion)),"Material_Code"])
# Persist unmapped codes so the mapping tables can be extended.
# NOTE(review): length() on a data frame counts columns; nrow() may be intended.
if(length(bango.sstock.npm)>0){
write.csv(bango.sstock.npm,file = paste0(output.path,"/sstock_material_unmapped.csv"),row.names = F)
}
if(length(bango.sstock.ncm)>0){
write.csv(bango.sstock.ncm,file = paste0(output.path,"/sstock_customer_unmapped.csv"),row.names = F)
}
if(length(bango.sstock.nconvm)>0){
write.csv(bango.sstock.nconvm,file = paste0(output.path,"/sstock_conversion_unmapped.csv"),row.names = F)
}
# Inner-join (default all = FALSE) the Bango stock rows with all three mapping
# tables; rows without a match in any table are silently dropped.
sstock.bango <- merge.data.frame(x = id.sstock.bango,y =product.mapping,by.x ="Material_Code",by.y = "Material Code")%>%
merge.data.frame(x = .,y = customer.mapping,by.x = "STP_Code",by.y ="Customer Code")%>%
merge.data.frame(x = .,y = conversion.mapping,by.x = "Material_Code",by.y = "MATNR")
######################################################################## Data Preprocessing (Input DROO) ##########################################################333
# ---- DROO (Dispatched / Requested / Original Orders): same pipeline shape ----
droo.files <- list.files(path = input.path,pattern = "^ID_Food_DROO IP_(.*)xlsx|XLSX$")
droo.list <- invisible(lapply(paste0(input.path,"/",droo.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(droo.list) <- basename(droo.files)
cleaned.droo.list <- invisible(lapply(droo.list, input_clean))
cleaned.droo.dfs.list <- lapply(cleaned.droo.list,data.frame)
input.droo.df <- bind_rows(cleaned.droo.dfs.list)
# Canonical 14-column layout for the combined DROO extract.
droo.col.headers <- c("Calendar Year/Month","Sales Organization","Business Type","Category(m.d.)","Market(m.d.)","Sector(m.d.)","Brand(PH)(m.d.)","Material","Material Desc","Sold-to party","STP Desc","OriginalOrder Qty","Final Customer Expected Order Qty","Dispatched Qty")
# NOTE(review): unlike the other sections, the gsub() header cleanup here runs
# BEFORE the canonical headers are assigned, so the next assignment overwrites
# it and the newline-stripping is a no-op for this section.
id.droo.header <- gsub("[\r\n]","",colnames(input.droo.df))
colnames(input.droo.df) <- id.droo.header
colnames(input.droo.df) <- droo.col.headers
# Convert the KPI columns from character to numeric for calculation purposes.
input.droo.df%<>%convert(num(`OriginalOrder Qty`,`Final Customer Expected Order Qty`,`Dispatched Qty`))
# Rename the default Material and Sold-to-party columns to *_Code names.
# NOTE(review): combining `<-` with `%<>%` double-assigns; `%<>%` alone suffices.
input.droo.df <- input.droo.df%<>%dplyr::rename(Material_Code=Material,STP_Code=`Sold-to party`)
# Filter the observations down to the brand Bango (either capitalization).
brand.bango <- c("BANGO","Bango")
id.droo.bango <- input.droo.df%>%
filter(`Brand(PH)(m.d.)`%in% brand.bango)
# KPI columns whose missing values are, per business rule, treated as zero.
droo.kpi.list <- c("OriginalOrder Qty","Final Customer Expected Order Qty","Dispatched Qty")
for (kpi in droo.kpi.list) {
id.droo.bango[which(is.na(id.droo.bango[,kpi])),kpi] <- 0
}
# Run the product, customer and conversion mapping helpers to annotate rows.
bango.droo.pm <- mapping(id.droo.bango,product.mapping,map_key1 = "Material_Code",map_key2 = "Material Code")
bango.droo.cm <- mapping(id.droo.bango,customer.mapping,map_key1 = "STP_Code",map_key2 = "Customer Code")
bango.droo.convm <- mapping(id.droo.bango,conversion.mapping,map_key1 = "Material_Code",map_key2 = "MATNR")
# Collect the codes that failed each mapping (NA in the mapped-in column).
bango.droo.npm <- unique(bango.droo.pm[which(is.na(bango.droo.pm$`Mapped Product Name`)),"Material_Code"])
bango.droo.ncm <- unique(bango.droo.cm[which(is.na(bango.droo.cm$Channel)),c("STP_Code","STP Desc")])
bango.droo.nconvm <- unique(bango.droo.convm[which(is.na(bango.droo.convm$Conversion)),"Material_Code"])
# Persist any unmapped DROO material / customer / conversion codes for review.
# NOTE(review): length() on a data frame counts columns, not rows, so the ncm
# guard below is always TRUE when the object is a data frame; nrow() may be
# the intended check -- kept as-is for consistency with the other sections.
if(length(bango.droo.npm)>0){
write.csv(bango.droo.npm,file = paste0(output.path,"/droo_material_unmapped.csv"),row.names = F)
}
# BUG FIX: the guard previously tested length(bango.sstock.ncm) -- the
# secondary-stock object from the section above -- instead of the DROO
# object that is actually written below.
if(length(bango.droo.ncm)>0){
write.csv(bango.droo.ncm,file = paste0(output.path,"/droo_customer_unmapped.csv"),row.names = F)
}
if(length(bango.droo.nconvm)>0){
write.csv(bango.droo.nconvm,file = paste0(output.path,"/droo_conversion_unmapped.csv"),row.names = F)
}
# Inner-join (default all = FALSE) the Bango DROO rows with all three mapping
# tables; rows without a match in any table are silently dropped.
droo.bango <- merge.data.frame(x = id.droo.bango,y =product.mapping,by.x ="Material_Code",by.y = "Material Code")%>%
merge.data.frame(x = .,y = customer.mapping,by.x = "STP_Code",by.y ="Customer Code")%>%
merge.data.frame(x = .,y = conversion.mapping,by.x = "Material_Code",by.y = "MATNR")
############################################################# (Itrust) ######################################################
# ---- Itrust service-level extract: derive line/total flags, build week key, map ----
input.itrust.path <- paste0(input.path,"/Itrust_2018 till wk 48.csv")
itrust.input <- tibble::as_tibble(read.csv(input.itrust.path,header = T))
# If the file carries no YEAR column, recover it from the file name.
# NOTE(review): substr(.., 8, 12) on "Itrust_2018 till wk 48.csv" yields
# "2018 " (5 chars incl. trailing space) -- confirm downstream trimming.
if (!"YEAR" %in% colnames(itrust.input)) {
itrust.input[,"YEAR"] <- substr(basename(input.itrust.path),8,12)
}
itrust.input[,"ITRUST_LINE"]<- NA
itrust.input[,"ITRUST_TOTAL"] <- NA
# NOTE(review): this partial assignment is redundant -- the full ifelse()
# on the next line recomputes ITRUST_LINE for every row, including this case.
itrust.input[which(itrust.input$FINAL_CRR==0),"ITRUST_LINE"] <- 0
# ITRUST_LINE: 1 when stock covers the confirmed requirement, else 0;
# rows with FINAL_CRR == 0 are excluded (flagged 0).
itrust.input$ITRUST_LINE <-ifelse(itrust.input$FINAL_CRR==0,0,ifelse(itrust.input$STOCK_CS>itrust.input$FINAL_CRR,1,0))
# ITRUST_TOTAL: 1 for every row with a non-zero confirmed requirement.
itrust.input$ITRUST_TOTAL <- ifelse(itrust.input$FINAL_CRR==0,0,1)
# Build a "WK.YEAR" key (WK/YEAR columns are kept because remove = FALSE).
itrust.input <- itrust.input%>%
tidyr::unite(col = "Week_Concat",c("WK","YEAR"),sep=".",remove=F)
itrust.input$Week_Concat <- stringr::str_trim(itrust.input$Week_Concat,side = "both")
# Run the product and customer mapping helpers for the Itrust observations.
itrust.bango.pm <- mapping(itrust.input,product.mapping,map_key1 ="PROD_CODE",map_key2 = "Material Code")
itrust.bango.cm <- mapping(itrust.input,customer.mapping,map_key1 ="CUST_CODE",map_key2 = "Customer Code")
# Collect codes that failed the mapping (NA in the mapped-in column).
itrust.bango.npm <- unique(itrust.bango.pm[which(is.na(itrust.bango.pm$BRAND)),"PROD_CODE"])
itrust.bango.ncm <- unique(itrust.bango.cm[which(is.na(itrust.bango.cm$Channel)),"CUST_CODE"])
if(length(itrust.bango.npm)>0){
write.csv(itrust.bango.npm,file = paste0(output.path,"/itrustmaterial_unmapped.csv"),row.names = F)
}
if(length(itrust.bango.ncm)>0){
write.csv(itrust.bango.ncm,file = paste0(output.path,"/itrustcustomer_unmapped.csv"),row.names = F)
}
# NOTE(review): Week_Concat was already trimmed above; this repeat is a no-op.
itrust.input$Week_Concat <- stringr::str_trim(itrust.input$Week_Concat)
#################################################################### TTS (Mapped)###############################################
# ---- TTS (trade terms spend), mapped banners: read, relabel, map, zero-fill KPIs ----
# NOTE(review): TTS files come from their own folder (input.path.tts).
tts.files <- list.files(path = input.path.tts,pattern = "^ID_Food_Primary Sales TTS IP(.*)xlsx|XLSX$")
tts.list <- invisible(lapply(paste0(input.path.tts,"/",tts.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(tts.list) <- basename(tts.files)
cleaned.tts.list <- invisible(lapply(tts.list, input_clean))
cleaned.tts.dfs.list <- lapply(cleaned.tts.list,data.frame)
input.tts.df <- bind_rows(cleaned.tts.dfs.list)
# Canonical headers; NA placeholders mark description columns named below.
tts.col.headers <- c("Fiscal year/period","Sales Organization",NA,"Category(m.d.) from SKU","Sector(m.d.) from SKU","Market(m.d.) from SKU",
"Brand(m.d.)","Material",NA,"Banner(m.d.)",NA,"Banner",NA,"TTS","BBT","BBT - Place","BBT - Place on invoice","BBT - Place off invoice",
"BBT - Price","CPP on invoice","CPP off invoice","BBT - Product","BBT - Pack","BBT - Proposition","BBT - Promotion","EOT")
colnames(input.tts.df) <- tts.col.headers
# Strip embedded carriage returns / newlines from the headers.
id.tts.header <- gsub("[\r\n]","",colnames(input.tts.df))
colnames(input.tts.df) <- id.tts.header
# Fill the NA placeholder headers with their real names (positional: the four
# NAs above map to these four names in order).
input.tts.headers <- colnames(input.tts.df)
input.tts.headers[(which(is.na(input.tts.headers)))] <- c("Business_Type","Material_Description","Banner(m.d)_Description","Banner_Description")
colnames(input.tts.df) <- input.tts.headers
# Map materials to the product master to pull in brand attributes.
mapped.tts.material <- mapping(input = input.tts.df,map = product.mapping,map_key1 ="Material",map_key2 = "Material Code" )
# Keep only observations whose mapped brand is BANGO.
id.tts.mapped.bango <- mapped.tts.material%>%
filter(BRAND =="BANGO")
# Convert the KPI columns (14:26 = TTS through EOT) from character to numeric.
id.tts.mapped.bango[,14:26] <- sapply(id.tts.mapped.bango[,14:26],as.numeric)
# Drop columns not required downstream (positional -- depends on the mapping
# helper appending its columns at 27:32; confirm if mapping() changes).
id.tts.mapped.bango1 <-id.tts.mapped.bango%>%
select(-c(27:32,34:ncol(id.tts.mapped.bango)))
# Banner mapping fetches the sales Channel for each mapped banner code.
id.tts.channel.mapping <- mapping(input=id.tts.mapped.bango1,map =banner.mapping, map_key1 = "Banner",map_key2 = "Banner code")
# Banner codes that did not resolve to a channel (kept for review).
id.ttschannel.na <- unique(id.tts.channel.mapping[which(is.na(id.tts.channel.mapping$Channel)),"Banner"])
# Reorder: descriptors first, banner-mapping columns, then the KPI block.
id.tts.channel.mapping <- id.tts.channel.mapping%>%
select(1:13,27:32,14:26)
# Business rule: missing KPI values (columns 20+) are treated as zero.
for (i in 20:ncol(id.tts.channel.mapping)){
id.tts.channel.mapping[which(is.na(id.tts.channel.mapping[,i])),i] <- 0
}
########################################################################### TTS (Unmapped)########################################################
# ---- TTS, unmapped banners: single workbook, keyed channel lookup ----
input.tts.unmapped.path <- paste0(input.path,"/TTS Bango-Unmapped.xlsx")
tts.unmapped.input <- as_tibble(read_xlsx(paste0(input.tts.unmapped.path),sheet = 1,guess_max = 1000,skip = 12))
unmapped.tts.input <- input_clean(tts.unmapped.input)
# Strip embedded carriage returns / newlines from the headers.
input.unmapped.tts.headers <- gsub("[\r\n]","",colnames(unmapped.tts.input))
colnames(unmapped.tts.input) <- input.unmapped.tts.headers
# Name the unnamed (NA-header) columns; positional, in sheet order.
colnames(unmapped.tts.input)[which(is.na(colnames(unmapped.tts.input)))] <- "Unnammed_cols"
colnames(unmapped.tts.input)[which(colnames(unmapped.tts.input)=="Unnammed_cols")] <-c ("Business_Type","Material_Description","Banner_Name","TFL_Channel","LSF2_Description","LSF3_Description")
# Right-join against the product master (all.y = TRUE keeps every master row)
# to fetch Brand information for each material.
unmapped.material.tts.mapping <- merge.data.frame(x = unmapped.tts.input,y = product.mapping,by.x ="Material",by.y= "Material Code",all.y = T)
unmapped.material.tts.mapping1 <- unmapped.material.tts.mapping%>%
select(-c(32:37,39:42))
# NOTE(review): `1:ncol(x)-1` parses as `(1:ncol(x)) - 1`, i.e. 0:(ncol-1);
# the 0 index selects nothing, so this effectively yields c(32, 1:(ncol-1)).
# `1:(ncol(x)-1)` was probably intended -- result happens to be the same.
unmapped.material.tts.mapping1 <- unmapped.material.tts.mapping1[,c(32,1:ncol(unmapped.material.tts.mapping1)-1)]
id.tts.unmapped.df <- unmapped.material.tts.mapping1
# (Brand filter and KPI zero-fill intentionally disabled below.)
#for (i in 20:ncol(id.tts.unmapped.df)) {
#id.tts.unmapped.df[which(is.na(id.tts.unmapped.df[,i])),i] <- 0}
# Build the concatenated lookup key used by the unmapped-TTS channel table.
id.tts.unmapped.df <- id.tts.unmapped.df%>%
unite(col = "key",c("Banner","Trade Format Level 2","Local Sales Force 2(m.d.)","Local Sales Force 3(m.d.)","Key Customer Level3"),sep="",remove = F)
# Inner join on the key to fetch the channel for each row.
id.tts.unmapped.channel.mapping <- merge.data.frame(id.tts.unmapped.df,unmapped.tts.mapping,by.x = "key",by.y = "Key")
# Drop the key and the joined-in helper columns (positional).
id.tts.unmapped.bango <- id.tts.unmapped.channel.mapping[,-c(1,37:41)]
# Materials that still have no channel are written out for review.
x <- unique(id.tts.unmapped.bango[which(is.na(id.tts.unmapped.bango$Channel)),"Material"])
write.csv(x = x,file = paste0(output.path,"/unmapped.tts.nochannel.csv"))
# Keep only rows with a resolved channel, then reorder columns and coerce the
# KPI block (23:35 after reordering) to numeric.
id.tts.unmapped.rna <- id.tts.unmapped.bango[which(!is.na(id.tts.unmapped.bango$Channel)),]
id.tts.unmapped.rna1 <- id.tts.unmapped.rna[,c(3:9,2,10,1,11:19,33:35,20:32)]
id.tts.unmapped.rna1[23:35] <- sapply(id.tts.unmapped.rna1[23:35],as.numeric)
#################################################################################### BMI (Mapped)###########################################################
# ---- BMI (brand & marketing investment), mapped banners ----
bmi.files <- list.files(path = input.path,pattern = "^ID_Food_Primary Sales BMI(.*)xlsx|XLSX$")
bmi.list <- invisible(lapply(paste0(input.path,"/",bmi.files),FUN = read_xlsx,sheet=1,skip=13,guess_max=1000))
names(bmi.list) <- basename(bmi.files)
cleaned.bmi.list <- invisible(lapply(bmi.list, input_clean))
cleaned.bmi.dfs.list <- lapply(cleaned.bmi.list,data.frame)
input.bmi.df <- bind_rows(cleaned.bmi.dfs.list)
# The first 20 columns carry their real headers in data row 1 (descriptor
# block); columns 21+ are the KPI block whose headers are already correct.
id.bmi.mapped.df1 <- input.bmi.df[,1:20]
id.bmi.mapped.df2 <- input.bmi.df[,21:ncol(input.bmi.df)]
colnames(id.bmi.mapped.df1) <- as.character(unlist(id.bmi.mapped.df1[1,]))
id.bmi.mapped.df1 <- id.bmi.mapped.df1[-1,]
id.bmi.mapped.df2 <- id.bmi.mapped.df2[-1,]
id.bmi.mapped.df <- cbind(id.bmi.mapped.df1,id.bmi.mapped.df2)
rownames(id.bmi.mapped.df) <- NULL
# KPI block to numeric.
id.bmi.mapped.df[,21:ncol(id.bmi.mapped.df)] <- sapply(id.bmi.mapped.df[,21:ncol(id.bmi.mapped.df)],as.numeric)
colnames(id.bmi.mapped.df)[which(is.na(colnames(id.bmi.mapped.df)))] <- "Unnammed_cols"
# Strip embedded carriage returns / newlines from the headers.
bmi.bango.mapped.headers <- gsub("[\r\n]","",colnames(id.bmi.mapped.df))
colnames(id.bmi.mapped.df) <- bmi.bango.mapped.headers
# Business rule: missing KPI values are treated as zero.
for (i in 21:ncol(id.bmi.mapped.df)) {
id.bmi.mapped.df[which(is.na(id.bmi.mapped.df[,i])),i] <- 0
}
# Rename headers to the project convention (column 16 is the brand name).
colnames(id.bmi.mapped.df)[colnames(id.bmi.mapped.df)=="Brands"] <- "Brand_Code"
names(id.bmi.mapped.df)[16] <- "Brand"
bango.names <- c("Bango","BANGO")
# Keep only Bango rows.
# NOTE(review): bango.names above is unused -- this subset matches "Bango"
# only, so any "BANGO"-cased rows are dropped; confirm which is intended.
bmi.mapped.bango <- subset(x = id.bmi.mapped.df,subset = Brand=="Bango")
# Left join against the banner mapping to fetch channels (unmatched kept).
bmi.bango.mapped <- merge.data.frame(x = bmi.mapped.bango,y =banner.mapping,by.x = "Banner",by.y = "Banner code",all.x = T )
# Banner codes whose Channel is still missing, kept for review.
bmi.mapped.bango.channel.na <- unique(bmi.bango.mapped[which(is.na(bmi.bango.mapped$Channel)),"Banner"])
############################################################### BMI (Unmapped)##############################################################################
# ---- BMI, unmapped banners: single workbook, keyed channel lookup ----
input.bmi.unmapped.path <- paste0(input.path,"/BMI Bango-Unmapped.xlsx")
bmi.unmapped.input <- as_tibble(read_xlsx(paste0(input.bmi.unmapped.path),sheet = 1,guess_max = 1000,skip = 12))
#bmi.headers <- bmi.input[2,]
id.bmi.unmapped.df <- input_clean(bmi.unmapped.input)
# NOTE(review): this first assignment is immediately overwritten by the gsub()
# result two lines below.
bmi.unmapped.headers1 <- colnames(id.bmi.unmapped.df)
#id.bmi.unmapped.df1 <- id.bmi.unmapped.df[,c(1:20,42:43)]
#id.bmi.unmapped.df2 <- id.bmi.unmapped.df[,21:41]
# Keep the cleaned original headers (KPI names live here), then promote data
# row 1 to be the header row for the descriptor columns.
bmi.unmapped.headers1 <- gsub("[\r\n]","",colnames(id.bmi.unmapped.df))
colnames(id.bmi.unmapped.df) <- as.character(unlist(id.bmi.unmapped.df[1,]))
id.bmi.unmapped.df <- id.bmi.unmapped.df[-1,]
# Restore the KPI headers (columns 21:41) from the saved original headers.
names(id.bmi.unmapped.df)[21:41] <- bmi.unmapped.headers1[21:41]
# Name the unnamed (NA-header) columns; positional, in sheet order.
colnames(id.bmi.unmapped.df)[which(is.na(colnames(id.bmi.unmapped.df)))] <- "Unnammed_cols"
colnames(id.bmi.unmapped.df)[which(colnames(id.bmi.unmapped.df)=="Unnammed_cols")] <-c ("a","Brand_Name","b","TFL_Channel","c","d","e")
# Keep only rows whose Brand_Name is exactly "Bango".
id.bmi.unmapped.df <- subset.data.frame(x = id.bmi.unmapped.df,subset = Brand_Name=="Bango")
# Business rule: missing KPI values (columns 21:41) are treated as zero.
for (i in 21:41) {
id.bmi.unmapped.df[which(is.na(id.bmi.unmapped.df[,i])),i] <- 0
}
# Build the concatenated lookup key used by the unmapped-BMI channel table.
id.bmi.unmapped.df1 <- id.bmi.unmapped.df%>%
unite(col = "key",c("Banner","Trade Format Level 2","Local Sales Force 2","Local Sales Force 3","Key Customer Level3","ConsumpOccassClass05","Local Sales Force 1"),sep="",remove = F)
id.bmi.unmapped.df1 <- id.bmi.unmapped.df1[,-43]
# Join on the key (columns 1:2 of the mapping table = Key, Channel) and keep
# the mapped Channel, discarding the original Channel.x column.
unmapped.bmi.mapping1 <- unmapped.bmi.mapping[,c(1,2)]
id.bmi.unmapped.join <- merge.data.frame(x = id.bmi.unmapped.df1,y = unmapped.bmi.mapping1,by.x = "key",by.y ="Key")
setnames(x = id.bmi.unmapped.join,old = "Channel.y",new = "Channel")
id.bmi.unmapped.join <- id.bmi.unmapped.join%>%
select(-c(Channel.x))
# KPI block (22:42 after the join) to numeric.
id.bmi.unmapped.join[22:42] <- sapply(id.bmi.unmapped.join[22:42],as.numeric)
# Report total elapsed script time (start.time is set earlier in the script).
end.time <- Sys.time()
script.time <- round(end.time-start.time)
print(script.time)
|
#' Create character strings that will be evaluated as JavaScript
#'
#' @param ... character vector(s) collapsed into a single JavaScript string
#'
#' @return A length-one character string with class \code{JS_EVAL}, or
#'   \code{NULL} when no input is supplied.
#'
#' @source A direct import of \code{JS} from Ramnath Vaidyanathan, Yihui Xie,
#' JJ Allaire, Joe Cheng and Kenton Russell (2015). \link{htmlwidgets}: HTML
#' Widgets for R. R package version 0.4.
#'
#' @export
JS <- function(...) {
  x <- c(...)
  # No input at all: return NULL so callers can pass the result straight through.
  if (is.null(x)) {
    return()
  }
  if (!is.character(x)) {
    # Fix: corrected typo "chraracter" in the user-facing error message.
    stop("The arguments for JS() must be a character vector")
  }
  # Join multiple snippets into one newline-separated script and tag it so the
  # htmlwidgets machinery knows to evaluate it as JavaScript.
  x <- paste(x, collapse = "\n")
  structure(x, class = unique(c("JS_EVAL", oldClass(x))))
}
#' Internal function from Wei Luo to convert a data frame to a JSON array
#'
#' @param dtf a data frame object.
#' @return A length-one character string containing a JSON array with one
#'   object per row of \code{dtf}.
#' @source Function from:
#' \url{http://theweiluo.wordpress.com/2011/09/30/r-to-json-for-d3-js-and-protovis/}
#' @keywords internal
#' @noRd
toJSONarray <- function(dtf){
  clnms <- colnames(dtf)
  # Render one `"name" : value` fragment per row for column `i`; numeric
  # (integer or double) values are emitted bare, everything else is quoted.
  name.value <- function(i){
    # Fix: is.numeric() is TRUE for both integer and double, replacing the
    # fragile class()-string comparison, and [[ extracts the column as a
    # plain vector even when dtf is a tibble (dtf[, i] would not).
    quote <- if (is.numeric(dtf[[i]])) '' else '"'
    paste('"', i, '" : ', quote, dtf[[i]], quote, sep='')
  }
  # NOTE(review): sapply() simplifies to a matrix only when nrow(dtf) > 1; a
  # one-row data frame collapses to a vector and changes the apply() result.
  # Behavior kept as-is -- confirm single-row inputs never occur.
  objs <- apply(sapply(clnms, name.value), 1, function(x){paste(x,
    collapse=', ')})
  objs <- paste('{', objs, '}')
  res <- paste('[', paste(objs, collapse=', '), ']')
  return(res)
}
#' Read a text file into a single string
#'
#' Collapses every line of \code{doc} into one newline-separated string.
#'
#' @source Code taken directly from Ramnath Vaidyanathan's Slidify
#' \url{https://github.com/ramnathv/slidify}.
#' @param doc path to text document
#' @param ... further arguments passed on to \code{readLines}
#' @return string with document contents
#' @keywords internal
#' @noRd
read_file <- function(doc, ...) {
  contents <- readLines(doc, ...)
  paste(contents, collapse = "\n")
}
#' Utility function to handle margins
#' @param margin an \code{integer}, a named \code{vector} of integers,
#' or a named \code{list} of integers specifying the margins
#' (top, right, bottom, and left)
#' in \code{px}/\code{pixels} for our htmlwidget. If only a single
#' \code{integer} is provided, then the value will be assumed to be
#' the \code{right} margin.
#' @return named \code{list} with top, right, bottom, left margins
#' @noRd
margin_handler <- function(margin){
  # margin can be either a single value or a list with any of
  # top, right, bottom, left
  if(!is.null(margin) && length(margin) == 1 && is.null(names(margin))){
    # Single unnamed value: keep the original networkD3 behavior and treat it
    # as the right margin.
    margin <- list(
      top = NULL,
      right = margin,
      bottom = NULL,
      left = NULL
    )
  } else if(!is.null(margin)){
    # if margin is a named vector then convert to list
    if(!is.list(margin) && !is.null(names(margin))){
      margin <- as.list(margin)
    }
    # Fill any position the caller did not supply with NULL as the default.
    margin <- modifyList(
      list(top = NULL, right = NULL, bottom = NULL, left = NULL),
      margin
    )
  } else {
    # if margin is null, then make it a list of nulls for each position
    margin <- list(top = NULL, right = NULL, bottom = NULL, left = NULL)
  }
  # Fix: return the normalized margin explicitly (and visibly) instead of
  # relying on the invisible value of the assignment in the if/else chain.
  margin
}
#' Function to convert igraph graph to a list suitable for networkD3
#'
#' @param g an \code{igraph} class graph object
#' @param group an object that contains node group values, for example, those
#' created with igraph's \code{\link{membership}} function.
#' @param what a character string specifying what to return. If
#' \code{what = 'links'} or \code{what = 'nodes'} only the links or nodes are
#' returned as data frames, respectively. If \code{what = 'both'} then both
#' data frames will be return in a list.
#'
#' @return A list of link and node data frames or only the link or node data
#' frames.
#'
#' @examples
#' \dontrun{
#' # Load igraph
#' library(igraph)
#'
#' # Load data
#' ## Original data from http://results.ref.ac.uk/DownloadSubmissions/ByUoa/21
#' data('SchoolsJournals')
#'
#' # Convert to igraph
#' SchoolsJournals <- graph.data.frame(SchoolsJournals, directed = FALSE)
#'
#' # Remove duplicate edges
#' SchoolsJournals <- simplify(SchoolsJournals)
#'
#' # Find group membership
#' wt <- cluster_walktrap(SchoolsJournals, steps = 6)
#' members <- membership(wt)
#'
#' # Convert igraph to list for networkD3
#' sj_list <- igraph_to_networkD3(SchoolsJournals, group = members)
#'
#' # Plot as a forceDirected Network
#' forceNetwork(Links = sj_list$links, Nodes = sj_list$nodes, Source = 'source',
#'              Target = 'target', NodeID = 'name', Group = 'group',
#'              zoom = TRUE, linkDistance = 200)
#' }
#'
#' @importFrom igraph V as_data_frame graph.data.frame simplify cluster_walktrap membership
#' @importFrom magrittr %>%
#' @export
igraph_to_networkD3 <- function(g, group, what = 'both') {
# Sanity check
if (!('igraph' %in% class(g))) stop('g must be an igraph class object.',
call. = FALSE)
if (!(what %in% c('both', 'links', 'nodes'))) stop('what must be either "nodes", "links", or "both".',
call. = FALSE)
# Extract vertices (nodes)
# NOTE(review): assumes V(g) %>% as.matrix yields a one-column matrix of
# 1-based vertex ids with vertex names as row names -- confirm across
# igraph versions.
temp_nodes <- V(g) %>% as.matrix %>% data.frame
temp_nodes$name <- row.names(temp_nodes)
names(temp_nodes) <- c('id', 'name')
# Convert to base 0 (for JavaScript)
temp_nodes$id <- temp_nodes$id - 1
# Nodes for output
nodes <- temp_nodes$name %>% data.frame %>% setNames('name')
# Include grouping variable if applicable
if (!missing(group)) {
group <- as.matrix(group)
if (nrow(nodes) != nrow(group)) stop('group must have the same number of rows as the number of nodes in g.',
call. = FALSE)
nodes <- cbind(nodes, group)
}
row.names(nodes) <- NULL
# Convert links from names to numbers
# NOTE(review): merge() may reorder rows, so edge order in the output can
# differ from the input graph; source/target pairs stay consistent.
links <- as_data_frame(g, what = 'edges')
links <- merge(links, temp_nodes, by.x = 'from', by.y = 'name')
links <- merge(links, temp_nodes, by.x = 'to', by.y = 'name')
links <- links[, c('id.x', 'id.y')] %>% setNames(c('source', 'target'))
# Output requested object
if (what == 'both') {
return(list(links = links, nodes = nodes))
}
else if (what == 'links') {
return(links)
}
else if (what == 'nodes') {
return(nodes)
}
} | /R/utils.R | no_license | cashoes/networkD3 | R | false | false | 6,371 | r | #' Create character strings that will be evaluated as JavaScript
#'
#' @param ... character vector(s) collapsed into a single JavaScript string
#'
#' @return A length-one character string with class \code{JS_EVAL}, or
#'   \code{NULL} when no input is supplied.
#'
#' @source A direct import of \code{JS} from Ramnath Vaidyanathan, Yihui Xie,
#' JJ Allaire, Joe Cheng and Kenton Russell (2015). \link{htmlwidgets}: HTML
#' Widgets for R. R package version 0.4.
#'
#' @export
JS <- function(...) {
  x <- c(...)
  # No input at all: return NULL so callers can pass the result straight through.
  if (is.null(x)) {
    return()
  }
  if (!is.character(x)) {
    # Fix: corrected typo "chraracter" in the user-facing error message.
    stop("The arguments for JS() must be a character vector")
  }
  # Join multiple snippets into one newline-separated script and tag it so the
  # htmlwidgets machinery knows to evaluate it as JavaScript.
  x <- paste(x, collapse = "\n")
  structure(x, class = unique(c("JS_EVAL", oldClass(x))))
}
#' Internal function from Wei Luo to convert a data frame to a JSON array
#'
#' @param dtf a data frame object.
#' @return A length-one character string containing a JSON array with one
#'   object per row of \code{dtf}.
#' @source Function from:
#' \url{http://theweiluo.wordpress.com/2011/09/30/r-to-json-for-d3-js-and-protovis/}
#' @keywords internal
#' @noRd
toJSONarray <- function(dtf){
  clnms <- colnames(dtf)
  # Render one `"name" : value` fragment per row for column `i`; numeric
  # (integer or double) values are emitted bare, everything else is quoted.
  name.value <- function(i){
    # Fix: is.numeric() is TRUE for both integer and double, replacing the
    # fragile class()-string comparison, and [[ extracts the column as a
    # plain vector even when dtf is a tibble (dtf[, i] would not).
    quote <- if (is.numeric(dtf[[i]])) '' else '"'
    paste('"', i, '" : ', quote, dtf[[i]], quote, sep='')
  }
  # NOTE(review): sapply() simplifies to a matrix only when nrow(dtf) > 1; a
  # one-row data frame collapses to a vector and changes the apply() result.
  # Behavior kept as-is -- confirm single-row inputs never occur.
  objs <- apply(sapply(clnms, name.value), 1, function(x){paste(x,
    collapse=', ')})
  objs <- paste('{', objs, '}')
  res <- paste('[', paste(objs, collapse=', '), ']')
  return(res)
}
#' Read a text file into a single string
#'
#' Collapses every line of \code{doc} into one newline-separated string.
#'
#' @source Code taken directly from Ramnath Vaidyanathan's Slidify
#' \url{https://github.com/ramnathv/slidify}.
#' @param doc path to text document
#' @param ... further arguments passed on to \code{readLines}
#' @return string with document contents
#' @keywords internal
#' @noRd
read_file <- function(doc, ...) {
  contents <- readLines(doc, ...)
  paste(contents, collapse = "\n")
}
#' Utility function to handle margins
#' @param margin an \code{integer}, a named \code{vector} of integers,
#' or a named \code{list} of integers specifying the margins
#' (top, right, bottom, and left)
#' in \code{px}/\code{pixels} for our htmlwidget. If only a single
#' \code{integer} is provided, then the value will be assumed to be
#' the \code{right} margin.
#' @return named \code{list} with top, right, bottom, left margins
#' @noRd
margin_handler <- function(margin){
  # margin can be either a single value or a list with any of
  # top, right, bottom, left
  if(!is.null(margin) && length(margin) == 1 && is.null(names(margin))){
    # Single unnamed value: keep the original networkD3 behavior and treat it
    # as the right margin.
    margin <- list(
      top = NULL,
      right = margin,
      bottom = NULL,
      left = NULL
    )
  } else if(!is.null(margin)){
    # if margin is a named vector then convert to list
    if(!is.list(margin) && !is.null(names(margin))){
      margin <- as.list(margin)
    }
    # Fill any position the caller did not supply with NULL as the default.
    margin <- modifyList(
      list(top = NULL, right = NULL, bottom = NULL, left = NULL),
      margin
    )
  } else {
    # if margin is null, then make it a list of nulls for each position
    margin <- list(top = NULL, right = NULL, bottom = NULL, left = NULL)
  }
  # Fix: return the normalized margin explicitly (and visibly) instead of
  # relying on the invisible value of the assignment in the if/else chain.
  margin
}
#' Function to convert igraph graph to a list suitable for networkD3
#'
#' @param g an \code{igraph} class graph object
#' @param group an object that contains node group values, for example, those
#' created with igraph's \code{\link{membership}} function.
#' @param what a character string specifying what to return. If
#' \code{what = 'links'} or \code{what = 'nodes'} only the links or nodes are
#' returned as data frames, respectively. If \code{what = 'both'} then both
#' data frames will be return in a list.
#'
#' @return A list of link and node data frames or only the link or node data
#' frames.
#'
#' @examples
#' \dontrun{
#' # Load igraph
#' library(igraph)
#'
#' # Load data
#' ## Original data from http://results.ref.ac.uk/DownloadSubmissions/ByUoa/21
#' data('SchoolsJournals')
#'
#' # Convert to igraph
#' SchoolsJournals <- graph.data.frame(SchoolsJournals, directed = FALSE)
#'
#' # Remove duplicate edges
#' SchoolsJournals <- simplify(SchoolsJournals)
#'
#' # Find group membership
#' wt <- cluster_walktrap(SchoolsJournals, steps = 6)
#' members <- membership(wt)
#'
#' # Convert igraph to list for networkD3
#' sj_list <- igraph_to_networkD3(SchoolsJournals, group = members)
#'
#' # Plot as a forceDirected Network
#' forceNetwork(Links = sj_list$links, Nodes = sj_list$nodes, Source = 'source',
#' Target = 'target', NodeID = 'name', Group = 'group',
#' zoom = TRUE, linkDistance = 200)
#' }
#'
#' @importFrom igraph V as_data_frame graph.data.frame simplify cluster_walktrap membership
#' @importFrom magrittr %>%
#' @export
igraph_to_networkD3 <- function(g, group, what = 'both') {
# Sanity check
if (!('igraph' %in% class(g))) stop('g must be an igraph class object.',
call. = FALSE)
if (!(what %in% c('both', 'links', 'nodes'))) stop('what must be either "nodes", "links", or "both".',
call. = FALSE)
# Extract vertices (nodes)
# NOTE(review): assumes V(g) %>% as.matrix yields a one-column matrix of
# 1-based vertex ids with vertex names as row names -- confirm across
# igraph versions.
temp_nodes <- V(g) %>% as.matrix %>% data.frame
temp_nodes$name <- row.names(temp_nodes)
names(temp_nodes) <- c('id', 'name')
# Convert to base 0 (for JavaScript)
temp_nodes$id <- temp_nodes$id - 1
# Nodes for output
nodes <- temp_nodes$name %>% data.frame %>% setNames('name')
# Include grouping variable if applicable
if (!missing(group)) {
group <- as.matrix(group)
if (nrow(nodes) != nrow(group)) stop('group must have the same number of rows as the number of nodes in g.',
call. = FALSE)
nodes <- cbind(nodes, group)
}
row.names(nodes) <- NULL
# Convert links from names to numbers
# NOTE(review): merge() may reorder rows, so edge order in the output can
# differ from the input graph; source/target pairs stay consistent.
links <- as_data_frame(g, what = 'edges')
links <- merge(links, temp_nodes, by.x = 'from', by.y = 'name')
links <- merge(links, temp_nodes, by.x = 'to', by.y = 'name')
links <- links[, c('id.x', 'id.y')] %>% setNames(c('source', 'target'))
# Output requested object
if (what == 'both') {
return(list(links = links, nodes = nodes))
}
else if (what == 'links') {
return(links)
}
else if (what == 'nodes') {
return(nodes)
}
} |
sink("TradeShift.log", split = T)
## This code block is to install a particular version of H2O
# START
if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
install.packages("h2o", repos=(c("http://s3.amazonaws.com/h2o-release/h2o/master/1555/R", getOption("repos")))) #choose a build here
# END
# Fetch the latest nightly build using Jo-fai Chow's package
#devtools::install_github("woobe/deepr")
#deepr::install_h2o()
library(h2o)
library(stringr)
## Connect to H2O server (On server(s), run 'java -Xmx8g -ea -jar h2o.jar -port 53322 -name TradeShift' first)
## Go to http://server:53322/ to check Jobs/Data/Models etc.
#h2oServer <- h2o.init(ip="server", port = 53322)
# NOTE(review): h2oServer is first assigned two lines below, so on a fresh R
# session this shutdown call references an undefined object and errors; it
# only works when re-running the script in a session that already holds one.
h2o.shutdown(h2oServer)
## Launch H2O directly on localhost, go to http://localhost:54321/ to check Jobs/Data/Models etc.!
h2oServer <- h2o.init(nthreads = -1, max_mem_size = '8g')
## Import data
path_train <- "/Users/arno/kaggle_tradeshift/data/train.csv"
path_trainLabels <- "/Users/arno/kaggle_tradeshift/data/trainLabels.csv"
path_test <- "/Users/arno/kaggle_tradeshift/data/test.csv"
path_submission <- "/Users/arno/kaggle_tradeshift/data/sampleSubmission.csv"
train_hex <- h2o.importFile(h2oServer, path = path_train)
trainLabels_hex <- h2o.importFile(h2oServer, path = path_trainLabels)
test_hex <- h2o.importFile(h2oServer, path = path_test)
## Group variables
vars <- colnames(train_hex)
ID <- vars[1]
labels <- colnames(trainLabels_hex)
predictors <- vars[c(-1,-92)] #remove ID and one features with too many factors
targets <- labels[-1] #remove ID
## Settings (at least one of the following two settings has to be TRUE)
validate = T #whether to compute CV error on train/validation split (or n-fold), potentially with grid search
submitwithfulldata = T #whether to use full training dataset for submission (if FALSE, then the validation model(s) will make test set predictions)
ensemble_size <- 2 # more -> lower variance
seed0 = 1337
reproducible_mode = T # Set to TRUE if you want reproducible results, e.g. for final Kaggle submission if you think you'll win :) Note: will be slower for DL
## Scoring helpers
tLogLoss <- matrix(0, nrow = 1, ncol = length(targets))
vLogLoss <- matrix(0, nrow = 1, ncol = length(targets))
## Attach the labels to the training data
trainWL <- h2o.exec(h2oServer,expr=cbind(train_hex, trainLabels_hex))
trainWL <- h2o.assign(trainWL, "trainWL")
h2o.rm(h2oServer, keys = c("train.hex","trainLabels.hex")) #no longer need these two individually
h2o.rm(h2oServer, grep(pattern = "Last.value", x = h2o.ls(h2oServer)$Key, value = TRUE))
## Impute missing values based on group-by on targets
# Skip predictors that are either complete or entirely missing; for the rest,
# ask H2O to fill NAs with (grouped) means. The result of h2o.impute() is not
# assigned, so this presumably mutates the H2O frame in place — TODO confirm
# for this particular h2o build.
for (i in predictors) {
if (sum(is.na(trainWL[,i]))==0 || sum(is.na(trainWL[,i])) == nrow(trainWL)) next
h2o.impute(trainWL,i,method='mean',targets)
}
# Split the training data into train/valid (95%/5%)
## Want to keep train large enough to make a good submission if submitwithfulldata = F
splits <- h2o.splitFrame(trainWL, ratios = 0.95, shuffle=!reproducible_mode)
train <- splits[[1]]
valid <- splits[[2]]
## Main loop over targets
for (resp in 1:length(targets)) {
# always just predict class 0 for y_14 (is constant)
if (resp == 14) {
final_submission <- cbind(final_submission, as.data.frame(matrix(0, nrow = nrow(test_hex), ncol = 1)))
colnames(final_submission)[resp] <- targets[resp]
next
}
if (validate) {
cat("\n\nNow training and validating an ensemble model for", targets[resp], "...\n")
train_resp <- train[,targets[resp]]
valid_resp <- valid[,targets[resp]]
for (n in 1:ensemble_size) {
cat("\n\nBuilding ensemble validation model", n, "of", ensemble_size, "for", targets[resp], "...\n")
cvmodel <-
h2o.randomForest(x = predictors,
y = targets[resp],
data = train,
validation = valid,
classification = T,
#type = "BigData", ntree = 50, depth = 30, mtries = 20, nbins = 50, #ensemble_size 1: training LL: 0.002863313 validation LL: 0.009463341 LB: 0.094373
#type = "BigData", ntree = 100, depth = 30, mtries = 30, nbins = 100, #ensemble_size 1: training LL: 0.002892511 validation LL: 0.008592581
type = "fast", ntree = c(10,20), depth = c(5,10), mtries = 10, nbins = 10, #demo for grid search
seed = seed0 + resp*ensemble_size + n
)
model <- cvmodel@model[[1]] #If cv model is a grid search model
#model <- cvmodel #If cvmodel is not a grid search model
# use probabilities - clamp validation predictions for LogLoss computation
train_preds <- h2o.predict(model, train)[,3]
valid_preds <- h2o.predict(model, valid)[,3]
# compute LogLoss for this ensemble member, on training data
tpc <- train_preds
tpc <- h2o.exec(h2oServer,expr=ifelse(tpc > 1e-15, tpc, 1e-15))
tpc <- h2o.exec(h2oServer,expr=ifelse(tpc < 1-1e-15, tpc, 1-1e-15))
trainLL <- h2o.exec(h2oServer,expr=mean(-train_resp*log(tpc)-(1-train_resp)*log(1-tpc)))
cat("\nLogLoss of this ensemble member on training data:", trainLL)
# compute LogLoss for this ensemble member, on validation data
vpc <- valid_preds
vpc <- h2o.exec(h2oServer,expr=ifelse(vpc > 1e-15, vpc, 1e-15))
vpc <- h2o.exec(h2oServer,expr=ifelse(vpc < 1-1e-15, vpc, 1-1e-15))
validLL <- h2o.exec(h2oServer,expr=mean(-valid_resp*log(vpc)-(1-valid_resp)*log(1-vpc)))
cat("\nLogLoss of this ensemble member on validation data:", validLL)
if (!submitwithfulldata) {
test_preds <- h2o.predict(model, test_hex)[,3]
}
if (n == 1) {
valid_preds_ensemble <- valid_preds
train_preds_ensemble <- train_preds
if (!submitwithfulldata) {
test_preds_ensemble <- test_preds
}
} else {
valid_preds_ensemble <- valid_preds_ensemble + valid_preds
train_preds_ensemble <- train_preds_ensemble + train_preds
if (!submitwithfulldata) {
test_preds_ensemble <- test_preds_ensemble + test_preds
}
}
}
train_preds <- train_preds_ensemble/ensemble_size ##ensemble average of probabilities
valid_preds <- valid_preds_ensemble/ensemble_size ##ensemble average of probabilities
if (!submitwithfulldata) {
test_preds <- test_preds_ensemble/ensemble_size
}
## Compute LogLoss of ensemble
train_preds <- h2o.exec(h2oServer,expr=ifelse(train_preds > 1e-15, train_preds, 1e-15))
train_preds <- h2o.exec(h2oServer,expr=ifelse(train_preds < 1-1e-15, train_preds, 1-1e-15))
tLL <- h2o.exec(h2oServer,expr=mean(-train_resp*log(train_preds)-(1-train_resp)*log(1-train_preds)))
tLogLoss[resp] <- tLL
cat("\nLogLosses of ensemble on training data so far:", tLogLoss)
cat("\nMean LogLoss of ensemble on training data so far:", sum(tLogLoss)/resp)
valid_preds <- h2o.exec(h2oServer,expr=ifelse(valid_preds > 1e-15, valid_preds, 1e-15))
valid_preds <- h2o.exec(h2oServer,expr=ifelse(valid_preds < 1-1e-15, valid_preds, 1-1e-15))
vLL <- h2o.exec(h2oServer,expr=mean(-valid_resp*log(valid_preds)-(1-valid_resp)*log(1-valid_preds)))
vLogLoss[resp] <- vLL
cat("\nLogLosses of ensemble on validation data so far:", vLogLoss)
cat("\nMean LogLoss of ensemble on validation data so far:", sum(vLogLoss)/resp)
if (!submitwithfulldata) {
cat("\nMaking test set predictions with ensemble model on 95% of the data\n")
ensemble_average <- as.data.frame(test_preds) #bring ensemble average to R
colnames(ensemble_average)[1] <- targets[resp] #give it the right name
if (resp == 1) {
final_submission <- ensemble_average
} else {
final_submission <- cbind(final_submission, ensemble_average)
}
#print(head(final_submission))
}
}
if (submitwithfulldata) {
if (validate) {
cat("\n\nTaking parameters from validation run (or grid search winner) for", targets[resp], "...\n")
p <- cvmodel@model[[1]]@model$params #If cvmodel is a grid search model
#p <- cvmodel@model$params #If cvmodel is not a grid search model
}
else {
p = list(classification = T, type = "BigData", ntree=50, depth=30, mtries=20, nbins=50) #LB: 0.0093360
}
## Build an ensemble model on full training data - should perform better than the CV model above
for (n in 1:ensemble_size) {
cat("\n\nBuilding ensemble model", n, "of", ensemble_size, "for", targets[resp], "...\n")
model <-
h2o.randomForest(x = predictors,
y = targets[resp],
data = trainWL,
classification = T,
type = p$type,
ntree = p$ntree,
depth = p$depth,
mtries = p$mtries,
nbins = p$nbins,
seed = seed0 + resp*ensemble_size + n,
key = paste0(targets[resp], "_cv_ensemble_", n, "_of_", ensemble_size)
)
## Aggregate ensemble model predictions (probabilities)
test_preds <- h2o.predict(model, test_hex)[,3]
if (n == 1) {
test_preds_ensemble <- test_preds
} else {
test_preds_ensemble <- test_preds_ensemble + test_preds
}
}
test_preds <- test_preds_ensemble/ensemble_size #simple ensemble average
ensemble_average <- as.data.frame(test_preds) #bring ensemble average to R
colnames(ensemble_average)[1] <- targets[resp] #give it the right name
if (resp == 1) {
final_submission <- ensemble_average
} else {
final_submission <- cbind(final_submission, ensemble_average)
}
print(head(final_submission))
}
## Remove no longer needed old models and temporaries from K-V store to keep memory footprint low
ls_temp <- h2o.ls(h2oServer)
for (n_ls in 1:nrow(ls_temp)) {
if (str_detect(ls_temp[n_ls, 1], "DRF") || str_detect(ls_temp[n_ls, 1], "Last.value")) {
h2o.rm(h2oServer, keys = as.character(ls_temp[n_ls, 1]))
}
}
}
if (validate) {
cat("\nOverall training LogLosses = " , tLogLoss)
cat("\nOverall training LogLoss = " , mean(tLogLoss))
cat("\nOverall validation LogLosses = " , vLogLoss)
cat("\nOverall validation LogLoss = " , mean(vLogLoss))
cat("\n")
}
print(summary(final_submission))
submission <- read.csv(path_submission)
#reshape predictions into 1D
# Transpose so targets vary fastest, then flatten column-wise into a single
# column. This assumes the sample submission's rows are ordered id-major with
# all 33 targets consecutive per id — TODO confirm against sampleSubmission.csv.
fs <- t(as.matrix(final_submission))
dim(fs) <- c(prod(dim(fs)),1)
submission[,2] <- fs #replace 0s with actual predictions
write.csv(submission, file = "./submission.csv", quote = F, row.names = F)
sink() | /h2o/TradeShift.R | no_license | mntw/pkdd-15-predict-taxi-service-trajectory-i | R | false | false | 10,793 | r | sink("TradeShift.log", split = T)
## This code block is to install a particular version of H2O
# START
if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
install.packages("h2o", repos=(c("http://s3.amazonaws.com/h2o-release/h2o/master/1555/R", getOption("repos")))) #choose a build here
# END
# Fetch the latest nightly build using Jo-fai Chow's package
#devtools::install_github("woobe/deepr")
#deepr::install_h2o()
library(h2o)
library(stringr)
## Connect to H2O server (On server(s), run 'java -Xmx8g -ea -jar h2o.jar -port 53322 -name TradeShift' first)
## Go to http://server:53322/ to check Jobs/Data/Models etc.
#h2oServer <- h2o.init(ip="server", port = 53322)
h2o.shutdown(h2oServer)
## Launch H2O directly on localhost, go to http://localhost:54321/ to check Jobs/Data/Models etc.!
h2oServer <- h2o.init(nthreads = -1, max_mem_size = '8g')
## Import data
path_train <- "/Users/arno/kaggle_tradeshift/data/train.csv"
path_trainLabels <- "/Users/arno/kaggle_tradeshift/data/trainLabels.csv"
path_test <- "/Users/arno/kaggle_tradeshift/data/test.csv"
path_submission <- "/Users/arno/kaggle_tradeshift/data/sampleSubmission.csv"
train_hex <- h2o.importFile(h2oServer, path = path_train)
trainLabels_hex <- h2o.importFile(h2oServer, path = path_trainLabels)
test_hex <- h2o.importFile(h2oServer, path = path_test)
## Group variables
vars <- colnames(train_hex)
ID <- vars[1]
labels <- colnames(trainLabels_hex)
predictors <- vars[c(-1,-92)] #remove ID and one features with too many factors
targets <- labels[-1] #remove ID
## Settings (at least one of the following two settings has to be TRUE)
validate = T #whether to compute CV error on train/validation split (or n-fold), potentially with grid search
submitwithfulldata = T #whether to use full training dataset for submission (if FALSE, then the validation model(s) will make test set predictions)
ensemble_size <- 2 # more -> lower variance
seed0 = 1337
reproducible_mode = T # Set to TRUE if you want reproducible results, e.g. for final Kaggle submission if you think you'll win :) Note: will be slower for DL
## Scoring helpers
tLogLoss <- matrix(0, nrow = 1, ncol = length(targets))
vLogLoss <- matrix(0, nrow = 1, ncol = length(targets))
## Attach the labels to the training data
trainWL <- h2o.exec(h2oServer,expr=cbind(train_hex, trainLabels_hex))
trainWL <- h2o.assign(trainWL, "trainWL")
h2o.rm(h2oServer, keys = c("train.hex","trainLabels.hex")) #no longer need these two individually
h2o.rm(h2oServer, grep(pattern = "Last.value", x = h2o.ls(h2oServer)$Key, value = TRUE))
## Impute missing values based on group-by on targets
for (i in predictors) {
if (sum(is.na(trainWL[,i]))==0 || sum(is.na(trainWL[,i])) == nrow(trainWL)) next
h2o.impute(trainWL,i,method='mean',targets)
}
# Split the training data into train/valid (95%/5%)
## Want to keep train large enough to make a good submission if submitwithfulldata = F
splits <- h2o.splitFrame(trainWL, ratios = 0.95, shuffle=!reproducible_mode)
train <- splits[[1]]
valid <- splits[[2]]
## Main loop over targets
for (resp in 1:length(targets)) {
# always just predict class 0 for y_14 (is constant)
if (resp == 14) {
final_submission <- cbind(final_submission, as.data.frame(matrix(0, nrow = nrow(test_hex), ncol = 1)))
colnames(final_submission)[resp] <- targets[resp]
next
}
if (validate) {
cat("\n\nNow training and validating an ensemble model for", targets[resp], "...\n")
train_resp <- train[,targets[resp]]
valid_resp <- valid[,targets[resp]]
for (n in 1:ensemble_size) {
cat("\n\nBuilding ensemble validation model", n, "of", ensemble_size, "for", targets[resp], "...\n")
cvmodel <-
h2o.randomForest(x = predictors,
y = targets[resp],
data = train,
validation = valid,
classification = T,
#type = "BigData", ntree = 50, depth = 30, mtries = 20, nbins = 50, #ensemble_size 1: training LL: 0.002863313 validation LL: 0.009463341 LB: 0.094373
#type = "BigData", ntree = 100, depth = 30, mtries = 30, nbins = 100, #ensemble_size 1: training LL: 0.002892511 validation LL: 0.008592581
type = "fast", ntree = c(10,20), depth = c(5,10), mtries = 10, nbins = 10, #demo for grid search
seed = seed0 + resp*ensemble_size + n
)
model <- cvmodel@model[[1]] #If cv model is a grid search model
#model <- cvmodel #If cvmodel is not a grid search model
# use probabilities - clamp validation predictions for LogLoss computation
train_preds <- h2o.predict(model, train)[,3]
valid_preds <- h2o.predict(model, valid)[,3]
# compute LogLoss for this ensemble member, on training data
tpc <- train_preds
tpc <- h2o.exec(h2oServer,expr=ifelse(tpc > 1e-15, tpc, 1e-15))
tpc <- h2o.exec(h2oServer,expr=ifelse(tpc < 1-1e-15, tpc, 1-1e-15))
trainLL <- h2o.exec(h2oServer,expr=mean(-train_resp*log(tpc)-(1-train_resp)*log(1-tpc)))
cat("\nLogLoss of this ensemble member on training data:", trainLL)
# compute LogLoss for this ensemble member, on validation data
vpc <- valid_preds
vpc <- h2o.exec(h2oServer,expr=ifelse(vpc > 1e-15, vpc, 1e-15))
vpc <- h2o.exec(h2oServer,expr=ifelse(vpc < 1-1e-15, vpc, 1-1e-15))
validLL <- h2o.exec(h2oServer,expr=mean(-valid_resp*log(vpc)-(1-valid_resp)*log(1-vpc)))
cat("\nLogLoss of this ensemble member on validation data:", validLL)
if (!submitwithfulldata) {
test_preds <- h2o.predict(model, test_hex)[,3]
}
if (n == 1) {
valid_preds_ensemble <- valid_preds
train_preds_ensemble <- train_preds
if (!submitwithfulldata) {
test_preds_ensemble <- test_preds
}
} else {
valid_preds_ensemble <- valid_preds_ensemble + valid_preds
train_preds_ensemble <- train_preds_ensemble + train_preds
if (!submitwithfulldata) {
test_preds_ensemble <- test_preds_ensemble + test_preds
}
}
}
train_preds <- train_preds_ensemble/ensemble_size ##ensemble average of probabilities
valid_preds <- valid_preds_ensemble/ensemble_size ##ensemble average of probabilities
if (!submitwithfulldata) {
test_preds <- test_preds_ensemble/ensemble_size
}
## Compute LogLoss of ensemble
train_preds <- h2o.exec(h2oServer,expr=ifelse(train_preds > 1e-15, train_preds, 1e-15))
train_preds <- h2o.exec(h2oServer,expr=ifelse(train_preds < 1-1e-15, train_preds, 1-1e-15))
tLL <- h2o.exec(h2oServer,expr=mean(-train_resp*log(train_preds)-(1-train_resp)*log(1-train_preds)))
tLogLoss[resp] <- tLL
cat("\nLogLosses of ensemble on training data so far:", tLogLoss)
cat("\nMean LogLoss of ensemble on training data so far:", sum(tLogLoss)/resp)
valid_preds <- h2o.exec(h2oServer,expr=ifelse(valid_preds > 1e-15, valid_preds, 1e-15))
valid_preds <- h2o.exec(h2oServer,expr=ifelse(valid_preds < 1-1e-15, valid_preds, 1-1e-15))
vLL <- h2o.exec(h2oServer,expr=mean(-valid_resp*log(valid_preds)-(1-valid_resp)*log(1-valid_preds)))
vLogLoss[resp] <- vLL
cat("\nLogLosses of ensemble on validation data so far:", vLogLoss)
cat("\nMean LogLoss of ensemble on validation data so far:", sum(vLogLoss)/resp)
if (!submitwithfulldata) {
cat("\nMaking test set predictions with ensemble model on 95% of the data\n")
ensemble_average <- as.data.frame(test_preds) #bring ensemble average to R
colnames(ensemble_average)[1] <- targets[resp] #give it the right name
if (resp == 1) {
final_submission <- ensemble_average
} else {
final_submission <- cbind(final_submission, ensemble_average)
}
#print(head(final_submission))
}
}
if (submitwithfulldata) {
if (validate) {
cat("\n\nTaking parameters from validation run (or grid search winner) for", targets[resp], "...\n")
p <- cvmodel@model[[1]]@model$params #If cvmodel is a grid search model
#p <- cvmodel@model$params #If cvmodel is not a grid search model
}
else {
p = list(classification = T, type = "BigData", ntree=50, depth=30, mtries=20, nbins=50) #LB: 0.0093360
}
## Build an ensemble model on full training data - should perform better than the CV model above
for (n in 1:ensemble_size) {
cat("\n\nBuilding ensemble model", n, "of", ensemble_size, "for", targets[resp], "...\n")
model <-
h2o.randomForest(x = predictors,
y = targets[resp],
data = trainWL,
classification = T,
type = p$type,
ntree = p$ntree,
depth = p$depth,
mtries = p$mtries,
nbins = p$nbins,
seed = seed0 + resp*ensemble_size + n,
key = paste0(targets[resp], "_cv_ensemble_", n, "_of_", ensemble_size)
)
## Aggregate ensemble model predictions (probabilities)
test_preds <- h2o.predict(model, test_hex)[,3]
if (n == 1) {
test_preds_ensemble <- test_preds
} else {
test_preds_ensemble <- test_preds_ensemble + test_preds
}
}
test_preds <- test_preds_ensemble/ensemble_size #simple ensemble average
ensemble_average <- as.data.frame(test_preds) #bring ensemble average to R
colnames(ensemble_average)[1] <- targets[resp] #give it the right name
if (resp == 1) {
final_submission <- ensemble_average
} else {
final_submission <- cbind(final_submission, ensemble_average)
}
print(head(final_submission))
}
## Remove no longer needed old models and temporaries from K-V store to keep memory footprint low
ls_temp <- h2o.ls(h2oServer)
for (n_ls in 1:nrow(ls_temp)) {
if (str_detect(ls_temp[n_ls, 1], "DRF") || str_detect(ls_temp[n_ls, 1], "Last.value")) {
h2o.rm(h2oServer, keys = as.character(ls_temp[n_ls, 1]))
}
}
}
if (validate) {
cat("\nOverall training LogLosses = " , tLogLoss)
cat("\nOverall training LogLoss = " , mean(tLogLoss))
cat("\nOverall validation LogLosses = " , vLogLoss)
cat("\nOverall validation LogLoss = " , mean(vLogLoss))
cat("\n")
}
print(summary(final_submission))
submission <- read.csv(path_submission)
#reshape predictions into 1D
fs <- t(as.matrix(final_submission))
dim(fs) <- c(prod(dim(fs)),1)
submission[,2] <- fs #replace 0s with actual predictions
write.csv(submission, file = "./submission.csv", quote = F, row.names = F)
sink() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classPrior.R
\name{createPriorDensity}
\alias{createPriorDensity}
\title{Fits a density function to a multivariate sample}
\usage{
createPriorDensity(sampler, method = "multivariate", eps = 1e-10,
lower = NULL, upper = NULL, best = NULL, ...)
}
\arguments{
\item{sampler}{an object of class BayesianOutput or a matrix}
\item{method}{method to generate prior - default and currently only option is multivariate}
\item{eps}{numerical precision to avoid singularity}
\item{lower}{vector with lower bounds of parameter for the new prior, independent of the input sample}
\item{upper}{vector with upper bounds of parameter for the new prior, independent of the input sample}
\item{best}{vector with "best" values of parameter for the new prior, independent of the input sample}
\item{...}{parameters to pass on to the getSample function}
}
\description{
Fits a density function to a multivariate sample
}
\details{
This function fits a density estimator to a multivariate (typically a posterior) sample. The main purpose is to summarize a posterior sample as a pdf, in order to include it as a prior in a new analysis, for example when new data becomes available, or to calculate a fractional Bayes factor (see \code{\link{marginalLikelihood}}).
The limitation of this function is that we currently only implement a multivariate normal density estimator, so you will have a loss of information if your posterior is not approximately multivariate normal, which is likely the case if you have weak data. Extending the function to include more flexible density estimators (e.g. gaussian processes) is on our todo list, but it's quite tricky to get this stable, so I'm not sure when we will have this working. In general, creating reliable empirical density estimates in high-dimensional parameter spaces is extremely tricky, regardless of the software you are using.
For that reason, it is usually recommended to not update the posterior with this option, but rather:
1. If the full dataset is available, to make a single, or infrequent updates, recompute the entire model with the full / updated data
2. For frequent updates, consider using SMC instead of MCMC sampling. SMC sampling doesn't require an analytical summary of the posterior.
}
\examples{
# Create a BayesianSetup
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup = createBayesianSetup(likelihood = ll,
lower = rep(-10, 3),
upper = rep(10, 3))
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
newPrior = createPriorDensity(out, method = "multivariate",
eps = 1e-10, lower = rep(-10, 3),
upper = rep(10, 3), best = NULL)
bayesianSetup <- createBayesianSetup(likelihood = ll, prior = newPrior)
\dontrun{
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
}
}
\seealso{
\code{\link{createPrior}} \cr
\code{\link{createBetaPrior}} \cr
\code{\link{createTruncatedNormalPrior}} \cr
\code{\link{createUniformPrior}} \cr
\code{\link{createBayesianSetup}} \cr
}
\author{
Florian Hartig
}
| /BayesianTools/man/createPriorDensity.Rd | no_license | akhikolla/ClusterTests | R | false | true | 3,342 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classPrior.R
\name{createPriorDensity}
\alias{createPriorDensity}
\title{Fits a density function to a multivariate sample}
\usage{
createPriorDensity(sampler, method = "multivariate", eps = 1e-10,
lower = NULL, upper = NULL, best = NULL, ...)
}
\arguments{
\item{sampler}{an object of class BayesianOutput or a matrix}
\item{method}{method to generate prior - default and currently only option is multivariate}
\item{eps}{numerical precision to avoid singularity}
\item{lower}{vector with lower bounds of parameter for the new prior, independent of the input sample}
\item{upper}{vector with upper bounds of parameter for the new prior, independent of the input sample}
\item{best}{vector with "best" values of parameter for the new prior, independent of the input sample}
\item{...}{parameters to pass on to the getSample function}
}
\description{
Fits a density function to a multivariate sample
}
\details{
This function fits a density estimator to a multivariate (typically a posterior) sample. The main purpose is to summarize a posterior sample as a pdf, in order to include it as a prior in a new analysis, for example when new data becomes available, or to calculate a fractional Bayes factor (see \code{\link{marginalLikelihood}}).
The limitation of this function is that we currently only implement a multivariate normal density estimator, so you will have a loss of information if your posterior is not approximately multivariate normal, which is likely the case if you have weak data. Extending the function to include more flexible density estimators (e.g. gaussian processes) is on our todo list, but it's quite tricky to get this stable, so I'm not sure when we will have this working. In general, creating reliable empirical density estimates in high-dimensional parameter spaces is extremely tricky, regardless of the software you are using.
For that reason, it is usually recommended to not update the posterior with this option, but rather:
1. If the full dataset is available, to make a single, or infrequent updates, recompute the entire model with the full / updated data
2. For frequent updates, consider using SMC instead of MCMC sampling. SMC sampling doesn't require an analytical summary of the posterior.
}
\examples{
# Create a BayesianSetup
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup = createBayesianSetup(likelihood = ll,
lower = rep(-10, 3),
upper = rep(10, 3))
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
newPrior = createPriorDensity(out, method = "multivariate",
eps = 1e-10, lower = rep(-10, 3),
upper = rep(10, 3), best = NULL)
bayesianSetup <- createBayesianSetup(likelihood = ll, prior = newPrior)
\dontrun{
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
}
}
\seealso{
\code{\link{createPrior}} \cr
\code{\link{createBetaPrior}} \cr
\code{\link{createTruncatedNormalPrior}} \cr
\code{\link{createUniformPrior}} \cr
\code{\link{createBayesianSetup}} \cr
}
\author{
Florian Hartig
}
|
indDF <- data.frame (id=1:2, strategy=NA, num_wins=0)
indDF
# Deal each individual a fresh random strategy (1 = paper, 2 = scissors,
# 3 = rock). Sampling is without replacement, so with two players the two
# strategies are always distinct — this rule set never produces a tie here.
choose_Strategy <- function(ind) {
  ind$strategy <- sample.int(3, size = nrow(ind))
  return(ind)
}
##1=Paper, 2=Scissors, 3=Rock
# Resolve one round between the two players in `ind` and credit the winner.
# Coding: 1 = paper, 2 = scissors, 3 = rock. Equal strategies are a tie and
# leave the frame untouched. (Win-increment logic is inlined here; it matches
# the standalone increment_winner() helper defined below.)
playStrategy <- function(ind) {
  if (ind$strategy[1] != ind$strategy[2]) {
    if (any(ind$strategy == 3) && any(ind$strategy == 1)) {
      # special pairing: paper (1) wraps rock (3), so the LOWER code wins
      winner <- ind[ind$strategy == 1, "id"]
    } else {
      # remaining pairings: the higher code wins (2 beats 1, 3 beats 2)
      winner <- which(ind$strategy == max(ind$strategy))
    }
    ind[winner, "num_wins"] <- ind[winner, "num_wins"] + 1
  }
  return(ind)
}
# Add one win to the row of `ind` selected by the (numeric) `winner_id`
# row position, and return the updated frame.
increment_winner <- function(ind, winner_id) {
  ind$num_wins[winner_id] <- ind$num_wins[winner_id] + 1
  ind
}
# Play 1000 rounds — re-roll both strategies, score the round — then print
# the final tally.
# NOTE(review): the `i <- i + 1` inside the body is redundant; `for` advances
# `i` itself and overwrites this assignment on the next iteration.
for (i in 1:1000) {
indDF <- choose_Strategy(indDF)
indDF <- playStrategy(indDF)
i <- i + 1
};indDF
# Fresh two-player game state: ids 1 and 2, no strategy chosen, zero wins.
setup <- function() {
  data.frame(id = 1:2, strategy = NA, num_wins = 0)
}
###setup function for setting multiple samples
rounds <- 1000
indDF <- setup()
dat <- matrix(NA, rounds, 2)
for (i in 1:rounds) {
indDF <- choose_Strategy(indDF)
indDF <- playStrategy(indDF)
dat[i,] <- indDF$num_wins
i <- i + 1
}
plot(dat[,1], type="l", col="#EA3E49", lwd=3, xlab = "time", ylab = "number of rounds won")
lines(dat[,2], col="#77C4D3", lwd=3)
##A player who switches his strategy and a player who uses always the same strategy.
#Who would win?
# Re-roll the strategy of player 1 only; player 2 keeps whatever it had.
# (Models a switching player versus a fixed-strategy player.)
choose_Strategy2 <- function(ind) {
  ind$strategy[1] <- sample.int(3, size = 1)
  return(ind)
}
###################################
##My own try on this modell
exhDF <- data.frame (
id = c("Mark", "Maria"),
strategy=NA,
num_wins=0)
exhDF
###not use strings for
# Deal each named player a random strategy (1/2/3), sampled without
# replacement, so the two players never draw the same one.
choose_Strategy3 <- function(exh) {
  exh$strategy <- sample.int(3, size = nrow(exh))
  return(exh)
}
# Resolve one round for the named-player frame `exh` and credit the winner.
# Coding: 1 = paper, 2 = scissors, 3 = rock; equal strategies are a tie.
#
# Bug fix: the original looked the winner up by the `id` column ("Mark" /
# "Maria") and then used that string as a *row index*. The rows are named
# "1"/"2", so `exh["Mark", "num_wins"] <- ...` silently APPENDED a new NA row
# instead of updating the tally. We now index by row position.
playStrategy3 <- function(exh) {
  s <- exh$strategy
  if (any(s == 2) && any(s == 1)) {
    winner <- which(s == 2)   # scissors (2) cut paper (1)
  } else if (any(s == 3) && any(s == 2)) {
    winner <- which(s == 3)   # rock (3) blunts scissors (2)
  } else if (any(s == 3) && any(s == 1)) {
    winner <- which(s == 1)   # paper (1) wraps rock (3)
  } else {
    winner <- integer(0)      # tie: nobody scores
  }
  # assignment over an empty index is a no-op, so ties fall through cleanly
  exh$num_wins[winner] <- exh$num_wins[winner] + 1
  return(exh)
}
for (i in 1:100) {
exhDF <- choose_Strategy3(exhDF)
exhDF <- playStrategy3(exhDF)
i <- i + 1
};exhDF
# Fresh named-player state for the Mark-vs-Maria simulation.
#
# Bug fix: a misplaced parenthesis in the original closed data.frame() right
# after the id column, which turned `strategy` and `num_wins` into extra
# arguments of return() — and `return()` with more than one argument is a
# runtime error ("multi-argument returns are not permitted").
setup <- function() {
  return(data.frame(
    id = c("Mark", "Maria"),
    strategy = NA,
    num_wins = 0))
}
rounds <- 100
exhDF <- setup()
dat <- matrix(NA, rounds, 2)
for (i in 1:rounds) {
exhDF <- choose_Strategy3(exhDF)
exhDF <- playStrategy3(exhDF)
dat[i,] <- exhDF$num_wins
i <- i + 1
}
plot(dat[,1], type="l", col="violet", lwd=3, xlab = "time", ylab = "number of rounds won")
lines(dat[,2], col="yellow", lwd=3)
###############
#Head or Tail
#set.seed(seed) generates random numbers
coin = c("Head", "Tail")
set.seed(100) #to make results reproducible
y = sample(coin, 6, replace = TRUE)
length(y[y=="Tail"])
replicate(100, sample(coin, 6, replace = TRUE ))
a = replicate(100, length(sample(coin, 6, replace = TRUE)[y == "Tail"]))
mean(a)
#####not needed, was for axelrod modell
# Leftover fragment from an Axelrod culture-model experiment (marked "not
# needed" above). Fixed so the file parses: the original `if p >=0.2 {` is
# not valid R — the condition must be parenthesised — and a stray extra `}`
# left the braces unbalanced. `p`, `overlap`, `site`, `a` and `the_neighbor`
# are not defined anywhere in this script, so running this still errors;
# TODO: remove the fragment or finish the model it belonged to.
if (p >= 0.2) {
  changed_feature <- sample(which(!overlap), size = 1)
  a[site["y_cor"], site["x_cor"], changed_feature] <- a[the_neighbor[[1]]["y_cor"], the_neighbor[[1]]["x_cor"], changed_feature]
}
| /criminological_modelling.R | no_license | Elen93/Models | R | false | false | 3,705 | r | indDF <- data.frame (id=1:2, strategy=NA, num_wins=0)
indDF
choose_Strategy <- function(ind){
strats <- sample(x=1:3, size=nrow(ind))
ind$strategy <- strats
return(ind)
}
##1=Paper, 2=Scissors, 3=Rock
playStrategy <- function(ind){
if (ind$strategy[1] == ind$strategy[2]) {
# same strategies, it's a tie
} else {
if (any(ind$strategy == 3) && any(ind$strategy == 1)) {
# this is the special case of rock over paper
# figure out who of the two IS the winner
winner_id <- ind[ind$strategy == 1, "id"]
ind <- increment_winner(ind = ind, winner_id = winner_id)
} else {
# find index for higher strategy
# index also happens to be winner_id as per above
winner_id <- which(ind[, "strategy"] == max(ind[, "strategy"]))
ind <- increment_winner(ind = ind, winner_id = winner_id)
}
}
return(ind)
}
increment_winner <- function(ind, winner_id) {
ind[winner_id, "num_wins"] <- ind[winner_id, "num_wins"] + 1
return(ind)
}
for (i in 1:1000) {
indDF <- choose_Strategy(indDF)
indDF <- playStrategy(indDF)
i <- i + 1
};indDF
setup <- function(){
return(data.frame(id=1:2, strategy=NA, num_wins=0))
}
###setup function for setting multiple samples
rounds <- 1000
indDF <- setup()
dat <- matrix(NA, rounds, 2)
for (i in 1:rounds) {
indDF <- choose_Strategy(indDF)
indDF <- playStrategy(indDF)
dat[i,] <- indDF$num_wins
i <- i + 1
}
plot(dat[,1], type="l", col="#EA3E49", lwd=3, xlab = "time", ylab = "number of rounds won")
lines(dat[,2], col="#77C4D3", lwd=3)
##A player who switches his strategy and a player who uses always the same strategy.
#Who would win?
choose_Strategy2 <- function(ind){
strats <- sample(x = 1:3, size = 1)
ind$strategy[1] <- strats
return(ind)
}
###################################
##My own try on this modell
exhDF <- data.frame (
id = c("Mark", "Maria"),
strategy=NA,
num_wins=0)
exhDF
###not use strings for
choose_Strategy3 <- function(exh){
strats <- sample(x=1:3, size=nrow(exh))
exh$strategy <- strats
return(exh)
}
playStrategy3 <- function(exh){
if(any(exh$strategy == 2) && any(exh$strategy == 1)){
tmp <- exh[exh$strategy == 2, "id"]
exh[tmp, "num_wins"] <- exh[tmp, "num_wins"] + 1
}
if(any(exh$strategy == 3) && any(exh$strategy == 2)){
tmp <- exh[exh$strategy == 3, "id"]
exh[tmp, "num_wins"] <- exh[tmp, "num_wins"] + 1
}
if(any(exh$strategy == 3) && any(exh$strategy == 1)) {
tmp <- exh[exh$strategy == 1, "id"]
exh[tmp, "num_wins"] <- exh[tmp, "num_wins"] + 1
}
else {}
return(exh)
}
for (i in 1:100) {
exhDF <- choose_Strategy3(exhDF)
exhDF <- playStrategy3(exhDF)
i <- i + 1
};exhDF
setup <- function(){
return(data.frame(
id = c("Mark", "Maria")),
strategy=NA,
num_wins=0)
}
rounds <- 100
exhDF <- setup()
dat <- matrix(NA, rounds, 2)
for (i in 1:rounds) {
exhDF <- choose_Strategy3(exhDF)
exhDF <- playStrategy3(exhDF)
dat[i,] <- exhDF$num_wins
i <- i + 1
}
plot(dat[,1], type="l", col="violet", lwd=3, xlab = "time", ylab = "number of rounds won")
lines(dat[,2], col="yellow", lwd=3)
###############
# Head or Tail: estimate the expected number of tails in 6 coin flips.
coin <- c("Head", "Tail")
set.seed(100)  # to make results reproducible
y <- sample(coin, 6, replace = TRUE)
length(y[y == "Tail"])  # tails in a single run of 6 flips
replicate(100, sample(coin, 6, replace = TRUE))  # 6 x 100 matrix of flips, printed for illustration
# Bug fix: the original subset each fresh sample with the *fixed* logical
# mask `y == "Tail"` from the first draw, so it counted positions where the
# original y had tails rather than tails in each new sample. Count the tails
# of each replicate directly instead.
a <- replicate(100, sum(sample(coin, 6, replace = TRUE) == "Tail"))
mean(a)  # should be close to 3 (= 6 * 0.5)
#####not needed, was for axelrod modell
# NOTE(review): dead leftover fragment, kept only for reference per the
# comment above. `if p >= 0.2 {` is not valid R syntax (the condition needs
# parentheses), `overlap`, `site`, `a` and `the_neighbor` are undefined in
# this file, and the final `}` appears to close a block whose opening is not
# part of this fragment. Confirm and delete.
if p >=0.2 {
changed_feature <- sample(which(!overlap), size = 1)
a[site["y_cor"], site["x_cor"], changed_feature] <- a[the_neighbor[[1]]["y_cor"], the_neighbor[[1]]["x_cor"], changed_feature]
}
}
|
#' do PCA
#'
#' Runs principal component analysis (stats::prcomp) on the selected columns,
#' once per group when the data frame is grouped, and returns the fitted
#' models (classed "prcomp_exploratory") in a "model" column.
#'
#' @param df Data frame to analyze.
#' @param ... Column selection; tidyselect expressions like starts_with() are supported.
#' @param normalize_data Passed to prcomp as `scale.` -- whether to scale columns to unit variance.
#' @param max_nrow If not NULL, sample the data down to this many rows before fitting.
#' @param allow_single_column - Do not throw error and go ahead with PCA even if only one column is left after preprocessing. For K-means.
#' @param seed Seed set before sampling for reproducibility; NULL skips seeding.
#' @param na.rm Whether to run the NA preprocessing step. Can be FALSE when the caller (e.g. exp_kmeans) has already done it.
#' @export
do_prcomp <- function(df, ..., normalize_data=TRUE, max_nrow = NULL, allow_single_column = FALSE, seed = 1, na.rm = TRUE) {
# this evaluates select arguments like starts_with
selected_cols <- tidyselect::vars_select(names(df), !!! rlang::quos(...))
grouped_cols <- grouped_by(df)
# remove grouped col or target col
selected_cols <- setdiff(selected_cols, grouped_cols)
# NOTE(review): after the setdiff above, selected_cols can no longer contain
# any grouped column, so this check never triggers. If the intent is to error
# when a Repeat-By column is explicitly selected, the check would have to run
# before the setdiff -- confirm.
if (any(selected_cols %in% grouped_cols)) {
stop("Repeat-By column cannot be used as a variable column.")
}
# list and difftime etc. causes error in tidy_rowwise(model, type="biplot").
# For now, we are removing them upfront.
df <- df %>% dplyr::select(-where(is.list),
-where(lubridate::is.difftime),
-where(lubridate::is.duration),
-where(lubridate::is.interval),
-where(lubridate::is.period))
if(!is.null(seed)) { # Set seed before starting to call sample_n.
set.seed(seed)
}
# Fits PCA for one group (or the whole data when ungrouped).
each_func <- function(df) {
# sample the data for quicker turn around on UI,
# if data size is larger than specified max_nrow.
sampled_nrow <- NULL
if (!is.null(max_nrow) && nrow(df) > max_nrow) {
# Record that sampling happened.
sampled_nrow <- max_nrow
df <- df %>% sample_rows(max_nrow)
}
# As the name suggests, this preprocessing function was originally designed to be done
# before sampling, but we found that for this PCA function, that makes the
# process as a whole slower in the cases we tried. So, we are doing this after sampling.
if (na.rm) { # Do NA preprocessing under this if statement, so that it can be skipped if it is already done. For exp_kmeans.
filtered_df <- preprocess_factanal_data_before_sample(df, selected_cols)
selected_cols <- attr(filtered_df, 'predictors') # predictors are updated (removed) in preprocess_factanal_data_before_sample. Sync with it.
}
else {
filtered_df <- df
}
# select_ was not able to handle space in target_col. let's do it in base R way.
cleaned_df <- filtered_df[,colnames(filtered_df) %in% selected_cols, drop=FALSE]
# remove columns with only one unique value
cols_copy <- colnames(cleaned_df)
for (col in cols_copy) {
unique_val <- unique(cleaned_df[[col]])
if (length(unique_val) == 1) {
cleaned_df <- cleaned_df[colnames(cleaned_df) != col]
}
}
if (allow_single_column) { # This is when exp_kmeans calling this function wants to go ahead even with single column.
min_ncol <- 1
}
else {
min_ncol <- 2
}
if (length(colnames(cleaned_df)) < min_ncol) {
if (length(grouped_cols) < 1) {
# If without group_by, throw error to display message.
stop("There are not enough columns after removing the columns with only NA or a single value.")
}
else {
# skip this group if less than 2 column is left. (We can't handle single column for now.)
return(NULL)
}
}
# "scale." is an argument name. There is no such operator like ".=".
fit <- prcomp(cleaned_df, scale.=normalize_data)
fit$df <- filtered_df # add filtered df to model so that we can bind_col it for output. It needs to be the filtered one to match row number.
fit$grouped_cols <- grouped_cols
fit$sampled_nrow <- sampled_nrow
class(fit) <- c("prcomp_exploratory", class(fit))
fit
}
do_on_each_group(df, each_func, name = "model", with_unnest = FALSE)
}
#' extracts results from prcomp as a dataframe
#' @export
#' @param x A prcomp_exploratory model created by do_prcomp().
#' @param type Which view to return: "variances", "loadings", "biplot",
#'   "data" or "gathered_data".
#' @param n_sample Sample number for biplot. Default 5000, which is the default of our scatter plot.
#'   we use it for gathered_data for parallel coordinates too. sampling is applied before gather.
#' @param pretty.name Whether to rename the variance summary columns to human-readable names.
#' @param normalize_data Whether to normalize the original columns in the "data"/"gathered_data" output.
tidy.prcomp_exploratory <- function(x, type="variances", n_sample=NULL, pretty.name=FALSE, normalize_data=FALSE, ...) {
  if (type == "variances") {
    res <- as.data.frame(x$sdev*x$sdev) # square the standard deviations to make variances
    colnames(res)[1] <- "variance"
    # fct_inorder is to make order on chart right, e.g. PC2 before PC10.
    # (Consistency fix: qualify mutate with dplyr:: like the rest of this function.)
    res <- tibble::rownames_to_column(res, var="component") %>%
      dplyr::mutate(component = forcats::fct_inorder(component))
    total_variance <- sum(res$variance)
    res <- res %>% dplyr::mutate(cum_pct_variance = cumsum(variance), cum_pct_variance = cum_pct_variance/total_variance*100)
    res <- res %>% dplyr::mutate(pct_variance = variance/total_variance*100)
    if (pretty.name) {
      res <- res %>% dplyr::rename(`% Variance`=pct_variance, `Cummulated % Variance`=cum_pct_variance)
    }
  } else if (type == "loadings") {
    res <- tibble::rownames_to_column(as.data.frame(x$rotation[,]), var="measure")
    res <- res %>% tidyr::gather(component, value, dplyr::starts_with("PC"), na.rm = TRUE, convert = TRUE)
    res <- res %>% dplyr::mutate(component = forcats::fct_inorder(component)) # fct_inorder is to make order on chart right, e.g. PC2 before PC10
    res <- res %>% dplyr::mutate(value = value^2) # square it to make it squared cosine. the original value is cosine.
  } else if (type == "biplot") {
    # prepare loadings matrix
    loadings_matrix <- x$rotation[,1:2] # keep only PC1 and PC2 for biplot
    # prepare scores matrix
    scores_matrix <- x$x[,1:2] # keep only PC1 and PC2 for biplot
    if (is.null(n_sample)) { # set default of 5000 for biplot case.
      n_sample <- 5000
    }
    # sum of number of loading rows times 2 (because it is line between 2 points) and number of score rows should fit in n_sample.
    score_n_sample <- n_sample - nrow(loadings_matrix)*2
    # table of observations. bind original data so that color can be used later.
    res <- x$df
    orig_cols <- colnames(res)
    for (orig_col in orig_cols) {
      if (!is.numeric(res[[orig_col]])) {
        if (!is.logical(res[[orig_col]])) {
          # make categorical columns into factor with NA level, so that legend will show NA.
          # if we leave them as real NA, legend for NA would not be shown on biplot chart,
          # since we supress it not to show NAs from the lines for measures.
          res[[orig_col]] <- forcats::fct_explicit_na(as.factor(res[[orig_col]]), na_level="(NA)")
        } else {
          # make logical columns into factor with NA level, so that legend will show NA.
          res[[orig_col]] <- forcats::fct_explicit_na(factor(res[[orig_col]], levels = c("TRUE","FALSE")), na_level="(NA)")
        }
      }
    }
    res <- res %>% dplyr::bind_cols(as.data.frame(scores_matrix))
    if (!is.null(x$kmeans)) { # add cluster column if with kmeans.
      # res <- res %>% dplyr::mutate(cluster=factor(x$kmeans$cluster)) # this caused error when input had column x.
      res$cluster <- factor(x$kmeans$cluster)
    }
    res <- res %>% sample_rows(score_n_sample)
    # calculate scale ratio for displaying loadings on the same chart as scores.
    max_abs_loading <- max(abs(loadings_matrix))
    max_abs_score <- max(abs(c(res$PC1, res$PC2)))
    scale_ratio <- max_abs_score/max_abs_loading
    # name to appear at legend for dots in scatter plot.
    # (Consistency fix: call dplyr::rename explicitly like the rest of this
    # function, so we do not depend on dplyr being attached or on another
    # attached package masking rename().)
    res <- res %>% dplyr::rename(Observations=PC2)
    # scale loading_matrix so that the scale of measures and data points matches in the scatter plot.
    loadings_matrix <- loadings_matrix * scale_ratio
    loadings_df <- tibble::rownames_to_column(as.data.frame(loadings_matrix), var="measure_name") #TODO: what if name conflicts?
    loadings_df <- loadings_df %>% dplyr::rename(Measures=PC2) # use different column name for PC2 of measures.
    loadings_df0 <- loadings_df %>% dplyr::mutate(PC1=0, Measures=0) # create df for origin of coordinates.
    loadings_df <- loadings_df0 %>% dplyr::bind_rows(loadings_df)
    res <- res %>% dplyr::bind_rows(loadings_df)
    # fill group_by column so that Repeat By on chart works fine. loadings_df does not have values for the group_by column.
    res <- res %>% tidyr::fill(x$grouped_cols)
    res
  } else { # should be data or gathered_data
    res <- x$df
    if (!is.null(x$kmeans)) {
      # res <- res %>% dplyr::mutate(cluster=factor(x$kmeans$cluster)) # this caused error when input had column x.
      res$cluster <- factor(x$kmeans$cluster)
    }
    res <- res %>% dplyr::bind_cols(as.data.frame(x$x))
    # Fix: the attribute is spelled "dimnames"; the former "dimname" only
    # worked through attr()'s partial matching (exact = FALSE by default).
    column_names <- attr(x$rotation, "dimnames")[[1]]
    if (normalize_data) {
      res <- res %>% dplyr::mutate_at(column_names, exploratory::normalize)
    }
    if (!is.null(n_sample)) { # default is no sampling.
      # limit n_sample so that no more dots are created than the max that can be plotted on scatter plot, which is 5000.
      n_sample <- min(n_sample, floor(5000 / length(column_names)))
      res <- res %>% sample_rows(n_sample)
    }
    if (type == "gathered_data") { # for boxplot and parallel coordinates. this is only when with kmeans.
      # We used to drop columns other than cluster and ones used for clustering like this commented out line,
      # to keep only the data we use, but since we are showing Subject Column value
      # on parallel coordinates, we need to keep other columns, which would include Subject Column.
      # res <- res %>% dplyr::select(!!c(column_names,"cluster"))
      res <- res %>% dplyr::mutate(row_id=seq(n())) # row_id for line representation.
      res <- res %>% tidyr::gather(key="key",value="value",!!column_names)
    }
  }
  res
}
| /R/prcomp.R | permissive | jfontestad/exploratory_func | R | false | false | 9,578 | r | #' do PCA
#' @param df Data frame to analyze.
#' @param ... Column selection; tidyselect expressions like starts_with() are supported.
#' @param normalize_data Passed to prcomp as `scale.` -- whether to scale columns to unit variance.
#' @param max_nrow If not NULL, sample the data down to this many rows before fitting.
#' @param allow_single_column - Do not throw error and go ahead with PCA even if only one column is left after preprocessing. For K-means.
#' @param seed Seed set before sampling for reproducibility; NULL skips seeding.
#' @param na.rm Whether to run the NA preprocessing step. Can be FALSE when the caller (e.g. exp_kmeans) has already done it.
#' @export
do_prcomp <- function(df, ..., normalize_data=TRUE, max_nrow = NULL, allow_single_column = FALSE, seed = 1, na.rm = TRUE) {
# this evaluates select arguments like starts_with
selected_cols <- tidyselect::vars_select(names(df), !!! rlang::quos(...))
grouped_cols <- grouped_by(df)
# remove grouped col or target col
selected_cols <- setdiff(selected_cols, grouped_cols)
# NOTE(review): after the setdiff above, selected_cols can no longer contain
# any grouped column, so this check never triggers. If the intent is to error
# when a Repeat-By column is explicitly selected, the check would have to run
# before the setdiff -- confirm.
if (any(selected_cols %in% grouped_cols)) {
stop("Repeat-By column cannot be used as a variable column.")
}
# list and difftime etc. causes error in tidy_rowwise(model, type="biplot").
# For now, we are removing them upfront.
df <- df %>% dplyr::select(-where(is.list),
-where(lubridate::is.difftime),
-where(lubridate::is.duration),
-where(lubridate::is.interval),
-where(lubridate::is.period))
if(!is.null(seed)) { # Set seed before starting to call sample_n.
set.seed(seed)
}
# Fits PCA for one group (or the whole data when ungrouped).
each_func <- function(df) {
# sample the data for quicker turn around on UI,
# if data size is larger than specified max_nrow.
sampled_nrow <- NULL
if (!is.null(max_nrow) && nrow(df) > max_nrow) {
# Record that sampling happened.
sampled_nrow <- max_nrow
df <- df %>% sample_rows(max_nrow)
}
# As the name suggests, this preprocessing function was originally designed to be done
# before sampling, but we found that for this PCA function, that makes the
# process as a whole slower in the cases we tried. So, we are doing this after sampling.
if (na.rm) { # Do NA preprocessing under this if statement, so that it can be skipped if it is already done. For exp_kmeans.
filtered_df <- preprocess_factanal_data_before_sample(df, selected_cols)
selected_cols <- attr(filtered_df, 'predictors') # predictors are updated (removed) in preprocess_factanal_data_before_sample. Sync with it.
}
else {
filtered_df <- df
}
# select_ was not able to handle space in target_col. let's do it in base R way.
cleaned_df <- filtered_df[,colnames(filtered_df) %in% selected_cols, drop=FALSE]
# remove columns with only one unique value
cols_copy <- colnames(cleaned_df)
for (col in cols_copy) {
unique_val <- unique(cleaned_df[[col]])
if (length(unique_val) == 1) {
cleaned_df <- cleaned_df[colnames(cleaned_df) != col]
}
}
if (allow_single_column) { # This is when exp_kmeans calling this function wants to go ahead even with single column.
min_ncol <- 1
}
else {
min_ncol <- 2
}
if (length(colnames(cleaned_df)) < min_ncol) {
if (length(grouped_cols) < 1) {
# If without group_by, throw error to display message.
stop("There are not enough columns after removing the columns with only NA or a single value.")
}
else {
# skip this group if less than 2 column is left. (We can't handle single column for now.)
return(NULL)
}
}
# "scale." is an argument name. There is no such operator like ".=".
fit <- prcomp(cleaned_df, scale.=normalize_data)
fit$df <- filtered_df # add filtered df to model so that we can bind_col it for output. It needs to be the filtered one to match row number.
fit$grouped_cols <- grouped_cols
fit$sampled_nrow <- sampled_nrow
class(fit) <- c("prcomp_exploratory", class(fit))
fit
}
do_on_each_group(df, each_func, name = "model", with_unnest = FALSE)
}
#' extracts results from prcomp as a dataframe
#' @export
#' @param x A prcomp_exploratory model created by do_prcomp().
#' @param type Which view to return: "variances", "loadings", "biplot",
#'   "data" or "gathered_data".
#' @param n_sample Sample number for biplot. Default 5000, which is the default of our scatter plot.
#'   we use it for gathered_data for parallel coordinates too. sampling is applied before gather.
#' @param pretty.name Whether to rename the variance summary columns to human-readable names.
#' @param normalize_data Whether to normalize the original columns in the "data"/"gathered_data" output.
tidy.prcomp_exploratory <- function(x, type="variances", n_sample=NULL, pretty.name=FALSE, normalize_data=FALSE, ...) {
  if (type == "variances") {
    res <- as.data.frame(x$sdev*x$sdev) # square the standard deviations to make variances
    colnames(res)[1] <- "variance"
    # fct_inorder is to make order on chart right, e.g. PC2 before PC10.
    # (Consistency fix: qualify mutate with dplyr:: like the rest of this function.)
    res <- tibble::rownames_to_column(res, var="component") %>%
      dplyr::mutate(component = forcats::fct_inorder(component))
    total_variance <- sum(res$variance)
    res <- res %>% dplyr::mutate(cum_pct_variance = cumsum(variance), cum_pct_variance = cum_pct_variance/total_variance*100)
    res <- res %>% dplyr::mutate(pct_variance = variance/total_variance*100)
    if (pretty.name) {
      res <- res %>% dplyr::rename(`% Variance`=pct_variance, `Cummulated % Variance`=cum_pct_variance)
    }
  } else if (type == "loadings") {
    res <- tibble::rownames_to_column(as.data.frame(x$rotation[,]), var="measure")
    res <- res %>% tidyr::gather(component, value, dplyr::starts_with("PC"), na.rm = TRUE, convert = TRUE)
    res <- res %>% dplyr::mutate(component = forcats::fct_inorder(component)) # fct_inorder is to make order on chart right, e.g. PC2 before PC10
    res <- res %>% dplyr::mutate(value = value^2) # square it to make it squared cosine. the original value is cosine.
  } else if (type == "biplot") {
    # prepare loadings matrix
    loadings_matrix <- x$rotation[,1:2] # keep only PC1 and PC2 for biplot
    # prepare scores matrix
    scores_matrix <- x$x[,1:2] # keep only PC1 and PC2 for biplot
    if (is.null(n_sample)) { # set default of 5000 for biplot case.
      n_sample <- 5000
    }
    # sum of number of loading rows times 2 (because it is line between 2 points) and number of score rows should fit in n_sample.
    score_n_sample <- n_sample - nrow(loadings_matrix)*2
    # table of observations. bind original data so that color can be used later.
    res <- x$df
    orig_cols <- colnames(res)
    for (orig_col in orig_cols) {
      if (!is.numeric(res[[orig_col]])) {
        if (!is.logical(res[[orig_col]])) {
          # make categorical columns into factor with NA level, so that legend will show NA.
          # if we leave them as real NA, legend for NA would not be shown on biplot chart,
          # since we supress it not to show NAs from the lines for measures.
          res[[orig_col]] <- forcats::fct_explicit_na(as.factor(res[[orig_col]]), na_level="(NA)")
        } else {
          # make logical columns into factor with NA level, so that legend will show NA.
          res[[orig_col]] <- forcats::fct_explicit_na(factor(res[[orig_col]], levels = c("TRUE","FALSE")), na_level="(NA)")
        }
      }
    }
    res <- res %>% dplyr::bind_cols(as.data.frame(scores_matrix))
    if (!is.null(x$kmeans)) { # add cluster column if with kmeans.
      # res <- res %>% dplyr::mutate(cluster=factor(x$kmeans$cluster)) # this caused error when input had column x.
      res$cluster <- factor(x$kmeans$cluster)
    }
    res <- res %>% sample_rows(score_n_sample)
    # calculate scale ratio for displaying loadings on the same chart as scores.
    max_abs_loading <- max(abs(loadings_matrix))
    max_abs_score <- max(abs(c(res$PC1, res$PC2)))
    scale_ratio <- max_abs_score/max_abs_loading
    # name to appear at legend for dots in scatter plot.
    # (Consistency fix: call dplyr::rename explicitly like the rest of this
    # function, so we do not depend on dplyr being attached or on another
    # attached package masking rename().)
    res <- res %>% dplyr::rename(Observations=PC2)
    # scale loading_matrix so that the scale of measures and data points matches in the scatter plot.
    loadings_matrix <- loadings_matrix * scale_ratio
    loadings_df <- tibble::rownames_to_column(as.data.frame(loadings_matrix), var="measure_name") #TODO: what if name conflicts?
    loadings_df <- loadings_df %>% dplyr::rename(Measures=PC2) # use different column name for PC2 of measures.
    loadings_df0 <- loadings_df %>% dplyr::mutate(PC1=0, Measures=0) # create df for origin of coordinates.
    loadings_df <- loadings_df0 %>% dplyr::bind_rows(loadings_df)
    res <- res %>% dplyr::bind_rows(loadings_df)
    # fill group_by column so that Repeat By on chart works fine. loadings_df does not have values for the group_by column.
    res <- res %>% tidyr::fill(x$grouped_cols)
    res
  } else { # should be data or gathered_data
    res <- x$df
    if (!is.null(x$kmeans)) {
      # res <- res %>% dplyr::mutate(cluster=factor(x$kmeans$cluster)) # this caused error when input had column x.
      res$cluster <- factor(x$kmeans$cluster)
    }
    res <- res %>% dplyr::bind_cols(as.data.frame(x$x))
    # Fix: the attribute is spelled "dimnames"; the former "dimname" only
    # worked through attr()'s partial matching (exact = FALSE by default).
    column_names <- attr(x$rotation, "dimnames")[[1]]
    if (normalize_data) {
      res <- res %>% dplyr::mutate_at(column_names, exploratory::normalize)
    }
    if (!is.null(n_sample)) { # default is no sampling.
      # limit n_sample so that no more dots are created than the max that can be plotted on scatter plot, which is 5000.
      n_sample <- min(n_sample, floor(5000 / length(column_names)))
      res <- res %>% sample_rows(n_sample)
    }
    if (type == "gathered_data") { # for boxplot and parallel coordinates. this is only when with kmeans.
      # We used to drop columns other than cluster and ones used for clustering like this commented out line,
      # to keep only the data we use, but since we are showing Subject Column value
      # on parallel coordinates, we need to keep other columns, which would include Subject Column.
      # res <- res %>% dplyr::select(!!c(column_names,"cluster"))
      res <- res %>% dplyr::mutate(row_id=seq(n())) # row_id for line representation.
      res <- res %>% tidyr::gather(key="key",value="value",!!column_names)
    }
  }
  res
}
|
# Day 10 -- Advent of Code 2017 (Knot Hash)
# NOTE: avoid setwd() with a machine-specific absolute path in scripts; run
# the script from the project directory (or use a project-relative path)
# instead.
# setwd("C:/Users/David.simons/Documents/advent of code")
# part 1 ----
# Puzzle input: the list of twist lengths to process.
lengths <- c(34, 88, 2, 222, 254, 93, 150, 0, 199, 255, 39, 32, 137, 136, 1, 167)
# One round of the AoC 2017 day-10 "knot" twists: for each length, reverse
# that many consecutive elements of the circular list (wrapping past the
# end), then advance the position by length + skip, incrementing skip each
# time. Returns the product of the first two elements of the final list.
#
# Bug fix: the original computed `start:end` even for a length of 0, and in
# R `p:(p-1)` counts *down*, so a zero-length twist wrongly swapped two
# elements (the puzzle input contains a 0). Per the puzzle rules a length of
# 0 or 1 leaves the list unchanged while position and skip still advance.
getResult <- function(lengths, lst = 0:255) {
  n <- length(lst)
  pos <- 1
  skip <- 0
  for (x in lengths) {
    if (x > 1) {
      start <- pos
      end <- pos + x - 1
      # Indices of the span to reverse, wrapping around the end of the list.
      select <- if (end <= n) start:end else c(start:n, 1:(end - n))
      lst[select] <- rev(lst[select])
    }
    pos <- pos + x + skip
    while (pos > n) pos <- pos - n
    skip <- skip + 1
  }
  lst[1] * lst[2]
}
print(getResult(lengths)) # part 1 answer: product of the first two list values
# part 2 ----
input <- "34,88,2,222,254,93,150,0,199,255,39,32,137,136,1,167"
# Treat the raw input as bytes (ASCII codes) and append the standard
# knot-hash length suffix. Base utf8ToInt() replaces the former
# R.oo::charToInt(unlist(strsplit(input, NULL))) -- same result for ASCII,
# no extra package needed.
lengths <- c(utf8ToInt(input), 17, 31, 73, 47, 23)
# Full AoC 2017 day-10 knot hash: run 64 rounds of twists over the sparse
# list, XOR-fold each consecutive block of 16 values into a dense hash, and
# return it as a 32-character lowercase hex string. Assumes the default
# 256-element list.
#
# Bug fix: as in getResult(), a zero-length twist must leave the list
# unchanged (R's `p:(p-1)` counts down and would wrongly swap two elements);
# position and skip still advance.
getHash <- function(lengths, lst = 0:255) {
  # get sparse hash (as lst)
  n <- length(lst)
  pos <- 1
  skip <- 0
  for (x in rep(lengths, 64)) {
    if (x > 1) {
      start <- pos
      end <- pos + x - 1
      select <- if (end <= n) start:end else c(start:n, 1:(end - n))
      lst[select] <- rev(lst[select])
    }
    pos <- pos + x + skip
    while (pos > n) pos <- pos - n
    skip <- (skip + 1) %% n
  }
  # Dense hash: XOR each run of 16 sparse values down to one byte, then
  # format each of the 16 bytes as a two-digit hex number.
  denseHash <- vapply(split(lst, rep(1:16, each = 16)),
                      function(sub) Reduce(bitwXor, sub), integer(1))
  do.call(paste0, as.list(as.character(as.hexmode(denseHash), width = 2)))
}
print(getHash(lengths)) # part 2 answer: 32-character knot hash
| /day10.R | no_license | d-sci/Advent-of-Code-2017 | R | false | false | 1,345 | r | #Day 10
# NOTE: avoid setwd() with a machine-specific absolute path in scripts; run
# the script from the project directory (or use a project-relative path)
# instead.
# setwd("C:/Users/David.simons/Documents/advent of code")
# part 1 ----
# Puzzle input: the list of twist lengths to process.
lengths <- c(34, 88, 2, 222, 254, 93, 150, 0, 199, 255, 39, 32, 137, 136, 1, 167)
# One round of the AoC 2017 day-10 "knot" twists: for each length, reverse
# that many consecutive elements of the circular list (wrapping past the
# end), then advance the position by length + skip, incrementing skip each
# time. Returns the product of the first two elements of the final list.
#
# Bug fix: the original computed `start:end` even for a length of 0, and in
# R `p:(p-1)` counts *down*, so a zero-length twist wrongly swapped two
# elements (the puzzle input contains a 0). Per the puzzle rules a length of
# 0 or 1 leaves the list unchanged while position and skip still advance.
getResult <- function(lengths, lst = 0:255) {
  n <- length(lst)
  pos <- 1
  skip <- 0
  for (x in lengths) {
    if (x > 1) {
      start <- pos
      end <- pos + x - 1
      # Indices of the span to reverse, wrapping around the end of the list.
      select <- if (end <= n) start:end else c(start:n, 1:(end - n))
      lst[select] <- rev(lst[select])
    }
    pos <- pos + x + skip
    while (pos > n) pos <- pos - n
    skip <- skip + 1
  }
  lst[1] * lst[2]
}
print(getResult(lengths)) # part 1 answer: product of the first two list values
# part 2 ----
input <- "34,88,2,222,254,93,150,0,199,255,39,32,137,136,1,167"
# Treat the raw input as bytes (ASCII codes) and append the standard
# knot-hash length suffix. Base utf8ToInt() replaces the former
# R.oo::charToInt(unlist(strsplit(input, NULL))) -- same result for ASCII,
# no extra package needed.
lengths <- c(utf8ToInt(input), 17, 31, 73, 47, 23)
# Full AoC 2017 day-10 knot hash: run 64 rounds of twists over the sparse
# list, XOR-fold each consecutive block of 16 values into a dense hash, and
# return it as a 32-character lowercase hex string. Assumes the default
# 256-element list.
#
# Bug fix: as in getResult(), a zero-length twist must leave the list
# unchanged (R's `p:(p-1)` counts down and would wrongly swap two elements);
# position and skip still advance.
getHash <- function(lengths, lst = 0:255) {
  # get sparse hash (as lst)
  n <- length(lst)
  pos <- 1
  skip <- 0
  for (x in rep(lengths, 64)) {
    if (x > 1) {
      start <- pos
      end <- pos + x - 1
      select <- if (end <= n) start:end else c(start:n, 1:(end - n))
      lst[select] <- rev(lst[select])
    }
    pos <- pos + x + skip
    while (pos > n) pos <- pos - n
    skip <- (skip + 1) %% n
  }
  # Dense hash: XOR each run of 16 sparse values down to one byte, then
  # format each of the 16 bytes as a two-digit hex number.
  denseHash <- vapply(split(lst, rep(1:16, each = 16)),
                      function(sub) Reduce(bitwXor, sub), integer(1))
  do.call(paste0, as.list(as.character(as.hexmode(denseHash), width = 2)))
}
print(getHash(lengths)) # part 2 answer: 32-character knot hash
|
#-------------------------------- NOTE ----------------------------------------
# 1 This code is to train the ann model for the 1st layer;
# 2 Coder: Cong Feng Date: 2016/06/24 @ DOES Lab, UTD
#--------------------------------------------------------------------------------
# Train an ANN model on a table whose *last* column is the target.
#
# training_data      data.frame/matrix; all columns but the last are inputs,
#                    the last column is the target.
# learning_function  learning function name forwarded to RSNNS::mlp
#                    ('type1' only).
# act_function       hidden-layer activation function name for RSNNS::mlp
#                    ('type1' only).
# version_ctrl       which model to fit: 'type1' (RSNNS::mlp),
#                    'type2' (caret::train with method "nnet") or
#                    'type3' (RSNNS::rbf).
# Returns the fitted model object.
ann_train <- function(training_data, learning_function, act_function, version_ctrl) {
  library(caret)
  library(RSNNS)
  library(nnet)
  # x is the model inputs and y is the model target (last column).
  x <- training_data[, 1:(ncol(training_data) - 1)]
  y <- training_data[, (ncol(training_data))]
  # Train model directly ('type2' goes through caret, which took too long on
  # the full dataset in earlier experiments).
  if (version_ctrl == 'type1') {
    model_ann <- mlp(x, y, size = c(30), maxit = 1000,
                     initFunc = "Randomize_Weights", initFuncParams = c(-0.3, 0.3),
                     learnFunc = learning_function, learnFuncParams = c(0.2, 0),
                     updateFunc = "Topological_Order", updateFuncParams = c(0),
                     hiddenActFunc = act_function, shufflePatterns = TRUE, linOut = FALSE,
                     inputsTest = NULL, targetsTest = NULL, pruneFunc = NULL,
                     pruneFuncParams = NULL)
  } else if (version_ctrl == 'type2') {
    model_ann <- caret::train(x, y,
                              method = "nnet",
                              preProcess = "range", # scales the data to be within [0,1]
                              tuneLength = 5,
                              trace = FALSE,
                              maxit = 100)
  } else if (version_ctrl == 'type3') {
    model_ann <- rbf(x, y, size = 5, maxit = 1000,
                     initFuncParams = c(0, 1, 0, 0.01, 0.001),
                     learnFuncParams = c(1e-8, 0, 1e-8, 0.1, 0.8), linOut = FALSE)
  } else {
    # Bug fix: an unknown version_ctrl previously fell through to
    # return(model_ann) and failed with the cryptic error
    # "object 'model_ann' not found". Fail fast with a clear message instead.
    stop("version_ctrl must be one of 'type1', 'type2' or 'type3', got: ",
         version_ctrl)
  }
  model_ann
}
| /code/ann_train.R | permissive | UTD-DOES/M3 | R | false | false | 1,804 | r | #-------------------------------- NOTE ----------------------------------------
# 1 This code is to train the ann model for the 1st layer;
# 2 Coder: Cong Feng Date: 2016/06/24 @ DOES Lab, UTD
#--------------------------------------------------------------------------------
# Train an ANN model on a table whose *last* column is the target.
#
# training_data      data.frame/matrix; all columns but the last are inputs,
#                    the last column is the target.
# learning_function  learning function name forwarded to RSNNS::mlp
#                    ('type1' only).
# act_function       hidden-layer activation function name for RSNNS::mlp
#                    ('type1' only).
# version_ctrl       which model to fit: 'type1' (RSNNS::mlp),
#                    'type2' (caret::train with method "nnet") or
#                    'type3' (RSNNS::rbf).
# Returns the fitted model object.
ann_train <- function(training_data, learning_function, act_function, version_ctrl) {
  library(caret)
  library(RSNNS)
  library(nnet)
  # x is the model inputs and y is the model target (last column).
  x <- training_data[, 1:(ncol(training_data) - 1)]
  y <- training_data[, (ncol(training_data))]
  # Train model directly ('type2' goes through caret, which took too long on
  # the full dataset in earlier experiments).
  if (version_ctrl == 'type1') {
    model_ann <- mlp(x, y, size = c(30), maxit = 1000,
                     initFunc = "Randomize_Weights", initFuncParams = c(-0.3, 0.3),
                     learnFunc = learning_function, learnFuncParams = c(0.2, 0),
                     updateFunc = "Topological_Order", updateFuncParams = c(0),
                     hiddenActFunc = act_function, shufflePatterns = TRUE, linOut = FALSE,
                     inputsTest = NULL, targetsTest = NULL, pruneFunc = NULL,
                     pruneFuncParams = NULL)
  } else if (version_ctrl == 'type2') {
    model_ann <- caret::train(x, y,
                              method = "nnet",
                              preProcess = "range", # scales the data to be within [0,1]
                              tuneLength = 5,
                              trace = FALSE,
                              maxit = 100)
  } else if (version_ctrl == 'type3') {
    model_ann <- rbf(x, y, size = 5, maxit = 1000,
                     initFuncParams = c(0, 1, 0, 0.01, 0.001),
                     learnFuncParams = c(1e-8, 0, 1e-8, 0.1, 0.8), linOut = FALSE)
  } else {
    # Bug fix: an unknown version_ctrl previously fell through to
    # return(model_ann) and failed with the cryptic error
    # "object 'model_ann' not found". Fail fast with a clear message instead.
    stop("version_ctrl must be one of 'type1', 'type2' or 'type3', got: ",
         version_ctrl)
  }
  model_ann
}
|
# -------------------------------------------------------------------------------------------------
# Build a batch job function for one learner: each invocation samples a random
# hyperparameter configuration (when the learner has a tunable space), runs
# the OpenML task, optionally uploads the run, and returns a one-row result
# (run id, sampled parameters, aggregated performance).
# -------------------------------------------------------------------------------------------------
getBatchAlgoWrapper <- function(learner) {
  function(job, static, dynamic) {
    obj <- convertOMLTaskToMlr(static$task)
    task.n <- mlr::getTaskSize(obj$mlr.task)
    task.p <- mlr::getTaskNFeats(obj$mlr.task)
    par.set <- getHyperSpace(learner = learner, p = task.p, n = task.n)
    has.params <- length(par.set$pars) != 0
    if (has.params) {
      params <- ParamHelpers::sampleValue(par.set, trafo = TRUE)
      new.lrn <- setHyperPars(learner = learner, par.vals = params)
    } else {
      new.lrn <- learner
    }
    oml.run <- runTaskMlr(task = static$task, learner = new.lrn)
    run.id <- NA
    if (SHOULD.UPLOAD) {
      run.id <- uploadOMLRun(run = oml.run, upload.bmr = TRUE)
      oml.run$run$run.id <- run.id
    }
    perf <- getBMRAggrPerformances(bmr = oml.run$bmr, as.df = TRUE)
    if (has.params) {
      cbind(run.id, as.data.frame(params), perf)
    } else {
      cbind(run.id, perf)
    }
  }
}
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
| /R/getBatchAlgoWrapper.R | no_license | openml/randomBot | R | false | false | 1,343 | r | # -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# Build a batch job function for one learner: each invocation samples a random
# hyperparameter configuration (when the learner has a tunable space), runs
# the OpenML task, optionally uploads the run, and returns a one-row result
# (run id, sampled parameters, aggregated performance).
getBatchAlgoWrapper <- function(learner) {
  function(job, static, dynamic) {
    obj <- convertOMLTaskToMlr(static$task)
    task.n <- mlr::getTaskSize(obj$mlr.task)
    task.p <- mlr::getTaskNFeats(obj$mlr.task)
    par.set <- getHyperSpace(learner = learner, p = task.p, n = task.n)
    has.params <- length(par.set$pars) != 0
    if (has.params) {
      params <- ParamHelpers::sampleValue(par.set, trafo = TRUE)
      new.lrn <- setHyperPars(learner = learner, par.vals = params)
    } else {
      new.lrn <- learner
    }
    oml.run <- runTaskMlr(task = static$task, learner = new.lrn)
    run.id <- NA
    if (SHOULD.UPLOAD) {
      run.id <- uploadOMLRun(run = oml.run, upload.bmr = TRUE)
      oml.run$run$run.id <- run.id
    }
    perf <- getBMRAggrPerformances(bmr = oml.run$bmr, as.df = TRUE)
    if (has.params) {
      cbind(run.id, as.data.frame(params), perf)
    } else {
      cbind(run.id, perf)
    }
  }
}
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
|
context("examples")
# some helpers
# Source one bundled example script, silencing its standard output.
# The script runs inside a fresh environment, with the working directory
# temporarily switched to the example's folder (restored via on.exit).
# Afterwards the environment is emptied and gc() is run -- presumably to
# release resources the example allocated (TODO confirm).
run_example <- function(example) {
env <- new.env()
capture.output({
example_path <- system.file("examples", example, package = "tensorflow")
old_wd <- setwd(dirname(example_path))
on.exit(setwd(old_wd), add = TRUE)
source(basename(example_path), local = env)
}, type = "output")
rm(list = ls(env), envir = env)
gc()
}
# Only exercise the example scripts when the TENSORFLOW_TEST_EXAMPLES
# environment variable is set to a non-empty value; without an else branch
# the `if` evaluates to NULL, so `examples` is NULL and no tests are created.
examples <- if (nzchar(Sys.getenv("TENSORFLOW_TEST_EXAMPLES"))) {
examples <- c("hello.R",
"introduction.R",
"mnist/mnist_softmax.R",
"mnist/fully_connected_feed.R",
"regression/tensorflow_linear_regression.R")
if (tf_version() >= "2.0") {
# disable examples since tf_compat() requires session restart
examples <- NULL
}
examples
}
# One test per example; iterating over NULL runs zero times, so this is a
# no-op unless `examples` was populated above.
for (example in examples) {
test_that(paste(example, "example runs successfully"), {
skip_if_no_tensorflow()
# expect_error(expr, NA) asserts that running the example raises NO error.
expect_error(run_example(example), NA)
})
}
| /tests/testthat/test-examples.R | permissive | rstudio/tensorflow | R | false | false | 974 | r | context("examples")
# some helpers
# Source one bundled example script, silencing its standard output.
# The script runs inside a fresh environment, with the working directory
# temporarily switched to the example's folder (restored via on.exit).
# Afterwards the environment is emptied and gc() is run -- presumably to
# release resources the example allocated (TODO confirm).
run_example <- function(example) {
env <- new.env()
capture.output({
example_path <- system.file("examples", example, package = "tensorflow")
old_wd <- setwd(dirname(example_path))
on.exit(setwd(old_wd), add = TRUE)
source(basename(example_path), local = env)
}, type = "output")
rm(list = ls(env), envir = env)
gc()
}
# Only exercise the example scripts when the TENSORFLOW_TEST_EXAMPLES
# environment variable is set to a non-empty value; without an else branch
# the `if` evaluates to NULL, so `examples` is NULL and no tests are created.
examples <- if (nzchar(Sys.getenv("TENSORFLOW_TEST_EXAMPLES"))) {
examples <- c("hello.R",
"introduction.R",
"mnist/mnist_softmax.R",
"mnist/fully_connected_feed.R",
"regression/tensorflow_linear_regression.R")
if (tf_version() >= "2.0") {
# disable examples since tf_compat() requires session restart
examples <- NULL
}
examples
}
# One test per example; iterating over NULL runs zero times, so this is a
# no-op unless `examples` was populated above.
for (example in examples) {
test_that(paste(example, "example runs successfully"), {
skip_if_no_tensorflow()
# expect_error(expr, NA) asserts that running the example raises NO error.
expect_error(run_example(example), NA)
})
}
|
#' Summarize the results of a POUMM-fit
#'
#' @param object a POUMM object returned by POUMM-function (see ?POUMM).
#' @param ... Not used, but declared for consistency with the generic method summary.
#' @param startMCMC,endMCMC integers indicating the range of the MCMC chains
#' to be used for the analysis (excluding the initial warm-up phase)
#' @param thinMCMC thinning interval of the MCMC chain to avoid strong
#' autocorrelation between sampled elements;
#' @param stats a named list of functions of the form function(par) { number },
#' which are called for each sample of each mcmc chain in object. Defaults to
#' a call of statistics(object) returning a list of statistics functions relevant for
#' the object. See also statistics.
#' @param mode a character indicating the desired format of the returned summary
#' as follows:
#' 'short' - a data.table with the ML and MCMC estimates of heritability,
#' model parameters, root-value and other statistics.
#' 'long' - same information as in 'short' but including also the mcmc samples,
#' which can be convenient for plotting and further post-processing.
#' 'expert' - a list containing the fit specification together with the raw
#' per-chain ML and MCMC analysis tables.
#'
#' @return A data.table in 'short'/'long' mode, or a list in 'expert' mode;
#' the result carries class 'summary.POUMM'.
#'
#' @import data.table
#' @import coda
#' @importFrom stats AIC
#'
#' @export
summary.POUMM <- function(object, ...,
startMCMC = NA, endMCMC = NA, thinMCMC = 1000,
stats = statistics(object),
mode = c('short', 'long', 'expert')) {
# declare global variables to avoid CRAN CHECK NOTES "no visible binding":
N <- MLE <- samplePriorMCMC <- HPD <- HPD50 <- ESS <- HPDUpperFiltered <-
HPDLowerFiltered <- value <- HPDUpper <- HPDLower <- it <- PostMean <- mcs <-
ESS <- nChains <- chain <- G.R. <- stat <- Mean <- NULL
mode <- tolower(mode)
tipTimes <- nodeTimes(object$pruneInfo$tree, tipsOnly = TRUE)
tMax <- max(tipTimes)
tMean <- mean(tipTimes)
# NOTE(review): tMax, tMean, parLower and parUpper are computed here but not
# referenced later in this method -- confirm whether they are needed (e.g.
# by the default `stats` functions) or can be removed.
parLower <- matrix(object$spec$parLower, nrow = 1)
parUpper <- matrix(object$spec$parUpper, nrow = 1)
parML <- matrix(object$fitML$par, nrow = 1)
# ML-based point estimates: one one-row data.table per statistic.
anlist <- lapply(1:length(stats), function(i) {
data.table(stat = names(stats)[i], MLE = stats[[i]](parML))
})
anlist <- c(anlist, list(
data.table(stat = "logpost", MLE = NA),
data.table(stat = "loglik", MLE = object$fitML$value),
data.table(stat = "AIC", MLE = AIC(object)),
data.table(stat = "AICc", MLE = AIC(object) + 2*object$dof*(object$dof+1)/(object$N-object$dof-1)),
data.table(stat = "g0", MLE = attr(object$fitML$value, "g0"))
))
an.ML <- rbindlist(anlist)
an.ML[, N:=object$N]
setcolorder(an.ML, c('stat', 'N', 'MLE'))
if(!is.null(object$fitMCMC)) {
# Default warm-up: discard the first 10% of the MCMC samples.
if(is.na(startMCMC)) {
startMCMC <- object$spec$nSamplesMCMC / 10
}
if(is.na(endMCMC)) {
endMCMC <- object$spec$nSamplesMCMC
}
# Per-chain MCMC summaries for each user-supplied statistic ...
anlist <- lapply(1:length(stats), function(i) {
analyseMCMCs(object$fitMCMC$chains,
stat = stats[[i]], statName = names(stats)[i],
start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
as.dt = TRUE)
})
# ... plus the built-in statistics (log-posterior, log-likelihood, AIC/AICc).
anlist <- c(anlist, list(
analyseMCMCs(object$fitMCMC$chains,
stat=NULL, statName='logpost',
start = startMCMC, end=endMCMC, thinMCMC = thinMCMC,
as.dt = TRUE),
analyseMCMCs(object$fitMCMC$chains,
stat = NULL, statName='loglik', logprior=object$spec$parPriorMCMC,
start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
as.dt = TRUE),
analyseMCMCs(object$fitMCMC$chains,
stat = NULL, statName='AIC', logprior=object$spec$parPriorMCMC,
start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
as.dt = TRUE, k = object$dof, N = object$N),
analyseMCMCs(object$fitMCMC$chains,
stat = NULL, statName='AICc', logprior=object$spec$parPriorMCMC,
start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
as.dt = TRUE, k = object$dof, N = object$N)
))
# Add g0 unless the caller already supplied it as a custom statistic.
if( !("g0" %in% names(stats)) ) {
anlist <- c(anlist, list(
analyseMCMCs(object$fitMCMC$chains,
stat=NULL, statName='g0', logprior=object$spec$parPriorMCMC,
start=startMCMC, end=endMCMC, thinMCMC=thinMCMC,
as.dt=TRUE))
)
}
an.MCMC <- rbindlist(anlist)
# Only the first chain samples from the prior (when enabled in the spec).
an.MCMC[, samplePriorMCMC:=c(object$spec$samplePriorMCMC,
rep(FALSE, object$spec$nChainsMCMC - 1))]
if(mode[1] != 'expert') {
# Aggregate across chains: pooled means, averaged HPD intervals, summed
# ESS, and the Gelman-Rubin diagnostic when more than one chain is present.
an.MCMC <- an.MCMC[,
list(
PostMean = mean(unlist(Mean)),
HPD = list(colMeans(do.call(rbind, HPD))),
HPD50 = list(colMeans(do.call(rbind, HPD50))),
start = start(mcs),
end = end(mcs),
thin = thin(mcs),
ESS = sum(unlist(ESS)),
G.R. = if(length(mcs)>1) {
gelman.diag(mcs, autoburnin=FALSE)$psrf[1]
} else {
as.double(NA)
},
nChains = length(mcs),
mcmc = mcmc.list(mcmc(do.call(rbind, mcs)))),
by=list(stat, samplePriorMCMC)]
}
if(mode[1] == 'short') {
an.MCMC <- an.MCMC[
samplePriorMCMC == FALSE,
list(stat, PostMean, HPD, ESS, G.R.)]
} else if(mode[1] == 'long') {
# NOTE(review): `thin = thinMCMC` here overrides the `thin(mcs)` value
# computed during the aggregation above -- confirm this is intended.
an.MCMC <- an.MCMC[
samplePriorMCMC == FALSE,
list(stat, PostMean, HPD, HPD50, start, end,
thin = thinMCMC, ESS, G.R., nChains, mcmc)]
} else if(mode[1] == 'expert') {
# Expert mode keeps the raw per-chain rows (no cross-chain aggregation).
an.MCMC <- an.MCMC[, list(stat, samplePriorMCMC,
PostMean = Mean, HPD, HPD50, start, end, thin = thinMCMC,
ESS, mcmc = mcs, chain)]
} else {
warning(paste('mode should be one of "short", "long" or "expert", but was', mode[1], '.'))
}
} else {
an.MCMC <- NULL
}
if(mode[1] %in% c('short', 'long')) {
if(!is.null(an.ML) & !is.null(an.MCMC)) {
res <- merge(an.ML, an.MCMC, by = 'stat', all = TRUE, sort = FALSE)
# Replace missing HPD entries with NA intervals so downstream code can
# rely on their presence.
res[sapply(HPD, is.null), HPD:=list(list(as.double(c(NA, NA))))]
if(mode[1] == 'long')
res[sapply(HPD50, is.null), HPD50:=list(list(as.double(c(NA, NA))))]
} else if(!is.null(an.ML)) {
res <- an.ML
} else if(!is.null(an.MCMC)) {
res <- an.MCMC
}
} else {
res <- list(spec = object$spec, startMCMC = startMCMC, endMCMC = endMCMC,
thinMCMC = thinMCMC,
ML = an.ML, MCMC = an.MCMC,
MCMCBetterLik = object$MCMCBetterLik)
}
class(res) <- c('summary.POUMM', class(res))
res
}
#' Plot a summary of a POUMM fit
#' @param x An object of class POUMM.
#' @param type A character indicating the type of plot(s) to be generated.
#' Defaults to "MCMC", resulting in a trace and density plot for the selected
#' statistics (see argument stat). Currently, only 'MCMC' type is supported.
#' @param doPlot Logical indicating whether a plot should be printed on the
#' currently active graphics device or whether only to return a list of plot-
#' objects for further processing. Defaults to TRUE.
#' @param stat A character vector with the names of statistics to be plotted.
#' These should be names from the stats-list (see argument statFunctions).
#' Defaults to c("alpha", "theta", "sigma", "sigmae", "H2tMean", "H2tInf").
#' @param chain A vector of integers indicating the chains to be plotted.
#' @param doZoomIn (type MCMC only) A logical value indicating whether the
#' produced plots should have a limitation on the x-axis according to an
#' expression set in zoomInFilter (see below). Default value is FALSE.
#' @param zoomInFilter A character string which evaluates as logical value. If
#' doZoomIn is set to TRUE, this filter is applied to each point in each MCMC
#' chain and the data-point is filtered out if it evaluates to FALSE. This
#' allows to zoomIn the x-axis of density plots but should be used with caution,
#' since filtering out points from the MCMC-sample can affect the kernel densities.
#' Unfortunately, filtering out values is currently the only way to affect the
#' limits of individual facets in ggplot2. The default value is a complicated
#' expression involving the HPD from all MCMC chains (normally one chain from the
#' prior and 2 chains from the posterior):
#' zoomInFilter = paste0("stat %in% c('H2e','H2tMean','H2tInf','H2tMax') |",
# "(value <= median(HPDUpper) + 4 * (median(HPDUpper) - median(HPDLower)) &",
# "value >= median(HPDLower) - 4 * (median(HPDUpper) - median(HPDLower)))").
#' The identifiers in this expression can be any
#' column names found in a summary of a POUMM object.
#' @param palette A vector of colors (can be character strings) corresponding to the
#' different chains (in their order 1 (prior), 2, 3). Defaults to c("#999999",
#' "#0072B2", "#CC79A7", "#E69F00", "#D55E00", "#56B4E9", "#009E73", "#F0E442"),
#' which is a color-blind friendly palette.
#' @param prettyNames A logical indicating if greek letters and sub/superscripts
#' should be used for the names of columns in the posterior density pairs-plot.
#' @param showUnivarDensityOnDiag A logical indicating if univariate density
#' plots should be displayed on the main diagonal in the bivariate posterior plot.
#' Defaults to FALSE, in which case the column names are displayed on the diagonal.
#' @param ... Not used; included for compatibility with the generic function plot.
#'
#' @return If doPlot==TRUE, the function returns nothing and produces output on
#' the current graphics device as a side-effect. Otherwise, the function returns
#' a list of plot-objects: traceplot and densplot.
#'
#' @import ggplot2
#' @importFrom GGally ggpairs print_if_interactive wrap ggally_text
#' @importFrom stats cor complete.cases
#' @import methods
#'
#' @examples
#' \dontrun{
#' library(POUMM)
#'
#' set.seed(1)
#'
#' N <- 1000
#'
#' # create a random non-ultrametric tree of N tips
#' tree <- ape::rtree(N)
#'
#' # Simulate the evolution of a trait along the tree
#' z <- rVNodesGivenTreePOUMM(
#' tree, g0 = 8, alpha = 1, theta = 4, sigma = 1.2, sigmae = .8)
#'
#' fit <- POUMM(z[1:N], tree, spec = list(nSamplesMCMC = 4e5))
#'
#' # Summarize the results from the fit in a table:
#' summary(fit)
#'
#' # Create plots for some of the inferred parameters/statistics:
#' pl <- plot(fit, stat = c("alpha", "theta", "sigma", "sigmae", "H2tMean"),
#' doZoomIn = TRUE,
#' zoomInFilter = paste("!(stat %in% c('alpha', 'sigma', 'sigmae')) |",
#' "(value >= 0 & value <= 8)"),
#' doPlot = FALSE)
#'
#' pl$traceplot
#' pl$densplot
#' }
#'
#' @export
plot.summary.POUMM <- function(
  x, type = c("MCMC"),
  doPlot = TRUE,
  stat = c("alpha", "theta", "sigma", "sigmae", "g0", "H2tMean"),
  chain = NULL,
  doZoomIn = FALSE,
  zoomInFilter = paste0("(stat %in% c('H2e','H2tMean','H2tInf','H2tMax') & ",
                        "(value >= 0 & value <= 1) ) |",
                        "( !stat %in% c('H2e','H2tMean','H2tInf','H2tMax') & ",
                        "(value <= median(HPDUpper) + 4 * (median(HPDUpper) - median(HPDLower)) &",
                        "value >= median(HPDLower) - 4 * (median(HPDUpper) - median(HPDLower))))"),
  palette = c("#999999", "#0072B2", "#CC79A7", "#E69F00", "#D55E00", "#56B4E9", "#009E73", "#F0E442"),
  prettyNames = TRUE,
  showUnivarDensityOnDiag = FALSE,
  ...) {
  # Declare symbols used as data.table column names to avoid CRAN CHECK NOTES
  # "no visible binding for global variable".
  # NOTE: `chain` is deliberately NOT in this chain of assignments: it is a
  # function parameter, and nullifying it here used to clobber the
  # user-supplied value, silently disabling chain filtering.
  N <- MLE <- samplePriorMCMC <- HPD <- HPD50 <- ESS <- HPDUpperFiltered <-
    HPDLowerFiltered <- value <- HPDUpper <- HPDLower <- it <- PostMean <- mcs <-
    nChains <- G.R. <- stat2 <- statFactor <- NULL

  # Use inherits() rather than class(x) == "...": class(x) has length > 1 for
  # summary.POUMM objects, and a length > 1 condition in if() is an error in
  # R >= 4.2.
  if(inherits(x, "summary.POUMM") && !is.null(x$MCMC)) {
    .stat <- stat
    .chain <- chain

    # join the ML and the MCMC summaries on the statistic name
    data <- merge(x$ML, x$MCMC, by = "stat")
    data <- data[
      {
        if(!is.null(.stat)) {stat %in% .stat} else TRUE
      } & {
        if(!is.null(.chain)) {chain %in% .chain} else TRUE
      }]

    # order the rows as in the requested stat vector
    setkey(data, stat)
    data <- data[list(.stat)]

    # unnest the per-sample values: one row per MCMC sample per stat per chain
    data <- data[{
      if(!is.null(.stat)) {stat %in% .stat} else TRUE
    } & {
      if(!is.null(.chain)) {chain %in% .chain} else TRUE
    }, list(
      N, MLE,
      samplePriorMCMC,
      HPDLower = sapply(HPD, function(.) .[1]),
      HPDUpper = sapply(HPD, function(.) .[2]),
      HPD50Lower = sapply(HPD50, function(.) .[1]),
      HPD50Upper = sapply(HPD50, function(.) .[2]),
      ESS,
      value = unlist(mcmc),
      it = seq(x$startMCMC, by = x$thinMCMC, along.with = mcmc[[1]])),
      by = list(stat = factor(stat), chain = factor(chain))]

    if(doZoomIn) {
      # evaluate the zoomInFilter expression within each stat group
      data[, stat2:=stat]
      data <- data[, {
        .SD[eval(parse(text = zoomInFilter))]
      }, by = stat2]
      data[, stat2:=NULL]
    }

    # clip the plotted HPD segment to the range of the (possibly filtered) values
    data[, HPDUpperFiltered:=min(max(value), unique(HPDUpper)),
         list(stat = factor(stat), chain = factor(chain))]
    data[, HPDLowerFiltered:=max(min(value), unique(HPDLower)),
         list(stat = factor(stat), chain = factor(chain))]

    .availStats <- data[, as.character(unique(stat))]
    statFactorLabels <- if(prettyNames) {
      prettifyNames(.availStats)
    } else {
      .availStats
    }
    data[, statFactor:=factor(stat, levels = .availStats, labels = statFactorLabels)]

    # build a wide table (one column per stat) for the bivariate pairs-plot
    .stat <- .availStats[1]
    dtm <- data[stat == .stat, list(stat, it, chain, value)]
    dtm[, (.stat) := value]
    dtm[, c("stat", "value") := NULL]
    for(.stat in .availStats[-1]) {
      dtm2 <-
        data[stat == .stat,
             eval(parse(text=paste0("list(it, chain, ", .stat, ' = value)')))]
      dtm <- merge(dtm, dtm2, by=c("it", "chain"), all=TRUE)
    }

    # map chain numbers (as character) to colors
    names(palette) <- as.character(seq_along(palette))

    my_ggplot <- function(...) ggplot(...) +
      scale_color_manual(values = palette) +
      scale_fill_manual(values = palette)

    if(type[1] == "MCMC") {
      # one facet per statistic, one line per chain
      traceplot <- my_ggplot(data) +
        geom_line(aes(x=it, y=value, col = chain)) +
        facet_wrap(~statFactor,
                   scales = "free",
                   labeller = if(prettyNames) "label_parsed" else "label_value")

      my_dens <- function(data, mapping, ...) {
        my_ggplot(data = data, mapping=mapping) +
          geom_density(..., alpha = 0.5)
      }

      eval_data_col <- function (data, aes_col) {
        eval(aes_col, data)
      }

      my_points <- function(data, mapping, ...) {
        my_ggplot(data = data, mapping = mapping) +
          geom_point(..., alpha = 0.5)
      }

      # Copied and modified from GGally::ggally_cor: per-chain correlation
      # labels for the upper panels of the pairs-plot. The unused is_date()
      # helper from the GGally original has been dropped.
      # NOTE(review): deparse(mapping$x) assumes the aes mapping deparses to a
      # bare column name; this is sensitive to the installed ggplot2/GGally
      # versions -- verify against the versions in DESCRIPTION.
      my_cor <- function(data, mapping, alignPercent = 0, method = "pearson",
                         use = "complete.obs", corAlignPercent = NULL, corMethod = NULL,
                         corUse = NULL, ...) {
        # global variable name to avoid check note:
        labelp <- NULL
        # copied from GGally
        str_c <- function (..., sep = "", collapse = NULL) {
          paste(..., sep = sep, collapse = collapse)
        }
        if (!is.null(corAlignPercent)) {
          stop("'corAlignPercent' is deprecated. Please use argument 'alignPercent'")
        }
        if (!is.null(corMethod)) {
          stop("'corMethod' is deprecated. Please use argument 'method'")
        }
        if (!is.null(corUse)) {
          stop("'corUse' is deprecated. Please use argument 'use'")
        }
        useOptions <- c("all.obs", "complete.obs", "pairwise.complete.obs",
                        "everything", "na.or.complete")
        use <- pmatch(use, useOptions)
        if (is.na(use)) {
          warning("correlation 'use' not found. Using default value of 'all.obs'")
          use <- useOptions[1]
        } else {
          use <- useOptions[use]
        }
        cor_fn <- function(x, y) {
          cor(x, y, method = method, use = use)
        }
        xCol <- deparse(mapping$x)
        yCol <- deparse(mapping$y)
        if (is.numeric(eval_data_col(data, mapping$colour))) {
          stop("ggally_cor: mapping color column must be categorical, not numeric")
        }
        colorCol <- deparse(mapping$colour)
        # plain if/else instead of ifelse(): ifelse() cannot return NULL and
        # would error if the test were ever TRUE
        singleColorCol <- if (is.null(colorCol)) NULL else paste(colorCol, collapse = "")
        if (use %in% c("complete.obs", "pairwise.complete.obs", "na.or.complete")) {
          if (length(colorCol) > 0) {
            if (singleColorCol %in% colnames(data)) {
              rows <- complete.cases(data[c(xCol, yCol, colorCol)])
            } else {
              rows <- complete.cases(data[c(xCol, yCol)])
            }
          } else {
            rows <- complete.cases(data[c(xCol, yCol)])
          }
          if (any(!rows)) {
            total <- sum(!rows)
            if (total > 1) {
              warning("Removed ", total, " rows containing missing values")
            } else if (total == 1) {
              warning("Removing 1 row that contained a missing value")
            }
          }
          data <- data[rows, ]
        }
        xVal <- data[[xCol]]
        yVal <- data[[yCol]]
        # drop mapping entries that refer to data columns; they are handled by
        # the per-group correlation computed below
        if (length(names(mapping)) > 0) {
          for (i in length(names(mapping)):1) {
            tmp_map_val <- deparse(mapping[names(mapping)[i]][[1]])
            if (tmp_map_val[length(tmp_map_val)] %in% colnames(data))
              mapping[[names(mapping)[i]]] <- NULL
            if (length(names(mapping)) < 1) {
              mapping <- NULL
              break
            }
          }
        }
        if (length(colorCol) < 1) {
          colorCol <- "ggally_NO_EXIST"
        }
        if ((singleColorCol != "ggally_NO_EXIST") && (singleColorCol %in%
                                                      colnames(data))) {
          # one correlation per chain (color group)
          cord <- as.data.frame(
            as.data.table(data)[, cor_fn(eval(parse(text=(xCol))), eval(parse(text=yCol))),
                                by=eval(colorCol)])
          colnames(cord)[2] <- "ggally_cor"
          cord$ggally_cor <- signif(as.numeric(cord$ggally_cor), 3)
          # order the rows according to the factor levels of the color column
          lev <- levels(data[[colorCol]])
          ord <- rep(-1, nrow(cord))
          for (i in seq_len(nrow(cord))) {
            for (j in seq_along(lev)) {
              if (identical(as.character(cord[i, colorCol]),
                            as.character(lev[j]))) {
                ord[i] <- j
              }
            }
          }
          cord <- cord[order(ord[ord >= 0]), ]
          cord$label <- str_c(cord[[colorCol]], ": ", cord$ggally_cor)
          # panel ranges padded by 1% on each side
          xmin <- min(xVal, na.rm = TRUE)
          xmax <- max(xVal, na.rm = TRUE)
          xrange <- c(xmin - 0.01 * (xmax - xmin), xmax + 0.01 * (xmax - xmin))
          ymin <- min(yVal, na.rm = TRUE)
          ymax <- max(yVal, na.rm = TRUE)
          yrange <- c(ymin - 0.01 * (ymax - ymin), ymax + 0.01 * (ymax - ymin))
          p <- ggally_text(label = str_c("Correlation: ", ""),
                           mapping = mapping, xP = 0.5, yP = 0.9,
                           xrange = xrange, yrange = yrange, color = "black",
                           ...) + theme(legend.position = "none")
          xPos <- rep(alignPercent, nrow(cord)) * diff(xrange) +
            min(xrange, na.rm = TRUE)
          yPos <- seq(from = 0.9, to = 0.2, length.out = nrow(cord) + 1)
          yPos <- yPos * diff(yrange) + min(yrange, na.rm = TRUE)
          yPos <- yPos[-1]
          cordf <- data.frame(xPos = xPos, yPos = yPos, labelp = cord$label,
                              colorCol = cord[[colorCol]])
          cordf$labelp <- factor(cordf$labelp, levels = cordf$labelp)
          p <- p + geom_text(data = cordf,
                             aes(x = xPos, y = yPos, label = labelp,
                                 color = colorCol), hjust = "left", ...) +
            scale_color_manual(values = palette) +
            theme(panel.grid = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.background = element_rect(color="grey"))
          p
        } else {
          # no per-group coloring: a single overall correlation label
          xmin <- min(xVal, na.rm = TRUE)
          xmax <- max(xVal, na.rm = TRUE)
          xrange <- c(xmin - 0.01 * (xmax - xmin), xmax + 0.01 * (xmax - xmin))
          ymin <- min(yVal, na.rm = TRUE)
          ymax <- max(yVal, na.rm = TRUE)
          yrange <- c(ymin - 0.01 * (ymax - ymin), ymax + 0.01 * (ymax - ymin))
          p <- ggally_text(
            label = paste("Correlation:\n",
                          signif(cor_fn(xVal, yVal), 3),
                          sep = "", collapse = ""), mapping, xP = 0.5,
            yP = 0.5, xrange = xrange, yrange = yrange, ...) +
            theme(legend.position = "none")
          p
        }
      }

      # univariate posterior densities with HPD segments and the ML point
      densplot <- my_ggplot(data) +
        geom_density(aes(x=value, fill = chain, col = chain), alpha=0.5) +
        geom_segment(aes(x=HPDLowerFiltered, xend=HPDUpperFiltered,
                         y=0, yend=0, col = chain)) +
        geom_point(aes(x=MLE, y=0)) +
        facet_wrap(~statFactor,
                   scales = "free",
                   labeller = if(prettyNames) "label_parsed" else "label_value")

      # bivariate posterior pairs-plot
      densplot_bivar <- ggpairs(
        as.data.frame(dtm),
        mapping = aes(fill = chain, color = chain),
        columns = .availStats,
        columnLabels = if(prettyNames) prettifyNames(.availStats) else .availStats,
        axisLabels = "show",
        showStrips = showUnivarDensityOnDiag,
        labeller = if(prettyNames) "label_parsed" else "label_value",
        diag = list(continuous = if(showUnivarDensityOnDiag) my_dens else "blankDiag"),
        lower = list(continuous = my_points,
                     combo = wrap("box_no_facet")),
        upper = list(continuous = my_cor,
                     combo = wrap("box_no_facet")))

      if(doPlot) {
        print(traceplot)
        if(interactive()) {
          print("Press Enter to see a univariate posterior density plot")
          scan("", what = "character", nlines = 1)
        }
        print(densplot)
        if(interactive()) {
          print("Press Enter to see a bivariate posterior density plot")
          scan("", what = "character", nlines = 1)
        }
        print(densplot_bivar)
      } else {
        list(traceplot = traceplot, densplot = densplot, densplot_bivar = densplot_bivar)
      }
    }
  } else {
    stop("plot.summary.POUMM called on a non summary.POUMM-object or a missing MCMC element. Verify that summary.POUMM has been called with mode = 'expert'")
  }
}
# Map statistic names to plotmath expressions used as facet/column labels.
# Names without a known pretty version are passed through unchanged.
# vapply guarantees a character vector even for empty input (sapply would
# return an empty list), and the result carries no names.
prettifyNames <- function(names) {
  prettyNames <- c(alpha = "alpha",
                   theta = "theta",
                   g0 = "g[0]",
                   sigma = "sigma",
                   sigmae = "sigma[e]",
                   H2tMean = "H[bar(t)]^2",
                   H2e = "H[e]^2",
                   H2tInf = "H[infinity]^2")
  vapply(names, function(n) {
    pn <- prettyNames[n]
    if (is.na(pn)) n else unname(pn)
  }, character(1), USE.NAMES = FALSE)
}
#' Extract statistics from sampled or inferred parameters of a
#' POUMM fit
#'
#' @param object An object of class "POUMM".
#'
#' @details This is a generic method; the actual work is done by the
#'   class-specific implementations, e.g. \code{statistics.POUMM}.
#' @export
statistics <- function(object) UseMethod("statistics")
#' @describeIn statistics Relevant statistics from the sampled parameters of a
#' POUMM fit
#'
#' @export
statistics.POUMM <- function(object) {
  # One accessor per model parameter: parameter i is column i of the sampled
  # parameter matrix. The closures are built directly instead of via
  # eval(parse(...)): each call of the anonymous function has its own
  # environment, so `i` is captured correctly per iteration.
  parNames <- names(object$spec$parLower)
  listPar <- lapply(seq_along(parNames), function(i) {
    force(i)
    function(par) par[, i]
  })
  names(listPar) <- parNames

  # Statistics derived from the natural-scale (mapped) parameters.
  listOtherStats <- list(
    H2e = function(par) H2e(z = object$pruneInfo$z,
                            sigmae = object$spec$parMapping(par)[, 'sigmae']),
    H2tInf = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                              sigma = object$spec$parMapping(par)[, 'sigma'],
                              sigmae = object$spec$parMapping(par)[, 'sigmae'],
                              t = Inf),
    H2tMax = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                              sigma = object$spec$parMapping(par)[, 'sigma'],
                              sigmae = object$spec$parMapping(par)[, 'sigmae'],
                              t = object$tMax),
    H2tMean = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                               sigma = object$spec$parMapping(par)[, 'sigma'],
                               sigmae = object$spec$parMapping(par)[, 'sigmae'],
                               t = object$tMean),
    alpha = function(par) object$spec$parMapping(par)[, 'alpha'],
    theta = function(par) object$spec$parMapping(par)[, 'theta'],
    sigma = function(par) object$spec$parMapping(par)[, 'sigma'],
    sigmae = function(par) object$spec$parMapping(par)[, 'sigmae'],
    sigmaG2tMean = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                       sigma = object$spec$parMapping(par)[, 'sigma'],
                                       t = object$tMean),
    sigmaG2tMax = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                      sigma = object$spec$parMapping(par)[, 'sigma'],
                                      t = object$tMax),
    sigmaG2tInf = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                      sigma = object$spec$parMapping(par)[, 'sigma'],
                                      t = Inf))

  # Parameter accessors take precedence over derived statistics of the same
  # name (e.g. alpha, theta, sigma, sigmae).
  c(listPar, listOtherStats[setdiff(names(listOtherStats), names(listPar))])
}
| /R/summaryPOUMM.R | no_license | gtonkinhill/POUMM | R | false | false | 26,543 | r | #' Summarize the results of a POUMM-fit
#'
#' @param object a POUMM object returned by POUMM-function (see ?POUMM).
#' @param ... Not used, but declared for consistency with the generic method summary.
#' @param startMCMC,endMCMC integers indicating the range of the MCMC chains
#' to be used for the analysis (excluding the initial warm-up phase)
#' @param thinMCMC thinning interval of the MCMC chain to avoid strong
#' autocorrelation between sampled elements;
#' @param stats a named list of functions of the form function(par) { number },
#' which are called for each sample of each mcmc chain in object. Defaults to
#' a call of statistics(object) returning a list of statistics functions relevant for
#' the object. See also statistics.
#' @param mode a character indicating the desired format of the returned summary
#' as follows:
#' 'short' - a data.table with the ML and MCMC estimates of heritability,
#' model parameters, root-value and other statistics.
#' 'long' - same information as in 'short' but including also the samples, which
#' can be convenient for further custom analysis of the MCMC sample.
#'
#' @import data.table
#' @import coda
#' @importFrom stats AIC
#'
#' @export
summary.POUMM <- function(object, ...,
                          startMCMC = NA, endMCMC = NA, thinMCMC = 1000,
                          stats = statistics(object),
                          mode = c('short', 'long', 'expert')) {
  # declare global variables to avoid CRAN CHECK NOTES "no visible binding":
  N <- MLE <- samplePriorMCMC <- HPD <- HPD50 <- ESS <- HPDUpperFiltered <-
    HPDLowerFiltered <- value <- HPDUpper <- HPDLower <- it <- PostMean <- mcs <-
    nChains <- chain <- G.R. <- stat <- Mean <- NULL

  mode <- tolower(mode)

  # Tree-derived quantities.
  # NOTE(review): tMax, tMean, parLower and parUpper are not referenced later
  # in this function body; kept for backward compatibility -- confirm whether
  # any lazily-evaluated argument relies on them before removing.
  tipTimes <- nodeTimes(object$pruneInfo$tree, tipsOnly = TRUE)
  tMax <- max(tipTimes)
  tMean <- mean(tipTimes)

  parLower <- matrix(object$spec$parLower, nrow = 1)
  parUpper <- matrix(object$spec$parUpper, nrow = 1)
  parML <- matrix(object$fitML$par, nrow = 1)

  # One-row table with the ML estimate of each statistic, followed by the
  # fit-quality statistics.
  anlist <- lapply(seq_along(stats), function(i) {
    data.table(stat = names(stats)[i], MLE = stats[[i]](parML))
  })
  anlist <- c(anlist, list(
    data.table(stat = "logpost", MLE = NA),
    data.table(stat = "loglik", MLE = object$fitML$value),
    data.table(stat = "AIC", MLE = AIC(object)),
    # small-sample corrected AIC
    data.table(stat = "AICc", MLE = AIC(object) +
                 2*object$dof*(object$dof+1)/(object$N-object$dof-1)),
    data.table(stat = "g0", MLE = attr(object$fitML$value, "g0"))
  ))

  an.ML <- rbindlist(anlist)
  an.ML[, N:=object$N]
  setcolorder(an.ML, c('stat', 'N', 'MLE'))

  if(!is.null(object$fitMCMC)) {
    # default burn-in: discard the first 10% of each chain
    if(is.na(startMCMC)) {
      startMCMC <- object$spec$nSamplesMCMC / 10
    }
    if(is.na(endMCMC)) {
      endMCMC <- object$spec$nSamplesMCMC
    }

    # per-statistic MCMC summaries, then the fit-quality statistics
    anlist <- lapply(seq_along(stats), function(i) {
      analyseMCMCs(object$fitMCMC$chains,
                   stat = stats[[i]], statName = names(stats)[i],
                   start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
                   as.dt = TRUE)
    })
    anlist <- c(anlist, list(
      analyseMCMCs(object$fitMCMC$chains,
                   stat=NULL, statName='logpost',
                   start = startMCMC, end=endMCMC, thinMCMC = thinMCMC,
                   as.dt = TRUE),
      analyseMCMCs(object$fitMCMC$chains,
                   stat = NULL, statName='loglik', logprior=object$spec$parPriorMCMC,
                   start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
                   as.dt = TRUE),
      analyseMCMCs(object$fitMCMC$chains,
                   stat = NULL, statName='AIC', logprior=object$spec$parPriorMCMC,
                   start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
                   as.dt = TRUE, k = object$dof, N = object$N),
      analyseMCMCs(object$fitMCMC$chains,
                   stat = NULL, statName='AICc', logprior=object$spec$parPriorMCMC,
                   start = startMCMC, end = endMCMC, thinMCMC = thinMCMC,
                   as.dt = TRUE, k = object$dof, N = object$N)
    ))

    # add g0 unless the caller already requested it via stats
    if( !("g0" %in% names(stats)) ) {
      anlist <- c(anlist, list(
        analyseMCMCs(object$fitMCMC$chains,
                     stat=NULL, statName='g0', logprior=object$spec$parPriorMCMC,
                     start=startMCMC, end=endMCMC, thinMCMC=thinMCMC,
                     as.dt=TRUE))
      )
    }

    an.MCMC <- rbindlist(anlist)
    # the first chain may sample the prior; the remaining chains sample the
    # posterior
    an.MCMC[, samplePriorMCMC:=c(object$spec$samplePriorMCMC,
                                 rep(FALSE, object$spec$nChainsMCMC - 1))]

    if(mode[1] != 'expert') {
      # aggregate over the chains of each statistic
      an.MCMC <- an.MCMC[,
                         list(
                           PostMean = mean(unlist(Mean)),
                           HPD = list(colMeans(do.call(rbind, HPD))),
                           HPD50 = list(colMeans(do.call(rbind, HPD50))),
                           start = start(mcs),
                           end = end(mcs),
                           thin = thin(mcs),
                           ESS = sum(unlist(ESS)),
                           # Gelman-Rubin diagnostic needs at least two chains
                           G.R. = if(length(mcs)>1) {
                             gelman.diag(mcs, autoburnin=FALSE)$psrf[1]
                           } else {
                             as.double(NA)
                           },
                           nChains = length(mcs),
                           mcmc = mcmc.list(mcmc(do.call(rbind, mcs)))),
                         by=list(stat, samplePriorMCMC)]
    }

    if(mode[1] == 'short') {
      an.MCMC <- an.MCMC[
        samplePriorMCMC == FALSE,
        list(stat, PostMean, HPD, ESS, G.R.)]
    } else if(mode[1] == 'long') {
      an.MCMC <- an.MCMC[
        samplePriorMCMC == FALSE,
        list(stat, PostMean, HPD, HPD50, start, end,
             thin = thinMCMC, ESS, G.R., nChains, mcmc)]
    } else if(mode[1] == 'expert') {
      an.MCMC <- an.MCMC[, list(stat, samplePriorMCMC,
                                PostMean = Mean, HPD, HPD50, start, end, thin = thinMCMC,
                                ESS, mcmc = mcs, chain)]
    } else {
      warning(paste0('mode should be one of "short", "long" or "expert", but was ',
                     mode[1], '.'))
    }
  } else {
    an.MCMC <- NULL
  }

  if(mode[1] %in% c('short', 'long')) {
    if(!is.null(an.ML) && !is.null(an.MCMC)) {
      res <- merge(an.ML, an.MCMC, by = 'stat', all = TRUE, sort = FALSE)
      # statistics missing from the MCMC get an NA HPD interval
      res[sapply(HPD, is.null), HPD:=list(list(as.double(c(NA, NA))))]
      if(mode[1] == 'long')
        res[sapply(HPD50, is.null), HPD50:=list(list(as.double(c(NA, NA))))]
    } else if(!is.null(an.ML)) {
      res <- an.ML
    } else if(!is.null(an.MCMC)) {
      res <- an.MCMC
    }
  } else {
    # expert (or unrecognized) mode: return the raw components
    res <- list(spec = object$spec, startMCMC = startMCMC, endMCMC = endMCMC,
                thinMCMC = thinMCMC,
                ML = an.ML, MCMC = an.MCMC,
                MCMCBetterLik = object$MCMCBetterLik)
  }

  class(res) <- c('summary.POUMM', class(res))
  res
}
#' Plot a summary of a POUMM fit
#' @param x An object of class POUMM.
#' @param type A character indicating the type of plot(s) to be generated.
#' Defaults to "MCMC", resulting in a trace and density plot for the selected
#' statistics (see argument stat). Currently, only 'MCMC' type is supported.
#' @param doPlot Logical indicating whether a plot should be printed on the
#' currently active graphics device or whether only to return a list of plot-
#' objects for further processing. Defaults to TRUE.
#' @param stat A character vector with the names of statistics to be plotted.
#' These should be names from the stats-list (see argument statFunctions).
#' Defaults to c("alpha", "theta", "sigma", "sigmae", "H2tMean", "H2tInf").
#' @param chain A vector of integers indicating the chains to be plotted.
#' @param doZoomIn (type MCMC only) A logical value indicating whether the
#' produced plots should have a limitation on the x-axis according to an
#' expression set in zoomInFilter (see below). Default value is FALSE.
#' @param zoomInFilter A character string which evaluates as logical value. If
#' doZoomIn is set to TRUE, this filter is applied to each point in each MCMC
#' chain and the data-point is filtered out if it evaluates to FALSE. This
#' allows to zoomIn the x-axis of density plots but should be used with caution,
#' since filtering out points from the MCMC-sample can affect the kernel densities.
#' Unfortunately, filtering out values is currently the only way to affect the
#' limits of individual facets in ggplot2. The default value is a complicated
#' expression involving the HPD from all MCMC chains (normally one chain from the
#' prior and 2 chains from the posterior):
#' zoomInFilter = paste0("stat %in% c('H2e','H2tMean','H2tInf','H2tMax') |",
# "(value <= median(HPDUpper) + 4 * (median(HPDUpper) - median(HPDLower)) &",
# "value >= median(HPDLower) - 4 * (median(HPDUpper) - median(HPDLower)))").
#' The identifiers in this expression can be any
#' column names found in a summary of a POUMM object.
#' @param palette A vector of colors (can be character strings) corresponding to the
#' different chains (in their order 1 (prior), 2, 3). Defaults to c("#999999",
#' "#0072B2", "#CC79A7", "#E69F00", "#D55E00", "#56B4E9", "#009E73", "#F0E442"),
#' which is a color-blind friendly palette.
#' @param prettyNames A logical indicating if greek letters and sub/superscripts
#' should be used for the names of columns in the posterior density pairs-plot.
#' @param showUnivarDensityOnDiag A logical indicating if univariate density
#' plots should be displayed on the main diagonal in the bivariate posterior plot.
#' Defaults to FALSE, in which case the column names are displayed on the diagonal.
#' @param ... Not used; included for compatibility with the generic function plot.
#'
#' @return If doPlot==TRUE, the function returns nothing and produces output on
#' the current graphics device as a side-effect. Otherwise, the function returns
#' a list of plot-objects: traceplot and densplot.
#'
#' @import ggplot2
#' @importFrom GGally ggpairs print_if_interactive wrap ggally_text
#' @importFrom stats cor complete.cases
#' @import methods
#'
#' @examples
#' \dontrun{
#' library(POUMM)
#'
#' set.seed(1)
#'
#' N <- 1000
#'
#' # create a random non-ultrametric tree of N tips
#' tree <- ape::rtree(N)
#'
#' # Simulate the evolution of a trait along the tree
#' z <- rVNodesGivenTreePOUMM(
#' tree, g0 = 8, alpha = 1, theta = 4, sigma = 1.2, sigmae = .8)
#'
#' fit <- POUMM(z[1:N], tree, spec = list(nSamplesMCMC = 4e5))
#'
#' # Summarize the results from the fit in a table:
#' summary(fit)
#'
#' # Create plots for some of the inferred parameters/statistics:
#' pl <- plot(fit, stat = c("alpha", "theta", "sigma", "sigmae", "H2tMean"),
#' doZoomIn = TRUE,
#' zoomInFilter = paste("!(stat %in% c('alpha', 'sigma', 'sigmae')) |",
#' "(value >= 0 & value <= 8)"),
#' doPlot = FALSE)
#'
#' pl$traceplot
#' pl$densplot
#' }
#'
#' @export
plot.summary.POUMM <- function(
x, type = c("MCMC"),
doPlot = TRUE,
stat = c("alpha", "theta", "sigma", "sigmae", "g0", "H2tMean"),
chain = NULL,
doZoomIn = FALSE,
zoomInFilter = paste0("(stat %in% c('H2e','H2tMean','H2tInf','H2tMax') & ",
"(value >= 0 & value <= 1) ) |",
"( !stat %in% c('H2e','H2tMean','H2tInf','H2tMax') & ",
"(value <= median(HPDUpper) + 4 * (median(HPDUpper) - median(HPDLower)) &",
"value >= median(HPDLower) - 4 * (median(HPDUpper) - median(HPDLower))))"),
palette = c("#999999", "#0072B2", "#CC79A7", "#E69F00", "#D55E00", "#56B4E9", "#009E73", "#F0E442"),
prettyNames = TRUE,
showUnivarDensityOnDiag = FALSE,
...) {
# declare global variables to avoid CRAN CHECK NOTES "no visible binding":
N <- MLE <- samplePriorMCMC <- HPD <- HPD50 <- ESS <- HPDUpperFiltered <-
HPDLowerFiltered <- value <- HPDUpper <- HPDLower <- it <- PostMean <- mcs <-
ESS <- nChains <- chain <- G.R. <- stat2 <- statFactor <- NULL
if(class(x) == "summary.POUMM" & !is.null(x$MCMC)) {
.stat <- stat
.chain <- chain
data <- merge(x$ML, x$MCMC, by = "stat")
data <- data[
{
if(!is.null(.stat)) {stat %in% .stat} else TRUE
} & {
if(!is.null(.chain)) {chain %in% .chain} else TRUE
}]
setkey(data, stat)
data <- data[list(.stat)]
data <- data[{
if(!is.null(.stat)) {stat %in% .stat} else TRUE
} & {
if(!is.null(.chain)) {chain %in% .chain} else TRUE
}, list(
N, MLE,
samplePriorMCMC,
HPDLower = sapply(HPD, function(.) .[1]),
HPDUpper = sapply(HPD, function(.) .[2]),
HPD50Lower = sapply(HPD50, function(.) .[1]),
HPD50Upper = sapply(HPD50, function(.) .[2]),
ESS,
value = unlist(mcmc),
it = seq(x$startMCMC, by = x$thinMCMC, along.with = mcmc[[1]])),
by = list(stat = factor(stat), chain = factor(chain))]
if(doZoomIn) {
data[, stat2:=stat]
data <- data[, {
.SD[eval(parse(text = zoomInFilter))]
}, by = stat2]
data[, stat2:=NULL]
}
data[, HPDUpperFiltered:=min(max(value), unique(HPDUpper)),
list(stat = factor(stat), chain = factor(chain))]
data[, HPDLowerFiltered:=max(min(value), unique(HPDLower)),
list(stat = factor(stat), chain = factor(chain))]
.availStats <- data[, as.character(unique(stat))]
statFactorLabels <- if(prettyNames) {
prettifyNames(.availStats)
} else {
.availStats
}
data[, statFactor:=factor(stat, levels = .availStats, labels = statFactorLabels)]
.stat <- .availStats[1]
dtm <- data[stat == .stat, list(stat, it, chain, value)]
dtm[, (.stat) := value]
dtm[, c("stat", "value") := NULL]
for(.stat in .availStats[-1]) {
dtm2 <-
data[stat == .stat,
eval(parse(text=paste0("list(it, chain, ", .stat, ' = value)')))]
dtm <- merge(dtm, dtm2, by=c("it", "chain"), all=TRUE)
}
names(palette) <- as.character(1:length(palette))
my_ggplot <- function(...) ggplot(...) +
scale_color_manual(values = palette) +
scale_fill_manual(values = palette)
if(type == "MCMC") {
traceplot <- my_ggplot(data) +
geom_line(aes(x=it, y=value, col = chain)) +
facet_wrap(~statFactor,
scales = "free",
labeller = if(prettyNames) "label_parsed" else "label_value")
my_dens <- function(data, mapping, ...) {
my_ggplot(data = data, mapping=mapping) +
geom_density(..., alpha = 0.5)
}
eval_data_col <- function (data, aes_col) {
eval(aes_col, data)
}
my_points <- function(data, mapping, ...) {
my_ggplot(data = data, mapping = mapping) +
geom_point(..., alpha = 0.5)
}
# copied and modified from GGally
my_cor <- function(data, mapping, alignPercent = 0, method = "pearson",
use = "complete.obs", corAlignPercent = NULL, corMethod = NULL,
corUse = NULL, ...) {
# global variable name to avoid check note:
labelp <- NULL
# copied from GGally
is_date <- function (x) {
inherits(x, c("POSIXt", "POSIXct", "POSIXlt", "Date"))
}
# copied from GGally
str_c <- function (..., sep = "", collapse = NULL) {
paste(..., sep = sep, collapse = collapse)
}
if (!is.null(corAlignPercent)) {
stop("'corAlignPercent' is deprecated. Please use argument 'alignPercent'")
}
if (!is.null(corMethod)) {
stop("'corMethod' is deprecated. Please use argument 'method'")
}
if (!is.null(corUse)) {
stop("'corUse' is deprecated. Please use argument 'use'")
}
useOptions <- c("all.obs", "complete.obs", "pairwise.complete.obs",
"everything", "na.or.complete")
use <- pmatch(use, useOptions)
if (is.na(use)) {
warning("correlation 'use' not found. Using default value of 'all.obs'")
use <- useOptions[1]
}
else {
use <- useOptions[use]
}
cor_fn <- function(x, y) {
cor(x, y, method = method, use = use)
}
xCol <- deparse(mapping$x)
yCol <- deparse(mapping$y)
if (is.numeric(eval_data_col(data, mapping$colour))) {
stop("ggally_cor: mapping color column must be categorical, not numeric")
}
colorCol <- deparse(mapping$colour)
singleColorCol <- ifelse(is.null(colorCol), NULL, paste(colorCol,
collapse = ""))
if (use %in% c("complete.obs", "pairwise.complete.obs", "na.or.complete")) {
if (length(colorCol) > 0) {
if (singleColorCol %in% colnames(data)) {
rows <- complete.cases(data[c(xCol, yCol, colorCol)])
}
else {
rows <- complete.cases(data[c(xCol, yCol)])
}
} else {
rows <- complete.cases(data[c(xCol, yCol)])
}
if (any(!rows)) {
total <- sum(!rows)
if (total > 1) {
warning("Removed ", total, " rows containing missing values")
}
else if (total == 1) {
warning("Removing 1 row that contained a missing value")
}
}
data <- data[rows, ]
}
xVal <- data[[xCol]]
yVal <- data[[yCol]]
if (length(names(mapping)) > 0) {
for (i in length(names(mapping)):1) {
tmp_map_val <- deparse(mapping[names(mapping)[i]][[1]])
if (tmp_map_val[length(tmp_map_val)] %in% colnames(data))
mapping[[names(mapping)[i]]] <- NULL
if (length(names(mapping)) < 1) {
mapping <- NULL
break
}
}
}
if (length(colorCol) < 1) {
colorCol <- "ggally_NO_EXIST"
}
if ((singleColorCol != "ggally_NO_EXIST") && (singleColorCol %in%
colnames(data))) {
cord <- as.data.frame(
as.data.table(data)[, cor_fn(eval(parse(text=(xCol))), eval(parse(text=yCol))),
by=eval(colorCol)])
colnames(cord)[2] <- "ggally_cor"
cord$ggally_cor <- signif(as.numeric(cord$ggally_cor),
3)
lev <- levels(data[[colorCol]])
ord <- rep(-1, nrow(cord))
for (i in 1:nrow(cord)) {
for (j in seq_along(lev)) {
if (identical(as.character(cord[i, colorCol]),
as.character(lev[j]))) {
ord[i] <- j
}
}
}
cord <- cord[order(ord[ord >= 0]), ]
cord$label <- str_c(cord[[colorCol]], ": ", cord$ggally_cor)
xmin <- min(xVal, na.rm = TRUE)
xmax <- max(xVal, na.rm = TRUE)
xrange <- c(xmin - 0.01 * (xmax - xmin), xmax + 0.01 *
(xmax - xmin))
ymin <- min(yVal, na.rm = TRUE)
ymax <- max(yVal, na.rm = TRUE)
yrange <- c(ymin - 0.01 * (ymax - ymin), ymax + 0.01 *
(ymax - ymin))
p <- ggally_text(label = str_c("Correlation: ", ""),
mapping = mapping, xP = 0.5, yP = 0.9,
xrange = xrange, yrange = yrange, color = "black",
...) + theme(legend.position = "none")
xPos <- rep(alignPercent, nrow(cord)) * diff(xrange) +
min(xrange, na.rm = TRUE)
yPos <- seq(from = 0.9, to = 0.2, length.out = nrow(cord) +
1)
yPos <- yPos * diff(yrange) + min(yrange, na.rm = TRUE)
yPos <- yPos[-1]
cordf <- data.frame(xPos = xPos, yPos = yPos, labelp = cord$label,
colorCol = cord[[colorCol]])
cordf$labelp <- factor(cordf$labelp, levels = cordf$labelp)
p <- p + geom_text(data = cordf,
aes(x = xPos, y = yPos, label = labelp,
color = colorCol), hjust = "left", ...) +
scale_color_manual(values = palette) +
theme(panel.grid = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_rect(color="grey"))
p
} else {
xmin <- min(xVal, na.rm = TRUE)
xmax <- max(xVal, na.rm = TRUE)
xrange <- c(xmin - 0.01 * (xmax - xmin), xmax + 0.01 *
(xmax - xmin))
ymin <- min(yVal, na.rm = TRUE)
ymax <- max(yVal, na.rm = TRUE)
yrange <- c(ymin - 0.01 * (ymax - ymin), ymax + 0.01 *
(ymax - ymin))
p <- ggally_text(
label = paste("Correlation:\n",
signif(cor_fn(xVal, yVal), 3),
sep = "", collapse = ""), mapping, xP = 0.5,
yP = 0.5, xrange = xrange, yrange = yrange, ...) +
theme(legend.position = "none")
p
}
}
densplot <- my_ggplot(data) +
geom_density(aes(x=value, fill = chain, col = chain), alpha=0.5) +
geom_segment(aes(x=HPDLowerFiltered, xend=HPDUpperFiltered,
y=0, yend=0, col = chain)) +
geom_point(aes(x=MLE, y=0)) +
facet_wrap(~statFactor,
scales = "free", labeller= if(prettyNames) "label_parsed" else "label_value")
densplot_bivar <- ggpairs(
as.data.frame(dtm),
mapping = aes(fill = chain, color = chain),
columns = .availStats,
columnLabels = if(prettyNames) prettifyNames(.availStats) else .availStats,
axisLabels = "show",
showStrips = showUnivarDensityOnDiag,
labeller = if(prettyNames) "label_parsed" else "label_value",
diag = list(continuous = if(showUnivarDensityOnDiag) my_dens else "blankDiag"),
lower = list(continuous = my_points,
combo = wrap("box_no_facet")),
upper = list(continuous = my_cor,
combo = wrap("box_no_facet")))
if(doPlot) {
print(traceplot)
if(interactive()) {
print("Press Enter to see a univariate posterior density plot")
scan("", what = "character", nlines = 1)
}
print(densplot)
if(interactive()) {
print("Press Enter to see a bivariate posterior density plot")
scan("", what = "character", nlines = 1)
}
print(densplot_bivar)
} else {
list(traceplot = traceplot, densplot = densplot, densplot_bivar = densplot_bivar)
}
}
} else {
stop("plot.summary.POUMM called on a non summary.POUMM-object or a missing MCMC element. Verify that summary.POUMM has been called with mode = 'expert'")
}
}
prettifyNames <- function(names) {
  # Lookup table mapping internal statistic names to plotmath expressions,
  # suitable for ggplot facet labels rendered with label_parsed.
  lookup <- c(alpha   = "alpha",
              theta   = "theta",
              g0      = "g[0]",
              sigma   = "sigma",
              sigmae  = "sigma[e]",
              H2tMean = "H[bar(t)]^2",
              H2e     = "H[e]^2",
              H2tInf  = "H[infinity]^2")
  # Names without a pretty equivalent are passed through unchanged.
  sapply(names, function(nm) {
    pretty <- lookup[nm]
    if (is.na(pretty)) nm else pretty
  }, USE.NAMES = FALSE)
}
#' Extract statistics from sampled or inferred parameters of a
#' POUMM fit
#' @param object An object of class "POUMM".
#'
#' @details This is a generic method; the actual work is done by the
#'   class-specific method selected via S3 dispatch (e.g.
#'   \code{statistics.POUMM}).
#' @return A named list of functions, each extracting one statistic from a
#'   matrix of sampled parameters (see \code{statistics.POUMM}).
#' @export
statistics <- function(object) {
  UseMethod('statistics')
}
#' @describeIn statistics Relevant statistics from the sampled parameters of a
#' POUMM fit
#'
#' @export
statistics.POUMM <- function(object) {
  # One extractor per free parameter: a closure returning column i of the
  # sampled parameter matrix.  Built with real closures instead of
  # eval(parse(text = ...)), which was fragile (it broke for parameter names
  # that are not syntactically valid R names) and hard to read.  seq_along()
  # also behaves correctly when there are no free parameters.
  parNames <- names(object$spec$parLower)
  listPar <- stats::setNames(
    lapply(seq_along(parNames), function(i) {
      force(i)  # pin the current index in this closure's environment
      function(par) par[, i]
    }),
    parNames)
  # Derived statistics computed from the mapped (natural-scale) parameters.
  # Each is a closure over `object`; nothing here is evaluated until the
  # returned function is called.
  listOtherStats <- list(
    H2e = function(par) H2e(z = object$pruneInfo$z,
                            sigmae = object$spec$parMapping(par)[, 'sigmae']),
    H2tInf = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                              sigma = object$spec$parMapping(par)[, 'sigma'],
                              sigmae = object$spec$parMapping(par)[, 'sigmae'],
                              t = Inf),
    H2tMax = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                              sigma = object$spec$parMapping(par)[, 'sigma'],
                              sigmae = object$spec$parMapping(par)[, 'sigmae'],
                              t = object$tMax),
    H2tMean = function(par) H2(alpha = object$spec$parMapping(par)[, 'alpha'],
                               sigma = object$spec$parMapping(par)[, 'sigma'],
                               sigmae = object$spec$parMapping(par)[, 'sigmae'],
                               t = object$tMean),
    alpha = function(par) object$spec$parMapping(par)[, 'alpha'],
    theta = function(par) object$spec$parMapping(par)[, 'theta'],
    sigma = function(par) object$spec$parMapping(par)[, 'sigma'],
    sigmae = function(par) object$spec$parMapping(par)[, 'sigmae'],
    sigmaG2tMean = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                       sigma = object$spec$parMapping(par)[, 'sigma'],
                                       t = object$tMean),
    sigmaG2tMax = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                      sigma = object$spec$parMapping(par)[, 'sigma'],
                                      t = object$tMax),
    sigmaG2tInf = function(par) varOU(alpha = object$spec$parMapping(par)[, 'alpha'],
                                      sigma = object$spec$parMapping(par)[, 'sigma'],
                                      t = Inf))
  # Free-parameter extractors take precedence over same-named derived stats.
  c(listPar, listOtherStats[setdiff(names(listOtherStats), names(listPar))])
}
|
# drop1_glm_loop
## Function to automate the drop1 steps for backward model selection.
## Examines a model, and drops each least significant variable, until an alpha threshold for significance is met.
## Returns: final model, with option to print interim models to the screen.
## Options:
### outcome - string of variable name to be included in the model as an outcome. Default: none.
### allVariables - vector of character strings of all predictor variables to be included in the original full model. Default: none.
### data - dataframe containing the data to be modeled. Default: none.
### trace - logical value indicating whether all steps in the drop1 loop should be displayed or just the final model. Default: FALSE.
### family - string indicating family to be used in glm model. Default: "binomial".
### test - string indicating test statistic to be used in the drop1 function. Default: "Chisq".
### alpha - significance level required for each predictor, will not drop any predictors with p < alpha. Default: 0.05.
drop1_glm_loop <- function(outcome, allVariables, data, trace = FALSE,
                           family = "binomial", test = "Chisq", alpha = 0.05) {
  # Input validation.  NOTE: the original wrapped these checks in try(),
  # which caught the stop() errors, so invalid input was never actually
  # rejected -- the guards ran but execution continued.
  if (!is.character(outcome) || length(outcome) > 1)
    stop("The option `outcome` must be a single string for the outcome of interest.")
  if (!is.character(allVariables))
    stop("The option `allVariables` must be a string or a vector of strings for predictor variables.")
  if (missing(data) || is.null(data))
    stop("The option `data` is required and has no default.")
  # Full model with every candidate predictor; names are backtick-quoted so
  # non-syntactic column names survive formula construction.
  form <- stats::as.formula(
    paste0(outcome, "~", paste0(paste0("`", allVariables, "`"), collapse = "+")))
  m1 <- stats::glm(form, data = data, family = family)
  d1 <- stats::drop1(m1, test = test)
  # Backward elimination: while any predictor's p-value is >= alpha,
  # drop the single least significant predictor and refit.
  while (any(d1$`Pr(>Chi)` >= alpha, na.rm = TRUE)) {
    dd <- as.data.frame(d1)
    dd$var <- row.names(d1)
    dd <- dd[!is.na(dd$Df), , drop = FALSE]          # drop the "<none>" row
    dd <- dd[order(dd$`Pr(>Chi)`), , drop = FALSE]   # most significant first
    keep <- utils::head(dd$var, -1)                  # discard the worst predictor
    # If the last remaining predictor is itself non-significant, stop and
    # return that one-variable model (the original crashed on "outcome~").
    if (length(keep) == 0) break
    form <- stats::as.formula(paste0(outcome, "~", paste0(keep, collapse = "+")))
    m1 <- stats::glm(form, data = data, family = family)
    d1 <- stats::drop1(m1, test = test)
    if (trace) print(d1)
  }
  if (!trace) print(d1)
  return(m1)
}
| /drop1_loop.R | no_license | roseputler/rputler-tools | R | false | false | 2,069 | r | # drop1_glm_loop
## Function to automate the drop1 steps for backward model selection.
## Examines a model, and drops each least significant variable, until an alpha threshold for significance is met.
## Returns: final model, with option to print interim models to the screen.
## Options:
### outcome - string of variable name to be included in the model as an outcome. Default: none.
### allVariables - vector of character strings of all predictor variables to be included in the original full model. Default: none.
### data - dataframe containing the data to be modeled. Default: none.
### trace - logical value indicating whether all steps in the drop1 loop should be displayed or just the final model. Default: FALSE.
### family - string indicating family to be used in glm model. Default: "binomial".
### test - string indicating test statistic to be used in the drop1 function. Default: "Chisq".
### alpha - significance level required for each predictor, will not drop any predictors with p < alpha. Default: 0.05.
drop1_glm_loop <- function(outcome, allVariables, data, trace=F, family="binomial", test="Chisq", alpha=0.05){
try(if(!is.character(outcome) | length(outcome)>1) stop("The option `outcome` must be a single string for the outcome of interest."))
try(if(!is.character(allVariables)) stop("The option `allVariables` must be a string or a vector of strings for predictor variables."))
try(if(is.null(data)) stop("The option `data` is required and has no default."))
m1 <- glm(eval(parse(text=paste0(outcome,"~", paste0(paste0('`',allVariables,'`'),collapse="+")))),
data=data,
family=family)
d1 <- drop1(m1,test=test)
while(any(d1$`Pr(>Chi)`>=alpha,na.rm = T)){
var <- d1 %>% mutate(var = row.names(d1)) %>% filter(!is.na(Df)) %>% arrange(`Pr(>Chi)`) %>% head(-1) %>% pull(var)
m1 <- glm(eval(parse(text=paste0(outcome,"~",paste0(var,collapse="+")))),
data=data,
family=family)
d1 <- drop1(m1,test=test)
if(trace) print(d1)
}
if(!trace) print(d1)
return(m1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_bessel_layers.R
\name{simulate_bessel_layer}
\alias{simulate_bessel_layer}
\title{Bessel Layer simulation}
\usage{
simulate_bessel_layer(x, y, s, t, a)
}
\arguments{
\item{x}{start value of Brownian bridge}
\item{y}{end value of Brownian bridge}
\item{s}{start time of Brownian bridge}
\item{t}{end time of Brownian bridge}
\item{a}{vector/sequence of numbers}
}
\description{
Simulates a Bessel layer \eqn{l} for a given sequence \code{a}, for the
Brownian bridge running from \code{(s, x)} to \code{(t, y)}.
}
\examples{
simulate_bessel_layer(x = 0, y = 0, s = 0, t = 1, a = seq(0.1, 0.5, 0.1))
}
| /man/simulate_bessel_layer.Rd | no_license | rchan26/RlayeredBB | R | false | true | 603 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_bessel_layers.R
\name{simulate_bessel_layer}
\alias{simulate_bessel_layer}
\title{Bessel Layer simulation}
\usage{
simulate_bessel_layer(x, y, s, t, a)
}
\arguments{
\item{x}{start value of Brownian bridge}
\item{y}{end value of Brownian bridge}
\item{s}{start time of Brownian bridge}
\item{t}{end time of Brownian bridge}
\item{a}{vector/sequence of numbers}
}
\description{
Simulates a Bessel layer l for a given sequence a
}
\examples{
simulate_bessel_layer(x = 0, y = 0, s = 0, t = 1, a = seq(0.1, 0.5, 0.1))
}
|
library(glmnet)

# Training data: column 1 is the response (average rank); columns 4 to the
# end are the predictors.
mydata <- read.table("../../../../TrainingSet/FullSet/AvgRank/liver.csv",
                     head = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# 10-fold cross-validated elastic net (alpha = 0.03, i.e. close to ridge),
# minimising mean absolute error; fixed seed makes the fold assignment
# reproducible.  The fit was originally stored in a variable named `glm`,
# which shadowed stats::glm -- renamed to `fit`.
set.seed(123)
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.03,
                 family = "gaussian", standardize = FALSE)

# Append the fitted coefficient path to the log file and close the sink
# immediately, so a failure elsewhere cannot leave console output diverted.
sink('./liver_016.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
| /Model/EN/AvgRank/liver/liver_016.R | no_license | esbgkannan/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)
sink('./liver_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# --- Setup: libraries and 2001 land-cover data ------------------------------
library(tidyverse)
library(dplyr)
library(betapart)
library(rdiversity)

# working directory
# NOTE(review): setwd() in a script hurts portability; prefer relative paths.
setwd("~/GLASGOW/Dissertation/data")

# Land-cover composition within 23 km of each BBS route (2001 classes).
hist_01_23km_LC <- read.csv("~/GLASGOW/Dissertation/data/hist_01_23km_LC.csv")

# Rename the land-cover class columns by position, add the per-route total
# and the aggregate Urban / Forest classes, and attach the bird conservation
# region of each route.  `bcr` is assumed to be a RouteName -> BCR lookup
# defined earlier in the project -- TODO confirm.
hist_01_23km_LC <- hist_01_23km_LC %>%
  rename("RouteName"=1, "OpenWater"=3, "Snow"=4, "DevOpen"=5,
         "DevLow"=6, "DevMed"=7, "DevHigh"=8, "Barren"=9,
         "DeciduousForest"=10,"EvergreenForest"=11, "MixedForest"=12,
         "Shrub"=13,"Grassland"=14, "Pasture"=15,"Crops"=16,
         "WoodyWetlands"=17,"HerbaceousWetland"=18) %>%
  mutate(total = rowSums(select(.,-RouteName))) %>%
  mutate("Urban"=DevOpen+DevLow+DevMed+DevHigh) %>%
  mutate("Forest"=DeciduousForest+EvergreenForest+MixedForest) %>%
  merge(bcr, by="RouteName")

# Raw (count) composition per route, used for the landscape diversity
# calculations below.
habitat.2001 <- hist_01_23km_LC %>% select(-total, -Forest, -Urban, -X23HIST_0)

# Convert columns 2:19 -- the individual classes plus `total` itself, which
# ends up as 100 everywhere -- to percentages of the route total.
# NOTE(review): the aggregate Urban and Forest columns are NOT rescaled here
# and remain in raw counts -- verify this is intended.
hist_01_23km_LC[,c(2:19)] <- (hist_01_23km_LC[,c(2:19)]/hist_01_23km_LC[,c(2:19)]$total)*100

# Join the 2001 bird-diversity indices (from `index.df`, computed elsewhere
# in the project -- TODO confirm its schema) onto the percentage land-cover
# table, dropping the 2016 index columns.
Hist_01_23km_LC <- index.df %>% select(-Red_bcr.2016,-Rep_bcr.2016,-Red_usa.2016,-Rep_usa.2016) %>%
  merge(hist_01_23km_LC, by="RouteName")
# Redundancy of habitat 2001 ----
# Per-region landscape redundancy (raw_sub_rho, q = 1) of each BBS route,
# computed from the 2001 land-cover composition.  Per-region results are
# collected in a list and bound once, instead of growing a data.frame with
# rbind() inside the loop (quadratic copying); the loop variable `data`,
# which shadowed utils::data, is also renamed.
habitat.df.01 <- bcr.usa %>%
  lapply(function(r) {
    comp <- habitat.2001 %>%
      filter(BCR == r) %>%
      column_to_rownames("RouteName") %>%
      select(-BCR)
    # rdiversity expects types in rows and sites in columns, hence t().
    meta.comp <- metacommunity(t(comp))
    raw_sub_rho(meta.comp, 1) %>%
      rename("RouteName" = partition_name) %>%
      select(RouteName, diversity) %>%
      merge(bcr, by = "RouteName")
  }) %>%
  bind_rows() %>%
  rename("Red_habitat" = diversity) %>%
  select(-BCR) %>%
  merge(Hist_01_23km_LC, by = "RouteName")

ggplot(habitat.df.01, aes(Red_habitat, Red_bcr.2001)) +
  geom_point() +
  geom_smooth(method = "loess") +
  geom_rug() +
  ggtitle("Redundancy of bird community as a function of the redundancy of the landscape", "within regions, 2001")
# Representativeness of habitat 2001 ----
# Per-region landscape representativeness (norm_sub_rho, q = 1) of each BBS
# route, from the 2001 land-cover composition.  Same structure as the
# redundancy block above: list + single bind instead of rbind-in-loop, and
# no shadowing of utils::data.
habitat.rep.df.01 <- bcr.usa %>%
  lapply(function(r) {
    comp <- habitat.2001 %>%
      filter(BCR == r) %>%
      column_to_rownames("RouteName") %>%
      select(-BCR)
    # rdiversity expects types in rows and sites in columns, hence t().
    meta.comp <- metacommunity(t(comp))
    norm_sub_rho(meta.comp, 1) %>%
      rename("RouteName" = partition_name) %>%
      select(RouteName, diversity) %>%
      merge(bcr, by = "RouteName")
  }) %>%
  bind_rows() %>%
  rename("Rep_habitat" = diversity) %>%
  select(-BCR) %>%
  merge(Hist_01_23km_LC, by = "RouteName")

ggplot(habitat.rep.df.01, aes(Rep_habitat, Rep_bcr.2001)) +
  geom_point() +
  geom_smooth(method = "loess") +
  geom_rug() +
  ggtitle("Representativeness of bird community as a function of the Representativeness of the landscape", "within regions, 2001")
# PLOTS ----
# Every panel in this section had the same structure: a land-cover
# proportion on the x axis against a diversity index of the 2001 bird
# community on the y axis, with a loess smoother and marginal rugs.  A
# single helper plus a spec table replaces the thirty near-identical
# ggplot blocks of the original script.

# Land-cover variables, the label used in plot titles, and the point colour
# for each land-cover group (green = forest, black = urban,
# coral = agriculture, brown = grass/shrub, purple = wetland).
.lc_plot_spec <- data.frame(
  var   = c("Forest", "DeciduousForest", "EvergreenForest", "MixedForest",
            "Urban", "DevOpen", "DevLow", "DevMed", "DevHigh",
            "Pasture", "Crops", "Shrub", "Grassland",
            "WoodyWetlands", "HerbaceousWetland"),
  label = c("Forest", "Deciduous Forest", "Evergreen Forest", "Mixed Forest",
            "Urban ", "Urban Open Space", "Urban Low", "Urban Medium", "Urban High",
            "Pasture", "Cropland", "Shrubland", "Grassland",
            "Woody Wetland", "Herbaceous Wetland"),
  col   = c(rep("darkgreen", 4), rep("black", 5), rep("coral", 2),
            rep("brown", 2), rep("mediumpurple4", 2)),
  stringsAsFactors = FALSE
)

# One scatter panel: a land-cover proportion against a diversity index.
# `xvar`/`yvar` are column names of Hist_01_23km_LC (accessed through the
# .data pronoun so the helper works with string names).
plot_lc_vs_index <- function(xvar, yvar, index_name, label, point_col) {
  ggplot(Hist_01_23km_LC, aes(x = .data[[xvar]], y = .data[[yvar]])) +
    geom_point(alpha = 0.2, col = point_col) +
    ylab(index_name) +
    geom_rug() +
    geom_smooth(method = "loess", col = "black") +
    ggtitle(paste0(index_name, " q=1 as a function of proportion of ",
                   label, ", 2001"),
            paste0(index_name, " within regions"))
}

# Redundancy panels, in the same order as the original script.  Explicit
# print() so the plots also render when the script is source()d.
for (i in seq_len(nrow(.lc_plot_spec))) {
  print(plot_lc_vs_index(.lc_plot_spec$var[i], "Red_bcr.2001", "Redundancy",
                         .lc_plot_spec$label[i], .lc_plot_spec$col[i]))
}

### REP ----
# Representativeness panels, same land-cover variables and ordering.
for (i in seq_len(nrow(.lc_plot_spec))) {
  print(plot_lc_vs_index(.lc_plot_spec$var[i], "Rep_bcr.2001", "Representativeness",
                         .lc_plot_spec$label[i], .lc_plot_spec$col[i]))
}
# Number of BBS routes per bird conservation region.
# NOTE(review): the original used `c` and `table` as variable names,
# shadowing the base functions of the same name, and then called
# order(<2-column matrix>, bcr.usa), which errors because the argument
# lengths differ (2n vs n).  Presumably the intent was to display the
# regions ranked by how many routes they contain -- confirmed fix below.
route_counts <- vapply(bcr.usa, function(b) sum(Hist_01_23km_LC$BCR == b),
                       numeric(1))
region_counts <- cbind(bcr.usa, route_counts)
print(region_counts[order(route_counts), , drop = FALSE])
| /Dissimilarity approach/red_and_rep_withinBCR2001.R | no_license | Madalena-RA/Thesis | R | false | false | 12,095 | r |
library(tidyverse)
library(dplyr)
library(betapart)
library(rdiversity)
#working directory
setwd("~/GLASGOW/Dissertation/data")
hist_01_23km_LC <- read.csv("~/GLASGOW/Dissertation/data/hist_01_23km_LC.csv")
hist_01_23km_LC <- hist_01_23km_LC %>%
rename("RouteName"=1, "OpenWater"=3, "Snow"=4, "DevOpen"=5,
"DevLow"=6, "DevMed"=7, "DevHigh"=8, "Barren"=9,
"DeciduousForest"=10,"EvergreenForest"=11, "MixedForest"=12,
"Shrub"=13,"Grassland"=14, "Pasture"=15,"Crops"=16,
"WoodyWetlands"=17,"HerbaceousWetland"=18) %>%
mutate(total = rowSums(select(.,-RouteName))) %>%
mutate("Urban"=DevOpen+DevLow+DevMed+DevHigh) %>%
mutate("Forest"=DeciduousForest+EvergreenForest+MixedForest) %>%
merge(bcr, by="RouteName")
habitat.2001 <-hist_01_23km_LC %>% select(-total, -Forest, -Urban, -X23HIST_0)
hist_01_23km_LC[,c(2:19)] <- (hist_01_23km_LC[,c(2:19)]/hist_01_23km_LC[,c(2:19)]$total)*100
Hist_01_23km_LC <- index.df %>% select(-Red_bcr.2016,-Rep_bcr.2016,-Red_usa.2016,-Rep_usa.2016) %>%
merge(hist_01_23km_LC, by="RouteName")
# Redundancy of habitat 2001 ----
habitat.df.01 <- data.frame()
for (r in bcr.usa) {
data <- habitat.2001 %>% filter(BCR==r) %>% column_to_rownames("RouteName") %>% select(-BCR)
meta.data <- metacommunity(t(data))
redundancy <- raw_sub_rho(meta.data,1)
redundancy.clean <- redundancy %>% rename("RouteName"=partition_name) %>% select(RouteName, diversity) %>%
merge(bcr,by="RouteName")
habitat.df.01 <- rbind(habitat.df.01, redundancy.clean)
}
habitat.df.01 <- habitat.df.01 %>% rename("Red_habitat"=diversity) %>% select(-BCR) %>%
merge(Hist_01_23km_LC, by="RouteName")
ggplot(habitat.df.01, aes(Red_habitat,Red_bcr.2001)) +
geom_point() +
geom_smooth(method = "loess") +
geom_rug() +
ggtitle("Redundancy of bird community as a function of the redundancy of the landscape", "within regions, 2001")
# Representativeness of habitat 2001 ----
habitat.rep.df.01 <- data.frame()
for (r in bcr.usa) {
data <- habitat.2001 %>% filter(BCR==r) %>% column_to_rownames("RouteName") %>% select(-BCR)
meta.data <- metacommunity(t(data))
Representativeness <- norm_sub_rho(meta.data,1)
representativeness.clean <- Representativeness %>% rename("RouteName"=partition_name) %>% select(RouteName, diversity) %>%
merge(bcr,by="RouteName")
habitat.rep.df.01 <- rbind(habitat.rep.df.01, representativeness.clean)
}
habitat.rep.df.01 <- habitat.rep.df.01 %>% rename("Rep_habitat"=diversity) %>% select(-BCR) %>%
merge(Hist_01_23km_LC, by="RouteName")
ggplot(habitat.rep.df.01, aes(Rep_habitat,Rep_bcr.2001)) +
geom_point() +
geom_smooth(method = "loess") +
geom_rug() +
ggtitle("Representativeness of bird community as a function of the Representativeness of the landscape", "within regions, 2001")
# PLOTS ----
ggplot(Hist_01_23km_LC, aes(x=Forest, Red_bcr.2001)) +
geom_point(alpha=0.2, col="darkgreen") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Forest, 2001","Redundancy within regions")
ggplot(Hist_01_23km_LC, aes(x=DeciduousForest, Red_bcr.2001)) +
geom_point(alpha=0.2, col="darkgreen") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Deciduous Forest, 2001","Redundancy within regions")
ggplot(Hist_01_23km_LC, aes(x=EvergreenForest, Red_bcr.2001)) +
geom_point(alpha=0.2, col="darkgreen") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Evergreen Forest, 2001","Redundancy within regions")
ggplot(Hist_01_23km_LC, aes(x=MixedForest, Red_bcr.2001)) +
geom_point(alpha=0.2, col="darkgreen") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Mixed Forest, 2001","Redundancy within regions")
#urban
ggplot(Hist_01_23km_LC, aes(x=Urban, Red_bcr.2001)) +
geom_point(alpha=0.2, col="black") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Urban , 2001","Redundancy within regions")
ggplot(Hist_01_23km_LC, aes(x=DevOpen, Red_bcr.2001)) +
geom_point(alpha=0.2, col="black") +
ylab("Redundancy")+
geom_rug()+
geom_smooth(method="loess", col="black") +
ggtitle("Redundancy q=1 as a function of proportion of Urban Open Space, 2001","Redundancy within regions")
# --- Redundancy / Representativeness vs. land-cover proportions, 2001 ----
# The original file repeated an identical six-line ggplot block for every
# land-cover class. plot_lc() reproduces one such panel; the loops below emit
# the same sequence of plots (remaining Redundancy panels first, then all
# Representativeness panels). print() is used so the panels render whether the
# script is run interactively or via source().
plot_lc <- function(x_var, y_var, pt_col, lc_label, metric_label) {
  ggplot(Hist_01_23km_LC, aes(x = .data[[x_var]], y = .data[[y_var]])) +
    geom_point(alpha = 0.2, col = pt_col) +
    ylab(metric_label) +
    geom_rug() +
    geom_smooth(method = "loess", col = "black") +
    ggtitle(
      paste0(metric_label, " q=1 as a function of proportion of ",
             lc_label, ", 2001"),
      paste0(metric_label, " within regions")
    )
}

# Land-cover variables, their human-readable labels, and point colours
# (forest = darkgreen, urban = black, agriculture = coral,
#  grass/shrub = brown, wetlands = mediumpurple4).
# Note: the original Representativeness "Urban" title had a stray trailing
# space ("Urban , 2001"); it is normalised here.
lc_spec <- data.frame(
  var = c("Forest", "DeciduousForest", "EvergreenForest", "MixedForest",
          "Urban", "DevOpen", "DevLow", "DevMed", "DevHigh",
          "Pasture", "Crops", "Shrub", "Grassland",
          "WoodyWetlands", "HerbaceousWetland"),
  label = c("Forest", "Deciduous Forest", "Evergreen Forest", "Mixed Forest",
            "Urban", "Urban Open Space", "Urban Low", "Urban Medium",
            "Urban High", "Pasture", "Cropland", "Shrubland", "Grassland",
            "Woody Wetland", "Herbaceous Wetland"),
  colour = c(rep("darkgreen", 4), rep("black", 5), rep("coral", 2),
             rep("brown", 2), rep("mediumpurple4", 2)),
  stringsAsFactors = FALSE
)

# Redundancy panels. The Forest/Deciduous/Evergreen/Mixed/Urban/DevOpen
# Redundancy panels are emitted earlier in this file, so only the remaining
# land-cover classes are plotted here.
red_spec <- lc_spec[lc_spec$var %in% c("DevLow", "DevMed", "DevHigh",
                                       "Pasture", "Crops", "Shrub",
                                       "Grassland", "WoodyWetlands",
                                       "HerbaceousWetland"), ]
for (i in seq_len(nrow(red_spec))) {
  print(plot_lc(red_spec$var[i], "Red_bcr.2001", red_spec$colour[i],
                red_spec$label[i], "Redundancy"))
}

### REP ----
# Representativeness panels for every land-cover class.
for (i in seq_len(nrow(lc_spec))) {
  print(plot_lc(lc_spec$var[i], "Rep_bcr.2001", lc_spec$colour[i],
                lc_spec$label[i], "Representativeness"))
}
# Number of rows (surveyed plots) in Hist_01_23km_LC for each BCR, in the
# order of bcr.usa. Vectorised with vapply instead of growing a vector with
# c() inside a loop (which also shadowed base::c via the accumulator name).
bcr.count <- vapply(bcr.usa,
                    function(b) sum(Hist_01_23km_LC$BCR == b),
                    numeric(1))
bcr.table <- cbind(bcr.usa, bcr.count)
# FIX: the original called order(table, bcr.usa) where `table` was the whole
# two-column matrix; order() then fails with "argument lengths differ".
# Ordering by the per-BCR counts (ties broken by BCR id) was presumably the
# intent.
print(order(bcr.count, bcr.usa))
|
#!/usr/bin/Rscript
# Forest plot of ordinal-logistic-regression (OLR) age coefficients per cell
# type, followed by box/bar plots of CIBERSORTx estimated cell proportions.
library(ggplot2)
library(ggpubr)

# OLR results: one row per cell type with estimate, CIs and p-value.
data <- read.table("CIBERSORTx-OLR-age-results.COVID.final.nonNA.txt",
                   sep = "\t", header = TRUE, row.names = 1)
#data=read.table("music-krasnow-OLR-results.nonNA.txt",sep="\t",header=TRUE,row.names=1)
data <- data[order(data$pval), ]
# Factor levels are reversed so the most significant cell type plots on top.
data$cell <- factor(rownames(data), levels = rev(rownames(data)))
# Negative natural-log p-value; drives point size below.
data$nlp <- -log(data$pval)
# Colour cell types with significant positive (teal) / negative (red)
# estimates; everything else stays black. (The unused `otherdata` subset from
# the original has been removed.)
updata <- data[data$OLR_estimate > 0 & data$pval < 0.05, ]
downdata <- data[data$OLR_estimate < 0 & data$pval < 0.05, ]
data$color <- rep("black", nrow(data))
data[data$cell %in% updata$cell, "color"] <- "#00AFBB"
data[data$cell %in% downdata$cell, "color"] <- "#FC4E07"

pdf("OLR-CIBERSORTx-lung.age.v2.pdf", height = 10, width = 10, useDingbats = FALSE)
ggplot(data, aes(x = OLR_estimate, y = cell)) +
  geom_vline(xintercept = 0, linetype = 2, color = "gray44") +
  geom_errorbar(aes(xmin = lowerCI, xmax = upperCI), width = 0.2) +
  geom_point(aes(color = color, size = nlp)) +
  theme_bw() +
  scale_color_manual(values = c("#00AFBB", "#FC4E07", "black")) +
  scale_size_continuous(name = "-log pvalue", range = c(1, 5))
dev.off()

####################################
# box plot of estimated abundances, no filtering
library(robustbase)
results <- read.table("CIBERSORTx_Adjusted.txt", sep = "\t", header = TRUE, row.names = 1)
# Drop the trailing three summary columns; order cell types by median, then
# mean, estimated proportion.
df <- results[, 1:(length(results) - 3)]
df <- df[, order(colMedians(as.matrix(df)), colMeans(df), decreasing = TRUE)]
library(dplyr)
library(reshape2)
meltdata <- melt(df)
# Flag per-cell-type outliers (outside median +/- 1.5 * IQR); only those are
# drawn by the jitter layer, the boxplot hides its own outliers.
meltdata <- meltdata %>%
  group_by(variable) %>%
  mutate(outlier = value > median(value) + IQR(value) * 1.5 |
                   value < median(value) - IQR(value) * 1.5)
#mycols = data$color
#mycols = gsub("black","gray44",mycols)
bp <- ggplot(meltdata, aes(x = variable, y = value)) +
  geom_jitter(data = meltdata[meltdata$outlier == TRUE, ],
              aes(color = variable), size = 0.5, width = 0.2, pch = 19) +
  geom_boxplot(aes(fill = variable), color = "black",
               outlier.shape = NA, size = 0.25) +
  theme_bw() +
  theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) #+scale_fill_manual(values=mycols)+scale_color_manual(values=mycols)
# bar plot of % samples > 0 proportion
prop0 <- as.data.frame((colSums(df != 0) / nrow(df)) * 100)
prop0$cell <- rownames(prop0)
prop0$cell <- factor(prop0$cell, levels = colnames(df))
prop0$data <- prop0[, 1]
pbp <- ggplot(prop0, aes(x = cell, y = data)) +
  geom_bar(stat = "identity", aes(fill = cell)) +
  theme_bw() +
  theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
library(gridExtra)
pdf("CIBERSORTx-lung-estimated-proportions.boxplot.pdf", height = 9, width = 12, useDingbats = FALSE)
# dev.off() follows on the next line of the file, closing this device.
grid.arrange(bp, pbp, nrow = 2, heights = c(1.5, 1))
dev.off() | /GTEx-RNAseq-analysis/forest-plot-olr.boxplots.R | no_license | qin0507/agingLung-COVID | R | false | false | 2,643 | r | #!/usr/bin/Rscript
# Forest plot of ordinal-logistic-regression (OLR) age coefficients per cell
# type, followed by box/bar plots of CIBERSORTx estimated cell proportions.
library(ggplot2)
library(ggpubr)

# OLR results: one row per cell type with estimate, CIs and p-value.
data <- read.table("CIBERSORTx-OLR-age-results.COVID.final.nonNA.txt",
                   sep = "\t", header = TRUE, row.names = 1)
#data=read.table("music-krasnow-OLR-results.nonNA.txt",sep="\t",header=TRUE,row.names=1)
data <- data[order(data$pval), ]
# Factor levels are reversed so the most significant cell type plots on top.
data$cell <- factor(rownames(data), levels = rev(rownames(data)))
# Negative natural-log p-value; drives point size below.
data$nlp <- -log(data$pval)
# Colour cell types with significant positive (teal) / negative (red)
# estimates; everything else stays black. (The unused `otherdata` subset from
# the original has been removed.)
updata <- data[data$OLR_estimate > 0 & data$pval < 0.05, ]
downdata <- data[data$OLR_estimate < 0 & data$pval < 0.05, ]
data$color <- rep("black", nrow(data))
data[data$cell %in% updata$cell, "color"] <- "#00AFBB"
data[data$cell %in% downdata$cell, "color"] <- "#FC4E07"

pdf("OLR-CIBERSORTx-lung.age.v2.pdf", height = 10, width = 10, useDingbats = FALSE)
ggplot(data, aes(x = OLR_estimate, y = cell)) +
  geom_vline(xintercept = 0, linetype = 2, color = "gray44") +
  geom_errorbar(aes(xmin = lowerCI, xmax = upperCI), width = 0.2) +
  geom_point(aes(color = color, size = nlp)) +
  theme_bw() +
  scale_color_manual(values = c("#00AFBB", "#FC4E07", "black")) +
  scale_size_continuous(name = "-log pvalue", range = c(1, 5))
dev.off()

####################################
# box plot of estimated abundances, no filtering
library(robustbase)
results <- read.table("CIBERSORTx_Adjusted.txt", sep = "\t", header = TRUE, row.names = 1)
# Drop the trailing three summary columns; order cell types by median, then
# mean, estimated proportion.
df <- results[, 1:(length(results) - 3)]
df <- df[, order(colMedians(as.matrix(df)), colMeans(df), decreasing = TRUE)]
library(dplyr)
library(reshape2)
meltdata <- melt(df)
# Flag per-cell-type outliers (outside median +/- 1.5 * IQR); only those are
# drawn by the jitter layer, the boxplot hides its own outliers.
meltdata <- meltdata %>%
  group_by(variable) %>%
  mutate(outlier = value > median(value) + IQR(value) * 1.5 |
                   value < median(value) - IQR(value) * 1.5)
#mycols = data$color
#mycols = gsub("black","gray44",mycols)
bp <- ggplot(meltdata, aes(x = variable, y = value)) +
  geom_jitter(data = meltdata[meltdata$outlier == TRUE, ],
              aes(color = variable), size = 0.5, width = 0.2, pch = 19) +
  geom_boxplot(aes(fill = variable), color = "black",
               outlier.shape = NA, size = 0.25) +
  theme_bw() +
  theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) #+scale_fill_manual(values=mycols)+scale_color_manual(values=mycols)
# bar plot of % samples > 0 proportion
prop0 <- as.data.frame((colSums(df != 0) / nrow(df)) * 100)
prop0$cell <- rownames(prop0)
prop0$cell <- factor(prop0$cell, levels = colnames(df))
prop0$data <- prop0[, 1]
pbp <- ggplot(prop0, aes(x = cell, y = data)) +
  geom_bar(stat = "identity", aes(fill = cell)) +
  theme_bw() +
  theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
library(gridExtra)
pdf("CIBERSORTx-lung-estimated-proportions.boxplot.pdf", height = 9, width = 12, useDingbats = FALSE)
# dev.off() follows on the next line of the file, closing this device.
grid.arrange(bp, pbp, nrow = 2, heights = c(1.5, 1))
dev.off() |
library(ggplot2)
library(dplyr)
# Preview the data
diamonds
# Work with a 1000-row random subsample so the plots render quickly.
diamonds_subset <- sample_n(diamonds, 1000, replace = FALSE)
# To start, assign variables to the x and y axes,
# and pick a geom (here I chose scatter plot or "geom_point")
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point()
# Take a moment to learn some Rstudio features:
# - "CTRL + Enter" to run selected code (or "Run" button above)
# - Environment tab to view data
# - Console vs editor
# - Plot, Files, Packages and Help tabs
# - ?function to access help
# Examples of other geoms
# Notice that these ones include a statistical transform
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_hex()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_bin2d()
# It's easy to add layers ... just tack on another "geom"
# Take a moment to explore all the possible geom layers
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point() +
  geom_rug(alpha = 0.3)
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point() +
  geom_smooth()
# To modify the appearance of the elements of a layer,
# add specifications for size, alpha (transparency), etc.
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, color = 'red') +
  geom_smooth()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, alpha = 0.2) +
  geom_smooth(size = 3)
# To link the appearance of the elements of a layer to
# variables in the data, use the same specifications as above
# but define the dependency in an "aes" section
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, aes(shape = cut)) +
  geom_smooth() +
  theme_bw()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, aes(color = cut, shape = color)) +
  geom_smooth()
# Facets are another way to map variables onto your visualization
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4) +
  facet_wrap(~cut)
# Statistical transformations incorporate summaries of your data
# easily into your visualization
ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram()
# FIX: the original called geom_density(y = price), placing the aesthetic
# outside aes() (which errors, 'price' is not visible there), and layered
# geom_jitter() without a y aesthetic. This is the working density example.
ggplot(diamonds_subset, aes(x = price)) +
  geom_density()
# Labels make your axes interpretable
ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram() +
  xlab('Price in US Dollars') +
  ylab('Count') +
  ggtitle('Distribution of Diamond Prices')
# OR
p <- ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram() +
  labs(x = 'Price in US Dollars',
       y = 'Count',
       title = 'Distribution of Diamond Prices')
print(p)
# FIX: the original added geom_jitter() here, which errors on a histogram
# (jitter needs a y aesthetic); geom_rug() demonstrates "adding a layer"
# on an x-only plot and works.
p <- p + geom_rug()
print(p)
# And finally, to save a figure as a file
ggsave('outfile.png', height = 3, width = 5)
| /powerpoint/resources/ggplot_examples.R | no_license | meaganfoster/Bios611 | R | false | false | 2,649 | r | library(ggplot2)
library(dplyr)
# Preview the data
diamonds
# Work with a 1000-row random subsample so the plots render quickly.
diamonds_subset <- sample_n(diamonds, 1000, replace = FALSE)
# To start, assign variables to the x and y axes,
# and pick a geom (here I chose scatter plot or "geom_point")
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point()
# Take a moment to learn some Rstudio features:
# - "CTRL + Enter" to run selected code (or "Run" button above)
# - Environment tab to view data
# - Console vs editor
# - Plot, Files, Packages and Help tabs
# - ?function to access help
# Examples of other geoms
# Notice that these ones include a statistical transform
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_hex()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_bin2d()
# It's easy to add layers ... just tack on another "geom"
# Take a moment to explore all the possible geom layers
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point() +
  geom_rug(alpha = 0.3)
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point() +
  geom_smooth()
# To modify the appearance of the elements of a layer,
# add specifications for size, alpha (transparency), etc.
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, color = 'red') +
  geom_smooth()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, alpha = 0.2) +
  geom_smooth(size = 3)
# To link the appearance of the elements of a layer to
# variables in the data, use the same specifications as above
# but define the dependency in an "aes" section
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, aes(shape = cut)) +
  geom_smooth() +
  theme_bw()
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4, aes(color = cut, shape = color)) +
  geom_smooth()
# Facets are another way to map variables onto your visualization
ggplot(diamonds_subset, aes(x = carat, y = price)) +
  geom_point(size = 4) +
  facet_wrap(~cut)
# Statistical transformations incorporate summaries of your data
# easily into your visualization
ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram()
# FIX: the original called geom_density(y = price), placing the aesthetic
# outside aes() (which errors, 'price' is not visible there), and layered
# geom_jitter() without a y aesthetic. This is the working density example.
ggplot(diamonds_subset, aes(x = price)) +
  geom_density()
# Labels make your axes interpretable
ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram() +
  xlab('Price in US Dollars') +
  ylab('Count') +
  ggtitle('Distribution of Diamond Prices')
# OR
p <- ggplot(diamonds_subset, aes(x = price)) +
  geom_histogram() +
  labs(x = 'Price in US Dollars',
       y = 'Count',
       title = 'Distribution of Diamond Prices')
print(p)
# FIX: the original added geom_jitter() here, which errors on a histogram
# (jitter needs a y aesthetic); geom_rug() demonstrates "adding a layer"
# on an x-only plot and works.
p <- p + geom_rug()
print(p)
# And finally, to save a figure as a file
ggsave('outfile.png', height = 3, width = 5)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/functions.R
\name{scde.expression.prior}
\alias{scde.expression.prior}
\title{Estimate prior distribution for gene expression magnitudes}
\usage{
scde.expression.prior(models, counts, length.out = 400, show.plot = FALSE,
pseudo.count = 1, bw = 0.1, max.quantile = 1 - 0.001,
max.value = NULL)
}
\arguments{
\item{models}{models determined by \code{\link{scde.error.models}}}
\item{counts}{count matrix}
\item{length.out}{number of points (resolution) of the expression magnitude grid (default: 400). Note: larger numbers will linearly increase memory/CPU demands.}
\item{show.plot}{show the estimate posterior}
\item{pseudo.count}{pseudo-count value to use (default 1)}
\item{bw}{smoothing bandwidth to use in estimating the prior (default: 0.1)}
\item{max.quantile}{determine the maximum expression magnitude based on a quantile (default: 0.999)}
\item{max.value}{alternatively, specify the exact maximum expression magnitude value}
}
\value{
a structure describing expression magnitude grid ($x, on log10 scale) and prior ($y)
}
\description{
Use existing count data to determine a prior distribution of genes in the dataset
}
\examples{
data(es.mef.small)
cd <- es.mef.small
cd <- cd[rowSums(cd) > 0, ]
cd <- cd[, colSums(cd) > 1e4]
data(o.ifm) # Load precomputed model. Use ?scde.error.models to see how o.ifm was generated
o.prior <- scde.expression.prior(models = o.ifm, counts = cd, length.out = 400, show.plot = FALSE)
}
| /man/scde.expression.prior.Rd | no_license | roryk/scde | R | false | false | 1,529 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/functions.R
\name{scde.expression.prior}
\alias{scde.expression.prior}
\title{Estimate prior distribution for gene expression magnitudes}
\usage{
scde.expression.prior(models, counts, length.out = 400, show.plot = FALSE,
pseudo.count = 1, bw = 0.1, max.quantile = 1 - 0.001,
max.value = NULL)
}
\arguments{
\item{models}{models determined by \code{\link{scde.error.models}}}
\item{counts}{count matrix}
\item{length.out}{number of points (resolution) of the expression magnitude grid (default: 400). Note: larger numbers will linearly increase memory/CPU demands.}
\item{show.plot}{show the estimate posterior}
\item{pseudo.count}{pseudo-count value to use (default 1)}
\item{bw}{smoothing bandwidth to use in estimating the prior (default: 0.1)}
\item{max.quantile}{determine the maximum expression magnitude based on a quantile (default: 0.999)}
\item{max.value}{alternatively, specify the exact maximum expression magnitude value}
}
\value{
a structure describing expression magnitude grid ($x, on log10 scale) and prior ($y)
}
\description{
Use existing count data to determine a prior distribution of genes in the dataset
}
\examples{
data(es.mef.small)
cd <- es.mef.small
cd <- cd[rowSums(cd) > 0, ]
cd <- cd[, colSums(cd) > 1e4]
data(o.ifm) # Load precomputed model. Use ?scde.error.models to see how o.ifm was generated
o.prior <- scde.expression.prior(models = o.ifm, counts = cd, length.out = 400, show.plot = FALSE)
}
|
# Report the sender of an Imgur conversation via the
# conversations/report endpoint; the API response is tagged with the
# package's basic response class.
report_sender <- function(username, ...) {
  resp <- imgurPOST(paste0("conversations/report/", username), ...)
  structure(resp, class = "imgur_basic")
}
| /R/report_sender.R | no_license | Zedseayou/imguR | R | false | false | 164 | r | report_sender <-
function(username,
...){
out <- imgurPOST(paste0('conversations/report/', username), ...)
structure(out, class = 'imgur_basic')
}
|
### Setup data for probit model
# Builds the modelling data set: StatCast batted-ball data (sd2015) with
# one-hot outcome columns, quadratic/cubic terms for exit speed and angle,
# and FanGraphs speed scores joined on via MLB player codes.
# NOTE(review): library() calls inside a function attach packages globally,
# and readHTMLTable() scrapes a live FanGraphs URL -- results depend on the
# site being reachable and its table layout being unchanged.
setup_logit <- function(){
library(dplyr)
library(ggplot2)
library(XML)
# library(MBACprojections)
data("sd2015")
data("player_codes")
# One-hot indicators for fielded location (CF/LF/RF) and hit outcome.
raw.data <- sd2015 %>% mutate(cf = ifelse(hit_location == 8, 1, 0)) %>%
mutate(lf = ifelse(hit_location == 7, 1, 0)) %>%
mutate(rf = ifelse(hit_location == 9, 1, 0)) %>%
mutate(single = ifelse(events == "Single", 1, 0)) %>%
mutate(double = ifelse(events == "Double", 1, 0)) %>%
mutate(triple = ifelse(events == "Triple", 1, 0)) %>%
mutate(homerun = ifelse(events == "Home Run", 1, 0))
# Anything that is not a hit is an out; collapse to a single outcome label.
raw.data <- mutate(raw.data, out = ifelse(single + double + triple + homerun == 0, 1, 0))
raw.data <- mutate(raw.data, outcome = ifelse(single == 1, "Single", "Out"))
raw.data <- raw.data %>% mutate(outcome = ifelse(double == 1, "Double", outcome)) %>%
mutate(outcome = ifelse(triple == 1, "Triple", outcome)) %>% mutate(outcome = ifelse(homerun == 1, "Homerun", outcome))
# StatCast encodes missing readings as the string "null"; drop those rows
# before coercing to numeric.
raw.data <- raw.data %>% filter(hit_speed != "null" & hit_angle != "null")
raw.data$hit_speed <- as.numeric(raw.data$hit_speed)
raw.data$hit_angle <- as.numeric(raw.data$hit_angle)
# Polynomial terms used as model covariates.
raw.data <- raw.data %>% mutate(hit_speed_sq = hit_speed^2) %>% mutate(hit_angle_sq = hit_angle^2) %>%
mutate(hit_speed_cu = hit_speed^3)
# Scrape the 2015 FanGraphs batting leaderboard (all players, qual=0).
fg.stats <- readHTMLTable("http://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,4,6,11,12,13,21,-1,34,35,40,41,-1,23,37,38,50,61,-1,111,-1,203,199,58,60&season=2015&month=0&season1=2015&ind=0&team=&rost=&age=&filter=&players=&page=1_1500", stringsAsFactors = FALSE)$LeaderBoard1_dg1_ctl00
# NOTE(review): columns are selected by position -- presumably column 23 is
# the speed score and column 2 the player name; this breaks silently if
# FanGraphs reorders the leaderboard columns. Verify before reuse.
class(fg.stats[,23]) <- "numeric"
fg.stats.spd <- fg.stats[,c(2,23)]
# First two columns of player_codes: presumably mlb_id and player name.
hitter.codes <- player_codes
hitter.codes <- hitter.codes[,1:2]
# Attach player names via MLB id, then FanGraphs stats via player name.
raw.data <- full_join(raw.data, hitter.codes, by = c("batter" = "mlb_id"))
raw.data <- full_join(raw.data, fg.stats.spd, by = c("player_name.y" = "Name"))
# Keep fully-observed rows and drop zero exit-speed readings.
raw.data <- raw.data[complete.cases(raw.data),]
raw.data <- filter(raw.data, hit_speed != 0)
raw.data$game_date <- as.Date(raw.data$game_date)
return(raw.data)
}
| /R/setup_logit.R | no_license | masonhinckley/Projections2017 | R | false | false | 2,102 | r | ### Setup data for probit model
# Builds the modelling data set: StatCast batted-ball data (sd2015) with
# one-hot outcome columns, quadratic/cubic terms for exit speed and angle,
# and FanGraphs speed scores joined on via MLB player codes.
# NOTE(review): library() calls inside a function attach packages globally,
# and readHTMLTable() scrapes a live FanGraphs URL -- results depend on the
# site being reachable and its table layout being unchanged.
setup_logit <- function(){
library(dplyr)
library(ggplot2)
library(XML)
# library(MBACprojections)
data("sd2015")
data("player_codes")
# One-hot indicators for fielded location (CF/LF/RF) and hit outcome.
raw.data <- sd2015 %>% mutate(cf = ifelse(hit_location == 8, 1, 0)) %>%
mutate(lf = ifelse(hit_location == 7, 1, 0)) %>%
mutate(rf = ifelse(hit_location == 9, 1, 0)) %>%
mutate(single = ifelse(events == "Single", 1, 0)) %>%
mutate(double = ifelse(events == "Double", 1, 0)) %>%
mutate(triple = ifelse(events == "Triple", 1, 0)) %>%
mutate(homerun = ifelse(events == "Home Run", 1, 0))
# Anything that is not a hit is an out; collapse to a single outcome label.
raw.data <- mutate(raw.data, out = ifelse(single + double + triple + homerun == 0, 1, 0))
raw.data <- mutate(raw.data, outcome = ifelse(single == 1, "Single", "Out"))
raw.data <- raw.data %>% mutate(outcome = ifelse(double == 1, "Double", outcome)) %>%
mutate(outcome = ifelse(triple == 1, "Triple", outcome)) %>% mutate(outcome = ifelse(homerun == 1, "Homerun", outcome))
# StatCast encodes missing readings as the string "null"; drop those rows
# before coercing to numeric.
raw.data <- raw.data %>% filter(hit_speed != "null" & hit_angle != "null")
raw.data$hit_speed <- as.numeric(raw.data$hit_speed)
raw.data$hit_angle <- as.numeric(raw.data$hit_angle)
# Polynomial terms used as model covariates.
raw.data <- raw.data %>% mutate(hit_speed_sq = hit_speed^2) %>% mutate(hit_angle_sq = hit_angle^2) %>%
mutate(hit_speed_cu = hit_speed^3)
# Scrape the 2015 FanGraphs batting leaderboard (all players, qual=0).
fg.stats <- readHTMLTable("http://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,4,6,11,12,13,21,-1,34,35,40,41,-1,23,37,38,50,61,-1,111,-1,203,199,58,60&season=2015&month=0&season1=2015&ind=0&team=&rost=&age=&filter=&players=&page=1_1500", stringsAsFactors = FALSE)$LeaderBoard1_dg1_ctl00
# NOTE(review): columns are selected by position -- presumably column 23 is
# the speed score and column 2 the player name; this breaks silently if
# FanGraphs reorders the leaderboard columns. Verify before reuse.
class(fg.stats[,23]) <- "numeric"
fg.stats.spd <- fg.stats[,c(2,23)]
# First two columns of player_codes: presumably mlb_id and player name.
hitter.codes <- player_codes
hitter.codes <- hitter.codes[,1:2]
# Attach player names via MLB id, then FanGraphs stats via player name.
raw.data <- full_join(raw.data, hitter.codes, by = c("batter" = "mlb_id"))
raw.data <- full_join(raw.data, fg.stats.spd, by = c("player_name.y" = "Name"))
# Keep fully-observed rows and drop zero exit-speed readings.
raw.data <- raw.data[complete.cases(raw.data),]
raw.data <- filter(raw.data, hit_speed != 0)
raw.data$game_date <- as.Date(raw.data$game_date)
return(raw.data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hgsc}
\alias{hgsc}
\title{Gene expression data for High Grade Serous Carcinoma from TCGA}
\format{
A data frame with 489 rows and 321 columns.
}
\usage{
hgsc
}
\description{
There are 489 samples measured on 321 genes. Sample IDs are in the row names
and gene names are in the column names. This data set is used for clustering
HGSC into subtypes with prognostic significance. The cluster assignments
obtained by TCGA are indicated by the last six characters of each row name in
\code{hgsc}: \code{MES.C1}, \code{IMM.C2}, \code{DIF.C4}, and \code{PRO.C5}
}
\keyword{datasets}
| /man/hgsc.Rd | permissive | AlineTalhouk/diceR | R | false | true | 682 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hgsc}
\alias{hgsc}
\title{Gene expression data for High Grade Serous Carcinoma from TCGA}
\format{
A data frame with 489 rows and 321 columns.
}
\usage{
hgsc
}
\description{
There are 489 samples measured on 321 genes. Sample IDs are in the row names
and gene names are in the column names. This data set is used for clustering
HGSC into subtypes with prognostic significance. The cluster assignments
obtained by TCGA are indicated by the last six characters of each row name in
\code{hgsc}: \code{MES.C1}, \code{IMM.C2}, \code{DIF.C4}, and \code{PRO.C5}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/globals.R
\name{bq_global_project}
\alias{bq_global_project}
\title{Set global project name}
\usage{
bq_global_project(project)
}
\arguments{
\item{project}{project name you want this session to use by default, or a project object}
}
\value{
The project name (invisibly)
}
\description{
Set a project name used for this R session
}
\details{
This sets a project to a global environment value so you don't need to
supply the project argument to other API calls.
}
\seealso{
Other project functions: \code{\link{bq_get_global_project}}
}
| /man/bq_global_project.Rd | no_license | jtigani/bigQueryR | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/globals.R
\name{bq_global_project}
\alias{bq_global_project}
\title{Set global project name}
\usage{
bq_global_project(project)
}
\arguments{
\item{project}{project name you want this session to use by default, or a project object}
}
\value{
The project name (invisibly)
}
\description{
Set a project name used for this R session
}
\details{
This sets a project to a global environment value so you don't need to
supply the project argument to other API calls.
}
\seealso{
Other project functions: \code{\link{bq_get_global_project}}
}
|
library(lavaan)
### Name: lavCor
### Title: Polychoric, polyserial and Pearson correlations
### Aliases: lavCor

### ** Examples

# Holzinger and Swineford (1939) example: the nine observed indicators x1-x9
HS9 <- HolzingerSwineford1939[, paste0("x", 1:9)]
# Pearson correlations
lavCor(HS9)
# ordinal version, with three categories
HS9ord <- as.data.frame(lapply(HS9, cut, 3, labels = FALSE))
# polychoric correlations, two-stage estimation
lavCor(HS9ord, ordered = names(HS9ord))
# thresholds only
lavCor(HS9ord, ordered = names(HS9ord), output = "th")
# polychoric correlations, with standard errors
lavCor(HS9ord, ordered = names(HS9ord), se = "standard", output = "est")
# polychoric correlations, full output
fit.un <- lavCor(HS9ord, ordered = names(HS9ord), se = "standard", output = "fit")
summary(fit.un)
| /data/genthat_extracted_code/lavaan/examples/lavCor.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 841 | r | library(lavaan)
### Name: lavCor
### Title: Polychoric, polyserial and Pearson correlations
### Aliases: lavCor

### ** Examples

# Holzinger and Swineford (1939) example: the nine observed indicators x1-x9
HS9 <- HolzingerSwineford1939[, paste0("x", 1:9)]
# Pearson correlations
lavCor(HS9)
# ordinal version, with three categories
HS9ord <- as.data.frame(lapply(HS9, cut, 3, labels = FALSE))
# polychoric correlations, two-stage estimation
lavCor(HS9ord, ordered = names(HS9ord))
# thresholds only
lavCor(HS9ord, ordered = names(HS9ord), output = "th")
# polychoric correlations, with standard errors
lavCor(HS9ord, ordered = names(HS9ord), se = "standard", output = "est")
# polychoric correlations, full output
fit.un <- lavCor(HS9ord, ordered = names(HS9ord), se = "standard", output = "fit")
summary(fit.un)
summary(fit.un)
|
# Produces Fig S8
# packages
library(tidyverse)
library(brms)
library(tidybayes)
library(ggplot2)
library(patchwork)
library(gridExtra)
library(grid)
# Cumulative species-change data; blank-ish strings are read in as NA.
p.all <- read.csv("~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/nutnet_cumulative_time.csv",header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
colnames(p.all)
# Keep sites sampled for at least 3 years.
# NOTE(review): mutate(year.y == max(year.y)) uses `==`, not `=`, so it only
# adds a logical column literally named "year.y == max(year.y)" and assigns
# nothing -- confirm whether an assignment (or a filter) was intended here.
p.all <- p.all %>% group_by(site_code) %>% #filter(max.year >= 3)
filter(year_max >= 3) %>% mutate(year.y == max(year.y))
head(p.all)
# Mean of the distinct year_max values (printed only, not stored).
p.all %>% ungroup() %>% select(year_max) %>% distinct() %>% mutate(mean(year_max))
head(p.all)
# Rows at the last sampled year ("max") and the first sampled year ("min").
# NOTE(review): the mutate( year.y == as.numeric(year.y)) calls below have
# the same `==`-instead-of-`=` issue, so year.y is NOT converted to numeric
# here; the as.numeric conversion only happens further down, after p.all.mm
# has already been built from p.all.
p.all.max <- p.all %>%
mutate( year.y == as.numeric(year.y)) %>%
#group_by(site_code) %>%
filter(year.y == max(year.y)) %>%
mutate(value = "max")
p.all.min <- p.all %>%
mutate( year.y == as.numeric(year.y)) %>%
#group_by(site_code) %>%
filter(year.y == min(year.y)) %>%
mutate(value = "min")
# Stack first/last-year rows; "min" is the first factor level so it plots
# before "max".
p.all.mm <- p.all.max %>% rbind(p.all.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
p.all$site_code <- as.factor(p.all$site_code)
p.all$block<-as.factor(p.all$block)
p.all$plot<-as.factor(p.all$plot)
p.all$year.y<-as.numeric(p.all$year.y)
View(p.all.mm)
# load model objects (brms fits used for the posterior predictions below)
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sl.Rdata') # sl.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sg.Rdata') # sg.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/cde.Rdata') # CDE.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sloss.Rdata') # s.loss.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sgain.Rdata') # s.gain.s
# Posterior expected values for species loss (s.loss) on the
# treatment x year grid, from the brms fit sloss.3_p.
# (A stray trailing empty argument in the original summarise() call and a
# duplicated View() call have been removed; behaviour is otherwise unchanged.)
s.loss.fitted <- p.all %>%
  mutate(Trt_group = trt.y) %>%
  group_by(Trt_group, trt.y) %>%
  summarise(year.y.m,
            year.y) %>%
  nest(data = c(trt.y, year.y.m, year.y)) %>%
  mutate(fitted = map(data, ~epred_draws(sloss.3_p, newdata = .x,
                                         re_formula = ~(trt.y * year.y.m))))
head(s.loss.fitted)
# Flatten the nested posterior draws; drop bookkeeping columns.
s.loss.fitted.df <- s.loss.fitted %>%
  unnest(cols = c(fitted)) %>%
  select(-data) %>%
  select(-c(.row, .chain, .iteration))
View(s.loss.fitted.df)
# Posterior mean and 95% credible interval per treatment-year combination.
s.loss.fitted <- s.loss.fitted.df %>%
  select(-.draw) %>%
  group_by(Trt_group, year.y, year.y.m) %>%
  mutate(P_Estimate = mean(.epred),
         P_Estimate_lower = quantile(.epred, probs = 0.025),
         P_Estimate_upper = quantile(.epred, probs = 0.975)) %>%
  select(-.epred) %>%
  distinct()
head(s.loss.fitted)
# First (year 1) and last (year 13) time points for the min/max comparison.
s.loss.fitted.min <- s.loss.fitted %>%
  filter(year.y %in% c(1)) %>%
  mutate(value = "min")
s.loss.fitted.max <- s.loss.fitted %>%
  filter(year.y %in% c(13)) %>%
  mutate(value = "max")
View(s.loss.fitted.max)
s.loss.mm <- s.loss.fitted.max %>%
  rbind(s.loss.fitted.min) %>%
  mutate(value = fct_relevel(value, c("min", "max")))
head(s.loss.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.loss.mm, file = 'fitted_s.loss_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_s.loss_compare.Rdata')
# Panel (a): posterior estimates of species loss at the alpha scale, with the
# raw plot-level values jittered behind the posterior means and 95% CIs.
# NOTE(review): labs(title = ...) at the end of the chain overrides the
# ggtitle(expression(alpha '-scale')) title set earlier, so only the labs()
# title is displayed -- confirm which one is intended.
fig_s8a <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = s.loss.n , x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
# posterior means for year 1 vs. max year, dodged within treatment
geom_point(data = s.loss.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
# 95% credible intervals
geom_errorbar(data = s.loss.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#B40F20"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-20, 0)+
labs(x='',
y = 'Average total number of species lost',
title= 'a) Number of species lost (s.loss)')
fig_s8a
#sgain
summary(sgain.3_p)
s.gain.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sgain.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.gain.fitted)
s.gain.fitted.df <- s.gain.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.gain.fitted.df)
s.gain.fitted <- s.gain.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.gain.fitted)
s.gain.fitted.min <- s.gain.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.gain.fitted.max <- s.gain.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
View(s.gain.fitted.max)
s.gain.mm <- s.gain.fitted.max %>% rbind(s.gain.fitted.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
head(s.gain.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.gain.mm, file = 'fitted_s.gain_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_s.gain_compare.Rdata')
fig_s8b <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = s.gain , x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.gain.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.gain.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#046C9A"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(0, 20)+
labs(x='',
y = 'Average total number of species gained',
title= 'b) Number of species gained (s.gain)')
fig_s8b
# SL
s.sl.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sl.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.sl.fitted)
s.sl.fitted.df <- s.sl.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.sl.fitted.df)
s.sl.fitted <- s.sl.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.sl.fitted)
s.sl.fitted.min <- s.sl.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.sl.fitted.max <- s.sl.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.sl.fitted.max)
s.sl.mm <- s.sl.fitted.max %>% rbind(s.sl.fitted.min) %>%
  mutate(value = fct_relevel(value, c("min","max")))
# Fix: inspect the object just created (was head(s.loss.mm), a copy-paste
# slip from the s.loss section above).
head(s.sl.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.sl.mm, file = 'fitted_sl_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_sl_compare.Rdata')
fig_s8c <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = SL, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.sl.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.sl.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#B40F20"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = expression(paste('Average total change in biomass (g/' ,m^2, ')')),
title= 'c) Biomass change associated \n with species loss (SL)')
fig_s8c
#SG
summary(sg.3_p)
s.sg.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sg.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.sg.fitted)
s.sg.fitted.df <- s.sg.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.sg.fitted.df)
s.sg.fitted <- s.sg.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.sg.fitted)
s.sg.fitted.min <- s.sg.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.sg.fitted.max <- s.sg.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.sg.fitted.max)
s.sg.mm <- s.sg.fitted.max %>% rbind(s.sg.fitted.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
head(s.sg.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.sg.mm, file = 'fitted_sg_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_sg_compare.Rdata')
fig_s8d <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = SG, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.sg.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.sg.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#046C9A"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'd) Biomass change associated \n with species gain (SG)')
fig_s8d
# cde
summary(cde.3_p)
s.cde.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(cde.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.cde.fitted)
s.cde.fitted.df <- s.cde.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.cde.fitted.df)
s.cde.fitted <- s.cde.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
filter(.epred > quantile(.epred, probs=0.025),
.epred < quantile(.epred, probs=0.975)) %>% sample_n(1000) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.cde.fitted)
s.cde.fitted.min <- s.cde.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.cde.fitted.max <- s.cde.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.cde.fitted.max)
s.cde.mm <- s.cde.fitted.max %>% rbind(s.cde.fitted.min) %>%
  mutate(value = fct_relevel(value, c("min","max")))
# Fix: inspect the object just created (was head(s.sg.mm), a copy-paste slip
# from the SG section above).
head(s.cde.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.cde.mm, file = 'fitted_cde_compare.Rdata')
# Fix: reload the CDE results just saved. Previously this loaded
# 'fitted_sg_compare.Rdata' (copy-paste from the SG section), which silently
# re-loaded s.sg.mm instead of restoring s.cde.mm.
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_cde_compare.Rdata')
fig_s8e <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = CDE, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.cde.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.cde.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#F98400"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'e) Biomass change associated \n with persistent species (PS)')
fig_s8e
fig_s8_leg <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
geom_point(data = s.cde.mm,
aes(x = trt.y , y = P_Estimate, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.cde.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_shape_manual(name = "Average change \n between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="bottom") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'e) Biomass change associated \n with persistent species (PS)')
fig_s8_leg
# extract legend
# Source: https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs
# Extract the legend ("guide-box") grob from a ggplot object so it can be
# placed on its own in a multi-panel layout (shared legend).
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))  # render the plot into a gtable of grobs
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")  # index of the legend grob
legend <- tmp$grobs[[leg]]
return(legend)}
# overall legend
fig_s8_legend <- g_legend(fig_s8_leg)
# 11X14 LANDSCAPE
(fig_s8a | fig_s8b)/ (fig_s8c | fig_s8d | fig_s8e) / (fig_s8_legend) + plot_layout(heights = c(10, 10, 1))
| /15_Supplementary_Figure_S8.R | no_license | emma-ladouceur/NutNet-CAFE | R | false | false | 19,032 | r |
# Produces Fig S8
# packages
library(tidyverse)
library(brms)
library(tidybayes)
library(ggplot2)
library(patchwork)
library(gridExtra)
library(grid)
# Read cumulative NutNet time-series data; treat blanks and several "NA"
# spellings as missing values.
p.all <- read.csv("~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/nutnet_cumulative_time.csv",header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
colnames(p.all)
# Keep only sites with at least 3 years of data.
# NOTE(review): `year.y == max(year.y)` inside mutate() is a comparison, not
# an assignment -- it adds an unnamed logical column and leaves year.y
# unchanged. Presumably `=` was intended; confirm against the original
# analysis before changing, since downstream min/max filters rely on year.y.
p.all <- p.all %>% group_by(site_code) %>% #filter(max.year >= 3)
filter(year_max >= 3) %>% mutate(year.y == max(year.y))
head(p.all)
# Mean study duration across sites (printed only, not stored).
p.all %>% ungroup() %>% select(year_max) %>% distinct() %>% mutate(mean(year_max))
head(p.all)
# Rows at the last sampled year per site (p.all is still grouped by site_code).
# NOTE(review): `year.y == as.numeric(year.y)` is again a comparison, not a
# coercion; the actual as.numeric() conversion only happens further below.
# TODO confirm `=` was intended.
p.all.max <- p.all %>%
mutate( year.y == as.numeric(year.y)) %>%
#group_by(site_code) %>%
filter(year.y == max(year.y)) %>%
mutate(value = "max")
# Rows at the first sampled year per site.
p.all.min <- p.all %>%
mutate( year.y == as.numeric(year.y)) %>%
#group_by(site_code) %>%
filter(year.y == min(year.y)) %>%
mutate(value = "min")
# Stack first- and last-year rows; order the factor so "min" plots before "max".
p.all.mm <- p.all.max %>% rbind(p.all.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
# Declare design variables as factors and year as numeric for plotting/models.
p.all$site_code <- as.factor(p.all$site_code)
p.all$block<-as.factor(p.all$block)
p.all$plot<-as.factor(p.all$plot)
p.all$year.y<-as.numeric(p.all$year.y)
View(p.all.mm)
# load model objects
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sl.Rdata') # sl.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sg.Rdata') # sg.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/cde.Rdata') # CDE.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sloss.Rdata') # s.loss.s
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Fits/3/sgain.Rdata') # s.gain.s
s.loss.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m,
year.y,
) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sloss.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.loss.fitted)
s.loss.fitted.df <- s.loss.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
View(s.loss.fitted.df)
View(s.loss.fitted.df)
s.loss.fitted <- s.loss.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.loss.fitted)
s.loss.fitted.min <- s.loss.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.loss.fitted.max <- s.loss.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
View(s.loss.fitted.max)
s.loss.mm <- s.loss.fitted.max %>% rbind(s.loss.fitted.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
head(s.loss.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.loss.mm, file = 'fitted_s.loss_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_s.loss_compare.Rdata')
fig_s8a <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = s.loss.n , x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.loss.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.loss.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#B40F20"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-20, 0)+
labs(x='',
y = 'Average total number of species lost',
title= 'a) Number of species lost (s.loss)')
fig_s8a
#sgain
summary(sgain.3_p)
s.gain.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sgain.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.gain.fitted)
s.gain.fitted.df <- s.gain.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.gain.fitted.df)
s.gain.fitted <- s.gain.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.gain.fitted)
s.gain.fitted.min <- s.gain.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.gain.fitted.max <- s.gain.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
View(s.gain.fitted.max)
s.gain.mm <- s.gain.fitted.max %>% rbind(s.gain.fitted.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
head(s.gain.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.gain.mm, file = 'fitted_s.gain_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_s.gain_compare.Rdata')
fig_s8b <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = s.gain , x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.gain.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.gain.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#046C9A"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(0, 20)+
labs(x='',
y = 'Average total number of species gained',
title= 'b) Number of species gained (s.gain)')
fig_s8b
# SL
s.sl.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sl.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.sl.fitted)
s.sl.fitted.df <- s.sl.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.sl.fitted.df)
s.sl.fitted <- s.sl.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.sl.fitted)
s.sl.fitted.min <- s.sl.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.sl.fitted.max <- s.sl.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.sl.fitted.max)
s.sl.mm <- s.sl.fitted.max %>% rbind(s.sl.fitted.min) %>%
  mutate(value = fct_relevel(value, c("min","max")))
# Fix: inspect the object just created (was head(s.loss.mm), a copy-paste
# slip from the s.loss section above).
head(s.sl.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.sl.mm, file = 'fitted_sl_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_sl_compare.Rdata')
fig_s8c <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = SL, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.sl.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.sl.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#B40F20"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = expression(paste('Average total change in biomass (g/' ,m^2, ')')),
title= 'c) Biomass change associated \n with species loss (SL)')
fig_s8c
#SG
summary(sg.3_p)
s.sg.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(sg.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.sg.fitted)
s.sg.fitted.df <- s.sg.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.sg.fitted.df)
s.sg.fitted <- s.sg.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.sg.fitted)
s.sg.fitted.min <- s.sg.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.sg.fitted.max <- s.sg.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.sg.fitted.max)
s.sg.mm <- s.sg.fitted.max %>% rbind(s.sg.fitted.min) %>%
mutate(value = fct_relevel(value, c("min","max")))
head(s.sg.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.sg.mm, file = 'fitted_sg_compare.Rdata')
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_sg_compare.Rdata')
fig_s8d <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = SG, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.sg.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.sg.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#046C9A"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'd) Biomass change associated \n with species gain (SG)')
fig_s8d
# cde
summary(cde.3_p)
s.cde.fitted <- p.all %>%
mutate(Trt_group = trt.y) %>%
group_by(Trt_group, trt.y) %>%
summarise(year.y.m ,
year.y ) %>%
nest(data = c(trt.y, year.y.m, year.y)) %>%
mutate(fitted = map(data, ~epred_draws(cde.3_p, newdata= .x, re_formula = ~(trt.y * year.y.m) )))
head(s.cde.fitted)
s.cde.fitted.df <- s.cde.fitted %>%
unnest(cols = c(fitted)) %>% select(-data) %>%
select(-c(.row, .chain, .iteration))
head(s.cde.fitted.df)
s.cde.fitted <- s.cde.fitted.df %>%
select(-.draw) %>%
group_by(Trt_group, year.y, year.y.m) %>%
filter(.epred > quantile(.epred, probs=0.025),
.epred < quantile(.epred, probs=0.975)) %>% sample_n(1000) %>%
mutate( P_Estimate = mean(.epred),
P_Estimate_lower = quantile(.epred, probs=0.025),
P_Estimate_upper = quantile(.epred, probs=0.975) ) %>%
select(-.epred) %>% distinct()
head(s.cde.fitted)
s.cde.fitted.min <- s.cde.fitted %>% filter(year.y %in% c(1)) %>%
mutate(value = "min")
s.cde.fitted.max <- s.cde.fitted %>%
filter(year.y %in% c(13)) %>%
mutate(value = "max")
head(s.cde.fitted.max)
s.cde.mm <- s.cde.fitted.max %>% rbind(s.cde.fitted.min) %>%
  mutate(value = fct_relevel(value, c("min","max")))
# Fix: inspect the object just created (was head(s.sg.mm), a copy-paste slip
# from the SG section above).
head(s.cde.mm)
setwd('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/')
save(s.cde.mm, file = 'fitted_cde_compare.Rdata')
# Fix: reload the CDE results just saved. Previously this loaded
# 'fitted_sg_compare.Rdata' (copy-paste from the SG section), which silently
# re-loaded s.sg.mm instead of restoring s.cde.mm.
load('~/GRP GAZP Dropbox/Emma Ladouceur/_Projects/NutNet/Data/Model_Extract/fitted_cde_compare.Rdata')
fig_s8e <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
# raw points
geom_point(data = p.all.mm,
aes(y = CDE, x = trt.y, colour = trt.y, shape= value, group = value),
position = position_jitterdodge(
jitter.width = 0.15,
jitter.height = 1,
dodge.width = 0.75,
seed = NA
),
size = 1, alpha = 0.2) +
geom_point(data = s.cde.mm,
aes(x = trt.y , y = P_Estimate, colour = trt.y, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.cde.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, colour = trt.y, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_color_manual(name = "Treatment",
values = c( "black", "#F98400"), labels = c("Control","NPK")) +
scale_shape_manual(name = "Average change between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="none") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'e) Biomass change associated \n with persistent species (PS)')
fig_s8e
fig_s8_leg <- ggplot() +
geom_hline(yintercept = 0,linetype="longdash") +
geom_point(data = s.cde.mm,
aes(x = trt.y , y = P_Estimate, shape = value, group = value),
position = position_dodge(width = 0.75), size = 3) +
geom_errorbar(data = s.cde.mm,
aes(x = trt.y , ymin = P_Estimate_lower, ymax = P_Estimate_upper, group = value),
position = position_dodge(width = 0.75),
size = 1, width = 0) +
scale_shape_manual(name = "Average change \n between t0 & tn",
values = c(16, 17), labels = c("Year 1", "Max Year") )+
ggtitle((expression(paste(italic(alpha), '-scale', sep = ''))))+
theme_bw(base_size=18)+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.margin= margin(t = 0.2, r = 0.2, b = -0.2, l = 0.2, unit = "cm"),
plot.title=element_text(size=18, hjust=0.5),
strip.background = element_blank(),legend.position="bottom") +
ylim(-250, 250)+
labs(x='',
y = '',
title= 'e) Biomass change associated \n with persistent species (PS)')
fig_s8_leg
# extract legend
# Source: https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs
# Pull the legend ("guide-box") grob out of a ggplot object so it can be
# arranged separately, e.g. as one shared legend under a panel of plots.
g_legend <- function(a.gplot){
  # build and render the plot into a gtable of graphical objects
  gt <- ggplot_gtable(ggplot_build(a.gplot))
  # position of the legend grob within the gtable
  keep <- which(sapply(gt$grobs, function(x) x$name) == "guide-box")
  gt$grobs[[keep]]
}
# overall legend
fig_s8_legend <- g_legend(fig_s8_leg)
# 11X14 LANDSCAPE
(fig_s8a | fig_s8b)/ (fig_s8c | fig_s8d | fig_s8e) / (fig_s8_legend) + plot_layout(heights = c(10, 10, 1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/structuredSparseMatrices.R
\name{strucMatrixBlank}
\alias{strucMatrixBlank}
\title{Blank structured sparse matrix}
\usage{
strucMatrixBlank(nrow, ncol)
}
\arguments{
\item{nrow, ncol}{number of rows and columns}
}
\description{
Blank structured sparse matrix
}
\examples{
(xBlank <- strucMatrixBlank(5, 5))
}
\seealso{
Other strucMatrixSpecial: \code{\link{strucMatrixCol}},
\code{\link{strucMatrixCompSymm}},
\code{\link{strucMatrixConstVarChol}},
\code{\link{strucMatrixCorFactor}},
\code{\link{strucMatrixCorMatChol}},
\code{\link{strucMatrixDiag}},
\code{\link{strucMatrixExpChol}},
\code{\link{strucMatrixFull}},
\code{\link{strucMatrixGenFullTri}},
\code{\link{strucMatrixIdent}},
\code{\link{strucMatrixInd}},
\code{\link{strucMatrixOneOffDiag}},
\code{\link{strucMatrixOnes}},
\code{\link{strucMatrixSymm}},
\code{\link{strucMatrixTri}},
\code{\link{strucMatrixVarWithCovariate}}
}
| /man/strucMatrixBlank.Rd | no_license | bbolker/lme4ord | R | false | true | 999 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/structuredSparseMatrices.R
\name{strucMatrixBlank}
\alias{strucMatrixBlank}
\title{Blank structured sparse matrix}
\usage{
strucMatrixBlank(nrow, ncol)
}
\arguments{
\item{nrow, ncol}{number of rows and columns}
}
\description{
Blank structured sparse matrix
}
\examples{
(xBlank <- strucMatrixBlank(5, 5))
}
\seealso{
Other strucMatrixSpecial: \code{\link{strucMatrixCol}},
\code{\link{strucMatrixCompSymm}},
\code{\link{strucMatrixConstVarChol}},
\code{\link{strucMatrixCorFactor}},
\code{\link{strucMatrixCorMatChol}},
\code{\link{strucMatrixDiag}},
\code{\link{strucMatrixExpChol}},
\code{\link{strucMatrixFull}},
\code{\link{strucMatrixGenFullTri}},
\code{\link{strucMatrixIdent}},
\code{\link{strucMatrixInd}},
\code{\link{strucMatrixOneOffDiag}},
\code{\link{strucMatrixOnes}},
\code{\link{strucMatrixSymm}},
\code{\link{strucMatrixTri}},
\code{\link{strucMatrixVarWithCovariate}}
}
|
### dichotomize.R (2015-02-28)
###
### Dichotomize Continuous Labeled Data
###
### Copyright 2013-15 Sebastian Gibb and Korbinian Strimmer
###
###
### This file is part of the `binda' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 3, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
# dichotomize using given threshold
# Dichotomize a continuous data matrix.
#
# Each column of X is converted into a 0/1 indicator by comparing it against
# a per-column threshold (value >= threshold -> 1L, otherwise 0L).
#
# X      - numeric matrix (rows = samples, columns = variables)
# thresh - a single threshold recycled to all columns, or one per column
#
# Returns an integer 0/1 matrix of the same shape as X, with the threshold
# vector attached as attribute "thresh".
dichotomize = function(X, thresh)
{
  p = ncol(X)

  # a scalar threshold applies to every column
  if (length(thresh) == 1)
    thresh = rep(thresh, p)

  if (length(thresh) != p)
    stop("Number of specified thresholds not identical with the number of variables (columns)!")

  # row-wise comparison of each column against its own threshold:
  # R recycles column-wise only, hence the transpose / compare /
  # transpose-back trick; * 1L converts logical to integer 0/1
  out = t( t(X) >= thresh ) * 1L

  attr(out, "thresh") = thresh
  out
}
# optimize threshold
# Find, for every variable (column) of X, the dichotomization threshold that
# maximizes the ranking score of the resulting binary matrix.
#
# X:            data matrix (rows = samples, columns = variables)
# L:            class labels (coerced to a factor)
# lambda.freqs: shrinkage intensity passed on to getClassFreqs()
# verbose:      if TRUE, print details about the data and frequencies
# Returns a named numeric vector of optimal thresholds (one per column of X).
#
# NOTE(review): reportDetails(), getClassFreqs(), getClassMeans() and
# rankingScore() are internal helpers defined elsewhere in this package.
optimizeThreshold = function(X, L, lambda.freqs, verbose=FALSE)
{
  L = factor(L) # make sure L is a factor
  d = ncol(X) # dimension, number of variables, number of columns
  if (verbose) reportDetails(X, L)
  ## calculate class frequencies
  freqs = getClassFreqs(L, lambda.freqs=lambda.freqs, verbose=verbose)
  ## calculate grid of possible thresholds
  ## rows: thresholds 1:length(breaks); columns: features
  # we simply use all possible thresholds
  grid = apply(X, 2, sort ) # sort to get the smallest cutoff value
  # one extra row above the column maxima so "everything below threshold"
  # is also a candidate
  grid = rbind(grid, grid[nrow(X),]+1)
  breaks = nrow(grid)
  ## create score matrix
  ## rows: thresholds 1:length(breaks); columns: variables
  scr = matrix(0L, nrow=breaks, ncol=d)
  ## loop through thresholds (columns)
  for (i in 1:breaks)
  {
    ## create binary matrix
    ## compare features (columns) with thresholds
    ## (we need rowwise comparison but R supports only columnwise comparison
    ## => double t() )
    bm = t(t(X) >= grid[i, ]) * 1L
    ## mu matrix
    mu = getClassMeans(bm, L)
    scr[i, ] = rankingScore(mu, freqs)
  }
  ## find thesholds with maximal scores
  # max.col(t(scr)) yields, per variable, the row of its best score;
  # cbind() builds a (row, column) index matrix into `grid`
  idx = cbind(max.col(t(scr), ties.method="first"), 1:d)
  thr = grid[idx]
  names(thr) = colnames(X)
  return(thr)
}
| /R/dichotomize.R | no_license | cran/binda | R | false | false | 2,657 | r | ### dichotomize.R (2015-02-28)
###
### Dichotomize Continuous Labeled Data
###
### Copyright 2013-15 Sebastian Gibb and Korbinian Strimmer
###
###
### This file is part of the `binda' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 3, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
# Dichotomize a continuous data matrix at user-supplied thresholds.
#
# X:      numeric matrix (rows = samples, columns = variables)
# thresh: a single threshold (recycled across all columns) or one per column
# Returns an integer 0/1 matrix of the same shape as X (1 where X >= thresh),
# with the threshold vector attached as attribute "thresh".
dichotomize <- function(X, thresh)
{
  n.var <- ncol(X)
  if (length(thresh) == 1)
    thresh <- rep(thresh, n.var)
  if (length(thresh) != n.var)
    stop("Number of specified thresholds not identical with the number of variables (columns)!")
  # expand the threshold vector into a full matrix (one identical row per
  # sample) so the comparison is a single vectorized operation
  cutoffs <- matrix(thresh, nrow = nrow(X), ncol = n.var, byrow = TRUE)
  result <- (X >= cutoffs) * 1L
  attr(result, "thresh") <- thresh
  result
}
# optimize threshold
# Find, for every variable (column) of X, the dichotomization threshold that
# maximizes the ranking score of the resulting binary matrix.
#
# X:            data matrix (rows = samples, columns = variables)
# L:            class labels (coerced to a factor)
# lambda.freqs: shrinkage intensity passed on to getClassFreqs()
# verbose:      if TRUE, print details about the data and frequencies
# Returns a named numeric vector of optimal thresholds (one per column of X).
#
# NOTE(review): reportDetails(), getClassFreqs(), getClassMeans() and
# rankingScore() are internal helpers defined elsewhere in this package.
optimizeThreshold = function(X, L, lambda.freqs, verbose=FALSE)
{
  L = factor(L) # make sure L is a factor
  d = ncol(X) # dimension, number of variables, number of columns
  if (verbose) reportDetails(X, L)
  ## calculate class frequencies
  freqs = getClassFreqs(L, lambda.freqs=lambda.freqs, verbose=verbose)
  ## calculate grid of possible thresholds
  ## rows: thresholds 1:length(breaks); columns: features
  # we simply use all possible thresholds
  grid = apply(X, 2, sort ) # sort to get the smallest cutoff value
  # one extra row above the column maxima so "everything below threshold"
  # is also a candidate
  grid = rbind(grid, grid[nrow(X),]+1)
  breaks = nrow(grid)
  ## create score matrix
  ## rows: thresholds 1:length(breaks); columns: variables
  scr = matrix(0L, nrow=breaks, ncol=d)
  ## loop through thresholds (columns)
  for (i in 1:breaks)
  {
    ## create binary matrix
    ## compare features (columns) with thresholds
    ## (we need rowwise comparison but R supports only columnwise comparison
    ## => double t() )
    bm = t(t(X) >= grid[i, ]) * 1L
    ## mu matrix
    mu = getClassMeans(bm, L)
    scr[i, ] = rankingScore(mu, freqs)
  }
  ## find thesholds with maximal scores
  # max.col(t(scr)) yields, per variable, the row of its best score;
  # cbind() builds a (row, column) index matrix into `grid`
  idx = cbind(max.col(t(scr), ties.method="first"), 1:d)
  thr = grid[idx]
  names(thr) = colnames(X)
  return(thr)
}
|
# load required libraries -------------------------------------------------
library(pdftools)
library(naniar)
library(tidyverse)
library(rstudioapi)
# use library() rather than require(): require() only warns (and returns
# FALSE) when the package is missing, letting the script fail later with a
# confusing error instead of stopping here
library(dplyr)
# setting up --------------------------------------------------------------
# set the working directory to the folder containing this script
current_path <- rstudioapi::getActiveDocumentContext()$path
setwd(dirname(current_path))
print(getwd())
# function to read subset of pages
# Extract the text of the selected page(s) of a PDF without reading the
# whole document: the page is first copied into a temporary PDF which is
# removed again on exit.
#
# pdf:  path or URL of the source PDF
# page: page number(s) to extract
# Returns a character vector with one element per extracted page.
read_single_page <- function(pdf, page){
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)
  # the return value of pdf_subset() (the output path) is not needed; the
  # previous version stored it in a variable named `tempfile`, shadowing
  # base::tempfile() without ever using it
  pdftools::pdf_subset(pdf, tmp, pages = page)
  pdftools::pdf_text(tmp)
}
# set link to pdf file
pdf_file = 'https://github.com/jasonjb82/general/raw/master/species/Grissino-Mayer%201993%20dendro%20ssp%20list.pdf'
# PARSING -----------------------------------------------------------------
# parse first page
# Page 5 holds the first page of the species table.  Each text line is split
# at fixed character positions into the CDI flags, the species code and the
# descriptive text; rows that continue the same species are re-joined later
# via fill().
i = 5
df1 <- read_single_page(pdf_file, page = i) %>%
  # one string per page -> one element per text line
  strsplit("\n") %>%
  as_tibble(.name_repair = make.names) %>%   # single column named "X"
  slice(16:100) %>%   # drop the page header lines
  mutate(
    CDI = str_sub(X,0,9),
    code = str_sub(X,10,25),
    text = str_sub(X,26,100)) %>%
  # remove original string
  select(-X) %>%
  slice(2:100) %>%
  # remove white spaces around values
  mutate_all(str_trim) %>%
  #filter(!(CDI=="CD")) %>%
  as.data.frame() %>%
  # empty strings -> NA so fill() can carry values downwards
  mutate_if(is.character, list(~na_if(.,""))) %>%
  fill(CDI,code) %>%
  mutate(page_no = i) %>%
  replace(is.na(.), "") %>%
  # NOTE(review): after replace() above no NA values remain, so this
  # ifelse() appears to be a no-op -- presumably defensive; confirm
  mutate(CDI = ifelse(is.na(CDI),0,CDI))
# parse last page
# Page 26 holds only the tail of the table, so fewer lines are kept
# (slice(3:11)); otherwise the fixed-width parsing mirrors the first page.
i = 26
df2 <- read_single_page(pdf_file, page = i) %>%
  strsplit("\n") %>%
  as_tibble(.name_repair = make.names) %>%
  slice(3:11) %>%
  mutate(
    CDI = str_sub(X,0,9),
    code = str_sub(X,10,25),
    text = str_sub(X,26,100)) %>%
  # remove original string
  select(-X) %>%
  slice(2:100) %>%
  # remove white spaces around values
  mutate_all(str_trim) %>%
  #filter(!(CDI=="CD")) %>%
  as.data.frame() %>%
  # empty strings -> NA so fill() can carry values downwards
  mutate_if(is.character, list(~na_if(.,""))) %>%
  fill(CDI,code) %>%
  mutate(page_no = i) %>%
  replace(is.na(.), "") %>%
  # NOTE(review): no NA values remain after replace(); presumably defensive
  mutate(CDI = ifelse(is.na(CDI),0,CDI))
# parse rest of the pages
# The accumulator must have exactly the columns produced for each page
# (CDI, code, text, page_no): the original initialised it with a misspelled
# `dode` column and without `page_no`, so the first rbind() fails on a
# column name/count mismatch.  page_no is character() here because
# mutate_all(str_trim) below coerces every column to character.
combined_df <- data.frame(CDI = character(),
                          code = character(),
                          text = character(),
                          page_no = character())
for (i in 6:25) {
  df3 = read_single_page(pdf_file, page = i) %>%
    strsplit("\n") %>%
    as_tibble(.name_repair = make.names) %>%
    slice(3:100) %>%
    mutate(
      CDI = str_sub(X,0,8),
      code = str_sub(X,14,19),
      text = str_sub(X,20,100),
      page_no = i,
      text = str_replace(text, "I ",""),
      code = str_replace(code," ","")) %>%
    # remove original string
    select(-X) %>%
    slice(2:100) %>%
    # remove white spaces around values
    mutate_all(str_trim) %>%
    #filter(!(CDI=="CD")) %>%
    as.data.frame() %>%
    mutate_if(is.character, list(~na_if(.,""))) %>%
    mutate(CDI = ifelse(!is.na(code) & is.na(CDI),"",CDI)) %>%
    # pad short codes with trailing "I" up to 4 characters
    mutate(code = str_pad(code,4, side = 'right', pad = 'I')) %>%
    fill(CDI,code)
  combined_df <- rbind(combined_df,df3)
}
# Merge dataframes
merge_df <- rbind(df1,combined_df,df2) %>%
  mutate(page_no = as.integer(page_no))
# Clean up dataframe
# Collapse the (possibly multi-line) text per species, then split it into
# scientific name, authority and common name, with several manual fixes for
# the special "SP"/"MIXI"/"CONI" group codes.
comb_df <- merge_df %>%
  group_by(CDI,code) %>%
  mutate(text = str_replace(text,'[*]','')) %>%
  mutate(text = str_replace(text,'[.]','')) %>%
  # NOTE(review): funs() is deprecated since dplyr 0.8 (still works with a
  # warning); list(~ ...) is the modern equivalent
  summarise_at(vars(-page_no),funs(paste(., collapse = " ; "))) %>%
  separate(text, c("name", "common_name","common_name1","common_name2","common_name3"), " ; ") %>%
  mutate(common_name = ifelse(!is.na(common_name1),paste0(common_name," ",common_name1),common_name)) %>%
  mutate(common_name = ifelse(!is.na(common_name2),paste0(common_name," ",common_name1," ",common_name2),common_name)) %>%
  # NOTE(review): this condition re-tests common_name2; presumably it was
  # meant to test common_name3 -- confirm against the source table
  mutate(common_name = ifelse(!is.na(common_name2),paste0(common_name," ",common_name1," ",common_name2," ",common_name3),common_name)) %>%
  select(-common_name1,-common_name2,-common_name3) %>%
  #mutate(Genus = stri_extract_first(Name, regex="\\w+")) %>%
  separate(name, into = c("genus", "species","authority"), sep = "\\s",extra="merge") %>%
  # codes ending in "SP" denote a genus-only entry: move the second word back
  # into the genus and blank the species
  mutate(genus = ifelse(grepl('SP$',code),paste0(genus," ",species),genus),
         species = ifelse(grepl('SP$',code),NA,species)) %>%
  mutate(species = ifelse(grepl('^L.',authority),paste0(species," ",authority),species),
         authority = ifelse(grepl('^L.',authority),NA,authority)) %>%
  mutate(authority = str_replace(authority,"[']",'')) %>%
  mutate(genus = ifelse(code == "MIXI",paste0(genus," ",species),genus)) %>%
  mutate(species = ifelse(code == "MIXI",NA,species)) %>%
  mutate(genus = ifelse(code == "CONI",paste0(genus," ",species),genus)) %>%
  mutate(genus = str_replace(genus,'[,]','')) %>%
  mutate(species = ifelse(code == "CONI",NA,species)) %>%
  mutate(common_name = str_squish(common_name)) %>%
  arrange(code)
# export to csv
write.csv(comb_df,"species_table.csv",row.names = FALSE)
| /species/species_tables_extract.R | no_license | jasonjb82/general | R | false | false | 4,742 | r | # load required libraries -------------------------------------------------
library(pdftools)
library(naniar)
library(tidyverse)
library(rstudioapi)
# use library() rather than require(): require() only warns (and returns
# FALSE) when the package is missing, letting the script fail later with a
# confusing error instead of stopping here
library(dplyr)
# setting up --------------------------------------------------------------
# set the working directory to the folder containing this script
current_path <- rstudioapi::getActiveDocumentContext()$path
setwd(dirname(current_path))
print(getwd())
# function to read subset of pages
# Extract the text of the selected page(s) of a PDF without reading the
# whole document: the page is first copied into a temporary PDF which is
# removed again on exit.
#
# pdf:  path or URL of the source PDF
# page: page number(s) to extract
# Returns a character vector with one element per extracted page.
read_single_page <- function(pdf, page){
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)
  # the return value of pdf_subset() (the output path) is not needed; the
  # previous version stored it in a variable named `tempfile`, shadowing
  # base::tempfile() without ever using it
  pdftools::pdf_subset(pdf, tmp, pages = page)
  pdftools::pdf_text(tmp)
}
# set link to pdf file
pdf_file = 'https://github.com/jasonjb82/general/raw/master/species/Grissino-Mayer%201993%20dendro%20ssp%20list.pdf'
# PARSING -----------------------------------------------------------------
# parse first page
# Page 5 holds the first page of the species table.  Each text line is split
# at fixed character positions into the CDI flags, the species code and the
# descriptive text; rows that continue the same species are re-joined later
# via fill().
i = 5
df1 <- read_single_page(pdf_file, page = i) %>%
  # one string per page -> one element per text line
  strsplit("\n") %>%
  as_tibble(.name_repair = make.names) %>%   # single column named "X"
  slice(16:100) %>%   # drop the page header lines
  mutate(
    CDI = str_sub(X,0,9),
    code = str_sub(X,10,25),
    text = str_sub(X,26,100)) %>%
  # remove original string
  select(-X) %>%
  slice(2:100) %>%
  # remove white spaces around values
  mutate_all(str_trim) %>%
  #filter(!(CDI=="CD")) %>%
  as.data.frame() %>%
  # empty strings -> NA so fill() can carry values downwards
  mutate_if(is.character, list(~na_if(.,""))) %>%
  fill(CDI,code) %>%
  mutate(page_no = i) %>%
  replace(is.na(.), "") %>%
  # NOTE(review): after replace() above no NA values remain, so this
  # ifelse() appears to be a no-op -- presumably defensive; confirm
  mutate(CDI = ifelse(is.na(CDI),0,CDI))
# parse last page
# Page 26 holds only the tail of the table, so fewer lines are kept
# (slice(3:11)); otherwise the fixed-width parsing mirrors the first page.
i = 26
df2 <- read_single_page(pdf_file, page = i) %>%
  strsplit("\n") %>%
  as_tibble(.name_repair = make.names) %>%
  slice(3:11) %>%
  mutate(
    CDI = str_sub(X,0,9),
    code = str_sub(X,10,25),
    text = str_sub(X,26,100)) %>%
  # remove original string
  select(-X) %>%
  slice(2:100) %>%
  # remove white spaces around values
  mutate_all(str_trim) %>%
  #filter(!(CDI=="CD")) %>%
  as.data.frame() %>%
  # empty strings -> NA so fill() can carry values downwards
  mutate_if(is.character, list(~na_if(.,""))) %>%
  fill(CDI,code) %>%
  mutate(page_no = i) %>%
  replace(is.na(.), "") %>%
  # NOTE(review): no NA values remain after replace(); presumably defensive
  mutate(CDI = ifelse(is.na(CDI),0,CDI))
# parse rest of the pages
# The accumulator must have exactly the columns produced for each page
# (CDI, code, text, page_no): the original initialised it with a misspelled
# `dode` column and without `page_no`, so the first rbind() fails on a
# column name/count mismatch.  page_no is character() here because
# mutate_all(str_trim) below coerces every column to character.
combined_df <- data.frame(CDI = character(),
                          code = character(),
                          text = character(),
                          page_no = character())
for (i in 6:25) {
  df3 = read_single_page(pdf_file, page = i) %>%
    strsplit("\n") %>%
    as_tibble(.name_repair = make.names) %>%
    slice(3:100) %>%
    mutate(
      CDI = str_sub(X,0,8),
      code = str_sub(X,14,19),
      text = str_sub(X,20,100),
      page_no = i,
      text = str_replace(text, "I ",""),
      code = str_replace(code," ","")) %>%
    # remove original string
    select(-X) %>%
    slice(2:100) %>%
    # remove white spaces around values
    mutate_all(str_trim) %>%
    #filter(!(CDI=="CD")) %>%
    as.data.frame() %>%
    mutate_if(is.character, list(~na_if(.,""))) %>%
    mutate(CDI = ifelse(!is.na(code) & is.na(CDI),"",CDI)) %>%
    # pad short codes with trailing "I" up to 4 characters
    mutate(code = str_pad(code,4, side = 'right', pad = 'I')) %>%
    fill(CDI,code)
  combined_df <- rbind(combined_df,df3)
}
# Merge dataframes
merge_df <- rbind(df1,combined_df,df2) %>%
  mutate(page_no = as.integer(page_no))
# Clean up dataframe
# Collapse the (possibly multi-line) text per species, then split it into
# scientific name, authority and common name, with several manual fixes for
# the special "SP"/"MIXI"/"CONI" group codes.
comb_df <- merge_df %>%
  group_by(CDI,code) %>%
  mutate(text = str_replace(text,'[*]','')) %>%
  mutate(text = str_replace(text,'[.]','')) %>%
  # NOTE(review): funs() is deprecated since dplyr 0.8 (still works with a
  # warning); list(~ ...) is the modern equivalent
  summarise_at(vars(-page_no),funs(paste(., collapse = " ; "))) %>%
  separate(text, c("name", "common_name","common_name1","common_name2","common_name3"), " ; ") %>%
  mutate(common_name = ifelse(!is.na(common_name1),paste0(common_name," ",common_name1),common_name)) %>%
  mutate(common_name = ifelse(!is.na(common_name2),paste0(common_name," ",common_name1," ",common_name2),common_name)) %>%
  # NOTE(review): this condition re-tests common_name2; presumably it was
  # meant to test common_name3 -- confirm against the source table
  mutate(common_name = ifelse(!is.na(common_name2),paste0(common_name," ",common_name1," ",common_name2," ",common_name3),common_name)) %>%
  select(-common_name1,-common_name2,-common_name3) %>%
  #mutate(Genus = stri_extract_first(Name, regex="\\w+")) %>%
  separate(name, into = c("genus", "species","authority"), sep = "\\s",extra="merge") %>%
  # codes ending in "SP" denote a genus-only entry: move the second word back
  # into the genus and blank the species
  mutate(genus = ifelse(grepl('SP$',code),paste0(genus," ",species),genus),
         species = ifelse(grepl('SP$',code),NA,species)) %>%
  mutate(species = ifelse(grepl('^L.',authority),paste0(species," ",authority),species),
         authority = ifelse(grepl('^L.',authority),NA,authority)) %>%
  mutate(authority = str_replace(authority,"[']",'')) %>%
  mutate(genus = ifelse(code == "MIXI",paste0(genus," ",species),genus)) %>%
  mutate(species = ifelse(code == "MIXI",NA,species)) %>%
  mutate(genus = ifelse(code == "CONI",paste0(genus," ",species),genus)) %>%
  mutate(genus = str_replace(genus,'[,]','')) %>%
  mutate(species = ifelse(code == "CONI",NA,species)) %>%
  mutate(common_name = str_squish(common_name)) %>%
  arrange(code)
# export to csv
write.csv(comb_df,"species_table.csv",row.names = FALSE)
|
#' 'Build' (i.e., evaluate) a plotly object
#'
#' This generic function creates the list object sent to plotly.js
#' for rendering. Using this function can be useful for overriding defaults
#' provided by `ggplotly`/`plot_ly` or for debugging rendering
#' errors.
#'
#' @param p a ggplot object, or a plotly object, or a list.
#' @param registerFrames should a frame trace attribute be interpreted as frames in an animation?
#' @export
#' @examples
#'
#' p <- plot_ly(economics, x = ~date, y = ~pce)
#' # the unevaluated plotly object
#' str(p)
#' # the evaluated data
#' str(plotly_build(p)$x$data)
#'
plotly_build <- function(p, registerFrames = TRUE) {
  # S3 generic: dispatches on the class of `p` (plotly, gg, list, NULL)
  UseMethod("plotly_build")
}
#' @export
plotly_build.NULL <- function(...) {
  # a NULL plot is rendered as an empty, browsable HTML <div>
  htmltools::browsable(htmltools::div(...))
}
#' @export
plotly_build.list <- function(p, registerFrames = TRUE) {
  # coerce the plain list to an htmlwidget, then build it; forward
  # `registerFrames` (previously dropped, so animation frames were always
  # registered regardless of the caller's choice)
  plotly_build(as_widget(p), registerFrames = registerFrames)
}
#' @export
plotly_build.gg <- function(p, registerFrames = TRUE) {
  # note: since preRenderHook = plotly_build in as_widget(),
  # plotly_build.plotly() will be called on gg objects as well
  # NOTE(review): `registerFrames` is not forwarded here -- confirm whether
  # ggplotly() handles frame registration itself
  plotly_build(ggplotly(p))
}
#' @export
plotly_build.plotly <- function(p, registerFrames = TRUE) {
  # Workhorse method: evaluates all formula/attribute mappings against their
  # data, splits traces on discrete variables, applies non-positional scales,
  # and verifies the result against the plotly.js schema.
  # make this plot retrievable
  set_last_plot(p)
  # evaluate layout attributes (added via layout()) against their mapped data
  layouts <- Map(function(x, y) {
    d <- plotly_data(p, y)
    x <- rapply(x, eval_attr, data = d, how = "list")
    # if an annotation attribute is an array, expand into multiple annotations
    nAnnotations <- max(lengths(x$annotations) %||% 0)
    if (!is.null(names(x$annotations))) {
      # font is the only list object, so store it, and attach after transposing
      font <- x$annotations[["font"]]
      x$annotations <- purrr::transpose(lapply(x$annotations, function(x) {
        as.list(rep(x, length.out = nAnnotations))
      }))
      for (i in seq_len(nAnnotations)) {
        x$annotations[[i]][["font"]] <- font
      }
    }
    x[lengths(x) > 0]
  }, p$x$layoutAttrs, names2(p$x$layoutAttrs))
  # get rid of the data -> layout mapping
  p$x$layoutAttrs <- NULL
  # accumulate, rather than override, annotations.
  annotations <- Reduce(c, c(
    list(p$x$layout$annotations),
    setNames(compact(lapply(layouts, "[[", "annotations")), NULL)
  ))
  # merge layouts into a single layout (more recent layouts will override older ones)
  p$x$layout <- modify_list(p$x$layout, Reduce(modify_list, layouts))
  p$x$layout$annotations <- annotations
  # keep frame mapping for populating layout.slider.currentvalue in animations
  frameMapping <- unique(unlist(
    lapply(p$x$attrs, function(x) deparse2(x[["frame"]])),
    use.names = FALSE
  ))
  if (length(frameMapping) > 1) {
    warning("Only one `frame` variable is allowed", call. = FALSE)
  }
  # Attributes should be NULL if none exist (rather than an empty list)
  if (length(p$x$attrs) == 0) p$x$attrs <- NULL
  # If there is just one (unevaluated) trace, and the data is sf, add an sf layer
  if (length(p$x$attrs) == 1 && !inherits(p$x$attrs[[1]], "plotly_eval") && is_sf(plotly_data(p))) {
    p <- add_sf(p)
  }
  # If type was not specified in plot_ly(), it doesn't create a trace unless
  # there are no other traces
  if (is.null(p$x$attrs[[1]][["type"]]) && length(p$x$attrs) > 1) {
    p$x$attrs[[1]] <- NULL
  }
  # have the attributes already been evaluated?
  is.evaled <- function(x) inherits(x, "plotly_eval")
  attrsToEval <- p$x$attrs[!vapply(p$x$attrs, is.evaled, logical(1))]
  # trace type checking and renaming for plot objects
  if (is_mapbox(p) || is_geo(p)) {
    p <- geo2cartesian(p)
    attrsToEval <- lapply(attrsToEval, function(tr) {
      if (!grepl("scatter|choropleth", tr[["type"]] %||% "scatter")) {
        stop("Cant add a '", tr[["type"]], "' trace to a map object", call. = FALSE)
      }
      if (is_mapbox(p)) tr[["type"]] <- tr[["type"]] %||% "scattermapbox"
      if (is_geo(p)) {
        tr[["type"]] <- if (!is.null(tr[["z"]])) "choropleth" else "scattergeo"
      }
      tr
    })
  }
  # evaluate trace attributes; one element per plot_ly()/add_trace() layer
  dats <- Map(function(x, y) {
    # grab the data for this trace
    dat <- plotly_data(p, y)
    # formula/symbol/attribute evaluation
    trace <- structure(
      rapply(x, eval_attr, data = dat, how = "list"),
      class = oldClass(x)
    )
    # determine trace type (if not specified, can depend on the # of data points)
    # note that this should also determine a sensible mode, if appropriate
    trace <- verify_type(trace)
    # verify orientation of boxes/bars
    trace <- verify_orientation(trace)
    # supply sensible defaults based on trace type
    trace <- coerce_attr_defaults(trace, p$x$layout)
    # attach crosstalk info, if necessary
    if (crosstalk_key() %in% names(dat) && isTRUE(trace[["inherit"]] %||% TRUE)) {
      trace[["key"]] <- trace[["key"]] %||% dat[[crosstalk_key()]]
      trace[["set"]] <- trace[["set"]] %||% attr(dat, "set")
    }
    # if appropriate, tack on a group index
    grps <- if (has_group(trace)) tryNULL(dplyr::group_vars(dat))
    if (length(grps) && any(lengths(trace) == NROW(dat))) {
      trace[[".plotlyGroupIndex"]] <- interaction(dat[, grps, drop = F])
    }
    # add sensible axis names to layout
    # (note: `<<-` writes to `p` in the enclosing plotly_build.plotly frame)
    for (i in c("x", "y", "z")) {
      nm <- paste0(i, "axis")
      idx <- which(names(trace) %in% i)
      if (length(idx) == 1) {
        title <- default(deparse2(x[[idx]]))
        if (is3d(trace$type) || i == "z") {
          p$x$layout$scene[[nm]]$title <<- p$x$layout$scene[[nm]]$title %||% title
        } else {
          p$x$layout[[nm]]$title <<- p$x$layout[[nm]]$title %||% title
        }
      }
    }
    if (inherits(trace, c("plotly_surface", "plotly_contour"))) {
      # TODO: generate matrix for users?
      # (1) if z is vector, and x/y are null throw error
      # (2) if x/y/z are vectors and length(x) * length(y) == length(z), convert z to matrix
      if (!is.matrix(trace[["z"]]) || !is.numeric(trace[["z"]])) {
        stop("`z` must be a numeric matrix", call. = FALSE)
      }
    }
    # collect non-positional scales, plotly.js data_arrays, and "special"
    # array attributes for "data training"
    Attrs <- Schema$traces[[trace[["type"]]]]$attributes
    isArray <- vapply(Attrs, function(x) {
      tryFALSE(identical(x[["valType"]], "data_array"))
    }, logical(1))
    arrayOk <- vapply(Attrs, function(x) tryNULL(x[["arrayOk"]]) %||% FALSE, logical(1))
    # "non-tidy" traces allow x/y of different lengths, so ignore those
    dataArrayAttrs <- if (is_tidy(trace)) names(Attrs)[isArray | arrayOk]
    allAttrs <- c(
      dataArrayAttrs, special_attrs(trace), npscales(), "frame",
      # for some reason, text isn't listed as a data array in some traces
      # I'm looking at you scattergeo...
      ".plotlyGroupIndex", "text", "key", "fillcolor", "name", "legendgroup"
    )
    tr <- trace[names(trace) %in% allAttrs]
    # TODO: does it make sense to "train" matrices/2D-tables (e.g. z)?
    tr <- tr[vapply(tr, function(x) is.null(dim(x)) && is.atomic(x), logical(1))]
    # white-list customdata as this can be a non-atomic vector
    tr$customdata <- trace$customdata
    builtData <- tibble::as_tibble(tr)
    # avoid clobbering I() (i.e., variables that shouldn't be scaled)
    for (i in seq_along(tr)) {
      if (inherits(tr[[i]], "AsIs")) builtData[[i]] <- I(builtData[[i]])
    }
    if (NROW(builtData) > 0) {
      # Build the index used to split one "trace" into multiple traces
      isAsIs <- vapply(builtData, function(x) inherits(x, "AsIs"), logical(1))
      isDiscrete <- vapply(builtData, is.discrete, logical(1))
      # note: can only have one linetype per trace
      isSplit <- names(builtData) %in% c("split", "linetype", "frame", "fillcolor", "name") |
        !isAsIs & isDiscrete & names(builtData) %in% c("symbol", "color")
      if (any(isSplit)) {
        paste2 <- function(x, y) if (identical(x, y)) x else paste(x, y, sep = br())
        splitVars <- builtData[isSplit]
        builtData[[".plotlyTraceIndex"]] <- Reduce(paste2, splitVars)
        # in registerFrames() we need to strip the frame from .plotlyTraceIndex
        # so keep track of which variable it is...
        trace$frameOrder <- which(names(splitVars) %in% "frame")
      }
      # Build the index used to determine grouping (later on, NAs are inserted
      # via group2NA() to create the groups). This is done in 3 parts:
      # 1. Sort data by the trace index since groups are nested within traces.
      # 2. Translate missing values on positional scales to a grouping variable.
      #    If grouping isn't relevant for this trace, a warning is thrown since
      #    NAs are removed.
      # 3. The grouping from (2) and any groups detected via dplyr::groups()
      #    are combined into a single grouping variable, .plotlyGroupIndex
      builtData <- arrange_safe(builtData, ".plotlyTraceIndex")
      isComplete <- complete.cases(builtData[names(builtData) %in% c("x", "y", "z")])
      # warn about missing values if groups aren't relevant for this trace type
      if (any(!isComplete) && !has_group(trace)) {
        warning("Ignoring ", sum(!isComplete), " observations", call. = FALSE)
      }
      builtData[[".plotlyMissingIndex"]] <- cumsum(!isComplete)
      builtData <- builtData[isComplete, ]
      if (length(grps) && has_group(trace) && isTRUE(trace[["connectgaps"]])) {
        stop(
          "Can't use connectgaps=TRUE when data has group(s).", call. = FALSE
        )
      }
      builtData[[".plotlyGroupIndex"]] <- interaction(
        builtData[[".plotlyGroupIndex"]] %||% "",
        builtData[[".plotlyMissingIndex"]]
      )
      builtData <- arrange_safe(builtData,
        c(".plotlyTraceIndex", ".plotlyGroupIndex",
          if (inherits(trace, "plotly_line")) "x")
      )
      builtData <- train_data(builtData, trace)
      trace[[".plotlyVariableMapping"]] <- names(builtData)
      # copy over to the trace data
      for (i in names(builtData)) {
        trace[[i]] <- builtData[[i]]
      }
    }
    # TODO: provide a better way to clean up "high-level" attrs
    trace[c("ymin", "ymax", "yend", "xend")] <- NULL
    trace[lengths(trace) > 0]
  }, attrsToEval, names2(attrsToEval))
  # mark every attribute set as evaluated so re-building is a no-op
  p$x$attrs <- lapply(p$x$attrs, function(x) structure(x, class = "plotly_eval"))
  # traceify by the interaction of discrete variables
  traces <- list()
  for (i in seq_along(dats)) {
    d <- dats[[i]]
    scaleAttrs <- names(d) %in% paste0(npscales(), "s")
    traces <- c(traces, traceify(d[!scaleAttrs], d$.plotlyTraceIndex))
    if (i == 1) traces[[1]] <- c(traces[[1]], d[scaleAttrs])
  }
  # insert NAs to differentiate groups
  traces <- lapply(traces, function(x) {
    d <- tibble::as_tibble(x[names(x) %in% x$.plotlyVariableMapping])
    d <- group2NA(
      d, if (has_group(x)) ".plotlyGroupIndex",
      ordered = if (inherits(x, "plotly_line")) "x",
      retrace.first = inherits(x, "plotly_polygon")
    )
    for (i in x$.plotlyVariableMapping) {
      # try to reduce the amount of data we have to send for non-positional scales
      entry <- if (i %in% npscales()) uniq(d[[i]]) else d[[i]]
      if (is.null(entry)) {
        x[[i]] <- NULL
      } else {
        x[[i]] <- structure(entry, class = oldClass(x[[i]]))
      }
    }
    x
  })
  # Map special plot_ly() arguments to plotly.js trace attributes.
  # Note that symbol/linetype can modify the mode, so those are applied first
  # TODO: use 'legends 2.0' to create legends for these discrete mappings
  # https://github.com/plotly/plotly.js/issues/1668
  if (length(traces)) {
    traces <- map_symbol(traces)
    traces <- map_linetype(traces)
    traces <- map_size(traces)
    traces <- map_size(traces, stroke = TRUE) #i.e., span
    colorTitle <- unlist(lapply(p$x$attrs, function(x) { deparse2(x[["color"]] %||% x[["z"]]) }))
    strokeTitle <- unlist(lapply(p$x$attrs, function(x) deparse2(x[["stroke"]])))
    traces <- map_color(traces, title = paste(colorTitle, collapse = br()), colorway = colorway(p))
    traces <- map_color(traces, stroke = TRUE, title = paste(strokeTitle, collapse = br()), colorway = colorway(p))
  }
  for (i in seq_along(traces)) {
    # remove special mapping attributes
    mappingAttrs <- c(
      "alpha", "alpha_stroke", npscales(), paste0(npscales(), "s"),
      ".plotlyGroupIndex", ".plotlyMissingIndex",
      ".plotlyTraceIndex", ".plotlyVariableMapping", "inherit"
    )
    for (j in mappingAttrs) {
      traces[[i]][[j]] <- NULL
    }
  }
  # .crossTalkKey -> key
  traces <- lapply(traces, function(x) {
    setNames(x, sub(crosstalk_key(), "key", names(x), fixed = TRUE))
  })
  # it's possible that the plot object already has some traces
  # (like figures pulled from a plotly server)
  p$x$data <- setNames(c(p$x$data, traces), NULL)
  # supply linked highlighting options/features
  p <- supply_highlight_attrs(p)
  # supply trace anchor and domain information
  p <- supply_defaults(p)
  # attribute naming corrections for "geo-like" traces
  p <- cartesian2geo(p)
  # Compute sensible bounding boxes for each mapbox/geo subplot
  p <- fit_bounds(p)
  # polar charts don't like null width/height keys
  if (is.null(p$x$layout[["height"]])) p$x$layout[["height"]] <- NULL
  if (is.null(p$x$layout[["width"]])) p$x$layout[["width"]] <- NULL
  # ensure we get the order of categories correct
  # (plotly.js uses the order in which categories appear by default)
  p <- populate_categorical_axes(p)
  # translate '\n' to '<br />' in text strings
  p <- translate_linebreaks(p)
  # if it makes sense, add markers/lines/text to mode
  p <- verify_mode(p)
  # annotations & shapes must be an array of objects
  # TODO: should we add anything else to this?
  p <- verify_arrays(p)
  # set a sensible hovermode if it hasn't been specified already
  p <- verify_hovermode(p)
  # try to convert to webgl if toWebGl was used
  p <- verify_webgl(p)
  # throw warning if webgl is being used in shinytest
  # currently, shinytest won't rely this warning, but it should
  # https://github.com/rstudio/shinytest/issues/146
  if (isTRUE(getOption("shiny.testmode"))) {
    if (is.webgl(p)) warning("shinytest can't currently render WebGL-based graphics.")
  }
  # crosstalk dynamically adds traces, meaning that a legend could be dynamically
  # added, which is confusing. So here we populate a sensible default.
  p <- verify_showlegend(p)
  # NOTE: this needs to occur *before* registering frames so simple/nested key
  # flags get passed onto frame data.
  p <- verify_key_type(p)
  if (registerFrames) {
    p <- registerFrames(p, frameMapping = frameMapping)
  }
  # set the default plotly.js events to register in shiny
  p <- shiny_defaults_set(p)
  p <- verify_guides(p)
  # verify colorscale attributes are in a sensible data structure
  p <- verify_colorscale(p)
  # verify plot attributes are legal according to the plotly.js spec
  p <- verify_attr_names(p)
  # box up 'data_array' attributes where appropriate
  p <- verify_attr_spec(p)
  # make sure we're including mathjax (if TeX() is used)
  p <- verify_mathjax(p)
  # if a partial bundle was specified, make sure it supports the visualization
  p <- verify_partial_bundle(p)
  # scattergl currently doesn't render in RStudio on Windows
  # https://github.com/ropensci/plotly/issues/1214
  p <- verify_scattergl_platform(p)
  # make sure plots don't get sent out of the network (for enterprise)
  p$x$base_url <- get_domain()
  p
}
# ----------------------------------------------------------------
# Functions used solely within plotly_build
# ----------------------------------------------------------------
# Convert per-trace `frame` attributes into plotly.js animation frames.
# Every frame receives the full ("global") set of traces -- traces that do
# not belong to a given frame are included but marked invisible -- and a
# slider/play-button UI is supplied by default.
registerFrames <- function(p, frameMapping = NULL) {
  # ensure one frame value per trace, and if its missing, insert NA
  p$x$data <- lapply(p$x$data, function(tr) {
    tr[["frame"]] <- tr[["frame"]][[1]] %||% NA
    tr
  })
  # the ordering of this object determines the ordering of the frames
  frameAttrs <- unlist(lapply(p$x$data, "[[", "frame"))
  # NOTE: getLevels() should drop NAs
  frameNames <- getLevels(frameAttrs)
  p$x$data <- lapply(p$x$data, function(tr) { tr$frame <- as.character(tr$frame); tr })
  # remove frames from the trace names
  for (i in seq_along(p$x$data)) {
    tr <- p$x$data[[i]]
    if (length(tr[["name"]]) != 1) next
    nms <- strsplit(as.character(tr[["name"]]), br())[[1]]
    idx <- setdiff(seq_along(nms), tr$frameOrder %||% 0)
    p$x$data[[i]]$name <- if (length(idx)) paste(nms[idx], collapse = br()) else NULL
    p$x$data[[i]]$frameOrder <- NULL
  }
  # exit in trivial cases
  nFrames <- length(frameNames)
  if (nFrames < 2) return(p)
  # --------------------------------------------------------------------------
  # set a "global" range of x/y (TODO: handle multiple axes?)
  # --------------------------------------------------------------------------
  x <- unlist(lapply(p$x$data, function(x) x[["x"]]))
  if (is.numeric(x)) {
    rng <- range(x, na.rm = TRUE)
    if (identical(p$x$layout$xaxis$type, "log")) {
      rng <- log10(rng)
      rng[is.nan(rng)] <- 0
    }
    p$x$layout$xaxis$range <- p$x$layout$xaxis$range %||% extendrange(rng)
  }
  y <- unlist(lapply(p$x$data, function(x) x[["y"]]))
  if (is.numeric(y)) {
    rng <- range(y, na.rm = TRUE)
    if (identical(p$x$layout$yaxis$type, "log")) {
      rng <- log10(rng)
      rng[is.nan(rng)] <- 0
    }
    p$x$layout$yaxis$range <- p$x$layout$yaxis$range %||% extendrange(rng)
  }
  # --------------------------------------------------------------------------
  # Similar to setting a global x/y range, we need a "global trace range"
  #
  # implementation details via @rreusser: frames specify *state changes*,
  # so if frame 1 has 3 traces, and frame 2 has 2 traces,
  # we need to explicity supply 3 traces
  # in both frames, but make 1 invisible in frame 2. For example,
  # http://codepen.io/cpsievert/pen/gmXVWe
  # For that reason, every frame (including the "initial" frame) has the
  # max # of traces and "missing traces" are not visible (i.e., `visible=false`)
  # --------------------------------------------------------------------------
  # remember, at this point, frame has been removed from the trace name
  frameTraceNames <- unique(unlist(lapply(p$x$data[!is.na(frameAttrs)], "[[", "name")))
  for (i in seq_along(frameNames)) {
    nm <- frameNames[[i]]
    d <- p$x$data[sapply(p$x$data, "[[", "frame") %in% nm]
    # ensure, the frames API knows what is visible/invisible
    d <- lapply(d, function(tr) { tr$visible <- tr$visible %||% TRUE; tr })
    # if this frame is missing a trace name, supply an invisible one
    traceNamesMissing <- setdiff(frameTraceNames, sapply(d, "[[", "name"))
    for (j in traceNamesMissing) {
      idx <- vapply(p$x$data, function(tr) isTRUE(tr[["name"]] == j), logical(1))
      idx <- which(idx)[[1]]
      invisible <- modify_list(p$x$data[[idx]], list(visible = FALSE))
      d <- c(d, list(invisible))
    }
    p$x$frames[[i]] <- list(
      name = as.character(format(nm)),
      data = lapply(d, function(tr) {
        spec <- Schema$traces[[tr$type %||% "scatter"]]$attributes
        verify_attr(tr, spec)
      })
    )
  }
  # ensure the plot knows about the "global trace range"
  firstFrame <- vapply(p$x$data, function(tr) isTRUE(tr[["frame"]] %in% frameNames[[1]]), logical(1))
  p$x$data[firstFrame] <- p$x$frames[[1]]$data
  # remove frame traces
  idx <- vapply(p$x$data, function(tr) isTRUE(tr[["frame"]] %in% frameNames[-1]), logical(1))
  p$x$data[idx] <- NULL
  # this works since we now have a global trace range
  p$x$frames <- lapply(p$x$frames, function(f) {
    f$traces <- i(which(!is.na(sapply(p$x$data, "[[", "frame"))) - 1)
    f
  })
  # retrain color defaults
  p$x$data <- colorway_retrain(p$x$data, colorway(p))
  p$x$frames <- lapply(p$x$frames, function(f) {
    f$data <- colorway_retrain(f$data, colorway(p)[f$traces + 1])
    f
  })
  # populate layout.sliders.currentvalue with a sensible default
  defaultvalue <- if (length(frameMapping) == 1) {
    list(
      prefix = paste0(frameMapping, ": "),
      xanchor = 'right',
      font = list(
        size = 16,
        color = toRGB("gray80")
      )
    )
  } else NULL
  # supply animation option defaults (a la, highlight_defaults())
  p$animation <- p$animation %||% animation_opts_defaults()
  # if all the frame trace data are scatter traces, set a default of redraw=F
  types <- unique(unlist(lapply(p$x$frames, function(f) {
    vapply(f$data, function(tr) tr$type %||% "scatter", character(1))
  })))
  if (identical(types, "scatter") && is.default(p$animation$frame$redraw)) {
    p$animation$frame$redraw <- default(FALSE)
  }
  # _always_ display an animation button and slider by default
  animation_button_supply(
    animation_slider_supply(p, currentvalue = defaultvalue)
  )
}
# Trace-type specific data preparation ("training") applied after attribute
# evaluation: ribbons get their polygon outline computed, and segments are
# expanded so each (x, y) -> (xend, yend) pair becomes its own two-row group.
train_data <- function(data, trace) {
  if (inherits(trace, "plotly_ribbon")) {
    data <- ribbon_dat(data)
  }
  if (inherits(trace, "plotly_segment")) {
    # TODO: this could be faster, more efficient
    data$.plotlyGroupIndex <- seq_len(NROW(data))
    # duplicate every row; the second copy of each pair gets the end point
    idx <- rep(seq_len(NROW(data)), each = 2)
    # NOTE(review): "^yend" has no trailing "$" (unlike "^xend$"), so any
    # column whose name merely starts with "yend" is dropped -- confirm
    # this is intentional
    dat <- as.data.frame(data[!grepl("^xend$|^yend", names(data))])
    dat <- dat[idx, ]
    idx2 <- seq.int(2, NROW(dat), by = 2)
    dat[idx2, "x"] <- data[["xend"]]
    dat[idx2, "y"] <- data[["yend"]]
    # NOTE(review): dplyr::group_by_() is deprecated (dplyr >= 0.7); kept
    # as-is to preserve behavior
    data <- dplyr::group_by_(dat, ".plotlyGroupIndex", add = TRUE)
  }
  # TODO: a lot more geoms!!!
  data
}
# Map the high-level `size` attribute (or `span` when stroke = TRUE) onto the
# relevant plotly.js sizing attributes (marker.size, line.width, thickness, ...).
# Sizes are rescaled to a shared range so they are comparable across traces.
#
# @param traces list of built trace objects.
# @param stroke if TRUE, map `span` (stroke width) instead of `size`.
# @return the list of traces with sizing attributes populated.
map_size <- function(traces, stroke = FALSE) {
  sizeList <- lapply(traces, "[[", if (stroke) "span" else "size")
  nSizes <- lengths(sizeList)
  # if no "top-level" size is present, return traces untouched
  if (all(nSizes == 0)) return(traces)
  allSize <- unlist(compact(sizeList))
  # size must map to a numeric scale
  if (!is.null(allSize) && is.discrete(allSize)) {
    stop("`size`/`width` values must be numeric .", call. = FALSE)
  }
  # range across *all* traces, so per-trace rescaling is globally consistent
  sizeRange <- range(allSize, na.rm = TRUE)
  # choose the attribute-mapping function once, based on stroke vs. size
  mapSize <- switch(
    if (stroke) "span" else "size",
    span = function(trace, sizes) {
      type <- trace$type %||% "scatter"
      size_ <- uniq(sizes)
      # a single unique value can be set as a scalar attribute
      isSingular <- length(size_) == 1
      attrs <- Schema$traces[[type]]$attributes
      # `span` controls marker.line.width
      if (has_attr(type, "marker")) {
        # NOTE: `s` may be NULL when arrays aren't supported; default(NULL) is a no-op
        s <- if (isSingular) size_ else if (array_ok(attrs$marker$line$width)) sizes
        trace$marker$line <- modify_list(list(width = default(s)), trace$marker$line)
      }
      # `span` controls error_[x/y].thickness
      for (attr in c("error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        s <- if (isSingular) size_ else if (array_ok(attrs[[attr]]$thickness)) sizes
        trace[[attr]] <- modify_list(list(thickness = default(s)), trace[[attr]])
      }
      # When fill exists, `span` controls line.width
      if (has_fill(trace) && has_attr(type, "line")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$line$width)) sizes else NA
        if (is.na(s)) {
          warning("`line.width` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(width = default(s)), trace[["line"]])
        }
      }
      trace
    },
    size = function(trace, sizes) {
      type <- trace$type %||% "scatter"
      size_ <- uniq(sizes)
      isSingular <- length(size_) == 1
      attrs <- Schema$traces[[type]]$attributes
      # `size` controls marker.size (note 'bar' traces have marker but not marker.size)
      # TODO: always ensure an array? https://github.com/ropensci/plotly/pull/1176
      if (has_attr(type, "marker") && "size" %in% names(attrs$marker)) {
        s <- if (isSingular) size_ else if (array_ok(attrs$marker$size)) sizes
        # sizemode = "area" so perceived size scales with value, not diameter
        trace$marker <- modify_list(list(size = default(s), sizemode = default("area")), trace$marker)
      }
      # `size` controls textfont.size
      if (has_attr(type, "textfont")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$textfont$size)) sizes
        trace$textfont <- modify_list(list(size = default(s)), trace$textfont)
      }
      # `size` controls error_[x/y].width
      for (attr in c("error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        s <- if (isSingular) size_ else if (array_ok(attrs[[attr]]$width)) sizes
        trace[[attr]] <- modify_list(list(width = default(s)), trace[[attr]])
      }
      # if fill does not exist, `size` controls line.width
      if (!has_fill(trace) && has_attr(type, "line")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$line$width)) sizes else NA
        if (is.na(s)) {
          warning("`line.width` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(width = default(s)), trace[["line"]])
        }
      }
      trace
    }
  )
  for (i in which(nSizes > 0)) {
    s <- sizeList[[i]]
    # AsIs (I()) values are taken verbatim rather than rescaled
    isConstant <- inherits(s, "AsIs")
    sizeI <- if (isConstant) {
      structure(s, class = setdiff(class(s), "AsIs"))
    } else {
      # user-supplied output range lives in the first trace's sizes/spans
      to <- if (stroke) traces[[1]][["spans"]] else traces[[1]][["sizes"]]
      scales::rescale(s, from = sizeRange, to = to)
    }
    traces[[i]] <- mapSize(traces[[i]], sizeI)
  }
  traces
}
# appends a new (empty) trace to generate (plot-wide) colorbar/colorscale
#
# Maps the high-level `color` attribute (or `stroke` when stroke = TRUE) onto
# the relevant plotly.js color attributes. Handles three cases per trace:
# constant color codes (AsIs/colorway), discrete mappings (factor -> palette),
# and numeric mappings (value -> shared colorscale + colorbar trace).
#
# @param traces list of built trace objects.
# @param stroke if TRUE, map `stroke` instead of `color`.
# @param title colorbar title.
# @param colorway plot-wide default colors, recycled across traces.
# @param na.color color for missing values.
# @return the list of traces (possibly with one extra colorbar trace appended).
map_color <- function(traces, stroke = FALSE, title = "", colorway, na.color = "transparent") {
  # stroke inherits from color when unspecified
  color <- if (stroke) {
    lapply(traces, function(x) { x[["stroke"]] %||% x[["color"]] })
  } else {
    lapply(traces, function(x) { x[["color"]] %||% if (grepl("histogram2d", x[["type"]])) c(0, 1) else if (has_attr(x[["type"]], "colorscale")) x[["surfacecolor"]] %||% x[["z"]] })
  }
  alphas <- if (stroke) {
    vapply(traces, function(x) x$alpha_stroke %||% 1, numeric(1))
  } else {
    vapply(traces, function(x) x[["alpha"]] %||% 1, numeric(1))
  }
  # classify each trace's color spec: constant code, numeric, or discrete
  isConstant <- vapply(color, function(x) inherits(x, "AsIs") || is.null(x), logical(1))
  isNumeric <- vapply(color, is.numeric, logical(1)) & !isConstant
  isDiscrete <- vapply(color, is.discrete, logical(1)) & !isConstant
  if (any(isNumeric & isDiscrete)) stop("Can't have both discrete and numeric color mappings", call. = FALSE)
  uniqColor <- lapply(color, uniq)
  isSingular <- lengths(uniqColor) == 1
  # color/colorscale/colorbar attribute placement depends on trace type and marker mode
  # TODO: remove these and make numeric colorscale mapping more like the rest
  types <- vapply(traces, function(tr) tr$type %||% "scatter", character(1))
  modes <- vapply(traces, function(tr) tr$mode %||% "lines", character(1))
  hasLine <- has_line(types, modes)
  hasLineColor <- has_color_array(types, "line")
  hasText <- has_text(types, modes)
  hasTextColor <- has_color_array(types, "text")
  # traces whose colorscale attaches directly to `z` (e.g. heatmap/surface)
  hasZ <- has_attr(types, "colorscale") & !stroke &
    any(vapply(traces, function(tr) {
      !is.null(tr[["z"]]) || grepl("histogram2d", tr[["type"]])
    }, logical(1)))
  # IDEA - attach color codes whether they make sense, unless there is a
  # vector of color codes and the target is a constant
  # choose the attribute-mapping function once, based on stroke vs. fill
  mapColor <- switch(
    if (stroke) "stroke" else "fill",
    stroke = function(trace, rgba, is_colorway = FALSE) {
      type <- trace$type %||% "scatter"
      rgba_ <- uniq(rgba)
      isSingular <- length(rgba_) == 1
      attrs <- Schema$traces[[type]]$attributes
      # colorway-derived defaults get a marker class so they can be re-trained later
      default_ <- if (is_colorway) function(x) prefix_class(default(x), "colorway") else default
      # `stroke` controls marker.line.color
      if (has_attr(type, "marker")) {
        col <- if (isSingular) rgba_ else if (array_ok(attrs$marker$line$color)) rgba
        trace$marker$line <- modify_list(list(color = default_(col)), trace$marker$line)
      }
      # when fill exists, `stroke` controls line.color
      if (has_fill(trace)) {
        col <- if (isSingular) rgba_ else if (array_ok(attrs$line$color)) rgba
        if (is.null(col)) {
          warning("`line.color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace$line <- modify_list(list(color = default_(col)), trace$line)
        }
      }
      trace
    },
    fill = function(trace, rgba, is_colorway = FALSE) {
      type <- trace$type %||% "scatter"
      rgba_ <- uniq(rgba)
      isSingular <- length(rgba_) == 1
      attrs <- Schema$traces[[type]]$attributes
      default_ <- if (is_colorway) function(x) prefix_class(default(x), "colorway") else default
      # `color` controls marker.color, textfont.color, error_[x/y].color
      # TODO: any more attributes that make sense to include here?
      for (attr in c("marker", "textfont", "error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        if (is_colorway && "textfont" == attr) next
        col <- if (isSingular) rgba_ else if (array_ok(attrs[[attr]]$color)) rgba else NA
        if (is.na(col)) {
          warning("`", attr, ".color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[[attr]] <- modify_list(list(color = default_(col)), trace[[attr]])
        }
      }
      # If trace has fill, `color` controls fillcolor; otherwise line.color
      if (has_fill(trace)) {
        if (!isSingular) warning("Only one fillcolor per trace allowed", call. = FALSE)
        # alpha defaults to 0.5 when applied to fillcolor
        if (is.null(trace[["alpha"]])) rgba_ <- toRGB(rgba_, 0.5)
        if (isSingular) trace <- modify_list(list(fillcolor = default_(rgba_)), trace)
      } else if (has_attr(type, "line")) {
        # if fill does not exist, 'color' controls line.color
        col <- if (isSingular) rgba_ else if (array_ok(attrs$line$color)) rgba else NA
        if (is.na(col)) {
          warning("`line.color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(color = default_(col)), trace[["line"]])
        }
      }
      trace
    }
  )
  # i.e., interpret values as color codes
  if (any(isConstant)) {
    # missing constants fall back to the plot-wide colorway, recycled per trace
    colorCodes <- Map(`%||%`, color, rep(colorway, length.out = length(traces)))
    colorCodes <- Map(toRGB, colorCodes[isConstant], alphas[isConstant])
    isColorway <- lengths(color[isConstant]) == 0
    traces[isConstant] <- Map(mapColor, traces[isConstant], colorCodes, isColorway)
  }
  # since stroke inherits from color, it should inherit the palette too
  palette <- if (stroke) traces[[1]][["strokes"]] %||% traces[[1]][["colors"]] else traces[[1]][["colors"]]
  if (any(isDiscrete)) {
    # unlist() does _not_ preserve order factors
    isOrdered <- all(vapply(color[isDiscrete], is.ordered, logical(1)))
    lvls <- getLevels(unlist(color[isDiscrete]))
    N <- length(lvls)
    # ordered factors get a sequential palette; unordered get a qualitative one
    pal <- palette %||% if (isOrdered) viridisLite::viridis(N) else RColorBrewer::brewer.pal(N, "Set2")
    colScale <- scales::col_factor(pal, levels = names(pal) %||% lvls, na.color = na.color)
    color_codes <- Map(function(x, y) toRGB(colScale(as.character(x)), y), color[isDiscrete], alphas[isDiscrete])
    traces[isDiscrete] <- Map(mapColor, traces[isDiscrete], color_codes)
  }
  if (any(isNumeric)) {
    pal <- palette %||% viridisLite::viridis(10)
    # TODO: use ggstat::frange() when it's on CRAN?
    # global range across traces so they share one colorscale
    allColor <- unlist(color[isNumeric])
    rng <- range(allColor, na.rm = TRUE)
    colScale <- scales::col_numeric(pal, rng, na.color = na.color)
    # generate the colorscale to be shared across traces
    vals <- if (diff(rng) > 0) {
      seq(rng[1], rng[2], length.out = 25)
    } else {
      c(0, 1)
    }
    colorScale <- matrix(
      c(scales::rescale(vals), toRGB(colScale(vals), alphas[[1]])),
      ncol = 2
    )
    # shared colorscale spec; copied into each numeric trace below
    colorObj <- list(
      colorbar = lapply(list(title = as.character(title), ticklen = 2), default),
      cmin = default(rng[1]),
      cmax = default(rng[2]),
      colorscale = default(colorScale),
      showscale = default(FALSE)
    )
    for (i in which(isNumeric)) {
      # when colorscale is being attached to `z`, we don't need color values in
      # colorObj, so create colorbar trace now and exit early
      if (hasZ[[i]]) {
        colorObj[c("cmin", "cmax")] <- NULL
        colorObj[["showscale"]] <- default(TRUE)
        traces[[i]] <- modify_list(colorObj, traces[[i]])
        traces[[i]] <- structure(traces[[i]], class = c("plotly_colorbar", "zcolor"))
        next
      }
      # if trace is singular (i.e., one unique color in this trace), then there
      # is no need for a colorscale, and both stroke/color have relevancy
      if (isSingular[[i]]) {
        col <- colScale(uniq(color[[i]]))
        traces[[i]] <- mapColor(traces[[i]], toRGB(col, alphas[[i]]))
      } else {
        colorObj$color <- default(color[[i]])
        if (stroke) {
          traces[[i]]$marker$line <- modify_list(colorObj, traces[[i]]$marker$line)
        } else {
          traces[[i]]$marker <- modify_list(colorObj, traces[[i]]$marker)
        }
        if (hasLine[[i]]) {
          if (hasLineColor[[i]]) {
            traces[[i]]$line <- modify_list(colorObj, traces[[i]]$line)
          } else {
            warning("line.color doesn't (yet) support data arrays", call. = FALSE)
          }
        }
        if (hasText[[i]]) {
          if (hasTextColor[[i]]) {
            traces[[i]]$textfont <- modify_list(colorObj, traces[[i]]$textfont)
          } else {
            warning("textfont.color doesn't (yet) support data arrays", call. = FALSE)
          }
        }
        # TODO: how to make the summary stat (mean) customizable?
        if (has_fill(traces[[i]])) {
          warning("Only one fillcolor per trace allowed", call. = FALSE)
          col <- toRGB(colScale(mean(colorObj$color, na.rm = TRUE)), alphas[[i]])
          if (is.null(traces[[i]][["alpha"]])) col <- toRGB(col, 0.5)
          traces[[i]] <- modify_list(list(fillcolor = col), traces[[i]])
        }
      }
    }
    # exit early if no additional colorbar trace is needed
    if (any(hasZ)) return(traces)
    if (stroke && sum(lengths(lapply(traces, "[[", "stroke"))) == 0) return(traces)
    # add an "empty" trace with the colorbar
    # (invisible markers spanning the data range, so the colorbar renders)
    colorObj$color <- rng
    colorObj$showscale <- default(TRUE)
    colorBarTrace <- list(
      x = range(unlist(lapply(traces, "[[", "x")), na.rm = TRUE),
      y = range(unlist(lapply(traces, "[[", "y")), na.rm = TRUE),
      type = if (any(types %in% glTypes())) "scattergl" else "scatter",
      mode = "markers",
      opacity = 0,
      hoverinfo = "none",
      showlegend = FALSE,
      marker = colorObj
    )
    # 3D needs a z property
    if ("scatter3d" %in% types) {
      colorBarTrace$type <- "scatter3d"
      colorBarTrace$z <- range(unlist(lapply(traces, "[[", "z")), na.rm = TRUE)
    }
    # geo/mapbox traces use lat/lon instead of x/y
    if (length(type <- intersect(c("scattergeo", "scattermapbox"), types))) {
      colorBarTrace$type <- type
      colorBarTrace$lat <- colorBarTrace$y
      colorBarTrace$lon <- colorBarTrace$x
      colorBarTrace[["x"]] <- NULL
      colorBarTrace[["y"]] <- NULL
    }
    traces[[length(traces) + 1]] <- structure(colorBarTrace, class = "plotly_colorbar")
  }
  traces
}
# Map the high-level `symbol` attribute onto plotly.js `marker.symbol` codes.
#
# @param traces list of built trace objects.
# @return the list of traces with marker.symbol populated (and "markers"
#   appended to the mode where necessary).
map_symbol <- function(traces) {
  symbolList <- lapply(traces, "[[", "symbol")
  nSymbols <- lengths(symbolList)
  # if no "top-level" symbol is present, return traces untouched
  if (all(nSymbols == 0)) {
    return(traces)
  }
  symbol <- unlist(compact(symbolList))
  lvls <- getLevels(symbol)
  # get a sensible default palette (also throws warnings)
  pal <- setNames(scales::shape_pal()(length(lvls)), lvls)
  pal <- supplyUserPalette(pal, traces[[1]][["symbols"]])
  validSymbols <- as.character(Schema$traces$scatter$attributes$marker$symbol$values)
  for (i in which(nSymbols > 0)) {
    s <- symbolList[[i]]
    # AsIs (I()) values bypass the palette and are used verbatim
    symbols <- pch2symbol(if (inherits(s, "AsIs")) s else as.character(pal[as.character(s)]))
    illegalSymbols <- setdiff(symbols, validSymbols)
    if (length(illegalSymbols)) {
      # fix: close the final quote after the valid-symbol list
      # (consistent with the analogous warning in map_linetype())
      warning(
        "The following are not valid symbol codes:\n'",
        paste(illegalSymbols, collapse = "', '"), "'\n",
        "Valid symbols include:\n'",
        paste(validSymbols, collapse = "', '"), "'", call. = FALSE
      )
    }
    traces[[i]][["marker"]] <- modify_list(
      list(symbol = default(symbols)), traces[[i]][["marker"]]
    )
    # ensure the mode is set so that the symbol is relevant
    if (!grepl("markers", traces[[i]]$mode %||% "")) {
      message("Adding markers to mode; otherwise symbol would have no effect.")
      traces[[i]]$mode <- paste0(traces[[i]]$mode, "+markers")
    }
  }
  traces
}
# Map the high-level `linetype` attribute onto plotly.js `line.dash` codes.
#
# @param traces list of built trace objects.
# @return the list of traces with line.dash populated (and "lines" appended
#   to the mode where necessary).
map_linetype <- function(traces) {
  ltyList <- lapply(traces, "[[", "linetype")
  nLinetypes <- lengths(ltyList)
  # nothing to do when no trace carries a top-level linetype
  if (all(nLinetypes == 0)) {
    return(traces)
  }
  allLty <- unlist(compact(ltyList))
  lvls <- getLevels(allLty)
  # sensible default palette, possibly overridden by the user's `linetypes`
  pal <- setNames(scales::linetype_pal()(length(lvls)), lvls)
  pal <- supplyUserPalette(pal, traces[[1]][["linetypes"]])
  validLinetypes <- as.character(Schema$traces$scatter$attributes$line$dash$values)
  if (length(pal) > length(validLinetypes)) {
    warning("plotly.js only supports 6 different linetypes", call. = FALSE)
  }
  for (idx in which(nLinetypes > 0)) {
    lty <- ltyList[[idx]]
    # AsIs (I()) values bypass the palette and are used verbatim
    dashes <- lty2dash(if (inherits(lty, "AsIs")) lty else as.character(pal[as.character(lty)]))
    badDashes <- setdiff(dashes, validLinetypes)
    if (length(badDashes)) {
      warning(
        "The following are not valid linetype codes:\n'",
        paste(badDashes, collapse = "', '"), "'\n",
        "Valid linetypes include:\n'",
        paste(validLinetypes, collapse = "', '"), "'", call. = FALSE
      )
    }
    traces[[idx]][["line"]] <- modify_list(
      list(dash = default(dashes)), traces[[idx]][["line"]]
    )
    # linetype only has a visible effect when "lines" is part of the mode
    if (!grepl("lines", traces[[idx]]$mode %||% "")) {
      message("Adding lines to mode; otherwise linetype would have no effect.")
      traces[[idx]][["mode"]] <- paste0(traces[[idx]][["mode"]], "+lines")
    }
  }
  traces
}
# Break up a single trace into multiple traces according to the values of `x`.
#
# @param dat a (list) trace object whose length-n elements are split.
# @param x a vector (length n) of grouping values; NULL means "no split".
# @return a list of traces, one per (observed) level of `x`.
traceify <- function(dat, x = NULL) {
  if (length(x) == 0) {
    return(list(dat))
  }
  lvls <- if (is.factor(x)) levels(x) else unique(x)
  lvls <- lvls[lvls %in% x]
  # the order of lvls determines the order in which traces are drawn
  # for ordered factors at least, it makes sense to draw the highest level first
  # since that _should_ be the darkest color in a sequential palette
  if (is.ordered(x)) lvls <- rev(lvls)
  n <- length(x)
  # recursively descend into list elements; subset any leaf of length n
  subset_entry <- function(z, idx) {
    if (is.list(z)) {
      lapply(z, subset_entry, idx)
    } else if (length(z) == n) {
      z[idx]
    } else {
      z
    }
  }
  lapply(lvls, function(lvl) {
    piece <- lapply(dat, subset_entry, x %in% lvl)
    # each split inherits the level as its trace name (unless already named)
    piece$name <- piece$name %||% lvl
    piece
  })
}
# Evaluate an attribute: formulas are evaluated against `data`;
# anything else is passed through untouched.
eval_attr <- function(x, data = NULL) {
  if (!lazyeval::is_formula(x)) {
    return(x)
  }
  lazyeval::f_eval(x, data)
}
# Overwrite entries of a default palette with the user-defined palette.
# Named user entries replace by name; unnamed entries replace by position.
supplyUserPalette <- function(default, user) {
  nms <- names(user)
  for (j in seq_along(user)) {
    key <- if (is.null(nms)) j else nms[j]
    default[key] <- user[j]
  }
  default
}
# helper functions
# TRUE iff the plotly.js schema marks this attribute as accepting arrays
# (tryNULL() guards against attributes missing from the schema)
array_ok <- function(attr) {
  isTRUE(tryNULL(attr$arrayOk))
}
# Does this trace currently have a fill?
# A trace type with a fillcolor attribute but no fill attribute always fills;
# otherwise the trace's own `fill` setting must be something other than "none".
has_fill <- function(trace) {
  trace_type <- trace[["type"]] %||% "scatter"
  if (!has_attr(trace_type, "fillcolor")) {
    return(FALSE)
  }
  if (!has_attr(trace_type, "fill")) {
    return(TRUE)
  }
  fill_setting <- trace[["fill"]] %||% "none"
  isTRUE(fill_setting != "none")
}
# ensure we've set a sensible trace defaults
# based on the trace type
#
# @param trace a built trace object.
# @param layout the plot's layout list (read for paper_bgcolor).
# @return the trace with type-dependent defaults filled in.
coerce_attr_defaults <- function(trace, layout) {
  # if user has specified stroke, make sure the span
  # defaults to something greater than 0 (so they can see the stroke!)
  if (length(trace[["stroke"]]) && !is.default(trace[["stroke"]])) {
    trace$span <- trace[["span"]] %||% default(I(1))
  }
  # guard against a missing trace type: `NULL %in% ...` yields logical(0),
  # which would error inside if(); "scatter" is the default type elsewhere
  # in this file
  if ((trace[["type"]] %||% "scatter") %in% c("sunburst", "pie")) {
    # As of v1.46.1, paper_bgcolor defaults to '#fff' which
    # col2rgb() can't parse, but expands to '#ffffff'
    # https://stackoverflow.com/a/2899224/1583084
    bgcolor <- layout$paper_bgcolor %||% "#ffffff"
    trace$stroke <- trace[["stroke"]] %||% default(I(bgcolor))
  }
  trace
}
#' 'Build' (i.e., evaluate) a plotly object
#'
#' This generic function creates the list object sent to plotly.js
#' for rendering. Using this function can be useful for overriding defaults
#' provided by `ggplotly`/`plot_ly` or for debugging rendering
#' errors.
#'
#' @param p a ggplot object, or a plotly object, or a list.
#' @param registerFrames should a frame trace attribute be interpreted as frames in an animation?
#' @return a plotly object whose `x` component has been fully evaluated into
#'   the data/layout/frames lists sent to plotly.js.
#' @export
#' @examples
#'
#' p <- plot_ly(economics, x = ~date, y = ~pce)
#' # the unevaluated plotly object
#' str(p)
#' # the evaluated data
#' str(plotly_build(p)$x$data)
#'
plotly_build <- function(p, registerFrames = TRUE) {
  # S3 dispatch on the class of `p` (gg, plotly, list, NULL)
  UseMethod("plotly_build")
}
#' @export
plotly_build.NULL <- function(...) {
  # nothing to build: wrap the dots in a <div> and mark it browsable
  # so it still renders in the viewer
  contents <- htmltools::div(...)
  htmltools::browsable(contents)
}
#' @export
plotly_build.list <- function(p, registerFrames = TRUE) {
  # forward registerFrames (previously it was silently dropped, so a caller's
  # registerFrames = FALSE had no effect for list input)
  plotly_build(as_widget(p), registerFrames = registerFrames)
}
#' @export
plotly_build.gg <- function(p, registerFrames = TRUE) {
  # note: since preRenderHook = plotly_build in as_widget(),
  # plotly_build.plotly() will be called on gg objects as well
  # forward registerFrames (previously it was silently dropped for gg input)
  plotly_build(ggplotly(p), registerFrames = registerFrames)
}
#' @export
plotly_build.plotly <- function(p, registerFrames = TRUE) {
  # Pipeline overview: evaluate layout attrs -> evaluate trace attrs ->
  # split traces on discrete variables -> map size/color/symbol/linetype ->
  # run the verify_*() normalization passes -> (optionally) register frames.
  # make this plot retrievable
  set_last_plot(p)
  # evaluate each set of layout attributes (possibly formulas) against its data
  layouts <- Map(function(x, y) {
    d <- plotly_data(p, y)
    x <- rapply(x, eval_attr, data = d, how = "list")
    # if an annotation attribute is an array, expand into multiple annotations
    nAnnotations <- max(lengths(x$annotations) %||% 0)
    if (!is.null(names(x$annotations))) {
      # font is the only list object, so store it, and attach after transposing
      font <- x$annotations[["font"]]
      x$annotations <- purrr::transpose(lapply(x$annotations, function(x) {
        as.list(rep(x, length.out = nAnnotations))
      }))
      for (i in seq_len(nAnnotations)) {
        x$annotations[[i]][["font"]] <- font
      }
    }
    x[lengths(x) > 0]
  }, p$x$layoutAttrs, names2(p$x$layoutAttrs))
  # get rid of the data -> layout mapping
  p$x$layoutAttrs <- NULL
  # accumulate, rather than override, annotations.
  annotations <- Reduce(c, c(
    list(p$x$layout$annotations),
    setNames(compact(lapply(layouts, "[[", "annotations")), NULL)
  ))
  # merge layouts into a single layout (more recent layouts will override older ones)
  p$x$layout <- modify_list(p$x$layout, Reduce(modify_list, layouts))
  p$x$layout$annotations <- annotations
  # keep frame mapping for populating layout.slider.currentvalue in animations
  frameMapping <- unique(unlist(
    lapply(p$x$attrs, function(x) deparse2(x[["frame"]])),
    use.names = FALSE
  ))
  if (length(frameMapping) > 1) {
    warning("Only one `frame` variable is allowed", call. = FALSE)
  }
  # Attributes should be NULL if none exist (rather than an empty list)
  if (length(p$x$attrs) == 0) p$x$attrs <- NULL
  # If there is just one (unevaluated) trace, and the data is sf, add an sf layer
  if (length(p$x$attrs) == 1 && !inherits(p$x$attrs[[1]], "plotly_eval") && is_sf(plotly_data(p))) {
    p <- add_sf(p)
  }
  # If type was not specified in plot_ly(), it doesn't create a trace unless
  # there are no other traces
  if (is.null(p$x$attrs[[1]][["type"]]) && length(p$x$attrs) > 1) {
    p$x$attrs[[1]] <- NULL
  }
  # have the attributes already been evaluated?
  is.evaled <- function(x) inherits(x, "plotly_eval")
  attrsToEval <- p$x$attrs[!vapply(p$x$attrs, is.evaled, logical(1))]
  # trace type checking and renaming for plot objects
  if (is_mapbox(p) || is_geo(p)) {
    p <- geo2cartesian(p)
    attrsToEval <- lapply(attrsToEval, function(tr) {
      if (!grepl("scatter|choropleth", tr[["type"]] %||% "scatter")) {
        stop("Cant add a '", tr[["type"]], "' trace to a map object", call. = FALSE)
      }
      if (is_mapbox(p)) tr[["type"]] <- tr[["type"]] %||% "scattermapbox"
      if (is_geo(p)) {
        tr[["type"]] <- if (!is.null(tr[["z"]])) "choropleth" else "scattergeo"
      }
      tr
    })
  }
  # evaluate each trace's attributes against its data ("data training")
  dats <- Map(function(x, y) {
    # grab the data for this trace
    dat <- plotly_data(p, y)
    # formula/symbol/attribute evaluation
    trace <- structure(
      rapply(x, eval_attr, data = dat, how = "list"),
      class = oldClass(x)
    )
    # determine trace type (if not specified, can depend on the # of data points)
    # note that this should also determine a sensible mode, if appropriate
    trace <- verify_type(trace)
    # verify orientation of boxes/bars
    trace <- verify_orientation(trace)
    # supply sensible defaults based on trace type
    trace <- coerce_attr_defaults(trace, p$x$layout)
    # attach crosstalk info, if necessary
    if (crosstalk_key() %in% names(dat) && isTRUE(trace[["inherit"]] %||% TRUE)) {
      trace[["key"]] <- trace[["key"]] %||% dat[[crosstalk_key()]]
      trace[["set"]] <- trace[["set"]] %||% attr(dat, "set")
    }
    # if appropriate, tack on a group index
    grps <- if (has_group(trace)) tryNULL(dplyr::group_vars(dat))
    if (length(grps) && any(lengths(trace) == NROW(dat))) {
      trace[[".plotlyGroupIndex"]] <- interaction(dat[, grps, drop = F])
    }
    # add sensible axis names to layout
    # NOTE: `<<-` intentionally writes into `p` in the enclosing
    # plotly_build.plotly() frame from inside this Map() callback
    for (i in c("x", "y", "z")) {
      nm <- paste0(i, "axis")
      idx <- which(names(trace) %in% i)
      if (length(idx) == 1) {
        title <- default(deparse2(x[[idx]]))
        if (is3d(trace$type) || i == "z") {
          p$x$layout$scene[[nm]]$title <<- p$x$layout$scene[[nm]]$title %||% title
        } else {
          p$x$layout[[nm]]$title <<- p$x$layout[[nm]]$title %||% title
        }
      }
    }
    if (inherits(trace, c("plotly_surface", "plotly_contour"))) {
      # TODO: generate matrix for users?
      # (1) if z is vector, and x/y are null throw error
      # (2) if x/y/z are vectors and length(x) * length(y) == length(z), convert z to matrix
      if (!is.matrix(trace[["z"]]) || !is.numeric(trace[["z"]])) {
        stop("`z` must be a numeric matrix", call. = FALSE)
      }
    }
    # collect non-positional scales, plotly.js data_arrays, and "special"
    # array attributes for "data training"
    Attrs <- Schema$traces[[trace[["type"]]]]$attributes
    isArray <- vapply(Attrs, function(x) {
      tryFALSE(identical(x[["valType"]], "data_array"))
    }, logical(1))
    arrayOk <- vapply(Attrs, function(x) tryNULL(x[["arrayOk"]]) %||% FALSE, logical(1))
    # "non-tidy" traces allow x/y of different lengths, so ignore those
    dataArrayAttrs <- if (is_tidy(trace)) names(Attrs)[isArray | arrayOk]
    allAttrs <- c(
      dataArrayAttrs, special_attrs(trace), npscales(), "frame",
      # for some reason, text isn't listed as a data array in some traces
      # I'm looking at you scattergeo...
      ".plotlyGroupIndex", "text", "key", "fillcolor", "name", "legendgroup"
    )
    tr <- trace[names(trace) %in% allAttrs]
    # TODO: does it make sense to "train" matrices/2D-tables (e.g. z)?
    tr <- tr[vapply(tr, function(x) is.null(dim(x)) && is.atomic(x), logical(1))]
    # white-list customdata as this can be a non-atomic vector
    tr$customdata <- trace$customdata
    builtData <- tibble::as_tibble(tr)
    # avoid clobbering I() (i.e., variables that shouldn't be scaled)
    for (i in seq_along(tr)) {
      if (inherits(tr[[i]], "AsIs")) builtData[[i]] <- I(builtData[[i]])
    }
    if (NROW(builtData) > 0) {
      # Build the index used to split one "trace" into multiple traces
      isAsIs <- vapply(builtData, function(x) inherits(x, "AsIs"), logical(1))
      isDiscrete <- vapply(builtData, is.discrete, logical(1))
      # note: can only have one linetype per trace
      isSplit <- names(builtData) %in% c("split", "linetype", "frame", "fillcolor", "name") |
        !isAsIs & isDiscrete & names(builtData) %in% c("symbol", "color")
      if (any(isSplit)) {
        paste2 <- function(x, y) if (identical(x, y)) x else paste(x, y, sep = br())
        splitVars <- builtData[isSplit]
        builtData[[".plotlyTraceIndex"]] <- Reduce(paste2, splitVars)
        # in registerFrames() we need to strip the frame from .plotlyTraceIndex
        # so keep track of which variable it is...
        trace$frameOrder <- which(names(splitVars) %in% "frame")
      }
      # Build the index used to determine grouping (later on, NAs are inserted
      # via group2NA() to create the groups). This is done in 3 parts:
      # 1. Sort data by the trace index since groups are nested within traces.
      # 2. Translate missing values on positional scales to a grouping variable.
      # If grouping isn't relevant for this trace, a warning is thrown since
      # NAs are removed.
      # 3. The grouping from (2) and any groups detected via dplyr::groups()
      # are combined into a single grouping variable, .plotlyGroupIndex
      builtData <- arrange_safe(builtData, ".plotlyTraceIndex")
      isComplete <- complete.cases(builtData[names(builtData) %in% c("x", "y", "z")])
      # warn about missing values if groups aren't relevant for this trace type
      if (any(!isComplete) && !has_group(trace)) {
        warning("Ignoring ", sum(!isComplete), " observations", call. = FALSE)
      }
      builtData[[".plotlyMissingIndex"]] <- cumsum(!isComplete)
      builtData <- builtData[isComplete, ]
      if (length(grps) && has_group(trace) && isTRUE(trace[["connectgaps"]])) {
        stop(
          "Can't use connectgaps=TRUE when data has group(s).", call. = FALSE
        )
      }
      builtData[[".plotlyGroupIndex"]] <- interaction(
        builtData[[".plotlyGroupIndex"]] %||% "",
        builtData[[".plotlyMissingIndex"]]
      )
      builtData <- arrange_safe(builtData,
        c(".plotlyTraceIndex", ".plotlyGroupIndex",
          if (inherits(trace, "plotly_line")) "x")
      )
      builtData <- train_data(builtData, trace)
      trace[[".plotlyVariableMapping"]] <- names(builtData)
      # copy over to the trace data
      for (i in names(builtData)) {
        trace[[i]] <- builtData[[i]]
      }
    }
    # TODO: provide a better way to clean up "high-level" attrs
    trace[c("ymin", "ymax", "yend", "xend")] <- NULL
    trace[lengths(trace) > 0]
  }, attrsToEval, names2(attrsToEval))
  # mark all attrs as evaluated so re-building doesn't re-evaluate them
  p$x$attrs <- lapply(p$x$attrs, function(x) structure(x, class = "plotly_eval"))
  # traceify by the interaction of discrete variables
  traces <- list()
  for (i in seq_along(dats)) {
    d <- dats[[i]]
    scaleAttrs <- names(d) %in% paste0(npscales(), "s")
    traces <- c(traces, traceify(d[!scaleAttrs], d$.plotlyTraceIndex))
    if (i == 1) traces[[1]] <- c(traces[[1]], d[scaleAttrs])
  }
  # insert NAs to differentiate groups
  traces <- lapply(traces, function(x) {
    d <- tibble::as_tibble(x[names(x) %in% x$.plotlyVariableMapping])
    d <- group2NA(
      d, if (has_group(x)) ".plotlyGroupIndex",
      ordered = if (inherits(x, "plotly_line")) "x",
      retrace.first = inherits(x, "plotly_polygon")
    )
    for (i in x$.plotlyVariableMapping) {
      # try to reduce the amount of data we have to send for non-positional scales
      entry <- if (i %in% npscales()) uniq(d[[i]]) else d[[i]]
      if (is.null(entry)) {
        x[[i]] <- NULL
      } else {
        x[[i]] <- structure(entry, class = oldClass(x[[i]]))
      }
    }
    x
  })
  # Map special plot_ly() arguments to plotly.js trace attributes.
  # Note that symbol/linetype can modify the mode, so those are applied first
  # TODO: use 'legends 2.0' to create legends for these discrete mappings
  # https://github.com/plotly/plotly.js/issues/1668
  if (length(traces)) {
    traces <- map_symbol(traces)
    traces <- map_linetype(traces)
    traces <- map_size(traces)
    traces <- map_size(traces, stroke = TRUE) #i.e., span
    colorTitle <- unlist(lapply(p$x$attrs, function(x) { deparse2(x[["color"]] %||% x[["z"]]) }))
    strokeTitle <- unlist(lapply(p$x$attrs, function(x) deparse2(x[["stroke"]])))
    traces <- map_color(traces, title = paste(colorTitle, collapse = br()), colorway = colorway(p))
    traces <- map_color(traces, stroke = TRUE, title = paste(strokeTitle, collapse = br()), colorway = colorway(p))
  }
  for (i in seq_along(traces)) {
    # remove special mapping attributes
    mappingAttrs <- c(
      "alpha", "alpha_stroke", npscales(), paste0(npscales(), "s"),
      ".plotlyGroupIndex", ".plotlyMissingIndex",
      ".plotlyTraceIndex", ".plotlyVariableMapping", "inherit"
    )
    for (j in mappingAttrs) {
      traces[[i]][[j]] <- NULL
    }
  }
  # .crossTalkKey -> key
  traces <- lapply(traces, function(x) {
    setNames(x, sub(crosstalk_key(), "key", names(x), fixed = TRUE))
  })
  # it's possible that the plot object already has some traces
  # (like figures pulled from a plotly server)
  p$x$data <- setNames(c(p$x$data, traces), NULL)
  # supply linked highlighting options/features
  p <- supply_highlight_attrs(p)
  # supply trace anchor and domain information
  p <- supply_defaults(p)
  # attribute naming corrections for "geo-like" traces
  p <- cartesian2geo(p)
  # Compute sensible bounding boxes for each mapbox/geo subplot
  p <- fit_bounds(p)
  # polar charts don't like null width/height keys
  if (is.null(p$x$layout[["height"]])) p$x$layout[["height"]] <- NULL
  if (is.null(p$x$layout[["width"]])) p$x$layout[["width"]] <- NULL
  # ensure we get the order of categories correct
  # (plotly.js uses the order in which categories appear by default)
  p <- populate_categorical_axes(p)
  # translate '\n' to '<br />' in text strings
  p <- translate_linebreaks(p)
  # if it makes sense, add markers/lines/text to mode
  p <- verify_mode(p)
  # annotations & shapes must be an array of objects
  # TODO: should we add anything else to this?
  p <- verify_arrays(p)
  # set a sensible hovermode if it hasn't been specified already
  p <- verify_hovermode(p)
  # try to convert to webgl if toWebGl was used
  p <- verify_webgl(p)
  # throw warning if webgl is being used in shinytest
  # currently, shinytest won't rely this warning, but it should
  # https://github.com/rstudio/shinytest/issues/146
  if (isTRUE(getOption("shiny.testmode"))) {
    if (is.webgl(p)) warning("shinytest can't currently render WebGL-based graphics.")
  }
  # crosstalk dynamically adds traces, meaning that a legend could be dynamically
  # added, which is confusing. So here we populate a sensible default.
  p <- verify_showlegend(p)
  # NOTE: this needs to occur *before* registering frames so simple/nested key
  # flags get passed onto frame data.
  p <- verify_key_type(p)
  if (registerFrames) {
    p <- registerFrames(p, frameMapping = frameMapping)
  }
  # set the default plotly.js events to register in shiny
  p <- shiny_defaults_set(p)
  p <- verify_guides(p)
  # verify colorscale attributes are in a sensible data structure
  p <- verify_colorscale(p)
  # verify plot attributes are legal according to the plotly.js spec
  p <- verify_attr_names(p)
  # box up 'data_array' attributes where appropriate
  p <- verify_attr_spec(p)
  # make sure we're including mathjax (if TeX() is used)
  p <- verify_mathjax(p)
  # if a partial bundle was specified, make sure it supports the visualization
  p <- verify_partial_bundle(p)
  # scattergl currently doesn't render in RStudio on Windows
  # https://github.com/ropensci/plotly/issues/1214
  p <- verify_scattergl_platform(p)
  # make sure plots don't get sent out of the network (for enterprise)
  p$x$base_url <- get_domain()
  p
}
# ----------------------------------------------------------------
# Functions used solely within plotly_build
# ----------------------------------------------------------------
# Register animation frames on a built plotly object.
#
# Each trace in p$x$data may carry a `frame` value (injected when the plot
# was specified with a `frame` aesthetic). This function:
#   * normalises that value to exactly one frame per trace,
#   * strips the frame label out of trace names,
#   * computes "global" x/y axis ranges so axes don't jump between frames,
#   * pads every frame to the same set of traces (plotly.js frames are
#     *state changes*, so traces missing from a frame must be supplied
#     explicitly as invisible),
#   * supplies animation button/slider defaults.
#
# p: a plotly build object (the internal list with $x$data, $x$layout, ...)
# frameMapping: optional character; the variable the frames were generated
#   from, used as the slider's currentvalue prefix when of length 1.
# Returns the modified plotly object.
registerFrames <- function(p, frameMapping = NULL) {
  # ensure one frame value per trace, and if its missing, insert NA
  p$x$data <- lapply(p$x$data, function(tr) {
    tr[["frame"]] <- tr[["frame"]][[1]] %||% NA
    tr
  })
  # the ordering of this object determines the ordering of the frames
  frameAttrs <- unlist(lapply(p$x$data, "[[", "frame"))
  # NOTE: getLevels() should drop NAs
  frameNames <- getLevels(frameAttrs)
  p$x$data <- lapply(p$x$data, function(tr) { tr$frame <- as.character(tr$frame); tr })
  # remove frames from the trace names
  for (i in seq_along(p$x$data)) {
    tr <- p$x$data[[i]]
    if (length(tr[["name"]]) != 1) next
    nms <- strsplit(as.character(tr[["name"]]), br())[[1]]
    # frameOrder records which name component encodes the frame; drop it
    idx <- setdiff(seq_along(nms), tr$frameOrder %||% 0)
    p$x$data[[i]]$name <- if (length(idx)) paste(nms[idx], collapse = br()) else NULL
    p$x$data[[i]]$frameOrder <- NULL
  }
  # exit in trivial cases
  nFrames <- length(frameNames)
  if (nFrames < 2) return(p)
  # --------------------------------------------------------------------------
  # set a "global" range of x/y (TODO: handle multiple axes?)
  # --------------------------------------------------------------------------
  x <- unlist(lapply(p$x$data, function(x) x[["x"]]))
  if (is.numeric(x)) {
    rng <- range(x, na.rm = TRUE)
    if (identical(p$x$layout$xaxis$type, "log")) {
      rng <- log10(rng)
      # log10() of non-positive values yields NaN; clamp to 0
      rng[is.nan(rng)] <- 0
    }
    p$x$layout$xaxis$range <- p$x$layout$xaxis$range %||% extendrange(rng)
  }
  y <- unlist(lapply(p$x$data, function(x) x[["y"]]))
  if (is.numeric(y)) {
    rng <- range(y, na.rm = TRUE)
    if (identical(p$x$layout$yaxis$type, "log")) {
      rng <- log10(rng)
      rng[is.nan(rng)] <- 0
    }
    p$x$layout$yaxis$range <- p$x$layout$yaxis$range %||% extendrange(rng)
  }
  # --------------------------------------------------------------------------
  # Similar to setting a global x/y range, we need a "global trace range"
  #
  # implementation details via @rreusser: frames specify *state changes*,
  # so if frame 1 has 3 traces, and frame 2 has 2 traces,
  # we need to explicity supply 3 traces
  # in both frames, but make 1 invisible in frame 2. For example,
  # http://codepen.io/cpsievert/pen/gmXVWe
  # For that reason, every frame (including the "initial" frame) has the
  # max # of traces and "missing traces" are not visible (i.e., `visible=false`)
  # --------------------------------------------------------------------------
  # remember, at this point, frame has been removed from the trace name
  frameTraceNames <- unique(unlist(lapply(p$x$data[!is.na(frameAttrs)], "[[", "name")))
  for (i in seq_along(frameNames)) {
    nm <- frameNames[[i]]
    d <- p$x$data[sapply(p$x$data, "[[", "frame") %in% nm]
    # ensure, the frames API knows what is visible/invisible
    d <- lapply(d, function(tr) { tr$visible <- tr$visible %||% TRUE; tr })
    # if this frame is missing a trace name, supply an invisible one
    traceNamesMissing <- setdiff(frameTraceNames, sapply(d, "[[", "name"))
    for (j in traceNamesMissing) {
      idx <- vapply(p$x$data, function(tr) isTRUE(tr[["name"]] == j), logical(1))
      # clone the first trace carrying this name and hide it
      idx <- which(idx)[[1]]
      invisible <- modify_list(p$x$data[[idx]], list(visible = FALSE))
      d <- c(d, list(invisible))
    }
    p$x$frames[[i]] <- list(
      name = as.character(format(nm)),
      data = lapply(d, function(tr) {
        # verify trace attributes against the plotly.js schema
        spec <- Schema$traces[[tr$type %||% "scatter"]]$attributes
        verify_attr(tr, spec)
      })
    )
  }
  # ensure the plot knows about the "global trace range"
  firstFrame <- vapply(p$x$data, function(tr) isTRUE(tr[["frame"]] %in% frameNames[[1]]), logical(1))
  p$x$data[firstFrame] <- p$x$frames[[1]]$data
  # remove frame traces
  idx <- vapply(p$x$data, function(tr) isTRUE(tr[["frame"]] %in% frameNames[-1]), logical(1))
  p$x$data[idx] <- NULL
  # this works since we now have a global trace range
  # (frame trace indices are 0-based on the plotly.js side, hence `- 1`)
  p$x$frames <- lapply(p$x$frames, function(f) {
    f$traces <- i(which(!is.na(sapply(p$x$data, "[[", "frame"))) - 1)
    f
  })
  # retrain color defaults
  p$x$data <- colorway_retrain(p$x$data, colorway(p))
  p$x$frames <- lapply(p$x$frames, function(f) {
    f$data <- colorway_retrain(f$data, colorway(p)[f$traces + 1])
    f
  })
  # populate layout.sliders.currentvalue with a sensible default
  defaultvalue <- if (length(frameMapping) == 1) {
    list(
      prefix = paste0(frameMapping, ": "),
      xanchor = 'right',
      font = list(
        size = 16,
        color = toRGB("gray80")
      )
    )
  } else NULL
  # supply animation option defaults (a la, highlight_defaults())
  p$animation <- p$animation %||% animation_opts_defaults()
  # if all the frame trace data are scatter traces, set a default of redraw=F
  types <- unique(unlist(lapply(p$x$frames, function(f) {
    vapply(f$data, function(tr) tr$type %||% "scatter", character(1))
  })))
  if (identical(types, "scatter") && is.default(p$animation$frame$redraw)) {
    p$animation$frame$redraw <- default(FALSE)
  }
  # _always_ display an animation button and slider by default
  animation_button_supply(
    animation_slider_supply(p, currentvalue = defaultvalue)
  )
}
# Prepare a trace's backing data frame prior to rendering.
#
# Handles two special trace classes:
#  * plotly_ribbon: reshaped via ribbon_dat()
#  * plotly_segment: every row is expanded into two rows (the segment's
#    start and end point) and the result is grouped by the original row
#    index so each segment draws as its own line piece.
#
# data: the data frame backing the trace
# trace: the trace object (a classed list)
# Returns the (possibly transformed) data.
train_data <- function(data, trace) {
  if (inherits(trace, "plotly_ribbon")) {
    data <- ribbon_dat(data)
  }
  if (inherits(trace, "plotly_segment")) {
    # TODO: this could be faster, more efficient
    data$.plotlyGroupIndex <- seq_len(NROW(data))
    # duplicate each row: odd rows keep (x, y), even rows receive (xend, yend)
    idx <- rep(seq_len(NROW(data)), each = 2)
    # NOTE(review): the pattern anchors "^xend$" but uses "^yend" without a
    # trailing "$", so any column merely *starting* with "yend" is dropped;
    # confirm whether that asymmetry is intentional
    dat <- as.data.frame(data[!grepl("^xend$|^yend", names(data))])
    dat <- dat[idx, ]
    idx2 <- seq.int(2, NROW(dat), by = 2)
    dat[idx2, "x"] <- data[["xend"]]
    dat[idx2, "y"] <- data[["yend"]]
    # NOTE(review): dplyr::group_by_() is deprecated in modern dplyr
    data <- dplyr::group_by_(dat, ".plotlyGroupIndex", add = TRUE)
  }
  # TODO: a lot more geoms!!!
  data
}
# Map the "top-level" `size` (or, when stroke = TRUE, `span`) attribute of
# each trace onto the relevant plotly.js sizing attributes.
#
# `size` feeds marker.size / textfont.size / error bar width / (when not
# filled) line.width; `span` feeds marker.line.width / error bar thickness /
# (when filled) line.width. Non-constant values are rescaled into the
# plot-level `sizes`/`spans` range; AsIs (I()) values are taken verbatim.
#
# traces: list of trace objects
# stroke: map the `span` attribute instead of `size`?
# Returns traces with sizing attributes filled in.
map_size <- function(traces, stroke = FALSE) {
  sizeList <- lapply(traces, "[[", if (stroke) "span" else "size")
  nSizes <- lengths(sizeList)
  # if no "top-level" size is present, return traces untouched
  if (all(nSizes == 0)) return(traces)
  allSize <- unlist(compact(sizeList))
  if (!is.null(allSize) && is.discrete(allSize)) {
    stop("`size`/`width` values must be numeric .", call. = FALSE)
  }
  # data range pooled across traces so sizes are comparable between them
  sizeRange <- range(allSize, na.rm = TRUE)
  mapSize <- switch(
    if (stroke) "span" else "size",
    span = function(trace, sizes) {
      type <- trace$type %||% "scatter"
      size_ <- uniq(sizes)
      isSingular <- length(size_) == 1
      attrs <- Schema$traces[[type]]$attributes
      # `span` controls marker.line.width
      if (has_attr(type, "marker")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$marker$line$width)) sizes
        trace$marker$line <- modify_list(list(width = default(s)), trace$marker$line)
      }
      # `span` controls error_[x/y].thickness
      for (attr in c("error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        s <- if (isSingular) size_ else if (array_ok(attrs[[attr]]$thickness)) sizes
        trace[[attr]] <- modify_list(list(thickness = default(s)), trace[[attr]])
      }
      # When fill exists, `span` controls line.width
      if (has_fill(trace) && has_attr(type, "line")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$line$width)) sizes else NA
        if (is.na(s)) {
          warning("`line.width` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(width = default(s)), trace[["line"]])
        }
      }
      trace
    },
    size = function(trace, sizes) {
      type <- trace$type %||% "scatter"
      size_ <- uniq(sizes)
      isSingular <- length(size_) == 1
      attrs <- Schema$traces[[type]]$attributes
      # `size` controls marker.size (note 'bar' traces have marker but not marker.size)
      # TODO: always ensure an array? https://github.com/ropensci/plotly/pull/1176
      if (has_attr(type, "marker") && "size" %in% names(attrs$marker)) {
        s <- if (isSingular) size_ else if (array_ok(attrs$marker$size)) sizes
        trace$marker <- modify_list(list(size = default(s), sizemode = default("area")), trace$marker)
      }
      # `size` controls textfont.size
      if (has_attr(type, "textfont")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$textfont$size)) sizes
        trace$textfont <- modify_list(list(size = default(s)), trace$textfont)
      }
      # `size` controls error_[x/y].width
      for (attr in c("error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        s <- if (isSingular) size_ else if (array_ok(attrs[[attr]]$width)) sizes
        trace[[attr]] <- modify_list(list(width = default(s)), trace[[attr]])
      }
      # if fill does not exist, `size` controls line.width
      if (!has_fill(trace) && has_attr(type, "line")) {
        s <- if (isSingular) size_ else if (array_ok(attrs$line$width)) sizes else NA
        if (is.na(s)) {
          warning("`line.width` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(width = default(s)), trace[["line"]])
        }
      }
      trace
    }
  )
  for (i in which(nSizes > 0)) {
    s <- sizeList[[i]]
    isConstant <- inherits(s, "AsIs")
    sizeI <- if (isConstant) {
      # I() values are used literally; just drop the AsIs class
      structure(s, class = setdiff(class(s), "AsIs"))
    } else {
      # rescale data values into the user-facing sizes/spans output range
      to <- if (stroke) traces[[1]][["spans"]] else traces[[1]][["sizes"]]
      scales::rescale(s, from = sizeRange, to = to)
    }
    traces[[i]] <- mapSize(traces[[i]], sizeI)
  }
  traces
}
# appends a new (empty) trace to generate (plot-wide) colorbar/colorscale
#
# Maps the "top-level" `color` (or, when stroke = TRUE, `stroke`) attribute
# of each trace onto plotly.js color attributes:
#   * AsIs / absent values -> literal color codes (falling back to colorway)
#   * discrete values      -> a factor palette via scales::col_factor()
#   * numeric values       -> a shared colorscale; one invisible
#                             "plotly_colorbar" trace is appended so a single
#                             plot-wide colorbar can be displayed
#
# traces: list of trace objects
# stroke: map the `stroke` (outline) attribute rather than `color`?
# title: colorbar title
# colorway: default trace colors used when no color is mapped
# na.color: color used for missing values
# Returns traces (possibly with a trailing "plotly_colorbar" trace appended).
map_color <- function(traces, stroke = FALSE, title = "", colorway, na.color = "transparent") {
  color <- if (stroke) {
    lapply(traces, function(x) { x[["stroke"]] %||% x[["color"]] })
  } else {
    # histogram2d traces and colorscale-capable traces fall back to z values
    lapply(traces, function(x) { x[["color"]] %||% if (grepl("histogram2d", x[["type"]])) c(0, 1) else if (has_attr(x[["type"]], "colorscale")) x[["surfacecolor"]] %||% x[["z"]] })
  }
  alphas <- if (stroke) {
    vapply(traces, function(x) x$alpha_stroke %||% 1, numeric(1))
  } else {
    vapply(traces, function(x) x[["alpha"]] %||% 1, numeric(1))
  }
  # classify each trace's color mapping: constant (AsIs/absent),
  # numeric (continuous scale), or discrete (factor palette)
  isConstant <- vapply(color, function(x) inherits(x, "AsIs") || is.null(x), logical(1))
  isNumeric <- vapply(color, is.numeric, logical(1)) & !isConstant
  isDiscrete <- vapply(color, is.discrete, logical(1)) & !isConstant
  if (any(isNumeric & isDiscrete)) stop("Can't have both discrete and numeric color mappings", call. = FALSE)
  uniqColor <- lapply(color, uniq)
  isSingular <- lengths(uniqColor) == 1
  # color/colorscale/colorbar attribute placement depends on trace type and marker mode
  # TODO: remove these and make numeric colorscale mapping more like the rest
  types <- vapply(traces, function(tr) tr$type %||% "scatter", character(1))
  modes <- vapply(traces, function(tr) tr$mode %||% "lines", character(1))
  hasLine <- has_line(types, modes)
  hasLineColor <- has_color_array(types, "line")
  hasText <- has_text(types, modes)
  hasTextColor <- has_color_array(types, "text")
  hasZ <- has_attr(types, "colorscale") & !stroke &
    any(vapply(traces, function(tr) {
      !is.null(tr[["z"]]) || grepl("histogram2d", tr[["type"]])
    }, logical(1)))
  # IDEA - attach color codes whether they make sense, unless there is a
  # vector of color codes and the target is a constant
  mapColor <- switch(
    if (stroke) "stroke" else "fill",
    stroke = function(trace, rgba, is_colorway = FALSE) {
      type <- trace$type %||% "scatter"
      rgba_ <- uniq(rgba)
      isSingular <- length(rgba_) == 1
      attrs <- Schema$traces[[type]]$attributes
      default_ <- if (is_colorway) function(x) prefix_class(default(x), "colorway") else default
      # `stroke` controls marker.line.color
      if (has_attr(type, "marker")) {
        col <- if (isSingular) rgba_ else if (array_ok(attrs$marker$line$color)) rgba
        trace$marker$line <- modify_list(list(color = default_(col)), trace$marker$line)
      }
      # when the trace is filled, `stroke` also controls line.color
      if (has_fill(trace)) {
        col <- if (isSingular) rgba_ else if (array_ok(attrs$line$color)) rgba
        if (is.null(col)) {
          warning("`line.color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace$line <- modify_list(list(color = default_(col)), trace$line)
        }
      }
      trace
    },
    fill = function(trace, rgba, is_colorway = FALSE) {
      type <- trace$type %||% "scatter"
      rgba_ <- uniq(rgba)
      isSingular <- length(rgba_) == 1
      attrs <- Schema$traces[[type]]$attributes
      default_ <- if (is_colorway) function(x) prefix_class(default(x), "colorway") else default
      # `color` controls marker.color, textfont.color, error_[x/y].color
      # TODO: any more attributes that make sense to include here?
      for (attr in c("marker", "textfont", "error_y", "error_x")) {
        if (!has_attr(type, attr)) next
        if (is_colorway && "textfont" == attr) next
        col <- if (isSingular) rgba_ else if (array_ok(attrs[[attr]]$color)) rgba else NA
        if (is.na(col)) {
          warning("`", attr, ".color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[[attr]] <- modify_list(list(color = default_(col)), trace[[attr]])
        }
      }
      # If trace has fill, `color` controls fillcolor; otherwise line.color
      if (has_fill(trace)) {
        if (!isSingular) warning("Only one fillcolor per trace allowed", call. = FALSE)
        # alpha defaults to 0.5 when applied to fillcolor
        if (is.null(trace[["alpha"]])) rgba_ <- toRGB(rgba_, 0.5)
        if (isSingular) trace <- modify_list(list(fillcolor = default_(rgba_)), trace)
      } else if (has_attr(type, "line")) {
        # if fill does not exist, 'color' controls line.color
        col <- if (isSingular) rgba_ else if (array_ok(attrs$line$color)) rgba else NA
        if (is.na(col)) {
          warning("`line.color` does not currently support multiple values.", call. = FALSE)
        } else {
          trace[["line"]] <- modify_list(list(color = default_(col)), trace[["line"]])
        }
      }
      trace
    }
  )
  # i.e., interpret values as color codes
  if (any(isConstant)) {
    colorCodes <- Map(`%||%`, color, rep(colorway, length.out = length(traces)))
    colorCodes <- Map(toRGB, colorCodes[isConstant], alphas[isConstant])
    # traces that supplied no color at all fall back to the colorway
    isColorway <- lengths(color[isConstant]) == 0
    traces[isConstant] <- Map(mapColor, traces[isConstant], colorCodes, isColorway)
  }
  # since stroke inherits from color, it should inherit the palette too
  palette <- if (stroke) traces[[1]][["strokes"]] %||% traces[[1]][["colors"]] else traces[[1]][["colors"]]
  if (any(isDiscrete)) {
    # unlist() does _not_ preserve order factors
    isOrdered <- all(vapply(color[isDiscrete], is.ordered, logical(1)))
    lvls <- getLevels(unlist(color[isDiscrete]))
    N <- length(lvls)
    # ordered factors get a sequential (viridis) palette, unordered a qualitative one
    pal <- palette %||% if (isOrdered) viridisLite::viridis(N) else RColorBrewer::brewer.pal(N, "Set2")
    colScale <- scales::col_factor(pal, levels = names(pal) %||% lvls, na.color = na.color)
    color_codes <- Map(function(x, y) toRGB(colScale(as.character(x)), y), color[isDiscrete], alphas[isDiscrete])
    traces[isDiscrete] <- Map(mapColor, traces[isDiscrete], color_codes)
  }
  if (any(isNumeric)) {
    pal <- palette %||% viridisLite::viridis(10)
    # TODO: use ggstat::frange() when it's on CRAN?
    allColor <- unlist(color[isNumeric])
    rng <- range(allColor, na.rm = TRUE)
    colScale <- scales::col_numeric(pal, rng, na.color = na.color)
    # generate the colorscale to be shared across traces
    vals <- if (diff(rng) > 0) {
      seq(rng[1], rng[2], length.out = 25)
    } else {
      c(0, 1)
    }
    # plotly.js colorscale format: (normalized value, color) pairs
    colorScale <- matrix(
      c(scales::rescale(vals), toRGB(colScale(vals), alphas[[1]])),
      ncol = 2
    )
    colorObj <- list(
      colorbar = lapply(list(title = as.character(title), ticklen = 2), default),
      cmin = default(rng[1]),
      cmax = default(rng[2]),
      colorscale = default(colorScale),
      showscale = default(FALSE)
    )
    for (i in which(isNumeric)) {
      # when colorscale is being attached to `z`, we don't need color values in
      # colorObj, so create colorbar trace now and exit early
      if (hasZ[[i]]) {
        colorObj[c("cmin", "cmax")] <- NULL
        colorObj[["showscale"]] <- default(TRUE)
        traces[[i]] <- modify_list(colorObj, traces[[i]])
        traces[[i]] <- structure(traces[[i]], class = c("plotly_colorbar", "zcolor"))
        next
      }
      # if trace is singular (i.e., one unique color in this trace), then there
      # is no need for a colorscale, and both stroke/color have relevancy
      if (isSingular[[i]]) {
        col <- colScale(uniq(color[[i]]))
        traces[[i]] <- mapColor(traces[[i]], toRGB(col, alphas[[i]]))
      } else {
        colorObj$color <- default(color[[i]])
        if (stroke) {
          traces[[i]]$marker$line <- modify_list(colorObj, traces[[i]]$marker$line)
        } else {
          traces[[i]]$marker <- modify_list(colorObj, traces[[i]]$marker)
        }
        if (hasLine[[i]]) {
          if (hasLineColor[[i]]) {
            traces[[i]]$line <- modify_list(colorObj, traces[[i]]$line)
          } else {
            warning("line.color doesn't (yet) support data arrays", call. = FALSE)
          }
        }
        if (hasText[[i]]) {
          if (hasTextColor[[i]]) {
            traces[[i]]$textfont <- modify_list(colorObj, traces[[i]]$textfont)
          } else {
            warning("textfont.color doesn't (yet) support data arrays", call. = FALSE)
          }
        }
        # TODO: how to make the summary stat (mean) customizable?
        if (has_fill(traces[[i]])) {
          warning("Only one fillcolor per trace allowed", call. = FALSE)
          col <- toRGB(colScale(mean(colorObj$color, na.rm = TRUE)), alphas[[i]])
          if (is.null(traces[[i]][["alpha"]])) col <- toRGB(col, 0.5)
          traces[[i]] <- modify_list(list(fillcolor = col), traces[[i]])
        }
      }
    }
    # exit early if no additional colorbar trace is needed
    if (any(hasZ)) return(traces)
    if (stroke && sum(lengths(lapply(traces, "[[", "stroke"))) == 0) return(traces)
    # add an "empty" trace with the colorbar
    colorObj$color <- rng
    colorObj$showscale <- default(TRUE)
    colorBarTrace <- list(
      x = range(unlist(lapply(traces, "[[", "x")), na.rm = TRUE),
      y = range(unlist(lapply(traces, "[[", "y")), na.rm = TRUE),
      type = if (any(types %in% glTypes())) "scattergl" else "scatter",
      mode = "markers",
      opacity = 0,
      hoverinfo = "none",
      showlegend = FALSE,
      marker = colorObj
    )
    # 3D needs a z property
    if ("scatter3d" %in% types) {
      colorBarTrace$type <- "scatter3d"
      colorBarTrace$z <- range(unlist(lapply(traces, "[[", "z")), na.rm = TRUE)
    }
    # geo/mapbox traces locate points via lat/lon rather than x/y
    if (length(type <- intersect(c("scattergeo", "scattermapbox"), types))) {
      colorBarTrace$type <- type
      colorBarTrace$lat <- colorBarTrace$y
      colorBarTrace$lon <- colorBarTrace$x
      colorBarTrace[["x"]] <- NULL
      colorBarTrace[["y"]] <- NULL
    }
    traces[[length(traces) + 1]] <- structure(colorBarTrace, class = "plotly_colorbar")
  }
  traces
}
# Map the "top-level" `symbol` attribute of each trace onto marker.symbol.
#
# Discrete symbol values are resolved through scales::shape_pal() (or a
# user-supplied palette via the plot-level `symbols` attribute), translated
# from R pch codes to plotly symbol names via pch2symbol(), and validated
# against the plotly.js schema. AsIs (I()) values skip the palette and are
# translated directly.
#
# traces: list of trace objects
# Returns traces with marker.symbol set (and the trace mode fixed up so the
# symbols actually render).
map_symbol <- function(traces) {
  symbolList <- lapply(traces, "[[", "symbol")
  nSymbols <- lengths(symbolList)
  # if no "top-level" symbol is present, return traces untouched
  if (all(nSymbols == 0)) {
    return(traces)
  }
  symbol <- unlist(compact(symbolList))
  lvls <- getLevels(symbol)
  # get a sensible default palette (also throws warnings)
  pal <- setNames(scales::shape_pal()(length(lvls)), lvls)
  pal <- supplyUserPalette(pal, traces[[1]][["symbols"]])
  validSymbols <- as.character(Schema$traces$scatter$attributes$marker$symbol$values)
  for (i in which(nSymbols > 0)) {
    s <- symbolList[[i]]
    # AsIs values are taken verbatim; otherwise map through the palette
    symbols <- pch2symbol(if (inherits(s, "AsIs")) s else as.character(pal[as.character(s)]))
    illegalSymbols <- setdiff(symbols, validSymbols)
    if (length(illegalSymbols)) {
      # BUGFIX: close the quote around the final valid symbol (previously the
      # message ended with an unterminated quote; now matches map_linetype())
      warning(
        "The following are not valid symbol codes:\n'",
        paste(illegalSymbols, collapse = "', '"), "'\n",
        "Valid symbols include:\n'",
        paste(validSymbols, collapse = "', '"), "'", call. = FALSE
      )
    }
    traces[[i]][["marker"]] <- modify_list(
      list(symbol = default(symbols)), traces[[i]][["marker"]]
    )
    # ensure the mode is set so that the symbol is relevant
    if (!grepl("markers", traces[[i]]$mode %||% "")) {
      message("Adding markers to mode; otherwise symbol would have no effect.")
      traces[[i]]$mode <- paste0(traces[[i]]$mode, "+markers")
    }
  }
  traces
}
# Map the "top-level" `linetype` attribute of each trace onto line.dash.
#
# Discrete linetype values are resolved through scales::linetype_pal() (or a
# user-supplied palette via the plot-level `linetypes` attribute), converted
# from R lty codes to plotly dash names via lty2dash(), and validated
# against the plotly.js schema. AsIs (I()) values skip the palette.
#
# traces: list of trace objects
# Returns traces with line.dash set (and the trace mode fixed up so the
# linetypes actually render).
map_linetype <- function(traces) {
  ltys <- lapply(traces, "[[", "linetype")
  nLty <- lengths(ltys)
  # nothing to do when no trace carries a top-level linetype
  if (all(nLty == 0)) {
    return(traces)
  }
  lvls <- getLevels(unlist(compact(ltys)))
  # sensible default palette, possibly overridden by a user-supplied one
  defaultPal <- setNames(scales::linetype_pal()(length(lvls)), lvls)
  pal <- supplyUserPalette(defaultPal, traces[[1]][["linetypes"]])
  okDashes <- as.character(Schema$traces$scatter$attributes$line$dash$values)
  if (length(pal) > length(okDashes)) {
    warning("plotly.js only supports 6 different linetypes", call. = FALSE)
  }
  for (i in which(nLty > 0)) {
    lt <- ltys[[i]]
    # AsIs values are taken verbatim; otherwise map through the palette
    mapped <- if (inherits(lt, "AsIs")) lt else as.character(pal[as.character(lt)])
    dashes <- lty2dash(mapped)
    badDashes <- setdiff(dashes, okDashes)
    if (length(badDashes)) {
      warning(
        "The following are not valid linetype codes:\n'",
        paste(badDashes, collapse = "', '"), "'\n",
        "Valid linetypes include:\n'",
        paste(okDashes, collapse = "', '"), "'", call. = FALSE
      )
    }
    traces[[i]][["line"]] <- modify_list(
      list(dash = default(dashes)), traces[[i]][["line"]]
    )
    # linetype only shows if "lines" is part of the trace mode
    if (!grepl("lines", traces[[i]]$mode %||% "")) {
      message("Adding lines to mode; otherwise linetype would have no effect.")
      traces[[i]][["mode"]] <- paste0(traces[[i]][["mode"]], "+lines")
    }
  }
  traces
}
# break up a single trace into multiple traces according to values stored
# a particular key name
#
# dat: a trace (a possibly-nested list); any leaf whose length equals
#   length(x) is assumed to be data-mapped and is subset per level.
# x: the splitting variable (NULL/empty means "don't split").
# Returns a list of traces, one per level of `x` present in the data.
traceify <- function(dat, x = NULL) {
  if (length(x) == 0) {
    return(list(dat))
  }
  lvls <- if (is.factor(x)) levels(x) else unique(x)
  lvls <- lvls[lvls %in% x]
  # the order of lvls determines the order in which traces are drawn
  # for ordered factors at least, it makes sense to draw the highest level first
  # since that _should_ be the darkest color in a sequential palette
  if (is.ordered(x)) {
    lvls <- rev(lvls)
  }
  n <- length(x)
  # walk `dat` recursively; subset any non-list leaf of matching length,
  # carry everything else along untouched
  subset_rec <- function(el, keep) {
    if (is.list(el)) {
      lapply(el, subset_rec, keep)
    } else if (length(el) == n) {
      el[keep]
    } else {
      el
    }
  }
  lapply(lvls, function(lvl) {
    tr <- lapply(dat, subset_rec, x %in% lvl)
    if (is.null(tr$name)) {
      tr$name <- lvl
    }
    tr
  })
}
# Evaluate a plotly attribute: formulas (e.g. ~foo) are evaluated in the
# context of `data`; anything else is returned untouched.
eval_attr <- function(x, data = NULL) {
  if (!lazyeval::is_formula(x)) {
    return(x)
  }
  lazyeval::f_eval(x, data)
}
# overwrite defaults with the user defined palette
#
# Entries of `user` replace entries of `default`: named entries replace by
# name, unnamed entries (i.e. when `user` has no names) replace by position.
supplyUserPalette <- function(default, user) {
  for (j in seq_along(user)) {
    key <- names(user)[j]
    if (is.null(key)) {
      key <- j
    }
    default[key] <- user[j]
  }
  default
}
# helper functions
# Does a plotly.js schema attribute accept an array of values?
# (tryNULL() guards against the attribute being absent from the schema)
array_ok <- function(attr) isTRUE(tryNULL(attr$arrayOk))
# Is fill relevant for this trace?
#
# TRUE when the trace type supports `fillcolor` and either (a) the type has
# no `fill` attribute at all (fill is then always on), or (b) the trace's
# fill value is set to something other than "none".
has_fill <- function(trace) {
  type <- trace[["type"]] %||% "scatter"
  if (!has_attr(type, "fillcolor")) {
    return(FALSE)
  }
  # if trace type has fillcolor, but no fill attribute, then fill is always relevant
  if (!has_attr(type, "fill")) {
    return(TRUE)
  }
  fill_val <- trace[["fill"]] %||% "none"
  isTRUE(fill_val != "none")
}
# ensure we've set a sensible trace defaults
# based on the trace type
#
# trace: a single trace object
# layout: the plot layout (read here for paper_bgcolor)
# Returns the trace with `span`/`stroke` defaults filled in.
coerce_attr_defaults <- function(trace, layout) {
  # if user has specified stroke, make sure the span
  # defaults to something greater than 0 (so they can see the stroke!)
  if (length(trace[["stroke"]]) && !is.default(trace[["stroke"]])) {
    trace$span <- trace[["span"]] %||% default(I(1))
  }
  if (trace[["type"]] %in% c("sunburst", "pie")) {
    # As of v1.46.1, paper_bgcolor defaults to '#fff' which
    # col2rgb() can't parse, but expands to '#ffffff'
    # https://stackoverflow.com/a/2899224/1583084
    bgcolor <- layout$paper_bgcolor %||% "#ffffff"
    # sunburst/pie slices default to a stroke matching the paper background
    trace$stroke <- trace[["stroke"]] %||% default(I(bgcolor))
  }
  trace
}
|
# Permutation test based on abgaptree() (apportionment of diversity across
# the nodes of a phylogenetic tree).
#
# For each interior node (rows 3 onward of abgaptree()'s output, i.e. after
# dropping the first two rows), the observed statistic is the ratio of the
# node's column 2 to column 3 (presumably beta / total diversity -- confirm
# against abgaptree()'s documentation). The null distribution is obtained by
# permuting species (columns of `comm`) across the tips of the tree.
#
# phyl: a phylogenetic tree in any format accepted by .checkphyloarg()
# comm: communities-by-species abundance matrix or data frame
# nrep: number of permutations
# alter: alternative hypothesis, forwarded to as.krandtest()
# exponent: order of the diversity index (2 = quadratic diversity)
# wcom: community weights -- "even", "speciesab" (abundance-based), or a
#   numeric vector of length nrow(comm)
# tol: numeric tolerance used when checking for negative values
#
# Returns an object of class c("rtestaptree", "krandtest").
rtestaptree <-
  function(phyl, comm, nrep = 99, alter = "two-sided", exponent = 2,
           wcom = c("even", "speciesab"), tol = 1e-8){
    # --- input validation -------------------------------------------------
    if(!inherits(comm, "data.frame") & !inherits(comm, "matrix")) stop("comm must be a numeric matrix or data frame")
    if(nrow(comm) < 2)
      stop("comm must have at least 2 rows")
    tre <- .checkphyloarg(phyl)
    phyl.phylo <- tre$phyl.phylo
    # reconcile species in comm with tips of the tree (pruning extra tips)
    if(length(phyl.phylo$tip.label)==ncol(comm)){
      if(!is.null(colnames(comm)) & any(!phyl.phylo$tip.label%in%colnames(comm))) stop("names of species in comm are not equal to tip names in phyl")
    }
    else if(length(phyl.phylo$tip.label)<ncol(comm)) stop("phyl must contain all species in comm")
    else{
      if(any(!colnames(comm)%in%phyl.phylo$tip.label)) stop("some names of species in comm are not in tip names of phyl")
      else
        phyl.phylo <- drop.tip(phyl.phylo, phyl.phylo$tip.label[!phyl.phylo$tip.label%in%colnames(comm)])
    }
    # align comm columns with the tree's tip order
    if(!is.null(colnames(comm)))
      comm <- comm[, phyl.phylo$tip.label]
    if(!all(apply(comm, 1, is.numeric))) stop("comm must be a numeric matrix or data frame")
    if(any(comm < (-tol))) stop("comm must have nonnegative values")
    # --- community weights ------------------------------------------------
    if(wcom[1] == "even")
      wcom <- rep(1/nrow(comm), nrow(comm))
    else if(wcom[1] == "speciesab")
      wcom <- rowSums(comm)/sum(comm)
    else if(is.numeric(wcom) & length(wcom)==nrow(comm)){
      if(any(wcom < -tol))
        stop("negative values in wcom")
      wcom <- wcom/sum(wcom)
    }
    else stop("incorrect definition of wcom")
    nsp <- ncol(comm)
    # --- observed statistic (one value per interior node) -----------------
    obs <- abgaptree(phyl.phylo, comm, exponent = exponent, wcom = wcom, tol = tol)[-(1:2), ]
    obs <- obs[, 2] / obs[, 3]
    #orderobs <- rev(order(obs))
    # one simulation: permute species (columns) across the tips, keeping the
    # original column names so the tree alignment is preserved
    funsim <- function(i){
      e <- sample(1:nsp)
      comsim <- comm[, e]
      colnames(comsim) <- colnames(comm)
      theo <- abgaptree(phyl.phylo, comsim, exponent = exponent, wcom = wcom, tol = tol)[-(1:2), ]
      theo <- theo[, 2] / theo[, 3]
      return(theo)
    }
    # nrep simulations -> one row per permutation, one column per node
    theotab <- t(cbind.data.frame(sapply(1:nrep, funsim)))
    #nam <- paste("p", 3:(length(obs)+2), sep = "")[orderobs]
    nam <- paste("p", 3:(length(obs)+2), sep = "")
    #res <- as.krandtest(theotab[, orderobs], obs[orderobs], alter = alter, names = nam, call = match.call())
    res <- as.krandtest(theotab, obs, alter = alter, names = nam, call = match.call())
    class(res) <- c("rtestaptree", class(res))
    return(res)
  }
| /R/rtestaptree.R | no_license | cran/adiv | R | false | false | 2,597 | r | rtestaptree <-
function(phyl, comm, nrep = 99, alter = "two-sided", exponent = 2,
wcom = c("even", "speciesab"), tol = 1e-8){
if(!inherits(comm, "data.frame") & !inherits(comm, "matrix")) stop("comm must be a numeric matrix or data frame")
if(nrow(comm) < 2)
stop("comm must have at least 2 rows")
tre <- .checkphyloarg(phyl)
phyl.phylo <- tre$phyl.phylo
if(length(phyl.phylo$tip.label)==ncol(comm)){
if(!is.null(colnames(comm)) & any(!phyl.phylo$tip.label%in%colnames(comm))) stop("names of species in comm are not equal to tip names in phyl")
}
else if(length(phyl.phylo$tip.label)<ncol(comm)) stop("phyl must contain all species in comm")
else{
if(any(!colnames(comm)%in%phyl.phylo$tip.label)) stop("some names of species in comm are not in tip names of phyl")
else
phyl.phylo <- drop.tip(phyl.phylo, phyl.phylo$tip.label[!phyl.phylo$tip.label%in%colnames(comm)])
}
if(!is.null(colnames(comm)))
comm <- comm[, phyl.phylo$tip.label]
if(!all(apply(comm, 1, is.numeric))) stop("comm must be a numeric matrix or data frame")
if(any(comm < (-tol))) stop("comm must have nonnegative values")
if(wcom[1] == "even")
wcom <- rep(1/nrow(comm), nrow(comm))
else if(wcom[1] == "speciesab")
wcom <- rowSums(comm)/sum(comm)
else if(is.numeric(wcom) & length(wcom)==nrow(comm)){
if(any(wcom < -tol))
stop("negative values in wcom")
wcom <- wcom/sum(wcom)
}
else stop("incorrect definition of wcom")
nsp <- ncol(comm)
obs <- abgaptree(phyl.phylo, comm, exponent = exponent, wcom = wcom, tol = tol)[-(1:2), ]
obs <- obs[, 2] / obs[, 3]
#orderobs <- rev(order(obs))
funsim <- function(i){
e <- sample(1:nsp)
comsim <- comm[, e]
colnames(comsim) <- colnames(comm)
theo <- abgaptree(phyl.phylo, comsim, exponent = exponent, wcom = wcom, tol = tol)[-(1:2), ]
theo <- theo[, 2] / theo[, 3]
return(theo)
}
theotab <- t(cbind.data.frame(sapply(1:nrep, funsim)))
#nam <- paste("p", 3:(length(obs)+2), sep = "")[orderobs]
nam <- paste("p", 3:(length(obs)+2), sep = "")
#res <- as.krandtest(theotab[, orderobs], obs[orderobs], alter = alter, names = nam, call = match.call())
res <- as.krandtest(theotab, obs, alter = alter, names = nam, call = match.call())
class(res) <- c("rtestaptree", class(res))
return(res)
}
|
## Getting & Cleaning Data course project: build a tidy summary of the
## UCI HAR (Samsung smartphone) dataset.
##
## Steps:
##   0. download/unzip the raw data
##   1. merge the train and test sets
##   2. keep only mean()/std() measurements
##   3. attach descriptive activity names
##   4. label columns descriptively
##   5. average each variable per activity/subject and write tidy_data.txt

## 0: Download and unzip the dataset
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
## Download (if it doesn't exist) and unzip the file (if not unzipped)
if (!file.exists("SamsungDataset.zip")) {
  download.file(fileURL, "SamsungDataset.zip", mode = "wb")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip("SamsungDataset.zip")
}

## Read data from files
X_test <- read.table("UCI HAR Dataset/test/X_test.txt", quote = "\"", comment.char = "")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt", quote = "\"", comment.char = "")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", quote = "\"", comment.char = "")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", quote = "\"", comment.char = "")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", quote = "\"", comment.char = "")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", quote = "\"", comment.char = "")

## 1: Merge the train and test datasets
x_merged <- rbind(X_train, X_test)
y_merged <- rbind(y_train, y_test)
subject_merged <- rbind(subject_train, subject_test)

## 2: Extract only the measurements on the mean and standard deviation
features <- read.table("UCI HAR Dataset/features.txt", quote = "\"", comment.char = "")
## indices of columns whose feature name contains mean() or std()
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
## subset the columns and set the column names
x_mean_std <- x_merged[, mean_and_std_features]
names(x_mean_std) <- features[mean_and_std_features, 2]

## 3: Use descriptive activity names to name the activities
activities <- read.table("UCI HAR Dataset/activity_labels.txt", quote = "\"", comment.char = "")
## replace numeric activity codes with their labels
y_merged[, 1] <- activities[y_merged[, 1], 2]
names(y_merged) <- "activity"

## 4: Appropriately label the dataset with descriptive variable names
names(subject_merged) <- "subject"
## bind all three datasets into a single dataset
complete_data <- cbind(x_mean_std, y_merged, subject_merged)

## 5: Tidy dataset with the average of each variable per activity and subject
library(dplyr)
grouped_data <- group_by(complete_data, activity, subject)
## summarize_each()/funs() are deprecated; across() is the modern equivalent
averaged_data <- summarise(grouped_data, across(everything(), mean), .groups = "drop")
write.table(averaged_data, file = "tidy_data.txt", row.names = FALSE)
| /GettingAndCleaningData-Samsung/run_analysis.R | no_license | Ahmed-Zaki/datasciencecoursera | R | false | false | 2,590 | r | ##0: Download and unzip the dataset
## NOTE(review): this block is a duplicate copy of the run_analysis script
## above (a dataset-dump artifact); code kept byte-for-byte.
##Dataset file URL
fileURL<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
## Download (if doesn't exist) and unzip the file (if not unzipped)
if (!file.exists("SamsungDataset.zip")){download.file(fileURL,"SamsungDataset.zip",mode="wb")}
if (!file.exists("UCI HAR Dataset")){unzip("SamsungDataset.zip")}
##Read data from files
X_test <- read.table("UCI HAR Dataset/test/X_test.txt", quote="\"", comment.char="")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt", quote="\"", comment.char="")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", quote="\"", comment.char="")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", quote="\"", comment.char="")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", quote="\"", comment.char="")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", quote="\"", comment.char="")
##1: Merge the train and test datasets
## "x" dataset
x_merged <- rbind(X_train, X_test)
## "y" dataset
y_merged <- rbind(y_train, y_test)
## "Subject" dataset
subject_merged <- rbind(subject_train, subject_test)
##2: Extract only the measurements on the mean and standard deviation for each measurement.
features <- read.table("UCI HAR Dataset/features.txt", quote="\"", comment.char="")
## get indices of only columns with mean() or std() in their names
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[,2])
## subset the columns and Set the column names
x_mean_std <- x_merged[, mean_and_std_features]
names(x_mean_std) <- features[mean_and_std_features,2]
##3: Use descriptive activity names to name the activities in the dataset.
activities<-read.table("UCI HAR Dataset/activity_labels.txt", quote="\"", comment.char="")
## update y values with activity names
y_merged[,1] <- activities[y_merged[,1],2]
names(y_merged)<-"activity"
##4: Appropriately label the dataset with descriptive variable names.
## correct subject column name
names(subject_merged) <- "subject"
# bind all three datasets in a single dataset
complete_data <- cbind(x_mean_std, y_merged, subject_merged)
##5: Create a second, independent tidy dataset with the average of each variable for each activity and each subject.
library(dplyr)
## Group dataset by activity and subject
grouped_data<-group_by(complete_data,activity,subject)
## Get the average for each activity and subject and write the tidy dataset into txt file
## NOTE(review): summarize_each()/funs() are deprecated in modern dplyr;
## prefer summarise(across(everything(), mean))
averaged_data<-summarize_each(grouped_data,funs(mean))
write.table(averaged_data, file= "tidy_data.txt", row.names = FALSE)
|
## Compute baseline severity scores (SOFA, 4C, SAPS-3) for the COVID-19
## cohort and save each as an .RData file.
##
## type == "icu24": scores evaluated at 24h and 48h after ICU admission;
## otherwise: scores over the 48h window ending at the first mechanical
## ventilation record per patient.

library(ricu)

# source every helper script in <repo-root>/r
r_dir <- file.path(rprojroot::find_root(".git/index"), "r")
invisible(lapply(list.files(r_dir, full.names = TRUE), source))

type <- "icu24"

if (type == "icu24") {
  tbl <- load_concepts("sofa2", "covid19", explicit_wins = hours(c(24L, 48L)))
  sofa_baseline <- list(
    tbl[get(index_var(tbl)) == hours(24L)],
    tbl[get(index_var(tbl)) == hours(48L)]
  )
  save(sofa_baseline, file = "~/sofa_icu24.RData")

  tbl <- load_concepts("four_c", "covid19", explicit_wins = hours(c(24L, 48L)))
  four_c_baseline <- list(
    tbl[get(index_var(tbl)) == hours(24L)],
    tbl[get(index_var(tbl)) == hours(48L)]
  )
  save(four_c_baseline, file = "~/four_c_icu24.RData")

  saps <- load_concepts("saps_3", "covid19", explicit_wins = hours(c(24L, 48L)),
                        keep_components = TRUE)
  saps_baseline <- list(
    saps[get(index_var(saps)) == hours(24L)],
    saps[get(index_var(saps)) == hours(48L)]
  )
  save(saps_baseline, file = "~/saps_3_icu24.RData")
} else {
  # first mechanical-ventilation record per patient marks the window end
  upr <- load_concepts("vent_ind2", "covid19")
  upr <- upr[, head(.SD, n = 1L), by = c(id_vars(upr))]
  upr[, max_time := get(index_var(upr))]
  # use `with = FALSE` (not the reassignable shorthand `F`) for column selection
  upr <- upr[, c(id_vars(upr), "max_time"), with = FALSE]

  # window start = 48h before ventilation onset (copy() avoids modifying
  # `upr` by reference)
  lwr <- data.table::copy(upr)
  lwr[, min_time := max_time - hours(48L)]
  lwr[, max_time := NULL]
  lwr <- list(lwr)
  upr <- list(upr)

  tbl <- load_concepts("sofa2", "covid19",
                       explicit_wins = merge(lwr[[1L]], upr[[1L]]))
  sofa_baseline <- list(tbl)
  save(sofa_baseline, file = "~/sofa_mechvent.RData")

  tbl <- load_concepts(c("four_c"), "covid19",
                       explicit_wins = merge(lwr[[1L]], upr[[1L]]))
  four_c_baseline <- list(tbl)
  save(four_c_baseline, file = "~/four_c_mechvent.RData")
}
| /scripts/get-baseline.R | permissive | eth-mds/recoils | R | false | false | 1,776 | r | library(ricu)
## NOTE(review): this block is a duplicate copy of the get-baseline script
## above (a dataset-dump artifact); code kept byte-for-byte. Its leading
## `library(ricu)` line was lost in extraction.
r_dir <- file.path(rprojroot::find_root(".git/index"), "r")
invisible(lapply(list.files(r_dir, full.names = TRUE), source))
type <- "icu24"
if (type == "icu24") {
  tbl <- load_concepts("sofa2", "covid19", explicit_wins = hours(c(24L, 48L)))
  sofa_baseline <- list(
    tbl[get(index_var(tbl)) == hours(24L)],
    tbl[get(index_var(tbl)) == hours(48L)]
  )
  save(sofa_baseline, file = "~/sofa_icu24.RData")
  tbl <- load_concepts("four_c", "covid19", explicit_wins = hours(c(24L, 48L)))
  four_c_baseline <- list(
    tbl[get(index_var(tbl)) == hours(24L)],
    tbl[get(index_var(tbl)) == hours(48L)]
  )
  save(four_c_baseline, file = "~/four_c_icu24.RData")
  saps <- load_concepts("saps_3", "covid19", explicit_wins = hours(c(24L, 48L)),
    keep_components = TRUE)
  saps_baseline <- list(
    saps[get(index_var(saps)) == hours(24L)],
    saps[get(index_var(saps)) == hours(48L)]
  )
  save(saps_baseline, file = "~/saps_3_icu24.RData")
} else {
  upr <- load_concepts("vent_ind2", "covid19")
  upr <- upr[, head(.SD, n = 1L), by = c(id_vars(upr))]
  upr[, max_time := get(index_var(upr))]
  # NOTE(review): prefer `with = FALSE` over the shorthand `F`
  upr <- upr[, c(id_vars(upr), "max_time"), with=F]
  lwr <- data.table::copy(upr)
  lwr[, min_time := max_time - hours(48L)]
  lwr[, max_time := NULL]
  lwr <- list(lwr)
  upr <- list(upr)
  tbl <- load_concepts("sofa2", "covid19",
    explicit_wins = merge(lwr[[1L]], upr[[1L]]))
  sofa_baseline <- list(tbl)
  save(sofa_baseline, file = "~/sofa_mechvent.RData")
  tbl <- load_concepts(c("four_c"), "covid19",
    explicit_wins = merge(lwr[[1L]], upr[[1L]]))
  four_c_baseline <- list(tbl)
  save(four_c_baseline, file = "~/four_c_mechvent.RData")
}
|
## Create Error Component for CDU 1 and FDP 1
## Mean poll share (in percent) for CDU (poll_p1) and FDP (poll_p4) in the
## 2013 German election year, rescaled to a proportion; the sampling SD
## uses the binomial formula sqrt(p*(1-p)/n) with a hard-coded n = 600
## (presumably the poll sample size -- confirm).
gererror1 <- ger[ger$electionyr==2013,]
cdumean1 <- mean(gererror1$poll_p1, na.rm=TRUE)
fdpmean1 <- mean(gererror1$poll_p4, na.rm=TRUE)
cdumean1 <- cdumean1/100
fdpmean1 <- fdpmean1/100
cdusd1 <- sqrt((cdumean1*(1-cdumean1))/600)
fdpsd1 <- sqrt((fdpmean1*(1-fdpmean1))/600)
## Create Error Component for CDU 2 and FDP 2
## NOTE(review): this block is identical to the one above (same
## electionyr == 2013 and the same poll columns), so the *2 quantities
## duplicate the *1 quantities exactly -- confirm whether a different
## election year was intended here.
gererror2 <- ger[ger$electionyr==2013,]
cdumean2 <- mean(gererror2$poll_p1, na.rm=TRUE)
fdpmean2 <- mean(gererror2$poll_p4, na.rm=TRUE)
cdumean2 <- cdumean2/100
fdpmean2 <- fdpmean2/100
cdusd2 <- sqrt((cdumean2*(1-cdumean2))/600)
fdpsd2 <- sqrt((fdpmean2*(1-fdpmean2))/600)
## Create Error Component for LP
## NOTE(review): `poll_` looks like a truncated column name -- verify it
## matches a real column in `ukl` (a missing column would yield NULL and
## mean() would warn and return NA).
ukerror1 <- ukl[ukl$electionyr==2010,]
lpmean1 <- mean(ukerror1$poll_, na.rm=TRUE)
lpmean1 <- lpmean1/100
lpsd1 <- sqrt((lpmean1*(1-lpmean1))/600) | /R Files/ErrorTerm Generation.R | permissive | tzuliu/Do-Scandals-Matter-An-Interrupted-Time-Series-Design-on-Three-Cases | R | false | false | 822 | r | ## Create Error Component for CDU 1 and FDP 1
gererror1 <- ger[ger$electionyr==2013,]
cdumean1 <- mean(gererror1$poll_p1, na.rm=TRUE)
fdpmean1 <- mean(gererror1$poll_p4, na.rm=TRUE)
cdumean1 <- cdumean1/100
fdpmean1 <- fdpmean1/100
cdusd1 <- sqrt((cdumean1*(1-cdumean1))/600)
fdpsd1 <- sqrt((fdpmean1*(1-fdpmean1))/600)
## Create Error Component for CDU 2 and FDP 2
gererror2 <- ger[ger$electionyr==2013,]
cdumean2 <- mean(gererror2$poll_p1, na.rm=TRUE)
fdpmean2 <- mean(gererror2$poll_p4, na.rm=TRUE)
cdumean2 <- cdumean2/100
fdpmean2 <- fdpmean2/100
cdusd2 <- sqrt((cdumean2*(1-cdumean2))/600)
fdpsd2 <- sqrt((fdpmean2*(1-fdpmean2))/600)
## Create Error Component for LP
ukerror1 <- ukl[ukl$electionyr==2010,]
lpmean1 <- mean(ukerror1$poll_, na.rm=TRUE)
lpmean1 <- lpmean1/100
lpsd1 <- sqrt((lpmean1*(1-lpmean1))/600) |
#' @title Color Selector Input
#'
#' @description Choose between a restrictive set of colors.
#'
#' @param inputId The \code{input} slot that will be used to access the value.
#' @param label Display label for the control, or \code{NULL} for no label.
#' @param choices A list of colors, can be a list of named list, see example.
#' @param selected Default selected color, if \code{NULL} the first color for \code{mode = 'radio'}
#' and none for \code{mode = 'checkbox'}
#' @param mode \code{'radio'} for only one choice, \code{'checkbox'} for
#' selecting multiple values.
#' @param display_label Display list's names after palette of color.
#' @param ncol If choices is not a list but a vector, go to line after n elements.
#'
#' @return a colorSelectorInput control
#' @importFrom htmltools tags tagList
#' @export
#'
#' @examples
#' \dontrun{
#' if (interactive()) {
#'
#' # Full example
#' colorSelectorExample()
#'
#' # Simple example
#' ui <- fluidPage(
#' colorSelectorInput(
#' inputId = "mycolor1", label = "Pick a color :",
#' choices = c("steelblue", "cornflowerblue",
#' "firebrick", "palegoldenrod",
#' "forestgreen")
#' ),
#' verbatimTextOutput("result1")
#' )
#'
#' server <- function(input, output, session) {
#' output$result1 <- renderPrint({
#' input$mycolor1
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#' }
#' }
colorSelectorInput <- function(inputId, label, choices, selected = NULL, mode = c("radio", "checkbox"), display_label = FALSE, ncol = 10) {
  # Restore any bookmarked value before validating inputs.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  mode <- match.arg(arg = mode)
  # A plain vector of colors is laid out as rows of at most `ncol` swatches.
  if (!is.list(choices))
    choices <- split(x = choices, f = (seq_along(choices) - 1) %/% ncol)
  choices <- choicesWithNames(choices)
  if (!is.null(selected) && length(selected) > 1)
    stop("selected must be length 1")
  # Radio mode needs a default selection; checkbox mode may start empty.
  # Fixed: this scalar `if()` condition used elementwise `&`; `&&` is the
  # correct short-circuiting operator here (behavior unchanged for the
  # length-1 operands, but `&&` errors loudly on accidental vector input).
  if (is.null(selected) && mode == "radio")
    selected <- firstChoice(choices)
  tagCS <- htmltools::tags$div(
    class="shiny-input-container-inline form-group", class=paste0(mode, "GroupButtons"),
    `data-toggle`="buttons", id = inputId,
    style="margin-top: 3px; margin-bottom: 10px; ",
    if (!is.null(label)) htmltools::tagList(htmltools::tags$label(class="control-label", label), htmltools::tags$br()),
    colorOptions(
      inputId = inputId, choices = choices,
      selected = selected, mode = mode,
      display_label = display_label
    )
  )
  # Attach the shinyWidgets HTML dependencies so the CSS/JS ship with the tag.
  attachShinyWidgetsDep(tagCS)
}
# Internal helper: render the swatch buttons for colorSelectorInput().
# Recurses when `choices` contains nested lists, producing one btn-group row
# per sub-list; leaf entries become colored <span> buttons wrapping a hidden
# radio/checkbox input carrying the color value.
colorOptions <- function(inputId, choices, selected = NULL, mode = "radio", display_label = FALSE) {
  html <- lapply(seq_along(choices), FUN = function(i) {
    # (removed an unused local `label`; the name is read directly below)
    choice <- choices[[i]]
    if (is.list(choice)) {
      # Nested list: render the sub-choices as one button group, optionally
      # followed by the group's label, then force a line break.
      htmltools::tagList(
        htmltools::tags$div(
          class="btn-group",
          colorOptions(inputId, choice, selected, mode, display_label)
        ), if (display_label) htmltools::tags$em(htmltools::HTML(names(choices)[i])),
        htmltools::tags$br()
      )
    }
    else {
      # Leaf: a button painted with the color; the nested input makes it
      # participate in the radio/checkbox group named after `inputId`.
      htmltools::tagList(
        htmltools::tags$span(
          class = "btn btn-color-sw", type="button",
          style = paste("background-color:", choice),
          htmltools::tags$input(
            type=mode, name=inputId, value=choice, id=choice,
            checked = if (choice %in% selected) "checked"
          )
        )
      )
    }
  })
  return(htmltools::tagList(html))
}
#' @title Color Selector Example
#'
#' @export
#' @importFrom shiny shinyAppFile
#'
#' @describeIn colorSelectorInput Examples of use for colorSelectorInput
colorSelectorExample <- function() {
  # The example app needs these suggested packages. The previous version
  # only emitted a message and then launched the app anyway, which would
  # fail later with an obscure error; fail fast with a clear error instead.
  for (pkg in c("RColorBrewer", "viridisLite", "grDevices")) {
    if (!requireNamespace(package = pkg, quietly = TRUE))
      stop("Package '", pkg, "' is required to run this function", call. = FALSE)
  }
  shiny::shinyAppFile(
    appFile = system.file("examples/colorSelector/example.R", package = "shinyWidgets"),
    options = list("display.mode" = "showcase")
  )
}
#' @title Color Selector In A Dropdown
#'
#' @param circle Logical, use a circle or a square button
#' @param size Size of the button : default, lg, sm, xs.
#' @param up Logical. Display the dropdown menu above.
#' @param width Width of the dropdown menu content.
#'
#' @export
#' @describeIn colorSelectorInput Display a colorSelector in a dropdown button
#' @importFrom htmltools tags validateCssUnit
colorSelectorDrop <- function(inputId, label, choices, selected = NULL,
display_label = FALSE, ncol = 10, circle = TRUE, size = "sm",
up = FALSE, width = NULL) {
  size <- match.arg(arg = size, choices = c("default", "lg", "sm", "xs"))
  btnId <- paste("btn", inputId, sep = "-")
  # Dropdown trigger: a circle or square button that toggles the menu.
  funButton <- if (circle) circleButton else squareButton
  btn <- funButton(
    inputId = btnId, icon = NULL, status = "default", size = size,
    class = "dropdown-toggle", `data-toggle` = "dropdown"
  )
  # Menu content is a regular colorSelectorInput (radio mode only).
  dropTag <- htmltools::tags$ul(
    class = "dropdown-menu",
    style = if (!is.null(width))
      paste0("width: ", htmltools::validateCssUnit(width), ";"),
    colorSelectorInput(
      inputId = inputId,
      label = label,
      choices = choices,
      selected = selected,
      mode = "radio",
      display_label = display_label,
      ncol = ncol
    )
  )
  # Inline JS: repaint the trigger button with the chosen color on change.
  js <- paste0(
    '$(document).on("change","input[name=\'', inputId, '\']",function(){
var v = $("input[name=\'', inputId, '\']:checked").val();
$("#', btnId, '").css("background-color", v);
});'
  )
  # Fixed: `HTML()` was called unqualified although this file only imports
  # tags/validateCssUnit from htmltools, so the bare name would be
  # unresolved in the package namespace; qualify it like every other
  # htmltools call here. Also use a scalar `if` instead of `ifelse()`.
  htmltools::tags$div(
    class = if (up) "dropup" else "dropdown",
    btn, dropTag, htmltools::tags$script(htmltools::HTML(js))
  )
}
| /R/input-colorselector.R | permissive | DataXujing/shinyWidgets | R | false | false | 5,799 | r | #' @title Color Selector Input
#'
#' @description Choose between a restrictive set of colors.
#'
#' @param inputId The \code{input} slot that will be used to access the value.
#' @param label Display label for the control, or \code{NULL} for no label.
#' @param choices A list of colors, can be a list of named list, see example.
#' @param selected Default selected color, if \code{NULL} the first color for \code{mode = 'radio'}
#' and none for \code{mode = 'checkbox'}
#' @param mode \code{'radio'} for only one choice, \code{'checkbox'} for
#' selecting multiple values.
#' @param display_label Display list's names after palette of color.
#' @param ncol If choices is not a list but a vector, go to line after n elements.
#'
#' @return a colorSelectorInput control
#' @importFrom htmltools tags tagList
#' @export
#'
#' @examples
#' \dontrun{
#' if (interactive()) {
#'
#' # Full example
#' colorSelectorExample()
#'
#' # Simple example
#' ui <- fluidPage(
#' colorSelectorInput(
#' inputId = "mycolor1", label = "Pick a color :",
#' choices = c("steelblue", "cornflowerblue",
#' "firebrick", "palegoldenrod",
#' "forestgreen")
#' ),
#' verbatimTextOutput("result1")
#' )
#'
#' server <- function(input, output, session) {
#' output$result1 <- renderPrint({
#' input$mycolor1
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#' }
#' }
colorSelectorInput <- function(inputId, label, choices, selected = NULL, mode = c("radio", "checkbox"), display_label = FALSE, ncol = 10) {
  # Restore any bookmarked value before validating inputs.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  mode <- match.arg(arg = mode)
  # A plain vector of colors is laid out as rows of at most `ncol` swatches.
  if (!is.list(choices))
    choices <- split(x = choices, f = (seq_along(choices) - 1) %/% ncol)
  choices <- choicesWithNames(choices)
  if (!is.null(selected) && length(selected) > 1)
    stop("selected must be length 1")
  # Radio mode needs a default selection; checkbox mode may start empty.
  # Fixed: this scalar `if()` condition used elementwise `&`; `&&` is the
  # correct short-circuiting operator here (behavior unchanged for the
  # length-1 operands, but `&&` errors loudly on accidental vector input).
  if (is.null(selected) && mode == "radio")
    selected <- firstChoice(choices)
  tagCS <- htmltools::tags$div(
    class="shiny-input-container-inline form-group", class=paste0(mode, "GroupButtons"),
    `data-toggle`="buttons", id = inputId,
    style="margin-top: 3px; margin-bottom: 10px; ",
    if (!is.null(label)) htmltools::tagList(htmltools::tags$label(class="control-label", label), htmltools::tags$br()),
    colorOptions(
      inputId = inputId, choices = choices,
      selected = selected, mode = mode,
      display_label = display_label
    )
  )
  # Attach the shinyWidgets HTML dependencies so the CSS/JS ship with the tag.
  attachShinyWidgetsDep(tagCS)
}
# Internal helper: render the swatch buttons for colorSelectorInput().
# Recurses when `choices` contains nested lists, producing one btn-group row
# per sub-list; leaf entries become colored <span> buttons wrapping a hidden
# radio/checkbox input carrying the color value.
colorOptions <- function(inputId, choices, selected = NULL, mode = "radio", display_label = FALSE) {
  html <- lapply(seq_along(choices), FUN = function(i) {
    # (removed an unused local `label`; the name is read directly below)
    choice <- choices[[i]]
    if (is.list(choice)) {
      # Nested list: render the sub-choices as one button group, optionally
      # followed by the group's label, then force a line break.
      htmltools::tagList(
        htmltools::tags$div(
          class="btn-group",
          colorOptions(inputId, choice, selected, mode, display_label)
        ), if (display_label) htmltools::tags$em(htmltools::HTML(names(choices)[i])),
        htmltools::tags$br()
      )
    }
    else {
      # Leaf: a button painted with the color; the nested input makes it
      # participate in the radio/checkbox group named after `inputId`.
      htmltools::tagList(
        htmltools::tags$span(
          class = "btn btn-color-sw", type="button",
          style = paste("background-color:", choice),
          htmltools::tags$input(
            type=mode, name=inputId, value=choice, id=choice,
            checked = if (choice %in% selected) "checked"
          )
        )
      )
    }
  })
  return(htmltools::tagList(html))
}
#' @title Color Selector Example
#'
#' @export
#' @importFrom shiny shinyAppFile
#'
#' @describeIn colorSelectorInput Examples of use for colorSelectorInput
colorSelectorExample <- function() {
  # The example app needs these suggested packages. The previous version
  # only emitted a message and then launched the app anyway, which would
  # fail later with an obscure error; fail fast with a clear error instead.
  for (pkg in c("RColorBrewer", "viridisLite", "grDevices")) {
    if (!requireNamespace(package = pkg, quietly = TRUE))
      stop("Package '", pkg, "' is required to run this function", call. = FALSE)
  }
  shiny::shinyAppFile(
    appFile = system.file("examples/colorSelector/example.R", package = "shinyWidgets"),
    options = list("display.mode" = "showcase")
  )
}
#' @title Color Selector In A Dropdown
#'
#' @param circle Logical, use a circle or a square button
#' @param size Size of the button : default, lg, sm, xs.
#' @param up Logical. Display the dropdown menu above.
#' @param width Width of the dropdown menu content.
#'
#' @export
#' @describeIn colorSelectorInput Display a colorSelector in a dropdown button
#' @importFrom htmltools tags validateCssUnit
colorSelectorDrop <- function(inputId, label, choices, selected = NULL,
display_label = FALSE, ncol = 10, circle = TRUE, size = "sm",
up = FALSE, width = NULL) {
  size <- match.arg(arg = size, choices = c("default", "lg", "sm", "xs"))
  btnId <- paste("btn", inputId, sep = "-")
  # Dropdown trigger: a circle or square button that toggles the menu.
  funButton <- if (circle) circleButton else squareButton
  btn <- funButton(
    inputId = btnId, icon = NULL, status = "default", size = size,
    class = "dropdown-toggle", `data-toggle` = "dropdown"
  )
  # Menu content is a regular colorSelectorInput (radio mode only).
  dropTag <- htmltools::tags$ul(
    class = "dropdown-menu",
    style = if (!is.null(width))
      paste0("width: ", htmltools::validateCssUnit(width), ";"),
    colorSelectorInput(
      inputId = inputId,
      label = label,
      choices = choices,
      selected = selected,
      mode = "radio",
      display_label = display_label,
      ncol = ncol
    )
  )
  # Inline JS: repaint the trigger button with the chosen color on change.
  js <- paste0(
    '$(document).on("change","input[name=\'', inputId, '\']",function(){
var v = $("input[name=\'', inputId, '\']:checked").val();
$("#', btnId, '").css("background-color", v);
});'
  )
  # Fixed: `HTML()` was called unqualified although this file only imports
  # tags/validateCssUnit from htmltools, so the bare name would be
  # unresolved in the package namespace; qualify it like every other
  # htmltools call here. Also use a scalar `if` instead of `ifelse()`.
  htmltools::tags$div(
    class = if (up) "dropup" else "dropdown",
    btn, dropTag, htmltools::tags$script(htmltools::HTML(js))
  )
}
|
## ----cylcic-sockeye-setup, include=FALSE----------------------------------------
# Purled from the cyclic-sockeye R Markdown chapter: knitr chunk options for
# the rendered document (cached chunks, tidy code, centered 80%-width figures).
knitr::opts_knit$set(unnamed.chunk.label = "cyclic-sockeye-")
knitr::opts_chunk$set(echo = TRUE, comment=NA, cache=TRUE,
tidy.opts=list(width.cutoff=60), tidy=TRUE,
fig.align='center', out.width='80%', message=FALSE,
warning=FALSE)
## ----message=FALSE--------------------------------------------------------------
# atsalibrary provides the `sockeye` data set used throughout this script.
library(atsalibrary)
library(ggplot2)
library(MARSS)
## ----echo=FALSE, out.width="50%"------------------------------------------------
knitr::include_graphics("images/BB_sockeye_rivers_inset.png")
# 
## ----echo=FALSE-----------------------------------------------------------------
# Log spawner counts by brood year, one panel per river.
ggplot(sockeye, aes(x=brood_year, y=log(spawners))) + geom_line() + facet_wrap(~region, scales="free_y") + ggtitle("log spawners")
## ----echo=FALSE-----------------------------------------------------------------
# Autocorrelation of raw spawner counts out to lag 10 per river; the
# vertical line at lag 5 marks the hypothesized 5-year cycle.
a <- tapply(sockeye$spawners, sockeye$region, function(x){acf(x, na.action=na.pass, plot=FALSE, lag=10)$acf[,1,1]})
aa <- data.frame(acf=Reduce(c, a),
region=rep(names(a), each=11),
lag=rep(0:10, length(names(a))))
ggplot(aa, aes(x=lag, y=acf)) +
geom_bar(stat = "identity", position = "identity") + geom_vline(xintercept=5)+
facet_wrap(~region)+ggtitle("ACF")
## -------------------------------------------------------------------------------
# Single-river example: KVICHAK log spawners with an assumed 5-year cycle.
river <- "KVICHAK"
df <- subset(sockeye, region==river)
yt <- log(df$spawners)
TT <- length(yt)
p <- 5
## ----cylcic-sockeye-Z1----------------------------------------------------------
# Time-varying observation matrix: column 1 loads the stochastic level,
# columns 2-3 a sine/cosine basis whose coefficients are states.
Z <- array(1, dim=c(1,3,TT))
Z[1,2,] <- sin(2*pi*(1:TT)/p)
Z[1,3,] <- cos(2*pi*(1:TT)/p)
## ----cylcic-sockeye-mod-list1---------------------------------------------------
mod.list <- list(
U = "zero",
Q = "diagonal and unequal",
Z = Z,
A = "zero")
## ----cyclic-sockeye-fit-1, cache=TRUE-------------------------------------------
m <- dim(Z)[2]
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,m,1)))
## ----echo=FALSE-----------------------------------------------------------------
# Smoothed state estimates.
plot(fit, plot.type="xtT")
## ----echo=FALSE-----------------------------------------------------------------
# Reconstruct the estimated cycle from the smoothed sin/cos coefficients;
# grey verticals mark every p-th year. (Note `(1:TT/p)` and `(1:TT)/p`
# parenthesize differently but evaluate identically.)
beta1s = fit$states[2,]
beta2s = fit$states[3,]
value = beta1s*sin(2*pi*(1:TT/p))+beta2s*cos(2*pi*(1:TT)/p)
plot(1:TT, value, type="l",xlab="", ylab="beta1*sin() + beta2*cos()")
abline(v=seq(0,TT,p), col="grey")
title(river)
# Fit a univariate MARSS model with a stochastic level plus a cycle of
# period `p` (default 5 years) to the log spawner counts of one river.
# Relies on the `sockeye` data set being available in the search path.
# Returns the fitted MARSS object.
fitriver <- function(river, p=5){
df <- subset(sockeye, region==river)
yt <- log(df$spawners)
TT <- length(yt)
# Time-varying Z: column 1 loads the level, columns 2-3 the sine/cosine
# basis whose coefficients evolve as random-walk states.
Z <- array(1, dim=c(1,3,TT))
Z[1,2,] <- sin(2*pi*(1:TT)/p)
Z[1,3,] <- cos(2*pi*(1:TT)/p)
mod.list <- list(
U = "zero",
Q = "diagonal and unequal",
Z = Z,
A = "zero")
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,3,1)), silent=TRUE)
return(fit)
}
## ----cyclic-sockeye-list-of-fits, cache=TRUE------------------------------------
# Fit the univariate level + cycle model to every river (names come from
# the ACF list `a` computed earlier).
fits <- list()
for(river in names(a)){
fits[[river]] <- fitriver(river)
}
## -------------------------------------------------------------------------------
# Collect per-year cycle amplitude sqrt(beta1^2 + beta2^2) and the
# stochastic level of each river into one long data frame for plotting.
dfz <- data.frame()
for(river in names(a)){
fit <- fits[[river]]
tmp <- data.frame(amplitude = sqrt(fit$states[2,]^2+fit$states[3,]^2),
trend = fit$states[1,],
river=river,
brood_year=subset(sockeye, region==river)$brood_year)
dfz <- rbind(dfz, tmp)
}
## -------------------------------------------------------------------------------
ggplot(dfz, aes(x=brood_year, y=amplitude)) +
geom_line() +
facet_wrap(~river, scales="free_y") +
ggtitle("Cycle Amplitude")
## -------------------------------------------------------------------------------
ggplot(dfz, aes(x=brood_year, y=trend)) +
geom_line() +
facet_wrap(~river, scales="free_y") +
ggtitle("Stochastic Level")
## -------------------------------------------------------------------------------
# Exposition of the multivariate model structure for n = 2 rivers.
n <- 2
## -------------------------------------------------------------------------------
# Z stacks an identity block (levels) with diagonal sin/cos blocks (cycles).
Z <- array(1, dim=c(n,n*3,TT))
Z[1:n,1:n,] <- diag(1,n)
for(t in 1:TT){
Z[,(n+1):(2*n),t] <- diag(sin(2*pi*t/p),n)
Z[,(2*n+1):(3*n),t] <- diag(cos(2*pi*t/p),n)
}
Z[,,1]
## -------------------------------------------------------------------------------
# Q for display: a shared off-diagonal covariance "c" among the level
# states and distinct variances on the diagonal.
Q <- matrix(list(0), 3*n, 3*n)
Q[1:n,1:n] <- "c"
diag(Q) <- c(paste0("q",letters[1:n]), paste0("q",1:(2*n)))
Q
# Multivariate version of fitriver(): fit one MARSS model jointly to the
# log spawner series of several rivers. Each river gets its own stochastic
# level plus sine/cosine cycle states (period `p`); the level states share
# a full (symmetric) covariance while the cycle states are independent.
# Relies on the `sockeye` data set being available in the search path.
fitriver.m <- function(river, p=5){
require(tidyr)
require(dplyr)
require(MARSS)
# Reshape to one column per river (years in rows), then transpose so
# `yt` is rivers x years as MARSS expects.
df <- subset(sockeye, region %in% river)
df <- df %>% pivot_wider(id_cols=brood_year,names_from="region", values_from=spawners) %>%
ungroup() %>% select(-brood_year)
yt <- t(log(df))
TT <- ncol(yt)
n <- nrow(yt)
# Time-varying Z: identity block for the levels, diagonal sine/cosine
# blocks for the cycle states at each time step.
Z <- array(1, dim=c(n,n*3,TT))
Z[1:n,1:n,] <- diag(1,n)
for(t in 1:TT){
Z[,(n+1):(2*n),t] <- diag(sin(2*pi*t/p),n)
Z[,(2*n+1):(3*n),t] <- diag(cos(2*pi*t/p),n)
}
# Q: named covariances among the n level states (mirrored below the
# diagonal to keep the matrix symmetric), independent variances for the
# 2n cycle states.
Q <- matrix(list(0), 3*n, 3*n)
Q[1:n,1:n] <- paste0("c",1:(n^2))
diag(Q) <- c(paste0("q",letters[1:n]), paste0("q",1:(2*n)))
Q[lower.tri(Q)] <- t(Q)[lower.tri(Q)]
mod.list <- list(
U = "zero",
Q = Q,
Z = Z,
A = "zero")
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,3*n,1)), silent=TRUE)
return(fit)
}
## ----cyclic-sockeye-more-rivers, cache=TRUE-------------------------------------
# Joint fit across all rivers in the data set.
river <- unique(sockeye$region)
n <- length(river)
fit <- fitriver.m(river)
## ----cyclic-sockeye-corrplot----------------------------------------------------
# Correlation of the level-state process errors, hierarchically clustered
# into 4 rectangles.
require(corrplot)
Qmat <- coef(fit, type="matrix")$Q[1:n,1:n]
rownames(Qmat) <- colnames(Qmat) <- river
M <- cov2cor(Qmat)
corrplot(M, order = "hclust", addrect = 4)
## ----echo=FALSE, out.width="50%"------------------------------------------------
knitr::include_graphics("images/BB_sockeye_rivers_inset.png")
# 
| /docs/Rcode/cyclic-sockeye.R | permissive | realsmak88/atsa-labs | R | false | false | 6,009 | r | ## ----cylcic-sockeye-setup, include=FALSE----------------------------------------
knitr::opts_knit$set(unnamed.chunk.label = "cyclic-sockeye-")
knitr::opts_chunk$set(echo = TRUE, comment=NA, cache=TRUE,
tidy.opts=list(width.cutoff=60), tidy=TRUE,
fig.align='center', out.width='80%', message=FALSE,
warning=FALSE)
## ----message=FALSE--------------------------------------------------------------
library(atsalibrary)
library(ggplot2)
library(MARSS)
## ----echo=FALSE, out.width="50%"------------------------------------------------
knitr::include_graphics("images/BB_sockeye_rivers_inset.png")
# 
## ----echo=FALSE-----------------------------------------------------------------
ggplot(sockeye, aes(x=brood_year, y=log(spawners))) + geom_line() + facet_wrap(~region, scales="free_y") + ggtitle("log spawners")
## ----echo=FALSE-----------------------------------------------------------------
a <- tapply(sockeye$spawners, sockeye$region, function(x){acf(x, na.action=na.pass, plot=FALSE, lag=10)$acf[,1,1]})
aa <- data.frame(acf=Reduce(c, a),
region=rep(names(a), each=11),
lag=rep(0:10, length(names(a))))
ggplot(aa, aes(x=lag, y=acf)) +
geom_bar(stat = "identity", position = "identity") + geom_vline(xintercept=5)+
facet_wrap(~region)+ggtitle("ACF")
## -------------------------------------------------------------------------------
river <- "KVICHAK"
df <- subset(sockeye, region==river)
yt <- log(df$spawners)
TT <- length(yt)
p <- 5
## ----cylcic-sockeye-Z1----------------------------------------------------------
Z <- array(1, dim=c(1,3,TT))
Z[1,2,] <- sin(2*pi*(1:TT)/p)
Z[1,3,] <- cos(2*pi*(1:TT)/p)
## ----cylcic-sockeye-mod-list1---------------------------------------------------
mod.list <- list(
U = "zero",
Q = "diagonal and unequal",
Z = Z,
A = "zero")
## ----cyclic-sockeye-fit-1, cache=TRUE-------------------------------------------
m <- dim(Z)[2]
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,m,1)))
## ----echo=FALSE-----------------------------------------------------------------
plot(fit, plot.type="xtT")
## ----echo=FALSE-----------------------------------------------------------------
beta1s = fit$states[2,]
beta2s = fit$states[3,]
value = beta1s*sin(2*pi*(1:TT/p))+beta2s*cos(2*pi*(1:TT)/p)
plot(1:TT, value, type="l",xlab="", ylab="beta1*sin() + beta2*cos()")
abline(v=seq(0,TT,p), col="grey")
title(river)
## -------------------------------------------------------------------------------
# Fit a univariate MARSS model with a stochastic level plus a cycle of
# period `p` (default 5 years) to the log spawner counts of one river.
# Relies on the `sockeye` data set being available in the search path.
# Returns the fitted MARSS object.
fitriver <- function(river, p=5){
df <- subset(sockeye, region==river)
yt <- log(df$spawners)
TT <- length(yt)
# Time-varying Z: column 1 loads the level, columns 2-3 the sine/cosine
# basis whose coefficients evolve as random-walk states.
Z <- array(1, dim=c(1,3,TT))
Z[1,2,] <- sin(2*pi*(1:TT)/p)
Z[1,3,] <- cos(2*pi*(1:TT)/p)
mod.list <- list(
U = "zero",
Q = "diagonal and unequal",
Z = Z,
A = "zero")
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,3,1)), silent=TRUE)
return(fit)
}
## ----cyclic-sockeye-list-of-fits, cache=TRUE------------------------------------
fits <- list()
for(river in names(a)){
fits[[river]] <- fitriver(river)
}
## -------------------------------------------------------------------------------
dfz <- data.frame()
for(river in names(a)){
fit <- fits[[river]]
tmp <- data.frame(amplitude = sqrt(fit$states[2,]^2+fit$states[3,]^2),
trend = fit$states[1,],
river=river,
brood_year=subset(sockeye, region==river)$brood_year)
dfz <- rbind(dfz, tmp)
}
## -------------------------------------------------------------------------------
ggplot(dfz, aes(x=brood_year, y=amplitude)) +
geom_line() +
facet_wrap(~river, scales="free_y") +
ggtitle("Cycle Amplitude")
## -------------------------------------------------------------------------------
ggplot(dfz, aes(x=brood_year, y=trend)) +
geom_line() +
facet_wrap(~river, scales="free_y") +
ggtitle("Stochastic Level")
## -------------------------------------------------------------------------------
n <- 2
## -------------------------------------------------------------------------------
Z <- array(1, dim=c(n,n*3,TT))
Z[1:n,1:n,] <- diag(1,n)
for(t in 1:TT){
Z[,(n+1):(2*n),t] <- diag(sin(2*pi*t/p),n)
Z[,(2*n+1):(3*n),t] <- diag(cos(2*pi*t/p),n)
}
Z[,,1]
## -------------------------------------------------------------------------------
Q <- matrix(list(0), 3*n, 3*n)
Q[1:n,1:n] <- "c"
diag(Q) <- c(paste0("q",letters[1:n]), paste0("q",1:(2*n)))
Q
## -------------------------------------------------------------------------------
# Multivariate version of fitriver(): fit one MARSS model jointly to the
# log spawner series of several rivers. Each river gets its own stochastic
# level plus sine/cosine cycle states (period `p`); the level states share
# a full (symmetric) covariance while the cycle states are independent.
# Relies on the `sockeye` data set being available in the search path.
fitriver.m <- function(river, p=5){
require(tidyr)
require(dplyr)
require(MARSS)
# Reshape to one column per river (years in rows), then transpose so
# `yt` is rivers x years as MARSS expects.
df <- subset(sockeye, region %in% river)
df <- df %>% pivot_wider(id_cols=brood_year,names_from="region", values_from=spawners) %>%
ungroup() %>% select(-brood_year)
yt <- t(log(df))
TT <- ncol(yt)
n <- nrow(yt)
# Time-varying Z: identity block for the levels, diagonal sine/cosine
# blocks for the cycle states at each time step.
Z <- array(1, dim=c(n,n*3,TT))
Z[1:n,1:n,] <- diag(1,n)
for(t in 1:TT){
Z[,(n+1):(2*n),t] <- diag(sin(2*pi*t/p),n)
Z[,(2*n+1):(3*n),t] <- diag(cos(2*pi*t/p),n)
}
# Q: named covariances among the n level states (mirrored below the
# diagonal to keep the matrix symmetric), independent variances for the
# 2n cycle states.
Q <- matrix(list(0), 3*n, 3*n)
Q[1:n,1:n] <- paste0("c",1:(n^2))
diag(Q) <- c(paste0("q",letters[1:n]), paste0("q",1:(2*n)))
Q[lower.tri(Q)] <- t(Q)[lower.tri(Q)]
mod.list <- list(
U = "zero",
Q = Q,
Z = Z,
A = "zero")
fit <- MARSS(yt, model=mod.list, inits=list(x0=matrix(0,3*n,1)), silent=TRUE)
return(fit)
}
## ----cyclic-sockeye-more-rivers, cache=TRUE-------------------------------------
river <- unique(sockeye$region)
n <- length(river)
fit <- fitriver.m(river)
## ----cyclic-sockeye-corrplot----------------------------------------------------
require(corrplot)
Qmat <- coef(fit, type="matrix")$Q[1:n,1:n]
rownames(Qmat) <- colnames(Qmat) <- river
M <- cov2cor(Qmat)
corrplot(M, order = "hclust", addrect = 4)
## ----echo=FALSE, out.width="50%"------------------------------------------------
knitr::include_graphics("images/BB_sockeye_rivers_inset.png")
# 
|
# Simple linear regression of salary on year (experience), comparing
# correlation / R-squared / RMSE across transformations of either variable
# (identity, sqrt, log on predictor or response).
#read a salary dataset
sala<-read.csv(file.choose())
names(sala)[1]="year" #change column names
names(sala)[2]="salary"
# NOTE(review): attach() is fragile (masking, stale copies); prefer sala$...
attach(sala)#attaching the dataset
#correlation Analysis
cor(salary,year)#0.9782416
regv<-lm(salary~year)#linear model for dataset
summary(regv)#R-squared: 0.957, Adjusted R-squared: 0.9554
sqrt(sum(regv$residuals^2)/nrow(sala))#RMSE : 5592.044
#correlation Analysis with transformation technique
cor(salary,sqrt(year))#0.96488
regi<-lm(salary~sqrt(year))#linear model with squared technique
summary(regi)#R-squared: 0.931, Adjusted R-squared: 0.9285
sqrt(sum(regi$residuals^2)/nrow(sala))#RMSE : 7080.096
#correlation Analysis with transformation technique
cor(salary,log(year))# 0.9240611
regve<-lm(salary~log(year))#linear model with log technique
summary(regve)# R-squared: 0.8539, Adjusted R-squared: 0.8487
sqrt(sum(regve$residuals^2)/nrow(sala))#RMSE :: 10302.89
#correlation Analysis with transformation technique
# NOTE(review): the RMSEs below are on the log/sqrt response scale and are
# not directly comparable to the raw-salary RMSEs above.
cor(log(salary),year)# 0.9653844
regvk<-lm(log(salary)~year)#linear model with log technique on Output variable
summary(regvk)# R-squared: 0.932, Adjusted R-squared: 0.9295
sqrt(sum(regvk$residuals^2)/nrow(sala))#RMSE :: 0.09457437
#correlation Analysis with transformation technique
cor(sqrt(salary),year)# 0.975
regvu<-lm(sqrt(salary)~year)#linear model with sqrt technique on Output variable
summary(regvu)# R-squared: 0.9498, Adjusted R-squared: 0.948
sqrt(sum(regvu$residuals^2)/nrow(sala))#RMSE :: 10.93
#prediction model for Better R values
predd<-as.data.frame(predict(regvu,interval = "predict"))
Absolute=abs(predd$fit)#Find absolute value because sqrt technique gives high R values
# NOTE(review): this subtracts `year` from the predicted sqrt(salary); the
# comment says "error with year", but a residual against salary (after
# squaring the prediction) seems more plausible -- confirm intent.
errore=Absolute-sala$year# find error with year
plot(Absolute,errore)#plot the graph
| /simple linear/assign-sal.R | no_license | Vivek-DataScientist/assignments | R | false | false | 1,743 | r | #read a salary dataset
# Simple linear regression of salary on year (experience), comparing
# correlation / R-squared / RMSE across transformations of either variable
# (identity, sqrt, log on predictor or response). Reads the dataset
# interactively via file.choose().
sala<-read.csv(file.choose())
names(sala)[1]="year" #change column names
names(sala)[2]="salary"
# NOTE(review): attach() is fragile (masking, stale copies); prefer sala$...
attach(sala)#attaching the dataset
#correlation Analysis
cor(salary,year)#0.9782416
regv<-lm(salary~year)#linear model for dataset
summary(regv)#R-squared: 0.957, Adjusted R-squared: 0.9554
sqrt(sum(regv$residuals^2)/nrow(sala))#RMSE : 5592.044
#correlation Analysis with transformation technique
cor(salary,sqrt(year))#0.96488
regi<-lm(salary~sqrt(year))#linear model with squared technique
summary(regi)#R-squared: 0.931, Adjusted R-squared: 0.9285
sqrt(sum(regi$residuals^2)/nrow(sala))#RMSE : 7080.096
#correlation Analysis with transformation technique
cor(salary,log(year))# 0.9240611
regve<-lm(salary~log(year))#linear model with log technique
summary(regve)# R-squared: 0.8539, Adjusted R-squared: 0.8487
sqrt(sum(regve$residuals^2)/nrow(sala))#RMSE :: 10302.89
#correlation Analysis with transformation technique
# NOTE(review): the RMSEs below are on the log/sqrt response scale and are
# not directly comparable to the raw-salary RMSEs above.
cor(log(salary),year)# 0.9653844
regvk<-lm(log(salary)~year)#linear model with log technique on Output variable
summary(regvk)# R-squared: 0.932, Adjusted R-squared: 0.9295
sqrt(sum(regvk$residuals^2)/nrow(sala))#RMSE :: 0.09457437
#correlation Analysis with transformation technique
cor(sqrt(salary),year)# 0.975
regvu<-lm(sqrt(salary)~year)#linear model with sqrt technique on Output variable
summary(regvu)# R-squared: 0.9498, Adjusted R-squared: 0.948
sqrt(sum(regvu$residuals^2)/nrow(sala))#RMSE :: 10.93
#prediction model for Better R values
predd<-as.data.frame(predict(regvu,interval = "predict"))
Absolute=abs(predd$fit)#Find absolute value because sqrt technique gives high R values
# NOTE(review): this subtracts `year` from the predicted sqrt(salary); the
# comment says "error with year", but a residual against salary (after
# squaring the prediction) seems more plausible -- confirm intent.
errore=Absolute-sala$year# find error with year
plot(Absolute,errore)#plot the graph
|
## Project 1 - Create Plot4
## Reads two days of household power data and draws a 2x2 panel figure
## (global active power, voltage, sub-metering, global reactive power),
## then copies the screen device to plot4.png.
## set working directory
## NOTE(review): a hard-coded absolute setwd() makes the script
## non-portable; prefer running from the project directory.
setwd("C:/Users/Ramesh/Desktop/DataScience/ExploratoryDataAnalysis/Project1")
## Have the data file downloaded into the working directory
##Read the entire data set ("?" marks missing values in the source file)
full.data <- read.csv("household_power_consumption.txt",sep=";",na.strings="?")
##Extract the sub set: keep only 1-2 Feb 2007 (dates are still strings here)
sub.data <- full.data[full.data$Date %in% c("1/2/2007","2/2/2007"),]
##Perform the data transformation
## (inside transform() both expressions see the ORIGINAL string columns,
## so pasting Date with Time below uses the untransformed date string)
sub.data <- transform(
sub.data,
Date = as.Date(Date, format = "%d/%m/%Y"),
Time = strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"))
attach(sub.data)
## Basic settings: 2x2 grid of panels, 12 pt text, transparent background
par(ps=12, bg="transparent", mfrow=c(2,2))
## Generate diagram (each panel: empty plot first, then the line)
plot(Time, Global_active_power, type="n", ylab="Global Active Power", xlab="")
lines(Time, Global_active_power)
plot(Time, Voltage, type="n",xlab="datetime")
lines(Time, Voltage)
plot(Time, Sub_metering_1,ylab="Energy sub metering",xlab="", type="n")
lines(Time, Sub_metering_1)
lines(Time, Sub_metering_2,col="red")
lines(Time, Sub_metering_3,col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1),col=c("black","red","blue"), bty = "n")
plot(Time, Global_reactive_power, type="n", xlab="datetime")
lines(Time, Global_reactive_power)
detach(sub.data)
## NOTE(review): this axis() call draws weekday labels onto the LAST panel,
## which already has a datetime axis; it looks like a leftover from a plot
## that used row indices on the x-axis -- confirm it is intended. (`label=`
## also relies on partial matching of `labels=`.)
axis(1, at=c(1,(nrow(sub.data)/4),nrow(sub.data)/2), label=c("Thu","Fri","Sat"))
## Create Plot4 PNG file: dev.copy duplicates the on-screen figure to PNG
dev.copy(png,file="plot4.png", width=480,height=480)
dev.off()
| /plot4.R | no_license | Paravasthuramesh/ExData_Plotting1 | R | false | false | 1,443 | r | ## Project 1 - Create Plot4
## set working directory
## NOTE(review): a hard-coded absolute setwd() makes the script
## non-portable; prefer running from the project directory.
setwd("C:/Users/Ramesh/Desktop/DataScience/ExploratoryDataAnalysis/Project1")
## Have the data file downloaded into the working directory
##Read the entire data set ("?" marks missing values in the source file)
full.data <- read.csv("household_power_consumption.txt",sep=";",na.strings="?")
##Extract the sub set: keep only 1-2 Feb 2007 (dates are still strings here)
sub.data <- full.data[full.data$Date %in% c("1/2/2007","2/2/2007"),]
##Perform the data transformation
## (inside transform() both expressions see the ORIGINAL string columns,
## so pasting Date with Time below uses the untransformed date string)
sub.data <- transform(
sub.data,
Date = as.Date(Date, format = "%d/%m/%Y"),
Time = strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"))
attach(sub.data)
## Basic settings: 2x2 grid of panels, 12 pt text, transparent background
par(ps=12, bg="transparent", mfrow=c(2,2))
## Generate diagram (each panel: empty plot first, then the line)
plot(Time, Global_active_power, type="n", ylab="Global Active Power", xlab="")
lines(Time, Global_active_power)
plot(Time, Voltage, type="n",xlab="datetime")
lines(Time, Voltage)
plot(Time, Sub_metering_1,ylab="Energy sub metering",xlab="", type="n")
lines(Time, Sub_metering_1)
lines(Time, Sub_metering_2,col="red")
lines(Time, Sub_metering_3,col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1),col=c("black","red","blue"), bty = "n")
plot(Time, Global_reactive_power, type="n", xlab="datetime")
lines(Time, Global_reactive_power)
detach(sub.data)
## NOTE(review): this axis() call draws weekday labels onto the LAST panel,
## which already has a datetime axis; it looks like a leftover from a plot
## that used row indices on the x-axis -- confirm it is intended. (`label=`
## also relies on partial matching of `labels=`.)
axis(1, at=c(1,(nrow(sub.data)/4),nrow(sub.data)/2), label=c("Thu","Fri","Sat"))
## Create Plot4 PNG file: dev.copy duplicates the on-screen figure to PNG
dev.copy(png,file="plot4.png", width=480,height=480)
dev.off()
|
#The following functions are used to calculate the inverse of a matrix
#and cache the result. If the inverse has already been calculated
#the cached inverse will be called, rather than calculating
#the inverse again
#makeCacheMatrix calculates the inverse of an input matrix (x).
#It then stores both the inverse and the matrix as variables
#in the parent environment (for calling in cacheSolve).
#NOTE(review): `<<-` from a function defined at top level writes `xinv`
#and `y` into the global environment; this pollutes the caller's
#workspace and breaks if the function is defined inside another scope.
#The conventional version keeps the cache in a closure environment.
makeCacheMatrix <- function(x = matrix()) {
#calculate the inverse (solve() errors if x is singular or non-square)
xinv<<- solve(x)
#cache matrix used to calculate the inverse as new variable
y <<- x
}
# cacheSolve returns the inverse of `x`, reusing the cached inverse when the
# matrix has not changed since makeCacheMatrix / the last cacheSolve call.
#
# It reads two variables from the enclosing (typically global) environment:
#   y    -- the matrix whose inverse was last computed
#   xinv -- that matrix's cached inverse
# On a cache hit (x equals y element-wise) the cached `xinv` is returned
# with an explanatory message; otherwise a fresh inverse is computed with
# solve() and both variables are refreshed for the next call.
#
# NOTE(review): if `y` does not exist yet, or has different dimensions than
# `x`, `all(y == x)` errors instead of falling through to a recompute, so
# makeCacheMatrix (or an earlier successful cacheSolve) must have run first.
# As the original author notes, this function subsumes makeCacheMatrix:
# it performs the same caching plus the hit/miss logic. makeCacheMatrix is
# kept only because the assignment requires it.
cacheSolve <- function(x, ...) {
# Cache-hit test: element-wise comparison against the cached matrix `y`
if(all(y == x)) {
# Retrieve the cached inverse and make the retrieval explicit via message
message("getting cached inverse")
return(xinv)
}
# Cache miss: the matrix changed, so a new inverse must be computed
else {
# Explain that the matrix is new and the inverse is being recalculated
message("matrix x has changed")
message("new inverse:")
# Compute the new inverse and store it in the enclosing environment
xinv<<- solve(x)
# Cache the matrix used to compute the inverse in the enclosing environment
y <<- x
return(xinv)
}
} | /cachematrix.R | no_license | rathbunkm/ProgrammingAssignment2 | R | false | false | 2,061 | r | #The following functions are used to calculate the inverse of a matrix
#and cache the result. If the inverse has already been calculated
#the cached inverse will be called, rather than calculating
#the inverse again
#makeCacheMatrix calculates the inverse of an input matrix (x).
#It then stores both the inverse and the matrix in as variables
#in the parent environment (for calling in cacheSolve)
makeCacheMatrix <- function(x = matrix()) {
#calculate the inverse
xinv<<- solve(x)
#cache matrix used to calculate the inverse as new variable
y <<- x
}
# cacheSolve returns the inverse of matrix `x`, reusing a cached result
# when possible.
#
# It reads two variables from the enclosing (typically global) environment:
#   y    -- the matrix whose inverse was last computed
#   xinv -- that matrix's cached inverse
# If `x` equals `y` element-wise, the matrix has not changed since the last
# computation, so the cached `xinv` is returned with a message. Otherwise a
# fresh inverse is computed with solve() and both variables are written back
# via `<<-` for the next call.
#
# NOTE(review): `all(y == x)` errors if `y` does not exist yet or has
# different dimensions than `x`, so makeCacheMatrix (or an earlier
# successful cacheSolve) must have run first. This function subsumes
# makeCacheMatrix -- the latter is kept only because the assignment
# requires it.
cacheSolve <- function(x, ...) {
  # Cache hit: the matrix is unchanged since the inverse was last computed
  cached_is_current <- all(y == x)
  if (cached_is_current) {
    message("getting cached inverse")
    return(xinv)
  }
  # Cache miss: announce, recompute, and refresh the cached state
  message("matrix x has changed")
  message("new inverse:")
  xinv <<- solve(x)
  y <<- x
  xinv
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.