content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testFunctions.R
\name{ShepFun1}
\alias{ShepFun1}
\alias{ShepFun2}
\alias{ShepFun3}
\alias{ShepFun4}
\alias{ShepFun5}
\alias{ShepFuns}
\title{Test functions for/from SHEPPACK}
\usage{
ShepFun1(x)
}
\arguments{
\item{x}{A numeric vector with arbitrary length.}
}
\value{
Function's value.
}
\description{
Test functions for/from SHEPPACK
}
\details{
These functions are described in the article cited in the \bold{references}
section.
\deqn{f_1(\mathbf{x}) = 1 + \frac{2}{d} \, \left| d/2 - \sum_i x_i
\right|}{ f1(x) = 1 + (2/d) | (d/2) - sum_i x_i |}
\deqn{f_2(\mathbf{x}) = 1 - \frac{2}{d}\, \sum_i \left|x_i - 0.5
\right|}{ f2(x) = 1 - (2/d) sum_i |x_i - 0.5 |}
\deqn{f_3(\mathbf{x}) = 1 - 2 \, \max_{i} \left|x_i - 0.5 \right|}{
f3(x) = 1 - 2 max_{i} |x_i - 0.5 |}
\deqn{f_4(\mathbf{x}) = \prod_i \left[ 1 - 2 \left| x_i - 0.5
\right| \right]}{ f4(x) = prod_i [ 1 - 2 | x_i - 0.5 | ]}
\deqn{f_5(\mathbf{x}) = 1 - c_5 \, \left[ \sum_i \left|x_i - 0.5
\right| + \prod_i \left| x_i - 0.5 \right| \right]}{ f5(x) = 1 -
c_5 [ sum_i |x_i - 0.5 | + prod_i | x_i - 0.5 | ]}
where \eqn{c_5 = d/2 + (0.5)^d}, and all sums or products are for
\eqn{i=1} to \eqn{d}. All these functions are defined on
\eqn{[0,\,1]^d} and take values in \eqn{[0,1]}. The four functions
\eqn{f_i} for \eqn{i > 1} have a unique maximum located at
\eqn{\mathbf{x}^\star}{xStar} with all coordinates \eqn{x_j^\star
= 0.5}{xStar[j] = 0.5} and \eqn{f_i(\mathbf{x}^\star) =1}{f_i(xStar) = 1}.
}
\note{
These functions are also exported as elements of the
\code{ShepFuns} list.
}
\examples{
## interpolate 'Shepfun3' for d = 4
d <- 4
GDd <- Grid(nlevels = rep(8, d))
fGrid <- apply_Grid(GDd, ShepFun3)
Xoutd <- matrix(runif(200 * d), ncol = d)
GI <- interp_Grid(X = GDd, Y = fGrid, Xout = Xoutd)
F <- apply(Xoutd, 1, ShepFun3)
max(abs(F - GI))
## 3D plot
require(lattice)
X <- as.data.frame(Grid(nlevels = c("x1" = 30, "x2" = 30)))
df <- data.frame(x1 = numeric(0), x2 = numeric(0),
f = numeric(0), i = numeric(0))
for (i in 1:5) {
f <- apply(X, 1, ShepFuns[[i]])
df <- rbind(df, data.frame(x1 = X$x1, x2 = X$x2, f = f, i = i))
}
pl <- wireframe(f ~ x1 * x2 | i, data = df,
outer = TRUE, shade = FALSE, zlab = "",
screen = list(z = 20, x = -30),
strip = strip.custom(strip.names = c(TRUE),
strip.levels = c(TRUE)),
main = "", horizontal = TRUE, col = "SpringGreen4")
pl
}
\references{
W.I. Thacker, J. Zhang, L.T. Watson, J.B. Birch,
M.A. Iyer and M.W. Berry (2010). Algorithm 905: SHEPPACK: Modified
Shepard Algorithm for Interpolation of Scattered Multivariate Data.
\emph{ACM Trans. on Math. Software} (TOMS) Vol. 37, n. 3.
\href{http://dl.acm.org/citation.cfm?id=1824812}{link}
}
| /man/ShepFun1.Rd | no_license | changhw/smint | R | false | true | 2,854 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testFunctions.R
\name{ShepFun1}
\alias{ShepFun1}
\alias{ShepFun2}
\alias{ShepFun3}
\alias{ShepFun4}
\alias{ShepFun5}
\alias{ShepFuns}
\title{Test functions for/from SHEPPACK}
\usage{
ShepFun1(x)
}
\arguments{
\item{x}{A numeric vector with arbitrary length.}
}
\value{
Function's value.
}
\description{
Test functions for/from SHEPPACK
}
\details{
These functions are described in the article cited in the \bold{references}
section.
\deqn{f_1(\mathbf{x}) = 1 + \frac{2}{d} \, \left| d/2 - \sum_i x_i
\right|}{ f1(x) = 1 + (2/d) | (d/2) - sum_i x_i |}
\deqn{f_2(\mathbf{x}) = 1 - \frac{2}{d}\, \sum_i \left|x_i - 0.5
\right|}{ f2(x) = 1 - (2/d) sum_i |x_i - 0.5 |}
\deqn{f_3(\mathbf{x}) = 1 - 2 \, \max_{i} \left|x_i - 0.5 \right|}{
f3(x) = 1 - 2 max_{i} |x_i - 0.5 |}
\deqn{f_4(\mathbf{x}) = \prod_i \left[ 1 - 2 \left| x_i - 0.5
\right| \right]}{ f4(x) = prod_i [ 1 - 2 | x_i - 0.5 | ]}
\deqn{f_5(\mathbf{x}) = 1 - c_5 \, \left[ \sum_i \left|x_i - 0.5
\right| + \prod_i \left| x_i - 0.5 \right| \right]}{ f5(x) = 1 -
c_5 [ sum_i |x_i - 0.5 | + prod_i | x_i - 0.5 | ]}
where \eqn{c_5 = d/2 + (0.5)^d}, and all sums or products are for
\eqn{i=1} to \eqn{d}. All these functions are defined on
\eqn{[0,\,1]^d} and take values in \eqn{[0,1]}. The four functions
\eqn{f_i} for \eqn{i > 1} have an unique maximum located at
\eqn{\mathbf{x}^\star}{xStar} with all coordinates \eqn{x_j^\star
= 0.5}{xStar[j] = 0.5} and \eqn{f_i(\mathbf{x}^\star) =1}{f_i(xStar) = 1}.
}
\note{
These functions are also exported as elements of the
\code{ShepFuns} list.
}
\examples{
## interpolate 'Shepfun3' for d = 4
d <- 4
GDd <- Grid(nlevels = rep(8, d))
fGrid <- apply_Grid(GDd, ShepFun3)
Xoutd <- matrix(runif(200 * d), ncol = d)
GI <- interp_Grid(X = GDd, Y = fGrid, Xout = Xoutd)
F <- apply(Xoutd, 1, ShepFun3)
max(abs(F - GI))
## 3D plot
require(lattice)
X <- as.data.frame(Grid(nlevels = c("x1" = 30, "x2" = 30)))
df <- data.frame(x1 = numeric(0), x2 = numeric(0),
f = numeric(0), i = numeric(0))
for (i in 1:5) {
f <- apply(X, 1, ShepFuns[[i]])
df <- rbind(df, data.frame(x1 = X$x1, x2 = X$x2, f = f, i = i))
}
pl <- wireframe(f ~ x1 * x2 | i, data = df,
outer = TRUE, shade = FALSE, zlab = "",
screen = list(z = 20, x = -30),
strip = strip.custom(strip.names = c(TRUE),
strip.levels = c(TRUE)),
main = "", horizontal = TRUE, col = "SpringGreen4")
pl
}
\references{
W.I. Thacker, J. Zhang, L.T. Watson, J.B. Birch,
M.A. Iyer and M.W. Berry (2010). Algorithm 905: SHEPPACK: Modified
Shepard Algorithm for Interpolation of Scattered Multivariate Data
\emph{ACM Trans. on Math. Software} (TOMS) Vol. 37, n. 3.
\href{http://dl.acm.org/citation.cfm?id=1824812}{link}
}
|
# Day 3
# Chapter 8 & 9
# Christelle Larey
# 21 April 2021
# Chapter 8 - Simple linear regressions -----------------------------------
# Fit a simple linear regression of eruption duration on waiting time for
# the built-in Old Faithful data, then plot the points with the fitted line
# and the slope, p-value and R^2 annotated on the figure.
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(purrr)
data("faithful")
head(faithful)
# Model eruption duration (minutes) as a linear function of waiting time.
eruption.lm <- lm(eruptions ~ waiting, data = faithful)
summary(eruption.lm)
#how to interpret linear model (predicts trends)
#use journals
# Slope is the 2nd coefficient (waiting); rounded for display on the plot.
slope <- round(eruption.lm$coef[2], 3)
# p.val <- round(coefficients(summary(eruption.lm))[2, 4], 3) # it approx. 0, so...
# NOTE(review): the p-value is hard-coded as an upper bound because the real
# value rounds to 0; the annotation below therefore reads "p < 0.001".
p.val = 0.001
r2 <- round(summary(eruption.lm)$r.squared, 3)
#graph
#point graph with a regression line
# parse = TRUE renders the annotate() labels as plotmath (italic p, r^2).
ggplot(data = faithful, aes(x = waiting, y = eruptions, colour = eruptions)) +
geom_point() +
annotate("text", x = 45, y = 5, label = paste0("slope == ", slope, "~(min/min)"), parse = TRUE, hjust = 0) +
annotate("text", x = 45, y = 4.75, label = paste0("italic(p) < ", p.val), parse = TRUE, hjust = 0) +
annotate("text", x = 45, y = 4.5, label = paste0("italic(r)^2 == ", r2), parse = TRUE, hjust = 0) +
stat_smooth(method = "lm", colour = "salmon") +
labs(title = "Old Faithful eruption data",
subtitle = "Linear regression",
x = "Waiting time (minutes)",
y = "Eruption duration (minutes)")
# Chapter 9 - Correlations ------------------------------------------------
# Pearson, Spearman and Kendall correlations on the Ecklonia kelp data,
# followed by correlogram and heat-map visualisations.
#assumptions:
#pair-wise data
#absence of outliers
#linearity
#normality of distribution
#homoscedasticity
#level (type) of measurement
#Continuous data (Pearson correlation)
#Ordinal data (Spearman correlation)
# Load libraries
library(tidyverse)
library(ggpubr)
library(corrplot)
# Load data
# NOTE(review): expects data/ecklonia.csv relative to the working directory.
ecklonia <- read_csv("data/ecklonia.csv")
#got rid of categorical variables
#dataframe where each column represents pair-wise
#continuous/ordinal measurements
ecklonia_sub <- ecklonia %>%
select(-species, - site, - ID)
# Pearson correlation
# continuous data
# look at cor value
# closer to 1, stronger the correlation
# Perform correlation analysis on two specific variables
# Note that we do not need the final two arguments in this function to be stated
# as they are the default settings.
# They are only shown here to illustrate that they exist.
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length,
use = "everything", method = "pearson")
# look at the correlation for the whole dataset
ecklonia_pearson <- cor(ecklonia_sub)
ecklonia_pearson
#Spearman rank correlation
#ordinal data
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length,
use = "everything", method = "spearman")
# Create ordinal data
# cut() bins total length into 3 classes; as.numeric() keeps the bin index.
ecklonia$length <- as.numeric(cut((ecklonia$stipe_length+ecklonia$frond_length), breaks = 3))
# Run test on any variable
# NOTE(review): assumes the data has a 'digits' column — confirm against
# data/ecklonia.csv.
cor.test(ecklonia$length, ecklonia$digits)
# Kendall rank correlation
# both continuous and ordinal data
# Shapiro-Wilk normality check per variable; [2] extracts the p-value.
ecklonia_norm <- ecklonia_sub %>%
gather(key = "variable") %>%
group_by(variable) %>%
summarise(variable_norm = as.numeric(shapiro.test(value)[2]))
ecklonia_norm
#change correlation by method
cor.test(ecklonia$primary_blade_length, ecklonia$primary_blade_width, method = "kendall")
# one panel visual
# Calculate Pearson r beforehand for plotting
r_print <- paste0("r = ",
round(cor(x = ecklonia$stipe_length, ecklonia$frond_length),2))
# Then create a single panel showing one correlation
eck_plot <- ecklonia %>%
ggplot(aes(x = stipe_length, y = frond_length)) +
geom_smooth(method = "lm", colour = "grey90", se = F) +
geom_point(colour = "mediumorchid4") +
geom_label(x = 300, y = 240, label = r_print) +
labs(x = "Stipe length (cm)", y = "Frond length (cm)") +
theme_pubclean()
eck_plot
# Multiple panel visual
corrplot(ecklonia_pearson, method = "circle")
#basic heat map without explanation of colour correlation
heat1 <- ecklonia_pearson %>%
heatmap(Colv = NA, Rowv = NA, scale="column")
heat1
# Add classic arguments like main title and axis title
heat2 <- ecklonia_pearson %>%
heatmap(Colv = NA, Rowv = NA, scale="column",
xlab="", ylab="", main="Ecklonia_Pearson")
heat2
#pretty heat map with correlation bar
library("pheatmap")
heat3 <- ecklonia_pearson %>%
pheatmap(cutree_rows = 9,
xlab = "", ylab = "", main = "Ecklonia_pearson")
heat3
#figure title at the bottoms
#tables = at top
| /Day_3.R | no_license | ChristelleJLarey/Biostats | R | false | false | 4,261 | r | # Day 3
# Chapter 8 & 9
# Christelle Larey
# 21 April 2021
# Chapter 8 - Simple linear regressions -----------------------------------
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(purrr)
data("faithful")
head(faithful)
eruption.lm <- lm(eruptions ~ waiting, data = faithful)
summary(eruption.lm)
#how to interpret linear model (predicts trends)
#use journals
slope <- round(eruption.lm$coef[2], 3)
# p.val <- round(coefficients(summary(eruption.lm))[2, 4], 3) # it approx. 0, so...
p.val = 0.001
r2 <- round(summary(eruption.lm)$r.squared, 3)
#graph
#point graph with a regression line
ggplot(data = faithful, aes(x = waiting, y = eruptions, colour = eruptions)) +
geom_point() +
annotate("text", x = 45, y = 5, label = paste0("slope == ", slope, "~(min/min)"), parse = TRUE, hjust = 0) +
annotate("text", x = 45, y = 4.75, label = paste0("italic(p) < ", p.val), parse = TRUE, hjust = 0) +
annotate("text", x = 45, y = 4.5, label = paste0("italic(r)^2 == ", r2), parse = TRUE, hjust = 0) +
stat_smooth(method = "lm", colour = "salmon") +
labs(title = "Old Faithful eruption data",
subtitle = "Linear regression",
x = "Waiting time (minutes)",
y = "Eruption duration (minutes)")
# Chapter 9 - Correlations ------------------------------------------------
#assumptions:
#pair-wise data
#absence of outliers
#linearity
#normality of distribution
#homoscedasticity
#level (type) of measurement
#Continuous data (Pearson correlation)
#Ordinal data (Spearman correlation)
# Load libraries
library(tidyverse)
library(ggpubr)
library(corrplot)
# Load data
ecklonia <- read_csv("data/ecklonia.csv")
#got rid of categorical variables
#dataframe where each column represents pair-wise
#continuous/ordinal measurements
ecklonia_sub <- ecklonia %>%
select(-species, - site, - ID)
# Pearson correlation
# continuous data
# look at cor value
# closer to 1, stronger the correlation
# Perform correlation analysis on two specific variables
# Note that we do not need the final two arguments in this function to be stated
# as they are the default settings.
# They are only shown here to illustrate that they exist.
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length,
use = "everything", method = "pearson")
# look at the correlation for the whole dataset
ecklonia_pearson <- cor(ecklonia_sub)
ecklonia_pearson
#Spearman rank correlation
#ordinal data
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length,
use = "everything", method = "spearman")
# Create ordinal data
ecklonia$length <- as.numeric(cut((ecklonia$stipe_length+ecklonia$frond_length), breaks = 3))
# Run test on any variable
cor.test(ecklonia$length, ecklonia$digits)
# Kendall rank correlation
# both continuous and ordinal data
ecklonia_norm <- ecklonia_sub %>%
gather(key = "variable") %>%
group_by(variable) %>%
summarise(variable_norm = as.numeric(shapiro.test(value)[2]))
ecklonia_norm
#change correlation by method
cor.test(ecklonia$primary_blade_length, ecklonia$primary_blade_width, method = "kendall")
# one panel visual
# Calculate Pearson r beforehand for plotting
r_print <- paste0("r = ",
round(cor(x = ecklonia$stipe_length, ecklonia$frond_length),2))
# Then create a single panel showing one correlation
eck_plot <- ecklonia %>%
ggplot(aes(x = stipe_length, y = frond_length)) +
geom_smooth(method = "lm", colour = "grey90", se = F) +
geom_point(colour = "mediumorchid4") +
geom_label(x = 300, y = 240, label = r_print) +
labs(x = "Stipe length (cm)", y = "Frond length (cm)") +
theme_pubclean()
eck_plot
# Multiple panel visual
corrplot(ecklonia_pearson, method = "circle")
#basic heat map without explanation of colour correlation
heat1 <- ecklonia_pearson %>%
heatmap(Colv = NA, Rowv = NA, scale="column")
heat1
# Add classic arguments like main title and axis title
heat2 <- ecklonia_pearson %>%
heatmap(Colv = NA, Rowv = NA, scale="column",
xlab="", ylab="", main="Ecklonia_Pearson")
heat2
#pretty heat map with correlation bar
library("pheatmap")
heat3 <- ecklonia_pearson %>%
pheatmap(cutree_rows = 9,
xlab = "", ylab = "", main = "Ecklonia_pearson")
heat3
#figure title at the bottoms
#tables = at top
|
# Read a World Bank "Gender Stats" extract, keep one country's rows,
# transpose the 2012-2018 year columns so each indicator code becomes a
# column, and list the indicator columns of interest.
dataset <-read.csv("Gender_StatsData.csv", stringsAsFactors=FALSE)
#write_countries <- data.frame(row.names = unique(dataset$Country.Code));
#write_countries$Indicator.Name <- unique(dataset$Country.Name);
#write.table(write_countries, file = "country_names.txt", sep = "\t",row.names = TRUE, col.names = NA)
# Keep only rows for country code "SGP" (Singapore).
# NOTE(review): the 'mxset' name is left over from an earlier Mexico
# analysis (see the commented-out lines below).
mxset <- subset(dataset, dataset$Country.Code=='SGP');
#unique(dataset$Country.Name=='Mexico')
#mxset <- dataset[dataset[1]=='Mexico']
#mxset <- setNames(mxset, colnames(dataset))
# Column positions of the first (X2012) and last (X2018) year of interest.
firstcol = which(colnames(mxset)=="X2012");
lastcol = which(colnames(mxset)=="X2018");
a_mxset<-mxset[c(firstcol:lastcol)];
row.names(a_mxset) <- mxset$Indicator.Code;
#head(a_mxset);
# Transpose: rows become years, columns become indicator codes.
t_a_mxset <- as.data.frame(t(as.matrix(a_mxset)));
#length(t_a_mxset$SE.TER.CUAT.BA.FE.ZS)
t_a_mxset$SE.TER.CUAT.BA.FE.ZS#Studied Bachelor
t_a_mxset$SE.SEC.CUAT.LO.FE.ZS#Studied Secondary
t_a_mxset$SE.PRM.CUAT.FE.ZS #Completed Primary
t_a_mxset$SE.TER.CUAT.MS.FE.ZS#Completed Masters
t_a_mxset$SE.TER.CUAT.DO.FE.ZS#Completed Ph D
t_a_mxset$SL.UEM.ADVN.FE.ZS #Unemployment with advanced education
t_a_mxset$SL.UEM.BASC.FE.ZS #Unemployment with basic education
t_a_mxset$SL.UEM.INTM.FE.ZS #Unemployment with intermediate education
t_a_mxset$SL.UEM.TOTL.FE.ZS #Unemployment total
t_a_mxset$NY.GDP.MKTP.CD #GDP
t_a_mxset$NY.GDP.MKTP.KD.ZG #Annual growth
t_a_mxset$NY.GDP.PCAP.CD #GDP per capita
t_a_mxset$IC.WEF.LLCO.FE #Business Owners
t_a_mxset$IC.WEF.LLCD.FE #Female directors
t_a_mxset$IC.WEF.SOLO.FE #Female sole propietors
t_a_mxset$SL.AGR.EMPL.FE.ZS #Employment in agriculture as percentage ILO
t_a_mxset$SL.IND.EMPL.FE.ZS #Employment in industry ILO
t_a_mxset$SL.SRV.EMPL.FE.ZS #Employment in services, ILO
t_a_mxset$SL.EMP.TOTL.SP.FE.ZS #Employment to population ratio, 15+, female modeled ILO
t_a_mxset$SL.EMP.SMGT.FE.ZS #Female share of employment in senior and middle mgmt
t_a_mxset$SE.TER.GRAD.FE.SI.ZS #Female share of graduates from STEM.
#colnames(t_a_mxset)
rownames(t_a_mxset)
# NOTE(review): rm(list = ls()) wipes the entire global environment when
# this script is sourced — including t_a_mxset built above. Consider
# removing it or returning the result instead.
rm(list=ls())
| /GetCountries.r | no_license | edjacob25/DisparityAnalysisOnR | R | false | false | 1,996 | r | dataset <-read.csv("Gender_StatsData.csv", stringsAsFactors=FALSE)
#write_countries <- data.frame(row.names = unique(dataset$Country.Code));
#write_countries$Indicator.Name <- unique(dataset$Country.Name);
#write.table(write_countries, file = "country_names.txt", sep = "\t",row.names = TRUE, col.names = NA)
mxset <- subset(dataset, dataset$Country.Code=='SGP');
#unique(dataset$Country.Name=='Mexico')
#mxset <- dataset[dataset[1]=='Mexico']
#mxset <- setNames(mxset, colnames(dataset))
firstcol = which(colnames(mxset)=="X2012");
lastcol = which(colnames(mxset)=="X2018");
a_mxset<-mxset[c(firstcol:lastcol)];
row.names(a_mxset) <- mxset$Indicator.Code;
#head(a_mxset);
t_a_mxset <- as.data.frame(t(as.matrix(a_mxset)));
#length(t_a_mxset$SE.TER.CUAT.BA.FE.ZS)
t_a_mxset$SE.TER.CUAT.BA.FE.ZS#Studied Bachelor
t_a_mxset$SE.SEC.CUAT.LO.FE.ZS#Studied Secondary
t_a_mxset$SE.PRM.CUAT.FE.ZS #Completed Primary
t_a_mxset$SE.TER.CUAT.MS.FE.ZS#Completed Masters
t_a_mxset$SE.TER.CUAT.DO.FE.ZS#Completed Ph D
t_a_mxset$SL.UEM.ADVN.FE.ZS #Unemployment with advanced education
t_a_mxset$SL.UEM.BASC.FE.ZS #Unemployment with basic education
t_a_mxset$SL.UEM.INTM.FE.ZS #Unemployment with intermediate education
t_a_mxset$SL.UEM.TOTL.FE.ZS #Unemployment total
t_a_mxset$NY.GDP.MKTP.CD #GDP
t_a_mxset$NY.GDP.MKTP.KD.ZG #Annual growth
t_a_mxset$NY.GDP.PCAP.CD #GDP per capita
t_a_mxset$IC.WEF.LLCO.FE #Business Owners
t_a_mxset$IC.WEF.LLCD.FE #Female directors
t_a_mxset$IC.WEF.SOLO.FE #Female sole propietors
t_a_mxset$SL.AGR.EMPL.FE.ZS #Employment in agriculture as percentage ILO
t_a_mxset$SL.IND.EMPL.FE.ZS #Employment in industry ILO
t_a_mxset$SL.SRV.EMPL.FE.ZS #Employment in services, ILO
t_a_mxset$SL.EMP.TOTL.SP.FE.ZS #Employment to population ratio, 15+, female modeled ILO
t_a_mxset$SL.EMP.SMGT.FE.ZS #Female share of employment in senior and middle mgmt
t_a_mxset$SE.TER.GRAD.FE.SI.ZS #Female share of graduates from STEM.
#colnames(t_a_mxset)
rownames(t_a_mxset)
rm(list=ls())
|
\name{BANOVA.Binomial}
\alias{BANOVA.Binomial}
\alias{predict.BANOVA.Binomial}
\alias{print.BANOVA.Binomial}
\alias{summary.BANOVA.Binomial}
\title{Estimation of BANOVA with a Binomial dependent variable}
\description{
\code{BANOVA.Binomial} implements a Hierarchical Bayesian ANOVA for a binomial response variable using a logit link and a normal heterogeneity distribution.
}
\usage{
BANOVA.Binomial(l1_formula = "NA", l2_formula = "NA", data,
id, num_trials, l2_hyper = c(1, 1, 0.0001), burnin = 5000, sample = 2000,
thin = 10, adapt = 0, conv_speedup = F, jags = runjags.getOption('jagspath'))
\method{summary}{BANOVA.Binomial}(object, ...)
\method{predict}{BANOVA.Binomial}(object, newdata = NULL,...)
\method{print}{BANOVA.Binomial}(x, ...)
}
\arguments{
\item{l1_formula}{formula for level 1 e.g. 'Y~X1+X2'}
\item{l2_formula}{formula for level 2 e.g. '~Z1+Z2',
response variable must not be included}
\item{data}{a data.frame in long format including all features in level 1 and level 2(covariates and categorical factors) and responses}
\item{id}{subject ID of each response unit}
\item{num_trials}{the number of trials of each
observation(=1, if it is bernoulli), the type is forced to be 'integer'}
\item{l2_hyper}{level 2 hyperparameters, c(a, b, \eqn{\gamma}), default c(1,1,0.0001)}
\item{burnin}{the number of burn in draws in the MCMC algorithm, default 5000}
\item{sample}{target samples in the MCMC algorithm after thinning, default 2000}
\item{thin}{the number of samples in the MCMC algorithm that needs to be thinned, default 10}
\item{adapt}{the number of adaptive iterations, default 0 (see \link[runjags]{run.jags})}
\item{conv_speedup}{whether to speedup convergence, default F}
\item{jags}{the system call or path for activating 'JAGS'. Default calls findjags() to attempt to locate 'JAGS' on your system}
\item{object}{object of class \code{BANOVA.Bin} (returned by \code{BANOVA.Bin})}
\item{newdata}{test data, either a matrix, vector or a
data frame. It must have the same format with the original data (the same column number)}
\item{x}{object of class \code{BANOVA.Bin} (returned by \code{BANOVA.Bin})}
\item{\dots}{additional arguments,currently ignored}
}
\details{
Level 1 model: \cr
\eqn{y_i \sim Binomial(ntrials, p_i)}{y_i ~ Binomial(ntrials, p_i)}, \eqn{p_i = logit^{-1}(\eta_i)} \cr
where ntrials is the binomial total for each record i, \eqn{\eta_i = \sum_{p = 0}^{P}\sum_{j=1}^{J_p}X_{i,j}^p\beta_{j,s_i}^p}, \eqn{s_i} is the subject id of response \eqn{i}. see \code{\link{BANOVA-package}}
}
\value{
\code{BANOVA.Binomial} returns an object of class \code{"BANOVA.Bin"}. The returned object is a list containing:
\item{anova.table}{table of effect sizes \code{\link{BAnova}}}
\item{coef.tables}{table of estimated coefficients}
\item{pvalue.table}{table of p-values \code{\link{table.pvalues}}}
\item{dMatrice}{design matrices at level 1 and level 2}
\item{samples_l2_param}{posterior samples of level 2 parameters}
\item{data}{original data.frame}
\item{mf1}{model.frame of level 1}
\item{mf2}{model.frame of level 2}
\item{JAGSmodel}{'JAGS' model}
}
\examples{
data(colorad)
\donttest{
# mean center Blur for effect coding
colorad$blur <- colorad$blur - mean(colorad$blur)
res <- BANOVA.Binomial(y~typic, ~color*blur, colorad, colorad$id, as.integer(16),
burnin = 5000, sample = 2000, thin = 10)
summary(res)
# or use BANOVA.run
res0 <- BANOVA.run(y~typic, ~color*blur, data = colorad, model_name = 'Binomial',
id = colorad$id, num_trials = as.integer(16), iter = 100, thin = 1, chains = 2)
summary(res0)
table.predictions(res)
# only in-model variables(except numeric variables) will be used
predict(res0, c(1, 0, 8, 2, 1, 0.03400759))
}
}
| /man/BANOVA.Binomial.Rd | no_license | chen4519902/BANOVA_R | R | false | false | 3,857 | rd | \name{BANOVA.Binomial}
\alias{BANOVA.Binomial}
\alias{predict.BANOVA.Binomial}
\alias{print.BANOVA.Binomial}
\alias{summary.BANOVA.Binomial}
\title{Estimation of BANOVA with a Binomial dependent variable}
\description{
\code{BANOVA.Binomial} implements a Hierarchical Bayesian ANOVA for a binomial response variable using a logit link and a normal heterogeneity distribution.
}
\usage{
BANOVA.Binomial(l1_formula = "NA", l2_formula = "NA", data,
id, num_trials, l2_hyper = c(1, 1, 0.0001), burnin = 5000, sample = 2000,
thin = 10, adapt = 0, conv_speedup = F, jags = runjags.getOption('jagspath'))
\method{summary}{BANOVA.Binomial}(object, ...)
\method{predict}{BANOVA.Binomial}(object, newdata = NULL,...)
\method{print}{BANOVA.Binomial}(x, ...)
}
\arguments{
\item{l1_formula}{formula for level 1 e.g. 'Y~X1+X2'}
\item{l2_formula}{formula for level 2 e.g. '~Z1+Z2',
response variable must not be included}
\item{data}{a data.frame in long format including all features in level 1 and level 2(covariates and categorical factors) and responses}
\item{id}{subject ID of each response unit}
\item{num_trials}{the number of trials of each
observation(=1, if it is bernoulli), the type is forced to be 'integer'}
\item{l2_hyper}{level 2 hyperparameters, c(a, b, \eqn{\gamma}), default c(1,1,0.0001)}
\item{burnin}{the number of burn in draws in the MCMC algorithm, default 5000}
\item{sample}{target samples in the MCMC algorithm after thinning, default 2000}
\item{thin}{the number of samples in the MCMC algorithm that needs to be thinned, default 10}
\item{adapt}{the number of adaptive iterations, default 0 (see \link[runjags]{run.jags})}
\item{conv_speedup}{whether to speedup convergence, default F}
\item{jags}{the system call or path for activating 'JAGS'. Default calls findjags() to attempt to locate 'JAGS' on your system}
\item{object}{object of class \code{BANOVA.Bin} (returned by \code{BANOVA.Bin})}
\item{newdata}{test data, either a matrix, vector or a
data frame. It must have the same format with the original data (the same column number)}
\item{x}{object of class \code{BANOVA.Bin} (returned by \code{BANOVA.Bin})}
\item{\dots}{additional arguments,currently ignored}
}
\details{
Level 1 model: \cr
\eqn{y_i} {~} \eqn{Binomial(ntrials,p_i)}, \eqn{p_i = logit^{-1}(\eta_i)} \cr
where ntrials is the binomial total for each record i, \eqn{\eta_i = \sum_{p = 0}^{P}\sum_{j=1}^{J_p}X_{i,j}^p\beta_{j,s_i}^p}, \eqn{s_i} is the subject id of response \eqn{i}. see \code{\link{BANOVA-package}}
}
\value{
\code{BANOVA.Binomial} returns an object of class \code{"BANOVA.Bin"}. The returned object is a list containing:
\item{anova.table}{table of effect sizes \code{\link{BAnova}}}
\item{coef.tables}{table of estimated coefficients}
\item{pvalue.table}{table of p-values \code{\link{table.pvalues}}}
\item{dMatrice}{design matrices at level 1 and level 2}
\item{samples_l2_param}{posterior samples of level 2 parameters}
\item{data}{original data.frame}
\item{mf1}{model.frame of level 1}
\item{mf2}{model.frame of level 2}
\item{JAGSmodel}{'JAGS' model}
}
\examples{
data(colorad)
\donttest{
# mean center Blur for effect coding
colorad$blur <- colorad$blur - mean(colorad$blur)
res <- BANOVA.Binomial(y~typic, ~color*blur, colorad, colorad$id, as.integer(16),
burnin = 5000, sample = 2000, thin = 10)
summary(res)
# or use BANOVA.run
res0 <- BANOVA.run(y~typic, ~color*blur, data = colorad, model_name = 'Binomial',
id = colorad$id, num_trials = as.integer(16), iter = 100, thin = 1, chains = 2)
summary(res0)
table.predictions(res)
# only in-model variables(except numeric variables) will be used
predict(res0, c(1, 0, 8, 2, 1, 0.03400759))
}
}
|
## Register a custom TTF face with showtext and draw Japanese text with it.
library(showtext)
# font_add() registers the face under the family name "mplus"; that exact
# name must be used when selecting the font for plotting below.
font_add(family = "mplus", regular = "birth-announcement/files/mplus-2p-light.ttf")
showtextdb:::already_loaded(family = "mplus") # should be TRUE
# Without showtext_auto(), base-graphics text is not routed through showtext
# and the registered family is ignored by the graphics device.
showtext_auto()
plot(0, 0, type = "n")
# Bug fix: the original used family = "M+ 2p light", which does not match the
# family name registered above ("mplus"), so the custom font was never used.
par(family = "mplus")
text(0, 0, labels = "japanese text: 2008年8月8日")
# If you see Japanese characters, it worked
font_add(family = "mplus", regular = "birth-announcement/files/mplus-2p-light.ttf")
showtextdb:::already_loaded(family = "mplus") # should be TRUE
plot(0, 0, type = "n")
par(family = "M+ 2p light")
text(0, 0, labels = "japanese text: 2008年8月8日")
# If you see Japanese characters, it worked |
## Put comments here that give an overall description of what your
## functions do
#Usage example :
#t <- makeCacheMatrix(matrix(10:13,nrow=2,ncol=2))
#t1 <- cacheSolve(t)
# function to create a list of functions
# Build a cache-aware wrapper around a matrix.
#
# Returns a list of four accessor functions that close over the matrix `x`
# and a cached inverse:
#   set(y)        - store a new matrix and drop any cached inverse
#   get()         - return the stored matrix
#   setinvers(i)  - store a computed inverse in the cache
#   getinvers()   - return the cached inverse (NULL until computed)
#
# Usage:
#   m <- makeCacheMatrix(matrix(10:13, nrow = 2, ncol = 2))
#   cacheSolve(m)
makeCacheMatrix <- function(x = matrix()) {
  # The cache lives in this call's environment; the accessors below reach
  # it through the `<<-` super-assignment operator.
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinvers <- function(invers) cached_inverse <<- invers
  getinvers <- function() cached_inverse
  # These element names are the public interface relied on by cacheSolve().
  list(set = set, get = get,
       setinvers = setinvers,
       getinvers = getinvers)
}
# Return the inverse of the matrix held in a makeCacheMatrix() wrapper,
# computing it at most once.
#
# x   - list produced by makeCacheMatrix()
# ... - extra arguments forwarded to solve()
#
# On the first call the inverse is computed with solve() and written back
# into the wrapper's cache; later calls return the cached copy (announced
# with a "getting cached data" message).
cacheSolve <- function(x, ...) {
  cached <- x$getinvers()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinvers(inverse)
  inverse
}
## functions do
#Usage example :
#t <- makeCacheMatrix(matrix(10:13,nrow=2,ncol=2))
#t1 <- cacheSolve(t)
# function to create a list of functions
makeCacheMatrix <- function(x = matrix()) {
# m is the var holds mean, is set NULL as default value
i <- NULL
# define set function
set <- function(y) {
x <<- y
i <<- NULL
}
# define get function
get <- function() x
# define setmean function, assigns mean to m and return
# super assignment operator <<- is used, which starts the closing framework, works it way up to the global environment
# until it finds a variable called mean
# super assignment operator can only be used in functions
setinvers <- function(invers) i <<- invers
# define getmean function, returns m var
getinvers <- function() i
# create the list with names are set, get, setmean, getmean
# and assign the functions to the names
list(set = set, get = get,
setinvers = setinvers,
getinvers = getinvers)
}
# function to get mean from cache or calculate
cacheSolve <- function(x, ...) {
# get the mean value from x
i <- x$getinvers()
# if mean is already calculated, then it will not be a null
if(!is.null(i)) {
message("getting cached data")
# return cached mean, once return, this function ends
return(i)
}
# else get the vector that is the input of makeVector
data <- x$get()
# calculate mean and store it into var m
i <- solve(data, ...)
# set the mean in setmean which uses a super assignment <<-
x$setinvers(i)
# return calculate mean value
i
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_templates.R
\name{validate_geographic_coverage_column_names}
\alias{validate_geographic_coverage_column_names}
\title{Check column names of geographic coverage template}
\usage{
validate_geographic_coverage_column_names(x)
}
\arguments{
\item{x}{(list) The data and metadata object returned by
\code{template_arguments()}.}
}
\value{
(Error or NULL) An error if any issues were found,
otherwise NULL.
}
\description{
Check column names of geographic coverage template
}
\keyword{internal}
| /man/validate_geographic_coverage_column_names.Rd | permissive | EDIorg/EMLassemblyline | R | false | true | 582 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_templates.R
\name{validate_geographic_coverage_column_names}
\alias{validate_geographic_coverage_column_names}
\title{Check column names of geographic coverage template}
\usage{
validate_geographic_coverage_column_names(x)
}
\arguments{
\item{x}{(list) The data and metadata object returned by
\code{template_arguments()}.}
}
\value{
(Error or NULL) An error if any issues were found,
otherwise NULL.
}
\description{
Check column names of geographic coverage template
}
\keyword{internal}
|
# -------------------
# Title : Churn Analysis
# Author: Harshitha Ravindra
# Date: Nov 6, 2018
# Analysis : Churn Prediction - Logistic regression
# -------------------
# Reads a member dataset, engineers churn/loyalty features, oversamples the
# minority class with SMOTE, fits a cross-validated logistic regression via
# caret, and scores the original data.
library(data.table)
library(lubridate)
library(DMwR)
library(caret)
library(e1071)
options(scipen = 999)
# read as a data frame first so read.csv automatically suffixes "1"/"2"
# onto duplicated column names
member = read.csv("Member Dataset.csv")
#Using data table syntax moving forward
member = as.data.table(member)
member[is.na(member)] = 0
# Impute out-of-range ages (<= 0 or > 100) with the mean age.
# NOTE(review): mean(Age) is computed over ALL rows, including the invalid
# ages being replaced — confirm this is intended.
member[,Age_treated := ifelse(Age <= 0 | Age > 100 , mean(Age), Age)]
#Adding type of customer
#member[,Cust_type := ifelse(ClosedDate == "", "Active", ifelse(mdy(ClosedDate) < ymd("2018-09-30"),
#                            "Closed_can't_analyse","Churned_analyse"))]
# Any account with a ClosedDate counts as churned.
member[,Cust_type := ifelse(ClosedDate == "", "Active", "Churn")]
#Duration
# Loyalty in whole months: opening date to Oct 2018 for active members,
# opening date to close date for churned members.
member[,loyalty_months := ifelse(Cust_type== "Active",
interval(mdy(EarliestMemAcctDate), mdy("10-01-2018")) %/% months(1),
interval(mdy(EarliestMemAcctDate), mdy(ClosedDate)) %/% months(1))]
# Drop identifiers, raw dates and balance columns from the model frame.
model_var = member[,-c("Member_ID","Age","EarliestMemAcctDate","ClosedDate","July_Bal","Aug_Bal",
"Sept_Bal","ZipCode_Validated")]
#write.csv(model_var,"model_var2.csv", row.names = F)
model_var[, Cust_type:= as.factor(Cust_type)]
# SMOTE oversamples the minority ("Churn") class to balance the data.
Ovr_model_var = SMOTE(Cust_type~., model_var, perc.over = 200, k = 5, learner = NULL)
# NOTE(review): churn_pred is fitted but never used below — the caret
# model 'logreg' is what gets evaluated.
churn_pred = glm(Cust_type~.,family = binomial(link = 'logit'), Ovr_model_var)
# train control - 4 fold cross validation repeated 4 times
for_cv = trainControl(
method = "repeatedcv",
number = 4,
repeats = 4,
classProbs = TRUE,
summaryFunction = twoClassSummary)
# logistic regression model for fitting on the over sampled data
logreg <- train(Cust_type ~., Ovr_model_var,
method = "glm",
family = "binomial",
trControl = for_cv,
metric = "ROC")
# Predicting on the original data
# type = "raw" yields predicted class labels, not probabilities.
# NOTE(review): the column name 'churnProb' is therefore misleading, and
# member$Cust_type was never converted to a factor in 'member' (only in
# model_var, a separate object) — confirm confusionMatrix() accepts this.
mod_pred = predict(logreg,model_var[,-c("Cust_type")],type = "raw")
member[,churnProb := mod_pred]
confusionMatrix(member$Cust_type, member$churnProb)
| /Churn_Prediction.R | permissive | HarshithaRavindra29/CU_churn | R | false | false | 2,288 | r | # -------------------
# Title : Churn Analysis
# Author: Harshitha Ravindra
# Date: Nov 6, 2018
# Analysis : Churn Prediction - Logistic regression
# -------------------
library(data.table)
library(lubridate)
library(DMwR)      # SMOTE() for class rebalancing
library(caret)     # trainControl()/train()/confusionMatrix()
library(e1071)
options(scipen = 999)   # suppress scientific notation in printed output
# reading as a data frame first to automatically add "1"/"2" next to duplicated column names
member = read.csv("Member Dataset.csv")
# Using data.table syntax moving forward
member = as.data.table(member)
# NOTE(review): blanket NA -> 0 replacement also zeroes columns where 0 is a
# meaningful value (e.g. balances); confirm this is intended for every column.
member[is.na(member)] = 0
# Replace implausible ages (<= 0 or > 100) with the mean age.
# NOTE(review): mean(Age) is computed over ALL rows, including the implausible
# values being replaced, so the imputed mean is biased by those outliers.
member[,Age_treated := ifelse(Age <= 0 | Age > 100 , mean(Age), Age)]
# Adding type of customer: empty ClosedDate = still active, otherwise churned
#member[,Cust_type := ifelse(ClosedDate == "", "Active", ifelse(mdy(ClosedDate) < ymd("2018-09-30"),
#                                                    "Closed_can't_analyse","Churned_analyse"))]
member[,Cust_type := ifelse(ClosedDate == "", "Active", "Churn")]
# Duration: membership tenure in whole months (active members measured up to
# 2018-10-01, churned members up to their closing date).
# NOTE(review): ifelse() evaluates both branches for every row, so
# mdy(ClosedDate) on the empty strings of active rows will emit parse warnings.
member[,loyalty_months := ifelse(Cust_type== "Active",
                                 interval(mdy(EarliestMemAcctDate), mdy("10-01-2018")) %/% months(1),
                                 interval(mdy(EarliestMemAcctDate), mdy(ClosedDate)) %/% months(1))]
# Drop identifiers, raw dates and monthly-balance columns before modelling.
model_var = member[,-c("Member_ID","Age","EarliestMemAcctDate","ClosedDate","July_Bal","Aug_Bal",
                       "Sept_Bal","ZipCode_Validated")]
#write.csv(model_var,"model_var2.csv", row.names = F)
model_var[, Cust_type:= as.factor(Cust_type)]
# Oversample the minority class with SMOTE (200% synthetic examples, k = 5 NN).
Ovr_model_var = SMOTE(Cust_type~., model_var, perc.over = 200, k = 5, learner = NULL)
# NOTE(review): churn_pred is fitted but never used below; the cross-validated
# caret model (logreg) supersedes it.
churn_pred = glm(Cust_type~.,family = binomial(link = 'logit'), Ovr_model_var)
# train control - 4 fold cross validation repeated 4 times, optimising ROC AUC
for_cv = trainControl(
  method = "repeatedcv",
  number = 4,
  repeats = 4,
  classProbs = TRUE,
  summaryFunction = twoClassSummary)
# logistic regression model for fitting on the over sampled data
logreg <- train(Cust_type ~., Ovr_model_var,
                method = "glm",
                family = "binomial",
                trControl = for_cv,
                metric = "ROC")
# Predicting class labels on the original (un-oversampled) data
mod_pred = predict(logreg,model_var[,-c("Cust_type")],type = "raw")
# NOTE(review): despite the name, churnProb holds predicted class LABELS
# (type = "raw"), not probabilities.
member[,churnProb := mod_pred]
# NOTE(review): caret::confusionMatrix expects (data = predictions,
# reference = truth); the truth is passed first here, which transposes the
# table and swaps sensitivity/specificity — confirm the intended order.
confusionMatrix(member$Cust_type, member$churnProb)
|
#' Read 'rodeo' Model from Excel or CSV Files and Return Text Representation
#'
#' @param folder valid folder name or excel file with rodeo model description
#' @param sheets names of the spreadsheets imported
#' @param type type of the data format
#' @param ext file extension of text and csv files
#' @param ... other arguments passed to the file read function, e.g. to
#' \code{\link{read_excel}} or \code{\link{read.csv2}}
#'
#' @return list of data frames
#'
#' @details The tables must follow the rodeo specification and contain valid column headers.
#'
#' @export
#'
#'
rodeo_tables <- function (folder,
  sheets = c("vars", "pars", "funs", "pros", "stoi"),
  type = c("Excel", "csv2"), ext = NA, ...) {
  ## resolve 'type': with the vector default, the original `type == "Excel"`
  ## comparison had a length-2 condition (an error under R >= 4.2);
  ## match.arg() picks the first choice for the default and validates any
  ## user-supplied value, replacing the former stop("unknown type") branch
  type <- match.arg(type)
  ## read one table per sheet name
  if (type == "Excel") {
    tables <- lapply(sheets, function(sheet)
      read_excel(folder, sheet = sheet))
  } else {
    ## scalar default handling ('ifelse' was overkill for a length-1 value)
    if (is.na(ext)) ext <- "csv"
    ext <- sub("^[.]", "", ext) # strip leading "." from ext
    tables <- lapply(sheets, function(sheet)
      read.csv2(file.path(folder, paste0(sheet, ".", ext)), header = TRUE, ...))
  }
  ## remove rows for which identifier (= 1st column) contains NA
  tables <- lapply(tables, function(sheet) sheet[!is.na(sheet[, 1]), ])
  names(tables) <- sheets
  ## reformat stoichiometry as cross table (kept disabled, as before)
  #tables$xstoi <- dcast(tables$stoi, process ~ variable, value.var="expression", fill="0")
  tables
}
| /R/rodeo_tables.R | no_license | tpetzoldt/rodeoExt | R | false | false | 1,636 | r | #' Read 'rodeo' Model from Excel or CSV Files and Return Text Representation
#'
#' @param folder valid folder name or excel file with rodeo model description
#' @param sheets names of the spreadsheets imported
#' @param type type of the data format
#' @param ext file extension of text and csv files
#' @param ... other arguments passed to the file read function, e.g. to
#' \code{\link{read_excel}} or \code{\link{read.csv2}}
#'
#' @return list of data frames
#'
#' @details The tables must follow the rodeo specification and contain valid column headers.
#'
#' @export
#'
#'
rodeo_tables <- function (folder,
  sheets = c("vars", "pars", "funs", "pros", "stoi"),
  type = c("Excel", "csv2"), ext = NA, ...) {
  ## resolve 'type': with the vector default, the original `type == "Excel"`
  ## comparison had a length-2 condition (an error under R >= 4.2);
  ## match.arg() picks the first choice for the default and validates any
  ## user-supplied value, replacing the former stop("unknown type") branch
  type <- match.arg(type)
  ## read one table per sheet name
  if (type == "Excel") {
    tables <- lapply(sheets, function(sheet)
      read_excel(folder, sheet = sheet))
  } else {
    ## scalar default handling ('ifelse' was overkill for a length-1 value)
    if (is.na(ext)) ext <- "csv"
    ext <- sub("^[.]", "", ext) # strip leading "." from ext
    tables <- lapply(sheets, function(sheet)
      read.csv2(file.path(folder, paste0(sheet, ".", ext)), header = TRUE, ...))
  }
  ## remove rows for which identifier (= 1st column) contains NA
  tables <- lapply(tables, function(sheet) sheet[!is.na(sheet[, 1]), ])
  names(tables) <- sheets
  ## reformat stoichiometry as cross table (kept disabled, as before)
  #tables$xstoi <- dcast(tables$stoi, process ~ variable, value.var="expression", fill="0")
  tables
}
|
#' @importFrom webutils parse_http
multipart <- function(body, type){
formdata <- webutils::parse_http(body, type)
lapply(formdata, function(x){
if(length(x$filename)){
tmp <- tempfile(fileext=paste0("_", basename(x$filename)))
writeBin(x$value, tmp)
list (
name = x$filename,
tmp_name = tmp
)
} else if(length(x$content_type)){
# binary form-data objects that are not file uploads
if(identical(x$content_type, "application/rds")){
I(unserialize(x$value))
} else if(identical(x$content_type, "application/json")){
I(jsonlite::fromJSON(rawToChar(x$value)))
} else if(grepl("^application/r?protobuf$", x$content_type)){
I(protolite::unserialize_pb(x$value))
} else if(grepl("^text/", x$content_type)){
I(rawToChar(x$value))
} else if(grepl("^application/octet", x$content_type)){
I(x$value)
} else {
stop(sprintf("Multipart request contains unsupported data type '%s'.", x$content_type))
}
} else if(is.raw(x$value)){
rawToChar(x$value)
} else {
return(x$value)
}
})
}
| /R/multipart.R | permissive | opencpu/opencpu | R | false | false | 1,141 | r | #' @importFrom webutils parse_http
multipart <- function(body, type) {
  # Decode one parsed multipart field into its R representation.
  decode_field <- function(field) {
    # File upload: spool the raw payload to a temp file and report both the
    # client-side name and the server-side temp path.
    if (length(field$filename)) {
      spooled <- tempfile(fileext = paste0("_", basename(field$filename)))
      writeBin(field$value, spooled)
      return(list(
        name = field$filename,
        tmp_name = spooled
      ))
    }
    # Typed (non-file) part: dispatch on the declared content type.
    # Results are wrapped in I() to mark them as already-decoded values.
    if (length(field$content_type)) {
      ctype <- field$content_type
      if (identical(ctype, "application/rds"))
        return(I(unserialize(field$value)))
      if (identical(ctype, "application/json"))
        return(I(jsonlite::fromJSON(rawToChar(field$value))))
      if (grepl("^application/r?protobuf$", ctype))
        return(I(protolite::unserialize_pb(field$value)))
      if (grepl("^text/", ctype))
        return(I(rawToChar(field$value)))
      if (grepl("^application/octet", ctype))
        return(I(field$value))
      stop(sprintf("Multipart request contains unsupported data type '%s'.", ctype))
    }
    # Untyped raw payload: treat as text.
    if (is.raw(field$value)) {
      return(rawToChar(field$value))
    }
    field$value
  }
  lapply(webutils::parse_http(body, type), decode_field)
}
|
suppressPackageStartupMessages({
library(POWSC)
library(SingleCellExperiment)
})
# Fit POWSC's two-phase expression model to `x` — presumably a
# SingleCellExperiment, given the counts() accessor and the `$cluster`
# colData column (confirm against callers).
fun <- function(x) {
    # Densify the count matrix in place; presumably Est2Phase needs a base
    # matrix rather than a sparse/DelayedArray representation — TODO confirm.
    y <- counts(x)
    if (!is.matrix(y)) {
        y <- as.matrix(y)
        counts(x) <- y
    }
    if (is.null(x$cluster)) {
        # no cluster labels: estimate one parameter set from all cells
        y <- Est2Phase(x)
    } else {
        # per-cluster estimation: split column (cell) indices by cluster
        # label and run Est2Phase on each subset; yields a list named by
        # cluster level
        i <- split(seq_len(ncol(x)), x$cluster)
        y <- lapply(i, function(.) Est2Phase(x[, .]))
    }
    # (the trailing assignment means the result is returned invisibly)
}
| /scripts/03-est_pars-POWSC.R | no_license | jligm-hash/simulation-comparison | R | false | false | 394 | r | suppressPackageStartupMessages({
library(POWSC)
library(SingleCellExperiment)
})
# Fit POWSC's two-phase expression model to `x` — presumably a
# SingleCellExperiment, given the counts() accessor and the `$cluster`
# colData column (confirm against callers).
fun <- function(x) {
    # Densify the count matrix in place; presumably Est2Phase needs a base
    # matrix rather than a sparse/DelayedArray representation — TODO confirm.
    y <- counts(x)
    if (!is.matrix(y)) {
        y <- as.matrix(y)
        counts(x) <- y
    }
    if (is.null(x$cluster)) {
        # no cluster labels: estimate one parameter set from all cells
        y <- Est2Phase(x)
    } else {
        # per-cluster estimation: split column (cell) indices by cluster
        # label and run Est2Phase on each subset; yields a list named by
        # cluster level
        i <- split(seq_len(ncol(x)), x$cluster)
        y <- lapply(i, function(.) Est2Phase(x[, .]))
    }
    # (the trailing assignment means the result is returned invisibly)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TGBot.R
\name{sendDocument}
\alias{sendDocument}
\title{sendDocument}
\usage{
sendDocument(document = NULL, reply_to_message_id = NULL, chat_id = NULL)
}
\arguments{
\item{document}{path to the file to send (required)}
\item{reply_to_message_id}{if the message is a reply, ID of the
original message}
\item{chat_id}{Unique identifier for the target chat or username of
the target channel (required)}
}
\description{
Send general files
}
| /man/sendDocument.Rd | no_license | ebeneditos/telegram | R | false | true | 517 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TGBot.R
\name{sendDocument}
\alias{sendDocument}
\title{sendDocument}
\usage{
sendDocument(document = NULL, reply_to_message_id = NULL, chat_id = NULL)
}
\arguments{
\item{document}{path to the file to send (required)}
\item{reply_to_message_id}{if the message is a reply, ID of the
original message}
\item{chat_id}{Unique identifier for the target chat or username of
the target channel (required)}
}
\description{
Send general files
}
|
#' Run an MLflow Project
#'
#' Wrapper for the `mlflow run` CLI command. See
#' https://www.mlflow.org/docs/latest/cli.html#mlflow-run for more info.
#'
#' @examples
#' \dontrun{
#' # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow
#' # project. You can run this script (assuming it's saved at /some/directory/params_example.R)
#' # with custom parameters via:
#' # mlflow_run(entry_point = "params_example.R", uri = "/some/directory",
#' # parameters = list(num_trees = 200, learning_rate = 0.1))
#' install.packages("gbm")
#' library(mlflow)
#' library(gbm)
#' # define and read input parameters
#' num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer")
#' lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric")
#' # use params to fit a model
#' ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr)
#' }
#'
#'
#' @param entry_point Entry point within project, defaults to `main` if not specified.
#' @param uri A directory containing modeling scripts, defaults to the current directory.
#' @param version Version of the project to run, as a Git commit reference for Git projects.
#' @param parameters A list of parameters.
#' @param experiment_id ID of the experiment under which to launch the run.
#' @param experiment_name Name of the experiment under which to launch the run.
#' @param backend Execution backend to use for run.
#' @param backend_config Path to JSON file which will be passed to the backend. For the Databricks backend,
#' it should describe the cluster to use when launching a run on Databricks.
#' @param no_conda If specified, assume that MLflow is running within a Conda environment with the necessary
#' dependencies for the current project instead of attempting to create a new Conda environment. Only
#' valid if running locally.
#' @param storage_dir Valid only when `backend` is local. MLflow downloads artifacts from distributed URIs passed to
#' parameters of type `path` to subdirectories of `storage_dir`.
#'
#' @return The run associated with this run.
#'
#' @export
mlflow_run <- function(uri = ".", entry_point = NULL, version = NULL, parameters = NULL,
                       experiment_id = NULL, experiment_name = NULL, backend = NULL, backend_config = NULL,
                       no_conda = FALSE, storage_dir = NULL) {
  # The two experiment selectors are mutually exclusive on the CLI.
  if (!is.null(experiment_name) && !is.null(experiment_id)) {
    stop("Specify only one of `experiment_name` or `experiment_id`.")
  }
  # Without an explicit name, fall back to an inferred experiment id.
  if (is.null(experiment_name)) {
    experiment_id <- mlflow_infer_experiment_id(experiment_id)
  }
  # Expand "~" etc. only for local paths; remote URIs fail file.exists().
  if (file.exists(uri))
    uri <- fs::path_expand(uri)
  # Render `parameters` as repeated "--param-list key=value" flags; format()
  # avoids scientific notation mangling numeric values. Stays NULL when no
  # parameters were supplied, which c() below silently drops.
  param_list <- if (!is.null(parameters)) parameters %>%
    purrr::imap_chr(~ paste0(.y, "=", format(.x, scientific = FALSE))) %>%
    purrr::reduce(~ mlflow_cli_param(.x, "--param-list", .y), .init = list())
  # Assemble the full `mlflow run` argument vector; mlflow_cli_param()
  # presumably appends "--flag value" only for non-NULL values — see its
  # definition elsewhere in the package.
  args <- list(uri) %>%
    mlflow_cli_param("--entry-point", entry_point) %>%
    mlflow_cli_param("--version", version) %>%
    mlflow_cli_param("--experiment-id", experiment_id) %>%
    mlflow_cli_param("--experiment-name", experiment_name) %>%
    mlflow_cli_param("--backend", backend) %>%
    mlflow_cli_param("--backend-config", backend_config) %>%
    mlflow_cli_param("--storage-dir", storage_dir) %>%
    c(param_list)
  args <- if (!no_conda) args else c(args, "--no-conda")
  result <- do.call(mlflow_cli, c("run", args))
  # Scrape the run id out of the CLI's stderr banner ("... Run (ID '<id>' ...").
  # NOTE(review): this assumes that exact message format — confirm against the
  # mlflow CLI version in use; a non-match errors on the [[2]] subscript.
  matches <- regexec(".*Run \\(ID \\'([^\\']+).*", result$stderr)
  run_id <- regmatches(result$stderr, matches)[[1]][[2]]
  invisible(run_id)
}
| /mlflow/R/mlflow/R/project-run.R | permissive | criteo-forks/mlflow | R | false | false | 3,568 | r | #' Run an MLflow Project
#'
#' Wrapper for the `mlflow run` CLI command. See
#' https://www.mlflow.org/docs/latest/cli.html#mlflow-run for more info.
#'
#' @examples
#' \dontrun{
#' # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow
#' # project. You can run this script (assuming it's saved at /some/directory/params_example.R)
#' # with custom parameters via:
#' # mlflow_run(entry_point = "params_example.R", uri = "/some/directory",
#' # parameters = list(num_trees = 200, learning_rate = 0.1))
#' install.packages("gbm")
#' library(mlflow)
#' library(gbm)
#' # define and read input parameters
#' num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer")
#' lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric")
#' # use params to fit a model
#' ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr)
#' }
#'
#'
#' @param entry_point Entry point within project, defaults to `main` if not specified.
#' @param uri A directory containing modeling scripts, defaults to the current directory.
#' @param version Version of the project to run, as a Git commit reference for Git projects.
#' @param parameters A list of parameters.
#' @param experiment_id ID of the experiment under which to launch the run.
#' @param experiment_name Name of the experiment under which to launch the run.
#' @param backend Execution backend to use for run.
#' @param backend_config Path to JSON file which will be passed to the backend. For the Databricks backend,
#' it should describe the cluster to use when launching a run on Databricks.
#' @param no_conda If specified, assume that MLflow is running within a Conda environment with the necessary
#' dependencies for the current project instead of attempting to create a new Conda environment. Only
#' valid if running locally.
#' @param storage_dir Valid only when `backend` is local. MLflow downloads artifacts from distributed URIs passed to
#' parameters of type `path` to subdirectories of `storage_dir`.
#'
#' @return The run associated with this run.
#'
#' @export
mlflow_run <- function(uri = ".", entry_point = NULL, version = NULL, parameters = NULL,
                       experiment_id = NULL, experiment_name = NULL, backend = NULL, backend_config = NULL,
                       no_conda = FALSE, storage_dir = NULL) {
  # The two experiment selectors are mutually exclusive on the CLI.
  if (!is.null(experiment_name) && !is.null(experiment_id)) {
    stop("Specify only one of `experiment_name` or `experiment_id`.")
  }
  # Without an explicit name, fall back to an inferred experiment id.
  if (is.null(experiment_name)) {
    experiment_id <- mlflow_infer_experiment_id(experiment_id)
  }
  # Expand "~" etc. only for local paths; remote URIs fail file.exists().
  if (file.exists(uri))
    uri <- fs::path_expand(uri)
  # Render `parameters` as repeated "--param-list key=value" flags; format()
  # avoids scientific notation mangling numeric values. Stays NULL when no
  # parameters were supplied, which c() below silently drops.
  param_list <- if (!is.null(parameters)) parameters %>%
    purrr::imap_chr(~ paste0(.y, "=", format(.x, scientific = FALSE))) %>%
    purrr::reduce(~ mlflow_cli_param(.x, "--param-list", .y), .init = list())
  # Assemble the full `mlflow run` argument vector; mlflow_cli_param()
  # presumably appends "--flag value" only for non-NULL values — see its
  # definition elsewhere in the package.
  args <- list(uri) %>%
    mlflow_cli_param("--entry-point", entry_point) %>%
    mlflow_cli_param("--version", version) %>%
    mlflow_cli_param("--experiment-id", experiment_id) %>%
    mlflow_cli_param("--experiment-name", experiment_name) %>%
    mlflow_cli_param("--backend", backend) %>%
    mlflow_cli_param("--backend-config", backend_config) %>%
    mlflow_cli_param("--storage-dir", storage_dir) %>%
    c(param_list)
  args <- if (!no_conda) args else c(args, "--no-conda")
  result <- do.call(mlflow_cli, c("run", args))
  # Scrape the run id out of the CLI's stderr banner ("... Run (ID '<id>' ...").
  # NOTE(review): this assumes that exact message format — confirm against the
  # mlflow CLI version in use; a non-match errors on the [[2]] subscript.
  matches <- regexec(".*Run \\(ID \\'([^\\']+).*", result$stderr)
  run_id <- regmatches(result$stderr, matches)[[1]][[2]]
  invisible(run_id)
}
|
#####
# Script for macro-comparison of shifts in multivariate traits
## inferred from different phylogenetic methods (ML, BI, MSC, SCC)
###################################################################################
## Idea and Introduction:
# we want to test macroevolutionary analyses across trees from different methods
# and data subsets to determine how robust the results and subsequent inferences
# are. We'll initially try two things (1) Disparity through time 'DTT' and (2) l1ou
# to test for shifts in morphological traits. The input trait data will stay the
# same for all analyses, but data subsets and methods will change.
# Method 1 is 'Concatenation': We can either use RAxML or BEAST2. BEAST2 is preferrable
# because it will output a distribution of plausible ultrametric trees of varied topologies
# and branch lengths all at once. Alternatively, we can run RAxML for 100 tree
# searches, which will give us some branch length/phylogenetic variation, but we
# will need to transform the tree to make it ultrametric ('chronos', lambda = ...).
# Method 2 is 'Short-Cut Coalescent': These are methods (ASTRAL, ASTRID, MP-EST)
# that use as inputs, gene trees estimated from another program (I will use either
# RAxML or *BEAST2). These methods can be sensitive to the input gene trees, so we
# can test this by using gene trees built in RAxML and in BEAST2. If we run a
# bootstrapped ASTRAL analysis, we will get an output of 102 trees (100 bs,
# 1 greedy algorithim best fit, and 1 consensus), perfect for our analyses!
# Method 3 is 'Full Coalescent': The only proper full coalescent method that we'll
# use is implemented in *BEAST2.
library(geiger)
library(phytools)
library(ggbiplot);library(ggplot2);library(ggtree); library(ggridges)
library(l1ou)
library(Rmisc)
library(wesanderson); library(ggthemes)
library(corrplot)
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/l1ou.Uncertainty.R")
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/Get.Descendant.Edges.R")
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/star.tree_Make_Polytomies.R")
# A list of the final sets of trees/data we're using in this study
#############################################################
## (all in "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
# or ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/"):
# Marsupials
# RAxML concatenated = "Marsupials.RAxML.Concat.trees"
# phylo regressed traits = "Marsupials.RAxML.Concat.PhyloResiduals.rds"
# ASTRAL w/RAxML gene trees = "Marsupials.ASTRAL.RAxML.trees"
# phylo regressed traits = "Marsupials.Astral.RAxML.PhyloResiduals.rds"
# starBEAST =
# ASTRAL w/starBEAST gene trees =
# Elapids
# RAxML concatenated = "T222_concatenated_Ingroup.trimmed.trees"
# phylo regressed traits = "Elapids.Concat.RAxML.PhyloResiduals.rds"
# ASTRAL w/RAxML gene trees = "Elapidae.RAxML.ASTRAL.bs.SCALED.100.TRIMMED.trees"
# phylo regressed traits = "Elapids.Astral.RAxML.PhyloResiduals.rds"
# starBEAST = "Elapidae.*BEAST.by25s.TRIMMED.trees"
# phylo regressed traits = "Elapids.BEAST.PhyloResiduals.rds"
# ASTRAL w/starBEAST gene trees = "Elapidae.ASTRAL.*BEAST.TRIMMED.trees"
# phylo regressed traits = "Elapids.Astral.BEAST.PhyloResiduals.rds"
# phylogenetically regressed traits (for l1ou) =
# Protea
# RAxML concatenated = "Protea.RAxML.Concat.tre"
# ASTRAL w/RAxML gene trees = "Protea.ASTRAL.RAxML.SCALED.tre"
# starBEAST = "Protea.starBEAST.trimmed.trees"
# ASTRAL w/starBEAST gene trees = "Protea.ASTRAL.starBEAST.TRIMMED.SCALED.trees"
# phylogenetically regressed traits (for l1ou) =
# Cichlids
# RAxML concatenated =
# ASTRAL w/RAxML gene trees = "Cichlids.ASTRAL.RAxML.trees"
# Just a staging area for TREEs:
#################################
trees = read.tree("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Cichlids.ASTRAL.RAxML.trees") #pick out our set of posterior trees
#################################
# Quickly standardize the data to body size
#######################################################################
#### I've decided to use a phylogenetic regression instead of a standard linear regression,
##### the steps for running it for each tree are looped below
datum <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.RAW.csv", header=T, row.names=1)
datum <- log(datum) # quickly log the raw values (if not already done!)
total.data <- NULL # becomes a list with one residual data frame per tree
against <- datum$Body_Mass; names(against) <- rownames(datum) # phyl.resid needs names to determine proper order
# For every tree in the posterior, regress trait columns 2:5 against body
# mass with phytools::phyl.resid and keep the phylogenetic residuals.
# NOTE(review): 1:length(trees) misbehaves if `trees` is empty; seq_along()
# would be safer.
for (b in 1:length(trees)) {
  resid.data <- NULL # temporary object
  #resids <- phyl.resid(trees[[b]], against, datum[,c("Brain.size", "M.avgWT")]) # regress brain size and weight against body length
  resids <- phyl.resid(trees[[b]], against, datum[,2:5]) # regress columns 2:5 against body mass
  # NOTE(review): this sorts rows alphabetically, while `against` keeps the
  # original row order of `datum` — confirm the intended alignment before the
  # cbind with Body_Mass below.
  residual.data <- resids$resid[order(rownames(resids$resid)),] # order the data to match what we regressed it against
  resid.data <- cbind(resid.data, residual.data);
  # prepend the (untransformed regressor) body-mass column so each frame
  # keeps all five variables under the original column names
  Body_Mass <- against; Body_Mass <- as.data.frame(Body_Mass); resid.data <- cbind(Body_Mass, resid.data);
  colnames(resid.data) <- colnames(datum)[1:5]
  total.data[[b]] <- resid.data
}
saveRDS(total.data, "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.ASTRAL.RAxML.PhyloResiduals.rds")
compare.traits <- function(data, trait1, trait2){
  # Scatterplot of two traits with an OLS fit and regression statistics in
  # the title. `trait1` is the response (y) and `trait2` the predictor (x),
  # mirroring the lm() call below (fit$model column 1 = response,
  # column 2 = predictor).
  yaxis <- colnames(data[trait1])   # response name  -> y-axis label
  xaxis <- colnames(data[trait2])   # predictor name -> x-axis label
  fit <- lm(data[,trait1]~data[,trait2])
  # BUGFIX: the axis labels were swapped — the x aesthetic is the predictor
  # (trait2) and the y aesthetic the response (trait1), but the labels said
  # the opposite.
  (ggplot(fit$model, aes_string(x = names(fit$model)[2], y = names(fit$model)[1]))
    + geom_point(alpha=0.5, color="red")
    + geom_smooth(method="lm", color="black")
    + theme_classic()
    + labs(x=xaxis, y=yaxis,
           title = paste("Adj R2 = ",signif(summary(fit)$adj.r.squared, 5),
                         "Intercept =",signif(fit$coef[[1]],5 ),
                         " Slope =",signif(fit$coef[[2]], 5),
                         " P =",signif(summary(fit)$coef[2,4], 5))))
}
compare.traits(datum, 1, 2)
test.cor <- cor(datum, method="pearson", use = "complete.obs")
res1 <- cor.mtest(datum, conf.level = .95)
corrplot(test.cor, method="circle", type="lower", order="alphabet",
addgrid.col=NA, tl.col="black", title="unburnt continuous",
tl.cex=0.5, p.mat = res1$p, insig = "label_sig", pch.col = "white")
# more corrplot info at: https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
concat <- (ggplot(mdi.estimates, aes(x=MDI))
+ geom_density(fill="green")
+ scale_x_continuous(limits = c(0, 1))
+ theme_classic())
# You'll want to go through these steps each time you start again
trait.data <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.residuals.data.csv")
#trait.data <- subset(trait.data, trait.data$In_analyses == "Yes")
#trait.data <- trait.data[complete.cases(trait.data),] # trim any data columns that aren't complete
trait.data <- trait.data[,c("Name_in_Duchene", "logLength", "brain_res", "weight_res")]
trait.data <- trait.data[,c("Name_in_Duchene", "logLength", "nonlogbrain_res", "nonlogweight_res")]
rownames(trait.data) <- trait.data$Name_in_Duchene
# or
trait.data <- readRDS("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.ASTRAL.RAxML.PhyloResiduals.rds")
trait.data <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.RAW.csv", header=T, row.names=1)
# Read in the trees we'll use:
astral.raxml <- read.tree ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Marsupials_ASTRAL_RAxML.trees")
raxml.concat <- read.tree ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
starbeast <- read.nexus("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
astralbeast <- read.nexus("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
# if you've already cleaned up your trees assign them to the 'trees' object
trees = astral.raxml
# Add on the Thylacine next to Myrmecobius at a reasonable depth
depths <- seq(0.3,0.9, 0.01)   # candidate attachment depths (branch fractions)
out.trees <- NULL
# For every tree, graft the extinct Thylacine as sister to the numbat
# (Myrmecobius), attached 30-90% of the way along its terminal branch.
# NOTE(review): sample() is unseeded, so each run gives different depths;
# call set.seed() first if reproducibility matters.
for (k in 1:length(trees)){
  at.depth <- sample(depths, 1)
  # position = fraction of the numbat's terminal edge length; the inner
  # which() finds the edge whose child node is the numbat tip
  out.trees[[k]] <- bind.tip(trees[[k]], where=which(trees[[k]]$tip.label=="Myrmecobiidae_Myrmecobius_fasciatus"),
                             tip.label="Thylacinidae_Thylacinus_cynocephalus",
                             position=at.depth*trees[[k]]$edge.length[which(trees[[k]]$edge[,2]==which(trees[[k]]$tip.label=="Myrmecobiidae_Myrmecobius_fasciatus"))])
}
# then we can clean up your tree(s) to match your data; unnecessary if using 'TRIMMED' trees
###############################################################
keep <- rownames(trait.data)
all <- astral.raxml[[1]]$tip.label # change to appropriate tree
drop <- setdiff(all, keep)
new.trees <- lapply(astral.raxml, drop.tip, tip=drop) # change to appropriate tree
class(new.trees) <- "multiPhylo"
name.check(new.trees[[1]], trait.data)
trees = new.trees
# now we can get into the analyses, start with DTT in geiger
##############################################################
# either use a raw trait
trait <- trait.data$Total_L # specify your trait of interest, make sure it has names attached!
names(trait) <- rownames(trans.data) # make sure it has names attached!
test <- dtt(trees[[1]], trait, plot=T, nsim=1000)
plot(test$dtt ~ test$times)
lines(test$times, test$dtt)
# or a PCA value, which is reducing the dimensionality of multiple traits
## if you want to do a PCA value (which you likely do, follow the steps below)
pca.test <- trait.data[complete.cases(trait.data),] # make sure to remove incomplete taxa
test.data <- pca.test[,2:length(trait.data)] # designate the variables of interest (columns)
#test.data <- log(test.data) # log transform data
#ln.test[ln.test=="-Inf"] <- 0
species <- pca.test[,1] # make note of the species names
#genus <- pca.test[,7]
#parity <- pca.test[,8]
trait.pca <- prcomp(test.data) # perform your PCA
plot(trait.pca, type="l") # visualize how many axes to keep
trait.pca # determine how much influence each axis holds
summary(trait.pca) # and the cumulative value of each
#(ggbiplot(protea.pca, obs.scale=1, var.scale=1, ellipse=T, circle=T))
#loadings <- as.data.frame(protea.pca$rotation)
axes <- predict(trait.pca, newdata = test.data) # pull out each taxon's value for each PC axis
trait.data[,"PC1"] <- axes[,1] # save the PC loadings as a new variable for the DTT
(ggbiplot(trait.pca, obs.scale=1, var.scale=1, ellipse=T, circle=T))
### now you can do your DTT on PC1!
trait <- trait.data[,"PC1"] # specify your trait of interest, make sure it has names attached!
names(trait) <- rownames(axes) # make sure it has names attached!
test <- dtt(trees[[1]], trait, plot=T, nsim=1000)
plot(test$dtt ~ test$times)
lines(test$times, test$dtt)
# otherwise, just jump right in and do this analysis for all the trees!
# Run geiger's disparity-through-time on `trait` over every tree, collecting
# the MDI statistic per tree and the empirical + simulated disparity curves.
# NOTE(review): rbind-ing inside the loop grows quadratically; fine at 100
# trees, but pre-allocating a list would scale better.
par(new=F)
mdi.estimates <- NULL
timing.of.disparity <- NULL
for (i in 1:length(trees)) {
  # NOTE(review): "of 100" is hard-coded; use length(trees) in the message.
  cat("iteration", i, "of 100", "\n") #keep track of what tree/loop# we're on
  input <- dtt(trees[[i]], trait, plot=F, nsim=1000) # run the DTT analysis
  # create a set of objects to hold the outputs
  mdi.info <- as.data.frame(t(c(i, input$MDI)))
  disparity <- as.data.frame(input$dtt)
  timing <- as.data.frame(input$times)
  sim.means <- as.data.frame(rowMeans(input$sim)) # keep this for plotting inside this function
  sim.bounds <- as.data.frame(t(apply(input$sim, 1, CI))) # apply the CI function to each row (time interval) of the simulations
  colnames(sim.bounds) <- c("simulated.upper95", "simulated.mean", "simulated.lower95")
  # create an object to hold the combined timing/disparity data, and feed it into a bigger frame
  disp.timing <- NULL
  disp.timing <- cbind(timing, disparity, sim.bounds) # could use sim.means if you prefer (less uncertainty)
  timing.of.disparity <- rbind.data.frame(timing.of.disparity, disp.timing)
  # plot only the empirical trends (not the simulated); par(new=T) overlays
  # each tree's curve on the same device
  plot(input$dtt ~ input$times,
       xlim=c(0,1), ylim=c(0,2), col="red")
  lines(input$times, input$dtt); par(new=T)
  # plot only the simulated mean trends
  plot(sim.means[,1] ~ input$times,
       xlim=c(0,1), ylim=c(0,2), col="blue")
  lines(input$times, sim.means[,1]); par(new=T)
  mdi.estimates <- rbind(mdi.estimates, mdi.info)
}
colnames(timing.of.disparity) <- c("timing", "disparity", "simulated.upper95", "simulated.mean", "simulated.lower95")
timing.of.disparity <- timing.of.disparity[order(timing.of.disparity$timing),]
## Plot the timing of shifts as a density distribution (with mean estimate)
colnames(mdi.estimates) <- c("tree.num", "MDI")
concat <- (ggplot(mdi.estimates, aes(x=MDI))
+ geom_density(fill="green")
+ scale_x_continuous(limits = c(0, 1))
+ theme_classic())
#+ geom_vline(aes(xintercept=mean(timing, na.rm=T)),
# color="red", linetype="dashed", size=1)
#+ scale_x_reverse(limits=c(20,0))) # use this to reverse then define the limits of the x axis
#### if you want to add another method to the MDI estimates plot:
total.mdi <- read.csv("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.MDI.estimates.csv", header=T)
total.mdi[,"X"] <- NULL
mdi.estimates[,"model"] <- "starBEAST"
total.mdi <- rbind(total.mdi, mdi.estimates)
write.csv(total.mdi, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.MDI.estimates.csv")
total.mdi$model <- factor(total.mdi$model, levels=c("starBEAST", "raxml.concatenated", "astral.raxml", "astral.starbeast")) # this re-orders the models in the legend
(ggplot(total.mdi, aes(x = MDI, y = model, fill=model))
+ geom_density_ridges(scale=7)
+ scale_fill_manual(values=wes_palette("Zissou", 3, "discrete"))
+ theme_ridges())
#### Make a summary of the Disparity estimates within a sliding window (mean, lowerCI, upperCI)
# Slide a window of width 0.1 across relative time [0, 1] in steps of 0.01,
# summarising the pooled empirical disparity (CI across trees) and the
# averaged simulated bounds inside each window.
tof <- timing.of.disparity
sim.nums <- seq(0,1,0.01)
emp.mean.estimates <- NULL; sim.mean.estimates <- NULL
# NOTE(review): the loop variable `t` shadows base::t by name; function
# lookup still finds base::t for the t(...) calls below, but renaming the
# variable would be clearer.
for (t in sim.nums) {
  time.min <- t
  time.max <- t+0.1 # adjust the window width if you're getting NaN values (no observations within a time period)
  # strict inequalities: observations exactly on a window edge are excluded
  timed.chunk <- subset(tof, tof$timing>time.min & tof$timing<time.max)
  emp.estimates <- as.data.frame(t(CI(timed.chunk$disparity)))
  #simu.estimates <- as.data.frame(t(CI(timed.chunk$simulated.mean))) # this takes the CIs from the mean of simulations
  sim.estimates <- as.data.frame(t(apply(timed.chunk[,3:5], 2, mean))) # this takes the mean of the CIs from the simulations
  # NOTE(review): the trailing colnames(timing) call is a no-op (its result
  # is discarded) and can be deleted.
  timing <- paste(time.min, "to", time.max); colnames(timing)
  emp.output.frame <- NULL; sim.output.frame <- NULL
  emp.output.frame <- as.data.frame(c(timing, emp.estimates))
  sim.output.frame <- as.data.frame(c(timing, sim.estimates))
  colnames(emp.output.frame) <- c("time.window", "empirical.upper95", "empirical.mean", "empirical.lower95")
  colnames(sim.output.frame) <- c("time.window", "simulated.upper95", "simulated.mean", "simulated.lower95")
  #colnames(output.frame) <- NULL
  emp.mean.estimates <- rbind(emp.mean.estimates, emp.output.frame)
  sim.mean.estimates <- rbind(sim.mean.estimates, sim.output.frame)
}
# attach the window start times, then drop the final (empty, past-1.0) window
emp.mean.estimates[,"timing"] <- sim.nums # add a column with the times
emp.mean.estimates <- emp.mean.estimates[-c(101),] # drop the last row which is superfluous
sim.mean.estimates[,"timing"] <- sim.nums # add a column with the times
sim.mean.estimates <- sim.mean.estimates[-c(101),] # drop the last row which is superfluous
total.estimates <- cbind(emp.mean.estimates, c(sim.mean.estimates[,2:4]))
#external.total <- NULL
#read.csv(external.total) # read in the results from other tree sets
total.estimates[,"model"] <- "starBEAST" # change this according to the trees you're using
external.total <- rbind(external.total, total.estimates)
write.csv(external.total, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.DTT.trends.csv")
#### Now let's plot the trend in disparity from the posterior of trees as a confidence ribbon
rib <- (ggplot(data=total.estimates)
+ geom_ribbon(aes(x=timing, ymin=empirical.lower95, ymax=empirical.upper95, fill="empirical"))
+ geom_ribbon(aes(x=timing, ymin=simulated.lower95, ymax=simulated.upper95, fill="simulated"))
+ theme_classic())
#+ geom_smooth()
#+ scale_x_reverse())
#+ geom_smooth(method="auto", aes(x=x, y=all.emp.ci.vj$y.meanCI), se=T))
#### Now combine the MDI plot and the tree into a single figure
test <- (ggtree(trees[[1]])
+ geom_tiplab(size=4))
multiplot(rib, test, rib, test, ncol=2)
# Build logged copies of the marsupial measurements alongside the raw columns.
mardata <- cbind(mardata, log(mardata[,2:4])) # log transform the raw measurements
colnames(mardata) <- c("Name_in_Duchene", "Brain.size", "M.avgBL", "M.avgWT",
                       "logBrain", "logLength", "logWeight")
trait.data  # interactive inspection of the current trait table
# NOTE(review): residuals can be negative, so log() here yields NaN for those
# rows -- confirm this transformation is actually wanted
logbrain_res <- log(trait.data[,"nonlogbrain_res"])
########################################################
# Now we can start looking at shifts in size and shape
## using 'l1ou' we'll estimate morphological shifts
## then attempt to identify instances of convergence
########################################################
# estimate.uncertainty / process.uncertainty come from the sourced
# l1ou.Uncertainty.R helper; n.iter=2 is a quick sanity-check run
output.est <- estimate.uncertainty(trees, trait.data, n.iter=2, estimate.convergence=F)
output.post <- process.uncertainty(output.est, 2)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(output.post)
# Sanity check with star trees (all internal structure collapsed): no shifts
# should be supported on a star phylogeny
test1 <- star.tree(trees[[1]])
test2 <- star.tree(trees[[2]])
testies <- c(test1, test2)
output.testie <- estimate.uncertainty(testies, trait.data, n.iter=2, estimate.convergence=F)
output.postie <- process.uncertainty(output.testie, 2)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(output.postie)
#### Check to make sure the tips match the data labels
name.check(trees[[23]], trait.data[[23]]);
#### Adjust the data and tree to fit (order matters in l1ou!)
data <- adjust_data(trees[[16]], trait.data[[16]]) # exclude your PC1 values from above!
#### Estimate the number and position of shifts a priori
shift.fit <- estimate_shift_configuration(data$tree, data$Y, nCores=8, quietly=F, criterion="pBIC") # if you want to do only a single trait, 'data$Y[,x]'
plot(shift.fit, cex=0.8)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
#### Investigate convergence among the supported shift set
fit.conv <- estimate_convergent_regimes(shift.fit, nCores=8, criterion="pBIC")
plot(fit.conv, cex=0.5)
## Let's try building a loop to make sense of the shifts
########################################################
# Fit an l1ou shift configuration on every tree in the posterior set, recording
# (a) the number of shifts per tree, (b) the tips descending from each shifted
# edge, and (c) the full fit object. Results are checkpointed every iteration.
# *note, edge indices are rearranged in the 'adjust_data' function,
# so to get the proper edge index, call from data$tree
no.shifts <- NULL                  # per-tree count of inferred shifts
shift.positions.by.tree <- list()  # shifted tips, kept separately per tree
shift.positions.list <- NULL       # shifted tips, flattened across all trees
l1ou.res <- NULL                   # full l1ou fit object per tree
# before you hit enter, make sure to change the names of the output files below!
for (i in seq_along(trees)) {
  cat("iteration", i, "of", length(trees), "\n") #keep track of what tree/loop# we're on
  #### Adjust the data and tree to fit (order matters in l1ou!)
  data <- adjust_data(trees[[i]], trait.data[[i]]) # specify the data columns of interest
  #### Estimate the number and position of shifts a priori
  shift.fit <- estimate_shift_configuration(data$tree, data$Y, nCores=8, quietly=F, criterion="pBIC") # if you want to do only a single trait, 'data$Y[,x]'
  l1ou.res[[i]] <- shift.fit
  #### Record the tree number and the number of shifts inferred
  no.shifts <- rbind(no.shifts, as.data.frame(t(c(i, shift.fit$nShifts))))
  # pull the shift positions (edges) and map each one to its descendant tips
  shift.edges <- shift.fit$shift.configuration
  all.shifted.tips <- NULL
  if (length(shift.edges) == 0) {
    all.shifted.tips <- "no shifts"
  } else for (t in seq_along(shift.edges)) {
    # 'desc.tips' rather than 'names', to avoid shadowing base::names
    desc.tips <- getDescendants.edges(data$tree, shift.edges[[t]])
    all.shifted.tips <- append(all.shifted.tips, desc.tips)
  }
  shift.positions.list <- append(shift.positions.list, all.shifted.tips)
  shift.positions.by.tree[[i]] <- all.shifted.tips
  # checkpoint every iteration so a crash part-way through keeps earlier results
  saveRDS(no.shifts, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.num.shifts.RDS")
  saveRDS(shift.positions.list, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.list.shift.positions.RDS")
  saveRDS(shift.positions.by.tree, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.shift.positions.by.tree.RDS")
  saveRDS(l1ou.res, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.Results.RDS")
}
colnames(no.shifts) <- c("tree.no", "n.shifts")
# add.lengths to appropriate edges of the tree
# Tabulate how often each tip sits below an inferred shift, then stretch the
# corresponding branches of a chosen tree in proportion to that frequency.
res.counts <- table(shift.positions.list) # make a table of the shift frequencies
shifted.tips <- as.data.frame(res.counts) # turn it into a data frame
# NOTE(review): the commented line below keeps ONLY row 1; to *remove* a
# "no shifts" row it presumably should be shifted.tips[-1,] -- confirm
# shifted.tips <- shifted.tips[1,] # remove any labelled "no shifts"
all.node.numbers <- as.data.frame(trees[[1]]$tip.label) # get all the nodes of the tree and the numbers, the tree must match the one you want to plot!
all.node.numbers[,"tip.no"] <- rownames(all.node.numbers); colnames(all.node.numbers) <- c("tip.name", "tip.no") # make a column that shows the tip number
target.numbers <- all.node.numbers[all.node.numbers$tip.name %in% shifted.tips$shift.positions.list,] # subset all the tips, so show just the node numbers of the shifted tips
target.numbers[,2] <- as.numeric(target.numbers[,2]) # make it numeric
target.numbers <- target.numbers[order(match(target.numbers$tip.name, shifted.tips$shift.positions.list)),] # match the new frame to the output (shift) frame
# row order of target.numbers now mirrors shifted.tips, so Freq can be pasted on
target.numbers[,"shift.freq"] <- cbind(shifted.tips$Freq) # add on the frequencies of shifts
# now we need to designate the tree we want to plot, and adjust its branch lengths to match the shifts
chosen.shift <- l1ou.res[[1]] # designate which shift set you want to plot the adjustments on
tree <- chosen.shift$tree
#alt.data <- adjust_data(trees[[60]], protea.data[,c(2:6)]) # it has to be adjust to fit the 'post order'
#tree <- alt.data$tree # designate your target tree, I've put in the tree from the last 'shift.fit' object or do "tree <- trees[[10]]"
#tree <- shift.fit$tree # designate your target tree, I've put in the tree from the last 'shift.fit' object
#shift.fit$shift.configuration <- l1ou.res[[90]]$shift.configuration # bring along the shift config from the tree you want to plot!
# For each shifted tip: find the edge whose descendant node is that tip (the
# edge matrix's 2nd column holds descendant nodes) and lengthen it by
# 0.01 * shift frequency.
for (i in 1:length(target.numbers[,2])){
rownames(tree$edge) <- c(1:length(tree$edge[,1])) # give the tree edge frame rownames
target.almost <- subset(tree$edge, tree$edge[,2]==target.numbers[,2][i]) # pull out the ancestor and descendant nodes of the target edge
interim.target <- subset(target.numbers, target.numbers[,2]==target.numbers[,2][i]) # subset the data frame to just the current tip of interest (descendant node)
target.edge <- as.numeric(rownames(target.almost)) # get the number of the target edge
tree$edge.length[[target.edge]] <- tree$edge.length[[target.edge]]+(0.01*interim.target[,3]) # add the desired length to the branch, per shift (here, 0.01)
}
chosen.shift$tree <- tree # set the tree in the 'shift.fit' object to our rescaled tree
#shift.fit$Y <- alt.data$Y
#plot(tree) # have a look to see if it worked
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(chosen.shift, cex=1)
#####################
## I've now saved the l1ou output as it's own object "l1ou.res", so we should be able to
### call on it to plot any tree we want without having to go back. We'll try it next time
####################
# Label this run's shifted tips with the method used, and bin shift frequencies
# into three qualitative classes (breaks define 3 intervals, matching 3 labels).
shifted.tips$method <- "ASTRAL.starBEAST"
shifted.tips$colors <- cut(shifted.tips$Freq,
                           breaks = c(0, 50, 100, 200),
                           labels = c("noise", "real", "reallyreal"))
ggplot(shifted.tips, aes(x=shift.positions.list, y=Freq)) +
  geom_bar(stat="identity")
# if we want to join the shift.positions lists together, read them in and combined
# NOTE: load() restores objects by their saved names, so this overwrites any
# in-memory shift.positions.list from the current run
load("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/l1ou_Outputs_and_Figures/Protea.starBEAST.list.shift.positions.RData")
sbeast.shifts <- shift.positions.list
sbeast.list <- as.data.frame(count(sbeast.shifts))
sbeast.list$method <- "starBEAST"
colnames(sbeast.list) <- c("shift.positions.list","Freq", "method")
# then we can export the document
# NOTE(review): at this point all.shifted.tips holds the character vector left
# over from the l1ou loop, not a data frame -- presumably this rbind was meant
# to accumulate per-method frames like sbeast.list; confirm before relying on it
all.shifted.tips <- rbind.data.frame(all.shifted.tips, sbeast.list)
write.csv(all.shifted.tips, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/ShiftedTips_by_Method.csv")
# and read it back in if necessary, so that we can plot it
all.shifts.list <- read.csv("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/ShiftedTips_by_Method.csv")
# Dodged, flipped bar chart of shift frequency per tip, colored by method.
ggplot(all.shifts.list, aes(x=shift.positions.list, y=Freq, fill=method)) +
  geom_bar(stat="identity", position = position_dodge(width = 0.5), width=2) +
  #theme(axis.text.x=element_text(angle=45, hjust=1)) +
  scale_fill_manual( values=wes_palette("Moonrise3")) +
  coord_flip() +
  #scale_color_ptol() +
  theme_few()
###################################################################################
# One more thing we might want to show is the branch length difference among trees
## we can try to do this by plotting pairwise distances to show inherent bias
###################################################################################
# start by rescaling the trees to height 1
# NOTE(review): 'as' shadows methods::as for the rest of the session; also, the
# rescaled sets (as/sb) are computed here but the loop below reads the RAW
# starbeast/astralbeast objects via method1/method2 -- confirm whether
# method1 <- sb; method2 <- as was intended
as <- lapply(astralbeast, rescale, "depth", 1); sb <- lapply(starbeast, rescale, "depth", 1)
class(as) <- "multiPhylo"; class(sb) <- "multiPhylo"
# now a loop to compare tree1/method1 to tree1/method2 via pairwise distances
total.dff <- NULL # make an empty object to store all distances (ntips! x ntrees)
method1 <- starbeast; method2 <- astralbeast
for (k in 1:length(starbeast)) {
dff <- NULL
in1 <- method1[[k]] # designate tree.n/method.n
in2 <- method2[[k]] # designate tree.n/method.n+1
inboth <- intersect(in1$tip.label, in2$tip.label) # check all the tips that match between trees
# Get the pairwise distance matrices
pw.in1 <- cophenetic.phylo(in1)
pw.in2 <- cophenetic.phylo(in2)
# now: compare pairwise distances from these 2 trees
# the complication is that they have different sets of taxa
# Get unique combinations of this set:
ucomb <- combn(inboth, m = 2)
# make vectors to hold results
dist_1 <- rep(NA, ncol(ucomb))
dist_2 <- rep(NA, ncol(ucomb))
dff <- data.frame(species1 = ucomb[1,], species2 = ucomb[2,] , dist_1, dist_2, stringsAsFactors=F)
# fill in the blanks....
# look up each species pair's patristic distance in both matrices
for (ii in 1:nrow(dff)){
dff$dist_1[ii] <- pw.in1[ dff$species1[ii], dff$species2[ii] ]
dff$dist_2[ii] <- pw.in2[ dff$species1[ii], dff$species2[ii] ]
}
total.dff <- rbind.data.frame(total.dff, dff)
}
# Regress one method's pairwise distances on the other's; a slope of 1 through
# the origin would indicate no systematic branch-length bias between methods.
fit <- lm(dist_1 ~ dist_2, data=total.dff) # change this according to the parameter you simulated
plot.fit <- (ggplotRegression(fit))
plot.fit + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
#######################################################################################
# Interlude: the base 'plot' and 'abline' functions are alright, but we want to
## make it (1) prettier, and (2) include the information from our linear regression
### into the plot, so that we know what our results were. Use custom 'ggplotRegression'
### if you want to change the saturation use 'alpha'
# Scatterplot of a fitted simple linear model, with the regression line and a
# title reporting adjusted R^2, intercept, slope, and the slope's p-value.
#   fit: an object returned by lm() with a single predictor.
# Returns a ggplot object (print it to draw).
ggplotRegression <- function(fit) {
  library(ggplot2) # library() fails loudly if ggplot2 is missing; require() would not
  # fit$model column 1 is the response, column 2 the predictor; the .data
  # pronoun replaces the deprecated aes_string() for string column names
  ggplot(fit$model, aes(x = .data[[names(fit$model)[2]]], y = .data[[names(fit$model)[1]]])) +
    geom_point(alpha = 0.25, color = "red") +   # change to 0.25 and "red" for time plots
    stat_smooth(method = "lm", col = "black") + # change to "black" for time plots
    labs(x = names(fit$model)[2], y = names(fit$model)[1],
         title = paste("Adj R2 = ", signif(summary(fit)$adj.r.squared, 5),
                       "Intercept =", signif(fit$coef[[1]], 5),
                       " Slope =", signif(fit$coef[[2]], 5),
                       " P =", signif(summary(fit)$coef[2, 4], 5)))
}
#######################################################################################
wd = "/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/"
# Run a loop that writes each one to an individual file in a named folder
dir.create(np(paste(addslash(wd), "Protea.RAxML.GeneTrees", sep=""))) #create folder for the trees
for (i in 1:length(trees)){
name <- paste(wd,"/Protea.RAxML.gene.",i,".tre", sep="")
write.tree(trees[[i]], file=name)
} #you should now have a folder with 100 tree separate tree files
butt <- load(file="Protea.ASTRAL.RAxML.list.shift.positions.RData")
View(butt)
# Quickly standardize the data to body size
#######################################################################
# regress (log transformed) Tail Length against SVL, extract residuals
# NOTE(review): in each pair of lm() calls below, the second immediately
# overwrites the first, so only the raw-measurement regression is kept; the
# data= source also alternates between datum and mardata -- confirm intent
length.weight <- lm(logWeight ~ logLength, data=datum)
length.weight <- lm(M.avgWT ~ M.avgBL, data=datum)
#plot(length.weight)
nonlogweight_res <- resid(length.weight)
mardata <- cbind(mardata, nonlogweight_res)
# regress (log transformed) Head Length against SVL, extract residuals
length.brain <- lm(logBrain ~ logLength, data=mardata)
length.brain <- lm(Brain.size ~ M.avgBL, data=datum)
#plot(length.brain)
nonlogbrain_res <- resid(length.brain)
mardata <- cbind(mardata, nonlogbrain_res)
# Phylogenetic (rather than ordinary) size correction: one phyl.resid() fit per
# tree in the posterior set, stored as a list of residual matrices.
datum <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.RAW.csv", header=T, row.names=1)
datum <- log(datum) # quickly log the raw values (if not already done!)
total.data <- NULL # create a frame for holding all our residuals
against <- datum$M.avgBL; names(against) <- rownames(datum) # phyl.resid needs names to determine proper order
for (b in 1:length(trees)) {
resid.data <- NULL # temporary object
resids <- phyl.resid(trees[[b]], datum$M.avgBL, datum[,c("Brain.size", "M.avgWT")]) # regress brain size and weight against body length
# NOTE(review): unlike the Cichlid version of this loop, resids$resid is not
# reordered by rownames here before cbind -- verify row alignment with datum
resid.data <- cbind(resid.data, datum$M.avgBL); resid.data <- cbind(resid.data, resids$resid)
colnames(resid.data) <- c("BodyLength", "BrainSize", "Weight")
total.data[[b]] <- resid.data
}
saveRDS(total.data, "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.RAxML.Concat.PhyloResiduals.rds")
# Spot-check a previously saved residual set against a fresh phyl.resid() call.
butt <- readRDS("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.Astral.RAxML.PhyloResiduals.rds")
trees = read.tree("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Marsupials.ASTRAL.RAxML.trees") #pick out our set of posterior trees
test <- phyl.resid(trees[[1]], against, datum[,c("Brain.size", "M.avgWT")])
test
butt[[1]]
trait.data <- t(total.data[[1]]) # transpose the data (taxa as columns, traits as rows)
#rownames(morph.data) <- "TL.Trunk" #adjust this to name the trait if you'd like
#rownames(trait.data) <- c("HL.Trunk", "TL.Trunk", "HW.Trunk") #adjust this to name the trait if you'd like
# PhyloEM (PhylogeneticEM): infer adaptive-shift positions under a scalar OU
# ("scOU") process on the first tree, using the transposed trait matrix.
# The original had 'independent=F)' stranded on its own line AFTER the call's
# closing parenthesis -- a syntax error; it is folded back into the call here.
res <- PhyloEM(phylo=trees[[1]],
               Y_data=trait.data,      # read in the trait data
               process="scOU",         # identify the process to analyse
               #random.root=T,         #
               K_max=Ntip(trees[[1]]), # set a maximum limit on the number of shifts to search
               check.tips.names=T,     # check to make sure names/trait data match
               parallel_alpha=T,       # we want to parallelize the analysis
               Ncores=8,               # with how many cores?
               independent=F)          # if using multiple traits, are they independent?
plot(res, show.tip.label=T, label_cex=0.1)
| /MacroComparison.R | no_license | IanGBrennan/Multi-MacroInference | R | false | false | 32,280 | r | #####
# Script for macro-comparison of shifts in multivariate traits
## inferred from different phylogenetic methods (ML, BI, MSC, SCC)
###################################################################################
## Idea and Introduction:
# we want to test macroevolutionary analyses across trees from different methods
# and data subsets to determine how robust the results and subsequent inferences
# are. We'll initially try two things (1) Disparity through time 'DTT' and (2) l1ou
# to test for shifts in morphological traits. The input trait data will stay the
# same for all analyses, but data subsets and methods will change.
# Method 1 is 'Concatenation': We can either use RAxML or BEAST2. BEAST2 is preferable
# because it will output a distribution of plausible ultrametric trees of varied topologies
# and branch lengths all at once. Alternatively, we can run RAxML for 100 tree
# searches, which will give us some branch length/phylogenetic variation, but we
# will need to transform the tree to make it ultrametric ('chronos', lambda = ...).
# Method 2 is 'Short-Cut Coalescent': These are methods (ASTRAL, ASTRID, MP-EST)
# that use as inputs, gene trees estimated from another program (I will use either
# RAxML or *BEAST2). These methods can be sensitive to the input gene trees, so we
# can test this by using gene trees built in RAxML and in BEAST2. If we run a
# bootstrapped ASTRAL analysis, we will get an output of 102 trees (100 bs,
# 1 greedy algorithm best fit, and 1 consensus), perfect for our analyses!
# Method 3 is 'Full Coalescent': The only proper full coalescent method that we'll
# use is implemented in *BEAST2.
library(geiger)
library(phytools)
library(ggbiplot);library(ggplot2);library(ggtree); library(ggridges)
library(l1ou)
library(Rmisc)
library(wesanderson); library(ggthemes)
library(corrplot)
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/l1ou.Uncertainty.R")
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/Get.Descendant.Edges.R")
source("/Users/Ian/Google.Drive/R.Analyses/Convenient Scripts/star.tree_Make_Polytomies.R")
# A list of the final sets of trees/data we're using in this study
#############################################################
## (all in "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
# or ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/"):
# Marsupials
# RAxML concatenated = "Marsupials.RAxML.Concat.trees"
# phylo regressed traits = "Marsupials.RAxML.Concat.PhyloResiduals.rds"
# ASTRAL w/RAxML gene trees = "Marsupials.ASTRAL.RAxML.trees"
# phylo regressed traits = "Marsupials.Astral.RAxML.PhyloResiduals.rds"
# starBEAST =
# ASTRAL w/starBEAST gene trees =
# Elapids
# RAxML concatenated = "T222_concatenated_Ingroup.trimmed.trees"
# phylo regressed traits = "Elapids.Concat.RAxML.PhyloResiduals.rds"
# ASTRAL w/RAxML gene trees = "Elapidae.RAxML.ASTRAL.bs.SCALED.100.TRIMMED.trees"
# phylo regressed traits = "Elapids.Astral.RAxML.PhyloResiduals.rds"
# starBEAST = "Elapidae.*BEAST.by25s.TRIMMED.trees"
# phylo regressed traits = "Elapids.BEAST.PhyloResiduals.rds"
# ASTRAL w/starBEAST gene trees = "Elapidae.ASTRAL.*BEAST.TRIMMED.trees"
# phylo regressed traits = "Elapids.Astral.BEAST.PhyloResiduals.rds"
# phylogenetically regressed traits (for l1ou) =
# Protea
# RAxML concatenated = "Protea.RAxML.Concat.tre"
# ASTRAL w/RAxML gene trees = "Protea.ASTRAL.RAxML.SCALED.tre"
# starBEAST = "Protea.starBEAST.trimmed.trees"
# ASTRAL w/starBEAST gene trees = "Protea.ASTRAL.starBEAST.TRIMMED.SCALED.trees"
# phylogenetically regressed traits (for l1ou) =
# Cichlids
# RAxML concatenated =
# ASTRAL w/RAxML gene trees = "Cichlids.ASTRAL.RAxML.trees"
# Just a staging area for TREEs:
#################################
# Overwrites the working 'trees' object with whichever posterior set is being
# analysed in this session (here: Cichlids, ASTRAL on RAxML gene trees).
trees = read.tree("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Cichlids.ASTRAL.RAxML.trees") #pick out our set of posterior trees
#################################
# Quickly standardize the data to body size
#######################################################################
#### I've decided to use a phylogenetic regression instead of a standard linear regression,
##### the steps for running it for each tree are looped below
datum <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.RAW.csv", header=T, row.names=1)
datum <- log(datum) # quickly log the raw values (if not already done!)
total.data <- NULL # create a frame for holding all our residuals
against <- datum$Body_Mass; names(against) <- rownames(datum) # phyl.resid needs names to determine proper order
# One phyl.resid() fit per posterior tree: regress the other traits (cols 2:5)
# on body mass, keep the residuals, and store one frame per tree.
for (b in 1:length(trees)) {
resid.data <- NULL # temporary object
#resids <- phyl.resid(trees[[b]], against, datum[,c("Brain.size", "M.avgWT")]) # regress brain size and weight against body length
resids <- phyl.resid(trees[[b]], against, datum[,2:5]) # regress brain size and weight against body length
# reorder residual rows alphabetically so they align with the 'against' vector
residual.data <- resids$resid[order(rownames(resids$resid)),] # order the data to match what we regressed it against
resid.data <- cbind(resid.data, residual.data);
# prepend the (logged) body-mass column so each frame keeps all five traits
Body_Mass <- against; Body_Mass <- as.data.frame(Body_Mass); resid.data <- cbind(Body_Mass, resid.data);
colnames(resid.data) <- colnames(datum)[1:5]
total.data[[b]] <- resid.data
}
saveRDS(total.data, "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.ASTRAL.RAxML.PhyloResiduals.rds")
# Scatterplot + OLS regression of one trait column on another, with the fit
# statistics reported in the plot title.
#   data:   data frame of (typically logged / size-corrected) trait values
#   trait1: column index of the response variable  (plotted on y)
#   trait2: column index of the predictor variable (plotted on x)
# Returns a ggplot object.
compare.traits <- function(data, trait1, trait2){
  # build a plain two-column frame so plotting doesn't depend on parsing
  # expressions like "data[, trait1]" (the old aes_string approach)
  plot.df <- data.frame(x = data[, trait2], y = data[, trait1])
  fit <- lm(y ~ x, data = plot.df)
  (ggplot(plot.df, aes(x = x, y = y))
   + geom_point(alpha=0.5, color="red")
   + geom_smooth(method="lm", color="black")
   + theme_classic()
   # labels follow what is plotted: predictor (trait2) on x, response (trait1)
   # on y -- the original assigned these the wrong way around
   + labs(x=colnames(data)[trait2], y=colnames(data)[trait1],
          title = paste("Adj R2 = ",signif(summary(fit)$adj.r.squared, 5),
                        "Intercept =",signif(fit$coef[[1]],5 ),
                        " Slope =",signif(fit$coef[[2]], 5),
                        " P =",signif(summary(fit)$coef[2,4], 5))))
}
# Pairwise Pearson correlations among all traits, plus significance tests,
# visualised with corrplot (significant cells flagged with white asterisks).
test.cor <- cor(datum, method="pearson", use = "complete.obs")
res1 <- cor.mtest(datum, conf.level = .95)
corrplot(test.cor, method="circle", type="lower", order="alphabet",
         addgrid.col=NA, tl.col="black", title="unburnt continuous",
         tl.cex=0.5, p.mat = res1$p, insig = "label_sig", pch.col = "white")
# more corrplot info at: https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
# Quick density of MDI estimates (requires mdi.estimates from the DTT loop).
concat <- (ggplot(mdi.estimates, aes(x=MDI))
+ geom_density(fill="green")
+ scale_x_continuous(limits = c(0, 1))
+ theme_classic())
# You'll want to go through these steps each time you start again
# NOTE(review): the second subset line below overwrites the first (only the
# nonlog residual columns are kept) -- pick ONE depending on the dataset
trait.data <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.residuals.data.csv")
#trait.data <- subset(trait.data, trait.data$In_analyses == "Yes")
#trait.data <- trait.data[complete.cases(trait.data),] # trim any data columns that aren't complete
trait.data <- trait.data[,c("Name_in_Duchene", "logLength", "brain_res", "weight_res")]
trait.data <- trait.data[,c("Name_in_Duchene", "logLength", "nonlogbrain_res", "nonlogweight_res")]
rownames(trait.data) <- trait.data$Name_in_Duchene
# or
# Alternative inputs: per-tree phylo residuals (list) or the raw CSV.
trait.data <- readRDS("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.ASTRAL.RAxML.PhyloResiduals.rds")
trait.data <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Cichlids.RAW.csv", header=T, row.names=1)
# Read in the trees we'll use:
astral.raxml <- read.tree ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Marsupials_ASTRAL_RAxML.trees")
# NOTE(review): the three paths below point at the Final_Trees DIRECTORY, not a
# file -- fill in the file names before running
raxml.concat <- read.tree ("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
starbeast <- read.nexus("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
astralbeast <- read.nexus("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/")
# if you've already cleaned up your trees assign them to the 'trees' object
trees = astral.raxml
# Add on the Thylacine next to Myrmecobius at a reasonable depth
# Graft Thylacinus onto every tree as sister to Myrmecobius, attaching at a
# randomly drawn fraction (30-90%) along the Myrmecobius terminal branch.
depths <- seq(0.3,0.9, 0.01)
out.trees <- vector("list", length(trees)) # preallocate instead of growing
for (k in seq_along(trees)){
  at.depth <- sample(depths, 1) # random attachment fraction for this tree
  myrm.tip <- which(trees[[k]]$tip.label=="Myrmecobiidae_Myrmecobius_fasciatus")
  # 'position' is measured along the edge subtending the Myrmecobius tip
  # (edge matrix column 2 holds each edge's descendant node)
  out.trees[[k]] <- bind.tip(trees[[k]], where=myrm.tip,
                             tip.label="Thylacinidae_Thylacinus_cynocephalus",
                             position=at.depth*trees[[k]]$edge.length[which(trees[[k]]$edge[,2]==myrm.tip)])
}
# then we can clean up your tree(s) to match your data; unnecessary if using 'TRIMMED' trees
###############################################################
# Drop any tips without trait data from every tree, then confirm tree/data
# agreement with geiger's name.check before overwriting 'trees'.
keep <- rownames(trait.data)
# NOTE: 'all' shadows base::all for the rest of the session
all <- astral.raxml[[1]]$tip.label # change to appropriate tree
drop <- setdiff(all, keep)
new.trees <- lapply(astral.raxml, drop.tip, tip=drop) # change to appropriate tree
class(new.trees) <- "multiPhylo"
name.check(new.trees[[1]], trait.data)
trees = new.trees
# now we can get into the analyses, start with DTT in geiger
##############################################################
# either use a raw trait
trait <- trait.data$Total_L # specify your trait of interest, make sure it has names attached!
# NOTE(review): 'trans.data' is not defined anywhere in this script --
# presumably this should be rownames(trait.data); confirm before running
names(trait) <- rownames(trans.data) # make sure it has names attached!
test <- dtt(trees[[1]], trait, plot=T, nsim=1000)
plot(test$dtt ~ test$times)
lines(test$times, test$dtt)
# or a PCA value, which is reducing the dimensionality of multiple traits
## if you want to do a PCA value (which you likely do, follow the steps below)
pca.test <- trait.data[complete.cases(trait.data),] # make sure to remove incomplete taxa
# length() of a data frame is its column count, so this selects columns 2:ncol
test.data <- pca.test[,2:length(trait.data)] # designate the variables of interest (columns)
#test.data <- log(test.data) # log transform data
#ln.test[ln.test=="-Inf"] <- 0
species <- pca.test[,1] # make note of the species names
#genus <- pca.test[,7]
#parity <- pca.test[,8]
trait.pca <- prcomp(test.data) # perform your PCA
plot(trait.pca, type="l") # visualize how many axes to keep
trait.pca # determine how much influence each axis holds
summary(trait.pca) # and the cumulative value of each
#(ggbiplot(protea.pca, obs.scale=1, var.scale=1, ellipse=T, circle=T))
#loadings <- as.data.frame(protea.pca$rotation)
axes <- predict(trait.pca, newdata = test.data) # pull out each taxon's value for each PC axis
trait.data[,"PC1"] <- axes[,1] # save the PC loadings as a new variable for the DTT
(ggbiplot(trait.pca, obs.scale=1, var.scale=1, ellipse=T, circle=T))
### now you can do your DTT on PC1!
trait <- trait.data[,"PC1"] # specify your trait of interest, make sure it has names attached!
names(trait) <- rownames(axes) # make sure it has names attached!
test <- dtt(trees[[1]], trait, plot=T, nsim=1000)
plot(test$dtt ~ test$times)
lines(test$times, test$dtt)
# otherwise, just jump right in and do this analysis for all the trees!
# Run DTT (disparity-through-time, geiger) across every tree in the posterior
# set, collecting the MDI statistic and the empirical/simulated disparity
# curves, and over-plotting empirical (red) vs simulated-mean (blue) trends.
par(new=F)
mdi.estimates <- NULL
timing.of.disparity <- NULL
for (i in seq_along(trees)) {
  # progress message now reports the true tree count instead of hard-coded 100
  cat("iteration", i, "of", length(trees), "\n") #keep track of what tree/loop# we're on
  input <- dtt(trees[[i]], trait, plot=F, nsim=1000) # run the DTT analysis
  # create a set of objects to hold the outputs
  mdi.info <- as.data.frame(t(c(i, input$MDI)))
  disparity <- as.data.frame(input$dtt)
  timing <- as.data.frame(input$times)
  sim.means <- as.data.frame(rowMeans(input$sim)) # keep this for plotting inside this function
  sim.bounds <- as.data.frame(t(apply(input$sim, 1, CI))) # apply the CI function to each row (time interval) of the simulations
  colnames(sim.bounds) <- c("simulated.upper95", "simulated.mean", "simulated.lower95")
  # combine timing/disparity for this tree and feed it into the bigger frame
  disp.timing <- cbind(timing, disparity, sim.bounds) # could use sim.means if you prefer (less uncertainty)
  timing.of.disparity <- rbind.data.frame(timing.of.disparity, disp.timing)
  # plot only the empirical trends (not the simulated)
  plot(input$dtt ~ input$times,
       xlim=c(0,1), ylim=c(0,2), col="red")
  lines(input$times, input$dtt); par(new=T)
  # plot only the simulated mean trends
  plot(sim.means[,1] ~ input$times,
       xlim=c(0,1), ylim=c(0,2), col="blue")
  lines(input$times, sim.means[,1]); par(new=T)
  mdi.estimates <- rbind(mdi.estimates, mdi.info)
}
colnames(timing.of.disparity) <- c("timing", "disparity", "simulated.upper95", "simulated.mean", "simulated.lower95")
timing.of.disparity <- timing.of.disparity[order(timing.of.disparity$timing),]
## Plot the timing of shifts as a density distribution (with mean estimate)
colnames(mdi.estimates) <- c("tree.num", "MDI")
# Density of MDI estimates for the current method alone.
concat <- (ggplot(mdi.estimates, aes(x=MDI))
 + geom_density(fill="green")
 + scale_x_continuous(limits = c(0, 1))
 + theme_classic())
#+ geom_vline(aes(xintercept=mean(timing, na.rm=T)),
#             color="red", linetype="dashed", size=1)
#+ scale_x_reverse(limits=c(20,0))) # use this to reverse then define the limits of the x axis
#### if you want to add another method to the MDI estimates plot:
# Append this run's estimates to the accumulated multi-method table on disk.
total.mdi <- read.csv("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.MDI.estimates.csv", header=T)
total.mdi[,"X"] <- NULL # drop the row-number column written by write.csv
mdi.estimates[,"model"] <- "starBEAST"
total.mdi <- rbind(total.mdi, mdi.estimates)
write.csv(total.mdi, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.MDI.estimates.csv")
total.mdi$model <- factor(total.mdi$model, levels=c("starBEAST", "raxml.concatenated", "astral.raxml", "astral.starbeast")) # this re-orders the models in the legend
# Four factor levels are mapped to fill, so request four palette colors;
# asking for only 3 makes scale_fill_manual error once all methods are present.
(ggplot(total.mdi, aes(x = MDI, y = model, fill=model))
 + geom_density_ridges(scale=7)
 + scale_fill_manual(values=wes_palette("Zissou", 4, "discrete"))
 + theme_ridges())
#### Make a summary of the Disparity estimates within a sliding window (mean, lowerCI, upperCI)
# Slides a window of width 0.1 across relative time [0, 1] in steps of 0.01
# and, for each window, records the empirical disparity CI and the mean of
# the simulated CI columns, accumulating one row per window.
tof <- timing.of.disparity
sim.nums <- seq(0,1,0.01) # left edges of the 101 sliding windows
emp.mean.estimates <- NULL; sim.mean.estimates <- NULL
for (t in sim.nums) {
time.min <- t
time.max <- t+0.1 # adjust the window width if you're getting NaN values (no observations within a time period)
# keep only the observations whose timing falls strictly inside this window
timed.chunk <- subset(tof, tof$timing>time.min & tof$timing<time.max)
emp.estimates <- as.data.frame(t(CI(timed.chunk$disparity)))
#simu.estimates <- as.data.frame(t(CI(timed.chunk$simulated.mean))) # this takes the CIs from the mean of simulations
sim.estimates <- as.data.frame(t(apply(timed.chunk[,3:5], 2, mean))) # this takes the mean of the CIs from the simulations
# FIX: removed the dead 'colnames(timing)' call that followed this line --
# colnames() of a character scalar is always NULL and its result was discarded.
timing <- paste(time.min, "to", time.max)
emp.output.frame <- NULL; sim.output.frame <- NULL
emp.output.frame <- as.data.frame(c(timing, emp.estimates))
sim.output.frame <- as.data.frame(c(timing, sim.estimates))
colnames(emp.output.frame) <- c("time.window", "empirical.upper95", "empirical.mean", "empirical.lower95")
colnames(sim.output.frame) <- c("time.window", "simulated.upper95", "simulated.mean", "simulated.lower95")
#colnames(output.frame) <- NULL
emp.mean.estimates <- rbind(emp.mean.estimates, emp.output.frame)
sim.mean.estimates <- rbind(sim.mean.estimates, sim.output.frame)
}
emp.mean.estimates[,"timing"] <- sim.nums # add a column with the times
emp.mean.estimates <- emp.mean.estimates[-c(101),] # drop the last row which is superfluous
sim.mean.estimates[,"timing"] <- sim.nums # add a column with the times
sim.mean.estimates <- sim.mean.estimates[-c(101),] # drop the last row which is superfluous
total.estimates <- cbind(emp.mean.estimates, c(sim.mean.estimates[,2:4]))
#external.total <- NULL
#read.csv(external.total) # read in the results from other tree sets
total.estimates[,"model"] <- "starBEAST" # change this according to the trees you're using
# NOTE(review): 'external.total' must already exist in the workspace (see the
# commented-out initialiser above) or the rbind() below will fail.
external.total <- rbind(external.total, total.estimates)
write.csv(external.total, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/All_Methods.DTT.trends.csv")
#### Now let's plot the trend in disparity from the posterior of trees as a confidence ribbon
# Two overlaid ribbons against the relative-time axis built above: the
# empirical 95% band and the simulated 95% band.
rib <- (ggplot(data=total.estimates)
+ geom_ribbon(aes(x=timing, ymin=empirical.lower95, ymax=empirical.upper95, fill="empirical"))
+ geom_ribbon(aes(x=timing, ymin=simulated.lower95, ymax=simulated.upper95, fill="simulated"))
+ theme_classic())
#+ geom_smooth()
#+ scale_x_reverse())
#+ geom_smooth(method="auto", aes(x=x, y=all.emp.ci.vj$y.meanCI), se=T))
#### Now combine the MDI plot and the tree into a single figure
test <- (ggtree(trees[[1]])
+ geom_tiplab(size=4))
# NOTE(review): 'multiplot' is not base ggplot2 -- presumably the cookbook /
# Rmisc helper; the repeated arguments fill a 2x2 grid.  Confirm it is loaded.
multiplot(rib, test, rib, test, ncol=2)
# Append log-transformed copies of the three raw measurement columns, then
# name all seven columns of the widened table.
mardata <- cbind(mardata, log(mardata[,2:4])) # log transform the raw measurements
colnames(mardata) <- c("Name_in_Duchene", "Brain.size", "M.avgBL", "M.avgWT",
"logBrain", "logLength", "logWeight")
trait.data
# NOTE(review): regression residuals can be negative, so log() here will
# produce NaNs (with a warning) for any negative values -- confirm intent.
logbrain_res <- log(trait.data[,"nonlogbrain_res"])
########################################################
# Now we can start looking at shifts in size and shape
## using 'l1ou' we'll estimate morphological shifts
## then attempt to identify instances of convergence
########################################################
# Quick trial run (only 2 iterations) of the shift-uncertainty estimation.
output.est <- estimate.uncertainty(trees, trait.data, n.iter=2, estimate.convergence=F)
output.post <- process.uncertainty(output.est, 2)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(output.post)
# Sanity check: rerun the same pipeline on star trees, i.e. with the
# phylogenetic structure removed.
test1 <- star.tree(trees[[1]])
test2 <- star.tree(trees[[2]])
testies <- c(test1, test2)
output.testie <- estimate.uncertainty(testies, trait.data, n.iter=2, estimate.convergence=F)
output.postie <- process.uncertainty(output.testie, 2)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(output.postie)
#### Check to make sure the tips match the data labels
# NOTE(review): indices 23 and 16 below look like hand-picked examples for a
# single-tree dry run before the full loop -- confirm they are intentional.
name.check(trees[[23]], trait.data[[23]]);
#### Adjust the data and tree to fit (order matters in l1ou!)
data <- adjust_data(trees[[16]], trait.data[[16]]) # exclude your PC1 values from above!
#### Estimate the number and position of shifts a priori
shift.fit <- estimate_shift_configuration(data$tree, data$Y, nCores=8, quietly=F, criterion="pBIC") # if you want to do only a single trait, 'data$Y[,x]'
plot(shift.fit, cex=0.8)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
#### Investigate convergence among the supported shift set
fit.conv <- estimate_convergent_regimes(shift.fit, nCores=8, criterion="pBIC")
plot(fit.conv, cex=0.5)
## Let's try building a loop to make sense of the shifts
########################################################
# *note, edge indices are rearranged in the 'adjust_data' function,
# so to get the proper edge index, call from data$tree
# For every tree in the posterior sample: fit an l1ou shift configuration,
# record the number of shifts, and translate each shifted edge into the tip
# names it subtends.  Results are checkpointed to disk after every tree.
no.shifts <- NULL                       # per-tree shift counts (tree.no, n.shifts)
shift.positions.by.tree <- list()       # shifted tip names, one element per tree
shift.positions.list <- NULL            # flat vector of shifted tips across all trees
l1ou.res <- NULL                        # full l1ou fit objects, one per tree
# before you hit enter, make sure to change the names of the output files below!
for (i in 1:length(trees)) {
cat("iteration", i, "of", length(trees), "\n") #keep track of what tree/loop# we're on
#### Adjust the data and tree to fit (order matters in l1ou!)
data <- adjust_data(trees[[i]], trait.data[[i]]) # specify the data columns of interest
#### Estimate the number and position of shifts a priori
shift.fit <- estimate_shift_configuration(data$tree, data$Y, nCores=8, quietly=F, criterion="pBIC") # if you want to do only a single trait, 'data$Y[,x]'
l1ou.res[[i]] <- shift.fit
#plot(shift.fit, cex=0.8)
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
#### Create a data frame to hold the tree # and the # of shifts inferred
shifts.frame <- NULL
shifts.frame <- as.data.frame(t(c(i, shift.fit$nShifts)))
no.shifts <- rbind(no.shifts, shifts.frame)
# have to pull the shift positions (edges)
shift.edges <- shift.fit$shift.configuration
# match it to tips
all.shifted.tips <- NULL
if (length(shift.edges) == 0) {
all.shifted.tips <- "no shifts"
} else for (t in 1:length(shift.edges)) {
names <- getDescendants.edges(data$tree, shift.edges[[t]])
all.shifted.tips <- append(all.shifted.tips, names)
}
shift.positions.list <- append(shift.positions.list, all.shifted.tips)
shift.positions.by.tree[[i]] <- all.shifted.tips
# saveRDS inside the loop deliberately checkpoints progress each iteration,
# so a crash part-way through a long run loses at most one tree.
saveRDS(no.shifts, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.num.shifts.RDS")
saveRDS(shift.positions.list, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.list.shift.positions.RDS")
saveRDS(shift.positions.by.tree, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.shift.positions.by.tree.RDS")
saveRDS(l1ou.res, file="/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/l1ou_Output/Marsupials.ASTRAL.RAxML.Results.RDS")
}
colnames(no.shifts) <- c("tree.no", "n.shifts")
# add.lengths to appropriate edges of the tree
# Tabulate how often each tip was downstream of a shift across the tree set,
# then map those tips to node numbers on a reference tree.
res.counts <- table(shift.positions.list) # make a table of the shift frequencies
shifted.tips <- as.data.frame(res.counts) # turn it into a data frame
# shifted.tips <- shifted.tips[1,] # remove any labelled "no shifts"
all.node.numbers <- as.data.frame(trees[[1]]$tip.label) # get all the nodes of the tree and the numbers, the tree must match the one you want to plot!
all.node.numbers[,"tip.no"] <- rownames(all.node.numbers); colnames(all.node.numbers) <- c("tip.name", "tip.no") # make a column that shows the tip number
target.numbers <- all.node.numbers[all.node.numbers$tip.name %in% shifted.tips$shift.positions.list,] # subset all the tips, so show just the node numbers of the shifted tips
target.numbers[,2] <- as.numeric(target.numbers[,2]) # make it numeric
target.numbers <- target.numbers[order(match(target.numbers$tip.name, shifted.tips$shift.positions.list)),] # match the new frame to the output (shift) frame
target.numbers[,"shift.freq"] <- cbind(shifted.tips$Freq) # add on the frequencies of shifts
# now we need to designate the tree we want to plot, and adjust its branch lengths to match the shifts
chosen.shift <- l1ou.res[[1]] # designate which shift set you want to plot the adjustments on
tree <- chosen.shift$tree
#alt.data <- adjust_data(trees[[60]], protea.data[,c(2:6)]) # it has to be adjust to fit the 'post order'
#tree <- alt.data$tree # designate your target tree, I've put in the tree from the last 'shift.fit' object or do "tree <- trees[[10]]"
#tree <- shift.fit$tree # designate your target tree, I've put in the tree from the last 'shift.fit' object
#shift.fit$shift.configuration <- l1ou.res[[90]]$shift.configuration # bring along the shift config from the tree you want to plot!
# Lengthen each shifted tip's terminal branch in proportion to how often it
# shifted (0.01 units per shift), so the plot visually encodes shift support.
# FIX: the rownames assignment below is loop-invariant -- it was previously
# re-executed on every iteration; it is now done once before the loop.
# Also seq_len() is empty-safe where 1:length(...) would iterate c(1, 0).
rownames(tree$edge) <- c(1:length(tree$edge[,1])) # edge index == row name
for (i in seq_len(length(target.numbers[,2]))){
target.almost <- subset(tree$edge, tree$edge[,2]==target.numbers[,2][i]) # ancestor/descendant nodes of the target edge
interim.target <- subset(target.numbers, target.numbers[,2]==target.numbers[,2][i]) # row(s) for the current tip of interest
target.edge <- as.numeric(rownames(target.almost)) # index of the target edge
tree$edge.length[[target.edge]] <- tree$edge.length[[target.edge]]+(0.01*interim.target[,3]) # add the desired length to the branch, per shift (here, 0.01)
}
chosen.shift$tree <- tree # set the tree in the 'shift.fit' object to our rescaled tree
#shift.fit$Y <- alt.data$Y
#plot(tree) # have a look to see if it worked
# if you get 'figure margins' error, do: par(mar=c(1,1,1,1))
plot(chosen.shift, cex=1)
#####################
## I've now saved the l1ou output as it's own object "l1ou.res", so we should be able to
### call on it to plot any tree we want without having to go back. We'll try it next time
####################
# Bar plots of per-tip shift frequencies, plus merging of shift lists from
# different tree-building methods into one exportable table.
shifted.tips$method <- "ASTRAL.starBEAST"
shifted.tips$colors <- cut(shifted.tips$Freq,
breaks = c(0, 50, 100, 200),
labels = c("noise", "real", "reallyreal"))
ggplot(shifted.tips, aes(x=shift.positions.list, y=Freq)) +
geom_bar(stat="identity")
# if we want to join the shift.positions lists together, read them in and combined
load("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/l1ou_Outputs_and_Figures/Protea.starBEAST.list.shift.positions.RData")
sbeast.shifts <- shift.positions.list
sbeast.list <- as.data.frame(count(sbeast.shifts))
sbeast.list$method <- "starBEAST"
colnames(sbeast.list) <- c("shift.positions.list","Freq", "method")
# then we can export the document
# NOTE(review): 'all.shifted.tips' here still holds the tip vector left over
# from the last iteration of the l1ou loop above, not a method table --
# confirm that rbinding 'sbeast.list' onto it is the intended structure.
all.shifted.tips <- rbind.data.frame(all.shifted.tips, sbeast.list)
write.csv(all.shifted.tips, file="/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/ShiftedTips_by_Method.csv")
# and read it back in if necessary, so that we can plot it
all.shifts.list <- read.csv("/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/ShiftedTips_by_Method.csv")
ggplot(all.shifts.list, aes(x=shift.positions.list, y=Freq, fill=method)) +
geom_bar(stat="identity", position = position_dodge(width = 0.5), width=2) +
#theme(axis.text.x=element_text(angle=45, hjust=1)) +
scale_fill_manual( values=wes_palette("Moonrise3")) +
coord_flip() +
#scale_color_ptol() +
theme_few()
###################################################################################
# One more thing we might want to show is the branch length difference among trees
## we can try to do this by plotting pairwise distances to show inherent bias
###################################################################################
# start by rescaling the trees to height 1
as <- lapply(astralbeast, rescale, "depth", 1); sb <- lapply(starbeast, rescale, "depth", 1)
class(as) <- "multiPhylo"; class(sb) <- "multiPhylo"
# now a loop to compare tree1/method1 to tree1/method2 via pairwise distances
# For each matched pair of trees, compute cophenetic (tip-to-tip) distance
# matrices and record, for every pair of shared taxa, the distance under each
# method.  These paired columns feed the regression below.
total.dff <- NULL # make an empty object to store all distances (ntips! x ntrees)
method1 <- starbeast; method2 <- astralbeast
for (k in 1:length(starbeast)) {
dff <- NULL
in1 <- method1[[k]] # designate tree.n/method.n
in2 <- method2[[k]] # designate tree.n/method.n+1
inboth <- intersect(in1$tip.label, in2$tip.label) # check all the tips that match between trees
# Get the pairwise distance matrices
pw.in1 <- cophenetic.phylo(in1)
pw.in2 <- cophenetic.phylo(in2)
# now: compare pairwise distances from these 2 trees
# the complication is that they have different sets of taxa
# Get unique combinations of this set:
ucomb <- combn(inboth, m = 2)
# make vectors to hold results
dist_1 <- rep(NA, ncol(ucomb))
dist_2 <- rep(NA, ncol(ucomb))
dff <- data.frame(species1 = ucomb[1,], species2 = ucomb[2,] , dist_1, dist_2, stringsAsFactors=F)
# fill in the blanks....
for (ii in 1:nrow(dff)){
dff$dist_1[ii] <- pw.in1[ dff$species1[ii], dff$species2[ii] ]
dff$dist_2[ii] <- pw.in2[ dff$species1[ii], dff$species2[ii] ]
}
total.dff <- rbind.data.frame(total.dff, dff)
}
# Regress one method's distances on the other's; a slope of 1 through the
# origin would mean no systematic branch-length bias between methods.
fit <- lm(dist_1 ~ dist_2, data=total.dff) # change this according to the parameter you simulated
plot.fit <- (ggplotRegression(fit))
plot.fit + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
#######################################################################################
# Interlude: the base 'plot' and 'abline' functions are alright, but we want to
## make it (1) prettier, and (2) include the information from our linear regression
### into the plot, so that we know what our results were. Use custom 'ggplotRegression'
### if you want to change the saturation use 'alpha'
ggplotRegression <- function (fit) {
  # Scatter plot of a fitted lm() with its regression line, titled with the
  # adjusted R^2, intercept, slope and the slope's p-value.
  #
  # Args:
  #   fit: an object returned by lm(); fit$model must hold the model frame
  #        with the response in column 1 and the predictor in column 2.
  # Returns: a ggplot object (print it, or add further layers with '+').
  require(ggplot2)
  s <- summary(fit) # computed once; the original called summary(fit) twice
  xvar <- names(fit$model)[2]
  yvar <- names(fit$model)[1]
  # FIX: aes_string() is deprecated (ggplot2 >= 3.0); use the .data pronoun
  # for programmatic column mapping, with explicit axis labels to keep the
  # original aes_string() labelling.
  # FIX: 'fit$coef' relied on partial matching of 'coefficients'; spelled out.
  ggplot(fit$model, aes(x = .data[[xvar]], y = .data[[yvar]])) +
    geom_point(alpha=0.25, color="red") + # change to 0.25 and "red" for time plots
    stat_smooth(method = "lm", col = "black") + # change to "black" for time plots
    labs(x = xvar, y = yvar,
         title = paste("Adj R2 = ",signif(s$adj.r.squared, 5),
                       "Intercept =",signif(fit$coefficients[[1]],5 ),
                       " Slope =",signif(fit$coefficients[[2]], 5),
                       " P =",signif(s$coef[2,4], 5)))
}
#######################################################################################
wd = "/Users/Ian/Google.Drive/ANU Herp Work/Lemmon Projects/T222_Elapidae/Elapid_macroevolution/Protea_Files/"
# Run a loop that writes each one to an individual file in a named folder
# NOTE(review): the folder created below is never used by the write path --
# write.tree() saves into 'wd' itself, not into 'Protea.RAxML.GeneTrees'.
# Confirm whether the files should go inside the new folder ('np' and
# 'addslash' are project helpers not visible here).
dir.create(np(paste(addslash(wd), "Protea.RAxML.GeneTrees", sep=""))) #create folder for the trees
for (i in 1:length(trees)){
name <- paste(wd,"/Protea.RAxML.gene.",i,".tre", sep="")
write.tree(trees[[i]], file=name)
} #you should now have a folder with 100 tree separate tree files
# NOTE(review): load() returns the NAMES of the restored objects, so 'butt'
# is a character vector and View(butt) shows that vector, not the data.
# View(get(butt)) would show the restored object itself -- confirm intent.
butt <- load(file="Protea.ASTRAL.RAxML.list.shift.positions.RData")
View(butt)
# Quickly standardize the data to body size
#######################################################################
# regress (log transformed) Tail Length against SVL, extract residuals
# NOTE(review): in each pair of lm() calls below, the first (log-scale) fit is
# immediately overwritten by the second (raw-scale) fit, so only the raw-scale
# residuals are kept.  Comment out whichever line you do not want.
length.weight <- lm(logWeight ~ logLength, data=datum)
length.weight <- lm(M.avgWT ~ M.avgBL, data=datum)
#plot(length.weight)
nonlogweight_res <- resid(length.weight)
mardata <- cbind(mardata, nonlogweight_res)
# regress (log transformed) Head Length against SVL, extract residuals
length.brain <- lm(logBrain ~ logLength, data=mardata)
length.brain <- lm(Brain.size ~ M.avgBL, data=datum)
#plot(length.brain)
nonlogbrain_res <- resid(length.brain)
mardata <- cbind(mardata, nonlogbrain_res)
# Phylogenetic residuals: regress brain size and weight on body length under
# each tree in turn, keeping one 3-column residual table per tree.
datum <- read.csv("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.RAW.csv", header=T, row.names=1)
datum <- log(datum) # quickly log the raw values (if not already done!)
total.data <- NULL # create a frame for holding all our residuals
against <- datum$M.avgBL; names(against) <- rownames(datum) # phyl.resid needs names to determine proper order
for (b in 1:length(trees)) {
resid.data <- NULL # temporary object
resids <- phyl.resid(trees[[b]], datum$M.avgBL, datum[,c("Brain.size", "M.avgWT")]) # regress brain size and weight against body length
resid.data <- cbind(resid.data, datum$M.avgBL); resid.data <- cbind(resid.data, resids$resid)
colnames(resid.data) <- c("BodyLength", "BrainSize", "Weight")
total.data[[b]] <- resid.data
}
saveRDS(total.data, "/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.RAxML.Concat.PhyloResiduals.rds")
butt <- readRDS("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Trait_Data/Marsupials.Astral.RAxML.PhyloResiduals.rds")
trees = read.tree("/Users/Ian/Google.Drive/R.Analyses/Macro_Inference/Final_Trees/Marsupials.ASTRAL.RAxML.trees") #pick out our set of posterior trees
test <- phyl.resid(trees[[1]], against, datum[,c("Brain.size", "M.avgWT")])
test
butt[[1]]
# Fit a scalar-OU (scOU) PhyloEM model to the transposed trait matrix on the
# first tree, then plot the inferred shifts.
trait.data <- t(total.data[[1]]) # transpose the data (taxa as columns, traits as rows)
#rownames(morph.data) <- "TL.Trunk" #adjust this to name the trait if you'd like
#rownames(trait.data) <- c("HL.Trunk", "TL.Trunk", "HW.Trunk") #adjust this to name the trait if you'd like
# BUG FIX: 'independent=F)' previously sat on its own line AFTER the closing
# bracket of the PhyloEM() call -- a syntax error that also silently dropped
# the argument.  It is now a proper argument of the call.
res<- PhyloEM(phylo=trees[[1]],
Y_data=trait.data, # read in the trait data
process="scOU", # identify the process to analyse
#random.root=T, #
K_max=Ntip(trees[[1]]), # set a maximum limit on the number of shifts to search
check.tips.names=T, # check to make sure names/trait data match
parallel_alpha=T, # we want to parallelize the analysis
Ncores=8, # with how many cores?
independent=F) # if using multiple traits, are they independent?
plot(res, show.tip.label=T, label_cex=0.1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateBuildData.R
\name{CreateBuildData}
\alias{CreateBuildData}
\title{Generate Model Building Table By Combining Training Data Sets}
\usage{
CreateBuildData(parentDir, buildDataKeyFileName, outputFolderPath)
}
\arguments{
\item{parentDir}{path; path to directory with the peak attribute
tables of the training data, and the key table}
\item{buildDataKeyFileName}{file name of the csv table with the peak
attribute file name in column 1 and the mz-rt of retained peaks file in
column 2 imported above}
\item{outputFolderPath}{path to directory where build data key will be
output}
}
\value{
returns a data table in the format required to build a prediction
model
}
\description{
Training data files are provided in a table of file names including
the peak attribute tables in the first column and the corresponding tables
accepted peaks' mz and RT values in the second column. This function
reads the files and generates the table used for model building.
}
\examples{
TODO
}
| /man/CreateBuildData.Rd | no_license | ankitshah009/lcmsMetab | R | false | true | 1,058 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateBuildData.R
\name{CreateBuildData}
\alias{CreateBuildData}
\title{Generate Model Building Table By Combining Training Data Sets}
\usage{
CreateBuildData(parentDir, buildDataKeyFileName, outputFolderPath)
}
\arguments{
\item{parentDir}{path; path to directory with the peak attribute
tables of the training data, and the key table}
\item{buildDataKeyFileName}{file name of the csv table with the peak
attribute file name in column 1 and the mz-rt of retained peaks file in
column 2 imported above}
\item{outputFolderPath}{path to directory where build data key will be
output}
}
\value{
returns a data table in the format required to build a prediction
model
}
\description{
Training data files are provided in a table of file names including
the peak attribute tables in the first column and the corresponding tables
accepted peaks' mz and RT values in the second column. This function
reads the files and generates the table used for model building.
}
\examples{
TODO
}
|
# Define server logic required to draw a histogram ----
# NOTE(review): the "histogram" comment above looks stale -- this is the main
# server of a bibliometric Shiny app (collection import, filtering, tables).
server <- function(input, output, session) {
## stop the R session
# End the R process when the browser tab closes (single-user desktop app).
session$onSessionEnded(stopApp)
##
## file upload max size
options(shiny.maxRequestSize=100*1024^2)
### initial values ####
# Reactive store shared by every output below; "NA"/placeholder values mark
# state that has not been computed yet.
values = reactiveValues()
values$results=list("NA")
values$log="working..."
values$load="FALSE"
values$field="NA"
values$citField=values$colField=values$citSep="NA"
values$NetWords=values$NetRefs=values$ColNetRefs=matrix(NA,1,1)
values$Title="Network"
values$Histfield="NA"
values$histlog="working..."
values$kk=0
values$M=data.frame(PY=0)   # placeholder collection until a file is loaded
values$histsearch="NA"
values$citShortlabel="NA"
values$S=list("NA")
values$GR="NA"
# observe({
# volumes <- c(Home = fs::path_home(), getVolumes()())
# shinyFileSave(input, "save", roots=volumes, session=session)
# fileinfo <- parseSavePath(volumes, input$save)
# #data <- data.frame(a=c(1,2))
# if (nrow(fileinfo) > 0) {
# ext <- tolower(getFileNameExtension(fileinfo$datapath))
# #print(ext)
# switch(ext,
# xlsx={
# rio::export(values$M, file=as.character(fileinfo$datapath))
# },
# rdata={
# M=values$M
# save(M, file=as.character(fileinfo$datapath))
# })
# }
# })
# Import ("import": raw WoS/Scopus/Dimensions exports) or load ("load": a
# previously saved xlsx/RData collection), convert it to a bibliographic data
# frame M, stash it in 'values', and render it as a DataTable with clickable
# DOI links.  Triggered by the applyLoad button.
output$contents <- DT::renderDT({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, it will be a data frame with 'name',
# 'size', 'type', and 'datapath' columns. The 'datapath'
# column will contain the local filenames where the data can
# be found.
input$applyLoad
isolate({
inFile <- input$file1
if (!is.null(inFile) & input$load=="import") {
ext <- getFileNameExtension(inFile$datapath)
# Dispatch on database source, then on file extension (zip archives are
# unpacked and their members concatenated before conversion).
switch(
input$dbsource,
isi = {
switch(ext,
### WoS ZIP Files
zip = {
files = unzip(inFile$datapath)
D = unlist(lapply(files, function(l) {
Dpar = readFiles(l)
return(Dpar)
}))
withProgress(message = 'Conversion in progress',
value = 0, {
M <- convert2df(D,
dbsource = input$dbsource,
format = input$format)
})
},
### WoS Txt/Bib Files
{
D = readFiles(inFile$datapath)
withProgress(message = 'Conversion in progress',
value = 0, {
M <- convert2df(D,
dbsource = input$dbsource,
format = input$format)
})
})
},
scopus = {
switch(ext,
### Scopus ZIP Files
zip = {
files = unzip(inFile$datapath)
D = unlist(lapply(files, function(l) {
Dpar = readFiles(l)
return(Dpar)
}))
withProgress(message = 'Conversion in progress',
value = 0, {
M <- convert2df(D,
dbsource = input$dbsource,
format = input$format)
})
},
### WoS Txt/Bib Files
{
D = readFiles(inFile$datapath)
withProgress(message = 'Conversion in progress',
value = 0, {
M <- convert2df(D,
dbsource = input$dbsource,
format = "bibtex")
})
})
},
dimensions = {
switch(ext,
### Dimensions ZIP Files
zip = {
files = unzip(inFile$datapath)
withProgress(message = 'Conversion in progress',
value = 0, {
M <-
convert2df(files,
dbsource = input$dbsource,
format = input$format)
})
},
### Dimensions Xlsx/csv Files
xlsx = {
#D = readFiles(inFile$datapath)
withProgress(message = 'Conversion in progress',
value = 0, {
M <-
convert2df(
inFile$datapath,
dbsource = "dimensions",
format = "excel"
)
})
},
csv = {
#D = readFiles(inFile$datapath)
withProgress(message = 'Conversion in progress',
value = 0, {
M <-
convert2df(
inFile$datapath,
dbsource = "dimensions",
format = "csv"
)
})
})
}
)
} else if (!is.null(inFile) & input$load=="load") {
ext <- tolower(getFileNameExtension(inFile$datapath))
#print(ext)
switch(ext,
### excel format
xlsx={
M <- rio::import(inFile$datapath)
### M row names
### identify duplicated SRs
# Short-reference labels (SR) must be unique to serve as row names;
# duplicates get a numeric suffix appended.
SR=M$SR
tab=table(SR)
tab2=table(tab)
ind=as.numeric(names(tab2))
ind=ind[which(ind>1)]
if (length(ind)>0){
for (i in ind){
indice=names(which(tab==i))
for (j in indice){
indice2=which(SR==j)
SR[indice2]=paste(SR[indice2],as.character(1:length(indice2)),sep=" ")
}
}
}
row.names(M) <- SR
},
### RData format
# NOTE(review): these load() calls assume the saved file contains an
# object named 'M' -- confirm the save format guarantees that.
rdata={
load(inFile$datapath)
},
rda={
load(inFile$datapath)
},
rds={
load(inFile$datapath)
})
} else if (is.null(inFile)) {return(NULL)}
# Reset all derived state, store the new collection, and build the display
# table (cell text truncated to 150 chars; DOI column rendered as links).
values = initial(values)
values$M <- M
values$Morig = M
values$Histfield = "NA"
values$results = list("NA")
MData = as.data.frame(apply(values$M, 2, function(x) {
substring(x, 1, 150)
}), stringsAsFactors = FALSE)
MData$DOI <-
paste0(
'<a href=\"http://doi.org/',
MData$DI,
'\" target=\"_blank\">',
MData$DI,
'</a>'
)
nome = c("DOI", names(MData)[-length(names(MData))])
MData = MData[nome]
DT::datatable(MData,escape = FALSE,rownames = FALSE, extensions = c("Buttons"),
options = list(
pageLength = 50,
dom = 'Bfrtip',
buttons = list(list(extend = 'pageLength'),
list(extend = 'print')),
lengthMenu = list(c(10, 25, 50, -1),
c('10 rows', '25 rows', '50 rows', 'Show all')),
columnDefs = list(list(
className = 'dt-center', targets = 0:(length(names(MData)) - 1)
))
),
class = 'cell-border compact stripe'
) %>%
formatStyle(
names(MData),
backgroundColor = 'white',
textAlign = 'center',
fontSize = '70%'
)
})
})
# Export the current collection as .xlsx (via rio) or .RData, named with
# today's date and the chosen extension.
# BUG FIX: 'content <- function(file)' used the assignment operator inside
# the downloadHandler() call -- the function was still passed positionally,
# but a global variable 'content' leaked as a side effect.  Now a named
# argument.
output$collection.save <- downloadHandler(
filename = function() {
paste("Bibliometrix-Export-File-", Sys.Date(), ".",input$save_file, sep="")
},
content = function(file) {
switch(input$save_file,
xlsx={suppressWarnings(rio::export(values$M, file=file))},
RData={
M=values$M  # save() needs a named object so reload restores 'M'
save(M, file=file)
})
},
contentType = input$save_file
)
# Report how many documents survived conversion; the 1-row placeholder
# collection counts as zero.
output$textLog <- renderUI({
  n_docs <- dim(values$M)[1]
  if (n_docs == 1) {
    n_docs <- 0
  }
  msg <- paste("Number of Documents ", n_docs)
  textInput("textLog", "Conversion results", value = msg)
})
### FILTERS MENU ####
### Filters uiOutput
# Show "kept of total" document counts: the filtered collection versus the
# originally loaded one.
output$textDim <- renderUI({
  kept <- dim(values$M)[1]
  total <- dim(values$Morig)[1]
  msg <- paste("Documents ", kept, " of ", total)
  textInput("textDim", "Number of Documents", value = msg)
})
# Multi-select over every document type present in the ORIGINAL collection,
# all selected by default.
output$selectType <- renderUI({
  doc_types <- sort(unique(values$Morig$DT))
  selectInput("selectType", "Document Type",
              choices = doc_types,
              selected = doc_types,
              multiple = TRUE)
})
# Publication-year range slider spanning the original collection (no
# thousands separator on the year labels).
output$sliderPY <- renderUI({
  yrs <- values$Morig$PY
  lo <- min(yrs, na.rm = TRUE)
  hi <- max(yrs, na.rm = TRUE)
  sliderInput("sliderPY", "Publication Year",
              min = lo, max = hi, value = c(lo, hi), sep = "")
})
# Multi-select over every source (journal) in the original collection, all
# selected by default.
output$selectSource <- renderUI({
  sources <- sort(unique(values$Morig$SO))
  selectInput("selectSource", "Source",
              choices = sources,
              selected = sources,
              multiple = TRUE)
})
# Total-citation range slider spanning the original collection.
output$sliderTC <- renderUI({
  tc <- values$Morig$TC
  lo <- min(tc, na.rm = TRUE)
  hi <- max(tc, na.rm = TRUE)
  sliderInput("sliderTC", "Total Citation",
              min = lo, max = hi, value = c(lo, hi))
})
### End Filters uiOutput
# Apply the year / citation / document-type / Bradford-zone filters to the
# original collection, store the result back into values$M, and render it.
output$dataFiltered <- DT::renderDT({
M=values$Morig
B=bradford(M)$table   # Bradford source zones computed on the FULL collection
M=subset(M, M$PY>=input$sliderPY[1] & M$PY<=input$sliderPY[2])
M=subset(M, M$TC>=input$sliderTC[1] & M$TC<=input$sliderTC[2])
M=subset(M, M$DT %in% input$selectType)
# Keep sources in the requested Bradford zone(s): core = Zone 1 only,
# zone2 = Zones 1-2, all = every source.
switch(input$bradfordSources,
"core"={
SO=B$SO[B$Zone %in% "Zone 1"]
},
"zone2"={
SO=B$SO[B$Zone %in% c("Zone 1", "Zone 2")]
},
"all"={SO=B$SO})
M=M[M$SO %in% SO,]
values<-initial(values)   # reset all results derived from the old subset
values$M=M
# Truncate cell text for display only; values$M keeps the full strings.
Mdisp=as.data.frame(apply(values$M,2,function(x){substring(x,1,150)}),stringsAsFactors = FALSE)
if (dim(Mdisp)[1]>0){
DT::datatable(Mdisp, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Filtered_DataTable',
title = "My Title",
header = TRUE),
list(extend = 'excel',
filename = 'Filtered_DataTable',
title = "My Title",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(Mdisp))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(Mdisp), backgroundColor = 'white',textAlign = 'center', fontSize = '70%')
}else{Mdisp=data.frame(Message="Empty collection",stringsAsFactors = FALSE, row.names = " ")}
})
### DATASET MENU ####
# Render the "Main Information" summary table (descriptive() type "tab1")
# with copy/csv/excel/pdf/print export buttons.
output$MainInfo <- DT::renderDT({
res <- descriptive(values,type="tab1")
TAB<-res$TAB
values <-res$values
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 30, dom = 'Bfrtip',ordering=F,
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%')
})
#####################################
#####################################
# The six "descriptive" tables below previously shared one copy-pasted
# DT::renderDT definition each, differing only in the descriptive() table
# type -- and ALL of their csv/excel/pdf export buttons were wrongly named
# 'Main_Information'.  A single local factory removes the ~130 duplicated
# lines and gives each table its own export file name.
renderDescriptiveDT <- function(type, exportName) {
DT::renderDT({
res <- descriptive(values, type = type)
TAB <- res$TAB
# kept from the original blocks; rebinding is local to this reactive --
# NOTE(review): confirm the reassignment is still wanted.
values <- res$values
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 30, dom = 'Bfrtip', ordering = F,
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = exportName,
title = " ",
header = TRUE),
list(extend = 'excel',
filename = exportName,
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = exportName,
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white', textAlign = 'center', fontSize = '100%')
})
}
output$Mos.Prod.Authors <- renderDescriptiveDT("tab3", "Most_Productive_Authors")
output$Most.Cited.Papers <- renderDescriptiveDT("tab4", "Most_Cited_Papers")
output$Most.Prod.Countries <- renderDescriptiveDT("tab5", "Most_Productive_Countries")
output$TC.Per.Countries <- renderDescriptiveDT("tab6", "Citations_per_Country")
output$Most.Rel.Sources <- renderDescriptiveDT("tab7", "Most_Relevant_Sources")
output$Most.Rel.Keywords <- renderDescriptiveDT("tab8", "Most_Relevant_Keywords")
#####################################
#####################################
output$CAGR <- renderText({
  # Compound annual growth rate of yearly publication counts, cached in values$GR.
  yearly_counts <- table(values$M$PY)
  n_years <- dim(yearly_counts)[1]
  growth <- ((yearly_counts[n_years] / yearly_counts[1])^(1 / (n_years - 1)) - 1) * 100
  values$GR <- round(growth, 2)
  paste0("Annual Growth Rate: ", values$GR, "%")
})
output$AnnualProdPlot <- renderPlotly({
# Annual Scientific Production: area chart of documents per publication year.
res <- descriptive(values,type="tab2")
values <-res$values
Tab=table(values$results$Years)
## inserting missing years
# Years with zero documents are absent from the table; add them back with Freq = 0
# so the line/area does not skip gaps.
YY=setdiff(seq(min(values$results$Years),max(values$results$Years)),names(Tab))
Y=data.frame(Year=as.numeric(c(names(Tab),YY)),Freq=c(as.numeric(Tab),rep(0,length(YY))))
Y=Y[order(Y$Year),]
names(Y)=c("Year","Freq")
# The 'text' aesthetic feeds the plotly hover tooltip (via plot.ly helper).
g=ggplot2::ggplot(Y, aes(x = Y$Year, y = Y$Freq, text=paste("Year: ",Y$Year,"\nN .of Documents: ",Y$Freq))) +
geom_line(aes(group="NA")) +
geom_area(aes(group="NA"),fill = '#002F80', alpha = .5) +
labs(x = 'Year'
, y = 'Articles'
, title = "Annual Scientific Production") +
# Label every second year on the x axis to avoid crowding.
scale_x_continuous(breaks= (Y$Year[seq(1,length(Y$Year),by=2)])) +
theme(text = element_text(color = "#444444")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 0)
,axis.title.x = element_text(hjust = 0)
)
plot.ly(g)
})#, height = 500, width =900)
output$AnnualProdTable <- DT::renderDT({
  # Tabular counterpart of the annual-production plot, with export buttons.
  tbl <- values$TAB
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Annual_Production', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Annual_Production', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Annual_Production', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$AnnualTotCitperYearPlot <- renderPlotly({
  # Average article citations per year, as an area chart; the per-year table
  # is cached in values$AnnualTotCitperYear for the companion table output.
  if (values$results[[1]] == "NA") {
    values$results = biblioAnalysis(values$M)
  }
  x = values$results
  # Total Citation Plot: count and mean of citations per publication year.
  Table2 = aggregate(x$TotalCitation, by = list(x$Years), length)
  Table2$xx = aggregate(x$TotalCitation, by = list(x$Years), mean)$x
  Table2$Annual = NA
  # Current year extracted from the tail of date() (e.g. "... 2023").
  d = date()
  d = as.numeric(substring(d, nchar(d) - 3, nchar(d)))
  Table2$Years = d - Table2$Group.1
  # NOTE(review): citable years is 0 for the current year, making this Inf — verify upstream.
  Table2$Annual = Table2$xx / Table2$Years
  names(Table2) = c("Year", "N", "MeanTCperArt", "MeanTCperYear", "CitableYears")
  ## inserting missing years with zero counts
  YY = setdiff(seq(min(x$Years, na.rm = TRUE), max(x$Years, na.rm = TRUE)), Table2$Year)
  # Fixed: was `if (length(YY>0))`, which measures length(YY > 0) — the
  # comparison belongs outside length().
  if (length(YY) > 0) {
    YY = data.frame(YY, 0, 0, 0, 0)
    names(YY) = c("Year", "N", "MeanTCperArt", "MeanTCperYear", "CitableYears")
    Table2 = rbind(Table2, YY)
    Table2 = Table2[order(Table2$Year), ]
    row.names(Table2) = Table2$Year
  }
  values$AnnualTotCitperYear = Table2
  Table2$group = "A"
  # 'text' aesthetic feeds the plotly hover tooltip.
  g = ggplot(Table2, aes(x = Table2$Year, y = Table2$MeanTCperYear, text = paste("Year: ", Table2$Year, "\nAverage Citations per Year: ", round(Table2$MeanTCperYear, 1)))) +
    geom_line(aes(x = Table2$Year, y = Table2$MeanTCperYear, group = Table2$group)) +
    geom_area(aes(x = Table2$Year, y = Table2$MeanTCperYear, group = Table2$group), fill = '#002F80', alpha = .5) +
    labs(x = 'Year'
         , y = 'Citations'
         , title = "Average Article Citations per Year") +
    scale_x_continuous(breaks = (Table2$Year[seq(1, length(Table2$Year), by = 2)])) +
    theme(text = element_text(color = "#444444")
          , panel.background = element_rect(fill = '#EFEFEF')
          , panel.grid.minor = element_line(color = '#FFFFFF')
          , panel.grid.major = element_line(color = '#FFFFFF')
          , plot.title = element_text(size = 24)
          , axis.title = element_text(size = 14, color = '#555555')
          , axis.title.y = element_text(vjust = 1, angle = 0)
          , axis.title.x = element_text(hjust = 0)
    )
  plot.ly(g)
})
output$AnnualTotCitperYearTable <- DT::renderDT({
  # Table of mean citations per article/year (built by the plot reactive above).
  tbl <- values$AnnualTotCitperYear
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Annual_Total_Citation_per_Year', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Annual_Total_Citation_per_Year', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Annual_Total_Citation_per_Year', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
# Three Fields Plot (Sankey). NOTE(review): output ID "ThreeFielsPlot" is a typo
# for "ThreeFieldsPlot" but must match the UI element, so it cannot be renamed here.
output$ThreeFielsPlot <- networkD3::renderSankeyNetwork({
# Taking this dependency first makes the apply button the only trigger;
# the isolate() below keeps field/size inputs from re-triggering the render.
input$apply3F
isolate({
fields=c(input$LeftField, input$CentralField, input$RightField)
threeFieldsPlot(values$M, fields=fields,n=c(input$LeftFieldn, input$CentralFieldn,input$RightFieldn), width=1200,height=600)
})
})
### SOURCES MENU ####
output$MostRelSourcesPlot <- renderPlotly({
# Horizontal bar chart of the top-k most relevant sources; the full table is
# cached in values$TABSo for the companion table output.
res <- descriptive(values,type="tab7")
values <-res$values
values$TABSo<-values$TAB
#xx=as.data.frame(values$results$Sources)
xx<- values$TAB
# Clamp the requested number of sources to the available rows.
if (input$MostRelSourcesK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelSourcesK}
#xx=xx[1:k,]
xx=subset(xx, row.names(xx) %in% row.names(xx)[1:k])
xx$Articles=as.numeric(xx$Articles)
# Truncate long journal names for axis readability.
xx$Sources=substr(xx$Sources,1,50)
g=ggplot2::ggplot(data=xx, aes(x=xx$Sources, y=xx$Articles, fill=-xx$Articles,text=paste("Source: ",xx$Sources,"\nN. of Documents: ",xx$Articles))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$Sources))+
labs(title="Most Relevant Sources", x = "Sources")+
labs(y = "N. of Documents")+
theme_minimal()+
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostRelSourcesTable <- DT::renderDT({
  # Full most-relevant-sources table (cached by the plot reactive above).
  tbl <- values$TABSo
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Relevant_Sources', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Relevant_Sources', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Relevant_Sources', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '100%')
})
output$MostRelCitSourcesPlot <- renderPlotly({
# Horizontal bar chart of the most cited sources, counted from the reference
# lists (CR_SO tag); the full table is cached in values$TABSoCit.
values$M=metaTagExtraction(values$M,"CR_SO")
TAB=tableTag(values$M,"CR_SO")
TAB=data.frame(Sources=names(TAB),Articles=as.numeric(TAB),stringsAsFactors = FALSE)
values$TABSoCit<-TAB
#xx=as.data.frame(values$results$Sources)
xx<- TAB
# Clamp the requested number of sources to the available rows.
if (input$MostRelCitSourcesK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelCitSourcesK}
#xx=xx[1:k,]
xx=subset(xx, row.names(xx) %in% row.names(xx)[1:k])
xx$Articles=as.numeric(xx$Articles)
# Truncate long journal names for axis readability.
xx$Sources=substr(xx$Sources,1,50)
g=ggplot2::ggplot(data=xx, aes(x=xx$Sources, y=xx$Articles, fill=-xx$Articles,text=paste("Source: ",xx$Sources,"\nN. of Documents: ",xx$Articles))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$Sources))+
labs(title="Most Cited Sources", x = "Sources")+
labs(y = "N. of Documents")+
theme_minimal()+
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostRelCitSourcesTable <- DT::renderDT({
  # Full most-cited-sources table (cached by the plot reactive above).
  tbl <- values$TABSoCit
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Cited_Sources', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Cited_Sources', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Cited_Sources', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$bradfordPlot <- renderPlotly({
  # Bradford's law source clustering; result is reused by bradfordTable.
  bf <- bradford(values$M)
  values$bradford <- bf
  plot.ly(bf$graph)
})#,height = 600)
output$bradfordTable <- DT::renderDT({
  # Bradford's law zone table. Fixed: the options request the Buttons toolbar
  # (dom = 'Bfrtip') but `extensions = c("Buttons")` was missing, so the export
  # buttons never rendered; every sibling table declares the extension.
  tbl <- values$bradford$table
  DT::datatable(tbl, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 20, dom = 'Bfrtip',
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              list(extend = 'csv',
                                                   filename = 'Bradford_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Bradford_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Bradford_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10, 25, 50, -1),
                                                 c('10 rows', '25 rows', '50 rows', 'Show all')),
                               columnDefs = list(list(className = 'dt-center',
                                                      targets = 0:(length(names(tbl)) - 1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(tbl), backgroundColor = 'white', textAlign = 'center')
})
output$SourceHindexPlot <- renderPlotly({
  # Source impact (H-index) plot; recomputed only when the apply button fires.
  input$applyHsource
  withProgress(message = 'Calculation in progress', value = 0, {
    isolate({
      res <- Hindex_plot(values, type = "source")
    })
  })
  isolate(plot.ly(res$g))
})#, height = 500, width =900)
output$SourceHindexTable <- DT::renderDT({
  # Source impact table (values$H is filled by Hindex_plot in the plot reactive).
  tbl <- values$H
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Source_Impact', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Source_Impact', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Source_Impact', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white', textAlign = 'center')
})
output$soGrowthPlot <- renderPlot({
# Source growth over time: loess-smoothed occurrence curves for the top sources,
# with end-of-curve labels placed via ggrepel. Intermediate data frames are
# cached in values$PYSO / values$SODF for the companion table output.
if (input$SOse=="Yes"){se=TRUE}else{se=FALSE}
if (input$cumSO=="Cum"){
cdf=TRUE
laby="Cumulate occurrences (loess smoothing)"
}else{
cdf=FALSE
laby="Annual occurrences (loess smoothing)"}
values$PYSO=sourceGrowth(values$M,input$topSO, cdf=cdf)
# Reshape the wide per-source matrix into long (Year, Source, Freq) form.
term=names(values$PYSO)[-1]
term=rep(term,each=dim(values$PYSO)[1])
n=dim(values$PYSO)[1]*(dim(values$PYSO)[2]-1)
freq=matrix(as.matrix(values$PYSO[,-1]),n,1)
values$SODF=data.frame(Year=rep(values$PYSO$Year,(dim(values$PYSO)[2]-1)),Source=term, Freq=freq)
g=ggplot(values$SODF)+
geom_smooth(aes(x=values$SODF$Year,y=values$SODF$Freq, group=values$SODF$Source, color=values$SODF$Source),se=se, method = "loess", formula="y ~ x")+
labs(x = 'Year'
, y = laby
, title = "Source Growth") +
#ylim(0, NA) +
# At most ~20 x-axis breaks regardless of the year span.
scale_x_continuous(breaks= (values$PYSO$Year[seq(1,length(values$PYSO$Year),by=ceiling(length(values$PYSO$Year)/20))])) +
geom_hline(aes(yintercept=0, alpha=0.1))+
theme(text = element_text(color = "#444444"), legend.position="none"
,plot.caption = element_text(size = 9, hjust = 0.5, color = "black", face = "bold")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0.95, angle = 0)
,axis.text.x = element_text(size=10)
)
# Pull the smoothed curves back out of the built plot so each curve can be
# labelled near its right end (second-to-last smoothed x position).
DFsmooth=(ggplot_build(g)$data[[1]])
DFsmooth$group=factor(DFsmooth$group, labels=levels(values$SODF$Source))
maximum=sort(unique(DFsmooth$x),decreasing=TRUE)[2]
DF2=subset(DFsmooth, x == maximum)
g=g+
ggrepel::geom_text_repel(data = DF2, aes(label = DF2$group, colour = DF2$group, x =DF2$x, y = DF2$y), hjust = -.1)
suppressWarnings(plot(g))
},height = 600, width = 900)
output$soGrowthtable <- DT::renderDT({
  # Source dynamics table (values$PYSO is filled by the soGrowthPlot reactive).
  tbl <- values$PYSO
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Source_Dynamics', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Source_Dynamics', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Source_Dynamics', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
    options = list(
      pageLength = 50, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white')
})
### AUTHORS MENU ####
### Authors ----
output$MostRelAuthorsPlot <- renderPlotly({
# Horizontal bar chart of the top-k authors; measure selectable as total,
# percentage, or fractionalized counts. Full table cached in values$TABAu.
res <- descriptive(values,type="tab3")
values <-res$values
values$TABAu<-values$TAB
#xx=as.data.frame(values$results$Authors, stringsAsFactors = FALSE)
xx=values$TABAu
# Select the measure columns: t = raw counts (cols 1:2), p = percentage of all
# documents, f = fractionalized counts (cols 3:4).
switch(input$AuFreqMeasure,
t={
lab="N. of Documents"
xx=xx[,1:2]
},
p={xx=xx[,1:2]
xx[,2]=as.numeric(xx[,2])/dim(values$M)[1]*100
lab="N. of Documents (in %)"
},
f={
xx=xx[,3:4]
lab="N. of Documents (Fractionalized)"
})
xx[,2]=as.numeric(xx[,2])
# Clamp the requested number of authors to the available rows.
if (input$MostRelAuthorsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelAuthorsK}
xx=xx[1:k,]
xx[,2]=round(xx[,2],1)
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Author: ",xx[,1],"\n",lab,": ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Relevant Authors", x = "Authors")+
labs(y = lab)+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostRelAuthorsTable <- DT::renderDT({
  # Full most-relevant-authors table (cached by the plot reactive above).
  tbl <- values$TABAu
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Relevant_Authors', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Relevant_Authors', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Relevant_Authors', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$AuthorHindexPlot <- renderPlotly({
  # Author impact (H-index) plot; recomputed only when the apply button fires.
  input$applyHauthor
  withProgress(message = 'Calculation in progress', value = 0, {
    isolate({
      res <- Hindex_plot(values, type = "author")
    })
  })
  isolate(plot.ly(res$g))
})#, height = 500, width =900)
output$AuthorHindexTable <- DT::renderDT({
  # Author impact table; fourth column is rounded to three decimals.
  tbl <- values$H
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Author_Impact', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Author_Impact', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Author_Impact', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white', textAlign = 'center') %>%
    formatRound(names(tbl)[4], 3)
})
output$TopAuthorsProdPlot <- renderPlotly({
  # Top-k authors' production over time; result cached for the tables below.
  prod <- authorProdOverTime(values$M, k = input$TopAuthorsProdK, graph = FALSE)
  values$AUProdOverTime <- prod
  plot.ly(prod$graph)
})#, height = 550, width =1100)
output$TopAuthorsProdTable <- DT::renderDT({
  # Per-author production-over-time table; last column rounded to 3 decimals.
  tbl <- values$AUProdOverTime$dfAU
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Author_Production_Over_Time', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Author_Production_Over_Time', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Author_Production_Over_Time', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '100%') %>%
    formatRound(names(tbl)[ncol(tbl)], 3)
})
output$TopAuthorsProdTablePapers <- DT::renderDT({
  # Document-level production-over-time table with clickable DOI links
  # (escape = FALSE so the anchor tags render as HTML).
  tbl <- values$AUProdOverTime$dfPapersAU
  tbl$DOI <- paste0('<a href="http://doi.org/', tbl$DOI,
                    '" target="_blank">', tbl$DOI, '</a>')
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Author_Production_Over_Time_Documents', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Author_Production_Over_Time_Documents', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Author_Production_Over_Time_Documents', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, escape = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '100%') %>%
    formatRound(names(tbl)[ncol(tbl)], 3)
})
output$lotkaPlot <- renderPlotly({
# Lotka's law: observed author-productivity distribution (area) versus the
# theoretical inverse-square distribution (dashed line).
values$lotka=lotka(biblioAnalysis(values$M))
AuProd=values$lotka$AuthorProd
# Theoretical frequencies from the fitted Lotka constant C, normalized to sum 1.
AuProd$Theoretical=10^(log10(values$lotka$C)-2*log10(AuProd[,1]))
AuProd$Theoretical=AuProd$Theoretical/sum(AuProd$Theoretical)
g=ggplot2::ggplot(AuProd, aes(x = AuProd$N.Articles, y = AuProd$Freq*100, text=paste("N.Articles: ",AuProd$N.Articles,"\n% of production: ",round(AuProd$Freq*100,1)))) +
geom_line(aes(group="NA")) +
geom_area(aes(group="NA"),fill = '#002F80', alpha = .5) +
geom_line(aes(y=AuProd$Theoretical*100, group="NA"),linetype = "dashed",color="black",alpha=0.8)+
xlim(0,max(AuProd$N.Articles)+1)+
labs(x = 'Documents written'
, y = '% of Authors'
, title = "The Frequency Distribution of Scientific Productivity") +
#scale_x_continuous(breaks= (Y$Year[seq(1,length(Y$Year),by=2)])) +
theme(text = element_text(color = "#444444")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0)
)
plot.ly(g)
})#,height = 600)
output$lotkaTable <- DT::renderDT({
  # Lotka's law author-productivity table. Fixed: the options request the
  # Buttons toolbar (dom = 'Bfrtip') but `extensions = c("Buttons")` was
  # missing, so the export buttons never rendered.
  names(values$lotka$AuthorProd) = c("Documents written", "N. of Authors", "Proportion of Authors")
  DT::datatable(values$lotka$AuthorProd, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 20, dom = 'Bfrtip',
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              list(extend = 'csv',
                                                   filename = 'Lotka_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Lotka_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Lotka_Law',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10, 25, 50, -1),
                                                 c('10 rows', '25 rows', '50 rows', 'Show all')),
                               columnDefs = list(list(className = 'dt-center',
                                                      targets = 0:(length(names(values$lotka$AuthorProd)) - 1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(values$lotka$AuthorProd), backgroundColor = 'white', textAlign = 'center') %>%
    formatRound(names(values$lotka$AuthorProd)[3], 3)
})
### Affiliations ----
output$MostRelAffiliationsPlot <- renderPlotly({
# Horizontal bar chart of the most relevant affiliations, with ("Y") or without
# name disambiguation; full table cached in values$TABAff.
if (input$disAff=="Y"){
res <- descriptive(values,type="tab11")
xx=as.data.frame(values$results$Affiliations, stringsAsFactors = FALSE)
}else{
res <- descriptive(values,type="tab12")
xx=values$TAB
names(xx)=c("AFF","Freq")
}
values <-res$values
values$TABAff <- values$TAB
# Clamp the requested number of affiliations to the available rows.
if (input$MostRelAffiliationsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelAffiliationsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx$AFF, y=xx$Freq, fill=-xx$Freq, text=paste("Affiliation: ",xx$AFF,"\nN. of Author in the Affiliation: ",xx$Freq))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$AFF))+
labs(title="Most Relevant Affiliations: Frequency distribution of affiliations (of all co-authors for each document)", x = "Affiliations")+
labs(y = "N. of Author in the Affiliation")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostRelAffiliationsTable <- DT::renderDT({
  # Full most-relevant-affiliations table (cached by the plot reactive above).
  tbl <- values$TABAff
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Relevant_Affiliations', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Relevant_Affiliations', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Relevant_Affiliations', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
### Countries ----
output$MostRelCountriesPlot <- renderPlotly({
# Stacked bar chart of corresponding authors' countries, split into single-
# country (SCP) and multiple-country (MCP) publications.
res <- descriptive(values,type="tab5")
values <-res$values
values$TABCo <- values$TAB
k=input$MostRelCountriesK
xx=values$results$CountryCollaboration[1:k,]
xx=xx[order(-(xx$SCP+xx$MCP)),]
# Stack SCP and MCP counts into long form for the fill aesthetic.
xx1=cbind(xx[,1:2],rep("SCP",k))
names(xx1)=c("Country","Freq","Collaboration")
xx2=cbind(xx[,c(1,3)],rep("MCP",k))
names(xx2)=c("Country","Freq","Collaboration")
xx=rbind(xx2,xx1)
# Fix factor levels so countries keep the total-production ordering.
xx$Country=factor(xx$Country,levels=xx$Country[1:dim(xx2)[1]])
g=suppressWarnings(ggplot2::ggplot(data=xx, aes(x=xx$Country, y=xx$Freq,fill=xx$Collaboration, text=paste("Country: ",xx$Country,"\nN.of Documents: ",xx$Freq))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_x_discrete(limits = rev(levels(xx$Country)))+
scale_fill_discrete(name="Collaboration",
breaks=c("SCP","MCP"))+
labs(title = "Corresponding Author's Country", x = "Countries", y = "N. of Documents",
caption = "SCP: Single Country Publications, MCP: Multiple Country Publications")+
theme_minimal() +
theme(plot.caption = element_text(size = 9, hjust = 0.5,
color = "blue", face = "italic"))+
coord_flip())
plot.ly(g)
})#, height = 500, width =900)
output$MostRelCountriesTable <- DT::renderDT({
  # Corresponding-author country table (cached by the plot reactive above).
  tbl <- values$TABCo
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Relevant_Countries_By_Corresponding_Author', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Relevant_Countries_By_Corresponding_Author', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Relevant_Countries_By_Corresponding_Author', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$countryProdPlot <- renderPlotly({
  # World map of scientific production per country; reused by countryProdTable.
  world <- mapworld(values$M)
  values$mapworld <- world
  plot.ly(world$g)
})#, height = 500, width =900)
output$countryProdTable <- DT::renderDT({
  # Country production table (cached by the map reactive above).
  tbl <- values$mapworld$tab
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Country_Production', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Country_Production', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Country_Production', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$MostCitCountriesPlot <- renderPlotly({
  # Bar chart of the most cited countries (total or per-year citations);
  # full table cached in values$TABCitCo.
  res <- descriptive(values, type = "tab6")
  values <- res$values
  values$TABCitCo <- values$TAB
  xx = values$TAB
  xx[, 2] = as.numeric(xx[, 2])
  xx[, 3] = as.numeric(xx[, 3])
  # Clamp the requested number of countries to the available rows.
  if (input$MostCitCountriesK > dim(xx)[1]) {
    k = dim(xx)[1]
  } else {
    # Fixed: was input$MostRelAffiliationsK, a copy-paste bug that tied this
    # plot's size to the Affiliations slider instead of its own control.
    k = input$MostCitCountriesK
  }
  if (input$CitCountriesMeasure == "TC") {
    xx = xx[1:k, c(1, 2)]
    laby = "N. of Citations"
  } else {
    # Per-year measure: re-rank by citations per year before truncating.
    xx = xx[order(-xx[, 3]), ]
    xx = xx[1:k, c(1, 3)]
    laby = "N. of Citations per Year"
  }
  g = ggplot2::ggplot(data = xx, aes(x = xx[, 1], y = xx[, 2], fill = -xx[, 2], text = paste("Country: ", xx[, 1], "\n", laby, ": ", xx[, 2]))) +
    geom_bar(aes(group = "NA"), stat = "identity") +
    scale_fill_continuous(type = "gradient") +
    scale_x_discrete(limits = rev(xx[, 1])) +
    labs(title = "Most Cited Countries", x = "Countries") +
    labs(y = laby) +
    theme_minimal() +
    guides(fill = FALSE) +
    coord_flip()
  plot.ly(g)
})#, height = 500, width =900)
output$MostCitCountriesTable <- DT::renderDT({
  # Full most-cited-countries table (cached by the plot reactive above).
  tbl <- values$TABCitCo
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Cited_Countries', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Cited_Countries', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Cited_Countries', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
### DOCUMENTS MENU ####
### Documents ----
output$MostCitDocsPlot <- renderPlotly({
# Bar chart of the globally most cited documents (total or per-year citations);
# full table cached in values$TABGlobDoc.
res <- descriptive(values,type="tab4")
values <-res$values
values$TABGlobDoc <- values$TAB
# Pick the citation measure column: 2 = total, 3 = per year.
if (input$CitDocsMeasure=="TC"){
xx=data.frame(values$results$MostCitedPapers[1],values$results$MostCitedPapers[2], stringsAsFactors = FALSE,row.names=NULL)
lab="Total Citations"} else {
xx=data.frame(values$results$MostCitedPapers[1],values$results$MostCitedPapers[3], stringsAsFactors = FALSE,row.names=NULL)
lab="Total Citations per Year"
}
# Clamp the requested number of documents to the available rows.
if (input$MostCitDocsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostCitDocsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Document: ", xx[,1],"\nGlobal Citations: ",xx[,2]))) +
geom_bar(stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Cited Documents", x = "Documents")+
labs(y = lab)+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostCitDocsTable <- DT::renderDT({
  # Full globally-most-cited-documents table (cached by the plot reactive above).
  tbl <- values$TABGlobDoc
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Global_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Global_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Global_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$MostLocCitDocsPlot <- renderPlotly({
# Bar chart of documents most cited within the collection (local citations);
# full table cached in values$TABLocDoc.
withProgress(message = 'Calculation in progress',
value = 0, {
TAB <-localCitations(values$M, fast.search=TRUE, sep = input$LocCitSep)$Paper
})
xx=data.frame(Document=as.character(TAB[,1]), DOI=as.character(TAB[,2]), Year=TAB[,3], "Local Citations"=TAB[,4], "Global Citations"=TAB[,5],stringsAsFactors = FALSE)
values$TABLocDoc=xx
# Clamp the requested number of documents to the available rows.
if (input$MostLocCitDocsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostLocCitDocsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,4], fill=-xx[,4], text=paste("Document: ",xx[,1],"\nLocal Citations: ",xx[,4]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Local Cited Documents", x = "Documents")+
labs(y = "Local Citations")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostLocCitDocsTable <- DT::renderDT({
  # Locally most cited documents with clickable DOI links
  # (escape = FALSE so the anchor tags render as HTML).
  tbl <- values$TABLocDoc
  tbl$DOI <- paste0('<a href="http://doi.org/', tbl$DOI,
                    '" target="_blank">', tbl$DOI, '</a>')
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Local_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Local_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Local_Cited_Documents', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
### Cited References ----
output$MostCitRefsPlot <- renderPlotly({
# Bar chart of the most cited references in the collection's reference lists;
# full table cached in values$TABCitRef.
CR=citations(values$M,sep=input$CitRefsSep)$Cited
TAB=data.frame(names(CR),as.numeric(CR),stringsAsFactors = FALSE)
names(TAB)=c("Cited References", "Citations")
values$TABCitRef=TAB
xx=values$TABCitRef
# Clamp the requested number of references to the available rows.
if (input$MostCitRefsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostCitRefsK}
xx=xx[1:k,]
#xx[,1]=substr(xx[,1],1,50)
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Reference: ",xx[,1],"\nLocal Citations: ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
# Truncate axis labels to 50 chars while keeping full strings in the data.
scale_x_discrete(limits = rev(xx[,1]), labels=substr(rev(xx[,1]),1,50))+
labs(title="Most Cited References", x = "References")+
labs(y = "Local Citations")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostCitRefsTable <- DT::renderDT({
  # Most cited references with a Google Scholar search link per reference
  # (escape = FALSE so the anchor tags render as HTML).
  tbl <- values$TABCitRef
  tbl$link <- trimES(gsub("[[:punct:]]", " ", reduceRefs(tbl[, 1])))
  tbl$link <- paste0('<a href="https://scholar.google.it/scholar?hl=en&as_sdt=0%2C5&q=',
                     tbl$link, '" target="_blank">', 'link', '</a>')
  tbl <- tbl[, c(3, 1, 2)]
  names(tbl)[1] <- "Google Scholar"
  export_btns <- list(
    'pageLength',
    list(extend = 'copy'),
    list(extend = 'csv',   filename = 'Most_Local_Cited_References', title = " ", header = TRUE),
    list(extend = 'excel', filename = 'Most_Local_Cited_References', title = " ", header = TRUE),
    list(extend = 'pdf',   filename = 'Most_Local_Cited_References', title = " ", header = TRUE),
    list(extend = 'print')
  )
  DT::datatable(
    tbl, rownames = FALSE, escape = FALSE, extensions = c("Buttons"),
    class = 'cell-border compact stripe',
    options = list(
      pageLength = 20, dom = 'Bfrtip',
      buttons = export_btns,
      lengthMenu = list(c(10, 25, 50, -1),
                        c('10 rows', '25 rows', '50 rows', 'Show all')),
      columnDefs = list(list(className = 'dt-center',
                             targets = 0:(ncol(tbl) - 1)))
    )
  ) %>%
    formatStyle(names(tbl), backgroundColor = 'white',
                textAlign = 'center', fontSize = '110%')
})
output$rpysPlot <- renderPlotly({
# Reference Publication Year Spectroscopy; the full result is cached in
# values$res so rpysTable and crTable can reuse it.
values$res <- rpys(values$M, sep=input$rpysSep, graph=FALSE)
#values$res <- rpys(values$M, sep=input$rpysSep, timespan=input$sliderYears ,graph=FALSE)
plot.ly(values$res$spectroscopy)
})#,height = 600, width = 900)
output$rpysTable <- DT::renderDT({
# Yearly RPYS summary table with export buttons.
rpysData=values$res$rpysTable
DT::datatable(rpysData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(rpysData))-1))))) %>%
formatStyle(names(rpysData), backgroundColor = 'white')
#return(Data)
})
output$crTable <- DT::renderDT({
# Cited references behind the RPYS plot, newest year first, then by
# descending local citation count.
crData=values$res$CR
crData=crData[order(-as.numeric(crData$Year),-crData$Freq),]
names(crData)=c("Year", "Reference", "Local Citations")
DT::datatable(crData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(crData))-1))))) %>%
formatStyle(names(crData), backgroundColor = 'white')
#return(Data)
})
### Words ----
output$MostRelWordsPlot <- renderPlotly({
# Horizontal bar chart of the most frequent words in the selected field.
WR=wordlist(values$M,Field=input$MostRelWords,n=Inf,measure="identity")$v
TAB=data.frame(names(WR),as.numeric(WR),stringsAsFactors = FALSE)
names(TAB)=c("Words", "Occurrences")
# Cache the full table so MostRelWordsTable can reuse it.
values$TABWord=TAB
xx=values$TABWord
# Clamp the requested number of words to the rows actually available.
if (input$MostRelWordsN>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelWordsN}
xx=xx[1:k,]
# Axis label follows the metadata field that was analyzed.
switch(input$MostRelWords,
ID={lab="Keywords Plus"},
DE={lab="Author's Keywords"},
TI={lab="Title's Words"},
AB={lab="Abstract's Words"})
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste(lab,": ",xx[,1],"\nOccurrences: ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
# Reversed limits put the most frequent word on top after coord_flip.
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Relevant Words", x = lab)+
labs(y = "Occurrences")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
output$MostRelWordsTable <- DT::renderDT({
# Word-frequency table built from the cached values$TABWord.
TAB <- values$TABWord
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
output$wordcloud <- wordcloud2::renderWordcloud2({
# Word cloud for the selected field; appearance (size, padding, font,
# colors, rotation, shape) is driven entirely by the UI inputs.
resW=wordlist(M=values$M, Field=input$summaryTerms, n=input$n_words, measure=input$measure)
W=resW$W
# Cache the word table for wordTable below.
values$Words=resW$Words
wordcloud2::wordcloud2(W, size = input$scale, minSize = 0, gridSize = input$padding,
fontFamily = input$font, fontWeight = 'normal',
color = input$wcCol, backgroundColor = input$wcBGCol,
minRotation = 0, maxRotation = input$rotate/10, shuffle = TRUE,
rotateRatio = 0.7, shape = input$wcShape, ellipticity = input$ellipticity,
widgetsize = NULL, figPath = NULL, hoverFunction = NULL)
})
output$treemap <- renderPlot({
# Treemap of word frequencies for the selected field.
resW=wordlist(M=values$M, Field=input$treeTerms, n=input$treen_words, measure=input$treemeasure)
W=resW$W
# Cache the word table for treeTable below.
values$WordsT=resW$Words
treemap::treemap(W, #Your data frame object
index=c("Terms"), #A list of your categorical variables
vSize = "Frequency", #This is your quantitative variable
type="index", #Type sets the organization and color scheme of your treemap
palette = input$treeCol, #Select your color palette from the RColorBrewer presets or make your own.
title="Word TreeMap", #Customize your title
fontsize.title = 14, #Change the font size of the title
fontsize.labels = input$treeFont
)
})
output$wordTable <- DT::renderDT({
# Table of the words shown in the word cloud (values$Words).
DT::datatable(values$Words, rownames = FALSE,
options = list(pageLength = 10, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$Words))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$Words), backgroundColor = 'white',textAlign = 'center')
})
output$treeTable <- DT::renderDT({
# Table of the words shown in the treemap. This table is driven by
# values$WordsT (treemap word list); the original code referenced
# values$Words (the word-cloud list) in columnDefs and formatStyle, which
# mis-styles or errors whenever the two word lists differ.
DT::datatable(values$WordsT, rownames = FALSE,
options = list(pageLength = 10, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$WordsT))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$WordsT), backgroundColor = 'white',textAlign = 'center')
},height = 600, width = 900)
output$kwGrowthPlot <- renderPlot({
# Loess-smoothed growth curves of the top keywords/terms over time.
# Cumulative vs annual occurrences is selectable via input$cumTerms.
if (input$cumTerms=="Cum"){
cdf=TRUE
laby="Cumulate occurrences (loess smoothing)"
}else{
cdf=FALSE
laby="Annual occurrences (loess smoothing)"}
if (input$se=="Yes"){se=TRUE}else{se=FALSE}
# TI/AB fields need term extraction first; results are memoized on values$M
# under the *_TM columns so extraction runs only once per session.
switch(input$growthTerms,
ID={
KW=KeywordGrowth(values$M, Tag = "ID", sep = ";", top = input$topkw[2], cdf = cdf)
},
DE={
KW=KeywordGrowth(values$M, Tag = "DE", sep = ";", top = input$topkw[2], cdf = cdf)
},
TI={
if (!("TI_TM" %in% names(values$M))){
values$M=termExtraction(values$M,Field = "TI", verbose=FALSE)
}
KW=KeywordGrowth(values$M, Tag = "TI_TM", sep = ";", top = input$topkw[2], cdf = cdf)
},
AB={
if (!("AB_TM" %in% names(values$M))){
values$M=termExtraction(values$M,Field = "AB", verbose=FALSE)
}
KW=KeywordGrowth(values$M, Tag = "AB_TM", sep = ";", top = input$topkw[2], cdf = cdf)
}
)
# Keep only the user-selected rank range of terms (column 1 is Year).
values$KW=KW[,c(1,seq(input$topkw[1],input$topkw[2])+1)]
# Reshape wide (one column per term) to long (Year/Term/Freq) for ggplot.
term=names(values$KW)[-1]
term=rep(term,each=dim(values$KW)[1])
n=dim(values$KW)[1]*(dim(values$KW)[2]-1)
freq=matrix(as.matrix(values$KW[,-1]),n,1)
values$DF=data.frame(Year=rep(values$KW$Year,(dim(values$KW)[2]-1)),Term=term, Freq=freq)
g=ggplot(values$DF)+
geom_smooth(aes(x=values$DF$Year,y=values$DF$Freq, group=values$DF$Term, color=values$DF$Term),se = se,method = "loess",formula ='y ~ x')+
labs(x = 'Year'
, y = laby
, title = "Word Growth") +
#ylim(0, NA) +
scale_x_continuous(breaks= (values$KW$Year[seq(1,length(values$KW$Year),by=ceiling(length(values$KW$Year)/20))])) +
geom_hline(aes(yintercept=0, alpha=0.1))+
theme(text = element_text(color = "#444444"), legend.position="none"
,plot.caption = element_text(size = 9, hjust = 0.5, color = "black", face = "bold")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0.95, angle = 0)
,axis.text.x = element_text(size=10)
)
# Pull the smoothed coordinates back out of the built plot so each curve can
# be labeled near its right end (second-to-last smoothed x position).
DFsmooth=(ggplot_build(g)$data[[1]])
DFsmooth$group=factor(DFsmooth$group, labels=levels(values$DF$Term))
maximum=sort(unique(DFsmooth$x),decreasing=TRUE)[2]
DF2=subset(DFsmooth, x == maximum)
g=g+
ggrepel::geom_text_repel(data = DF2, aes(label = DF2$group, colour = DF2$group, x =DF2$x, y = DF2$y), hjust = -.1)
suppressWarnings(plot(g))
},height = 600, width = 900)
output$kwGrowthtable <- DT::renderDT({
# Word-growth data (one column per term) backing kwGrowthPlot.
kwData=values$KW
DT::datatable(kwData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(kwData))-1))))) %>%
formatStyle(names(kwData), backgroundColor = 'white')
#return(Data)
})
#### Trend Topics ####
output$trendSliderPY <- renderUI({
# Year-range slider bounded by the publication years present in the data,
# defaulting to the full observed span.
# Fixed: na.rm=T used the reassignable alias `T`; spelled out as TRUE.
sliderInput("trendSliderPY", "Timespan", min = min(values$M$PY,na.rm=TRUE),sep="",
max = max(values$M$PY,na.rm=TRUE), value = c(min(values$M$PY,na.rm=TRUE),max(values$M$PY,na.rm=TRUE)))
})
output$trendTopicsPlot <- renderPlot({
# Trend-topics plot; recomputed only when the Apply button is pressed
# (everything else is wrapped in isolate()).
input$applyTrendTopics
isolate({
# Title/abstract fields need term extraction into a *_TM column first.
if (input$trendTerms %in% c("TI","AB")){
values$M=termExtraction(values$M, Field = input$trendTerms, stemming = input$trendStemming, verbose = FALSE)
field=paste(input$trendTerms,"_TM",sep="")
} else {field=input$trendTerms}
values$trendTopics <- fieldByYear(values$M, field = field, timespan = input$trendSliderPY, min.freq = input$trendMinFreq,
n.items = input$trendNItems, labelsize = input$trendSize, graph = FALSE)
plot(values$trendTopics$graph)
})
},height = 700)
output$trendTopicsTable <- DT::renderDT({
# Data behind the trend-topics plot.
tpData=values$trendTopics$df_graph
DT::datatable(tpData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tpData))-1))))) %>%
formatStyle(names(tpData), backgroundColor = 'white')
#return(Data)
})
### Conceptual Structure #####
### Co-occurrences network ----
output$cocPlot <- renderVisNetwork({
# Co-occurrence network; recomputed only when the Apply button is pressed.
input$applyCoc
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(cocNetwork(input,values))
#dev.off();file.remove(t) ### end of trick
# Convert the igraph result into an interactive visNetwork widget.
isolate(values$network<-igraph2vis(g=values$cocnet$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape))
isolate(values$network$VIS)
})
output$network.coc <- downloadHandler(
filename = "Co_occurrence_network.net",
# Export the co-occurrence network in Pajek format.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(file) {
igraph::write.graph(values$cocnet$graph_pajek,file=file, format="pajek")
},
contentType = "net"
)
### save coc network image as html ####
output$networkCoc.fig <- downloadHandler(
filename = "network.html",
# Save the interactive co-occurrence network widget as an HTML file.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(con) {
savenetwork(con)
},
contentType = "html"
)
output$cocTable <- DT::renderDT({
# Cluster membership and betweenness centrality per term of the
# co-occurrence network.
cocData=values$cocnet$cluster_res
names(cocData)=c("Term", "Cluster", "Btw Centrality")
DT::datatable(cocData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(cocData))-1))))) %>%
formatStyle(names(cocData), backgroundColor = 'white')
#return(Data)
})
### Correspondence Analysis ----
output$CSPlot1 <- renderPlot({
# Factorial (CA/MCA/MDS) map; recomputed when the Apply button is pressed.
# CAmap() draws the plot and returns the updated reactive values.
input$applyCA
values <- isolate(CAmap(input,values))
}, height = 650, width = 800)
output$CSPlot2 <- renderPlot({
  # Document-contribution plot of the factorial analysis.
  # Only CA/MCA produce it, and only when the selected field exists in the
  # data (values$CS[[1]][1] == "NA" is the "field missing" sentinel).
  if (input$method == "MDS") {
    emptyPlot("This plot is available only for CA or MCA analyses")
  } else if (values$CS[[1]][1] == "NA") {
    emptyPlot("Selected field is not included in your data collection")
  } else {
    plot(values$CS$graph_documents_Contrib)
  }
}, height = 650, width = 800)
output$CSPlot3 <- renderPlot({
  # Most-cited-documents plot of the factorial analysis.
  # Only CA/MCA produce it, and only when the selected field exists in the
  # data (values$CS[[1]][1] == "NA" is the "field missing" sentinel).
  if (input$method == "MDS") {
    emptyPlot("This plot is available only for CA or MCA analyses")
  } else if (values$CS[[1]][1] == "NA") {
    emptyPlot("Selected field is not included in your data collection")
  } else {
    plot(values$CS$graph_documents_TC)
  }
}, height = 650, width = 800)
output$CSPlot4 <- renderPlot({
  # Topic dendrogram from the factorial clustering; falls back to an error
  # panel when the selected field is absent from the collection.
  if (values$CS[[1]][1] == "NA") {
    emptyPlot("Selected field is not included in your data collection")
  } else {
    plot(values$CS$graph_dendogram)
  }
}, height = 650, width = 1000)
output$CSTableW <- DT::renderDT({
# Word coordinates and cluster assignments from the factorial analysis.
# CA and MCA expose the same result structure, so they share one branch:
# the empty `CA=` alternative falls through to the MCA code (switch()
# fall-through). The original duplicated the branch verbatim.
switch(input$method,
CA=,
MCA={
WData=data.frame(word=row.names(values$CS$km.res$data.clust), values$CS$km.res$data.clust,
stringsAsFactors = FALSE)
names(WData)[4]="cluster"
},
MDS={
WData=data.frame(word=row.names(values$CS$res), values$CS$res,
cluster=values$CS$km.res$cluster,stringsAsFactors = FALSE)
})
# Round the two factorial dimensions for display.
WData$Dim.1=round(WData$Dim.1,2)
WData$Dim.2=round(WData$Dim.2,2)
DT::datatable(WData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(WData))-1))))) %>%
formatStyle(names(WData), backgroundColor = 'white')
#return(Data)
})
output$CSTableD <- DT::renderDT({
# Document coordinates (and contribution) on the factorial map.
CSData=values$CS$docCoord
CSData=data.frame(Documents=row.names(CSData),CSData,stringsAsFactors = FALSE)
CSData$dim1=round(CSData$dim1,2)
CSData$dim2=round(CSData$dim2,2)
CSData$contrib=round(CSData$contrib,2)
DT::datatable(CSData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(CSData))-1))))) %>%
formatStyle(names(CSData), backgroundColor = 'white')
})
### Thematic Map ----
output$TMPlot <- renderPlotly({
# Thematic map; recomputed only when the Apply button is pressed.
input$applyTM
#values <- isolate(TMmap(input,values))
values$TM <- isolate(thematicMap(values$M, field=input$TMfield, n=input$TMn, minfreq=input$TMfreq, stemming=input$TMstemming, size=input$sizeTM, n.labels=input$TMn.labels, repel=FALSE))
# Abort with a user-facing message when no clusters could be built.
validate(
need(values$TM$nclust > 0, "\n\nNo topics in one or more periods. Please select a different set of parameters.")
)
plot.ly(values$TM$map)
})#, height = 650, width = 800)
output$NetPlot <- renderVisNetwork({
# Interactive network view of the thematic-map graph.
values$networkTM<-igraph2vis(g=values$TM$net$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$networkTM$VIS
})
output$TMTable <- DT::renderDT({
# Word/cluster table behind the thematic map (column 4 dropped).
tmData=values$TM$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
### Thematic Evolution ----
output$sliders <- renderUI({
# One numeric "Cutting Year" input per requested time slice, defaulting to
# the quantiles of the publication-year distribution (extremes dropped).
numSlices <- as.integer(input$numSlices)
v=quantile(values$M$PY, seq(0,1,by=(1/(numSlices+1))), na.rm=TRUE)
v=round(v[-c(1,length(v))],0)
lapply(1:numSlices, function(i) {
# sliderInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),
# min=1990,max=2018,value=1990)
numericInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),value=v[i],min=min(values$M$PY, na.rm = TRUE)+1,max=max(values$M$PY, na.rm = TRUE)-1, step=1)
#numericInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),value=median(values$M$PY),min=min(values$M$PY)+1,max=max(values$M$PY)-1, step=1)
})
})
output$TEPlot <- networkD3::renderSankeyNetwork({
# Thematic-evolution Sankey diagram; recomputed on the Apply button.
input$applyTE
# Collect the cutting years from the dynamically generated Slice inputs;
# inputs that have not rendered yet have length 0 and are skipped.
values$yearSlices <- isolate(as.numeric())
isolate(for (i in 1:as.integer(input$numSlices)){
if (length(input[[paste0("Slice", i)]])>0){values$yearSlices=c(values$yearSlices,input[[paste0("Slice", i)]])}
})
if (length(values$yearSlices)>0){
values$nexus <- isolate(thematicEvolution(values$M, field=input$TEfield, values$yearSlices, n = input$nTE, minFreq = input$fTE, size = input$sizeTE, n.labels=input$TEn.labels, repel=FALSE))
# Abort with a user-facing message when any period produced no topics.
validate(
need(values$nexus$check != FALSE, "\n\nNo topics in one or more periods. Please select a different set of parameters.")
)
isolate(plotThematicEvolution(Nodes = values$nexus$Nodes,Edges = values$nexus$Edges, measure = input$TEmeasure, min.flow = input$minFlowTE))
}
})
output$TETable <- DT::renderDT({
# Thematic-evolution edges with inclusion/stability indices; rows with a
# zero inclusion index are dropped, as are two internal columns (4 and 8).
TEData=values$nexus$Data
TEData=TEData[TEData$Inc_index>0,-c(4,8)]
names(TEData)=c("From", "To", "Words", "Weighted Inclusion Index", "Inclusion Index", "Occurrences", "Stability Index")
DT::datatable(TEData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TEData))-1))))) %>%
formatStyle(names(TEData), backgroundColor = 'white') %>%
formatRound(names(TEData)[4], 2) %>%
formatRound(names(TEData)[5], 2) %>%
formatRound(names(TEData)[7], 2)
#return(Data)
})
output$TMPlot1 <- renderPlotly({
# Thematic map for evolution period 1 (if that many periods exist).
#input$applyTM
if (length(values$nexus$TM)>=1){
plot.ly(values$nexus$TM[[1]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot2 <- renderPlotly({
# Thematic map for evolution period 2 (if that many periods exist).
#input$applyTM
if (length(values$nexus$TM)>=2){
plot.ly(values$nexus$TM[[2]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot3 <- renderPlotly({
# Thematic map for evolution period 3 (if that many periods exist).
#input$applyTM
if (length(values$nexus$TM)>=3){
plot.ly(values$nexus$TM[[3]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot4 <- renderPlotly({
# Thematic map for evolution period 4 (if that many periods exist).
# Fixed: `else (...)` used a parenthesized expression; braced block now
# matches the sibling handlers TMPlot1-3.
#input$applyTM
if (length(values$nexus$TM)>=4){
plot.ly(values$nexus$TM[[4]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot5 <- renderPlotly({
# Thematic map for evolution period 5 (if that many periods exist).
# Fixed: `else (...)` used a parenthesized expression; braced block now
# matches the sibling handlers TMPlot1-3.
#input$applyTM
if (length(values$nexus$TM)>=5){
plot.ly(values$nexus$TM[[5]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$NetPlot1 <- renderVisNetwork({
  # Interactive network for thematic-evolution period 1.
  values$network1 <- igraph2vis(g = values$nexus$Net[[1]]$graph,
                                curved = (input$coc.curved == "Yes"),
                                labelsize = input$labelsize,
                                opacity = input$cocAlpha,
                                type = input$layout,
                                shape = input$coc.shape)
  values$network1$VIS
})
output$NetPlot2 <- renderVisNetwork({
  # Interactive network for thematic-evolution period 2.
  values$network2 <- igraph2vis(g = values$nexus$Net[[2]]$graph,
                                curved = (input$coc.curved == "Yes"),
                                labelsize = input$labelsize,
                                opacity = input$cocAlpha,
                                type = input$layout,
                                shape = input$coc.shape)
  values$network2$VIS
})
output$NetPlot3 <- renderVisNetwork({
  # Interactive network for thematic-evolution period 3.
  values$network3 <- igraph2vis(g = values$nexus$Net[[3]]$graph,
                                curved = (input$coc.curved == "Yes"),
                                labelsize = input$labelsize,
                                opacity = input$cocAlpha,
                                type = input$layout,
                                shape = input$coc.shape)
  values$network3$VIS
})
output$NetPlot4 <- renderVisNetwork({
  # Interactive network for thematic-evolution period 4.
  values$network4 <- igraph2vis(g = values$nexus$Net[[4]]$graph,
                                curved = (input$coc.curved == "Yes"),
                                labelsize = input$labelsize,
                                opacity = input$cocAlpha,
                                type = input$layout,
                                shape = input$coc.shape)
  values$network4$VIS
})
output$NetPlot5 <- renderVisNetwork({
  # Interactive network for thematic-evolution period 5.
  values$network5 <- igraph2vis(g = values$nexus$Net[[5]]$graph,
                                curved = (input$coc.curved == "Yes"),
                                labelsize = input$labelsize,
                                opacity = input$cocAlpha,
                                type = input$layout,
                                shape = input$coc.shape)
  values$network5$VIS
})
output$TMTable1 <- DT::renderDT({
# Word/cluster table for thematic-evolution period 1 (column 4 dropped).
tmData=values$nexus$TM[[1]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable2 <- DT::renderDT({
# Word/cluster table for thematic-evolution period 2 (column 4 dropped).
tmData=values$nexus$TM[[2]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable3 <- DT::renderDT({
# Word/cluster table for thematic-evolution period 3 (column 4 dropped).
tmData=values$nexus$TM[[3]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable4 <- DT::renderDT({
# Word/cluster table for thematic-evolution period 4 (column 4 dropped).
tmData=values$nexus$TM[[4]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable5 <- DT::renderDT({
# Word/cluster table for thematic-evolution period 5 (column 4 dropped).
tmData=values$nexus$TM[[5]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
### INTELLECTUAL STRUCTURE ####
### Co-citation network ----
output$cocitPlot <- renderVisNetwork({
# Co-citation network; recomputed only when the Apply button is pressed.
input$applyCocit
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(intellectualStructure(input,values))
#dev.off();file.remove(t) ### end of trick
# Convert the igraph result into an interactive visNetwork widget.
isolate(values$network<-igraph2vis(g=values$cocitnet$graph,curved=(input$cocit.curved=="Yes"),
labelsize=input$citlabelsize, opacity=input$cocitAlpha,type=input$citlayout,
shape=input$cocit.shape))
isolate(values$network$VIS)
})
output$network.cocit <- downloadHandler(
filename = "Co_citation_network.net",
# Export the co-citation network in Pajek format.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(file) {
igraph::write.graph(values$cocitnet$graph_pajek,file=file, format="pajek")
#rio::export(values$M, file=file)
},
contentType = "net"
)
output$cocitTable <- DT::renderDT({
# Cluster membership and betweenness centrality per node of the
# co-citation network.
cocitData=values$cocitnet$cluster_res
names(cocitData)=c("Node", "Cluster", "Btw Centrality")
DT::datatable(cocitData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(cocitData))-1))))) %>%
formatStyle(names(cocitData), backgroundColor = 'white')
#return(Data)
})
### save coc network image as html ####
output$networkCocit.fig <- downloadHandler(
filename = "network.html",
# Save the interactive co-citation network widget as an HTML file.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(con) {
savenetwork(con)
},
contentType = "html"
)
### Historiograph ----
output$histPlot <- renderPlot({
## Historiograph
# Direct-citation historiograph; recomputed on the Apply button, with a
# progress indicator because the computation can be slow.
input$applyHist
withProgress(message = 'Calculation in progress',
value = 0, {
values <- isolate(historiograph(input,values))
})
}, height = 500, width = 900)
output$histTable <- DT::renderDT({
# Documents in the historiograph, limited to the input$histNodes documents
# with the highest local citation score (LCS); DOIs become clickable links
# and GCS/LCS columns get in-cell bar charts.
LCS=values$histResults$LCS
s=sort(LCS,decreasing = TRUE)[input$histNodes]
ind=which(LCS>=s)
Data=values$histResults$histData
Data=Data[ind,]
Data$DOI<- paste0('<a href=\"http://doi.org/',Data$DOI,'\" target=\"_blank\">',Data$DOI,'</a>')
DT::datatable(Data, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(Data))-1))))) %>%
formatStyle(names(Data), backgroundColor = 'white') %>%
formatStyle(
'GCS',
background = styleColorBar(Data$GCS, 'steelblue'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center'
) %>%
formatStyle(
'LCS',
background = styleColorBar(Data$LCS, 'steelblue'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center'
)
#return(Data)
})
### SOCIAL STRUCTURE ####
### Collaboration network ----
output$colPlot <- renderVisNetwork({
# Collaboration network; recomputed only when the Apply button is pressed.
input$applyCol
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(socialStructure(input,values))
#dev.off();file.remove(t) ### end of trick
# Convert the igraph result into an interactive visNetwork widget.
isolate(values$network<-igraph2vis(g=values$colnet$graph,curved=(input$soc.curved=="Yes"),
labelsize=input$collabelsize, opacity=input$colAlpha,type=input$collayout,
shape=input$col.shape, color = "blue"))
isolate(values$network$VIS)
})
output$network.col <- downloadHandler(
filename = "Collaboration_network.net",
# Export the collaboration network in Pajek format.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(file) {
igraph::write.graph(values$colnet$graph_pajek,file=file, format="pajek")
#rio::export(values$M, file=file)
},
contentType = "net"
)
output$colTable <- DT::renderDT({
# Cluster membership and betweenness centrality per node of the
# collaboration network.
colData=values$colnet$cluster_res
names(colData)=c("Node", "Cluster", "Btw Centrality")
DT::datatable(colData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(colData))-1))))) %>%
formatStyle(names(colData), backgroundColor = 'white')
#return(Data)
})
### save coc network image as html ####
output$networkCol.fig <- downloadHandler(
filename = "network.html",
# Save the interactive collaboration network widget as an HTML file.
# Fixed: `content <- function(...)` assigned a stray variable and relied on
# positional matching; `content =` passes the function as a named argument.
content = function(con) {
savenetwork(con)
},
contentType = "html"
)
### WPPlot ----
output$WMPlot<- renderPlot({
# World collaboration map; recomputed only when the Apply button is pressed.
input$applyWM
isolate({
values$WMmap=countrycollaboration(values$M,label=FALSE,edgesize=input$WMedgesize/2,min.edges=input$WMedges.min)
plot(values$WMmap$g)
})
#isolate(values$WMmap=countrycollaboration(values$M,label=FALSE,edgesize=input$WMedgesize/2,min.edges=input$WMedges.min))
#isolate(plot(values$WMmap$g))
},height = 750)#, width = 750
output$WMTable <- DT::renderDT({
# Edge list of the world collaboration map: country pairs and how many
# joint documents link them (columns 1, 2 and 9 of the edge table).
colData=values$WMmap$tab
colData=colData[,c(1,2,9)]
names(colData)=c("From","To","Frequency")
DT::datatable(colData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(colData))-1))))) %>%
formatStyle(names(colData), backgroundColor = 'white')
#return(Data)
})
### COMMON FUNCTIONS ####
getFileNameExtension <- function (fn) {
  # Return the extension of a file path, or '' when there is none.
  # The extension is only recognised when it follows a non-empty base name,
  # so hidden files such as ".bashrc" yield ''.
  parts <- strsplit(x = fn, split = '/')[[1]]
  base_name <- parts[length(parts)]
  pieces <- strsplit(x = base_name, split = '\\.')[[1]]
  n_pieces <- length(pieces)
  if (n_pieces > 1 && any(pieces[seq_len(n_pieces - 1)] != '')) {
    return(pieces[n_pieces])
  }
  ''
}
plot.ly <- function(g){
  # Convert a ggplot into an interactive plotly figure with a trimmed-down
  # modebar (only the hover text is shown as tooltip).
  # NOTE(review): the dotted name shadows the S3 pattern `plot.<class>`;
  # renaming would break callers, so it is kept.
  hidden_buttons <- c(
    'sendDataToCloud',
    'pan2d',
    'select2d',
    'lasso2d',
    'toggleSpikelines',
    'hoverClosestCartesian',
    'hoverCompareCartesian'
  )
  fig <- ggplotly(g, tooltip = "text")
  config(fig,
         displaylogo = FALSE,
         modeBarButtonsToRemove = hidden_buttons)
}
emptyPlot <- function(errortext){
  # Draw a blank panel carrying only an error message; used whenever a
  # requested analysis cannot be produced for the current collection.
  message_panel <- ggplot() +
    theme_void() +
    theme(legend.position = "none") +
    annotate("text", x = 4, y = 25, label = errortext, size = 10)
  plot(message_panel)
}
count.duplicates <- function(DF){
  # Collapse duplicated rows of a data frame, appending a `count` column
  # with the multiplicity of each distinct row.
  # Rows are keyed by pasting all columns with '\r' (unlikely in the data).
  keys <- do.call('paste', c(DF, sep = '\r'))
  sorted_idx <- order(keys)
  runs <- rle(keys[sorted_idx])
  # Keep one representative row per run of identical keys.
  keep <- sorted_idx[cumsum(runs$lengths)]
  representatives <- DF[keep, , drop = FALSE]
  cbind(representatives, count = runs$lengths)
}
reduceRefs<- function(A){
# Shorten each reference string by cutting it just before the volume
# marker ("V<digit>") and before the "DOI " marker, keeping only the
# leading author/year part of the citation.
# NOTE(review): both patterns start with '*' ("*V[0-9]", "*DOI "); a
# leading '*' has no preceding atom to repeat in an ERE, so it is
# presumably meant as a literal character or a leftover wildcard --
# confirm the intended pattern before changing it.
ind=unlist(regexec("*V[0-9]", A))
# regexec returns -1 for non-matching strings; only truncate matches.
A[ind>-1]=substr(A[ind>-1],1,(ind[ind>-1]-1))
ind=unlist(regexec("*DOI ", A))
A[ind>-1]=substr(A[ind>-1],1,(ind[ind>-1]-1))
return(A)
}
initial <- function(values){
  # Reset every shared field of the app state to its pristine placeholder.
  # "NA" strings and 1x1 NA matrices act as "not yet computed" sentinels
  # that downstream code checks before rebuilding results.
  values$results <- list("NA")
  values$log <- "working..."
  values$load <- "FALSE"
  values$field <- "NA"
  values$citField <- "NA"
  values$colField <- "NA"
  values$citSep <- "NA"
  values$NetWords <- matrix(NA, 1, 1)
  values$NetRefs <- matrix(NA, 1, 1)
  values$ColNetRefs <- matrix(NA, 1, 1)
  values$Title <- "Network"
  values$Histfield <- "NA"
  values$histlog <- "working..."
  values$kk <- 0
  values$histsearch <- "NA"
  values$citShortlabel <- "NA"
  values$S <- list("NA")
  values$GR <- "NA"
  return(values)
}
### ANALYSIS FUNCTIONS ####
### Descriptive functions ----
# Horizontal bar chart of author or source impact (h/g/m-index or total
# citations). Computes and caches the Hindex table in values$H, then plots
# the top K items for the measure chosen in the UI.
# NOTE(review): `input` is a free variable resolved from the enclosing
# server scope, so this function only works when defined inside server().
Hindex_plot <- function(values, type){
hindex<-function(values,type){
switch(type,
author={
AU=trim(gsub(",","",names(tableTag(values$M,"AU"))))
values$H=Hindex(values$M, field = "author", elements = AU, sep = ";", years=Inf)$H
},
source={
SO=names(sort(table(values$M$SO),decreasing = TRUE))
values$H=Hindex(values$M, field = "source", elements = SO, sep = ";", years=Inf)$H
}
)
return(values)
}
values<-hindex(values, type = type)
xx=values$H
if (type=="author"){
K=input$Hkauthor
measure=input$HmeasureAuthors
title="Author Impact"
xn="Authors"
} else {
K=input$Hksource
measure=input$HmeasureSources
title="Source Impact"
xn="Sources"
}
# Clamp the requested number of bars to the rows available.
if (K>dim(xx)[1]){
k=dim(xx)[1]
} else {k=K}
# Map the chosen measure to its column index in the Hindex table.
switch(measure,
h={m=2},
g={m=3},
m={m=4},
tc={m=5}
)
# Keep the top-k rows by the selected measure (name column + measure column).
xx=xx[order(-xx[,m]),]
xx=xx[1:k,c(1,m)]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste(xn,": ",xx[,1],"\n", names(values$H)[m],": ",xx[,2]))) +
#geom_bar(stat="identity", fill="steelblue")+
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
# Reverse the discrete axis so the largest bar appears at the top after
# coord_flip().
scale_x_discrete(limits = rev((xx[,1])))+
labs(title=title, x = xn)+
labs(y = names(values$H)[m])+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
res<-list(values=values,g=g)
return(res)
}
# Return one of the descriptive summary tables ("tab1".."tab12").
# biblioAnalysis()/summary() results are computed lazily on first use and
# cached in values$results / values$S; the selected table is also cached
# in values$TAB for later export.
descriptive <- function(values,type){
if (values$results[[1]]=="NA"){
values$results=biblioAnalysis(values$M)}
if (values$S[[1]][1]=="NA"){
values$S=summary(values$results,k=Inf,verbose=FALSE)}
switch(type,
"tab1"={
# Main information about the collection.
#TAB=data.frame(Information=gsub("[[:digit:]]", "", S$MainInformation), Data=gsub("[^0-9]", "", S$MainInformation)) #this is better
TAB=data.frame(values$S$MainInformationDF)
# cat(S$MainInformation)
},
"tab2"={
# Annual scientific production.
TAB=values$S$AnnualProduction
names(TAB)=c("Year","Articles")
#print(S$AnnualProduction)
#cat("\n\n")
#cat("Annual Growth Rate ",round(S$AnnualGrowthRate, digits=2),"%")
},
"tab3"={
# Most productive authors (whole and fractionalized counts).
TAB=values$S$MostProdAuthors
names(TAB)=c("Authors","Articles","Authors-Frac","Articles Fractionalized")
#print(S$MostProdAuthors)
},
"tab4"={
# Most cited documents.
TAB=values$S$MostCitedPapers
names(TAB)=c("Paper", "Total Citations","TC per Year")
#print(S$MostCitedPapers)
},
"tab5"={
TAB=values$S$MostProdCountries
#print(S$MostProdCountries)
},
"tab6"={
TAB=values$S$TCperCountries
#print(S$TCperCountries)
},
"tab7"={
TAB=values$S$MostRelSources
#print(S$MostRelSources)
},
"tab8"={
TAB=values$S$MostRelKeywords
#print(S$MostRelSources)
},
"tab10"={
# Country production table from the world map helper.
TAB<-mapworld(values$M)$tab
},
"tab11"={
TAB=as.data.frame(values$results$Affiliations,stringsAsFactors = FALSE)
names(TAB)=c("Affiliations", "Articles")
},
"tab12"={
# Affiliation frequencies straight from the C1 tag; very short strings
# (<= 4 chars) are dropped as parsing artifacts.
TAB=tableTag(values$M,"C1")
TAB=data.frame(Affiliations=names(TAB), Articles=as.numeric(TAB),stringsAsFactors = FALSE)
TAB=TAB[nchar(TAB[,1])>4,]
#names(TAB)=c("Affiliations", "Articles")
}
)
values$TAB=TAB
res=list(values=values,TAB=TAB)
return(res)
}
wordlist <- function(M, Field, n, measure){
  # Build a term-frequency table from the chosen metadata field of M and
  # return it raw (`Words`) and optionally rescaled (`W`) for cloud sizing.
  # BUG FIX: the ID/DE branches read the global `values$M` instead of the
  # `M` argument, so a filtered data frame passed by the caller was
  # silently ignored for those fields.
  switch(Field,
         ID={v=tableTag(M,"ID")},
         DE={v=tableTag(M,"DE")},
         TI={
           # NOTE(review): when TI_TM is already present `v` is not assigned
           # here; presumably term extraction is expected to have happened
           # earlier -- confirm against callers.
           if (!("TI_TM" %in% names(M))){
             v=tableTag(M,"TI")
           }},
         AB={if (!("AB_TM" %in% names(M))){
           v=tableTag(M,"AB")
         }}
  )
  names(v)=tolower(names(v))
  # Cap the requested number of terms at the number available.
  n=min(c(n,length(v)))
  Words=data.frame(Terms=names(v)[1:n], Frequency=(as.numeric(v)[1:n]))
  W=Words
  # Optional frequency rescaling used by the word-cloud controls.
  switch(measure,
         identity={},
         sqrt={W$Frequency=sqrt(W$Frequency)},
         log={W$Frequency=log(W$Frequency+1)},
         log10={W$Frequency=log10(W$Frequency+1)}
  )
  results=list(v=v,W=W, Words=Words)
  return(results)
}
mapworld <- function(M){
  # Choropleth of scientific production by country plus the underlying
  # frequency table. Returns list(g = ggplot object, tab = data frame).
  if (!("AU_CO" %in% names(M))){M=metaTagExtraction(M,"AU_CO")}
  CO=as.data.frame(tableTag(M,"AU_CO"),stringsAsFactors = FALSE)
  # Align country labels with the names used by map_data("world").
  CO$Tab=gsub("UNITED KINGDOM","UK",CO$Tab)
  CO$Tab=gsub("KOREA","SOUTH KOREA",CO$Tab)
  map.world <- map_data("world")
  map.world$region=toupper(map.world$region)
  # BUG FIX: a dplyr::anti_join() call whose result was discarded (dead
  # code, likely a leftover diagnostic) has been removed.
  country.prod <- dplyr::left_join( map.world, CO, by = c('region' = 'Tab'))
  tab=data.frame(country.prod %>%
                   dplyr::group_by(region) %>%
                   dplyr::summarise(Freq=mean(Freq)))
  tab=tab[!is.na(tab$Freq),]
  tab=tab[order(-tab$Freq),]
  # Legend breaks at the frequency quintiles, displayed on a log fill scale
  # but labelled with the original counts.
  breaks=as.numeric(round(quantile(CO$Freq,c(0.2,0.4,0.6,0.8,1))))
  names(breaks)=breaks
  breaks=log(breaks)
  g= ggplot(country.prod, aes( x = long, y = lat, group=group, text=paste("Country: ",country.prod$region,"\nN.of Documents: ",country.prod$Freq))) +
    geom_polygon(aes(fill = log(Freq), group=group) )+#, col = "white") +
    scale_fill_continuous(low='dodgerblue', high='dodgerblue4',breaks=breaks)+
    guides(fill = guide_legend(reverse = T)) +
    #geom_text(data=centroids, aes(label = centroids$Tab, x = centroids$long, y = centroids$lat, group=centroids$Tab)) +
    # BUG FIX: corrected the user-facing typo "Colobration".
    labs(fill = 'N.Documents'
         ,title = 'Country Scientific Collaboration'
         ,x = NULL
         ,y = NULL) +
    theme(text = element_text(color = '#333333') #'#333333'
          ,plot.title = element_text(size = 28)
          ,plot.subtitle = element_text(size = 14)
          ,axis.ticks = element_blank()
          ,axis.text = element_blank()
          ,panel.grid = element_blank()
          ,panel.background = element_rect(fill = '#FFFFFF') #'#333333'
          ,plot.background = element_rect(fill = '#FFFFFF')
          ,legend.position = c(.18,.36)
          ,legend.background = element_blank()
          ,legend.key = element_blank()
    )
  results=list(g=g,tab=tab)
  return(results)
}
### Structure fuctions ----
CAmap <- function(input, values){
  # Conceptual-structure (correspondence analysis) map for the field chosen
  # in the UI; falls back to an error panel when the field is missing.
  if ((input$CSfield %in% names(values$M))){
    tab=tableTag(values$M,input$CSfield)
    # BUG FIX: the original tested `length(tab>=2)` -- the length of a
    # logical vector, which is non-zero for ANY non-empty table. The intent
    # is to require at least two distinct terms before running the CA.
    if (length(tab)>=2){
      minDegree=as.numeric(tab[input$CSn])
      values$CS <- conceptualStructure(values$M, method=input$method , field=input$CSfield, minDegree=minDegree, clust=input$nClustersCS, k.max = 8, stemming=F, labelsize=input$CSlabelsize,documents=input$CSdoc,graph=FALSE)
      plot(values$CS$graph_terms)
    }else{
      emptyPlot("Selected field is not included in your data collection")
      values$CS=list("NA")
    }
  }else{
    emptyPlot("Selected field is not included in your data collection")
    values$CS=list("NA")
  }
}
# Build (once) and plot the historical direct-citation network.
# "FAST" mode restricts the network to highly cited documents (top quartile
# of total citations); otherwise every document with at least one citation
# is included.
historiograph <- function(input,values){
if (input$histsearch=="FAST"){
min.cit=quantile(values$M$TC,0.75, na.rm = TRUE)
}else{min.cit=1}
# Recompute only when the network has never been built or the search mode
# changed since the last run.
if (values$Histfield=="NA" | values$histsearch!=input$histsearch){
values$histResults <- histNetwork(values$M, min.citations=min.cit, sep = ";")
values$Histfield="done"
values$histsearch=input$histsearch
}
# capture.output() diverts histPlot's console chatter into the log pane
# while the plot object itself is kept in values$histPlot.
values$histlog<- capture.output(values$histPlot <- histPlot(values$histResults, n=input$histNodes, size =input$histsize, labelsize = input$histlabelsize))
return(values)
}
### Network functions ----
# Build and plot the co-occurrence network for the selected field (keywords
# plus, author keywords, title terms or abstract terms). The adjacency
# matrix is cached in values$NetWords and rebuilt only when the field
# changes; plot parameters are re-applied on every call.
cocNetwork <- function(input,values){
n = input$Nodes
label.n = input$Labels
if ((input$field %in% names(values$M))){
# Rebuild the matrix when it is still the 1x1 placeholder or the user
# switched to a different field.
if ((dim(values$NetWords)[1])==1 | !(input$field==values$field)){
values$field=input$field
switch(input$field,
ID={
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "keywords", sep = ";")
values$Title= "Keywords Plus Network"
},
DE={
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "author_keywords", sep = ";")
values$Title= "Authors' Keywords network"
},
TI={
# Title/abstract networks need term extraction first (TI_TM/AB_TM).
if(!("TI_TM" %in% names(values$M))){values$M=termExtraction(values$M,Field="TI",verbose=FALSE)}
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "titles", sep = ";")
values$Title= "Title Words network"
},
AB={
if(!("AB_TM" %in% names(values$M))){values$M=termExtraction(values$M,Field="AB",verbose=FALSE)}
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "abstracts", sep = ";")
values$Title= "Abstract Words network"
})
}
# Clamp node and label counts to the matrix size.
if (n>dim(values$NetWords)[1]){n=dim(values$NetWords)[1]}
if (label.n>n){label.n=n}
# Translate the "Yes"/"No"/"none" UI strings into the arguments
# networkPlot() expects.
if (input$normalize=="none"){normalize=NULL}else{normalize=input$normalize}
if (input$label.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$coc.curved=="Yes"){curved=TRUE}else{curved=FALSE}
#par(bg="grey92", mar=c(0,0,0,0))
values$cocnet=networkPlot(values$NetWords, normalize=normalize,n = n, Title = values$Title, type = input$layout,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$edgesize*3, labelsize=input$labelsize,label.cex=label.cex,
label.n=label.n,edges.min=input$edges.min,label.color = F, curved=curved,alpha=input$cocAlpha,
cluster=input$cocCluster, remove.isolates = (input$coc.isolates=="yes"), verbose = FALSE)
# Optionally recolor nodes by the median publication year of each term.
if (input$cocyears=="Yes"){
Y=fieldByYear(values$M, field = input$field, graph=FALSE)
g=values$cocnet$graph
label=igraph::V(g)$name
ind=which(tolower(Y$df$item) %in% label)
df=Y$df[ind,]
#bluefunc <- colorRampPalette(c("lightblue", "darkblue"))
#col=bluefunc((diff(range(df$year))+1)*10)
col=heat.colors((diff(range(df$year))+1)*10)
igraph::V(g)$color=col[(max(df$year)-df$year+1)*10]
igraph::V(g)$year=df$year
values$cocnet$graph=g
}
}else{
emptyPlot("Selected field is not included in your data collection")
}
return(values)
}
# Build and plot the co-citation network (references, cited authors or
# cited sources). The matrix is cached in values$NetRefs and rebuilt only
# when the field, separator or short-label option changes.
intellectualStructure <- function(input,values){
n = input$citNodes
label.n = input$citLabels
if ((dim(values$NetRefs)[1])==1 | !(input$citField==values$citField) | !(input$citSep==values$citSep) | !(input$citShortlabel==values$citShortlabel)){
values$citField=input$citField
values$citSep=input$citSep
if (input$citShortlabel=="Yes"){shortlabel=TRUE}else{shortlabel=FALSE}
values$citShortlabel=input$citShortlabel
switch(input$citField,
CR={
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "references", sep = input$citSep, shortlabel=shortlabel)
values$Title= "Cited References network"
},
CR_AU={
# Cited-author / cited-source fields must be extracted from CR first.
if(!("CR_AU" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="CR_AU", sep = input$citSep)}
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "authors", sep = input$citSep)
values$Title= "Cited Authors network"
},
CR_SO={
if(!("CR_SO" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="CR_SO", sep = input$citSep)}
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "sources", sep = input$citSep)
values$Title= "Cited Sources network"
})
}
# Clamp node and label counts; map UI strings onto networkPlot() options.
if (n>dim(values$NetRefs)[1]){n=dim(values$NetRefs)[1]}
if (label.n>n){label.n=n}
if (input$citlabel.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$cocit.curved=="Yes"){curved=TRUE}else{curved=FALSE}
values$cocitnet=networkPlot(values$NetRefs, normalize=NULL, n = n, Title = values$Title, type = input$citlayout,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$citedgesize*3,
labelsize=input$citlabelsize,label.cex=label.cex, curved=curved,
label.n=label.n,edges.min=input$citedges.min,label.color = F,remove.isolates = (input$cit.isolates=="yes"),
alpha=input$cocitAlpha, cluster=input$cocitCluster, verbose = FALSE)
return(values)
}
# Build and plot the collaboration network (authors, institutions or
# countries). The matrix is cached in values$ColNetRefs and rebuilt only
# when the field changes.
socialStructure<-function(input,values){
n = input$colNodes
label.n = input$colLabels
if ((dim(values$ColNetRefs)[1])==1 | !(input$colField==values$colField)){
values$colField=input$colField
values$cluster="walktrap"
switch(input$colField,
COL_AU={
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "authors", sep = ";")
values$Title= "Author Collaboration network"
},
COL_UN={
# University / country tags must be extracted from affiliations first.
if(!("AU_UN" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="AU_UN", sep=";")}
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "universities", sep = ";")
values$Title= "Edu Collaboration network"
},
COL_CO={
if(!("AU_CO" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="AU_CO", sep=";")}
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "countries", sep = ";")
values$Title= "Country Collaboration network"
#values$cluster="none"
})
}
# Clamp node and label counts; map UI strings onto networkPlot() options.
if (n>dim(values$ColNetRefs)[1]){n=dim(values$ColNetRefs)[1]}
if (label.n>n){label.n=n}
if (input$colnormalize=="none"){normalize=NULL}else{normalize=input$colnormalize}
if (input$collabel.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$soc.curved=="Yes"){curved=TRUE}else{curved=FALSE}
type=input$collayout
# The "worldmap" layout is handled elsewhere; fall back to automatic here.
if (input$collayout=="worldmap"){type="auto"}
values$colnet=networkPlot(values$ColNetRefs, normalize=normalize, n = n, Title = values$Title, type = type,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$coledgesize*3,
labelsize=input$collabelsize,label.cex=label.cex, curved=curved,
label.n=label.n,edges.min=input$coledges.min,label.color = F,alpha=input$colAlpha,
remove.isolates = (input$col.isolates=="yes"), cluster=input$colCluster, verbose = FALSE)
return(values)
}
# Draw the country-collaboration world map: a production choropleth with
# collaboration edges drawn as arcs between country centroids.
# Returns list(g = ggplot object, tab = full edge table before filtering).
countrycollaboration <- function(M,label,edgesize,min.edges){
M=metaTagExtraction(M,"AU_CO")
net=biblioNetwork(M,analysis="collaboration",network="countries")
# Diagonal of the adjacency matrix = per-country document counts.
CO=data.frame(Tab=rownames(net),Freq=diag(net),stringsAsFactors = FALSE)
bsk.network=igraph::graph_from_adjacency_matrix(net,mode="undirected")
COedges=as.data.frame(igraph::ends(bsk.network,igraph::E(bsk.network),names=TRUE),stringsAsFactors = FALSE)
map.world <- map_data("world")
map.world$region=toupper(map.world$region)
# Align map region names with the country labels used by bibliometrix.
map.world$region=gsub("UK","UNITED KINGDOM",map.world$region)
map.world$region=gsub("SOUTH KOREA","KOREA",map.world$region)
country.prod <- dplyr::left_join( map.world, CO, by = c('region' = 'Tab'))
# Legend breaks at frequency quintiles on a log fill scale, labelled with
# the original counts.
breaks=as.numeric(round(quantile(CO$Freq,c(0.2,0.4,0.6,0.8,1))))
names(breaks)=breaks
breaks=log(breaks)
# `countries` (lazy-loaded dataset) supplies centroid coordinates for the
# edge endpoints.
data("countries",envir=environment())
names(countries)[1]="Tab"
COedges=dplyr::inner_join(COedges,countries, by=c('V1'='Tab'))
COedges=dplyr::inner_join(COedges,countries, by=c('V2'='Tab'))
# Drop self-loops, collapse duplicated pairs into a count, then keep only
# edges at or above the requested minimum.
COedges=COedges[COedges$V1!=COedges$V2,]
COedges=count.duplicates(COedges)
tab=COedges
COedges=COedges[COedges$count>=min.edges,]
g=ggplot(country.prod, aes( x = country.prod$long, y = country.prod$lat, group = country.prod$group )) +
geom_polygon(aes(fill = log(Freq))) +
scale_fill_continuous(low='dodgerblue', high='dodgerblue4',breaks=breaks)+
#guides(fill = guide_legend(reverse = T)) +
guides(colour=FALSE, fill=FALSE)+
geom_curve(data=COedges, aes(x = COedges$Longitude.x , y = COedges$Latitude.x, xend = COedges$Longitude.y, yend = COedges$Latitude.y, # draw edges as arcs
color = "firebrick4", size = COedges$count, group=COedges$continent.x),
curvature = 0.33,
alpha = 0.5) +
labs(title = "Country Collaboration Map", x = "Latitude", y = "Longitude")+
scale_size_continuous(guide = FALSE, range = c(0.25, edgesize))+
theme(text = element_text(color = '#333333')
,plot.title = element_text(size = 28)
,plot.subtitle = element_text(size = 14)
,axis.ticks = element_blank()
,axis.text = element_blank()
,panel.grid = element_blank()
,panel.background = element_rect(fill = '#FFFFFF') #'#333333'
,plot.background = element_rect(fill = '#FFFFFF')
,legend.position = c(.18,.36)
,legend.background = element_blank()
,legend.key = element_blank()
)
if (isTRUE(label)){
CO=dplyr::inner_join(CO,countries, by=c('Tab'='Tab'))
g=g+
ggrepel::geom_text_repel(data=CO, aes(x = .data$Longitude, y = .data$Latitude, label = .data$Tab, group=.data$continent), # draw text labels
hjust = 0, nudge_x = 1, nudge_y = 4,
size = 3, color = "orange", fontface = "bold")
}
results=list(g=g,tab=tab)
return(results)
}
### visNetwork tools ----
netLayout <- function(type){
  # Map the UI layout keyword to the corresponding igraph layout function
  # name used by visIgraphLayout(). Unknown keywords raise an error, as in
  # the original switch-based implementation.
  layout_names <- c(
    auto        = "layout_nicely",
    circle      = "layout_in_circle",
    mds         = "layout_with_mds",
    star        = "layout_as_star",
    sphere      = "layout_on_sphere",
    fruchterman = "layout_with_fr",
    kamada      = "layout_with_kk"
  )
  layout_names[[type]]
}
# Re-render the last displayed visNetwork (stored via `values$network` in
# the enclosing server scope) at a fixed 2000x2000 size and write it to
# `con` as a self-contained HTML page.
savenetwork <- function(con){
vn=values$network$vn
visNetwork(nodes = vn$nodes, edges = vn$edges, type="full", smooth=TRUE, physics=FALSE, height = "2000px",width = "2000px" ) %>%
visNodes(shape="box", font=list(color="black"),scaling=list(label=list(enables=TRUE))) %>%
# Reuse the layout and edge curvature chosen when the network was built.
visIgraphLayout(layout = values$network$l) %>%
visEdges(smooth = values$network$curved) %>%
visOptions(highlightNearest =list(enabled = T, hover = T, degree=1), nodesIdSelection = T) %>%
visInteraction(dragNodes = TRUE, navigationButtons = TRUE, hideEdgesOnDrag = TRUE) %>% visExport() %>%
visPhysics(enabled = FALSE) %>% visSave(con)
}
# Convert an igraph network (as produced by networkPlot) into an
# interactive visNetwork widget. Returns the widget plus the node/edge
# data, layout name and curvature so the network can be re-rendered later
# (see savenetwork()).
igraph2vis<-function(g,curved,labelsize,opacity,type,shape){
LABEL=igraph::V(g)$name
# Nodes whose labelsize attribute is 0 are drawn without a label.
LABEL[igraph::V(g)$labelsize==0]=""
vn <- toVisNetworkData(g)
vn$nodes$label=LABEL
vn$edges$num=1
# lty==2 (dashed) edges are translated to visNetwork's `dashes` flag.
vn$edges$dashes=FALSE
vn$edges$dashes[vn$edges$lty==2]=TRUE
## opacity
vn$nodes$color=adjustcolor(vn$nodes$color,alpha=min(c(opacity+0.2,1)))
vn$edges$color=adjustcolor(vn$edges$color,alpha=opacity)
## removing multiple edges
vn$edges=unique(vn$edges)
## labelsize
# Rescale font sizes into [scalemin, scalemax]; when all nodes share the
# same size, fall back to a flat 10*labelsize.
scalemin=20
scalemax=150
Min=min(vn$nodes$font.size)
Max=max(vn$nodes$font.size)
if (Max>Min){
size=(vn$nodes$font.size-Min)/(Max-Min)*10*labelsize+10
} else {size=10*labelsize}
size[size<scalemin]=scalemin
size[size>scalemax]=scalemax
vn$nodes$font.size=size
l<-netLayout(type)
### TO ADD SHAPE AND FONT COLOR OPTIONS
VIS<-visNetwork(nodes = vn$nodes, edges = vn$edges, type="full", smooth=TRUE, physics=FALSE) %>%
visNodes(shape=shape, font=list(color="black")) %>%
visIgraphLayout(layout = l) %>%
visEdges(smooth = curved) %>%
visOptions(highlightNearest =list(enabled = T, hover = T, degree=1), nodesIdSelection = T) %>%
visInteraction(dragNodes = TRUE, navigationButtons = TRUE, hideEdgesOnDrag = TRUE)
# Cache the widget in the shared state (free variable from server scope).
values$COCVIS=VIS
return(list(VIS=VIS,vn=vn, type=type, l=l, curved=curved))
}
} ## End of Server
server <- function(input, output, session) {
## stop the R session
session$onSessionEnded(stopApp)
##
## file upload max size
options(shiny.maxRequestSize=100*1024^2)
### initial values ####
values = reactiveValues()
values$results=list("NA")
values$log="working..."
values$load="FALSE"
values$field="NA"
values$citField=values$colField=values$citSep="NA"
values$NetWords=values$NetRefs=values$ColNetRefs=matrix(NA,1,1)
values$Title="Network"
values$Histfield="NA"
values$histlog="working..."
values$kk=0
values$M=data.frame(PY=0)
values$histsearch="NA"
values$citShortlabel="NA"
values$S=list("NA")
values$GR="NA"
### LOAD MENU ####
# observe({
# volumes <- c(Home = fs::path_home(), getVolumes()())
# shinyFileSave(input, "save", roots=volumes, session=session)
# fileinfo <- parseSavePath(volumes, input$save)
# #data <- data.frame(a=c(1,2))
# if (nrow(fileinfo) > 0) {
# ext <- tolower(getFileNameExtension(fileinfo$datapath))
# #print(ext)
# switch(ext,
# xlsx={
# rio::export(values$M, file=as.character(fileinfo$datapath))
# },
# rdata={
# M=values$M
# save(M, file=as.character(fileinfo$datapath))
# })
# }
# })
output$contents <- DT::renderDT({
  # Import (convert raw WoS/Scopus/Dimensions exports) or load (xlsx/RData/
  # rda/rds) a bibliographic collection, install it in `values`, and render
  # a preview table. input$file1 is NULL until the user picks a file;
  # 'datapath' points to the uploaded temporary copy.
  input$applyLoad
  isolate({
    inFile <- input$file1
    # (Idiom fix: scalar `&&` instead of vectorized `&` in the conditions.)
    if (!is.null(inFile) && input$load == "import") {
      ext <- getFileNameExtension(inFile$datapath)
      switch(
        input$dbsource,
        isi = {
          switch(ext,
                 ### WoS ZIP Files
                 zip = {
                   files = unzip(inFile$datapath)
                   D = unlist(lapply(files, function(l) {
                     Dpar = readFiles(l)
                     return(Dpar)
                   }))
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <- convert2df(D,
                                                  dbsource = input$dbsource,
                                                  format = input$format)
                                })
                 },
                 ### WoS Txt/Bib Files
                 {
                   D = readFiles(inFile$datapath)
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <- convert2df(D,
                                                  dbsource = input$dbsource,
                                                  format = input$format)
                                })
                 })
        },
        scopus = {
          switch(ext,
                 ### Scopus ZIP Files
                 zip = {
                   files = unzip(inFile$datapath)
                   D = unlist(lapply(files, function(l) {
                     Dpar = readFiles(l)
                     return(Dpar)
                   }))
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <- convert2df(D,
                                                  dbsource = input$dbsource,
                                                  format = input$format)
                                })
                 },
                 ### Scopus Bib Files (bibtex is the only supported format)
                 {
                   D = readFiles(inFile$datapath)
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <- convert2df(D,
                                                  dbsource = input$dbsource,
                                                  format = "bibtex")
                                })
                 })
        },
        dimensions = {
          switch(ext,
                 ### Dimensions ZIP Files
                 zip = {
                   files = unzip(inFile$datapath)
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <-
                                    convert2df(files,
                                               dbsource = input$dbsource,
                                               format = input$format)
                                })
                 },
                 ### Dimensions Xlsx/csv Files
                 xlsx = {
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <-
                                    convert2df(
                                      inFile$datapath,
                                      dbsource = "dimensions",
                                      format = "excel"
                                    )
                                })
                 },
                 csv = {
                   withProgress(message = 'Conversion in progress',
                                value = 0, {
                                  M <-
                                    convert2df(
                                      inFile$datapath,
                                      dbsource = "dimensions",
                                      format = "csv"
                                    )
                                })
                 })
        }
      )
    } else if (!is.null(inFile) && input$load == "load") {
      ext <- tolower(getFileNameExtension(inFile$datapath))
      switch(ext,
             ### excel format
             xlsx = {
               M <- rio::import(inFile$datapath)
               ### Rebuild unique row names: duplicated short references (SR)
               ### receive a numeric suffix so row.names()<- does not fail.
               SR = M$SR
               tab = table(SR)
               tab2 = table(tab)
               ind = as.numeric(names(tab2))
               ind = ind[which(ind > 1)]
               if (length(ind) > 0) {
                 for (i in ind) {
                   indice = names(which(tab == i))
                   for (j in indice) {
                     indice2 = which(SR == j)
                     SR[indice2] = paste(SR[indice2], as.character(1:length(indice2)), sep = " ")
                   }
                 }
               }
               row.names(M) <- SR
             },
             ### RData format (expects an object named M inside the file)
             rdata = {
               load(inFile$datapath)
             },
             rda = {
               load(inFile$datapath)
             },
             rds = {
               # BUG FIX: .rds files hold a single serialized object and
               # cannot be read with load(); readRDS() returns the object,
               # which is assumed to be the bibliographic data frame.
               M <- readRDS(inFile$datapath)
             })
    } else if (is.null(inFile)) {
      return(NULL)
    }
    # Reset all derived state, then install the new collection.
    values = initial(values)
    values$M <- M
    values$Morig = M
    values$Histfield = "NA"
    values$results = list("NA")
    # Truncate long fields for display and turn the DOI into a clickable link.
    MData = as.data.frame(apply(values$M, 2, function(x) {
      substring(x, 1, 150)
    }), stringsAsFactors = FALSE)
    MData$DOI <-
      paste0(
        '<a href=\"http://doi.org/',
        MData$DI,
        '\" target=\"_blank\">',
        MData$DI,
        '</a>'
      )
    # Move the DOI link to the first column.
    nome = c("DOI", names(MData)[-length(names(MData))])
    MData = MData[nome]
    DT::datatable(MData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
                  options = list(
                    pageLength = 50,
                    dom = 'Bfrtip',
                    buttons = list(list(extend = 'pageLength'),
                                   list(extend = 'print')),
                    lengthMenu = list(c(10, 25, 50, -1),
                                      c('10 rows', '25 rows', '50 rows', 'Show all')),
                    columnDefs = list(list(
                      className = 'dt-center', targets = 0:(length(names(MData)) - 1)
                    ))
                  ),
                  class = 'cell-border compact stripe') %>%
      formatStyle(
        names(MData),
        backgroundColor = 'white',
        textAlign = 'center',
        fontSize = '70%'
      )
  })
})
### Download handler: export the current collection as xlsx or RData.
output$collection.save <- downloadHandler(
  filename = function() {
    paste("Bibliometrix-Export-File-", Sys.Date(), ".", input$save_file, sep = "")
  },
  # BUG FIX: `content <-` was an accidental assignment matched only
  # positionally; the named argument is the intended form.
  content = function(file) {
    switch(input$save_file,
           xlsx = {suppressWarnings(rio::export(values$M, file = file))},
           RData = {
             # save() stores the data frame under the name "M", which the
             # load branch of output$contents expects.
             M = values$M
             save(M, file = file)
           })
  },
  contentType = input$save_file
)
output$textLog <- renderUI({
  # Report how many documents were imported. The initial placeholder
  # collection (a one-row dummy data frame) is displayed as zero.
  n_docs <- dim(values$M)[1]
  if (n_docs == 1) {
    n_docs <- 0
  }
  textInput("textLog", "Conversion results",
            value = paste("Number of Documents ", n_docs))
})
### FILTERS MENU ####
### Filters uiOutput
output$textDim <- renderUI({
  # Show "kept of total" document counts after filtering.
  kept <- dim(values$M)[1]
  total <- dim(values$Morig)[1]
  textInput("textDim", "Number of Documents",
            value = paste("Documents ", kept, " of ", total))
})
output$selectType <- renderUI({
  # One choice per document type present in the original collection;
  # everything is selected by default.
  doc_types <- sort(unique(values$Morig$DT))
  selectInput("selectType", "Document Type",
              choices = doc_types,
              selected = doc_types,
              multiple = TRUE)
})
output$sliderPY <- renderUI({
  # Publication-year range slider spanning the whole original collection
  # (sep = "" suppresses thousands separators on year labels).
  yr_min <- min(values$Morig$PY, na.rm = T)
  yr_max <- max(values$Morig$PY, na.rm = T)
  sliderInput("sliderPY", "Publication Year", min = yr_min, sep = "",
              max = yr_max, value = c(yr_min, yr_max))
})
output$selectSource <- renderUI({
  # One choice per source (journal) in the original collection; all
  # sources are selected by default.
  sources <- sort(unique(values$Morig$SO))
  selectInput("selectSource", "Source",
              choices = sources,
              selected = sources,
              multiple = TRUE)
})
output$sliderTC <- renderUI({
  # Total-citation range slider spanning the whole original collection.
  tc_min <- min(values$Morig$TC, na.rm = T)
  tc_max <- max(values$Morig$TC, na.rm = T)
  sliderInput("sliderTC", "Total Citation", min = tc_min,
              max = tc_max, value = c(tc_min, tc_max))
})
### End Filters uiOutput
output$dataFiltered <- DT::renderDT({
# Apply the UI filters (year, citations, document type, Bradford zone) to
# the original collection, reset all derived state, and preview the result.
M=values$Morig
# Bradford zones are computed on the UNfiltered collection.
B=bradford(M)$table
M=subset(M, M$PY>=input$sliderPY[1] & M$PY<=input$sliderPY[2])
M=subset(M, M$TC>=input$sliderTC[1] & M$TC<=input$sliderTC[2])
M=subset(M, M$DT %in% input$selectType)
switch(input$bradfordSources,
"core"={
SO=B$SO[B$Zone %in% "Zone 1"]
},
"zone2"={
SO=B$SO[B$Zone %in% c("Zone 1", "Zone 2")]
},
"all"={SO=B$SO})
M=M[M$SO %in% SO,]
# All cached analyses are invalidated by the new filtered collection.
values<-initial(values)
values$M=M
# Truncate long fields for display only.
Mdisp=as.data.frame(apply(values$M,2,function(x){substring(x,1,150)}),stringsAsFactors = FALSE)
if (dim(Mdisp)[1]>0){
DT::datatable(Mdisp, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Filtered_DataTable',
title = "My Title",
header = TRUE),
list(extend = 'excel',
filename = 'Filtered_DataTable',
title = "My Title",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(Mdisp))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(Mdisp), backgroundColor = 'white',textAlign = 'center', fontSize = '70%')
}else{Mdisp=data.frame(Message="Empty collection",stringsAsFactors = FALSE, row.names = " ")}
})
### DATASET MENU ####
output$MainInfo <- DT::renderDT({
# Main information summary table ("tab1") with export buttons.
res <- descriptive(values,type="tab1")
TAB<-res$TAB
values <-res$values
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 30, dom = 'Bfrtip',ordering=F,
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%')
})
#####################################
#####################################
output$Mos.Prod.Authors <- DT::renderDT({
  # Most productive authors table (summary "tab3") with export buttons.
  res <- descriptive(values, type = "tab3")
  TAB <- res$TAB
  values <- res$values
  DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 30, dom = 'Bfrtip', ordering = F,
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              # BUG FIX: export filenames were copy-pasted as
                                              # 'Main_Information' from the summary table.
                                              list(extend = 'csv',
                                                   filename = 'Most_Productive_Authors',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Most_Productive_Authors',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Most_Productive_Authors',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1), c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(TAB), backgroundColor = 'white', textAlign = 'center', fontSize = '100%')
})
output$Most.Cited.Papers <- DT::renderDT({
  # Most cited documents table (summary "tab4") with export buttons.
  res <- descriptive(values, type = "tab4")
  TAB <- res$TAB
  values <- res$values
  DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 30, dom = 'Bfrtip', ordering = F,
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              # BUG FIX: export filenames were copy-pasted as
                                              # 'Main_Information' from the summary table.
                                              list(extend = 'csv',
                                                   filename = 'Most_Cited_Papers',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Most_Cited_Papers',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Most_Cited_Papers',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1), c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(TAB), backgroundColor = 'white', textAlign = 'center', fontSize = '100%')
})
output$Most.Prod.Countries <- DT::renderDT({
  # Most productive countries table (summary "tab5") with export buttons.
  res <- descriptive(values, type = "tab5")
  TAB <- res$TAB
  values <- res$values
  DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 30, dom = 'Bfrtip', ordering = F,
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              # BUG FIX: export filenames were copy-pasted as
                                              # 'Main_Information' from the summary table.
                                              list(extend = 'csv',
                                                   filename = 'Most_Productive_Countries',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Most_Productive_Countries',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Most_Productive_Countries',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1), c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(TAB), backgroundColor = 'white', textAlign = 'center', fontSize = '100%')
})
output$TC.Per.Countries <- DT::renderDT({
  # Total citations per country table (summary "tab6") with export buttons.
  res <- descriptive(values, type = "tab6")
  TAB <- res$TAB
  values <- res$values
  DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 30, dom = 'Bfrtip', ordering = F,
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              # BUG FIX: export filenames were copy-pasted as
                                              # 'Main_Information' from the summary table.
                                              list(extend = 'csv',
                                                   filename = 'Total_Citations_per_Country',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Total_Citations_per_Country',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Total_Citations_per_Country',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1), c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(TAB), backgroundColor = 'white', textAlign = 'center', fontSize = '100%')
})
# Most relevant sources: interactive DT table (descriptive "tab7") with
# export buttons.
output$Most.Rel.Sources <- DT::renderDT({
res <- descriptive(values,type="tab7")
TAB<-res$TAB
# descriptive() may refresh the shared reactive state; keep it in sync
values <-res$values
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 30, dom = 'Bfrtip',ordering=F,
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%')
})
# Most relevant keywords: interactive DT table (descriptive "tab8") with
# export buttons.
output$Most.Rel.Keywords <- DT::renderDT({
res <- descriptive(values,type="tab8")
TAB<-res$TAB
# descriptive() may refresh the shared reactive state; keep it in sync
values <-res$values
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 30, dom = 'Bfrtip',ordering=F,
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Main_Information',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%')
})
#####################################
#####################################
# Compound annual growth rate of scientific production, rendered as text.
# Side effect: caches the rate in values$GR for reuse elsewhere.
output$CAGR <- renderText({
pubPerYear <- table(values$M$PY)
nYears <- dim(pubPerYear)[1]
# geometric-mean growth between the first and last publication years
values$GR <- round(((pubPerYear[nYears]/pubPerYear[1])^(1/(nYears-1))-1)*100,2)
paste("Annual Growth Rate: ",values$GR,"%",collapse="",sep="")
})
# Annual scientific production: line+area chart of document counts per year,
# converted to an interactive plotly chart. Years with no documents are
# inserted with frequency 0 so the x-axis is continuous.
output$AnnualProdPlot <- renderPlotly({
res <- descriptive(values,type="tab2")
values <-res$values
Tab=table(values$results$Years)
## inserting missing years
YY=setdiff(seq(min(values$results$Years),max(values$results$Years)),names(Tab))
Y=data.frame(Year=as.numeric(c(names(Tab),YY)),Freq=c(as.numeric(Tab),rep(0,length(YY))))
Y=Y[order(Y$Year),]
names(Y)=c("Year","Freq")
# 'text' aesthetic feeds the plotly hover tooltip
g=ggplot2::ggplot(Y, aes(x = Y$Year, y = Y$Freq, text=paste("Year: ",Y$Year,"\nN .of Documents: ",Y$Freq))) +
geom_line(aes(group="NA")) +
geom_area(aes(group="NA"),fill = '#002F80', alpha = .5) +
labs(x = 'Year'
, y = 'Articles'
, title = "Annual Scientific Production") +
scale_x_continuous(breaks= (Y$Year[seq(1,length(Y$Year),by=2)])) +
theme(text = element_text(color = "#444444")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 0)
,axis.title.x = element_text(hjust = 0)
)
plot.ly(g)
})#, height = 500, width =900)
# Annual production table: renders values$TAB (cached by the companion plot's
# descriptive() call) as a DT with export buttons.
output$AnnualProdTable <- DT::renderDT({
TAB <- values$TAB
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Annual_Production',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Annual_Production',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Annual_Production',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Average article citations per year, plotted as an annual time series.
# Side effect: caches the computed table in values$AnnualTotCitperYear for
# the companion DT output.
output$AnnualTotCitperYearPlot <- renderPlotly({
if (values$results[[1]]=="NA"){
values$results=biblioAnalysis(values$M)}
x=values$results
# Total Citation Plot: article count and mean citations per publication year
Table2=aggregate(x$TotalCitation,by=list(x$Years),length)
Table2$xx=aggregate(x$TotalCitation,by=list(x$Years),mean)$x
Table2$Annual=NA
# current year parsed from the tail of date() (last 4 characters)
d=date()
d=as.numeric(substring(d,nchar(d)-3,nchar(d)))
Table2$Years=d-Table2$Group.1
Table2$Annual=Table2$xx/Table2$Years
names(Table2)=c("Year","N","MeanTCperArt","MeanTCperYear","CitableYears")
## inserting missing years with all-zero rows
YY=setdiff(seq(min(x$Years,na.rm=TRUE),max(x$Years,na.rm=TRUE)),Table2$Year)
# fixed: was `length(YY>0)` (length of a logical vector), which only worked
# because a non-zero length coerces to TRUE; the intent is `length(YY) > 0`
if (length(YY) > 0){
YY=data.frame(YY,0,0,0,0)
names(YY)=c("Year","N","MeanTCperArt","MeanTCperYear","CitableYears")
Table2=rbind(Table2,YY)
Table2=Table2[order(Table2$Year),]
row.names(Table2)=Table2$Year}
values$AnnualTotCitperYear=Table2
Table2$group="A"
# 'text' aesthetic feeds the plotly hover tooltip
g=ggplot(Table2, aes(x = Table2$Year, y =Table2$MeanTCperYear,text=paste("Year: ",Table2$Year,"\nAverage Citations per Year: ",round(Table2$MeanTCperYear,1)))) +
geom_line(aes(x = Table2$Year, y = Table2$MeanTCperYear, group=Table2$group)) +
geom_area(aes(x = Table2$Year, y = Table2$MeanTCperYear, group=Table2$group),fill = '#002F80', alpha = .5) +
labs(x = 'Year'
, y = 'Citations'
, title = "Average Article Citations per Year")+
scale_x_continuous(breaks= (Table2$Year[seq(1,length(Table2$Year),by=2)])) +
theme(text = element_text(color = "#444444")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 0)
,axis.title.x = element_text(hjust = 0)
)
plot.ly(g)
})
# Annual total citation per year table: renders values$AnnualTotCitperYear
# (cached by the companion plot) as a DT with export buttons.
output$AnnualTotCitperYearTable <- DT::renderDT({
TAB <- values$AnnualTotCitperYear
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Annual_Total_Citation_per_Year',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Annual_Total_Citation_per_Year',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Annual_Total_Citation_per_Year',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Three-fields (Sankey) diagram linking the selected left/middle/right
# metadata fields; re-renders only when the Apply button is pressed.
output$ThreeFielsPlot <- networkD3::renderSankeyNetwork({
input$apply3F  # sole reactive dependency: the Apply button
isolate({
selectedFields <- c(input$LeftField, input$CentralField, input$RightField)
itemCounts <- c(input$LeftFieldn, input$CentralFieldn, input$RightFieldn)
threeFieldsPlot(values$M, fields = selectedFields, n = itemCounts, width = 1200, height = 600)
})
})
### SOURCES MENU ####
# Most relevant sources: horizontal bar chart of the top-K sources by
# document count. Side effect: caches the full table in values$TABSo.
output$MostRelSourcesPlot <- renderPlotly({
res <- descriptive(values,type="tab7")
values <-res$values
values$TABSo<-values$TAB
#xx=as.data.frame(values$results$Sources)
xx<- values$TAB
# clamp the requested number of sources to the available rows
if (input$MostRelSourcesK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelSourcesK}
#xx=xx[1:k,]
xx=subset(xx, row.names(xx) %in% row.names(xx)[1:k])
xx$Articles=as.numeric(xx$Articles)
# truncate long source names for readable axis labels
xx$Sources=substr(xx$Sources,1,50)
g=ggplot2::ggplot(data=xx, aes(x=xx$Sources, y=xx$Articles, fill=-xx$Articles,text=paste("Source: ",xx$Sources,"\nN. of Documents: ",xx$Articles))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$Sources))+
labs(title="Most Relevant Sources", x = "Sources")+
labs(y = "N. of Documents")+
theme_minimal()+
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most relevant sources table: renders values$TABSo (cached by the companion
# plot) as a DT with export buttons.
output$MostRelSourcesTable <- DT::renderDT({
TAB <- values$TABSo
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Relevant_Sources',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Relevant_Sources',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Relevant_Sources',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%')
})
# Most cited sources: horizontal bar chart of the top-K sources appearing in
# cited references (CR_SO tag). Side effect: caches values$TABSoCit.
output$MostRelCitSourcesPlot <- renderPlotly({
# extract the cited-reference source field before tabulating
values$M=metaTagExtraction(values$M,"CR_SO")
TAB=tableTag(values$M,"CR_SO")
TAB=data.frame(Sources=names(TAB),Articles=as.numeric(TAB),stringsAsFactors = FALSE)
values$TABSoCit<-TAB
#xx=as.data.frame(values$results$Sources)
xx<- TAB
# clamp the requested number of sources to the available rows
if (input$MostRelCitSourcesK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelCitSourcesK}
#xx=xx[1:k,]
xx=subset(xx, row.names(xx) %in% row.names(xx)[1:k])
xx$Articles=as.numeric(xx$Articles)
xx$Sources=substr(xx$Sources,1,50)
g=ggplot2::ggplot(data=xx, aes(x=xx$Sources, y=xx$Articles, fill=-xx$Articles,text=paste("Source: ",xx$Sources,"\nN. of Documents: ",xx$Articles))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$Sources))+
labs(title="Most Cited Sources", x = "Sources")+
labs(y = "N. of Documents")+
theme_minimal()+
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most cited sources table: renders values$TABSoCit (cached by the companion
# plot) as a DT with export buttons.
output$MostRelCitSourcesTable <- DT::renderDT({
TAB <- values$TABSoCit
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Cited_Sources',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Cited_Sources',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Cited_Sources',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Bradford's law source-clustering plot; caches the full result for the
# companion table output.
output$bradfordPlot <- renderPlotly({
bradfordRes <- bradford(values$M)
values$bradford <- bradfordRes
plot.ly(bradfordRes$graph)
})#,height = 600)
# Bradford's law core-sources table, rendered as a DT with export buttons.
# Fix: the options request the Buttons feature (dom = 'Bfrtip', buttons = ...)
# but the "Buttons" extension was never declared, unlike every sibling DT
# output in this file; declared now so the export buttons actually render.
output$bradfordTable <- DT::renderDT({
DT::datatable(values$bradford$table, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Bradford_Law',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Bradford_Law',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Bradford_Law',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$bradford$table))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$bradford$table), backgroundColor = 'white',textAlign = 'center')
})
# Source impact (H-index) plot; recomputed only when the apply button fires.
# Hindex_plot() also populates values$H, read by the companion table.
output$SourceHindexPlot <- renderPlotly({
input$applyHsource
withProgress(message = 'Calculation in progress',
value = 0, {
isolate(res <- Hindex_plot(values,type="source"))
})
isolate(plot.ly(res$g))
})#, height = 500, width =900)
# Source impact table: renders values$H (populated by Hindex_plot in the
# companion plot output) as a DT with export buttons.
output$SourceHindexTable <- DT::renderDT({
DT::datatable(values$H, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Source_Impact',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Source_Impact',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Source_Impact',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$H))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$H), backgroundColor = 'white',textAlign = 'center')
})
# Source growth over time: loess-smoothed occurrence curves for the top
# sources, with in-plot labels placed at the second-to-last smoothed x value
# via ggrepel. Side effects: caches values$PYSO (wide table) and values$SODF
# (long table) for other outputs.
output$soGrowthPlot <- renderPlot({
if (input$SOse=="Yes"){se=TRUE}else{se=FALSE}
if (input$cumSO=="Cum"){
cdf=TRUE
laby="Cumulate occurrences (loess smoothing)"
}else{
cdf=FALSE
laby="Annual occurrences (loess smoothing)"}
values$PYSO=sourceGrowth(values$M,input$topSO, cdf=cdf)
# reshape the wide year-by-source table into long (Year, Source, Freq)
term=names(values$PYSO)[-1]
term=rep(term,each=dim(values$PYSO)[1])
n=dim(values$PYSO)[1]*(dim(values$PYSO)[2]-1)
freq=matrix(as.matrix(values$PYSO[,-1]),n,1)
values$SODF=data.frame(Year=rep(values$PYSO$Year,(dim(values$PYSO)[2]-1)),Source=term, Freq=freq)
g=ggplot(values$SODF)+
geom_smooth(aes(x=values$SODF$Year,y=values$SODF$Freq, group=values$SODF$Source, color=values$SODF$Source),se=se, method = "loess", formula="y ~ x")+
labs(x = 'Year'
, y = laby
, title = "Source Growth") +
#ylim(0, NA) +
scale_x_continuous(breaks= (values$PYSO$Year[seq(1,length(values$PYSO$Year),by=ceiling(length(values$PYSO$Year)/20))])) +
geom_hline(aes(yintercept=0, alpha=0.1))+
theme(text = element_text(color = "#444444"), legend.position="none"
,plot.caption = element_text(size = 9, hjust = 0.5, color = "black", face = "bold")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0.95, angle = 0)
,axis.text.x = element_text(size=10)
)
# recover the smoothed curve coordinates to anchor the repelled labels
DFsmooth=(ggplot_build(g)$data[[1]])
DFsmooth$group=factor(DFsmooth$group, labels=levels(values$SODF$Source))
maximum=sort(unique(DFsmooth$x),decreasing=TRUE)[2]
DF2=subset(DFsmooth, x == maximum)
g=g+
ggrepel::geom_text_repel(data = DF2, aes(label = DF2$group, colour = DF2$group, x =DF2$x, y = DF2$y), hjust = -.1)
suppressWarnings(plot(g))
},height = 600, width = 900)
# Source dynamics table: renders values$PYSO (cached by the growth plot) as
# a DT with export buttons.
output$soGrowthtable <- DT::renderDT({
soData=values$PYSO
DT::datatable(soData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Source_Dynamics',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Source_Dynamics',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Source_Dynamics',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(soData))-1))))) %>%
formatStyle(names(soData), backgroundColor = 'white')
#return(Data)
})
### AUTHORS MENU ####
### Authors ----
# Most relevant authors: horizontal bar chart of the top-K authors, measured
# as raw counts ("t"), percentage of collection ("p"), or fractionalized
# counts ("f") per input$AuFreqMeasure. Caches values$TABAu for the table.
output$MostRelAuthorsPlot <- renderPlotly({
res <- descriptive(values,type="tab3")
values <-res$values
values$TABAu<-values$TAB
#xx=as.data.frame(values$results$Authors, stringsAsFactors = FALSE)
xx=values$TABAu
# pick the measure columns and axis label
switch(input$AuFreqMeasure,
t={
lab="N. of Documents"
xx=xx[,1:2]
},
p={xx=xx[,1:2]
xx[,2]=as.numeric(xx[,2])/dim(values$M)[1]*100
lab="N. of Documents (in %)"
},
f={
xx=xx[,3:4]
lab="N. of Documents (Fractionalized)"
})
xx[,2]=as.numeric(xx[,2])
# clamp the requested number of authors to the available rows
if (input$MostRelAuthorsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelAuthorsK}
xx=xx[1:k,]
xx[,2]=round(xx[,2],1)
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Author: ",xx[,1],"\n",lab,": ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Relevant Authors", x = "Authors")+
labs(y = lab)+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most relevant authors table: renders values$TABAu (cached by the companion
# plot) as a DT with export buttons.
output$MostRelAuthorsTable <- DT::renderDT({
TAB <- values$TABAu
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Relevant_Authors',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Relevant_Authors',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Relevant_Authors',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Author impact (H-index) plot; recomputed only when the apply button fires.
# Hindex_plot() also populates values$H, read by the companion table.
output$AuthorHindexPlot <- renderPlotly({
input$applyHauthor
withProgress(message = 'Calculation in progress',
value = 0, {
isolate(res <- Hindex_plot(values,type="author"))
})
isolate(plot.ly(res$g))
})#, height = 500, width =900)
# Author impact table: renders values$H (populated by Hindex_plot in the
# companion plot output) as a DT; 4th column rounded to 3 decimals.
output$AuthorHindexTable <- DT::renderDT({
DT::datatable(values$H, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Author_Impact',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Author_Impact',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Author_Impact',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$H))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$H), backgroundColor = 'white',textAlign = 'center') %>%
formatRound(names(values$H)[4], 3)
})
# Top-authors' production over time; caches the full result in
# values$AUProdOverTime for the companion table outputs.
output$TopAuthorsProdPlot <- renderPlotly({
prodOverTime <- authorProdOverTime(values$M, k = input$TopAuthorsProdK, graph = FALSE)
values$AUProdOverTime <- prodOverTime
plot.ly(prodOverTime$graph)
})#, height = 550, width =1100)
# Author production over time table: renders the per-author data frame cached
# by the companion plot; last column rounded to 3 decimals.
output$TopAuthorsProdTable <- DT::renderDT({
TAB <- values$AUProdOverTime$dfAU
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Author_Production_Over_Time',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Author_Production_Over_Time',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Author_Production_Over_Time',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%') %>%
formatRound(names(TAB)[dim(TAB)[2]], 3)
})
# Documents behind the author-production-over-time analysis; the DOI column
# is turned into a clickable doi.org link (escape = FALSE keeps the HTML).
output$TopAuthorsProdTablePapers <- DT::renderDT({
TAB <- values$AUProdOverTime$dfPapersAU
TAB$DOI=paste0('<a href=\"http://doi.org/',TAB$DOI,'\" target=\"_blank\">',TAB$DOI,'</a>')
DT::datatable(TAB, rownames = FALSE, escape = FALSE,extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Author_Production_Over_Time_Documents',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Author_Production_Over_Time_Documents',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Author_Production_Over_Time_Documents',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '100%') %>%
formatRound(names(TAB)[dim(TAB)[2]], 3)
})
# Lotka's law: observed author-productivity distribution (solid area)
# against the theoretical Lotka distribution (dashed line). Caches the
# fitted result in values$lotka for the companion table.
output$lotkaPlot <- renderPlotly({
values$lotka=lotka(biblioAnalysis(values$M))
AuProd=values$lotka$AuthorProd
# theoretical frequencies from the fitted constant C (inverse-square law),
# normalized to sum to 1
AuProd$Theoretical=10^(log10(values$lotka$C)-2*log10(AuProd[,1]))
AuProd$Theoretical=AuProd$Theoretical/sum(AuProd$Theoretical)
g=ggplot2::ggplot(AuProd, aes(x = AuProd$N.Articles, y = AuProd$Freq*100, text=paste("N.Articles: ",AuProd$N.Articles,"\n% of production: ",round(AuProd$Freq*100,1)))) +
geom_line(aes(group="NA")) +
geom_area(aes(group="NA"),fill = '#002F80', alpha = .5) +
geom_line(aes(y=AuProd$Theoretical*100, group="NA"),linetype = "dashed",color="black",alpha=0.8)+
xlim(0,max(AuProd$N.Articles)+1)+
labs(x = 'Documents written'
, y = '% of Authors'
, title = "The Frequency Distribution of Scientific Productivity") +
#scale_x_continuous(breaks= (Y$Year[seq(1,length(Y$Year),by=2)])) +
theme(text = element_text(color = "#444444")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0)
)
plot.ly(g)
})#,height = 600)
# Lotka's law table: renders values$lotka$AuthorProd (renaming its columns
# for display) with the proportion column rounded to 3 decimals.
# Fix: the options request the Buttons feature (dom = 'Bfrtip', buttons = ...)
# but the "Buttons" extension was never declared, unlike every sibling DT
# output in this file; declared now so the export buttons actually render.
output$lotkaTable <- DT::renderDT({
names(values$lotka$AuthorProd)=c("Documents written","N. of Authors","Proportion of Authors")
DT::datatable(values$lotka$AuthorProd, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Lotka_Law',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Lotka_Law',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Lotka_Law',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$lotka$AuthorProd))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$lotka$AuthorProd), backgroundColor = 'white',textAlign = 'center') %>%
formatRound(names(values$lotka$AuthorProd)[3], 3)
})
### Affiliations ----
# Most relevant affiliations: horizontal bar chart of the top-K affiliations,
# with ("tab11") or without ("tab12") name disambiguation per input$disAff.
# Caches values$TABAff for the companion table.
output$MostRelAffiliationsPlot <- renderPlotly({
if (input$disAff=="Y"){
res <- descriptive(values,type="tab11")
xx=as.data.frame(values$results$Affiliations, stringsAsFactors = FALSE)
}else{
res <- descriptive(values,type="tab12")
xx=values$TAB
names(xx)=c("AFF","Freq")
}
values <-res$values
values$TABAff <- values$TAB
# clamp the requested number of affiliations to the available rows
if (input$MostRelAffiliationsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostRelAffiliationsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx$AFF, y=xx$Freq, fill=-xx$Freq, text=paste("Affiliation: ",xx$AFF,"\nN. of Author in the Affiliation: ",xx$Freq))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx$AFF))+
labs(title="Most Relevant Affiliations: Frequency distribution of affiliations (of all co-authors for each document)", x = "Affiliations")+
labs(y = "N. of Author in the Affiliation")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most relevant affiliations table: renders values$TABAff (cached by the
# companion plot) as a DT with export buttons.
output$MostRelAffiliationsTable <- DT::renderDT({
TAB <- values$TABAff
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Relevant_Affiliations',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Relevant_Affiliations',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Relevant_Affiliations',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
### Countries ----
# Corresponding author's country: stacked bar chart of the top-K countries,
# split into single-country (SCP) vs multiple-country (MCP) publications.
# Caches values$TABCo for the companion table.
output$MostRelCountriesPlot <- renderPlotly({
res <- descriptive(values,type="tab5")
values <-res$values
values$TABCo <- values$TAB
k=input$MostRelCountriesK
xx=values$results$CountryCollaboration[1:k,]
xx=xx[order(-(xx$SCP+xx$MCP)),]
# build one long data frame with an SCP block and an MCP block for stacking
xx1=cbind(xx[,1:2],rep("SCP",k))
names(xx1)=c("Country","Freq","Collaboration")
xx2=cbind(xx[,c(1,3)],rep("MCP",k))
names(xx2)=c("Country","Freq","Collaboration")
xx=rbind(xx2,xx1)
xx$Country=factor(xx$Country,levels=xx$Country[1:dim(xx2)[1]])
g=suppressWarnings(ggplot2::ggplot(data=xx, aes(x=xx$Country, y=xx$Freq,fill=xx$Collaboration, text=paste("Country: ",xx$Country,"\nN.of Documents: ",xx$Freq))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_x_discrete(limits = rev(levels(xx$Country)))+
scale_fill_discrete(name="Collaboration",
breaks=c("SCP","MCP"))+
labs(title = "Corresponding Author's Country", x = "Countries", y = "N. of Documents",
caption = "SCP: Single Country Publications, MCP: Multiple Country Publications")+
theme_minimal() +
theme(plot.caption = element_text(size = 9, hjust = 0.5,
color = "blue", face = "italic"))+
coord_flip())
plot.ly(g)
})#, height = 500, width =900)
# Corresponding-author country table: renders values$TABCo (cached by the
# companion plot) as a DT with export buttons.
output$MostRelCountriesTable <- DT::renderDT({
TAB <- values$TABCo
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Relevant_Countries_By_Corresponding_Author',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Relevant_Countries_By_Corresponding_Author',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Relevant_Countries_By_Corresponding_Author',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# World map of scientific production by country; caches the full result in
# values$mapworld for the companion table output.
output$countryProdPlot <- renderPlotly({
worldMap <- mapworld(values$M)
values$mapworld <- worldMap
plot.ly(worldMap$g)
})#, height = 500, width =900)
# Country production table: renders values$mapworld$tab (cached by the
# companion map plot) as a DT with export buttons.
output$countryProdTable <- DT::renderDT({
TAB <- values$mapworld$tab
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Country_Production',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Country_Production',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Country_Production',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Most cited countries: horizontal bar chart of the top-K countries by total
# citations ("TC") or citations per year. Caches values$TABCitCo.
output$MostCitCountriesPlot <- renderPlotly({
res <- descriptive(values,type="tab6")
values <-res$values
values$TABCitCo <- values$TAB
xx=values$TAB
xx[,2]=as.numeric(xx[,2])
xx[,3]=as.numeric(xx[,3])
# clamp the requested number of countries to the available rows;
# fixed copy-paste bug: previously read input$MostRelAffiliationsK here,
# so this slider had no effect (it tracked the affiliations slider instead)
if (input$MostCitCountriesK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostCitCountriesK}
if (input$CitCountriesMeasure=="TC"){
xx=xx[1:k,c(1,2)]
laby="N. of Citations"
} else {
xx=xx[order(-xx[,3]),]
xx=xx[1:k,c(1,3)]
laby="N. of Citations per Year"
}
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2],text=paste("Country: ",xx[,1],"\n",laby,": ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Cited Countries", x = "Countries")+
labs(y = laby)+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most cited countries table: renders values$TABCitCo (cached by the
# companion plot) as a DT with export buttons.
output$MostCitCountriesTable <- DT::renderDT({
TAB <- values$TABCitCo
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Cited_Countries',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Cited_Countries',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Cited_Countries',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
### DOCUMENTS MENU ####
### Documents ----
# Most globally cited documents: horizontal bar chart of the top-K documents
# by total citations ("TC") or citations per year. Caches values$TABGlobDoc.
# NOTE(review): the hover tooltip always says "Global Citations" even for
# the per-year measure — confirm whether that label should follow 'lab'.
output$MostCitDocsPlot <- renderPlotly({
res <- descriptive(values,type="tab4")
values <-res$values
values$TABGlobDoc <- values$TAB
if (input$CitDocsMeasure=="TC"){
xx=data.frame(values$results$MostCitedPapers[1],values$results$MostCitedPapers[2], stringsAsFactors = FALSE,row.names=NULL)
lab="Total Citations"} else {
xx=data.frame(values$results$MostCitedPapers[1],values$results$MostCitedPapers[3], stringsAsFactors = FALSE,row.names=NULL)
lab="Total Citations per Year"
}
# clamp the requested number of documents to the available rows
if (input$MostCitDocsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostCitDocsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Document: ", xx[,1],"\nGlobal Citations: ",xx[,2]))) +
geom_bar(stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Cited Documents", x = "Documents")+
labs(y = lab)+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most globally cited documents table: renders values$TABGlobDoc (cached by
# the companion plot) as a DT with export buttons.
output$MostCitDocsTable <- DT::renderDT({
TAB <- values$TABGlobDoc
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Global_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Global_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Global_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
# Most locally cited documents: horizontal bar chart of the top-K documents
# by citations received within the collection itself. Caches values$TABLocDoc.
output$MostLocCitDocsPlot <- renderPlotly({
withProgress(message = 'Calculation in progress',
value = 0, {
TAB <-localCitations(values$M, fast.search=TRUE, sep = input$LocCitSep)$Paper
})
xx=data.frame(Document=as.character(TAB[,1]), DOI=as.character(TAB[,2]), Year=TAB[,3], "Local Citations"=TAB[,4], "Global Citations"=TAB[,5],stringsAsFactors = FALSE)
values$TABLocDoc=xx
# clamp the requested number of documents to the available rows
if (input$MostLocCitDocsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostLocCitDocsK}
xx=xx[1:k,]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,4], fill=-xx[,4], text=paste("Document: ",xx[,1],"\nLocal Citations: ",xx[,4]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]))+
labs(title="Most Local Cited Documents", x = "Documents")+
labs(y = "Local Citations")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
# Most locally cited documents table: renders values$TABLocDoc with the DOI
# column converted to a clickable doi.org link (escape = FALSE keeps HTML).
output$MostLocCitDocsTable <- DT::renderDT({
TAB <- values$TABLocDoc
TAB$DOI<- paste0('<a href=\"http://doi.org/',TAB$DOI,'\" target=\"_blank\">',TAB$DOI,'</a>')
DT::datatable(TAB, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Local_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Local_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Local_Cited_Documents',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
### Cited References ----
### Most Cited References: barplot of the most locally cited references.
### The full frequency table is cached in values$TABCitRef for the table output.
output$MostCitRefsPlot <- renderPlotly({
CR=citations(values$M,sep=input$CitRefsSep)$Cited
TAB=data.frame(names(CR),as.numeric(CR),stringsAsFactors = FALSE)
names(TAB)=c("Cited References", "Citations")
values$TABCitRef=TAB
xx=values$TABCitRef
# clamp the requested number of references to the rows available
if (input$MostCitRefsK>dim(xx)[1]){
k=dim(xx)[1]
} else {k=input$MostCitRefsK}
xx=xx[1:k,]
#xx[,1]=substr(xx[,1],1,50)
# axis labels are truncated to 50 chars; full strings stay in the tooltip
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste("Reference: ",xx[,1],"\nLocal Citations: ",xx[,2]))) +
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev(xx[,1]), labels=substr(rev(xx[,1]),1,50))+
labs(title="Most Cited References", x = "References")+
labs(y = "Local Citations")+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
plot.ly(g)
})#, height = 500, width =900)
### Companion table: adds a Google Scholar search link per reference.
output$MostCitRefsTable <- DT::renderDT({
TAB <- values$TABCitRef
TAB$link <- trimES(gsub("[[:punct:]]" , " ",reduceRefs(TAB[,1])))
TAB$link <- paste0('<a href=\"https://scholar.google.it/scholar?hl=en&as_sdt=0%2C5&q=',TAB$link,'\" target=\"_blank\">','link','</a>')
TAB=TAB[,c(3,1,2)]
names(TAB)[1]="Google Scholar"
DT::datatable(TAB, rownames = FALSE, escape=FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Local_Cited_References',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Local_Cited_References',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Local_Cited_References',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
### Reference Publication Year Spectroscopy (RPYS) plot.
### The rpys() result is cached in values$res so the companion
### rpysTable/crTable outputs can reuse it.
output$rpysPlot <- renderPlotly({
  res <- rpys(values$M, sep = input$rpysSep, graph = FALSE)
  values$res <- res
  plot.ly(res$spectroscopy)
})
### RPYS summary table (reads the result cached by output$rpysPlot).
output$rpysTable <- DT::renderDT({
rpysData=values$res$rpysTable
DT::datatable(rpysData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'RPYS',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(rpysData))-1))))) %>%
formatStyle(names(rpysData), backgroundColor = 'white')
#return(Data)
})
### Cited references behind the RPYS analysis, newest-first then by frequency.
output$crTable <- DT::renderDT({
crData=values$res$CR
crData=crData[order(-as.numeric(crData$Year),-crData$Freq),]
names(crData)=c("Year", "Reference", "Local Citations")
DT::datatable(crData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'RPYS_Documents',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(crData))-1))))) %>%
formatStyle(names(crData), backgroundColor = 'white')
#return(Data)
})
### Words ----
### Most Relevant Words: barplot of the top-N terms of the selected field
### (ID/DE/TI/AB). The full frequency table is cached in values$TABWord
### for the companion table output.
output$MostRelWordsPlot <- renderPlotly({
  WR <- wordlist(values$M, Field = input$MostRelWords, n = Inf, measure = "identity")$v
  TAB <- data.frame(names(WR), as.numeric(WR), stringsAsFactors = FALSE)
  names(TAB) <- c("Words", "Occurrences")
  values$TABWord <- TAB
  xx <- values$TABWord
  # clamp the requested number of words to the rows actually available
  k <- min(input$MostRelWordsN, dim(xx)[1])
  xx <- xx[1:k, ]
  # human-readable axis/tooltip label for the selected field
  switch(input$MostRelWords,
         ID = {lab <- "Keywords Plus"},
         DE = {lab <- "Author's Keywords"},  # fixed typo: was "Auhtor's Keywords"
         TI = {lab <- "Title's Words"},
         AB = {lab <- "Abstract's Words"})
  g <- ggplot2::ggplot(data = xx, aes(x = xx[, 1], y = xx[, 2], fill = -xx[, 2],
                                      text = paste(lab, ": ", xx[, 1], "\nOccurrences: ", xx[, 2]))) +
    geom_bar(aes(group = "NA"), stat = "identity") +
    scale_fill_continuous(type = "gradient") +
    scale_x_discrete(limits = rev(xx[, 1])) +
    labs(title = "Most Relevant Words", x = lab) +
    labs(y = "Occurrences") +
    theme_minimal() +
    guides(fill = FALSE) +
    coord_flip()
  plot.ly(g)
})#, height = 500, width =900)
### Table companion of the Most Relevant Words plot (values$TABWord cache).
output$MostRelWordsTable <- DT::renderDT({
TAB <- values$TABWord
DT::datatable(TAB, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 20, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TAB))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(TAB), backgroundColor = 'white',textAlign = 'center', fontSize = '110%')
})
### Word cloud of the most frequent terms; the underlying word list is
### cached in values$Words for output$wordTable.
output$wordcloud <- wordcloud2::renderWordcloud2({
resW=wordlist(M=values$M, Field=input$summaryTerms, n=input$n_words, measure=input$measure)
W=resW$W
values$Words=resW$Words
wordcloud2::wordcloud2(W, size = input$scale, minSize = 0, gridSize = input$padding,
fontFamily = input$font, fontWeight = 'normal',
color = input$wcCol, backgroundColor = input$wcBGCol,
minRotation = 0, maxRotation = input$rotate/10, shuffle = TRUE,
rotateRatio = 0.7, shape = input$wcShape, ellipticity = input$ellipticity,
widgetsize = NULL, figPath = NULL, hoverFunction = NULL)
})
### TreeMap of term frequencies; word list cached in values$WordsT
### for output$treeTable.
output$treemap <- renderPlot({
resW=wordlist(M=values$M, Field=input$treeTerms, n=input$treen_words, measure=input$treemeasure)
W=resW$W
values$WordsT=resW$Words
treemap::treemap(W, #Your data frame object
index=c("Terms"), #A list of your categorical variables
vSize = "Frequency", #This is your quantitative variable
type="index", #Type sets the organization and color scheme of your treemap
palette = input$treeCol, #Select your color palette from the RColorBrewer presets or make your own.
title="Word TreeMap", #Customize your title
fontsize.title = 14, #Change the font size of the title
fontsize.labels = input$treeFont
)
})
### Table of the word-cloud terms (values$Words).
### NOTE(review): export buttons are configured but the 'Buttons' DT extension
### is not loaded here, unlike sibling tables — verify they actually render.
output$wordTable <- DT::renderDT({
DT::datatable(values$Words, rownames = FALSE,
options = list(pageLength = 10, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Most_Frequent_Words',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$Words))-1)))),
class = 'cell-border compact stripe') %>%
formatStyle(names(values$Words), backgroundColor = 'white',textAlign = 'center')
})
### Table of the TreeMap terms (values$WordsT, cached by output$treemap).
### Bug fix: columnDefs targets and formatStyle previously referenced
### values$Words (the word-cloud table) instead of values$WordsT, which is
### the data actually rendered here. Also loads the 'Buttons' extension so
### the configured export buttons render, consistent with sibling tables.
output$treeTable <- DT::renderDT({
  DT::datatable(values$WordsT, rownames = FALSE, extensions = c("Buttons"),
                options = list(pageLength = 10, dom = 'Bfrtip',
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              list(extend = 'csv',
                                                   filename = 'Most_Frequent_Words',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'Most_Frequent_Words',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'Most_Frequent_Words',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(values$WordsT))-1)))),
                class = 'cell-border compact stripe') %>%
    formatStyle(names(values$WordsT), backgroundColor = 'white', textAlign = 'center')
},height = 600, width = 900)
### Word growth over time: loess-smoothed annual (or cumulative) occurrences
### of the top keywords of the selected field. The keyword-by-year matrix is
### cached in values$KW and its long form in values$DF for the table output.
output$kwGrowthPlot <- renderPlot({
if (input$cumTerms=="Cum"){
cdf=TRUE
laby="Cumulate occurrences (loess smoothing)"
}else{
cdf=FALSE
laby="Annual occurrences (loess smoothing)"}
if (input$se=="Yes"){se=TRUE}else{se=FALSE}
# TI/AB need term extraction first (adds TI_TM/AB_TM columns to M if absent)
switch(input$growthTerms,
ID={
KW=KeywordGrowth(values$M, Tag = "ID", sep = ";", top = input$topkw[2], cdf = cdf)
},
DE={
KW=KeywordGrowth(values$M, Tag = "DE", sep = ";", top = input$topkw[2], cdf = cdf)
},
TI={
if (!("TI_TM" %in% names(values$M))){
values$M=termExtraction(values$M,Field = "TI", verbose=FALSE)
}
KW=KeywordGrowth(values$M, Tag = "TI_TM", sep = ";", top = input$topkw[2], cdf = cdf)
},
AB={
if (!("AB_TM" %in% names(values$M))){
values$M=termExtraction(values$M,Field = "AB", verbose=FALSE)
}
KW=KeywordGrowth(values$M, Tag = "AB_TM", sep = ";", top = input$topkw[2], cdf = cdf)
}
)
# keep only the requested rank range of keywords (+1 skips the Year column)
values$KW=KW[,c(1,seq(input$topkw[1],input$topkw[2])+1)]
# reshape wide keyword-by-year matrix into long (Year, Term, Freq) form
term=names(values$KW)[-1]
term=rep(term,each=dim(values$KW)[1])
n=dim(values$KW)[1]*(dim(values$KW)[2]-1)
freq=matrix(as.matrix(values$KW[,-1]),n,1)
values$DF=data.frame(Year=rep(values$KW$Year,(dim(values$KW)[2]-1)),Term=term, Freq=freq)
g=ggplot(values$DF)+
geom_smooth(aes(x=values$DF$Year,y=values$DF$Freq, group=values$DF$Term, color=values$DF$Term),se = se,method = "loess",formula ='y ~ x')+
labs(x = 'Year'
, y = laby
, title = "Word Growth") +
#ylim(0, NA) +
scale_x_continuous(breaks= (values$KW$Year[seq(1,length(values$KW$Year),by=ceiling(length(values$KW$Year)/20))])) +
geom_hline(aes(yintercept=0, alpha=0.1))+
theme(text = element_text(color = "#444444"), legend.position="none"
,plot.caption = element_text(size = 9, hjust = 0.5, color = "black", face = "bold")
,panel.background = element_rect(fill = '#EFEFEF')
,panel.grid.minor = element_line(color = '#FFFFFF')
,panel.grid.major = element_line(color = '#FFFFFF')
,plot.title = element_text(size = 24)
,axis.title = element_text(size = 14, color = '#555555')
,axis.title.y = element_text(vjust = 1, angle = 90)
,axis.title.x = element_text(hjust = 0.95, angle = 0)
,axis.text.x = element_text(size=10)
)
# place a text label next to each curve at the second-to-last smoothed x
# (coordinates are taken from the built smoother, not the raw data)
DFsmooth=(ggplot_build(g)$data[[1]])
DFsmooth$group=factor(DFsmooth$group, labels=levels(values$DF$Term))
maximum=sort(unique(DFsmooth$x),decreasing=TRUE)[2]
DF2=subset(DFsmooth, x == maximum)
g=g+
ggrepel::geom_text_repel(data = DF2, aes(label = DF2$group, colour = DF2$group, x =DF2$x, y = DF2$y), hjust = -.1)
suppressWarnings(plot(g))
},height = 600, width = 900)
### Keyword-by-year occurrence table (values$KW cache).
output$kwGrowthtable <- DT::renderDT({
kwData=values$KW
DT::datatable(kwData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Word_Dynamics',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(kwData))-1))))) %>%
formatStyle(names(kwData), backgroundColor = 'white')
#return(Data)
})
#### Trend Topics ####
### Timespan slider for Trend Topics, bounded by the collection's
### publication years (PY) and defaulting to the full range.
output$trendSliderPY <- renderUI({
  yrs <- range(values$M$PY, na.rm = TRUE)
  sliderInput("trendSliderPY", "Timespan",
              min = yrs[1], max = yrs[2],
              value = yrs, sep = "")
})
### Trend Topics plot, recomputed only when the Apply button is pressed
### (everything else is isolate()d). Result cached in values$trendTopics.
output$trendTopicsPlot <- renderPlot({
input$applyTrendTopics
isolate({
# TI/AB fields require term extraction into a *_TM column first
if (input$trendTerms %in% c("TI","AB")){
values$M=termExtraction(values$M, Field = input$trendTerms, stemming = input$trendStemming, verbose = FALSE)
field=paste(input$trendTerms,"_TM",sep="")
} else {field=input$trendTerms}
values$trendTopics <- fieldByYear(values$M, field = field, timespan = input$trendSliderPY, min.freq = input$trendMinFreq,
n.items = input$trendNItems, labelsize = input$trendSize, graph = FALSE)
plot(values$trendTopics$graph)
})
},height = 700)
### Underlying Trend Topics data table.
output$trendTopicsTable <- DT::renderDT({
tpData=values$trendTopics$df_graph
DT::datatable(tpData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Trend_Topics',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tpData))-1))))) %>%
formatStyle(names(tpData), backgroundColor = 'white')
#return(Data)
})
### Conceptual Structure #####
### Co-occurrences network ----
### Co-occurrence network, recomputed on Apply. cocNetwork() mutates and
### returns the `values` reactive container; the igraph result is then
### converted to a visNetwork widget via igraph2vis().
output$cocPlot <- renderVisNetwork({
input$applyCoc
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(cocNetwork(input,values))
#dev.off();file.remove(t) ### end of trick
isolate(values$network<-igraph2vis(g=values$cocnet$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape))
isolate(values$network$VIS)
})
### Download handler: export the co-occurrence network in Pajek format.
### Fixed: was `content <- function(file)`, which assigned a variable in the
### server environment and only worked because the value matched the second
### argument positionally; `content =` names the argument explicitly.
output$network.coc <- downloadHandler(
  filename = "Co_occurrence_network.net",
  content = function(file) {
    igraph::write.graph(values$cocnet$graph_pajek, file = file, format = "pajek")
  },
  contentType = "net"
)
### save coc network image as html ####
### Download handler: save the co-occurrence network widget as an HTML file.
### Fixed: `content <-` (accidental env assignment + positional match)
### replaced with the named argument `content =`.
output$networkCoc.fig <- downloadHandler(
  filename = "network.html",
  content = function(con) {
    savenetwork(con)
  },
  contentType = "html"
)
### Cluster membership / betweenness centrality table for the
### co-occurrence network (values$cocnet computed by output$cocPlot).
output$cocTable <- DT::renderDT({
cocData=values$cocnet$cluster_res
names(cocData)=c("Term", "Cluster", "Btw Centrality")
DT::datatable(cocData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoWord_Network_Analysis',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(cocData))-1))))) %>%
formatStyle(names(cocData), backgroundColor = 'white')
#return(Data)
})
### Correspondence Analysis ----
### Conceptual Structure factorial map, recomputed on Apply.
### CAmap() mutates and returns the `values` reactive container.
output$CSPlot1 <- renderPlot({
input$applyCA
values <- isolate(CAmap(input,values))
}, height = 650, width = 800)
### Document-contribution plot; only meaningful for CA/MCA (not MDS),
### and only when the analysis produced a result (values$CS[[1]][1]!="NA").
output$CSPlot2 <- renderPlot({
if (input$method!="MDS"){
if (values$CS[[1]][1]!="NA"){
plot(values$CS$graph_documents_Contrib)
}else{
emptyPlot("Selected field is not included in your data collection")
}
}else{
emptyPlot("This plot is available only for CA or MCA analyses")
}
}, height = 650, width = 800)
### Most-cited-documents plot; same CA/MCA-only guard as CSPlot2.
output$CSPlot3 <- renderPlot({
if (input$method!="MDS"){
if (values$CS[[1]][1]!="NA"){
plot(values$CS$graph_documents_TC)
}else{
emptyPlot("Selected field is not included in your data collection")
}
}else{
emptyPlot("This plot is available only for CA or MCA analyses")
}
}, height = 650, width = 800)
### Topic dendrogram of the conceptual-structure clustering.
output$CSPlot4 <- renderPlot({
if (values$CS[[1]][1]!="NA"){
plot(values$CS$graph_dendogram)
}else{
emptyPlot("Selected field is not included in your data collection")
}
}, height = 650, width = 1000)
### Words-by-cluster table for the Conceptual Structure analysis.
### CA and MCA expose the same HCPC structure (km.res$data.clust), so they
### share one switch branch via fall-through (empty `CA = ,`); previously the
### two branches were byte-identical duplicates. MDS stores coordinates and
### cluster labels separately.
output$CSTableW <- DT::renderDT({
  switch(input$method,
         CA = ,
         MCA = {
           WData <- data.frame(word = row.names(values$CS$km.res$data.clust),
                               values$CS$km.res$data.clust,
                               stringsAsFactors = FALSE)
           names(WData)[4] <- "cluster"
         },
         MDS = {
           WData <- data.frame(word = row.names(values$CS$res), values$CS$res,
                               cluster = values$CS$km.res$cluster,
                               stringsAsFactors = FALSE)
         })
  # round the factorial coordinates for display
  WData$Dim.1 <- round(WData$Dim.1, 2)
  WData$Dim.2 <- round(WData$Dim.2, 2)
  DT::datatable(WData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
                options = list(pageLength = 50, dom = 'Bfrtip',
                               buttons = list('pageLength',
                                              list(extend = 'copy'),
                                              list(extend = 'csv',
                                                   filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'excel',
                                                   filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'pdf',
                                                   filename = 'CoWord_Factorial_Analysis_Words_By_Cluster',
                                                   title = " ",
                                                   header = TRUE),
                                              list(extend = 'print')),
                               lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
                               columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(WData))-1))))) %>%
    formatStyle(names(WData), backgroundColor = 'white')
})
### Documents-by-cluster table for the Conceptual Structure analysis
### (document coordinates and contributions, rounded for display).
output$CSTableD <- DT::renderDT({
CSData=values$CS$docCoord
CSData=data.frame(Documents=row.names(CSData),CSData,stringsAsFactors = FALSE)
CSData$dim1=round(CSData$dim1,2)
CSData$dim2=round(CSData$dim2,2)
CSData$contrib=round(CSData$contrib,2)
DT::datatable(CSData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoWord_Factorial_Analysis_Articles_By_Cluster',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(CSData))-1))))) %>%
formatStyle(names(CSData), backgroundColor = 'white')
})
### Thematic Map ----
### Thematic Map, recomputed on Apply; result cached in values$TM.
### validate() short-circuits with a message when no clusters were found.
output$TMPlot <- renderPlotly({
input$applyTM
#values <- isolate(TMmap(input,values))
values$TM <- isolate(thematicMap(values$M, field=input$TMfield, n=input$TMn, minfreq=input$TMfreq, stemming=input$TMstemming, size=input$sizeTM, n.labels=input$TMn.labels, repel=FALSE))
validate(
need(values$TM$nclust > 0, "\n\nNo topics in one or more periods. Please select a different set of parameters.")
)
plot.ly(values$TM$map)
})#, height = 650, width = 800)
### Network view of the Thematic Map clusters.
output$NetPlot <- renderVisNetwork({
values$networkTM<-igraph2vis(g=values$TM$net$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$networkTM$VIS
})
### Thematic Map word table (column 4 dropped from the words data frame).
output$TMTable <- DT::renderDT({
tmData=values$TM$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
### Thematic Evolution ----
### Dynamic numeric inputs for Thematic Evolution cutting years; defaults are
### publication-year quantiles so slices start roughly equally populated.
output$sliders <- renderUI({
numSlices <- as.integer(input$numSlices)
v=quantile(values$M$PY, seq(0,1,by=(1/(numSlices+1))), na.rm=TRUE)
# drop the 0% and 100% quantiles (they are the collection's min/max years)
v=round(v[-c(1,length(v))],0)
lapply(1:numSlices, function(i) {
# sliderInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),
# min=1990,max=2018,value=1990)
numericInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),value=v[i],min=min(values$M$PY, na.rm = TRUE)+1,max=max(values$M$PY, na.rm = TRUE)-1, step=1)
#numericInput(inputId = paste0("Slice", i), label = paste("Cutting Year", i),value=median(values$M$PY),min=min(values$M$PY)+1,max=max(values$M$PY)-1, step=1)
})
})
### Thematic Evolution Sankey diagram, recomputed on Apply. Collects the
### cutting years from the dynamically generated Slice* inputs, then runs
### thematicEvolution() and caches the result in values$nexus.
output$TEPlot <- networkD3::renderSankeyNetwork({
input$applyTE
values$yearSlices <- isolate(as.numeric())
isolate(for (i in 1:as.integer(input$numSlices)){
if (length(input[[paste0("Slice", i)]])>0){values$yearSlices=c(values$yearSlices,input[[paste0("Slice", i)]])}
})
if (length(values$yearSlices)>0){
values$nexus <- isolate(thematicEvolution(values$M, field=input$TEfield, values$yearSlices, n = input$nTE, minFreq = input$fTE, size = input$sizeTE, n.labels=input$TEn.labels, repel=FALSE))
validate(
need(values$nexus$check != FALSE, "\n\nNo topics in one or more periods. Please select a different set of parameters.")
)
isolate(plotThematicEvolution(Nodes = values$nexus$Nodes,Edges = values$nexus$Edges, measure = input$TEmeasure, min.flow = input$minFlowTE))
}
})
### Thematic Evolution edge table: rows with positive inclusion index,
### with weighted/plain inclusion and stability indices rounded for display.
output$TETable <- DT::renderDT({
TEData=values$nexus$Data
TEData=TEData[TEData$Inc_index>0,-c(4,8)]
names(TEData)=c("From", "To", "Words", "Weighted Inclusion Index", "Inclusion Index", "Occurrences", "Stability Index")
DT::datatable(TEData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Evolution',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(TEData))-1))))) %>%
formatStyle(names(TEData), backgroundColor = 'white') %>%
formatRound(names(TEData)[4], 2) %>%
formatRound(names(TEData)[5], 2) %>%
formatRound(names(TEData)[7], 2)
#return(Data)
})
### Per-period Thematic Maps 1-3; each guards against fewer periods being
### selected than its index.
output$TMPlot1 <- renderPlotly({
#input$applyTM
if (length(values$nexus$TM)>=1){
plot.ly(values$nexus$TM[[1]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot2 <- renderPlotly({
#input$applyTM
if (length(values$nexus$TM)>=2){
plot.ly(values$nexus$TM[[2]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
output$TMPlot3 <- renderPlotly({
#input$applyTM
if (length(values$nexus$TM)>=3){
plot.ly(values$nexus$TM[[3]]$map)
} else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
### Thematic Map for period 4 (guard: at least 4 periods selected).
### Consistency fix: the else branch used parentheses `else (...)` instead of
### braces, unlike the sibling TMPlot1-3 handlers.
output$TMPlot4 <- renderPlotly({
  #input$applyTM
  if (length(values$nexus$TM) >= 4) {
    plot.ly(values$nexus$TM[[4]]$map)
  } else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
### Thematic Map for period 5 (guard: at least 5 periods selected).
### Consistency fix: the else branch used parentheses `else (...)` instead of
### braces, unlike the sibling TMPlot1-3 handlers.
output$TMPlot5 <- renderPlotly({
  #input$applyTM
  if (length(values$nexus$TM) >= 5) {
    plot.ly(values$nexus$TM[[5]]$map)
  } else {emptyPlot("You have selected fewer periods!")}
})#, height = 650, width = 800)
### Per-period Thematic Evolution networks 1-5, one visNetwork widget each
### (k indexes values$nexus$Net).
output$NetPlot1 <- renderVisNetwork({
k=1
values$network1<-igraph2vis(g=values$nexus$Net[[k]]$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$network1$VIS
})
output$NetPlot2 <- renderVisNetwork({
k=2
values$network2<-igraph2vis(g=values$nexus$Net[[k]]$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$network2$VIS
})
output$NetPlot3 <- renderVisNetwork({
k=3
values$network3<-igraph2vis(g=values$nexus$Net[[k]]$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$network3$VIS
})
output$NetPlot4 <- renderVisNetwork({
k=4
values$network4<-igraph2vis(g=values$nexus$Net[[k]]$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$network4$VIS
})
output$NetPlot5 <- renderVisNetwork({
k=5
values$network5<-igraph2vis(g=values$nexus$Net[[k]]$graph,curved=(input$coc.curved=="Yes"),
labelsize=input$labelsize, opacity=input$cocAlpha,type=input$layout,
shape=input$coc.shape)
values$network5$VIS
})
### Per-period Thematic Map word tables 1-5 (column 4 dropped); identical
### DT configuration except for the export filename and the TM index.
output$TMTable1 <- DT::renderDT({
tmData=values$nexus$TM[[1]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_1',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable2 <- DT::renderDT({
tmData=values$nexus$TM[[2]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_2',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable3 <- DT::renderDT({
tmData=values$nexus$TM[[3]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_3',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable4 <- DT::renderDT({
tmData=values$nexus$TM[[4]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_4',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
output$TMTable5 <- DT::renderDT({
tmData=values$nexus$TM[[5]]$words[,-4]
DT::datatable(tmData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Thematic_Map_Period_5',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(tmData))-1))))) %>%
formatStyle(names(tmData), backgroundColor = 'white')
#return(Data)
})
### INTELLECTUAL STRUCTURE ####
### Co-citation network ----
### Co-citation network, recomputed on Apply. intellectualStructure()
### mutates and returns the `values` reactive container.
output$cocitPlot <- renderVisNetwork({
input$applyCocit
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(intellectualStructure(input,values))
#dev.off();file.remove(t) ### end of trick
isolate(values$network<-igraph2vis(g=values$cocitnet$graph,curved=(input$cocit.curved=="Yes"),
labelsize=input$citlabelsize, opacity=input$cocitAlpha,type=input$citlayout,
shape=input$cocit.shape))
isolate(values$network$VIS)
})
### Download handler: export the co-citation network in Pajek format.
### Fixed: `content <-` (accidental env assignment + positional match)
### replaced with the named argument `content =`.
output$network.cocit <- downloadHandler(
  filename = "Co_citation_network.net",
  content = function(file) {
    igraph::write.graph(values$cocitnet$graph_pajek, file = file, format = "pajek")
    #rio::export(values$M, file=file)
  },
  contentType = "net"
)
### Cluster membership / betweenness centrality table for the
### co-citation network (values$cocitnet computed by output$cocitPlot).
output$cocitTable <- DT::renderDT({
cocitData=values$cocitnet$cluster_res
names(cocitData)=c("Node", "Cluster", "Btw Centrality")
DT::datatable(cocitData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'CoCitation_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(cocitData))-1))))) %>%
formatStyle(names(cocitData), backgroundColor = 'white')
#return(Data)
})
### save coc network image as html ####
### Download handler: save the co-citation network widget as an HTML file.
### Fixed: `content <-` (accidental env assignment + positional match)
### replaced with the named argument `content =`.
output$networkCocit.fig <- downloadHandler(
  filename = "network.html",
  content = function(con) {
    savenetwork(con)
  },
  contentType = "html"
)
### Historiograph ----
### Historiograph (direct citation network), recomputed on Apply.
### historiograph() mutates and returns the `values` reactive container.
output$histPlot <- renderPlot({
## Historiograph
input$applyHist
withProgress(message = 'Calculation in progress',
value = 0, {
values <- isolate(historiograph(input,values))
})
}, height = 500, width = 900)
### Historiograph node table: top input$histNodes documents by local
### citation score, with DOI links and color-bar styling on GCS/LCS.
output$histTable <- DT::renderDT({
LCS=values$histResults$LCS
# keep only nodes with LCS >= the input$histNodes-th largest score
s=sort(LCS,decreasing = TRUE)[input$histNodes]
ind=which(LCS>=s)
Data=values$histResults$histData
Data=Data[ind,]
Data$DOI<- paste0('<a href=\"http://doi.org/',Data$DOI,'\" target=\"_blank\">',Data$DOI,'</a>')
DT::datatable(Data, escape = FALSE, rownames = FALSE, extensions = c("Buttons"),
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Historiograph_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(Data))-1))))) %>%
formatStyle(names(Data), backgroundColor = 'white') %>%
formatStyle(
'GCS',
background = styleColorBar(Data$GCS, 'steelblue'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center'
) %>%
formatStyle(
'LCS',
background = styleColorBar(Data$LCS, 'steelblue'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center'
)
#return(Data)
})
### SOCIAL STRUCTURE ####
### Collaboration network ----
### Collaboration network, recomputed on Apply. socialStructure()
### mutates and returns the `values` reactive container.
output$colPlot <- renderVisNetwork({
input$applyCol
#t = tempfile();pdf(file=t) #### trick to hide igraph plot
values <- isolate(socialStructure(input,values))
#dev.off();file.remove(t) ### end of trick
isolate(values$network<-igraph2vis(g=values$colnet$graph,curved=(input$soc.curved=="Yes"),
labelsize=input$collabelsize, opacity=input$colAlpha,type=input$collayout,
shape=input$col.shape, color = "blue"))
isolate(values$network$VIS)
})
# Download handler: exports the collaboration graph in Pajek (.net) format.
output$network.col <- downloadHandler(
  filename = "Collaboration_network.net",
  # FIX: `content` must be a *named* argument; the original
  # `content <- function(file)` only worked through positional matching
  # and leaked a `content` binding into the enclosing environment.
  content = function(file) {
    igraph::write.graph(values$colnet$graph_pajek, file = file, format = "pajek")
    #rio::export(values$M, file=file)
  },
  # Pajek files have no registered MIME type; "net" (original value) is not
  # a valid content type, so serve as plain text.
  contentType = "text/plain"
)
# Table of collaboration-network nodes with their cluster membership and
# betweenness centrality (from the cached networkPlot() result).
output$colTable <- DT::renderDT({
colData=values$colnet$cluster_res
names(colData)=c("Node", "Cluster", "Btw Centrality")
DT::datatable(colData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'Collaboration_Network',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(colData))-1))))) %>%
formatStyle(names(colData), backgroundColor = 'white')
#return(Data)
})
### save coc network image as html ####
# Download handler: saves the collaboration network as a standalone HTML
# widget (savenetwork() serializes the cached values$network).
output$networkCol.fig <- downloadHandler(
  filename = "network.html",
  # FIX: named argument instead of the accidental `content <- function(con)`
  # form, which relied on positional matching and leaked a binding.
  content = function(con) {
    savenetwork(con)
  },
  # FIX: "html" alone is not a MIME type.
  contentType = "text/html"
)
### WPPlot ----
# World collaboration map; recomputed only when input$applyWM is pressed.
# The result is cached in values$WMmap so WMTable below can reuse it.
output$WMPlot<- renderPlot({
input$applyWM
isolate({
values$WMmap=countrycollaboration(values$M,label=FALSE,edgesize=input$WMedgesize/2,min.edges=input$WMedges.min)
plot(values$WMmap$g)
})
#isolate(values$WMmap=countrycollaboration(values$M,label=FALSE,edgesize=input$WMedgesize/2,min.edges=input$WMedges.min))
#isolate(plot(values$WMmap$g))
},height = 750)#, width = 750
# Country-pair collaboration frequencies backing the world map.
output$WMTable <- DT::renderDT({
colData=values$WMmap$tab
# Keep only origin, destination and the duplicate count (columns 1, 2, 9
# of the count.duplicates() output built in countrycollaboration()).
colData=colData[,c(1,2,9)]
names(colData)=c("From","To","Frequency")
DT::datatable(colData, escape = FALSE, rownames = FALSE, extensions = c("Buttons"), filter = 'top',
options = list(pageLength = 50, dom = 'Bfrtip',
buttons = list('pageLength',
list(extend = 'copy'),
list(extend = 'csv',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'excel',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'pdf',
filename = 'World_Collaboration_Map',
title = " ",
header = TRUE),
list(extend = 'print')),
lengthMenu = list(c(10,25,50,-1),c('10 rows', '25 rows', '50 rows','Show all')),
columnDefs = list(list(className = 'dt-center', targets = 0:(length(names(colData))-1))))) %>%
formatStyle(names(colData), backgroundColor = 'white')
#return(Data)
})
### COMMON FUNCTIONS ####
# Return the extension of a file name (text after the last dot), or ""
# when there is none. An extension only counts when it follows a
# non-empty base name, so dotfiles such as ".bashrc" yield "".
getFileNameExtension <- function (fn) {
  # Drop directory components ('/'-separated) and keep the base name.
  pieces <- strsplit(x = fn, split = "/")[[1]]
  base <- pieces[length(pieces)]
  result <- ""
  pieces <- strsplit(x = base, split = "\\.")[[1]]
  n <- length(pieces)
  # Require at least one dot and at least one non-empty name component
  # before the final piece.
  if (n > 1 && any(nzchar(pieces[seq_len(n - 1)]))) {
    result <- pieces[n]
  }
  result
}
# Convert a ggplot object into a plotly widget with a slimmed-down mode
# bar (logo and rarely-used interaction buttons removed).
# NOTE: the dotted name would make S3 treat this as a plot() method for
# class "ly"; kept as-is because callers invoke it by its full name.
plot.ly <- function(g){
  hidden_buttons <- c(
    'sendDataToCloud',
    'pan2d',
    'select2d',
    'lasso2d',
    'toggleSpikelines',
    'hoverClosestCartesian',
    'hoverCompareCartesian'
  )
  fig <- ggplotly(g, tooltip = "text")
  config(fig, displaylogo = FALSE, modeBarButtonsToRemove = hidden_buttons)
}
# Draw a blank ggplot canvas containing only `errortext`, used as a
# placeholder whenever the requested analysis cannot be produced.
emptyPlot <- function(errortext) {
  message_canvas <- ggplot() +
    theme_void() +
    theme(legend.position = "none") +
    annotate("text", x = 4, y = 25, label = errortext, size = 10)
  plot(message_canvas)
}
# Collapse duplicated rows of a data frame, returning one row per
# distinct combination with an extra `count` column holding its
# number of occurrences.
count.duplicates <- function(DF){
  # Fingerprint each row by joining its fields with an unlikely separator.
  keys <- do.call('paste', c(DF, sep = '\r'))
  perm <- order(keys)
  runs <- rle(keys[perm])
  # The last row of each sorted run represents that distinct combination.
  last_of_run <- perm[cumsum(runs$lengths)]
  cbind(DF[last_of_run, , drop = FALSE], count = runs$lengths)
}
# Shorten reference strings: truncate each element just before the first
# match of a volume marker ("V<digit>") and before a "DOI " marker.
# regexec() returns -1 for non-matching elements, so `ind > -1` selects
# only the strings that actually contain the pattern.
# NOTE(review): a leading "*" is not a valid quantifier at the start of a
# regular expression — it was presumably meant as a wildcard but may be
# treated as a literal '*' (or rejected) by the regex engine; confirm
# these patterns match " V12"/" DOI " substrings as intended.
reduceRefs<- function(A){
ind=unlist(regexec("*V[0-9]", A))
A[ind>-1]=substr(A[ind>-1],1,(ind[ind>-1]-1))
ind=unlist(regexec("*DOI ", A))
A[ind>-1]=substr(A[ind>-1],1,(ind[ind>-1]-1))
return(A)
}
# Reset the shared `values` container to its pristine state. Every slot
# is re-initialised with the sentinels used throughout the app ("NA"
# strings, a 1x1 NA matrix, zero counters). Returns the container.
initial <- function(values){
  values$results <- list("NA")
  values$log <- "working..."
  values$load <- "FALSE"
  values$field <- "NA"
  values$citField <- "NA"
  values$colField <- "NA"
  values$citSep <- "NA"
  blank_matrix <- matrix(NA, 1, 1)
  values$NetWords <- blank_matrix
  values$NetRefs <- blank_matrix
  values$ColNetRefs <- blank_matrix
  values$Title <- "Network"
  values$Histfield <- "NA"
  values$histlog <- "working..."
  values$kk <- 0
  values$histsearch <- "NA"
  values$citShortlabel <- "NA"
  values$S <- list("NA")
  values$GR <- "NA"
  return(values)
}
### ANALYSIS FUNCTIONS ####
### Descriptive functions ----
# Horizontal bar chart of author or source impact (h/g/m index or total
# citations), computed on demand and cached in values$H.
# NOTE(review): `input` is a free variable here, resolved in the enclosing
# Shiny server environment — this function only works inside the server.
Hindex_plot <- function(values, type){
# Inner helper: fill values$H with the Hindex() table for the requested
# entity type (authors or sources).
hindex<-function(values,type){
switch(type,
author={
AU=trim(gsub(",","",names(tableTag(values$M,"AU"))))
values$H=Hindex(values$M, field = "author", elements = AU, sep = ";", years=Inf)$H
},
source={
SO=names(sort(table(values$M$SO),decreasing = TRUE))
values$H=Hindex(values$M, field = "source", elements = SO, sep = ";", years=Inf)$H
}
)
return(values)
}
values<-hindex(values, type = type)
xx=values$H
# Pick the K-top entities and the impact measure from the relevant inputs.
if (type=="author"){
K=input$Hkauthor
measure=input$HmeasureAuthors
title="Author Impact"
xn="Authors"
} else {
K=input$Hksource
measure=input$HmeasureSources
title="Source Impact"
xn="Sources"
}
# Clamp K to the number of available rows.
if (K>dim(xx)[1]){
k=dim(xx)[1]
} else {k=K}
# Column index of the chosen measure in the Hindex() table.
switch(measure,
h={m=2},
g={m=3},
m={m=4},
tc={m=5}
)
# Keep the top-k rows by the chosen measure (descending).
xx=xx[order(-xx[,m]),]
xx=xx[1:k,c(1,m)]
g=ggplot2::ggplot(data=xx, aes(x=xx[,1], y=xx[,2], fill=-xx[,2], text=paste(xn,": ",xx[,1],"\n", names(values$H)[m],": ",xx[,2]))) +
#geom_bar(stat="identity", fill="steelblue")+
geom_bar(aes(group="NA"),stat="identity")+
scale_fill_continuous(type = "gradient")+
scale_x_discrete(limits = rev((xx[,1])))+
labs(title=title, x = xn)+
labs(y = names(values$H)[m])+
theme_minimal() +
guides(fill=FALSE)+
coord_flip()
res<-list(values=values,g=g)
return(res)
}
# Return one of the descriptive summary tables ("tab1".."tab12") for the
# loaded collection. Runs biblioAnalysis()/summary() lazily the first
# time and caches the results in values$results / values$S.
descriptive <- function(values,type){
if (values$results[[1]]=="NA"){
values$results=biblioAnalysis(values$M)}
if (values$S[[1]][1]=="NA"){
values$S=summary(values$results,k=Inf,verbose=FALSE)}
switch(type,
"tab1"={
#TAB=data.frame(Information=gsub("[[:digit:]]", "", S$MainInformation), Data=gsub("[^0-9]", "", S$MainInformation)) #this is better
# Main information about the collection.
TAB=data.frame(values$S$MainInformationDF)
# cat(S$MainInformation)
},
"tab2"={
# Annual scientific production.
TAB=values$S$AnnualProduction
names(TAB)=c("Year","Articles")
#print(S$AnnualProduction)
#cat("\n\n")
#cat("Annual Growth Rate ",round(S$AnnualGrowthRate, digits=2),"%")
},
"tab3"={
# Most productive authors (whole and fractionalized counts).
TAB=values$S$MostProdAuthors
names(TAB)=c("Authors","Articles","Authors-Frac","Articles Fractionalized")
#print(S$MostProdAuthors)
},
"tab4"={
# Most cited papers.
TAB=values$S$MostCitedPapers
names(TAB)=c("Paper", "Total Citations","TC per Year")
#print(S$MostCitedPapers)
},
"tab5"={
TAB=values$S$MostProdCountries
#print(S$MostProdCountries)
},
"tab6"={
TAB=values$S$TCperCountries
#print(S$TCperCountries)
},
"tab7"={
TAB=values$S$MostRelSources
#print(S$MostRelSources)
},
"tab8"={
TAB=values$S$MostRelKeywords
#print(S$MostRelSources)
},
"tab10"={
# Country production table used by the world map.
TAB<-mapworld(values$M)$tab
},
"tab11"={
TAB=as.data.frame(values$results$Affiliations,stringsAsFactors = FALSE)
names(TAB)=c("Affiliations", "Articles")
},
"tab12"={
# Affiliation frequencies from the raw C1 field; very short strings
# (likely parsing artifacts) are dropped.
TAB=tableTag(values$M,"C1")
TAB=data.frame(Affiliations=names(TAB), Articles=as.numeric(TAB),stringsAsFactors = FALSE)
TAB=TAB[nchar(TAB[,1])>4,]
#names(TAB)=c("Affiliations", "Articles")
}
)
values$TAB=TAB
res=list(values=values,TAB=TAB)
return(res)
}
# Build a term-frequency table for field `Field` of the bibliographic
# data frame `M`, keeping the top `n` terms and applying the chosen
# frequency transformation.
#
# M       bibliographic data frame.
# Field   one of "ID", "DE", "TI", "AB".
# n       number of top terms to keep (clamped to the available count).
# measure one of "identity", "sqrt", "log", "log10".
#
# Returns a list with:
#   v     - full named frequency vector (names lower-cased)
#   W     - top-n data frame with the transformed Frequency column
#   Words - top-n data frame with raw frequencies
wordlist <- function(M, Field, n, measure){
  switch(Field,
         # FIX: the ID/DE branches previously read the global `values$M`
         # instead of the `M` parameter (inconsistent with the TI/AB
         # branches); use the argument throughout.
         ID={v=tableTag(M,"ID")},
         DE={v=tableTag(M,"DE")},
         TI={
           # NOTE(review): when term extraction already ran ("TI_TM" in
           # names(M)) `v` is left undefined here, exactly as in the
           # original — callers apparently never hit that case; confirm.
           if (!("TI_TM" %in% names(M))){
             v=tableTag(M,"TI")
           }},
         AB={if (!("AB_TM" %in% names(M))){
           v=tableTag(M,"AB")
         }}
  )
  names(v)=tolower(names(v))
  # Never request more terms than exist.
  n=min(c(n,length(v)))
  Words=data.frame(Terms=names(v)[1:n], Frequency=(as.numeric(v)[1:n]))
  W=Words
  # Optional variance-stabilising transforms of the frequency column.
  switch(measure,
         identity={},
         sqrt={W$Frequency=sqrt(W$Frequency)},
         log={W$Frequency=log(W$Frequency+1)},
         log10={W$Frequency=log10(W$Frequency+1)}
  )
  results=list(v=v,W=W, Words=Words)
  return(results)
}
# Choropleth of scientific production by country (authors' countries
# extracted from M). Returns list(g = ggplot map, tab = per-country
# frequency table sorted decreasingly).
mapworld <- function(M){
# Ensure the author-country tag is available.
if (!("AU_CO" %in% names(M))){M=metaTagExtraction(M,"AU_CO")}
CO=as.data.frame(tableTag(M,"AU_CO"),stringsAsFactors = FALSE)
# Align country names with the ones used by map_data("world").
CO$Tab=gsub("UNITED KINGDOM","UK",CO$Tab)
CO$Tab=gsub("KOREA","SOUTH KOREA",CO$Tab)
map.world <- map_data("world")
map.world$region=toupper(map.world$region)
# NOTE: leftover diagnostic (result was computed and discarded inside the
# function, so it had no effect); kept for reference.
#dplyr::anti_join(CO, map.world, by = c('Tab' = 'region'))
country.prod <- dplyr::left_join( map.world, CO, by = c('region' = 'Tab'))
tab=data.frame(country.prod %>%
dplyr::group_by(region) %>%
dplyr::summarise(Freq=mean(Freq)))
tab=tab[!is.na(tab$Freq),]
tab=tab[order(-tab$Freq),]
# Legend breaks at the quantiles of the raw counts, plotted on log scale.
breaks=as.numeric(round(quantile(CO$Freq,c(0.2,0.4,0.6,0.8,1))))
names(breaks)=breaks
breaks=log(breaks)
g= ggplot(country.prod, aes( x = long, y = lat, group=group, text=paste("Country: ",country.prod$region,"\nN.of Documents: ",country.prod$Freq))) +
geom_polygon(aes(fill = log(Freq), group=group) )+#, col = "white") +
scale_fill_continuous(low='dodgerblue', high='dodgerblue4',breaks=breaks)+
guides(fill = guide_legend(reverse = T)) +
#geom_text(data=centroids, aes(label = centroids$Tab, x = centroids$long, y = centroids$lat, group=centroids$Tab)) +
labs(fill = 'N.Documents'
# FIX: corrected title typo ("Colobration" -> "Collaboration").
,title = 'Country Scientific Collaboration'
,x = NULL
,y = NULL) +
theme(text = element_text(color = '#333333') #'#333333'
,plot.title = element_text(size = 28)
,plot.subtitle = element_text(size = 14)
,axis.ticks = element_blank()
,axis.text = element_blank()
,panel.grid = element_blank()
,panel.background = element_rect(fill = '#FFFFFF') #'#333333'
,plot.background = element_rect(fill = '#FFFFFF')
,legend.position = c(.18,.36)
,legend.background = element_blank()
,legend.key = element_blank()
)
results=list(g=g,tab=tab)
return(results)
}
### Structure fuctions ----
# Conceptual structure (factorial analysis) map for the chosen field.
# Stores the result in values$CS and plots it; falls back to a
# placeholder plot when the field is absent or has too few terms.
CAmap <- function(input, values){
  if ((input$CSfield %in% names(values$M))){
    tab=tableTag(values$M,input$CSfield)
    # FIX: the original guard was `length(tab>=2)` — the length of a
    # logical vector, which is truthy whenever any term exists. The
    # intended check is that at least two distinct terms are available.
    if (length(tab) >= 2){
      # Terms occurring at least as often as the input$CSn-th most
      # frequent one are kept by conceptualStructure().
      minDegree=as.numeric(tab[input$CSn])
      values$CS <- conceptualStructure(values$M, method=input$method , field=input$CSfield, minDegree=minDegree, clust=input$nClustersCS, k.max = 8, stemming=F, labelsize=input$CSlabelsize,documents=input$CSdoc,graph=FALSE)
      plot(values$CS$graph_terms)
    }else{emptyPlot("Selected field is not included in your data collection")
      values$CS=list("NA")}
  }else{
    emptyPlot("Selected field is not included in your data collection")
    values$CS=list("NA")
  }
}
# Build (and cache) the historiograph. In "FAST" mode only documents in
# the top citation quartile are considered; the histNetwork() result is
# recomputed only when the search mode changes or nothing is cached yet.
# histPlot()'s console output is captured into values$histlog.
historiograph <- function(input,values){
if (input$histsearch=="FAST"){
min.cit=quantile(values$M$TC,0.75, na.rm = TRUE)
}else{min.cit=1}
if (values$Histfield=="NA" | values$histsearch!=input$histsearch){
values$histResults <- histNetwork(values$M, min.citations=min.cit, sep = ";")
values$Histfield="done"
values$histsearch=input$histsearch
}
values$histlog<- capture.output(values$histPlot <- histPlot(values$histResults, n=input$histNodes, size =input$histsize, labelsize = input$histlabelsize))
return(values)
}
### Network functions ----
# Build/update the co-occurrence network for the selected field (keywords,
# author keywords, title or abstract terms). The adjacency matrix is
# cached in values$NetWords and rebuilt only when the field changes (or
# nothing is cached); the plotted network is stored in values$cocnet.
cocNetwork <- function(input,values){
n = input$Nodes
label.n = input$Labels
if ((input$field %in% names(values$M))){
# Rebuild the matrix only on first use or when the field changed.
if ((dim(values$NetWords)[1])==1 | !(input$field==values$field)){
values$field=input$field
switch(input$field,
ID={
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "keywords", sep = ";")
values$Title= "Keywords Plus Network"
},
DE={
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "author_keywords", sep = ";")
values$Title= "Authors' Keywords network"
},
TI={
# Title terms require a prior term-extraction pass.
if(!("TI_TM" %in% names(values$M))){values$M=termExtraction(values$M,Field="TI",verbose=FALSE)}
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "titles", sep = ";")
values$Title= "Title Words network"
},
AB={
if(!("AB_TM" %in% names(values$M))){values$M=termExtraction(values$M,Field="AB",verbose=FALSE)}
values$NetWords <- biblioNetwork(values$M, analysis = "co-occurrences", network = "abstracts", sep = ";")
values$Title= "Abstract Words network"
})
}
# Clamp node/label counts and translate UI choices to networkPlot() args.
if (n>dim(values$NetWords)[1]){n=dim(values$NetWords)[1]}
if (label.n>n){label.n=n}
if (input$normalize=="none"){normalize=NULL}else{normalize=input$normalize}
if (input$label.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$coc.curved=="Yes"){curved=TRUE}else{curved=FALSE}
#par(bg="grey92", mar=c(0,0,0,0))
values$cocnet=networkPlot(values$NetWords, normalize=normalize,n = n, Title = values$Title, type = input$layout,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$edgesize*3, labelsize=input$labelsize,label.cex=label.cex,
label.n=label.n,edges.min=input$edges.min,label.color = F, curved=curved,alpha=input$cocAlpha,
cluster=input$cocCluster, remove.isolates = (input$coc.isolates=="yes"), verbose = FALSE)
# Optionally recolor nodes by (average) publication year of each term.
if (input$cocyears=="Yes"){
Y=fieldByYear(values$M, field = input$field, graph=FALSE)
g=values$cocnet$graph
label=igraph::V(g)$name
ind=which(tolower(Y$df$item) %in% label)
df=Y$df[ind,]
#bluefunc <- colorRampPalette(c("lightblue", "darkblue"))
#col=bluefunc((diff(range(df$year))+1)*10)
# Older terms get "hotter" colors (heat.colors runs red -> yellow).
col=heat.colors((diff(range(df$year))+1)*10)
igraph::V(g)$color=col[(max(df$year)-df$year+1)*10]
igraph::V(g)$year=df$year
values$cocnet$graph=g
}
}else{
emptyPlot("Selected field is not included in your data collection")
}
return(values)
}
# Build/update the co-citation network (references, cited authors, or
# cited sources). The adjacency matrix is cached in values$NetRefs and
# rebuilt only when any of the relevant UI choices changed; the plotted
# network is stored in values$cocitnet.
intellectualStructure <- function(input,values){
n = input$citNodes
label.n = input$citLabels
# Rebuild on first use or when field/separator/short-label choice changed.
if ((dim(values$NetRefs)[1])==1 | !(input$citField==values$citField) | !(input$citSep==values$citSep) | !(input$citShortlabel==values$citShortlabel)){
values$citField=input$citField
values$citSep=input$citSep
if (input$citShortlabel=="Yes"){shortlabel=TRUE}else{shortlabel=FALSE}
values$citShortlabel=input$citShortlabel
switch(input$citField,
CR={
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "references", sep = input$citSep, shortlabel=shortlabel)
values$Title= "Cited References network"
},
CR_AU={
# First-author tags must be extracted from CR before use.
if(!("CR_AU" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="CR_AU", sep = input$citSep)}
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "authors", sep = input$citSep)
values$Title= "Cited Authors network"
},
CR_SO={
if(!("CR_SO" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="CR_SO", sep = input$citSep)}
values$NetRefs <- biblioNetwork(values$M, analysis = "co-citation", network = "sources", sep = input$citSep)
values$Title= "Cited Sources network"
})
}
# Clamp node/label counts and translate UI choices to networkPlot() args.
if (n>dim(values$NetRefs)[1]){n=dim(values$NetRefs)[1]}
if (label.n>n){label.n=n}
if (input$citlabel.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$cocit.curved=="Yes"){curved=TRUE}else{curved=FALSE}
values$cocitnet=networkPlot(values$NetRefs, normalize=NULL, n = n, Title = values$Title, type = input$citlayout,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$citedgesize*3,
labelsize=input$citlabelsize,label.cex=label.cex, curved=curved,
label.n=label.n,edges.min=input$citedges.min,label.color = F,remove.isolates = (input$cit.isolates=="yes"),
alpha=input$cocitAlpha, cluster=input$cocitCluster, verbose = FALSE)
return(values)
}
# Build/update the collaboration network (authors, universities, or
# countries). The adjacency matrix is cached in values$ColNetRefs and
# rebuilt only when the field changed; the plotted network is stored in
# values$colnet.
socialStructure<-function(input,values){
n = input$colNodes
label.n = input$colLabels
# Rebuild on first use or when the collaboration field changed.
if ((dim(values$ColNetRefs)[1])==1 | !(input$colField==values$colField)){
values$colField=input$colField
values$cluster="walktrap"
switch(input$colField,
COL_AU={
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "authors", sep = ";")
values$Title= "Author Collaboration network"
},
COL_UN={
# University affiliations must be extracted before use.
if(!("AU_UN" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="AU_UN", sep=";")}
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "universities", sep = ";")
values$Title= "Edu Collaboration network"
},
COL_CO={
if(!("AU_CO" %in% names(values$M))){values$M=metaTagExtraction(values$M,Field="AU_CO", sep=";")}
values$ColNetRefs <- biblioNetwork(values$M, analysis = "collaboration", network = "countries", sep = ";")
values$Title= "Country Collaboration network"
#values$cluster="none"
})
}
# Clamp node/label counts and translate UI choices to networkPlot() args.
if (n>dim(values$ColNetRefs)[1]){n=dim(values$ColNetRefs)[1]}
if (label.n>n){label.n=n}
if (input$colnormalize=="none"){normalize=NULL}else{normalize=input$colnormalize}
if (input$collabel.cex=="Yes"){label.cex=TRUE}else{label.cex=FALSE}
if (input$soc.curved=="Yes"){curved=TRUE}else{curved=FALSE}
# "worldmap" is rendered elsewhere; fall back to an automatic layout here.
type=input$collayout
if (input$collayout=="worldmap"){type="auto"}
values$colnet=networkPlot(values$ColNetRefs, normalize=normalize, n = n, Title = values$Title, type = type,
size.cex=TRUE, size=5 , remove.multiple=F, edgesize = input$coledgesize*3,
labelsize=input$collabelsize,label.cex=label.cex, curved=curved,
label.n=label.n,edges.min=input$coledges.min,label.color = F,alpha=input$colAlpha,
remove.isolates = (input$col.isolates=="yes"), cluster=input$colCluster, verbose = FALSE)
return(values)
}
# World map of country collaboration: choropleth of per-country output
# plus arcs between collaborating countries (edges with fewer than
# `min.edges` joint papers are hidden). Returns list(g = ggplot map,
# tab = country-pair table with occurrence counts).
countrycollaboration <- function(M,label,edgesize,min.edges){
M=metaTagExtraction(M,"AU_CO")
net=biblioNetwork(M,analysis="collaboration",network="countries")
# Diagonal of the collaboration matrix = per-country document counts.
CO=data.frame(Tab=rownames(net),Freq=diag(net),stringsAsFactors = FALSE)
bsk.network=igraph::graph_from_adjacency_matrix(net,mode="undirected")
# One row per edge endpoint pair (country names).
COedges=as.data.frame(igraph::ends(bsk.network,igraph::E(bsk.network),names=TRUE),stringsAsFactors = FALSE)
map.world <- map_data("world")
map.world$region=toupper(map.world$region)
# Align map region names with the country names used in the data.
map.world$region=gsub("UK","UNITED KINGDOM",map.world$region)
map.world$region=gsub("SOUTH KOREA","KOREA",map.world$region)
country.prod <- dplyr::left_join( map.world, CO, by = c('region' = 'Tab'))
# Legend breaks at quantiles of the raw counts, plotted on log scale.
breaks=as.numeric(round(quantile(CO$Freq,c(0.2,0.4,0.6,0.8,1))))
names(breaks)=breaks
breaks=log(breaks)
# Country centroid coordinates shipped with the package.
data("countries",envir=environment())
names(countries)[1]="Tab"
# Attach centroid lon/lat to both endpoints of every edge.
COedges=dplyr::inner_join(COedges,countries, by=c('V1'='Tab'))
COedges=dplyr::inner_join(COedges,countries, by=c('V2'='Tab'))
# Drop self-loops, collapse duplicate pairs into a count column.
COedges=COedges[COedges$V1!=COedges$V2,]
COedges=count.duplicates(COedges)
tab=COedges
COedges=COedges[COedges$count>=min.edges,]
g=ggplot(country.prod, aes( x = country.prod$long, y = country.prod$lat, group = country.prod$group )) +
geom_polygon(aes(fill = log(Freq))) +
scale_fill_continuous(low='dodgerblue', high='dodgerblue4',breaks=breaks)+
#guides(fill = guide_legend(reverse = T)) +
guides(colour=FALSE, fill=FALSE)+
geom_curve(data=COedges, aes(x = COedges$Longitude.x , y = COedges$Latitude.x, xend = COedges$Longitude.y, yend = COedges$Latitude.y, # draw edges as arcs
color = "firebrick4", size = COedges$count, group=COedges$continent.x),
curvature = 0.33,
alpha = 0.5) +
labs(title = "Country Collaboration Map", x = "Latitude", y = "Longitude")+
scale_size_continuous(guide = FALSE, range = c(0.25, edgesize))+
theme(text = element_text(color = '#333333')
,plot.title = element_text(size = 28)
,plot.subtitle = element_text(size = 14)
,axis.ticks = element_blank()
,axis.text = element_blank()
,panel.grid = element_blank()
,panel.background = element_rect(fill = '#FFFFFF') #'#333333'
,plot.background = element_rect(fill = '#FFFFFF')
,legend.position = c(.18,.36)
,legend.background = element_blank()
,legend.key = element_blank()
)
# Optional country-name labels at centroids.
if (isTRUE(label)){
CO=dplyr::inner_join(CO,countries, by=c('Tab'='Tab'))
g=g+
ggrepel::geom_text_repel(data=CO, aes(x = .data$Longitude, y = .data$Latitude, label = .data$Tab, group=.data$continent), # draw text labels
hjust = 0, nudge_x = 1, nudge_y = 4,
size = 3, color = "orange", fontface = "bold")
}
results=list(g=g,tab=tab)
return(results)
}
### visNetwork tools ----
# Map a user-facing layout keyword to the corresponding igraph layout
# function name used by visNetwork::visIgraphLayout().
# FIX: an unknown keyword now raises an informative error; the original
# left `l` unassigned and failed with a cryptic "object 'l' not found".
netLayout <- function(type){
  l <- switch(type,
              auto        = "layout_nicely",
              circle      = "layout_in_circle",
              mds         = "layout_with_mds",
              star        = "layout_as_star",
              sphere      = "layout_on_sphere",
              fruchterman = "layout_with_fr",
              kamada      = "layout_with_kk",
              stop("unknown layout type: ", type))
  return(l)
}
# Serialize the cached interactive network (values$network, produced by
# igraph2vis()) to `con` as a standalone HTML widget, re-creating the
# widget at a larger fixed size for export.
# NOTE(review): `values` is a free variable resolved in the enclosing
# Shiny server environment — this function only works inside the server.
savenetwork <- function(con){
vn=values$network$vn
visNetwork(nodes = vn$nodes, edges = vn$edges, type="full", smooth=TRUE, physics=FALSE, height = "2000px",width = "2000px" ) %>%
visNodes(shape="box", font=list(color="black"),scaling=list(label=list(enables=TRUE))) %>%
visIgraphLayout(layout = values$network$l) %>%
visEdges(smooth = values$network$curved) %>%
visOptions(highlightNearest =list(enabled = T, hover = T, degree=1), nodesIdSelection = T) %>%
visInteraction(dragNodes = TRUE, navigationButtons = TRUE, hideEdgesOnDrag = TRUE) %>% visExport() %>%
visPhysics(enabled = FALSE) %>% visSave(con)
}
# Convert an igraph network (from networkPlot()) into an interactive
# visNetwork widget.
#
# g         igraph object; vertex attributes name/labelsize/font.size
#           and edge attribute lty are consumed below.
# curved    logical; draw smoothed/curved edges.
# labelsize scaling factor for node label fonts.
# opacity   alpha transparency applied to node and edge colors.
# type      layout keyword, translated via netLayout().
# shape     visNetwork node shape (e.g. "dot", "box").
# color     optional single color overriding the node colors carried by
#           the igraph object; NULL (default) keeps the graph's colors.
#           FIX: the collaboration renderer calls this function with
#           `color = "blue"`, which previously raised an
#           "unused argument" error because the parameter did not exist.
#
# Returns list(VIS = widget, vn = node/edge data, type, l = layout name,
# curved) — the extras are what savenetwork() needs for export.
igraph2vis<-function(g,curved,labelsize,opacity,type,shape,color=NULL){
  LABEL=igraph::V(g)$name
  # Hide labels for vertices flagged with labelsize == 0.
  LABEL[igraph::V(g)$labelsize==0]=""
  vn <- toVisNetworkData(g)
  vn$nodes$label=LABEL
  vn$edges$num=1
  # Dashed edges mirror igraph's lty == 2.
  vn$edges$dashes=FALSE
  vn$edges$dashes[vn$edges$lty==2]=TRUE
  ## optional uniform node color override (backward-compatible default)
  if (!is.null(color)) vn$nodes$color=color
  ## opacity
  vn$nodes$color=adjustcolor(vn$nodes$color,alpha=min(c(opacity+0.2,1)))
  vn$edges$color=adjustcolor(vn$edges$color,alpha=opacity)
  ## removing multiple edges
  vn$edges=unique(vn$edges)
  ## labelsize: rescale font sizes into [scalemin, scalemax]
  scalemin=20
  scalemax=150
  Min=min(vn$nodes$font.size)
  Max=max(vn$nodes$font.size)
  if (Max>Min){
    size=(vn$nodes$font.size-Min)/(Max-Min)*10*labelsize+10
  } else {size=10*labelsize}
  size[size<scalemin]=scalemin
  size[size>scalemax]=scalemax
  vn$nodes$font.size=size
  l<-netLayout(type)
  ### TO ADD SHAPE AND FONT COLOR OPTIONS
  VIS<-visNetwork(nodes = vn$nodes, edges = vn$edges, type="full", smooth=TRUE, physics=FALSE) %>%
    visNodes(shape=shape, font=list(color="black")) %>%
    visIgraphLayout(layout = l) %>%
    visEdges(smooth = curved) %>%
    visOptions(highlightNearest =list(enabled = T, hover = T, degree=1), nodesIdSelection = T) %>%
    visInteraction(dragNodes = TRUE, navigationButtons = TRUE, hideEdgesOnDrag = TRUE)
  # Side effect kept from the original: cache the widget in the shared
  # reactive container as well.
  values$COCVIS=VIS
  return(list(VIS=VIS,vn=vn, type=type, l=l, curved=curved))
}
} ## End of Server |
#### Subsets, filters and reformats a VCF
#### end point is a 'geno_df'
#### example run: Rscript 01_process_vcf_to_geno_df.R data/vcf/whtstbk_master.vcf.bgz multi 2
################################################################################
# Libraries
################################################################################
list.files("functions", full.names = TRUE) %>% sapply(.,source, verbose = FALSE, echo = FALSE) %>% invisible
file_list <- list.files("data/phased_vcf", pattern=".vcf$", full.names = TRUE)
vcf <- file_list[1]
tmp <- phased_vcf_to_geno_df(file_list[1])
| /old_scripts/10_plot_phased_data.R | no_license | ksamuk/whtstbk_geno | R | false | false | 598 | r | #### Subsets, filters and reformats a VCF
#### end point is a 'geno_df'
#### example run: Rscript 01_process_vcf_to_geno_df.R data/vcf/whtstbk_master.vcf.bgz multi 2
################################################################################
# Libraries
################################################################################
list.files("functions", full.names = TRUE) %>% sapply(.,source, verbose = FALSE, echo = FALSE) %>% invisible
file_list <- list.files("data/phased_vcf", pattern=".vcf$", full.names = TRUE)
vcf <- file_list[1]
tmp <- phased_vcf_to_geno_df(file_list[1])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepCountry.R
\name{prepCountry}
\alias{prepCountry}
\title{Format Country Name}
\usage{
prepCountry(x, to.lower = TRUE, special.char = FALSE, rm.abbrev = TRUE)
}
\arguments{
\item{x}{a vector of country names to be standardized}
\item{to.lower}{logical. Should the output names be return in lower cases?
Default to TRUE.}
\item{special.char}{logical. Should special characters be maintained? Default
to FALSE.}
\item{rm.abbrev}{logical. Should common name abbreviation be replaced? Default
to TRUE.}
}
\value{
The input vector \code{x} in the standard name notation (see Details)
}
\description{
Simple function to standardize the notation of country name
(administrative level 0) by converting country codes to their long names
and by removing special characters and some country name prepositions and
separators.
}
\details{
Country information is formatted into a standard notation, i.e. long
name format (in English). By default, all letters are lower-cased (argument
\code{to.lower}) and special characters (argument \code{special.char}) and common
abbreviations (e.g. 'st.') are removed (argument \code{rm.abbrev}). These
edits aim at reducing possible variation in country name notation and
facilitate further data processing and comparison within the \strong{plantR}
workflow.
All country information with less than four letters are treated as country
codes and they are converted to the long format. Currently, only the ISO
3166-1 aplha-2 and alpha-3 codes are considered for convertion to the long
country name format.
}
\examples{
# Creating a data frame with locality information
paises <- c("VC", "VCT", "St. Vincent and the Grenadines",
"St. Vincent & Grenadines", "Saint-Martin", "Falkland Is.", NA)
# Formating the locality information
prepCountry(paises)
prepCountry(paises, to.lower = FALSE)
prepCountry(paises, rm.abbrev = FALSE)
}
\author{
Renato A. F. de Lima
}
\keyword{internal}
| /man/prepCountry.Rd | no_license | kjrom-sol/plantR | R | false | true | 1,987 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepCountry.R
\name{prepCountry}
\alias{prepCountry}
\title{Format Country Name}
\usage{
prepCountry(x, to.lower = TRUE, special.char = FALSE, rm.abbrev = TRUE)
}
\arguments{
\item{x}{a vector of country names to be standardized}
\item{to.lower}{logical. Should the output names be return in lower cases?
Default to TRUE.}
\item{special.char}{logical. Should special characters be maintained? Default
to FALSE.}
\item{rm.abbrev}{logical. Should common name abbreviation be replaced? Default
to TRUE.}
}
\value{
The input vector \code{x} in the standard name notation (see Details)
}
\description{
Simple function to standardize the notation of country name
(administrative level 0) by converting country codes to their long names
and by removing special characters and some country name prepositions and
separators.
}
\details{
Country information is formatted into a standard notation, i.e. long
name format (in English). By default, all letters are lower-cased (argument
\code{to.lower}) and special characters (argument \code{special.char}) and common
abbreviations (e.g. 'st.') are removed (argument \code{rm.abbrev}). These
edits aim at reducing possible variation in country name notation and
facilitate further data processing and comparison within the \strong{plantR}
workflow.
All country information with less than four letters are treated as country
codes and they are converted to the long format. Currently, only the ISO
3166-1 alpha-2 and alpha-3 codes are considered for conversion to the long
country name format.
}
\examples{
# Creating a data frame with locality information
paises <- c("VC", "VCT", "St. Vincent and the Grenadines",
"St. Vincent & Grenadines", "Saint-Martin", "Falkland Is.", NA)
# Formating the locality information
prepCountry(paises)
prepCountry(paises, to.lower = FALSE)
prepCountry(paises, rm.abbrev = FALSE)
}
\author{
Renato A. F. de Lima
}
\keyword{internal}
|
library("ggplot2")
library("discSurv")
library("pec")
t0 <- read.csv("https://raw.githubusercontent.com/climdata/glaser2010/master/csv/ti_1500_2xxx_monthly.csv", sep=",", na = "NA")
t1 <- t0[,c("year","month","ti")]
p0 <- read.csv("https://raw.githubusercontent.com/climdata/glaser2019/master/csv/pi_1500_2xxx_monthly.csv", sep=",", na = "NA")
prec0 <- read.csv("https://raw.githubusercontent.com/climdata/dwdPrecipitation/master/csv/monthly_precipitation_de.csv", sep=",", na = "NA")
prec1 <- prec0[, c("year","month","Deutschland")]
tps <- merge(t1,p0, by=c("year","month"))
tps <- merge(tps,prec1, by=c("year","month"))
tps$t1 <- sin((tps$month-1)*pi/6)
tps$t2 <- cos((tps$month-1)*pi/6)
tps <- tps[order(tps$ts),]
mx5 <- lm(Deutschland ~ (pi+ti)*(t1+t2), tps)
summary(mx5)
pr5 <- predict(mx5, newdata=tps, se.fit=TRUE)
tps$spi5 <- pr5$fit
tps$se5 <- pr5$se
mx2 <- lm(Deutschland ~ pi+ti+t1+t2, tps)
summary(mx2)
pr2 <- predict(mx2, newdata=tps, se.fit=TRUE)
tps$spi2 <- pr2$fit
tps$se2 <- pr2$se
tps$pi3 <- (tps$pi+3)/60
tps$ti3 <- (tps$ti+3)/60
tps$t13 <- (tps$t1+1)/20
tps$t23 <- (tps$t2+1)/20
tps$Deutschland3 <- (tps$Deutschland)/200
mx3 <- lm(Deutschland ~ (pi+ti+t1+t2)^2, tps)
#mx3 <- glm(formula=Deutschland3 ~ (pi+ti+t1+t2)^2, data=tps, family=binomial(link=gumbel()))
summary(mx3)
pr3 <- predict(mx3, newdata=tps, se.fit=TRUE)
tps$spi3 <- pr3$fit
tps$se3 <- pr3$se
mx4 <- lm(Deutschland ~ t1+t2, tps)
summary(mx4)
pr4 <- predict(mx4, newdata=tps, se.fit=TRUE)
tps$spi4 <- pr4$fit
tps$se4 <- pr4$se
p <- ggplot(data = tps, aes(x = Deutschland, y = spi3)) +
#geom_point(aes(y = spi4), color="#00AA00", alpha=0.2, size=2) +
geom_point(aes(y = spi3), color="#0000BB", alpha=0.3, size=2) +
geom_point(aes(y = spi2), color="#FF0000", alpha=0.4, size=2) +
geom_point(aes(y = spi5), color="#00FF00", alpha=0.5, size=2) +
geom_smooth(method = "lm", se=TRUE, color="cyan", formula = y ~ x)
p
| /source/precipitation.r | permissive | climdata/playground | R | false | false | 1,922 | r | library("ggplot2")
# Script: regress monthly German precipitation (DWD "Deutschland" series) on
# historical temperature (ti) and precipitation (pi) indices plus seasonal
# harmonics, compare several lm() specifications, and plot fits vs. data.
# (ggplot2 is loaded at the top of this file.)
library("discSurv")
library("pec")
# Monthly temperature index (ti), 1500-present.
t0 <- read.csv("https://raw.githubusercontent.com/climdata/glaser2010/master/csv/ti_1500_2xxx_monthly.csv", sep=",", na = "NA")
t1 <- t0[,c("year","month","ti")]
# Monthly precipitation index (pi), 1500-present.
p0 <- read.csv("https://raw.githubusercontent.com/climdata/glaser2019/master/csv/pi_1500_2xxx_monthly.csv", sep=",", na = "NA")
# Observed monthly precipitation for Germany (DWD).
prec0 <- read.csv("https://raw.githubusercontent.com/climdata/dwdPrecipitation/master/csv/monthly_precipitation_de.csv", sep=",", na = "NA")
prec1 <- prec0[, c("year","month","Deutschland")]
# Join all three sources on (year, month); only overlapping months survive.
tps <- merge(t1,p0, by=c("year","month"))
tps <- merge(tps,prec1, by=c("year","month"))
# Annual-cycle harmonics (period 12 months).
tps$t1 <- sin((tps$month-1)*pi/6)
tps$t2 <- cos((tps$month-1)*pi/6)
# NOTE(review): 'ts' is not created above, so it presumably comes from p0;
# if p0 has no 'ts' column this subsets with order(NULL) and empties tps.
# TODO confirm p0 contains a 'ts' timestamp column.
tps <- tps[order(tps$ts),]
# Model 5: indices interacting with the seasonal harmonics.
# NOTE(review): 'pi' in the formula is intended as a data column from p0,
# masking base pi inside the model frame -- verify the column exists.
mx5 <- lm(Deutschland ~ (pi+ti)*(t1+t2), tps)
summary(mx5)
pr5 <- predict(mx5, newdata=tps, se.fit=TRUE)
tps$spi5 <- pr5$fit
tps$se5 <- pr5$se   # relies on $ partial matching of 'se.fit'
# Model 2: additive indices + harmonics.
mx2 <- lm(Deutschland ~ pi+ti+t1+t2, tps)
summary(mx2)
pr2 <- predict(mx2, newdata=tps, se.fit=TRUE)
tps$spi2 <- pr2$fit
tps$se2 <- pr2$se   # relies on $ partial matching of 'se.fit'
# Rescaled copies, apparently prepared for the commented-out glm/gumbel
# variant below; unused by the lm fits.
tps$pi3 <- (tps$pi+3)/60
tps$ti3 <- (tps$ti+3)/60
tps$t13 <- (tps$t1+1)/20
tps$t23 <- (tps$t2+1)/20
tps$Deutschland3 <- (tps$Deutschland)/200
# Model 3: all pairwise interactions of indices and harmonics.
mx3 <- lm(Deutschland ~ (pi+ti+t1+t2)^2, tps)
#mx3 <- glm(formula=Deutschland3 ~ (pi+ti+t1+t2)^2, data=tps, family=binomial(link=gumbel()))
summary(mx3)
pr3 <- predict(mx3, newdata=tps, se.fit=TRUE)
tps$spi3 <- pr3$fit
tps$se3 <- pr3$se   # relies on $ partial matching of 'se.fit'
# Model 4: seasonality-only baseline.
mx4 <- lm(Deutschland ~ t1+t2, tps)
summary(mx4)
pr4 <- predict(mx4, newdata=tps, se.fit=TRUE)
tps$spi4 <- pr4$fit
tps$se4 <- pr4$se   # relies on $ partial matching of 'se.fit'
# Observed precipitation vs. fitted values of model 3 (blue), model 2 (red)
# and model 5 (green), plus a linear smooth of model 3's fit.
p <- ggplot(data = tps, aes(x = Deutschland, y = spi3)) +
  #geom_point(aes(y = spi4), color="#00AA00", alpha=0.2, size=2) +
  geom_point(aes(y = spi3), color="#0000BB", alpha=0.3, size=2) +
  geom_point(aes(y = spi2), color="#FF0000", alpha=0.4, size=2) +
  geom_point(aes(y = spi5), color="#00FF00", alpha=0.5, size=2) +
  geom_smooth(method = "lm", se=TRUE, color="cyan", formula = y ~ x)
p
|
# Script: association scan (NAM::gwas3) on simulated data, with genotypes
# handled as a bigsnpr file-backed matrix (FBM).
library(NAM)
library(bigsnpr)
# Simulation object: genotypes (infos$G), chromosome map, population labels,
# phenotypes and environmental covariates -- TODO confirm exact contents.
infos <- readRDS("validation/sim2a.rds")
# Transpose to individuals x SNPs and recode as a 0/1/2 dosage FBM.
G <- add_code256(big_copy(t(infos$G), type = "raw"), code = bigsnpr:::CODE_012)
maf <- snp_MAF(G)
# Keep only common variants (MAF > 5%).
ind.col <- which(maf > 0.05)
G2 <- big_copy(G, ind.col = ind.col)
G2
# Chromosome of each retained variant.
CHR <- infos$chromosome[ind.col]
# Rank-based inverse-normal transform: map each value of x onto the
# standard-normal quantile of its rank (via ppoints plotting positions).
# NOTE: defined here but not used by the visible script.
normalize <- function(x) {
  n <- length(x)
  theoretical <- qnorm(ppoints(n))
  # Fractional (tied) ranks are truncated on indexing, as before.
  theoretical[rank(x)]
}
# Dense, column-standardised genotype matrix (G2[] loads the FBM into RAM).
G3 <- scale(G2[])
# gwas3 requires row/column names.
rownames(G3)<-make.names(1:1000, unique = TRUE)  # NOTE(review): hard-codes 1000 individuals -- TODO confirm nrow(G3)
colnames(G3)<-make.names(1:ncol(G3), unique = TRUE)
# Association scan by family/population, with per-chromosome marker counts
# and environmental covariates.
mod<-gwas3(y=infos$phenotype1, gen=G3, fam=infos$pop,
           chr=as.vector(table(CHR)), cov=infos$envi)
dim(G3)
| /BayesianGWAS.R | no_license | privefl/SSMPG-17 | R | false | false | 584 | r | library(NAM)
library(bigsnpr)
infos <- readRDS("validation/sim2a.rds")
G <- add_code256(big_copy(t(infos$G), type = "raw"), code = bigsnpr:::CODE_012)
maf <- snp_MAF(G)
ind.col <- which(maf > 0.05)
G2 <- big_copy(G, ind.col = ind.col)
G2
CHR <- infos$chromosome[ind.col]
normalize <- function(x) {
qx <- ppoints(length(x))
qnorm(qx[rank(x)])
}
G3 <- scale(G2[])
rownames(G3)<-make.names(1:1000, unique = TRUE)
colnames(G3)<-make.names(1:ncol(G3), unique = TRUE)
mod<-gwas3(y=infos$phenotype1, gen=G3, fam=infos$pop,
chr=as.vector(table(CHR)), cov=infos$envi)
dim(G3)
|
library(testthat)
library(DO.utils)
test_check("DO.utils")
| /tests/testthat.R | permissive | DiseaseOntology/DO.utils | R | false | false | 60 | r | library(testthat)
library(DO.utils)
test_check("DO.utils")
|
library(testthat)
library(wiodv2)
test_check("wiodv2")
| /tests/testthat.R | no_license | zauster/wiodv2 | R | false | false | 56 | r | library(testthat)
library(wiodv2)
test_check("wiodv2")
|
library(QDNAseq)
library(dplyr)
options(future.globals.maxSize = 1048576000)
future::plan("multiprocess")
# ChIp-seq Metadata
metadata_chipseq = data.frame(
CellType = c("GICAN", "SH-EP", "SK-N-AS", "GIMEN", "SK-N-SH", "NB69", "SJNB12",
"SH-SY5Y", "SJNB1", "SK-N-FI", "CLB-GA", "NB-EBc1",
"LAN1", "CLB-PE", "SK-N-DZ", "CLB-CAR", "CLB-MA",
"IMR32", "CHP212", "SJNB8", "TR14", "SK-N-BE2-C",
"N206", "SJNB6", "CLB-BER-Lud")
)
metadata_chipseq$Class = NA
metadata_chipseq =
metadata_chipseq %>%
mutate(Class =
ifelse(CellType %in% c("GICAN", "SH-EP", "SK-N-AS", "GIMEN", "SK-N-SH", "NB69", "SJNB12", "SJNB12"),
"noMYCN",
Class)) %>%
mutate(Class =
ifelse(CellType %in% c("SH-SY5Y", "SJNB1", "SK-N-FI", "CLB-GA", "NB-EBc1"),
"lowMYCN",
Class)) %>%
mutate(Class =
ifelse(CellType %in% c("LAN1", "CLB-PE", "SK-N-DZ", "CLB-CAR", "CLB-MA",
"IMR32", "CHP212", "SJNB8", "TR14", "SK-N-BE2-C",
"N206", "SJNB6", "CLB-BER-Lud"),
"MNA",
Class))
metadata_chipseq$bam_fname = paste0("/Volumes/Elements/nb-cl-chipseq-results/bam/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam")
metadata_chipseq$output_bed_fname = paste0("/Volumes/Elements/nb-cl-chipseq-qdnaseq/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam.qdnaseq.bed")
metadata_chipseq$output_pdf_fname = paste0("/Volumes/Elements/nb-cl-chipseq-qdnaseq/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam.qdnaseq.pdf")
# this is just a very special case
metadata_chipseq[11, "bam_fname"] = paste0("/Volumes/Elements/nb-cl-chipseq-results/bam/Boeva_CLB-GA_rep2_Input.trimmed.bwa_hg19.rmdup.bam")
# make sure all files exist
sum(!file.exists(metadata_chipseq$bam_fname))
# Finished i=10. Afterewards broke at CLB-GA, bc did not find the bam file.
# 17:16 Finished i=14.
# 18:39 Finished i=16
# 20:05 Finished i=18
# 23:13 Finished i=22
for (i in 15:nrow(metadata_chipseq)){
bins <- getBinAnnotations(binSize = 1, genome="hg19")
r <- binReadCounts(bins, bamfiles = metadata_chipseq[i, "bam_fname"],
minMapq=20, isSecondaryAlignment=FALSE)
r <- applyFilters(r, residual = F, blacklist = T, mappability = F,
bases = F, chromosomes = c("X", "Y", "MT"))
r <- estimateCorrection(r)
r <- correctBins(r)
r <- normalizeBins(r)
r <- segmentBins(r, alpha = 0.01, transformFun = "sqrt")
# Save results to bed file
exportBins(r, file=metadata_chipseq[i, "output_bed_fname"],
format="bed", type = "segments",
filter = T, logTransform = F, digits = 2)
# Plot bins and segmentation
# if (file.exists(as.character(metadata_chipseq[i, "output_pdf_fname"])) )file.remove(as.character(metadata_chipseq[i, "output_pdf_fname"]))
# pdf(file = as.character(metadata_chipseq[i, "output_pdf_fname"]))
# plot(r)
# dev.off()
print("Finished:")
print(i)
}
| /Code/QDNAseqChIpSeqInput.R | no_license | graceooh/MYCNAmplicon | R | false | false | 3,139 | r | library(QDNAseq)
library(dplyr)
options(future.globals.maxSize = 1048576000)
future::plan("multiprocess")
# ChIp-seq Metadata
metadata_chipseq = data.frame(
CellType = c("GICAN", "SH-EP", "SK-N-AS", "GIMEN", "SK-N-SH", "NB69", "SJNB12",
"SH-SY5Y", "SJNB1", "SK-N-FI", "CLB-GA", "NB-EBc1",
"LAN1", "CLB-PE", "SK-N-DZ", "CLB-CAR", "CLB-MA",
"IMR32", "CHP212", "SJNB8", "TR14", "SK-N-BE2-C",
"N206", "SJNB6", "CLB-BER-Lud")
)
metadata_chipseq$Class = NA
metadata_chipseq =
metadata_chipseq %>%
mutate(Class =
ifelse(CellType %in% c("GICAN", "SH-EP", "SK-N-AS", "GIMEN", "SK-N-SH", "NB69", "SJNB12", "SJNB12"),
"noMYCN",
Class)) %>%
mutate(Class =
ifelse(CellType %in% c("SH-SY5Y", "SJNB1", "SK-N-FI", "CLB-GA", "NB-EBc1"),
"lowMYCN",
Class)) %>%
mutate(Class =
ifelse(CellType %in% c("LAN1", "CLB-PE", "SK-N-DZ", "CLB-CAR", "CLB-MA",
"IMR32", "CHP212", "SJNB8", "TR14", "SK-N-BE2-C",
"N206", "SJNB6", "CLB-BER-Lud"),
"MNA",
Class))
metadata_chipseq$bam_fname = paste0("/Volumes/Elements/nb-cl-chipseq-results/bam/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam")
metadata_chipseq$output_bed_fname = paste0("/Volumes/Elements/nb-cl-chipseq-qdnaseq/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam.qdnaseq.bed")
metadata_chipseq$output_pdf_fname = paste0("/Volumes/Elements/nb-cl-chipseq-qdnaseq/Boeva_", metadata_chipseq$CellType, "_Input.trimmed.bwa_hg19.rmdup.bam.qdnaseq.pdf")
# this is just a very special case
metadata_chipseq[11, "bam_fname"] = paste0("/Volumes/Elements/nb-cl-chipseq-results/bam/Boeva_CLB-GA_rep2_Input.trimmed.bwa_hg19.rmdup.bam")
# make sure all files exist
sum(!file.exists(metadata_chipseq$bam_fname))
# Finished i=10. Afterewards broke at CLB-GA, bc did not find the bam file.
# 17:16 Finished i=14.
# 18:39 Finished i=16
# 20:05 Finished i=18
# 23:13 Finished i=22
for (i in 15:nrow(metadata_chipseq)){
bins <- getBinAnnotations(binSize = 1, genome="hg19")
r <- binReadCounts(bins, bamfiles = metadata_chipseq[i, "bam_fname"],
minMapq=20, isSecondaryAlignment=FALSE)
r <- applyFilters(r, residual = F, blacklist = T, mappability = F,
bases = F, chromosomes = c("X", "Y", "MT"))
r <- estimateCorrection(r)
r <- correctBins(r)
r <- normalizeBins(r)
r <- segmentBins(r, alpha = 0.01, transformFun = "sqrt")
# Save results to bed file
exportBins(r, file=metadata_chipseq[i, "output_bed_fname"],
format="bed", type = "segments",
filter = T, logTransform = F, digits = 2)
# Plot bins and segmentation
# if (file.exists(as.character(metadata_chipseq[i, "output_pdf_fname"])) )file.remove(as.character(metadata_chipseq[i, "output_pdf_fname"]))
# pdf(file = as.character(metadata_chipseq[i, "output_pdf_fname"]))
# plot(r)
# dev.off()
print("Finished:")
print(i)
}
|
# Tests for tbeptools::show_sitemap against the packaged epcdata set.
# NOTE(review): expect_is() is deprecated in testthat 3e -- consider
# expect_s3_class() when the suite is migrated.
test_that("Checking show_sitemap class", {
  result <- show_sitemap(epcdata, yrsel = 2018)
  expect_is(result, 'ggplot')
})
test_that("Checking show_sitemap class, thrs = T", {
  result <- show_sitemap(epcdata, thrs = TRUE, yrsel = 2018)
  expect_is(result, 'ggplot')
})
# A year outside the data range must raise an informative error.
test_that("Checking show_sitemap yrsel",{
  expect_error(show_sitemap(epcdata, yrsel = 1962), "1962 not in epcdata")
})
| /tests/testthat/test-show_sitemap.R | no_license | melimore86/tbeptools | R | false | false | 391 | r | test_that("Checking show_sitemap class", {
result <- show_sitemap(epcdata, yrsel = 2018)
expect_is(result, 'ggplot')
})
test_that("Checking show_sitemap class, thrs = T", {
result <- show_sitemap(epcdata, thrs = TRUE, yrsel = 2018)
expect_is(result, 'ggplot')
})
test_that("Checking show_sitemap yrsel",{
expect_error(show_sitemap(epcdata, yrsel = 1962), "1962 not in epcdata")
})
|
## Author: Marion A Granich RN
## Date: 01/16/2015
## Programming Assigment 2
## Description: Costly operations can be cached. The following two
## functions will calculate and inverse the matrix
## and cache the results for later use. They are both
## Modeled after the mean example written by R D Peng in the
## homework assignment instructions.
## makeCacheMatrix
## Build a matrix wrapper that can cache its inverse. Returns a list of
## four closures sharing one environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a computed inverse
##   getinv()    -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix())
{
  cached_inverse <- NULL
  list(
    set = function(y)
    {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## cacheSolve
## Return the inverse of the matrix held by a makeCacheMatrix() wrapper,
## computing it with solve() only on the first call and reusing the
## cached result afterwards.
## Per the instructions, we assume the matrix is always invertible.
cacheSolve <- function(x, ...)
{
  cached <- x$getinv()
  if (is.null(cached))
  {
    # Cache miss: compute, store, and return the inverse.
    inv <- solve(x$get())
    x$setinv(inv)
    return(inv)
  }
  message("getting cached data.")
  cached
}
## Some Test Results
## > x = rbind(c(1, 4, 3), c(3,7,1), c(2,1,4))
## > x
## [,1] [,2] [,3]
## [1,] 1 4 3
## [2,] 3 7 1
## [3,] 2 1 4
## > m <- makeCacheMatrix(x)
## > m$get()
## [,1] [,2] [,3]
## [1,] 1 4 3
## [2,] 3 7 1
## [3,] 2 1 4
## > cacheSolve(m)
## [,1] [,2] [,3]
## [1,] -0.5869565 0.28260870 0.3695652
## [2,] 0.2173913 0.04347826 -0.1739130
## [3,] 0.2391304 -0.15217391 0.1086957
## > cacheSolve(m)
## getting cached data.
## [,1] [,2] [,3]
## [1,] -0.5869565 0.28260870 0.3695652
## [2,] 0.2173913 0.04347826 -0.1739130
## [3,] 0.2391304 -0.15217391 0.1086957
| /cachematrix.R | no_license | mgranich/ProgrammingAssignment2 | R | false | false | 2,090 | r | ## Author: Marion A Granich RN
## Date: 01/16/2015
## Programming Assigment 2
## Description: Costly operations can be cached. The following two
## functions will calculate and inverse the matrix
## and cache the results for later use. They are both
## Modeled after the mean example written by R D Peng in the
## homework assignment instructions.
## makeCacheMatrix
## This function produces a list of functions to get an set the value
## of a matrix, and get and set the inverse of the matrix. The final
## return value of this function is the list.
makeCacheMatrix <- function(x = matrix())
{
i <- NULL
set <- function(y)
{
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inverse) i <<- inverse
getinv <- function() i
list(set=set, get=get, setinv=setinv, getinv=getinv)
}
## Write a short comment describing this function
## This function uses solve() fuction to obtain the inverse of
## the matrix, but first checks to see if the inverse exists in
## cache and if it does, will return the cache value, if not, will
## calculate the inverse and return it.
## Per the instructions, we assume the matrix is always invertible.
cacheSolve <- function(x, ...)
{
i <- x$getinv()
if(!is.null(i)) #Check to see if the inverse exists
{
message("getting cached data.")
return(i)
}
data <- x$get()
i <- solve(data)
x$setinv(i)
i
}
## Some Test Results
## > x = rbind(c(1, 4, 3), c(3,7,1), c(2,1,4))
## > x
## [,1] [,2] [,3]
## [1,] 1 4 3
## [2,] 3 7 1
## [3,] 2 1 4
## > m <- makeCacheMatrix(x)
## > m$get()
## [,1] [,2] [,3]
## [1,] 1 4 3
## [2,] 3 7 1
## [3,] 2 1 4
## > cacheSolve(m)
## [,1] [,2] [,3]
## [1,] -0.5869565 0.28260870 0.3695652
## [2,] 0.2173913 0.04347826 -0.1739130
## [3,] 0.2391304 -0.15217391 0.1086957
## > cacheSolve(m)
## getting cached data.
## [,1] [,2] [,3]
## [1,] -0.5869565 0.28260870 0.3695652
## [2,] 0.2173913 0.04347826 -0.1739130
## [3,] 0.2391304 -0.15217391 0.1086957
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.test.R
\name{make.map.cce}
\alias{make.map.cce}
\title{Make Map (CCE)}
\usage{
make.map.cce(
coast = "coastline3",
lon.min = -126,
lon.max = -119,
lat.min = 31,
lat.max = 36,
p = make.proj(projection = "merc", lat = 35, lon = -122, dlat = 3),
land.col = "#252525",
draw.grid = TRUE,
dlon = 3,
dlat = 3
)
}
\arguments{
\item{coast}{Should be the name of a coastline data object. A value of NULL sets the default coastline to 'coastline2'.}
\item{lon.min}{The minimum longitude displayed on the map.}
\item{lon.max}{The maximum longitude on the map}
\item{lat.min}{The minimum latitude shown on the map}
\item{lat.max}{The maximum latitude shown on the map}
\item{p}{a projection string, such as those generated by 'make.proj()'}
\item{land.col}{A color string or value for the land polygon}
\item{draw.grid}{A logical; if \code{TRUE} a longitude/latitude grid is drawn}
\item{dlon}{The spacing for the longitude grid (in degrees)}
\item{dlat}{The spacing for the latitude grid (in degrees)}
}
\description{
An example map of the CCE study region.
}
\author{
Thomas Bryce Kelly
}
| /man/make.map.cce.Rd | no_license | tbrycekelly/TheSource | R | false | true | 1,109 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.test.R
\name{make.map.cce}
\alias{make.map.cce}
\title{Make Map (CCE)}
\usage{
make.map.cce(
coast = "coastline3",
lon.min = -126,
lon.max = -119,
lat.min = 31,
lat.max = 36,
p = make.proj(projection = "merc", lat = 35, lon = -122, dlat = 3),
land.col = "#252525",
draw.grid = TRUE,
dlon = 3,
dlat = 3
)
}
\arguments{
\item{coast}{Should be the name of a coastline data object. A value of NULL sets the default cosatline to 'coastline2'.}
\item{lon.min}{The minimum longitude displayed on the map.}
\item{lon.max}{The maximum longitude on the map}
\item{lat.min}{The minimum latitude shown on the map}
\item{lat.max}{The maximum latitude shown on the map}
\item{p}{a projection string, such as those generated by 'make.proj()'}
\item{land.col}{A color string or value for the land polygon}
\item{dlon}{The spacing for the longitude grid (in degrees)}
\item{dlat}{The spacing for the latitude grid (in degrees)}
}
\description{
An example map of the CCE study region.
}
\author{
Thomas Bryce Kelly
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{weights_almon}
\alias{weights_almon}
\title{Compute Almon polynomials}
\usage{
weights_almon(n, orders = 1:3, do.inverse = TRUE, do.normalize = TRUE)
}
\arguments{
\item{n}{a single \code{numeric} to indicate the lag length (cf., \emph{n}).}
\item{orders}{a \code{numeric} vector as the sequence of the Almon orders (cf., \emph{r}). The maximum value
corresponds to \emph{R}.}
\item{do.inverse}{\code{TRUE} if the inverse Almon polynomials should be calculated as well.}
\item{do.normalize}{a \code{logical}, if \code{TRUE} weights are normalized to unity.}
}
\value{
A \code{data.frame} of all Almon polynomial weighting curves, of size \code{length(orders)} (times two if
\code{do.inverse = TRUE}).
}
\description{
Computes Almon polynomial weighting curves. Handy to self-select specific time aggregation weighting schemes
for input in \code{\link{ctr_agg}} using the \code{weights} argument.
}
\details{
The Almon polynomial formula implemented is:
\eqn{(1 - (1 - i/n)^{r})(1 - i/n)^{R - r}}{(1 - (1 - i/n)^r) * (1 - i/n)^(R - r)}, where \eqn{i} is the lag index ordered from
1 to \eqn{n}. The inverse is computed by changing \eqn{i/n} to \eqn{1 - i/n}.
}
\seealso{
\code{\link{ctr_agg}}
}
| /man/weights_almon.Rd | no_license | kbenoit/sentometrics | R | false | true | 1,289 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{weights_almon}
\alias{weights_almon}
\title{Compute Almon polynomials}
\usage{
weights_almon(n, orders = 1:3, do.inverse = TRUE, do.normalize = TRUE)
}
\arguments{
\item{n}{a single \code{numeric} to indicate the lag length (cf., \emph{n}).}
\item{orders}{a \code{numeric} vector as the sequence of the Almon orders (cf., \emph{r}). The maximum value
corresponds to \emph{R}.}
\item{do.inverse}{\code{TRUE} if the inverse Almon polynomials should be calculated as well.}
\item{do.normalize}{a \code{logical}, if \code{TRUE} weights are normalized to unity.}
}
\value{
A \code{data.frame} of all Almon polynomial weighting curves, of size \code{length(orders)} (times two if
\code{do.inverse = TRUE}).
}
\description{
Computes Almon polynomial weighting curves. Handy to self-select specific time aggregation weighting schemes
for input in \code{\link{ctr_agg}} using the \code{weights} argument.
}
\details{
The Almon polynomial formula implemented is:
\eqn{(1 - (1 - i/n)^{r})(1 - i/n)^{R - r}}{(1 - (1 - i/n)^r) * (1 - i/n)^(R - r)}, where \eqn{i} is the lag index ordered from
1 to \eqn{n}. The inverse is computed by changing \eqn{i/n} to \eqn{1 - i/n}.
}
\seealso{
\code{\link{ctr_agg}}
}
|
testlist <- list(a = -256L, b = 1573632L, x = c(134217728L, 1040187392L, 0L, 3L, 50331648L, 8388608L, 1023L, -1L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610388025-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 176 | r | testlist <- list(a = -256L, b = 1573632L, x = c(134217728L, 1040187392L, 0L, 3L, 50331648L, 8388608L, 1023L, -1L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
# Hendrix Well-Being Survey (HWBS) 2017: load the condensed student responses.
library(data.table)  # NOTE(review): loaded but not used in the visible script
library(readxl) #library used to import Excel data
# import the Hendrix Well Being Survey Data (change location for your own HWBS Data)
# NOTE(review): absolute Windows path -- must be edited per machine.
dataset <- read_excel("C:/Users/kates/Desktop/HWBS_STUDENTS_2017_condensed.xlsx")
rownum <- 531 # total number of survey responses recorded
# Response rate of a question: fraction of the 531 surveys with a non-NA answer.
response_rate <- function(col) {
  answered <- rownum - sum(is.na(col))
  answered / rownum
}
# Standard deviation of a column, ignoring missing values
# (equivalent to sd(col, na.rm = TRUE)).
std <- function(col) {
  sqrt(var(col, na.rm = TRUE))
}
# Fraction of non-missing responses equal to 1 ("yes" answers),
# i.e. the share of respondents with a score above 0.
freq_bool <- function(col) {
  counts <- table(na.omit(col))
  sum(counts[names(counts) == 1]) / sum(counts)
}
#################################################################################
# MH and Academic Performance
#################################################################################
# In the past month, how many days have you felt that emotional or mental health
# difficulties have hurt your academic performance?
#   None; 0 days   - 1
#   1-2 days       - 2
#   3-5 days       - 3
#   6 or more days - 4
# Frequency table of the mental-health impact item (missing answers dropped).
MH_academic_impact_tbl <- table(na.omit(dataset$MH_academic_impact))
# Density histogram of the same item, one bar per answer category.
MH_academic_impact_hist <- hist(dataset$MH_academic_impact, breaks = c(0,1,2,3,4),
                                freq = FALSE, labels = c("0", "1-2", "3-5", ">5"),
                                xlab = "Days of Hurt Academic Performance",
                                main = "MH Affecting Academic Performance in Past Month")
# Frequency tables for the remaining "academic impact" items.
PH_academic_impact_tbl <- table(dataset$PH_academic_impact)
Eating_academic_impact_tbl <- table(dataset$Eating_academic_impact)
# FIX: was `(dataset$Substance_academic_impact)` (the raw column), which was
# inconsistent with every other *_tbl assignment -- wrap it in table().
Substance_academic_impact_tbl <- table(dataset$Substance_academic_impact)
Anxiety_academic_impact_tbl <- table(dataset$Anxiety_academic_impact)
Depression_academic_impact_tbl <- table(dataset$Depression_academic_impact)
Extracurricular_academic_impact_tbl <- table(dataset$Extracurricular_academic_impact)
HelpingOthers_academic_impact_tbl <- table(dataset$HelpingOthers_academic_impact)
Relationships_academic_impact_tbl <- table(dataset$Relationships_academic_impact)
Discrimination_academic_impact_tbl <- table(dataset$Discrimination_academic_impact)
| /MH_academic_performance.R | no_license | SandersKM/hdx-well-being-survey | R | false | false | 2,225 | r | library(data.table)
library(readxl) #library used to import Excel data
# import the Hendrix Well Being Survey Data (change location for your own HWBS Data)
dataset <- read_excel("C:/Users/kates/Desktop/HWBS_STUDENTS_2017_condensed.xlsx")
rownum <- 531 #total number of responses recorded
#function for determining the response rate (% data that was not NA)
response_rate <- function(col){
rr <- (rownum - sum(is.na(col)))/ rownum
return(rr)
}
#function for getting the standard deviation
std <- function(col){
var <- var(col, na.rm = TRUE)
return(sqrt(var))
}
#frequeny of people who answered "yes"
#percentage with scores above 0:
freq_bool <- function(col){
tbl <- table(na.omit(col))
return(sum(tbl[names(tbl)==1])/sum(tbl))
}
#################################################################################
# MH and Academic Performance
#################################################################################
#In the past month, how many days have you felt that emotional or mental health
#difficulties have hurt your academic performance?
#None; 0 days - 1
#1-2 days - 2
#3-5 days - 3
#6 or more days - 4
MH_academic_impact_tbl <- table(na.omit(dataset$MH_academic_impact))
MH_academic_impact_hist <- hist(dataset$MH_academic_impact, breaks = c(0,1,2,3,4),
freq = FALSE, labels = c("0", "1-2", "3-5", ">5"),
xlab = "Days of Hurt Academic Performance",
main = "MH Affecting Academic Performance in Past Month")
PH_academic_impact_tbl <- table(dataset$PH_academic_impact)
Eating_academic_impact_tbl <- table(dataset$Eating_academic_impact)
Substance_academic_impact_tbl <- (dataset$Substance_academic_impact)
Anxiety_academic_impact_tbl <- table(dataset$Anxiety_academic_impact)
Depression_academic_impact_tbl <- table(dataset$Depression_academic_impact)
Extracurricular_academic_impact_tbl <- table(dataset$Extracurricular_academic_impact)
HelpingOthers_academic_impact_tbl <- table(dataset$HelpingOthers_academic_impact)
Relationships_academic_impact_tbl <- table(dataset$Relationships_academic_impact)
Discrimination_academic_impact_tbl <- table(dataset$Discrimination_academic_impact)
|
##################################################
# operations on objects of type AQUAENV
###################################################
# PRIVATE function:
# Bulk unit / pH-scale conversion for an object of class aquaenv: every
# element whose attribute `convattr` equals `from` is multiplied by
# `factor` and relabelled `to`; all other elements are left untouched.
convert.aquaenv <- function(aquaenv,         # object of class aquaenv
                            from,            # the unit which needs to be converted (exact string match)
                            to,              # the unit to which the conversion should go
                            factor,          # multiplicative conversion factor (e.g. 1000 for mol -> mmol)
                            convattr="unit", # attribute to convert: either "unit" or "pH scale"
                            ...)
{
  for (name in names(aquaenv))
  {
    tag <- attr(aquaenv[[name]], convattr)
    if (!is.null(tag) && (tag == from))
    {
      aquaenv[[name]] <- aquaenv[[name]] * factor
      attr(aquaenv[[name]], convattr) <- to
    }
  }
  return(aquaenv)  # object of class aquaenv with the converted elements
}
# PRIVATE S3 method:
# Length of an aquaenv object: the length of the first element that is a
# true vector (length > 1), i.e. > 1 when one of the input variables was
# vectorised; 1 when every element is scalar.
length.aquaenv <- function(x, ...)
{
  for (element in x)
  {
    n <- length(element)
    if (n > 1)
    {
      return(n)  # maximal length of the elements in the object
    }
  }
  return(1)
}
# PRIVATE S3 method:
# Insert one element into an aquaenv object: x is a pair (value, name),
# e.g. list(0.5, "newvar"); the value is stored under the given name.
c.aquaenv <- function(aquaenv, x, ...)
{
  value <- x[[1]]
  name  <- x[[2]]
  aquaenv[[name]] <- value
  return(aquaenv)  # object of class aquaenv with the added element
}
# PRIVATE S3 method:
# Concatenate two aquaenv objects element-wise. Element names and "unit"
# attributes are taken from x; for each name, y's values are appended
# after x's. Elements of y with no counterpart in x are ignored.
merge.aquaenv <- function(x, y, ...)
{
  for (key in names(x))
  {
    unit <- attr(x[[key]], "unit")
    combined <- c(x[[key]], y[[key]])  # c() drops attributes, so re-attach
    attr(combined, "unit") <- unit
    x[[key]] <- combined
  }
  return(x)  # object of class aquaenv with merged elements
}
# PRIVATE function:
# clones an object of class aquaenv by re-running the aquaenv() constructor
# (defined elsewhere in this package) with the original's state variables;
# it is possible to supply a new value for either TA or pH. The switches
# speciation, skeleton, revelle, and dsa are inferred from which elements
# are present in the object to be cloned.
cloneaquaenv <- function(aquaenv,           # object of class aquaenv
                         TA=NULL,           # optional new value for TA
                         pH=NULL,           # optional new value for pH
                         k_co2=NULL,        # used for TA fitting: give a K_CO2 and NOT calculate it from T and S: i.e. K_CO2 can be fitted in the routine as well
                         k1k2="roy",        # either "roy" (default, Roy1993a) or "lueker" (Lueker2000, calculated with seacarb) for K\_CO2 and K\_HCO3
                         khf="dickson",     # either "dickson" (default, Dickson1979a) or "perez" (Perez1987a, calculated with seacarb) for K\_HF}
                         khso4="dickson")   # either 'dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K\_HSO4
{
  # If neither TA nor pH is overridden, keep the original pH.
  if (is.null(TA) && is.null(pH))
  {
    pH <- aquaenv$pH
  }
  res <- aquaenv(S=aquaenv$S, t=aquaenv$t, p=aquaenv$p, SumCO2=aquaenv$SumCO2, SumNH4=aquaenv$SumNH4, SumH2S=aquaenv$SumH2S,SumH3PO4=aquaenv$SumH3PO4,
                 SumSiOH4=aquaenv$SumSiOH4, SumHNO3=aquaenv$SumHNO3, SumHNO2=aquaenv$SumHNO2, SumBOH3=aquaenv$SumBOH3, SumH2SO4=aquaenv$SumH2SO4,
                 SumHF=aquaenv$SumHF, pH=pH, TA=TA,
                 speciation=(!is.null(aquaenv$HCO3)), skeleton=(is.null(aquaenv$Na)), revelle=(!is.null(aquaenv$revelle)), dsa=(!is.null(aquaenv$dTAdH)), k1k2=k1k2, khf=khf, khso4=khso4)
  # Optionally pin K_CO2 to a fitted value instead of the T/S-derived one.
  if (!is.null(k_co2))
  {
    res$K_CO2 <- rep(k_co2,length(res))
    attr(res$K_CO2, "unit") <- "mol/kg-soln"
    attr(res$K_CO2, "pH scale") <- "free"
  }
  return(res)  # cloned object of class aquaenv
}
# PRIVATE function:
# Rebuild an object of class aquaenv from a data frame (e.g. as produced
# by the numerical solver of a dynamic model): each column becomes one
# list element.
from.data.frame <- function(df) # data frame
{
  result <- as.list(df)
  class(result) <- "aquaenv"
  return(result)  # object of class aquaenv
}
#########################################################
# CONVERSION functions
#########################################################
# PRIVATE function:
# converts either the pH scale of a pH value, the pH scale of a dissociation
# constant (K*), or the unit of a concentration value. pH values convert
# additively (in -log10 space), K* values multiplicatively, and
# concentrations via density/molality factors.
convert.standard <- function(x,             # the object to be converted (pH value, K* value, or concentration value)
                             vartype,       # the type of x, either "pHscale", "KHscale", or "conc"
                             what,          # the type of conversion to be done, for pH scales one of "free2tot", "free2sws", "free2nbs", ... (any combination of "free", "tot", "sws", and "nbs"); for concentrations one of "molar2molal", "molar2molin", ... (any combination of "molar" (mol/l), "molal" (mol/kg-H2O), and "molin" (mol/kg-solution))
                             S,             # salinity (in practical salinity units: no unit)
                             t,             # temperature in degrees centigrade
                             p=0,           # gauge pressure (total pressure minus atmospheric pressure) in bars
                             SumH2SO4=NULL, # total sulfate concentration in mol/kg-solution; if not supplied this is calculated from S
                             SumHF=NULL,    # total fluoride concentration in mol/kg-solution; if not supplied this is calculated from S
                             khf="dickson", # either "dickson" (default, Dickson1979a) or "perez" (Perez1987a) for K\_HF
                             khso4="dickson") # either 'dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K\_HSO4
  {
    # Dispatch first on the kind of quantity, then on the requested
    # conversion; scaleconvert() and seadensity() are defined below.
    result <- (switch
               (vartype,
                pHscale = switch
                (what,
                 free2tot = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2tot),
                 free2sws = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2sws),
                 free2nbs = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2nbs),
                 tot2free = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2free),
                 tot2sws = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2sws),
                 tot2nbs = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2nbs),
                 sws2free = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2free),
                 sws2tot = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2tot),
                 sws2nbs = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2nbs),
                 nbs2free = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2free),
                 nbs2tot = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2tot),
                 nbs2sws = x - log10(scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2sws)
                 ),
                KHscale = switch
                (what,
                 free2tot = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2tot,
                 free2sws = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2sws,
                 free2nbs = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$free2nbs,
                 tot2free = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2free,
                 tot2sws = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2sws,
                 tot2nbs = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$tot2nbs,
                 sws2free = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2free,
                 sws2tot = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2tot,
                 sws2nbs = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$sws2nbs,
                 nbs2free = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2free,
                 nbs2tot = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2tot,
                 nbs2sws = x * scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)$nbs2sws
                 ),
                conc = switch
                (what,
                 molar2molal = x * (1/((seadensity(S,t)/1e3)* molal2molin(S))),
                 molar2molin = x * (1/((seadensity(S,t))/1e3)) ,
                 molal2molar = x * (molal2molin(S) * (seadensity(S,t))/1e3) ,
                 molal2molin = x * (molal2molin(S)) ,
                 molin2molar = x * (seadensity(S,t)/1e3) ,
                 molin2molal = x * (1/molal2molin(S))
                 )
                )
               )
    # Tag the result with its new pH scale or unit attribute.
    if ((what == "tot2free") || (what == "sws2free") || (what == "nbs2free"))
    {
      attr(result, "pH scale") <- "free"
    }
    else if ((what == "free2tot") || (what == "sws2tot") || (what == "nbs2tot"))
    {
      attr(result, "pH scale") <- "tot"
    }
    else if ((what == "free2nbs") || (what == "sws2nbs") || (what == "tot2nbs"))
    {
      attr(result, "pH scale") <- "nbs"
    }
    else if ((what == "molar2molal") || (what == "molin2molal"))
    {
      attr(result, "unit") <- "mol/kg-H2O"
    }
    else if ((what == "molal2molin") || (what == "molar2molin"))
    {
      attr(result, "unit") <- "mol/kg-soln"
    }
    else if ((what == "molal2molar") || (what == "molin2molar"))
    {
      attr(result, "unit") <- "mol/l-soln"
    }
    return(result) # converted pH, K*, or concentration value, attributed with the new unit/pH scale
  }
# PUBLIC function:
# Water depth (in m) from the gauge pressure p (or the total pressure P)
# in bar, the latitude (in degrees: -90 to 90) and the atmospheric
# pressure Pa (in bar), using the UNESCO pressure-to-depth polynomial.
# references Fofonoff1983
watdepth <- function(P=Pa, p=pmax(0, P-Pa), lat=0, Pa=1.013253)
{
  # Local gravity (m/s^2) as a function of latitude.
  local_gravity <- function(latitude)
  {
    s2 <- sin(latitude * pi/180)^2
    9.780318 * (1 + (0.0052788 + 2.36e-05 * s2) * s2)
  }
  pres <- p * 10  # bar -> decibar, the unit the polynomial expects
  numerator <- (9.72659 + (-2.2512e-05 + (2.279e-10 - 1.82e-15 * pres) * pres) * pres) * pres
  denominator <- local_gravity(lat) + 1.092e-06 * pres
  numerator / denominator
}
# PUBLIC function:
# calculates the gauge pressure (in bar) from the depth d (in m), the
# latitude (in degrees: -90 to 90) and the atmospheric pressure (in bar)
# by numerically inverting watdepth() with uniroot().
# NOTE(review): the Pa default here (1.01325) differs slightly from the
# one in watdepth() (1.013253) -- TODO confirm which is intended.
# Depends on the package-level Technicals list for solver tolerances.
# references Fofonoff1983
gauge_p <- function(d, lat=0, Pa=1.01325)
{
  gauge_p <- c()
  # One root-finding per requested depth; results are accumulated in
  # order (vector grows per element -- fine for the small inputs used).
  for (de in d)
    {
      if (de==0)
        {
          gauge_p <- c(gauge_p,0)
        }
      else
        {
          # Residual: target depth minus the depth implied by pressure x.
          xx <- function(x)
            {
              return(de - watdepth(P=x+Pa, lat=lat))
            }
          gauge_p <- c(gauge_p, (uniroot(f=xx, interval=c(0,1300), tol=Technicals$uniroottol, maxiter=Technicals$maxiter)$root))
        }
    }
  return(gauge_p)
}
# PRIVATE function:
# Ionic strength I (mol/kg-solution) as a function of salinity S
# (practical salinity units). NOTE: masks base::I() in this package.
# references: DOE1994, Zeebe2001, Roy1993b (the carbonic acid paper)
I <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  numerator <- 19.924 * S
  denominator <- 1000 - 1.005 * S
  return(numerator / denominator)  # ionic strength in mol/kg-solution
}
# PRIVATE function:
# Chlorinity Cl (in permil) from salinity S (practical salinity units),
# via the standard constant ratio S = 1.80655 * Cl.
# references: DOE1994, Zeebe2001
Cl <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  return(S / 1.80655)  # chlorinity Cl in permil
}
# PRIVATE function:
# calculates concentrations of constituents of natural seawater from a
# given salinity S, assuming constant stoichiometric ratios to chlorinity.
# Depends on the package-level lookup tables ConcRelCl (mass ratio of the
# species to chlorinity) and MeanMolecularMass.
# reference: DOE1994
seaconc <- function(spec, # constituent of seawater (chemical species) of which the concentration should be calculated. can be any name of the vectors ConcRelCl and MeanMolecularMass: "Cl", "SO4", "Br", "F", "Na", "Mg", "Ca", "K", "Sr", "B", "S"
                    S)    # salinity S in practical salinity units (i.e. no unit)
{
  return( # concentration of the constituent of seawater speciefied in spec in mol/kg-solution (molinity): this is determined by the data in ConcRelCl and MeanMolecularMass
         ConcRelCl[[spec]]/MeanMolecularMass[[spec]]*Cl(S))
}
# PRIVATE function:
# Conversion factor from molality (mol/kg-H2O) to molinity
# (mol/kg-solution) as a function of salinity S, i.e. the mass fraction
# of pure water in seawater of salinity S.
# reference: Roy1993b (the carbonic acid paper), DOE1994
molal2molin <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  return(1 - 0.001005 * S)  # multiply a molality by this to get molinity
}
# PRIVATE function:
# calculates the temperature in Kelvin from the temperature in degrees
# centigrade, using the package-level constant PhysChemConst$absZero
# (presumably -273.15 -- TODO confirm).
# NOTE(review): masks the base shortcut T (TRUE) within this package.
T <- function(t) # temperature in degrees centigrade
{
  return(t - PhysChemConst$absZero) # temperature in Kelvin
}
# PRIVATE function:
# provides pH scale conversion factors between the free, total, seawater
# (SWS) and NBS scales (caution: the activity coefficient for H+ (needed
# for NBS scale conversions) is calculated with the Davies equation
# (Zeebe2001) which is only accurate up to ionic strengthes of I = 0.5)
# references: Dickson1984, DOE1994, Zeebe2001
scaleconvert <- function(S,              # salinity S in practical salinity units (i.e. no unit)
                         t,              # temperature in degrees centigrade
                         p=0,            # gauge pressure (total pressure minus atmospheric pressure) in bars
                         SumH2SO4=NULL,  # total sulfate concentration in mol/kg-solution; if not supplied this is calculated from S
                         SumHF=NULL,     # total fluoride concentration in mol/kg-solution; if not supplied this is calculated from S
                         khf="dickson",  # either "dickson" (Dickson1979a) or "perez" (Perez1987a) for K_HF
                         khso4="dickson") # either 'dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K\_HSO4
{
  if (is.null(SumH2SO4))
    {
      SumH2SO4 = seaconc("SO4", S)
    }
  if (is.null(SumHF))
    {
      SumHF = seaconc("F", S)
    }
  # Dissociation constants from the K_HSO4()/K_HF() functions defined
  # elsewhere in this file (the local values shadow those function names).
  K_HSO4 <- K_HSO4(S, t, p, khso4=khso4)
  K_HF   <- K_HF(S, t, p, SumH2SO4, SumHF, khf=khf)
  # free -> total adds HSO4-; free -> SWS additionally adds HF.
  FreeToTot <-  (1 + (SumH2SO4/K_HSO4))
  FreeToSWS <-  (1 + (SumH2SO4/K_HSO4) + (SumHF/K_HF))
  attributes(FreeToTot) <- NULL
  attributes(FreeToSWS) <- NULL
  #davies equation: only valid up to I=0.5
  SQRTI    <- sqrt(I(S))
  eT       <- PhysChemConst$e*T(t)
  A        <- 1.82e6/(eT*sqrt(eT))
  gamma_H  <- 10^-((A*((SQRTI/(1+SQRTI)) - 0.2*I(S))))
  NBSToFree <- 1/(gamma_H*FreeToSWS) * molal2molin(S) #Lewis1998, Perez1984: pH_NBS = -log10(gamma_H (H + HSO4 + HF)) with concs being molal
                                        #i.e.: the NBS scale is related to the SEAWATER scale via gamma_H not the free scale
                                        # (since, if you measure with NBS buffers in seawater, you do not get the activity of the proton alone
                                        # but of the proton plus HSO4 and HF)
  ##################
  #Lewis1998: NBS scale is based on mol/kg-H2O (molality) and all other scales (incl free) on mol/kg-soln (molinity)
  return(list( # list of conversion factors "free2tot", "free2sws", etc.
              free2tot = FreeToTot,
              free2sws = FreeToSWS,
              free2nbs = 1/NBSToFree,
              tot2free = 1/FreeToTot,
              tot2sws  = 1/FreeToTot * FreeToSWS,
              tot2nbs  = 1/FreeToTot * 1/NBSToFree,
              sws2free = 1/FreeToSWS,
              sws2tot  = 1/FreeToSWS * FreeToTot,
              sws2nbs  = 1/FreeToSWS * 1/NBSToFree,
              nbs2free = NBSToFree,
              nbs2tot  = NBSToFree * FreeToTot,
              nbs2sws  = NBSToFree * FreeToSWS
              ))
}
# PRIVATE function:
# calculates seawater density (in kg/m3) from temperature (degrees centigrade)
# and salinity
# references: Millero1981, DOE1994
seadensity <- function(S, # salinity S in practical salinity units (i.e. no unit)
                       t) # temperature in degrees centigrade
{
  # powers of temperature used by the polynomial fits below
  tt2 <- t^2
  tt3 <- tt2 * t
  tt4 <- tt3 * t
  tt5 <- tt4 * t
  # density of pure water (S = 0), polynomial in t
  rho0 <- 999.842594 + 6.793952e-2*t - 9.095290e-3*tt2 + 1.001685e-4*tt3 - 1.120083e-6*tt4 + 6.536332e-9*tt5
  # salinity correction coefficients
  cA <- 8.24493e-1 - 4.0899e-3*t + 7.6438e-5*tt2 - 8.2467e-7*tt3 + 5.3875e-9*tt4
  cB <- -5.72466e-3 + 1.0227e-4*t - 1.6546e-6*tt2
  cC <- 4.8314e-4
  rho0 + cA*S + cB*S*sqrt(S) + cC*S^2 # seawater density in kg/m3
}
################################################################
# input / output (IO) functions
################################################################
# PRIVATE function:
# basic wrapper for the R plot function for plotting objects of class aquaenv;
# no return value, just side-effect
basicplot <- function(aquaenv,             # object of class aquaenv
                      xval,                # x-value: the independent variable describing a change in elements of an object of class aquaenv
                      type="l",            # standard plot parameter; default: plot lines
                      mgp=c(1.8, 0.5, 0),  # standard plot parameter; default: axis title on line 1.8, axis labels on line 0.5, axis on line 0
                      mar=c(3,3,0.5,0.5),  # standard plot parameter; default: margin of 3 lines bottom and left and 0.5 lines top and right
                      oma=c(0,0,0,0),      # standard plot parameter; default: no outer margin
                      size=c(15,13),       # the size of the plot device; c(width, height) in inches
                      mfrow=c(11,10),      # standard plot parameter; default: 11 rows by 10 columns of plots (par mfrow is c(nrow, ncol))
                      device="x11",        # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                      filename="aquaenv",  # filename to be used if "eps" or "pdf" is selected for device
                      newdevice,           # flag: if TRUE, new plot device is opened
                      setpar,              # flag: if TRUE parameters are set with the function par
                      ylab=NULL,           # y axis label: if given, it overrides the names from an aquaenv object
                      ...)
{
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mfrow=mfrow, mar=mar, oma=oma, mgp=mgp)
  }
  aquaenv <- as.data.frame(aquaenv)
  # BUGFIX: seq_along() instead of 1:length(): an object with zero elements
  # previously iterated over c(1, 0) and crashed on aquaenv[[1]]
  for (i in seq_along(aquaenv))
  {
    # per-panel y label: the element name unless an explicit ylab was supplied
    ylab_ <- if (is.null(ylab)) names(aquaenv)[[i]] else ylab
    plot(xval, aquaenv[[i]], ylab=ylab_, type=type, ...)
  }
}
# PRIVATE function:
# opens a graphics device for plotting; no return value, just side-effect
opendevice <- function(device,   # either "x11", "eps", or "pdf"
                       size,     # device size in the form c(width, height), in inches
                       filename) # base filename (without extension) used for "eps" or "pdf"
{
  w <- size[[1]]
  h <- size[[2]]
  # unknown device names fall through silently, as in the original if/else chain
  switch(device,
         x11 = x11(width=w, height=h),
         eps = postscript(width=w, height=h, file=paste(filename, ".eps", sep=""), paper="special"),
         pdf = pdf(width=w, height=h, file=paste(filename, ".pdf", sep=""), paper="special"))
}
# PRIVATE function:
# plots all elements of an object of class aquaenv; no return value, just side-effect
plotall <- function(aquaenv, # object of class aquaenv
                    xval,    # x-value: the independent variable describing a change in elements of the object
                    ...)
{
  # thin wrapper: basicplot already iterates over every element of the object
  basicplot(aquaenv = aquaenv, xval = xval, ...)
}
# PRIVATE function:
# plots just the elements of an object of class aquaenv given in what;
# no return value, just side-effect
selectplot <- function(aquaenv,      # object of class aquaenv
                       xval,         # x-value: the independent variable describing a change in elements of the object
                       what,         # vector of names of elements of aquaenv that should be plotted
                       mfrow=c(1,1), # standard plot parameter; default: just one plot
                       size=c(7,7),  # the size of the plot device in inches; default: 7 by 7
                       ...)
{
  # subset the object but keep the aquaenv class so basicplot treats it as such
  subsetlist <- aquaenv[what]
  class(subsetlist) <- "aquaenv"
  basicplot(subsetlist, xval=xval, mfrow=mfrow, size=size, ...)
}
# PRIVATE function:
# creates a bjerrumplot from the elements of an object of class aquaenv given
# in what; no return value, just side-effect
bjerrumplot <- function(aquaenv,              # object of class aquaenv
                        what=NULL,            # vector of names of elements of aquaenv that should be plotted; default NULL: the full standard set of acid-base species (see below)
                        log=FALSE,            # should the plot be on a logarithmic y axis?
                        palette=NULL,         # a vector of colors to use (either numbers or names given in colors()); default: 1, 2, ...
                        device="x11",         # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                        filename="aquaenv",   # filename to be used if "eps" or "pdf" is selected for device
                        size=c(12,10),        # the size of the plot device; c(width, height) in inches
                        ylim=NULL,            # standard plot parameter; if not supplied it is calculated by range() of the elements to plot
                        lwd=2,                # standard plot parameter; width of the lines in the plot
                        xlab="free scale pH", # x axis label
                        mgp=c(1.8, 0.5, 0),   # standard plot parameter; axis title, labels, and axis line positions
                        mar=c(3,3,0.5,0.5),   # standard plot parameter; plot margins
                        oma=c(0,0,0,0),       # standard plot parameter; default: no outer margin
                        legendposition="bottomleft", # position of the legend
                        legendinset=0.05,     # standard legend parameter inset
                        legendlwd=4,          # standard legend parameter lwd: line width of lines in legend
                        bg="white",           # standard legend parameter: background color
                        newdevice,            # flag: if TRUE, new plot device is opened
                        setpar,               # flag: if TRUE parameters are set with the function par
                        ...)
{
  # BUGFIX: "what" now defaults to NULL; previously it had no default, so
  # calling bjerrumplot() without it raised "argument 'what' is missing"
  # before the is.null() check below could supply the standard species list
  if (is.null(what))
  {
    what <- c("CO2", "HCO3", "CO3", "BOH3", "BOH4", "OH", "H3PO4", "H2PO4", "HPO4", "PO4", "SiOH4", "SiOOH3", "SiO2OH2",
              "H2S", "HS", "S2min", "NH4", "NH3", "H2SO4", "HSO4", "SO4", "HF", "F", "HNO3", "NO3", "HNO2", "NO2")
  }
  bjerrumvarslist <- aquaenv[what]
  class(bjerrumvarslist) <- "aquaenv"
  bjerrumvars <- as.data.frame(bjerrumvarslist)
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mar=mar, mgp=mgp, oma=oma)
  }
  if (is.null(palette))
  {
    palette <- seq_along(what)
  }
  if (log)
  {
    if (is.null(ylim))
    {
      ylim <- range(log10(bjerrumvars))
    }
    yvals <- log10(bjerrumvars)
    ylab <- paste("log10([X]/(", attr(bjerrumvarslist[[1]], "unit"), "))", sep="")
  }
  else
  {
    if (is.null(ylim))
    {
      ylim <- range(bjerrumvars)
    }
    yvals <- bjerrumvars
    ylab <- attr(bjerrumvarslist[[1]], "unit")
  }
  # draw all species into one coordinate system via par(new=TRUE) overplotting
  for (i in seq_along(bjerrumvars))
  {
    plot(aquaenv$pH, yvals[[i]], type="l", ylab=ylab, xlab=xlab, ylim=ylim, col=palette[[i]], lwd=lwd, ...)
    par(new=TRUE)
  }
  par(new=FALSE)
  legend(legendposition, inset=legendinset, legend=names(bjerrumvarslist), col=palette, bg=bg, lwd=legendlwd, ...)
}
# PRIVATE function:
# creates a cumulative plot from the elements of an object of class aquaenv
# given in what; no return value, just side-effect
cumulativeplot <- function(aquaenv,            # object of class aquaenv
                           xval,               # x-value: the independent variable describing a change in elements of an object of class aquaenv
                           what=NULL,          # vector of names of elements of aquaenv that should be plotted; default NULL: all elements
                           total=TRUE,         # should the sum of all elements specified in what be plotted as well?
                           palette=NULL,       # a vector of colors to use (either numbers or names given in colors())
                           device="x11",       # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                           filename="aquaenv", # filename to be used if "eps" or "pdf" is selected for device
                           size=c(12,10),      # the size of the plot device; c(width, height) in inches
                           ylim=NULL,          # standard plot parameter; if not supplied it is calculated from the ranges of the elements to plot
                           lwd=2,              # standard plot parameter; width of the lines in the plot
                           mgp=c(1.8, 0.5, 0), # standard plot parameter; axis title, labels, and axis line positions
                           mar=c(3,3,0.5,0.5), # standard plot parameter; plot margins
                           oma=c(0,0,0,0),     # standard plot parameter; default: no outer margin
                           legendposition="bottomleft", # position of the legend
                           legendinset=0.05,   # standard legend parameter inset
                           legendlwd=4,        # standard legend parameter lwd: line width of lines in legend
                           bg="white",         # standard legend parameter: background color
                           y.intersp=1.2,      # standard legend parameter; line spacing in the legend
                           newdevice,          # flag: if TRUE, new plot device is opened
                           setpar,             # flag: if TRUE parameters are set with the function par
                           ...)
{
  # BUGFIX: "what" now defaults to NULL; previously it had no default, so
  # calling cumulativeplot() without it raised "argument 'what' is missing"
  # before the is.null() check below could fall back to all element names
  if (is.null(what))
  {
    what <- names(aquaenv)
  }
  cumulativevarslist <- aquaenv[what]
  class(cumulativevarslist) <- "aquaenv"
  cumulativevars <- as.data.frame(cumulativevarslist)
  if (is.null(ylim))
  {
    # default y range: sum of the zero-extended ranges of all variables
    ylim <- c(0,0)
    for (var in cumulativevars)
    {
      ylim <- ylim + range(c(var,0))
    }
  }
  if (is.null(palette))
  {
    palette <- seq_along(cumulativevars)
  }
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mar=mar, mgp=mgp, oma=oma)
  }
  # invisible base plot to set up the coordinate system
  plot(xval, rep(0,length(xval)), type="l", ylim=ylim, col="white", ...)
  # stack the positive parts of all variables as polygons
  sumfuncpos <- rep(0,length(xval))
  for (x in seq_along(cumulativevars))
  {
    yval <- cumulativevars[[x]]
    yval[yval<=0] <- 0
    newsumfuncpos <- sumfuncpos + yval
    if (!(identical(yval, rep(0,length(yval)))))
    {
      polygon(c(xval,rev(xval)), c(newsumfuncpos, rev(sumfuncpos)), col=palette[[x]], border=NA)
    }
    sumfuncpos <- newsumfuncpos
  }
  # stack the negative parts of all variables as polygons
  sumfuncneg <- rep(0,length(xval))
  for (x in seq_along(cumulativevars))
  {
    yval <- cumulativevars[[x]]
    yval[yval>=0] <- 0
    newsumfuncneg <- sumfuncneg + yval
    if (!(identical(yval, rep(0,length(yval)))))
    {
      polygon(c(xval,rev(xval)), c(newsumfuncneg, rev(sumfuncneg)), col=palette[[x]], border=NA)
    }
    sumfuncneg <- newsumfuncneg
  }
  if (total)
  {
    # overlay the sum of all variables in gray
    par(new=TRUE)
    plot(xval, apply(cumulativevars, 1, sum), col="gray", type="l", ylim=ylim, xlab="", ylab="", lwd=lwd)
  }
  legend(legendposition, legend=names(cumulativevars), col=palette, inset=legendinset, y.intersp=y.intersp, bg=bg, lwd=legendlwd)
}
| /AquaEnv/R/aquaenv_private_auxilaryfunctions.R | no_license | ingted/R-Examples | R | false | false | 31,029 | r | ##################################################
# operations on objects of type AQUAENV
###################################################
# PRIVATE function:
# converts all elements carrying a given unit (or pH scale) in an object of
# class aquaenv: every matching element is multiplied by factor and its
# attribute is relabelled
convert.aquaenv <- function(aquaenv,         # object of class aquaenv
                            from,            # the unit which needs to be converted (string; must be a perfect match)
                            to,              # the unit to which the conversion should go
                            factor,          # multiplicative conversion factor (e.g. 1000 for mol -> mmol, or a factor from an aquaenv object)
                            convattr="unit", # which attribute to match and rewrite: "unit" or "pH scale"
                            ...)
{
  for (nm in names(aquaenv))
  {
    curattr <- attr(aquaenv[[nm]], convattr)
    # only touch elements that carry the attribute and match "from" exactly
    if (!is.null(curattr) && (curattr == from))
    {
      aquaenv[[nm]] <- aquaenv[[nm]] * factor
      attr(aquaenv[[nm]], convattr) <- to
    }
  }
  return(aquaenv) # object of class aquaenv with the converted elements
}
# PRIVATE function:
# returns the (maximal) length of the elements in an object of class aquaenv
# (i.e. > 1 if one of the input variables was a vector)
length.aquaenv <- function(x, # object of class aquaenv
                           ...)
{
  # the first element longer than one determines the common vector length
  for (element in x)
  {
    elen <- length(element)
    if (elen > 1)
    {
      return(elen)
    }
  }
  1 # every element is a scalar
}
# PRIVATE function:
# adds an element to an object of class aquaenv
c.aquaenv <- function(aquaenv, # object of class aquaenv
                      x,       # a pair of the form c(value, name) or list(value, name): the element to insert
                      ...)
{
  value <- x[[1]]
  name  <- x[[2]]
  aquaenv[[name]] <- value
  return(aquaenv) # object of class aquaenv with the added element
}
# PRIVATE function:
# merges the elements of two objects of class aquaenv: element names are taken
# from the first argument, whose elements also come first in the merged object
merge.aquaenv <- function(x, # object of class aquaenv: this is where the element names are taken from
                          y, # object of class aquaenv: must contain at least all the element (names) of x; extra elements are ignored
                          ...)
{
  for (nm in names(x))
  {
    # c() drops attributes, so remember the unit and restore it afterwards
    keptunit <- attr(x[[nm]], "unit")
    x[[nm]] <- c(x[[nm]], y[[nm]])
    attr(x[[nm]], "unit") <- keptunit
  }
  return(x) # object of class aquaenv with merged elements
}
# PRIVATE function:
# clones an object of class aquaenv: a new value for either TA or pH may be
# supplied; the switches speciation, skeleton, revelle, and dsa are inferred
# from the object being cloned
cloneaquaenv <- function(aquaenv,         # object of class aquaenv
                         TA=NULL,         # optional new value for TA
                         pH=NULL,         # optional new value for pH
                         k_co2=NULL,      # used for TA fitting: supply a K_CO2 instead of calculating it from T and S, so K_CO2 can be fitted as well
                         k1k2="roy",      # either "roy" (default, Roy1993a) or "lueker" (Lueker2000, calculated with seacarb) for K_CO2 and K_HCO3
                         khf="dickson",   # either "dickson" (default, Dickson1979a) or "perez" (Perez1987a, calculated with seacarb) for K_HF
                         khso4="dickson") # either "dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K_HSO4
{
  # when neither TA nor pH is supplied, keep the pH of the original object
  if (is.null(TA) && is.null(pH))
  {
    pH <- aquaenv$pH
  }
  # infer the constructor switches from what the original object contains
  withspeciation <- !is.null(aquaenv$HCO3)
  withskeleton   <- is.null(aquaenv$Na)
  withrevelle    <- !is.null(aquaenv$revelle)
  withdsa        <- !is.null(aquaenv$dTAdH)
  clone <- aquaenv(S=aquaenv$S, t=aquaenv$t, p=aquaenv$p, SumCO2=aquaenv$SumCO2, SumNH4=aquaenv$SumNH4,
                   SumH2S=aquaenv$SumH2S, SumH3PO4=aquaenv$SumH3PO4, SumSiOH4=aquaenv$SumSiOH4,
                   SumHNO3=aquaenv$SumHNO3, SumHNO2=aquaenv$SumHNO2, SumBOH3=aquaenv$SumBOH3,
                   SumH2SO4=aquaenv$SumH2SO4, SumHF=aquaenv$SumHF, pH=pH, TA=TA,
                   speciation=withspeciation, skeleton=withskeleton, revelle=withrevelle, dsa=withdsa,
                   k1k2=k1k2, khf=khf, khso4=khso4)
  if (!is.null(k_co2))
  {
    # override the calculated K_CO2 with the supplied value (used when fitting)
    clone$K_CO2 <- rep(k_co2, length(clone))
    attr(clone$K_CO2, "unit") <- "mol/kg-soln"
    attr(clone$K_CO2, "pH scale") <- "free"
  }
  return(clone) # cloned object of class aquaenv
}
# PRIVATE function:
# creates an object of class aquaenv from a data frame (e.g. as supplied by
# the numerical solver of a dynamic model)
from.data.frame <- function(df) # data frame
{
  result <- as.list(df)
  class(result) <- "aquaenv"
  return(result) # object of class aquaenv
}
#########################################################
# CONVERSION functions
#########################################################
# PRIVATE function:
# converts either the pH scale of a pH value, the pH scale of a dissociation
# constant (K*), or the unit of a concentration value
convert.standard <- function(x,               # the object to be converted (pH value, K* value, or concentration value)
                             vartype,         # the type of x: "pHscale", "KHscale", or "conc"
                             what,            # the conversion to be done; for pH scales any combination of "free", "tot", "sws", "nbs" (e.g. "free2tot"); for concentrations any combination of "molar" (mol/l), "molal" (mol/kg-H2O), "molin" (mol/kg-solution) (e.g. "molar2molal")
                             S,               # salinity (in practical salinity units: no unit)
                             t,               # temperature in degrees centigrade
                             p=0,             # gauge pressure (total pressure minus atmospheric pressure) in bars
                             SumH2SO4=NULL,   # total sulfate in mol/kg-solution; calculated from S if not supplied
                             SumHF=NULL,      # total fluoride in mol/kg-solution; calculated from S if not supplied
                             khf="dickson",   # either "dickson" (default, Dickson1979a) or "perez" (Perez1987a) for K_HF
                             khso4="dickson") # either "dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K_HSO4
{
  if ((vartype == "pHscale") || (vartype == "KHscale"))
  {
    # all twelve pH scale conversion factors come from a single scaleconvert()
    # call and the needed one is selected by name; this replaces the 24
    # duplicated switch branches of the previous implementation
    fac <- scaleconvert(S, t, p, SumH2SO4, SumHF, khf, khso4)[[what]]
    if (is.null(fac))
    {
      result <- NULL # unknown conversion name: mimic the old switch() fallthrough
    }
    else if (vartype == "pHscale")
    {
      result <- x - log10(fac) # pH values convert additively in log10 space
    }
    else
    {
      result <- x * fac        # equilibrium constants convert multiplicatively
    }
  }
  else if (vartype == "conc")
  {
    result <- switch(what,
                     molar2molal = x * (1/((seadensity(S,t)/1e3)* molal2molin(S))),
                     molar2molin = x * (1/((seadensity(S,t))/1e3)),
                     molal2molar = x * (molal2molin(S) * (seadensity(S,t))/1e3),
                     molal2molin = x * (molal2molin(S)),
                     molin2molar = x * (seadensity(S,t)/1e3),
                     molin2molal = x * (1/molal2molin(S)))
  }
  else
  {
    result <- NULL # unknown vartype: mimic the old switch() fallthrough
  }
  # attach the new pH scale / unit attribute to the converted value
  # NOTE: conversions TO the sws scale intentionally match the original code
  # and set no "pH scale" attribute
  if ((what == "tot2free") || (what == "sws2free") || (what == "nbs2free"))
  {
    attr(result, "pH scale") <- "free"
  }
  else if ((what == "free2tot") || (what == "sws2tot") || (what == "nbs2tot"))
  {
    attr(result, "pH scale") <- "tot"
  }
  else if ((what == "free2nbs") || (what == "sws2nbs") || (what == "tot2nbs"))
  {
    attr(result, "pH scale") <- "nbs"
  }
  else if ((what == "molar2molal") || (what == "molin2molal"))
  {
    attr(result, "unit") <- "mol/kg-H2O"
  }
  else if ((what == "molal2molin") || (what == "molar2molin"))
  {
    attr(result, "unit") <- "mol/kg-soln"
  }
  else if ((what == "molal2molar") || (what == "molin2molar"))
  {
    attr(result, "unit") <- "mol/l-soln"
  }
  return(result) # converted pH, K*, or concentration value, attributed with the new unit/pH scale
}
# PUBLIC function:
# calculates the water depth (in m) from the gauge pressure p (or the total
# pressure P), the latitude (in degrees: -90 to 90), and the atmospheric
# pressure Pa (in bar)
# reference: Fofonoff1983
watdepth <- function(P=Pa, p=pmax(0, P-Pa), lat=0, Pa=1.013253)
{
  # local gravity (m/s2) as a function of latitude, Fofonoff1983
  gravity <- function(lat)
  {
    sin2 <- sin(lat * pi/180)^2
    9.780318 * (1 + (0.0052788 + 2.36e-05 * sin2) * sin2)
  }
  P <- p * 10 # gauge pressure in decibars, as required by the formula
  numerator   <- (9.72659 + (-2.2512e-05 + (2.279e-10 - 1.82e-15 * P) *
                  P) * P) * P
  denominator <- gravity(lat) + 1.092e-06 * P
  numerator/denominator # water depth in m
}
# PUBLIC function:
# calculates the gauge pressure (in bar) from the depth d (in m), the latitude
# (in degrees: -90 to 90), and the atmospheric pressure Pa (in bar)
# reference: Fofonoff1983
gauge_p <- function(d, lat=0, Pa=1.01325)
{
  # pressure for one depth: numerically invert watdepth()
  depth2p <- function(de)
  {
    if (de == 0)
    {
      return(0)
    }
    # residual of (target depth - depth at gauge pressure x)
    residual <- function(x)
    {
      de - watdepth(P=x+Pa, lat=lat)
    }
    uniroot(f=residual, interval=c(0,1300), tol=Technicals$uniroottol, maxiter=Technicals$maxiter)$root
  }
  # FIX: vapply with a preallocated result instead of growing a vector with
  # c() inside a loop (and shadowing the function name with the accumulator);
  # USE.NAMES=FALSE matches the old unnamed output; an empty d now yields
  # numeric(0) instead of NULL
  vapply(d, depth2p, numeric(1), USE.NAMES=FALSE)
}
# PRIVATE function:
# calculates the ionic strength I as a function of salinity S
# references: DOE1994, Zeebe2001, Roy1993b (the carbonic acid paper)
I <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  # ionic strength in mol/kg-solution (molinity)
  19.924*S/(1000-1.005*S)
}
# PRIVATE function:
# calculates chlorinity Cl from salinity S
# references: DOE1994, Zeebe2001
Cl <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  # chlorinity in permil: Cl = S / 1.80655
  S/1.80655
}
# PRIVATE function:
# calculates concentrations of constituents of natural seawater from a given salinity S
# reference: DOE1994
seaconc <- function(spec, # constituent of seawater (chemical species) whose concentration is wanted; any name of the vectors ConcRelCl and MeanMolecularMass: "Cl", "SO4", "Br", "F", "Na", "Mg", "Ca", "K", "Sr", "B", "S"
                    S)    # salinity S in practical salinity units (i.e. no unit)
{
  # molinity (mol/kg-solution) = (mass ratio to chlorinity / molar mass) * chlorinity;
  # both lookup tables are package data (ConcRelCl, MeanMolecularMass)
  molinityPerCl <- ConcRelCl[[spec]]/MeanMolecularMass[[spec]]
  molinityPerCl * Cl(S)
}
# PRIVATE function:
# conversion factor from molality (mol/kg-H2O) to molinity (mol/kg-solution)
# reference: Roy1993b (the carbonic acid paper), DOE1994
molal2molin <- function(S) # salinity S in practical salinity units (i.e. no unit)
{
  # each kg of solution of salinity S contains (1 - 0.001005*S) kg of pure water,
  # so multiplying a molality by this factor yields a molinity
  1 - 0.001005*S
}
# PRIVATE function:
# converts a temperature in degrees centigrade to a temperature in Kelvin
# NOTE(review): this masks the base shorthand T (TRUE) inside the package scope
T <- function(t) # temperature in degrees centigrade
{
  # PhysChemConst$absZero is absolute zero in degrees centigrade (presumably
  # -273.15 -- confirm against the package constants), so subtracting it
  # shifts the scale to Kelvin
  t - PhysChemConst$absZero
}
# PRIVATE function:
# provides pH scale conversion factors (caution: the activity coefficient for
# H+ (needed for NBS scale conversions) is calculated with the Davies equation
# (Zeebe2001) which is only accurate up to ionic strengths of I = 0.5)
# references: Dickson1984, DOE1994, Zeebe2001
scaleconvert <- function(S,               # salinity S in practical salinity units (i.e. no unit)
                         t,               # temperature in degrees centigrade
                         p=0,             # gauge pressure (total pressure minus atmospheric pressure) in bars
                         SumH2SO4=NULL,   # total sulfate in mol/kg-solution; calculated from S if not supplied
                         SumHF=NULL,      # total fluoride in mol/kg-solution; calculated from S if not supplied
                         khf="dickson",   # either "dickson" (Dickson1979a) or "perez" (Perez1987a) for K_HF
                         khso4="dickson") # either "dickson" (default, Dickson1990) or "khoo" (Khoo1977) for K_HSO4
{
  if (is.null(SumH2SO4))
  {
    SumH2SO4 <- seaconc("SO4", S)
  }
  if (is.null(SumHF))
  {
    SumHF <- seaconc("F", S)
  }
  kHSO4 <- K_HSO4(S, t, p, khso4=khso4)
  kHF   <- K_HF(S, t, p, SumH2SO4, SumHF, khf=khf)
  # free -> total adds the HSO4 contribution; free -> seawater adds HSO4 and HF
  free2tot <- (1 + (SumH2SO4/kHSO4))
  free2sws <- (1 + (SumH2SO4/kHSO4) + (SumHF/kHF))
  # strip unit/pH-scale attributes inherited from the dissociation constants
  attributes(free2tot) <- NULL
  attributes(free2sws) <- NULL
  # activity coefficient of H+ via the Davies equation (only valid up to I = 0.5)
  ionicstrength <- I(S)
  sqrtI <- sqrt(ionicstrength)
  eT <- PhysChemConst$e*T(t)
  davA <- 1.82e6/(eT*sqrt(eT))
  gamma_H <- 10^-((davA*((sqrtI/(1+sqrtI)) - 0.2*ionicstrength)))
  # Lewis1998, Perez1984: pH_NBS = -log10(gamma_H (H + HSO4 + HF)) with molal
  # concentrations, i.e. the NBS scale relates to the SEAWATER scale via
  # gamma_H, not to the free scale (measuring with NBS buffers in seawater
  # senses H+ plus HSO4 and HF, not H+ alone); NBS is based on mol/kg-H2O
  # (molality), all other scales on mol/kg-soln (molinity) -- hence the
  # molal2molin factor below
  nbs2free <- 1/(gamma_H*free2sws) * molal2molin(S)
  return(list( # list of conversion factors "free2tot", "free2sws", etc.
              free2tot = free2tot,
              free2sws = free2sws,
              free2nbs = 1/nbs2free,
              tot2free = 1/free2tot,
              tot2sws  = 1/free2tot * free2sws,
              tot2nbs  = 1/free2tot * 1/nbs2free,
              sws2free = 1/free2sws,
              sws2tot  = 1/free2sws * free2tot,
              sws2nbs  = 1/free2sws * 1/nbs2free,
              nbs2free = nbs2free,
              nbs2tot  = nbs2free * free2tot,
              nbs2sws  = nbs2free * free2sws
              ))
}
# PRIVATE function:
# calculates seawater density (in kg/m3) from temperature (degrees centigrade)
# and salinity
# references: Millero1981, DOE1994
seadensity <- function(S, # salinity S in practical salinity units (i.e. no unit)
                       t) # temperature in degrees centigrade
{
  # powers of temperature used by the polynomial fits below
  tt2 <- t^2
  tt3 <- tt2 * t
  tt4 <- tt3 * t
  tt5 <- tt4 * t
  # density of pure water (S = 0), polynomial in t
  rho0 <- 999.842594 + 6.793952e-2*t - 9.095290e-3*tt2 + 1.001685e-4*tt3 - 1.120083e-6*tt4 + 6.536332e-9*tt5
  # salinity correction coefficients
  cA <- 8.24493e-1 - 4.0899e-3*t + 7.6438e-5*tt2 - 8.2467e-7*tt3 + 5.3875e-9*tt4
  cB <- -5.72466e-3 + 1.0227e-4*t - 1.6546e-6*tt2
  cC <- 4.8314e-4
  rho0 + cA*S + cB*S*sqrt(S) + cC*S^2 # seawater density in kg/m3
}
################################################################
# input / output (IO) functions
################################################################
# PRIVATE function:
# basic wrapper for the R plot function for plotting objects of class aquaenv;
# no return value, just side-effect
basicplot <- function(aquaenv,             # object of class aquaenv
                      xval,                # x-value: the independent variable describing a change in elements of an object of class aquaenv
                      type="l",            # standard plot parameter; default: plot lines
                      mgp=c(1.8, 0.5, 0),  # standard plot parameter; default: axis title on line 1.8, axis labels on line 0.5, axis on line 0
                      mar=c(3,3,0.5,0.5),  # standard plot parameter; default: margin of 3 lines bottom and left and 0.5 lines top and right
                      oma=c(0,0,0,0),      # standard plot parameter; default: no outer margin
                      size=c(15,13),       # the size of the plot device; c(width, height) in inches
                      mfrow=c(11,10),      # standard plot parameter; default: 11 rows by 10 columns of plots (par mfrow is c(nrow, ncol))
                      device="x11",        # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                      filename="aquaenv",  # filename to be used if "eps" or "pdf" is selected for device
                      newdevice,           # flag: if TRUE, new plot device is opened
                      setpar,              # flag: if TRUE parameters are set with the function par
                      ylab=NULL,           # y axis label: if given, it overrides the names from an aquaenv object
                      ...)
{
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mfrow=mfrow, mar=mar, oma=oma, mgp=mgp)
  }
  aquaenv <- as.data.frame(aquaenv)
  # BUGFIX: seq_along() instead of 1:length(): an object with zero elements
  # previously iterated over c(1, 0) and crashed on aquaenv[[1]]
  for (i in seq_along(aquaenv))
  {
    # per-panel y label: the element name unless an explicit ylab was supplied
    ylab_ <- if (is.null(ylab)) names(aquaenv)[[i]] else ylab
    plot(xval, aquaenv[[i]], ylab=ylab_, type=type, ...)
  }
}
# PRIVATE function:
# opens a graphics device for plotting; no return value, just side-effect
opendevice <- function(device,   # either "x11", "eps", or "pdf"
                       size,     # device size in the form c(width, height), in inches
                       filename) # base filename (without extension) used for "eps" or "pdf"
{
  w <- size[[1]]
  h <- size[[2]]
  # unknown device names fall through silently, as in the original if/else chain
  switch(device,
         x11 = x11(width=w, height=h),
         eps = postscript(width=w, height=h, file=paste(filename, ".eps", sep=""), paper="special"),
         pdf = pdf(width=w, height=h, file=paste(filename, ".pdf", sep=""), paper="special"))
}
# PRIVATE function:
# plots all elements of an object of class aquaenv; no return value, just side-effect
plotall <- function(aquaenv, # object of class aquaenv
                    xval,    # x-value: the independent variable describing a change in elements of the object
                    ...)
{
  # thin wrapper: basicplot already iterates over every element of the object
  basicplot(aquaenv = aquaenv, xval = xval, ...)
}
# PRIVATE function:
# plots just the elements of an object of class aquaenv given in what;
# no return value, just side-effect
selectplot <- function(aquaenv,      # object of class aquaenv
                       xval,         # x-value: the independent variable describing a change in elements of the object
                       what,         # vector of names of elements of aquaenv that should be plotted
                       mfrow=c(1,1), # standard plot parameter; default: just one plot
                       size=c(7,7),  # the size of the plot device in inches; default: 7 by 7
                       ...)
{
  # subset the object but keep the aquaenv class so basicplot treats it as such
  subsetlist <- aquaenv[what]
  class(subsetlist) <- "aquaenv"
  basicplot(subsetlist, xval=xval, mfrow=mfrow, size=size, ...)
}
# PRIVATE function:
# creates a bjerrumplot from the elements of an object of class aquaenv given
# in what; no return value, just side-effect
bjerrumplot <- function(aquaenv,              # object of class aquaenv
                        what=NULL,            # vector of names of elements of aquaenv that should be plotted; default NULL: the full standard set of acid-base species (see below)
                        log=FALSE,            # should the plot be on a logarithmic y axis?
                        palette=NULL,         # a vector of colors to use (either numbers or names given in colors()); default: 1, 2, ...
                        device="x11",         # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                        filename="aquaenv",   # filename to be used if "eps" or "pdf" is selected for device
                        size=c(12,10),        # the size of the plot device; c(width, height) in inches
                        ylim=NULL,            # standard plot parameter; if not supplied it is calculated by range() of the elements to plot
                        lwd=2,                # standard plot parameter; width of the lines in the plot
                        xlab="free scale pH", # x axis label
                        mgp=c(1.8, 0.5, 0),   # standard plot parameter; axis title, labels, and axis line positions
                        mar=c(3,3,0.5,0.5),   # standard plot parameter; plot margins
                        oma=c(0,0,0,0),       # standard plot parameter; default: no outer margin
                        legendposition="bottomleft", # position of the legend
                        legendinset=0.05,     # standard legend parameter inset
                        legendlwd=4,          # standard legend parameter lwd: line width of lines in legend
                        bg="white",           # standard legend parameter: background color
                        newdevice,            # flag: if TRUE, new plot device is opened
                        setpar,               # flag: if TRUE parameters are set with the function par
                        ...)
{
  # BUGFIX: "what" now defaults to NULL; previously it had no default, so
  # calling bjerrumplot() without it raised "argument 'what' is missing"
  # before the is.null() check below could supply the standard species list
  if (is.null(what))
  {
    what <- c("CO2", "HCO3", "CO3", "BOH3", "BOH4", "OH", "H3PO4", "H2PO4", "HPO4", "PO4", "SiOH4", "SiOOH3", "SiO2OH2",
              "H2S", "HS", "S2min", "NH4", "NH3", "H2SO4", "HSO4", "SO4", "HF", "F", "HNO3", "NO3", "HNO2", "NO2")
  }
  bjerrumvarslist <- aquaenv[what]
  class(bjerrumvarslist) <- "aquaenv"
  bjerrumvars <- as.data.frame(bjerrumvarslist)
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mar=mar, mgp=mgp, oma=oma)
  }
  if (is.null(palette))
  {
    palette <- seq_along(what)
  }
  if (log)
  {
    if (is.null(ylim))
    {
      ylim <- range(log10(bjerrumvars))
    }
    yvals <- log10(bjerrumvars)
    ylab <- paste("log10([X]/(", attr(bjerrumvarslist[[1]], "unit"), "))", sep="")
  }
  else
  {
    if (is.null(ylim))
    {
      ylim <- range(bjerrumvars)
    }
    yvals <- bjerrumvars
    ylab <- attr(bjerrumvarslist[[1]], "unit")
  }
  # draw all species into one coordinate system via par(new=TRUE) overplotting
  for (i in seq_along(bjerrumvars))
  {
    plot(aquaenv$pH, yvals[[i]], type="l", ylab=ylab, xlab=xlab, ylim=ylim, col=palette[[i]], lwd=lwd, ...)
    par(new=TRUE)
  }
  par(new=FALSE)
  legend(legendposition, inset=legendinset, legend=names(bjerrumvarslist), col=palette, bg=bg, lwd=legendlwd, ...)
}
# PRIVATE function:
# Creates a cumulative (stacked area) plot from the elements of an object of
# class aquaenv given in what: the positive part of each variable is stacked
# upwards from zero and the negative part downwards, each drawn as a filled
# polygon. No return value, just the plotting side-effect.
cumulativeplot <- function(aquaenv, # object of class aquaenv
                           xval, # x-value: the independent variable describing a change in elements of an object of class aquaenv
                           what, # vector of names of elements of aquaenv that should be plotted
                           total=TRUE, # should the sum of all elements specified in what be plotted as well?
                           palette=NULL, # a vector of colors to use in the plot (either numbers or names given in colors())
                           device="x11", # the device to plot on; default: "x11" (can also be "eps" or "pdf")
                           filename="aquaenv",# filename to be used if "eps" or "pdf" is selected for device
                           size=c(12,10), # the size of the plot device; default: 12 (width) by 10 (height) inches
                           ylim=NULL, # standard plot parameter; if not supplied it will be calculated by an adaptation of range() of the elements to plot
                           lwd=2, # standard plot parameter; width of the lines in the plot
                           mgp=c(1.8, 0.5, 0),# standard plot parameter; default: axis title on line 1.8, axis labels on line 0.5, axis on line 0
                           mar=c(3,3,0.5,0.5),# standard plot parameter; default: margin of 3 lines bottom and left and 0.5 lines top and right
                           oma=c(0,0,0,0), # standard plot parameter; default: no outer margin
                           legendposition="bottomleft", # position of the legend
                           legendinset=0.05, # standard legend parameter inset
                           legendlwd=4, # standard legend parameter lwd: line width of lines in legend
                           bg="white", # standard legend parameter: default background color: white
                           y.intersp=1.2, # standard legend parameter; default: 1.2 lines space between the lines in the legend
                           newdevice, # flag: if TRUE, new plot device is opened
                           setpar, # flag: if TRUE parameters are set with the function par
                           ...)
{
  if (is.null(what))
  {
    # default: plot every element of the aquaenv object
    what <- names(aquaenv)
  }
  cumulativevarslist <- aquaenv[what]
  class(cumulativevarslist) <- "aquaenv"
  cumulativevars <- as.data.frame(cumulativevarslist)
  if (is.null(ylim))
  {
    # the y range must accommodate the positive parts stacked above zero and
    # the negative parts stacked below it, so the per-variable ranges
    # (extended to include 0) are summed component-wise
    ylim <- c(0,0)
    for (var in cumulativevars)
    {
      ylim <- ylim + range(c(var,0))
    }
  }
  if (is.null(palette))
  {
    # seq_along() is safe even for zero columns, unlike 1:length(...)
    palette <- seq_along(cumulativevars)
  }
  if (newdevice)
  {
    opendevice(device, size, filename)
  }
  if (setpar)
  {
    par(mar=mar, mgp=mgp, oma=oma)
  }
  # empty base plot that establishes the coordinate system for the polygons
  plot(xval, rep(0,length(xval)), type="l", ylim=ylim, col="white", ...)
  # stack the positive parts of each variable upwards from zero
  sumfuncpos <- rep(0,length(xval))
  for (x in seq_along(cumulativevars))
  {
    yval <- cumulativevars[[x]]
    yval[yval<=0] <- 0
    newsumfuncpos <- sumfuncpos + yval
    # skip variables with no positive part (drawing them would be a no-op);
    # isTRUE(all(...)) is robust to attributes and NAs, unlike identical()
    # against rep(0, length(yval))
    if (!isTRUE(all(yval == 0)))
    {
      polygon(c(xval,rev(xval)), c(newsumfuncpos, rev(sumfuncpos)), col=palette[[x]], border=NA)
    }
    sumfuncpos <- newsumfuncpos
  }
  # stack the negative parts of each variable downwards from zero
  sumfuncneg <- rep(0,length(xval))
  for (x in seq_along(cumulativevars))
  {
    yval <- cumulativevars[[x]]
    yval[yval>=0] <- 0
    newsumfuncneg <- sumfuncneg + yval
    if (!isTRUE(all(yval == 0)))
    {
      polygon(c(xval,rev(xval)), c(newsumfuncneg, rev(sumfuncneg)), col=palette[[x]], border=NA)
    }
    sumfuncneg <- newsumfuncneg
  }
  if (total)
  {
    # overlay the net sum of all plotted variables as a gray line;
    # rowSums() replaces the slower, equivalent apply(cumulativevars, 1, sum)
    par(new=TRUE)
    plot(xval, rowSums(cumulativevars), col="gray", type="l", ylim=ylim, xlab="", ylab="", lwd=lwd)
  }
  legend(legendposition, legend=names(cumulativevars), col=palette, inset=legendinset, y.intersp=y.intersp, bg=bg, lwd=legendlwd)
}
|
# Package-level roxygen directives for the RClean package: @useDynLib registers
# the package's compiled (native) routines so .Call symbols resolve, and
# @importFrom pulls in Rcpp's evalCpp so the Rcpp runtime is loaded.
# NULL is the conventional placeholder object these directives attach to.
#' @useDynLib RClean
#' @importFrom Rcpp evalCpp
NULL
| /R/RClean.R | no_license | nuhorchak/RClean | R | false | false | 55 | r | #' @useDynLib RClean
#' @importFrom Rcpp evalCpp
NULL
|
# =================================================================
# Filename: 5_pcr.R
# Tasks: Preliminary analysis for data dimension reduction
# Author: Jenny Lee (JennyLee.Stat@gmail.com)
# Last updated: 1/29/2019
# ==================================================================
library(tidyverse)
library(xtable)
library(vtreat)
library(lmtest)
library(pls)
library(MASS)
source('utils.R')  # project helpers, e.g. get_scaled_df() used below
theme_set(theme_bw())
# read in the datasets =============================================
bm <- read.csv('Data/combined_N25.csv')
rownames(bm) <- as.character(bm$patient_id)
# simplify meniscus tear feature to two level factor variable
bm$meniscal_tear_severity[bm$meniscal_tear_severity >= 1] <- 1
# let's make a smaller dataset with only continuous dataset
# NOTE(review): the dropped columns are addressed by position (1 and 20) —
# presumably patient_id and one non-continuous column; confirm against the CSV.
bm_all <- bm[, c(-1, -20)]
bm <- bm[, 2:19]
categorical <- c("effusion", "cartilage_defects",
"synovitis_factor", "meniscal_tear_severity")
categorical <- bm_all[, categorical]
# cross-tabulate the categorical features (printed for inspection only)
count(categorical, effusion, cartilage_defects,
synovitis_factor, meniscal_tear_severity)
# per-compartment cartilage volumes, aligned to the bm patients by row name
vbr_vol <- read.csv('Data/VBR_cluster_vols_by_compartments.csv')
rownames(vbr_vol) <- vbr_vol$patient_id
vol_mf <- vbr_vol[rownames(bm), "vol_MF"]
vol_lf <- vbr_vol[rownames(bm), "vol_LF"]
vol_mt <- vbr_vol[rownames(bm), "vol_MT"]
vol_lt <- vbr_vol[rownames(bm), "vol_LT"]
vol_tro <- vbr_vol[rownames(bm), "vol_TRO"]
vol_pat <- vbr_vol[rownames(bm), "vol_PAT"]
bm$vol_mf <- vol_mf
bm_all$vol_mf <- vol_mf
# one-year imaging outcome (T1rho, global medial femur)
img_1yr <- read.csv('Data/img_1yr_small.csv')
t1rho_mf <- img_1yr$t1rho__global_mf
# y-aware scale =================================================
# vtreat's designTreatmentsN/prepare rescale each predictor by its univariate
# relationship with the outcome (vol_mf); pruneSig = 1.0 disables pruning so
# every variable is kept.
examplePruneSig <- 1.0
treatmentsM <- designTreatmentsN(
bm_all,
setdiff(colnames(bm_all), 'vol_mf'),
'vol_mf',
verbose = TRUE)
cont_data_scaled <- prepare(treatmentsM,
bm_all,
pruneSig = examplePruneSig,
# pruning is off by setting the sig as zero
scale = TRUE)
cont_data_scaled$vol_mf <- vol_mf
# drop the outcome column (position 26) so scaled_bm contains predictors only
scaled_bm <- cont_data_scaled[, c( -26 )]
rownames(scaled_bm) <- rownames(bm_all)
boxplot(scaled_bm, las= 3)
# cont_data_scaled <- apply(scaled_bm, 2, function(x){
#  y_aware_scale(x, y = vol_mf)
# })
#
# cont_data_scaled <- as.data.frame(cont_data_scaled)
# rownames(cont_data_scaled) <- rownames(bm)
# boxplot(cont_data_scaled, las = 2) # doesn't look like scaled at all
# run the PCR ===================================================
# Principal component analysis on the y-aware scaled predictors
# (already centered/scaled by vtreat, so prcomp only re-centers).
pca_res <- prcomp(scaled_bm, center = TRUE, scale = FALSE)
par(mfrow=c(1, 3))
biplot(pca_res, choices=1:2)
biplot(pca_res, choices=c(1, 3))
biplot(pca_res, choices=c(1, 4))
# plot the magnitude of singular values
# paste0() is vectorized, so the PC labels do not need lapply(1:length(...));
# seq_along() is also safe for empty input.
tmp_df <- data.frame(pc_name = paste0('PC', seq_along(pca_res$sdev)),
pc = seq_along(pca_res$sdev),
magnitude = (pca_res$sdev) ** 2,
stringsAsFactors = FALSE)
# BUG FIX: the original chained "+ ggsave(...)" onto the ggplot. ggsave()
# inside a "+" chain saves last_plot() (the previously rendered plot) before
# the current chain is complete. Build the plot, print it, then save it
# explicitly with plot = ...
p_scree <- ggplot(tmp_df[1:12, ],
aes(x = pc, y = magnitude, ymax = magnitude)) +
geom_point(size=2, colour="#993399") +
geom_linerange(aes(ymin = 0)) +
scale_x_continuous(breaks = 1:12, labels = tmp_df$pc_name[1:12]) +
xlab(' ') +
ylab('Variances') +
ggtitle(' ')
print(p_scree)
ggsave('Figs/scree_plot_scaled_bm.png', plot = p_scree,
width = 7, height = 3.5)
# plot the scaled loadings (first 4 PCs, one facet per PC)
rot3 <- as.data.frame(pca_res$rotation[, 1:4])
rot3$varName = rownames(rot3)
rot3_long <- rot3 %>%
  gather(key = "PC", "loadings", -varName)   # long format for facetting
p_load <- ggplot(rot3_long, aes(x = varName, y = loadings)) +
geom_point(color ="red", size = 5, shape = 18, alpha = .75) +
geom_linerange(aes(ymax = loadings, ymin = 0),
alpha = .5, size = 2) +
facet_wrap(~PC, nrow = 1) +
coord_flip() +
ylab(" ") +
xlab(" ") +
ggtitle("Scaled variable loadings (First 4 PCs)")
print(p_load)
ggsave('Figs/loadings_scaled_bm.png', plot = p_load, width = 8, height = 6)
# preliminary analysis 2: T1rho measurement in yr 1 as outcome ===========
# Replace the previous outcome column (position 26) with t1rho_mf, rescale
# the predictors in a y-aware fashion for this outcome, project onto the
# first 10 PCs and run backward stepwise regression on the PC scores.
bm_all <- bm_all[, -26]
bm_all$t1rho_mf <- t1rho_mf
bm_scaled_global_mf <- get_scaled_df(bm_all, "t1rho_mf")
pca_res <- prcomp(bm_scaled_global_mf, center = TRUE, scale = FALSE)
projection <- pca_res$rotation[, 1:10]
# PC scores: scaled data projected onto the first 10 loading vectors
projected_data <- as.data.frame(
as.matrix(bm_scaled_global_mf) %*% projection,
stringsAsFactors = FALSE)
projected_data$t1rho_mf <- t1rho_mf
full_model <- lm(t1rho_mf ~ ., data = projected_data)
summary(full_model)
# backward elimination by AIC over the 10 PC predictors
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(step_model)
# Primary analysis: Vol MF as outcome =====================================
# pcr_df <- scaled_bm
# pcr_df$vol_mf <- vol_mf
# mod <- pcr(vol_mf ~. , ncomp = 20, data = pcr_df, scale = FALSE)
# plot(mod, labels = rownames(scaled_bm))
# summary(mod)
# Principal component regression by hand: project onto the first 10 PCs,
# select PCs by backward stepwise AIC, then map the retained PC coefficients
# back into the original variable space via the loading matrix.
pca_res <- prcomp(scaled_bm, center = TRUE, scale = FALSE)
projection <- pca_res$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(scaled_bm) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_mf <- vol_mf
# stepwise selection
full_model <- lm(vol_mf ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
# loadings of the PCs retained by the stepwise search (PC1, PC2, PC10)
V = pca_res$rotation[, c(1, 2, 10)]
# NOTE(review): as.matrix() has no nrow argument — "nrow=3" is silently
# ignored; the vector is coerced to a column matrix either way.
beta = as.matrix(res[2:4, 1], nrow=3)
# +/- one standard error around each retained PC coefficient
beta_lower = as.matrix(res[2:4, 1] - res[2:4, 2], nrow=3)
beta_upper = as.matrix(res[2:4, 1] + res[2:4, 2], nrow=3)
# back-project PC-space coefficients into original variable space
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
# back-projection can flip the interval endpoints, so re-order them
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# weights plot for significant variates
rot <- pca_res$rotation[, c(2, 3, 5, 7)] %>%
as.data.frame() %>%
mutate(varName = rownames(pca_res$rotation))
rownames(rot) <- rot[, 5]
# BUG FIX: the original wrote rot[, 1:5] <- round(rot[, 1:4], 4), assigning
# 4 replacement columns into 5 targets; data.frame assignment recycles with a
# warning and clobbers the varName column. Round only the 4 numeric columns.
rot[, 1:4] <- round(rot[, 1:4], 4)
rot <- rot[, -5]
rot
xtable(rot)
# plot the scaled loadings
rot3 <- as.data.frame(rot)
rot3$varName = rownames(rot3)
rot3_long <- rot3 %>%
  gather(key = "PC", "loadings", -varName)
# BUG FIX: ggsave() chained with "+" saves last_plot() (the previous plot);
# build, print, then save the plot explicitly.
p_sig <- ggplot(rot3_long, aes(x = varName, y = loadings)) +
geom_point(color = 'seagreen4', size = 3) +
geom_linerange(aes(ymax = loadings, ymin = 0),
color = 'seagreen4') +
facet_wrap(~PC, nrow = 1) +
coord_flip() +
ylab(" ") +
xlab(" ") +
ggtitle(" ")
print(p_sig)
ggsave("Figs/sig_coef.png", plot = p_sig, width = 8, height = 6)
# exploratory analysis: vol LF as outcome =============================
# Same PCR recipe as the primary (vol MF) analysis, repeated for each
# compartment volume: swap in the outcome, y-aware rescale, project onto the
# first 10 PCs, backward stepwise AIC, then back-project the retained PC
# coefficients (+/- one SE) into original variable space.
bm_all <- bm_all[, -26]
bm_all$vol_lf <- vol_lf
bm_scaled_lf <- get_scaled_df(bm_all, "vol_lf")
pca_res_lf <- prcomp(bm_scaled_lf, center = TRUE, scale = FALSE)
projection <- pca_res_lf$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_lf) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_lf <- vol_lf
# stepwise selection
full_model <- lm(vol_lf ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
# loadings of the PCs retained for vol_lf
V = pca_res_lf$rotation[, c(2, 5, 8, 10)]
# NOTE(review): as.matrix() has no nrow argument; "nrow=3" is ignored here
# and in the parallel sections below.
beta = as.matrix(res[2:5, 1], nrow=3)
beta_lower = as.matrix(res[2:5, 1] - res[2:5, 2], nrow=3)
beta_upper = as.matrix(res[2:5, 1] + res[2:5, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
# back-projection can flip the interval endpoints, so re-order them
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol LT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_lt <- vol_lt
bm_scaled_lt <- get_scaled_df(bm_all, "vol_lt")
pca_res_lt <- prcomp(bm_scaled_lt, center = TRUE, scale = FALSE)
projection <- pca_res_lt$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_lt) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_lt <- vol_lt
# stepwise selection
full_model <- lm(vol_lt ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
# single retained PC: column selection drops to a named numeric vector,
# so names(V) below carries the original variable names
V = pca_res_lt$rotation[, c(1)]
beta = as.matrix(res[2, 1], nrow=3)
beta_lower = as.matrix(res[2, 1] - res[2, 2], nrow=3)
beta_upper = as.matrix(res[2, 1] + res[2, 2], nrow=3)
# vector %*% 1x1 matrix: R conforms V as a column vector
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
rownames(recovered) = names(V)
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol MT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_mt <- vol_mt
bm_scaled_mt <- get_scaled_df(bm_all, "vol_mt")
pca_res_mt <- prcomp(bm_scaled_mt, center = TRUE, scale = FALSE)
projection <- pca_res_mt$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_mt) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_mt <- vol_mt
# stepwise selection
full_model <- lm(vol_mt ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_mt$rotation[, c(1, 2, 3, 8, 9)]
beta = as.matrix(res[2:6, 1], nrow=3)
beta_lower = as.matrix(res[2:6, 1] - res[2:6, 2], nrow=3)
beta_upper = as.matrix(res[2:6, 1] + res[2:6, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol PAT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_pat <- vol_pat
bm_scaled_pat <- get_scaled_df(bm_all, "vol_pat")
pca_res_pat <- prcomp(bm_scaled_pat, center = TRUE, scale = FALSE)
projection <- pca_res_pat$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_pat) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_pat <- vol_pat
# stepwise selection
full_model <- lm(vol_pat ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_pat$rotation[, c(1, 2, 6)]
beta = as.matrix(res[2:4, 1], nrow=3)
beta_lower = as.matrix(res[2:4, 1] - res[2:4, 2], nrow=3)
beta_upper = as.matrix(res[2:4, 1] + res[2:4, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol TRO as outcome ===============================
# Same PCR recipe as the other compartments: y-aware rescale for vol_tro,
# project onto the first 10 PCs, backward stepwise AIC, then back-project
# the retained PC coefficients (+/- one SE) into original variable space.
bm_all <- bm_all[, -26]
bm_all$vol_tro <- vol_tro
bm_scaled_tro <- get_scaled_df(bm_all, "vol_tro")
pca_res_tro <- prcomp(bm_scaled_tro, center = TRUE, scale = FALSE)
projection <- pca_res_tro$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_tro) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_tro <- vol_tro
# stepwise selection
full_model <- lm(vol_tro ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res <- summary(step_model)$coefficients
# BUG FIX: the original used pca_res_pat$rotation (the patella PCA) here;
# the TRO coefficients must be back-projected with the TRO loadings.
V <- pca_res_tro$rotation[, c(2, 7)]
# as.matrix() coerces the coefficient vectors to column matrices (the bogus
# "nrow=3" argument of the original was silently ignored and is dropped)
beta <- as.matrix(res[2:3, 1])
beta_lower <- as.matrix(res[2:3, 1] - res[2:3, 2])
beta_upper <- as.matrix(res[2:3, 1] + res[2:3, 2])
new_beta <- V %*% beta
new_beta_lower <- V %*% beta_lower
new_beta_upper <- V %*% beta_upper
recovered <- as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) <- c('Estimate', 'CI_lower', 'CI_upper')
# back-projection can flip the interval endpoints, so re-order them
tmp_l <- pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u <- pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
recovered
| /5_pcr.R | permissive | JennyLeeStat/WorkSamples | R | false | false | 13,299 | r | # =================================================================
# Filename: 5_pcr.R
# Tasks: Preliminary analysis for data dimension reduction
# Author: Jenny Lee (JennyLee.Stat@gmail.com)
# Last updated: 1/29/2019
# ==================================================================
library(tidyverse)
library(xtable)
library(vtreat)
library(lmtest)
library(pls)
library(MASS)
source('utils.R')
theme_set(theme_bw())
# read in the datasets =============================================
bm <- read.csv('Data/combined_N25.csv')
rownames(bm) <- as.character(bm$patient_id)
# simplify meniscus tear feature to two level factor variable
bm$meniscal_tear_severity[bm$meniscal_tear_severity >= 1] <- 1
# let's make a smaller dataset with only continuous dataset
bm_all <- bm[, c(-1, -20)]
bm <- bm[, 2:19]
categorical <- c("effusion", "cartilage_defects",
"synovitis_factor", "meniscal_tear_severity")
categorical <- bm_all[, categorical]
count(categorical, effusion, cartilage_defects,
synovitis_factor, meniscal_tear_severity)
vbr_vol <- read.csv('Data/VBR_cluster_vols_by_compartments.csv')
rownames(vbr_vol) <- vbr_vol$patient_id
vol_mf <- vbr_vol[rownames(bm), "vol_MF"]
vol_lf <- vbr_vol[rownames(bm), "vol_LF"]
vol_mt <- vbr_vol[rownames(bm), "vol_MT"]
vol_lt <- vbr_vol[rownames(bm), "vol_LT"]
vol_tro <- vbr_vol[rownames(bm), "vol_TRO"]
vol_pat <- vbr_vol[rownames(bm), "vol_PAT"]
bm$vol_mf <- vol_mf
bm_all$vol_mf <- vol_mf
img_1yr <- read.csv('Data/img_1yr_small.csv')
t1rho_mf <- img_1yr$t1rho__global_mf
# y-aware scale =================================================
examplePruneSig <- 1.0
treatmentsM <- designTreatmentsN(
bm_all,
setdiff(colnames(bm_all), 'vol_mf'),
'vol_mf',
verbose = TRUE)
cont_data_scaled <- prepare(treatmentsM,
bm_all,
pruneSig = examplePruneSig,
# pruning is off by setting the sig as zero
scale = TRUE)
cont_data_scaled$vol_mf <- vol_mf
scaled_bm <- cont_data_scaled[, c( -26 )]
rownames(scaled_bm) <- rownames(bm_all)
boxplot(scaled_bm, las= 3)
# cont_data_scaled <- apply(scaled_bm, 2, function(x){
# y_aware_scale(x, y = vol_mf)
# })
#
# cont_data_scaled <- as.data.frame(cont_data_scaled)
# rownames(cont_data_scaled) <- rownames(bm)
# boxplot(cont_data_scaled, las = 2) # doesn't look like scaled at all
# run the PCR ===================================================
# Principal component analysis on the y-aware scaled predictors
# (already centered/scaled by vtreat, so prcomp only re-centers).
pca_res <- prcomp(scaled_bm, center = TRUE, scale = FALSE)
par(mfrow=c(1, 3))
biplot(pca_res, choices=1:2)
biplot(pca_res, choices=c(1, 3))
biplot(pca_res, choices=c(1, 4))
# plot the magnitude of singular values
# paste0() is vectorized, so the PC labels do not need lapply(1:length(...));
# seq_along() is also safe for empty input.
tmp_df <- data.frame(pc_name = paste0('PC', seq_along(pca_res$sdev)),
pc = seq_along(pca_res$sdev),
magnitude = (pca_res$sdev) ** 2,
stringsAsFactors = FALSE)
# BUG FIX: the original chained "+ ggsave(...)" onto the ggplot. ggsave()
# inside a "+" chain saves last_plot() (the previously rendered plot) before
# the current chain is complete. Build the plot, print it, then save it
# explicitly with plot = ...
p_scree <- ggplot(tmp_df[1:12, ],
aes(x = pc, y = magnitude, ymax = magnitude)) +
geom_point(size=2, colour="#993399") +
geom_linerange(aes(ymin = 0)) +
scale_x_continuous(breaks = 1:12, labels = tmp_df$pc_name[1:12]) +
xlab(' ') +
ylab('Variances') +
ggtitle(' ')
print(p_scree)
ggsave('Figs/scree_plot_scaled_bm.png', plot = p_scree,
width = 7, height = 3.5)
# plot the scaled loadings (first 4 PCs, one facet per PC)
rot3 <- as.data.frame(pca_res$rotation[, 1:4])
rot3$varName = rownames(rot3)
rot3_long <- rot3 %>%
  gather(key = "PC", "loadings", -varName)   # long format for facetting
p_load <- ggplot(rot3_long, aes(x = varName, y = loadings)) +
geom_point(color ="red", size = 5, shape = 18, alpha = .75) +
geom_linerange(aes(ymax = loadings, ymin = 0),
alpha = .5, size = 2) +
facet_wrap(~PC, nrow = 1) +
coord_flip() +
ylab(" ") +
xlab(" ") +
ggtitle("Scaled variable loadings (First 4 PCs)")
print(p_load)
ggsave('Figs/loadings_scaled_bm.png', plot = p_load, width = 8, height = 6)
# preliminary analysis 2: T1rho measurement in yr 1 as outcome ===========
bm_all <- bm_all[, -26]
bm_all$t1rho_mf <- t1rho_mf
bm_scaled_global_mf <- get_scaled_df(bm_all, "t1rho_mf")
pca_res <- prcomp(bm_scaled_global_mf, center = TRUE, scale = FALSE)
projection <- pca_res$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_global_mf) %*% projection,
stringsAsFactors = FALSE)
projected_data$t1rho_mf <- t1rho_mf
full_model <- lm(t1rho_mf ~ ., data = projected_data)
summary(full_model)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(step_model)
# Primary analysis: Vol MF as outcome =====================================
# pcr_df <- scaled_bm
# pcr_df$vol_mf <- vol_mf
# mod <- pcr(vol_mf ~. , ncomp = 20, data = pcr_df, scale = FALSE)
# plot(mod, labels = rownames(scaled_bm))
# summary(mod)
pca_res <- prcomp(scaled_bm, center = TRUE, scale = FALSE)
projection <- pca_res$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(scaled_bm) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_mf <- vol_mf
# stepwise selection
full_model <- lm(vol_mf ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res$rotation[, c(1, 2, 10)]
beta = as.matrix(res[2:4, 1], nrow=3)
beta_lower = as.matrix(res[2:4, 1] - res[2:4, 2], nrow=3)
beta_upper = as.matrix(res[2:4, 1] + res[2:4, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# weights plot for significant variates
rot <- pca_res$rotation[, c(2, 3, 5, 7)] %>%
as.data.frame() %>%
mutate(varName = rownames(pca_res$rotation))
rownames(rot) <- rot[, 5]
# BUG FIX: the original wrote rot[, 1:5] <- round(rot[, 1:4], 4), assigning
# 4 replacement columns into 5 targets; data.frame assignment recycles with a
# warning and clobbers the varName column. Round only the 4 numeric columns.
rot[, 1:4] <- round(rot[, 1:4], 4)
rot <- rot[, -5]
rot
xtable(rot)
# plot the scaled loadings
rot3 <- as.data.frame(rot)
rot3$varName = rownames(rot3)
rot3_long <- rot3 %>%
  gather(key = "PC", "loadings", -varName)
# BUG FIX: ggsave() chained with "+" saves last_plot() (the previous plot);
# build, print, then save the plot explicitly.
p_sig <- ggplot(rot3_long, aes(x = varName, y = loadings)) +
geom_point(color = 'seagreen4', size = 3) +
geom_linerange(aes(ymax = loadings, ymin = 0),
color = 'seagreen4') +
facet_wrap(~PC, nrow = 1) +
coord_flip() +
ylab(" ") +
xlab(" ") +
ggtitle(" ")
print(p_sig)
ggsave("Figs/sig_coef.png", plot = p_sig, width = 8, height = 6)
# exploratory analysis: vol LF as outcome =============================
bm_all <- bm_all[, -26]
bm_all$vol_lf <- vol_lf
bm_scaled_lf <- get_scaled_df(bm_all, "vol_lf")
pca_res_lf <- prcomp(bm_scaled_lf, center = TRUE, scale = FALSE)
projection <- pca_res_lf$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_lf) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_lf <- vol_lf
# stepwise selection
full_model <- lm(vol_lf ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_lf$rotation[, c(2, 5, 8, 10)]
beta = as.matrix(res[2:5, 1], nrow=3)
beta_lower = as.matrix(res[2:5, 1] - res[2:5, 2], nrow=3)
beta_upper = as.matrix(res[2:5, 1] + res[2:5, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol LT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_lt <- vol_lt
bm_scaled_lt <- get_scaled_df(bm_all, "vol_lt")
pca_res_lt <- prcomp(bm_scaled_lt, center = TRUE, scale = FALSE)
projection <- pca_res_lt$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_lt) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_lt <- vol_lt
# stepwise selection
full_model <- lm(vol_lt ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_lt$rotation[, c(1)]
beta = as.matrix(res[2, 1], nrow=3)
beta_lower = as.matrix(res[2, 1] - res[2, 2], nrow=3)
beta_upper = as.matrix(res[2, 1] + res[2, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
rownames(recovered) = names(V)
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol MT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_mt <- vol_mt
bm_scaled_mt <- get_scaled_df(bm_all, "vol_mt")
pca_res_mt <- prcomp(bm_scaled_mt, center = TRUE, scale = FALSE)
projection <- pca_res_mt$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_mt) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_mt <- vol_mt
# stepwise selection
full_model <- lm(vol_mt ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_mt$rotation[, c(1, 2, 3, 8, 9)]
beta = as.matrix(res[2:6, 1], nrow=3)
beta_lower = as.matrix(res[2:6, 1] - res[2:6, 2], nrow=3)
beta_upper = as.matrix(res[2:6, 1] + res[2:6, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol PAT as outcome ===============================
bm_all <- bm_all[, -26]
bm_all$vol_pat <- vol_pat
bm_scaled_pat <- get_scaled_df(bm_all, "vol_pat")
pca_res_pat <- prcomp(bm_scaled_pat, center = TRUE, scale = FALSE)
projection <- pca_res_pat$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_pat) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_pat <- vol_pat
# stepwise selection
full_model <- lm(vol_pat ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res = summary(step_model)$coefficients
V = pca_res_pat$rotation[, c(1, 2, 6)]
beta = as.matrix(res[2:4, 1], nrow=3)
beta_lower = as.matrix(res[2:4, 1] - res[2:4, 2], nrow=3)
beta_upper = as.matrix(res[2:4, 1] + res[2:4, 2], nrow=3)
new_beta = V %*% beta
new_beta_lower = V %*% beta_lower
new_beta_upper = V %*% beta_upper
recovered = as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
tmp_l = pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u = pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
colnames(recovered) = c('Estimate', 'CI_lower', 'CI_upper')
recovered
# exploratory analysis: vol TRO as outcome ===============================
# Same PCR recipe as the other compartments: y-aware rescale for vol_tro,
# project onto the first 10 PCs, backward stepwise AIC, then back-project
# the retained PC coefficients (+/- one SE) into original variable space.
bm_all <- bm_all[, -26]
bm_all$vol_tro <- vol_tro
bm_scaled_tro <- get_scaled_df(bm_all, "vol_tro")
pca_res_tro <- prcomp(bm_scaled_tro, center = TRUE, scale = FALSE)
projection <- pca_res_tro$rotation[, 1:10]
projected_data <- as.data.frame(
as.matrix(bm_scaled_tro) %*% projection,
stringsAsFactors = FALSE)
projected_data$vol_tro <- vol_tro
# stepwise selection
full_model <- lm(vol_tro ~ ., data = projected_data)
step_model <- stepAIC(full_model, direction = "backward", trace = FALSE)
step_model$anova
summary(full_model)
summary(step_model)
res <- summary(step_model)$coefficients
# BUG FIX: the original used pca_res_pat$rotation (the patella PCA) here;
# the TRO coefficients must be back-projected with the TRO loadings.
V <- pca_res_tro$rotation[, c(2, 7)]
# as.matrix() coerces the coefficient vectors to column matrices (the bogus
# "nrow=3" argument of the original was silently ignored and is dropped)
beta <- as.matrix(res[2:3, 1])
beta_lower <- as.matrix(res[2:3, 1] - res[2:3, 2])
beta_upper <- as.matrix(res[2:3, 1] + res[2:3, 2])
new_beta <- V %*% beta
new_beta_lower <- V %*% beta_lower
new_beta_upper <- V %*% beta_upper
recovered <- as.data.frame(cbind(new_beta, new_beta_lower, new_beta_upper))
colnames(recovered) <- c('Estimate', 'CI_lower', 'CI_upper')
# back-projection can flip the interval endpoints, so re-order them
tmp_l <- pmin(recovered$CI_lower, recovered$CI_upper)
tmp_u <- pmax(recovered$CI_lower, recovered$CI_upper)
recovered$CI_lower <- tmp_l
recovered$CI_upper <- tmp_u
recovered
|
# Small scratch script: build a mixed character/logical data frame and
# compare the design matrices produced with and without an intercept term.
n <- c("ee", "ff", "gg", "hh")
s <- c("aa", "bb", "cc", "dd")
b <- c(TRUE, FALSE, TRUE, FALSE)
a <- c(FALSE, TRUE, FALSE, TRUE)
df <- data.frame(n = n, s = s, b = b, a = a)
# With an intercept the first level of each factor is dropped (treatment coding).
test_b2 <- model.matrix(~ ., data = df)
# Removing the intercept keeps every level of the first factor.
test_b3 <- model.matrix(~ -1 + ., data = df)
| /R/test.R | no_license | IvonLiu/spcs-ai | R | false | false | 223 | r | n = c("ee", "ff", "gg", "hh")
s = c("aa", "bb", "cc", "dd")
b = c(TRUE, FALSE, TRUE, FALSE)
a = c(FALSE, TRUE, FALSE, TRUE)
df = data.frame(n, s, b, a)
test_b2 <- model.matrix(~.,df)
test_b3 <- model.matrix(~ -1 +.,df)
|
#' Generate synthetic data using CART.
#'
#' \code{gen_cart} uses Classification and Regression Trees (CART)
#' to generate synthetic data by sequentially predicting the value of
#' each variable depending on the value of other variables. Details can
#' be found in \code{\link[synthpop:syn]{syn}}.
#'
#' @param training_set A data frame of the training data. The generated data will
#' have the same size as the \code{training_set}.
#' @param structure A string of the relationships between variables from
#' \code{\link[bnlearn:model string utilities]{modelstring}}. If structure is NA,
#' the default structure would be the sequence of the variables in the \code{training_set}
#' data frame.
#' @return The output is a list of three objects: i) structure: the dependency/relationship
#' between the variables (a \code{\link[bnlearn:bn class]{bn-class}} object); ii) fit_model:
#' the fitted CART model ((a \code{\link[synthpop:syn]{syn}}) object and iii) gen_data:
#' the generated synthetic data.
#' @examples
#' adult_data <- split_data(adult[1:100,], 70)
#' cart <- gen_cart(adult_data$training_set)
#' bn_structure <- "[native_country][income][age|marital_status:education]"
#' bn_structure = paste0(bn_structure, "[sex][race|native_country][marital_status|race:sex]")
#' bn_structure = paste0(bn_structure,"[relationship|marital_status][education|sex:race]")
#' bn_structure = paste0(bn_structure,"[occupation|education][workclass|occupation]")
#' bn_structure = paste0(bn_structure,"[hours_per_week|occupation:workclass]")
#' bn_structure = paste0(bn_structure,"[capital_gain|occupation:workclass:income]")
#' bn_structure = paste0(bn_structure,"[capital_loss|occupation:workclass:income]")
#' cart_elicit <- gen_cart(adult_data$training_set, bn_structure)
#'
#' @export
gen_cart <- function(training_set, structure = NA)
{
  if (!is.character(structure))
  {
    # no elicited structure: let synthpop use the variable sequence itself
    message("generating data using sequence of variables")
    gen_synth_synthpop <- synthpop::syn(data = training_set, m = 1,
        k = nrow(training_set), drop.not.used = FALSE)
    m <- gen_synth_synthpop$predictor.matrix
  } else
  {
    message("generating data using defined relationships")
    # Build a predictor matrix from the elicited Bayesian-network structure:
    # m[parent, child] == 1 means 'parent' is used to predict 'child'.
    m <- matrix(0, nrow = length(training_set), ncol = length(training_set))
    rownames(m) <- colnames(training_set)
    colnames(m) <- colnames(training_set)
    tmp <- bnlearn::model2network(structure)$arcs
    # BUG FIX: seq_len() is safe when the structure contains no arcs
    # (nrow(tmp) == 0), whereas 1:nrow(tmp) would iterate over c(1, 0)
    # and fail when indexing the empty arc matrix.
    for (i in seq_len(nrow(tmp)))
    {
      m[tmp[i, 1], tmp[i, 2]] <- 1
    }
    gen_synth_synthpop <- synthpop::syn(data = training_set, m = 1,
        k = nrow(training_set), predictor.matrix = m)
  }
  return(list(structure = m, fit_model = gen_synth_synthpop, gen_data = gen_synth_synthpop$syn[]))
}
#' Compare the synthetic data generated by CART with the real data.
#'
#' \code{compare_cart} compare the synthetic data generated by CART with the real data.
#'
#' @param training_set A data frame of the training data. The generated data will
#' have the same size as the \code{training_set}.
#' @param fit_model A \code{\link[synthpop:syn]{syn}} object.
#' @param var_list A string vector of the names of variables that we want to compare.
#' @return A plot of the comparison of the distribution of
#' synthetic data vs real data.
#' @examples
#' adult_data <- split_data(adult[1:100,], 70)
#' cart <- gen_cart(adult_data$training_set)
#' compare_cart(adult_data$training_set, cart$fit_model, c("age", "workclass", "sex"))
#'
#' @export
# Compare synthetic data generated by CART against the real training data.
#
# Args:
#   training_set: data frame of the observed (real) training data.
#   fit_model: a fitted synthpop "syn" object (e.g. gen_cart()$fit_model).
#   var_list: character vector of variable names to compare.
#
# Returns the ggplot comparison of distributions produced by
# synthpop::compare() ($plot slot), with one panel per variable laid out
# in a single row.
compare_cart <- function(training_set, fit_model, var_list)
{
synthpop::compare(fit_model, training_set, vars = var_list,
nrow = 1, ncol = length(var_list))$plot
}
| /R/gen_CART.R | no_license | cran/sdglinkage | R | false | false | 3,696 | r | #' Generate synthetic data using CART.
#'
#' \code{gen_cart} uses Classification and Regression Trees (CART)
#' to generate synthetic data by sequentially predicting the value of
#' each variable depending on the value of other variables. Details can
#' be found in \code{\link[synthpop:syn]{syn}}.
#'
#' @param training_set A data frame of the training data. The generated data will
#' have the same size as the \code{training_set}.
#' @param structure A string of the relationships between variables from
#' \code{\link[bnlearn:model string utilities]{modelstring}}. If structure is NA,
#' the default structure would be the sequence of the variables in the \code{training_set}
#' data frame.
#' @return The output is a list of three objects: i) structure: the dependency/relationship
#' between the variables (a \code{\link[bnlearn:bn class]{bn-class}} object); ii) fit_model:
#' the fitted CART model (a \code{\link[synthpop:syn]{syn}} object) and iii) gen_data:
#' the generated synthetic data.
#' @examples
#' adult_data <- split_data(adult[1:100,], 70)
#' cart <- gen_cart(adult_data$training_set)
#' bn_structure <- "[native_country][income][age|marital_status:education]"
#' bn_structure = paste0(bn_structure, "[sex][race|native_country][marital_status|race:sex]")
#' bn_structure = paste0(bn_structure,"[relationship|marital_status][education|sex:race]")
#' bn_structure = paste0(bn_structure,"[occupation|education][workclass|occupation]")
#' bn_structure = paste0(bn_structure,"[hours_per_week|occupation:workclass]")
#' bn_structure = paste0(bn_structure,"[capital_gain|occupation:workclass:income]")
#' bn_structure = paste0(bn_structure,"[capital_loss|occupation:workclass:income]")
#' cart_elicit <- gen_cart(adult_data$training_set, bn_structure)
#'
#' @export
gen_cart <- function(training_set, structure = NA)
{
  # Generate synthetic data with CART via synthpop::syn().
  #
  # Args:
  #   training_set: data frame of training data; the synthetic data has the
  #     same number of rows.
  #   structure: optional bnlearn modelstring describing variable
  #     dependencies. If it is not a character string, variables are
  #     predicted in the order they appear in `training_set`.
  #
  # Returns a list with:
  #   structure: the predictor matrix actually used (entry [i, j] == 1 means
  #     variable i predicts variable j),
  #   fit_model: the fitted synthpop "syn" object,
  #   gen_data:  the generated synthetic data frame.
  if (!is.character(structure))
  {
    message("generating data using sequence of variables")
    gen_synth_synthpop <- synthpop::syn(data = training_set, m = 1,
                                        k = nrow(training_set), drop.not.used = FALSE)
    m <- gen_synth_synthpop$predictor.matrix
  } else
  {
    message("generating data using defined relationships")
    # Build the predictor matrix from the elicited network structure.
    # length() of a data frame is its number of columns.
    m <- matrix(0, nrow = length(training_set), ncol = length(training_set))
    rownames(m) <- colnames(training_set)
    colnames(m) <- colnames(training_set)
    tmp <- bnlearn::model2network(structure)$arcs
    # seq_len() is safe when the network has zero arcs; 1:nrow(tmp) would
    # yield c(1, 0) and index out of bounds.
    for (i in seq_len(nrow(tmp)))
    {
      m[tmp[i, 1], tmp[i, 2]] <- 1
    }
    gen_synth_synthpop <- synthpop::syn(data = training_set, m = 1,
                                        k = nrow(training_set), predictor.matrix = m)
  }
  return(list(structure = m, fit_model = gen_synth_synthpop,
              gen_data = gen_synth_synthpop$syn[]))
}
#' Compare the synthetic data generated by CART with the real data.
#'
#' \code{compare_cart} compare the synthetic data generated by CART with the real data.
#'
#' @param training_set A data frame of the training data. The generated data will
#' have the same size as the \code{training_set}.
#' @param fit_model A \code{\link[synthpop:syn]{syn}} object.
#' @param var_list A string vector of the names of variables that we want to compare.
#' @return A plot of the comparison of the distribution of
#' synthetic data vs real data.
#' @examples
#' adult_data <- split_data(adult[1:100,], 70)
#' cart <- gen_cart(adult_data$training_set)
#' compare_cart(adult_data$training_set, cart$fit_model, c("age", "workclass", "sex"))
#'
#' @export
# Compare synthetic data generated by CART against the real training data.
#
# Args:
#   training_set: data frame of the observed (real) training data.
#   fit_model: a fitted synthpop "syn" object (e.g. gen_cart()$fit_model).
#   var_list: character vector of variable names to compare.
#
# Returns the ggplot comparison of distributions produced by
# synthpop::compare() ($plot slot), with one panel per variable laid out
# in a single row.
compare_cart <- function(training_set, fit_model, var_list)
{
synthpop::compare(fit_model, training_set, vars = var_list,
nrow = 1, ncol = length(var_list))$plot
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HexaTex.R
\name{HexaTex}
\alias{HexaTex}
\title{Polygon to Hexagonal Grid Tessellation}
\usage{
HexaTex(Polygon1, size, plotTrue = FALSE)
}
\arguments{
\item{Polygon1}{The SpatialPolygons object}
\item{size}{The side length of an hexagon}
\item{plotTrue}{Should the object be plotted}
}
\value{
Returns a list with an indexed matrix of the point coordinates
and a SpatialPolygons object of the hexagons
}
\description{
The function takes a Polygon and a sizing argument and
creates a list with an indexed matrix with coordinates
and a SpatialPolygons object, that consists of hexagonal grids
}
\examples{
library(spatstat)
library(maptools)
library(sp)
library(raster)
Polygon1 <- Polygon(rbind(c(4498482, 2668272), c(4498482, 2669343),
c(4499991, 2669343), c(4499991, 2668272)))
Polygon1 <- Polygons(list(Polygon1),1);
Polygon1 <- SpatialPolygons(list(Polygon1))
Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
+ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
proj4string(Polygon1) <- CRS(Projection)
plot(Polygon1,axes=TRUE)
HexGrid <- HexaTex(Polygon1, 100, TRUE)
plot(HexGrid[[2]])
str(HexGrid[[1]])
}
\author{
Sebastian Gatscha
}
| /man/HexaTex.Rd | no_license | daveyrichard/windfarmGA | R | false | true | 1,272 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HexaTex.R
\name{HexaTex}
\alias{HexaTex}
\title{Polygon to Hexagonal Grid Tessellation}
\usage{
HexaTex(Polygon1, size, plotTrue = FALSE)
}
\arguments{
\item{Polygon1}{The SpatialPolygons object}
\item{size}{The side length of an hexagon}
\item{plotTrue}{Should the object be plotted}
}
\value{
Returns a list with an indexed matrix of the point coordinates
and a SpatialPolygons object of the hexagons
}
\description{
The function takes a Polygon and a sizing argument and
creates a list with an indexed matrix with coordinates
and a SpatialPolygons object, that consists of hexagonal grids
}
\examples{
library(spatstat)
library(maptools)
library(sp)
library(raster)
Polygon1 <- Polygon(rbind(c(4498482, 2668272), c(4498482, 2669343),
c(4499991, 2669343), c(4499991, 2668272)))
Polygon1 <- Polygons(list(Polygon1),1);
Polygon1 <- SpatialPolygons(list(Polygon1))
Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
+ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
proj4string(Polygon1) <- CRS(Projection)
plot(Polygon1,axes=TRUE)
HexGrid <- HexaTex(Polygon1, 100, TRUE)
plot(HexGrid[[2]])
str(HexGrid[[1]])
}
\author{
Sebastian Gatscha
}
|
#' @export
#' @return Returns an estimated p-value for some variable.
#' @keywords chisq
#' @description This is a function to estimate the p-value by simulation.
#' @param n number of tests
#' @param df degrees of freedom
#' @param x the observed statistic that the chi-square test is performed upon.
#' @author Nis Klausen \cr
#' Department of mathematics and computer science (IMADA) \cr
#' University of southern Denmark, SDU, Odense \cr
#' \email{nklau1414@student.sdu.dk} \cr
#'
#' @examples
Task31 <- function(x, df, n = 1000){
  # Monte-Carlo estimate of the upper-tail p-value of a chi-square(df)
  # statistic: simulate n chi-square variates (each the sum of df squared
  # standard normals) and return the proportion that exceed x.
  #
  # replicate() evaluates the expression once per replicate, consuming the
  # RNG stream in the same order as the original sapply() over a zero
  # vector, so results are identical for a given seed. This also avoids
  # shadowing the base function var().
  sims <- replicate(n, sum(rnorm(df)^2))
  # Proportion of simulated statistics exceeding the observed value.
  mean(sims > x)
}
| /PackageExam2016/R/Task31.R | no_license | Nisond/PackageExam | R | false | false | 646 | r | #' @export
#' @return Returns an estimated p-value for some variable.
#' @keywords chisq
#' @description This is a function to estimate the p-value by simulation.
#' @param n number of tests
#' @param df degrees of freedom
#' @param x the observed statistic that the chi-square test is performed upon.
#' @author Nis Klausen \cr
#' Department of mathematics and computer science (IMADA) \cr
#' University of southern Denmark, SDU, Odense \cr
#' \email{nklau1414@student.sdu.dk} \cr
#'
#' @examples
Task31 <- function(x, df, n = 1000){
  # Monte-Carlo estimate of the upper-tail p-value of a chi-square(df)
  # statistic: simulate n chi-square variates (each the sum of df squared
  # standard normals) and return the proportion that exceed x.
  #
  # replicate() evaluates the expression once per replicate, consuming the
  # RNG stream in the same order as the original sapply() over a zero
  # vector, so results are identical for a given seed. This also avoids
  # shadowing the base function var().
  sims <- replicate(n, sum(rnorm(df)^2))
  # Proportion of simulated statistics exceeding the observed value.
  mean(sims > x)
}
|
# Build the air/fog measurement data set and test whether the O/S ratio
# differs between the two environment types.
airdata <- data.frame(
  E = factor(c("fog", "fog", "fog", "fog", "air", "fog",
               "air", "fog", "fog", "air", "fog", "air")),
  S = c(38.2, 28.6, 30.2, 23.7, 74.1, 88.2, 46.4, 135.9,
        102.9, 28.9, 46.9, 44.3),
  O = c(10.3, 6.9, 6.2, 12.4, 45.8, 9.9, 27.4, 44.8,
        27.8, 6.5, 11.2, 16.6)
)
# Derived ratio column used as the response in the t-test.
airdata <- within(airdata, R <- O / S)
# Classical pooled-variance two-sample t-test of R by environment type.
t.test(R ~ E, data = airdata, var.equal = TRUE)
| /chap01/exam0110.R | no_license | KSDeng/Mathematical-Modeling-with-R | R | false | false | 401 | r | airdata<-data.frame(
E=factor(c("fog", "fog", "fog", "fog", "air", "fog",
"air", "fog", "fog", "air", "fog", "air")),
S=c(38.2, 28.6, 30.2, 23.7, 74.1, 88.2, 46.4, 135.9,
102.9, 28.9, 46.9, 44.3),
O=c(10.3, 6.9, 6.2, 12.4, 45.8, 9.9, 27.4, 44.8,
27.8, 6.5, 11.2, 16.6)
)
airdata$R<-airdata$O/airdata$S
t.test(R ~ E, data = airdata, var.equal = TRUE)
|
\name{makeDat}
\alias{makeDat}
\title{Create a dataframe from the variables defined in an expression}
\description{
Creates a dataframe from the variables defined in an expression by \code{\link{cbind}}ing the corresponding data found in the workspace. This is a convenience function for creating a dataframe to be passed to \code{\link{propagate}}, when starting with data which was simulated from distributions, i.e. when \code{type = "sim"}. Will throw an error if a variable is defined in the expression but is not available from the workspace.
}
\usage{
makeDat(expr)
}
\arguments{
\item{expr}{an expression to be used with \code{\link{propagate}}.}
}
\value{
A dataframe containing the data defined in \code{expr} in columns.
}
\author{
Andrej-Nikolai Spiess
}
\examples{
## Simulating from uniform
## and normal distribution,
## run 'propagate'.
EXPR1 <- expression(a + b^c)
a <- rnorm(100000, 12, 1)
b <- rnorm(100000, 5, 0.1)
c <- runif(100000, 6, 7)
DAT1 <- makeDat(EXPR1)
propagate(EXPR1, DAT1, type = "sim", cov = FALSE)
}
\keyword{algebra}
\keyword{univariate}
| /man/makeDat.Rd | no_license | anspiess/propagate | R | false | false | 1,096 | rd | \name{makeDat}
\alias{makeDat}
\title{Create a dataframe from the variables defined in an expression}
\description{
Creates a dataframe from the variables defined in an expression by \code{\link{cbind}}ing the corresponding data found in the workspace. This is a convenience function for creating a dataframe to be passed to \code{\link{propagate}}, when starting with data which was simulated from distributions, i.e. when \code{type = "sim"}. Will throw an error if a variable is defined in the expression but is not available from the workspace.
}
\usage{
makeDat(expr)
}
\arguments{
\item{expr}{an expression to be use for \code{\link{propagate}}.}
}
\value{
A dataframe containing the data defined in \code{expr} in columns.
}
\author{
Andrej-Nikolai Spiess
}
\examples{
## Simulating from uniform
## and normal distribution,
## run 'propagate'.
EXPR1 <- expression(a + b^c)
a <- rnorm(100000, 12, 1)
b <- rnorm(100000, 5, 0.1)
c <- runif(100000, 6, 7)
DAT1 <- makeDat(EXPR1)
propagate(EXPR1, DAT1, type = "sim", cov = FALSE)
}
\keyword{algebra}
\keyword{univariate}
|
"EVTsim" <- function(n=100,iter=3000,df=0){
#### df > 0, use Cauchy distribution
if(df <= 0){
rt <- matrix(rnorm(n*iter),n,iter)
cn <- 1/sqrt(2*log(n))
dn <- sqrt(2*log(n))-(log(4*pi)+log(log(n)))/(2*sqrt(2*log(n)))
}else{
rt <- matrix(rt(n*iter,1),n,iter)
dn <- 0
cn <- n/pi
}
evt <- apply(rt,2,max)
evt1 <- (evt-dn)/cn
EVTsim <- list(max=evt,stmax=evt1)
}
"qgumble" <- function(prob=c(0.95,0.975,0.99)){
### compute the quantile of Gumble distribution
n <- length(prob)
qgum <- NULL
for (i in 1:n){
p <- prob[i]
x <- -log(-log(p))
qgum <- c(qgum,x)
}
qgum
}
### | /R Scripts/Tsay - R simulation of maximum of iid random variables.R | no_license | arkagogoldey/Finance_R_Files | R | false | false | 629 | r | "EVTsim" <- function(n=100,iter=3000,df=0){
#### df > 0, use Cauchy distribution
if(df <= 0){
rt <- matrix(rnorm(n*iter),n,iter)
cn <- 1/sqrt(2*log(n))
dn <- sqrt(2*log(n))-(log(4*pi)+log(log(n)))/(2*sqrt(2*log(n)))
}else{
rt <- matrix(rt(n*iter,1),n,iter)
dn <- 0
cn <- n/pi
}
evt <- apply(rt,2,max)
evt1 <- (evt-dn)/cn
EVTsim <- list(max=evt,stmax=evt1)
}
"qgumble" <- function(prob=c(0.95,0.975,0.99)){
### compute the quantile of Gumble distribution
n <- length(prob)
qgum <- NULL
for (i in 1:n){
p <- prob[i]
x <- -log(-log(p))
qgum <- c(qgum,x)
}
qgum
}
### |
### Author: Alexander Kanitz
### Created: 22-JAN-2013
### Modified: 22-JAN-2013
### Description: plots histograms of ChIP peak positions along a
###   continuous (concatenated) genome coordinate, separately for the
###   induced and uninduced conditions, writing one PDF per condition.
### Arguments: none are actually used; input file paths are hard-coded.
### Output: i.pdf and u.pdf in the working directory
### Usage: Rscript <this_script>
### A. PRE-REQUISITES
# Command-line arguments are read but never used below.
args <- commandArgs(trailingOnly=TRUE)
###
## INDUCED
# Load data. NOTE(review): absolute path here, while the uninduced file
# below uses a relative path -- confirm which is intended.
i <- read.table("/home/kanitz/Dropbox/Work/Eclipse/chip_seq_analysis_yH2Ax_R/chip_peaks_induced_cont", col.names = "position")
# Keep only the position column as a plain vector.
i <- i[,1]
# Histogram of peak positions across 30000 bins.
pdf("i.pdf", width = 24, height = 8)
hist(i, breaks=30000, main="ChIP peaks across genome, induced", xlab="Continuous Genome Position")
dev.off()
## UNINDUCED
# Load data (relative path, unlike the induced file above).
u <- read.table("chip_peaks_uninduced_cont", col.names = "position")
# Keep only the position column as a plain vector.
u <- u[,1]
# Histogram of peak positions across 30000 bins.
pdf("u.pdf", width = 24, height = 8)
hist(u, breaks=30000, main="ChIP peaks across genome, uninduced", xlab="Continuous Genome Position")
dev.off()
| /scripts/plot_positions_on_cont_genome.R | permissive | uniqueg/scripts | R | false | false | 867 | r | ### Author: Alexander Kanitz
### Created: 22-JAN-2013
### Modified: 22-JAN-2013
### Description:
### Arguments: bed file
### Output:
### Usage: perl
### A. PRE-REQUISITES
# Initialize and get command line arguments
args <- commandArgs(trailingOnly=TRUE)
###
## INDUCED
# Load data
i <- read.table("/home/kanitz/Dropbox/Work/Eclipse/chip_seq_analysis_yH2Ax_R/chip_peaks_induced_cont", col.names = "position")
# Subset data
i <- i[,1]
# Plot
pdf("i.pdf", width = 24, height = 8)
hist(i, breaks=30000, main="ChIP peaks across genome, induced", xlab="Continuous Genome Position")
dev.off()
## UNINDUCED
# Load data
u <- read.table("chip_peaks_uninduced_cont", col.names = "position")
# Subset data
u <- u[,1]
# Plot
pdf("u.pdf", width = 24, height = 8)
hist(u, breaks=30000, main="ChIP peaks across genome, uninduced", xlab="Continuous Genome Position")
dev.off()
|
###############################################
# example of how to retrieve and upload data from and
# to Synapse using query statements
###############################################
td <- tempdir()
# Find all entities under the source folder; returns a data frame with one
# row per entity (columns entity.name, entity.id).
query <- synapseQuery("select name, id from entity where parentId == '<synid1>'")
# For each entity: download the TSV, convert it to RDS, and upload the RDS
# to the destination folder. invisible() suppresses printing of the list of
# NULLs returned by cat(); seq_len() is safe when the query has zero rows.
invisible(lapply(seq_len(nrow(query)), function(idx){
  fname <- query$entity.name[idx]
  # fixed = TRUE so "." is a literal dot rather than a regex wildcard.
  newfname <- gsub(".tsv", ".rds", fname, fixed = TRUE)
  synid <- query$entity.id[idx]
  f <- read.table(synGet(synid)@filePath, header = TRUE, sep = "\t", row.names = 1)
  cat(dim(f))
  saveRDS(f, file = file.path(td, newfname))
  # Renamed from `file` to avoid shadowing the base function file().
  out_file <- File(file.path(td, newfname), parentId = "<synid2>")
  out_file <- synStore(out_file)
  cat(fname, "pushed\n")
}))
| /code/pull_push_data_to_from_synapse.R | no_license | olganikolova/misc | R | false | false | 685 | r | ###############################################
# example of how to retrieve and upload data from and
# to Synapse using query statements
###############################################
td <- tempdir()
query <- synapseQuery("select name, id from entity where parentId == '<synid1>'")
lapply(1:nrow(query), function(idx){
fname <- query$entity.name[idx]
newfname <- gsub(".tsv",".rds",fname)
synid <- query$entity.id[idx]
f <- read.table(synGet(synid)@filePath, header=T, sep="\t", row.names=1)
cat(dim(f))
saveRDS(f, file=file.path(td, newfname))
file <- File(file.path(td, newfname), parentId = "<synid2>")
file <- synStore(file)
cat(fname, "pushed\n")
})
|
## Originally summer 2016
## updated 2017-01-04 to add bootstraps
## completely rewritten 2017-04-21 for so many reasons
## Figure 2: dated oak phylogeny with biogeography, leaf habit and HPD
## node bars; writes FIG02.timetree.newVersion.pdf under `path`.
require("rgdal") # requires sp, will use proj.4 if installed
require("maptools")
require("ggplot2")
require("plyr")
require('dismo')
require('raster')
require('grid')
require(geoscale)
data(timescales)
## new plot starts here
# Output directory; callers may predefine `path` before sourcing.
if(!exists('path')) path <- 'out'
pdf(paste(path, '/FIG02.timetree.newVersion.pdf', sep = ''), 7, 9.5)
par(mar = c(2.4,0,0,0) + 0.1)
# Colors used for the four biogeographic region codes (C/E/M/U).
regionColors = c(C = 'deepskyblue', E = 'maroon', M = 'orange', U = 'black')
# Clades whose MRCA nodes receive HPD age bars; each entry is a pair of
# tip labels spanning the clade of interest.
cladesOfInterest = list(
root = c('Lithocarpus_hancei', 'Quercus_arizonica'),
newWorldOaks = c('Quercus_kelloggii', 'Quercus_arizonica'),
Cerris=c('Quercus_baronii', 'Quercus_trojana'),
Lobatae.core = c('Quercus_kelloggii', 'Quercus_gentryi'),
Lobatae.Mexico = c('Quercus_canbyi', 'Quercus_hypoleucoides'),
Protobalanus = c('Quercus_palmeri', 'Quercus_tomentella'),
Ponticae = c('Quercus_sadleriana', 'Quercus_pontica'),
Virentes = c('Quercus_fusiformis', 'Quercus_minima'),
Quercus.core = c('Quercus_lobata', 'Quercus_alba'),
Quercus.Eurasia = c('Quercus_mongolica', 'Quercus_robur'),
Quercus.Mexico = c('Quercus_potosina', 'Quercus_germana'),
Quercus.TX = c('Quercus_pungens', 'Quercus_polymorpha'),
Quercus.AZ = c('Quercus_turbinella', 'Quercus_arizonica')
)
# Clades labelled ABOVE their MRCA node in the plot (tip-label pairs).
overSections = list(
Cerris=c('Quercus_baronii', 'Quercus_trojana'),
Lobatae = c('Quercus_kelloggii', 'Quercus_hypoleucoides'),
Protobalanus = c('Quercus_palmeri', 'Quercus_tomentella'),
Ponticae = c('Quercus_sadleriana', 'Quercus_pontica'),
Virentes = c('Quercus_fusiformis', 'Quercus_minima'),
"Texas / N. Mexico" = c('Quercus_mohriana', 'Quercus_vaseyana')
)
# Clades labelled BELOW their MRCA node in the plot (tip-label pairs).
underSections = list(
"section Quercus" = c('Quercus_lobata', 'Quercus_arizonica'),
"American oaks" = c('Quercus_lobata', 'Quercus_kelloggii'),
"Arizona / N. Mexico" = c('Quercus_laeta', 'Quercus_turbinella')
)
# get tree, plot with time scale
t1 <- subset(timescales$ICS2013, timescales$ICS2013[, 'Type'] == 'Epoch')
tr.beast <- tr.spp.4c.discreteClock.beast
tr.temp <- tr.spp.4c.discreteClock
a=plot(tr.temp, edge.color = 0, tip.color = 0, x.lim = c(-7, 96.61137))
#abline(v = 66.34929-t1[2:7, 'Start'], lty = 'dashed', col = 'gray')
segments(x0 = 66.34929-t1[2:7, 'Start'], y0 = 0, y1 = length(tr.temp$tip.label) + 1, lty = 'dashed', col = 'gray')
rect(66.34929-t1[2:7, 'Start'], par()$usr[3]+2, 66.34929-t1[2:7, 'End'], par()$usr[3] + 4, col = c('white', 'gray80'), border = 'black')
text(66.34929-t1[2:7, 'Start'], par()$usr[3]+1, labels = round(t1[2:7, 'Start'], 1), cex = 0.5)
text(66.34929-t1[2:7, 'Midpoint'], par()$usr[3] - 1.5, labels = t1[2:7, 'Name'], srt = -60, adj = 0, xpd = TRUE, cex = 0.5)
plot.phylo.upon(tr.temp, cex = 0.4)
# add biogeography coding
t2 <- tip.geog[tr.temp$tip.label,c('C', 'E', 'M', 'U')]
for(i in seq(dim(t2)[[1]])){
for(j in seq(dim(t2)[2])) {
points(j + 83, i, pch = 22, bg = ifelse(t2[i,j] == 1, regionColors[j], "white"), lwd = 0.5)
}}
text(84:(83+dim(t2)[2]), rep(length(tr.temp$tip.label) + 2, dim(t2)[2]), dimnames(t2)[[2]], cex = 0.5, xpd = TRUE)
# add leaf phenology
t3 <- lf.traits[tr.temp$tip.label, 'lfPhenology']
t3[t3 == "Deciduous, Brevideciduous"] <- 'Deciduous'
t3[is.na(t3)] <- ''
t3[t3 == ''] <- 'notCoded'
t3.colors <- sapply(t3, function(x) switch(x,
Deciduous = 'white',
Brevideciduous = 'lightgreen',
Evergreen = 'darkgreen',
notCoded = NA))
points(rep(81, length(t3)), 1:length(t3), pch = ifelse(t3 == 'notCoded', NA, 22), bg = t3.colors, lwd = 0.4)
text(81, length(tr.temp$tip.label) + 2.2, "Leaf\nhabit", cex = 0.4)
# add node bars
tr.mrca <- mrca(tr.temp)
nodesToBar <- sapply(cladesOfInterest, function(x) tr.mrca[x[1], x[2]])
par(lend = 2)
HPDbars(tr.beast, nodes = nodesToBar, col = 'black', lty = 'solid', lwd = 4.5)
HPDbars(tr.beast, nodes = nodesToBar, col = 'gray', lty = 'solid', lwd = 4)
## section / clade names
lastP <- get("last_plot.phylo", envir = .PlotPhyloEnv)
nodesToSection <- sapply(overSections, function(x) tr.mrca[x[1], x[2]])
for(i in 1:length(nodesToSection)) text(lastP$xx[nodesToSection[i]],
lastP$yy[nodesToSection[i]],
names(nodesToSection)[i],
adj = c(1.1,-0.5),
offset = c(0,1.2),
cex = 0.6,
font = 2)
nodesToSection <- sapply(underSections, function(x) tr.mrca[x[1], x[2]])
for(i in 1:length(nodesToSection)) text(lastP$xx[nodesToSection[i]],
lastP$yy[nodesToSection[i]],
names(nodesToSection)[i],
adj = c(1.1,1.7),
offset = c(0,-1.2),
cex = 0.6,
font = 2)
## leaf habit legend
legend(x=-6.5, y=46,
cex = 0.5, pch = 22, pt.cex = 1.5,
pt.bg = c('darkgreen', 'lightgreen', 'white'),
legend = c("Evergreen", "Brevideciduous", "Deciduous"),
box.lwd = 0.5, box.col = 'white', bg = 'white',
title = "")
legend(x = -6.5, y = 46.5, legend = c('','', ''),
title = 'Leaf habit', title.adj = 0.15,
bty = 'n', cex = 0.6)
## map legend
vp1 <- viewport(x = 0.03, y = 0.09,
width = 0.33, height = 0.20,
just = c('left', 'bottom'),
gp = gpar(bty = 'o'))
b = ggplot(mi.subsampled.bySp, aes(x=lon, y=lat))
ourArea.noCanada <- map_data('world', c('usa','mexico',
'costa rica', 'honduras', 'guatemala', 'belize', 'el salvador', 'nicaragua', 'panama'
))
ourArea.states <- map_data('state')
b <- b + geom_polygon(data = regions.df, aes(long, lat, group = group, fill = oakAreas))
b <- b + scale_fill_manual('Biogeographic\nregions',
labels = c('C', 'E', 'M'),
values = as.character(regionColors[1:3]))
b = b + geom_map(data = ourArea.noCanada, map = ourArea.noCanada, aes(x = long, y = lat, map_id = region), colour = 'black', fill = NA, size = 0.2)
b = b + geom_map(data = ourArea.states, map = ourArea.states, aes(x = long, y = lat, map_id = region), colour = 'black', fill = NA, size = 0.1)
b = b + xlim(-125, -57) + ylim(8, 50)
# for(i in seq(length(reg.list))) b <- b + geom_path(data = regions.df[regions.df$bioregio %in% reg.list[[i]], ], aes(long, lat, group = group), colour = reg.colors.under[i], size = 0.1)
#b <- b + geom_path(data = regions.df, aes(long, lat, group = group, colour = oakAreas), size = 0.3)
# b <- b + scale_colour_manual("Biogeographic\nregions", values=reg.colors.under)
b = b + theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
b <- b + theme(legend.position = c(0.85, 0.3),
legend.background = element_rect(fill=NA),
legend.title = element_text(size = 0),
legend.text = element_text(size = 5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'gray', fill = NA, size = 0.5),
panel.background = element_blank(),
plot.title = element_text(size = 7)
)
b <- b + ggtitle("Biogeographic regions")
print(b, vp = vp1)
#
dev.off()
| /scripts/99.drawTree.v3.R | no_license | andrew-hipp/oak-convergence-2017 | R | false | false | 8,058 | r | ## Originally summer 2016
## updated 2017-01-04 to add bootstraps
## completely rewritten 2017-04-21 for so many reasons
require("rgdal") # requires sp, will use proj.4 if installed
require("maptools")
require("ggplot2")
require("plyr")
require('dismo')
require('raster')
require('grid')
require(geoscale)
data(timescales)
## new plot starts here
if(!exists('path')) path <- 'out'
pdf(paste(path, '/FIG02.timetree.newVersion.pdf', sep = ''), 7, 9.5)
par(mar = c(2.4,0,0,0) + 0.1)
regionColors = c(C = 'deepskyblue', E = 'maroon', M = 'orange', U = 'black')
cladesOfInterest = list(
root = c('Lithocarpus_hancei', 'Quercus_arizonica'),
newWorldOaks = c('Quercus_kelloggii', 'Quercus_arizonica'),
Cerris=c('Quercus_baronii', 'Quercus_trojana'),
Lobatae.core = c('Quercus_kelloggii', 'Quercus_gentryi'),
Lobatae.Mexico = c('Quercus_canbyi', 'Quercus_hypoleucoides'),
Protobalanus = c('Quercus_palmeri', 'Quercus_tomentella'),
Ponticae = c('Quercus_sadleriana', 'Quercus_pontica'),
Virentes = c('Quercus_fusiformis', 'Quercus_minima'),
Quercus.core = c('Quercus_lobata', 'Quercus_alba'),
Quercus.Eurasia = c('Quercus_mongolica', 'Quercus_robur'),
Quercus.Mexico = c('Quercus_potosina', 'Quercus_germana'),
Quercus.TX = c('Quercus_pungens', 'Quercus_polymorpha'),
Quercus.AZ = c('Quercus_turbinella', 'Quercus_arizonica')
)
overSections = list(
Cerris=c('Quercus_baronii', 'Quercus_trojana'),
Lobatae = c('Quercus_kelloggii', 'Quercus_hypoleucoides'),
Protobalanus = c('Quercus_palmeri', 'Quercus_tomentella'),
Ponticae = c('Quercus_sadleriana', 'Quercus_pontica'),
Virentes = c('Quercus_fusiformis', 'Quercus_minima'),
"Texas / N. Mexico" = c('Quercus_mohriana', 'Quercus_vaseyana')
)
underSections = list(
"section Quercus" = c('Quercus_lobata', 'Quercus_arizonica'),
"American oaks" = c('Quercus_lobata', 'Quercus_kelloggii'),
"Arizona / N. Mexico" = c('Quercus_laeta', 'Quercus_turbinella')
)
# get tree, plot with time scale
t1 <- subset(timescales$ICS2013, timescales$ICS2013[, 'Type'] == 'Epoch')
tr.beast <- tr.spp.4c.discreteClock.beast
tr.temp <- tr.spp.4c.discreteClock
a=plot(tr.temp, edge.color = 0, tip.color = 0, x.lim = c(-7, 96.61137))
#abline(v = 66.34929-t1[2:7, 'Start'], lty = 'dashed', col = 'gray')
segments(x0 = 66.34929-t1[2:7, 'Start'], y0 = 0, y1 = length(tr.temp$tip.label) + 1, lty = 'dashed', col = 'gray')
rect(66.34929-t1[2:7, 'Start'], par()$usr[3]+2, 66.34929-t1[2:7, 'End'], par()$usr[3] + 4, col = c('white', 'gray80'), border = 'black')
text(66.34929-t1[2:7, 'Start'], par()$usr[3]+1, labels = round(t1[2:7, 'Start'], 1), cex = 0.5)
text(66.34929-t1[2:7, 'Midpoint'], par()$usr[3] - 1.5, labels = t1[2:7, 'Name'], srt = -60, adj = 0, xpd = TRUE, cex = 0.5)
plot.phylo.upon(tr.temp, cex = 0.4)
# add biogeography coding
t2 <- tip.geog[tr.temp$tip.label,c('C', 'E', 'M', 'U')]
for(i in seq(dim(t2)[[1]])){
for(j in seq(dim(t2)[2])) {
points(j + 83, i, pch = 22, bg = ifelse(t2[i,j] == 1, regionColors[j], "white"), lwd = 0.5)
}}
text(84:(83+dim(t2)[2]), rep(length(tr.temp$tip.label) + 2, dim(t2)[2]), dimnames(t2)[[2]], cex = 0.5, xpd = TRUE)
# add leaf phenology
t3 <- lf.traits[tr.temp$tip.label, 'lfPhenology']
t3[t3 == "Deciduous, Brevideciduous"] <- 'Deciduous'
t3[is.na(t3)] <- ''
t3[t3 == ''] <- 'notCoded'
t3.colors <- sapply(t3, function(x) switch(x,
Deciduous = 'white',
Brevideciduous = 'lightgreen',
Evergreen = 'darkgreen',
notCoded = NA))
points(rep(81, length(t3)), 1:length(t3), pch = ifelse(t3 == 'notCoded', NA, 22), bg = t3.colors, lwd = 0.4)
text(81, length(tr.temp$tip.label) + 2.2, "Leaf\nhabit", cex = 0.4)
# add node bars
tr.mrca <- mrca(tr.temp)
nodesToBar <- sapply(cladesOfInterest, function(x) tr.mrca[x[1], x[2]])
par(lend = 2)
HPDbars(tr.beast, nodes = nodesToBar, col = 'black', lty = 'solid', lwd = 4.5)
HPDbars(tr.beast, nodes = nodesToBar, col = 'gray', lty = 'solid', lwd = 4)
## section / clade names
lastP <- get("last_plot.phylo", envir = .PlotPhyloEnv)
nodesToSection <- sapply(overSections, function(x) tr.mrca[x[1], x[2]])
for(i in 1:length(nodesToSection)) text(lastP$xx[nodesToSection[i]],
lastP$yy[nodesToSection[i]],
names(nodesToSection)[i],
adj = c(1.1,-0.5),
offset = c(0,1.2),
cex = 0.6,
font = 2)
nodesToSection <- sapply(underSections, function(x) tr.mrca[x[1], x[2]])
for(i in 1:length(nodesToSection)) text(lastP$xx[nodesToSection[i]],
lastP$yy[nodesToSection[i]],
names(nodesToSection)[i],
adj = c(1.1,1.7),
offset = c(0,-1.2),
cex = 0.6,
font = 2)
## leaf habit legend
legend(x=-6.5, y=46,
cex = 0.5, pch = 22, pt.cex = 1.5,
pt.bg = c('darkgreen', 'lightgreen', 'white'),
legend = c("Evergreen", "Brevideciduous", "Deciduous"),
box.lwd = 0.5, box.col = 'white', bg = 'white',
title = "")
legend(x = -6.5, y = 46.5, legend = c('','', ''),
title = 'Leaf habit', title.adj = 0.15,
bty = 'n', cex = 0.6)
## map legend
vp1 <- viewport(x = 0.03, y = 0.09,
width = 0.33, height = 0.20,
just = c('left', 'bottom'),
gp = gpar(bty = 'o'))
b = ggplot(mi.subsampled.bySp, aes(x=lon, y=lat))
ourArea.noCanada <- map_data('world', c('usa','mexico',
'costa rica', 'honduras', 'guatemala', 'belize', 'el salvador', 'nicaragua', 'panama'
))
ourArea.states <- map_data('state')
b <- b + geom_polygon(data = regions.df, aes(long, lat, group = group, fill = oakAreas))
b <- b + scale_fill_manual('Biogeographic\nregions',
labels = c('C', 'E', 'M'),
values = as.character(regionColors[1:3]))
b = b + geom_map(data = ourArea.noCanada, map = ourArea.noCanada, aes(x = long, y = lat, map_id = region), colour = 'black', fill = NA, size = 0.2)
b = b + geom_map(data = ourArea.states, map = ourArea.states, aes(x = long, y = lat, map_id = region), colour = 'black', fill = NA, size = 0.1)
b = b + xlim(-125, -57) + ylim(8, 50)
# for(i in seq(length(reg.list))) b <- b + geom_path(data = regions.df[regions.df$bioregio %in% reg.list[[i]], ], aes(long, lat, group = group), colour = reg.colors.under[i], size = 0.1)
#b <- b + geom_path(data = regions.df, aes(long, lat, group = group, colour = oakAreas), size = 0.3)
# b <- b + scale_colour_manual("Biogeographic\nregions", values=reg.colors.under)
b = b + theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
b <- b + theme(legend.position = c(0.85, 0.3),
legend.background = element_rect(fill=NA),
legend.title = element_text(size = 0),
legend.text = element_text(size = 5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'gray', fill = NA, size = 0.5),
panel.background = element_blank(),
plot.title = element_text(size = 7)
)
b <- b + ggtitle("Biogeographic regions")
print(b, vp = vp1)
#
dev.off()
|
#'Calculate population growth rates by projection
#'
#'Calculates the population growth rate and stable stage distribution by
#'repeated projections of the equation \code{n(t+1)=An(t)}.
#'
#'Eventually, structured populations will convergence to a stable stage
#'distribution where each new stage vector is changing by the same proportion
#'(lambda).
#'
#'@param A A projection matrix
#'@param n An initial age or stage vector
#'@param iterations Number of iterations
#'@return A list with 5 items \item{lambda }{Estimate of lambda using change
#'between the last two population counts} \item{stable.stage }{Estimate of
#'stable stage distribution using proportions in last stage vector}
#'\item{stage.vector}{A matrix with the number of projected individuals in each
#'stage class} \item{pop.sizes}{Total number of projected individuals}
#'\item{pop.changes}{Proportional change in population size}
#'@author Chris Stubben
#'@seealso \code{\link{stage.vector.plot}} to plot stage vectors
#'@references see section 2.2 in Caswell 2001
#'@keywords survey
#'@examples
#'
#'## mean matrix from Freville et al 2004
#'stages<-c("seedling", "vegetative", "flowering")
#'A<-matrix(c(
#' 0, 0, 5.905,
#'0.368, 0.639, 0.025,
#'0.001, 0.152, 0.051
#'), nrow=3, byrow=TRUE,
#' dimnames=list(stages,stages)
#')
#'
#'n<-c(5,5,5)
#'p<-pop.projection(A,n, 15)
#'p
#'damping.ratio(A)
#'stage.vector.plot(p$stage.vectors, col=2:4)
#'
#'
#'####
#'
#'data(whale)
#'A<-whale
#'#n<-c(4,38,36,22)
#'n<-c(5,5,5,5)
#'p<-pop.projection(A,n, 15)
#'p
#'stage.vector.plot(p$stage.vectors, col=2:4, ylim=c(0, 0.6))
#'## convergence is slow with damping ratio close to 1
#'damping.ratio(A)
#'pop.projection(A,n, 100)$pop.changes
#'@export
pop.projection <- function(A,n,iterations=20)
{
  # Project n(t+1) = A %*% n(t) for `iterations` steps and summarize the
  # resulting growth rate and (approximate) stable stage distribution.
  #
  # A:          square projection matrix.
  # n:          initial age or stage vector.
  # iterations: number of projection steps recorded.
  #
  # Returns a list with lambda (ratio of the last two total population
  # sizes), stable.stage (last stage vector rescaled to sum to one),
  # stage.vectors (matrix of projected stage vectors, one column per step),
  # pop.sizes (total individuals per step) and pop.changes (proportional
  # change between consecutive steps).
  n.stages <- length(n)
  n.steps <- iterations
  stage.vectors <- matrix(numeric(n.stages * n.steps), nrow = n.stages)
  pop.sizes <- numeric(n.steps)
  pop.changes <- numeric(n.steps - 1)
  current <- n
  for (step in seq_len(n.steps)) {
    stage.vectors[, step] <- current
    pop.sizes[step] <- sum(current)
    if (step > 1) {
      pop.changes[step - 1] <- pop.sizes[step] / pop.sizes[step - 1]
    }
    # Advance the population one time step.
    current <- A %*% current
  }
  # Label rows by stage and columns by time step, counting from zero.
  rownames(stage.vectors) <- rownames(A)
  colnames(stage.vectors) <- 0:(n.steps - 1)
  # Approximate stable stage distribution from the final stage vector.
  last.stage <- stage.vectors[, n.steps]
  list(
    lambda = pop.sizes[n.steps] / pop.sizes[n.steps - 1],
    stable.stage = last.stage / sum(last.stage),
    stage.vectors = stage.vectors,
    pop.sizes = pop.sizes,
    pop.changes = pop.changes
  )
}
| /R/pop.projection.R | no_license | ashander/popbio | R | false | false | 2,968 | r | #'Calculate population growth rates by projection
#'
#'Calculates the population growth rate and stable stage distribution by
#'repeated projections of the equation \code{n(t+1)=An(t)}.
#'
#'Eventually, structured populations will convergence to a stable stage
#'distribution where each new stage vector is changing by the same proportion
#'(lambda).
#'
#'@param A A projection matrix
#'@param n An initial age or stage vector
#'@param iterations Number of iterations
#'@return A list with 5 items \item{lambda }{Estimate of lambda using change
#'between the last two population counts} \item{stable.stage }{Estimate of
#'stable stage distribution using proportions in last stage vector}
#'\item{stage.vector}{A matrix with the number of projected individuals in each
#'stage class} \item{pop.sizes}{Total number of projected individuals}
#'\item{pop.changes}{Proportional change in population size}
#'@author Chris Stubben
#'@seealso \code{\link{stage.vector.plot}} to plot stage vectors
#'@references see section 2.2 in Caswell 2001
#'@keywords survey
#'@examples
#'
#'## mean matrix from Freville et al 2004
#'stages<-c("seedling", "vegetative", "flowering")
#'A<-matrix(c(
#' 0, 0, 5.905,
#'0.368, 0.639, 0.025,
#'0.001, 0.152, 0.051
#'), nrow=3, byrow=TRUE,
#' dimnames=list(stages,stages)
#')
#'
#'n<-c(5,5,5)
#'p<-pop.projection(A,n, 15)
#'p
#'damping.ratio(A)
#'stage.vector.plot(p$stage.vectors, col=2:4)
#'
#'
#'####
#'
#'data(whale)
#'A<-whale
#'#n<-c(4,38,36,22)
#'n<-c(5,5,5,5)
#'p<-pop.projection(A,n, 15)
#'p
#'stage.vector.plot(p$stage.vectors, col=2:4, ylim=c(0, 0.6))
#'## convergence is slow with damping ratio close to 1
#'damping.ratio(A)
#'pop.projection(A,n, 100)$pop.changes
#'@export
pop.projection <- function(A,n,iterations=20)
{
  # Project n(t+1) = A %*% n(t) for `iterations` steps and summarize the
  # resulting growth rate and (approximate) stable stage distribution.
  #
  # A:          square projection matrix.
  # n:          initial age or stage vector.
  # iterations: number of projection steps recorded.
  #
  # Returns a list with lambda (ratio of the last two total population
  # sizes), stable.stage (last stage vector rescaled to sum to one),
  # stage.vectors (matrix of projected stage vectors, one column per step),
  # pop.sizes (total individuals per step) and pop.changes (proportional
  # change between consecutive steps).
  n.stages <- length(n)
  n.steps <- iterations
  stage.vectors <- matrix(numeric(n.stages * n.steps), nrow = n.stages)
  pop.sizes <- numeric(n.steps)
  pop.changes <- numeric(n.steps - 1)
  current <- n
  for (step in seq_len(n.steps)) {
    stage.vectors[, step] <- current
    pop.sizes[step] <- sum(current)
    if (step > 1) {
      pop.changes[step - 1] <- pop.sizes[step] / pop.sizes[step - 1]
    }
    # Advance the population one time step.
    current <- A %*% current
  }
  # Label rows by stage and columns by time step, counting from zero.
  rownames(stage.vectors) <- rownames(A)
  colnames(stage.vectors) <- 0:(n.steps - 1)
  # Approximate stable stage distribution from the final stage vector.
  last.stage <- stage.vectors[, n.steps]
  list(
    lambda = pop.sizes[n.steps] / pop.sizes[n.steps - 1],
    stable.stage = last.stage / sum(last.stage),
    stage.vectors = stage.vectors,
    pop.sizes = pop.sizes,
    pop.changes = pop.changes
  )
}
|
library(forecast)
library(fpp2)
library(xts)
library(readr)
library(tseries)
# Fit an ARIMA(2,1,0) to the USD/MXN FIX exchange rate and evaluate a
# 23-step-ahead forecast against held-out 2020 data (mean absolute
# percentage error).
# NOTE(review): hard-coded absolute working directory; adjust per machine.
setwd('/home/noble_mannu/Documents/PhD/Third/STAT_2270_Data_Mining/Project/Analysis')
# setwd('C:/Users/Mitzi/Downloads')
df <- read.csv('data.csv')
data <- df$USD.Peso.mexicanoFIX
train <- data[261:1042] # Data up to 2019
# Roughly 260 trading days per year.
train <- ts(train, frequency = 260)
test <- data[1043:1065] # Data up to Oct 2020
# The auto-ARIMA fit (left commented out); a fixed ARIMA(2,1,0) is used.
#fit.arima <- auto.arima(train,lambda = NULL)
fit.arima <- Arima(train, order = c(2,1,0))
prono1<- forecast(fit.arima, h = 23)
plot(prono1)
# Mean absolute percentage error of the forecast over the test window.
mean(abs(100*(test - prono1$mean)/test))
| /CodigoARIMA_1.R | no_license | manuelgacos/Data_Mining_Project | R | false | false | 595 | r | library(forecast)
# Duplicate copy of the USD/MXN ARIMA(2,1,0) forecasting script; fits the
# model on data up to 2019 and evaluates a 23-step forecast with MAPE.
# (forecast is loaded as a dependency of fpp2.)
library(fpp2)
library(xts)
library(readr)
library(tseries)
setwd('/home/noble_mannu/Documents/PhD/Third/STAT_2270_Data_Mining/Project/Analysis')
# setwd('C:/Users/Mitzi/Downloads')
fx <- read.csv('data.csv')
rate <- fx$USD.Peso.mexicanoFIX
train_rate <- ts(rate[261:1042], frequency = 260)  # data up to 2019
test_rate <- rate[1043:1065]                       # data up to Oct 2020
# Automatic order selection, kept for reference:
# fit.arima <- auto.arima(train_rate, lambda = NULL)
fit.arima <- Arima(train_rate, order = c(2, 1, 0))
fc <- forecast(fit.arima, h = 23)
plot(fc)
mean(abs(100 * (test_rate - fc$mean) / test_rate))  # MAPE on the hold-out
|
# Build a tidy summary of the UCI HAR dataset: read the train/test splits,
# label the columns, keep only the mean()/std() features, recode activity
# codes to descriptive names, and average every retained feature by
# activity and subject.

# File locations for the training split.
train_file<-"course3/UCI HAR Dataset/train/X_train.txt"
trainy_file<-"course3/UCI HAR Dataset/train/y_train.txt"
trainbub_file<-"course3/UCI HAR Dataset/train/subject_train.txt"
# Feature measurements for the training split (one row per window).
traindata<-read.table(train_file)
# Activity codes (1-6) for each training row.
trainydata<-read.table(trainy_file)
# Subject identifier for each training row.
trainsubdata<-read.table(trainbub_file)
# Append the activity and subject columns to the training measurements.
traindata <- cbind(traindata,trainydata)
traindata <- cbind(traindata,trainsubdata)
# Repeat for the test split.
test_file<-"course3/UCI HAR Dataset/test/X_test.txt"
testy_file<-"course3/UCI HAR Dataset/test/y_test.txt"
testsub_file<-"course3/UCI HAR Dataset/test/subject_test.txt"
testdata<-read.table(test_file)
testydata<-read.table(testy_file)
testsubdata<-read.table(testsub_file)
testdata <- cbind(testdata,testydata)
testdata <- cbind(testdata,testsubdata)
# Read the feature names used to label the measurement columns.
head_file<-"course3/UCI HAR Dataset/features.txt"
# BUG FIX: the original read from the undefined variable `lfile`; the
# features file path is stored in `head_file`.
namehead<-read.table(head_file)
# Add a row naming column 562 (the appended activity column).
newrow <- data.frame("562","Activity")
names(newrow)<-names(namehead)
namehead<-rbind(namehead,newrow)
# And a row naming column 563 (the appended subject column).
newrow <- data.frame("563","Subject")
names(newrow)<-names(namehead)
namehead<-rbind(namehead,newrow)
# Use the lower-cased feature names as column names for both splits.
headname<-namehead$V2
headname<-tolower(headname)
colnames(traindata)<-headname
colnames(testdata)<-headname
# Combine the test and training rows into one data set.
mydata<-rbind(testdata,traindata)
# Columns holding mean() and std() measurements, in column order.
inordr<-sort(c(grep("mean\\(\\)",headname),grep("std\\(\\)",headname)))
# Keep activity (562), subject (563) and the mean/std feature columns.
lastdata<-mydata[c(562,563,inordr)]
# Replace numeric activity codes with descriptive labels.
lastdata$activity[lastdata$activity==1]<-"WALKING"
lastdata$activity[lastdata$activity==2]<-"WALKING_UPSTAIRS"
lastdata$activity[lastdata$activity==3]<-"WALKING_DOWNSTAIRS"
lastdata$activity[lastdata$activity==4]<-"SITTING"
lastdata$activity[lastdata$activity==5]<-"STANDING"
lastdata$activity[lastdata$activity==6]<-"LAYING"
# Finally, average every feature column by activity and subject.
aggdata<-aggregate(lastdata[,3:68],list(lastdata$activity,lastdata$subject),mean)
| /run_analysis.R | no_license | vagelisg/ProgrammingAssigment3 | R | false | false | 2,547 | r | # make 3 variables withe the file potitions for training data
# Tidy-data pipeline for the UCI HAR dataset: merge train/test splits,
# label columns, keep mean()/std() features, recode activities, then
# average each feature by activity and subject.
train_file<-"course3/UCI HAR Dataset/train/X_train.txt"
trainy_file<-"course3/UCI HAR Dataset/train/y_train.txt"
trainbub_file<-"course3/UCI HAR Dataset/train/subject_train.txt"
# Feature measurements for the training split.
traindata<-read.table(train_file)
# Activity codes (1-6) for each training row.
trainydata<-read.table(trainy_file)
# Subject identifier for each training row.
trainsubdata<-read.table(trainbub_file)
# Append the activity and subject columns to the training measurements.
traindata <- cbind(traindata,trainydata)
traindata <- cbind(traindata,trainsubdata)
# Repeat for the test split.
test_file<-"course3/UCI HAR Dataset/test/X_test.txt"
testy_file<-"course3/UCI HAR Dataset/test/y_test.txt"
testsub_file<-"course3/UCI HAR Dataset/test/subject_test.txt"
testdata<-read.table(test_file)
testydata<-read.table(testy_file)
testsubdata<-read.table(testsub_file)
testdata <- cbind(testdata,testydata)
testdata <- cbind(testdata,testsubdata)
# Read the feature names used to label the measurement columns.
head_file<-"course3/UCI HAR Dataset/features.txt"
# BUG FIX: the original read from the undefined variable `lfile`; the
# features file path is stored in `head_file`.
namehead<-read.table(head_file)
# Add a row naming column 562 (the appended activity column).
newrow <- data.frame("562","Activity")
names(newrow)<-names(namehead)
namehead<-rbind(namehead,newrow)
# And a row naming column 563 (the appended subject column).
newrow <- data.frame("563","Subject")
names(newrow)<-names(namehead)
namehead<-rbind(namehead,newrow)
# Use the lower-cased feature names as column names for both splits.
headname<-namehead$V2
headname<-tolower(headname)
colnames(traindata)<-headname
colnames(testdata)<-headname
# Combine the test and training rows into one data set.
mydata<-rbind(testdata,traindata)
# Columns holding mean() and std() measurements, in column order.
inordr<-sort(c(grep("mean\\(\\)",headname),grep("std\\(\\)",headname)))
# Keep activity (562), subject (563) and the mean/std feature columns.
lastdata<-mydata[c(562,563,inordr)]
# Replace numeric activity codes with descriptive labels.
lastdata$activity[lastdata$activity==1]<-"WALKING"
lastdata$activity[lastdata$activity==2]<-"WALKING_UPSTAIRS"
lastdata$activity[lastdata$activity==3]<-"WALKING_DOWNSTAIRS"
lastdata$activity[lastdata$activity==4]<-"SITTING"
lastdata$activity[lastdata$activity==5]<-"STANDING"
lastdata$activity[lastdata$activity==6]<-"LAYING"
# Finally, average every feature column by activity and subject.
aggdata<-aggregate(lastdata[,3:68],list(lastdata$activity,lastdata$subject),mean)
|
# Integration test: get_bond_data() should return a tidy long-format tibble
# (country_iso, date, metric, value) for the requested countries, metrics
# and date window.
# NOTE(review): the expected values appear to be scraped from a live site
# (worldgovernmentbonds.com per the test name) — this test presumably
# requires network access and may break if the source revises history.
testthat::test_that("get data for vector of countries and metrics from wgb.com works as expected", {
  # Request two metrics for two countries over a six-day window, daily
  # frequency, no forecast rows.
  test_sovfi_data <- get_bond_data(country_iso = c("US", "ZA"),
                                   metric = c("yield_5", "cds_5"),
                                   start_date = "2020-08-20",
                                   end_date = "2020-08-25",
                                   frequency = "daily",
                                   include_forecast = FALSE)
  # Hand-built expected result. The ZA cds_5 series carries only 4
  # observations (no weekend rows), while the other three series cover
  # all 6 calendar days.
  expected_sovfi_data <- tibble::tibble(country_iso = c(rep("US", 12), rep("ZA", 10)),
                                        date = c(rep(seq(as.Date("2020-08-20"), as.Date("2020-08-25"), by = "day"), 2),
                                                 as.Date(c("2020-08-20", "2020-08-21", "2020-08-24", "2020-08-25")),
                                                 seq(as.Date("2020-08-20"), as.Date("2020-08-25"), by = "day")),
                                        metric = c(rep("cds_5", 6), rep("yield_5", 6), rep("cds_5", 4), rep("yield_5", 6)),
                                        value = c(18.5, 18.5, 18.5, 18.5, 18.5, 18.5,
                                                  0.272, 0.268, 0.268, 0.268, 0.282, 0.296,
                                                  295.03, 292.96, 283.16, 287.35,
                                                  7.435, 7.37, 7.37, 7.37, 7.4, 7.385))
  testthat::expect_equal(test_sovfi_data, expected_sovfi_data)
})
| /tests/testthat/test-get_bond_data.R | no_license | ces0491/fdoR | R | false | false | 1,441 | r | testthat::test_that("get data for vector of countries and metrics from wgb.com works as expected", {
# Fetch two metrics for two countries over a six-day window (daily, no
# forecast rows). NOTE(review): presumably hits a live data source.
test_sovfi_data <- get_bond_data(country_iso = c("US", "ZA"),
                                 metric = c("yield_5", "cds_5"),
                                 start_date = "2020-08-20",
                                 end_date = "2020-08-25",
                                 frequency = "daily",
                                 include_forecast = FALSE)
# Hand-built expected tibble; the ZA cds_5 series has only 4 dates
# (no weekend observations), the other series cover all 6 calendar days.
expected_sovfi_data <- tibble::tibble(country_iso = c(rep("US", 12), rep("ZA", 10)),
                                      date = c(rep(seq(as.Date("2020-08-20"), as.Date("2020-08-25"), by = "day"), 2),
                                               as.Date(c("2020-08-20", "2020-08-21", "2020-08-24", "2020-08-25")),
                                               seq(as.Date("2020-08-20"), as.Date("2020-08-25"), by = "day")),
                                      metric = c(rep("cds_5", 6), rep("yield_5", 6), rep("cds_5", 4), rep("yield_5", 6)),
                                      value = c(18.5, 18.5, 18.5, 18.5, 18.5, 18.5,
                                                0.272, 0.268, 0.268, 0.268, 0.282, 0.296,
                                                295.03, 292.96, 283.16, 287.35,
                                                7.435, 7.37, 7.37, 7.37, 7.4, 7.385))
testthat::expect_equal(test_sovfi_data, expected_sovfi_data)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wfs_lmi.R
\name{read_12miles}
\alias{read_12miles}
\title{Get 12 miles}
\usage{
read_12miles()
}
\value{
An sf object
}
\description{
Get 12 miles
}
| /man/read_12miles.Rd | no_license | einarhjorleifsson/gisland | R | false | true | 227 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wfs_lmi.R
\name{read_12miles}
\alias{read_12miles}
\title{Get 12 miles}
\usage{
read_12miles()
}
\value{
An sf object
}
\description{
Get 12 miles
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_nc.R
\name{save_nc}
\alias{save_nc}
\title{Save a tag dataset to a netCDF file.}
\usage{
save_nc(file, X, ...)
}
\arguments{
\item{file}{The name of the data and metadata file to be written. If \code{file} does not include a .nc suffix, this will be added automatically.}
\item{X}{An \code{animaltag} object, or a list of tag sensor and/or metadata lists. Alternatively, sensor and metadata lists may be input as multiple separate unnamed inputs. Only these kinds of variables can be saved
in a NetCDF file because the supporting information in these structures is
needed to describe the contents of the file. For non-archive and non-portable
storage of variables, consider using \code{\link{save}} or various functions to write data to text files.}
\item{...}{Additional sensor or metadata lists, if user has not bundled them all into a list already but is providing individual structures.}
}
\description{
This function saves a tag dataset to a netCDF file (this is an archival file format supported by the tagtools package and suitable for submission to online data archives).
}
\details{
Warning: this will overwrite any previous NetCDF file with the same name. The file is assumed to be in the current working directory unless \code{file} includes file path information.
}
\examples{
\dontrun{save_nc('dog17_124a',A,M,P,info)
#or equivalently:
save_nc('dog17_124a',X=list(A,M,P,info))
#generates a file dog17_124a.nc and adds variables A, M and P, and a metadata structure.
}
}
| /R/tagtools/man/save_nc.Rd | no_license | FlukeAndFeather/TagTools | R | false | true | 1,567 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_nc.R
\name{save_nc}
\alias{save_nc}
\title{Save a tag dataset to a netCDF file.}
\usage{
save_nc(file, X, ...)
}
\arguments{
\item{file}{The name of the data and metadata file to be written. If \code{file} does not include a .nc suffix, this will be added automatically.}
\item{X}{An \code{animaltag} object, or a list of tag sensor and/or metadata lists. Alternatively, sensor and metadata lists may be input as multiple separate unnamed inputs. Only these kinds of variables can be saved
in a NetCDF file because the supporting information in these structures is
needed to describe the contents of the file. For non-archive and non-portable
storage of variables, consider using \code{\link{save}} or various functions to write data to text files.}
\item{...}{Additional sensor or metadata lists, if user has not bundled them all into a list already but is providing individual structures.}
}
\description{
This function saves a tag dataset to a netCDF file (this is an archival file format supported by the tagtools package and suitable for submission to online data archives).
}
\details{
Warning: this will overwrite any previous NetCDF file with the same name. The file is assumed to be in the current working directory unless \code{file} includes file path information.
}
\examples{
\dontrun{save_nc('dog17_124a',A,M,P,info)
#or equivalently:
save_nc('dog17_124a',X=list(A,M,P,info))
#generates a file dog17_124a.nc and adds variables A, M and P, and a metadata structure.
}
}
|
#create variables and starting values
# Exploratory script: Gibbs/MH sampler for an ordinal probit model with
# polychoric correlations, run on 4 PTSD items recoded to categories 1-5.
# NOTE(review): the two read.table() calls below are immediately
# overwritten by the BGGM-based assignments -- dead code that errors if
# "mvnprob.dat1" is absent. TODO confirm they can be removed.
x=as.matrix(read.table("mvnprob.dat1")[1:100,1:7])
z=as.matrix(read.table("mvnprob.dat1")[1:221,8])
# z: 221 x 4 matrix of ordinal responses shifted to 1..5;
# x: intercept-only design (a column of ones).
z <- BGGM::ptsd[,2:5] + 1
x <- rep(1, 221)
d=4   # number of items (latent dimensions)
k=1   # number of predictors (intercept only)
b<-matrix(0,(d*k))   # regression coefficients, initialised at zero
s=cs=diag(d)         # current / candidate correlation matrix
# tz: d x 6 cutpoint matrix; columns are (-Inf, 0, c3, c4, c5, Inf) so the
# first cutpoint is anchored at 0 for identification.
tz=matrix(0,d,6)
tz[,1]=-Inf; tz[,2]=0; tz[,6]=Inf
zstar=matrix(0,nrow(z),d)   # latent normal data, one column per item
# Initialise the free cutpoints (columns 3:5) of each item from empirical
# cumulative category proportions, shifted by the first category's quantile.
tz[1,3]=qnorm(sum(z[,1]<=2)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),sd=1)
tz[1,4]=qnorm(sum(z[,1]<=3)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),
              sd=1)
tz[1,5]=qnorm(sum(z[,1]<=4)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),sd=1)
tz[2,3]=qnorm(sum(z[,2]<=2)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[2,4]=qnorm(sum(z[,2]<=3)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[2,5]=qnorm(sum(z[,2]<=4)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,3]=qnorm(sum(z[,3]<=2)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,4]=qnorm(sum(z[,3]<=3)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,5]=qnorm(sum(z[,3]<=4)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,3]=qnorm(sum(z[,4]<=2)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,4]=qnorm(sum(z[,4]<=3)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,5]=qnorm(sum(z[,4]<=4)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
# Storage for the item-3 cutpoints at every sweep.
thresh_mat <- matrix(0, 6000, 3)
ctz=tz                  # candidate cutpoints for the MH step
acc1=acc2=acctot=0;     # acceptance counters
# Start the correlation matrix at the frequentist polychoric estimate.
s <- psych::polychoric(z)
s <- s$rho
# Storage for the 6 upper-triangle correlations at every sweep.
cor_save <- matrix(0, nrow = 6000, ncol = 6)
Psi <- diag(4)   # Wishart hyperparameter, updated inside the loop
# MCMC sampler: 6000 sweeps alternating (1) a Gibbs draw of the latent
# normal data zstar, (2) a joint Metropolis-Hastings update of the free
# cutpoints of all four items, (3) a draw of the regression coefficients
# b, and (4) draws of the correlation matrix s and hyperparameter Psi.
# Correlations and the item-3 cutpoints are saved every sweep.
# NOTE(review): `delta` is only assigned AFTER this loop in the original
# script, so a fresh session errors at the Psi draw below -- TODO confirm
# the intended value and define it before the loop.
for(i in 2:6000){
  #draw latent data: one-iteration gibbs sampler for tmvn simulation
  bb=matrix(b,k,d)
  m=x%*%bb
  for(j in 1:d){
    # conditional mean/variance of latent column j given the other columns
    mm= m[,j] + t(s[j,-j])%*%solve(s[-j,-j])%*%t((zstar[,-j]-m[,-j]))
    ss=s[j,j] - t(s[j,-j])%*%solve(s[-j,-j])%*%s[j,-j]
    # inverse-CDF draw truncated to the interval implied by the observed
    # category z[,j] and the current cutpoints
    zstar[,j]= qnorm(runif(nrow(z), min=pnorm(tz[j,z[,j]],mm , sqrt(ss)),
                           max=pnorm(tz[j,(z[,j] + 1)],mm,sqrt(ss))),
                     mean=mm,sd=sqrt(ss))
  }
  # --- MH proposal for the free cutpoints (cols 3:5) of item 1 ---
  for(l in 3:5){
    ctz[1,l]= qnorm(runif(1, min=pnorm(0, mean=tz[1,l],sd=.01),max=1), mean=tz[1,l],sd=.01)
  }
  # per-observation likelihood ratio of candidate vs current cutpoints
  r=as.matrix((pnorm(ctz[1,z[,1]+1]-m[,1],0,1)
               -pnorm(ctz[1,z[,1]]-m[,1],0,1))
              /
              (pnorm(tz[1,z[,1]+1]-m[,1],0,1)
               -pnorm(tz[1,z[,1]]-m[,1],0,1)))
  r1 <- NA
  for(p in 3:5){
    r1[p] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[1,p]/.01,0,1))/(1-pnorm(-ctz[1,p]/.01,0,1)))
  }
  r1 <- sum(r1[3:5])
  # --- item 2 ---
  for(t in 3:5){
    ctz[2,t]= qnorm(runif(1, min=pnorm(0, mean=tz[2,t],sd=.01),max=1), mean=tz[2,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[2,z[,2]+1]-m[,2],0,1)
               -pnorm(ctz[2,z[,2]]-m[,2],0,1))
              /
              (pnorm(tz[2,z[,2]+1]-m[,2],0,1)
               -pnorm(tz[2,z[,2]]-m[,2],0,1)))
  r2 <- NA
  for(g in 3:5){
    r2[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[2,g]/.01,0,1))/(1-pnorm(-ctz[2,g]/.01,0,1)))
  }
  r2 <- sum(r2[3:5])
  # --- item 3 ---
  for(t in 3:5){
    ctz[3,t]= qnorm(runif(1, min=pnorm(0, mean=tz[3,t],sd=.01),max=1), mean=tz[3,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[3,z[,3]+1]-m[,3],0,1)
               -pnorm(ctz[3,z[,3]]-m[,3],0,1))
              /
              (pnorm(tz[3,z[,3]+1]-m[,3],0,1)
               -pnorm(tz[3,z[,3]]-m[,3],0,1)))
  # BUG FIX: r3 must be initialised like r1/r2/r4; the original omitted
  # this line, so the first sweep failed with "object 'r3' not found".
  r3 <- NA
  for(g in 3:5){
    r3[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[3,g]/.01,0,1))/(1-pnorm(-ctz[3,g]/.01,0,1)))
  }
  r3 <- sum(r3[3:5])
  # --- item 4 ---
  for(t in 3:5){
    ctz[4,t]= qnorm(runif(1, min=pnorm(0, mean=tz[4,t],sd=.01),max=1), mean=tz[4,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[4,z[,4]+1]-m[,4],0,1)
               -pnorm(ctz[4,z[,4]]-m[,4],0,1))
              /
              (pnorm(tz[4,z[,4]+1]-m[,4],0,1)
               -pnorm(tz[4,z[,4]]-m[,4],0,1)))
  r4 <- NA
  for(g in 3:5){
    r4[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[4,g]/.01,0,1))/(1-pnorm(-ctz[4,g]/.01,0,1)))
  }
  r4 <- sum(r4[3:5])
  # accept/reject all four items' candidate cutpoints jointly
  if(sum(c(r1, r2, r3, r4)) > log(runif(1,0,1)) ){
    tz[1,3]=ctz[1,3]; tz[1,4]=ctz[1,4]; tz[1,5]=ctz[1,5];
    tz[2,3]=ctz[2,3]; tz[2,4]=ctz[2,4]; tz[2,5]=ctz[2,5];
    tz[3,3]=ctz[3,3]; tz[3,4]=ctz[3,4]; tz[3,5]=ctz[3,5];
    tz[4,3]=ctz[4,3]; tz[4,4]=ctz[4,4]; tz[4,5]=ctz[4,5]; acc1=acc1+1}
  # --- draw the regression coefficients b ~ N(mn, vb) ---
  vb=solve(solve(s)%x%(t(x)%*%x))
  # BUG FIX: `mn` is used by the MASS::mvrnorm() call below, so it must be
  # computed first; the original referenced mn one line before defining
  # it, which errors in a fresh workspace. RNG behaviour is unchanged
  # because set.seed(1) still precedes each random draw.
  mn=vb%*%(as.vector(t(x)%*%zstar%*%t(solve(s))))
  set.seed(1)
  MASS::mvrnorm(1, mn, vb)  # result discarded; kept from the original (debug artifact?)
  set.seed(1)
  b=mn+t(rnorm((d*k),0,1)%*%chol(vb))
  #use metropolis-hastings sampling to draw sigma
  #e=matrix((as.vector(zstar)-(diag(d)%x%x%*%b)),nrow(z),d)
  e <- zstar - t(b %*% t(x))
  v=t(e)%*%e   # residual cross-product of the latent data
  #like=-.5*((d+nrow(z)+1)*log(det(s)) + sum(diag(v%*%solve(s))))
  s <- cov2cor(rWishart(1, 221, v + Psi)[,,1])
  # NOTE(review): `p` here is the leftover index (5) from the r1 proposal
  # loop above -- presumably intended as the item count + 1; confirm.
  Psi <- rWishart(1, 3 + delta + p - 2, solve(s + diag(4), tol = 1e-20))[,,1]
  #cs[upper.tri(cs)] <- s[upper.tri(s)] + rnorm(6,mean=0,sd=.025)
  #
  #
  #cs <- BGGM:::symmteric_mat(cs)
  # sigma <- rWishart(1, delta + n - 1, s + Psi)[,,1]
  #
  #
  # Psi <- rWishart(1, 1 + delta + p - 2, solve(sigma + diag(4), tol = 1e-20))[,,1]
  #
  # cs <- cov2cor(sigma)
  # if( det(cs) > 0 ){
  #
  #   cslike=-.5*((d+nrow(z)+1)*log(det(cs)) + sum(diag(v%*%solve(cs))))
  #
  #   if((cslike-like)>log(runif(1,0,1))){
  #
  #     s = cs; acctot=acctot+1
  #
  #   }
  # }
  # save this sweep's correlations and the item-3 cutpoints
  cor_save[i,] <- s[upper.tri(s)]
  thresh_mat[i,] <- tz[3,3:5]
}
# ---- Post-run inspection (interactive console transcript) ----
# NOTE(review): this section is exploratory; several lines reference
# objects before they are (re)defined below -- e.g. colMeans(thresholds)
# runs before `thresholds` is created -- and only work when re-run in an
# already-populated workspace. TODO tidy before reuse.
Psi <- diag(4)
cor(zstar)   # correlations of the final latent-data draws
delta = 1    # hyperparameter referenced inside the sampler loop
n = 221
samps <- unique(cor_save)   # de-duplicated correlation draws (drops repeats from rejections)
samps
colMeans(cor_save)
colMeans( thresh_mat)
cor_save
# Trace plot of the first correlation to eyeball mixing.
coda::traceplot(coda::as.mcmc( cor_save[2:6000 ,1]))
sd(samps[,3])
cor_save[1000:1500,]
colMeans(cor_save[1000:6000,])   # posterior means after burn-in
hist(samps[,2])
Psi <- diag(3)
colMeans(samps)
apply(cor_save[1000:6000,], 2, sd)   # posterior SDs after burn-in
# Frequentist comparison: polychoric correlations and thresholds.
compare <- psych::polychoric(z)
compare$rho[upper.tri(compare$rho)]
compare$tau
# Two-item polychoric with standard error for comparison.
test <- polycor::polychor(z[,1], z[,3], std.err = T)
sqrt(test$var)
colMeans(thresholds)   # NOTE(review): `thresholds` only defined a few lines below
sd(samps[,3])
sd(samps[,3])
sd(samps[,2])
thresholds <- unique(thresh_mat)
thresholds
hist(samps[,1])
test <- unique(cor_save)
# NOTE(review): `test` is a matrix here, so test[[i]] extracts a single
# scalar and upper.tri() on it is presumably not the intended behaviour.
test[[2]]
length(test)
mat_res <- matrix(0, nrow = length(test), 3)
for(i in 2:length(test)){
  mat_res[i,] <- test[[i]][upper.tri(test[[i]]) ]
}
colMeans(mat_res)
colMeans(thresholds)
test <- polycor::polychor(z[,2], z[,3], std.err = T)
test$rho
sqrt(test$var)
| /testing.R | no_license | donaldRwilliams/Bayes_ord | R | false | false | 6,510 | r | #create variables and starting values
# Starting values for the ordinal probit / polychoric sampler (duplicate
# copy of the setup above). The two read.table() calls are immediately
# overwritten by the BGGM-based data -- dead code; see note in the first copy.
x=as.matrix(read.table("mvnprob.dat1")[1:100,1:7])
z=as.matrix(read.table("mvnprob.dat1")[1:221,8])
# z: 221 x 4 ordinal responses in 1..5; x: intercept-only design.
z <- BGGM::ptsd[,2:5] + 1
x <- rep(1, 221)
d=4   # number of items
k=1   # number of predictors (intercept only)
b<-matrix(0,(d*k))   # regression coefficients
s=cs=diag(d)         # current / candidate correlation matrix
# Cutpoints: columns are (-Inf, 0, c3, c4, c5, Inf) per item.
tz=matrix(0,d,6)
tz[,1]=-Inf; tz[,2]=0; tz[,6]=Inf
zstar=matrix(0,nrow(z),d)   # latent normal data
# Empirical initialisation of the free cutpoints (columns 3:5).
tz[1,3]=qnorm(sum(z[,1]<=2)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),sd=1)
tz[1,4]=qnorm(sum(z[,1]<=3)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),
              sd=1)
tz[1,5]=qnorm(sum(z[,1]<=4)/nrow(z), mean=-qnorm(sum(z[,1]==1)/nrow(z),mean=0,sd=1),sd=1)
tz[2,3]=qnorm(sum(z[,2]<=2)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[2,4]=qnorm(sum(z[,2]<=3)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[2,5]=qnorm(sum(z[,2]<=4)/nrow(z), mean=-qnorm(sum(z[,2]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,3]=qnorm(sum(z[,3]<=2)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,4]=qnorm(sum(z[,3]<=3)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[3,5]=qnorm(sum(z[,3]<=4)/nrow(z), mean=-qnorm(sum(z[,3]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,3]=qnorm(sum(z[,4]<=2)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,4]=qnorm(sum(z[,4]<=3)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
tz[4,5]=qnorm(sum(z[,4]<=4)/nrow(z), mean=-qnorm(sum(z[,4]==1)/nrow(z),mean=0,sd=1), sd=1)
# Storage for item-3 cutpoints per sweep.
thresh_mat <- matrix(0, 6000, 3)
ctz=tz                 # candidate cutpoints for the MH step
acc1=acc2=acctot=0;    # acceptance counters
# Start the correlation matrix at the frequentist polychoric estimate.
s <- psych::polychoric(z)
s <- s$rho
# Storage for the 6 upper-triangle correlations per sweep.
cor_save <- matrix(0, nrow = 6000, ncol = 6)
Psi <- diag(4)   # Wishart hyperparameter, updated inside the loop
# MCMC sampler (duplicate copy): 6000 sweeps of (1) Gibbs latent-data
# draws, (2) joint MH cutpoint updates for all four items, (3) a draw of
# the coefficients b, and (4) draws of the correlation matrix s and Psi.
# NOTE(review): `delta` is only assigned after this loop; a fresh session
# errors at the Psi draw below -- TODO define it before the loop.
for(i in 2:6000){
  #draw latent data: one-iteration gibbs sampler for tmvn simulation
  bb=matrix(b,k,d)
  m=x%*%bb
  for(j in 1:d){
    # conditional mean/variance of latent column j given the other columns
    mm= m[,j] + t(s[j,-j])%*%solve(s[-j,-j])%*%t((zstar[,-j]-m[,-j]))
    ss=s[j,j] - t(s[j,-j])%*%solve(s[-j,-j])%*%s[j,-j]
    # truncated inverse-CDF draw for the observed category
    zstar[,j]= qnorm(runif(nrow(z), min=pnorm(tz[j,z[,j]],mm , sqrt(ss)),
                           max=pnorm(tz[j,(z[,j] + 1)],mm,sqrt(ss))),
                     mean=mm,sd=sqrt(ss))
  }
  # --- MH proposal for the free cutpoints (cols 3:5) of item 1 ---
  for(l in 3:5){
    ctz[1,l]= qnorm(runif(1, min=pnorm(0, mean=tz[1,l],sd=.01),max=1), mean=tz[1,l],sd=.01)
  }
  r=as.matrix((pnorm(ctz[1,z[,1]+1]-m[,1],0,1)
               -pnorm(ctz[1,z[,1]]-m[,1],0,1))
              /
              (pnorm(tz[1,z[,1]+1]-m[,1],0,1)
               -pnorm(tz[1,z[,1]]-m[,1],0,1)))
  r1 <- NA
  for(p in 3:5){
    r1[p] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[1,p]/.01,0,1))/(1-pnorm(-ctz[1,p]/.01,0,1)))
  }
  r1 <- sum(r1[3:5])
  # --- item 2 ---
  for(t in 3:5){
    ctz[2,t]= qnorm(runif(1, min=pnorm(0, mean=tz[2,t],sd=.01),max=1), mean=tz[2,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[2,z[,2]+1]-m[,2],0,1)
               -pnorm(ctz[2,z[,2]]-m[,2],0,1))
              /
              (pnorm(tz[2,z[,2]+1]-m[,2],0,1)
               -pnorm(tz[2,z[,2]]-m[,2],0,1)))
  r2 <- NA
  for(g in 3:5){
    r2[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[2,g]/.01,0,1))/(1-pnorm(-ctz[2,g]/.01,0,1)))
  }
  r2 <- sum(r2[3:5])
  # --- item 3 ---
  for(t in 3:5){
    ctz[3,t]= qnorm(runif(1, min=pnorm(0, mean=tz[3,t],sd=.01),max=1), mean=tz[3,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[3,z[,3]+1]-m[,3],0,1)
               -pnorm(ctz[3,z[,3]]-m[,3],0,1))
              /
              (pnorm(tz[3,z[,3]+1]-m[,3],0,1)
               -pnorm(tz[3,z[,3]]-m[,3],0,1)))
  # BUG FIX: r3 must be initialised like r1/r2/r4; the original omitted
  # this line, so the first sweep failed with "object 'r3' not found".
  r3 <- NA
  for(g in 3:5){
    r3[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[3,g]/.01,0,1))/(1-pnorm(-ctz[3,g]/.01,0,1)))
  }
  r3 <- sum(r3[3:5])
  # --- item 4 ---
  for(t in 3:5){
    ctz[4,t]= qnorm(runif(1, min=pnorm(0, mean=tz[4,t],sd=.01),max=1), mean=tz[4,t],sd=.01)
  }
  r=as.matrix((pnorm(ctz[4,z[,4]+1]-m[,4],0,1)
               -pnorm(ctz[4,z[,4]]-m[,4],0,1))
              /
              (pnorm(tz[4,z[,4]+1]-m[,4],0,1)
               -pnorm(tz[4,z[,4]]-m[,4],0,1)))
  r4 <- NA
  for(g in 3:5){
    r4[g] = t(log(r))%*%matrix(1,nrow(z)) + log((1-pnorm(-tz[4,g]/.01,0,1))/(1-pnorm(-ctz[4,g]/.01,0,1)))
  }
  r4 <- sum(r4[3:5])
  # accept/reject all four items' candidate cutpoints jointly
  if(sum(c(r1, r2, r3, r4)) > log(runif(1,0,1)) ){
    tz[1,3]=ctz[1,3]; tz[1,4]=ctz[1,4]; tz[1,5]=ctz[1,5];
    tz[2,3]=ctz[2,3]; tz[2,4]=ctz[2,4]; tz[2,5]=ctz[2,5];
    tz[3,3]=ctz[3,3]; tz[3,4]=ctz[3,4]; tz[3,5]=ctz[3,5];
    tz[4,3]=ctz[4,3]; tz[4,4]=ctz[4,4]; tz[4,5]=ctz[4,5]; acc1=acc1+1}
  # --- draw the regression coefficients b ~ N(mn, vb) ---
  vb=solve(solve(s)%x%(t(x)%*%x))
  # BUG FIX: `mn` must be computed before the MASS::mvrnorm() call that
  # uses it; the original referenced mn one line before defining it.
  # RNG behaviour is unchanged: set.seed(1) still precedes each draw.
  mn=vb%*%(as.vector(t(x)%*%zstar%*%t(solve(s))))
  set.seed(1)
  MASS::mvrnorm(1, mn, vb)  # result discarded; kept from the original (debug artifact?)
  set.seed(1)
  b=mn+t(rnorm((d*k),0,1)%*%chol(vb))
  #use metropolis-hastings sampling to draw sigma
  #e=matrix((as.vector(zstar)-(diag(d)%x%x%*%b)),nrow(z),d)
  e <- zstar - t(b %*% t(x))
  v=t(e)%*%e   # residual cross-product of the latent data
  #like=-.5*((d+nrow(z)+1)*log(det(s)) + sum(diag(v%*%solve(s))))
  s <- cov2cor(rWishart(1, 221, v + Psi)[,,1])
  # NOTE(review): `p` here is the leftover index (5) from the r1 proposal
  # loop above -- presumably intended as the item count + 1; confirm.
  Psi <- rWishart(1, 3 + delta + p - 2, solve(s + diag(4), tol = 1e-20))[,,1]
  #cs[upper.tri(cs)] <- s[upper.tri(s)] + rnorm(6,mean=0,sd=.025)
  #
  #
  #cs <- BGGM:::symmteric_mat(cs)
  # sigma <- rWishart(1, delta + n - 1, s + Psi)[,,1]
  #
  #
  # Psi <- rWishart(1, 1 + delta + p - 2, solve(sigma + diag(4), tol = 1e-20))[,,1]
  #
  # cs <- cov2cor(sigma)
  # if( det(cs) > 0 ){
  #
  #   cslike=-.5*((d+nrow(z)+1)*log(det(cs)) + sum(diag(v%*%solve(cs))))
  #
  #   if((cslike-like)>log(runif(1,0,1))){
  #
  #     s = cs; acctot=acctot+1
  #
  #   }
  # }
  # save this sweep's correlations and the item-3 cutpoints
  cor_save[i,] <- s[upper.tri(s)]
  thresh_mat[i,] <- tz[3,3:5]
}
# ---- Post-run inspection (duplicate copy; interactive transcript) ----
# NOTE(review): several lines reference objects before they are defined
# below (e.g. `thresholds`); only works in an already-populated workspace.
Psi <- diag(4)
cor(zstar)   # correlations of the final latent-data draws
delta = 1    # hyperparameter referenced inside the sampler loop
n = 221
samps <- unique(cor_save)   # de-duplicated correlation draws
samps
colMeans(cor_save)
colMeans( thresh_mat)
cor_save
coda::traceplot(coda::as.mcmc( cor_save[2:6000 ,1]))   # mixing of the first correlation
sd(samps[,3])
cor_save[1000:1500,]
colMeans(cor_save[1000:6000,])   # posterior means after burn-in
hist(samps[,2])
Psi <- diag(3)
colMeans(samps)
apply(cor_save[1000:6000,], 2, sd)   # posterior SDs after burn-in
# Frequentist comparison: polychoric correlations and thresholds.
compare <- psych::polychoric(z)
compare$rho[upper.tri(compare$rho)]
compare$tau
test <- polycor::polychor(z[,1], z[,3], std.err = T)
sqrt(test$var)
colMeans(thresholds)   # NOTE(review): `thresholds` only defined below
sd(samps[,3])
sd(samps[,3])
sd(samps[,2])
thresholds <- unique(thresh_mat)
thresholds
hist(samps[,1])
test <- unique(cor_save)
# NOTE(review): `test` is a matrix, so test[[i]] yields a scalar and
# upper.tri() on it is presumably not the intended behaviour.
test[[2]]
length(test)
mat_res <- matrix(0, nrow = length(test), 3)
for(i in 2:length(test)){
  mat_res[i,] <- test[[i]][upper.tri(test[[i]]) ]
}
colMeans(mat_res)
colMeans(thresholds)
test <- polycor::polychor(z[,2], z[,3], std.err = T)
test$rho
sqrt(test$var)
|
# HarvardX Machine Learning: generative-model exercises on the
# tissue_gene_expression data (dslabs), fitted with caret::train.
set.seed(1993)
data("tissue_gene_expression")
# Keep only the two brain tissues and 10 randomly chosen predictors.
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
#Q1
# LDA accuracy under caret's default (bootstrap) resampling.
fit_lda <- train(x, y, method = "lda")
fit_lda$results["Accuracy"]
#Q2
# Plot the class-conditional means of each predictor against each other.
t(fit_lda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
  geom_point() +
  geom_text() +
  geom_abline()
#Q3
# Re-seed and rebuild the same subset, then fit QDA.
set.seed(1993)
data("tissue_gene_expression")
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
#Q4
# Same class-conditional means plot for the QDA fit.
t(fit_qda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
  geom_point() +
  geom_text() +
  geom_abline()
#Q5
# Centre the predictors before LDA and inspect the centred means.
fit_lda <- train(x, y, method = "lda", preProcess = "center")
fit_lda$results["Accuracy"]
t(fit_lda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(predictor_name, hippocampus)) +
  geom_point() +
  coord_flip()
#Q6
# All tissue types, 10 random predictors, centred LDA.
set.seed(1993)
data("tissue_gene_expression")
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
x <- x[, sample(ncol(x), 10)]
fit_lda <- train(x, y, method = "lda", preProcess = c("center"))
fit_lda$results["Accuracy"]
| /gneratives models.R | no_license | thinhle1304/machinelearning | R | false | false | 1,538 | r | set.seed(1993)
# Duplicate copy of the generative-model exercises (caret + dslabs);
# restrict to the two brain tissues and 10 randomly chosen predictors.
data("tissue_gene_expression")
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
#Q1
# LDA accuracy under caret's default resampling.
fit_lda <- train(x, y, method = "lda")
fit_lda$results["Accuracy"]
#Q2
# Class-conditional means of each predictor, one against the other.
t(fit_lda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
  geom_point() +
  geom_text() +
  geom_abline()
#Q3
# Re-seed, rebuild the same subset, fit QDA.
set.seed(1993)
data("tissue_gene_expression")
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
#Q4
t(fit_qda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
  geom_point() +
  geom_text() +
  geom_abline()
#Q5
# Centre the predictors before LDA and inspect the centred means.
fit_lda <- train(x, y, method = "lda", preProcess = "center")
fit_lda$results["Accuracy"]
t(fit_lda$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  ggplot(aes(predictor_name, hippocampus)) +
  geom_point() +
  coord_flip()
#Q6
# All tissue types, 10 random predictors, centred LDA.
set.seed(1993)
data("tissue_gene_expression")
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
x <- x[, sample(ncol(x), 10)]
fit_lda <- train(x, y, method = "lda", preProcess = c("center"))
fit_lda$results["Accuracy"]
|
# Example extracted from the utiml package docs: fit the BASELINE
# multi-label classifier on the bundled toy data and predict on it.
library(utiml)
### Name: predict.BASELINEmodel
### Title: Predict Method for BASELINE
### Aliases: predict.BASELINEmodel
### ** Examples
model <- baseline(toyml)
pred <- predict(model, toyml)   # predictions on the training data
| /data/genthat_extracted_code/utiml/examples/predict.BASELINEmodel.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 199 | r | library(utiml)
# Duplicate copy of the utiml BASELINE example (library(utiml) is loaded
# on the preceding line of this record).
### Name: predict.BASELINEmodel
### Title: Predict Method for BASELINE
### Aliases: predict.BASELINEmodel
### ** Examples
model <- baseline(toyml)
pred <- predict(model, toyml)   # predictions on the training data
|
# Modified/corrected/commented by Vijay Rajagopal
# - added split in radial versus axial distance of RyR cluster to Z-disc
# - added termination criteria
# - corrected and validated code
# Corrected by Cameron Walker
# - sped up (looping removed where possible)
# - torus metric edge correction implemented
# - quantiles added to Energy
# - iterative updating of metrics implemented
# Original ideas developed in collaboration with Evan Blumgart 02/02/11 (refer to Masters thesis from University of Auckland, 2011)
# This code implements the Reconstruction Algorithm using the mean and quantiles of the axial and radial distances of RyR clusters from the z-discs and the nearest neighbour distances of the RyR clusters as modelling metrics.
# FUTURE FEATURE: Can also using distance function and nearest neighbour distance variances as metrics
#The code first reads in the RyR and z-disk data from an experiment and calculates the nearest neighbour and distance functions.
#This is then set up as the target statistic that the reconstruction algorithm must recreate on a new z-disk dataset from a different experiement
#with no RyRs on it. It basically assumes that the RyR characteristics of the first experimental data is typical of the distribution in these cells. This assumption was validated in recent paper (submitted DATE)
#################
#options(warn=2) - uncomment this for debugging purposes
##################
###################################################
#CHANGE THIS TO POINT TO WHERE YOUR local machine ryr-simulator github source directory is.
###################################################
#setwd("/Users/xxxxxx/src/ryr-simulator/source")
source("settings.R")
path=getwd()
source(paste(path,"/nnd-calculators.R",sep="")) #additional functions for calculating nearest-neighborhood distances.
# Additional paths for input and output files. Master is the cell from which statistics are extracted. Target is the cell onto which RyR clusters will be simulated
# NOTE(review): path2/path3/path4 are presumably defined in settings.R -- confirm.
#path2="/../input-files/master-cell/"
#path3="/../output-files/target-cell/"
#path4="/../input-files/target-cell/"
# read in the coordinates of all the observed RyR's inside sampling box of the experimental data (master cell) - stored in a file X.txt (read in micron and pixel versions)
X=read.csv(paste(path2,"X_micron.txt",sep=""),header=T)
X_pix=read.csv(paste(path2,"X_pixel.txt",sep=""),header=T)
# read in whole RyR data cloud of the experimental data stored in a file allX.txt
allX=read.csv(paste(path2,"allX_micron.txt",sep=""),header=T)
allX_pix=read.csv(paste(path2,"allX_pixel.txt",sep=""),header=T)
# read in non-myofibril voxels of the experimental data stored in a file W.txt
W=read.csv(paste(path2,"W_micron.txt",sep=""),header=T)
w=read.csv(paste(path2,"W_pixel.txt",sep=""),header=T)
# read in distance function of voxels in W for the experimental data. stored in a file d.txt
drad=read.csv(paste(path2,"d_radial_micron.txt",sep=""),header=T) #Radial distance from given voxel (W) to z-disk
daxi=read.csv(paste(path2,"d_axial_micron.txt",sep=""),header=T) #Axial distance from given voxel (W) to z-disk
# define box boundaries of the experimental data that we are using to calculate nearest neighbour and distance function statistics.
#note that directions x1, y1 and z1 have different meanings in different image processing/stats processing codes. So, when reading a file into this code
#be aware what coordinate system was used in the code that generated that image and the coordinate system used in this code.
l=apply(W,2,min)
u=apply(W,2,max)
vol_obsBox <- prod(u-l)   # volume of the observation box (microns^3)
#Would like to use allX and X, but do not have any of the distance information
# for allX, so will treat X as allX and take a smaller block within X as X
# Shrink the box by 10% about its centre and keep the RyR points inside it.
u_block = 0.9*(u-(u-l)/2)+(u-l)/2
l_block = 0.9*(l-(u-l)/2)+(u-l)/2
block = apply( X,1,function(z){all((l_block<=z)&(z<=u_block))} )
X_block = X[block,]
allX = X          # re-purpose the full set as the "all RyR" cloud
allX_pix=X_pix
X=X_block         # and the inner block as the sampling-box subset
# define voxel resolution (microns per pixel in x, y, z)
resx = 0.0732157
resy = 0.0732157
resz = 0.053535
res <-c(resx,resy,resz)
#set up look up table of axial and radial distances from each available voxel for RyR cluster simulation to z-disc
Drad = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
Drad[as.matrix(w)]<-drad$d
Daxi = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
Daxi[as.matrix(w)]<-daxi$d
#number of measures to compare (mean, sd and 7 quantiles per metric)
numMeasures = 9
#number of simulation patterns to generate.
numPatterns = 1
# compute the observed measures for distance (radial, axial and nearest-neighborhood)
obsdrad <- Drad[as.matrix(allX_pix)]
oDistRadMeasure=numeric(numMeasures)
oDistRadMeasure[1] <- mean(obsdrad)
oDistRadMeasure[2] <- sd(obsdrad)
oDistRadMeasure[3:9] <- quantile(obsdrad,seq(0.125,0.875,length=7))
#axial distances
obsdaxi <- Daxi[as.matrix(allX_pix)]
oDistAxiMeasure=numeric(numMeasures)
oDistAxiMeasure[1] <- mean(obsdaxi)
oDistAxiMeasure[2] <- sd(obsdaxi)
oDistAxiMeasure[3:9] <- quantile(obsdaxi,seq(0.125,0.875,length=7))
####replaced X with allX - July 22nd 2012
####introduced allX_pix instead of using res - Oct 23 2012
# compute mean of observed measures for nearest neigbour distance
#obsNNd = findObsNNDist_CGW(X,allX,l,u) # correct for this data - can't simulate all X as W is too small
#nearest neighborhood distances.
obsNNd = findObsNNDist_CGW(allX,allX,l,u)
oNNdMeasure=numeric(numMeasures)
oNNdMeasure[1] <- mean(obsNNd)
oNNdMeasure[2] <- sd(obsNNd)
oNNdMeasure[3:9] <- quantile(obsNNd,seq(0.125,0.875,length=7))
#set up histogram parameters
filename="master_cell"
main = "Master Cell"
#breaks in distance for each distance type
nndbreaks = c(0,0.2,0.4,0.6,0.8,1.2)
radbreaks = c(0,0.2,0.4,0.6,0.8)
# Start PNG device driver to save output to figure.png
png(filename=paste(path3,filename,"_obsdrad.png",sep=""), height=295, width=300,
bg="white")
hist(obsdrad,breaks=radbreaks,xlab="Radial Distance of RyR cluster from Z-disc",main=main)
dev.off()
png(filename=paste(path3,filename,"_obsdaxi.png",sep=""), height=295, width=300,
bg="white")
hist(obsdaxi,breaks="Scott",xlab="Axial Distance of RyR cluster from Z-disc",main=main)
dev.off()
png(filename=paste(path3,filename,"_obsnnd.png",sep=""), height=295, width=300,
bg="white")
hist(obsNNd,breaks=nndbreaks,xlab="Nearest Neighbour Distances for RyR clusters",main=main)
dev.off()
# remember the master cell's box volume before it is recomputed for the target cell below
oldVol_obsBox = vol_obsBox
#####introduce intensity factor - October 11 2012
factor = 1 #no change of intensity
# factor = 0.7 #70% intensity
###FOLOWING USED IF CHANGING CELL - commented out here
##read in info for vijay's cell
##w = read.table(paste(path3,"Cell10_available_lowres_myo_mito_stack_correct_2012.txt",sep=""),header=T)
# From here on W/w/Drad/Daxi/l/u refer to the TARGET cell (read from path4);
# the master-cell statistics computed above (o*Measure) remain the targets.
W=read.csv(paste(path4,"W_micron.txt",sep=""),header=T)
w = read.csv(paste(path4,"W_pixel.txt",sep=""),header=T)
#W = (w - 1)*res
##d = read.table(paste(path3,"Cell10_dFunc_avs_lowres_myo_mito_stack_correct_2012.txt",sep=""),header=T)
drad = read.csv(paste(path4,"d_radial_micron.txt",sep=""),header=T)
Drad = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
#D = array(dim=u/res+1)
# abs(): the target-cell files may hold signed distances; only the magnitude is used
Drad[as.matrix(w)]<-abs(drad$d)
#
daxi = read.csv(paste(path4,"d_axial_micron.txt",sep=""),header=T)
Daxi = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
#D = array(dim=u/res+1)
Daxi[as.matrix(w)]<-abs(daxi$d)
#
l=apply(W,2,min)
u=apply(W,2,max)
vol_obsBox <- prod(u-l)
u_block = 0.9*(u-(u-l)/2)+(u-l)/2
l_block = 0.9*(l-(u-l)/2)+(u-l)/2
#u_block = 0.9*(u-l)
#l_block = 0.1*(u-l)
#t1 <- proc.time() #timer
# NOTE(review): N (number of clusters to simulate) is used below but its
# computation here is commented out -- presumably settings.R defines it; confirm.
#N=floor((length(allX$x)/oldVol_obsBox)*vol_obsBox*factor)
# Reconstruction loop: for each pattern, start from a random placement of N
# clusters on the target cell's available voxels (W) and greedily swap one
# point at a time, accepting a proposal only when the squared-error energy
# against the observed master-cell measures decreases, until propE <= etol
# or numIter proposals have been made.
sim_convgdE = numeric(numPatterns)
for (j in 1:numPatterns) {
# define initial simulated point pattern and data structures
# NOTE(review): N is used here but never defined (uncommented) in this file --
# see the commented-out computation above; presumably settings.R supplies it.
simX=matrix(0,nrow=N,ncol=3)
ptsIndex=sample(1:length(W$x),N)
simX=as.matrix(W[ptsIndex,])
avail=(1:length(W$x))[-ptsIndex]
simdrad=Drad[as.matrix(w[ptsIndex,])]
simDistRadMeasure=numeric(numMeasures)
simDistRadMeasure[1] <- mean(simdrad)
simDistRadMeasure[2] <- sd(simdrad)
simDistRadMeasure[3:9] <- quantile(simdrad,seq(0.125,0.875,length=7))
simdaxi=Daxi[as.matrix(w[ptsIndex,])]
simDistAxiMeasure=numeric(numMeasures)
simDistAxiMeasure[1] <- mean(simdaxi)
simDistAxiMeasure[2] <- sd(simdaxi)
simDistAxiMeasure[3:9] <- quantile(simdaxi,seq(0.125,0.875,length=7))
simNNd = findObsNNDist_CGW(simX,simX,l,u)
indSimNNd= findWhichObsNNDist_CGW(simX,simX,l,u)
simNNdMeasure=numeric(numMeasures)
simNNdMeasure[1] <- mean(simNNd)
simNNdMeasure[2] <- sd(simNNd)
simNNdMeasure[3:9] <- quantile(simNNd,seq(0.125,0.875,length=7))
# energy = sum of squared differences between observed and simulated measures
E <- sum((c(oDistRadMeasure[1:numMeasures],oDistAxiMeasure[1:numMeasures],oNNdMeasure[1:numMeasures])-c(simDistRadMeasure[1:numMeasures],simDistAxiMeasure[1:numMeasures],simNNdMeasure[1:numMeasures]))^2)
propE<-E;
cat(propE)
propSimDistRadMeasure=numeric(numMeasures)
propSimDistAxiMeasure=numeric(numMeasures)
propSimNNdMeasure=numeric(numMeasures)
i=0;
numIter = 200000
etol = 0.001
while((i<=numIter)&&(propE>etol) ) {
# while((propE>0.00005) ) {
i=i+1;
if (i%%100 == 0) {
cat(i,E,"\n")
}
draw1=sample(1:length(avail),1) #draw from sample of available point indices
draw2=sample(1:length(ptsIndex),1) #draw from index of ryr points currently estimated
propSimX = simX #set up proposed sim array strucure
propIndSimNNd = indSimNNd #indices of nearest neighbors
propSimX[draw2,]= as.matrix(W[avail[draw1],]) #put in coordinates of randomly chosen new point into proposed sim
propSimNNd = simNNd
#which points had removed point as nearest
# (match() yields NA where draw2 is absent; which(... > 0) drops those NAs)
gone=which(sapply(indSimNNd,function(z){match(draw2,z)})>0)
#find distance from each remaining point to new point
ndt = findObsNNDist_CGW(as.matrix(propSimX[-draw2,]),t(as.matrix(propSimX[draw2,])),l,u) #find nearest neighbor distances between prop sim x's points to new point draw2
#if new point is nearer than nearest, update
propSimNNd[-draw2] = apply(cbind(propSimNNd[-draw2],ndt),1,min)
# nested replacement: re-point the NN index to draw2 at exactly those
# positions (draw2 excluded) where the new point became the nearest one
propIndSimNNd[-draw2][which(propSimNNd[-draw2]==ndt)]=draw2
#store distance of nearest point to new point
propSimNNd[draw2] = min(ndt)
propIndSimNNd[draw2] = which.min(ndt)
#recalculate nearest point for any pts which had removed point as nearest
if (length(gone)>0) {
for (k in 1:length(gone)) {
#cat("test1",propSimNNd[gone[k]],"\n")
propSimNNd[gone[k]] = findObsNNDist_CGW(t(as.matrix(propSimX[gone[k],])),as.matrix(propSimX[-gone[k],]),l,u)
#cat("test2",propSimNNd[gone[k]],"\n")
propIndSimNNd[gone[k]] = findWhichObsNNDist_CGW(t(as.matrix(propSimX[gone[k],])),as.matrix(propSimX[-gone[k],]),l,u)
#cat("test3",propIndSimNNd[gone[k]],"\n")
}
}
propSimdrad = simdrad
propSimdrad[draw2] = Drad[as.matrix(w[avail[draw1],])]
propSimDistRadMeasure[1] <- mean(propSimdrad)
propSimDistRadMeasure[2] <- sd(propSimdrad)
propSimDistRadMeasure[3:9] <- quantile(propSimdrad,seq(0.125,0.875,length=7))
propSimdaxi = simdaxi
propSimdaxi[draw2] = Daxi[as.matrix(w[avail[draw1],])]
propSimDistAxiMeasure[1] <- mean(propSimdaxi)
propSimDistAxiMeasure[2] <- sd(propSimdaxi)
propSimDistAxiMeasure[3:9] <- quantile(propSimdaxi,seq(0.125,0.875,length=7))
propSimNNdMeasure[1] <- mean(propSimNNd)
propSimNNdMeasure[2] <- sd(propSimNNd)
propSimNNdMeasure[3:9] <- quantile(propSimNNd,seq(0.125,0.875,length=7))
propE = sum((c(oDistRadMeasure[1:numMeasures],oDistAxiMeasure[1:numMeasures],oNNdMeasure[1:numMeasures])-c(propSimDistRadMeasure[1:numMeasures],propSimDistAxiMeasure[1:numMeasures],propSimNNdMeasure[1:numMeasures]))^2)
if (propE < E) { # no probability of non-acceptance
cat(propE,"\n")
E <- propE
simDistRadMeasure <- propSimDistRadMeasure # this is the new accepted simulated distance function mean
simDistAxiMeasure <- propSimDistAxiMeasure # this is the new accepted simulated distance function mean
simNNdMeasure <- propSimNNdMeasure # this is the new accepted simulated mean distances mean
simX <- propSimX # this is the new accepted simulated point pattern
# swap the accepted voxel: the new point leaves avail, the replaced one returns
temp = ptsIndex[draw2]
ptsIndex[draw2] = avail[draw1]
avail[draw1] = temp
simdrad = propSimdrad
simdaxi = propSimdaxi
simNNd = propSimNNd
indSimNNd = propIndSimNNd
}
}
# NOTE(review): the else branch below is dead code (the condition is the
# constant 1); even if reached, j=j-1 would not repeat an R for-loop iteration.
if(1){
sim_convgdE[j] <- E
write(t(simX),file=paste(path3,"simPP",j,".txt",sep=""),ncolumns=3,sep='\t')
}
else j=j-1
}
#t2 <- proc.time()
#print(t2-t1)
#write out the list of final E values for the each of the converged patterns
write(sim_convgdE,file=paste(path3,"sim_convgdE",".txt",sep=""),ncolumns=1,sep='\t')
# re-read each simulated pattern and keep only the points inside the inner block
for (j in 1:numPatterns) {
P=read.table(paste(path3,"simPP",j,".txt",sep=""),header=F)
block = apply( P,1,function(z){all((l_block<=z)&(z<=u_block))} )
P_block = P[block,]
write(t(P_block),file=paste(path3,"simPP_block",j,".txt",sep=""),ncolumns=3,sep='\t')
}
########plot a dynamic, interactive window to visualise the first RyR simulation
P=read.table(paste(path3,"simPP",1,".txt",sep=""),header=F)
library(rgl)
r3dDefaults$windowRect <- c(0,50, 500, 500)
plot3d(P)
library(tcltk)
tkmessageBox(title="",message="Rotate the plot by dragging with the mouse. Click OK to close",type="ok")
| /source/ryr-simulator.R | no_license | QILINY/RyR-simulator | R | false | false | 13,741 | r | # Modified/corrected/commented by Vijay Rajagopal
# - added split in radial versus axial distance of RyR cluster to Z-disc
# - added termination criteria
# - corrected and validated code
# Corrected by Cameron Walker
# - sped up (looping removed where possible)
# - torus metric edge correction implemented
# - quantiles added to Energy
# - iterative updating of metrics implemented
# Original ideas developed in collaboration with Evan Blumgart 02/02/11 (refer to Masters thesis from University of Auckland, 2011)
# This code implements the Reconstruction Algorithm using the mean and quantiles of the axial and radial distances of RyR clusters from the z-discs and the nearest neighbour distances of the RyR clusters as modelling metrics.
# FUTURE FEATURE: Can also using distance function and nearest neighbour distance variances as metrics
#The code first reads in the RyR and z-disk data from an experiment and calculates the nearest neighbour and distance functions.
#This is then set up as the target statistic that the reconstruction algorithm must recreate on a new z-disk dataset from a different experiement
#with no RyRs on it. It basically assumes that the RyR characteristics of the first experimental data is typical of the distribution in these cells. This assumption was validated in recent paper (submitted DATE)
#################
#options(warn=2) - uncomment this for debugging purposes
##################
###################################################
#CHANGE THIS TO POINT TO WHERE YOUR local machine ryr-simulator github source directory is.
###################################################
#setwd("/Users/xxxxxx/src/ryr-simulator/source")
# Load run configuration. NOTE(review): path2/path3/path4 are commented out
# below and N is never defined uncommented in this file -- presumably all of
# them come from settings.R; confirm its contents.
source("settings.R")
path=getwd()
source(paste(path,"/nnd-calculators.R",sep="")) #additional functions for calculating nearest-neighborhood distances.
# Additional paths for input and output files. Master is the cell from which statistics are extracted. Target is the cell onto which RyR clusters will be simulated
#path2="/../input-files/master-cell/"
#path3="/../output-files/target-cell/"
#path4="/../input-files/target-cell/"
# read in the coordinates of all the observed RyR's inside sampling box of the experimental data (master cell) - stored in a file X.txt (read in micron and pixel versions)
X=read.csv(paste(path2,"X_micron.txt",sep=""),header=T)
X_pix=read.csv(paste(path2,"X_pixel.txt",sep=""),header=T)
# read in whole RyR data cloud of the experimental data stored in a file allX.txt
allX=read.csv(paste(path2,"allX_micron.txt",sep=""),header=T)
allX_pix=read.csv(paste(path2,"allX_pixel.txt",sep=""),header=T)
# read in non-myofibril voxels of the experimental data stored in a file W.txt
W=read.csv(paste(path2,"W_micron.txt",sep=""),header=T)
w=read.csv(paste(path2,"W_pixel.txt",sep=""),header=T)
# read in distance function of voxels in W for the experimental data. stored in a file d.txt
drad=read.csv(paste(path2,"d_radial_micron.txt",sep=""),header=T) #Radial distance from given voxel (W) to z-disk
daxi=read.csv(paste(path2,"d_axial_micron.txt",sep=""),header=T) #Axial distance from given voxel (W) to z-disk
# define box boundaries of the experimental data that we are using to calculate nearest neighbour and distance function statistics.
#note that directions x1, y1 and z1 have different meanings in different image processing/stats processing codes. So, when reading a file into this code
#be aware what coordinate system was used in the code that generated that image and the coordinate system used in this code.
l=apply(W,2,min)
u=apply(W,2,max)
vol_obsBox <- prod(u-l)
#Would like to use allX and X, but do not have any of the distance information
# for allX, so will treat X as allX and take a smaller block within X as X
# NOTE(review): the two lines below scale coordinates about the point (u-l)/2,
# which equals the box centre only when l = 0; confirm that W coordinates start
# at the origin if an exact central-90% block is intended.
u_block = 0.9*(u-(u-l)/2)+(u-l)/2
l_block = 0.9*(l-(u-l)/2)+(u-l)/2
block = apply( X,1,function(z){all((l_block<=z)&(z<=u_block))} )
X_block = X[block,]
allX = X
allX_pix=X_pix
X=X_block
# define voxel resolution
# (edge length per voxel in x/y/z; presumably microns per pixel -- confirm
# against the imaging metadata)
resx = 0.0732157
resy = 0.0732157
resz = 0.053535
res <-c(resx,resy,resz)
#set up look up table of axial and radial distances from each available voxel for RyR cluster simulation to z-disc
# Drad/Daxi are dense 3D arrays indexed by pixel coordinate; only the voxels
# listed in w receive a value, all other entries stay NA.
Drad = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
Drad[as.matrix(w)]<-drad$d
Daxi = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
Daxi[as.matrix(w)]<-daxi$d
#number of measures to compare
# (each *Measure vector below holds: [1] mean, [2] sd, [3:9] the seven
# quantiles 12.5%, 25%, ..., 87.5%)
numMeasures = 9
#number of simulation patterns to generate.
numPatterns = 1
# compute the observed measures for distance (radial, axial and nearest-neighborhood)
obsdrad <- Drad[as.matrix(allX_pix)]
oDistRadMeasure=numeric(numMeasures)
oDistRadMeasure[1] <- mean(obsdrad)
oDistRadMeasure[2] <- sd(obsdrad)
oDistRadMeasure[3:9] <- quantile(obsdrad,seq(0.125,0.875,length=7))
#axial distances
obsdaxi <- Daxi[as.matrix(allX_pix)]
oDistAxiMeasure=numeric(numMeasures)
oDistAxiMeasure[1] <- mean(obsdaxi)
oDistAxiMeasure[2] <- sd(obsdaxi)
oDistAxiMeasure[3:9] <- quantile(obsdaxi,seq(0.125,0.875,length=7))
####replaced X with allX - July 22nd 2012
####introduced allX_pix instead of using res - Oct 23 2012
# compute mean of observed measures for nearest neigbour distance
#obsNNd = findObsNNDist_CGW(X,allX,l,u) # correct for this data - can't simulate all X as W is too small
#nearest neighborhood distances.
obsNNd = findObsNNDist_CGW(allX,allX,l,u)
oNNdMeasure=numeric(numMeasures)
oNNdMeasure[1] <- mean(obsNNd)
oNNdMeasure[2] <- sd(obsNNd)
oNNdMeasure[3:9] <- quantile(obsNNd,seq(0.125,0.875,length=7))
#set up histogram parameters
filename="master_cell"
main = "Master Cell"
#breaks in distance for each distance type
nndbreaks = c(0,0.2,0.4,0.6,0.8,1.2)
radbreaks = c(0,0.2,0.4,0.6,0.8)
# Start PNG device driver to save output to figure.png
png(filename=paste(path3,filename,"_obsdrad.png",sep=""), height=295, width=300,
bg="white")
hist(obsdrad,breaks=radbreaks,xlab="Radial Distance of RyR cluster from Z-disc",main=main)
dev.off()
png(filename=paste(path3,filename,"_obsdaxi.png",sep=""), height=295, width=300,
bg="white")
hist(obsdaxi,breaks="Scott",xlab="Axial Distance of RyR cluster from Z-disc",main=main)
dev.off()
png(filename=paste(path3,filename,"_obsnnd.png",sep=""), height=295, width=300,
bg="white")
hist(obsNNd,breaks=nndbreaks,xlab="Nearest Neighbour Distances for RyR clusters",main=main)
dev.off()
# remember the master cell's box volume before it is recomputed for the target cell below
oldVol_obsBox = vol_obsBox
#####introduce intensity factor - October 11 2012
factor = 1 #no change of intensity
# factor = 0.7 #70% intensity
###FOLOWING USED IF CHANGING CELL - commented out here
##read in info for vijay's cell
##w = read.table(paste(path3,"Cell10_available_lowres_myo_mito_stack_correct_2012.txt",sep=""),header=T)
# From here on W/w/Drad/Daxi/l/u refer to the TARGET cell (read from path4);
# the master-cell statistics computed above (o*Measure) remain the targets.
W=read.csv(paste(path4,"W_micron.txt",sep=""),header=T)
w = read.csv(paste(path4,"W_pixel.txt",sep=""),header=T)
#W = (w - 1)*res
##d = read.table(paste(path3,"Cell10_dFunc_avs_lowres_myo_mito_stack_correct_2012.txt",sep=""),header=T)
drad = read.csv(paste(path4,"d_radial_micron.txt",sep=""),header=T)
Drad = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
#D = array(dim=u/res+1)
# abs(): the target-cell files may hold signed distances; only the magnitude is used
Drad[as.matrix(w)]<-abs(drad$d)
#
daxi = read.csv(paste(path4,"d_axial_micron.txt",sep=""),header=T)
Daxi = array(dim=c(max(w[,1]),max(w[,2]),max(w[,3])))
#D = array(dim=u/res+1)
Daxi[as.matrix(w)]<-abs(daxi$d)
#
l=apply(W,2,min)
u=apply(W,2,max)
vol_obsBox <- prod(u-l)
u_block = 0.9*(u-(u-l)/2)+(u-l)/2
l_block = 0.9*(l-(u-l)/2)+(u-l)/2
#u_block = 0.9*(u-l)
#l_block = 0.1*(u-l)
#t1 <- proc.time() #timer
# NOTE(review): N (number of clusters to simulate) is used below but its
# computation here is commented out -- presumably settings.R defines it; confirm.
#N=floor((length(allX$x)/oldVol_obsBox)*vol_obsBox*factor)
# Reconstruction loop: for each pattern, start from a random placement of N
# clusters on the target cell's available voxels (W) and greedily swap one
# point at a time, accepting a proposal only when the squared-error energy
# against the observed master-cell measures decreases, until propE <= etol
# or numIter proposals have been made.
sim_convgdE = numeric(numPatterns)
for (j in 1:numPatterns) {
# define initial simulated point pattern and data structures
# NOTE(review): N is used here but never defined (uncommented) in this file --
# see the commented-out computation above; presumably settings.R supplies it.
simX=matrix(0,nrow=N,ncol=3)
ptsIndex=sample(1:length(W$x),N)
simX=as.matrix(W[ptsIndex,])
avail=(1:length(W$x))[-ptsIndex]
simdrad=Drad[as.matrix(w[ptsIndex,])]
simDistRadMeasure=numeric(numMeasures)
simDistRadMeasure[1] <- mean(simdrad)
simDistRadMeasure[2] <- sd(simdrad)
simDistRadMeasure[3:9] <- quantile(simdrad,seq(0.125,0.875,length=7))
simdaxi=Daxi[as.matrix(w[ptsIndex,])]
simDistAxiMeasure=numeric(numMeasures)
simDistAxiMeasure[1] <- mean(simdaxi)
simDistAxiMeasure[2] <- sd(simdaxi)
simDistAxiMeasure[3:9] <- quantile(simdaxi,seq(0.125,0.875,length=7))
simNNd = findObsNNDist_CGW(simX,simX,l,u)
indSimNNd= findWhichObsNNDist_CGW(simX,simX,l,u)
simNNdMeasure=numeric(numMeasures)
simNNdMeasure[1] <- mean(simNNd)
simNNdMeasure[2] <- sd(simNNd)
simNNdMeasure[3:9] <- quantile(simNNd,seq(0.125,0.875,length=7))
# energy = sum of squared differences between observed and simulated measures
E <- sum((c(oDistRadMeasure[1:numMeasures],oDistAxiMeasure[1:numMeasures],oNNdMeasure[1:numMeasures])-c(simDistRadMeasure[1:numMeasures],simDistAxiMeasure[1:numMeasures],simNNdMeasure[1:numMeasures]))^2)
propE<-E;
cat(propE)
propSimDistRadMeasure=numeric(numMeasures)
propSimDistAxiMeasure=numeric(numMeasures)
propSimNNdMeasure=numeric(numMeasures)
i=0;
numIter = 200000
etol = 0.001
while((i<=numIter)&&(propE>etol) ) {
# while((propE>0.00005) ) {
i=i+1;
if (i%%100 == 0) {
cat(i,E,"\n")
}
draw1=sample(1:length(avail),1) #draw from sample of available point indices
draw2=sample(1:length(ptsIndex),1) #draw from index of ryr points currently estimated
propSimX = simX #set up proposed sim array strucure
propIndSimNNd = indSimNNd #indices of nearest neighbors
propSimX[draw2,]= as.matrix(W[avail[draw1],]) #put in coordinates of randomly chosen new point into proposed sim
propSimNNd = simNNd
#which points had removed point as nearest
# (match() yields NA where draw2 is absent; which(... > 0) drops those NAs)
gone=which(sapply(indSimNNd,function(z){match(draw2,z)})>0)
#find distance from each remaining point to new point
ndt = findObsNNDist_CGW(as.matrix(propSimX[-draw2,]),t(as.matrix(propSimX[draw2,])),l,u) #find nearest neighbor distances between prop sim x's points to new point draw2
#if new point is nearer than nearest, update
propSimNNd[-draw2] = apply(cbind(propSimNNd[-draw2],ndt),1,min)
# nested replacement: re-point the NN index to draw2 at exactly those
# positions (draw2 excluded) where the new point became the nearest one
propIndSimNNd[-draw2][which(propSimNNd[-draw2]==ndt)]=draw2
#store distance of nearest point to new point
propSimNNd[draw2] = min(ndt)
propIndSimNNd[draw2] = which.min(ndt)
#recalculate nearest point for any pts which had removed point as nearest
if (length(gone)>0) {
for (k in 1:length(gone)) {
#cat("test1",propSimNNd[gone[k]],"\n")
propSimNNd[gone[k]] = findObsNNDist_CGW(t(as.matrix(propSimX[gone[k],])),as.matrix(propSimX[-gone[k],]),l,u)
#cat("test2",propSimNNd[gone[k]],"\n")
propIndSimNNd[gone[k]] = findWhichObsNNDist_CGW(t(as.matrix(propSimX[gone[k],])),as.matrix(propSimX[-gone[k],]),l,u)
#cat("test3",propIndSimNNd[gone[k]],"\n")
}
}
propSimdrad = simdrad
propSimdrad[draw2] = Drad[as.matrix(w[avail[draw1],])]
propSimDistRadMeasure[1] <- mean(propSimdrad)
propSimDistRadMeasure[2] <- sd(propSimdrad)
propSimDistRadMeasure[3:9] <- quantile(propSimdrad,seq(0.125,0.875,length=7))
propSimdaxi = simdaxi
propSimdaxi[draw2] = Daxi[as.matrix(w[avail[draw1],])]
propSimDistAxiMeasure[1] <- mean(propSimdaxi)
propSimDistAxiMeasure[2] <- sd(propSimdaxi)
propSimDistAxiMeasure[3:9] <- quantile(propSimdaxi,seq(0.125,0.875,length=7))
propSimNNdMeasure[1] <- mean(propSimNNd)
propSimNNdMeasure[2] <- sd(propSimNNd)
propSimNNdMeasure[3:9] <- quantile(propSimNNd,seq(0.125,0.875,length=7))
propE = sum((c(oDistRadMeasure[1:numMeasures],oDistAxiMeasure[1:numMeasures],oNNdMeasure[1:numMeasures])-c(propSimDistRadMeasure[1:numMeasures],propSimDistAxiMeasure[1:numMeasures],propSimNNdMeasure[1:numMeasures]))^2)
if (propE < E) { # no probability of non-acceptance
cat(propE,"\n")
E <- propE
simDistRadMeasure <- propSimDistRadMeasure # this is the new accepted simulated distance function mean
simDistAxiMeasure <- propSimDistAxiMeasure # this is the new accepted simulated distance function mean
simNNdMeasure <- propSimNNdMeasure # this is the new accepted simulated mean distances mean
simX <- propSimX # this is the new accepted simulated point pattern
# swap the accepted voxel: the new point leaves avail, the replaced one returns
temp = ptsIndex[draw2]
ptsIndex[draw2] = avail[draw1]
avail[draw1] = temp
simdrad = propSimdrad
simdaxi = propSimdaxi
simNNd = propSimNNd
indSimNNd = propIndSimNNd
}
}
# NOTE(review): the else branch below is dead code (the condition is the
# constant 1); even if reached, j=j-1 would not repeat an R for-loop iteration.
if(1){
sim_convgdE[j] <- E
write(t(simX),file=paste(path3,"simPP",j,".txt",sep=""),ncolumns=3,sep='\t')
}
else j=j-1
}
#t2 <- proc.time()
#print(t2-t1)
#write out the list of final E values for the each of the converged patterns
write(sim_convgdE,file=paste(path3,"sim_convgdE",".txt",sep=""),ncolumns=1,sep='\t')
# re-read each simulated pattern and keep only the points inside the inner block
for (j in 1:numPatterns) {
P=read.table(paste(path3,"simPP",j,".txt",sep=""),header=F)
block = apply( P,1,function(z){all((l_block<=z)&(z<=u_block))} )
P_block = P[block,]
write(t(P_block),file=paste(path3,"simPP_block",j,".txt",sep=""),ncolumns=3,sep='\t')
}
########plot a dynamic, interactive window to visualise the first RyR simulation
P=read.table(paste(path3,"simPP",1,".txt",sep=""),header=F)
library(rgl)
r3dDefaults$windowRect <- c(0,50, 500, 500)
plot3d(P)
library(tcltk)
tkmessageBox(title="",message="Rotate the plot by dragging with the mouse. Click OK to close",type="ok")
|
# Build the square second-derivative matrix H of the model, filling only the
# blocks that involve the noise standard deviations (Sigma) and the
# sub-regression coefficients.
# B is the coefficient matrix (original note: "p+1 x p"); Sigma holds one
# standard deviation per dependent (sub-regressed) column of X.
MakeH <- function(X = X, Z = Z, B = B, Sigma = Sigma) {
  # Prepend an intercept column to X and lift Z so that every sub-regression
  # gains a constant term (the first row of the lifted Z).
  X = cbind(1, X)
  dep = which(colSums(Z) != 0)   # columns explained by a sub-regression
  Z = rbind(0, Z)
  Z[1, dep] = 1                  # constant term added to each sub-regression
  Z = cbind(0, Z)
  dep = dep + 1                  # shift indices past the intercept column
  nDep = length(dep)
  nLinks = sum(Z != 0)           # total number of regression coefficients
  nFree = ncol(X) - nDep         # free columns (intercept included)
  nObs = nrow(X)
  dimH = nFree + nDep + nLinks
  H = matrix(0, nrow = dimH, ncol = dimH)
  links = which(Z != 0, arr.ind = TRUE)
  for (k in 1:nDep) {
    preds = links[links[, 2] == dep[k], 1]   # predictors of column dep[k]
    resid = X[, dep[k]] - X[, preds] %*% B[preds, dep[k]]
    # second derivative w.r.t. Sigma[k] (diagonal entry)
    H[k, k] = -nObs / (Sigma[k]^2) + (1 / (Sigma[k]^4)) * t(resid) %*% resid
    # column offset: number of coefficients of the preceding sub-regressions
    before = nrow(links[links[, 2] < dep[k], ])
    cols = ((before + 1):(before + sum(Z[, dep[k]]))) + nDep
    # mixed Sigma[k] / coefficient block (kept symmetric)
    H[k, cols] = (-2 / Sigma[k]^3) * t(X[, preds]) %*% resid
    H[cols, k] = H[k, cols]
    # coefficient / coefficient block
    H[cols, cols] = (-1 / (Sigma[k]^2)) * t(X[, preds]) %*% X[, preds]
  }
  return(H)
}
| /R/MakeH.R | no_license | cran/CorReg | R | false | false | 1,207 | r | # 'B matrice p+1 x p
# Build the square second-derivative matrix H of the model, filling only the
# blocks that involve the noise standard deviations (Sigma) and the
# sub-regression coefficients.
# B is the coefficient matrix (original note: "p+1 x p"); Sigma holds one
# standard deviation per dependent (sub-regressed) column of X.
MakeH <- function(X = X, Z = Z, B = B, Sigma = Sigma) {
  # Prepend an intercept column to X and lift Z so that every sub-regression
  # gains a constant term (the first row of the lifted Z).
  X = cbind(1, X)
  dep = which(colSums(Z) != 0)   # columns explained by a sub-regression
  Z = rbind(0, Z)
  Z[1, dep] = 1                  # constant term added to each sub-regression
  Z = cbind(0, Z)
  dep = dep + 1                  # shift indices past the intercept column
  nDep = length(dep)
  nLinks = sum(Z != 0)           # total number of regression coefficients
  nFree = ncol(X) - nDep         # free columns (intercept included)
  nObs = nrow(X)
  dimH = nFree + nDep + nLinks
  H = matrix(0, nrow = dimH, ncol = dimH)
  links = which(Z != 0, arr.ind = TRUE)
  for (k in 1:nDep) {
    preds = links[links[, 2] == dep[k], 1]   # predictors of column dep[k]
    resid = X[, dep[k]] - X[, preds] %*% B[preds, dep[k]]
    # second derivative w.r.t. Sigma[k] (diagonal entry)
    H[k, k] = -nObs / (Sigma[k]^2) + (1 / (Sigma[k]^4)) * t(resid) %*% resid
    # column offset: number of coefficients of the preceding sub-regressions
    before = nrow(links[links[, 2] < dep[k], ])
    cols = ((before + 1):(before + sum(Z[, dep[k]]))) + nDep
    # mixed Sigma[k] / coefficient block (kept symmetric)
    H[k, cols] = (-2 / Sigma[k]^3) * t(X[, preds]) %*% resid
    H[cols, k] = H[k, cols]
    # coefficient / coefficient block
    H[cols, cols] = (-1 / (Sigma[k]^2)) * t(X[, preds]) %*% X[, preds]
  }
  return(H)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{lingr_article}
\alias{lingr_article}
\title{Lingr custom format.}
\usage{
lingr_article(...)
}
\arguments{
\item{...}{Arguments passed to \link[rmarkdown]{pdf_document}.}
}
\description{
A custom format for linguistics articles. It provides the standard pandoc
YAML parameter, plus three new parameters: \code{version} for the document's
version, \code{version-date} for the date of the document's version (these parameters must be included as subkeys in \code{params}), and
\code{hide-version} which does not print the version on the document if set
to \code{true}.
}
| /man/lingr_article.Rd | permissive | stefanocoretta/lingr | R | false | true | 665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{lingr_article}
\alias{lingr_article}
\title{Lingr custom format.}
\usage{
lingr_article(...)
}
\arguments{
\item{...}{Arguments passed to \link[rmarkdown]{pdf_document}.}
}
\description{
A custom format for linguistics articles. It provides the standard pandoc
YAML parameter, plus three new parameters: \code{version} for the document's
version, \code{version-date} for the date of the document's version (these parameters must be included as subkeys in \code{params}), and
\code{hide-version} which does not print the version on the document if set
to \code{true}.
}
|
# Package-internal constants: identifier syntax patterns plus the fixed names
# emitted by the code generators.
rodeoConst <- list(
  # Regular expressions for recognising valid identifiers
  identifierPatterns = list(
    core = "[a-zA-Z]([a-zA-Z0-9_])*",
    before = "(^|[^a-zA-Z0-9_])",
    after = "([^a-zA-Z0-9_]|$)"
  ),
  # Names used in generated code
  genIdent = list(
    # value vectors for variables, parameters, process rates, derivatives
    vec = c(vars = "var", pars = "par", pros = "pro", drvs = "dydt"),
    # index vectors for the 0D case (i.e. the first level)
    ivec0D = c(vars = "ivar0D", pars = "ipar0D", pros = "ipro0D"),
    # index vectors for the current level
    ivec = c(vars = "ivar", pars = "ipar", pros = "ipro"),
    # (pointers to) indices of variables in the first level
    ilist0D = c(vars = "v0D", pars = "p0D", pros = "r0D"),
    # (pointers to) indices of variables in the current level
    ilist = c(vars = "v", pars = "p", pros = "r"),
    # dimension constants
    len = c(vars = "NVAR", pars = "NPAR", pros = "NPRO"),
    # number of levels / index of the current level
    nLevels = "NLVL", iLevel = "level"
  ),
  # Names reserved for use in model expressions
  reservedNames = c(
    time = "time",   # external time
    left = "left",   # reference to the left element in a 1D model
    right = "right"  # reference to the right element in a 1D model
  ),
  # Supported target languages
  lang = c(r = "r", fortran = "f95")
)
| /rodeo/R/internal_constants.r | no_license | ingted/R-Examples | R | false | false | 1,264 | r |
# Package-internal constants: identifier syntax patterns plus the fixed names
# emitted by the code generators.
rodeoConst <- list(
  # Regular expressions for recognising valid identifiers
  identifierPatterns = list(
    core = "[a-zA-Z]([a-zA-Z0-9_])*",
    before = "(^|[^a-zA-Z0-9_])",
    after = "([^a-zA-Z0-9_]|$)"
  ),
  # Names used in generated code
  genIdent = list(
    # value vectors for variables, parameters, process rates, derivatives
    vec = c(vars = "var", pars = "par", pros = "pro", drvs = "dydt"),
    # index vectors for the 0D case (i.e. the first level)
    ivec0D = c(vars = "ivar0D", pars = "ipar0D", pros = "ipro0D"),
    # index vectors for the current level
    ivec = c(vars = "ivar", pars = "ipar", pros = "ipro"),
    # (pointers to) indices of variables in the first level
    ilist0D = c(vars = "v0D", pars = "p0D", pros = "r0D"),
    # (pointers to) indices of variables in the current level
    ilist = c(vars = "v", pars = "p", pros = "r"),
    # dimension constants
    len = c(vars = "NVAR", pars = "NPAR", pros = "NPRO"),
    # number of levels / index of the current level
    nLevels = "NLVL", iLevel = "level"
  ),
  # Names reserved for use in model expressions
  reservedNames = c(
    time = "time",   # external time
    left = "left",   # reference to the left element in a 1D model
    right = "right"  # reference to the right element in a 1D model
  ),
  # Supported target languages
  lang = c(r = "r", fortran = "f95")
)
|
ESPSuit=function(value, crop="wheat"){
#ESP suitability rating for cereals
if(crop=="wheat"){suitclass=ifelse(value>10,4,ifelse(value>5,3,ifelse(value>3,2,1)))}
else if(crop=="maize"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="rice"){suitclass=ifelse(value>50,4,ifelse(value>40,3,ifelse(value>15,2,1)))}
else if(crop=="sorghum"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="millet"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="barley"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="oat"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for legumes
else if(crop=="bean"){suitclass=ifelse(value>20,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="groundnut"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="gram"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="soybean"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="lentil"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for forests
else if(crop=="poplar"){suitclass=ifelse(value>20,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="grevillea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sesbania"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="calliandra"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="leucaena"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="acacia"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="eucalyptus"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="teak"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="maple"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="ash"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for tuber crops
else if(crop=="potato"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sweetpotato"){suitclass=ifelse(value>10,4,ifelse(value>6,3,ifelse(value>1,2,1)))}
else if(crop=="cassava"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="carrot"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="turnip"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="radish"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for oilcrops
else if(crop=="sesame"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sunflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="castor"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="oilpalm"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="mustard"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="safflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="olive"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="rapeseed"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for fruit crops
else if(crop=="mango"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="citrus"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="grape"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pomegranate"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="banana"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pineaple"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>5,2,1)))}
else if(crop=="avocado"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="watermelon"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>8,2,1)))}
else if(crop=="melon"){suitclass=ifelse(value>25,4,ifelse(value>20,3,ifelse(value>15,2,1)))}
else if(crop=="pawpaw"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>8,2,1)))}
#ESP suitability for nut crops
else if(crop=="cashew"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
else if(crop=="pistachio"){suitclass=ifelse(value>30,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
else if(crop=="almond"){suitclass=ifelse(value>30,4,ifelse(value>20,3,ifelse(value>5,2,1)))}
else if(crop=="coconut"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
#ESP suitability rating for vegetable crops
else if(crop=="tomato"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cabbage"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="vegetable"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="broccoli"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cauliflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="okra"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for industrial crops
else if(crop=="cotton"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sugarcane"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="coffee"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="rubber"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="saffron"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tobacco"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pyrethrum"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="jute"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for spices
else if(crop=="chilli"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pepper"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="ginger"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="lemongrass"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="vanilla"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tumeric"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cardamom"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="onion"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for other crops
else if(crop=="alfalfa"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>8,2,1)))}
else if(crop=="rose"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="jasmine"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for fleshy crops
else if(crop=="yam"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="butternut"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="squash"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pumpkin"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
return(suitclass)
}
| /R/ESPSuit.R | no_license | cran/soilassessment | R | false | false | 8,412 | r | ESPSuit=function(value, crop="wheat"){
#ESP suitability rating for cereals
if(crop=="wheat"){suitclass=ifelse(value>10,4,ifelse(value>5,3,ifelse(value>3,2,1)))}
else if(crop=="maize"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="rice"){suitclass=ifelse(value>50,4,ifelse(value>40,3,ifelse(value>15,2,1)))}
else if(crop=="sorghum"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="millet"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="barley"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="oat"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for legumes
else if(crop=="bean"){suitclass=ifelse(value>20,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="groundnut"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="gram"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="soybean"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="lentil"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for forests
else if(crop=="poplar"){suitclass=ifelse(value>20,4,ifelse(value>15,3,ifelse(value>10,2,1)))}
else if(crop=="grevillea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sesbania"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="calliandra"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="leucaena"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="acacia"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="eucalyptus"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="teak"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="maple"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="ash"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for tuber crops
else if(crop=="potato"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sweetpotato"){suitclass=ifelse(value>10,4,ifelse(value>6,3,ifelse(value>1,2,1)))}
else if(crop=="cassava"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="carrot"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="turnip"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="radish"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for oilcrops
else if(crop=="sesame"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sunflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="castor"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="oilpalm"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>2,2,1)))}
else if(crop=="mustard"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="safflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="olive"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="rapeseed"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for fruit crops
else if(crop=="mango"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="citrus"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="grape"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pomegranate"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="banana"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pineaple"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>5,2,1)))}
else if(crop=="avocado"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="watermelon"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>8,2,1)))}
else if(crop=="melon"){suitclass=ifelse(value>25,4,ifelse(value>20,3,ifelse(value>15,2,1)))}
else if(crop=="pawpaw"){suitclass=ifelse(value>25,4,ifelse(value>15,3,ifelse(value>8,2,1)))}
#ESP suitability for nut crops
else if(crop=="cashew"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
else if(crop=="pistachio"){suitclass=ifelse(value>30,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
else if(crop=="almond"){suitclass=ifelse(value>30,4,ifelse(value>20,3,ifelse(value>5,2,1)))}
else if(crop=="coconut"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>10,2,1)))}
#ESP suitability rating for vegetable crops
else if(crop=="tomato"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cabbage"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="vegetable"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="broccoli"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cauliflower"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="okra"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for industrial crops
else if(crop=="cotton"){suitclass=ifelse(value>20,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="sugarcane"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tea"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="coffee"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="rubber"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="saffron"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tobacco"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pyrethrum"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="jute"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for spices
else if(crop=="chilli"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pepper"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="ginger"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="lemongrass"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="vanilla"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="tumeric"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="cardamom"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="onion"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for other crops
else if(crop=="alfalfa"){suitclass=ifelse(value>50,4,ifelse(value>20,3,ifelse(value>8,2,1)))}
else if(crop=="rose"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="jasmine"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
#ESP suitability rating for fleshy crops
else if(crop=="yam"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="butternut"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="squash"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
else if(crop=="pumpkin"){suitclass=ifelse(value>15,4,ifelse(value>10,3,ifelse(value>5,2,1)))}
return(suitclass)
}
|
## Create one function which defines an environment to cache
## a matrix and its invserse, and a second function which uses
## the first function to retrieve calculate the inverse of new
## matricies or retrieve the cached inverse if already created
## Create special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(invrse) inv <<- invrse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Compute inverse of matrix returned by makeCacheMatrix;
## if already calculated retrieve inverse from cache
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)){
message("get cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
| /cachematrix.R | no_license | MallikaJayaraman/ProgrammingAssignment2 | R | false | false | 971 | r | ## Create one function which defines an environment to cache
## a matrix and its invserse, and a second function which uses
## the first function to retrieve calculate the inverse of new
## matricies or retrieve the cached inverse if already created
## Create special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(invrse) inv <<- invrse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Compute inverse of matrix returned by makeCacheMatrix;
## if already calculated retrieve inverse from cache
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)){
message("get cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
require("dplyr")
require("scales")
require("ggplot2")
require("foreach")
args <- commandArgs(TRUE)
filename1 <- args[1]
filename2 <- args[2]
#dfA <- read.csv("../Data/RandomWalkSimulation.csv")
#dfB <- read.csv("../Data/BFSSimulation.csv")
dfA <- read.csv(filename1)
dfB <- read.csv(filename2)
colnames(dfA) <- c("vertices","edges","algorithm","path_length","included_failure","space","time")
colnames(dfB) <- c("vertices","edges","algorithm","path_length","included_failure","space","time")
df <- rbind(dfA,dfB)
df <- tbl_df(df)
df <- arrange(df,included_failure,vertices)
df <- df[2:nrow(df),]
failureDf <- filter(df, included_failure == "True")
#selects the two algorithms being compared
algdf <- arrange(df,algorithm)
algA <- algdf[1,3]
algB <- algdf[nrow(algdf),3]
#png(paste(df$vertices, " Vertex Comparison of Path Length.png"))
graph <- ggplot(df, aes(x=edges,y=path_length)) + geom_line(data=subset(df, algorithm == "bfs"), color="grey") + geom_line(data=subset(df, algorithm == "randomwalk"), color="grey") + geom_jitter(size = .2, aes(color=algorithm)) + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Path Length"))+ xlab("Number of Edges") + ylab("Path Length")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
#png(paste(df$vertices, " Vertex Comparison of Space Per Host.png"))
graph <- ggplot(df, aes(x=edges,y=space)) + geom_jitter(size = .2, aes(color=algorithm))
graph <- graph + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Space Per Host"))+ xlab("Number of Edges") + ylab("Space per host in bytes")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
#png(paste(df$vertices, " Vertex Comparison of Running Time.png"))
graph <- ggplot(df, aes(x=edges,y=time)) + geom_jitter(size = .2, aes(color=algorithm))
graph <- graph + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Running Time in Seconds"))+ xlab("Number of Edges") + ylab("Running Time (Seconds/Query)")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
| /R/compareData.R | permissive | rsbrisci/Math455RandomWalkProject | R | false | false | 2,683 | r | require("dplyr")
require("scales")
require("ggplot2")
require("foreach")
args <- commandArgs(TRUE)
filename1 <- args[1]
filename2 <- args[2]
#dfA <- read.csv("../Data/RandomWalkSimulation.csv")
#dfB <- read.csv("../Data/BFSSimulation.csv")
dfA <- read.csv(filename1)
dfB <- read.csv(filename2)
colnames(dfA) <- c("vertices","edges","algorithm","path_length","included_failure","space","time")
colnames(dfB) <- c("vertices","edges","algorithm","path_length","included_failure","space","time")
df <- rbind(dfA,dfB)
df <- tbl_df(df)
df <- arrange(df,included_failure,vertices)
df <- df[2:nrow(df),]
failureDf <- filter(df, included_failure == "True")
#selects the two algorithms being compared
algdf <- arrange(df,algorithm)
algA <- algdf[1,3]
algB <- algdf[nrow(algdf),3]
#png(paste(df$vertices, " Vertex Comparison of Path Length.png"))
graph <- ggplot(df, aes(x=edges,y=path_length)) + geom_line(data=subset(df, algorithm == "bfs"), color="grey") + geom_line(data=subset(df, algorithm == "randomwalk"), color="grey") + geom_jitter(size = .2, aes(color=algorithm)) + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Path Length"))+ xlab("Number of Edges") + ylab("Path Length")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
#png(paste(df$vertices, " Vertex Comparison of Space Per Host.png"))
graph <- ggplot(df, aes(x=edges,y=space)) + geom_jitter(size = .2, aes(color=algorithm))
graph <- graph + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Space Per Host"))+ xlab("Number of Edges") + ylab("Space per host in bytes")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
#png(paste(df$vertices, " Vertex Comparison of Running Time.png"))
graph <- ggplot(df, aes(x=edges,y=time)) + geom_jitter(size = .2, aes(color=algorithm))
graph <- graph + geom_smooth(data=subset(df, algorithm == "randomwalk"),se=TRUE,color="green") + geom_smooth(data=subset(df, algorithm == "bfs"),se=TRUE, color="red")
graph <- graph + ggtitle(paste(df$vertices, " Vertex Comparison of Running Time in Seconds"))+ xlab("Number of Edges") + ylab("Running Time (Seconds/Query)")
graph <- graph + scale_colour_hue(l=10) + geom_vline(data=failureDf,color="red",size=.2,aes(xintercept=edges))
graph
#dev.off()
|
#Make take a while to install
# install.packages("psych")
# install.packages("ggplot2")
library(psych)
library(ggplot2)
## Read the data into an object named data
data <- read.csv('new_prof_data.csv')
## Examine data:
names(data)
str(data)
summary(data)
head(data)
names(data) ## look up index
comp.data <- data[,3:8] ## extract data
names(comp.data) ## check
round(cor(comp.data), digits = 3) ## produces correlation matrix
pcaSolution <- prcomp(comp.data, center = TRUE, scale. = TRUE)
## Produced the object with standard deviatons of all variables
## and Rotation -- or the loadings on the principal components
print(pcaSolution) ## prints PCA solution
pcaSolution$sdev^2
plot(pcaSolution, type = "l", pch = 16, lwd = 2) ## generates a scree plot
| /PCA.R | no_license | abagherp/BigDataU | R | false | false | 761 | r | #Make take a while to install
# install.packages("psych")
# install.packages("ggplot2")
library(psych)
library(ggplot2)
## Read the data into an object named data
data <- read.csv('new_prof_data.csv')
## Examine data:
names(data)
str(data)
summary(data)
head(data)
names(data) ## look up index
comp.data <- data[,3:8] ## extract data
names(comp.data) ## check
round(cor(comp.data), digits = 3) ## produces correlation matrix
pcaSolution <- prcomp(comp.data, center = TRUE, scale. = TRUE)
## Produced the object with standard deviatons of all variables
## and Rotation -- or the loadings on the principal components
print(pcaSolution) ## prints PCA solution
pcaSolution$sdev^2
plot(pcaSolution, type = "l", pch = 16, lwd = 2) ## generates a scree plot
|
#### LOAD PACKAGES ####
library(tidyverse) # General data manipulation
library(lme4) # Mixed-effects modeling
library(lmerTest) # P-values for lme objects
library(table1) # For summary table
library(performance) # For model checking
library(influence.ME) # For Cook's influence
#### READ DATA ####
load("shp-data.RData")
#### FILTER FARMS ####
# Generate list of farms to filter by for Maria's interest in cover crop type
farmSub <- c(
'SHP2014IA001','SHP2014IA002','SHP2014IA004','SHP2014IA005','SHP2014IL001','SHP2014IL002','SHP2014IL005','SHP2014IN002',
'SHP2015IA003','SHP2015IA004','SHP2015IA005','SHP2015IA006','SHP2015IA007','SHP2015IA008','SHP2015IL001','SHP2015IL003',
'SHP2015IL004','SHP2015OH001','SHP2016IA002','SHP2016IA003','SHP2016IA004','SHP2016IA005','SHP2016IL003','SHP2016IN003',
'SHP2016IN005','SHP2016IN008','SHP2017IA002','SHP2017IA003','SHP2017IA004','SHP2017IA006','SHP2017IL003','SHP2017IL005',
'SHP2017IL009','SHP2017IN003','SHP2017IN004','SHP2017MO004','SHP2017MO005','SHP2017NE001','SHP2017NE002','SHP2017NE004',
'SHP2017OH003','SHP2017WI003','SHP2017WI004'
)
# final <- final %>% filter(IDtoUse %in% farmSub)
#### SUMMARY STATISTICS #####
summary.dat <- final %>%
select(
soil_texture_sand:water_capacity, aggregate_stability, ace_soil_protein_index, final_OM, respiration,
active_carbon, yrsTrt, `Cover Crop`, State
)
my.render.cont <- function(x) {
with(stats.apply.rounding(stats.default(x), digits = 2), c("",
"Mean (SD)" = sprintf("%s (± %s)", MEAN, SD)
))
}
table1(~ as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + as.numeric(soil_texture_sand) + water_capacity
+ aggregate_stability + ace_soil_protein_index + final_OM + respiration + active_carbon + yrsTrt
| State,
data = summary.dat,
render.continuous = my.render.cont,
overall = NULL
)
table1(~ water_capacity + aggregate_stability + ace_soil_protein_index + final_OM + respiration + active_carbon
| `Cover Crop` + yrsTrt,
data = summary.dat %>% filter(yrsTrt != 0),
render.continuous = my.render.cont,
overall = NULL)
#### DATA MANIPULATION ####
## Create dummy variables
final <- final %>%
mutate(
CC = ifelse(`Cover Crop`=="Yes",1,0),
)
## Variable standardization
data.std <- final %>%
mutate(
c.CC = CC - mean(CC),
s.yrsTrt = (yrsTrt - mean(yrsTrt)) / 2 * sd(yrsTrt),
s.yr = (as.numeric(smpl_yr)-mean(as.numeric(smpl_yr))) / 2*sd(as.numeric(smpl_yr)),
s.clay = (as.numeric(soil_texture_clay) - mean(as.numeric(soil_texture_clay))) / 2 * sd(as.numeric(soil_texture_clay)),
s.silt = (as.numeric(soil_texture_silt) - mean(as.numeric(soil_texture_silt))) / 2 * sd(as.numeric(soil_texture_silt)),
IDtoUse = IDtoUse
)
#### 1. ALL DATA MODELS ####
#### 1a. Active Carbon ####
# Fit initial model
ac <- lmer(active_carbon ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final)
# Check model
ac %>% r2()
summary(ac)
ac %>% performance::check_model()
#### 1b. Aggregate Stability ####
# Fit initial model
as <- lmer(log(aggregate_stability) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
# Check model
as %>% r2()
as %>% summary()
as %>% check_model()
#### 1c. Protein ####
pro <- lmer(log(ace_soil_protein_index) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final)
pro %>% r2()
pro %>% summary()
pro %>% check_model()
#### 1d. Respiration ####
resp <- lmer(log(respiration) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
resp %>% r2()
resp %>% summary()
resp %>% check_model()
#### 1e. Water Holding Capacity ####
whc <- lmer(water_capacity ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
whc %>% r2()
whc %>% summary()
whc %>% check_model()
#### 1f. Soil Organic Matter ####
som <- lmer(final_OM ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
som %>% r2()
som %>% summary()
som %>% check_model()
#### 2. MODELS BY YEAR ####
#### 2a. Active Carbon ####
# Run model for all data
ac.yr <- list()
for(i in 2015:2019){
ac.yr[[i]] <- lmer(active_carbon ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(as.numeric(smpl_yr)==i)
)
}
ac.yr[[2015]] %>% r2()
ac.yr[[2015]] %>% summary()
ac.yr[[2015]] %>% check_model()
#### 2b. Aggregate Stability ####
as.yr <- list()
for(i in 2015:2019){
as.yr[[i]] <- lmer(log(aggregate_stability) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(as.numeric(smpl_yr)==i)
)
}
as.yr[[2015]] %>% r2()
as.yr[[2015]] %>% summary()
as.yr[[2015]] %>% check_model()
#### 2c. Protein #####
# Model for each year
pro.yr <- list()
for(i in 2015:2019){
pro.yr[[i]] <- lmer(log(ace_soil_protein_index) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
pro.yr[[2015]] %>% r2()
pro.yr[[2015]] %>% summary()
pro.yr[[2015]] %>% check_model()
#### 2d. Respiration ####
resp.yr <- list()
for(i in 2015:2019){
resp.yr[[i]] <- lmer(log(respiration) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
resp.yr[[2015]] %>% r2()
resp.yr[[2015]] %>% summary()
resp.yr[[2015]] %>% check_model()
#### 2e. Water Holding Capacity ####
whc.yr <- list()
for(i in 2015:2019){
whc.yr[[i]] <- lmer(water_capacity ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
whc.yr[[2015]] %>% r2()
whc.yr[[2015]] %>% summary()
whc.yr[[2015]] %>% check_model()
#### 2f. Soil organic matter ####
som.yr <- list()
for(i in 2015:2019){
som.yr[[i]] <- lmer(final_OM ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
som.yr[[2015]] %>% r2()
som.yr[[2015]] %>% summary()
som.yr[[2015]] %>% check_model()
#### EXPORT DATA ####
rm(final); rm(data.std); rm(summary.dat)
save.image("~/Box Sync/Work/Code/shp/shp-models.RData")
| /code/data-analysis.R | no_license | swood-ecology/soil-health-partnership | R | false | false | 7,087 | r | #### LOAD PACKAGES ####
library(tidyverse) # General data manipulation
library(lme4) # Mixed-effects modeling
library(lmerTest) # P-values for lme objects
library(table1) # For summary table
library(performance) # For model checking
library(influence.ME) # For Cook's influence
#### READ DATA ####
load("shp-data.RData")
#### FILTER FARMS ####
# Generate list of farms to filter by for Maria's interest in cover crop type
farmSub <- c(
'SHP2014IA001','SHP2014IA002','SHP2014IA004','SHP2014IA005','SHP2014IL001','SHP2014IL002','SHP2014IL005','SHP2014IN002',
'SHP2015IA003','SHP2015IA004','SHP2015IA005','SHP2015IA006','SHP2015IA007','SHP2015IA008','SHP2015IL001','SHP2015IL003',
'SHP2015IL004','SHP2015OH001','SHP2016IA002','SHP2016IA003','SHP2016IA004','SHP2016IA005','SHP2016IL003','SHP2016IN003',
'SHP2016IN005','SHP2016IN008','SHP2017IA002','SHP2017IA003','SHP2017IA004','SHP2017IA006','SHP2017IL003','SHP2017IL005',
'SHP2017IL009','SHP2017IN003','SHP2017IN004','SHP2017MO004','SHP2017MO005','SHP2017NE001','SHP2017NE002','SHP2017NE004',
'SHP2017OH003','SHP2017WI003','SHP2017WI004'
)
# final <- final %>% filter(IDtoUse %in% farmSub)
#### SUMMARY STATISTICS #####
summary.dat <- final %>%
select(
soil_texture_sand:water_capacity, aggregate_stability, ace_soil_protein_index, final_OM, respiration,
active_carbon, yrsTrt, `Cover Crop`, State
)
my.render.cont <- function(x) {
with(stats.apply.rounding(stats.default(x), digits = 2), c("",
"Mean (SD)" = sprintf("%s (± %s)", MEAN, SD)
))
}
table1(~ as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + as.numeric(soil_texture_sand) + water_capacity
+ aggregate_stability + ace_soil_protein_index + final_OM + respiration + active_carbon + yrsTrt
| State,
data = summary.dat,
render.continuous = my.render.cont,
overall = NULL
)
table1(~ water_capacity + aggregate_stability + ace_soil_protein_index + final_OM + respiration + active_carbon
| `Cover Crop` + yrsTrt,
data = summary.dat %>% filter(yrsTrt != 0),
render.continuous = my.render.cont,
overall = NULL)
#### DATA MANIPULATION ####
## Create dummy variables
final <- final %>%
mutate(
CC = ifelse(`Cover Crop`=="Yes",1,0),
)
## Variable standardization
data.std <- final %>%
mutate(
c.CC = CC - mean(CC),
s.yrsTrt = (yrsTrt - mean(yrsTrt)) / 2 * sd(yrsTrt),
s.yr = (as.numeric(smpl_yr)-mean(as.numeric(smpl_yr))) / 2*sd(as.numeric(smpl_yr)),
s.clay = (as.numeric(soil_texture_clay) - mean(as.numeric(soil_texture_clay))) / 2 * sd(as.numeric(soil_texture_clay)),
s.silt = (as.numeric(soil_texture_silt) - mean(as.numeric(soil_texture_silt))) / 2 * sd(as.numeric(soil_texture_silt)),
IDtoUse = IDtoUse
)
#### 1. ALL DATA MODELS ####
#### 1a. Active Carbon ####
# Fit initial model
ac <- lmer(active_carbon ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final)
# Check model
ac %>% r2()
summary(ac)
ac %>% performance::check_model()
#### 1b. Aggregate Stability ####
# Fit initial model
as <- lmer(log(aggregate_stability) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
# Check model
as %>% r2()
as %>% summary()
as %>% check_model()
#### 1c. Protein ####
pro <- lmer(log(ace_soil_protein_index) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final)
pro %>% r2()
pro %>% summary()
pro %>% check_model()
#### 1d. Respiration ####
resp <- lmer(log(respiration) ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
resp %>% r2()
resp %>% summary()
resp %>% check_model()
#### 1e. Water Holding Capacity ####
whc <- lmer(water_capacity ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
whc %>% r2()
whc %>% summary()
whc %>% check_model()
#### 1f. Soil Organic Matter ####
som <- lmer(final_OM ~ `Cover Crop`*yrsTrt + as.numeric(smpl_yr) + as.numeric(soil_texture_clay) + as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final
)
som %>% r2()
som %>% summary()
som %>% check_model()
#### 2. MODELS BY YEAR ####
#### 2a. Active Carbon ####
# Run model for all data
ac.yr <- list()
for(i in 2015:2019){
ac.yr[[i]] <- lmer(active_carbon ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(as.numeric(smpl_yr)==i)
)
}
ac.yr[[2015]] %>% r2()
ac.yr[[2015]] %>% summary()
ac.yr[[2015]] %>% check_model()
#### 2b. Aggregate Stability ####
as.yr <- list()
for(i in 2015:2019){
as.yr[[i]] <- lmer(log(aggregate_stability) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(as.numeric(smpl_yr)==i)
)
}
as.yr[[2015]] %>% r2()
as.yr[[2015]] %>% summary()
as.yr[[2015]] %>% check_model()
#### 2c. Protein #####
# Model for each year
# Sections 2c-2f share one structure: per-year lmer with cover crop (CC),
# years of treatment, soil texture, and a site random intercept. Note
# these filter with smpl_yr == i (no as.numeric), unlike 2a/2b.
pro.yr <- list()
for(i in 2015:2019){
pro.yr[[i]] <- lmer(log(ace_soil_protein_index) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
# Inspect the 2015 model only.
pro.yr[[2015]] %>% r2()
pro.yr[[2015]] %>% summary()
pro.yr[[2015]] %>% check_model()
#### 2d. Respiration ####
# Log-transformed respiration, same per-year structure.
resp.yr <- list()
for(i in 2015:2019){
resp.yr[[i]] <- lmer(log(respiration) ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
resp.yr[[2015]] %>% r2()
resp.yr[[2015]] %>% summary()
resp.yr[[2015]] %>% check_model()
#### 2e. Water Holding Capacity ####
# Untransformed water_capacity response.
whc.yr <- list()
for(i in 2015:2019){
whc.yr[[i]] <- lmer(water_capacity ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
whc.yr[[2015]] %>% r2()
whc.yr[[2015]] %>% summary()
whc.yr[[2015]] %>% check_model()
#### 2f. Soil organic matter ####
# Per-year counterpart of the pooled `som` model above (CC main effect
# instead of the `Cover Crop` x yrsTrt interaction).
som.yr <- list()
for(i in 2015:2019){
som.yr[[i]] <- lmer(final_OM ~ CC + yrsTrt + as.numeric(soil_texture_clay) +
as.numeric(soil_texture_silt) + (1|IDtoUse),
data = final %>%
filter(smpl_yr==i)
)
}
som.yr[[2015]] %>% r2()
som.yr[[2015]] %>% summary()
som.yr[[2015]] %>% check_model()
#### EXPORT DATA ####
# Drop the large raw objects, then snapshot every remaining object
# (i.e. all fitted models) into one .RData image.
# NOTE(review): the output path is machine-specific; consider a relative
# path or here::here() if this script must run elsewhere.
rm(final); rm(data.std); rm(summary.dat)
save.image("~/Box Sync/Work/Code/shp/shp-models.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpraat.R
\name{seqM}
\alias{seqM}
\title{seqM}
\usage{
seqM(from = NA, to = NA, by = NA, length.out = NA)
}
\arguments{
\item{from}{starting value of the sequence (the first number)}
\item{to}{end value of the sequence (the last number or the boundary number)}
\item{by}{increment of the sequence (if specified, do not use the \code{length.out} parameter). If both \code{by} and \code{length.out} are not specified, then \code{by = +1}.}
\item{length.out}{desired length of the sequence (if specified, do not use the \code{by} parameter)}
}
\value{
returns a vector of type "integer" or "double"
}
\description{
Matlab-like behaviour of colon operator or linspace for creating sequences, for-loop friendly.
}
\details{
Like \code{seq()} but with Matlab-like behavior ([: operator] with \code{by} or [linspace] with \code{length.out}).
For for-loop use it is convenient that \code{seqM(3, 1)} returns an empty vector (the default step being +1),
and that \code{seqM(3, 1, by = 1)} also returns an empty vector rather than an error as \code{seq()} does; \code{seqM} provides exactly this behaviour.
}
\section{Comparison}{
\tabular{lllll}{
R: seqM \tab \tab Matlab \tab \tab R: seq \cr
seqM(1, 3) \tab [1] 1 2 3 \tab 1:3 \tab the same \tab the same \cr
seqM(1, 3, by=.8) \tab [1] 1.0 1.8 2.6 \tab 1:.8:3 \tab the same \tab the same \cr
seqM(1, 3, by=5) \tab [1] 1 \tab 1:5:3 \tab the same \tab the same \cr
seqM(3, 1) \tab integer(0) \tab 3:1 \tab the same \tab [1] 3 2 1 \cr
seqM(3, 1, by=+1) \tab integer(0) \tab 3:1:1 \tab the same \tab Error: wrong 'by' \cr
seqM(3, 1, by=-1) \tab [1] 3 2 1 \tab 3:-1:1 \tab the same \tab the same \cr
seqM(3, 1, by=-3) \tab [1] 3 \tab 3:-3:1 \tab the same \tab the same \cr
seqM(1, 3, len=5) \tab [1] 1.0 1.5 2.0 2.5 3.0 \tab linspace(1,3,5) \tab the same \tab the same \cr
seqM(1, 3, len=3) \tab [1] 1 2 3 \tab linspace(1,3,3) \tab the same \tab the same \cr
seqM(1, 3, len=2) \tab [1] 1 3 \tab linspace(1,3,2) \tab the same \tab the same \cr
seqM(1, 3, len=1) \tab [1] 3 \tab linspace(1,3,1) \tab the same \tab [1] 1 \cr
seqM(1, 3, len=0) \tab integer(0) + warning \tab linspace(1,3,0) \tab the same without warning \tab the same without warning \cr
seqM(3, 1, len=3) \tab [1] 3 2 1 \tab linspace(3,1,3) \tab the same \tab the same \cr
}
}
\examples{
seqM(1, 3)
seqM(1, 3, by=.8)
seqM(1, 3, by=5)
seqM(3, 1)
seqM(3, 1, by=+1)
seqM(3, 1, by=-1)
seqM(3, 1, by=-3)
seqM(1, 3, len=5)
seqM(1, 3, len=3)
seqM(1, 3, len=2)
seqM(1, 3, len=1)
seqM(1, 3, len=0)
seqM(3, 1, len=3)
}
\seealso{
\code{\link{round2}}, \code{\link{isNum}}, \code{\link{isInt}}, \code{\link{ifft}}.
}
| /man/seqM.Rd | permissive | cran/rPraat | R | false | true | 3,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpraat.R
\name{seqM}
\alias{seqM}
\title{seqM}
\usage{
seqM(from = NA, to = NA, by = NA, length.out = NA)
}
\arguments{
\item{from}{starting value of the sequence (the first number)}
\item{to}{end value of the sequence (the last number or the boundary number)}
\item{by}{increment of the sequence (if specified, do not use the \code{length.out} parameter). If both \code{by} and \code{length.out} are not specified, then \code{by = +1}.}
\item{length.out}{desired length of the sequence (if specified, do not use the \code{by} parameter)}
}
\value{
returns a vector of type "integer" or "double"
}
\description{
Matlab-like behaviour of colon operator or linspace for creating sequences, for-loop friendly.
}
\details{
Like \code{seq()} but with Matlab-like behavior ([: operator] with \code{by} or [linspace] with \code{length.out}).
If I create a for-loop, I would like to get an empty vector for \code{3:1} (I want a default step +1)
and also an empty vector for \code{seq(3, 1, by = 1)} (not an error). This is solved by this \code{seqM} function.
}
\section{Comparison}{
\tabular{lllll}{
R: seqM \tab \tab Matlab \tab \tab R: seq \cr
seqM(1, 3) \tab [1] 1 2 3 \tab 1:3 \tab the same \tab the same \cr
seqM(1, 3, by=.8) \tab [1] 1.0 1.8 2.6 \tab 1:.8:3 \tab the same \tab the same \cr
seqM(1, 3, by=5) \tab [1] 1 \tab 1:5:3 \tab the same \tab the same \cr
seqM(3, 1) \tab integer(0) \tab 3:1 \tab the same \tab [1] 3 2 1 \cr
seqM(3, 1, by=+1) \tab integer(0) \tab 3:1:1 \tab the same \tab Error: wrong 'by' \cr
seqM(3, 1, by=-1) \tab [1] 3 2 1 \tab 3:-1:1 \tab the same \tab the same \cr
seqM(3, 1, by=-3) \tab [1] 3 \tab 3:-3:1 \tab the same \tab the same \cr
seqM(1, 3, len=5) \tab [1] 1.0 1.5 2.0 2.5 3.0 \tab linspace(1,3,5) \tab the same \tab the same \cr
seqM(1, 3, len=3) \tab [1] 1 2 3 \tab linspace(1,3,3) \tab the same \tab the same \cr
seqM(1, 3, len=2) \tab [1] 1 3 \tab linspace(1,3,2) \tab the same \tab the same \cr
seqM(1, 3, len=1) \tab [1] 3 \tab linspace(1,3,1) \tab the same \tab [1] 1 \cr
seqM(1, 3, len=0) \tab integer(0) + warning \tab linspace(1,3,0) \tab the same without warning \tab the same without warning \cr
seqM(3, 1, len=3) \tab [1] 3 2 1 \tab linspace(3,1,3) \tab the same \tab the same \cr
}
}
\examples{
seqM(1, 3)
seqM(1, 3, by=.8)
seqM(1, 3, by=5)
seqM(3, 1)
seqM(3, 1, by=+1)
seqM(3, 1, by=-1)
seqM(3, 1, by=-3)
seqM(1, 3, len=5)
seqM(1, 3, len=3)
seqM(1, 3, len=2)
seqM(1, 3, len=1)
seqM(1, 3, len=0)
seqM(3, 1, len=3)
}
\seealso{
\code{\link{round2}}, \code{\link{isNum}}, \code{\link{isInt}}, \code{\link{ifft}}.
}
|
# Inverse-transform sampling for a Pareto distribution (homework script).
u <- runif(1000)
# NOTE(review): this expression looks wrong -- F(x) = 1 - (1/x)^5 is the
# Pareto CDF for x >= 1, but here it is evaluated at u in (0, 1), so every
# value is negative and the histogram below is not a valid CDF check.
# Confirm the intended formula.
Fx <- 1-(1/u)^5
# Quantile (inverse-CDF) transform: x = (1 - u)^(-1/5) turns uniform
# draws u into Pareto(shape = 5, scale = 1) draws.
fx <- 1/(1-u)^(1/5)
hist(Fx)
#hist(fx)
# Sampler built on qpareto(), which comes from an add-on package
# (e.g. EnvStats/extraDistr), not base R.
f <- function(n, a=0.5, b=1) qpareto(runif(n), a, b)
| /Stochastic Signals and Systems/Homework/HW3/Code/Code/21518139_HW1/HW2_10.R | no_license | crclayton/notes | R | false | false | 130 | r | u <- runif(1000)
# (duplicate copy) CDF-style expression evaluated at uniform u in (0,1);
# all values are negative here -- the formula's intent is questionable.
Fx <- 1-(1/u)^5
# Inverse-CDF draw for Pareto(shape = 5, scale = 1).
fx <- 1/(1-u)^(1/5)
hist(Fx)
#hist(fx)
# Sampler using qpareto() from an add-on package (not base R).
f=function(n, a=0.5, b=1) qpareto(runif(n),a,b)
|
# Pull the monthly three-touch attribution table from the warehouse
# (sqlQuery/.dwMatrix come from the RODBC session set up elsewhere).
query <- "select * from a.feng_us_attribution_3touches_monthly"
data <- sqlQuery(.dwMatrix, query=query)
# Reshape one attribution measure from long (subchannel x month) to wide
# (one row per subchannel, one column per month), sort by January volume,
# sanitise subchannel names and zero-fill missing months.
#
# dat       -- data frame with columns subchannel, month, <value_col>.
# value_col -- name of the measure column ("firsttouch", "lasttouch", ...).
#
# NOTE(review): the hard-coded 2:7 column range and Jan-Jun labels assume
# exactly six months of data, inherited from the original script.
pivot_touch <- function(dat, value_col) {
  wide <- reshape(dat, v.names = value_col, timevar = "month",
                  idvar = "subchannel", direction = "wide")
  wide <- wide[, c("subchannel", sort(names(wide)[2:7]))]
  names(wide) <- c("subchannel", "Jan", "Feb", "Mar", "Apr", "May", "Jun")
  wide <- wide[order(wide$Jan, decreasing = TRUE), ]
  # Replace any character that is not alphanumeric or a space with "-".
  wide$subchannel <- gsub("[^a-zA-Z0-9\\ ]+", "-", wide$subchannel)
  wide[is.na(wide)] <- 0
  wide
}
# One call per measure replaces the three copy-pasted pipelines.
firsttouch <- pivot_touch(data[, 1:3], "firsttouch")
write.csv(firsttouch, file = "C:/Users/fyi/Desktop/firsttouch.csv", row.names = FALSE)
lasttouch <- pivot_touch(data[, c(1, 2, 4)], "lasttouch")
write.csv(lasttouch, file = "C:/Users/fyi/Desktop/lasttouch.csv", row.names = FALSE)
multitouch <- pivot_touch(data[, c(1, 2, 5)], "multitouch")
write.csv(multitouch, file = "C:/Users/fyi/Desktop/multitouch.csv", row.names = FALSE)
| /R/old/monthly_attribution1.R | no_license | cholita77/AttributionModel | R | false | false | 1,766 | r | query <- "select * from a.feng_us_attribution_3touches_monthly"
# (duplicate copy of the attribution export script)
data <- sqlQuery(.dwMatrix, query=query)
# First-touch attribution: long -> wide by month, keep subchannel plus the
# six sorted month columns, relabel Jan-Jun, sort by January volume,
# sanitise subchannel names, zero-fill NAs, export to CSV.
firsttouch <- reshape(data[, 1:3], v.names="firsttouch",
timevar="month", idvar="subchannel", direction="wide")
firsttouch <- firsttouch[, c("subchannel", sort(names(firsttouch)[2:7]))]
names(firsttouch) <- c("subchannel", "Jan", "Feb", "Mar", "Apr", "May", "Jun")
firsttouch <- firsttouch[order(firsttouch$Jan, decreasing=T), ]
firsttouch$subchannel <- gsub("[^a-zA-Z0-9\\ ]+", "-", firsttouch$subchannel)
firsttouch[is.na(firsttouch)] <- 0
write.csv(firsttouch, file="C:/Users/fyi/Desktop/firsttouch.csv", row.names=F)
# Same pipeline for last-touch attribution.
lasttouch <- reshape(data[, c(1, 2, 4)], v.names="lasttouch",
timevar="month", idvar="subchannel", direction="wide")
lasttouch <- lasttouch[, c("subchannel", sort(names(lasttouch)[2:7]))]
names(lasttouch) <- c("subchannel", "Jan", "Feb", "Mar", "Apr", "May", "Jun")
lasttouch <- lasttouch[order(lasttouch$Jan, decreasing=T), ]
lasttouch$subchannel <- gsub("[^a-zA-Z0-9\\ ]+", "-", lasttouch$subchannel)
lasttouch[is.na(lasttouch)] <- 0
write.csv(lasttouch, file="C:/Users/fyi/Desktop/lasttouch.csv", row.names=F)
# Same pipeline for multi-touch attribution.
multitouch <- reshape(data[, c(1, 2, 5)], v.names="multitouch",
timevar="month", idvar="subchannel", direction="wide")
multitouch <- multitouch[, c("subchannel", sort(names(multitouch)[2:7]))]
names(multitouch) <- c("subchannel", "Jan", "Feb", "Mar", "Apr", "May", "Jun")
multitouch <- multitouch[order(multitouch$Jan, decreasing=T), ]
multitouch$subchannel <- gsub("[^a-zA-Z0-9\\ ]+", "-", multitouch$subchannel)
multitouch[is.na(multitouch)] <- 0
write.csv(multitouch, file="C:/Users/fyi/Desktop/multitouch.csv", row.names=F)
|
#' Expect a set of Shiny outputs to update
#'
#' `testthat` expectation that checks that setting the given inputs
#' causes the named output widget(s) to re-render.
#'
#' @param app A [ShinyDriver()] object.
#' @param output Character vector: name(s) of the output widgets that
#'   must all update for the expectation to succeed.
#' @param ... Named values to set on Shiny input widgets.
#' @param timeout How long to wait for the update, in milliseconds.
#' @param iotype Type of the widget(s) to change; normally inputs.
#'
#' @export
#' @importFrom testthat expect
#' @importFrom utils compareVersion
#' @examples
#' \dontrun{
#' ## https://github.com/rstudio/shiny-examples/tree/main/050-kmeans-example
#' app <- ShinyDriver$new("050-kmeans-example")
#' expectUpdate(app, xcol = "Sepal.Width", output = "plot1")
#' expectUpdate(app, ycol = "Petal.Width", output = "plot1")
#' expectUpdate(app, clusters = 4, output = "plot1")
#' }
#' @keywords internal
expectUpdate <- function(app, output, ..., timeout = 3000,
                         iotype = c("auto", "input", "output")) {
  # Thin wrapper: all the real work happens in the driver's own method;
  # match.arg() validates iotype against the default choices.
  app$expectUpdate(output, ...,
                   timeout = timeout,
                   iotype = match.arg(iotype))
}
# Internal implementation of ShinyDriver$expectUpdate().
#
# self, private -- the ShinyDriver object and its private environment.
# output        -- character vector of output widget names that must all
#                  update for the expectation to pass.
# ...           -- named values to set on input widgets first.
# timeout       -- maximum wait, in milliseconds.
# iotype        -- widget type, forwarded to self$findWidget().
#
# The bare "!DEBUG ..." strings are debugme markers; when debugging is
# disabled they are plain string literals and therefore no-ops.
sd_expectUpdate <- function(self, private, output, ..., timeout,
                            iotype) {
  "!DEBUG sd_expectUpdate `paste(output, collapse = ', ')`"
  assert_that(is.character(output))
  assert_that(is_all_named(inputs <- list(...)))
  assert_that(is_count(timeout))
  assert_that(is_string(iotype))

  ## Make note of the expected updates. They will be ticked off
  ## one by one by the JS event handler in shiny-tracer.js
  js <- paste0(
    "window.shinytest.updating.push('", output, "');",
    collapse = "\n"
  )
  private$web$executeScript(js)
  on.exit(
    private$web$executeScript("window.shinytest.updating = [];"),
    add = TRUE
  )

  ## Do the changes to the inputs
  for (n in names(inputs)) {
    self$findWidget(n, iotype = iotype)$setValue(inputs[[n]])
  }

  "!DEBUG waiting for update"
  ## Wait for all the updates to happen, or a timeout
  res <- private$web$waitFor(
    "window.shinytest.updating.length == 0",
    timeout = timeout
  )
  "!DEBUG update done (`if (res) 'done' else 'timeout'`)"

  ## BUG FIX: the original passed strwrap(template) to sprintf(); when
  ## the console width wrapped the template into several elements,
  ## sprintf() vectorized over them and expect() received a garbled
  ## multi-element failure message. Format first, wrap the result after.
  msg <- sprintf(
    paste0(
      "Updating %s did not update %s, or it is taking longer ",
      "than %i ms."),
    paste(sQuote(names(inputs)), collapse = ", "),
    paste(sQuote(output), collapse = ", "),
    timeout
  )
  expect(res, paste(strwrap(msg), collapse = "\n"))

  ## "updating" is cleaned up automatically by on.exit()
}
| /R/expect.R | no_license | cran/shinytest | R | false | false | 2,479 | r |
#' `testthat` expectation for a Shiny update
#'
#' @param app A [ShinyDriver()] object.
#' @param output Character vector, the name(s) of the output widgets
#' that are required to update for the test to succeed.
#' @param ... Named arguments specifying updates for Shiny input
#' widgets.
#' @param timeout Timeout for the update to happen, in milliseconds.
#' @param iotype Type of the widget(s) to change. These are normally
#' input widgets.
#'
#' @export
#' @importFrom testthat expect
#' @importFrom utils compareVersion
#' @examples
#' \dontrun{
#' ## https://github.com/rstudio/shiny-examples/tree/main/050-kmeans-example
#' app <- ShinyDriver$new("050-kmeans-example")
#' expectUpdate(app, xcol = "Sepal.Width", output = "plot1")
#' expectUpdate(app, ycol = "Petal.Width", output = "plot1")
#' expectUpdate(app, clusters = 4, output = "plot1")
#' }
#' @keywords internal
expectUpdate <- function(app, output, ..., timeout = 3000,
                         iotype = c("auto", "input", "output")) {
  # Thin wrapper around the ShinyDriver object's own expectUpdate()
  # method; match.arg() validates iotype against the default choices.
  app$expectUpdate(output, ...,
                   timeout = timeout,
                   iotype = match.arg(iotype))
}
# Internal implementation of ShinyDriver$expectUpdate().
#
# self, private -- the ShinyDriver object and its private environment.
# output        -- character vector of output widget names that must all
#                  update for the expectation to pass.
# ...           -- named values to set on input widgets first.
# timeout       -- maximum wait, in milliseconds.
# iotype        -- widget type, forwarded to self$findWidget().
#
# The bare "!DEBUG ..." strings are debugme markers; when debugging is
# disabled they are plain string literals and therefore no-ops.
sd_expectUpdate <- function(self, private, output, ..., timeout,
                            iotype) {
  "!DEBUG sd_expectUpdate `paste(output, collapse = ', ')`"
  assert_that(is.character(output))
  assert_that(is_all_named(inputs <- list(...)))
  assert_that(is_count(timeout))
  assert_that(is_string(iotype))

  ## Make note of the expected updates. They will be ticked off
  ## one by one by the JS event handler in shiny-tracer.js
  js <- paste0(
    "window.shinytest.updating.push('", output, "');",
    collapse = "\n"
  )
  private$web$executeScript(js)
  on.exit(
    private$web$executeScript("window.shinytest.updating = [];"),
    add = TRUE
  )

  ## Do the changes to the inputs
  for (n in names(inputs)) {
    self$findWidget(n, iotype = iotype)$setValue(inputs[[n]])
  }

  "!DEBUG waiting for update"
  ## Wait for all the updates to happen, or a timeout
  res <- private$web$waitFor(
    "window.shinytest.updating.length == 0",
    timeout = timeout
  )
  "!DEBUG update done (`if (res) 'done' else 'timeout'`)"

  ## BUG FIX: the original passed strwrap(template) to sprintf(); when
  ## the console width wrapped the template into several elements,
  ## sprintf() vectorized over them and expect() received a garbled
  ## multi-element failure message. Format first, wrap the result after.
  msg <- sprintf(
    paste0(
      "Updating %s did not update %s, or it is taking longer ",
      "than %i ms."),
    paste(sQuote(names(inputs)), collapse = ", "),
    paste(sQuote(output), collapse = ", "),
    timeout
  )
  expect(res, paste(strwrap(msg), collapse = "\n"))

  ## "updating" is cleaned up automatically by on.exit()
}
|
context("bary2sph")
test_that("bary2sph works correctly", {
  ## Octahedral "diamond" centred on the origin: poles at +/-z and four
  ## equatorial vertices on the x/y axes.
  verts <- rbind(c(0, 0, 1),
                 c(1, 0, 0),
                 c(0, 1, 0),
                 c(-1, 0, 0),
                 c(0, -1, 0),
                 c(0, 0, -1))
  ## Eight triangular faces, as vertex-index triples into verts.
  faces <- rbind(c(1, 2, 3),
                 c(1, 3, 4),
                 c(1, 4, 5),
                 c(1, 5, 2),
                 c(6, 2, 3),
                 c(6, 3, 4),
                 c(6, 4, 5),
                 c(6, 5, 2))
  ## Barycentric test points: face index plus barycentric coordinates.
  bary <- list(idx = c(1, 1, 1, 1, 2, 3, 5),
               p = rbind(c(1, 0, 0),
                         c(0, 1, 0),
                         c(0, 0, 1),
                         c(0, 1/2, 1/2),
                         c(0, 0, 1),
                         c(0, 0, 1),
                         c(1, 0, 0)))
  ## Expected spherical coordinates, expressed in units of pi/2.
  want <- rbind(c(phi = 1, lambda = 0),
                c(0, 0),
                c(0, 1),
                c(0, 0.5),
                c(0, 2.0),
                c(0, -1.0),
                c(-1, 0.0))
  expect_equal(bary2sph(bary, T = faces, P = verts) / pi * 2, want)
  ## NA barycentric input must propagate NA to the output row.
  na_bary <- list(idx = c(1, NA), p = rbind(c(1, 0, 0), c(NA, NA, NA)))
  expect_equal(bary2sph(Ib = na_bary, T = faces, P = verts) / pi * 2,
               rbind(c(phi = 1, lambda = 0),
                     c(NA, NA)))
})
| /pkg/retistruct/tests/testthat/test-bary2sph.R | no_license | davidcsterratt/retistruct | R | false | false | 1,186 | r | context("bary2sph")
# (duplicate copy of the bary2sph unit test)
test_that("bary2sph works correctly", {
## Construct a diamond (octahedron) round the origin: vertices P,
## triangular faces T (vertex indices), barycentric test points Ib.
P <- rbind(c(0, 0, 1),
c(1, 0, 0),
c(0, 1, 0),
c(-1, 0, 0),
c(0, -1, 0),
c(0, 0, -1))
T <- rbind(c(1, 2, 3),
c(1, 3, 4),
c(1, 4, 5),
c(1, 5, 2),
c(6, 2, 3),
c(6, 3, 4),
c(6, 4, 5),
c(6, 5, 2))
Ib <- list(idx=c(1, 1, 1, 1, 2, 3, 5),
p=rbind(c(1, 0, 0),
c(0, 1, 0),
c(0, 0, 1),
c(0, 1/2, 1/2),
c(0, 0 , 1),
c(0, 0, 1),
c(1, 0, 0)))
## Expected spherical coordinates in units of pi/2.
expect_equal(bary2sph(Ib, T=T, P=P)/pi*2,
rbind(c(phi=1, lambda=0),
c(0, 0),
c(0, 1),
c(0, 0.5),
c(0, 2.0),
c(0,-1.0),
c(-1, 0.0)))
## NA barycentric input must propagate NA to the output row.
expect_equal(bary2sph(Ib=list(idx=c(1, NA), p=rbind(c(1, 0, 0), c(NA, NA, NA))), T=T, P=P)/pi*2,
rbind(c(phi=1, lambda=0),
c(NA, NA)))
})
|
# LP project: exploratory analysis of scraped Ecuadorian job postings.
# NOTE(review): requires the randomcoloR package and RStudio for
# rstudioapi-based setwd(); setwd() in a script is fragile elsewhere.
#install.packages("randomcoloR")
library("randomcoloR")
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
datos=read.csv("../-Ecuador.csv")
# Which employers post the most job offers?
empleadores=table(datos$Empleador)
emplSorted=sort(empleadores,decreasing=TRUE)
# Top 10 employers nationwide (per the original note, the first entry is
# an "N/A" placeholder).
barplot(emplSorted[1:10],legend=TRUE, col = distinctColorPalette(10))
# Same chart excluding that placeholder entry.
barplot(emplSorted[2:11],legend=TRUE, col = distinctColorPalette(10))
# Guayaquil: top employers, with and without the leading entry.
guayaquil=datos[datos$Localizacion=="Guayaquil",]
emplSortedGye=sort(table(guayaquil$Empleador),decreasing = TRUE)
pie(emplSortedGye[1:6],col = distinctColorPalette(6))
pie(emplSortedGye[2:7],col = distinctColorPalette(6))
# Quito: same pair of pie charts.
quito=datos[datos$Localizacion=="Quito",]
emplSortedQto=sort(table(quito$Empleador),decreasing = TRUE)
pie(emplSortedQto[1:6],col = distinctColorPalette(6))
pie(emplSortedQto[2:7],col = distinctColorPalette(6))
# How long postings stay relevant: most frequent posting-age values.
t=sort(table(datos$Fecha),decreasing = TRUE)
plot(t[1:10],col=distinctColorPalette(10),type = "h")
library (ggplot2)
# Which jobs are requested the least? Titles are reduced to their first
# alphabetic word, lower-cased.
empleos <- tolower( gsub("([A-Za-z]+).*", "\\1", datos$Trabajo))
#empleos <- empleos[ -c("graduado", "ingenier", "hca", "importante", "key","l", "lic", "liner", "polifuncional", "portoviejo", "profesionales", "programa", "project", "abogadoa", "necesito", "odont", "php" ,"matriceroa", "multinacional", "bluecard", "ec", "equipamiento", "especialista", "buscamos", "busco", "content", "control"), ]
frecuecias_empleos <- table(empleos)
empleos_ordenados <- sort(frecuecias_empleos,decreasing=FALSE)
#barplot(empleos_ordenados[1:20],legend=TRUE, col = distinctColorPalette(10))
# Dot chart of the 15 least-frequent job words.
dotchart(as.numeric(empleos_ordenados[1:15]),
labels = NULL, groups = NULL,
gcolor = par("fg"),
color = par("fg"),
main = "empleos menos solicitados",
xlab = "frecuecias"
)
# Which cantons have the fewest job offers, and which jobs are those?
cantones <- tolower( gsub("([A-Za-z]+).*", "\\1", datos$Localizacion))
frecuecias_cantones <- table(cantones)
cantones_ordenados <- sort(frecuecias_cantones,decreasing=FALSE)
barplot(cantones_ordenados[1:15],legend=TRUE, col = distinctColorPalette(10))
# Jobs posted in the lowest-volume cantons (first column = job title).
datos[grep("Arenillas", datos$Localizacion),][1]
datos[grep("Machachi", datos$Localizacion),][1]
datos[grep("Otavalo", datos$Localizacion),][1]
# NOTE(review): install.packages() at script run time is an
# anti-pattern; these should be run once interactively, not on every
# execution.
install.packages("dplyr")
install.packages("tidyr")
install.packages("readxl")
install.packages("ggplot2")
library (ggplot2)
library (tidyr)
# Which province has the largest job supply in Ecuador?
data <- read.csv('../-Ecuador.csv') # read the scraped CSV
# Frequency of each location, round-tripped through a text file to get a
# two-column data frame.
# NOTE(review): the write.table()/read.table() round trip is only used
# to coerce the table() result; as.data.frame(tabla_lugar) would do the
# same without touching disk.
tabla_lugar <- table(data$Localizacion)
write.table(tabla_lugar, 'OfertasLaborables.txt')
localizaciones <- read.table('OfertasLaborables.txt')
localizaciones <- as.data.frame.matrix(localizaciones)
names(localizaciones) <- c("Localizacion", "Oferta Laboral")
# Keep only locations with more than 20 offers.
localizaciones <- localizaciones[localizaciones$`Oferta Laboral`>20, ]
x <- localizaciones[1]
x <- x$Localizacion
y <- localizaciones[2]
y <- y$`Oferta Laboral`
# Bar chart of locations with more than 20 job offers.
barplot(y, main="Localizaciones vs Ofertas Laborables", xlab="Localizacion", ylab="#Ofertas Laborables", names.arg = x, border="blue", density=c(10,20,30,40,50))
#cuales carreras son las mas buscadas en cada provincia?
# Job-title column of the scraped postings.
trabajos <- data[, 1]
# First space-separated token of each title, upper-cased, with anything
# after a "/" dropped (e.g. "Vendedor/a ..." -> "VENDEDOR").
#
# NOTE(review): the original pair of element-wise loops raised
# "number of items to replace is not a multiple of replacement length"
# warnings (implicitly keeping only the first word) and coerced
# vector_empleo into a *list* by assigning list elements into it. This
# vectorized version computes the same first-token values but yields a
# plain character vector, which is what the downstream unite()/table()
# calls expect.
first_token <- function(x, split) {
  vapply(strsplit(as.character(x), split, fixed = TRUE),
         function(parts) parts[1], character(1))
}
vector_empleo <- first_token(toupper(first_token(trabajos, " ")), "/")
data$Empleo <- vector_empleo
# Drop the raw title column (Empleo, added above, replaces it).
data <- data[,-1]
# Count (job type, location) pairs. The combined key is round-tripped
# through text/CSV files purely to coerce table() output into a data
# frame.
# NOTE(review): as.data.frame(tabla_TrabajoxLugar) would avoid both
# round trips through disk.
data_EmpleoxLugar <- data[,2:4]
data_EmpleoxLugar <- data_EmpleoxLugar[,-2]
EmpleoxLugar <- unite(data_EmpleoxLugar, Empleo_Lugar, c(1:2), sep=",")
tabla_TrabajoxLugar <- table(EmpleoxLugar$Empleo_Lugar)
write.table(tabla_TrabajoxLugar, "TrabajoxLugar.txt")
TrabajoxLugar <- read.table("TrabajoxLugar.txt")
TrabajoxLugar <- as.data.frame.matrix(TrabajoxLugar)
# Split the combined "empleo,lugar" key back into two columns.
data_TrabajoxLugar <- within(data=TrabajoxLugar, Position<-data.frame(do.call('rbind',strsplit(as.character(Var1),",",fixed=TRUE))))
data_TrabajoxLugar <- data_TrabajoxLugar[,-1]
write.csv(data_TrabajoxLugar, "TrabajoxLugar.csv")
data_TrabajoxLugar <- read.csv("TrabajoxLugar.csv")
data_TrabajoxLugar <- data_TrabajoxLugar[,-1]
names(data_TrabajoxLugar) <- c("Cantidad de Empleos", "Localizacion","Tipo de Trabajo")
# Sort by job type.
data_TrabajoxLugar <- data_TrabajoxLugar[with(data_TrabajoxLugar, order(data_TrabajoxLugar$`Tipo de Trabajo`)), ]
# Remove unwanted rows.
# NOTE(review): hard-coded positional row removals (-2, -29, ...) are
# fragile -- they silently break if the input data changes.
data_TrabajoxLugar <- data_TrabajoxLugar[-2,]
data_TrabajoxLugar <- data_TrabajoxLugar[data_TrabajoxLugar$`Cantidad de Empleos`>=10,] # keep locations registering at least 10 jobs
data_TrabajoxLugar <- data_TrabajoxLugar[-29,]
data_TrabajoxLugar <- data_TrabajoxLugar[-28,]
data_TrabajoxLugar <- data_TrabajoxLugar[-18,]
data_TrabajoxLugar <- data_TrabajoxLugar[-27,]
# Stacked bars: job counts per job type, filled by location.
ggplot(data=data_TrabajoxLugar, aes(x=`Tipo de Trabajo`, y=`Cantidad de Empleos`, fill=Localizacion)) + geom_bar(stat="identity")
# Companies offering the widest range of job types.
View(data)
empleadores_empleos <- data[,-2]
empleadores_empleos <- empleadores_empleos[,-2]
# Combined "empleador,empleo" key, counted, then round-tripped through
# text/CSV files to coerce table() output into a data frame (see the
# note on the previous section -- as.data.frame() would avoid this).
empleadores_empleos <- unite(empleadores_empleos, Empleador_Empleo, c(1:2), sep=",")
tabla_empleadores_empleos <- table(empleadores_empleos $Empleador_Empleo)
write.table(tabla_empleadores_empleos, "EmpleadorXempleo.txt")
tabla_empleadores_empleos <- read.table("EmpleadorXempleo.txt")
empleadores_empleos <- as.data.frame.matrix(tabla_empleadores_empleos)
empleadores_empleos <- within(data=empleadores_empleos, Position<-data.frame(do.call('rbind',strsplit(as.character(Var1),",",fixed=TRUE))))
write.csv(empleadores_empleos, "empleadorXempleo.csv")
empleadores_empleos <- read.csv("empleadorXempleo.csv")
empleadores_empleos <- empleadores_empleos[,-(1:2)]
empleadores_empleos <- empleadores_empleos[,-(4:5)]
names(empleadores_empleos) <- c("Cantidad de Empleos", "Empleador","Tipo de Empleo")
# Keep companies offering at least 10 jobs; drop placeholder rows.
empleadores_empleos <- empleadores_empleos[empleadores_empleos$`Cantidad de Empleos`>=10,]
empleadores_empleos <- empleadores_empleos [empleadores_empleos $Empleador!="N/A",]
empleadores_empleos <- empleadores_empleos [empleadores_empleos $`Tipo de Empleo`!="SE",]
empleadores_empleos <- empleadores_empleos[-4,]
mosaicplot(Empleador ~ `Tipo de Empleo`,data=empleadores_empleos ,color=c("#99cc99", "#cc9999", "#9999cc", "#9c9c9c", "lightskyblue2", "tomato"), las=1, main = " ")
#Empleos menos solicitados en base a fecha de publicacion
# Scatter posting age (Fecha) against job title for postings older than
# 10 days, after dropping generic/placeholder "titles".
empleosXfecha <- data[,-1]
empleosXfecha <- empleosXfecha[,-1]
# Coerce via character first so factor columns convert to their labels,
# not their level codes.
empleosXfecha[,1] <- as.numeric(sapply(empleosXfecha[,1], as.character))
empleosXfecha[,2] <- sapply(empleosXfecha[,2], as.character)
empleosXfecha <- empleosXfecha[empleosXfecha$Fecha > 10, ]
empleosXfecha <- empleosXfecha[complete.cases(empleosXfecha), ]
# One %in% filter replaces the original twelve repeated != subset lines;
# equivalent here because complete.cases() has already removed NAs.
placeholder_titles <- c(
  "SE", "FINANCE,", "RECEPCIONISTA.", "CONTRATACIÓN...", "CONTRATACIÓN",
  "VACANTE", "OFERTA", "EMPLEO", "URGENTE!!!", "URGENTE",
  "BACHILLER", "ASISTENTES"
)
empleosXfecha <- empleosXfecha[!(empleosXfecha$Empleo %in% placeholder_titles), ]
ggplot(empleosXfecha, aes(x = Fecha, y = Empleo)) + geom_point()
| /R/ScriptLP.R | no_license | frajordo/LP-Proyecto | R | false | false | 8,419 | r | #Proyecto LP
#install.packages("randomcoloR")
library("randomcoloR")
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
datos=read.csv("../-Ecuador.csv")
#que empresas/empleadores ofrecen m�s empleo?
empleadores=table(datos$Empleador)
emplSorted=sort(empleadores,decreasing=TRUE)
#grafica de los 10 empleadores con mas ofertas en todo el ecuador
barplot(emplSorted[1:10],legend=TRUE, col = distinctColorPalette(10))
#grafica de los 10 empleadores con mas ofertas en todo el ecuador sin contar a los N/A
barplot(emplSorted[2:11],legend=TRUE, col = distinctColorPalette(10))
#Guayaquil
guayaquil=datos[datos$Localizacion=="Guayaquil",]
emplSortedGye=sort(table(guayaquil$Empleador),decreasing = TRUE)
pie(emplSortedGye[1:6],col = distinctColorPalette(6))
pie(emplSortedGye[2:7],col = distinctColorPalette(6))
#Quito
quito=datos[datos$Localizacion=="Quito",]
emplSortedQto=sort(table(quito$Empleador),decreasing = TRUE)
pie(emplSortedQto[1:6],col = distinctColorPalette(6))
pie(emplSortedQto[2:7],col = distinctColorPalette(6))
#Tiempo de relevancia de una oferta
t=sort(table(datos$Fecha),decreasing = TRUE)
plot(t[1:10],col=distinctColorPalette(10),type = "h")
library (ggplot2)
#cuales son los empleos menos solicitados?
empleos <- tolower( gsub("([A-Za-z]+).*", "\\1", datos$Trabajo))
#empleos <- empleos[ -c("graduado", "ingenier", "hca", "importante", "key","l", "lic", "liner", "polifuncional", "portoviejo", "profesionales", "programa", "project", "abogadoa", "necesito", "odont", "php" ,"matriceroa", "multinacional", "bluecard", "ec", "equipamiento", "especialista", "buscamos", "busco", "content", "control"), ]
frecuecias_empleos <- table(empleos)
empleos_ordenados <- sort(frecuecias_empleos,decreasing=FALSE)
#barplot(empleos_ordenados[1:20],legend=TRUE, col = distinctColorPalette(10))
dotchart(as.numeric(empleos_ordenados[1:15]),
labels = NULL, groups = NULL,
gcolor = par("fg"),
color = par("fg"),
main = "empleos menos solicitados",
xlab = "frecuecias"
)
#�Qu� cantones tiene menor oferta laboral y cuales son esos empleos?
cantones <- tolower( gsub("([A-Za-z]+).*", "\\1", datos$Localizacion))
frecuecias_cantones <- table(cantones)
cantones_ordenados <- sort(frecuecias_cantones,decreasing=FALSE)
barplot(cantones_ordenados[1:15],legend=TRUE, col = distinctColorPalette(10))
#empleos menos solicitados en ARENILLAS, MACHACHI, OTAVALO
datos[grep("Arenillas", datos$Localizacion),][1]
datos[grep("Machachi", datos$Localizacion),][1]
datos[grep("Otavalo", datos$Localizacion),][1]
install.packages("dplyr")
install.packages("tidyr")
install.packages("readxl")
install.packages("ggplot2")
library (ggplot2)
library (tidyr)
#Cual es la provincia con mayor oferta laboral en Ecuador?
data <- read.csv('../-Ecuador.csv') #cojer el csv y leerlo
#Conocer la frecuencia con la que se repite cada localizacion
tabla_lugar <- table(data$Localizacion)
write.table(tabla_lugar, 'OfertasLaborables.txt')
localizaciones <- read.table('OfertasLaborables.txt')
localizaciones <- as.data.frame.matrix(localizaciones)
names(localizaciones) <- c("Localizacion", "Oferta Laboral")
localizaciones <- localizaciones[localizaciones$`Oferta Laboral`>20, ]
x <- localizaciones[1]
x <- x$Localizacion
y <- localizaciones[2]
y <- y$`Oferta Laboral`
#Grafica de las Localizaciones que tienen mas de 20 ofertas laborales
barplot(y, main="Localizaciones vs Ofertas Laborables", xlab="Localizacion", ylab="#Ofertas Laborables", names.arg = x, border="blue", density=c(10,20,30,40,50))
#cuales carreras son las mas buscadas en cada provincia?
# Job-title column of the scraped postings.
trabajos <- data[, 1]
# First space-separated token of each title, upper-cased, with anything
# after a "/" dropped (e.g. "Vendedor/a ..." -> "VENDEDOR").
#
# NOTE(review): the original pair of element-wise loops raised
# "number of items to replace is not a multiple of replacement length"
# warnings (implicitly keeping only the first word) and coerced
# vector_empleo into a *list* by assigning list elements into it. This
# vectorized version computes the same first-token values but yields a
# plain character vector, which is what the downstream unite()/table()
# calls expect.
first_token <- function(x, split) {
  vapply(strsplit(as.character(x), split, fixed = TRUE),
         function(parts) parts[1], character(1))
}
vector_empleo <- first_token(toupper(first_token(trabajos, " ")), "/")
data$Empleo <- vector_empleo
data <- data[,-1]
data_EmpleoxLugar <- data[,2:4]
data_EmpleoxLugar <- data_EmpleoxLugar[,-2]
EmpleoxLugar <- unite(data_EmpleoxLugar, Empleo_Lugar, c(1:2), sep=",")
tabla_TrabajoxLugar <- table(EmpleoxLugar$Empleo_Lugar)
write.table(tabla_TrabajoxLugar, "TrabajoxLugar.txt")
TrabajoxLugar <- read.table("TrabajoxLugar.txt")
TrabajoxLugar <- as.data.frame.matrix(TrabajoxLugar)
#se separa la columna Var1 en dos
data_TrabajoxLugar <- within(data=TrabajoxLugar, Position<-data.frame(do.call('rbind',strsplit(as.character(Var1),",",fixed=TRUE))))
data_TrabajoxLugar <- data_TrabajoxLugar[,-1]
write.csv(data_TrabajoxLugar, "TrabajoxLugar.csv")
data_TrabajoxLugar <- read.csv("TrabajoxLugar.csv")
data_TrabajoxLugar <- data_TrabajoxLugar[,-1]
names(data_TrabajoxLugar) <- c("Cantidad de Empleos", "Localizacion","Tipo de Trabajo")
#ordenar por tipo de trabajo
data_TrabajoxLugar <- data_TrabajoxLugar[with(data_TrabajoxLugar, order(data_TrabajoxLugar$`Tipo de Trabajo`)), ]
#Se quitan las filas que no se utilizaran
data_TrabajoxLugar <- data_TrabajoxLugar[-2,]
data_TrabajoxLugar <- data_TrabajoxLugar[data_TrabajoxLugar$`Cantidad de Empleos`>=10,] #se escogen las localizaciones que registran mas de 10 empleos
data_TrabajoxLugar <- data_TrabajoxLugar[-29,]
data_TrabajoxLugar <- data_TrabajoxLugar[-28,]
data_TrabajoxLugar <- data_TrabajoxLugar[-18,]
data_TrabajoxLugar <- data_TrabajoxLugar[-27,]
#Grafica de la Cantidad de empleos en cada Localizacion
ggplot(data=data_TrabajoxLugar, aes(x=`Tipo de Trabajo`, y=`Cantidad de Empleos`, fill=Localizacion)) + geom_bar(stat="identity")
#Empresas que ofrecen mas tipos de empleos
View(data)
empleadores_empleos <- data[,-2]
empleadores_empleos <- empleadores_empleos[,-2]
empleadores_empleos <- unite(empleadores_empleos, Empleador_Empleo, c(1:2), sep=",")
tabla_empleadores_empleos <- table(empleadores_empleos $Empleador_Empleo)
write.table(tabla_empleadores_empleos, "EmpleadorXempleo.txt")
tabla_empleadores_empleos <- read.table("EmpleadorXempleo.txt")
empleadores_empleos <- as.data.frame.matrix(tabla_empleadores_empleos)
empleadores_empleos <- within(data=empleadores_empleos, Position<-data.frame(do.call('rbind',strsplit(as.character(Var1),",",fixed=TRUE))))
write.csv(empleadores_empleos, "empleadorXempleo.csv")
empleadores_empleos <- read.csv("empleadorXempleo.csv")
empleadores_empleos <- empleadores_empleos[,-(1:2)]
empleadores_empleos <- empleadores_empleos[,-(4:5)]
names(empleadores_empleos) <- c("Cantidad de Empleos", "Empleador","Tipo de Empleo")
#Se escogen las empresas que ofrecen mas de 10 empleos
empleadores_empleos <- empleadores_empleos[empleadores_empleos$`Cantidad de Empleos`>=10,]
empleadores_empleos <- empleadores_empleos [empleadores_empleos $Empleador!="N/A",]
empleadores_empleos <- empleadores_empleos [empleadores_empleos $`Tipo de Empleo`!="SE",]
empleadores_empleos <- empleadores_empleos[-4,]
mosaicplot(Empleador ~ `Tipo de Empleo`,data=empleadores_empleos ,color=c("#99cc99", "#cc9999", "#9999cc", "#9c9c9c", "lightskyblue2", "tomato"), las=1, main = " ")
#Empleos menos solicitados en base a fecha de publicacion
# Scatter posting age (Fecha) against job title for postings older than
# 10 days, after dropping generic/placeholder "titles".
empleosXfecha <- data[,-1]
empleosXfecha <- empleosXfecha[,-1]
# Coerce via character first so factor columns convert to their labels,
# not their level codes.
empleosXfecha[,1] <- as.numeric(sapply(empleosXfecha[,1], as.character))
empleosXfecha[,2] <- sapply(empleosXfecha[,2], as.character)
empleosXfecha <- empleosXfecha[empleosXfecha$Fecha > 10, ]
empleosXfecha <- empleosXfecha[complete.cases(empleosXfecha), ]
# One %in% filter replaces the original twelve repeated != subset lines;
# equivalent here because complete.cases() has already removed NAs.
placeholder_titles <- c(
  "SE", "FINANCE,", "RECEPCIONISTA.", "CONTRATACIÓN...", "CONTRATACIÓN",
  "VACANTE", "OFERTA", "EMPLEO", "URGENTE!!!", "URGENTE",
  "BACHILLER", "ASISTENTES"
)
empleosXfecha <- empleosXfecha[!(empleosXfecha$Empleo %in% placeholder_titles), ]
ggplot(empleosXfecha, aes(x = Fecha, y = Empleo)) + geom_point()
|
#setwd("~/CapstoneClass/ShinyApp")
library(tm)
library(stringr)
library(RWeka)
library(qdap)
library(shiny)
#library(rsconnect)
#library(shinyapps)
#deployApp(appName="DataScienceCapstoneProject")
#,appDir = getwd()
options(digits=4)
options(mc.cores=1)
#Bring in the Trigram information
trigramDF<-read.csv("trigramDF.csv")[,-1]
#trigramDF<-read.csv("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/trigramDF.csv")[,-1]
#Create trigam model with bigrams and unigrams for look up
bigram<-sub(" ","@@@@",trigramDF$tri)
bigram<-sub(" .*","",bigram)
trigramDF$bi<-sub("@@@@"," ",bigram)
rm(bigram)
trigramDF$uni<-sub(".* ","",trigramDF$bi)
trigramDF$w3<-sub(".* ","",trigramDF$tri)
#Bring in the Good-Turing Model.
GTCount<-(read.csv("GTCount.csv")[,-1])
#GTCount<-(read.csv("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/GTCount.csv")[,-1])
#bring in the Corpus for the document term matrix used in the Freq function
masterData<-readLines("master.txt",encoding="UTF-8")
#masterData<-readLines("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/master.txt",encoding="UTF-8")
masterCorpus<-VCorpus(VectorSource(masterData),
readerControl=list(language="en"))
rm(masterData)
docs<-tm_map(masterCorpus,PlainTextDocument)
tdm<-DocumentTermMatrix(masterCorpus)
rm(docs,masterCorpus)
#Clean up the user input
# Normalize raw user text for lookup in the n-gram tables.
#
# Lower-cases the input, strips every character that is not a letter,
# digit, whitespace or apostrophe, removes leading blanks, and collapses
# runs of blanks into a single space.  The regex patterns are kept
# byte-identical to the original (including the 1-10 / 2-10 repetition
# bounds) so behaviour is unchanged.
#
# @param word A character string (or vector) of raw user input.
# @return The cleaned string(s).
cleanInput <- function(word) {
  lowered <- tolower(word)
  kept <- gsub("[^[:alnum:][:space:]\']", "", lowered)
  no_lead <- gsub("^[ ]{1,10}", "", kept)
  gsub("[ ]{2,10}", " ", no_lead)
}
#Extract last two words from the input
# Extract the last two words of a phrase.
#
# Bug fixed: the original overwrote the character at the position of the
# last space with "}" and re-split.  When the phrase contained no space,
# `max(gregexpr(...))` returned -1 and the `substring<-` replacement
# clobbered the FIRST character instead (e.g. "hello" -> " ello"), which
# also let one-word input slip past predict()'s two-word check.  A
# trailing space likewise dropped the second-to-last word.  This version
# splits on spaces directly and leaves short input intact.
#
# @param phrase A single character string, normally already cleaned by
#   cleanInput() (single spaces between words).
# @return The last two words joined by one space, or `phrase` unchanged
#   when it holds fewer than two words.
lastTwo <- function(phrase) {
  words <- strsplit(phrase, split = " ", fixed = TRUE)[[1]]
  words <- words[nzchar(words)]  # drop empties from stray blanks
  if (length(words) < 2) {
    return(phrase)
  }
  paste(tail(words, 2), collapse = " ")
}
# Function that matches the first part of the input with the documen term matrix
# to, hopfully, add some accuracy.
# Correlation-based candidate scores for the next word.
#
# Uses findAssocs() on the global document-term matrix `tdm` to find
# terms associated (correlation >= 0.01) with the stop-word-free content
# of `phrase`, then aggregates the correlations per candidate word and
# scales them by 100 so they are comparable to trigram counts.
#
# NOTE(review): depends on global `tdm` (tm) and on qdap's
# rm_stopwords()/strip(); row-name parsing assumes findAssocs() names of
# the form "<queryword>.<candidate>".
freq<-function(phrase) {
# Associations for the query terms left after stop-word removal.
freq<-findAssocs(tdm, rm_stopwords(strip(phrase), tm::stopwords("english"))[[1]], corlimit=0.01)
freqM<-data.frame(unlist(freq))
rm(freq)
freqM$comb<-row.names(freqM)
# Split "query.candidate" row names at the first dot.
# NOTE(review): gregexpr(...)[[1]][[1]] inspects only the FIRST row
# name, so every row is split at the same character position -- confirm
# this is safe when query words differ in length.  Also,
# `length(freqM$comb)` below is the number of rows, not nchar(); it
# works only because substr() clamps the stop position -- verify.
freqM$word<-substr(freqM$comb,1,gregexpr(pattern ="[.]",freqM$comb)[[1]][[1]]-1)
freqM$match<-substr(freqM$comb,gregexpr(pattern ="[.]",freqM$comb)[[1]][[1]]+1,
length(freqM$comb))
row.names(freqM)<-1:nrow(freqM)
names(freqM)<-c("corr","comb","word","w3")
freqM$corr<-as.numeric(freqM$corr)
# Sum correlations per candidate word (w3).
freqDF<-data.frame(tapply(freqM$corr,freqM$w3,sum))
rm(freqM)
freqDF$w3<-row.names(freqDF)
names(freqDF)<-c("corr","w3")
# Highest-scoring candidates first, scaled to count-like magnitude.
freqDF<-freqDF[order(-freqDF$corr),]
freqDF$corr<-freqDF$corr*100
row.names(freqDF)<-1:nrow(freqDF)
return(freqDF)
}
# Create a table of output with the top [cluster] results
# Keep the rows of `subTri` belonging to the top `cluster` distinct
# count values.
#
# `subTri` is expected to be sorted by decreasing count, with the word
# in column 1 and its count in column 2.  Rows whose counts differ by
# less than 1e-5 share a rank (ties), and every row with rank <=
# `cluster` is retained.
#
# Fix over the original: with a zero-row input the old loop ran over
# `1:(rows - 1)` == c(1, 0) and crashed with a subscript error; zero-
# and one-row inputs are now both returned unchanged.
#
# @param subTri data.frame with columns (w3, Cnt), sorted by Cnt desc.
# @param cluster Number of distinct count levels to keep (default 7).
# @return The first two columns of the retained rows.
buildTable2 <- function(subTri, cluster = 7) {
  rows <- nrow(subTri)
  if (rows <= 1) {
    return(subTri[, 1:2])
  }
  useTri <- subTri
  useTri$rnk <- 1
  # Walk down the sorted counts, bumping the rank whenever the count
  # strictly decreases (tolerance 1e-5 for float noise).
  for (i in seq_len(rows - 1)) {
    if ((useTri[i, 2] - useTri[i + 1, 2]) > .00001) {
      useTri[i + 1, 3] <- useTri[i, 3] + 1
    } else {
      useTri[i + 1, 3] <- useTri[i, 3]
    }
  }
  useTri <- useTri[useTri$rnk <= cluster, ]
  useTri[, 1:2]
}
# Main prediction function
# Predict the next word of `input` with a backoff trigram model plus
# Good-Turing smoothing, and report whether `answer` is among the
# candidates.
#
# NOTE(review): this masks stats::predict when sourced.  The `cluster`
# argument is accepted but never forwarded to buildTable2(), which
# always uses its own default of 7 -- confirm whether that is intended.
# Relies on globals trigramDF and GTCount and on the helpers
# cleanInput(), lastTwo(), freq() and buildTable2().
predict<-function(input,answer,cluster=7,freqOp=FALSE){# cluster is currently unused (see note above)
# Keep the raw strings for display; `oirg.answer` is a long-standing
# typo for "orig.answer" but is spelled consistently below.
orig.input<-input
oirg.answer<-answer
# Normalize the input and reduce it to its final bigram.
input<-cleanInput(input)
input<-lastTwo(input)
answer<-cleanInput(answer)
inputSize<-length(strsplit(input, " ")[[1]])
if (inputSize!=2) stop("There needs to be at least two words")
# Total count of trigrams whose leading bigram matches the input.
nCount<-sum(trigramDF[which(trigramDF$bi==input),1])
if(nCount==0) {
# Bigram unseen: back off to the last word (unigram) alone.
input<-gsub(".* ","",input)
nCount<-sum(trigramDF[which(trigramDF$uni==input),1])
if(nCount==0) stop ("This phrase goes beyond my ability to build an\n",
"algorithm to predict. Don't be proud you stumped me,\n",
"it wasn't that hard.")
#subset all the bigrams that begin with the unigram
seekTri<-grepl(paste("^",input,"$",sep=""),trigramDF$uni)
subTri<-trigramDF[seekTri,]
# Collapse duplicate third words, summing their counts.
subTri<-aggregate(subTri$Cnt,list(subTri$w3),sum)
names(subTri)<-c("w3","Cnt")
# Optionally blend in the correlation scores from the freq() table.
if (freqOp==TRUE) {
freqDF<-freq(orig.input)
combine<-merge(freqDF,subTri)
combine$NewCnt<-combine$Cnt+combine$corr
combine<-combine[order(-combine$NewCnt),]
subTri<-combine[,c(1,4)]
names(subTri)<-c("w3","Cnt")
row.names(subTri)<-1:nrow(subTri)
}
# NOTE(review): `decreasing=T` uses the reassignable alias T; prefer TRUE.
subTri<-subTri[order(subTri$Cnt,decreasing=T),]
useTri<-buildTable2 (subTri)
# Good-Turing adjustment for low counts (column 2 = unigram-backoff GT).
for (i in 1:length(useTri$Cnt)){
count=useTri[i,2]
if(count<=5) {
useTri[i,2]<-GTCount[count+1,2]
}
}
} else {
# use all recorded trigrams that begin with the bigram
seekTri<-grepl(paste("^",input,"$",sep=""),trigramDF$bi)
subTri<-trigramDF[seekTri,]
subTri<-aggregate(subTri$Cnt,list(subTri$w3),sum)
names(subTri)<-c("w3","Cnt")
# Optionally blend in the correlation scores from the freq() table.
if (freqOp==TRUE) {
freqDF<-freq(orig.input)
combine<-merge(freqDF,subTri)
combine$NewCnt<-combine$Cnt+combine$corr
combine<-combine[order(-combine$NewCnt),]
subTri<-combine[,c(1,4)]
names(subTri)<-c("w3","Cnt")
row.names(subTri)<-1:nrow(subTri)
}
subTri<-subTri[order(-subTri$Cnt),]
useTri<-buildTable2(subTri)
# Good-Turing adjustment for low counts (column 4 = trigram GT values).
for (i in 1:length(useTri$Cnt)) {
count=useTri[i,2]
if(count<=5) {
useTri[i,2]<-GTCount[count+1,4]
}
}
}
# NOTE(review): setting options(digits=4) here leaks a global side effect.
options(digits=4)
# Relative-frequency "probabilities" in percent.
predictWord<-data.frame(Word=useTri$w3,
probability=(useTri$Cnt/nCount)*100,stringsAsFactors=FALSE)
print(paste("Words that might complete your phrase: ",orig.input))
if (answer %in% predictWord$Word) {
print(paste("Your answer [",oirg.answer,"] is in row "
,grep(answer,predictWord$Word)
," in the list."))
} else {""}
print(predictWord)
if (answer %in% predictWord$Word) {
print("Hey look I got it right!")
} else {cat("OK, you stumped my algorithm. \nDon't be too proud, it wasn't that hard to do.")}
# return(predictWord)
}
#source('server.R', local=TRUE)
# Shiny server: wires the prediction function and a diagnostic barplot
# to the UI inputs (input$input, input$answer, input$freq).
shinyServer(
function(input, output) {
# Printed prediction output for the main panel.
output$PredictFUN<-renderPrint({
predict(input$input,input$answer,freqOp=input$freq)
# NOTE(review): the progress bar runs AFTER predict() has already
# finished, so it is purely cosmetic (a fixed Sys.sleep loop, longer
# when the slower freq option is on) -- confirm this is intentional.
withProgress(message="Hold on, it's thinking",
min=1, max=15,
value = 0,
{n<-if(input$freq==FALSE){5} else {100}
for (i in 1:n) {
incProgress(1/n)
Sys.sleep(0.25)
}
})
})
# Horizontal barplot of trigram counts whose leading bigram matches the
# last two words of the user's phrase (uses global trigramDF).
output$trigramPlot<-renderPlot({
barplot(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),1],
main="Counts of trigrams that match your phrase",
col=rainbow(nlevels(as.factor(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),2]
))),horiz=TRUE,xlab="Counts",cex.names=.75,las=2,names.arg=(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),2]))
})
}
)
| /server.R | no_license | Creisi/DataScienceCapstoneProject | R | false | false | 7,324 | r | #setwd("~/CapstoneClass/ShinyApp")
library(tm)
library(stringr)
library(RWeka)
library(qdap)
library(shiny)
#library(rsconnect)
#library(shinyapps)
#deployApp(appName="DataScienceCapstoneProject")
#,appDir = getwd()
options(digits=4)
options(mc.cores=1)
#Bring in the Trigram information
trigramDF<-read.csv("trigramDF.csv")[,-1]
#trigramDF<-read.csv("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/trigramDF.csv")[,-1]
#Create trigam model with bigrams and unigrams for look up
bigram<-sub(" ","@@@@",trigramDF$tri)
bigram<-sub(" .*","",bigram)
trigramDF$bi<-sub("@@@@"," ",bigram)
rm(bigram)
trigramDF$uni<-sub(".* ","",trigramDF$bi)
trigramDF$w3<-sub(".* ","",trigramDF$tri)
#Bring in the Good-Turing Model.
GTCount<-(read.csv("GTCount.csv")[,-1])
#GTCount<-(read.csv("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/GTCount.csv")[,-1])
#bring in the Corpus for the document term matrix used in the Freq function
masterData<-readLines("master.txt",encoding="UTF-8")
#masterData<-readLines("https://dl.dropboxusercontent.com/u/81409190/CapstoneShinyData/master.txt",encoding="UTF-8")
masterCorpus<-VCorpus(VectorSource(masterData),
readerControl=list(language="en"))
rm(masterData)
docs<-tm_map(masterCorpus,PlainTextDocument)
tdm<-DocumentTermMatrix(masterCorpus)
rm(docs,masterCorpus)
#Clean up the user input
# Normalize raw user text for lookup in the n-gram tables.
#
# Lower-cases the input, strips every character that is not a letter,
# digit, whitespace or apostrophe, removes leading blanks, and collapses
# runs of blanks into a single space.  The regex patterns are kept
# byte-identical to the original (including the 1-10 / 2-10 repetition
# bounds) so behaviour is unchanged.
#
# @param word A character string (or vector) of raw user input.
# @return The cleaned string(s).
cleanInput <- function(word) {
  lowered <- tolower(word)
  kept <- gsub("[^[:alnum:][:space:]\']", "", lowered)
  no_lead <- gsub("^[ ]{1,10}", "", kept)
  gsub("[ ]{2,10}", " ", no_lead)
}
#Extract last two words from the input
# Extract the last two words of a phrase.
#
# Bug fixed: the original overwrote the character at the position of the
# last space with "}" and re-split.  When the phrase contained no space,
# `max(gregexpr(...))` returned -1 and the `substring<-` replacement
# clobbered the FIRST character instead (e.g. "hello" -> " ello"), which
# also let one-word input slip past predict()'s two-word check.  A
# trailing space likewise dropped the second-to-last word.  This version
# splits on spaces directly and leaves short input intact.
#
# @param phrase A single character string, normally already cleaned by
#   cleanInput() (single spaces between words).
# @return The last two words joined by one space, or `phrase` unchanged
#   when it holds fewer than two words.
lastTwo <- function(phrase) {
  words <- strsplit(phrase, split = " ", fixed = TRUE)[[1]]
  words <- words[nzchar(words)]  # drop empties from stray blanks
  if (length(words) < 2) {
    return(phrase)
  }
  paste(tail(words, 2), collapse = " ")
}
# Function that matches the first part of the input with the documen term matrix
# to, hopfully, add some accuracy.
freq<-function(phrase) {
freq<-findAssocs(tdm, rm_stopwords(strip(phrase), tm::stopwords("english"))[[1]], corlimit=0.01)
freqM<-data.frame(unlist(freq))
rm(freq)
freqM$comb<-row.names(freqM)
freqM$word<-substr(freqM$comb,1,gregexpr(pattern ="[.]",freqM$comb)[[1]][[1]]-1)
freqM$match<-substr(freqM$comb,gregexpr(pattern ="[.]",freqM$comb)[[1]][[1]]+1,
length(freqM$comb))
row.names(freqM)<-1:nrow(freqM)
names(freqM)<-c("corr","comb","word","w3")
freqM$corr<-as.numeric(freqM$corr)
freqDF<-data.frame(tapply(freqM$corr,freqM$w3,sum))
rm(freqM)
freqDF$w3<-row.names(freqDF)
names(freqDF)<-c("corr","w3")
freqDF<-freqDF[order(-freqDF$corr),]
freqDF$corr<-freqDF$corr*100
row.names(freqDF)<-1:nrow(freqDF)
return(freqDF)
}
# Create a table of output with the top [cluster] results
# Keep the rows of `subTri` belonging to the top `cluster` distinct
# count values.
#
# `subTri` is expected to be sorted by decreasing count, with the word
# in column 1 and its count in column 2.  Rows whose counts differ by
# less than 1e-5 share a rank (ties), and every row with rank <=
# `cluster` is retained.
#
# Fix over the original: with a zero-row input the old loop ran over
# `1:(rows - 1)` == c(1, 0) and crashed with a subscript error; zero-
# and one-row inputs are now both returned unchanged.
#
# @param subTri data.frame with columns (w3, Cnt), sorted by Cnt desc.
# @param cluster Number of distinct count levels to keep (default 7).
# @return The first two columns of the retained rows.
buildTable2 <- function(subTri, cluster = 7) {
  rows <- nrow(subTri)
  if (rows <= 1) {
    return(subTri[, 1:2])
  }
  useTri <- subTri
  useTri$rnk <- 1
  # Walk down the sorted counts, bumping the rank whenever the count
  # strictly decreases (tolerance 1e-5 for float noise).
  for (i in seq_len(rows - 1)) {
    if ((useTri[i, 2] - useTri[i + 1, 2]) > .00001) {
      useTri[i + 1, 3] <- useTri[i, 3] + 1
    } else {
      useTri[i + 1, 3] <- useTri[i, 3]
    }
  }
  useTri <- useTri[useTri$rnk <= cluster, ]
  useTri[, 1:2]
}
# Main prediction function
predict<-function(input,answer,cluster=7,freqOp=FALSE){# his has cluster=7
orig.input<-input
oirg.answer<-answer
input<-cleanInput(input)
input<-lastTwo(input)
answer<-cleanInput(answer)
inputSize<-length(strsplit(input, " ")[[1]])
if (inputSize!=2) stop("There needs to be at least two words")
nCount<-sum(trigramDF[which(trigramDF$bi==input),1])
if(nCount==0) {
input<-gsub(".* ","",input)
nCount<-sum(trigramDF[which(trigramDF$uni==input),1])
if(nCount==0) stop ("This phrase goes beyond my ability to build an\n",
"algorithm to predict. Don't be proud you stumped me,\n",
"it wasn't that hard.")
#subset all the bigrams taht begin with the unigram
seekTri<-grepl(paste("^",input,"$",sep=""),trigramDF$uni)
subTri<-trigramDF[seekTri,]
subTri<-aggregate(subTri$Cnt,list(subTri$w3),sum)
names(subTri)<-c("w3","Cnt")
# Put in the Freq table stuff here.
if (freqOp==TRUE) {
freqDF<-freq(orig.input)
combine<-merge(freqDF,subTri)
combine$NewCnt<-combine$Cnt+combine$corr
combine<-combine[order(-combine$NewCnt),]
subTri<-combine[,c(1,4)]
names(subTri)<-c("w3","Cnt")
row.names(subTri)<-1:nrow(subTri)
}
subTri<-subTri[order(subTri$Cnt,decreasing=T),]
useTri<-buildTable2 (subTri)
for (i in 1:length(useTri$Cnt)){
count=useTri[i,2]
if(count<=5) {
useTri[i,2]<-GTCount[count+1,2]
}
}
} else {
# use all recorded trigrams taht begin with bigram
seekTri<-grepl(paste("^",input,"$",sep=""),trigramDF$bi)
subTri<-trigramDF[seekTri,]
subTri<-aggregate(subTri$Cnt,list(subTri$w3),sum)
names(subTri)<-c("w3","Cnt")
if (freqOp==TRUE) {
freqDF<-freq(orig.input)
combine<-merge(freqDF,subTri)
combine$NewCnt<-combine$Cnt+combine$corr
combine<-combine[order(-combine$NewCnt),]
subTri<-combine[,c(1,4)]
names(subTri)<-c("w3","Cnt")
row.names(subTri)<-1:nrow(subTri)
}
subTri<-subTri[order(-subTri$Cnt),]
useTri<-buildTable2(subTri)
for (i in 1:length(useTri$Cnt)) {
count=useTri[i,2]
if(count<=5) {
useTri[i,2]<-GTCount[count+1,4]
}
}
}
options(digits=4)
predictWord<-data.frame(Word=useTri$w3,
probability=(useTri$Cnt/nCount)*100,stringsAsFactors=FALSE)
print(paste("Words that might complete your phrase: ",orig.input))
if (answer %in% predictWord$Word) {
print(paste("Your answer [",oirg.answer,"] is in row "
,grep(answer,predictWord$Word)
," in the list."))
} else {""}
print(predictWord)
if (answer %in% predictWord$Word) {
print("Hey look I got it right!")
} else {cat("OK, you stumped my algorithm. \nDon't be too proud, it wasn't that hard to do.")}
# return(predictWord)
}
#source('server.R', local=TRUE)
shinyServer(
function(input, output) {
output$PredictFUN<-renderPrint({
predict(input$input,input$answer,freqOp=input$freq)
withProgress(message="Hold on, it's thinking",
min=1, max=15,
value = 0,
{n<-if(input$freq==FALSE){5} else {100}
for (i in 1:n) {
incProgress(1/n)
Sys.sleep(0.25)
}
})
})
output$trigramPlot<-renderPlot({
barplot(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),1],
main="Counts of trigrams that match your phrase",
col=rainbow(nlevels(as.factor(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),2]
))),horiz=TRUE,xlab="Counts",cex.names=.75,las=2,names.arg=(trigramDF[grepl(lastTwo(input$input),trigramDF$bi),2]))
})
}
)
|
#' Start/Stop verbose output
#'
#' A verbose connection provides much more information about the flow of
#' information between the client and server. `hx_verbose()` differs
#' from [`httr::verbose()`] because it prints during `httrex` / `reprex`
#' rendering.
#'
#' @section Overriding during `httrex` rendering:
#' `httrex` allows for overriding options per knitr chunk using chunk options.
#' During a `httrex()` call the knitr options can be set by starting a line with
#' `#+` and then setting the options to R values.
#'
#' The available options are:
#' * If the chunk label is "setup", no verbose output is provided. This allows
#' sensitive details to be kept confidential, such as a password when logging in.
#' * `hx.data_in`, `hx.data_out`, `hx.info`, `hx.ssl` override the arguments to
#' `verbose()`.
#'
#' Example code that could be sent to `httrex()`:
#' ```
#' #+ setup # makes a chunk with label "setup" so there's no verbose output
#' library(crunch)
#' login()
#' #+ # Nothing after `#+` returns to defaults
#' listDatasets()
#' #+ hx.data_out=TRUE # Override so that `data_out` is printed
#' ```
#'
#' @section Prefixes:
#'
#' `verbose()` uses the following prefixes to distinguish between
#' different components of the http messages:
#'
#' * `*` informative curl messages
#' * `->` headers sent (out)
#' * `>>` data sent (out)
#' * `*>` ssl data sent (out)
#' * `<-` headers received (in)
#' * `<<` data received (in)
#' * `<*` ssl data received (in)
#'
#' @param data_out Show data sent to server
#' @param data_in Show data received from server
#' @param info Show informational text from curl. This is mainly useful for debugging
#' https and auth problems, so is disabled by default
#' @param ssl Show even data sent/recieved over SSL connections?
#'
#' @export
# Install the httrex verbose debug hook globally via httr::set_config().
#
# Side effects: mutates httr's global config and registers the
# formatter's finalizer in options(httrex_finalize) so that
# hx_stop_verbose() can flush it later.
# NOTE(review): `inline_format` is a package-internal object (defined
# elsewhere in this package) that supplies initialize/finalize and the
# message-formatting callbacks -- confirm its contract there.
hx_set_verbose <- function(
data_out = TRUE,
data_in = TRUE,
info = FALSE,
ssl = FALSE
) {
formatting <- inline_format
formatting$initialize()
httr::set_config(hx_verbose(formatting, data_out, data_in, info, ssl))
options(httrex_finalize = formatting$finalize)
}
#' @rdname hx_set_verbose
#' @export
# Undo hx_set_verbose(): remove the debug hook from httr's global config
# and run + clear the registered finalizer.
#
# Fix over the original: `options("httrex_finalize")[[1]]()` was called
# unconditionally, which errored with "attempt to apply non-function"
# whenever hx_stop_verbose() ran without a prior hx_set_verbose().  The
# finalizer is now fetched with getOption() and only invoked when it is
# actually a function, making the call idempotent.
hx_stop_verbose <- function() {
  httr::set_config(httr::config(debugfunction = NULL, verbose = FALSE))
  finalize <- getOption("httrex_finalize")
  if (is.function(finalize)) {
    finalize()
  }
  options(httrex_finalize = NULL)
}
# Build the httr config object that enables verbose output through the
# httrex debug callback.
#
# @param formatting Formatter object (see inline_format) whose callbacks
#   redact, format and accumulate each curl debug message.
# @param data_out,data_in,info,ssl Defaults for the corresponding
#   verbosity switches; see hx_set_verbose() for their meaning.
# @return An httr::config() carrying the debug function.
hx_verbose <- function (
formatting,
data_out = TRUE,
data_in = TRUE,
info = FALSE,
ssl = FALSE
) {
httr::config(
debugfunction = httrex_new_debug(formatting, data_out, data_in, info, ssl),
verbose = TRUE
)
}
# Factory returning the curl debug callback used by hx_verbose().
#
# The returned closure receives each debug event (`type` is curl's
# numeric event code, `msg` a raw vector) and routes it through the
# formatter.  Per-chunk knitr options (hx.data_out, hx.data_in, hx.info,
# hx.ssl) override the closure's captured defaults, and a chunk labelled
# "setup" is silenced entirely so credentials are not echoed.
# NOTE(review): `htypes()` and `%||%` are package-internal helpers
# defined elsewhere; `data_in` defaults to FALSE here while hx_verbose()
# passes TRUE explicitly -- the discrepancy is harmless but confusing.
httrex_new_debug <- function(
formatting,
data_out = TRUE,
data_in = FALSE,
info = FALSE,
ssl = FALSE
) {
function(type, msg) {
# Suppress all output inside a chunk labelled "setup".
if ((knitr::opts_current$get("label") %||% "") == "setup") return()
# Chunk-level overrides of the verbosity switches.
data_out <- knitr::opts_current$get("hx.data_out") %||% data_out
data_in <- knitr::opts_current$get("hx.data_in") %||% data_in
info <- knitr::opts_current$get("hx.info") %||% info
ssl <- knitr::opts_current$get("hx.ssl") %||% ssl
# Map curl's numeric event code to a readable name, then filter.
type_string <- htypes(type)
if (type_string == "info" && !info) return()
if (type_string == "dataIn" && !data_in) return()
if (type_string == "dataOut" && !data_out) return()
if (type_string == "sslDataIn" && (!ssl || !data_in)) return()
if (type_string == "sslDataOut" && (!ssl || !data_out)) return()
# Response bodies may be compressed; best-effort decompression
# (warnings suppressed because the payload may not be compressed).
if (type_string %in% c("dataIn")) {
suppressWarnings(msg <- memDecompress(msg))
}
# Raw bytes -> character, then hand off to the formatter pipeline.
msg <- readBin(msg, character())
msg <- gsub("\\r?\\n\\r?", "\n", msg) # standardize new line format to \n
msg <- formatting$redact_messages(type, msg)
msg <- formatting$format_messages(type, msg)
formatting$accumulate_messages(msg)
}
}
| /R/verbose.R | permissive | gergness/httrex | R | false | false | 3,858 | r | #' Start/Stop verbose output
#'
#' A verbose connection provides much more information about the flow of
#' information between the client and server. `hx_verbose()` differs
#' from [`httr::verbose()`] because it prints during `httrex` / `reprex`
#' rendering.
#'
#' @section Overriding during `httrex` rendering:
#' `httrex` allows for overriding options per knitr chunk using chunk options.
#' During a `httrex()` call the knitr options can be set by starting a line with
#' `#+` and then setting the options to R values.
#'
#' The available options are:
#' * If the chunk label is "setup", no verbose output is provided. This allows
#' sensitive details to be kept confidential, such as a password when logging in.
#' * `hx.data_in`, `hx.data_out`, `hx.info`, `hx.ssl` override the arguments to
#' `verbose()`.
#'
#' Example code that could be sent to `httrex()`:
#' ```
#' #+ setup # makes a chunk with label "setup" so there's no verbose output
#' library(crunch)
#' login()
#' #+ # Nothing after `#+` returns to defaults
#' listDatasets()
#' #+ hx.data_out=TRUE # Override so that `data_out` is printed
#' ```
#'
#' @section Prefixes:
#'
#' `verbose()` uses the following prefixes to distinguish between
#' different components of the http messages:
#'
#' * `*` informative curl messages
#' * `->` headers sent (out)
#' * `>>` data sent (out)
#' * `*>` ssl data sent (out)
#' * `<-` headers received (in)
#' * `<<` data received (in)
#' * `<*` ssl data received (in)
#'
#' @param data_out Show data sent to server
#' @param data_in Show data received from server
#' @param info Show informational text from curl. This is mainly useful for debugging
#' https and auth problems, so is disabled by default
#' @param ssl Show even data sent/recieved over SSL connections?
#'
#' @export
hx_set_verbose <- function(
data_out = TRUE,
data_in = TRUE,
info = FALSE,
ssl = FALSE
) {
formatting <- inline_format
formatting$initialize()
httr::set_config(hx_verbose(formatting, data_out, data_in, info, ssl))
options(httrex_finalize = formatting$finalize)
}
#' @rdname hx_set_verbose
#' @export
# Undo hx_set_verbose(): remove the debug hook from httr's global config
# and run + clear the registered finalizer.
#
# Fix over the original: `options("httrex_finalize")[[1]]()` was called
# unconditionally, which errored with "attempt to apply non-function"
# whenever hx_stop_verbose() ran without a prior hx_set_verbose().  The
# finalizer is now fetched with getOption() and only invoked when it is
# actually a function, making the call idempotent.
hx_stop_verbose <- function() {
  httr::set_config(httr::config(debugfunction = NULL, verbose = FALSE))
  finalize <- getOption("httrex_finalize")
  if (is.function(finalize)) {
    finalize()
  }
  options(httrex_finalize = NULL)
}
hx_verbose <- function (
formatting,
data_out = TRUE,
data_in = TRUE,
info = FALSE,
ssl = FALSE
) {
httr::config(
debugfunction = httrex_new_debug(formatting, data_out, data_in, info, ssl),
verbose = TRUE
)
}
httrex_new_debug <- function(
formatting,
data_out = TRUE,
data_in = FALSE,
info = FALSE,
ssl = FALSE
) {
function(type, msg) {
if ((knitr::opts_current$get("label") %||% "") == "setup") return()
data_out <- knitr::opts_current$get("hx.data_out") %||% data_out
data_in <- knitr::opts_current$get("hx.data_in") %||% data_in
info <- knitr::opts_current$get("hx.info") %||% info
ssl <- knitr::opts_current$get("hx.ssl") %||% ssl
type_string <- htypes(type)
if (type_string == "info" && !info) return()
if (type_string == "dataIn" && !data_in) return()
if (type_string == "dataOut" && !data_out) return()
if (type_string == "sslDataIn" && (!ssl || !data_in)) return()
if (type_string == "sslDataOut" && (!ssl || !data_out)) return()
if (type_string %in% c("dataIn")) {
suppressWarnings(msg <- memDecompress(msg))
}
msg <- readBin(msg, character())
msg <- gsub("\\r?\\n\\r?", "\n", msg) # standardize new line format to \n
msg <- formatting$redact_messages(type, msg)
msg <- formatting$format_messages(type, msg)
formatting$accumulate_messages(msg)
}
}
|
## Given a invertible matrix, the following functions
## will calculate the inverse matrix
## or retrieve the inverse matrix from the cache.
## Function "makeCacheMatrix" creates a special "matrix" object
## that can cache its inverse.
## makeVector contains 4 functions: set, get, setmean, getmean.
# get is a function that returns the vector x stored in the main function.
# set is a function that changes the vector stored in the main function.
# setmean and getmean are functions very similar to set and get.
# They don't calculate the mean, they simply store the value of the input in a variable m
# into the main function makeVector (setmean) and return it (getmean).
## Build a matrix wrapper able to cache its inverse.
##
## Returns a list of four closures sharing one enclosing environment:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
## Function "cacheSolve" computes the inverse of the special "matrix"
## (which is the input of cachemean) returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## If the inverse has not been calculated, data gets the matrix stored with makeCacheMatrix,
## m calculates the inverse, and x$setmean(m) stores it in the object m in makeCacheMatrix.
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## computing it at most once.
##
## On a cache hit the stored inverse is returned immediately (after a
## "getting cached data" message); otherwise the inverse is computed with
## solve(), written back into the cache, and returned.  Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
# Example usage of makeCacheMatrix()/cacheSolve().
# NOTE(review): "CachedMarix" is a typo for "CachedMatrix", kept as-is.
a <- diag(5,3)
a
CachedMarix <- makeCacheMatrix(a)
cacheSolve(CachedMarix)         # first call: computes the inverse
b <- diag(2,6)
b
CachedMarix <- makeCacheMatrix(b)
cacheSolve(CachedMarix)         # first call on the new matrix: computes
cacheSolve(CachedMarix) #getting cached data
| /cachematrix.R | no_license | metalgarurumon/ProgrammingAssignment2 | R | false | false | 1,972 | r | ## Given a invertible matrix, the following functions
## will calculate the inverse matrix
## or retrieve the inverse matrix from the cache.
## Function "makeCacheMatrix" creates a special "matrix" object
## that can cache its inverse.
## makeVector contains 4 functions: set, get, setmean, getmean.
# get is a function that returns the vector x stored in the main function.
# set is a function that changes the vector stored in the main function.
# setmean and getmean are functions very similar to set and get.
# They don't calculate the mean, they simply store the value of the input in a variable m
# into the main function makeVector (setmean) and return it (getmean).
## Build a matrix wrapper able to cache its inverse.
##
## Returns a list of four closures sharing one enclosing environment:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse() -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
## Function "cacheSolve" computes the inverse of the special "matrix"
## (which is the input of cachemean) returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## If the inverse has not been calculated, data gets the matrix stored with makeCacheMatrix,
## m calculates the inverse, and x$setmean(m) stores it in the object m in makeCacheMatrix.
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## computing it at most once.
##
## On a cache hit the stored inverse is returned immediately (after a
## "getting cached data" message); otherwise the inverse is computed with
## solve(), written back into the cache, and returned.  Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
# Example
a <- diag(5,3)
a
CachedMarix <- makeCacheMatrix(a)
cacheSolve(CachedMarix)
b <- diag(2,6)
b
CachedMarix <- makeCacheMatrix(b)
cacheSolve(CachedMarix)
cacheSolve(CachedMarix) #getting cached data
|
library(xtable)
library(mvmeta)
library(mvtnorm)
library(Matrix)
library(speedglm)
library(rjags)
# Evaluate `expr`, capturing both its value (or the error object) and
# the last warning raised, without letting the warning print.
#
# @param expr Expression to evaluate.
# @return list(value = result or simpleError,
#              warning = last warning condition or NULL)
tryCatch.W.E <- function(expr) {
  last_warning <- NULL
  result <- withCallingHandlers(
    tryCatch(expr, error = function(e) e),
    warning = function(w) {
      last_warning <<- w
      invokeRestart("muffleWarning")
    }
  )
  list(value = result, warning = last_warning)
}
##make ARMA(1,1) correlation matrix
# Build an n x n ARMA(1,1)-style correlation matrix: 1 on the diagonal
# and phi * rho^|i - j| off the diagonal.
#
# Rewritten without the original O(n^2) scalar double loop: the lag
# matrix |i - j| is built in one vectorised step with outer(), then
# scaled; output is numerically identical.
#
# @param phi Scalar multiplier applied to every off-diagonal entry.
# @param rho Base of the geometric decay in the lag |i - j|.
# @param n   Matrix dimension.
# @return n x n numeric matrix.
ARMAcor <- function(phi, rho, n)
{
  C <- phi * rho^abs(outer(seq_len(n), seq_len(n), "-"))
  diag(C) <- 1
  C
}
# --- Simulation parameters -----------------------------------------------
# NOTE(review): commandArgs() is used without trailingOnly = TRUE, so the
# hard-coded positions 4-8 assume a specific invocation (e.g. R CMD
# BATCH with fixed leading arguments) -- confirm against the job script.
##number of studies
I <- as.numeric(commandArgs()[4])
##number of parameters
p <- as.numeric(commandArgs()[5])
##between-study correlation
corrBtw <- as.numeric(commandArgs()[6])
##between-study variance
varBtw <- as.numeric(commandArgs()[7])
##batch number
batch <- as.numeric(commandArgs()[8])
##number of people in the study
nCases <- 500
nCtrls <- 500
n <- nCases+nCtrls
## Manual-run defaults kept for interactive debugging:
##I <- 10
##p <- 3
##corrBtw <- 0.5
##varBtw <- 1
##batch <- 10
##make between-study var-cov matrix
##Sigma <- matrix(corrBtw, nrow=p, ncol=p)
##diag(Sigma) <- 1
# AR(1)-structured between-study correlation, scaled by the variance.
Sigma <- ARMAcor(phi=1, rho=corrBtw, n=p)
Sigma <- Sigma*varBtw
nrSims <- 2500
##nrSims <- 20
# NOTE(review): hard-coded absolute setwd() is non-portable, and the
# "\ " escape in the path is not a valid R string escape -- this line
# likely needs the backslash removed ("Meta analysis") to parse.
setwd("/data/bocasm/Research/Meta\ analysis/code/simulations/cost_of_estimation_AR1")
# True intercepts (per study) and true overall means (per parameter).
Beta0 <- -0.125*p
Beta0 <- rep(Beta0, I)
OverallMeans <- rep(0.2, p)
OverallMeansMat <- matrix(OverallMeans, nrow=1)
###################################################
####### Simulate based on this data #######
###################################################
print(date())
set.seed(batch*300+313)
##save prevalences for each site
simPrevSite <- matrix(NA, nrow=nrSims, ncol=I)
#save estimates for the study-specific means
StudyEst <- list()
##save estimates for the within-study variances
StudyEstVars <- list()
##for each site, save whether or not there is a warning for the logistic regression
StudyWarns <- matrix("", nrow=nrSims, ncol=I)
##save estimated Sigma
SigmaEst <- list()
##save estimated Sigma from univariate meta-analysis (so only diagonal entries)
SigmaEstUniv <- list()
##similarly, save estimated Sigmas from the Bayesian analyses
SigmaEstBayes <- SigmaEstUnivBayes <- list()
##save warnings due to mvmeta
MetaWarns <- rep("", nrSims)
##save estimates of mu from Bayesian approach
OverallEstBayes <- OverallEstUnivBayes <- matrix(NA, nrow=nrSims, ncol=p)
print(date())
#get variance-covariance matrices for all site
varCovsSitesAll <- list()
for(site in 1:I)
{
##varCovsSite <- matrix((site-1)/I, p, p)
##diag(varCovsSite) <- rep(1, p)
varCovsSite <- ARMAcor(phi=1, rho=(site-1)/I, n=p)
varCovsSitesAll[[site]] <- 4/n * solve(varCovsSite)
}
for(sim in 1:nrSims)
{
if(sim %% 10 == 0)
{
print(sim)
}
##simulate the study-specific means
studMeans <- rmvnorm(I, mean = OverallMeans,
sigma = Sigma, method="svd")
##for this simulation, get estimated means and variances
estMeansSim <- matrix(rep(0, p),
nrow = I, ncol = p)
estVarsSim <- list()
##save disease status for everyone, to get overall prevalence
diseaseAllSites <- c()
##get site-specific prevalence
prevSite <- rep(0, I)
for(site in 1:I)
{
OverallMeansMat <- studMeans[site, , drop=FALSE]
meansSite <- rep(0, p)
varCovsSite <- varCovsSitesAll[[site]]
##simulate values for the p variables from the multivariate normal for each dataset
##simulate 10 times the population size, since will be doing retrospective sampling
XSimSite <- rmvnorm(n*10, mean = meansSite, sigma = varCovsSite, method="svd")
##get estimated probabilities
logits <- Beta0[site]+.Internal(rowSums(OverallMeansMat[rep(1, nrow(XSimSite)), ] * XSimSite,
n*10, p, TRUE))
expLogits <- exp(logits)
probs <- expLogits/(1+expLogits)
probs <- probs[!is.na(probs)]
##get whether the individual is a case or control
CC <- rbinom(length(probs), 1, probs)
diseaseAllSites <- c(diseaseAllSites, CC)
prevSite[site] <- mean(CC)
##get cases at this site
casesSite <- which(CC == 1)
ctrlsSite <- which(CC == 0)
##sample number of cases and controls from the population generated
casesSite <- sample(casesSite, nCases)
ctrlsSite <- sample(ctrlsSite, nCtrls)
simPrevSite[sim, site] <-
length(casesSite)/(length(casesSite)+length(ctrlsSite))
dataSiteSamp <- cbind(c(rep(1, length(casesSite)),
rep(0, length(ctrlsSite))),
XSimSite[c(casesSite, ctrlsSite), ])
tmpX <- cbind(rep(1, n), dataSiteSamp[, 2:(p+1)])
colnames(tmpX) <- 0:p
##get estimates, while also catching warnings
logistSiteCatchWarn <- tryCatch.W.E(speedglm.wfit(y = dataSiteSamp[, 1],
X = tmpX,
family = binomial(link="logit")))
logistSite <- logistSiteCatchWarn$value
StudyWarns[sim, site] <- paste(logistSiteCatchWarn$warning, collapse="\n")
estMeansSim[site, ] <- logistSite$coefficients[2:(p+1)] ##coefficients(summary(logistSite))[-1, "Estimate"]
estVarsSim[[site]] <- summary(logistSite)$cov.scaled[2:(p+1), 2:(p+1)] ##vcov(logistSite)[-1,-1]
}
##save estimated effect sizes
StudyEst[[sim]] <- estMeansSim
##save estimated within-study variance-covariance matrices
StudyEstVars[[sim]] <- estVarsSim
##get estimates of Sigma, while also catching warnings
SigmaEstCatchWarn <- tryCatch.W.E(mvmeta(StudyEst[[sim]],
StudyEstVars[[sim]],
method="reml")$Psi)
SigmaEst[[sim]] <- SigmaEstCatchWarn$value
MetaWarns[sim] <- paste(SigmaEstCatchWarn$warning, collapse="\n")
##also get estimates of Sigma from univariate meta-analysis
SigmaEstUniv.sim <- rep(NA, p)
for(pp in 1:p)
{
SigmaEstUniv.sim[pp] <- mvmeta(StudyEst[[sim]][,pp],
S =
lapply(StudyEstVars[[sim]],
function(x){x[pp, pp, drop=FALSE]}),
method="reml")$Psi
}
SigmaEstUniv[[sim]] <- SigmaEstUniv.sim
##overall prevalence
prev <- mean(diseaseAllSites)
##now do the Bayesian analysis!
##first get the *precision* matrices and concatenate them!
Tau <- c()
for(i in 1:I)
{
Tau <- rbind(Tau, solve(StudyEstVars[[sim]][[i]]))
}
##also do this in the univariate case! (just take the inverse of the variances of the first component only)
TauUniv <- matrix(NA, ncol=1)
for(i in 1:I)
{
TauUniv[i] <- 1/StudyEstVars[[sim]][[i]][1,1]
}
###starting position for each study in Sigma2
pos <- 1+c(0:(I-1))*p
###ending position for each study in Sigma2
pos2 <- pos+p-1
###Omega2 is a fixed hyperparameter - it's a prior on the between-study *precision matrix*
Omega <- diag(1/runif(p,0,2)^2)
##create model for multivariate meta-analysis
jagsMultiv <- jags.model('example_MVMA.bug',
##'H:/Simina/metaAnalysis/example6.bug.txt',
data = list('p' = p,
'N' = I,
'y'=StudyEst[[sim]],
'pos'=pos,
'sigma'=Tau,
'pos2'=pos2,
'Omega'=Omega),
n.chains = 1,
n.adapt = 1000,
quiet=TRUE)
jagsMultivRes <- jags.samples(jagsMultiv,
c('intau','mu'),
50000,
thin=5)
##stop()
SigmaEstBayes[[sim]] <- solve(summary(jagsMultivRes$intau, FUN="median")[1]$stat)
OverallEstBayes[sim, ] <- summary(jagsMultivRes$mu, FUN="median")[1]$stat
##just save the estimate for the first variance with the univariate model - don't fit it for all p outcomes in this case
jagsUniv <- jags.model('example_UVMA.bug',
data = list('N' = I,
'y'=StudyEst[[sim]][,1],
'pos'=1:I,
'sigma'=TauUniv,
'pos2'=1:I,
'Omega'=Omega[1,1]),
n.chains = 1,
n.adapt = 1000,
quiet=TRUE)
jagsUnivRes <- jags.samples(jagsUniv,
c('intau','mu'),
50000,
thin=5)
SigmaEstUnivBayes[[sim]] <- 1/summary(jagsUnivRes$intau, FUN="median")[1]$stat
OverallEstUnivBayes[sim, 1] <- summary(jagsUnivRes$mu, FUN="median")[1]$stat
if(sim %% 50 == 0)
{
print(prev)
}
}
print(date())
save(list=c("StudyEst", "StudyEstVars",
"SigmaEst", "SigmaEstUniv",
"SigmaEstBayes", "SigmaEstUnivBayes",
"OverallEstBayes", "OverallEstUnivBayes",
"StudyWarns", "MetaWarns"),
file = paste("simResults/cost_of_estimation",
"I", I, "p", p, "corrBtw", corrBtw, "varBtw", varBtw,
"batch", batch,
".RData", sep="_"))
rm(list=ls())
| /make_Figure_S4/simulation_code/cost_of_estimation_code_for_Swarm_panels_ab.R | no_license | SiminaB/MVMA | R | false | false | 9,596 | r | library(xtable)
library(mvmeta)
library(mvtnorm)
library(Matrix)
library(speedglm)
library(rjags)
# Evaluate `expr`, capturing both its value (or the error object) and
# the last warning raised, without letting the warning print.
#
# @param expr Expression to evaluate.
# @return list(value = result or simpleError,
#              warning = last warning condition or NULL)
tryCatch.W.E <- function(expr) {
  last_warning <- NULL
  result <- withCallingHandlers(
    tryCatch(expr, error = function(e) e),
    warning = function(w) {
      last_warning <<- w
      invokeRestart("muffleWarning")
    }
  )
  list(value = result, warning = last_warning)
}
##make ARMA(1,1) correlation matrix
# Build an n x n ARMA(1,1)-style correlation matrix: 1 on the diagonal
# and phi * rho^|i - j| off the diagonal.
#
# Rewritten without the original O(n^2) scalar double loop: the lag
# matrix |i - j| is built in one vectorised step with outer(), then
# scaled; output is numerically identical.
#
# @param phi Scalar multiplier applied to every off-diagonal entry.
# @param rho Base of the geometric decay in the lag |i - j|.
# @param n   Matrix dimension.
# @return n x n numeric matrix.
ARMAcor <- function(phi, rho, n)
{
  C <- phi * rho^abs(outer(seq_len(n), seq_len(n), "-"))
  diag(C) <- 1
  C
}
##number of studies
I <- as.numeric(commandArgs()[4])
##number of parameters
p <- as.numeric(commandArgs()[5])
##between-study correlation
corrBtw <- as.numeric(commandArgs()[6])
##between-study variance
varBtw <- as.numeric(commandArgs()[7])
##batch number
batch <- as.numeric(commandArgs()[8])
##number of people in the study
nCases <- 500
nCtrls <- 500
n <- nCases+nCtrls
##
##I <- 10
##p <- 3
##corrBtw <- 0.5
##varBtw <- 1
##batch <- 10
##make between-study var-cov matrix
##Sigma <- matrix(corrBtw, nrow=p, ncol=p)
##diag(Sigma) <- 1
Sigma <- ARMAcor(phi=1, rho=corrBtw, n=p)
Sigma <- Sigma*varBtw
nrSims <- 2500
##nrSims <- 20
setwd("/data/bocasm/Research/Meta\ analysis/code/simulations/cost_of_estimation_AR1")
Beta0 <- -0.125*p
Beta0 <- rep(Beta0, I)
OverallMeans <- rep(0.2, p)
OverallMeansMat <- matrix(OverallMeans, nrow=1)
###################################################
####### Simulate based on this data #######
###################################################
print(date())
set.seed(batch*300+313)
##save prevalences for each site
simPrevSite <- matrix(NA, nrow=nrSims, ncol=I)
#save estimates for the study-specific means
StudyEst <- list()
##save estimates for the within-study variances
StudyEstVars <- list()
##for each site, save whether or not there is a warning for the logistic regression
StudyWarns <- matrix("", nrow=nrSims, ncol=I)
##save estimated Sigma
SigmaEst <- list()
##save estimated Sigma from univariate meta-analysis (so only diagonal entries)
SigmaEstUniv <- list()
##similarly, save estimated Sigmas from the Bayesian analyses
SigmaEstBayes <- SigmaEstUnivBayes <- list()
##save warnings due to mvmeta
MetaWarns <- rep("", nrSims)
##save estimates of mu from Bayesian approach
OverallEstBayes <- OverallEstUnivBayes <- matrix(NA, nrow=nrSims, ncol=p)
print(date())
#get variance-covariance matrices for all site
varCovsSitesAll <- list()
for(site in 1:I)
{
##varCovsSite <- matrix((site-1)/I, p, p)
##diag(varCovsSite) <- rep(1, p)
varCovsSite <- ARMAcor(phi=1, rho=(site-1)/I, n=p)
varCovsSitesAll[[site]] <- 4/n * solve(varCovsSite)
}
for(sim in 1:nrSims)
{
if(sim %% 10 == 0)
{
print(sim)
}
##simulate the study-specific means
studMeans <- rmvnorm(I, mean = OverallMeans,
sigma = Sigma, method="svd")
##for this simulation, get estimated means and variances
estMeansSim <- matrix(rep(0, p),
nrow = I, ncol = p)
estVarsSim <- list()
##save disease status for everyone, to get overall prevalence
diseaseAllSites <- c()
##get site-specific prevalence
prevSite <- rep(0, I)
for(site in 1:I)
{
OverallMeansMat <- studMeans[site, , drop=FALSE]
meansSite <- rep(0, p)
varCovsSite <- varCovsSitesAll[[site]]
##simulate values for the p variables from the multivariate normal for each dataset
##simulate 10 times the population size, since will be doing retrospective sampling
XSimSite <- rmvnorm(n*10, mean = meansSite, sigma = varCovsSite, method="svd")
##get estimated probabilities
logits <- Beta0[site]+.Internal(rowSums(OverallMeansMat[rep(1, nrow(XSimSite)), ] * XSimSite,
n*10, p, TRUE))
expLogits <- exp(logits)
probs <- expLogits/(1+expLogits)
probs <- probs[!is.na(probs)]
##get whether the individual is a case or control
CC <- rbinom(length(probs), 1, probs)
diseaseAllSites <- c(diseaseAllSites, CC)
prevSite[site] <- mean(CC)
##get cases at this site
casesSite <- which(CC == 1)
ctrlsSite <- which(CC == 0)
##sample number of cases and controls from the population generated
casesSite <- sample(casesSite, nCases)
ctrlsSite <- sample(ctrlsSite, nCtrls)
simPrevSite[sim, site] <-
length(casesSite)/(length(casesSite)+length(ctrlsSite))
dataSiteSamp <- cbind(c(rep(1, length(casesSite)),
rep(0, length(ctrlsSite))),
XSimSite[c(casesSite, ctrlsSite), ])
tmpX <- cbind(rep(1, n), dataSiteSamp[, 2:(p+1)])
colnames(tmpX) <- 0:p
##get estimates, while also catching warnings
logistSiteCatchWarn <- tryCatch.W.E(speedglm.wfit(y = dataSiteSamp[, 1],
X = tmpX,
family = binomial(link="logit")))
logistSite <- logistSiteCatchWarn$value
StudyWarns[sim, site] <- paste(logistSiteCatchWarn$warning, collapse="\n")
estMeansSim[site, ] <- logistSite$coefficients[2:(p+1)] ##coefficients(summary(logistSite))[-1, "Estimate"]
estVarsSim[[site]] <- summary(logistSite)$cov.scaled[2:(p+1), 2:(p+1)] ##vcov(logistSite)[-1,-1]
}
##save estimated effect sizes
StudyEst[[sim]] <- estMeansSim
##save estimated within-study variance-covariance matrices
StudyEstVars[[sim]] <- estVarsSim
##get estimates of Sigma, while also catching warnings
SigmaEstCatchWarn <- tryCatch.W.E(mvmeta(StudyEst[[sim]],
StudyEstVars[[sim]],
method="reml")$Psi)
SigmaEst[[sim]] <- SigmaEstCatchWarn$value
MetaWarns[sim] <- paste(SigmaEstCatchWarn$warning, collapse="\n")
##also get estimates of Sigma from univariate meta-analysis
SigmaEstUniv.sim <- rep(NA, p)
for(pp in 1:p)
{
SigmaEstUniv.sim[pp] <- mvmeta(StudyEst[[sim]][,pp],
S =
lapply(StudyEstVars[[sim]],
function(x){x[pp, pp, drop=FALSE]}),
method="reml")$Psi
}
SigmaEstUniv[[sim]] <- SigmaEstUniv.sim
##overall prevalence
prev <- mean(diseaseAllSites)
##now do the Bayesian analysis!
##first get the *precision* matrices and concatenate them!
Tau <- c()
for(i in 1:I)
{
Tau <- rbind(Tau, solve(StudyEstVars[[sim]][[i]]))
}
##also do this in the univariate case! (just take the inverse of the variances of the first component only)
TauUniv <- matrix(NA, ncol=1)
for(i in 1:I)
{
TauUniv[i] <- 1/StudyEstVars[[sim]][[i]][1,1]
}
###starting position for each study in Sigma2
pos <- 1+c(0:(I-1))*p
###ending position for each study in Sigma2
pos2 <- pos+p-1
###Omega2 is a fixed hyperparameter - it's a prior on the between-study *precision matrix*
Omega <- diag(1/runif(p,0,2)^2)
##create model for multivariate meta-analysis
jagsMultiv <- jags.model('example_MVMA.bug',
##'H:/Simina/metaAnalysis/example6.bug.txt',
data = list('p' = p,
'N' = I,
'y'=StudyEst[[sim]],
'pos'=pos,
'sigma'=Tau,
'pos2'=pos2,
'Omega'=Omega),
n.chains = 1,
n.adapt = 1000,
quiet=TRUE)
jagsMultivRes <- jags.samples(jagsMultiv,
c('intau','mu'),
50000,
thin=5)
##stop()
SigmaEstBayes[[sim]] <- solve(summary(jagsMultivRes$intau, FUN="median")[1]$stat)
OverallEstBayes[sim, ] <- summary(jagsMultivRes$mu, FUN="median")[1]$stat
##just save the estimate for the first variance with the univariate model - don't fit it for all p outcomes in this case
jagsUniv <- jags.model('example_UVMA.bug',
data = list('N' = I,
'y'=StudyEst[[sim]][,1],
'pos'=1:I,
'sigma'=TauUniv,
'pos2'=1:I,
'Omega'=Omega[1,1]),
n.chains = 1,
n.adapt = 1000,
quiet=TRUE)
jagsUnivRes <- jags.samples(jagsUniv,
c('intau','mu'),
50000,
thin=5)
SigmaEstUnivBayes[[sim]] <- 1/summary(jagsUnivRes$intau, FUN="median")[1]$stat
OverallEstUnivBayes[sim, 1] <- summary(jagsUnivRes$mu, FUN="median")[1]$stat
if(sim %% 50 == 0)
{
print(prev)
}
}
print(date())
save(list=c("StudyEst", "StudyEstVars",
"SigmaEst", "SigmaEstUniv",
"SigmaEstBayes", "SigmaEstUnivBayes",
"OverallEstBayes", "OverallEstUnivBayes",
"StudyWarns", "MetaWarns"),
file = paste("simResults/cost_of_estimation",
"I", I, "p", p, "corrBtw", corrBtw, "varBtw", varBtw,
"batch", batch,
".RData", sep="_"))
rm(list=ls())
|
#this is for combining WM ROI activation w/ behavior data
#
library(plyr)
library(ggplot2)
library(dplyr)
# standard error of the mean: sample sd divided by sqrt(sample size)
std <- function(x) {
  sd(x) / sqrt(length(x))
}
delay_ld3<-read.delim(file="/Volumes/Phillips/P5/scripts/txt/delay_ld3_final_BA_spheres_max.txt",header=F)
delay_ld3<-delay_ld3/100
fornames<-read.delim(file="/Volumes/Phillips/P5/scripts/txt/20160722_subj_list.txt",header=F)
delay_ld3.1<-cbind(fornames$V1,delay_ld3)
colnames(delay_ld3.1)<-c("ID","LCING_delay_ld3","LDLPFC_BA46_delay_ld3","LDLPFC_BA9_delay_ld3","LFEF_delay_ld3","LIPL_delay_ld3","LVC_delay_ld3","LINS_delay_ld3","RCING_delay_ld3","RDLPFC_BA46_delay_ld3","RDLPFC_BA9_delay_ld3","RFEF_delay_ld3","RIPL_delay_ld3","RVC_delay_ld3","RINS_delay_ld3")
#take out two excluded scans: 11333_20141017, 11369_20150519
#BUG FIX: the lookup previously used `delay_ld1.1`, an object that is never
#created in this script (only `delay_ld3.1` exists), so this line failed
#with "object 'delay_ld1.1' not found".
#NOTE(review): if either ID is absent, match() returns NA and the negative
#subscript below would error -- assumed both scans are always present.
bad_subs<-match(c("11333_20141017","11369_20150519"),delay_ld3.1$ID)
delay_ld3.2<-delay_ld3.1[-bad_subs,]
#write this table to Dropbox for ROI_mean_side_load.R)
write.table(delay_ld3.2,file="/Users/mariaj/Dropbox/delay_ld3_BA_spheres_max.txt")
#read in google doc
subj<-read.delim(file="/Volumes/Phillips/P5/scripts/SubjInfoGoogleSheet_wdx.txt")
data_ROIs<-merge(delay_ld3.2,subj,by.x="ID",by.y="MRID")
for (i in 2:15)
{
model_beh<-glm(data_ROIs[[i]]~as.factor(data_ROIs$Cohort)+data_ROIs$age+data_ROIs$sex,data=data_ROIs)
print((summary(model_beh)$coefficients[2,4]))
}
write.table(data_ROIs,file="/Users/m20160412_cue_ld3")
#compare only scz to controls
data_ROIs_dx<-data_ROIs[data_ROIs$confirmed_initial_dx1=="1" | data_ROIs$confirmed_initial_dx1=="3", ]
for (i in 2:9)
{
model_beh<-glm(data_ROIs_dx[[i]]~as.factor(data_ROIs_dx$confirmed_initial_dx1),data=data_ROIs_dx)
print((summary(model_beh)$coefficients[2,4]))
}
#new data table name
# put all % correct into one column (and load into another)
dt_RTAccLong <- data_ROIs
dt_RTAccLong.1<-dt_RTAccLong %>%
mutate( Dx = factor(confirmed_initial_dx1,labels=list('SCZ','Other','Control')) )
dt_RTAccLong.2<-dt_RTAccLong.1 %>%
mutate( Cohort = factor(Cohort,labels=list("First Episode","Control")) )
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA17_delay_ld3)
pdf(paste("LBA17_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA17_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA17_delay_ld3)
pdf(paste("RBA17_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA17_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA40_delay_ld3)
pdf(paste("LBA40_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA40_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA40_delay_ld3)
pdf(paste("RBA40_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA40_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA9_delay_ld3)
pdf(paste("LBA46_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA9_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA9_delay_ld3)
pdf(paste("RBA9delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA9_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
#create bilateral ROIs
#data_ROIs$BBA17_delay_ld3<-(data_ROIs$LBA17_delay_ld3+data_ROIs$RBA17_delay_ld3)/2
#data_ROIs$BBA40_delay_ld3<-(data_ROIs$LBA40_delay_ld3+data_ROIs$RBA40_delay_ld3)/2
#data_ROIs$BBA46_delay_ld3<-(data_ROIs$LBA46_delay_ld3+data_ROIs$RBA46_delay_ld3)/2
#data_ROIs$BBA9_delay_ld3<-(data_ROIs$LBA46_delay_ld3+data_ROIs$RBA9_delay_ld3)/2
#write.table(data_ROIs[,c(1,22:24)],file="/Volumes/Phillips/P5/scripts/20160307_BLROIS_ld3.txt")
symp<-read.delim(file="/Users/mariaj/Dropbox/20160310_symptom_measures.txt")
data_symp<-merge(data_ROIs,symp,by.x="ID",by.y="ID")
cor.test(data_symp$LBA40_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$SANITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$LBA40_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$LBA40_delay_ld3,data_symp$IQ)
cor.test(data_symp$LBA9_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$SANITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$LBA9_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$LBA9_delay_ld3,data_symp$IQ)
cor.test(data_symp$RBA9_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$SANITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$RBA9_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$RBA9_delay_ld3,data_symp$IQ) | /scripts/WM/BA_ROIs_top25_ld3.R | no_license | LabNeuroCogDevel/SzAttWMproc | R | false | false | 11,138 | r | #this is for combining WM ROI activation w/ behavior data
#
library(plyr)
library(ggplot2)
library(dplyr)
# standard error of the mean: sample sd divided by sqrt(sample size)
std <- function(x) {
  sd(x) / sqrt(length(x))
}
delay_ld3<-read.delim(file="/Volumes/Phillips/P5/scripts/txt/delay_ld3_final_BA_spheres_max.txt",header=F)
delay_ld3<-delay_ld3/100
fornames<-read.delim(file="/Volumes/Phillips/P5/scripts/txt/20160722_subj_list.txt",header=F)
delay_ld3.1<-cbind(fornames$V1,delay_ld3)
colnames(delay_ld3.1)<-c("ID","LCING_delay_ld3","LDLPFC_BA46_delay_ld3","LDLPFC_BA9_delay_ld3","LFEF_delay_ld3","LIPL_delay_ld3","LVC_delay_ld3","LINS_delay_ld3","RCING_delay_ld3","RDLPFC_BA46_delay_ld3","RDLPFC_BA9_delay_ld3","RFEF_delay_ld3","RIPL_delay_ld3","RVC_delay_ld3","RINS_delay_ld3")
#take out two excluded scans: 11333_20141017, 11369_20150519
#BUG FIX: the lookup previously used `delay_ld1.1`, an object that is never
#created in this script (only `delay_ld3.1` exists), so this line failed
#with "object 'delay_ld1.1' not found".
#NOTE(review): if either ID is absent, match() returns NA and the negative
#subscript below would error -- assumed both scans are always present.
bad_subs<-match(c("11333_20141017","11369_20150519"),delay_ld3.1$ID)
delay_ld3.2<-delay_ld3.1[-bad_subs,]
#write this table to Dropbox for ROI_mean_side_load.R)
write.table(delay_ld3.2,file="/Users/mariaj/Dropbox/delay_ld3_BA_spheres_max.txt")
#read in google doc
subj<-read.delim(file="/Volumes/Phillips/P5/scripts/SubjInfoGoogleSheet_wdx.txt")
data_ROIs<-merge(delay_ld3.2,subj,by.x="ID",by.y="MRID")
for (i in 2:15)
{
model_beh<-glm(data_ROIs[[i]]~as.factor(data_ROIs$Cohort)+data_ROIs$age+data_ROIs$sex,data=data_ROIs)
print((summary(model_beh)$coefficients[2,4]))
}
write.table(data_ROIs,file="/Users/m20160412_cue_ld3")
#compare only scz to controls
data_ROIs_dx<-data_ROIs[data_ROIs$confirmed_initial_dx1=="1" | data_ROIs$confirmed_initial_dx1=="3", ]
for (i in 2:9)
{
model_beh<-glm(data_ROIs_dx[[i]]~as.factor(data_ROIs_dx$confirmed_initial_dx1),data=data_ROIs_dx)
print((summary(model_beh)$coefficients[2,4]))
}
#new data table name
# put all % correct into one column (and load into another)
dt_RTAccLong <- data_ROIs
dt_RTAccLong.1<-dt_RTAccLong %>%
mutate( Dx = factor(confirmed_initial_dx1,labels=list('SCZ','Other','Control')) )
dt_RTAccLong.2<-dt_RTAccLong.1 %>%
mutate( Cohort = factor(Cohort,labels=list("First Episode","Control")) )
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA17_delay_ld3)
pdf(paste("LBA17_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA17_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA17_delay_ld3)
pdf(paste("RBA17_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA17_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA40_delay_ld3)
pdf(paste("LBA40_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA40_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA40_delay_ld3)
pdf(paste("RBA40_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA40_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),LBA9_delay_ld3)
pdf(paste("LBA46_delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=LBA9_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
# get mean and sterror
RTAccsmy <- dt_RTAccLong.2 %>% group_by(Cohort)
#put in your region you are interested in
RTAccsmy.1<-RTAccsmy%>% summarise_each(funs(mean,std),RBA9_delay_ld3)
pdf(paste("RBA9delay_ld3.pdf",sep="_"))
textSize <- 24
xlab<-" "
ylab<-"Parameter Estimate"
BA_plot<- ggplot(RTAccsmy.1) +
aes(y=mean,x=Cohort,fill=Cohort) +
geom_bar(stat='identity',position='dodge') +scale_fill_manual(values=c("light grey","gray34"))+
geom_errorbar(aes(ymin=mean-std,ymax=mean+std),position=position_dodge(.9),width=0.25) + xlab(xlab) + ylab(ylab) + theme_bw(base_size=textSize)+theme(panel.border = element_blank(), panel.grid.major = element_blank(),panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))+theme(legend.title=element_blank())+theme(legend.position="none")+coord_cartesian(ylim=c(-0.30,1.0))
BA_plot2<- BA_plot+ geom_point(data=dt_RTAccLong.2,
position=position_jitterdodge(jitter.width=.4, dodge.width=.9),
aes(color=Dx,group=Cohort,y=RBA9_delay_ld3,shape=Dx),alpha=.8,size=4) +scale_color_manual(values=c("red","black","gray8","gray40"))+scale_shape_manual(values=c(16,1,16,16))
print(BA_plot2)
dev.off()
#create bilateral ROIs
#data_ROIs$BBA17_delay_ld3<-(data_ROIs$LBA17_delay_ld3+data_ROIs$RBA17_delay_ld3)/2
#data_ROIs$BBA40_delay_ld3<-(data_ROIs$LBA40_delay_ld3+data_ROIs$RBA40_delay_ld3)/2
#data_ROIs$BBA46_delay_ld3<-(data_ROIs$LBA46_delay_ld3+data_ROIs$RBA46_delay_ld3)/2
#data_ROIs$BBA9_delay_ld3<-(data_ROIs$LBA46_delay_ld3+data_ROIs$RBA9_delay_ld3)/2
#write.table(data_ROIs[,c(1,22:24)],file="/Volumes/Phillips/P5/scripts/20160307_BLROIS_ld3.txt")
symp<-read.delim(file="/Users/mariaj/Dropbox/20160310_symptom_measures.txt")
data_symp<-merge(data_ROIs,symp,by.x="ID",by.y="ID")
cor.test(data_symp$LBA40_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$SANITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$LBA40_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$LBA40_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$LBA40_delay_ld3,data_symp$IQ)
cor.test(data_symp$LBA9_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$SANITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$LBA9_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$LBA9_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$LBA9_delay_ld3,data_symp$IQ)
cor.test(data_symp$RBA9_delay_ld3,data_symp$AVSN_OITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$AVSPITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$SANITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$SAPITM)
cor.test(data_symp$RBA9_delay_ld3,data_symp$BPRSNGS)
cor.test(data_symp$RBA9_delay_ld3,data_symp$BPRSPOS)
cor.test(data_symp$RBA9_delay_ld3,data_symp$IQ) |
#add all packages
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
library(e1071)
#load data: pre-built 2003 France MAIAC/PM2.5 modelling dataset
mod1 <-readRDS("/media/NAS/Uni/Projects/P031_MAIAC_France/2.work/WORKDIR/mod1.AQ.2003.PM25.c3.rds")
###base model: PM2.5 regressed on AOD plus scaled covariates --
###distances to infrastructure/coast/water, emission proxies, land-use
###fractions, and temperature/elevation/density/boundary-layer terms
m1.formula <- as.formula(pm25~ aod
+dport.s+dtrain.s+daroad.s+dcoast.s+dwb.s
+NO2.s+SO2.s
+PM10ems.s
+ p.agric.s
+ p.open.s
+p.forest.s
+p.urban.s
+temp.s+elev_m+tden+pbl.s)
#svm: fit a support-vector regression of pm25 on the base-model predictors
model <- svm( m1.formula, mod1)
#in-sample predictions at the training points
mod1$pred.m1 <- predict(model)
#report R^2 of observed vs fitted pm25
#NOTE(review): this is training-set R^2 (no hold-out), so it is optimistic
print(summary(lm(pm25~pred.m1,data=mod1))$r.squared)
#example model: mixed-effects formula with a per-day/grid-cell random AOD slope
#BUG FIX: the spatial line previously began with a bare `poden` (no leading
#`+`), which is a parse error inside as.formula(); it also contained a
#stray doubled `+ +` before elev_m.
m1.formula <- as.formula(pm25 ~ aod
#temporal
+pbl.s+tempc+wdsp+NDVI
#spatial
+poden+purban+elev_m+tden
#distances
+dport.s+dtrain.s+daroad.s+dcoast.s+dwb.s
#emission data
+NO2.s+SO2.s+PM10ems.s
#land use
+ p.agric.s+ p.open.s+p.forest.s +p.urban.s
#random component
+(1+aod|day/cid) )
| /Uni/Projects/code/P031.MAIAC.France/archive/svm.R | no_license | zeltak/org | R | false | false | 1,056 | r | #add all packages
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
library(e1071)
#load data
mod1 <-readRDS("/media/NAS/Uni/Projects/P031_MAIAC_France/2.work/WORKDIR/mod1.AQ.2003.PM25.c3.rds")
###base model
m1.formula <- as.formula(pm25~ aod
+dport.s+dtrain.s+daroad.s+dcoast.s+dwb.s
+NO2.s+SO2.s
+PM10ems.s
+ p.agric.s
+ p.open.s
+p.forest.s
+p.urban.s
+temp.s+elev_m+tden+pbl.s)
#svm
model <- svm( m1.formula, mod1)
mod1$pred.m1 <- predict(model)
print(summary(lm(pm25~pred.m1,data=mod1))$r.squared)
#example model: mixed-effects formula with a per-day/grid-cell random AOD slope
#BUG FIX: the spatial line previously began with a bare `poden` (no leading
#`+`), which is a parse error inside as.formula(); it also contained a
#stray doubled `+ +` before elev_m.
m1.formula <- as.formula(pm25 ~ aod
#temporal
+pbl.s+tempc+wdsp+NDVI
#spatial
+poden+purban+elev_m+tden
#distances
+dport.s+dtrain.s+daroad.s+dcoast.s+dwb.s
#emission data
+NO2.s+SO2.s+PM10ems.s
#land use
+ p.agric.s+ p.open.s+p.forest.s +p.urban.s
#random component
+(1+aod|day/cid) )
|
#############################################
# #
# Name: Ashley Music #
# Date: 08/03/2020 #
# Subject: NN #
# Class: BDAT 625 #
# File Name: #
# Week6_Association_HW_Music_Ashley #
# #
#############################################
# Working directory holding CourseRatings.csv
# NOTE(review): setwd() with an absolute user path, rm(list = ls()), and
# install.packages() inside a script are discouraged -- they break
# portability, wipe the caller's workspace, and re-install on every run.
setwd("C:/Users/aggie/Desktop/Data Mining")
rm(list = ls())
install.packages('arules')
install.packages('recommenderlab')
library(arules)
library(recommenderlab)
library(readr)
# student-by-course ratings (per the comment below: 15 students, 9 courses)
CourseRatings <- read_csv("CourseRatings.csv")
View(CourseRatings)
# Consider a user-based collaborative filter. This requires computing correlations between all student pairs. For which students is it possible to compute correlations with E.N.? Compute them.
#EN and LN
EN <- (4+4+4+3)/4
LN <- (4+3+2+4+2)/5
DS <- (4+2+4)/3
#EN and LN's correlations. They both liked SQL, R Prog and Regression. Cor = .8703. Pretty similar
EN.LN <- (4-3.75)*(4-3)+(4-3.75)*(4-3)+(3-3.75)*(2-3)
EN.Squared <- (4-3.75)^2 + (4-3.75)^2 + (3-3.75)^2
En.Sqrt <- sqrt(EN.Squared)
LN.Squared <- (4-3)^2+ (4-3)^2 +(2-3)^2
LN.Sqrt <- sqrt(LN.Squared)
EN.LN.Cor <- EN.LN / (En.Sqrt * LN.Sqrt)
#EN and DS. They both like SQL, R Prog and DM in R. Cor of .003535512
EN.DS <- (4-3.75)*(4-3.33)+(4-3.75)*(2-3.33)+(4-3.75)*(4-3.33)
EN.Sq.2 <- (4-3.75)^2 + (4-3.75)^2 +(4-3.75)^2
EN.Sqrt.2 <- sqrt(EN.Sq.2)
DS.Squared <- (4-3.33)^2 + (2-3.33)^2 + (4-3.33)^2
DS.Sqrt <- sqrt(DS.Squared)
EN.DS.Cor <- EN.DS / (EN.Sqrt.2 * DS.Sqrt)
#Cosine similarity between EN and LN on their shared courses
#(EN: 4, 4, 3 vs LN: 4, 4, 2).
#BUG FIX: EN's squared norm previously used 4^3 instead of 3^2, which
#deflated the similarity to ~0.646; the corrected value is ~0.989.
Cosine.num <- 4*4+4*4+3*2
Cosine.dem.en <- 4^2 + 4^2 + 3^2
Cosine.dem.en.sqrt <- sqrt(Cosine.dem.en)
Cosine.dem.ln <- 4^2 + 4^2 + 2^2
Cosine.dem.ln.sqrt <- sqrt(Cosine.dem.ln)
Cosine <- Cosine.num / (Cosine.dem.en.sqrt*Cosine.dem.ln.sqrt)
# Calculate two course pair correlations and report the results.
#SQL (3.4 Average) and Spatial (3.5 Average). Cor is .8424235
SQL <- (4+3+2+4+4)/5
Spatial <- (4+2+4+4+4+3)/6
SQL.Spatial <- (3-3.4)*(4-3.5)+(2-3.4)*(2-3.5)+(4-3.4)*(4-3.5)
SQL.Sq.2 <- (3-3.4)^2 + (2-3.4)^2 +(4-3.4)^2
SQL.Sq.2<- sqrt(SQL.Sq.2)
Spatial.Squared <- (4-3.5)^2 + (2-3.5)^2 + (4-3.5)^2
Spatial.Squared.2 <- sqrt(Spatial.Squared)
SQL.Spatial.Cor <- SQL.Spatial / (SQL.Sq.2 * Spatial.Squared.2 )
#SQL and Python (3.5 Average). Cor of -.9805807
Python <- (3+4)/2
SQL.Python <- (4-3.4)*(3-3.5)+(3-3.4)*(4-3.5)
SQL.Sq.2.3 <- (4-3.4)^2 + (3-3.4)^2
SQL.Sq.2.3 <- sqrt(SQL.Sq.2.3 )
Python.Squared <- (3-3.5)^2 + (4-3.5)^2
Python.Squared.2 <- sqrt(Python.Squared)
SQL.Python.Cor <- SQL.Python / (SQL.Sq.2.3 * Python.Squared.2 )
# 15 users [1:15,1] , 9 courses to rate [1:15, 2:10]
# Not all users have given recommendations.
CourseRatings <- as.matrix(CourseRatings)   # recommenderlab needs a matrix
r <- as(CourseRatings, "realRatingMatrix")  # sparse user x course rating matrix
EN <- CourseRatings[ 4, ]                   # E.N.'s ratings (row 4)
EN <- as.matrix(EN)
# NOTE(review): CourseRatings[4, ] drops to a vector, so as.matrix() yields a
# 9 x 1 column matrix; verify predict() receives a 1-user row as intended.
EN.Rec <- as(EN, "realRatingMatrix")
Course.Rec.item <- Recommender(r, "IBCF")   # item-based collaborative filter
pred.2 <- predict(Course.Rec.item, r, type = "ratings")   # predicted ratings, all users
as(pred.2, "matrix")
pred.3 <- predict(Course.Rec.item , EN.Rec, type = "ratings")  # predictions for E.N.
as(pred.3, "matrix")
| /Week6_Association_Music_Ashley.R | no_license | aggiemusic/R | R | false | false | 3,393 | r | #############################################
# #
# Name: Ashley Music #
# Date: 08/03/2020 #
# Subject: NN #
# Class: BDAT 625 #
# File Name: #
# Week6_Association_HW_Music_Ashley #
# #
#############################################
setwd("C:/Users/aggie/Desktop/Data Mining")
rm(list = ls())
install.packages('arules')
install.packages('recommenderlab')
library(arules)
library(recommenderlab)
library(readr)
CourseRatings <- read_csv("CourseRatings.csv")
View(CourseRatings)
# Consider a user-based collaborative filter. This requires computing correlations between all student pairs. For which students is it possible to compute correlations with E.N.? Compute them.
#EN and LN
EN <- (4+4+4+3)/4
LN <- (4+3+2+4+2)/5
DS <- (4+2+4)/3
#EN and LN's correlations. They both liked SQL, R Prog and Regression. Cor = .8703. Pretty similar
EN.LN <- (4-3.75)*(4-3)+(4-3.75)*(4-3)+(3-3.75)*(2-3)
EN.Squared <- (4-3.75)^2 + (4-3.75)^2 + (3-3.75)^2
En.Sqrt <- sqrt(EN.Squared)
LN.Squared <- (4-3)^2+ (4-3)^2 +(2-3)^2
LN.Sqrt <- sqrt(LN.Squared)
EN.LN.Cor <- EN.LN / (En.Sqrt * LN.Sqrt)
#EN and DS. They both like SQL, R Prog and DM in R. Cor of .003535512
EN.DS <- (4-3.75)*(4-3.33)+(4-3.75)*(2-3.33)+(4-3.75)*(4-3.33)
EN.Sq.2 <- (4-3.75)^2 + (4-3.75)^2 +(4-3.75)^2
EN.Sqrt.2 <- sqrt(EN.Sq.2)
DS.Squared <- (4-3.33)^2 + (2-3.33)^2 + (4-3.33)^2
DS.Sqrt <- sqrt(DS.Squared)
EN.DS.Cor <- EN.DS / (EN.Sqrt.2 * DS.Sqrt)
# Cosine similarity between EN and LN over their three shared courses
# (EN ratings: 4, 4, 3; LN ratings: 4, 4, 2). Cosine is ~.9891.
# BUG FIX: the EN denominator previously used 4^3 instead of 3^2, which
# produced the understated value .64639 quoted in the original comment.
Cosine.num <- 4*4 + 4*4 + 3*2
Cosine.dem.en <- 4^2 + 4^2 + 3^2
Cosine.dem.en.sqrt <- sqrt(Cosine.dem.en)
Cosine.dem.ln <- 4^2 + 4^2 + 2^2
Cosine.dem.ln.sqrt <- sqrt(Cosine.dem.ln)
Cosine <- Cosine.num / (Cosine.dem.en.sqrt*Cosine.dem.ln.sqrt)
# Calculate two course pair correlations and report the results.
#SQL (3.4 Average) and Spatial (3.5 Average). Cor is .8424235
SQL <- (4+3+2+4+4)/5
Spatial <- (4+2+4+4+4+3)/6
SQL.Spatial <- (3-3.4)*(4-3.5)+(2-3.4)*(2-3.5)+(4-3.4)*(4-3.5)
SQL.Sq.2 <- (3-3.4)^2 + (2-3.4)^2 +(4-3.4)^2
SQL.Sq.2<- sqrt(SQL.Sq.2)
Spatial.Squared <- (4-3.5)^2 + (2-3.5)^2 + (4-3.5)^2
Spatial.Squared.2 <- sqrt(Spatial.Squared)
SQL.Spatial.Cor <- SQL.Spatial / (SQL.Sq.2 * Spatial.Squared.2 )
#SQL and Python (3.5 Average). Cor of -.9805807
Python <- (3+4)/2
SQL.Python <- (4-3.4)*(3-3.5)+(3-3.4)*(4-3.5)
SQL.Sq.2.3 <- (4-3.4)^2 + (3-3.4)^2
SQL.Sq.2.3 <- sqrt(SQL.Sq.2.3 )
Python.Squared <- (3-3.5)^2 + (4-3.5)^2
Python.Squared.2 <- sqrt(Python.Squared)
SQL.Python.Cor <- SQL.Python / (SQL.Sq.2.3 * Python.Squared.2 )
# 15 users [1:15,1] , 9 courses to rate [1:15, 2:10]
# Not all users have given recomendations.
CourseRatings <- as.matrix(CourseRatings)
r <- as(CourseRatings, "realRatingMatrix")
EN <- CourseRatings[ 4, ]
EN <- as.matrix(EN)
EN.Rec <- as(EN, "realRatingMatrix")
Course.Rec.item <- Recommender(r, "IBCF")
pred.2 <- predict(Course.Rec.item, r, type = "ratings")
as(pred.2, "matrix")
pred.3 <- predict(Course.Rec.item , EN.Rec, type = "ratings")
as(pred.3, "matrix")
|
#' @include AllGenerics.R
NULL
#' A container for storing views of preprocessed data
#'
#' This class directly extends the \code{BamViews} class and provides
#' views to intermediate data (saved to disk) from the analysis of one
#' or more bam files.
#'
#' @seealso See \code{\link{paths}} for the character-vector of file
#' paths to the intermediate data.
#'
#' @slot scale a length-one numeric vector. We scale numeric data by
#' the value of \code{scale}, round to the nearest integer, and then
#' save as an integer. This slot is for internal use only.
#'
#' @examples
#' PreprocessViews2()
#' paths(PreprocessViews2())
#' pviews <- PreprocessViews2()
#' paths(pviews) <- character()
#' paths(pviews)
#'
#' @export
setClass("PreprocessViews2", representation(scale="numeric"),
contains="BamViews")
# Validity check: every PreprocessViews2 must carry a 'range_index' column in
# the mcols of its rowRanges, with one index entry per range.
setValidity("PreprocessViews2", function(object){
  msg <- TRUE   # TRUE signals a valid object to methods::validObject()
  if(!"range_index" %in% colnames(mcols(rowRanges(object)))){
    msg <- "mcols of the rowRanges is missing a required column 'range_index'"
    return(msg)
  }
  if(!identical(length(indexRanges(object)), length(rowRanges(object)))){
    msg <- "length of indexRanges and rowRanges must be the same"
    return(msg)
  }
  msg
})
#' Constructor for PreprocessViews2
#'
#' @return A \code{PreprocessViews2} object
#'
#' @export
#' @param object can be \code{missing} or an existing \code{BamViews} object
#' @rdname PreprocessViews2-class
PreprocessViews2 <- function(object){ ## A BamViews object
  if(missing(object)){
    object <- BamViews()   # default to an empty BamViews container
  }
  # Coerce to the subclass; slots are inherited from BamViews.
  object <- as(object, "PreprocessViews2")
  # One sequential index per row range (required by the validity check).
  indexRanges(object) <- seq_len(nrow(object))
  setScale(object) <- 1 ## by default
  object
}
##
## Accessors / methods for PreprocessViews2
##
#' Accessor for reading assay data saved to disk
#'
#' We have adapted the assays method to read assay data from disk
#' using a \code{PreprocessViews2} object.
#'
#'
#' REFACTOR: both ... and withDimnames are ignored. we should just
#' provide a different generic/method.
#'
#'
#' @return a R x C matrix, where R is the length of
#' \code{rowRanges(x)} and C is the number of samples given by
#' \code{ncol(x)}
#'
#' @export
#'
#' @param x a \code{PreprocessViews2} object
#' @param ... ignored
#' @param withDimnames ignored
setMethod("assays", "PreprocessViews2", function(x, withDimnames=TRUE, ...){
  # One column per sample; each sample's values live in a serialized .rds
  # vector on disk, subset here by the stored range index.
  result <- matrix(NA, nrow(x), ncol(x))
  for(j in seq_len(ncol(x))){
    result[, j] <- readRDS(paths(x)[j])[indexRanges(x)]
  }
  colnames(result) <- colnames(x)
  # Values were stored as scaled integers; divide to recover the originals.
  if(getScale(x) != 1){
    result <- result/getScale(x)
  }
  result
})
#' @rdname setScale-method
#' @export
#' @keywords internal
getScale <- function(x) x@scale
#' @aliases setScale,PreprocessViews2-method
#' @rdname setScale-method
setReplaceMethod("setScale", "PreprocessViews2", function(x, value){
x@scale <- value
x
})
#' @aliases indexRanges,PreprocessViews2-method
#' @rdname indexRanges-method
setMethod("indexRanges", "PreprocessViews2", function(object) {
rowRanges(object)$range_index
})
#' @rdname indexRanges-method
#' @aliases indexRanges<-,PreprocessViews2-method
setReplaceMethod("indexRanges", "PreprocessViews2", function(object, value) {
rowRanges(object)$range_index <- value
object
})
# Accessor / replacement method for the file paths of the on-disk
# intermediate data; reuses the bamPaths slot inherited from BamViews.
setMethod("paths", "PreprocessViews2", function(object) object@bamPaths)
setReplaceMethod("paths", "PreprocessViews2", function(object, value){
  object@bamPaths <- value
  object
})
#' @aliases rowRanges,PreprocessViews2,ANY-method
#' @rdname PreprocessViews2-class
setReplaceMethod("rowRanges", "PreprocessViews2", function(x, value){
x@bamRanges <- value
x
})
#' @aliases rowRanges,PreprocessViews2-method
#' @rdname PreprocessViews2-class
#' @param ... ignored
setMethod("rowRanges", "PreprocessViews2", function(x, ...) bamRanges(x))
#' Helper for creating filenames with .rds extension
#'
#' Intermediate files are stored in directories given by
#' \code{DataPaths}. The names of the intermediate files are formed
#' by concatenating the \code{colnames} of the \code{BamViews}-derived
#' object with the extension \code{.rds}.
#'
#'
#' @examples
#' library(Rsamtools)
#' extdir <- system.file("extdata", package="Rsamtools", mustWork=TRUE)
#' bamfile <- list.files(extdir, pattern="ex1.bam$", full.names=TRUE)
#' bview <- BamViews(bamPaths=bamfile)
#' rdsId(bview)
#'
#'
#' @export
#' @param x a \code{BamViews}-derived object
rdsId <- function(x) {
  # No sample columns -> no file names.
  if (ncol(x) == 0) {
    return(character())
  }
  # One "<colname>.rds" name per sample column.
  sample_ids <- colnames(x)
  paste0(sample_ids, ".rds")
}
##--------------------------------------------------
##
## Coercion
##
##--------------------------------------------------
## setAs is exported in methods, so we do not export here
#' Coerce a \code{PreprocessViews2} object to a \code{RangedSummarizedExperiment}
#'
#' This method pulls the assay data from disk through the views object
#' interface, and then creates a \code{SummarizedExperiment} object
#' with an assay named 'copy'.
#'
#' @examples
#' pviews <- PreprocessViews2()
#' as(pviews, "RangedSummarizedExperiment")
#'
#' @return a \code{RangedSummarizedExperiment}
#' @param from character string ('PreprocessViews2')
#' @param to character string ('RangedSummarizedExperiment')
#' @rdname PreprocessViews2-coercion
#' @docType methods
#' @name setAs
#' @aliases coerce,PreprocessViews2,RangedSummarizedExperiment-method
setAs("PreprocessViews2", "RangedSummarizedExperiment", function(from, to){
x <- assays(from)
rr <- rowRanges(from)
coldat <- bamSamples(from)
SummarizedExperiment(assays=SimpleList(copy=x),
rowRanges=rr,
colData=coldat)
})
##--------------------------------------------------
##
## Descriptive stats
##
##--------------------------------------------------
#' Compute the median normalized read-depth for a set of intervals
#'
#' Calculates, for each genomic interval in \code{gr}, the median of the
#' preprocessed values of the overlapping ranges in \code{pviews}.  Only the
#' first sample (column) of \code{pviews} is used.
#'
#' NOTE(review): the original title said "mean", but the implementation
#' aggregates with \code{median}; the documentation now follows the code.
#'
#' @keywords internal
#'
#' @export
#' @param pviews a \code{PreprocessViews2} object
#' @param gr a \code{GRanges} object
granges_copynumber <- function(gr, pviews){
  pviews <- pviews[, 1]   # restrict to the first sample
  hits <- findOverlaps(gr, rowRanges(pviews))
  # Group the overlap hits by query interval.  Use interval names when they
  # exist and are unique (factor levels keep the original interval order);
  # otherwise fall back to integer query indices.
  if(!any(duplicated(names(gr))) && !is.null(names(gr))){
    split_by <- factor(names(gr)[queryHits(hits)], levels=names(gr))
  } else {
    split_by <- queryHits(hits)
  }
  # Median of the on-disk values for the ranges overlapping each interval.
  fc_context <- sapply(split(assays(pviews[subjectHits(hits), 1]), split_by), median)
  fc_context
}
| /R/Preprocess-class.R | no_license | cancer-genomics/trellis | R | false | false | 6,426 | r | #' @include AllGenerics.R
NULL
#' A container for storing views of preprocessed data
#'
#' This class directly extends the \code{BamViews} class and provides
#' views to intermediate data (saved to disk) from the analysis of one
#' or more bam files.
#'
#' @seealso See \code{\link{paths}} for the character-vector of file
#' paths to the intermediate data.
#'
#' @slot scale a length-one numeric vector. We scale numeric data by
#' the value of \code{scale}, round to the nearest integer, and then
#' save as an integer. This slot is for internal use only.
#'
#' @examples
#' PreprocessViews2()
#' paths(PreprocessViews2())
#' pviews <- PreprocessViews2()
#' paths(pviews) <- character()
#' paths(pviews)
#'
#' @export
setClass("PreprocessViews2", representation(scale="numeric"),
contains="BamViews")
setValidity("PreprocessViews2", function(object){
msg <- TRUE
if(!"range_index" %in% colnames(mcols(rowRanges(object)))){
msg <- "mcols of the rowRanges is missing a required column 'range_index'"
return(msg)
}
if(!identical(length(indexRanges(object)), length(rowRanges(object)))){
msg <- "length of indexRanges and rowRanges must be the same"
return(msg)
}
msg
})
#' Constructor for PreprocessViews2
#'
#' @return A \code{PreprocessViews2} object
#'
#' @export
#' @param object can be \code{missing} or an existing \code{BamViews} object
#' @rdname PreprocessViews2-class
PreprocessViews2 <- function(object){ ## A BamViews object
if(missing(object)){
object <- BamViews()
}
object <- as(object, "PreprocessViews2")
indexRanges(object) <- seq_len(nrow(object))
setScale(object) <- 1 ## by default
object
}
##
## Accessors / methods for PreprocessViews2
##
#' Accessor for reading assay data saved to disk
#'
#' We have adapted the assays method to read assay data from disk
#' using a \code{PreprocessViews2} object.
#'
#'
#' REFACTOR: both ... and withDimnames are ignored. we should just
#' provide a different generic/method.
#'
#'
#' @return a R x C matrix, where R is the length of
#' \code{rowRanges(x)} and C is the number of samples given by
#' \code{ncol(x)}
#'
#' @export
#'
#' @param x a \code{PreprocessViews2} object
#' @param ... ignored
#' @param withDimnames ignored
setMethod("assays", "PreprocessViews2", function(x, withDimnames=TRUE, ...){
result <- matrix(NA, nrow(x), ncol(x))
for(j in seq_len(ncol(x))){
result[, j] <- readRDS(paths(x)[j])[indexRanges(x)]
}
colnames(result) <- colnames(x)
if(getScale(x) != 1){
result <- result/getScale(x)
}
result
})
#' @rdname setScale-method
#' @export
#' @keywords internal
getScale <- function(x) x@scale
#' @aliases setScale,PreprocessViews2-method
#' @rdname setScale-method
setReplaceMethod("setScale", "PreprocessViews2", function(x, value){
x@scale <- value
x
})
#' @aliases indexRanges,PreprocessViews2-method
#' @rdname indexRanges-method
setMethod("indexRanges", "PreprocessViews2", function(object) {
rowRanges(object)$range_index
})
#' @rdname indexRanges-method
#' @aliases indexRanges<-,PreprocessViews2-method
setReplaceMethod("indexRanges", "PreprocessViews2", function(object, value) {
rowRanges(object)$range_index <- value
object
})
setMethod("paths", "PreprocessViews2", function(object) object@bamPaths)
setReplaceMethod("paths", "PreprocessViews2", function(object, value){
object@bamPaths <- value
object
})
#' @aliases rowRanges,PreprocessViews2,ANY-method
#' @rdname PreprocessViews2-class
setReplaceMethod("rowRanges", "PreprocessViews2", function(x, value){
x@bamRanges <- value
x
})
#' @aliases rowRanges,PreprocessViews2-method
#' @rdname PreprocessViews2-class
#' @param ... ignored
setMethod("rowRanges", "PreprocessViews2", function(x, ...) bamRanges(x))
#' Helper for creating filenames with .rds extension
#'
#' Intermediate files are stored in directories given by
#' \code{DataPaths}. The names of the intermediate files are formed
#' by concatenating the \code{colnames} of the \code{BamViews}-derived
#' object with the extension \code{.rds}.
#'
#'
#' @examples
#' library(Rsamtools)
#' extdir <- system.file("extdata", package="Rsamtools", mustWork=TRUE)
#' bamfile <- list.files(extdir, pattern="ex1.bam$", full.names=TRUE)
#' bview <- BamViews(bamPaths=bamfile)
#' rdsId(bview)
#'
#'
#' @export
#' @param x a \code{BamViews}-derived object
rdsId <- function(x) {
  # No sample columns -> no file names.
  if (ncol(x) == 0) {
    return(character())
  }
  # One "<colname>.rds" name per sample column.
  sample_ids <- colnames(x)
  paste0(sample_ids, ".rds")
}
##--------------------------------------------------
##
## Coercion
##
##--------------------------------------------------
## setAs is exported in methods, so we do not export here
#' Coerce a \code{PreprocessViews2} object to a \code{RangedSummarizedExperiment}
#'
#' This method pulls the assay data from disk through the views object
#' interface, and then creates a \code{SummarizedExperiment} object
#' with an assay named 'copy'.
#'
#' @examples
#' pviews <- PreprocessViews2()
#' as(pviews, "RangedSummarizedExperiment")
#'
#' @return a \code{RangedSummarizedExperiment}
#' @param from character string ('PreprocessViews2')
#' @param to character string ('RangedSummarizedExperiment')
#' @rdname PreprocessViews2-coercion
#' @docType methods
#' @name setAs
#' @aliases coerce,PreprocessViews2,RangedSummarizedExperiment-method
setAs("PreprocessViews2", "RangedSummarizedExperiment", function(from, to){
x <- assays(from)
rr <- rowRanges(from)
coldat <- bamSamples(from)
SummarizedExperiment(assays=SimpleList(copy=x),
rowRanges=rr,
colData=coldat)
})
##--------------------------------------------------
##
## Descriptive stats
##
##--------------------------------------------------
#' Compute the mean normalized read-depth for a set of intervals
#'
#' Calculates the mean normalized read-depth for a set of genomic
#' intervals in a \code{GRanges} object.
#'
#' @keywords internal
#'
#' @export
#' @param pviews a \code{PreprocessViews2} object
#' @param gr a \code{GRanges} object
granges_copynumber <- function(gr, pviews){
pviews <- pviews[, 1]
hits <- findOverlaps(gr, rowRanges(pviews))
if(!any(duplicated(names(gr))) && !is.null(names(gr))){
split_by <- factor(names(gr)[queryHits(hits)], levels=names(gr))
} else {
split_by <- queryHits(hits)
}
fc_context <- sapply(split(assays(pviews[subjectHits(hits), 1]), split_by), median)
fc_context
}
|
## IF YOU HAVE ALREADY RUN PLOT1.R, FEEL FREE TO IGNORE THE SECTION OF CODE FROM HERE...
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
Data <- read.csv(unz(temp, "household_power_consumption.txt"), sep=";", nrows = 2880, skip = 66636, header = TRUE)
unlink(temp)
colnames(Data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
"Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
Data$Date <- as.Date(Data$Date, format = "%d/%m/%Y")
##... TO HERE.
# Open the PNG graphics device (480 x 480 px by default).
png("plot2.png")
# Combine the Date and Time columns into one POSIXct timestamp per observation.
datetime <- as.POSIXct(paste(Data$Date, Data$Time), format = "%Y-%m-%d %H:%M:%S")
# FIX: the explicit plot.new() call was removed -- plot() starts a new frame
# itself, so the extra call only advanced the png device to a blank page.
with(Data, plot(datetime, Global_active_power, type = "l",
                xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off() | /plot2.R | no_license | JackIsrael2020/ExData_Plotting1 | R | false | false | 846 | r | ## IF YOU HAVE ALREADY RUN PLOT1.R, FEEL FREE TO IGNORE THE SECTION OF CODE FROM HERE...
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
Data <- read.csv(unz(temp, "household_power_consumption.txt"), sep=";", nrows = 2880, skip = 66636, header = TRUE)
unlink(temp)
colnames(Data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
"Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
Data$Date <- as.Date(Data$Date, format = "%d/%m/%Y")
##... TO HERE.
png("plot2.png")
datetime <- as.POSIXct(paste(Data$Date, Data$Time), format="%Y-%m-%d %H:%M:%S")
plot.new()
with(Data, plot(datetime, Global_active_power, type="l",
xlab = "", ylab="Global Active Power (kilowatts)"))
dev.off() |
# Auto-generated fuzzing harness: calls the internal grattan:::anyOutside
# routine with one fixed argument list and prints the result's structure.
testlist <- list(a = 0L, b = 0L, x = c(201326847L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
dat <- data.frame(
time = factor(c("Lunch","Dinner"), levels=c("Lunch","Dinner")),
total_bill = c(14.89, 17.23)
)
dat
#> time total_bill
#> 1 Lunch 14.89
#> 2 Dinner 17.23
# Load the ggplot2 package
library(ggplot2)
# Very basic bar graph: stat = "identity" draws bar heights directly from
# total_bill instead of counting rows (geom_col() is the modern equivalent).
ggplot(data=dat, aes(x=time, y=total_bill)) +
    geom_bar(stat="identity")
time = factor(c("Lunch","Dinner"), levels=c("Lunch","Dinner")),
total_bill = c(14.89, 17.23)
)
dat
#> time total_bill
#> 1 Lunch 14.89
#> 2 Dinner 17.23
# Load the ggplot2 package
library(ggplot2)
# Very basic bar graph
ggplot(data=dat, aes(x=time, y=total_bill)) +
geom_bar(stat="identity") |
#' @examples
#' \dontrun{
#' res <- dfp_updateProducts(request_data)
#' }
| /examples/examples-dfp_updateProducts.R | no_license | StevenMMortimer/rdfp | R | false | false | 75 | r | #' @examples
#' \dontrun{
#' res <- dfp_updateProducts(request_data)
#' }
|
# QC of single-cell RNA-seq counts with cellity: extract per-cell features
# from an HTSeq count table plus alignment statistics, then flag low-quality
# cells via PCA, for sample HPAP001.
library(cellity)
library(org.Hs.eg.db)
data("extra_human_genes")
data("feature_info")
GO_terms <- feature_info[[1]]          # GO terms used during feature extraction
common_features <- feature_info[[2]]   # feature set shared across organisms
counts<-read.table("HTSEQ_Count/Count_HPAP001.txt",h=T,row.names = 1)
stats<-read.table("Stats_Celloine/SC_HPAP001",h=T)
# Drop column 12 of the read statistics.
# NOTE(review): magic index -- confirm which statistic column 12 holds.
stats<-stats[,-12]
# First pass: remove all-zero genes and cells; second pass: keep genes with
# > 3000 total counts and cells with > 1000 total counts.
counts = counts[which(rowSums(counts) > 0),which(colSums(counts) > 0)]
counts2 = counts[which(rowSums(counts) > 3000),which(colSums(counts) > 1000)]
counts_nm <- normalise_by_factor(counts2, colSums(counts2))  # library-size normalisation
row.names(stats)<-stats[,1]
I<-colnames(counts2)
stats2<-stats[I,]   # align statistics rows with the cells that survived filtering
sample_features <- extract_features(counts_nm, stats2,common_features = common_features,GO_terms = GO_terms, extra_genes = extra_human_genes,organism = "human")
sample_features_all <- sample_features[[1]]   # all features
training_quality_PCA_allF <- assess_cell_quality_PCA(sample_features_all, file = "./HPAP001_QC_PCA_allF.pdf")
write.csv(training_quality_PCA_allF,"HPAP001.PCA_allF.csv")
sample_features_all <- sample_features[[2]]   # common features only
training_quality_PCA_allF <- assess_cell_quality_PCA(sample_features_all, file = "./HPAP001_QC_PCA_comF.pdf")
write.csv(training_quality_PCA_allF,"HPAP001.PCA_comF.csv")
write.csv(sample_features[[1]],"HPAP001.All_Features.csv")
| /scRNA/Cellity_QC.R | no_license | Irfanwustl/Codes | R | false | false | 1,217 | r | library(cellity)
library(org.Hs.eg.db)
data("extra_human_genes")
data("feature_info")
GO_terms <- feature_info[[1]]
common_features <- feature_info[[2]]
counts<-read.table("HTSEQ_Count/Count_HPAP001.txt",h=T,row.names = 1)
stats<-read.table("Stats_Celloine/SC_HPAP001",h=T)
stats<-stats[,-12]
counts = counts[which(rowSums(counts) > 0),which(colSums(counts) > 0)]
counts2 = counts[which(rowSums(counts) > 3000),which(colSums(counts) > 1000)]
counts_nm <- normalise_by_factor(counts2, colSums(counts2))
row.names(stats)<-stats[,1]
I<-colnames(counts2)
stats2<-stats[I,]
sample_features <- extract_features(counts_nm, stats2,common_features = common_features,GO_terms = GO_terms, extra_genes = extra_human_genes,organism = "human")
sample_features_all <- sample_features[[1]]
training_quality_PCA_allF <- assess_cell_quality_PCA(sample_features_all, file = "./HPAP001_QC_PCA_allF.pdf")
write.csv(training_quality_PCA_allF,"HPAP001.PCA_allF.csv")
sample_features_all <- sample_features[[2]]
training_quality_PCA_allF <- assess_cell_quality_PCA(sample_features_all, file = "./HPAP001_QC_PCA_comF.pdf")
write.csv(training_quality_PCA_allF,"HPAP001.PCA_comF.csv")
write.csv(sample_features[[1]],"HPAP001.All_Features.csv")
|
# Packages ----------------------------------------------------------------
#These are the packages we are going to use for data scraping
library(rvest)
library(purrr)
library(xml2)
library(readr)
# Importing data ----------------------------------------------------------
# For this script I suppose you have created a data frame containing the title of each article's title followed by their URL. Each row title is a sequencial article ID (Article01, Article02 and so on…) on the first CSV row.
# Importing the file
base <- read.csv(file.choose(), row.names = 1, header= TRUE)
# Pulling out the address of the article
article_general <- as.vector(base[,2])
article_titles <- as.vector(base[,1])
#Cheking if it is a vector
str(article_general)
summary(article_general)
str(article_titles)
summary(article_titles)
View(article_titles)
# Defining Functions ------------------------------------------------------
# These functions will help us cleaning and processing the texts
# Function for extracting text
# Downloads the page at URL `x` and returns the text of every <p> node as a
# character vector (one element per paragraph node).
extracting_texts <- function(x){
webpage <- read_html(x)   # fetch and parse the HTML document
text <- html_nodes(webpage,'p') #this "p" should change depending on file
text_data <- html_text(text)   # strip markup, keep the visible text
Article <- text_data
return(Article)
}
# Function for cleaning texts
# Replace line breaks with spaces, squeeze runs of whitespace, and collapse
# the character vector into one newline-separated string.
#
# BUG FIX: the original called stringr::str_squish(), but this script never
# loads stringr (only rvest/purrr/xml2/readr), so the call failed at runtime.
# The base-R trimws(gsub(...)) below is equivalent: it collapses internal
# whitespace runs to a single space and trims leading/trailing whitespace.
clean.and.collapse <- function(x) {
  temp <- gsub("\r?\n|\r", " ", x)
  temp <- trimws(gsub("[[:space:]]+", " ", temp))
  temp <- paste(temp, collapse = "\n")
  temp <- as.character(temp)
  return(temp)
}
#Cleaning the text and incrementing dataframe----------------------------------------------------------
# Extracting the text (change number in "[]" according to line)
Article <-extracting_texts (article_general[50])
#Inserting the title at the beginning of the text
Article <- prepend(Article, article_titles[50],before = 1) ; View(Article)
#Cleaning the texts
# Delete pre and post-document data / observe the numbers in the text; this step is only necessary if the function saves some heading information to the file
#Article<- Article[3:16]
Article <- clean.and.collapse(Article)
# Adding text to the dataframe
# Number in "[]" should change according to the line
base[50,"Text"] <- Article
# Packages ----------------------------------------------------------------
#These are the packages we are going to use for data scraping
library(rvest)
library(purrr)
library(xml2)
library(readr)
# Importing data ----------------------------------------------------------
# For this script I suppose you have created a data frame containing the title of each article's title followed by their URL. Each row title is a sequencial article ID (Article01, Article02 and so on…) on the first CSV row.
# Importing the file
base <- read.csv(file.choose(), row.names = 1, header= TRUE)
# Pulling out the address of the article
article_general <- as.vector(base[,2])
article_titles <- as.vector(base[,1])
#Cheking if it is a vector
str(article_general)
summary(article_general)
str(article_titles)
summary(article_titles)
View(article_titles)
# Defining Functions ------------------------------------------------------
# These functions will help us cleaning and processing the texts
# Function for extracting text
extracting_texts <- function(x){
webpage <- read_html(x)
text <- html_nodes(webpage,'p') #this "p"should chage depending on file
text_data <- html_text(text)
Article <- text_data
return(Article)
}
# Function for cleaning texts
# Replace line breaks with spaces, squeeze runs of whitespace, and collapse
# the character vector into one newline-separated string.
#
# BUG FIX: the original called stringr::str_squish(), but this script never
# loads stringr (only rvest/purrr/xml2/readr), so the call failed at runtime.
# The base-R trimws(gsub(...)) below is equivalent: it collapses internal
# whitespace runs to a single space and trims leading/trailing whitespace.
clean.and.collapse <- function(x) {
  temp <- gsub("\r?\n|\r", " ", x)
  temp <- trimws(gsub("[[:space:]]+", " ", temp))
  temp <- paste(temp, collapse = "\n")
  temp <- as.character(temp)
  return(temp)
}
#Cleaning the text and incrementing dataframe----------------------------------------------------------
# Extracting the text (change number in "[]" according to line)
Article <-extracting_texts (article_general[50])
#Inserting title at the Begging of the text
Article <- prepend(Article, article_titles[50],before = 1) ; View(Article)
#Cleaning the texts
# Delete pre and post-document data / observe the numbers in the text this stemp is only necessary if the function saves some heading information to the file
#Article<- Article[3:16]
Article <- clean.and.collapse(Article)
# Adding text to the dataframe
# Number in "[]" should change according to the line)
base[50,"Text"] <- Article |
library(matsbyname)
### Name: invert_byname
### Title: Invert a matrix
### Aliases: invert_byname
### ** Examples
## Example: a 2 x 2 diagonal matrix with named dimensions and row/column
## type attributes attached; the two matrix products check the inverse from
## both sides, and the final call applies invert_byname() elementwise to a list.
m <- matrix(c(10,0,0,100), nrow = 2, dimnames = list(paste0("i", 1:2), paste0("c", 1:2))) %>%
setrowtype("Industry") %>% setcoltype("Commodity")
invert_byname(m)
matrixproduct_byname(m, invert_byname(m))
matrixproduct_byname(invert_byname(m), m)
invert_byname(list(m,m))
| /data/genthat_extracted_code/matsbyname/examples/invert_byname.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 394 | r | library(matsbyname)
### Name: invert_byname
### Title: Invert a matrix
### Aliases: invert_byname
### ** Examples
m <- matrix(c(10,0,0,100), nrow = 2, dimnames = list(paste0("i", 1:2), paste0("c", 1:2))) %>%
setrowtype("Industry") %>% setcoltype("Commodity")
invert_byname(m)
matrixproduct_byname(m, invert_byname(m))
matrixproduct_byname(invert_byname(m), m)
invert_byname(list(m,m))
|
library(fExtremes)
### Name: gpdRisk
### Title: GPD Distributions for Extreme Value Theory
### Aliases: gpdRisk gpdQPlot gpdQuantPlot gpdSfallPlot gpdShapePlot
### gpdTailPlot gpdRiskMeasures tailPlot tailSlider tailRisk
### Keywords: distribution
### ** Examples
## Fit a generalized Pareto distribution to the Danish fire-insurance claims
## above threshold u = 10, then inspect the tail fit and its risk measures.
## Load Data:
danish = as.timeSeries(data(danishClaims))
## Tail Plot:
x = as.timeSeries(data(danishClaims))
fit = gpdFit(x, u = 10)
tailPlot(fit)
## Try Tail Slider:
# tailSlider(x)   # interactive; left commented out for batch runs
## Tail Risk:
tailRisk(fit)
| /data/genthat_extracted_code/fExtremes/examples/GpdRisk.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 508 | r | library(fExtremes)
### Name: gpdRisk
### Title: GPD Distributions for Extreme Value Theory
### Aliases: gpdRisk gpdQPlot gpdQuantPlot gpdSfallPlot gpdShapePlot
### gpdTailPlot gpdRiskMeasures tailPlot tailSlider tailRisk
### Keywords: distribution
### ** Examples
## Load Data:
danish = as.timeSeries(data(danishClaims))
## Tail Plot:
x = as.timeSeries(data(danishClaims))
fit = gpdFit(x, u = 10)
tailPlot(fit)
## Try Tail Slider:
# tailSlider(x)
## Tail Risk:
tailRisk(fit)
|
\name{extendLength}
\alias{extendLength}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Extend the length of a signal or matrix}
\description{
Extend the length of a signal or matrix by row
}
\usage{
extendLength(x, addLength = NULL, method = c("reflection", "open", "circular"), direction = c("right", "left", "both"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ a vector or matrix with column with each column as a signal }
\item{addLength}{ the length to be extended }
\item{method}{ three methods available, c("reflection", "open", "circular"). By default, it is "reflection". }
\item{direction}{ three options available: c("right", "left", "both") }
}
\value{
Returns the extended vector or matrix.
}
\author{ Pan Du }
\seealso{ \code{\link{extendNBase}}}
\examples{
# a = matrix(rnorm(9), 3)
# extendLength(a, 3, direction='right') ## not exposed function
}
\keyword{methods}
| /man/extendLength.Rd | no_license | zmzhang/RWPD | R | false | false | 956 | rd | \name{extendLength}
\alias{extendLength}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Extend the length of a signal or matrix}
\description{
Extend the length of a signal or matrix by row
}
\usage{
extendLength(x, addLength = NULL, method = c("reflection", "open", "circular"), direction = c("right", "left", "both"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ a vector or matrix with column with each column as a signal }
\item{addLength}{ the length to be extended }
\item{method}{ three methods available, c("reflection", "open", "circular"). By default, it is "reflection". }
\item{direction}{ three options available: c("right", "left", "both") }
}
\value{
return the extended vector or matrix.
}
\author{ Pan Du }
\seealso{ \code{\link{extendNBase}}}
\examples{
# a = matrix(rnorm(9), 3)
# extendLength(a, 3, direction='right') ## not exposed function
}
\keyword{methods}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations.R
\name{create_org}
\alias{create_org}
\title{Create new organization}
\usage{
create_org(syn, name)
}
\arguments{
\item{syn}{Synapse client object}
\item{name}{Name of the organization (must be unique across Synapse)}
}
\description{
Create a new Synapse JSON schema services organization.
}
| /man/create_org.Rd | permissive | Sage-Bionetworks/schemann | R | false | true | 386 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations.R
\name{create_org}
\alias{create_org}
\title{Create new organization}
\usage{
create_org(syn, name)
}
\arguments{
\item{syn}{Synapse client object}
\item{name}{Name of the organization (must be unique across Synapse)}
}
\description{
Create a new Synapse JSON schema services organization.
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is generated by Rcpp::compileAttributes(); these
# comments will be lost on regeneration.  Each wrapper below simply forwards
# its arguments to the registered native C++ symbol via .Call().
HACWeightC <- function(iLag) {
    .Call(`_DriftBurstHypothesis_HACWeightC`, iLag)
}

AsymptoticVarianceC <- function(vIn, iLag) {
    .Call(`_DriftBurstHypothesis_AsymptoticVarianceC`, vIn, iLag)
}

AutomaticLagSelectionC <- function(vX, dMu) {
    .Call(`_DriftBurstHypothesis_AutomaticLagSelectionC`, vX, dMu)
}

DriftBurstLoopC <- function(vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag) {
    .Call(`_DriftBurstHypothesis_DriftBurstLoopC`, vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag)
}

# Parallel variant of DriftBurstLoopC; iCores selects the worker count.
DriftBurstLoopCPAR <- function(vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag, iCores) {
    .Call(`_DriftBurstHypothesis_DriftBurstLoopCPAR`, vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag, iCores)
}

cfilter <- function(x, filter) {
    .Call(`_DriftBurstHypothesis_cfilter`, x, filter)
}

mldivide <- function(A, B) {
    .Call(`_DriftBurstHypothesis_mldivide`, A, B)
}
| /DriftBurstHypothesis/R/RcppExports.R | no_license | akhikolla/InformationHouse | R | false | false | 1,209 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
HACWeightC <- function(iLag) {
.Call(`_DriftBurstHypothesis_HACWeightC`, iLag)
}
AsymptoticVarianceC <- function(vIn, iLag) {
.Call(`_DriftBurstHypothesis_AsymptoticVarianceC`, vIn, iLag)
}
AutomaticLagSelectionC <- function(vX, dMu) {
.Call(`_DriftBurstHypothesis_AutomaticLagSelectionC`, vX, dMu)
}
DriftBurstLoopC <- function(vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag) {
.Call(`_DriftBurstHypothesis_DriftBurstLoopC`, vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag)
}
DriftBurstLoopCPAR <- function(vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag, iCores) {
.Call(`_DriftBurstHypothesis_DriftBurstLoopCPAR`, vPreAveraged, diffedlogprices, vTime, vTesttime, iMeanBandwidth, iVarBandwidth, iPreAverage, iAcLag, iCores)
}
cfilter <- function(x, filter) {
.Call(`_DriftBurstHypothesis_cfilter`, x, filter)
}
mldivide <- function(A, B) {
.Call(`_DriftBurstHypothesis_mldivide`, A, B)
}
|
# Compare the unconditional and conditional Gaussian log-likelihoods of an
# MA(1) process over a grid of candidate beta values, and plot both curves
# with their maximizing beta marked.
# clear variables and close windows
rm(list = ls(all = TRUE))
graphics.off()
# parameter settings
n = 100      # sample size
beta = 0.5   # true MA(1) coefficient used for simulation
#generate time-series data set
set.seed(123)
x = arima.sim(n = n, list(ma = beta), innov = rnorm(n))
x = matrix(x)
#generate candidates for estimated beta, -1<beta<1
betahat = seq(from = -0.99, to = 0.99, by = 0.02)
k= 100            # number of candidates (length of betahat)
li = c(1:k)       # storage: unconditional log-likelihoods
cli = c(1:k)      # storage: conditional log-likelihoods
e = c(1:n)        # storage: innovations recovered by inverting the MA(1)
e[1] = x[1]       # x_1 = e_1 + b*e_0, conditioning on e_0 = 0
# likelihood function, assuming standard normal distributed errors
for (i in 1:k){
b = betahat[i]
# Autocovariance matrix of an MA(1) with unit innovation variance:
# 1 + b^2 on the diagonal, b on the first off-diagonals, 0 elsewhere.
gamma0 = diag(1+b^2, n, n)
gamma1 = diag(b, n-1, n-1)
gamma1 = cbind(0, gamma1)
gamma1 = rbind(gamma1, 0)
tgamma1 = t(gamma1)
gamma = gamma0 + gamma1 + tgamma1
# AR(infinity) inversion weights: e_t = sum_k (-b)^k x_{t-k}
betacoef = (-b) ^ (1:(n-1))
#unconditional maximal likelihood function
li[i] = -n/2 * log(2*pi) - 1/2 * log(det(gamma)) - 1/2 * t(x) %*% solve(gamma) %*% x
#error terms
for (j in 2:n){
e[j] = x[j] + sum(betacoef[1:(j-1)] * x[(j-1):1, 1])
}
#conditional maximal likelihood function
# NOTE(review): a conditional Gaussian likelihood would normally not include
# the log(det(gamma)) term -- verify against the course notes.
cli[i] = -n/2 * log(2*pi) - 1/2 * log(det(gamma)) - 1/2 * sum(e^2)
}
#plot the likelihood functions
output = cbind(betahat, li, cli)
plot(output[,c(1,2)], col = 4, xlab = "Beta", ylab = "log-Likelihood",type = "l", lwd = 2,
main = paste("likelihood function of a MA(1) Process with n=", n, sep = ""))
abline(v = output[which.max(li),1], col = "blue")   # beta maximizing the unconditional likelihood
points(output[, c(1,3)], type = "l", col = 2, lty = 2, lwd = 2)
abline(v = output[which.max(cli),1], col = "red")   # beta maximizing the conditional likelihood
| /SFM_MA1/SFEMA1_likelima1_100/SFEMA1_likelima1_100.R | no_license | QuantLet/SFM_Class_2019WS | R | false | false | 1,441 | r | # clear variables and close windows
# Duplicate copy of the MA(1) likelihood demo (dataset artifact; its leading
# "# clear variables..." comment was fused into the preceding metadata row).
# Computes exact and conditional MA(1) log-likelihoods over a beta grid.
rm(list = ls(all = TRUE))
graphics.off()
# parameter settings
n = 100
beta = 0.5
#generate time-series data set
set.seed(123)
x = arima.sim(n = n, list(ma = beta), innov = rnorm(n))
x = matrix(x)
#generate candidates for estimated beta, -1<beta<1
betahat = seq(from = -0.99, to = 0.99, by = 0.02)
k= 100
li = c(1:k)
cli = c(1:k)
e = c(1:n)
e[1] = x[1]
# likelihood function, assuming standard normal distributed errors
for (i in 1:k){
b = betahat[i]
# tridiagonal MA(1) autocovariance matrix: (1+b^2) diag, b off-diag
gamma0 = diag(1+b^2, n, n)
gamma1 = diag(b, n-1, n-1)
gamma1 = cbind(0, gamma1)
gamma1 = rbind(gamma1, 0)
tgamma1 = t(gamma1)
gamma = gamma0 + gamma1 + tgamma1
betacoef = (-b) ^ (1:(n-1))
#unconditional maximal likelihood function
li[i] = -n/2 * log(2*pi) - 1/2 * log(det(gamma)) - 1/2 * t(x) %*% solve(gamma) %*% x
#error terms
for (j in 2:n){
e[j] = x[j] + sum(betacoef[1:(j-1)] * x[(j-1):1, 1])
}
#conditional maximal likelihood function
# NOTE(review): retains the log(det(gamma)) term -- see note in the first copy.
cli[i] = -n/2 * log(2*pi) - 1/2 * log(det(gamma)) - 1/2 * sum(e^2)
}
#plot the likelihood functions
output = cbind(betahat, li, cli)
plot(output[,c(1,2)], col = 4, xlab = "Beta", ylab = "log-Likelihood",type = "l", lwd = 2,
main = paste("likelihood function of a MA(1) Process with n=", n, sep = ""))
abline(v = output[which.max(li),1], col = "blue")
points(output[, c(1,3)], type = "l", col = 2, lty = 2, lwd = 2)
abline(v = output[which.max(cli),1], col = "red")
|
\name{FisherZ}
\alias{FisherZ}
\alias{FisherZInv}
\alias{CorCI}
\title{Fisher-Transformation for Correlation to z-Score}
\description{Convert a correlation to a z score or z to r using the Fisher transformation or find the confidence intervals for a specified correlation.}
\usage{
FisherZ(rho)
FisherZInv(z)
CorCI(rho, n, conf.level = 0.95, alternative = c("two.sided", "less", "greater"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{rho}{the Pearson's correlation coefficient}
\item{z}{a Fisher z transformed value}
\item{n}{sample size used for calculating the confidence intervals}
\item{alternative}{is a character string, one of \code{"greater"},
\code{"less"}, or \code{"two.sided"}, or the initial letter of each,
indicating the specification of the alternative hypothesis.
\code{"greater"} corresponds to positive association, \code{"less"} to negative association.}
\item{conf.level}{confidence level for the returned confidence
interval, restricted to lie between zero and one.}
}
\details{The sampling distribution of Pearson's r is not normally distributed. Fisher developed a transformation now called "Fisher's z-transformation" that converts Pearson's r's to the normally distributed variable z'. The formula for the transformation is:
\deqn{z_r = \tanh^{-1}(r) = \frac{1}{2}\log\left( \frac{1+r}{1-r}\right )}
}
\value{z value corresponding to r (in FisherZ) \cr
r corresponding to z (in FisherZInv) \cr
rho, lower and upper confidence intervals (CorCI) \cr
}
\seealso{\code{\link{cor.test}}}
\author{William Revelle <revelle@northwestern.edu>, \cr
slight modifications Andri Signorell <andri@signorell.net> based on R-Core code
}
\examples{
cors <- seq(-.9, .9, .1)
zs <- FisherZ(cors)
rs <- FisherZInv(zs)
round(zs, 2)
n <- 30
r <- seq(0, .9, .1)
rc <- t(sapply(r, CorCI, n=n))
t <- r * sqrt(n-2) / sqrt(1-r^2)
p <- (1 - pt(t, n-2)) / 2
r.rc <- data.frame(r=r, z=FisherZ(r), lower=rc[,2], upper=rc[,3], t=t, p=p)
round(r.rc,2)
}
\keyword{ multivariate }% at least one, from doc/KEYWORDS
\keyword{ models }% __ONLY ONE__ keyword per line
| /man/FisherZ.Rd | no_license | mainwaringb/DescTools | R | false | false | 2,122 | rd | \name{FisherZ}
\alias{FisherZ}
\alias{FisherZInv}
\alias{CorCI}
\title{Fisher-Transformation for Correlation to z-Score}
\description{Convert a correlation to a z score or z to r using the Fisher transformation or find the confidence intervals for a specified correlation.}
\usage{
FisherZ(rho)
FisherZInv(z)
CorCI(rho, n, conf.level = 0.95, alternative = c("two.sided", "less", "greater"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{rho}{the Pearson's correlation coefficient}
\item{z}{a Fisher z transformed value}
\item{n}{sample size used for calculating the confidence intervals}
\item{alternative}{is a character string, one of \code{"greater"},
\code{"less"}, or \code{"two.sided"}, or the initial letter of each,
indicating the specification of the alternative hypothesis.
\code{"greater"} corresponds to positive association, \code{"less"} to negative association.}
\item{conf.level}{confidence level for the returned confidence
interval, restricted to lie between zero and one.}
}
\details{The sampling distribution of Pearson's r is not normally distributed. Fisher developed a transformation now called "Fisher's z-transformation" that converts Pearson's r's to the normally distributed variable z'. The formula for the transformation is:
\deqn{z_r = \tanh^{-1}(r) = \frac{1}{2}\log\left( \frac{1+r}{1-r}\right )}
}
\value{z value corresponding to r (in FisherZ) \cr
r corresponding to z (in FisherZInv) \cr
rho, lower and upper confidence intervals (CorCI) \cr
}
\seealso{\code{\link{cor.test}}}
\author{William Revelle <revelle@northwestern.edu>, \cr
slight modifications Andri Signorell <andri@signorell.net> based on R-Core code
}
\examples{
cors <- seq(-.9, .9, .1)
zs <- FisherZ(cors)
rs <- FisherZInv(zs)
round(zs, 2)
n <- 30
r <- seq(0, .9, .1)
rc <- t(sapply(r, CorCI, n=n))
t <- r * sqrt(n-2) / sqrt(1-r^2)
p <- (1 - pt(t, n-2)) / 2
r.rc <- data.frame(r=r, z=FisherZ(r), lower=rc[,2], upper=rc[,3], t=t, p=p)
round(r.rc,2)
}
\keyword{ multivariate }% at least one, from doc/KEYWORDS
\keyword{ models }% __ONLY ONE__ keyword per line
|
# Module 15 demo script, part 1: package setup, CSV/JSON import, dplyr
# transformation/summarization, and tidyr reshaping.
# NOTE(review): reads demo.csv / demo.json / demo2.csv from the working dir.
x <-3
# NOTE(review): install.packages() runs on every execution; guard with
# requireNamespace() or move installation out of the script.
install.packages("tidyverse")
demo_table<-read.csv(file='demo.csv', check.names=F, stringsAsFactors = F)
library(jsonlite)
demo_table2 <- fromJSON(txt='demo.json')
#Filter by price and drivetrain
filter_table2<-subset(demo_table2, price > 10000 & drive =="4wd" & "clean" %in% title_status)
#Sampling
demo_table[sample(1:nrow(demo_table),3),]
library(tidyverse)
?mutate()
#add columns to original data frame
demo_table<- demo_table %>% mutate(Mileage_per_Year = Total_Miles/(2020-Year), IsActive= TRUE)
#create summary table by group used car data by condition of vehicle and mileage per condition
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarise(Mean_Mileage = mean(odometer))
#create summary table with multiple columns
summarize_demo <- demo_table2 %>% group_by(condition)%>% summarize(Mean_Mileage=mean(odometer),Maximum_Price=max(price),Num_Vehicle= n())
#load demo2
demo_table3<- read.csv('demo2.csv', check.names = F, stringsAsFactors = F)
#use gather() to change dataset to a long format
# NOTE(review): gather()/spread() are superseded by pivot_longer()/pivot_wider().
long_table <- gather(demo_table3, key="Metric", value="Score",buying_price:popularity)
#use spread() to spread out long-format data frame
wide_table<- long_table%>% spread(key = "Metric",value="Score")
#Check to see if equal
all.equal(demo_table3, wide_table)
# Module 15 demo script, part 2: bar and line plots of the built-in mpg data.
#call MPG dataset (built into R)
head(mpg)
#import dataset into ggplot2
plt<- ggplot(mpg, aes(x=class))
#plot a bar plot
plt+geom_bar()
#create a summary table
mpg_summary <- mpg%>%group_by(manufacturer) %>% summarize(Vehicle_Count=n())
#Import dataset into ggplot2
plt<- ggplot(mpg_summary, aes(x=manufacturer, y=Vehicle_Count))
#plot a bar plot
plt+geom_col()
#plot bar plot with labels
plt+geom_col() + xlab("Manufacturing Company")+ ylab("Number of Vehicles in Dataset")
#plot a boxplot with labels
# NOTE(review): comment above says "boxplot" but the code draws a column
# (bar) plot via geom_col().
plt+geom_col() + xlab("Manufacturing Company") +
ylab("Number of Vehicle in Dataset") +
#rotate the x-axis label
theme(axis.text.x=element_text(angle=45, hjust=1))
#create summary table
mpg_summary <- subset(mpg, manufacturer=="toyota") %>% group_by(cyl) %>% summarize (Mean_Hwy=mean(hwy))
#import dataset into ggplot2
plt<- ggplot(mpg_summary, aes(x=cyl, y=Mean_Hwy))
#plot line
plt+geom_line()
#add line plot with labels
plt+geom_line() + scale_x_discrete(limits=c(4,6,8)) + scale_y_continuous(breaks = c(15:30))
# Module 15 demo script, part 3: scatter plots of engine displacement vs.
# city fuel efficiency, with color/shape aesthetics.
# Fixes: user-facing axis labels were malformed -- "City Fuel-Efficiency (MPG"
# was missing its closing parenthesis (twice) and "Engine SIze" was a typo.
#scatterplot
#import dataset into ggplot2
plt<- ggplot(mpg, aes(x=displ, y=cty))
#add scatter plot with labels
plt+geom_point() + xlab("Engine Size (L)") + ylab ("City Fuel-Efficiency (MPG)")
#add color=class
plt<- ggplot(mpg, aes(x=displ, y=cty, color=class))
# add color
plt+geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class")
#import dataset into ggplot2
plt <- ggplot(mpg, aes(x=displ, y=cty, color=class, shape=drv))
#add scatter plot with multiple aesthetics
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class", shape="Type of Drive")
# Module 15 demo script, part 4: boxplots, heatmaps, layered plots, error
# bars, and faceting on the mpg data.
#boxplot
#import dataset into ggplot2
plt<- ggplot(mpg, aes(y=hwy))
#add boxplot
plt + geom_boxplot()
#create set of boxplots
#import dataset into ggplot2
plt<- ggplot(mpg,aes(x=manufacturer, y=hwy))
#add boxplot and rotate x-axis labels 45 degrees
plt+ geom_boxplot() + theme(axis.text.x=element_text(angle=45, hjust=1))
#heatmap
#create summary table
mpg_summary<- mpg %>% group_by(class, year) %>% summarize (Mean_Hwy=mean(hwy))
plt<- ggplot(mpg_summary, aes(x=class, y=factor(year), fill=Mean_Hwy))
#create heatmap with labels
plt +geom_tile() + labs(x="Vehicle Class", y="Vehicle Year", fill="Mean Highway (MPG)")
#create summary table
mpg_summary <- mpg %>% group_by(model, year) %>% summarize (Mean_Hwy=mean(hwy))
#import dataset into ggplot2
plt <- ggplot(mpg_summary, aes(x=model, y=factor(year), fill=Mean_Hwy))
#add heatmap with labels
plt + geom_tile() + labs(x="Model", y="Vehicle Year", fill = "Mean Highway (MPG)") +
#rotate x-axis labels 90 degrees
theme(axis.text.x = element_text(angle = 90, hjust=1, vjust=.5))
#import dataset into ggplot2
plt<- ggplot(mpg,aes(x=manufacturer, y=hwy))
#add boxplot
plt + geom_boxplot() +
#rotate x-axis labels 45 degrees
theme(axis.text.x = element_text(angle=45, hjust=1)) +
#overlay scatter plot on top
geom_point()
#create summary table
mpg_summary <- mpg %>% group_by(class) %>% summarize (Mean_Engine=mean(displ))
#import dataset into ggplot2
plt <- ggplot(mpg_summary, aes(x=class, y=Mean_Engine))
#add scatter plot
plt + geom_point(size=4) + labs(x="Vehicle Class", y="Mean Engine Size")
#import dataset into ggplot2
#provide context around standard deviation of engine size for each vehicle class
mpg_summary <- mpg %>% group_by(class) %>% summarize (Mean_Engine=mean(displ), SD_Engine=sd(displ))
plt <- ggplot(mpg_summary, aes(x=class, y=Mean_Engine))
#add scatter plot with labels
plt +geom_point(size=4) + labs(x="Vehicle Class", y="Mean Engine Size") +
#overlay with error bars
geom_errorbar(aes(ymin=Mean_Engine-SD_Engine,ymax=Mean_Engine+SD_Engine))
#convert to long format
# NOTE(review): gather() is superseded; pivot_longer() is the modern form.
mpg_long <- mpg %>% gather (key = "MPG_Type", value = "Rating", c(cty, hwy))
head(mpg_long)
#import dataset into ggplot2
plt <- ggplot(mpg_long, aes(x=manufacturer, y=Rating, color=MPG_Type))
# add boxplot with labels rotated 45 degrees
plt + geom_boxplot() + theme(axis.text.x=element_text(angle=45, hjust=1))
#import dataset into ggplot2
plt <- ggplot(mpg_long, aes(x=manufacturer, y=Rating, color=MPG_Type))
#create multiple boxplots, one for each MPG type
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) +
#rotate x-axis labels
theme(axis.text.x=element_text(angle=45, hjust=1), legend.position="none")+xlab ("Manufacturer")
# Module 15 demo script, part 5: distribution checks (density, Shapiro-Wilk)
# and one-sample / two-sample / paired t-tests.
# NOTE(review): reads used_car_data.csv and mpg_modified.csv from the
# working directory.
#visualize distribution using density plot (qualitative)
ggplot(mtcars, aes(x=wt)) + geom_density()
#shapiro test
shapiro.test(mtcars$wt)
#sampling from used car dataset
#import used car dataset
population_table <- read.csv('used_car_data.csv', check.names=F, stringsAsFactors = F)
#import dataset into ggplot2
plt <- ggplot(population_table, aes(x=log10(Miles_Driven)))
#visualize distribution using density plot
plt + geom_density()
#one-sample t-test
#randomly sample 50 data points
# NOTE(review): no set.seed() before sample_n(), so results differ per run.
sample_table <- population_table %>% sample_n(50)
#import dataset into ggplot2
plt <- ggplot(sample_table, aes(x=log10(Miles_Driven)))
#visualize distribution using density plot
plt+geom_density()
#compare sample versus population means
t.test(log10(sample_table$Miles_Driven), mu=mean(log10(population_table$Miles_Driven)))
#two- sample t-test
#generate 50 randomly sampled data points
sample_table <- population_table %>% sample_n(50)
#generate another 50 randomly sampled data points
sample_table2 <- population_table %>% sample_n(50)
#compare means of two samples
t.test(log10(sample_table$Miles_Driven), log10(sample_table2$Miles_Driven))
#paired t-test
#import dataset
mpg_data <- read.csv('mpg_modified.csv')
#select only data points where the year is 1999
mpg_1999 <- mpg_data %>% filter(year ==1999)
#select only data points where the year is 2008
mpg_2008 <- mpg_data %>% filter(year ==2008)
#compare the mean difference between two samples
# NOTE(review): prefer TRUE over the reassignable shorthand T.
t.test(mpg_1999$hwy, mpg_2008$hwy, paired=T)
# Module 15 demo script, part 6: ANOVA, Pearson correlation, simple and
# multiple linear regression, and a chi-squared test of independence.
#ANOVA testing
#filter columns from mtcars dataset
mtcars_filt <- mtcars[,c("hp", "cyl")]
#convert numeric column to factors
mtcars_filt$cyl <- factor(mtcars_filt$cyl)
#compare means across multiple levels
aov(hp ~ cyl, data=mtcars_filt)
#wrap aov() function in a summary()
summary(aov(hp~cyl, data=mtcars_filt))
#practice with Pearson correlation coefficient
head(mtcars)
#import dataset into ggplot2
plt <- ggplot(mtcars,aes(x=hp, y=qsec))
#create scatter plot
plt + geom_point()
#calculate correlation coefficient
cor(mtcars$hp, mtcars$qsec)
#another example of r-value
#read in dataset
used_cars <- read.csv('used_car_data.csv', stringsAsFactors = F)
head(used_cars)
#import dataset into ggplot2
plt <- ggplot(used_cars, aes(x=Miles_Driven, y=Selling_Price))
#create scatter plot
plt + geom_point()
#calculate correlation coefficient
cor(used_cars$Miles_Driven, used_cars$Selling_Price)
#correlation matrix
#convert dataframe into numeric matrix
used_matrix <- as.matrix(used_cars[,c("Selling_Price","Present_Price","Miles_Driven")])
cor(used_matrix)
#create linear regression model
lm(qsec ~ hp, mtcars)
#summarize linear model
summary(lm(qsec~hp, mtcars))
#visualize fitted line against dataset
#create linear model
model <-lm(qsec ~ hp, mtcars)
#determine y-axis values from linear model
# NOTE(review): predict(model) would compute the same fitted values without
# hand-rolling the coefficient arithmetic.
yvals <- model$coefficients['hp']*mtcars$hp + model$coefficients ['(Intercept)']
#import dataset into ggplot2
plt <- ggplot(mtcars, aes(x=hp, y=qsec))
#plot scatter and linear model
plt + geom_point() + geom_line(aes(y=yvals), color="red")
#multiple linear regression model
lm(qsec ~ mpg + disp + drat + wt + hp, data=mtcars)
#generate summary statistics
summary(lm(qsec ~ mpg + disp + drat + wt + hp, data=mtcars))
#chi-squared test
#generate contingency table
tbl<-table(mpg$class, mpg$year)
#compare categorical distributions
chisq.test(tbl)
head(used_cars)
| /01_Demo/Module_15.R | no_license | hhnguyenn/MechaCars | R | false | false | 9,027 | r | x <-3
install.packages("tidyverse")
demo_table<-read.csv(file='demo.csv', check.names=F, stringsAsFactors = F)
library(jsonlite)
demo_table2 <- fromJSON(txt='demo.json')
#Filter by price and drivetrain
filter_table2<-subset(demo_table2, price > 10000 & drive =="4wd" & "clean" %in% title_status)
#Sampling
demo_table[sample(1:nrow(demo_table),3),]
library(tidyverse)
?mutate()
#add columns to original data frame
demo_table<- demo_table %>% mutate(Mileage_per_Year = Total_Miles/(2020-Year), IsActive= TRUE)
#create summary table by group used car data by condition of vehicle and mileage per condition
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarise(Mean_Mileage = mean(odometer))
#create summary table with multiple columns
summarize_demo <- demo_table2 %>% group_by(condition)%>% summarize(Mean_Mileage=mean(odometer),Maximum_Price=max(price),Num_Vehicle= n())
#load demo2
demo_table3<- read.csv('demo2.csv', check.names = F, stringsAsFactors = F)
#use gather() to change dataset to a long format
long_table <- gather(demo_table3, key="Metric", value="Score",buying_price:popularity)
#use spread() to spread out long-format data frame
wide_table<- long_table%>% spread(key = "Metric",value="Score")
#Check to see if equal
all.equal(demo_table3, wide_table)
#call MPG dataset (built into R)
head(mpg)
#import dataset into ggplot2
plt<- ggplot(mpg, aes(x=class))
#plot a bar plot
plt+geom_bar()
#create a summary table
mpg_summary <- mpg%>%group_by(manufacturer) %>% summarize(Vehicle_Count=n())
#Import dataset into ggplot2
plt<- ggplot(mpg_summary, aes(x=manufacturer, y=Vehicle_Count))
#plot a bar plot
plt+geom_col()
#plot bar plot with labels
plt+geom_col() + xlab("Manufacturing Company")+ ylab("Number of Vehicles in Dataset")
#plot a boxplot with labels
plt+geom_col() + xlab("Manufacturing Company") +
ylab("Number of Vehicle in Dataset") +
#rotate the x-axis label
theme(axis.text.x=element_text(angle=45, hjust=1))
#create summary table
mpg_summary <- subset(mpg, manufacturer=="toyota") %>% group_by(cyl) %>% summarize (Mean_Hwy=mean(hwy))
#import dataset into ggplot2
plt<- ggplot(mpg_summary, aes(x=cyl, y=Mean_Hwy))
#plot line
plt+geom_line()
#add line plot with labels
plt+geom_line() + scale_x_discrete(limits=c(4,6,8)) + scale_y_continuous(breaks = c(15:30))
# Duplicate scatter-plot section (dataset artifact). Same fixes as the first
# copy: closing parenthesis restored in "City Fuel-Efficiency (MPG)" labels
# (twice) and "Engine SIze" typo corrected.
#scatterplot
#import dataset into ggplot2
plt<- ggplot(mpg, aes(x=displ, y=cty))
#add scatter plot with labels
plt+geom_point() + xlab("Engine Size (L)") + ylab ("City Fuel-Efficiency (MPG)")
#add color=class
plt<- ggplot(mpg, aes(x=displ, y=cty, color=class))
# add color
plt+geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class")
#import dataset into ggplot2
plt <- ggplot(mpg, aes(x=displ, y=cty, color=class, shape=drv))
#add scatter plot with multiple aesthetics
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class", shape="Type of Drive")
#boxplot
#import dataset into ggplot2
plt<- ggplot(mpg, aes(y=hwy))
#add boxplot
plt + geom_boxplot()
#create set of boxplots
#import dataset into ggplot2
plt<- ggplot(mpg,aes(x=manufacturer, y=hwy))
#add boxplot and rotate x-axis labels 45 degrees
plt+ geom_boxplot() + theme(axis.text.x=element_text(angle=45, hjust=1))
#heatmap
#create summary table
mpg_summary<- mpg %>% group_by(class, year) %>% summarize (Mean_Hwy=mean(hwy))
plt<- ggplot(mpg_summary, aes(x=class, y=factor(year), fill=Mean_Hwy))
#create heatmap with labels
plt +geom_tile() + labs(x="Vehicle Class", y="Vehicle Year", fill="Mean Highway (MPG)")
#create summary table
mpg_summary <- mpg %>% group_by(model, year) %>% summarize (Mean_Hwy=mean(hwy))
#import dataset into ggplot2
plt <- ggplot(mpg_summary, aes(x=model, y=factor(year), fill=Mean_Hwy))
#add heatmap with labels
plt + geom_tile() + labs(x="Model", y="Vehicle Year", fill = "Mean Highway (MPG)") +
#rotate x-axis labels 90 degrees
theme(axis.text.x = element_text(angle = 90, hjust=1, vjust=.5))
#import dataset into ggplot2
plt<- ggplot(mpg,aes(x=manufacturer, y=hwy))
#add boxplot
plt + geom_boxplot() +
#rotate x-axis labels 45 degrees
theme(axis.text.x = element_text(angle=45, hjust=1)) +
#overlay scatter plot on top
geom_point()
#create summary table
mpg_summary <- mpg %>% group_by(class) %>% summarize (Mean_Engine=mean(displ))
#import dataset into ggplot2
plt <- ggplot(mpg_summary, aes(x=class, y=Mean_Engine))
#add scatter plot
plt + geom_point(size=4) + labs(x="Vehicle Class", y="Mean Engine Size")
#import dataset into ggplot2
#provide context around standard deviation of engine size for each vehicle class
mpg_summary <- mpg %>% group_by(class) %>% summarize (Mean_Engine=mean(displ), SD_Engine=sd(displ))
plt <- ggplot(mpg_summary, aes(x=class, y=Mean_Engine))
#add scatter plot with labels
plt +geom_point(size=4) + labs(x="Vehicle Class", y="Mean Engine Size") +
#overlay with error bars
geom_errorbar(aes(ymin=Mean_Engine-SD_Engine,ymax=Mean_Engine+SD_Engine))
#convert to long format
mpg_long <- mpg %>% gather (key = "MPG_Type", value = "Rating", c(cty, hwy))
head(mpg_long)
#import dataset into ggplot2
plt <- ggplot(mpg_long, aes(x=manufacturer, y=Rating, color=MPG_Type))
# add boxplot with labels rotated 45 degrees
plt + geom_boxplot() + theme(axis.text.x=element_text(angle=45, hjust=1))
#import dataset into ggplot2
plt <- ggplot(mpg_long, aes(x=manufacturer, y=Rating, color=MPG_Type))
#create multiple boxplots, one for each MPG type
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) +
#rotate x-axis labels
theme(axis.text.x=element_text(angle=45, hjust=1), legend.position="none")+xlab ("Manufacturer")
#visualize distribution using density plot (qualitative)
ggplot(mtcars, aes(x=wt)) + geom_density()
#shapiro test
shapiro.test(mtcars$wt)
#sampling from used car dataset
#import used car dataset
population_table <- read.csv('used_car_data.csv', check.names=F, stringsAsFactors = F)
#import dataset into ggplot2
plt <- ggplot(population_table, aes(x=log10(Miles_Driven)))
#visualize distribution using density plot
plt + geom_density()
#one-sample t-test
#randomly sample 50 data points
sample_table <- population_table %>% sample_n(50)
#import dataset into ggplot2
plt <- ggplot(sample_table, aes(x=log10(Miles_Driven)))
#visualize distribution using density plot
plt+geom_density()
#compare sample versus population means
t.test(log10(sample_table$Miles_Driven), mu=mean(log10(population_table$Miles_Driven)))
#two- sample t-test
#generate 50 randomly sampled data points
sample_table <- population_table %>% sample_n(50)
#generate another 50 randomly sampled data points
sample_table2 <- population_table %>% sample_n(50)
#compare means of two samples
t.test(log10(sample_table$Miles_Driven), log10(sample_table2$Miles_Driven))
#paired t-test
#import dataset
mpg_data <- read.csv('mpg_modified.csv')
#select only data points where the year is 1999
mpg_1999 <- mpg_data %>% filter(year ==1999)
#select only data points where the year is 2008
mpg_2008 <- mpg_data %>% filter(year ==2008)
#compare the mean difference between two samples
t.test(mpg_1999$hwy, mpg_2008$hwy, paired=T)
#ANOVA testing
#filter columns from mtcars dataset
mtcars_filt <- mtcars[,c("hp", "cyl")]
#convert numeric column to factors
mtcars_filt$cyl <- factor(mtcars_filt$cyl)
#compare means across multiple levels
aov(hp ~ cyl, data=mtcars_filt)
#wrap aov() function in a summary()
summary(aov(hp~cyl, data=mtcars_filt))
#practice with Pearson correlation coefficient
head(mtcars)
#import dataset into ggplot2
plt <- ggplot(mtcars,aes(x=hp, y=qsec))
#create scatter plot
plt + geom_point()
#calculate correlation coefficient
cor(mtcars$hp, mtcars$qsec)
#another example of r-value
#read in dataset
used_cars <- read.csv('used_car_data.csv', stringsAsFactors = F)
head(used_cars)
#import dataset into ggplot2
plt <- ggplot(used_cars, aes(x=Miles_Driven, y=Selling_Price))
#create scatter plot
plt + geom_point()
#calculate correlation coefficient
cor(used_cars$Miles_Driven, used_cars$Selling_Price)
#correlation matrix
#convert dataframe into numeric matrix
used_matrix <- as.matrix(used_cars[,c("Selling_Price","Present_Price","Miles_Driven")])
cor(used_matrix)
#create linear regression model
lm(qsec ~ hp, mtcars)
#summarize linear model
summary(lm(qsec~hp, mtcars))
#visualize fitted line against dataset
#create linear model
model <-lm(qsec ~ hp, mtcars)
#determine y-axis values from linear model
yvals <- model$coefficients['hp']*mtcars$hp + model$coefficients ['(Intercept)']
#import dataset into ggplot2
plt <- ggplot(mtcars, aes(x=hp, y=qsec))
#plot scatter and linear model
plt + geom_point() + geom_line(aes(y=yvals), color="red")
#multiple linear regression model
lm(qsec ~ mpg + disp + drat + wt + hp, data=mtcars)
#generate summary statistics
summary(lm(qsec ~ mpg + disp + drat + wt + hp, data=mtcars))
#chi-squared test
#generate contingency table
tbl<-table(mpg$class, mpg$year)
#compare categorical distributions
chisq.test(tbl)
head(used_cars)
|
#' ICA Signal Extraction
#'
#' `step_ica` creates a *specification* of a recipe step
#' that will convert numeric data into one or more independent
#' components.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute the components. See
#' [selections()] for more details. For the `tidy`
#' method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new independent component columns created by the
#' original variables will be used as predictors in a model.
#' @param num_comp The number of ICA components to retain as new
#' predictors. If `num_comp` is greater than the number of columns
#' or the number of possible components, a smaller value will be
#' used.
#' @param options A list of options to
#' [fastICA::fastICA()]. No defaults are set here.
#' **Note** that the arguments `X` and `n.comp` should
#' not be passed here.
#' @param res The [fastICA::fastICA()] object is stored
#' here once this preprocessing step has been trained by
#' [prep.recipe()].
#' @param prefix A character string that will be the prefix to the
#' resulting new variables. See notes below.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected), `value` (the loading),
#' and `component`.
#' @keywords datagen
#' @concept preprocessing
#' @concept ica
#' @concept projection_methods
#' @export
#' @details Independent component analysis (ICA) is a
#' transformation of a group of variables that produces a new set
#' of artificial features or components. ICA assumes that the
#' variables are mixtures of a set of distinct, non-Gaussian
#' signals and attempts to transform the data to isolate these
#' signals. Like PCA, the components are statistically independent
#' from one another. This means that they can be used to combat
#' large inter-variables correlations in a data set. Also like PCA,
#' it is advisable to center and scale the variables prior to
#' running ICA.
#'
#' This package produces components using the "FastICA"
#' methodology (see reference below). This step requires the
#' \pkg{dimRed} and \pkg{fastICA} packages. If not installed, the
#' step will stop with a note about installing these packages.
#'
#' The argument `num_comp` controls the number of components that
#' will be retained (the original variables that are used to derive
#' the components are removed from the data). The new components
#' will have names that begin with `prefix` and a sequence of
#' numbers. The variable names are padded with zeros. For example,
#' if `num_comp < 10`, their names will be `IC1` - `IC9`.
#' If `num_comp = 101`, the names would be `IC001` -
#' `IC101`.
#'
#' @references Hyvarinen, A., and Oja, E. (2000). Independent
#' component analysis: algorithms and applications. *Neural
#' Networks*, 13(4-5), 411-430.
#'
#' @examples
#' # from fastICA::fastICA
#' set.seed(131)
#' S <- matrix(runif(400), 200, 2)
#' A <- matrix(c(1, 1, -1, 3), 2, 2, byrow = TRUE)
#' X <- as.data.frame(S %*% A)
#'
#' tr <- X[1:100, ]
#' te <- X[101:200, ]
#'
#' rec <- recipe( ~ ., data = tr)
#'
#' ica_trans <- step_center(rec, V1, V2)
#' ica_trans <- step_scale(ica_trans, V1, V2)
#' ica_trans <- step_ica(ica_trans, V1, V2, num_comp = 2)
#'
#' if (require(dimRed) & require(fastICA)) {
#' ica_estimates <- prep(ica_trans, training = tr)
#' ica_data <- bake(ica_estimates, te)
#'
#' plot(te$V1, te$V2)
#' plot(ica_data$IC1, ica_data$IC2)
#'
#' tidy(ica_trans, number = 3)
#' tidy(ica_estimates, number = 3)
#' }
#' @seealso [step_pca()] [step_kpca()]
#' [step_isomap()] [recipe()] [prep.recipe()]
#' [bake.recipe()]
# User-facing constructor for the ICA step: verifies the suggested packages
# are installed, then registers an untrained `step_ica` object on the recipe.
# (The full contract is documented in the roxygen block above.)
step_ica <-
function(recipe,
...,
role = "predictor",
trained = FALSE,
num_comp = 5,
options = list(),
res = NULL,
prefix = "IC",
skip = FALSE,
id = rand_id("ica")) {
# dimRed + fastICA perform the actual decomposition at prep() time
recipes_pkg_check(c("dimRed", "fastICA"))
add_step(
recipe,
step_ica_new(
terms = ellipse_check(...),
role = role,
trained = trained,
num_comp = num_comp,
options = options,
res = res,
prefix = prefix,
skip = skip,
id = id
)
)
}
# Internal low-level constructor: packs the fields into a `step` object with
# subclass "ica". No validation here; callers are step_ica()/prep.step_ica().
step_ica_new <-
function(terms, role, trained, num_comp, options, res, prefix, skip, id) {
step(
subclass = "ica",
terms = terms,
role = role,
trained = trained,
num_comp = num_comp,
options = options,
res = res,
prefix = prefix,
skip = skip,
id = id
)
}
#' @export
# Trains the step: resolves the selected columns, clamps num_comp to the
# number of selected columns, and fits a FastICA model via dimRed. Returns a
# new (trained) step object; the fitted model is stored in `res`.
prep.step_ica <- function(x, training, info = NULL, ...) {
col_names <- terms_select(x$terms, info = info)
check_type(training[, col_names])
if (x$num_comp > 0) {
# cannot extract more components than there are input columns
x$num_comp <- min(x$num_comp, length(col_names))
# user-supplied options become FastICA's standard parameters
indc <- dimRed::FastICA(stdpars = x$options)
indc <-
try(
indc@fun(
dimRed::dimRedData(as.data.frame(training[, col_names, drop = FALSE])),
list(ndim = x$num_comp)
),
silent = TRUE
)
# surface dimRed/fastICA failures as a step-specific error
if (inherits(indc, "try-error")) {
stop("`step_ica` failed with error:\n", as.character(indc), call. = FALSE)
}
} else {
# num_comp == 0: keep only the column names so bake()/tidy() can no-op
indc <- list(x_vars = col_names)
}
step_ica_new(
terms = x$terms,
role = x$role,
trained = TRUE,
num_comp = x$num_comp,
options = x$options,
res = indc,
prefix = x$prefix,
skip = x$skip,
id = x$id
)
}
#' @export
# Applies the trained ICA rotation to new data: appends the IC* columns and
# drops the original input columns. No-op (pass-through) when num_comp == 0.
bake.step_ica <- function(object, new_data, ...) {
if (object$num_comp > 0) {
# recover the training-column names captured in the dimRed apply closure
ica_vars <- colnames(environment(object$res@apply)$indata)
comps <-
object$res@apply(
dimRed::dimRedData(
as.data.frame(new_data[, ica_vars, drop = FALSE])
)
)@data
comps <- comps[, 1:object$num_comp, drop = FALSE]
# zero-padded names, e.g. IC1..IC9 or IC001..IC101
colnames(comps) <- names0(ncol(comps), object$prefix)
new_data <- bind_cols(new_data, as_tibble(comps))
# remove the source columns now that the components replace them
new_data <-
new_data[, !(colnames(new_data) %in% ica_vars), drop = FALSE]
}
as_tibble(new_data)
}
# Console display for the step; lists the training variables once trained.
# Returns its argument invisibly, as print methods should.
print.step_ica <-
function(x, width = max(20, options()$width - 29), ...) {
if (x$num_comp == 0) {
cat("No ICA components were extracted.\n")
} else {
cat("ICA extraction with ")
printer(colnames(x$res@org.data), x$terms, x$trained, width = width)
}
invisible(x)
}
#' @importFrom utils stack
#' @rdname step_ica
#' @param x A `step_ica` object.
#' @export
# Returns a tibble of ICA loadings (one row per term x component) for a
# trained step, or a placeholder tibble of NAs when untrained or when
# num_comp == 0. Always appends the step id column.
tidy.step_ica <- function(x, ...) {
if (is_trained(x)) {
if (x$num_comp > 0) {
rot <- dimRed::getRotationMatrix(x$res)
colnames(rot) <- names0(ncol(rot), x$prefix)
rot <- as.data.frame(rot)
vars <- colnames(x$res@org.data)
npc <- ncol(rot)
# stack() reshapes the loading matrix into long (value, component) pairs
res <- utils::stack(rot)
colnames(res) <- c("value", "component")
res$component <- as.character(res$component)
# stack() repeats columns contiguously, so terms cycle within component
res$terms <- rep(vars, npc)
res <- as_tibble(res)
} else {
res <- tibble(terms = x$res$x_vars, value = na_dbl, component = na_chr)
}
} else {
# untrained: emit every (term, component) combination with NA values
term_names <- sel2char(x$terms)
comp_names <- names0(x$num_comp, x$prefix)
res <- expand.grid(terms = term_names,
value = na_dbl,
component = comp_names)
res$terms <- as.character(res$terms)
res$component <- as.character(res$component)
res <- as_tibble(res)
}
res$id <- x$id
res
}
#' @rdname tunable.step
#' @export
# Declares `num_comp` as this step's tunable parameter, mapped to
# dials::num_comp with a default search range of 1 to 4 components.
tunable.step_ica <- function(x, ...) {
tibble::tibble(
name = "num_comp",
call_info = list(list(pkg = "dials", fun = "num_comp", range = c(1, 4))),
source = "recipe",
component = "step_ica",
component_id = x$id
)
}
| /R/ica.R | no_license | konradsemsch/recipes | R | false | false | 7,946 | r | #' ICA Signal Extraction
#'
#' `step_ica` creates a *specification* of a recipe step
#' that will convert numeric data into one or more independent
#' components.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute the components. See
#' [selections()] for more details. For the `tidy`
#' method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned? By default, the function assumes
#' that the new independent component columns created by the
#' original variables will be used as predictors in a model.
#' @param num_comp The number of ICA components to retain as new
#' predictors. If `num_comp` is greater than the number of columns
#' or the number of possible components, a smaller value will be
#' used.
#' @param options A list of options to
#' [fastICA::fastICA()]. No defaults are set here.
#' **Note** that the arguments `X` and `n.comp` should
#' not be passed here.
#' @param res The [fastICA::fastICA()] object is stored
#' here once this preprocessing step has been trained by
#' [prep.recipe()].
#' @param prefix A character string that will be the prefix to the
#' resulting new variables. See notes below.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected), `value` (the loading),
#' and `component`.
#' @keywords datagen
#' @concept preprocessing
#' @concept ica
#' @concept projection_methods
#' @export
#' @details Independent component analysis (ICA) is a
#' transformation of a group of variables that produces a new set
#' of artificial features or components. ICA assumes that the
#' variables are mixtures of a set of distinct, non-Gaussian
#' signals and attempts to transform the data to isolate these
#' signals. Like PCA, the components are statistically independent
#' from one another. This means that they can be used to combat
#' large inter-variables correlations in a data set. Also like PCA,
#' it is advisable to center and scale the variables prior to
#' running ICA.
#'
#' This package produces components using the "FastICA"
#' methodology (see reference below). This step requires the
#' \pkg{dimRed} and \pkg{fastICA} packages. If not installed, the
#' step will stop with a note about installing these packages.
#'
#' The argument `num_comp` controls the number of components that
#' will be retained (the original variables that are used to derive
#' the components are removed from the data). The new components
#' will have names that begin with `prefix` and a sequence of
#' numbers. The variable names are padded with zeros. For example,
#' if `num_comp < 10`, their names will be `IC1` - `IC9`.
#' If `num_comp = 101`, the names would be `IC001` -
#' `IC101`.
#'
#' @references Hyvarinen, A., and Oja, E. (2000). Independent
#' component analysis: algorithms and applications. *Neural
#' Networks*, 13(4-5), 411-430.
#'
#' @examples
#' # from fastICA::fastICA
#' set.seed(131)
#' S <- matrix(runif(400), 200, 2)
#' A <- matrix(c(1, 1, -1, 3), 2, 2, byrow = TRUE)
#' X <- as.data.frame(S %*% A)
#'
#' tr <- X[1:100, ]
#' te <- X[101:200, ]
#'
#' rec <- recipe( ~ ., data = tr)
#'
#' ica_trans <- step_center(rec, V1, V2)
#' ica_trans <- step_scale(ica_trans, V1, V2)
#' ica_trans <- step_ica(ica_trans, V1, V2, num_comp = 2)
#'
#' if (require(dimRed) & require(fastICA)) {
#' ica_estimates <- prep(ica_trans, training = tr)
#' ica_data <- bake(ica_estimates, te)
#'
#' plot(te$V1, te$V2)
#' plot(ica_data$IC1, ica_data$IC2)
#'
#' tidy(ica_trans, number = 3)
#' tidy(ica_estimates, number = 3)
#' }
#' @seealso [step_pca()] [step_kpca()]
#' [step_isomap()] [recipe()] [prep.recipe()]
#' [bake.recipe()]
step_ica <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           num_comp = 5,
           options = list(),
           res = NULL,
           prefix = "IC",
           skip = FALSE,
           id = rand_id("ica")) {
    # Both packages are required when the step is prepped; fail fast at
    # specification time if either is missing.
    recipes_pkg_check(c("dimRed", "fastICA"))
    # Capture the selectors and bundle all settings into an (untrained)
    # step object, then append it to the recipe's step sequence.
    ica_step <- step_ica_new(
      terms = ellipse_check(...),
      role = role,
      trained = trained,
      num_comp = num_comp,
      options = options,
      res = res,
      prefix = prefix,
      skip = skip,
      id = id
    )
    add_step(recipe, ica_step)
  }
step_ica_new <-
  function(terms, role, trained, num_comp, options, res, prefix, skip, id) {
    # Thin constructor: forwards every field to the generic step()
    # constructor under the "ica" subclass. No validation happens here.
    fields <- list(
      subclass = "ica",
      terms = terms,
      role = role,
      trained = trained,
      num_comp = num_comp,
      options = options,
      res = res,
      prefix = prefix,
      skip = skip,
      id = id
    )
    do.call(step, fields)
  }
#' @export
prep.step_ica <- function(x, training, info = NULL, ...) {
  # Train the ICA step: resolve the selectors to concrete column names and
  # verify they are of an acceptable type for this step.
  col_names <- terms_select(x$terms, info = info)
  check_type(training[, col_names])
  if (x$num_comp > 0) {
    # Cannot extract more components than there are selected columns.
    x$num_comp <- min(x$num_comp, length(col_names))
    # Build a dimRed FastICA method object with the user-supplied options,
    # then fit it on the selected training columns.
    indc <- dimRed::FastICA(stdpars = x$options)
    indc <-
      try(
        indc@fun(
          dimRed::dimRedData(as.data.frame(training[, col_names, drop = FALSE])),
          list(ndim = x$num_comp)
        ),
        silent = TRUE
      )
    # Surface a failed fit as a recipes error rather than a raw try-error.
    if (inherits(indc, "try-error")) {
      stop("`step_ica` failed with error:\n", as.character(indc), call. = FALSE)
    }
  } else {
    # num_comp == 0: nothing to fit; remember which columns were selected
    # so tidy()/bake() can still report them.
    indc <- list(x_vars = col_names)
  }
  # Return a trained copy of the step with the fitted result attached.
  step_ica_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    num_comp = x$num_comp,
    options = x$options,
    res = indc,
    prefix = x$prefix,
    skip = x$skip,
    id = x$id
  )
}
#' @export
bake.step_ica <- function(object, new_data, ...) {
  # Apply a trained ICA step: project `new_data` onto the fitted components,
  # append the component scores, and drop the original predictors.
  # When num_comp == 0 the data passes through unchanged (aside from tibble
  # coercion at the end).
  if (object$num_comp > 0) {
    # Recover the names of the original input variables from the fitted
    # dimRed object: the code relies on the `apply` closure having captured
    # the training data as `indata` in its environment.
    ica_vars <- colnames(environment(object$res@apply)$indata)
    # Project the new data onto the fitted independent components.
    comps <-
      object$res@apply(
        dimRed::dimRedData(
          as.data.frame(new_data[, ica_vars, drop = FALSE])
        )
      )@data
    # Keep only the requested number of components and name them with the
    # zero-padded prefix scheme (e.g. IC1...IC9).
    comps <- comps[, 1:object$num_comp, drop = FALSE]
    colnames(comps) <- names0(ncol(comps), object$prefix)
    # Append the scores, then remove the original predictor columns.
    new_data <- bind_cols(new_data, as_tibble(comps))
    new_data <-
      new_data[, !(colnames(new_data) %in% ica_vars), drop = FALSE]
  }
  as_tibble(new_data)
}
print.step_ica <-
  function(x, width = max(20, options()$width - 29), ...) {
    # Console summary of the step. With a non-zero component count the
    # selected terms are printed via the shared `printer()` helper;
    # otherwise report that no components were extracted.
    if (x$num_comp != 0) {
      cat("ICA extraction with ")
      printer(colnames(x$res@org.data), x$terms, x$trained, width = width)
    } else {
      cat("No ICA components were extracted.\n")
    }
    # Return the step invisibly, as print methods conventionally do.
    invisible(x)
  }
#' @importFrom utils stack
#' @rdname step_ica
#' @param x A `step_ica` object.
#' @export
tidy.step_ica <- function(x, ...) {
  # Return a tibble with columns terms/value/component (+ id): the loading
  # of each input variable on each component when trained, or a placeholder
  # grid of NAs when the step has not been prepped yet.
  if (is_trained(x)) {
    if (x$num_comp > 0) {
      # One row per (input variable, component) pair, taken from the
      # rotation matrix of the fitted dimRed object.
      rot <- dimRed::getRotationMatrix(x$res)
      colnames(rot) <- names0(ncol(rot), x$prefix)
      rot <- as.data.frame(rot)
      vars <- colnames(x$res@org.data)
      npc <- ncol(rot)
      # stack() reshapes wide -> long (one row per loading value), stacking
      # column by column, so the variable names recycle cleanly below.
      res <- utils::stack(rot)
      colnames(res) <- c("value", "component")
      res$component <- as.character(res$component)
      res$terms <- rep(vars, npc)
      res <- as_tibble(res)
    } else {
      # Trained with zero components: report the selected variables with
      # missing loadings/components.
      res <- tibble(terms = x$res$x_vars, value = na_dbl, component = na_chr)
    }
  } else {
    # Untrained: emit every selector x component combination with NA values.
    term_names <- sel2char(x$terms)
    comp_names <- names0(x$num_comp, x$prefix)
    res <- expand.grid(terms = term_names,
                       value = na_dbl,
                       component = comp_names)
    res$terms <- as.character(res$terms)
    res$component <- as.character(res$component)
    res <- as_tibble(res)
  }
  res$id <- x$id
  res
}
#' @rdname tunable.step
#' @export
tunable.step_ica <- function(x, ...) {
  # Declare the step's single tunable parameter: `num_comp`, resolved via
  # dials::num_comp() with a default search range of 1 to 4 components.
  dials_spec <- list(pkg = "dials", fun = "num_comp", range = c(1, 4))
  tibble::tibble(
    name = "num_comp",
    call_info = list(dials_spec),
    source = "recipe",
    component = "step_ica",
    component_id = x$id
  )
}
|
# cachematrix.R
# There are two functions makeCacheMatrix and cacheSolve
# makeCacheMatrix creates a special matrix, that can store the inverse of itself
# cacheSolve calculates the inverse of matrix, created with makeCacheMatrix
# makeCacheMatrix creates a special matrix, which is a list containing functions to:
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse of the matrix
# 4. get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache matrix": a list of closures sharing the matrix `x` and a
  # cached inverse. The inverse starts as NULL and is filled in lazily by
  # cacheSolve() via setSolve().
  inv <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse, since a
    # previously computed inverse no longer matches the new data.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setSolve = function(solve) inv <<- solve,
    # Return the cached inverse, or NULL if it has not been computed yet.
    getSolve = function() inv
  )
}
# cacheSolve returns the inverse of the special "cache matrix" created by
# makeCacheMatrix(). On the first call the inverse is computed with solve()
# and stored in the object's cache; later calls return the stored value and
# emit a message. Extra arguments in `...` are forwarded to solve().
# NOTE(review): assumes x$get() returns an invertible square matrix --
# solve() will error otherwise.
cacheSolve <- function(x, ...) {
  # Fetch any previously cached inverse.
  s <- x$getSolve()
  # Cache hit: reuse the stored inverse and skip the computation.
  if(!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  # Cache miss: pull the underlying matrix,
  data <- x$get()
  # compute its inverse (solve() of a square matrix with no second
  # argument returns the inverse),
  s <- solve(data, ...)
  # store the result so future calls are free,
  x$setSolve(s)
  # and return the freshly computed inverse.
  s
} | /r-programming/programming-assignment-2/cachematrix.R | no_license | stepankuzmin/coursera-data-science | R | false | false | 2,029 | r | # cachematrix.R
# There are two functions makeCacheMatrix and cacheSolve
# makeCacheMatrix creates a special matrix, that can store the inverse of itself
# cacheSolve calculates the inverse of matrix, created with makeCacheMatrix
# makeCacheMatrix creates a special matrix, which is a list containing functions to:
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of the inverse of the matrix
# 4. get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores a value via setSolve().
  cached_inverse <- NULL
  set <- function(y) {
    # New matrix data invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setSolve <- function(solve) {
    cached_inverse <<- solve
  }
  getSolve <- function() {
    cached_inverse
  }
  # Expose the four accessors as a named list (the "cache matrix" object).
  list(set = set,
       get = get,
       setSolve = setSolve,
       getSolve = getSolve)
}
# Compute (or retrieve) the inverse of a "cache matrix" built with
# makeCacheMatrix(). The first call computes the inverse via solve() and
# stores it in the object's cache; subsequent calls return the cached value
# and emit a "getting cached data" message. Additional arguments in `...`
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (is.null(cached)) {
    # Cache miss: compute the inverse from the stored matrix and save it
    # so future calls can skip the computation.
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
    return(cached)
  }
  # Cache hit: reuse the previously computed inverse.
  message("getting cached data")
  cached
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list-metadata.R
\name{sf_list_metadata}
\alias{sf_list_metadata}
\title{List All Objects of a Certain Metadata Type in Salesforce}
\usage{
sf_list_metadata(queries, verbose = FALSE)
}
\arguments{
\item{queries}{a \code{list} of \code{list}s with each element consisting of 2 components: 1)
the metadata type being requested and 2) the folder associated with the type, which is required for types
that use folders, such as Dashboard, Document, EmailTemplate, or Report.}
\item{verbose}{\code{logical}; an indicator of whether to print additional
detail for each API call, which is useful for debugging. More specifically, when
set to \code{TRUE} the URL, header, and body will be printed for each request,
along with additional diagnostic information where available.}
}
\value{
A \code{tbl_dfs} containing the queried metadata types
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
This function takes a query of metadata types and returns a
summary of all objects in salesforce of the requested types
}
\note{
Only 3 queries can be specified at one time, so the list length must not exceed 3.
}
\examples{
\dontrun{
# pull back a list of all Custom Objects and Email Templates
my_queries <- list(list(type='CustomObject'),
list(folder='unfiled$public',
type='EmailTemplate'))
metadata_info <- sf_list_metadata(queries=my_queries)
}
}
\references{
\url{https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/}
}
| /man/sf_list_metadata.Rd | permissive | carlganz/salesforcer | R | false | true | 1,699 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list-metadata.R
\name{sf_list_metadata}
\alias{sf_list_metadata}
\title{List All Objects of a Certain Metadata Type in Salesforce}
\usage{
sf_list_metadata(queries, verbose = FALSE)
}
\arguments{
\item{queries}{a \code{list} of \code{list}s with each element consisting of 2 components: 1)
the metadata type being requested and 2) the folder associated with the type, which is required for types
that use folders, such as Dashboard, Document, EmailTemplate, or Report.}
\item{verbose}{\code{logical}; an indicator of whether to print additional
detail for each API call, which is useful for debugging. More specifically, when
set to \code{TRUE} the URL, header, and body will be printed for each request,
along with additional diagnostic information where available.}
}
\value{
A \code{tbl_dfs} containing the queried metadata types
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
This function takes a query of metadata types and returns a
summary of all objects in salesforce of the requested types
}
\note{
Only 3 queries can be specified at one time, so the list length must not exceed 3.
}
\examples{
\dontrun{
# pull back a list of all Custom Objects and Email Templates
my_queries <- list(list(type='CustomObject'),
list(folder='unfiled$public',
type='EmailTemplate'))
metadata_info <- sf_list_metadata(queries=my_queries)
}
}
\references{
\url{https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/}
}
|
# Read the Ukraine level-1 administrative boundaries shapefile with rgdal.
# NOTE(review): rgdal was retired from CRAN in 2023; consider migrating to
# sf::st_read() when dependencies allow.
library(rgdal)
# gadm36_UKR_1.shp is already downloaded (GADM v3.6, Ukraine, admin level 1)
getwd()
my_spdf <- readOGR(
  dsn= paste0(getwd(),"/ukraine_shapefiles/Administation_levels/gadm36_UKR_1.shp") ,
  verbose=FALSE
)
# Quick inspection of the resulting spatial object: summary, number of
# features, and the first rows of its attribute table.
summary(my_spdf)
length(my_spdf)
head(my_spdf@data)
library(broom) # to tidy a map: convert the spatial object to a plain data frame
spdf_fortified <- tidy(my_spdf, region = "NAME_1") # Check the name of region carefully
# Plot the region polygons with ggplot2 (fixed fill, white borders).
library(ggplot2)
ggplot() +
  geom_polygon(data = spdf_fortified,
               aes( x = long, y = lat, group = group), fill="#69b3a2", color="white") +
  theme_void()
| /ukraine_map.R | no_license | Okssana/geo_ukraine | R | false | false | 627 | r | # Read this shape file with the rgdal library.
library(rgdal)
# gadm36_UKR_1.shp is already downloaded
getwd()
my_spdf <- readOGR(
dsn= paste0(getwd(),"/ukraine_shapefiles/Administation_levels/gadm36_UKR_1.shp") ,
verbose=FALSE
)
# Information about a file
summary(my_spdf)
length(my_spdf)
head(my_spdf@data)
library(broom) # to tidy a map
spdf_fortified <- tidy(my_spdf, region = "NAME_1") # Check the name of region carefully
# Plot it
library(ggplot2)
ggplot() +
geom_polygon(data = spdf_fortified,
aes( x = long, y = lat, group = group), fill="#69b3a2", color="white") +
theme_void()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hgch_area_DatNum.R
\name{hgch_area_DatNum}
\alias{hgch_area_DatNum}
\title{Area chart Dat Num}
\usage{
hgch_area_DatNum(data, ...)
}
\arguments{
\item{data}{A data frame, data frame extension (e.g. a tibble), a
lazy data frame (e.g. from dbplyr or dtplyr), or fringe data (e.g from homodatum).}
\item{...}{Read \code{\link[dsvizopts]{chart_viz_options}} a general options summary to configure your hgchmagic plots.}
}
\description{
\code{hgch_area_DatNum()} Create a highcharter area plot based on a particular data type.
In this case, you can load data with only two columns, where the first column is a
\strong{date column} and the second must be a \strong{numeric class column}, or be sure that
the first two columns meet this condition
}
\section{Ftype}{
Dat-Num
}
\examples{
data <- sample_data("Dat-Num", n = 30)
hgch_area_DatNum(data)
hgch_area_DatNum(data)
# if you want to calculate the average instead of the sum, you can use agg inside a function
hgch_area_DatNum(data, agg = "mean")
# data with more of one column
data <- sample_data("Dat-Num-Cat-Cat-Cat-Num", n = 30)
hgch_area_DatNum(data)
# calculate percentage
hgch_area_DatNum(data, percentage = TRUE)
# numeric format
hgch_area_DatNum(data, percentage = TRUE, format_sample_num = "1.234,")
# You can call the mean and percentage in the tooltip plot
num_name <- names(data)[2]
data \%>\%
hgch_area_DatNum(agg = "mean",
tooltip = paste0("Average: {", num_name ,"} <br/> Percentage: {\%}\%"))
}
\seealso{
Other Dat-Num plots:
\code{\link{hgch_line_DatNum}()},
\code{\link{hgch_scatter_DatNum}()}
}
\concept{Dat-Num plots}
| /man/hgch_area_DatNum.Rd | no_license | lenafm/hgchmagic | R | false | true | 1,690 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hgch_area_DatNum.R
\name{hgch_area_DatNum}
\alias{hgch_area_DatNum}
\title{Area chart Dat Num}
\usage{
hgch_area_DatNum(data, ...)
}
\arguments{
\item{data}{A data frame, data frame extension (e.g. a tibble), a
lazy data frame (e.g. from dbplyr or dtplyr), or fringe data (e.g from homodatum).}
\item{...}{Read \code{\link[dsvizopts]{chart_viz_options}} a general options summary to configure your hgchmagic plots.}
}
\description{
\code{hgch_area_DatNum()} Create a highcharter area plot based on a particular data type.
In this case, you can load data with only two columns, where the first column is a
\strong{date column} and the second must be a \strong{numeric class column}, or be sure that
the first two columns meet this condition
}
\section{Ftype}{
Dat-Num
}
\examples{
data <- sample_data("Dat-Num", n = 30)
hgch_area_DatNum(data)
hgch_area_DatNum(data)
# if you want to calculate the average instead of the sum, you can use agg inside a function
hgch_area_DatNum(data, agg = "mean")
# data with more of one column
data <- sample_data("Dat-Num-Cat-Cat-Cat-Num", n = 30)
hgch_area_DatNum(data)
# calculate percentage
hgch_area_DatNum(data, percentage = TRUE)
# numeric format
hgch_area_DatNum(data, percentage = TRUE, format_sample_num = "1.234,")
# You can call the mean and percentage in the tooltip plot
num_name <- names(data)[2]
data \%>\%
hgch_area_DatNum(agg = "mean",
tooltip = paste0("Average: {", num_name ,"} <br/> Percentage: {\%}\%"))
}
\seealso{
Other Dat-Num plots:
\code{\link{hgch_line_DatNum}()},
\code{\link{hgch_scatter_DatNum}()}
}
\concept{Dat-Num plots}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsinfer.R
\name{tsinfer}
\alias{tsinfer}
\title{Infer selection/drift parameters for time-series allele-frequency data}
\usage{
tsinfer(tvec, bvec, nvec, maxiter = 200, prec = 1e-06, iffreq = FALSE,
ifneut = FALSE, iffixedf0 = FALSE, verbose = FALSE, mins = -2,
maxs = 2, minalpha = 10, maxalpha = 1e+08, minf0 = -10, maxf0 = 10)
}
\arguments{
\item{tvec}{Time coordinates for the time-series (start with 0)}
\item{bvec}{Number of new form per time point}
\item{nvec}{Total number of samples per time point}
\item{maxiter}{Maximum number of iterations}
\item{prec}{Precision for optimisation}
\item{iffreq}{whether to assume that the sample frequencies are the population frequencies (default=FALSE)}
\item{ifneut}{whether to compute only the neutral model (default=FALSE)}
\item{iffixedf0}{whether to use the initial sample frequency as the initial frequency of the logistic component of the model (default=FALSE)}
\item{verbose}{whether to print intermediate output (default=FALSE)}
\item{mins}{minimum s value to consider (default=-2)}
\item{maxs}{maximum s value to consider (default=2)}
\item{minalpha}{minimum alpha value to consider (default=10)}
\item{maxalpha}{maximum alpha value to consider (default=1e8)}
\item{minf0}{minimum f0 value to consider in log-odds (default=-10)}
\item{maxf0}{maximum f0 value to consider in log-odds (default=10)}
}
\value{
A list with the neutral and non-neutral parameter values and associated log-likelihoods
The output of the execution is the following list:
s = selection coefficient for non-neutral model
alpha = population size for non-neutral model
f0 = initial frequency for the logistic in non-neutral model
LL = log-likelihood of non-neutral model
s.0 = 0 (selection coefficient for the neutral model)
alpha.0 = population size for the neutral model
f0.0 = initial frequency for the logistic in neutral model
LL.0 = log-likelihood of neutral model
}
\description{
\code{tsinfer} computes the selection coefficient, population size and initial frequency for the logistic using a Gaussian approximation of the Kimura expression for time-series variant-frequency data.
}
\details{
Essential arguments are tvec (time point labels for time series starting at 0), bvec (number of new variants at each time point), and nvec (total number of samples at each time point).
}
| /man/tsinfer.Rd | permissive | LiYingWang/signatselect | R | false | true | 2,412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsinfer.R
\name{tsinfer}
\alias{tsinfer}
\title{Infer selection/drift parameters for time-series allele-frequency data}
\usage{
tsinfer(tvec, bvec, nvec, maxiter = 200, prec = 1e-06, iffreq = FALSE,
ifneut = FALSE, iffixedf0 = FALSE, verbose = FALSE, mins = -2,
maxs = 2, minalpha = 10, maxalpha = 1e+08, minf0 = -10, maxf0 = 10)
}
\arguments{
\item{tvec}{Time coordinates for the time-series (start with 0)}
\item{bvec}{Number of new form per time point}
\item{nvec}{Total number of samples per time point}
\item{maxiter}{Maximum number of iterations}
\item{prec}{Precision for optimisation}
\item{iffreq}{whether to assume that the sample frequencies are the population frequencies (default=FALSE)}
\item{ifneut}{whether to compute only the neutral model (default=FALSE)}
\item{iffixedf0}{whether to use the initial sample frequency as the initial frequency of the logistic component of the model (default=FALSE)}
\item{verbose}{whether to print intermediate output (default=FALSE)}
\item{mins}{minimum s value to consider (default=-2)}
\item{maxs}{maximum s value to consider (default=2)}
\item{minalpha}{minimum alpha value to consider (default=10)}
\item{maxalpha}{maximum alpha value to consider (default=1e8)}
\item{minf0}{minimum f0 value to consider in log-odds (default=-10)}
\item{maxf0}{maximum f0 value to consider in log-odds (default=10)}
}
\value{
A list with the neutral and non-neutral parameter values and associated log-likelihoods
The output of the execution is the following list:
s = selection coefficient for non-neutral model
alpha = population size for non-neutral model
f0 = initial frequency for the logistic in non-neutral model
LL = log-likelihood of non-neutral model
s.0 = 0 (selection coefficient for the neutral model)
alpha.0 = population size for the neutral model
f0.0 = initial frequency for the logistic in neutral model
LL.0 = log-likelihood of neutral model
}
\description{
\code{tsinfer} computes the selection coefficient, population size and initial frequency for the logistic using a Gaussian approximation of the Kimura expression for time-series variant-frequency data.
}
\details{
Essential arguments are tvec (time point labels for time series starting at 0), bvec (number of new variants at each time point), and nvec (total number of samples at each time point).
}
|
#Unbiased
# final.r: trains and compares logistic regression, random forest, and KNN
# classifiers on a class-balanced sample of a bank telemarketing dataset.
#libraries
library(caret)
library(caTools)
library(class)
library(e1071)
library(ggplot2)
library(gplots)
library(InformationValue)
library(randomForest)
library(ROCR)
#-------------------------------------------------------------------------------------------------------------------------------------
#Reading the dataset
# NOTE(review): absolute Windows path -- the script only runs on this machine.
bank_data <- read.csv('C:/Users/Dell1/Desktop/Multivariate Bank Marketing Dataset/bank-final-clean.csv',header = TRUE)
View(bank_data)
#-------------------------------------------------------------------------------------------------------------------------------------
#For unbiased data
#Taking random samples from dataset with 4640 yes and no
# Undersample the majority class (y == 0) down to 4640 rows by shuffling and
# truncating, so both classes are equally represented in df_bank_data.
# NOTE(review): no set.seed() before sample(), so the subsample (and all
# downstream results) is not reproducible.
bank_no <- subset(bank_data, bank_data$y == 0)
bank_yes <- subset(bank_data, bank_data$y == 1)
bank_no <- bank_no[sample(nrow(bank_no)),]
bank_no <- bank_no[1:4640,]
df_bank_data <- rbind(bank_no, bank_yes)
#-------------------------------------------------------------------------------------------------------------------------------------
#calculating the number of yes and no values in the predictor variable
table(df_bank_data$y)
#Splitting the data set in 75:25
# sample.split() stratifies on y, preserving the class ratio in both splits.
split <- sample.split(df_bank_data$y,SplitRatio = 0.75)
train <- subset(df_bank_data,split == TRUE)
test <- subset(df_bank_data,split == FALSE)
#-------------------------------------------------------------------------------------------------------------------------------------
#Variable Selection and Logistic regression
# Stepwise AIC-based variable selection starting from the full model; the
# formula below was taken from this run's selected terms.
new_reg <- step(glm(formula = y~.,family = binomial, data = train))
#Training the Logistic Regression model with duration variable
lreg <- glm(formula = y ~ contact + duration + campaign + previous + blue.collar +
            management + retired + self.employed + services + student +
            technician + divorced + married + single + apr + aug + jul +
            jun + mar + may + nov + oct + mon + thu + failure + nonexistent +
            basic + illiterate + university.degree,family = binomial, data = train)
#testing the data set (LR)
# Predicted probabilities on the held-out set; classify at a 0.4 cutoff and
# show the confusion table.
predicted <- predict(lreg, type = "response", newdata = test)
prob_pred = ifelse(predicted > 0.4, 'yes','no')
table(test$y,prob_pred)
#ROC Curve
pred <- prediction(predicted, test$y)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, col=2, lwd = 3, main = "ROC Curve")
abline(0,1)
#AUC
auc_ROCR <- performance(pred, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
#-------------------------------------------------------------------------------------------------------------------------------------
#Logistic Regression Without duration variable
# Same model with `duration` dropped (train[-3] removes column 3) and a 0.5
# classification cutoff.
# NOTE(review): assumes duration is column 3 of the cleaned data -- verify.
lreg1 <- glm(formula = y ~ contact + campaign + previous + blue.collar +
             management + retired + self.employed + services + student +
             technician + divorced + married + single + apr + aug + jul +
             jun + mar + may + nov + oct + mon + thu + failure + nonexistent +
             basic + illiterate + university.degree,family = binomial, data = train[-3])
predicted_1 <- predict(lreg1, type = "response", newdata = test)
prob_pred_1 = ifelse(predicted_1 > 0.5, 'yes','no')
table(test$y,prob_pred_1)
#ROC Curve
pred_1 <- prediction(predicted_1, test$y)
perf_1 <- performance(pred_1, measure = "tpr", x.measure = "fpr")
plot(perf_1, col=2, lwd = 3, main = "ROC Curve")
abline(0,1)
#AUC
# NOTE(review): this overwrites the AUC computed for the with-duration
# model above; store under a different name if both values are needed.
auc_ROCR <- performance(pred_1, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
#-------------------------------------------------------------------------------------------------------------------------------------
#Random Forest
# Fit a 20-tree random forest; train[-6] drops column 6 from the predictors.
# NOTE(review): assumes y is column 6 of the data frame -- verify.
classifier_rf = randomForest(x = train[-6],y = as.factor(train$y) ,data = train, ntree = 20 , importance = TRUE)
y_pred <- predict(classifier_rf, newdata = test[-6])
table(test$y,y_pred)
plot(classifier_rf)
varImpPlot(classifier_rf)
#ROC Curve
# NOTE(review): predict() on a classification forest returns a factor, but
# ROCR::prediction() expects numeric scores -- this call likely errors.
# Consider predict(classifier_rf, newdata = test[-6], type = "prob")[, 2].
pred <- prediction(y_pred, test$y)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, col=2, lwd = 3, main = "ROC Curve")
abline(0,1)
#Area Under the Curve
auc_ROCR <- performance(pred, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
# Random forest with AIC run values to reduce variables
# NOTE(review): chained negative indexing like train[-6][-7] re-indexes the
# already-reduced frame, so the dropped positions shift at every step --
# confirm these are the intended columns.
rf_c = randomForest(x = train[-6][-7][-9][-10][-17][-21][-22][-23][-24]
                    [-27][-34][-35][-38][-39][-42][-44][-46],y = as.factor(train$y) ,data = train, ntree = 20 , importance = TRUE)
pred <- predict(rf_c, newdata = test[-6])
table(test$y,pred)
plot(rf_c)
#-------------------------------------------------------------------------------------------------------------------------------------
#KNN
# k-nearest neighbours (k = 5) on all predictors except column 6 (the label).
knn_classifier <- knn(train=train[,-6],test = test[,-6], cl = train[,6], k =5 )
table(knn_classifier,test$y)
#KNN with AIC run values
knn_c <- knn(train=train[,-6][,-7][,-9][,-10][,-17][,-21][,-22][,-23][,-24]
             [,-27][,-34][,-35][,-38][,-39][,-42][,-44][,-46],test = test[,-6][,-7][,-9][,-10][,-17][,-21][,-22][,-23][,-24]
             [,-27][,-34][,-35][,-38][,-39][,-42][,-44][,-46], cl = train[,6], k =5 )
table(knn_c,test$y)
#-------------------------------------------------------------------------------------------------------------------------------------
#calculating model accuracies for KNN, RF and LR
# Accuracy values are hard-coded from earlier runs and plotted as a bar chart.
model_accuracy <- c(76.16,85.94,83.83)
models_labels <- c('KNN','RF','LR')
accracy <- data.frame(Location =models_labels, Count = model_accuracy)
ggplot(
  accracy
  , aes(y = Count
        , x = Location)) +
geom_col() + xlab("AIC Values") + ylab("Number of variables") + ggtitle("Number of Variables Vs AIC Values") | /final.r | no_license | shreyasmenon/Bank-TelemarketingData-Analysis | R | false | false | 5,630 | r | #Unbiased
#libraries
# Trains and compares logistic regression, random forest, and KNN
# classifiers on a class-balanced sample of a bank telemarketing dataset.
library(caret)
library(caTools)
library(class)
library(e1071)
library(ggplot2)
library(gplots)
library(InformationValue)
library(randomForest)
library(ROCR)
#-------------------------------------------------------------------------------------------------------------------------------------
#Reading the dataset
# NOTE(review): absolute Windows path -- the script only runs on this machine.
bank_data <- read.csv('C:/Users/Dell1/Desktop/Multivariate Bank Marketing Dataset/bank-final-clean.csv',header = TRUE)
View(bank_data)
#-------------------------------------------------------------------------------------------------------------------------------------
#For unbiased data
#Taking random samples from dataset with 4640 yes and no
# Undersample the majority class (y == 0) down to 4640 rows by shuffling and
# truncating, so both classes are equally represented in df_bank_data.
# NOTE(review): no set.seed() before sample(), so the subsample (and all
# downstream results) is not reproducible.
bank_no <- subset(bank_data, bank_data$y == 0)
bank_yes <- subset(bank_data, bank_data$y == 1)
bank_no <- bank_no[sample(nrow(bank_no)),]
bank_no <- bank_no[1:4640,]
df_bank_data <- rbind(bank_no, bank_yes)
#-------------------------------------------------------------------------------------------------------------------------------------
#calculating the number of yes and no values in the predictor variable
table(df_bank_data$y)
#Splitting the data set in 75:25
# sample.split() stratifies on y, preserving the class ratio in both splits.
split <- sample.split(df_bank_data$y,SplitRatio = 0.75)
train <- subset(df_bank_data,split == TRUE)
test <- subset(df_bank_data,split == FALSE)
#-------------------------------------------------------------------------------------------------------------------------------------
#Variable Selection and Logistic regression
# Stepwise AIC-based variable selection starting from the full model; the
# formula below was taken from this run's selected terms.
new_reg <- step(glm(formula = y~.,family = binomial, data = train))
#Training the Logistic Regression model with duration variable
lreg <- glm(formula = y ~ contact + duration + campaign + previous + blue.collar +
            management + retired + self.employed + services + student +
            technician + divorced + married + single + apr + aug + jul +
            jun + mar + may + nov + oct + mon + thu + failure + nonexistent +
            basic + illiterate + university.degree,family = binomial, data = train)
#testing the data set (LR)
# Predicted probabilities on the held-out set; classify at a 0.4 cutoff and
# show the confusion table.
predicted <- predict(lreg, type = "response", newdata = test)
prob_pred = ifelse(predicted > 0.4, 'yes','no')
table(test$y,prob_pred)
#ROC Curve
pred <- prediction(predicted, test$y)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, col=2, lwd = 3, main = "ROC Curve")
abline(0,1)
#AUC
auc_ROCR <- performance(pred, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
#-------------------------------------------------------------------------------------------------------------------------------------
#Logistic Regression Without duration variable
# Same model with `duration` dropped (train[-3] removes column 3) and a 0.5
# classification cutoff.
# NOTE(review): assumes duration is column 3 of the cleaned data -- verify.
lreg1 <- glm(formula = y ~ contact + campaign + previous + blue.collar +
             management + retired + self.employed + services + student +
             technician + divorced + married + single + apr + aug + jul +
             jun + mar + may + nov + oct + mon + thu + failure + nonexistent +
             basic + illiterate + university.degree,family = binomial, data = train[-3])
predicted_1 <- predict(lreg1, type = "response", newdata = test)
prob_pred_1 = ifelse(predicted_1 > 0.5, 'yes','no')
table(test$y,prob_pred_1)
#ROC Curve
pred_1 <- prediction(predicted_1, test$y)
perf_1 <- performance(pred_1, measure = "tpr", x.measure = "fpr")
plot(perf_1, col=2, lwd = 3, main = "ROC Curve")
abline(0,1)
#AUC
# NOTE(review): this overwrites the AUC computed for the with-duration
# model above; store under a different name if both values are needed.
auc_ROCR <- performance(pred_1, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
#-------------------------------------------------------------------------------------------------------------------------------------
#Random Forest
# x/y interface: column 6 of train is the response y, the rest are
# predictors. (randomForest ignores `data` when x/y are given, so it is
# dropped here.)
classifier_rf = randomForest(x = train[-6], y = as.factor(train$y), ntree = 20, importance = TRUE)
y_pred <- predict(classifier_rf, newdata = test[-6])
table(test$y, y_pred)
plot(classifier_rf)
varImpPlot(classifier_rf)
#ROC Curve
# BUG FIX: ROCR::prediction() requires numeric scores; the previous code
# passed y_pred, a factor of class labels, which errors. Use the predicted
# probability of the positive class instead.
rf_prob <- predict(classifier_rf, newdata = test[-6], type = "prob")[, 2]
pred <- prediction(rf_prob, test$y)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, col = 2, lwd = 3, main = "ROC Curve")
abline(0, 1)
#Area Under the Curve
auc_ROCR <- performance(pred, measure = "auc")
auc_ROCR <- auc_ROCR@y.values[[1]]
# Random forest with AIC run values to reduce variables
# BUG FIX: the previous chained indexing train[-6][-7][-9]... drops each
# index from the *already reduced* frame, so after the first drop the
# positions no longer refer to the intended original columns. Drop all
# intended columns in a single step instead.
rf_drop_cols <- c(6, 7, 9, 10, 17, 21, 22, 23, 24, 27, 34, 35, 38, 39, 42, 44, 46)
rf_c = randomForest(x = train[-rf_drop_cols], y = as.factor(train$y), ntree = 20, importance = TRUE)
pred <- predict(rf_c, newdata = test[-6])
table(test$y, pred)
plot(rf_c)
#-------------------------------------------------------------------------------------------------------------------------------------
#KNN
# class::knn with all predictors (column 6 is the response).
knn_classifier <- knn(train = train[, -6], test = test[, -6], cl = train[, 6], k = 5)
table(knn_classifier, test$y)
#KNN with AIC run values
# BUG FIX: the previous chained [, -i] calls dropped each index from the
# already reduced frame, shifting the remaining column positions away from
# the intended original columns. Drop every intended column in one step.
knn_drop_cols <- c(6, 7, 9, 10, 17, 21, 22, 23, 24, 27, 34, 35, 38, 39, 42, 44, 46)
knn_c <- knn(train = train[, -knn_drop_cols], test = test[, -knn_drop_cols], cl = train[, 6], k = 5)
table(knn_c, test$y)
#-------------------------------------------------------------------------------------------------------------------------------------
#calculating model accuracies for KNN, RF and LR
# Accuracy values (in %) collected from the confusion matrices above,
# in the same order as model_labels.
model_accuracy <- c(76.16, 85.94, 83.83)
model_labels <- c('KNN', 'RF', 'LR')
# BUG FIX: data frame renamed from misspelled "accracy"; columns renamed
# from the misleading Location/Count; axis labels and title previously
# said "AIC Values" / "Number of variables" (copy-pasted from another
# plot) even though the data are model accuracies.
accuracy_df <- data.frame(Model = model_labels, Accuracy = model_accuracy)
ggplot(
  accuracy_df,
  aes(x = Model, y = Accuracy)) +
  geom_col() + xlab("Model") + ylab("Accuracy (%)") + ggtitle("Model Accuracy Comparison")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{tidyeval}
\alias{tidyeval}
\alias{quo}
\alias{quos}
\alias{enquo}
\alias{sym}
\alias{syms}
\alias{ensym}
\alias{expr}
\alias{exprs}
\alias{enexpr}
\alias{quo_name}
\title{Tidy eval helpers}
\description{
These functions provide tidy eval-compatible ways to capture
symbols (\code{sym()}, \code{syms()}, \code{ensym()}), expressions (\code{expr()},
\code{exprs()}, \code{enexpr()}), and quosures (\code{quo()}, \code{quos()}, \code{enquo()}).
To learn more about tidy eval and how to use these tools, read
\url{http://rlang.tidyverse.org/articles/tidy-evaluation.html}
}
\keyword{internal}
| /man/tidyeval.Rd | permissive | dmi3kno/hocr | R | false | true | 681 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{tidyeval}
\alias{tidyeval}
\alias{quo}
\alias{quos}
\alias{enquo}
\alias{sym}
\alias{syms}
\alias{ensym}
\alias{expr}
\alias{exprs}
\alias{enexpr}
\alias{quo_name}
\title{Tidy eval helpers}
\description{
These functions provide tidy eval-compatible ways to capture
symbols (\code{sym()}, \code{syms()}, \code{ensym()}), expressions (\code{expr()},
\code{exprs()}, \code{enexpr()}), and quosures (\code{quo()}, \code{quos()}, \code{enquo()}).
To learn more about tidy eval and how to use these tools, read
\url{http://rlang.tidyverse.org/articles/tidy-evaluation.html}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{randomNumbers}
\alias{randomNumbers}
\title{randomNumbers}
\format{A vector with 200 numbers}
\usage{
randomNumbers
}
\description{
200 random numbers generated as the output of sample(200)
}
\keyword{datasets}
| /tenFun/man/randomNumbers.Rd | no_license | katherinesimeon/make-r-packages | R | false | true | 317 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{randomNumbers}
\alias{randomNumbers}
\title{randomNumbers}
\format{A vector with 200 numbers}
\usage{
randomNumbers
}
\description{
200 random numbers generated as the output of sample(200)
}
\keyword{datasets}
|
#' Create a Matrix of score values using a GRanges or own ranges
#'
#' This function can take a GRanges argument and use each range to check for
#' overlaps with any of the current ranges in the first argument and return a
#' score value from the corresponding metadata. This function will only operate
#' on fully disjoint ranges (see \code{isDisjoint} for details). It can only
#' work if metadata is present and there is a "score" column in the metadata.
#' Please see example on how to add metadata to a
#' \linkS4class{RangedRaggedAssay} or \link{GRangesList} class. This function
#' uses the \link{overlapsAny} function from the \code{GenomicRanges} package.
#'
#' @param x A \linkS4class{RangedRaggedAssay} or \link{GRangesList} class
#' @param i Argument from generic (default 1L)
#' @param mcolname A single string indicating the metadata column to use for
#' the values in the resulting assay matrix
#' @param background A default background value for the resulting assay matrix
#' (default NA). This works for non-matching sample and range pairs in the data
#' and will be imputed in the matrix (e.g., 2 for diploid genomes)
#' @param make.names logical (default FALSE) whether to create character format
#' ranges for the rows of the matrix (either from the \code{ranges} argument
#' or from the \code{RangedRaggedAssay} itself). Example character format:
#' "chr1:2-3:+"
#' @param ranges An optional \link{GRanges} object for comparing across all
#' sample ranges and for superseding the rows for the resulting matrix
#' (default NULL)
#' @param type The type argument from \link{overlapsAny}
#' @param ... Unused argument
#'
#' @examples
#' example("RangedRaggedAssay")
#'
#' ## Add some phony metadata to the RangedRaggedAssay
#' metadata(myRRA) <- list(snparray1 = DataFrame(score = 1),
#' snparray2 = DataFrame(score = 1),
#' snparray3 = DataFrame(score = 3))
#'
#' assay(myRRA, background = 2)
#'
#' @return A \code{matrix} of values from the score column of the metadata.
#' @seealso \link{overlapsAny}
#' @exportMethod assay
setMethod("assay", c("RangedRaggedAssay", "missing"),
    function(x, i, mcolname = "score", background = NA,
             make.names = FALSE, ranges = NULL, type = "any", ...) {
        ## Guard: overlapping ranges within a sample would make the
        ## range -> score mapping ambiguous.
        if (!all(GenomicRanges::isDisjoint(x)))
            stop("only disjoint ranges supported")
        ## Only the first element's metadata column is checked here;
        ## presumably all elements share the same mcols schema -- the
        ## other elements are coerced with as.numeric() below.
        if (!is.numeric(mcols(x[[1L]])[[mcolname]]))
            stop("metadata column is not numeric")
        if (!is.null(ranges)) {
            ## Caller-supplied ranges supersede the assay's own rows.
            if (!is(ranges, "GRanges"))
                stop("ranges must be a GRanges object")
            if (make.names || is.null(names(ranges))) {
                rowNames <- as.character(ranges)
            } else {
                rowNames <- names(ranges)
            }
        } else {
            ## No ranges supplied: derive both the row names and the
            ## comparison ranges from the assay itself.
            rowNames <- rownames(x)
            if (make.names) {
                rangeNames <- unique(as.character(
                    unlist(x, use.names = FALSE)))
                if (length(unique(rowNames)) != length(rangeNames))
                    stop("feature names not unique accross ranges")
                rowNames <- rangeNames
            }
            ranges <- GenomicRanges::GRanges(unlist(x, use.names = FALSE))
        }
        ## Build one column per sample: where a sample's ranges overlap
        ## a row range, take that range's metadata score; everywhere
        ## else fill with `background`.
        newMatrix <-
            do.call(cbind,
                    lapply(seq_along(x),
                           function(j, obj) {
                        MValues <- ifelse(
                            IRanges::overlapsAny(ranges, obj[[j]],
                                                 type = type),
                            as.numeric(mcols(
                                obj[[j]])[[mcolname]]
                                ),
                            background)
                        return(MValues)
                    }, obj = x))
        colnames(newMatrix) <- colnames(x)
        rownames(newMatrix) <- rowNames
        return(newMatrix)
    })
#' @describeIn ExperimentList Get the assay data for the default ANY class
setMethod("assay", c("ANY", "missing"), function(x, i, ...) {
    ## ExpressionSet objects expose their matrix via Biobase::exprs();
    ## anything else is passed through unchanged.
    if (is(x, "ExpressionSet")) {
        Biobase::exprs(x)
    } else {
        x
    }
})
#' @describeIn ExperimentList Get the assay data from each element in the
#' \link{ExperimentList}
#' @param i missing argument
#' @aliases assay,ExperimentList,missing-method
setMethod("assay", c("ExperimentList", "missing"), function(x, i, ...) {
    ## Apply the single-experiment assay() method to every element,
    ## returning a list of assay matrices.
    lapply(x, function(experiment) assay(experiment))
})
#' @describeIn MultiAssayExperiment Get the assay data for a
#' \link{MultiAssayExperiment} as a \code{list}
#' @aliases assay,MultiAssayExperiment,missing-method
setMethod("assay", c("MultiAssayExperiment", "missing"), function(x, i, ...) {
    ## Delegate to the ExperimentList method on the contained experiments.
    exps <- experiments(x)
    assay(exps)
})
| /R/assay-methods.R | no_license | copen/MultiAssayExperiment | R | false | false | 4,971 | r | #' Create a Matrix of score values using a GRanges or own ranges
#'
#' This function can take a GRanges argument and use each range to check for
#' overlaps with any of the current ranges in the first argument and return a
#' score value from the corresponding metadata. This function will only operate
#' on fully disjoint ranges (see \code{isDisjoint} for details). It can only
#' work if metadata is present and there is a "score" column in the metadata.
#' Please see example on how to add metadata to a
#' \linkS4class{RangedRaggedAssay} or \link{GRangesList} class. This function
#' uses the \link{overlapsAny} function from the \code{GenomicRanges} package.
#'
#' @param x A \linkS4class{RangedRaggedAssay} or \link{GRangesList} class
#' @param i Argument from generic (default 1L)
#' @param mcolname A single string indicating the metadata column to use for
#' the values in the resulting assay matrix
#' @param background A default background value for the resulting assay matrix
#' (default NA). This works for non-matching sample and range pairs in the data
#' and will be imputed in the matrix (e.g., 2 for diploid genomes)
#' @param make.names logical (default FALSE) whether to create character format
#' ranges for the rows of the matrix (either from the \code{ranges} argument
#' or from the \code{RangedRaggedAssay} itself). Example character format:
#' "chr1:2-3:+"
#' @param ranges An optional \link{GRanges} object for comparing accross all
#' sample ranges and for superseding the rows for the resulting matrix
#' (default NULL)
#' @param type The type argument from \link{overlapsAny}
#' @param ... Unused argument
#'
#' @examples
#' example("RangedRaggedAssay")
#'
#' ## Add some phony metadata to the RangedRaggedAssay
#' metadata(myRRA) <- list(snparrray1 = DataFrame(score = 1),
#' snparray2 = DataFrame(score = 1),
#' snparray3 = DataFrame(score = 3))
#'
#' assay(myRRA, background = 2)
#'
#' @return A \code{matrix} of values from the score column of the metadata.
#' @seealso \link{overlapsAny}
#' @exportMethod assay
setMethod("assay", c("RangedRaggedAssay", "missing"),
function(x, i, mcolname = "score", background = NA,
make.names = FALSE, ranges = NULL, type = "any", ...) {
if (!all(GenomicRanges::isDisjoint(x)))
stop("only disjoint ranges supported")
if (!is.numeric(mcols(x[[1L]])[[mcolname]]))
stop("metadata column is not numeric")
if (!is.null(ranges)) {
if (!is(ranges, "GRanges"))
stop("ranges must be a GRanges object")
if (make.names || is.null(names(ranges))) {
rowNames <- as.character(ranges)
} else {
rowNames <- names(ranges)
}
} else {
rowNames <- rownames(x)
if (make.names) {
rangeNames <- unique(as.character(
unlist(x, use.names = FALSE)))
if (length(unique(rowNames)) != length(rangeNames))
stop("feature names not unique accross ranges")
rowNames <- rangeNames
}
ranges <- GenomicRanges::GRanges(unlist(x, use.names = FALSE))
}
newMatrix <-
do.call(cbind,
lapply(seq_along(x),
function(j, obj) {
MValues <- ifelse(
IRanges::overlapsAny(ranges, obj[[j]],
type = type),
as.numeric(mcols(
obj[[j]])[[mcolname]]
),
background)
return(MValues)
}, obj = x))
colnames(newMatrix) <- colnames(x)
rownames(newMatrix) <- rowNames
return(newMatrix)
})
#' @describeIn ExperimentList Get the assay data for the default ANY class
setMethod("assay", c("ANY", "missing"), function(x, i, ...) {
if (is(x, "ExpressionSet"))
return(Biobase::exprs(x))
return(x)
})
#' @describeIn ExperimentList Get the assay data from each element in the
#' \link{ExperimentList}
#' @param i missing argument
#' @aliases assay,ExperimentList,missing-method
setMethod("assay", c("ExperimentList", "missing"), function(x, i, ...) {
lapply(x, FUN = function(y) assay(y))
})
#' @describeIn MultiAssayExperiment Get the assay data for a
#' \link{MultiAssayExperiment} as a \code{list}
#' @aliases assay,MultiAssayExperiment,missing-method
setMethod("assay", c("MultiAssayExperiment", "missing"), function(x, i, ...) {
assay(experiments(x))
})
|
## Assignment 2 is an R script that defines 2
## functions: makeCacheMatrix and cacheSolve.
## makeCacheMatrix takes in a matrix and can cache its
## inverse.
## cacheSolve checks whether the inverse matrix has been cached;
## if not, it computes and caches the inverse.
## First, makeCacheMatrix: this function takes in a matrix and
## builds a "special" matrix object that can cache
## its inverse. It returns a list of functions: set, get,
## setinverse, and getinverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of x; NULL until supplied via setinverse().
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  # Return the currently stored matrix.
  get <- function() {
    x
  }

  # Store a freshly computed inverse in the cache.
  setinverse <- function(inverse_value) {
    cached_inverse <<- inverse_value
  }

  # Return the cached inverse, or NULL if it has not been set.
  getinverse <- function() {
    cached_inverse
  }

  # Expose the four accessors as a named list (the "special" matrix).
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve returns the inverse of the matrix held by an object created
## with makeCacheMatrix. If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  # Look for a previously cached inverse first.
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    # Cache hit: announce it, matching the original behavior.
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | QuekNguanTuan/ProgrammingAssignment2 | R | false | false | 1,956 | r | ## Assignment 2 is to write an R script which is to write 2
## functions: makeCacheMatrix, setinverse
## makeCacheMatrix takes in a matrix, computse and caches its
## inverse.
## cacheSolve checks the "inverse" matrix has been cashed then
## it computes its inverse.
## First start with makeCacheMatrix: this function takes in a
## "special" matrix object to cache
## its inverse. It returns a list of functions: set, get,
## setinverse,getinverse functions.
makeCacheMatrix <- function(x = matrix()) {
## initialize the "special" matrix object, inverse
inverse <- NULL
## function set to set x
set <- function(y) {
x <<- y
inverse <<- NULL
}
## function get to get x
get <- function() x
## function setinverse to set inverse
setinverse <- function(inverseValue)
inverse <<- inverseValue
## function getinverse to get inverse
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The 2nd function is to compute the inverse of the matrix
## returned by makeCacheMatrix above.
## If the inverse has already been calculated then the
## cacheSolve should
## retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## retrieve inverse from makeCacheMatrix
inverse <- x$getinverse()
## if inverse exist, put up a message and return inverse
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
## if inverse does not exist, get the input matrix,
## compute the inverse,
## set the inverse and return the inverse
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
inverse
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/macie2_operations.R
\name{macie2_list_resource_profile_detections}
\alias{macie2_list_resource_profile_detections}
\title{Retrieves information about the types and amount of sensitive data that
Amazon Macie found in an S3 bucket}
\usage{
macie2_list_resource_profile_detections(
maxResults = NULL,
nextToken = NULL,
resourceArn
)
}
\arguments{
\item{maxResults}{The maximum number of items to include in each page of a paginated
response.}
\item{nextToken}{The nextToken string that specifies which page of results to return in a
paginated response.}
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the S3 bucket that the request applies
to.}
}
\description{
Retrieves information about the types and amount of sensitive data that Amazon Macie found in an S3 bucket.
See \url{https://www.paws-r-sdk.com/docs/macie2_list_resource_profile_detections/} for full documentation.
}
\keyword{internal}
| /cran/paws.security.identity/man/macie2_list_resource_profile_detections.Rd | permissive | paws-r/paws | R | false | true | 998 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/macie2_operations.R
\name{macie2_list_resource_profile_detections}
\alias{macie2_list_resource_profile_detections}
\title{Retrieves information about the types and amount of sensitive data that
Amazon Macie found in an S3 bucket}
\usage{
macie2_list_resource_profile_detections(
maxResults = NULL,
nextToken = NULL,
resourceArn
)
}
\arguments{
\item{maxResults}{The maximum number of items to include in each page of a paginated
response.}
\item{nextToken}{The nextToken string that specifies which page of results to return in a
paginated response.}
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the S3 bucket that the request applies
to.}
}
\description{
Retrieves information about the types and amount of sensitive data that Amazon Macie found in an S3 bucket.
See \url{https://www.paws-r-sdk.com/docs/macie2_list_resource_profile_detections/} for full documentation.
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.