content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAR.R
\name{MAR}
\alias{MAR}
\title{Missing data spike-in in MAR pattern}
\usage{
MAR(X_hat, MD_pattern, NA_fraction, min_PDM = 10)
}
\arguments{
\item{X_hat}{Simulated matrix with no missingness (Simulated_matrix output from the \code{\link{simulate}} function)}
\item{MD_pattern}{Missing data pattern in the original dataset (MD_Pattern output from the \code{\link{get_data}} function)}
\item{NA_fraction}{Fraction of missingness in the original dataset (Fraction_missingness output from the \code{\link{get_data}} function)}
\item{min_PDM}{All patterns with number of observations less than this number will be removed from the missing data generation. This argument is necessary to be carefully set, as the function will fail or generate erroneous missing data patterns with very complicated missing data patterns. The default is 10, but for large datasets this number needs to be set higher to avoid errors. Please select a value based on the min_PDM_thresholds output from the \code{\link{get_data}} function}
}
\value{
\item{MAR_matrix}{Matrix with MAR pre-defined missingness pattern}
\item{Summary}{Summary of MAR_matrix including number of missing values per variable}
}
\description{
\code{\link{MAR}} spikes in missingness using missing-at-random (MAR) pattern
}
\details{
This function uses the generated simulated matrix and generates missing datapoints in a missing-at-random
pattern for each variable using the \code{\link[mice]{ampute}} function, considering the fraction of missingness in
the original dataset and the original missingness pattern. The characteristic of the MAR pattern is that
the missingness in a variable is dependent on the distribution of other variable(s). Please note that after the missing data spike-in,
the function will remove rows with 100\% missing data.
}
\examples{
cleaned <- clean(clindata_miss, missingness_coding = -9)
metadata <- get_data(cleaned)
simulated <- simulate(rownum = metadata$Rows, colnum = metadata$Columns,
cormat = metadata$Corr_matrix)
MAR(simulated$Simulated_matrix,
MD_pattern = metadata$MD_Pattern,
NA_fraction = metadata$Fraction_missingness,
min_PDM = 10)
}
| /man/MAR.Rd | permissive | ZipZaap/missCompare | R | false | true | 2,228 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAR.R
\name{MAR}
\alias{MAR}
\title{Missing data spike-in in MAR pattern}
\usage{
MAR(X_hat, MD_pattern, NA_fraction, min_PDM = 10)
}
\arguments{
\item{X_hat}{Simulated matrix with no missingness (Simulated_matrix output from the \code{\link{simulate}} function)}
\item{MD_pattern}{Missing data pattern in the original dataset (MD_Pattern output from the \code{\link{get_data}} function)}
\item{NA_fraction}{Fraction of missingness in the original dataset (Fraction_missingness output from the \code{\link{get_data}} function)}
\item{min_PDM}{All patterns with number of observations less than this number will be removed from the missing data generation. This argument is necessary to be carefully set, as the function will fail or generate erroneous missing data patterns with very complicated missing data patterns. The default is 10, but for large datasets this number needs to be set higher to avoid errors. Please select a value based on the min_PDM_thresholds output from the \code{\link{get_data}} function}
}
\value{
\item{MAR_matrix}{Matrix with MAR pre-defined missingness pattern}
\item{Summary}{Summary of MAR_matrix including number of missing values per variable}
}
\description{
\code{\link{MAR}} spikes in missingness using missing-at-random (MAR) pattern
}
\details{
This function uses the generated simulated matrix and generates missing datapoints in a missing-at-random
pattern for each variable using the \code{\link[mice]{ampute}} function, considering the fraction of missingness in
the original dataset and the original missingness pattern. The characteristic of the MAR pattern is that
the missingness in a variable is dependent on the distribution of other variable(s). Please note that after the missing data spike-in,
the function will remove rows with 100\% missing data.
}
\examples{
cleaned <- clean(clindata_miss, missingness_coding = -9)
metadata <- get_data(cleaned)
simulated <- simulate(rownum = metadata$Rows, colnum = metadata$Columns,
cormat = metadata$Corr_matrix)
MAR(simulated$Simulated_matrix,
MD_pattern = metadata$MD_Pattern,
NA_fraction = metadata$Fraction_missingness,
min_PDM = 10)
}
|
######################################################################
# diag.R
#
# Brian S Yandell
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Contains: dist.qtlnet, edgematch.qtlnet, mds.qtlnet, plotbic.qtlnet,
#           newfun, zero.M, best.qtlnet, meanbic, fold.M
#
######################################################################
## Compare each MCMC run's thresholded edge set against the run with the
## lowest mean post-burnin BIC ("best" run), and plot edge counts per run
## versus edges shared with the best run.
## `method` and `cex` are accepted for call compatibility but unused here.
## Returns a list: per-run edge totals (`sum`), overlap with the best run
## (`common`), and the best run's index (`wh`).
dist.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
  ## Fold each run's averaged adjacency to one score per edge pair, then
  ## threshold at min.prob.  NOTE(review): unlike edgematch.qtlnet this does
  ## not take abs() of the folded score -- confirm that is intentional.
  edge.mat <- apply(qtlnet.object$Mav, 3,
                    function(av) 1 * (fold.M(av) >= min.prob))

  ## Locate the run with the smallest mean BIC.
  mean.bic <- meanbic(qtlnet.object)
  best <- which.min(mean.bic)

  out <- list(sum = apply(edge.mat, 2, sum),
              common = apply(edge.mat, 2,
                             function(e, best.e) sum(e * best.e),
                             edge.mat[, best]),
              wh = best)

  ## Jitter to keep overlapping runs visible; gray guides mark y = x and the
  ## best run's edge count.
  graphics::plot(jitter(out$sum), jitter(out$common),
                 xlab = "edges", ylab = "common with best")
  graphics::abline(0, 1, col = "gray")
  graphics::abline(h = out$sum[best], v = out$sum[best], col = "gray")
  out
}
######################################################################
## Show, run by run, which thresholded edges agree with the best run (lowest
## mean post-burnin BIC) and which are extra.  Black points/lines mark edges
## shared with the best run; gray points mark edges only present in that run.
## `method` and `cex` are accepted for call compatibility but unused here.
## Called for its plot side effect; returns nothing useful.
edgematch.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
  ## Deconvolve this fold to find out what pairs.
  ## Fold to unique edges; threshold |folded score| on min.prob.
  M <- apply(qtlnet.object$Mav, 3, function(x) 1 * (abs(fold.M(x)) >= min.prob))
  mbic <- meanbic(qtlnet.object)
  wh <- which.min(mbic)
  ## Logical matrices (edges x runs): edge present in both run i and the best
  ## run, or only in run i.
  common <- apply(M, 2, function(x,y) x*y == 1, M[,wh])
  extra <- apply(M, 2, function(x,y) x*(1-y) == 1, M[,wh])
  graphics::plot(c(1, nrow(common)), c(1, ncol(common)), type = "n",
                 xlab = "edge", ylab = "run")
  graphics::abline(v = which(common[,wh]), col = "black")
  for(i in seq(ncol(common))) {
    ## Scalar condition: use short-circuiting && (was elementwise &).
    if(i != wh && any(extra[,i]))
      graphics::points(which(extra[,i]), rep(i, sum(extra[,i])), col = "gray")
    graphics::points(which(common[,i]), rep(i, sum(common[,i])),
                     col = ifelse(i==wh, "red", "black"))
  }
}
######################################################################
## Multidimensional scaling of the folded edge-score matrices across MCMC
## runs.  Each run becomes a point in 2-D; point size scales with how low its
## mean BIC is, and the best run is highlighted in red.
## `min.prob` is accepted for interface consistency with dist.qtlnet but is
## unused here.  Invisibly returns the folded matrix, distances, cmdscale
## fit, and per-run mean BIC.
mds.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
M <- apply(qtlnet.object$Mav, 3, fold.M)
d <- stats::dist(t(M), method) # distances between the columns (runs); default method is "manhattan"
fit <- stats::cmdscale(d,eig=TRUE, k=2) # k is the number of dim
## plot solution
x <- fit$points[,1]
y <- fit$points[,2]
mbic <- meanbic(qtlnet.object)
wh <- which.min(mbic)
rbic <- range(mbic)
## Map mean BIC linearly onto point size in [1, cex]: the best (lowest-BIC)
## run gets the largest point.
cex <- 1 + (cex - 1) * (rbic[2] - mbic) / diff(rbic)
graphics::plot(x, y, xlab="Coordinate 1", ylab="Coordinate 2",
main="Metric MDS", type="p", cex = cex)
graphics::points(x[wh], y[wh], cex = cex[wh], col = "red")
## graphics::text(x, y, labels = row.names(M), cex=.7)
invisible(list(M=M, d=d, fit = fit, mbic = mbic))
}
######################################################################
## Trace plot of BIC samples for each MCMC run, restricted to the post-burnin
## portion of each trace.  `x` must carry "nSamples" (samples per run) and
## "burnin" (fraction in [0, 1]) attributes plus a concatenated $post.bic
## vector.  NOTE(review): the lowess smooth (`smooth = TRUE`) is only drawn
## in the multi-run branch -- confirm single-run behavior is intended.
plotbic.qtlnet <- function(x, ..., smooth = TRUE)
{
nSamples <- attr(x, "nSamples")
runs <- length(nSamples)
burnin <- attr(x, "burnin")
## Range of a trace after dropping the first `burnin` fraction of samples.
rngfn <- function(x, burnin) {
tmp <- which(seq(x) >= burnin * length(x))
range(x[tmp])
}
## Draw one run's post-burnin trace.
plotfn <- function(post.bic, burnin, col = "gray") {
tmp <- which(seq(post.bic) >= burnin * length(post.bic))
graphics::lines(tmp, post.bic[tmp], col = col)
}
## Overlay a lowess smooth of one run's post-burnin trace in black.
splotfn <- function(post.bic, burnin) {
tmp <- which(seq(post.bic) >= burnin * length(post.bic))
graphics::lines(tmp, stats::lowess(post.bic[tmp])$y, col = "black")
}
## Traces are gray when a smooth will be overlaid, black otherwise.
bicol <- ifelse(smooth, "gray", "black")
if(runs == 1) {
range.bic <- rngfn(x$post.bic, burnin)
graphics::plot(c(1,max(nSamples)),range.bic, type = "n",
xlab = "Sample Index", ylab = "BIC")
plotfn(x$post.bic, burnin, bicol)
}
else {
## Split the concatenated BIC trace by run, size the empty plot to cover
## all post-burnin ranges, then draw each run's trace (and smooth).
run.id <- rep(seq(runs), nSamples)
range.bic <- range(unlist(tapply(x$post.bic, run.id,
rngfn, burnin)))
graphics::plot(c(1,max(nSamples)),range.bic, type = "n",
xlab = "Sample Index", ylab = "BIC")
tapply(x$post.bic, run.id, plotfn, burnin, bicol)
if(smooth)
tapply(x$post.bic, run.id, splotfn, burnin)
}
graphics::title(paste("BIC samples for", runs, "MCMC", ifelse(runs == 1, "run", "runs")))
}
######################################################################
## Experimental diagnostic: compare a multi-step edge score derived from the
## best run against per-edge sample counts summed over all runs.  The
## author's note below says it does not work as intended; documented as-is.
newfun <- function(qtlnet.object, burnin = attr(qtlnet.object, "burnin"),
wh = which.min(meanbic(qtlnet.object, burnin)))
{
## Nice idea, but not working the way I thought.
## Want sumM to be score of posterior for edge.
## Row-normalize the best run's count matrix (rows summing to zero are left
## untouched to avoid division by zero).
M1 <- qtlnet.object$M[,,wh]
M1 <- t(apply(M1, 1,
function(x) {
s <- sum(x)
if(s > 0)
x <- x / s
x
}))
## Accumulate M1 + M1^2 + ... + M1^(n-1), i.e. scores over paths of
## increasing length.
sumM <- M2 <- M1
for(i in seq(2, nrow(M1) - 1)) {
M2 <- M1 %*% M2
sumM <- sumM + M2
}
## Symmetrize and keep one value per unordered pair (upper triangle).
upM <- (sumM + t(sumM))[upper.tri(sumM)]
## Per-pair sample counts aggregated across all runs.
runM <- apply(qtlnet.object$M, 1:2, sum)
runM <- (runM + t(runM))[upper.tri(runM)]
graphics::plot(upM,runM)
## Highlight pairs whose raw score in the best run exceeds 0.9 in either
## direction.
M <- qtlnet.object$M[,,wh]
whs <- which(M[upper.tri(M)] > 0.9 | t(M)[upper.tri(M)] > 0.9)
graphics::points(upM[whs], runM[whs], col = "red")
## This is easier to understand.
## Does M1 have an edge, and does it agree with most of the runs?
M1u <- (M1+t(M1))[upper.tri(M1)]
graphics::plot(M1u, runM)
graphics::points(M1u[whs], runM[whs], col = "red")
graphics::abline(v=0.9)
}
## Per-run summary of edges that never appear in the posterior samples.
##
## NOTE: the default `run = which.min(mbic)` refers to the local `mbic`
## computed near the end of this function; lazy evaluation makes this work
## because the default is first forced (at `out[,run]`) after `mbic` exists.
## Fragile, but preserved for backward compatibility.
##
## Returns a data frame with one row per run: `nonzero` (count of edge pairs
## never sampled), `agree` (agreement of those absences with run `run`),
## and `mean.bic` (mean post-burnin BIC).
zero.M <- function(qtlnet.object, run = which.min(mbic),
burnin = attr(qtlnet.object, "burnin"))
{
## apply(out.qtlnet$Mav,3, function(x) sum(x >.9))
## round(apply(out.qtlnet$Mav,3, function(x) mean(x[x >.9])),3)
nSamples <- attr(qtlnet.object, "nSamples")
runs <- length(nSamples)
run.id <- rep(seq(runs), nSamples)
M0 <- attr(qtlnet.object, "M0")
## Logical index of strictly-lower-triangle positions: one per edge pair.
ravel <- row(as.matrix(M0)) > col(as.matrix(M0))
## TRUE where an edge (in either direction) never appears in a run's
## samples.  `burnin` is accepted but currently unused inside tmpfn.
tmpfn <- function(post.model, burnin, ravel) {
M <- apply(model2M(post.model), 1:2, sum)
(M[ravel] + t(M)[ravel]) == 0
}
cat("Extracting network matrices...\n")
out <- matrix(unlist(tapply(qtlnet.object$post.model, run.id, tmpfn,
burnin, ravel)),
ncol = runs)
mbic <- meanbic(qtlnet.object, burnin)
## (Removed a dead `wh <- which.min(mbic)`; its value was never used --
## the comparison column is driven by `run`.)
data.frame(nonzero = apply(out, 2, sum),
agree = apply(out, 2, function(x,y) sum(x == y & y > 0),
out[,run]),
mean.bic = mbic,
stringsAsFactors = TRUE)
}
## Extract the best run (lowest mean post-burnin BIC) from a qtlnet object.
## Relies on a subset() method for this class defined elsewhere in the
## package -- NOTE(review): confirm subset.qtlnet selects runs by index.
best.qtlnet <- function(x, burnin = attr(x, "burnin"),
wh = which.min(meanbic(x, burnin)))
{
subset(x, wh)
}
## Mean post-burnin BIC for each MCMC run.
##
## qtlnet.object: object carrying $post.bic (concatenated BIC trace over all
##   runs) plus "nSamples" (samples per run) and "burnin" attributes.
## burnin: fraction in [0, 1] of each run's samples to discard at the start.
## Returns a tapply array with one mean BIC per run, named by run index.
meanbic <- function(qtlnet.object, burnin = attr(qtlnet.object, "burnin"))
{
  nSamples <- attr(qtlnet.object, "nSamples")
  runs <- length(nSamples)
  ## Label each sample of the concatenated trace with its run index.
  run.id <- rep(seq(runs), nSamples)
  ## Mean of a trace after dropping the first `burnin` fraction of samples.
  tmpfn <- function(x, burnin) {
    tmp <- which(seq(x) >= burnin * length(x))
    mean(x[tmp])
  }
  ## Return the per-run means directly (the old trailing assignment
  ## `mbic <- tapply(...)` made the result invisible and added a dead name).
  tapply(qtlnet.object$post.bic, run.id, tmpfn, burnin)
}
######################################################################
## Fold a square score matrix to one value per unordered pair: for each
## upper-triangle position (i, j), return x[j, i] - x[i, j], i.e. the
## lower-triangle entry minus its matching upper-triangle entry.
fold.M <- function(x) {
  keep <- upper.tri(x)
  t(x)[keep] - x[keep]
}
######################################################################
| /R/diag.R | no_license | byandell/qtlnet | R | false | false | 7,561 | r | ######################################################################
# diag.R
#
# Brian S Yandell
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Contains: dist.qtlnet, edgematch.qtlnet, mds.qtlnet, plotbic.qtlnet,
#
######################################################################
dist.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
## Fold to unique edges; threshold on min.prob.
M <- apply(qtlnet.object$Mav, 3, function(x) 1 * (fold.M(x) >= min.prob))
mbic <- meanbic(qtlnet.object)
wh <- which.min(mbic)
out <- list(sum = apply(M, 2, sum),
common = apply(M, 2, function(x,y) sum(x*y), M[,wh]),
wh = wh)
graphics::plot(jitter(out$sum), jitter(out$common), xlab = "edges", ylab = "common with best")
graphics::abline(0,1,col = "gray")
graphics::abline(h = out$sum[wh], v = out$sum[wh], col = "gray")
out
}
######################################################################
edgematch.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
## Deconvolve this fold to find out what pairs.
## Fold to unique edges; threshold on min.prob.
M <- apply(qtlnet.object$Mav, 3, function(x) 1 * (abs(fold.M(x)) >= min.prob))
mbic <- meanbic(qtlnet.object)
wh <- which.min(mbic)
common <- apply(M, 2, function(x,y) x*y == 1, M[,wh])
extra <- apply(M, 2, function(x,y) x*(1-y) == 1, M[,wh])
graphics::plot(c(1, nrow(common)), c(1, ncol(common)), type = "n", xlab = "edge", ylab = "run")
graphics::abline(v = which(common[,wh]), col = "black")
for(i in seq(ncol(common))) {
if(i != wh & any(extra[,i]))
graphics::points(which(extra[,i]), rep(i, sum(extra[,i])), col = "gray")
graphics::points(which(common[,i]), rep(i, sum(common[,i])), col = ifelse(i==wh, "red", "black"))
}
}
######################################################################
mds.qtlnet <- function(qtlnet.object, min.prob = 0.9, method = "manhattan", cex = 5)
{
M <- apply(qtlnet.object$Mav, 3, fold.M)
d <- stats::dist(t(M), method) # euclidean distances between the cols
fit <- stats::cmdscale(d,eig=TRUE, k=2) # k is the number of dim
## plot solution
x <- fit$points[,1]
y <- fit$points[,2]
mbic <- meanbic(qtlnet.object)
wh <- which.min(mbic)
rbic <- range(mbic)
cex <- 1 + (cex - 1) * (rbic[2] - mbic) / diff(rbic)
graphics::plot(x, y, xlab="Coordinate 1", ylab="Coordinate 2",
main="Metric MDS", type="p", cex = cex)
graphics::points(x[wh], y[wh], cex = cex[wh], col = "red")
## graphics::text(x, y, labels = row.names(M), cex=.7)
invisible(list(M=M, d=d, fit = fit, mbic = mbic))
}
######################################################################
plotbic.qtlnet <- function(x, ..., smooth = TRUE)
{
nSamples <- attr(x, "nSamples")
runs <- length(nSamples)
burnin <- attr(x, "burnin")
rngfn <- function(x, burnin) {
tmp <- which(seq(x) >= burnin * length(x))
range(x[tmp])
}
plotfn <- function(post.bic, burnin, col = "gray") {
tmp <- which(seq(post.bic) >= burnin * length(post.bic))
graphics::lines(tmp, post.bic[tmp], col = col)
}
splotfn <- function(post.bic, burnin) {
tmp <- which(seq(post.bic) >= burnin * length(post.bic))
graphics::lines(tmp, stats::lowess(post.bic[tmp])$y, col = "black")
}
bicol <- ifelse(smooth, "gray", "black")
if(runs == 1) {
range.bic <- rngfn(x$post.bic, burnin)
graphics::plot(c(1,max(nSamples)),range.bic, type = "n",
xlab = "Sample Index", ylab = "BIC")
plotfn(x$post.bic, burnin, bicol)
}
else {
run.id <- rep(seq(runs), nSamples)
range.bic <- range(unlist(tapply(x$post.bic, run.id,
rngfn, burnin)))
graphics::plot(c(1,max(nSamples)),range.bic, type = "n",
xlab = "Sample Index", ylab = "BIC")
tapply(x$post.bic, run.id, plotfn, burnin, bicol)
if(smooth)
tapply(x$post.bic, run.id, splotfn, burnin)
}
graphics::title(paste("BIC samples for", runs, "MCMC", ifelse(runs == 1, "run", "runs")))
}
######################################################################
newfun <- function(qtlnet.object, burnin = attr(qtlnet.object, "burnin"),
wh = which.min(meanbic(qtlnet.object, burnin)))
{
## Nice idea, but not working the way I thought.
## Want sumM to be score of posterior for edge.
M1 <- qtlnet.object$M[,,wh]
M1 <- t(apply(M1, 1,
function(x) {
s <- sum(x)
if(s > 0)
x <- x / s
x
}))
sumM <- M2 <- M1
for(i in seq(2, nrow(M1) - 1)) {
M2 <- M1 %*% M2
sumM <- sumM + M2
}
upM <- (sumM + t(sumM))[upper.tri(sumM)]
runM <- apply(qtlnet.object$M, 1:2, sum)
runM <- (runM + t(runM))[upper.tri(runM)]
graphics::plot(upM,runM)
M <- qtlnet.object$M[,,wh]
whs <- which(M[upper.tri(M)] > 0.9 | t(M)[upper.tri(M)] > 0.9)
graphics::points(upM[whs], runM[whs], col = "red")
## This is easier to understand.
## Does M1 have an edge, and does it agree with most of the runs?
M1u <- (M1+t(M1))[upper.tri(M1)]
graphics::plot(M1u, runM)
graphics::points(M1u[whs], runM[whs], col = "red")
graphics::abline(v=0.9)
}
zero.M <- function(qtlnet.object, run = which.min(mbic),
burnin = attr(qtlnet.object, "burnin"))
{
## apply(out.qtlnet$Mav,3, function(x) sum(x >.9))
## round(apply(out.qtlnet$Mav,3, function(x) mean(x[x >.9])),3)
nSamples <- attr(qtlnet.object, "nSamples")
runs <- length(nSamples)
run.id <- rep(seq(runs), nSamples)
M0 <- attr(qtlnet.object, "M0")
ravel <- row(as.matrix(M0)) > col(as.matrix(M0))
tmpfn <- function(post.model, burnin, ravel) {
M <- apply(model2M(post.model), 1:2, sum)
(M[ravel] + t(M)[ravel]) == 0
}
cat("Extracting network matrices...\n")
out <- matrix(unlist(tapply(qtlnet.object$post.model, run.id, tmpfn,
burnin, ravel)),
ncol = runs)
mbic <- meanbic(qtlnet.object, burnin)
wh <- which.min(mbic)
data.frame(nonzero = apply(out, 2, sum),
agree = apply(out, 2, function(x,y) sum(x == y & y > 0),
out[,run]),
mean.bic = mbic,
stringsAsFactors = TRUE)
}
best.qtlnet <- function(x, burnin = attr(x, "burnin"),
wh = which.min(meanbic(x, burnin)))
{
subset(x, wh)
}
meanbic <- function(qtlnet.object, burnin = attr(qtlnet.object, "burnin"))
{
nSamples <- attr(qtlnet.object, "nSamples")
runs <- length(nSamples)
run.id <- rep(seq(runs), nSamples)
tmpfn <- function(x, burnin) {
tmp <- which(seq(x) >= burnin * length(x))
mean(x[tmp])
}
mbic <- tapply(qtlnet.object$post.bic, run.id, tmpfn, burnin)
}
######################################################################
fold.M <- function(x) {
low <- upper.tri(x)
t(x)[low] - x[low]
}
######################################################################
|
\name{lcy.table.split}
\alias{lcy.table.split}
\title{
split each element of a given column and duplicate the rows.
}
\description{
Split the string in each row of the given column and duplicate the rows accordingly; the split values are placed in the new rows of that column.
}
\usage{
lcy.table.split(table, column.id = 1, split = ",")
}
\arguments{
\item{table}{
data.frame or matrix. It should have column names if a character is provided for column.id instead of integer. If there are rownames the rownames will be gone.
}
\item{column.id}{
can be an integer or a character to specify which column is used to apply strsplit function. Default the first column is used. It should have column names if a character is provided for column.id instead of integer.
}
\item{split}{
Delimiter used to split the character values (default ",").
}
}
\details{
}
\value{
return a data frame or matrix
}
\author{
Chengyu Liu <Chengyu.liu@helsinki.fi>
}
\keyword{ strsplit }
\keyword{ split }
\keyword{ table }
| /lcyR/man/lcy.table.split.Rd | no_license | farscape2012/rpkg | R | false | false | 998 | rd | \name{lcy.table.split}
\alias{lcy.table.split}
\title{
split each element of a given column and duplicate the rows.
}
\description{
Split the string in each row of the given column and duplicate the rows accordingly; the split values are placed in the new rows of that column.
}
\usage{
lcy.table.split(table, column.id = 1, split = ",")
}
\arguments{
\item{table}{
data.frame or matrix. It should have column names if a character is provided for column.id instead of integer. If there are rownames the rownames will be gone.
}
\item{column.id}{
can be an integer or a character to specify which column is used to apply strsplit function. Default the first column is used. It should have column names if a character is provided for column.id instead of integer.
}
\item{split}{
Delimiter used to split the character values (default ",").
}
}
\details{
}
\value{
return a data frame or matrix
}
\author{
Chengyu Liu <Chengyu.liu@helsinki.fi>
}
\keyword{ strsplit }
\keyword{ split }
\keyword{ table }
|
## Setting Working Directory ##
getwd()  # was geywd(), a typo that errors before anything else runs
setwd("./R/Course 4/ProgrammingAssignment1")

## Downloading and Reading Data ##
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp <- tempfile()
download.file(fileUrl, temp, mode = "wb")
unzip(temp)
unlink(temp)

## Read the full data set; "?" codes missing values.
data_all <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";",
                       na.strings = "?",
                       colClasses = c("character", "character", "numeric", "numeric",
                                      "numeric", "numeric", "numeric", "numeric", "numeric"))

## Keep only the two days of interest (dates are day/month/year).
data <- subset(data_all, Date == "1/2/2007" | Date == "2/2/2007")
data$DateTime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")

## Plotting and Saving as png ##
hist(data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off() | /plot1.R | no_license | tutuyu113/ExData_Plotting1 | R | false | false | 931 | r | ## Setting Working Directory ##
getwd()  # was geywd(), a typo that errors before anything else runs
setwd("./R/Course 4/ProgrammingAssignment1")

## Downloading and Reading Data ##
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp <- tempfile()
download.file(fileUrl, temp, mode = "wb")
unzip(temp)
unlink(temp)

## Read the full data set; "?" codes missing values.
data_all <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";",
                       na.strings = "?",
                       colClasses = c("character", "character", "numeric", "numeric",
                                      "numeric", "numeric", "numeric", "numeric", "numeric"))

## Keep only the two days of interest (dates are day/month/year).
data <- subset(data_all, Date == "1/2/2007" | Date == "2/2/2007")
data$DateTime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")

## Plotting and Saving as png ##
hist(data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.copy(png, "plot1.png", width = 480, height = 480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_stats.emmGrid.R
\name{tidy_stats.emmGrid}
\alias{tidy_stats.emmGrid}
\title{Create a tidy stats data frame from an emmGrid object}
\usage{
\method{tidy_stats}{emmGrid}(model, args = NULL)
}
\arguments{
\item{model}{Output of emmeans's \code{emmeans} without pairwise comparisons.}
\item{args}{Unused.}
}
\description{
\code{tidy_stats.emmGrid} takes an emmGrid object and converts the object to a
tidy stats data frame.
}
\examples{
if(!requireNamespace("emmeans", quietly = TRUE)) {
message(paste0("Package 'emmeans' is needed for this example to work. ",
"Please install it."), .call = FALSE)
} else {
# Load data
pigs <- emmeans::pigs
# Conduct a linear regression
pigs.lm1 <- lm(log(conc) ~ source + factor(percent), data = pigs)
# Tidy stats
tidy_stats(pigs.lm1)
}
}
| /man/tidy_stats.emmGrid.Rd | permissive | ikbentimkramer/tidystats-v0.3 | R | false | true | 894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_stats.emmGrid.R
\name{tidy_stats.emmGrid}
\alias{tidy_stats.emmGrid}
\title{Create a tidy stats data frame from an emmGrid object}
\usage{
\method{tidy_stats}{emmGrid}(model, args = NULL)
}
\arguments{
\item{model}{Output of emmeans's \code{emmeans} without pairwise comparisons.}
\item{args}{Unused.}
}
\description{
\code{tidy_stats.emmGrid} takes an emmGrid object and converts the object to a
tidy stats data frame.
}
\examples{
if(!requireNamespace("emmeans", quietly = TRUE)) {
message(paste0("Package 'emmeans' is needed for this example to work. ",
"Please install it."), .call = FALSE)
} else {
# Load data
pigs <- emmeans::pigs
# Conduct a linear regression
pigs.lm1 <- lm(log(conc) ~ source + factor(percent), data = pigs)
# Tidy stats
tidy_stats(pigs.lm1)
}
}
|
########################################################################################################################
#' The show method for CellBaseR class
#'
#' @param object an object of class CellBaseR
## show() for CellBaseR: one header line with the class name, then a usage
## hint about changing the default species.  Output is byte-identical to
## the original multi-cat version.
setMethod("show", signature = "CellBaseR", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                "| it holds the configuration for querying the Cellbase databases\n",
                "| to change the default species from human use CellBaseR(species='')")
  cat(msg)
})
########################################################################################################################
#' The show method for CellBaseParam class
#'
#' @param object an object of class CellBaseParam
## show() for CellBaseParam: class-name header plus a usage hint.  The
## original's string literal contained a raw newline; reproduced here with
## an explicit \n so the printed bytes are identical.
setMethod("show", signature = "CellBaseParam", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                "use this object to control what results are returned from the\n",
                "CellBaseR methods")
  cat(msg)
})
#' The show method for CellBaseResponse class
#'
#' @param object an object of class CellBaseResponse
## show() for CellBaseResponse: reports the class, the dimensions of the
## wrapped @cbData data frame, and how to extract it.  Output is
## byte-identical to the original multi-cat version.
setMethod("show", signature = "CellBaseResponse", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                " containing ", nrow(object@cbData), " rows and ",
                ncol(object@cbData), " columns.\n",
                " to get the annotated dataframe use cbData()")
  cat(msg)
})
| /cellbase-client/src/main/R/R/show-methods.R | permissive | opencb/cellbase | R | false | false | 1,364 | r | ########################################################################################################################
#' The show method for CellBaseR class
#'
#' @param object an object of class CellBaseR
## show() for CellBaseR: one header line with the class name, then a usage
## hint about changing the default species.  Output is byte-identical to
## the original multi-cat version.
setMethod("show", signature = "CellBaseR", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                "| it holds the configuration for querying the Cellbase databases\n",
                "| to change the default species from human use CellBaseR(species='')")
  cat(msg)
})
########################################################################################################################
#' The show method for CellBaseParam class
#'
#' @param object an object of class CellBaseParam
## show() for CellBaseParam: class-name header plus a usage hint.  The
## original's string literal contained a raw newline; reproduced here with
## an explicit \n so the printed bytes are identical.
setMethod("show", signature = "CellBaseParam", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                "use this object to control what results are returned from the\n",
                "CellBaseR methods")
  cat(msg)
})
#' The show method for CellBaseResponse class
#'
#' @param object an object of class CellBaseResponse
## show() for CellBaseResponse: reports the class, the dimensions of the
## wrapped @cbData data frame, and how to extract it.  Output is
## byte-identical to the original multi-cat version.
setMethod("show", signature = "CellBaseResponse", definition = function(object) {
  msg <- paste0("An object of class ", class(object), "\n",
                " containing ", nrow(object@cbData), " rows and ",
                ncol(object@cbData), " columns.\n",
                " to get the annotated dataframe use cbData()")
  cat(msg)
})
|
.Random.seed <-
c(403L, 10L, 2007098775L, 1876235518L, 414449336L, 363421851L,
-328200455L, 852623432L, 1144968054L, -385038671L, 1222182627L,
1466070450L, -1926799868L, -662448825L, 258134045L, 1707947668L,
-601850310L, 1997344901L, 492492127L, 14515686L, 163714160L,
69224339L, -787902415L, -1196040480L, -1161390210L, -979268087L,
1189865019L, 1480275978L, 1000105964L, -963031953L, -1052892251L,
-1912749220L, -1598820558L, 1560043053L, 1328387303L, -1417472498L,
-1918104600L, 869332459L, 864788105L, -1639723496L, 1862255942L,
1155838881L, 295861811L, 1124889506L, 1671988884L, 1312654615L,
1937720077L, 903049764L, -1781810678L, 1253884789L, 125896399L,
1497270742L, -1659837056L, -936682045L, -246809759L, -1764695600L,
200702254L, 42582233L, -767190293L, -2077980838L, 1363476764L,
1548486687L, 333501557L, -1515859700L, -1221116702L, -1748642371L,
1255760439L, 328495326L, -886850536L, -2096979077L, 1151095577L,
-132616280L, 1685778262L, 976181201L, 2022671235L, 1593533970L,
812056996L, 2995559L, -1645462851L, 1307261492L, -310558310L,
-407298587L, 1619622271L, 1092047110L, -431609200L, -417137869L,
1140162769L, -782364032L, 1059701598L, 1994354857L, 827102043L,
1798250858L, 594232332L, 1684415567L, -117140219L, -176269892L,
1869471890L, 1859523469L, -368057401L, -238774098L, 219715848L,
-460511477L, -1351638551L, -1234590024L, 1460408678L, -1561406975L,
-559084397L, 924743490L, 785385716L, 1183991031L, -73753363L,
1594183556L, 1896366506L, -1318455531L, 424355247L, -1952297546L,
-283790368L, 84368163L, -631186367L, 2005380912L, -1606173362L,
-376569927L, -1706477109L, 1312832122L, 1707782012L, 1547370431L,
-579409771L, -1179019604L, 476234882L, 420717661L, 1437772631L,
-764561858L, 1355807352L, -998340261L, -1026250567L, -946452216L,
478886838L, 1142126833L, 1577316131L, -2053736078L, 439233476L,
1144735623L, 1187450205L, 2072404820L, -1532724870L, 163558597L,
-240327137L, 1235022246L, 1766630192L, -1558425133L, 208410609L,
1060155680L, -1203023938L, -487505975L, 690671483L, 815379786L,
-1328152148L, -613213009L, -277707163L, 1250213404L, 45805682L,
204089069L, -96859353L, 1461544782L, 1087229864L, 350378923L,
-1665967927L, 854876376L, 1342504326L, 906253409L, 1260918259L,
575558754L, 703517396L, 1510167511L, -1506129459L, 1352711524L,
407812426L, 1239646389L, -1752029169L, 792215830L, 531307456L,
1888925571L, 582981025L, 946231696L, -252353938L, 1997190169L,
-2050381653L, 485891226L, -294445988L, 424282463L, -56345803L,
759340748L, -422294494L, -430223747L, -1964231817L, 129436830L,
-373032104L, 849379003L, 891363417L, -617765912L, 926262806L,
-651081327L, -1007392189L, -2023708334L, 1988320740L, -1389657049L,
291795069L, 1183014260L, -1564995238L, 1814157221L, -343689281L,
-756200378L, -2077697328L, 895427827L, -880688623L, -437586880L,
1884862494L, 1298712297L, -1434725349L, -254182358L, 1444962124L,
491570447L, 1140836421L, -201068036L, 577413202L, -541308467L,
-1175659641L, 220581486L, -2136162744L, 353567058L, -176580896L,
1801586380L, -1382807392L, -1049180286L, -1677777176L, 1224978156L,
1195118652L, 1937221234L, -2015087504L, -1745597916L, 111663160L,
225923098L, -1099209008L, -654492612L, -1920234348L, 11798658L,
-289591360L, 482343228L, -1794697584L, -811270110L, 1110325256L,
-1269642100L, 230937628L, 230632082L, -322506624L, 111105044L,
1723713832L, -41547206L, 935335120L, -1343908756L, -1575584076L,
220858386L, 1385007072L, -664431668L, -309006880L, 373416482L,
1332673832L, 525505804L, -120910276L, 1486225650L, -767652112L,
2038779716L, -2070995368L, 413465146L, -1404444944L, -1447874372L,
-2047073772L, 1380951234L, -695643040L, -2142069380L, 376979664L,
-884739038L, 63950504L, 173777804L, -1653260868L, -720720846L,
1245497152L, 349525172L, -606951608L, 783907514L, 556605200L,
-768619284L, 1744540756L, 1990987538L, -2027649120L, -1795836980L,
1996907360L, 1642632386L, 1435891240L, 1600230828L, -702690500L,
948565874L, -906021072L, 2041533988L, -2139450312L, -527978278L,
-1150319216L, -1844417796L, 1068778388L, 755238722L, -1611936256L,
-1835534788L, 1858353744L, 528590498L, 490668872L, 485650956L,
-6821668L, 525806162L, -1081724800L, 1673005908L, -822569496L,
-676438278L, 995535696L, 1483168812L, -1405982348L, -955795822L,
2040172128L, 1677444748L, 1257547616L, -88208670L, 650219688L,
876604620L, 701211132L, -1172467790L, -181114384L, 48149956L,
-620618920L, -1718980742L, -779795024L, 1978623420L, 643974228L,
2054969282L, 604624672L, -1693602884L, -722745776L, -1193932318L,
-999668376L, -108372660L, 2005873916L, -49321230L, -1886715136L,
1777503860L, -2037017592L, -746493510L, 1849816016L, 1530209772L,
-111472748L, 561970898L, 907557472L, 1158839628L, 335597856L,
-998666622L, 1464690152L, 1697244140L, -1717167556L, 1606239858L,
1628011376L, 1665323300L, -1563315656L, 1842411930L, -1930768944L,
-21623364L, -1781754732L, -1955293054L, -1929634368L, 5082940L,
724304016L, 66194210L, -2082551288L, -1502664436L, 418826652L,
158452242L, -1543262336L, -1946367212L, 440140072L, -302430150L,
-1572725808L, 748072556L, -850496972L, -2131269998L, -52568736L,
494750028L, -1980533792L, -245341790L, -830031448L, 873526924L,
-592790084L, 1821700722L, -1430189072L, 1235337796L, -256085928L,
-369703878L, -565037968L, 978691132L, -582965996L, -370242878L,
-152095008L, 1221725308L, -529102128L, 692742562L, -324592600L,
-1318967540L, -737636932L, -1993578702L, -1965707584L, -1184608204L,
-1196175672L, -1357412166L, -1361173872L, -10453268L, 301783636L,
-1493455214L, -1049842400L, -1400134196L, -1375891744L, 1378796738L,
953547816L, 688272300L, 597339836L, -289187854L, 526875184L,
866362276L, 1924656568L, 1008652122L, -106110832L, -191557764L,
-1612286700L, -1246992062L, 1916835072L, -45940292L, -1128749744L,
-1391742686L, 1795723848L, 446088204L, -338152868L, -1945541166L,
1736007168L, 155842900L, -1352315672L, 1900345594L, 1058405072L,
745668396L, 167126260L, -1034273390L, -1793642144L, -2126663668L,
-516009479L, -2125024981L, -453404004L, -734433078L, 789494207L,
-1253667575L, -1807675874L, 91411404L, -158793315L, -2138391513L,
1446666832L, 1137946350L, -346167397L, -242919939L, 1805699306L,
-212927608L, -1678478031L, -1164268125L, -1575863900L, -2089739182L,
1400489895L, 1856214001L, 1719478070L, 1145963476L, -1494914715L,
-651643537L, -790771800L, 53488902L, 267039795L, 1433883285L,
393536242L, -547296448L, -2019180375L, 779126043L, 1943012076L,
-205955334L, -528224465L, 29282105L, -1492528818L, -795906148L,
-1731592947L, -1798477961L, -1120328832L, -1852687906L, -884195669L,
-474332755L, 1949357594L, -2003973928L, -55151295L, -1190101517L,
386919444L, 21082530L, 1850762935L, 1728934017L, -1030731642L,
-577053500L, 469809653L, 1403270239L, -1154505032L, -874158186L,
-1509982813L, 17408357L, 313133186L, 2115985904L, -36522087L,
-933105269L, 976735356L, 2120623786L, -1819475745L, 596846185L,
-366988162L, -1418716052L, -38454083L, -1297492601L, -1335217808L,
-2144754674L, 365517115L, -613177827L, 217807562L, 2129229288L,
-35104047L, -431970621L, 13143492L, 989989490L, -1228844217L,
128117521L, -1201514858L, -647222284L, -2047617659L, -8320817L,
-1589403192L, -1698930586L, 2140681875L, 39708149L, -1898006574L,
-1040563168L, 1591907849L, 1733168955L, -2031402356L, 1141217050L,
1151515023L, 1948784345L, -1435354770L, 402425084L, 656186989L,
-1395189225L, -1606709280L, -1019180098L, 520972683L, -1727596019L,
-316803526L, 1242635896L, -311716319L, 2099505363L, 235050996L,
-1003683710L, 801724695L, -1681821727L, 1463937318L, 1687308324L,
1858874069L, -32631169L, -808282344L, -881584586L, 1509533891L,
1446446341L, -90379102L, -11062640L, -1952686535L, 862854891L,
-669278244L, 210543114L, 1611293951L, 1296642377L, -1639603490L,
-2069213044L, -337164195L, 1559029351L, -324692080L, -423167058L,
-1977564581L, -606290371L, 701784234L, -2090024632L, 1256244593L,
-702696733L, -226215452L, 2009838866L, -2051061529L, -2002235727L,
137966838L, 171427604L, -257626075L, -1201855185L, 496667368L,
889546694L, 384727667L, -852382507L, -1553477966L, -388802944L,
1738038889L, -1104444709L, -2053644756L, -332562886L, 255326447L,
-1956281479L, -690229234L, -554437156L, -954755507L, 135875044L
)
| /mypackage_lab4/R/mypackage_lab4-internal.R | no_license | Violetags92/lab_4_R | R | false | false | 8,238 | r | .Random.seed <-
c(403L, 10L, 2007098775L, 1876235518L, 414449336L, 363421851L,
-328200455L, 852623432L, 1144968054L, -385038671L, 1222182627L,
1466070450L, -1926799868L, -662448825L, 258134045L, 1707947668L,
-601850310L, 1997344901L, 492492127L, 14515686L, 163714160L,
69224339L, -787902415L, -1196040480L, -1161390210L, -979268087L,
1189865019L, 1480275978L, 1000105964L, -963031953L, -1052892251L,
-1912749220L, -1598820558L, 1560043053L, 1328387303L, -1417472498L,
-1918104600L, 869332459L, 864788105L, -1639723496L, 1862255942L,
1155838881L, 295861811L, 1124889506L, 1671988884L, 1312654615L,
1937720077L, 903049764L, -1781810678L, 1253884789L, 125896399L,
1497270742L, -1659837056L, -936682045L, -246809759L, -1764695600L,
200702254L, 42582233L, -767190293L, -2077980838L, 1363476764L,
1548486687L, 333501557L, -1515859700L, -1221116702L, -1748642371L,
1255760439L, 328495326L, -886850536L, -2096979077L, 1151095577L,
-132616280L, 1685778262L, 976181201L, 2022671235L, 1593533970L,
812056996L, 2995559L, -1645462851L, 1307261492L, -310558310L,
-407298587L, 1619622271L, 1092047110L, -431609200L, -417137869L,
1140162769L, -782364032L, 1059701598L, 1994354857L, 827102043L,
1798250858L, 594232332L, 1684415567L, -117140219L, -176269892L,
1869471890L, 1859523469L, -368057401L, -238774098L, 219715848L,
-460511477L, -1351638551L, -1234590024L, 1460408678L, -1561406975L,
-559084397L, 924743490L, 785385716L, 1183991031L, -73753363L,
1594183556L, 1896366506L, -1318455531L, 424355247L, -1952297546L,
-283790368L, 84368163L, -631186367L, 2005380912L, -1606173362L,
-376569927L, -1706477109L, 1312832122L, 1707782012L, 1547370431L,
-579409771L, -1179019604L, 476234882L, 420717661L, 1437772631L,
-764561858L, 1355807352L, -998340261L, -1026250567L, -946452216L,
478886838L, 1142126833L, 1577316131L, -2053736078L, 439233476L,
1144735623L, 1187450205L, 2072404820L, -1532724870L, 163558597L,
-240327137L, 1235022246L, 1766630192L, -1558425133L, 208410609L,
1060155680L, -1203023938L, -487505975L, 690671483L, 815379786L,
-1328152148L, -613213009L, -277707163L, 1250213404L, 45805682L,
204089069L, -96859353L, 1461544782L, 1087229864L, 350378923L,
-1665967927L, 854876376L, 1342504326L, 906253409L, 1260918259L,
575558754L, 703517396L, 1510167511L, -1506129459L, 1352711524L,
407812426L, 1239646389L, -1752029169L, 792215830L, 531307456L,
1888925571L, 582981025L, 946231696L, -252353938L, 1997190169L,
-2050381653L, 485891226L, -294445988L, 424282463L, -56345803L,
759340748L, -422294494L, -430223747L, -1964231817L, 129436830L,
-373032104L, 849379003L, 891363417L, -617765912L, 926262806L,
-651081327L, -1007392189L, -2023708334L, 1988320740L, -1389657049L,
291795069L, 1183014260L, -1564995238L, 1814157221L, -343689281L,
-756200378L, -2077697328L, 895427827L, -880688623L, -437586880L,
1884862494L, 1298712297L, -1434725349L, -254182358L, 1444962124L,
491570447L, 1140836421L, -201068036L, 577413202L, -541308467L,
-1175659641L, 220581486L, -2136162744L, 353567058L, -176580896L,
1801586380L, -1382807392L, -1049180286L, -1677777176L, 1224978156L,
1195118652L, 1937221234L, -2015087504L, -1745597916L, 111663160L,
225923098L, -1099209008L, -654492612L, -1920234348L, 11798658L,
-289591360L, 482343228L, -1794697584L, -811270110L, 1110325256L,
-1269642100L, 230937628L, 230632082L, -322506624L, 111105044L,
1723713832L, -41547206L, 935335120L, -1343908756L, -1575584076L,
220858386L, 1385007072L, -664431668L, -309006880L, 373416482L,
1332673832L, 525505804L, -120910276L, 1486225650L, -767652112L,
2038779716L, -2070995368L, 413465146L, -1404444944L, -1447874372L,
-2047073772L, 1380951234L, -695643040L, -2142069380L, 376979664L,
-884739038L, 63950504L, 173777804L, -1653260868L, -720720846L,
1245497152L, 349525172L, -606951608L, 783907514L, 556605200L,
-768619284L, 1744540756L, 1990987538L, -2027649120L, -1795836980L,
1996907360L, 1642632386L, 1435891240L, 1600230828L, -702690500L,
948565874L, -906021072L, 2041533988L, -2139450312L, -527978278L,
-1150319216L, -1844417796L, 1068778388L, 755238722L, -1611936256L,
-1835534788L, 1858353744L, 528590498L, 490668872L, 485650956L,
-6821668L, 525806162L, -1081724800L, 1673005908L, -822569496L,
-676438278L, 995535696L, 1483168812L, -1405982348L, -955795822L,
2040172128L, 1677444748L, 1257547616L, -88208670L, 650219688L,
876604620L, 701211132L, -1172467790L, -181114384L, 48149956L,
-620618920L, -1718980742L, -779795024L, 1978623420L, 643974228L,
2054969282L, 604624672L, -1693602884L, -722745776L, -1193932318L,
-999668376L, -108372660L, 2005873916L, -49321230L, -1886715136L,
1777503860L, -2037017592L, -746493510L, 1849816016L, 1530209772L,
-111472748L, 561970898L, 907557472L, 1158839628L, 335597856L,
-998666622L, 1464690152L, 1697244140L, -1717167556L, 1606239858L,
1628011376L, 1665323300L, -1563315656L, 1842411930L, -1930768944L,
-21623364L, -1781754732L, -1955293054L, -1929634368L, 5082940L,
724304016L, 66194210L, -2082551288L, -1502664436L, 418826652L,
158452242L, -1543262336L, -1946367212L, 440140072L, -302430150L,
-1572725808L, 748072556L, -850496972L, -2131269998L, -52568736L,
494750028L, -1980533792L, -245341790L, -830031448L, 873526924L,
-592790084L, 1821700722L, -1430189072L, 1235337796L, -256085928L,
-369703878L, -565037968L, 978691132L, -582965996L, -370242878L,
-152095008L, 1221725308L, -529102128L, 692742562L, -324592600L,
-1318967540L, -737636932L, -1993578702L, -1965707584L, -1184608204L,
-1196175672L, -1357412166L, -1361173872L, -10453268L, 301783636L,
-1493455214L, -1049842400L, -1400134196L, -1375891744L, 1378796738L,
953547816L, 688272300L, 597339836L, -289187854L, 526875184L,
866362276L, 1924656568L, 1008652122L, -106110832L, -191557764L,
-1612286700L, -1246992062L, 1916835072L, -45940292L, -1128749744L,
-1391742686L, 1795723848L, 446088204L, -338152868L, -1945541166L,
1736007168L, 155842900L, -1352315672L, 1900345594L, 1058405072L,
745668396L, 167126260L, -1034273390L, -1793642144L, -2126663668L,
-516009479L, -2125024981L, -453404004L, -734433078L, 789494207L,
-1253667575L, -1807675874L, 91411404L, -158793315L, -2138391513L,
1446666832L, 1137946350L, -346167397L, -242919939L, 1805699306L,
-212927608L, -1678478031L, -1164268125L, -1575863900L, -2089739182L,
1400489895L, 1856214001L, 1719478070L, 1145963476L, -1494914715L,
-651643537L, -790771800L, 53488902L, 267039795L, 1433883285L,
393536242L, -547296448L, -2019180375L, 779126043L, 1943012076L,
-205955334L, -528224465L, 29282105L, -1492528818L, -795906148L,
-1731592947L, -1798477961L, -1120328832L, -1852687906L, -884195669L,
-474332755L, 1949357594L, -2003973928L, -55151295L, -1190101517L,
386919444L, 21082530L, 1850762935L, 1728934017L, -1030731642L,
-577053500L, 469809653L, 1403270239L, -1154505032L, -874158186L,
-1509982813L, 17408357L, 313133186L, 2115985904L, -36522087L,
-933105269L, 976735356L, 2120623786L, -1819475745L, 596846185L,
-366988162L, -1418716052L, -38454083L, -1297492601L, -1335217808L,
-2144754674L, 365517115L, -613177827L, 217807562L, 2129229288L,
-35104047L, -431970621L, 13143492L, 989989490L, -1228844217L,
128117521L, -1201514858L, -647222284L, -2047617659L, -8320817L,
-1589403192L, -1698930586L, 2140681875L, 39708149L, -1898006574L,
-1040563168L, 1591907849L, 1733168955L, -2031402356L, 1141217050L,
1151515023L, 1948784345L, -1435354770L, 402425084L, 656186989L,
-1395189225L, -1606709280L, -1019180098L, 520972683L, -1727596019L,
-316803526L, 1242635896L, -311716319L, 2099505363L, 235050996L,
-1003683710L, 801724695L, -1681821727L, 1463937318L, 1687308324L,
1858874069L, -32631169L, -808282344L, -881584586L, 1509533891L,
1446446341L, -90379102L, -11062640L, -1952686535L, 862854891L,
-669278244L, 210543114L, 1611293951L, 1296642377L, -1639603490L,
-2069213044L, -337164195L, 1559029351L, -324692080L, -423167058L,
-1977564581L, -606290371L, 701784234L, -2090024632L, 1256244593L,
-702696733L, -226215452L, 2009838866L, -2051061529L, -2002235727L,
137966838L, 171427604L, -257626075L, -1201855185L, 496667368L,
889546694L, 384727667L, -852382507L, -1553477966L, -388802944L,
1738038889L, -1104444709L, -2053644756L, -332562886L, 255326447L,
-1956281479L, -690229234L, -554437156L, -954755507L, 135875044L
)
|
# Rank hospitals within every state on a 30-day mortality outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     rank to report for each state -- "best", "worst", or a
#            rank number.  States with fewer hospitals than `num`
#            get NA.
#
# Returns a data frame with one row per state and columns
# `hospital` and `state`, in alphabetical state order.
rankall <- function(outcome, num){
  hos_df <- read.csv("Hospital-data-overview_ass3-wk4/data/outcome-of-care-measures.csv",
                     colClasses = "character")
  # Keep hospital name, state, and the three outcome columns.
  hos_df <- hos_df[, c(2, 7, 11, 17, 23)]
  colnames(hos_df) <- c("Hospital name", "State",
                        "heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% names(hos_df))) {
    return(print("invalid outcome"))
  }
  # Drop rows whose outcome value is not numeric ("Not Available" etc.).
  # suppressWarnings() silences the expected coercion warning.
  hos_df <- hos_df[!is.na(suppressWarnings(as.numeric(hos_df[, outcome]))), ]
  # For one state's hospitals, return the name at the requested rank:
  # lowest outcome value first, ties broken alphabetically by name.
  pick_hospital <- function(bystate) {
    bystate <- bystate[order(as.numeric(bystate[, outcome]),
                             bystate[, "Hospital name"]), ]
    n_st <- nrow(bystate)
    idx <- if (identical(num, "best")) {
      1L
    } else if (identical(num, "worst")) {
      n_st
    } else {
      as.integer(num)
    }
    if (!is.na(idx) && idx >= 1L && idx <= n_st) {
      bystate[idx, "Hospital name"]
    } else {
      NA_character_
    }
  }
  # split() returns the per-state groups in sorted state order, which
  # also fixes the row order of the result.
  by_state <- split(hos_df, hos_df$State)
  data.frame(hospital = vapply(by_state, pick_hospital, character(1),
                               USE.NAMES = FALSE),
             state = names(by_state),
             stringsAsFactors = FALSE)
}
| /R-Programming/Hospital-data-overview_ass3-wk4/rankall.R | permissive | vanAkim/Data.Science.Specialization | R | false | false | 1,671 | r | rankall <- function(outcome, num){
hos_df <- read.csv("Hospital-data-overview_ass3-wk4/data/outcome-of-care-measures.csv", colClasses = "character")
hos_df <- hos_df[, c(2,7,11,17,23)]
colnames(hos_df) <- c("Hospital name", "State", "heart attack", "heart failure", "pneumonia")
if ( !(outcome %in% names(hos_df)) ){
return( print("invalid outcome"))
}
filt <- is.na(as.numeric(hos_df[,outcome]))
hos_df <- hos_df[!filt,]
hos_df["Rank"] <- 1:length(hos_df$State)
hos_df <- hos_df[order(hos_df$State),]
ranked_df <- hos_df[0,]
for (st in unique(hos_df$State)){
bystate <- hos_df[hos_df$State == st,]
bystate <- bystate[ order(as.numeric(bystate[,outcome]), bystate[,"Hospital name"]),]
bystate["Rank"] <- 1:table(hos_df$State)[[st]]
ranked_df[length(ranked_df$State)+1:length(bystate$State),] <- bystate
}
if (num == "best"){
numf <- function() {1}
} else if (num == "worst"){
numf <- function() {max(ranked_df$Rank[ranked_df$State == st])}
} else {
numf <- function() {num}
}
result <- ranked_df[0,c("Hospital name", "State")]
for (st in unique(hos_df$State)){
if (numf() > table(hos_df$State)[[st]]){
result[length(result$State)+1,] <- c(NA, st)
} else {
result[length(result$State)+1,] <- ranked_df[ranked_df$State == st & numf() == ranked_df[,"Rank"], c("Hospital name","State")]
}
}
colnames(result) <- c("hospital", "state")
result
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Create a cache-aware matrix wrapper: a list of closures sharing an
# environment that stores a matrix together with a memoised inverse.
#
# Returned list elements:
#   set(y)          replace the stored matrix (invalidates the cache)
#   get()           return the stored matrix
#   setinverse(i)   store a computed inverse
#   getinverse()    return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix: any old inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
# Return the inverse of the special "cache matrix" object produced by
# makeCacheMatrix().  The inverse is computed with solve() on first
# use and served from the object's cache on subsequent calls (a
# message announces a cache hit).  Extra arguments are forwarded to
# solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | Inabee1226/ProgrammingAssignment2 | R | false | false | 829 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Create a cache-aware matrix wrapper: a list of closures sharing an
# environment that stores a matrix together with a memoised inverse.
#
# Returned list elements:
#   set(y)          replace the stored matrix (invalidates the cache)
#   get()           return the stored matrix
#   setinverse(i)   store a computed inverse
#   getinverse()    return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix: any old inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
# Return the inverse of the special "cache matrix" object produced by
# makeCacheMatrix().  The inverse is computed with solve() on first
# use and served from the object's cache on subsequent calls (a
# message announces a cache hit).  Extra arguments are forwarded to
# solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
# Ridge-regression (glmnet, alpha = 0) model predicting Party for the
# 2016 survey competition; writes a submission CSV.
train <- read.csv("train2016.csv")
test <- read.csv("test2016.csv")
# Treat implausible birth years (outside 1930-2000) as missing ...
train$YOB <- ifelse(!is.na(train$YOB) & train$YOB < 1930, NA, train$YOB)
train$YOB <- ifelse(!is.na(train$YOB) & train$YOB > 2000, NA, train$YOB)
test$YOB <- ifelse(!is.na(test$YOB) & test$YOB < 1930, NA, test$YOB)
test$YOB <- ifelse(!is.na(test$YOB) & test$YOB > 2000, NA, test$YOB)
# ... then impute each file's mean year.
train[is.na(train$YOB), "YOB"] <- round(mean(train$YOB, na.rm = TRUE))
# BUG FIX: the original indexed `test` with is.na(train$YOB), so the
# wrong rows were overwritten and real test-set NAs were left in place.
test[is.na(test$YOB), "YOB"] <- round(mean(test$YOB, na.rm = TRUE))
library(caTools)
set.seed(123)  # reproducible train/validation split
spl <- sample.split(train$Party, SplitRatio = 0.8)
dataTrain <- subset(train, spl == TRUE)
dataTest <- subset(train, spl == FALSE)
# `- 1` drops the intercept so model.matrix() keeps a full dummy set
# for every factor predictor.
formula <- Party ~ Income + HouseholdStatus + EducationLevel + Gender +
  Q109244 + Q115611 + Q113181 + Q115195 + Q98197 - 1
library(glmnet)
x.train <- model.matrix(formula, data = dataTrain)
y.train <- dataTrain$Party
x.test <- model.matrix(formula, data = dataTest)
# Cross-validated ridge fit (alpha = 0 => pure L2 penalty).
cv.ridge <- cv.glmnet(x.train, y.train, family = "binomial", alpha = 0)
plot(cv.ridge)
# Validation predictions at the CV-error-minimising lambda; negative
# link-scale scores map to the first factor level ("Democrat").
predTest <- predict(cv.ridge, newx = x.test, s = "lambda.min")
predTestLabels <- as.factor(ifelse(predTest < 0, "Democrat", "Republican"))
table(dataTest$Party, predTestLabels)
# Validation accuracy (counts read off the confusion table above).
(403 + 323) / nrow(dataTest)
# Score the competition test set and write the submission file.
test$Party <- ""
x.submit <- model.matrix(formula, data = test)
predSubmit <- predict(cv.ridge, newx = x.submit, s = "lambda.min")
predSubmitLabels <- as.factor(ifelse(predSubmit < 0, "Democrat", "Republican"))
submission <- data.frame(USER_ID = test$USER_ID, Predictions = predSubmitLabels)
write.csv(submission, "ridge_regression_30.csv", row.names = FALSE)
| /ridge_regression_30 code from insad.R | no_license | DJamesWilliamson/K_comp | R | false | false | 1,672 | r | train = read.csv("train2016.csv")
test <- read.csv("test2016.csv")
# Treat implausible birth years (outside 1930-2000) as missing ...
train$YOB <- ifelse(!is.na(train$YOB) & train$YOB < 1930, NA, train$YOB)
train$YOB <- ifelse(!is.na(train$YOB) & train$YOB > 2000, NA, train$YOB)
test$YOB <- ifelse(!is.na(test$YOB) & test$YOB < 1930, NA, test$YOB)
test$YOB <- ifelse(!is.na(test$YOB) & test$YOB > 2000, NA, test$YOB)
# ... then impute each file's mean year.
train[is.na(train$YOB), "YOB"] <- round(mean(train$YOB, na.rm = TRUE))
# BUG FIX: the original indexed `test` with is.na(train$YOB), so the
# wrong rows were overwritten and real test-set NAs were left in place.
test[is.na(test$YOB), "YOB"] <- round(mean(test$YOB, na.rm = TRUE))
library(caTools)
set.seed(123)  # reproducible train/validation split
spl <- sample.split(train$Party, SplitRatio = 0.8)
dataTrain <- subset(train, spl == TRUE)
dataTest <- subset(train, spl == FALSE)
# `- 1` drops the intercept so model.matrix() keeps a full dummy set
# for every factor predictor.
formula <- Party ~ Income + HouseholdStatus + EducationLevel + Gender +
  Q109244 + Q115611 + Q113181 + Q115195 + Q98197 - 1
library(glmnet)
x.train <- model.matrix(formula, data = dataTrain)
y.train <- dataTrain$Party
x.test <- model.matrix(formula, data = dataTest)
# Cross-validated ridge fit (alpha = 0 => pure L2 penalty).
cv.ridge <- cv.glmnet(x.train, y.train, family = "binomial", alpha = 0)
plot(cv.ridge)
# Validation predictions at the CV-error-minimising lambda; negative
# link-scale scores map to the first factor level ("Democrat").
predTest <- predict(cv.ridge, newx = x.test, s = "lambda.min")
predTestLabels <- as.factor(ifelse(predTest < 0, "Democrat", "Republican"))
table(dataTest$Party, predTestLabels)
# Validation accuracy (counts read off the confusion table above).
(403 + 323) / nrow(dataTest)
# Score the competition test set and write the submission file.
test$Party <- ""
x.submit <- model.matrix(formula, data = test)
predSubmit <- predict(cv.ridge, newx = x.submit, s = "lambda.min")
predSubmitLabels <- as.factor(ifelse(predSubmit < 0, "Democrat", "Republican"))
submission <- data.frame(USER_ID = test$USER_ID, Predictions = predSubmitLabels)
write.csv(submission, "ridge_regression_30.csv", row.names = FALSE)
|
# load and recode data
# NOTE(review): `politics_and_need` and `rescale()` come from packages
# attached elsewhere in the project (rescale is presumably arm::rescale);
# logistf() is from the logistf package -- confirm.
data(politics_and_need) # load data set
d <- politics_and_need # rename data set
d$dem_governor <- 1 - d$gop_governor # create dem. gov. indicator
d$st_percent_uninsured <- rescale(d$percent_uninsured) # standardize
# Model formula: oppose_expansion regressed on political and need covariates.
f <- oppose_expansion ~ dem_governor + percent_favorable_aca + gop_leg +
  st_percent_uninsured + bal2012 + multiplier + percent_nonwhite + percent_metro
m <- glm(f, data = d, family = binomial) # maximum-likelihood logit
# Same model with an extremely small convergence tolerance (epsilon =
# 10e-100), so the IWLS iterations run to near-exact convergence.
m.tol <- glm(f, data = d, family = binomial, control = list(epsilon = 10e-100))
m.firth <- logistf(f, d)
# Pieces reused by the likelihood/posterior functions defined below.
mf <- model.frame(f, data = d)
X <- model.matrix(mf, data = d)
y <- d$oppose_expansion
b <- coef(m)
b.dif <- b[2] - b[1]
# Bernoulli (logistic-regression) log-likelihood.
#
# b: coefficient vector, X: design matrix, y: 0/1 response vector.
# Returns sum(y*log(p) + (1 - y)*log(1 - p)) with p = plogis(X %*% b).
ll_fn <- function(b, X, y) {
  prob <- plogis(X %*% b)
  contrib <- y * log(prob) + (1 - y) * log(1 - prob)
  sum(contrib)
}
# Profile the log-likelihood over a grid of values for the
# separating-variable coefficient (element 2 of b), holding the other
# coefficients fixed at their ML estimates.
n_pts <- 200
b1 <- seq(-10, 1, length.out = n_pts)
ll <- vapply(b1, function(coef2) {
  b_grid <- b
  b_grid[2] <- coef2
  ll_fn(b_grid, X, y)
}, numeric(1))
# Section: theorem 1 figures ------------------------------------------

# Grey fill colours for the figure.  The "a" variants are
# semi-transparent (alpha = 150/255); col1/col2 are fully opaque.
shade <- 100
col1a <- rgb(shade, shade, shade, alpha = 150, maxColorValue = 255)
col2a <- rgb(shade, shade, shade, alpha = 150, maxColorValue = 255)
col1 <- rgb(shade, shade, shade, alpha = 255, maxColorValue = 255)
col2 <- rgb(shade, shade, shade, alpha = 255, maxColorValue = 255)
# Bernoulli likelihood (not log) of a logistic regression:
# exp of the log-likelihood at coefficients b for design X, response y.
lik_fn <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  exp(loglik)
}
# Unnormalised posterior under a Cauchy(0, 2.5) prior on the second
# coefficient: likelihood times the prior density at b[2].
post_fn1 <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  log_prior <- log(dcauchy(b[2], 0, 2.5))
  exp(loglik + log_prior)
}
# Unnormalised posterior under a Normal(0, 2.5) prior on the second
# coefficient: likelihood times the prior density at b[2].
post_fn2 <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  log_prior <- log(dnorm(b[2], 0, 2.5))
  exp(loglik + log_prior)
}
# Evaluate the likelihood and both posteriors on a wider grid of
# values for the separating-variable coefficient (element 2 of b).
n_pts <- 200
b1 <- seq(-20, 10, length.out = n_pts)
y <- d$oppose_expansion
b <- coef(m)
lik <- post1 <- post2 <- numeric(n_pts)
for (i in seq_len(n_pts)) {
  b_grid <- b
  b_grid[2] <- b1[i]
  lik[i] <- lik_fn(b_grid, X, y)
  post1[i] <- post_fn1(b_grid, X, y)
  post2[i] <- post_fn2(b_grid, X, y)
}
# Prior densities over the same grid, plus a zero baseline used to
# close the shaded polygons in the figure.
prior1 <- dcauchy(b1, 0, 2.5)  # Cauchy(0, 2.5) prior
prior2 <- dnorm(b1, 0, 2.5)    # Normal(0, 2.5) prior
zeros <- rep(0, length(b1))
# normal + lik + post
# Two-panel figure: likelihood profile for the separating-variable
# coefficient, overlaid with a rescaled prior and the implied
# posterior (top: normal prior; bottom: Cauchy prior).
# NOTE(review): eplot()/mm() are not base graphics -- presumably from
# the compactr package loaded elsewhere; confirm.
pdf("doc/figs/thm-1-illustrated.pdf", height = 4, width = 6)
par(mfrow = c(2, 1), mar = c(.5, .5, .5, .5), oma = c(3, 1, 1, 1), xaxs = "r", yaxs = "r")
# --- Top panel: normal prior ---
eplot(xlim = 1.04*mm(b1), ylim = c(0, 1.2),
      xlab = "Coefficient for Separating Variable",
      anny = FALSE)
text(-17, 1, "Likelihood", cex = 1, pos = 3)
# Shaded prior density, rescaled to a maximum of one.
polygon(c(b1, rev(b1)), c(prior2/max(prior2), zeros), col = col1a, lty = 0)
text(3, .75, "Normal\nPrior", cex = 1, pos = 3, col = "black")
# Shaded posterior (also rescaled); its label sits at the mode.
polygon(c(b1, rev(b1)), c(post2/max(post2), zeros), col = col2a, lty = 0)
text(b1[which(post2 == max(post2))], 1, "Posterior", cex = 1, pos = 3, col = "black")
lines(b1, lik/max(lik), lwd = 5, col = "black", xpd = NA)
#dev.off()
# cauchy + lik + post
# --- Bottom panel: Cauchy prior ---
eplot(xlim = 1.04*mm(b1), ylim = c(0, 1.2),
      xlab = "Coefficient for Separating Variable",
      anny = FALSE)
text(-17, 1, "Likelihood", cex = 1, pos = 3)
polygon(c(b1, rev(b1)), c(prior1/max(prior1), zeros), col = col1a, lty = 0)
text(3, .75, "Cauchy\nPrior", cex = 1, pos = 3, col = "black")
polygon(c(b1, rev(b1)), c(post1/max(post1), zeros), col = col2a, lty = 0)
text(b1[which(post1 == max(post1))], 1, "Posterior", cex = 1, pos = 3, col = "black")
lines(b1, lik/max(lik), lwd = 5, col = "black", xpd = NA)
dev.off()
| /R/thm-1-illustrated.R | no_license | hal2001/priors-for-separation | R | false | false | 3,375 | r |
# load and recode data
# NOTE(review): `politics_and_need` and `rescale()` come from packages
# attached elsewhere in the project (rescale is presumably arm::rescale);
# logistf() is from the logistf package -- confirm.
data(politics_and_need) # load data set
d <- politics_and_need # rename data set
d$dem_governor <- 1 - d$gop_governor # create dem. gov. indicator
d$st_percent_uninsured <- rescale(d$percent_uninsured) # standardize
# Model formula: oppose_expansion regressed on political and need covariates.
f <- oppose_expansion ~ dem_governor + percent_favorable_aca + gop_leg +
  st_percent_uninsured + bal2012 + multiplier + percent_nonwhite + percent_metro
m <- glm(f, data = d, family = binomial) # maximum-likelihood logit
# Same model with an extremely small convergence tolerance (epsilon =
# 10e-100), so the IWLS iterations run to near-exact convergence.
m.tol <- glm(f, data = d, family = binomial, control = list(epsilon = 10e-100))
m.firth <- logistf(f, d)
# Pieces reused by the likelihood/posterior functions defined below.
mf <- model.frame(f, data = d)
X <- model.matrix(mf, data = d)
y <- d$oppose_expansion
b <- coef(m)
b.dif <- b[2] - b[1]
# Bernoulli (logistic-regression) log-likelihood.
#
# b: coefficient vector, X: design matrix, y: 0/1 response vector.
# Returns sum(y*log(p) + (1 - y)*log(1 - p)) with p = plogis(X %*% b).
ll_fn <- function(b, X, y) {
  prob <- plogis(X %*% b)
  contrib <- y * log(prob) + (1 - y) * log(1 - prob)
  sum(contrib)
}
# Profile the log-likelihood over a grid of values for the
# separating-variable coefficient (element 2 of b), holding the other
# coefficients fixed at their ML estimates.
n_pts <- 200
b1 <- seq(-10, 1, length.out = n_pts)
ll <- vapply(b1, function(coef2) {
  b_grid <- b
  b_grid[2] <- coef2
  ll_fn(b_grid, X, y)
}, numeric(1))
# Section: theorem 1 figures ------------------------------------------

# Grey fill colours for the figure.  The "a" variants are
# semi-transparent (alpha = 150/255); col1/col2 are fully opaque.
shade <- 100
col1a <- rgb(shade, shade, shade, alpha = 150, maxColorValue = 255)
col2a <- rgb(shade, shade, shade, alpha = 150, maxColorValue = 255)
col1 <- rgb(shade, shade, shade, alpha = 255, maxColorValue = 255)
col2 <- rgb(shade, shade, shade, alpha = 255, maxColorValue = 255)
# Bernoulli likelihood (not log) of a logistic regression:
# exp of the log-likelihood at coefficients b for design X, response y.
lik_fn <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  exp(loglik)
}
# Unnormalised posterior under a Cauchy(0, 2.5) prior on the second
# coefficient: likelihood times the prior density at b[2].
post_fn1 <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  log_prior <- log(dcauchy(b[2], 0, 2.5))
  exp(loglik + log_prior)
}
# Unnormalised posterior under a Normal(0, 2.5) prior on the second
# coefficient: likelihood times the prior density at b[2].
post_fn2 <- function(b, X, y) {
  prob <- plogis(X %*% b)
  loglik <- sum(y * log(prob) + (1 - y) * log(1 - prob))
  log_prior <- log(dnorm(b[2], 0, 2.5))
  exp(loglik + log_prior)
}
# Evaluate the likelihood and both posteriors on a wider grid of
# values for the separating-variable coefficient (element 2 of b).
n_pts <- 200
b1 <- seq(-20, 10, length.out = n_pts)
y <- d$oppose_expansion
b <- coef(m)
lik <- post1 <- post2 <- numeric(n_pts)
for (i in seq_len(n_pts)) {
  b_grid <- b
  b_grid[2] <- b1[i]
  lik[i] <- lik_fn(b_grid, X, y)
  post1[i] <- post_fn1(b_grid, X, y)
  post2[i] <- post_fn2(b_grid, X, y)
}
# Prior densities over the same grid, plus a zero baseline used to
# close the shaded polygons in the figure.
prior1 <- dcauchy(b1, 0, 2.5)  # Cauchy(0, 2.5) prior
prior2 <- dnorm(b1, 0, 2.5)    # Normal(0, 2.5) prior
zeros <- rep(0, length(b1))
# normal + lik + post
# Two-panel figure: likelihood profile for the separating-variable
# coefficient, overlaid with a rescaled prior and the implied
# posterior (top: normal prior; bottom: Cauchy prior).
# NOTE(review): eplot()/mm() are not base graphics -- presumably from
# the compactr package loaded elsewhere; confirm.
pdf("doc/figs/thm-1-illustrated.pdf", height = 4, width = 6)
par(mfrow = c(2, 1), mar = c(.5, .5, .5, .5), oma = c(3, 1, 1, 1), xaxs = "r", yaxs = "r")
# --- Top panel: normal prior ---
eplot(xlim = 1.04*mm(b1), ylim = c(0, 1.2),
      xlab = "Coefficient for Separating Variable",
      anny = FALSE)
text(-17, 1, "Likelihood", cex = 1, pos = 3)
# Shaded prior density, rescaled to a maximum of one.
polygon(c(b1, rev(b1)), c(prior2/max(prior2), zeros), col = col1a, lty = 0)
text(3, .75, "Normal\nPrior", cex = 1, pos = 3, col = "black")
# Shaded posterior (also rescaled); its label sits at the mode.
polygon(c(b1, rev(b1)), c(post2/max(post2), zeros), col = col2a, lty = 0)
text(b1[which(post2 == max(post2))], 1, "Posterior", cex = 1, pos = 3, col = "black")
lines(b1, lik/max(lik), lwd = 5, col = "black", xpd = NA)
#dev.off()
# cauchy + lik + post
# --- Bottom panel: Cauchy prior ---
eplot(xlim = 1.04*mm(b1), ylim = c(0, 1.2),
      xlab = "Coefficient for Separating Variable",
      anny = FALSE)
text(-17, 1, "Likelihood", cex = 1, pos = 3)
polygon(c(b1, rev(b1)), c(prior1/max(prior1), zeros), col = col1a, lty = 0)
text(3, .75, "Cauchy\nPrior", cex = 1, pos = 3, col = "black")
polygon(c(b1, rev(b1)), c(post1/max(post1), zeros), col = col2a, lty = 0)
text(b1[which(post1 == max(post1))], 1, "Posterior", cex = 1, pos = 3, col = "black")
lines(b1, lik/max(lik), lwd = 5, col = "black", xpd = NA)
dev.off()
|
png(filename = './Project1/plot2.png', width = 480, height = 480, units='px')
plot(data$Time, data$Global_active_power, xlab = '', ylab = 'Global Active Power (kilowatt)', type = 'l')
dev.off() | /Plot2.R | no_license | sabyasachi24/ExData_Plotting1 | R | false | false | 194 | r | png(filename = './Project1/plot2.png', width = 480, height = 480, units='px')
plot(data$Time, data$Global_active_power, xlab = '', ylab = 'Global Active Power (kilowatt)', type = 'l')
dev.off() |
# pin_find() should search every registered board and honour the
# `board` argument in all of its accepted forms: a single name, a
# vector of names, an empty selection, or a board object.
test_that("can pin_find() entries across all boards", {
  # Silence lifecycle deprecation chatter from the legacy pins API.
  withr::local_options(lifecycle_verbosity = "quiet")
  # Two throw-away boards, auto-deregistered when the test exits.
  local_register(board_temp("test1"))
  local_register(board_temp("test2"))
  pin(list(x = 1), "one", board = "test1")
  pin(list(x = 2), "two", board = "test2")
  # No board argument: searches every registered board (ignore the
  # default "local" board, which may hold unrelated pins).
  out <- pin_find()
  expect_equal(out$name[out$board != "local"], c("one", "two"))
  # Vector of board names.
  out <- pin_find("one", board = c("test1", "test2"))
  expect_equal(out$name, "one")
  # Single board name.
  out <- pin_find("one", board = "test1")
  expect_equal(out$name, "one")
  # Empty board selection yields an empty result.
  out <- pin_find("one", board = character())
  expect_equal(out$name, character())
  # A board object is accepted as well as a name.
  out <- pin_find("one", board = board_get("test1"))
  expect_equal(out$name, "one")
})
| /tests/testthat/test-pin_find.R | permissive | dfalbel/pins | R | false | false | 694 | r | test_that("can pin_find() entries across all boards", {
  # Silence lifecycle deprecation chatter from the legacy pins API.
  withr::local_options(lifecycle_verbosity = "quiet")
  # Two throw-away boards, auto-deregistered when the test exits.
  local_register(board_temp("test1"))
  local_register(board_temp("test2"))
  pin(list(x = 1), "one", board = "test1")
  pin(list(x = 2), "two", board = "test2")
  # No board argument: searches every registered board (ignore the
  # default "local" board, which may hold unrelated pins).
  out <- pin_find()
  expect_equal(out$name[out$board != "local"], c("one", "two"))
  # Vector of board names.
  out <- pin_find("one", board = c("test1", "test2"))
  expect_equal(out$name, "one")
  # Single board name.
  out <- pin_find("one", board = "test1")
  expect_equal(out$name, "one")
  # Empty board selection yields an empty result.
  out <- pin_find("one", board = character())
  expect_equal(out$name, character())
  # A board object is accepted as well as a name.
  out <- pin_find("one", board = board_get("test1"))
  expect_equal(out$name, "one")
})
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed RNG seed so the resampling folds are reproducible.
set.seed(1)
#:# data
# Download the benchmark dataset from OpenML.
dataset <- getOMLDataSet(data.name = "fri_c3_500_25")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Binary classification task plus an ada learner with probability output.
task <- makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn <- makeLearner("classif.ada", par.vals = list(), predict.type = "prob")
#:# hash
#:# c9873c4dc2d96a3d903455de17fa5ba7
# Fingerprint of the task/learner pair, identifying this model run.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a battery of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record package versions alongside the model artefacts.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_fri_c3_500_25/classification_binaryClass/c9873c4dc2d96a3d903455de17fa5ba7/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 690 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed RNG seed so the resampling folds are reproducible.
set.seed(1)
#:# data
# Download the benchmark dataset from OpenML.
dataset <- getOMLDataSet(data.name = "fri_c3_500_25")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Binary classification task plus an ada learner with probability output.
task <- makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn <- makeLearner("classif.ada", par.vals = list(), predict.type = "prob")
#:# hash
#:# c9873c4dc2d96a3d903455de17fa5ba7
# Fingerprint of the task/learner pair, identifying this model run.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a battery of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record package versions alongside the model artefacts.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcCalIndices.R
\name{calMoranGlo}
\alias{calMoranGlo}
\title{computes specific Moran criterion}
\usage{
calMoranGlo(matNZone, vectMean, meanTot, vectSurface)
}
\arguments{
\item{matNZone}{xxxx}
\item{vectMean}{xxxx}
\item{meanTot}{xxxx}
\item{vectSurface}{xxxx}
}
\value{
a ?
}
\description{
computes specific Moran criterion
}
\details{
description, a paragraph
}
\examples{
# not run
}
| /man/calMoranGlo.Rd | no_license | hazaeljones/geozoning | R | false | true | 471 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcCalIndices.R
\name{calMoranGlo}
\alias{calMoranGlo}
\title{computes specific Moran criterion}
\usage{
calMoranGlo(matNZone, vectMean, meanTot, vectSurface)
}
\arguments{
\item{matNZone}{xxxx}
\item{vectMean}{xxxx}
\item{meanTot}{xxxx}
\item{vectSurface}{xxxx}
}
\value{
a ?
}
\description{
computes specific Moran criterion
}
\details{
description, a paragraph
}
\examples{
# not run
}
|
#---------------------------------------------------------------------#
# Assignment AMPBA Batch 13 #
#---------------------------------------------------------------------#
library("shiny")
#Loading necessary files
library(udpipe)
library(lattice)
library(wordcloud)
library(RColorBrewer)
library(sentimentr)
library(dplyr)
library(ggplot2) # for plotting
library(tidytext) # for analyzing text in tidy manner
library(wordcloud)
library(tidyr)
options(shiny.maxRequestSize=30*1024^2)
# Define Server function
shinyServer(function(input, output) {
Dataset <- reactive({
if (is.null(input$file)) { return(NULL) }
else{
reviews_df <- as.data.frame(read.csv(input$file$datapath,stringsAsFactors = FALSE))
english_model = udpipe_load_model(input$file1$datapath)
df1 <- as.data.frame(udpipe_annotate(english_model, x = reviews_df$Reviewer.Comment,parser = "none",trace = FALSE))[,c(4,6)]
df1$token <- tolower(df1$token)
df1$token_final <- 'NA'
feat_list <- as.list(strsplit(input$searchInput, ",")[[1]])
feature_list <- tolower(feat_list)
for (feature in feature_list){
feature <- stringr::str_replace_all(feature,"[\\s]+", "")
print(feature)
df1$token_final <- ifelse(grepl(feature,df1$token),feature,df1$token_final)
}
df2 <- unique(df1$sentence[df1$token_final != 'NA'])
return(df2)
}
})
Dataset1 <- reactive({
if (is.null(input$file)) { return(NULL) }
else{
reviews_df <- as.data.frame(read.csv(input$file$datapath,stringsAsFactors = FALSE))
english_model = udpipe_load_model(input$file1$datapath)
df1 <- as.data.frame(udpipe_annotate(english_model, x = reviews_df$Reviewer.Comment,parser = "none",trace = FALSE))[,c(4,6)]
df1$token <- tolower(df1$token)
df1$token_final <- 'NA'
feat_list <- as.list(strsplit(input$searchInput, ",")[[1]])
feature_list <- tolower(feat_list)
for (feature in feature_list){
feature <- stringr::str_replace_all(feature,"[\\s]+", "")
print(feature)
df1$token_final <- ifelse(grepl(feature,df1$token),feature,df1$token_final)
}
return(df1)
}
})
output$plot1 = renderPlot({
df1 <- Dataset1()
df4 <- df1 %>%
count(token_final, sort = TRUE)%>%
filter(token_final != 'NA')%>% # n is wordcount colname.
mutate(token_final = reorder(token_final, n))
df4$n<-df4$n/sum(df4$n)
df4 %>%
ggplot(aes(token_final, n)) +
geom_bar(stat = "identity", col = "red", fill = "red") +
xlab('Key Words') + ylab('relative frequency') +
coord_flip()
})
output$wordcloud1 = renderPlot({
df1 <- Dataset1()
df4 <- df1 %>%
count(token_final, sort = TRUE)%>%
filter(token_final != 'NA')%>% # n is wordcount colname.
mutate(token_final = reorder(token_final, n))
df4$n<-df4$n/sum(df4$n)
pal <- brewer.pal(8,"Dark2")
set.seed(1234)
df4 %>%
with(wordcloud(token_final, n,scale = c(3.5, 0.25), random.order = FALSE, max.words = 50, colors=pal))
})
output$comments = renderPrint({
comments <- Dataset()
comments
})
})
| /server.R | no_license | Pundareek/Car-Review-Analysis | R | false | false | 3,459 | r | #---------------------------------------------------------------------#
# Assignment AMPBA Batch 13 #
#---------------------------------------------------------------------#
library("shiny")
#Loading necessary files
library(udpipe)
library(lattice)
library(wordcloud)
library(RColorBrewer)
library(sentimentr)
library(dplyr)
library(ggplot2) # for plotting
library(tidytext) # for analyzing text in tidy manner
library(wordcloud)
library(tidyr)
options(shiny.maxRequestSize=30*1024^2)
# Define Server function
shinyServer(function(input, output) {
Dataset <- reactive({
if (is.null(input$file)) { return(NULL) }
else{
reviews_df <- as.data.frame(read.csv(input$file$datapath,stringsAsFactors = FALSE))
english_model = udpipe_load_model(input$file1$datapath)
df1 <- as.data.frame(udpipe_annotate(english_model, x = reviews_df$Reviewer.Comment,parser = "none",trace = FALSE))[,c(4,6)]
df1$token <- tolower(df1$token)
df1$token_final <- 'NA'
feat_list <- as.list(strsplit(input$searchInput, ",")[[1]])
feature_list <- tolower(feat_list)
for (feature in feature_list){
feature <- stringr::str_replace_all(feature,"[\\s]+", "")
print(feature)
df1$token_final <- ifelse(grepl(feature,df1$token),feature,df1$token_final)
}
df2 <- unique(df1$sentence[df1$token_final != 'NA'])
return(df2)
}
})
Dataset1 <- reactive({
if (is.null(input$file)) { return(NULL) }
else{
reviews_df <- as.data.frame(read.csv(input$file$datapath,stringsAsFactors = FALSE))
english_model = udpipe_load_model(input$file1$datapath)
df1 <- as.data.frame(udpipe_annotate(english_model, x = reviews_df$Reviewer.Comment,parser = "none",trace = FALSE))[,c(4,6)]
df1$token <- tolower(df1$token)
df1$token_final <- 'NA'
feat_list <- as.list(strsplit(input$searchInput, ",")[[1]])
feature_list <- tolower(feat_list)
for (feature in feature_list){
feature <- stringr::str_replace_all(feature,"[\\s]+", "")
print(feature)
df1$token_final <- ifelse(grepl(feature,df1$token),feature,df1$token_final)
}
return(df1)
}
})
output$plot1 = renderPlot({
df1 <- Dataset1()
df4 <- df1 %>%
count(token_final, sort = TRUE)%>%
filter(token_final != 'NA')%>% # n is wordcount colname.
mutate(token_final = reorder(token_final, n))
df4$n<-df4$n/sum(df4$n)
df4 %>%
ggplot(aes(token_final, n)) +
geom_bar(stat = "identity", col = "red", fill = "red") +
xlab('Key Words') + ylab('relative frequency') +
coord_flip()
})
output$wordcloud1 = renderPlot({
df1 <- Dataset1()
df4 <- df1 %>%
count(token_final, sort = TRUE)%>%
filter(token_final != 'NA')%>% # n is wordcount colname.
mutate(token_final = reorder(token_final, n))
df4$n<-df4$n/sum(df4$n)
pal <- brewer.pal(8,"Dark2")
set.seed(1234)
df4 %>%
with(wordcloud(token_final, n,scale = c(3.5, 0.25), random.order = FALSE, max.words = 50, colors=pal))
})
output$comments = renderPrint({
comments <- Dataset()
comments
})
})
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=TRUE)
sink('./bone_011.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/bone/bone_011.R | no_license | esbgkannan/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=TRUE)
sink('./bone_011.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spuval.R
\name{spuval}
\alias{spuval}
\title{The function is to calculate the SPU statistics}
\usage{
spuval(U, V, gamma1, gamma2, K, weight = F)
}
\arguments{
\item{U, }{the Score vector}
\item{V, }{the variance-covariance matrix of U}
\item{gamma1, }{power candidates}
\item{gamma2, }{power candidates}
\item{K, }{number of traits}
\item{weight, }{TRUE or FALSE, default is FALSE}
}
\value{
the SPU statistics
}
\description{
The function is to calculate the SPU statistics
}
\author{
Zhiyuan (Jason) Xu, Yiwei Zhang and Wei Pan
}
\references{
Wei Pan, Junghi Kim, Yiwei Zhang, Xiaotong Shen and Peng Wei (2014) A powerful and adaptive
association test for rare variants, Genetics, 197(4), 1081-95
Yiwei Zhang, Zhiyuan Xu, Xiaotong Shen, Wei Pan (2014) Testing for association with multiple
traits in generalized estimation equations, with application to neuroimaging data. Neuroimage.
96:309-25
}
| /man/spuval.Rd | no_license | jasonzyx/APaSPU | R | false | true | 985 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spuval.R
\name{spuval}
\alias{spuval}
\title{The function is to calculate the SPU statistics}
\usage{
spuval(U, V, gamma1, gamma2, K, weight = F)
}
\arguments{
\item{U, }{the Score vector}
\item{V, }{the variance-covariance matrix of U}
\item{gamma1, }{power candidates}
\item{gamma2, }{power candidates}
\item{K, }{number of traits}
\item{weight, }{TRUE or FALSE, default is FALSE}
}
\value{
the SPU statistics
}
\description{
The function is to calculate the SPU statistics
}
\author{
Zhiyuan (Jason) Xu, Yiwei Zhang and Wei Pan
}
\references{
Wei Pan, Junghi Kim, Yiwei Zhang, Xiaotong Shen and Peng Wei (2014) A powerful and adaptive
association test for rare variants, Genetics, 197(4), 1081-95
Yiwei Zhang, Zhiyuan Xu, Xiaotong Shen, Wei Pan (2014) Testing for association with multiple
traits in generalized estimation equations, with application to neuroimaging data. Neuroimage.
96:309-25
}
|
require(data.table)
require(survival)
data("GBSG2", package = "TH.data")
# Score as a function of beta - S(b)
fn_score <- function(dt, stratum=NULL, t, event, treatment, treat_lvl, b){
# Convert to a data.table and create strata column
data <- as.data.table(dt)
if(is.null(stratum)){
data[,strata:=1]
stratum <- "strata"
}
# Set column names to avoid using get
setnames(data, c(t, treatment, event), c("time","treatment","event"))
# Convert treatment variable to indicator
data[,X:=fifelse(treatment==treat_lvl,1,0)]
# Add exp vars
data[,`:=`(E_exp=exp(b*X), E_exp_x=exp(b*X)*X)]
# Remove ties
keys <- c("strata","time")
setorderv(data, keys, c(1,-1))
data_cum <- data[,.(.N, D=sum(event), E_exp_x=sum(E_exp_x), E_exp=sum(E_exp)), by = keys][
,.(strata, time, D, R=cumsum(N), E=cumsum(E_exp_x)/cumsum(E_exp))]
# Scoring calculation
data_sc <- data[data_cum, on = keys, .(time, U=event*(X-E), I=event*(1-i.E)*i.E*(i.R-i.D)/(i.R-1))]
S_b <- data_sc[!is.na(I)][,.(U=sum(U), I=sum(I), sc=sum(U)^2/sum(I))]
return(S_b[,sc])
}
# Cox PH estimate
fn_cox <- function(dt, t, event, treatment){
stime <- with(dt, Surv(time, cens))
form <- as.formula(paste0("stime~",treatment))
fit <- coxph(form, data = dt)
sum_fit <- summary(fit)
return(sum_fit$coefficients[,c("coef", "se(coef)")])
}
# Interpolate to find the intersection with the relevant quantile
# Note: on the sqrt scale for speed
fn_interpolate <- function(g, alpha, dt, stratum=NULL, t, event, treatment, treat_lvl){
S_grid <- sapply(g, function(b) fn_score(dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl, b=b))
s <- spline(g, sqrt(S_grid))
int <- approx(x = s$y, y = s$x, xout = qnorm(1-alpha/2))
return(int$y)
}
# Combines the score function and interpolation to output a score CI
# Note: Assumes a 'wide' 99.7% Wald interval to determine range to search for the bounds
fn_score_ci <- function(alpha=0.05, dt, stratum=NULL, t, event, treatment, treat_lvl){
# Estimate the cox coefficient
est_cox <- fn_cox(dt=dt, t=t, event=event, treatment=treatment)
# Create a 99% Wilcoxon CI for the range to search
grid_b_low <- seq(est_cox["coef"] - 2.96*est_cox["se(coef)"], est_cox["coef"], length.out = 5)
grid_b_up <- seq(est_cox["coef"], est_cox["coef"] + 2.96*est_cox["se(coef)"], length.out = 5)
# Interpolate along the grid to find the intersection with relevant quantile
ci_low <- fn_interpolate(g=grid_b_low, alpha=alpha, dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl)
ci_up <- fn_interpolate(g=grid_b_up, alpha=alpha, dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl)
ci <- cbind(ci_low, ci_up)
colnames(ci) <- paste0(round(c(alpha/2,1-alpha/2) * 100,1)," %")
return(ci)
}
# Example usage
fn_score_ci(alpha=0.05, dt=GBSG2, stratum=NULL, t="time", event="cens", treatment="horTh", treat_lvl="yes")
| /e1_sas_to_r/score_ci.R | no_license | asewak/score_intervals | R | false | false | 2,975 | r | require(data.table)
require(survival)
data("GBSG2", package = "TH.data")
# Score as a function of beta - S(b)
fn_score <- function(dt, stratum=NULL, t, event, treatment, treat_lvl, b){
# Convert to a data.table and create strata column
data <- as.data.table(dt)
if(is.null(stratum)){
data[,strata:=1]
stratum <- "strata"
}
# Set column names to avoid using get
setnames(data, c(t, treatment, event), c("time","treatment","event"))
# Convert treatment variable to indicator
data[,X:=fifelse(treatment==treat_lvl,1,0)]
# Add exp vars
data[,`:=`(E_exp=exp(b*X), E_exp_x=exp(b*X)*X)]
# Remove ties
keys <- c("strata","time")
setorderv(data, keys, c(1,-1))
data_cum <- data[,.(.N, D=sum(event), E_exp_x=sum(E_exp_x), E_exp=sum(E_exp)), by = keys][
,.(strata, time, D, R=cumsum(N), E=cumsum(E_exp_x)/cumsum(E_exp))]
# Scoring calculation
data_sc <- data[data_cum, on = keys, .(time, U=event*(X-E), I=event*(1-i.E)*i.E*(i.R-i.D)/(i.R-1))]
S_b <- data_sc[!is.na(I)][,.(U=sum(U), I=sum(I), sc=sum(U)^2/sum(I))]
return(S_b[,sc])
}
# Cox PH estimate
fn_cox <- function(dt, t, event, treatment){
stime <- with(dt, Surv(time, cens))
form <- as.formula(paste0("stime~",treatment))
fit <- coxph(form, data = dt)
sum_fit <- summary(fit)
return(sum_fit$coefficients[,c("coef", "se(coef)")])
}
# Interpolate to find the intersection with the relevant quantile
# Note: on the sqrt scale for speed
fn_interpolate <- function(g, alpha, dt, stratum=NULL, t, event, treatment, treat_lvl){
S_grid <- sapply(g, function(b) fn_score(dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl, b=b))
s <- spline(g, sqrt(S_grid))
int <- approx(x = s$y, y = s$x, xout = qnorm(1-alpha/2))
return(int$y)
}
# Combines the score function and interpolation to output a score CI
# Note: Assumes a 'wide' 99.7% Wald interval to determine range to search for the bounds
fn_score_ci <- function(alpha=0.05, dt, stratum=NULL, t, event, treatment, treat_lvl){
# Estimate the cox coefficient
est_cox <- fn_cox(dt=dt, t=t, event=event, treatment=treatment)
# Create a 99% Wilcoxon CI for the range to search
grid_b_low <- seq(est_cox["coef"] - 2.96*est_cox["se(coef)"], est_cox["coef"], length.out = 5)
grid_b_up <- seq(est_cox["coef"], est_cox["coef"] + 2.96*est_cox["se(coef)"], length.out = 5)
# Interpolate along the grid to find the intersection with relevant quantile
ci_low <- fn_interpolate(g=grid_b_low, alpha=alpha, dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl)
ci_up <- fn_interpolate(g=grid_b_up, alpha=alpha, dt=dt, stratum=stratum, t=t, event=event, treatment=treatment, treat_lvl=treat_lvl)
ci <- cbind(ci_low, ci_up)
colnames(ci) <- paste0(round(c(alpha/2,1-alpha/2) * 100,1)," %")
return(ci)
}
# Example usage
fn_score_ci(alpha=0.05, dt=GBSG2, stratum=NULL, t="time", event="cens", treatment="horTh", treat_lvl="yes")
|
#
#
#
library('caret')
library('pROC')
source('../functions.r')
# 1) SPECIFY THE DATA FOLDER (WITH THE dataset.rds FILE PRODUCED BY ONE OF Code/preprocessing/extract_*.r SCRIPTS)
datafolder <- '8ch700ms'
dataset <- readRDS(paste('../../Data/', datafolder, '/dataset.rds', sep=''))
# 5) PRODUCE AN OBJECT classifier HERE (USE THE FULL TRAINING SET)
gbmGrid <- expand.grid(interaction.depth=20, n.trees=500, shrinkage=0.05)
trcontrol <- trainControl(method='none')
classifier <- train(class ~., data = dataset$train, 'gbm', trControl=trcontrol, tuneGrid = gbmGrid)
# predict on test dataset and store the file
predicted <- predict(classifier, newdata=dataset$test, type="prob")$positive
result <- data.frame(read.table('../../Results/SampleSubmission.csv', sep = ',', header = T))
result$Prediction = predicted
write.table(result, paste('../../Results/subX_', datafolder, '_', mlmethod, '.csv', sep=''), sep = ',', quote = F, row.names = F, col.names = T)
| /Code/models/sub22_8ch700ms_gbm.r | no_license | KnightofDawn/Kaggle-BCI-Challenge | R | false | false | 964 | r | #
#
#
library('caret')
library('pROC')
source('../functions.r')
# 1) SPECIFY THE DATA FOLDER (WITH THE dataset.rds FILE PRODUCED BY ONE OF Code/preprocessing/extract_*.r SCRIPTS)
datafolder <- '8ch700ms'
dataset <- readRDS(paste('../../Data/', datafolder, '/dataset.rds', sep=''))
# 5) PRODUCE AN OBJECT classifier HERE (USE THE FULL TRAINING SET)
gbmGrid <- expand.grid(interaction.depth=20, n.trees=500, shrinkage=0.05)
trcontrol <- trainControl(method='none')
classifier <- train(class ~., data = dataset$train, 'gbm', trControl=trcontrol, tuneGrid = gbmGrid)
# predict on test dataset and store the file
predicted <- predict(classifier, newdata=dataset$test, type="prob")$positive
result <- data.frame(read.table('../../Results/SampleSubmission.csv', sep = ',', header = T))
result$Prediction = predicted
write.table(result, paste('../../Results/subX_', datafolder, '_', mlmethod, '.csv', sep=''), sep = ',', quote = F, row.names = F, col.names = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isInstalled.R
\name{is_installed}
\alias{is_installed}
\title{Is Installed.}
\usage{
is_installed(mypkg)
}
\arguments{
\item{mypkg}{character vector of one or more packages}
}
\value{
\code{TRUE} or \code{FALSE}, indicating whether package is installed.
}
\description{
Checks whether a package is installed.
}
\details{
[INSERT].
}
\examples{
is_installed("nlme")
}
\seealso{
Other packages:
\code{\link{getDependencies}()},
\code{\link{load_or_install}()}
}
\concept{packages}
| /man/is_installed.Rd | permissive | DevPsyLab/petersenlab | R | false | true | 558 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isInstalled.R
\name{is_installed}
\alias{is_installed}
\title{Is Installed.}
\usage{
is_installed(mypkg)
}
\arguments{
\item{mypkg}{character vector of one or more packages}
}
\value{
\code{TRUE} or \code{FALSE}, indicating whether package is installed.
}
\description{
Checks whether a package is installed.
}
\details{
[INSERT].
}
\examples{
is_installed("nlme")
}
\seealso{
Other packages:
\code{\link{getDependencies}()},
\code{\link{load_or_install}()}
}
\concept{packages}
|
#' Format the body of a tibble
#'
#' @description
#' For easier customization, the formatting of a tibble is split
#' into three components: header, body, and footer.
#' The `tbl_format_body()` method is responsible for formatting the body
#' of a tibble.
#'
#' Override this method if you need to change the appearance of all parts
#' of the body.
#' If you only need to change the appearance of a single data type,
#' override [vctrs::vec_ptype_abbr()] and [pillar_shaft()] for this data type.
#'
#' @inheritParams ellipsis::dots_empty
#' @param x A tibble-like object.
#' @param setup A setup object returned from [tbl_format_setup()].
#'
#' @return
#' A character vector.
#'
#' @export
tbl_format_body <- function(x, setup, ...) {
check_dots_empty()
UseMethod("tbl_format_body")
}
#' @export
tbl_format_body.tbl <- function(x, setup, ...) {
force(setup)
setup$body
}
#' @export
tbl_format_body.pillar_tbl_format_setup <- function(x, ...) {
new_vertical(c(
cli::style_bold("<tbl_format_body(setup)>"),
tbl_format_body(x$x, setup = x)
))
}
| /R/tbl-format-body.R | permissive | Tubbz-alt/pillar | R | false | false | 1,067 | r | #' Format the body of a tibble
#'
#' @description
#' For easier customization, the formatting of a tibble is split
#' into three components: header, body, and footer.
#' The `tbl_format_body()` method is responsible for formatting the body
#' of a tibble.
#'
#' Override this method if you need to change the appearance of all parts
#' of the body.
#' If you only need to change the appearance of a single data type,
#' override [vctrs::vec_ptype_abbr()] and [pillar_shaft()] for this data type.
#'
#' @inheritParams ellipsis::dots_empty
#' @param x A tibble-like object.
#' @param setup A setup object returned from [tbl_format_setup()].
#'
#' @return
#' A character vector.
#'
#' @export
tbl_format_body <- function(x, setup, ...) {
check_dots_empty()
UseMethod("tbl_format_body")
}
#' @export
tbl_format_body.tbl <- function(x, setup, ...) {
force(setup)
setup$body
}
#' @export
tbl_format_body.pillar_tbl_format_setup <- function(x, ...) {
new_vertical(c(
cli::style_bold("<tbl_format_body(setup)>"),
tbl_format_body(x$x, setup = x)
))
}
|
\name{FlInv.drclass}
\alias{FlInv.drclass}
\title{FlInv.drclass}
\description{No description, see information about the package \code{\link{fitDRC}} or in the code of it.}
| /man/FlInv.drclass.Rd | no_license | cran/fitDRC | R | false | false | 172 | rd | \name{FlInv.drclass}
\alias{FlInv.drclass}
\title{FlInv.drclass}
\description{No description, see information about the package \code{\link{fitDRC}} or in the code of it.}
|
\name{V.ratio}
\alias{V.ratio}
\title{ Calculates the variance-ratio as suggested by Schluter (1984) }
\description{
A of species association is provided by the ratio of the variance in total species number (or total density of individuals) in samples to the sum of the variances of the individual species.
}
\usage{
V.ratio(web)
}
\arguments{
\item{web}{ A matrix with pollinators in columns and plants in rows. For biogeographical applications: rows are islands (or sites). }
}
\details{
This is a rather straight-forward index, which is described and evaluated extensively in Schluter (1984). He also warns against overinterpretation of the value. In principle, V-ratios larger than 1 indicate positive, smaller than 1 negative associations. Ecologically, competition can lead to small or large values, depending on their exact effects (see discussion in the Schluter paper).
}
\value{
Returns the V-ratio, i.e. a single value representing the ratio of variance in species number and variance in individual numbers within species.
}
\references{
Gotelli, N.J. and Rohde, K. (2002) Co-occurrence of ectoparasites of marine fishes: a null model analysis. \emph{Ecology Letters} \bold{5}, 86--94
Schluter, D. (1984) A variance test for detecting species associations, with some example applications. \emph{Ecology} \bold{65}, 998--1005
}
\author{ Carsten F. Dormann}
\note{
Any quantitative matrix is first transformed into a binary (presence-absence) matrix!
Do not interpret without first reading the paper! It's worth it! See also applications in other studies, such as Gotelli and Rohde (2002).
}
\seealso{ \code{\link{C.score}} for another measure of species associations. }
\examples{
data(Safariland)
V.ratio(Safariland)
}
\keyword{ package }
| /bipartite/man/V.ratio.Rd | no_license | biometry/bipartite | R | false | false | 1,765 | rd | \name{V.ratio}
\alias{V.ratio}
\title{ Calculates the variance-ratio as suggested by Schluter (1984) }
\description{
A of species association is provided by the ratio of the variance in total species number (or total density of individuals) in samples to the sum of the variances of the individual species.
}
\usage{
V.ratio(web)
}
\arguments{
\item{web}{ A matrix with pollinators in columns and plants in rows. For biogeographical applications: rows are islands (or sites). }
}
\details{
This is a rather straight-forward index, which is described and evaluated extensively in Schluter (1984). He also warns against overinterpretation of the value. In principle, V-ratios larger than 1 indicate positive, smaller than 1 negative associations. Ecologically, competition can lead to small or large values, depending on their exact effects (see discussion in the Schluter paper).
}
\value{
Returns the V-ratio, i.e. a single value representing the ratio of variance in species number and variance in individual numbers within species.
}
\references{
Gotelli, N.J. and Rohde, K. (2002) Co-occurrence of ectoparasites of marine fishes: a null model analysis. \emph{Ecology Letters} \bold{5}, 86--94
Schluter, D. (1984) A variance test for detecting species associations, with some example applications. \emph{Ecology} \bold{65}, 998--1005
}
\author{ Carsten F. Dormann}
\note{
Any quantitative matrix is first transformed into a binary (presence-absence) matrix!
Do not interpret without first reading the paper! It's worth it! See also applications in other studies, such as Gotelli and Rohde (2002).
}
\seealso{ \code{\link{C.score}} for another measure of species associations. }
\examples{
data(Safariland)
V.ratio(Safariland)
}
\keyword{ package }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_train_ind.R
\name{get_train_ind}
\alias{get_train_ind}
\title{Get Indicies of Training Sample}
\usage{
get_train_ind(ids, population, pred_cat)
}
\arguments{
\item{ids}{data.table with keep variables}
\item{population}{character, name of population}
\item{pred_cat}{character, name of prediction category}
}
\value{
integer vector of the row indicies of observations in the training
set
}
\description{
Get Indicies of Training Sample
}
| /man/get_train_ind.Rd | permissive | evanjflack/cfo.behavioral | R | false | true | 522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_train_ind.R
\name{get_train_ind}
\alias{get_train_ind}
\title{Get Indicies of Training Sample}
\usage{
get_train_ind(ids, population, pred_cat)
}
\arguments{
\item{ids}{data.table with keep variables}
\item{population}{character, name of population}
\item{pred_cat}{character, name of prediction category}
}
\value{
integer vector of the row indicies of observations in the training
set
}
\description{
Get Indicies of Training Sample
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/personalizeevents_operations.R
\name{personalizeevents_put_events}
\alias{personalizeevents_put_events}
\title{Records user interaction event data}
\usage{
personalizeevents_put_events(trackingId, userId, sessionId, eventList)
}
\arguments{
\item{trackingId}{[required] The tracking ID for the event. The ID is generated by a call to the
\href{https://docs.aws.amazon.com/personalize/latest/dg/API_CreateEventTracker.html}{CreateEventTracker}
API.}
\item{userId}{The user associated with the event.}
\item{sessionId}{[required] The session ID associated with the user's visit. Your application
generates the sessionId when a user first visits your website or uses
your application. Amazon Personalize uses the sessionId to associate
events with the user before they log in. For more information see
event-record-api.}
\item{eventList}{[required] A list of event data from the session.}
}
\description{
Records user interaction event data. For more information see
event-record-api.
}
\section{Request syntax}{
\preformatted{svc$put_events(
trackingId = "string",
userId = "string",
sessionId = "string",
eventList = list(
list(
eventId = "string",
eventType = "string",
eventValue = 123.0,
itemId = "string",
properties = "string",
sentAt = as.POSIXct(
"2015-01-01"
),
recommendationId = "string",
impression = list(
"string"
)
)
)
)
}
}
\keyword{internal}
| /paws/man/personalizeevents_put_events.Rd | permissive | sanchezvivi/paws | R | false | true | 1,533 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/personalizeevents_operations.R
\name{personalizeevents_put_events}
\alias{personalizeevents_put_events}
\title{Records user interaction event data}
\usage{
personalizeevents_put_events(trackingId, userId, sessionId, eventList)
}
\arguments{
\item{trackingId}{[required] The tracking ID for the event. The ID is generated by a call to the
\href{https://docs.aws.amazon.com/personalize/latest/dg/API_CreateEventTracker.html}{CreateEventTracker}
API.}
\item{userId}{The user associated with the event.}
\item{sessionId}{[required] The session ID associated with the user's visit. Your application
generates the sessionId when a user first visits your website or uses
your application. Amazon Personalize uses the sessionId to associate
events with the user before they log in. For more information see
event-record-api.}
\item{eventList}{[required] A list of event data from the session.}
}
\description{
Records user interaction event data. For more information see
event-record-api.
}
\section{Request syntax}{
\preformatted{svc$put_events(
trackingId = "string",
userId = "string",
sessionId = "string",
eventList = list(
list(
eventId = "string",
eventType = "string",
eventValue = 123.0,
itemId = "string",
properties = "string",
sentAt = as.POSIXct(
"2015-01-01"
),
recommendationId = "string",
impression = list(
"string"
)
)
)
)
}
}
\keyword{internal}
|
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix())
{
## Initialize the inverse property
m <- NULL
## Method used to set the matrix
set <- function(y)
{
x <<- y
m <<- NULL
}
## Method used to get the matrix
get <- function()
{
x
}
## Method used to set the inverse of the matrix
setInverse <- function(inverse)
{
m <<- inverse
}
## Method used to get the inverse of the matrix
getInverse <- function()
{
m
}
## Return list of the former 2 methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by makeCacheMatrix above.
## If inverse has already been calculated, then it should retrieve the inverse from the cache.
cacheSolve <- function(x)
{
## return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Return the inverse if it is already set
if(!is.null(m))
{
message("Attaining cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse using matrix multiplication
m <- solve(data)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
| /ProgrammingAssignment2.R | no_license | pkpraveen2006/R_Assignment2 | R | false | false | 1,321 | r | ## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix())
{
## Initialize the inverse property
m <- NULL
## Method used to set the matrix
set <- function(y)
{
x <<- y
m <<- NULL
}
## Method used to get the matrix
get <- function()
{
x
}
## Method used to set the inverse of the matrix
setInverse <- function(inverse)
{
m <<- inverse
}
## Method used to get the inverse of the matrix
getInverse <- function()
{
m
}
## Return list of the former 2 methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by makeCacheMatrix above.
## If inverse has already been calculated, then it should retrieve the inverse from the cache.
cacheSolve <- function(x)
{
## return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Return the inverse if it is already set
if(!is.null(m))
{
message("Attaining cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse using matrix multiplication
m <- solve(data)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
|
##########################################################################################
# Aim: Simulates data from simulation Scenrio I #
# #
# Required packages: MASS and splines from CRAN #
# #
# Author: Dimitris Rizopoulos #
##########################################################################################
library("MASS")
library("splines")
n <- 1000 # number of subjects
K <- 15 # number of planned repeated measurements per subject, per outcome
t.max <- 15 # maximum follow-up time
################################################
# parameters for the linear mixed effects model
betas <- c("Group0" = 3.4554, "Group1" = 2.9470,
"Group0:Time1" = 1.0027, "Group1:Time1" = 0.9709,
"Group0:Time2" = 4.1290, "Group1:Time2" = 4.0893,
"Group0:Time3" = 6.2182, "Group1:Time3" = 6.6909)
sigma.y <- 0.564 # measurement error standard deviation
# parameters for the survival model
gammas <- c("(Intercept)" = -5.7296, "Group" = 0.48) # coefficients for baseline covariates
alpha <- 0.4672 # association parameter
phi <- 0.9518 # shape for the Weibull baseline hazard
meanCens0 <- 10 # mean of the uniform censoring distribution for group 0
meanCens1 <- 14 # mean of the uniform censoring distribution for group 1
D <- matrix(c(0.5686193, 0.2126076, 0.1547322, 0.4354939,
0.2126076, 1.6721086, 2.3299235, 2.1926166,
0.1547322, 2.329923, 5.0230656, 2.8873934,
0.4354939, 2.1926166, 2.8873934, 4.0286104), 4, 4)
D <- (D + t(D)) / 2
################################################
Bkn <- c(0, 13) # boundary knots of the natural-spline time basis
kn <- c(2.5, 6) # interior knots
# at which time points longitudinal measurements are supposed to be taken
# (baseline 0 plus K-1 sorted uniform draws per subject)
times <- c(replicate(n, c(0, sort(runif(K-1, 0, t.max)))))
group <- rep(0:1, each = n/2) # group indicator, i.e., '0' placebo, '1' active treatment
DF <- data.frame(year = times, group = factor(rep(group, each = K)))
# design matrices for the longitudinal measurement model
X <- model.matrix(~ 0 + group + group:ns(year, knots = kn, Boundary.knots = Bkn), data = DF)
Z <- model.matrix(~ ns(year, knots = kn, Boundary.knots = Bkn), data = DF)
# design matrix for the survival model
W <- cbind("(Intercept)" = 1, "Group" = group)
################################################
# simulate random effects
b <- mvrnorm(n, rep(0, nrow(D)), D)
# simulate longitudinal responses
id <- rep(1:n, each = K)
eta.y <- as.vector(X %*% betas + rowSums(Z * b[id, ]))
y <- rnorm(n * K, eta.y, sigma.y)
# simulate event times
eta.t <- as.vector(W %*% gammas)
# Inverted survival function on the log scale for subject i:
# returns -log S(t | b_i) + log(u). uniroot() finds the t for which this
# equals zero, i.e. S(t) = u, which yields a simulated event time.
# Reads the script-level objects group, kn, Bkn, betas, b, phi, eta.t
# and alpha defined above.
invS <- function (t, u, i) {
  # hazard of subject i evaluated at a vector of time points s
  h <- function (s) {
    ctrl <- 1 - group[i]
    trt <- group[i]
    basis <- ns(s, knots = kn, Boundary.knots = Bkn)
    fixed_mat <- cbind(ctrl, trt,
                       ctrl * basis[, 1], trt * basis[, 1],
                       ctrl * basis[, 2], trt * basis[, 2],
                       ctrl * basis[, 3], trt * basis[, 3])
    rand_mat <- cbind(1, basis)
    subj_b <- b[rep(i, nrow(rand_mat)), ]
    # subject-specific longitudinal trajectory at times s
    traj <- as.vector(fixed_mat %*% betas + rowSums(rand_mat * subj_b))
    # Weibull baseline hazard times exp(linear predictor + association term)
    exp(log(phi) + (phi - 1) * log(s) + eta.t[i] + alpha * traj)
  }
  integrate(h, lower = 0, upper = t)$value + log(u)
}
u <- runif(n) # uniform draws: solve S(T_i) = u_i numerically below
trueTimes <- numeric(n)
for (i in 1:n) {
Up <- 50 # initial upper bound of the root-search interval
tries <- 5 # widen the interval at most 5 times before giving up
Root <- try(uniroot(invS, interval = c(1e-05, Up), u = u[i], i = i)$root, TRUE)
while(inherits(Root, "try-error") && tries > 0) {
tries <- tries - 1
Up <- Up + 200
Root <- try(uniroot(invS, interval = c(1e-05, Up), u = u[i], i = i)$root, TRUE)
}
# NA flags subjects for whom no root was bracketed; they are dropped below
trueTimes[i] <- if (!inherits(Root, "try-error")) Root else NA
}
# drop subjects for whom the event-time equation had no root
na.ind <- !is.na(trueTimes)
trueTimes <- trueTimes[na.ind]
W <- W[na.ind, , drop = FALSE]
group <- group[na.ind]
long.na.ind <- rep(na.ind, each = K)
y <- y[long.na.ind]
X <- X[long.na.ind, , drop = FALSE]
Z <- Z[long.na.ind, , drop = FALSE]
DF <- DF[long.na.ind, ]
n <- length(trueTimes)
# simulate censoring times from a uniform distribution, with the mean
# depending on group (meanCens0 / meanCens1), and calculate the observed
# event times, i.e., min(true event times, censoring times)
Ctimes <- numeric(n)
Ctimes[group == 0] <- runif(sum(group == 0), 0, 2 * meanCens0)
Ctimes[group == 1] <- runif(sum(group == 1), 0, 2 * meanCens1)
Time <- pmin(trueTimes, Ctimes)
event <- as.numeric(trueTimes <= Ctimes) # event indicator
################################################
# keep the nonmissing cases, i.e., drop the longitudinal measurements
# that were taken after the observed event time for each subject.
ind <- times[long.na.ind] <= rep(Time, each = K)
y <- y[ind]
X <- X[ind, , drop = FALSE]
Z <- Z[ind, , drop = FALSE]
id <- id[long.na.ind][ind]
id <- match(id, unique(id)) # renumber subject ids consecutively
dat <- DF[ind, ]
dat$id <- id
dat$y <- y
dat$Time <- Time[id]
dat$event <- event[id]
names(dat) <- c("time", "group", "id", "y", "Time", "event")
#summary(tapply(id, id, length))
#table(event)
#n
#mean(event)
#summary(Time)
# hold out 500 subjects as a test set
set <- sample(unique(id), 500)
train_data <- dat[!dat$id %in% set, ]
train_data$id <- match(train_data$id, unique(train_data$id))
test_data <- dat[dat$id %in% set, ]
test_data$id <- match(test_data$id, unique(test_data$id))
# NOTE(review): `set` holds ids renumbered after dropping failed subjects;
# b[sort(set), ] only aligns with those subjects when na.ind dropped
# nobody -- TODO confirm.
trueValues <- list(betas = betas, phi = phi, gammas = gammas, alpha = alpha,
b_test = b[sort(set), ], Bkn = Bkn, kn = kn)
# delete all unused objects
rm(y, X, Z, id, n, na.ind, long.na.ind, ind, Ctimes, Time, event, W,
betas, sigma.y, gammas, alpha, eta.t, eta.y, phi, t.max,
trueTimes, u, Root, invS, D, b, K, set, dat,
times, group, i, tries, Up, Bkn, kn, DF, meanCens0, meanCens1)
| /Simulation/SimulateI.R | no_license | drizopoulos/jm_and_lm | R | false | false | 5,752 | r | ##########################################################################################
# Aim: Simulates data from simulation Scenrio I #
# #
# Required packages: MASS and splines from CRAN #
# #
# Author: Dimitris Rizopoulos #
##########################################################################################
library("MASS")
library("splines")
n <- 1000 # number of subjects
K <- 15 # number of planned repeated measurements per subject, per outcome
t.max <- 15 # maximum follow-up time
################################################
# parameters for the linear mixed effects model
betas <- c("Group0" = 3.4554, "Group1" = 2.9470,
"Group0:Time1" = 1.0027, "Group1:Time1" = 0.9709,
"Group0:Time2" = 4.1290, "Group1:Time2" = 4.0893,
"Group0:Time3" = 6.2182, "Group1:Time3" = 6.6909)
sigma.y <- 0.564 # measurement error standard deviation
# parameters for the survival model
gammas <- c("(Intercept)" = -5.7296, "Group" = 0.48) # coefficients for baseline covariates
alpha <- 0.4672 # association parameter
phi <- 0.9518 # shape for the Weibull baseline hazard
meanCens0 <- 10 # mean of the uniform censoring distribution for group 0
meanCens1 <- 14 # mean of the uniform censoring distribution for group 1
# random-effects covariance matrix; the typed entries [2,3] and [3,2]
# differ slightly, hence the explicit symmetrisation below
D <- matrix(c(0.5686193, 0.2126076, 0.1547322, 0.4354939,
0.2126076, 1.6721086, 2.3299235, 2.1926166,
0.1547322, 2.329923, 5.0230656, 2.8873934,
0.4354939, 2.1926166, 2.8873934, 4.0286104), 4, 4)
D <- (D + t(D)) / 2 # enforce exact symmetry
################################################
Bkn <- c(0, 13) # boundary knots of the natural-spline time basis
kn <- c(2.5, 6) # interior knots
# at which time points longitudinal measurements are supposed to be taken
times <- c(replicate(n, c(0, sort(runif(K-1, 0, t.max)))))
group <- rep(0:1, each = n/2) # group indicator, i.e., '0' placebo, '1' active treatment
DF <- data.frame(year = times, group = factor(rep(group, each = K)))
# design matrices for the longitudinal measurement model
X <- model.matrix(~ 0 + group + group:ns(year, knots = kn, Boundary.knots = Bkn), data = DF)
Z <- model.matrix(~ ns(year, knots = kn, Boundary.knots = Bkn), data = DF)
# design matrix for the survival model
W <- cbind("(Intercept)" = 1, "Group" = group)
################################################
# simulate random effects
b <- mvrnorm(n, rep(0, nrow(D)), D)
# simulate longitudinal responses
id <- rep(1:n, each = K)
eta.y <- as.vector(X %*% betas + rowSums(Z * b[id, ]))
y <- rnorm(n * K, eta.y, sigma.y)
# simulate event times
eta.t <- as.vector(W %*% gammas)
# Inverted survival function: returns -log S(t | b_i) + log(u); uniroot()
# below solves invS(t, u, i) = 0, i.e. S(t) = u, to simulate event times.
invS <- function (t, u, i) {
h <- function (s) {
group0 <- 1 - group[i]
group1 <- group[i]
BS <- ns(s, knots = kn, Boundary.knots = Bkn)
XX <- cbind(group0, group1, group0*BS[, 1], group1*BS[, 1],
group0*BS[, 2], group1*BS[, 2], group0*BS[, 3], group1*BS[, 3])
ZZ <- cbind(1, BS)
f1 <- as.vector(XX %*% betas + rowSums(ZZ * b[rep(i, nrow(ZZ)), ]))
# Weibull baseline hazard times exp(linear predictor + association term)
exp(log(phi) + (phi - 1) * log(s) + eta.t[i] + f1 * alpha)
}
integrate(h, lower = 0, upper = t)$value + log(u)
}
u <- runif(n) # uniform draws: solve S(T_i) = u_i numerically below
trueTimes <- numeric(n)
for (i in 1:n) {
Up <- 50 # initial upper bound of the root-search interval
tries <- 5 # widen the interval at most 5 times before giving up
Root <- try(uniroot(invS, interval = c(1e-05, Up), u = u[i], i = i)$root, TRUE)
while(inherits(Root, "try-error") && tries > 0) {
tries <- tries - 1
Up <- Up + 200
Root <- try(uniroot(invS, interval = c(1e-05, Up), u = u[i], i = i)$root, TRUE)
}
trueTimes[i] <- if (!inherits(Root, "try-error")) Root else NA
}
# drop subjects for whom the event-time equation had no root
na.ind <- !is.na(trueTimes)
trueTimes <- trueTimes[na.ind]
W <- W[na.ind, , drop = FALSE]
group <- group[na.ind]
long.na.ind <- rep(na.ind, each = K)
y <- y[long.na.ind]
X <- X[long.na.ind, , drop = FALSE]
Z <- Z[long.na.ind, , drop = FALSE]
DF <- DF[long.na.ind, ]
n <- length(trueTimes)
# simulate censoring times from a uniform distribution, with the mean
# depending on group (meanCens0 / meanCens1), and calculate the observed
# event times, i.e., min(true event times, censoring times)
Ctimes <- numeric(n)
Ctimes[group == 0] <- runif(sum(group == 0), 0, 2 * meanCens0)
Ctimes[group == 1] <- runif(sum(group == 1), 0, 2 * meanCens1)
Time <- pmin(trueTimes, Ctimes)
event <- as.numeric(trueTimes <= Ctimes) # event indicator
################################################
# keep the nonmissing cases, i.e., drop the longitudinal measurements
# that were taken after the observed event time for each subject.
ind <- times[long.na.ind] <= rep(Time, each = K)
y <- y[ind]
X <- X[ind, , drop = FALSE]
Z <- Z[ind, , drop = FALSE]
id <- id[long.na.ind][ind]
id <- match(id, unique(id)) # renumber subject ids consecutively
dat <- DF[ind, ]
dat$id <- id
dat$y <- y
dat$Time <- Time[id]
dat$event <- event[id]
names(dat) <- c("time", "group", "id", "y", "Time", "event")
#summary(tapply(id, id, length))
#table(event)
#n
#mean(event)
#summary(Time)
# hold out 500 subjects as a test set
set <- sample(unique(id), 500)
train_data <- dat[!dat$id %in% set, ]
train_data$id <- match(train_data$id, unique(train_data$id))
test_data <- dat[dat$id %in% set, ]
test_data$id <- match(test_data$id, unique(test_data$id))
# NOTE(review): `set` holds ids renumbered after dropping failed subjects;
# b[sort(set), ] only aligns with those subjects when na.ind dropped
# nobody -- TODO confirm.
trueValues <- list(betas = betas, phi = phi, gammas = gammas, alpha = alpha,
b_test = b[sort(set), ], Bkn = Bkn, kn = kn)
# delete all unused objects
rm(y, X, Z, id, n, na.ind, long.na.ind, ind, Ctimes, Time, event, W,
betas, sigma.y, gammas, alpha, eta.t, eta.y, phi, t.max,
trueTimes, u, Root, invS, D, b, K, set, dat,
times, group, i, tries, Up, Bkn, kn, DF, meanCens0, meanCens1)
|
#' get_bbox
#'
#' Converts a string of latitudes and longitudes into a square matrix to be
#' passed as a \code{bbox} argument (to \code{\link{extract_osm_objects}},
#' \code{\link{osm_basemap}}, or \code{\link{make_osm_map}}).
#'
#' @param latlon A vector of (longitude, latitude, longitude, latitude) values.
#' @return A 2-by-2 matrix of 4 elements with columns of min and max values, and
#' rows of x and y values.
#' @export
#'
#' @examples
#' bbox <- get_bbox (c (-0.15, 51.5, -0.1, 51.52))
get_bbox <- function (latlon) {
    # --- input validation ------------------------------------------------
    if (missing (latlon))
        stop ("latlon must be supplied")
    if (!is.numeric (latlon))
        stop ("latlon is not numeric")
    if (length (latlon) < 4)
        stop ("latlon must have length = 4")
    if (length (latlon) > 4) {
        warning ("latlon has length > 4; only first 4 elements will be used")
        latlon <- latlon [1:4]
    }
    # ensure (xmin, ymin, xmax, ymax) ordering by swapping reversed pairs
    if (latlon [1] > latlon [3]) latlon [c (1, 3)] <- latlon [c (3, 1)]
    if (latlon [2] > latlon [4]) latlon [c (2, 4)] <- latlon [c (4, 2)]
    # build the 2-by-2 bbox (rows x/y, columns min/max) directly with
    # dimnames; this replaces the former matrix -> data.frame -> as.matrix
    # round-trip and produces an identical result without the copies
    matrix (latlon, nrow = 2, ncol = 2,
            dimnames = list (c ("x", "y"), c ("min", "max")))
}
| /R/get-bbox.R | no_license | cran/osmplotr | R | false | false | 1,207 | r | #' get_bbox
#'
#' Converts a string of latitudes and longitudes into a square matrix to be
#' passed as a \code{bbox} argument (to \code{\link{extract_osm_objects}},
#' \code{\link{osm_basemap}}, or \code{\link{make_osm_map}}).
#'
#' @param latlon A vector of (longitude, latitude, longitude, latitude) values.
#' @return A 2-by-2 matrix of 4 elements with columns of min and max values, and
#' rows of x and y values.
#' @export
#'
#' @examples
#' bbox <- get_bbox (c (-0.15, 51.5, -0.1, 51.52))
get_bbox <- function (latlon) {
    # --- input validation ------------------------------------------------
    if (missing (latlon))
        stop ("latlon must be supplied")
    if (!is.numeric (latlon))
        stop ("latlon is not numeric")
    if (length (latlon) < 4)
        stop ("latlon must have length = 4")
    if (length (latlon) > 4) {
        warning ("latlon has length > 4; only first 4 elements will be used")
        latlon <- latlon [1:4]
    }
    # ensure (xmin, ymin, xmax, ymax) ordering by swapping reversed pairs
    if (latlon [1] > latlon [3]) latlon [c (1, 3)] <- latlon [c (3, 1)]
    if (latlon [2] > latlon [4]) latlon [c (2, 4)] <- latlon [c (4, 2)]
    # build the 2-by-2 bbox (rows x/y, columns min/max) directly with
    # dimnames; this replaces the former matrix -> data.frame -> as.matrix
    # round-trip and produces an identical result without the copies
    matrix (latlon, nrow = 2, ncol = 2,
            dimnames = list (c ("x", "y"), c ("min", "max")))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphs.R
\name{list_edges}
\alias{list_edges}
\title{Edge lists}
\usage{
list_edges(graphLs)
}
\arguments{
\item{graphLs}{List of graph objects from which to list edges.}
}
\value{
A list of edge lists, one per input graph.
}
\description{
Edge lists
}
| /man/list_edges.Rd | no_license | robitalec/ScaleInMultilayerNetworks | R | false | true | 241 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphs.R
\name{list_edges}
\alias{list_edges}
\title{Edge lists}
\usage{
list_edges(graphLs)
}
\arguments{
\item{graphLs}{List of graph objects from which to list edges.}
}
\value{
A list of edge lists, one per input graph.
}
\description{
Edge lists
}
|
context("Test des fonctions de correction de la prédiction")
# Shared fixture: one sector observed over two consecutive months
# (Feb/Mar 2015), with counts, new-outcome counts and their proportions.
data_secteur_semestre <- data.frame(
periode = c(as.Date("2015-02-01"), as.Date("2015-03-01")),
count = c(100, 150),
count_new_outcome = c(10, 30),
prop_new_outcome = c(0.1, 0.2)
)
testthat::test_that("add_missing_first_month_aux ajoute bien la ligne comme attendue", {
  # The imputed 2015-01-01 row carries the mean of the observed counts
  # (125 = mean(100, 150)) and outcomes (20 = mean(10, 30)).
  imputed_row <- data.frame(
    periode = as.Date("2015-01-01"),
    count = 125,
    count_new_outcome = 20,
    prop_new_outcome = 20 / 125
  )
  expected <- bind_rows(imputed_row, data_secteur_semestre)
  expect_equal(add_missing_first_month_aux(data_secteur_semestre), expected)
})
testthat::test_that("add_missing_first_month ajoute bien la ligne comme attendue", {
  # Cross two sectors with two semesters
  stacked <- bind_rows(
    data_secteur_semestre,
    data_secteur_semestre,
    data_secteur_semestre,
    data_secteur_semestre
  )
  df <- bind_cols(
    stacked,
    n_month_period = rep(c(as.Date("2015-01-01"), as.Date("2015-04-01")), 4),
    secteur = rep(c("secteur2", "secteur1"), each = 4),
    nom_secteur = rep(c("secteur2", "secteur1"), each = 4)
  )
  actual <- add_missing_first_month(df)
  # first period gains one imputed row per sector (6 vs 4 rows overall)
  expect_true(all(table(actual$n_month_period) == c(6, 4)))
  expect_true(all(table(actual$n_month_period, actual$secteur) == matrix(c(3, 2, 3, 2), nrow = 2)))
})
testthat::test_that("compute_sectorial_correction works as expected", {
  result <- compute_sectorial_correction(get_test_task(), generate_failure_data())
  # the correction column must exist and match the reference values
  expect_true("correction_sector" %in% names(result))
  expect_equal(result$correction_sector, c(1.7559, 0.7533), tolerance = 1e-3)
})
| /tests/testthat/test-post_sector_correction.R | permissive | signaux-faibles/rsignauxfaibles | R | false | false | 1,614 | r | context("Test des fonctions de correction de la prédiction")
# Shared fixture: one sector observed over two consecutive months.
data_secteur_semestre <- data.frame(
periode = c(as.Date("2015-02-01"), as.Date("2015-03-01")),
count = c(100, 150),
count_new_outcome = c(10, 30),
prop_new_outcome = c(0.1, 0.2)
)
# The imputed 2015-01-01 row carries the mean of the observed counts
# (125 = mean(100, 150)) and outcomes (20 = mean(10, 30)).
testthat::test_that("add_missing_first_month_aux ajoute bien la ligne comme attendue", {
actual <- add_missing_first_month_aux(data_secteur_semestre)
expected <- bind_rows(
data.frame(periode = as.Date("2015-01-01"), count = 125, count_new_outcome = 20, prop_new_outcome = 20 / 125),
data_secteur_semestre
)
expect_equal(actual, expected)
})
testthat::test_that("add_missing_first_month ajoute bien la ligne comme attendue", {
# Cross two sectors with two semesters
df <- bind_cols(
bind_rows(
data_secteur_semestre,
data_secteur_semestre,
data_secteur_semestre,
data_secteur_semestre
),
n_month_period = rep(c(as.Date("2015-01-01"), as.Date("2015-04-01")), 4),
secteur = rep(c("secteur2", "secteur1"), each = 4),
nom_secteur = rep(c("secteur2", "secteur1"), each = 4)
)
actual <- add_missing_first_month(df)
# first period gains one imputed row per sector (6 vs 4 rows overall)
expect_true(all(table(actual$n_month_period) == c(6, 4)))
expect_true(all(table(actual$n_month_period, actual$secteur) == matrix(c(3, 2, 3, 2), nrow = 2)))
})
testthat::test_that("compute_sectorial_correction works as expected", {
actual <- compute_sectorial_correction(get_test_task(), generate_failure_data())
expect_true("correction_sector" %in% names(actual))
expected <- c(1.7559, 0.7533)
expect_equal(actual$correction_sector, expected, tolerance = 1e-3)
})
|
#' Random Projection Model
#'
#' In mathematics and statistics, random projection
#' is a technique used to reduce the dimensionality
#' of a set of points which lie in Euclidean space.
#' Random projection methods are powerful methods
#' known for their simplicity and less erroneous
#' output compared with other methods. According to
#' experimental results, random projection preserve
#' distances well, but empirical results are sparse.
#' They have been applied to many natural language
#' tasks under the name of random indexing. The core
#' idea behind random projection is given in the
#' Johnson-Lindenstrauss lemma which states that if
#' points in a vector space are of sufficiently high
#' dimension, then they may be projected into a suitable
#' lower-dimensional space in a way which approximately
#' preserves the distances between the points.
#'
#' @param text An object inheriting of class \code{document} or \code{corpus}.
#' @param ... Any other options to pass to the model
#' \url{https://zgornel.github.io/StringAnalysis.jl/dev/examples/#Dimensionality-reduction-1}.
#'
#' @examples
#' \dontrun{
#' # Use stringanalysis backend!
#' init_stringanalysis()
#'
#' # build document
#' doc1 <- string_document("First document.")
#' doc2 <- string_document("Second document.")
#'
#' crps <- corpus(doc1, doc2)
#' dtm <- document_term_matrix(crps)
#' model <- rp_model(dtm)
#' }
#'
#' @name rp_model
rp_model <- function(text, ...) UseMethod("rp_model")
#' @rdname rp_model
#' @method rp_model dtm
rp_model.dtm <- function(text, ...){
# has_sa() presumably checks the StringAnalysis.jl backend is initialised
# (see init_stringanalysis() in the examples above) -- TODO confirm.
assert_that(has_sa())
# delegates to the Julia-side "RPModel" for a document-term matrix
call_julia("RPModel", text, ...)
}
#' @rdname rp_model
#' @method rp_model corpus
rp_model.corpus <- function(text, ...){
  # has_sa() presumably checks the StringAnalysis.jl backend is initialised
  # (see init_stringanalysis() in the examples above) -- TODO confirm.
  assert_that(has_sa())
  # delegates to the Julia-side "rp" for a corpus input
  call_julia("rp", text, ...)
}
#'
#' In mathematics and statistics, random projection
#' is a technique used to reduce the dimensionality
#' of a set of points which lie in Euclidean space.
#' Random projection methods are powerful methods
#' known for their simplicity and less erroneous
#' output compared with other methods. According to
#' experimental results, random projection preserve
#' distances well, but empirical results are sparse.
#' They have been applied to many natural language
#' tasks under the name of random indexing. The core
#' idea behind random projection is given in the
#' Johnson-Lindenstrauss lemma which states that if
#' points in a vector space are of sufficiently high
#' dimension, then they may be projected into a suitable
#' lower-dimensional space in a way which approximately
#' preserves the distances between the points.
#'
#' @param text An object inheriting of class \code{document} or \code{corpus}.
#' @param ... Any other options to pass to the model
#' \url{https://zgornel.github.io/StringAnalysis.jl/dev/examples/#Dimensionality-reduction-1}.
#'
#' @examples
#' \dontrun{
#' # Use stringanalysis backend!
#' init_stringanalysis()
#'
#' # build document
#' doc1 <- string_document("First document.")
#' doc2 <- string_document("Second document.")
#'
#' crps <- corpus(doc1, doc2)
#' dtm <- document_term_matrix(crps)
#' model <- rp_model(dtm)
#' }
#'
#' @name rp_model
rp_model <- function(text, ...) UseMethod("rp_model")
#' @rdname rp_model
#' @method rp_model dtm
rp_model.dtm <- function(text, ...){
# has_sa() presumably checks the StringAnalysis.jl backend is initialised
# (see init_stringanalysis() in the examples above) -- TODO confirm.
assert_that(has_sa())
# delegates to the Julia-side "RPModel" for a document-term matrix
call_julia("RPModel", text, ...)
}
#' @rdname rp_model
#' @method rp_model corpus
rp_model.corpus <- function(text, ...){
# has_sa() presumably checks the StringAnalysis.jl backend is initialised.
assert_that(has_sa())
# delegates to the Julia-side "rp" for a corpus input
call_julia("rp", text, ...)
}
#PRODUKTIVITET OG BESKÆFTIGELSE - EU KLEMS DATA
{
#EU KLEMS is an industry level panel dataset covering OECD countries since 1970,
#it contains detailed data for 32 industries in both the market and non-market economy
#Methods used in the article "Robocalypse now?":
#-They Focus on non-farm employment, and omit the poorly measured Private household sector, and Public administration,
# Defense and Extraterritorial organizations, which are almost entirely non-market sectors.
#They operationalize the measurement of EMPLOYMENT and PRODUCTIVITY as follows.
#The primary EMPLOYMENT measure is the number of persons engaged in work, though we have also experimented with excluding the self-employed and obtain similar results.
#The primary LABOR PRODUCTIVITY measure is real gross output per worker, because measurement of value-added outside of manufacturing is typically somewhat speculative
#- They also present a set of models using value-added per worker and value added based total factor productivity.
#- These alternative measures yield qualitatively similar findings, although total factor productivity growth seems to have the most strongly positive effect on employment.
}
# Libraries ---------------------------------------------------------------
library(readr)
library(readxl)
library(reshape2)
library(fpp2)
library(tidyverse)
library(xts)
library(plm)
library(ggplot2)
library(ggthemes)
library(dplyr)
# Interactive scratch block: pick ONE country / measure combination and run
# the corresponding lines by hand before calling the functions below.
# NOTE(review): each assignment overwrites the previous one, so sourcing the
# whole block only keeps the last (COMP/VA/LAB, measure_1="COMP") setup.
{
country="DK"
country="FR"
dataset_1 <- read_excel("Data/FR_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
dataset_2 <- read_excel("Data/FR_output_17ii.xlsx", sheet = "GO_QI")
dataset_1 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "EMP")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO_QI") # Gross output, constant prices (2010 = 100)
measure_1="EMP"
measure_2="GO_QI"
dataset_1 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "COMP")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "VA")
dataset_3 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "LAB")
measure_1="COMP"
measure_2="VA"
measure_3="LAB"
}
# Cumulative growth rate: element y of the result is the compound growth
# prod(1 + x[1:y]) - 1 over the first y period-on-period growth rates in x.
CGR = function(x){
# cumprod() is the exact vectorised form of the former
# sapply(1:length(x), function(y) prod(1 + x[1:y]) - 1) loop; it also
# returns numeric(0) for empty input, where 1:length(x) would have
# produced the bogus index vector c(1, 0).
cumprod(1 + x) - 1
}
# Compute labour-share series for one country from wide COMP / VA / LAB
# sheets (columns: desc, code, one column per year). Keeps only the
# economy-wide "TOT" code and derives:
#   LS  = LAB / VA    LSe = COMP / VA
# plus their period-on-period changes and cumulative growth (CGR, in %).
# measure_1..3 are the prefixes stripped from the year column names.
# Returns a plm::pdata.frame indexed by (code, year).
func_labshare <- function(dataset_1, dataset_2, dataset_3, country, measure_1="COMP", measure_2="VA", measure_3="LAB") {
colnames(dataset_1) <- gsub(measure_1, "", colnames(dataset_1))
colnames(dataset_2) <- gsub(measure_2, "", colnames(dataset_2))
colnames(dataset_3) <- gsub(measure_3, "", colnames(dataset_3))
# wide -> long: one row per (desc, code, year)
dataset_1<- melt(dataset_1,
id.vars=c("desc", "code"),
variable.name="year",
value.name= measure_1)
dataset_2 <- melt(dataset_2,
id.vars=c("desc", "code"),
variable.name="year",
value.name= measure_2)
dataset_3 <- melt(dataset_3,
id.vars=c("desc", "code"),
variable.name="year",
value.name= measure_3)
data = merge(dataset_1, dataset_2, by=c("desc","code", "year"), all.x = TRUE)
data = merge(data, dataset_3, by=c("desc","code", "year"), all.x = TRUE)
data <- na.omit(data)
# NOTE(review): result discarded -- leftover debugging call
sapply(data, class)
data$year <- as.numeric(as.character(data[,"year"])) # "year" comes out of melt() as a factor
# NOTE(review): data[,2] is fragile positional indexing of the code column
data$code =gsub("-", "t", data[,2])
data$country = country
data$LS = data$LAB/data$VA
data$LSe = data$COMP/data$VA
data = data %>% filter(code=="TOT")
#data$indeksLS = (data$LS/0.6880021)*100
#data$indeksLSe = (data$LSe/0.5954859)*100
pdata = pdata.frame(data, index = c("code", "year"))
# Cumulative growth (shows the same as the index series)
{
pdata$LS_diff = diff(pdata$LS, lag = 1, shift = "time")
pdata$LSe_diff = diff(pdata$LSe, lag = 1, shift = "time")
pdata$LS_changes <- pdata$LS_diff/lag(pdata$LS, k = 1, shift = "time")
pdata$LSe_changes <- pdata$LSe_diff/lag(pdata$LSe, k = 1, shift = "time")
pdata$LS_CGR = order_by(pdata$year, CGR(pdata$LS_changes[-1])*100)
pdata$LSe_CGR = order_by(pdata$year, CGR(pdata$LSe_changes[-1])*100)
#pdata = pdata.frame(pdata, index = c("code", "year"))
# shift the CGR series one period forward and zero-fill the first period
pdata$LS_CGR <- lag(pdata$LS_CGR, k=1, shift="time")
pdata$LS_CGR = ifelse(is.na(pdata$LS_CGR)==T,0,pdata$LS_CGR)
pdata$LSe_CGR <- lag(pdata$LSe_CGR, k=1, shift="time")
pdata$LSe_CGR = ifelse(is.na(pdata$LSe_CGR)==T,0,pdata$LSe_CGR)
}
pdata
}
# Build a country-level panel of employment (EMP) and gross output (GO) by
# industry from wide EU KLEMS sheets, attach a sector grouping
# (method == "AS" uses the Autor-Salomons five-sector scheme, otherwise an
# 8-sector grouping), append per-sector and country totals, and derive
# (log) employment and labour-productivity growth series.
# Emma = TRUE returns the reduced column set; FALSE also computes the
# cumulative-growth (CGR) productivity series.
# Returns a plm::pdata.frame indexed by (code, year).
# NOTE(review): value.name for dataset_2 is hard-coded to "GO"; measure_2
# is only used to strip the column-name prefix.
func_empprod <- function(dataset_1, dataset_2, country, measure_1="EMP", measure_2="GO", Emma, method) {
colnames(dataset_1) <- gsub(measure_1, "", colnames(dataset_1))
colnames(dataset_2) <- gsub(measure_2, "", colnames(dataset_2))
dataset_1<- melt(dataset_1,
# ID variables - all the variables to keep but not split apart on
id.vars=c("desc", "code"),
# The source columns (not necessary here) # measure.vars=c("1970","1971",...),
# Name of the destination column that will identify the original column that the measurement came from
variable.name="year",
value.name= measure_1)
dataset_2 <- melt(dataset_2,
id.vars=c("desc", "code"),
variable.name="year",
value.name= "GO")
data = merge(dataset_1, dataset_2, by=c("desc","code", "year"), all.x = TRUE)
data <- na.omit(data)
#sapply(data, class)
data$year <- as.numeric(as.character(data[,"year"])) # "year" comes out of melt() as a factor
data$GO <- as.numeric(as.character(data[,"GO"]))
data$code =gsub("-", "t", data[,2])
data$country = country
if (method=="AS") {
# Autor-Salomons industries:
data$sel_industries <-factor(ifelse( data$code %in% c("TOT", "MARKT", "A","C","G","H","J","OtU","O","RtS","T","U"), 0,1))
data$branche <- ifelse(data$code %in% c("B", "DtE", "F"), "b1",
ifelse(data$code %in% c("10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33"), "b2", # couldn't we just use C, Total Manufacturing?
ifelse(data$code %in% c("P","Q","R", "S"), "b3",
ifelse(data$code %in% c("53", "58t60", "61", "62t63", "K", "MtN"), "b4",
ifelse(data$code %in% c("45", "46", "47", "49t52", "I", "L"), "b5",
"b0")))))
data$branche_desc <- ifelse(data$branche=="b1","Mining, utilities, and construction",
ifelse(data$branche=="b2","Manufacturing",
ifelse(data$branche=="b3","Education and health services",
ifelse(data$branche=="b4","High-tech services",
ifelse(data$branche=="b5","Low-tech services",
"Not relevant"
)))))
} else {
# Sectors, 10:
data$sel_industries <-factor(ifelse( data$code %in% c("A","B", "DtE", "F","10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33",
"53","58t60", "61", "62t63", "K", "MtN","45", "46", "47", "49t52", "I", "L"), 1,0)) # everything except O, P, Q (should RtS, T and U also be removed?)
data$branche <- ifelse(data$code=="A", "b1",
ifelse(data$code %in% c("B","10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33", "DtE"), "b2",
ifelse(data$code=="F", "b3",
ifelse(data$code %in% c("45", "46", "47","49t52","53", "I"), "b4",
ifelse(data$code %in% c("58t60", "61", "62t63"), "b5",
ifelse(data$code=="K", "b6",
ifelse(data$code=="L", "b7",
ifelse(data$code=="MtN", "b8",
# ifelse(data$code %in% c("O","P","Q"), "b9",
#ifelse(data$code %in% c("R","S","T","U"), "b10",
"b0"))))))))
data$branche_desc <- ifelse(data$branche=="b1","Landbrug, skovbrug og fiskeri",
ifelse(data$branche=="b2","Industri, råstofindvinding og forsyningsvirksomhed",
ifelse(data$branche=="b3","Bygge og anlæg",
ifelse(data$branche=="b4","Handel og transport mv.",
ifelse(data$branche=="b5","Information og kommunikation",
ifelse(data$branche=="b6", "Finansiering og forsikring",
ifelse(data$branche=="b7","Ejendomshandel og udlejning",
ifelse(data$branche=="b8","Erhvervsservice",
# ifelse(data$branche=="b9","Offentlig administration, undervisning og sundhed",
# ifelse(data$branche=="b10","Kultur, fritid og anden service",
"Ikke relevant"))))))))
}
# sector/industry totals
t4 <- data %>% filter(branche!="b0") %>% group_by(year, branche, branche_desc) %>% summarize(EMP=sum(EMP),GO=sum(GO))
data2 <- data.frame(desc= t4$branche_desc,
code=t4$branche,
year=t4$year,
EMP=t4$EMP,
GO=t4$GO,
country=country,
sel_industries=0,
branche="b-tot",
branche_desc="Branche Total")
# country total computed when certain sectors are excluded (e.g. agriculture, public sector)
# The following is used when our selected sectors differ from "TOTAL INDUSTRIES"
b <- data2 %>% filter(code=="b1")
b_2 <- data2 %>% filter(code=="b2")
b_3 <- data2 %>% filter(code=="b3")
b_4 <- data2 %>% filter(code=="b4")
b_5 <- data2 %>% filter(code=="b5")
if (method!="AS") {
b_6 <- data2 %>% filter(code=="b6")
b_7 <- data2 %>% filter(code=="b7")
b_8 <- data2 %>% filter(code=="b8")
#b_9 <- data2 %>% filter(code=="b9")
#b_10 <- data2 %>% filter(code=="b10")
b$EMP = b$EMP + b_2$EMP + b_3$EMP + b_4$EMP + b_5$EMP + b_6$EMP + b_7$EMP + b_8$EMP #+ b_10$EMP + b_9$EMP
b$GO = b$GO + b_2$GO + b_3$GO + b_4$GO + b_5$GO + b_6$GO + b_7$GO + b_8$GO #+ b_10$GO + b_9$GO
b$desc = "TOTAL INDUSTRIES-MunkNielsen"
b$code = "TOT_MN"
b$branche = "TOT"
} else {
b$EMP = b$EMP + b_2$EMP + b_3$EMP + b_4$EMP + b_5$EMP
b$GO = b$GO + b_2$GO + b_3$GO + b_4$GO + b_5$GO
b$desc = "TOTAL INDUSTRIES-AutorSalomons"
b$code = "TOT_AS"
b$branche = "TOT" # easier when both variants are called "TOT" in branche for filtering
}
b$branche_desc = "Lande Total"
data_fin <- rbind(data, data2, b)
pdata = pdata.frame(data_fin, index = c("code", "year"))
# Coding of variables:
pdata$emp_log <- log(pdata$EMP)
pdata$emp_diff = diff(pdata$EMP, lag = 1, shift = "time")
pdata$emp_logdiff = diff(pdata$emp_log, lag = 1, shift = "time")
pdata$emp_changes <- pdata$emp_diff/lag(pdata$EMP, k = 1, shift = "time")*100
pdata$emp_logchanges = diff(pdata$emp_log, lag = 1, shift = "time")*100
pdata$prod <- pdata$GO/pdata$EMP # labour productivity: gross output per worker
pdata$prod_diff <- diff(pdata$prod, lag = 1, shift = "time")
pdata$prod_changes <- pdata$prod_diff/lag(pdata$prod, k = 1, shift = "time")*100
pdata$prod_changes2 <- pdata$prod_diff/lag(pdata$prod, k = 1, shift = "time")
pdata$prod_log <- log(pdata$prod)
pdata$prod_logchanges<- diff(pdata$prod_log, lag = 1, shift = "time")*100
pdata$prod_logdiff<- diff(pdata$prod_log, lag = 1, shift = "time")
# NOTE(review): Emma==F / Emma==T use the reassignable aliases F/T;
# prefer FALSE/TRUE (left unchanged here).
if (Emma==F) {
pdata = pdata %>% group_by(code) %>% mutate(prod_CGR_logchanges = order_by(year,cumprod(1+prod_logdiff[-1])*100)) # method 1
pdata = pdata %>% group_by(code) %>% mutate(prod_logCGR = order_by(year, CGR(prod_logdiff[-1])*100)) # method 2
pdata = pdata %>% group_by(code) %>% mutate(prod_CGR= order_by(year, CGR(prod_changes2[-1])*100))
#df = pdata %>% group_by(code) %>% mutate(cumsum = cumsum())
pdata <- pdata %>% select(year, country, code, desc, sel_industries, branche, branche_desc, EMP, emp_logchanges, GO, prod, prod_logchanges,prod_changes, prod_CGR, prod_logCGR, prod_CGR_logchanges) %>%
filter(code!="b0",code!="s0")
pdata = pdata.frame(pdata, index = c("code", "year"))
pdata$prod_logCGR <- lag(pdata$prod_logCGR, k=1, shift="time")
pdata$prod_CGR <- lag(pdata$prod_CGR, k=1, shift="time")
}
if (Emma==T) {
pdata <- pdata %>% select(year, country, code, desc, sel_industries, branche, branche_desc, EMP, emp_logchanges, GO, prod_logchanges) %>%
filter(code!="b0")
#pdata = pdata.frame(pdata, index = c("code", "year"))
}
pdata
}
func_regpanel <- function(dataset_1, type) {
if (type==1) {
tot = dataset_1 %>% filter(branche=="TOT")
tot$EMP_tot = tot$EMP
tot$GO_tot = tot$GO
tot <- tot %>% select(year, EMP_tot, GO_tot)
ind = dataset_1 %>% filter(sel_industries==1)
b <- dataset_1 %>% filter(branche=="b-tot")
b$branche = b$code
b$prod_logchanges_b = b$prod_logchanges
b$EMP_b = b$EMP
b$GO_b = b$GO
b = b %>% select(year, branche, EMP_b, GO_b)
ind = merge(ind, b, by=c("year", "branche"), all.x = TRUE)
#----------- the following is used if beta2 is defined as "sector minus industry" --------------
# Split the sector-level frame `b` into one small frame per sector (b1..b5),
# renaming sector employment and gross output with a sector suffix so the
# frames can be cbind'ed side by side below. Only b1 keeps `year` (it becomes
# the merge key); the cbind relies on all sector frames having identical row
# order — assumed guaranteed upstream, TODO confirm.
b1 = b %>% filter(branche=="b1") %>% mutate(EMP_b1=EMP_b) %>% mutate(GO_b1=GO_b) %>% select(year, EMP_b1,GO_b1)
b2 = b %>% filter(branche=="b2") %>% mutate(EMP_b2=EMP_b) %>% mutate(GO_b2=GO_b) %>% select(EMP_b2, GO_b2)
b3 = b %>% filter(branche=="b3") %>% mutate(EMP_b3=EMP_b) %>% mutate(GO_b3=GO_b) %>% select(EMP_b3, GO_b3)
b4 = b %>% filter(branche=="b4") %>% mutate(EMP_b4=EMP_b) %>% mutate(GO_b4=GO_b) %>% select(EMP_b4, GO_b4)
b5 = b %>% filter(branche=="b5") %>% mutate(EMP_b5=EMP_b) %>% mutate(GO_b5=GO_b) %>% select(EMP_b5, GO_b5)
# Alternative variant that also carries sector productivity growth (disabled):
#b1 = b %>% filter(branche=="b1") %>% mutate(prod_logchanges_b1=prod_logchanges_b) %>% mutate(EMP_b1=EMP_b) %>% mutate(GO_b1=GO_b) %>% select(year, prod_logchanges_b1, EMP_b1,GO_b1)
#b2 = b %>% filter(branche=="b2") %>% mutate(prod_logchanges_b2=prod_logchanges_b) %>% mutate(EMP_b2=EMP_b) %>% mutate(GO_b2=GO_b) %>% select(prod_logchanges_b2, EMP_b2, GO_b2)
#b3 = b %>% filter(branche=="b3") %>% mutate(prod_logchanges_b3=prod_logchanges_b) %>% mutate(EMP_b3=EMP_b) %>% mutate(GO_b3=GO_b) %>% select(prod_logchanges_b3, EMP_b3, GO_b3)
#b4 = b %>% filter(branche=="b4") %>% mutate(prod_logchanges_b4=prod_logchanges_b) %>% mutate(EMP_b4=EMP_b) %>% mutate(GO_b4=GO_b) %>% select(prod_logchanges_b4, EMP_b4, GO_b4)
#b5 = b %>% filter(branche=="b5") %>% mutate(prod_logchanges_b5=prod_logchanges_b) %>% mutate(EMP_b5=EMP_b) %>% mutate(GO_b5=GO_b) %>% select(prod_logchanges_b5, EMP_b5, GO_b5)
#---------- the following is used if sector productivities serve as the beta1 variable ----
# b1 = b %>% filter(branche=="b1") %>% mutate(prod_logchanges_b1=prod_logchanges_b) %>% select(year, prod_logchanges_b1)
#b2 = b %>% filter(branche=="b2") %>% mutate(prod_logchanges_b2=prod_logchanges_b) %>% select(prod_logchanges_b2)
#b3 = b %>% filter(branche=="b3") %>% mutate(prod_logchanges_b3=prod_logchanges_b) %>% select(prod_logchanges_b3)
#b4 = b %>% filter(branche=="b4") %>% mutate(prod_logchanges_b4=prod_logchanges_b) %>% select(prod_logchanges_b4)
#b5 = b %>% filter(branche=="b5") %>% mutate(prod_logchanges_b5=prod_logchanges_b) %>% select(prod_logchanges_b5)
#-------------------------------------------------------------------------------------------------------
# Number of distinct sector codes present; datasets with 8 sectors additionally
# get b6-b8 appended (productivity-growth columns only, per the lines below).
test = b %>% count(branche) %>% nrow
if (test==8) {
b6 = b %>% filter(branche=="b6") %>% mutate(prod_logchanges_b6=prod_logchanges_b) %>% select(prod_logchanges_b6)
b7 = b %>% filter(branche=="b7") %>% mutate(prod_logchanges_b7=prod_logchanges_b) %>% select(prod_logchanges_b7)
b8 = b %>% filter(branche=="b8") %>% mutate(prod_logchanges_b8=prod_logchanges_b) %>% select(prod_logchanges_b8)
b = cbind(b1,b2,b3,b4,b5,b6,b7,b8)
} else {
b = cbind(b1,b2,b3,b4,b5)
}
# Attach sector-wide (b) and economy-wide (tot) aggregates to the industry panel.
ind = merge(ind, b, by=c("year"), all.x = TRUE)
ind = merge(ind, tot, by=c("year"), all.x = TRUE)
# Employment weights: industry share and sector share of total employment.
ind$wgt_i = ind$EMP/ind$EMP_tot
ind$wgt_b = ind$EMP_b/ind$EMP_tot
# Declare the panel structure (entity = code, time = year) for plm's diff()/lag().
ind = pdata.frame(ind, index = c("code", "year"))
# Beta2 variable and lags, micro + macro: log labour-productivity growth (%) of
# the whole economy EXCLUDING the industry itself ("C without I").
ind$dLP_CwoI = diff(log((ind$GO_tot-ind$GO)/(ind$EMP_tot-ind$EMP)), lag = 1, shift = "time")*100
ind$dLP_CwoI_lag1 = lag(ind$dLP_CwoI, k = 1, shift = "time")
ind$dLP_CwoI_lag2 = lag(ind$dLP_CwoI, k = 2, shift = "time")
ind$dLP_CwoI_lag3 = lag(ind$dLP_CwoI, k = 3, shift = "time")
# Beta2 variable and lags, sector spillover: within the industry's own sector
# the industry is netted out ("B without I"); for all other rows the plain
# sector productivity growth is used.
# NOTE(review): ifelse() drops pseries attributes of the diff() results —
# presumably harmless here since the values are assigned back into the
# pdata.frame, but confirm.
ind$dLP_BwoI_b1 = ifelse(ind$branche=="b1", diff(log((ind$GO_b1-ind$GO)/(ind$EMP_b1-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b1/ind$EMP_b1), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b2 = ifelse(ind$branche=="b2", diff(log((ind$GO_b2-ind$GO)/(ind$EMP_b2-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b2/ind$EMP_b2), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b3 = ifelse(ind$branche=="b3", diff(log((ind$GO_b3-ind$GO)/(ind$EMP_b3-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b3/ind$EMP_b3), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b4 = ifelse(ind$branche=="b4", diff(log((ind$GO_b4-ind$GO)/(ind$EMP_b4-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b4/ind$EMP_b4), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b5 = ifelse(ind$branche=="b5", diff(log((ind$GO_b5-ind$GO)/(ind$EMP_b5-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b5/ind$EMP_b5), lag = 1, shift = "time")*100)
# Up to three time-shifted lags of each sector spillover variable.
ind$dLP_BwoI_b1_lag1 = lag(ind$dLP_BwoI_b1, k = 1, shift = "time")
ind$dLP_BwoI_b1_lag2 = lag(ind$dLP_BwoI_b1, k = 2, shift = "time")
ind$dLP_BwoI_b1_lag3 = lag(ind$dLP_BwoI_b1, k = 3, shift = "time")
ind$dLP_BwoI_b2_lag1 = lag(ind$dLP_BwoI_b2, k = 1, shift = "time")
ind$dLP_BwoI_b2_lag2 = lag(ind$dLP_BwoI_b2, k = 2, shift = "time")
ind$dLP_BwoI_b2_lag3 = lag(ind$dLP_BwoI_b2, k = 3, shift = "time")
ind$dLP_BwoI_b3_lag1 = lag(ind$dLP_BwoI_b3, k = 1, shift = "time")
ind$dLP_BwoI_b3_lag2 = lag(ind$dLP_BwoI_b3, k = 2, shift = "time")
ind$dLP_BwoI_b3_lag3 = lag(ind$dLP_BwoI_b3, k = 3, shift = "time")
ind$dLP_BwoI_b4_lag1 = lag(ind$dLP_BwoI_b4, k = 1, shift = "time")
ind$dLP_BwoI_b4_lag2 = lag(ind$dLP_BwoI_b4, k = 2, shift = "time")
ind$dLP_BwoI_b4_lag3 = lag(ind$dLP_BwoI_b4, k = 3, shift = "time")
ind$dLP_BwoI_b5_lag1 = lag(ind$dLP_BwoI_b5, k = 1, shift = "time")
ind$dLP_BwoI_b5_lag2 = lag(ind$dLP_BwoI_b5, k = 2, shift = "time")
ind$dLP_BwoI_b5_lag3 = lag(ind$dLP_BwoI_b5, k = 3, shift = "time")
#beta1 variable, sectoral spillover:
# Drop rows made incomplete by the diffs/lags above, then build the beta1
# regressors: industry productivity growth interacted with sector dummies
# (zero outside the own sector), plus 0/1 indicators marking nonzero entries.
ind = na.omit(ind)
ind$dLP_I_b1 = ifelse(ind$branche=="b1", ind$prod_logchanges, 0)
ind$dLP_I_b1_dum = ifelse(ind$dLP_I_b1==0, 0, 1)
ind$dLP_I_b2 = ifelse(ind$branche=="b2", ind$prod_logchanges, 0)
ind$dLP_I_b2_dum = ifelse(ind$dLP_I_b2==0, 0, 1)
ind$dLP_I_b3 = ifelse(ind$branche=="b3", ind$prod_logchanges, 0)
ind$dLP_I_b3_dum = ifelse(ind$dLP_I_b3==0, 0, 1)
ind$dLP_I_b4 = ifelse(ind$branche=="b4", ind$prod_logchanges, 0)
ind$dLP_I_b4_dum = ifelse(ind$dLP_I_b4==0, 0, 1)
ind$dLP_I_b5 = ifelse(ind$branche=="b5", ind$prod_logchanges, 0)
ind$dLP_I_b5_dum = ifelse(ind$dLP_I_b5==0, 0, 1)
#what to do with zeros ?
#In most applications, removing the records with zeros would be wrong for two reasons:
#(1) it reduces the amount of data, thereby increasing the uncertainties and (2) it could bias the results.
#One practical method to cope with such data is described in my answer at stats.stackexchange.com/a/1795: create "dummy" variables to indicate the zeros
#How should I transform non-negative data including zeros? https://stats.stackexchange.com/questions/1444/how-should-i-transform-non-negative-data-including-zeros
# the following is used if beta2 is defined as "total minus sector" (disabled)
#ind$prod_logchanges_c1 = ifelse(ind$branche=="b1", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c2 = ifelse(ind$branche=="b2", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c3 = ifelse(ind$branche=="b3", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c4 = ifelse(ind$branche=="b4", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c5 = ifelse(ind$branche=="b5", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
# Return value of the type==1 branch: the industry regression panel.
ind
} else if (type==2) {
# type==2: descriptive panel of sector employment shares and their cumulative
# percentage-point changes.
b = dataset_1 %>% filter(branche=="b-tot")
sumEMP <- b %>% group_by(year) %>% summarize(sum_EMP=sum(EMP))
b = merge(b, sumEMP, by=c("year"), all.x = TRUE)
b$share_EMP = (b$EMP/b$sum_EMP)*100
b = pdata.frame(b, index = c("code", "year"))
b$share_EMP_ppchange = diff(b$share_EMP, lag = 1, shift = "time")
# First observation per entity has no change; recode NA to 0 so cumsum works.
# NOTE(review): `is.na(...)==T` is redundant and uses reassignable `T`; plain
# is.na(b$share_EMP_ppchange) would be idiomatic.
b$share_EMP_ppchange = ifelse(is.na(b$share_EMP_ppchange)==T,0,b$share_EMP_ppchange)
b = b %>% group_by(code) %>% mutate(cumsum_EMP = cumsum(share_EMP_ppchange))
# Convert the year key to a Date (Jan 1 of each year) for plotting.
b$year = lubridate::ymd(b$year, truncated = 2L)
b
} else if (type==3) {
# type==3: economy-wide totals only.
# NOTE(review): `tot1` is assigned but never used or returned — confirm it can
# be removed.
tot = dataset_1 %>% filter(code=="TOT")
tot1 = dataset_1 %>% filter(branche=="TOT")
#tot$year = lubridate::ymd(tot$year, truncated = 2L)
tot
} else {
# Unknown `type`: return NA rather than erroring.
NA
}
}
#key_table <- read_csv("EUklems-data-master/key_table.csv")
# Country data -----------------------------------------------------
# nogle af industrierne (eller subkategorierne) findes ikke i alle lande, fx findes 45,46,47 ikke i Frankrig før 1995, selvom overkategorien G findes
# Danmark
# Denmark ---------------------------------------------------------------------
# Each country section below reads the EU KLEMS output sheets, builds the
# employment/productivity panel (func_empprod), derives the regression panels
# (func_regpanel: 1 = industry, 2 = sector descriptive, 3 = economy totals)
# and the labour-share series (func_labshare).
DK_emp <- read_excel("Data/DK_output_17ii.xlsx", sheet = "EMP")   # Number of persons engaged (thousands)
#DK_go <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO")    # Gross output at current basic prices (millions of national currency)
DK_gop <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO_QI") # Gross output, volume series (GO_QI)
DK_comp <- read_excel("Data/DK_output_17ii.xlsx", sheet = "COMP") # Compensation of employees (millions of national currency)
DK_lab <- read_excel("Data/DK_output_17ii.xlsx", sheet = "LAB")   # Labour compensation (millions of national currency)
DK_va <- read_excel("Data/DK_output_17ii.xlsx", sheet = "VA")     # Gross value added at current basic prices (= labour + capital compensation)
# Employment and productivity
# NOTE(review): the result of the FALSE variant is immediately overwritten by
# the TRUE/"AS" variant; the first call is kept in case func_empprod has side
# effects — confirm and delete it if the function is pure.
DK_ep <- func_empprod(DK_emp, DK_gop, "DK", "EMP", "GO_QI", FALSE)
DK_ep <- func_empprod(DK_emp, DK_gop, "DK", "EMP", "GO_QI", TRUE, "AS")
# PLM analysis panels
DK_ind <- func_regpanel(DK_ep, 1)
DK_tot <- func_regpanel(DK_ep, 3)
# Descriptive sector panel
DK_b <- func_regpanel(DK_ep, 2)
# Labour share (year converted to a Date for plotting)
DK_ls <- func_labshare(DK_comp, DK_va, DK_lab, "DK", "COMP", "VA", "LAB")
DK_ls$year <- lubridate::ymd(DK_ls$year, truncated = 2L)
# USA -------------------------------------------------------------------------
US_emp <- read_excel("Data/US_output_17ii.xlsx", sheet = "EMP")   # Number of persons engaged (thousands)
#US_go <- read_excel("Data/US_output_17ii.xlsx", sheet = "GO")    # Gross output at current basic prices (millions of national currency)
US_gop <- read_excel("Data/US_output_17ii.xlsx", sheet = "GO_QI") # Gross output, volume series (GO_QI)
US_comp <- read_excel("Data/US_output_17ii.xlsx", sheet = "COMP") # Compensation of employees (millions of national currency)
US_lab <- read_excel("Data/US_output_17ii.xlsx", sheet = "LAB")   # Labour compensation (millions of national currency)
US_va <- read_excel("Data/US_output_17ii.xlsx", sheet = "VA")     # Gross value added at current basic prices (= labour + capital compensation)
# Labour share
US_ls <- func_labshare(US_comp, US_va, US_lab, "US", "COMP", "VA", "LAB")
US_ls$year <- lubridate::ymd(US_ls$year, truncated = 2L)
# Employment and productivity (first call overwritten — see DK note)
US_ep <- func_empprod(US_emp, US_gop, "US", "EMP", "GO_QI", FALSE)
US_ep <- func_empprod(US_emp, US_gop, "US", "EMP", "GO_QI", TRUE, "AS")
# PLM analysis panels
US_ind <- func_regpanel(US_ep, 1)
US_tot <- func_regpanel(US_ep, 3)
# Descriptive sector panel
US_b <- func_regpanel(US_ep, 2)
# UK --------------------------------------------------------------------------
# (constant-price gross output not available for the UK)
UK_emp <- read_excel("Data/UK_output_17ii.xlsx", sheet = "EMP")   # Number of persons engaged (thousands)
#UK_go <- read_excel("Data/UK_output_17ii.xlsx", sheet = "GO")    # Gross output at current basic prices (millions of national currency)
UK_gop <- read_excel("Data/UK_output_17ii.xlsx", sheet = "GO_QI") # Gross output, volume series (GO_QI)
UK_comp <- read_excel("Data/UK_output_17ii.xlsx", sheet = "COMP") # Compensation of employees (millions of national currency)
UK_lab <- read_excel("Data/UK_output_17ii.xlsx", sheet = "LAB")   # Labour compensation (millions of national currency)
UK_va <- read_excel("Data/UK_output_17ii.xlsx", sheet = "VA")     # Gross value added at current basic prices (= labour + capital compensation)
# Labour share
UK_ls <- func_labshare(UK_comp, UK_va, UK_lab, "UK", "COMP", "VA", "LAB")
UK_ls$year <- lubridate::ymd(UK_ls$year, truncated = 2L)
# Employment and productivity (first call overwritten — see DK note)
UK_ep <- func_empprod(UK_emp, UK_gop, "UK", "EMP", "GO_QI", FALSE)
UK_ep <- func_empprod(UK_emp, UK_gop, "UK", "EMP", "GO_QI", TRUE, "AS")
# PLM analysis panels
UK_ind <- func_regpanel(UK_ep, 1)
UK_tot <- func_regpanel(UK_ep, 3)
# Descriptive sector panel
UK_b <- func_regpanel(UK_ep, 2)
# Tyskland
DE_emp <- read_excel("Data/DE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#DE_go <- read_excel("Data/DE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
DE_gop <- read_excel("Data/DE_output_17ii.xlsx", sheet = "GO_QI") #Gross Output at current basic prices (in millions of national currency)
DE_comp <- read_excel("Data/DE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
DE_lab <- read_excel("Data/DE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
DE_va <- read_excel("Data/DE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
DE_ls = func_labshare(DE_comp, DE_va, DE_lab, "DE", "COMP", "VA", "LAB")
DE_ls$year = lubridate::ymd(DE_ls$year, truncated = 2L)
#Employment and productivty
DE_ep = func_empprod(DE_emp, DE_gop,"DE", "EMP", "GO_QI", F)
DE_ep = func_empprod(DE_emp, DE_gop,"DE", "EMP", "GO_QI", T, "AS")
#PLM analyse
DE_ind = func_regpanel(DE_ep, 1)
DE_tot = func_regpanel(DE_ep, 3)
#deskriptiv
DE_b = func_regpanel(DE_ep, 2)
# Holland
NL_emp <- read_excel("Data/NL_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thoNLands)
#NL_go <- read_excel("Data/NL_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
NL_gop <- read_excel("Data/NL_output_17ii.xlsx", sheet = "GO_QI") #Gross Output at current basic prices (in millions of national currency)
NL_comp <- read_excel("Data/NL_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
NL_lab <- read_excel("Data/NL_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
NL_va <- read_excel("Data/NL_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
NL_ls = func_labshare(NL_comp, NL_va, NL_lab, "NL", "COMP", "VA", "LAB")
NL_ls$year = lubridate::ymd(NL_ls$year, truncated = 2L)
#Employment and productivty
NL_ep = func_empprod(NL_emp, NL_gop,"NL", "EMP", "GO_QI", F)
NL_ep = func_empprod(NL_emp, NL_gop,"NL", "EMP", "GO_QI", T, "AS")
#PLM analyse
NL_ind = func_regpanel(NL_ep, 1)
NL_tot = func_regpanel(NL_ep, 3)
#deskriptiv
NL_b = func_regpanel(NL_ep, 2)
# Sverige
SE_emp <- read_excel("Data/SE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SE_go <- read_excel("Data/SE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SE_gop <- read_excel("Data/SE_output_17ii.xlsx", sheet = "GO_QI") #Gross Output at current basic prices (in millions of national currency)
SE_comp <- read_excel("Data/SE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SE_lab <- read_excel("Data/SE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SE_va <- read_excel("Data/SE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
SE_ls = func_labshare(SE_comp, SE_va, SE_lab, "SE", "COMP", "VA", "LAB")
SE_ls$year = lubridate::ymd(SE_ls$year, truncated = 2L)
#Employment and productivty
SE_ep = func_empprod(SE_emp, SE_gop,"SE", "EMP", "GO_QI", F)
SE_ep = func_empprod(SE_emp, SE_gop,"SE", "EMP", "GO_QI", T, "AS")
#PLM analyse
SE_ind = func_regpanel(SE_ep, 1)
SE_tot = func_regpanel(SE_ep, 3)
#deskriptiv
SE_b = func_regpanel(SE_ep, 2)
# Østrig
AT_emp = read_excel("Data/AT_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#AT_go = read_excel("Data/AT_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
AT_gop = read_excel("Data/AT_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
AT_comp = read_excel("Data/AT_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
AT_lab = read_excel("Data/AT_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
AT_va = read_excel("Data/AT_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
AT_ls = func_labshare(AT_comp, AT_va, AT_lab, "AT", "COMP", "VA", "LAB")
AT_ls$year = lubridate::ymd(AT_ls$year, truncated = 2L)
#Employment and productivty
AT_ep = func_empprod(AT_emp, AT_gop,"AT", "EMP", "GO_QI", F)
AT_ep = func_empprod(AT_emp, AT_gop,"AT", "EMP", "GO_QI", T, "AS")
#PLM analyse
AT_ind = func_regpanel(AT_ep, 1)
AT_tot = func_regpanel(AT_ep, 3)
#deskriptiv
AT_b = func_regpanel(AT_ep, 2)
# Belgium
BE_emp = read_excel("Data/BE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#BE_go = read_excel("Data/BE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
BE_gop = read_excel("Data/BE_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
BE_comp = read_excel("Data/BE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
BE_lab = read_excel("Data/BE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
BE_va = read_excel("Data/BE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
BE_ls = func_labshare(BE_comp, BE_va, BE_lab, "BE", "COMP", "VA", "LAB")
BE_ls$year = lubridate::ymd(BE_ls$year, truncated = 2L)
#Employment and productivty
BE_ep = func_empprod(BE_emp, BE_gop,"BE", "EMP", "GO_QI", F)
BE_ep = func_empprod(BE_emp, BE_gop,"BE", "EMP", "GO_QI", T, "AS")
#PLM analyse
BE_ind = func_regpanel(BE_ep, 1)
BE_tot = func_regpanel(BE_ep, 3)
#deskriptiv
BE_b = func_regpanel(BE_ep, 2)
# Tjekkiet
CZ_emp = read_excel("Data/CZ_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#CZ_go = read_excel("Data/CZ_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
CZ_gop = read_excel("Data/CZ_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
CZ_comp = read_excel("Data/CZ_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
CZ_lab = read_excel("Data/CZ_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
CZ_va = read_excel("Data/CZ_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
CZ_ls = func_labshare(CZ_comp, CZ_va, CZ_lab, "CZ", "COMP", "VA", "LAB")
CZ_ls$year = lubridate::ymd(CZ_ls$year, truncated = 2L)
#Employment and productivty
CZ_ep = func_empprod(CZ_emp, CZ_gop,"CZ", "EMP", "GO_QI", F)
CZ_ep = func_empprod(CZ_emp, CZ_gop,"CZ", "EMP", "GO_QI", T, "AS")
#PLM analyse
CZ_ind = func_regpanel(CZ_ep, 1)
CZ_tot = func_regpanel(CZ_ep, 3)
#deskriptiv
CZ_b = func_regpanel(CZ_ep, 2)
# Finland
FI_emp = read_excel("Data/FI_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#FI_go = read_excel("Data/FI_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
FI_gop = read_excel("Data/FI_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
FI_comp = read_excel("Data/FI_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
FI_lab = read_excel("Data/FI_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
FI_va = read_excel("Data/FI_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
FI_ls = func_labshare(FI_comp, FI_va, FI_lab, "FI", "COMP", "VA", "LAB")
FI_ls$year = lubridate::ymd(FI_ls$year, truncated = 2L)
#Employment and productivty
FI_ep = func_empprod(FI_emp, FI_gop,"FI", "EMP", "GO_QI", F)
FI_ep = func_empprod(FI_emp, FI_gop,"FI", "EMP", "GO_QI", T, "AS")
#PLM analyse
FI_ind = func_regpanel(FI_ep, 1)
FI_tot = func_regpanel(FI_ep, 3)
#deskriptiv
FI_b = func_regpanel(FI_ep, 2)
# Frankrig
FR_emp = read_excel("Data/FR_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#FR_go = read_excel("Data/FR_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
FR_gop = read_excel("Data/FR_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
FR_comp = read_excel("Data/FR_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
FR_lab = read_excel("Data/FR_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
FR_va = read_excel("Data/FR_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
FR_ls = func_labshare(FR_comp, FR_va, FR_lab, "FR", "COMP", "VA", "LAB")
FR_ls$year = lubridate::ymd(FR_ls$year, truncated = 2L)
#Employment and productivty
FR_ep = func_empprod(FR_emp, FR_gop,"FR", "EMP", "GO_QI", F)
FR_ep = func_empprod(FR_emp, FR_gop,"FR", "EMP", "GO_QI", T, "AS")
#PLM analyse
FR_ind = func_regpanel(FR_ep, 1)
FR_tot = func_regpanel(FR_ep, 3)
#deskriptiv
FR_b = func_regpanel(FR_ep, 2)
# Grækenland
EL_emp = read_excel("Data/EL_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#EL_go = read_excel("Data/EL_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
EL_gop = read_excel("Data/EL_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
EL_comp = read_excel("Data/EL_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
EL_lab = read_excel("Data/EL_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
EL_va = read_excel("Data/EL_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
EL_ls = func_labshare(EL_comp, EL_va, EL_lab, "EL", "COMP", "VA", "LAB")
EL_ls$year = lubridate::ymd(EL_ls$year, truncated = 2L)
#Employment and productivty
EL_ep = func_empprod(EL_emp, EL_gop,"EL", "EMP", "GO_QI", F)
EL_ep = func_empprod(EL_emp, EL_gop,"EL", "EMP", "GO_QI", T, "AS")
#PLM analyse
EL_ind = func_regpanel(EL_ep, 1)
EL_tot = func_regpanel(EL_ep, 3)
#deskriptiv
EL_b = func_regpanel(EL_ep, 2)
# Italien
IT_emp = read_excel("Data/IT_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#IT_go = read_excel("Data/IT_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
IT_gop = read_excel("Data/IT_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
IT_comp = read_excel("Data/IT_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
IT_lab = read_excel("Data/IT_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
IT_va = read_excel("Data/IT_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
IT_ls = func_labshare(IT_comp, IT_va, IT_lab, "IT", "COMP", "VA", "LAB")
IT_ls$year = lubridate::ymd(IT_ls$year, truncated = 2L)
#Employment and productivty
IT_ep = func_empprod(IT_emp, IT_gop,"IT", "EMP", "GO_QI", F)
IT_ep = func_empprod(IT_emp, IT_gop,"IT", "EMP", "GO_QI", T, "AS")
#PLM analyse
IT_ind = func_regpanel(IT_ep, 1)
IT_tot = func_regpanel(IT_ep, 3)
#deskriptiv
IT_b = func_regpanel(IT_ep, 2)
# Letland
LV_emp = read_excel("Data/LV_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#LV_go = read_excel("Data/LV_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
LV_gop = read_excel("Data/LV_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
LV_comp = read_excel("Data/LV_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
LV_lab = read_excel("Data/LV_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
LV_va = read_excel("Data/LV_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
LV_ls = func_labshare(LV_comp, LV_va, LV_lab, "LV", "COMP", "VA", "LAB")
LV_ls$year = lubridate::ymd(LV_ls$year, truncated = 2L)
#Employment and productivty
LV_ep = func_empprod(LV_emp, LV_gop,"LV", "EMP", "GO_QI", F)
LV_ep = func_empprod(LV_emp, LV_gop,"LV", "EMP", "GO_QI", T, "AS")
#PLM analyse
LV_ind = func_regpanel(LV_ep, 1)
LV_tot = func_regpanel(LV_ep, 3)
#deskriptiv
LV_b = func_regpanel(LV_ep, 2)
# Luxenborg
LU_emp = read_excel("Data/LU_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#LU_go = read_excel("Data/LU_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
LU_gop = read_excel("Data/LU_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
LU_comp = read_excel("Data/LU_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
LU_lab = read_excel("Data/LU_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
LU_va = read_excel("Data/LU_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
LU_ls = func_labshare(LU_comp, LU_va, LU_lab, "LU", "COMP", "VA", "LAB")
LU_ls$year = lubridate::ymd(LU_ls$year, truncated = 2L)
#Employment and productivty
LU_ep = func_empprod(LU_emp, LU_gop,"LU", "EMP", "GO_QI", F)
LU_ep = func_empprod(LU_emp, LU_gop,"LU", "EMP", "GO_QI", T, "AS")
#PLM analyse
LU_ind = func_regpanel(LU_ep, 1)
LU_tot = func_regpanel(LU_ep, 3)
#deskriptiv
LU_b = func_regpanel(LU_ep, 2)
# Slovakiet
SK_emp = read_excel("Data/SK_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SK_go = read_excel("Data/SK_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SK_gop = read_excel("Data/SK_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
SK_comp = read_excel("Data/SK_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SK_lab = read_excel("Data/SK_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SK_va = read_excel("Data/SK_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
SK_ls = func_labshare(SK_comp, SK_va, SK_lab, "SK", "COMP", "VA", "LAB")
SK_ls$year = lubridate::ymd(SK_ls$year, truncated = 2L)
#Employment and productivty
SK_ep = func_empprod(SK_emp, SK_gop,"SK", "EMP", "GO_QI", F)
SK_ep = func_empprod(SK_emp, SK_gop,"SK", "EMP", "GO_QI", T, "AS")
#PLM analyse
SK_ind = func_regpanel(SK_ep, 1)
SK_tot = func_regpanel(SK_ep, 3)
#deskriptiv
SK_b = func_regpanel(SK_ep, 2)
# Slovenien
SI_emp = read_excel("Data/SI_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SI_go = read_excel("Data/SI_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SI_gop = read_excel("Data/SI_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
SI_comp = read_excel("Data/SI_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SI_lab = read_excel("Data/SI_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SI_va = read_excel("Data/SI_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), svarer labour + capital compensation
#Labour share
SI_ls = func_labshare(SI_comp, SI_va, SI_lab, "SI", "COMP", "VA", "LAB")
SI_ls$year = lubridate::ymd(SI_ls$year, truncated = 2L)
#Employment and productivty
SI_ep = func_empprod(SI_emp, SI_gop,"SI", "EMP", "GO_QI", F)
SI_ep = func_empprod(SI_emp, SI_gop,"SI", "EMP", "GO_QI", T, "AS")
#PLM analyse
SI_ind = func_regpanel(SI_ep, 1)
SI_tot = func_regpanel(SI_ep, 3)
#deskriptiv
SI_b = func_regpanel(SI_ep, 2)
# Descriptive -------------------------------------------------------------
# Common x-axis window for all descriptive plots below: start at 1995,
# no upper bound (NA in scale_x_date limits means "unbounded").
# NOTE(review): `min` and `max` shadow base::min()/base::max(); calls to the
# functions still resolve correctly (R looks up functions separately when a
# name is used in call position), but renaming to e.g. `date_min`/`date_max`
# would be clearer. Not renamed here because later plot code references these
# names.
min <- as.Date("1995-1-1")
max <- NA
# DESCRIPTIVE - Labour share of national income
# Each inner { ... } is a standalone plot; `min`/`max` (defined above) set a
# common x-axis window. Fixes relative to the original:
#  * the original added scale_x_date() twice per plot; ggplot2 keeps only the
#    last scale added (with a warning), so date_breaks/date_labels were
#    silently discarded — the two calls are merged into one below;
#  * the Germany plot referenced `GE_ls`, which is never created; the German
#    series is loaded as `DE_ls` in the country-data section;
#  * stray trailing commas in geom_line(aes(...),) removed.
{
  # Aggregate labour share of national income, total labour force
  {ggplot(DK_ls, aes(year, LS)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, Total Labour Force") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DK
  # Aggregate labour share of national income, employees only
  {ggplot(DK_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DK
  {ggplot(US_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("USA - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #US
  {ggplot(UK_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Storbritannien - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #UK
  # Germany (bug fix: `GE_ls` did not exist — the series is `DE_ls`)
  {ggplot(DE_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Tyskland - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DE
  {ggplot(NL_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Holland - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #NL
  {ggplot(SE_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Sverige - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #SE
  # Aggregate labour share: total labour force (LS) vs employees only (LSe)
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = LS, color = "LS")) +
      geom_line(aes(x = year, y = LSe, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, comparison") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DK
  # Index numbers (1975 = 100)
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = indeksLS, color = "LS")) +
      geom_line(aes(x = year, y = indeksLSe, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Indekstal, 1975=100") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DK
  # Cumulative growth of the labour share, LS vs LSe
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = LS_CGR, color = "LS")) +
      geom_line(aes(x = year, y = LSe_CGR, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y",
                   limits = c(min, max))
  } #DK
}
# DESCRIPTIVE - Sectoral employment and productivty
{
#Branchebeskæftigelse
# Employment level by sector (DK_b: one line per sector description).
# Fix: merged the two active scale_x_date() calls — the second replaced the
# first, silently discarding the 5-year breaks and "%Y" labels.
{ggplot(data = DK_b, aes(x = year, y = EMP, group = desc, colour = desc)) +
    geom_point() +
    geom_line() +
    xlab("Time") + ylab("Number of persons engaged in work (thousands)") +
    ggtitle("Employment by Sector") +
    guides(colour = guide_legend(title = "Branche")) +
    theme_economist() +
    theme(legend.position = "right") +
    #scale_x_date(date_labels = "%Y") +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  #theme(axis.text.x = element_text(angle = 90, hjust = 1))
  #scale_color_economist()
} #DK
#Produktivitet- og beskæftigelsesvækst
# Productivity and employment growth (log changes), Denmark totals.
# Fix: merged the duplicate scale_x_date() calls; removed stray trailing commas.
{ggplot(data = DK_tot) +
    geom_line(aes(x = year, y = emp_logchanges, color = "emp_logchanges")) +
    geom_line(aes(x = year, y = prod_logchanges, color = "prod_logchanges")) +
    scale_color_manual(name = "Colors", values = c("emp_logchanges" = "blue", "prod_logchanges" = "red")) +
    xlab("Time") + ylab("") +
    ggtitle("Produktivitet- og beskæftigelsesvækst i DK") +
    guides(colour = guide_legend(title = "")) +
    theme_economist() +
    theme(legend.position = "right") +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  #In order to get a legend, you have to map something to color within aes.
  #You can then use scale_color_manual to define the colors for the mapped character values.
} #DK
# Productivity and employment growth (log changes), UK totals.
# Same layout as the DK plot; the x-axis date scaling is deliberately left
# commented out here, as in the original.
{ggplot(data = UK_tot) +
    geom_line(aes(x = year, y = emp_logchanges, color = "emp_logchanges")) +
    geom_line(aes(x = year, y = prod_logchanges, color = "prod_logchanges")) +
    scale_color_manual(
      name = "Colors",
      values = c("emp_logchanges" = "blue", "prod_logchanges" = "red")
    ) +
    labs(x = "Time", y = "", title = "Produktivitet- og beskæftigelsesvækst i UK") +
    guides(colour = guide_legend(title = "")) +
    theme_economist() +
    theme(legend.position = "right")
  #scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
  #scale_x_date(limits = c(min, max))
} #UK
#Beskæftigelsesvækst fordelt på brancher
# Employment growth (log changes) by sector, Denmark.
# Fix: merged the duplicate scale_x_date() calls (second replaced the first).
{ggplot(data = DK_b, aes(x = year, y = emp_logchanges, group = desc, colour = desc)) +
    geom_point() +
    geom_line() +
    xlab("Time") + ylab("") +
    ggtitle("Beskæftigelsesvækst fordelt på brancher") +
    guides(colour = guide_legend(title = "Sector")) +
    theme_economist() +
    theme(legend.position = "right") +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
} #DK
#Kumulativ produktivitetsvækst fordelt på brancher
# Cumulative productivity growth by sector, Denmark (100 * cumulative log change).
# Fix: merged the duplicate scale_x_date() calls (second replaced the first).
{ggplot(data = DK_b, aes(x = year, y = prod_logCGR, group = desc, colour = desc)) +
    geom_point() +
    geom_line() +
    xlab("Time") + ylab("100 * kumulativ log ændring") +
    ggtitle("Kumulativ produktivitetsvækst") +
    guides(colour = guide_legend(title = "Sector")) +
    theme_economist() +
    theme(legend.position = "right") +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
} #DK
# Cumulative productivity growth by sector, UK.
# Fix: ylab(")") had a stray parenthesis as the axis label — almost certainly a
# typo for the empty label used by the sibling plots; changed to "".
{ggplot(data = UK_b, aes(x = year, y = prod_CGR, group = desc, colour = desc)) +
    geom_point() +
    geom_line() +
    xlab("Time") + ylab("") +
    ggtitle("Kumulativ produktivitetsvækst UK") +
    guides(colour = guide_legend(title = "Sector")) +
    theme_economist() +
    theme(legend.position = "right")
  #scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
  #scale_x_date(limits = c(min, max))
} #UK
#Kumulativ ændring i beskæftigelse fordelt på brancher
# Cumulative change in employment by sector, Denmark.
# Fix: merged the duplicate scale_x_date() calls (second replaced the first).
{ggplot(data = DK_b, aes(x = year, y = cumsum_EMP, group = desc, colour = desc)) +
    geom_point() +
    geom_line() +
    xlab("Time") + ylab("") +
    ggtitle("Kumulativ ændring i beskæftigelse") +
    guides(colour = guide_legend(title = "Sector")) +
    theme_economist() +
    theme(legend.position = "right") +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
} #DK
}
# Country panel -----------------------------------------------------
# Stack the per-country totals, index as a plm panel, and estimate LSDV
# (least-squares dummy variable) regressions of employment growth on
# productivity growth, with and without lags / country / year fixed effects.
c_panel = rbind(DK_tot, SE_tot, US_tot, NL_tot, DE_tot, AT_tot, BE_tot, CZ_tot, EL_tot, FI_tot, FR_tot, IT_tot, LU_tot, SI_tot, SK_tot) # LV_tot
c_panel = pdata.frame(c_panel, index = c("country", "year"))
# Time-shifted lags on the pseries (plm's lag); lag k built from lag k-1 is
# equivalent to lagging the base series by k periods.
c_panel$prod_logchanges_lag1 = lag(c_panel$prod_logchanges, k = 1, shift = "time")
c_panel$prod_logchanges_lag2 = lag(c_panel$prod_logchanges_lag1, k = 1, shift = "time")
c_panel$prod_logchanges_lag3 = lag(c_panel$prod_logchanges_lag2, k = 1, shift = "time")
c_panel = na.omit(c_panel) # drops the first three years of each country (lag NAs)
# Spec 1: contemporaneous only; spec 2: adds three lags. "-1" suppresses the
# intercept so every country dummy is reported.
lsdv.c_pool1 = lm(emp_logchanges ~ prod_logchanges, data=c_panel)
lsdv.c_fec1 = lm(emp_logchanges ~ prod_logchanges + factor(country) -1, data=c_panel)
lsdv.c_feci1 = lm(emp_logchanges ~ prod_logchanges + factor(country) + factor(year) -1, data=c_panel)
lsdv.c_pool2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3, data=c_panel)
lsdv.c_fec2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3 + factor(country) - 1, data=c_panel)
lsdv.c_feci2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3 + factor(country) + factor(year) -1, data=c_panel)
summary(lsdv.c_pool1)
# Heteroskedasticity-robust (HC1) coefficient tables.
lsdv.c_pool1_coef = coeftest(lsdv.c_pool1, vcov. = vcovHC, type = "HC1")
lsdv.c_fec1_coef = coeftest(lsdv.c_fec1, vcov. = vcovHC, type = "HC1")
lsdv.c_feci1_coef = coeftest(lsdv.c_feci1, vcov. = vcovHC, type = "HC1")
lsdv.c_pool2_coef = coeftest(lsdv.c_pool2, vcov. = vcovHC, type = "HC1")
lsdv.c_fec2_coef = coeftest(lsdv.c_fec2, vcov. = vcovHC, type = "HC1")
lsdv.c_feci2_coef = coeftest(lsdv.c_feci2, vcov. = vcovHC, type = "HC1")
#coeftest(fixed.dum, vcov. = vcovHC, method = "arellano")
# Fix: the original cbind referenced four objects that are never created in
# this section (lsdv.c_pool_coef / _feci_coef / _fecy_coef / _feyi_coef — names
# copy-pasted from the sector-spillover section), and wrote to
# "fixeddum_ci_panel.csv", the same file the spillover section writes.
# Export the six tables actually computed above, to a country-panel file.
write.csv(cbind(lsdv.c_pool1_coef, lsdv.c_fec1_coef, lsdv.c_feci1_coef,
                lsdv.c_pool2_coef, lsdv.c_fec2_coef, lsdv.c_feci2_coef),
          "fixeddum_c_panel.csv")
# Cross-check the LSDV results above using plm() estimators instead:
{
# Pooled OLS, first-difference, and within (fixed-effects) estimators of the
# one-regressor model on the country panel.
model_linear1 = emp_logchanges ~ prod_logchanges
C0_pool = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "pooling")
C0_fd = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "fd")
C0_fe = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "within")
summary(C0_pool)
summary(C0_fd)
summary(C0_fe)
# NOTE(review): model_linear2 is not defined anywhere in this section — it is
# presumably the lagged specification (cf. the commented formula near the
# micro/macro section); confirm it is created before this block runs.
C2_pool = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "pooling")
C2_fd = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "fd")
C2_fe = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "within", effect = "individual")
C2_fe_tw = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "within", effect = "twoway")
summary(C2_pool)
summary(C2_fd)
summary(C2_fe)
summary(C2_fe_tw)
}
# Country industry panel --------------------------------------------------
#AS: Industry-by-country fixed effects are already implicitly taken out by first-differencing in the stacked firstdifference model.
ci_panel = rbind(DK_ind, SE_ind, US_ind, NL_ind, DE_ind, AT_ind, BE_ind, CZ_ind, EL_ind, FI_ind, FR_ind, IT_ind , LU_ind, SI_ind, SK_ind) #, LV_ind)
ci_panel = ci_panel %>% select(year, country, code, desc, emp_logchanges, prod_logchanges, wgt)
# Industry-by-country identifier used as the individual index of the panel.
ci_panel$id = ci_panel %>% group_indices(code, country)
# Employment-weighted growth rates (wgt is the industry weight column).
ci_panel$prod_logchanges_wgt = ci_panel$prod_logchanges*ci_panel$wgt
ci_panel$emp_logchanges_wgt = ci_panel$emp_logchanges*ci_panel$wgt
ci_panel = na.omit(ci_panel) # important: run after unneeded variables have been dropped
# Fix: the original assigned both formulas to `model_linear1`, silently
# discarding the weighted one. Keep both under distinct names; model_linear1
# ends up as the unweighted formula, exactly as before.
model_linear1_wgt = emp_logchanges_wgt ~ prod_logchanges_wgt
model_linear1 = emp_logchanges ~ prod_logchanges
ci.reg <- plm(model_linear1, data = ci_panel, index = c("id", "year"), model = "within")
summary(ci.reg)
# Fix: all six LSDV specifications originally overwrote `fixed.dum`, so only
# the last one could be inspected. Each now gets its own name; `fixed.dum`
# keeps its original final value (country + year FE spec) for downstream code.
lsdv.ci_pool     = lm(emp_logchanges ~ prod_logchanges, data=ci_panel)
lsdv.ci_pool_wgt = lm(emp_logchanges_wgt ~ prod_logchanges_wgt, data=ci_panel)
lsdv.ci_fec      = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country), data=ci_panel)
lsdv.ci_feci     = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(code), data=ci_panel)
lsdv.ci_fecit    = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(code) + factor(year), data=ci_panel)
lsdv.ci_feit     = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(code) + factor(year), data=ci_panel)
fixed.dum        = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(year), data=ci_panel)
summary(fixed.dum)
#options(digits = 3)
#pols = coeftest(poolOLS, vcov. = vcovHC, method = "arellano")
# FE models: individual (industry), time, and two-way fixed effects.
# NOTE(review): `dk` is not defined in this section — presumably a Danish
# industry panel created earlier in the file; confirm (ci_panel may be intended).
FixedEffects_indi <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "individual")
FixedEffects_time <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "time")
FixedEffects_twoway <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "twoway")
summary(FixedEffects_indi)
summary(FixedEffects_time)
summary(FixedEffects_twoway)
options(digits = 3) # superseded immediately by the next line; kept for fidelity
options("scipen"=100, "digits"=4)
coeftest(FixedEffects_indi, vcov. = vcovHC, type = "HC1")
# Arellano-robust standard errors (clustered over both time and individuals).
fe = coeftest(FixedEffects_indi, vcov. = vcovHC, method = "arellano")
# Fix: the original inspected `Arellano`, a name that is never defined; the
# Arellano coeftest result is stored in `fe` on the line above.
attributes(fe)
fe
# Composition of micro and macro elasticities --------------------------------------------------
# what do we do with countries where some industries are missing?
#model_linear2 = emp_logchanges ~ prod_logchanges + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3
# NOTE(review): avgLP_oi and its lags are not created in this section —
# confirm they exist on ci_panel before running.
# Fix: the four lm() fits originally all overwrote `fixed.dum`, so only the
# last could be inspected. Distinct names; `fixed.dum` keeps its original
# final value (the no-fixed-effects spec) for the summary() below.
lsdv.mm_fecit = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) + factor(code) + factor(year), data=ci_panel)
lsdv.mm_fect  = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) + factor(year), data=ci_panel)
lsdv.mm_fec   = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) , data=ci_panel)
fixed.dum     = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 , data=ci_panel)
summary(fixed.dum)
# Sector spillover -------------------------------------------------
# How to deal with NA in a panel data regression? Link: https://stackoverflow.com/questions/14427781/how-to-deal-with-na-in-a-panel-data-regression------
# Should it be weighted? And what should be weighted?
# Build the spillover panel: own-sector productivity (dLP_I_b*) and
# sector-excluding-own-industry productivity (dLP_BwoI_b*) with three lags each.
ci_panel_ss = rbind(DK_ind, SE_ind, US_ind, NL_ind, DE_ind, AT_ind, BE_ind, CZ_ind, EL_ind, FI_ind, FR_ind, IT_ind , LU_ind, SI_ind, SK_ind) #, LV_ind)
ci_panel_ss = ci_panel_ss%>% select(year, country, code, desc, branche, branche_desc, wgt_i, wgt_b, emp_logchanges,
                                    dLP_I_b1, dLP_I_b2, dLP_I_b3, dLP_I_b4, dLP_I_b5,
                                    dLP_I_b1_dum, dLP_I_b2_dum, dLP_I_b3_dum, dLP_I_b4_dum, dLP_I_b5_dum,
                                    dLP_BwoI_b1, dLP_BwoI_b1_lag1, dLP_BwoI_b1_lag2, dLP_BwoI_b1_lag3,
                                    dLP_BwoI_b2, dLP_BwoI_b2_lag1, dLP_BwoI_b2_lag2, dLP_BwoI_b2_lag3,
                                    dLP_BwoI_b3, dLP_BwoI_b3_lag1, dLP_BwoI_b3_lag2, dLP_BwoI_b3_lag3,
                                    dLP_BwoI_b4, dLP_BwoI_b4_lag1, dLP_BwoI_b4_lag2, dLP_BwoI_b4_lag3,
                                    dLP_BwoI_b5, dLP_BwoI_b5_lag1, dLP_BwoI_b5_lag2, dLP_BwoI_b5_lag3)
ci_panel_ss = as.data.frame(ci_panel_ss)
# Check whether the panel has consecutive time periods per individual.
is.pconsecutive(ci_panel_ss)
# Industry-by-country identifier for the panel index.
ci_panel_ss$id = ci_panel_ss %>% group_indices(code, country)
# Base formula reused by the LSDV specifications below.
base_model = {emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3}
is.pconsecutive(ci_panel_ss)
# NOTE(review): the next six lines look like a pasted example from the linked
# Stack Overflow answer — `TestData` is not defined anywhere in this script,
# so they will error if run. Confirm they can be removed.
pTestData <- pdata.frame(TestData, index=c("ID", "Time"))
pTestData$Y_diff <- plm::lag(pTestData$Y) - pTestData$Y
pTestData$X_diff <- plm::lag(pTestData$X) - pTestData$X
fdmod <- plm(Y_diff ~ X_diff, data = pTestData, model = "pooling")
length(residuals(fdmod)) # 10
nrow(fdmod$model) # 10
#ci_panel_ss$id = ci_panel_ss %>% group_indices(code, country)
#ci_panel_ss$prod_logchanges_wgt = ci_panel_ss$prod_logchanges*ci_panel$wgt
#ci_panel_ss$emp_logchanges_wgt = ci_panel_ss$emp_logchanges*ci_panel$wgt
# LSDV spillover regressions: pooled, plus country+year (fecy), country+industry
# (feci), year+industry (feyi), and country+year+industry (fecyi) fixed effects.
lsdv.ss_pool = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3, data=ci_panel_ss)}
lsdv.ss_fecy = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
    factor(country) + factor(year) -1, data=ci_panel_ss)}
summary(lsdv.ss_fecy)
lsdv.ss_feci = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
    factor(country) + factor(code), data=ci_panel_ss)}
lsdv.ss_feyi = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
    factor(year) + factor(code), data=ci_panel_ss)}
lsdv.ss_fecyi = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
    dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
    dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
    dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
    dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
    dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
    factor(country) + factor(year) + factor(code), data=ci_panel_ss)}
options(digits = 3)
options("scipen"=100, "digits"=4)
library(lmtest) # NOTE(review): mid-script library() call; already needed by coeftest above
# Heteroskedasticity-robust (HC1) coefficient tables.
lsdv.ss_pool_coef = coeftest(lsdv.ss_pool, vcov. = vcovHC, type = "HC1")
lsdv.ss_feci_coef = coeftest(lsdv.ss_feci, vcov. = vcovHC, type = "HC1")
lsdv.ss_fecy_coef = coeftest(lsdv.ss_fecy, vcov. = vcovHC, type = "HC1")
lsdv.ss_feyi_coef = coeftest(lsdv.ss_feyi, vcov. = vcovHC, type = "HC1")
# Fix: the original computed this from lsdv.ss_feyi (copy-paste), so the
# country+year+industry model's robust coefficients were never produced.
lsdv.ss_fecyi_coef = coeftest(lsdv.ss_fecyi, vcov. = vcovHC, type = "HC1")
#coeftest(fixed.dum, vcov. = vcovHC, method = "arellano")
write.csv(cbind(lsdv.ss_pool_coef, lsdv.ss_feci_coef, lsdv.ss_fecy_coef, lsdv.ss_feyi_coef), "fixeddum_ci_panel.csv")
# Skills..... --------------------------------------------------
# TIME SERIES - Import and preparation of data --------------------------------------------------------------------
# Danish aggregate series: employment, gross output (current prices), and
# gross-output quantity index, each with the A, O and T columns subtracted from
# the total (presumably to exclude agriculture, public administration and
# household sectors, as in Autor–Salomons — confirm against the sheet layout).
EMP <- read_excel("DK_output_17ii.xlsx", sheet = "EMP_2")
GO <- read_excel("DK_output_17ii.xlsx", sheet = "GO_2")
GO_QI <- read_excel("DK_output_17ii.xlsx", sheet = "GO_QI_2")
data <- data.frame(emp_tot = EMP$TOT,
                   emp_markt = EMP$MARKT,
                   emp = (EMP$TOT-EMP$A-EMP$O-EMP$T),
                   go_tot = GO$TOT,
                   go_markt = GO$MARKT,
                   go = (GO$TOT-GO$A-GO$O-GO$T),
                   goqi_tot=GO_QI$TOT,
                   goqi_markt=GO_QI$MARKT,
                   go_real = (GO_QI$TOT-GO_QI$A-GO_QI$O-GO_QI$T))
# Employment: levels, log, first differences, and percentage changes.
data$emp_log <- log(data$emp)
data$emp_diff <- diff.xts(data$emp)
data$emp_ldiff <- diff.xts(data$emp_log)
data$emp_changes <- data$emp_diff/lag.xts(data$emp,1)*100
# NOTE(review): this divides the log-difference by the *lagged log level*
# (percentage change of the log), not the usual 100*dlog growth rate used
# elsewhere in the script (emp_logchanges) — confirm this is intentional.
data$emp_lchanges <- (data$emp_ldiff/lag.xts(data$emp_log,1))*100
# Labour productivity = gross output per person engaged, same transformations.
data <- data %>% mutate(prod=go/emp)
data$prod_log <- log(data$prod)
data$prod_diff <- diff.xts(data$prod)
data$prod_ldiff <- diff.xts(data$prod_log)
data$prod_changes <- data$prod_diff/lag.xts(data$prod,1)*100
data$prod_lchanges <- data$prod_ldiff/lag.xts(data$prod_log,1)*100
data.ts <- ts(data, start=1975)
data.ts <- na.omit(data.ts)
autoplot(data.ts[,c("prod_lchanges","prod_changes")])
autoplot(data.ts[,c("emp_lchanges","emp_changes")])
autoplot(data.ts[,c("prod_lchanges","emp_lchanges")])
autoplot(data.ts[,c("prod_changes","emp_changes")])
# NOTE(review): stats::lag() inside an lm() formula does not realign the
# series in the model frame, so these "lagged" regressors are likely identical
# to the contemporaneous one — verify (dynlm/zoo is the usual approach).
reg_dk <- lm(emp_lchanges~ prod_lchanges+lag(prod_lchanges,1)+lag(prod_lchanges,2)+lag(prod_lchanges,3), data = data.ts)
reg_dk2 <- lm(emp_lchanges~ prod_lchanges, data = data.ts)
summary(reg_dk)
summary(reg_dk2)
| /Produktivet_og_beskæftigelse_EUKLEMS.R | no_license | erikmnielsen/Speciale-oecon | R | false | false | 67,764 | r | #PRODUKTIVITET OG BESKÆFTIGELSE - EU KLEMS DATA
{
#EU KLEMS is an industry level panel dataset covering OECD countries since 1970,
#it contains detailed data for 32 industries in both the market and non-market economy
#Methods used in the artcile "Robocalypse now?":
#-They Focus on non-farm employment, and omit the poorly measured Private household sector, and Public administration,
# Defense and Extraterritorial organizations, which are almost entirely non-market sectors.
#They operationalize the measurement of EMPLOYMENT and PRODUCTIVITY as follows.
#The primary EMPLOYMENT measure is the number of persons engaged in work, though we have also experimented with excluding the self-employed and obtain similar results.
#The primary LABOR PRODUCTIVITY measure is real gross output per worker, because measurement of value-added outside of manufacturing is typically somewhat speculative
#- They also present a set of models using value-added per worker and value added based total factor productivity.
#- These alternative measures yield qualitatively similar findings, although total factor productivity growth seems to have the most strongly positive effect on employment.
}
# Libraries ---------------------------------------------------------------
library(readr)
library(readxl)
library(reshape2)
library(fpp2)
library(tidyverse)
library(xts)
library(plm)
library(ggplot2)
library(ggthemes)
library(dplyr)
# Interactive scratch block: pick a country/measure combination by running the
# relevant lines. If executed top-to-bottom, each assignment overwrites the
# previous one, so only the last set survives (DK COMP/VA/LAB sheets).
# NOTE(review): paths are inconsistent — "Data/..." here vs. bare filenames in
# the time-series section below; confirm the working directory.
{
country="DK"
country="FR"
dataset_1 <- read_excel("Data/FR_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
dataset_2 <- read_excel("Data/FR_output_17ii.xlsx", sheet = "GO_QI")
dataset_1 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "EMP")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO_QI") #Gross output, faste priser 2010=100
measure_1="EMP"
measure_2="GO_QI"
dataset_1 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "COMP")
dataset_2 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "VA")
dataset_3 <- read_excel("Data/DK_output_17ii.xlsx", sheet = "LAB")
measure_1="COMP"
measure_2="VA"
measure_3="LAB"
}
# Cumulative growth rate: turn a vector of per-period growth rates into
# cumulative growth since the start, CGR(x)[y] = prod(1 + x[1:y]) - 1.
#
# Rewritten with cumprod(): O(n) instead of the original O(n^2)
# sapply(1:length(x), ...) loop, and well-defined for empty input — the
# original's 1:length(x) iterated over c(1, 0) when length(x) == 0.
# Returns a numeric vector the same length as x.
CGR = function(x){
  cumprod(1 + x) - 1
}
# Build a one-country labour-share series (total economy only).
#
# Melts the wide COMP/VA/LAB sheets to long format, merges them on
# desc/code/year, computes the labour share two ways (LS = LAB/VA,
# LSe = COMP/VA), keeps only the TOT aggregate, and adds year-on-year changes
# plus cumulative growth (via CGR) on a plm pdata.frame.
#
# Args:
#   dataset_1, dataset_2, dataset_3: wide sheets whose year columns are
#     prefixed by the respective measure name (e.g. "COMP1970").
#   country: ISO-style country label stored on every row.
#   measure_1/2/3: measure prefixes to strip / long-format column names.
# Returns: a pdata.frame indexed by (code, year) for the TOT aggregate.
#
# Fixes vs. original: removed the dead `sapply(data, class)` statement (its
# value was silently discarded) and replaced the magic column index data[,2]
# with data$code (the second merge-by column).
func_labshare <- function(dataset_1, dataset_2, dataset_3, country, measure_1="COMP", measure_2="VA", measure_3="LAB") {
  # Strip the measure prefix so year columns align across the three sheets.
  colnames(dataset_1) <- gsub(measure_1, "", colnames(dataset_1))
  colnames(dataset_2) <- gsub(measure_2, "", colnames(dataset_2))
  colnames(dataset_3) <- gsub(measure_3, "", colnames(dataset_3))
  # Wide -> long: one row per industry (desc/code) and year.
  dataset_1 <- melt(dataset_1,
                    id.vars = c("desc", "code"),
                    variable.name = "year",
                    value.name = measure_1)
  dataset_2 <- melt(dataset_2,
                    id.vars = c("desc", "code"),
                    variable.name = "year",
                    value.name = measure_2)
  dataset_3 <- melt(dataset_3,
                    id.vars = c("desc", "code"),
                    variable.name = "year",
                    value.name = measure_3)
  data = merge(dataset_1, dataset_2, by=c("desc","code", "year"), all.x = TRUE)
  data = merge(data, dataset_3, by=c("desc","code", "year"), all.x = TRUE)
  data <- na.omit(data)
  data$year <- as.numeric(as.character(data[,"year"])) # "year" is a factor after melt
  data$code = gsub("-", "t", data$code) # e.g. "10-12" -> "10t12"
  data$country = country
  data$LS = data$LAB/data$VA   # labour share: total labour cost over value added
  data$LSe = data$COMP/data$VA # labour share: employee compensation only
  data = data %>% filter(code=="TOT") # keep the total-economy aggregate
  # NOTE(review): the index series used by the "Indekstal" plot are commented
  # out, so indeksLS/indeksLSe do not exist on the returned data — confirm.
  #data$indeksLS = (data$LS/0.6880021)*100
  #data$indeksLSe = (data$LSe/0.5954859)*100
  pdata = pdata.frame(data, index = c("code", "year"))
  # Cumulative growth (shows the same as the index series would).
  {
    pdata$LS_diff = diff(pdata$LS, lag = 1, shift = "time")
    pdata$LSe_diff = diff(pdata$LSe, lag = 1, shift = "time")
    pdata$LS_changes <- pdata$LS_diff/lag(pdata$LS, k = 1, shift = "time")
    pdata$LSe_changes <- pdata$LSe_diff/lag(pdata$LSe, k = 1, shift = "time")
    # Drop the leading NA before compounding, keep year ordering via order_by.
    pdata$LS_CGR = order_by(pdata$year, CGR(pdata$LS_changes[-1])*100)
    pdata$LSe_CGR = order_by(pdata$year, CGR(pdata$LSe_changes[-1])*100)
    #pdata = pdata.frame(pdata, index = c("code", "year"))
    # Shift forward one period so cumulative growth lines up with its year;
    # the first observation becomes 0 instead of NA.
    pdata$LS_CGR <- lag(pdata$LS_CGR, k=1, shift="time")
    pdata$LS_CGR = ifelse(is.na(pdata$LS_CGR)==T,0,pdata$LS_CGR)
    pdata$LSe_CGR <- lag(pdata$LSe_CGR, k=1, shift="time")
    pdata$LSe_CGR = ifelse(is.na(pdata$LSe_CGR)==T,0,pdata$LSe_CGR)
  }
  pdata
}
# Build a one-country employment/productivity panel from wide EMP and GO sheets.
#
# Melts both sheets to long format, merges on desc/code/year, classifies
# industries into sectors ("branche") either per Autor-Salomons (method=="AS")
# or per a 10-sector Danish grouping, appends sector totals and a recomputed
# country total, and derives growth/cumulative-growth variables on a plm
# pdata.frame. When Emma==TRUE a reduced column set without the cumulative
# series is returned.
func_empprod <- function(dataset_1, dataset_2, country, measure_1="EMP", measure_2="GO", Emma, method) {
# Strip the measure prefix so the year columns melt to the same levels.
colnames(dataset_1) <- gsub(measure_1, "", colnames(dataset_1))
colnames(dataset_2) <- gsub(measure_2, "", colnames(dataset_2))
dataset_1<- melt(dataset_1,
                 # ID variables - all the variables to keep but not split apart on
                 id.vars=c("desc", "code"),
                 # The source columns (not necessary here) # measure.vars=c("1970","1971",...),
                 # Name of the destination column that will identify the original column that the measurement came from
                 variable.name="year",
                 value.name= measure_1)
dataset_2 <- melt(dataset_2,
                  id.vars=c("desc", "code"),
                  variable.name="year",
                  value.name= "GO")
data = merge(dataset_1, dataset_2, by=c("desc","code", "year"), all.x = TRUE)
data <- na.omit(data)
#sapply(data, class)
data$year <- as.numeric(as.character(data[,"year"])) # converted because "year" is a factor variable
data$GO <- as.numeric(as.character(data[,"GO"]))
# data[,2] is the "code" column (second merge-by column); "10-12" -> "10t12".
data$code =gsub("-", "t", data[,2])
data$country = country
if (method=="AS") {
# Autor-Salomons industries: aggregates/excluded codes get sel_industries=0.
data$sel_industries <-factor(ifelse( data$code %in% c("TOT", "MARKT", "A","C","G","H","J","OtU","O","RtS","T","U"), 0,1))
# Five-sector grouping; b0 marks codes outside the classification.
data$branche <- ifelse(data$code %in% c("B", "DtE", "F"), "b1",
                       ifelse(data$code %in% c("10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33"), "b2", # could C, Total Manufacturing, be used directly?
                              ifelse(data$code %in% c("P","Q","R", "S"), "b3",
                                     ifelse(data$code %in% c("53", "58t60", "61", "62t63", "K", "MtN"), "b4",
                                            ifelse(data$code %in% c("45", "46", "47", "49t52", "I", "L"), "b5",
                                                   "b0")))))
data$branche_desc <- ifelse(data$branche=="b1","Mining, utilities, and construction",
                            ifelse(data$branche=="b2","Manufacturing",
                                   ifelse(data$branche=="b3","Education and health services",
                                          ifelse(data$branche=="b4","High-tech services",
                                                 ifelse(data$branche=="b5","Low-tech services",
                                                        "Not relevant"
                                                 )))))
} else {
# Sectors, 10 (Danish grouping; b9/b10 for the public and culture sectors are
# commented out below, so effectively 8 sectors are produced).
data$sel_industries <-factor(ifelse( data$code %in% c("A","B", "DtE", "F","10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33",
                                                      "53","58t60", "61", "62t63", "K", "MtN","45", "46", "47", "49t52", "I", "L"), 1,0)) # everything except O, P, Q (should RtS, T and U also be removed?)
data$branche <- ifelse(data$code=="A", "b1",
                       ifelse(data$code %in% c("B","10t12", "13t15", "16t18", "19", "20t21", "22t23","24t25", "26t27", "28", "29t30","31t33", "DtE"), "b2",
                              ifelse(data$code=="F", "b3",
                                     ifelse(data$code %in% c("45", "46", "47","49t52","53", "I"), "b4",
                                            ifelse(data$code %in% c("58t60", "61", "62t63"), "b5",
                                                   ifelse(data$code=="K", "b6",
                                                          ifelse(data$code=="L", "b7",
                                                                 ifelse(data$code=="MtN", "b8",
                                                                        # ifelse(data$code %in% c("O","P","Q"), "b9",
                                                                        #ifelse(data$code %in% c("R","S","T","U"), "b10",
                                                                        "b0"))))))))
data$branche_desc <- ifelse(data$branche=="b1","Landbrug, skovbrug og fiskeri",
                            ifelse(data$branche=="b2","Industri, råstofindvinding og forsyningsvirksomhed",
                                   ifelse(data$branche=="b3","Bygge og anlæg",
                                          ifelse(data$branche=="b4","Handel og transport mv.",
                                                 ifelse(data$branche=="b5","Information og kommunikation",
                                                        ifelse(data$branche=="b6", "Finansiering og forsikring",
                                                               ifelse(data$branche=="b7","Ejendomshandel og udlejning",
                                                                      ifelse(data$branche=="b8","Erhvervsservice",
                                                                             # ifelse(data$branche=="b9","Offentlig administration, undervisning og sundhed",
                                                                             # ifelse(data$branche=="b10","Kultur, fritid og anden service",
                                                                             "Ikke relevant"))))))))
}
# Compute sector totals (per year and branche) and append them as extra rows.
t4 <- data %>% filter(branche!="b0") %>% group_by(year, branche, branche_desc) %>% summarize(EMP=sum(EMP),GO=sum(GO))
data2 <- data.frame(desc= t4$branche_desc,
                    code=t4$branche,
                    year=t4$year,
                    EMP=t4$EMP,
                    GO=t4$GO,
                    country=country,
                    sel_industries=0,
                    branche="b-tot",
                    branche_desc="Branche Total")
# Computation of a country total when certain sectors are excluded (e.g. agriculture, public sector).
# The following is needed if our selected sectors differ from "TOTAL INDUSTRIES".
# The b* frames are aligned by construction (one row per year), so row-wise
# addition sums sector totals into a country total.
b <- data2 %>% filter(code=="b1")
b_2 <- data2 %>% filter(code=="b2")
b_3 <- data2 %>% filter(code=="b3")
b_4 <- data2 %>% filter(code=="b4")
b_5 <- data2 %>% filter(code=="b5")
if (method!="AS") {
b_6 <- data2 %>% filter(code=="b6")
b_7 <- data2 %>% filter(code=="b7")
b_8 <- data2 %>% filter(code=="b8")
#b_9 <- data2 %>% filter(code=="b9")
#b_10 <- data2 %>% filter(code=="b10")
b$EMP = b$EMP + b_2$EMP + b_3$EMP + b_4$EMP + b_5$EMP + b_6$EMP + b_7$EMP + b_8$EMP #+ b_10$EMP + b_9$EMP
b$GO = b$GO + b_2$GO + b_3$GO + b_4$GO + b_5$GO + b_6$GO + b_7$GO + b_8$GO #+ b_10$GO + b_9$GO
b$desc = "TOTAL INDUSTRIES-MunkNielsen"
b$code = "TOT_MN"
b$branche = "TOT"
} else {
b$EMP = b$EMP + b_2$EMP + b_3$EMP + b_4$EMP + b_5$EMP
b$GO = b$GO + b_2$GO + b_3$GO + b_4$GO + b_5$GO
b$desc = "TOTAL INDUSTRIES-AutorSalomons"
b$code = "TOT_AS"
b$branche = "TOT" # easier when both are called "TOT" in branche for filtering
}
b$branche_desc = "Lande Total"
data_fin <- rbind(data, data2, b)
pdata = pdata.frame(data_fin, index = c("code", "year"))
# Variable construction: log levels, differences and percentage changes of
# employment and labour productivity (GO per person engaged), using plm's
# time-shifted lag/diff on the pseries.
pdata$emp_log <- log(pdata$EMP)
pdata$emp_diff = diff(pdata$EMP, lag = 1, shift = "time")
pdata$emp_logdiff = diff(pdata$emp_log, lag = 1, shift = "time")
pdata$emp_changes <- pdata$emp_diff/lag(pdata$EMP, k = 1, shift = "time")*100
pdata$emp_logchanges = diff(pdata$emp_log, lag = 1, shift = "time")*100
pdata$prod <- pdata$GO/pdata$EMP
pdata$prod_diff <- diff(pdata$prod, lag = 1, shift = "time")
pdata$prod_changes <- pdata$prod_diff/lag(pdata$prod, k = 1, shift = "time")*100
pdata$prod_changes2 <- pdata$prod_diff/lag(pdata$prod, k = 1, shift = "time")
pdata$prod_log <- log(pdata$prod)
pdata$prod_logchanges<- diff(pdata$prod_log, lag = 1, shift = "time")*100
pdata$prod_logdiff<- diff(pdata$prod_log, lag = 1, shift = "time")
# NOTE(review): Emma is compared with F/T rather than FALSE/TRUE — works
# unless F/T are rebound, but fragile.
if (Emma==F) {
# Cumulative productivity growth per industry, two equivalent methods plus the
# simple-growth-rate version; [-1] drops the leading NA before compounding.
pdata = pdata %>% group_by(code) %>% mutate(prod_CGR_logchanges = order_by(year,cumprod(1+prod_logdiff[-1])*100)) # method 1
pdata = pdata %>% group_by(code) %>% mutate(prod_logCGR = order_by(year, CGR(prod_logdiff[-1])*100)) # method 2
pdata = pdata %>% group_by(code) %>% mutate(prod_CGR= order_by(year, CGR(prod_changes2[-1])*100))
#df = pdata %>% group_by(code) %>% mutate(cumsum = cumsum())
pdata <- pdata %>% select(year, country, code, desc, sel_industries, branche, branche_desc, EMP, emp_logchanges, GO, prod, prod_logchanges,prod_changes, prod_CGR, prod_logCGR, prod_CGR_logchanges) %>%
  filter(code!="b0",code!="s0")
pdata = pdata.frame(pdata, index = c("code", "year"))
# Shift the cumulative series forward one period so they line up with their year.
pdata$prod_logCGR <- lag(pdata$prod_logCGR, k=1, shift="time")
pdata$prod_CGR <- lag(pdata$prod_CGR, k=1, shift="time")
}
if (Emma==T) {
# Reduced output: no cumulative series, keep only core columns.
pdata <- pdata %>% select(year, country, code, desc, sel_industries, branche, branche_desc, EMP, emp_logchanges, GO, prod_logchanges) %>%
  filter(code!="b0")
#pdata = pdata.frame(pdata, index = c("code", "year"))
}
pdata
}
func_regpanel <- function(dataset_1, type) {
if (type==1) {
tot = dataset_1 %>% filter(branche=="TOT")
tot$EMP_tot = tot$EMP
tot$GO_tot = tot$GO
tot <- tot %>% select(year, EMP_tot, GO_tot)
ind = dataset_1 %>% filter(sel_industries==1)
b <- dataset_1 %>% filter(branche=="b-tot")
b$branche = b$code
b$prod_logchanges_b = b$prod_logchanges
b$EMP_b = b$EMP
b$GO_b = b$GO
b = b %>% select(year, branche, EMP_b, GO_b)
ind = merge(ind, b, by=c("year", "branche"), all.x = TRUE)
#----------- nedenstående skal bruges hvis vi siger sektor minus industri i vores beta2 variable--------------
b1 = b %>% filter(branche=="b1") %>% mutate(EMP_b1=EMP_b) %>% mutate(GO_b1=GO_b) %>% select(year, EMP_b1,GO_b1)
b2 = b %>% filter(branche=="b2") %>% mutate(EMP_b2=EMP_b) %>% mutate(GO_b2=GO_b) %>% select(EMP_b2, GO_b2)
b3 = b %>% filter(branche=="b3") %>% mutate(EMP_b3=EMP_b) %>% mutate(GO_b3=GO_b) %>% select(EMP_b3, GO_b3)
b4 = b %>% filter(branche=="b4") %>% mutate(EMP_b4=EMP_b) %>% mutate(GO_b4=GO_b) %>% select(EMP_b4, GO_b4)
b5 = b %>% filter(branche=="b5") %>% mutate(EMP_b5=EMP_b) %>% mutate(GO_b5=GO_b) %>% select(EMP_b5, GO_b5)
#b1 = b %>% filter(branche=="b1") %>% mutate(prod_logchanges_b1=prod_logchanges_b) %>% mutate(EMP_b1=EMP_b) %>% mutate(GO_b1=GO_b) %>% select(year, prod_logchanges_b1, EMP_b1,GO_b1)
#b2 = b %>% filter(branche=="b2") %>% mutate(prod_logchanges_b2=prod_logchanges_b) %>% mutate(EMP_b2=EMP_b) %>% mutate(GO_b2=GO_b) %>% select(prod_logchanges_b2, EMP_b2, GO_b2)
#b3 = b %>% filter(branche=="b3") %>% mutate(prod_logchanges_b3=prod_logchanges_b) %>% mutate(EMP_b3=EMP_b) %>% mutate(GO_b3=GO_b) %>% select(prod_logchanges_b3, EMP_b3, GO_b3)
#b4 = b %>% filter(branche=="b4") %>% mutate(prod_logchanges_b4=prod_logchanges_b) %>% mutate(EMP_b4=EMP_b) %>% mutate(GO_b4=GO_b) %>% select(prod_logchanges_b4, EMP_b4, GO_b4)
#b5 = b %>% filter(branche=="b5") %>% mutate(prod_logchanges_b5=prod_logchanges_b) %>% mutate(EMP_b5=EMP_b) %>% mutate(GO_b5=GO_b) %>% select(prod_logchanges_b5, EMP_b5, GO_b5)
#----------nedenstående skal bruges hvis vi bruger sektor produktiviteter som vores beta1 variable----
# b1 = b %>% filter(branche=="b1") %>% mutate(prod_logchanges_b1=prod_logchanges_b) %>% select(year, prod_logchanges_b1)
#b2 = b %>% filter(branche=="b2") %>% mutate(prod_logchanges_b2=prod_logchanges_b) %>% select(prod_logchanges_b2)
#b3 = b %>% filter(branche=="b3") %>% mutate(prod_logchanges_b3=prod_logchanges_b) %>% select(prod_logchanges_b3)
#b4 = b %>% filter(branche=="b4") %>% mutate(prod_logchanges_b4=prod_logchanges_b) %>% select(prod_logchanges_b4)
#b5 = b %>% filter(branche=="b5") %>% mutate(prod_logchanges_b5=prod_logchanges_b) %>% select(prod_logchanges_b5)
#-------------------------------------------------------------------------------------------------------
test = b %>% count(branche) %>% nrow
if (test==8) {
b6 = b %>% filter(branche=="b6") %>% mutate(prod_logchanges_b6=prod_logchanges_b) %>% select(prod_logchanges_b6)
b7 = b %>% filter(branche=="b7") %>% mutate(prod_logchanges_b7=prod_logchanges_b) %>% select(prod_logchanges_b7)
b8 = b %>% filter(branche=="b8") %>% mutate(prod_logchanges_b8=prod_logchanges_b) %>% select(prod_logchanges_b8)
b = cbind(b1,b2,b3,b4,b5,b6,b7,b8)
} else {
b = cbind(b1,b2,b3,b4,b5)
}
ind = merge(ind, b, by=c("year"), all.x = TRUE)
ind = merge(ind, tot, by=c("year"), all.x = TRUE)
ind$wgt_i = ind$EMP/ind$EMP_tot
ind$wgt_b = ind$EMP_b/ind$EMP_tot
ind = pdata.frame(ind, index = c("code", "year"))
#Beta2 variable og lags, mikro + makro
ind$dLP_CwoI = diff(log((ind$GO_tot-ind$GO)/(ind$EMP_tot-ind$EMP)), lag = 1, shift = "time")*100
ind$dLP_CwoI_lag1 = lag(ind$dLP_CwoI, k = 1, shift = "time")
ind$dLP_CwoI_lag2 = lag(ind$dLP_CwoI, k = 2, shift = "time")
ind$dLP_CwoI_lag3 = lag(ind$dLP_CwoI, k = 3, shift = "time")
#Beta2 variable og lags, sektor spillover
ind$dLP_BwoI_b1 = ifelse(ind$branche=="b1", diff(log((ind$GO_b1-ind$GO)/(ind$EMP_b1-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b1/ind$EMP_b1), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b2 = ifelse(ind$branche=="b2", diff(log((ind$GO_b2-ind$GO)/(ind$EMP_b2-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b2/ind$EMP_b2), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b3 = ifelse(ind$branche=="b3", diff(log((ind$GO_b3-ind$GO)/(ind$EMP_b3-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b3/ind$EMP_b3), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b4 = ifelse(ind$branche=="b4", diff(log((ind$GO_b4-ind$GO)/(ind$EMP_b4-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b4/ind$EMP_b4), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b5 = ifelse(ind$branche=="b5", diff(log((ind$GO_b5-ind$GO)/(ind$EMP_b5-ind$EMP)), lag = 1, shift = "time")*100, diff(log(ind$GO_b5/ind$EMP_b5), lag = 1, shift = "time")*100)
ind$dLP_BwoI_b1_lag1 = lag(ind$dLP_BwoI_b1, k = 1, shift = "time")
ind$dLP_BwoI_b1_lag2 = lag(ind$dLP_BwoI_b1, k = 2, shift = "time")
ind$dLP_BwoI_b1_lag3 = lag(ind$dLP_BwoI_b1, k = 3, shift = "time")
ind$dLP_BwoI_b2_lag1 = lag(ind$dLP_BwoI_b2, k = 1, shift = "time")
ind$dLP_BwoI_b2_lag2 = lag(ind$dLP_BwoI_b2, k = 2, shift = "time")
ind$dLP_BwoI_b2_lag3 = lag(ind$dLP_BwoI_b2, k = 3, shift = "time")
ind$dLP_BwoI_b3_lag1 = lag(ind$dLP_BwoI_b3, k = 1, shift = "time")
ind$dLP_BwoI_b3_lag2 = lag(ind$dLP_BwoI_b3, k = 2, shift = "time")
ind$dLP_BwoI_b3_lag3 = lag(ind$dLP_BwoI_b3, k = 3, shift = "time")
ind$dLP_BwoI_b4_lag1 = lag(ind$dLP_BwoI_b4, k = 1, shift = "time")
ind$dLP_BwoI_b4_lag2 = lag(ind$dLP_BwoI_b4, k = 2, shift = "time")
ind$dLP_BwoI_b4_lag3 = lag(ind$dLP_BwoI_b4, k = 3, shift = "time")
ind$dLP_BwoI_b5_lag1 = lag(ind$dLP_BwoI_b5, k = 1, shift = "time")
ind$dLP_BwoI_b5_lag2 = lag(ind$dLP_BwoI_b5, k = 2, shift = "time")
ind$dLP_BwoI_b5_lag3 = lag(ind$dLP_BwoI_b5, k = 3, shift = "time")
#beta1 variable, sectoral spillover:
ind = na.omit(ind)
ind$dLP_I_b1 = ifelse(ind$branche=="b1", ind$prod_logchanges, 0)
ind$dLP_I_b1_dum = ifelse(ind$dLP_I_b1==0, 0, 1)
ind$dLP_I_b2 = ifelse(ind$branche=="b2", ind$prod_logchanges, 0)
ind$dLP_I_b2_dum = ifelse(ind$dLP_I_b2==0, 0, 1)
ind$dLP_I_b3 = ifelse(ind$branche=="b3", ind$prod_logchanges, 0)
ind$dLP_I_b3_dum = ifelse(ind$dLP_I_b3==0, 0, 1)
ind$dLP_I_b4 = ifelse(ind$branche=="b4", ind$prod_logchanges, 0)
ind$dLP_I_b4_dum = ifelse(ind$dLP_I_b4==0, 0, 1)
ind$dLP_I_b5 = ifelse(ind$branche=="b5", ind$prod_logchanges, 0)
ind$dLP_I_b5_dum = ifelse(ind$dLP_I_b5==0, 0, 1)
#what to do with zeros ?
#In most applications, removing the records with zeros would be wrong for two reasons:
#(1) it reduces the amount of data, thereby increasing the uncertainties and (2) it could bias the results.
#One practical method to cope with such data is described in my answer at stats.stackexchange.com/a/1795: create "dummy" variables to indicate the zeros
#How should I transform non-negative data including zeros? https://stats.stackexchange.com/questions/1444/how-should-i-transform-non-negative-data-including-zeros
# nedenstående skal bruges hvis vi siger total minus sektor i vores beta2 variable
#ind$prod_logchanges_c1 = ifelse(ind$branche=="b1", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c2 = ifelse(ind$branche=="b2", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c3 = ifelse(ind$branche=="b3", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c4 = ifelse(ind$branche=="b4", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
#ind$prod_logchanges_c5 = ifelse(ind$branche=="b5", diff(log((ind$GO_tot-ind$GO_b)/(ind$EMP_tot-ind$EMP_b)), lag = 1, shift = "time")*100, diff(log(ind$GO_tot/ind$EMP_tot), lag = 1, shift = "time")*100)
ind
} else if (type==2) {
b = dataset_1 %>% filter(branche=="b-tot")
sumEMP <- b %>% group_by(year) %>% summarize(sum_EMP=sum(EMP))
b = merge(b, sumEMP, by=c("year"), all.x = TRUE)
b$share_EMP = (b$EMP/b$sum_EMP)*100
b = pdata.frame(b, index = c("code", "year"))
b$share_EMP_ppchange = diff(b$share_EMP, lag = 1, shift = "time")
b$share_EMP_ppchange = ifelse(is.na(b$share_EMP_ppchange)==T,0,b$share_EMP_ppchange)
b = b %>% group_by(code) %>% mutate(cumsum_EMP = cumsum(share_EMP_ppchange))
b$year = lubridate::ymd(b$year, truncated = 2L)
b
} else if (type==3) {
tot = dataset_1 %>% filter(code=="TOT")
tot1 = dataset_1 %>% filter(branche=="TOT")
#tot$year = lubridate::ymd(tot$year, truncated = 2L)
tot
} else {
NA
}
}
#key_table <- read_csv("EUklems-data-master/key_table.csv")
# Country data -----------------------------------------------------
# nogle af industrierne (eller subkategorierne) findes ikke i alle lande, fx findes 45,46,47 ikke i Frankrig før 1995, selvom overkategorien G findes
# Danmark
DK_emp <- read_excel("Data/DK_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#DK_go <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
DK_gop <- read_excel("Data/DK_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume (2010 prices)
DK_comp <- read_excel("Data/DK_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
DK_lab <- read_excel("Data/DK_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
DK_va <- read_excel("Data/DK_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Employment and productivity
# NOTE(review): the result of the first func_empprod() call is immediately
# overwritten by the second call; the F-variant line is dead code unless
# func_empprod has side effects - confirm against its definition and remove.
DK_ep = func_empprod(DK_emp, DK_gop,"DK", "EMP", "GO_QI", F)
DK_ep = func_empprod(DK_emp, DK_gop,"DK", "EMP", "GO_QI", T, "AS")
#PLM analysis (panel data prepared by func_regpanel; see type dispatch above)
DK_ind = func_regpanel(DK_ep, 1)
DK_tot = func_regpanel(DK_ep, 3)
#descriptive
DK_b = func_regpanel(DK_ep, 2)
#Labour share
DK_ls = func_labshare(DK_comp, DK_va, DK_lab, "DK", "COMP", "VA", "LAB")
DK_ls$year = lubridate::ymd(DK_ls$year, truncated = 2L)
# NOTE(review): in every country section below, the first func_empprod() result
# is immediately overwritten by the second call (F-variant vs T,"AS"-variant);
# the first call appears to be dead code unless func_empprod has side effects -
# confirm against its definition.
# NOTE(review): several GO_QI comments below said "Gross Output at current
# basic prices" (copy-pasted from the commented-out GO sheet); GO_QI is an
# output volume/price-index sheet - comments corrected, code untouched.
# USA
US_emp <- read_excel("Data/US_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#US_go <- read_excel("Data/US_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
US_gop <- read_excel("Data/US_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume/price index (GO_QI)
US_comp <- read_excel("Data/US_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
US_lab <- read_excel("Data/US_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
US_va <- read_excel("Data/US_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
US_ls = func_labshare(US_comp, US_va, US_lab, "US", "COMP", "VA", "LAB")
US_ls$year = lubridate::ymd(US_ls$year, truncated = 2L)
#Employment and productivity
US_ep = func_empprod(US_emp, US_gop,"US", "EMP", "GO_QI", F)
US_ep = func_empprod(US_emp, US_gop,"US", "EMP", "GO_QI", T,"AS")
#PLM analysis
US_ind = func_regpanel(US_ep, 1)
US_tot = func_regpanel(US_ep, 3)
#descriptive
US_b = func_regpanel(US_ep, 2)
# UK - constant prices are not available (faste priser findes ikke)
UK_emp <- read_excel("Data/UK_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#UK_go <- read_excel("Data/UK_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
UK_gop <- read_excel("Data/UK_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume/price index (GO_QI)
UK_comp <- read_excel("Data/UK_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
UK_lab <- read_excel("Data/UK_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
UK_va <- read_excel("Data/UK_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
UK_ls = func_labshare(UK_comp, UK_va, UK_lab, "UK", "COMP", "VA", "LAB")
UK_ls$year = lubridate::ymd(UK_ls$year, truncated = 2L)
#Employment and productivity
UK_ep = func_empprod(UK_emp, UK_gop,"UK", "EMP", "GO_QI", F)
UK_ep = func_empprod(UK_emp, UK_gop,"UK", "EMP", "GO_QI", T, "AS")
#PLM analysis
UK_ind = func_regpanel(UK_ep, 1)
UK_tot = func_regpanel(UK_ep, 3)
#descriptive
UK_b = func_regpanel(UK_ep, 2)
# Germany (Tyskland)
DE_emp <- read_excel("Data/DE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#DE_go <- read_excel("Data/DE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
DE_gop <- read_excel("Data/DE_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume/price index (GO_QI)
DE_comp <- read_excel("Data/DE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
DE_lab <- read_excel("Data/DE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
DE_va <- read_excel("Data/DE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
DE_ls = func_labshare(DE_comp, DE_va, DE_lab, "DE", "COMP", "VA", "LAB")
DE_ls$year = lubridate::ymd(DE_ls$year, truncated = 2L)
#Employment and productivity
DE_ep = func_empprod(DE_emp, DE_gop,"DE", "EMP", "GO_QI", F)
DE_ep = func_empprod(DE_emp, DE_gop,"DE", "EMP", "GO_QI", T, "AS")
#PLM analysis
DE_ind = func_regpanel(DE_ep, 1)
DE_tot = func_regpanel(DE_ep, 3)
#descriptive
DE_b = func_regpanel(DE_ep, 2)
# Netherlands (Holland)
NL_emp <- read_excel("Data/NL_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#NL_go <- read_excel("Data/NL_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
NL_gop <- read_excel("Data/NL_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume/price index (GO_QI)
NL_comp <- read_excel("Data/NL_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
NL_lab <- read_excel("Data/NL_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
NL_va <- read_excel("Data/NL_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
NL_ls = func_labshare(NL_comp, NL_va, NL_lab, "NL", "COMP", "VA", "LAB")
NL_ls$year = lubridate::ymd(NL_ls$year, truncated = 2L)
#Employment and productivity
NL_ep = func_empprod(NL_emp, NL_gop,"NL", "EMP", "GO_QI", F)
NL_ep = func_empprod(NL_emp, NL_gop,"NL", "EMP", "GO_QI", T, "AS")
#PLM analysis
NL_ind = func_regpanel(NL_ep, 1)
NL_tot = func_regpanel(NL_ep, 3)
#descriptive
NL_b = func_regpanel(NL_ep, 2)
# Sweden (Sverige)
SE_emp <- read_excel("Data/SE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SE_go <- read_excel("Data/SE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SE_gop <- read_excel("Data/SE_output_17ii.xlsx", sheet = "GO_QI") #Gross output, volume/price index (GO_QI)
SE_comp <- read_excel("Data/SE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SE_lab <- read_excel("Data/SE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SE_va <- read_excel("Data/SE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
SE_ls = func_labshare(SE_comp, SE_va, SE_lab, "SE", "COMP", "VA", "LAB")
SE_ls$year = lubridate::ymd(SE_ls$year, truncated = 2L)
#Employment and productivity
SE_ep = func_empprod(SE_emp, SE_gop,"SE", "EMP", "GO_QI", F)
SE_ep = func_empprod(SE_emp, SE_gop,"SE", "EMP", "GO_QI", T, "AS")
#PLM analysis
SE_ind = func_regpanel(SE_ep, 1)
SE_tot = func_regpanel(SE_ep, 3)
#descriptive
SE_b = func_regpanel(SE_ep, 2)
# Austria (Østrig)
AT_emp = read_excel("Data/AT_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#AT_go = read_excel("Data/AT_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
AT_gop = read_excel("Data/AT_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
AT_comp = read_excel("Data/AT_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
AT_lab = read_excel("Data/AT_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
AT_va = read_excel("Data/AT_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
AT_ls = func_labshare(AT_comp, AT_va, AT_lab, "AT", "COMP", "VA", "LAB")
AT_ls$year = lubridate::ymd(AT_ls$year, truncated = 2L)
#Employment and productivity
AT_ep = func_empprod(AT_emp, AT_gop,"AT", "EMP", "GO_QI", F)
AT_ep = func_empprod(AT_emp, AT_gop,"AT", "EMP", "GO_QI", T, "AS")
#PLM analysis
AT_ind = func_regpanel(AT_ep, 1)
AT_tot = func_regpanel(AT_ep, 3)
#descriptive
AT_b = func_regpanel(AT_ep, 2)
# Belgium
BE_emp = read_excel("Data/BE_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#BE_go = read_excel("Data/BE_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
BE_gop = read_excel("Data/BE_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
BE_comp = read_excel("Data/BE_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
BE_lab = read_excel("Data/BE_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
BE_va = read_excel("Data/BE_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
BE_ls = func_labshare(BE_comp, BE_va, BE_lab, "BE", "COMP", "VA", "LAB")
BE_ls$year = lubridate::ymd(BE_ls$year, truncated = 2L)
#Employment and productivity
BE_ep = func_empprod(BE_emp, BE_gop,"BE", "EMP", "GO_QI", F)
BE_ep = func_empprod(BE_emp, BE_gop,"BE", "EMP", "GO_QI", T, "AS")
#PLM analysis
BE_ind = func_regpanel(BE_ep, 1)
BE_tot = func_regpanel(BE_ep, 3)
#descriptive
BE_b = func_regpanel(BE_ep, 2)
# Czechia (Tjekkiet)
CZ_emp = read_excel("Data/CZ_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#CZ_go = read_excel("Data/CZ_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
CZ_gop = read_excel("Data/CZ_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
CZ_comp = read_excel("Data/CZ_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
CZ_lab = read_excel("Data/CZ_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
CZ_va = read_excel("Data/CZ_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
CZ_ls = func_labshare(CZ_comp, CZ_va, CZ_lab, "CZ", "COMP", "VA", "LAB")
CZ_ls$year = lubridate::ymd(CZ_ls$year, truncated = 2L)
#Employment and productivity
CZ_ep = func_empprod(CZ_emp, CZ_gop,"CZ", "EMP", "GO_QI", F)
CZ_ep = func_empprod(CZ_emp, CZ_gop,"CZ", "EMP", "GO_QI", T, "AS")
#PLM analysis
CZ_ind = func_regpanel(CZ_ep, 1)
CZ_tot = func_regpanel(CZ_ep, 3)
#descriptive
CZ_b = func_regpanel(CZ_ep, 2)
# Finland
FI_emp = read_excel("Data/FI_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#FI_go = read_excel("Data/FI_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
FI_gop = read_excel("Data/FI_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
FI_comp = read_excel("Data/FI_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
FI_lab = read_excel("Data/FI_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
FI_va = read_excel("Data/FI_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
FI_ls = func_labshare(FI_comp, FI_va, FI_lab, "FI", "COMP", "VA", "LAB")
FI_ls$year = lubridate::ymd(FI_ls$year, truncated = 2L)
#Employment and productivity
FI_ep = func_empprod(FI_emp, FI_gop,"FI", "EMP", "GO_QI", F)
FI_ep = func_empprod(FI_emp, FI_gop,"FI", "EMP", "GO_QI", T, "AS")
#PLM analysis
FI_ind = func_regpanel(FI_ep, 1)
FI_tot = func_regpanel(FI_ep, 3)
#descriptive
FI_b = func_regpanel(FI_ep, 2)
# France (Frankrig)
FR_emp = read_excel("Data/FR_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#FR_go = read_excel("Data/FR_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
FR_gop = read_excel("Data/FR_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
FR_comp = read_excel("Data/FR_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
FR_lab = read_excel("Data/FR_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
FR_va = read_excel("Data/FR_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
FR_ls = func_labshare(FR_comp, FR_va, FR_lab, "FR", "COMP", "VA", "LAB")
FR_ls$year = lubridate::ymd(FR_ls$year, truncated = 2L)
#Employment and productivity
FR_ep = func_empprod(FR_emp, FR_gop,"FR", "EMP", "GO_QI", F)
FR_ep = func_empprod(FR_emp, FR_gop,"FR", "EMP", "GO_QI", T, "AS")
#PLM analysis
FR_ind = func_regpanel(FR_ep, 1)
FR_tot = func_regpanel(FR_ep, 3)
#descriptive
FR_b = func_regpanel(FR_ep, 2)
# Greece (Grækenland)
EL_emp = read_excel("Data/EL_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#EL_go = read_excel("Data/EL_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
EL_gop = read_excel("Data/EL_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
EL_comp = read_excel("Data/EL_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
EL_lab = read_excel("Data/EL_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
EL_va = read_excel("Data/EL_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
EL_ls = func_labshare(EL_comp, EL_va, EL_lab, "EL", "COMP", "VA", "LAB")
EL_ls$year = lubridate::ymd(EL_ls$year, truncated = 2L)
#Employment and productivity
EL_ep = func_empprod(EL_emp, EL_gop,"EL", "EMP", "GO_QI", F)
EL_ep = func_empprod(EL_emp, EL_gop,"EL", "EMP", "GO_QI", T, "AS")
#PLM analysis
EL_ind = func_regpanel(EL_ep, 1)
EL_tot = func_regpanel(EL_ep, 3)
#descriptive
EL_b = func_regpanel(EL_ep, 2)
# Italy (Italien)
IT_emp = read_excel("Data/IT_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#IT_go = read_excel("Data/IT_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
IT_gop = read_excel("Data/IT_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
IT_comp = read_excel("Data/IT_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
IT_lab = read_excel("Data/IT_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
IT_va = read_excel("Data/IT_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
IT_ls = func_labshare(IT_comp, IT_va, IT_lab, "IT", "COMP", "VA", "LAB")
IT_ls$year = lubridate::ymd(IT_ls$year, truncated = 2L)
#Employment and productivity
IT_ep = func_empprod(IT_emp, IT_gop,"IT", "EMP", "GO_QI", F)
IT_ep = func_empprod(IT_emp, IT_gop,"IT", "EMP", "GO_QI", T, "AS")
#PLM analysis
IT_ind = func_regpanel(IT_ep, 1)
IT_tot = func_regpanel(IT_ep, 3)
#descriptive
IT_b = func_regpanel(IT_ep, 2)
# Latvia (Letland)
LV_emp = read_excel("Data/LV_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#LV_go = read_excel("Data/LV_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
LV_gop = read_excel("Data/LV_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
LV_comp = read_excel("Data/LV_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
LV_lab = read_excel("Data/LV_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
LV_va = read_excel("Data/LV_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
LV_ls = func_labshare(LV_comp, LV_va, LV_lab, "LV", "COMP", "VA", "LAB")
LV_ls$year = lubridate::ymd(LV_ls$year, truncated = 2L)
#Employment and productivity
LV_ep = func_empprod(LV_emp, LV_gop,"LV", "EMP", "GO_QI", F)
LV_ep = func_empprod(LV_emp, LV_gop,"LV", "EMP", "GO_QI", T, "AS")
#PLM analysis
LV_ind = func_regpanel(LV_ep, 1)
LV_tot = func_regpanel(LV_ep, 3)
#descriptive
LV_b = func_regpanel(LV_ep, 2)
# Luxembourg (Luxenborg)
LU_emp = read_excel("Data/LU_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#LU_go = read_excel("Data/LU_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
LU_gop = read_excel("Data/LU_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
LU_comp = read_excel("Data/LU_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
LU_lab = read_excel("Data/LU_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
LU_va = read_excel("Data/LU_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
LU_ls = func_labshare(LU_comp, LU_va, LU_lab, "LU", "COMP", "VA", "LAB")
LU_ls$year = lubridate::ymd(LU_ls$year, truncated = 2L)
#Employment and productivity
LU_ep = func_empprod(LU_emp, LU_gop,"LU", "EMP", "GO_QI", F)
LU_ep = func_empprod(LU_emp, LU_gop,"LU", "EMP", "GO_QI", T, "AS")
#PLM analysis
LU_ind = func_regpanel(LU_ep, 1)
LU_tot = func_regpanel(LU_ep, 3)
#descriptive
LU_b = func_regpanel(LU_ep, 2)
# Slovakia (Slovakiet)
SK_emp = read_excel("Data/SK_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SK_go = read_excel("Data/SK_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SK_gop = read_excel("Data/SK_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
SK_comp = read_excel("Data/SK_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SK_lab = read_excel("Data/SK_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SK_va = read_excel("Data/SK_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
SK_ls = func_labshare(SK_comp, SK_va, SK_lab, "SK", "COMP", "VA", "LAB")
SK_ls$year = lubridate::ymd(SK_ls$year, truncated = 2L)
#Employment and productivity
SK_ep = func_empprod(SK_emp, SK_gop,"SK", "EMP", "GO_QI", F)
SK_ep = func_empprod(SK_emp, SK_gop,"SK", "EMP", "GO_QI", T, "AS")
#PLM analysis
SK_ind = func_regpanel(SK_ep, 1)
SK_tot = func_regpanel(SK_ep, 3)
#descriptive
SK_b = func_regpanel(SK_ep, 2)
# Slovenia (Slovenien)
SI_emp = read_excel("Data/SI_output_17ii.xlsx", sheet = "EMP") #Number of persons engaged (thousands)
#SI_go = read_excel("Data/SI_output_17ii.xlsx", sheet = "GO") #Gross Output at current basic prices (in millions of national currency)
SI_gop = read_excel("Data/SI_output_17ii.xlsx", sheet = "GO_QI") #Gross output, price indices, 2010 = 100
SI_comp = read_excel("Data/SI_output_17ii.xlsx", sheet = "COMP") #Compensation of employees (in millions of national currency)
SI_lab = read_excel("Data/SI_output_17ii.xlsx", sheet = "LAB") #Labour compensation (in millions of national currency)
SI_va = read_excel("Data/SI_output_17ii.xlsx", sheet = "VA") #Gross value added at current basic prices (in millions of national currency), equals labour + capital compensation
#Labour share
SI_ls = func_labshare(SI_comp, SI_va, SI_lab, "SI", "COMP", "VA", "LAB")
SI_ls$year = lubridate::ymd(SI_ls$year, truncated = 2L)
#Employment and productivity
SI_ep = func_empprod(SI_emp, SI_gop,"SI", "EMP", "GO_QI", F)
SI_ep = func_empprod(SI_emp, SI_gop,"SI", "EMP", "GO_QI", T, "AS")
#PLM analysis
SI_ind = func_regpanel(SI_ep, 1)
SI_tot = func_regpanel(SI_ep, 3)
#descriptive
SI_b = func_regpanel(SI_ep, 2)
# Descriptive -------------------------------------------------------------
# Shared x-axis window for the plots below; NA presumably means "no upper
# limit" - confirm against scale_x_date(limits =).
# NOTE(review): `min` and `max` shadow base::min()/base::max() in the global
# environment; consider renaming (e.g. x_min/x_max) in a follow-up that also
# updates every plot referencing them.
min <- as.Date("1995-1-1")
max <- NA
# DESCRIPTIVE - Labour share of national income
# FIX(review): ggplot2 keeps only the LAST scale added per aesthetic, so the
# original pairs of scale_x_date() calls replaced each other and the
# date_breaks/date_labels settings were silently dropped. Each pair is merged
# into a single scale_x_date() call below.
# FIX(review): the "Tyskland" plot referenced GE_ls, which is never defined;
# the German labour-share data is loaded as DE_ls.
# NOTE: the outer braces group the plots; when the whole block is run at once,
# only the last expression's plot is auto-printed - run inner blocks
# individually (interactive use).
{
  # Aggregate labour share of national income, total labour force (DK).
  {ggplot(DK_ls, aes(year, LS)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, Total Labour Force") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # Aggregate labour share of national income, employees only.
  {ggplot(DK_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  {ggplot(US_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("USA - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #US
  {ggplot(UK_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Storbritannien - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #UK
  # Germany: was GE_ls (undefined) - the object created above is DE_ls.
  {ggplot(DE_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Tyskland - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DE
  {ggplot(NL_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Holland - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #NL
  {ggplot(SE_ls, aes(year, LSe)) +
      geom_point() +
      geom_line() +
      geom_smooth(method = "lm") +
      xlab("Time") + ylab("") +
      ggtitle("Sverige - Aggregate Labour Share of National Income, Employees") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #SE
  # Aggregate labour share, LS (total labour force) vs LSe (employees), DK.
  # Mapping a constant string to color inside aes() creates the legend entries;
  # scale_color_manual() then assigns the actual colours.
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = LS, color = "LS")) +
      geom_line(aes(x = year, y = LSe, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Aggregate Labour Share of National Income, comparison") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # Index numbers (1975 = 100).
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = indeksLS, color = "LS")) +
      geom_line(aes(x = year, y = indeksLSe, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Indekstal, 1975=100") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # Cumulative growth of labour share of national income, comparison.
  {ggplot(data = DK_ls) +
      geom_line(aes(x = year, y = LS_CGR, color = "LS")) +
      geom_line(aes(x = year, y = LSe_CGR, color = "LSe")) +
      scale_color_manual(name = "Colors", values = c("LS" = "blue", "LSe" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
}
# DESCRIPTIVE - Sectoral employment and productivity
# FIX(review): ggplot2 keeps only the LAST scale added per aesthetic, so the
# original pairs of scale_x_date() calls replaced each other (break/label
# settings silently dropped). Each pair is merged into one call below.
# NOTE: only the last expression's plot auto-prints when the whole outer block
# is run; run inner blocks individually (interactive use).
{
  # Employment by sector (DK).
  {ggplot(data = DK_b, aes(x = year, y = EMP, group = desc, colour = desc)) +
      geom_point() +
      geom_line() +
      xlab("Time") + ylab("Number of persons engaged in work (thousands)") +
      ggtitle("Employment by Sector") +
      guides(colour = guide_legend(title = "Branche")) +
      theme_economist() +
      theme(legend.position = "right") +
      #scale_x_date(date_labels = "%Y") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
      #theme(axis.text.x = element_text(angle = 90, hjust = 1))
      #scale_color_economist()
  } #DK
  # Productivity and employment growth (aggregate, DK).
  # Mapping a constant string to color inside aes() creates the legend entries;
  # scale_color_manual() then assigns the actual colours.
  {ggplot(data = DK_tot) +
      geom_line(aes(x = year, y = emp_logchanges, color = "emp_logchanges")) +
      geom_line(aes(x = year, y = prod_logchanges, color = "prod_logchanges")) +
      scale_color_manual(name = "Colors", values = c("emp_logchanges" = "blue", "prod_logchanges" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Produktivitet- og beskæftigelsesvækst i DK") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # UK: date-axis scaling left disabled as in the original - presumably
  # UK_tot$year is not a Date here; confirm before enabling.
  {ggplot(data = UK_tot) +
      geom_line(aes(x = year, y = emp_logchanges, color = "emp_logchanges")) +
      geom_line(aes(x = year, y = prod_logchanges, color = "prod_logchanges")) +
      scale_color_manual(name = "Colors", values = c("emp_logchanges" = "blue", "prod_logchanges" = "red")) +
      xlab("Time") + ylab("") +
      ggtitle("Produktivitet- og beskæftigelsesvækst i UK") +
      guides(colour = guide_legend(title = "")) +
      theme_economist() +
      theme(legend.position = "right")
      #scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
      #scale_x_date(limits = c(min, max))
  } #UK
  # Employment growth by sector (DK).
  {ggplot(data = DK_b, aes(x = year, y = emp_logchanges, group = desc, colour = desc)) +
      geom_point() +
      geom_line() +
      xlab("Time") + ylab("") +
      ggtitle("Beskæftigelsesvækst fordelt på brancher") +
      guides(colour = guide_legend(title = "Sector")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # Cumulative productivity growth by sector (DK).
  {ggplot(data = DK_b, aes(x = year, y = prod_logCGR, group = desc, colour = desc)) +
      geom_point() +
      geom_line() +
      xlab("Time") + ylab("100 * kumulativ log ændring") +
      ggtitle("Kumulativ produktivitetsvækst") +
      guides(colour = guide_legend(title = "Sector")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
  # FIX(review): ylab was ")" in the original - a stray paren rendered as the
  # axis label; replaced with an empty label.
  {ggplot(data = UK_b, aes(x = year, y = prod_CGR, group = desc, colour = desc)) +
      geom_point() +
      geom_line() +
      xlab("Time") + ylab("") +
      ggtitle("Kumulativ produktivitetsvækst UK") +
      guides(colour = guide_legend(title = "Sector")) +
      theme_economist() +
      theme(legend.position = "right")
      #scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
      #scale_x_date(limits = c(min, max))
  } #UK
  # Cumulative change in employment by sector (DK).
  {ggplot(data = DK_b, aes(x = year, y = cumsum_EMP, group = desc, colour = desc)) +
      geom_point() +
      geom_line() +
      xlab("Time") + ylab("") +
      ggtitle("Kumulativ ændring i beskæftigelse") +
      guides(colour = guide_legend(title = "Sector")) +
      theme_economist() +
      theme(legend.position = "right") +
      scale_x_date(date_breaks = "5 year", date_labels = "%Y", limits = c(min, max))
  } #DK
}
# Country panel -----------------------------------------------------
c_panel = rbind(DK_tot, SE_tot, US_tot, NL_tot, DE_tot, AT_tot, BE_tot, CZ_tot, EL_tot, FI_tot, FR_tot, IT_tot, LU_tot, SI_tot, SK_tot) # LV_tot
c_panel = pdata.frame(c_panel, index = c("country", "year"))
c_panel$prod_logchanges_lag1 = lag(c_panel$prod_logchanges, k = 1, shift = "time")
c_panel$prod_logchanges_lag2 = lag(c_panel$prod_logchanges_lag1, k = 1, shift = "time")
c_panel$prod_logchanges_lag3 = lag(c_panel$prod_logchanges_lag2, k = 1, shift = "time")
c_panel = na.omit(c_panel)
lsdv.c_pool1 = lm(emp_logchanges ~ prod_logchanges, data=c_panel)
lsdv.c_fec1 = lm(emp_logchanges ~ prod_logchanges + factor(country) -1, data=c_panel)
lsdv.c_feci1 = lm(emp_logchanges ~ prod_logchanges + factor(country) + factor(year) -1, data=c_panel)
lsdv.c_pool2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3, data=c_panel)
lsdv.c_fec2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3 + factor(country) - 1, data=c_panel)
lsdv.c_feci2 = lm(emp_logchanges ~ prod_logchanges + prod_logchanges_lag1 + prod_logchanges_lag2 + prod_logchanges_lag3 + factor(country) + factor(year) -1, data=c_panel)
summary(lsdv.c_pool1)
lsdv.c_pool1_coef = coeftest(lsdv.c_pool1, vcov. = vcovHC, type = "HC1")
lsdv.c_fec1_coef = coeftest(lsdv.c_fec1, vcov. = vcovHC, type = "HC1")
lsdv.c_feci1_coef = coeftest(lsdv.c_feci1, vcov. = vcovHC, type = "HC1")
lsdv.c_pool2_coef = coeftest(lsdv.c_pool2, vcov. = vcovHC, type = "HC1")
lsdv.c_fec2_coef = coeftest(lsdv.c_fec2, vcov. = vcovHC, type = "HC1")
lsdv.c_feci2_coef = coeftest(lsdv.c_feci2, vcov. = vcovHC, type = "HC1")
#coeftest(fixed.dum, vcov. = vcovHC, method = "arellano")
write.csv(cbind(lsdv.c_pool_coef, lsdv.c_feci_coef, lsdv.c_fecy_coef, lsdv.c_feyi_coef), "fixeddum_ci_panel.csv")
#Tester resultater ved brug af plm istedet:
{
model_linear1 = emp_logchanges ~ prod_logchanges
C0_pool = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "pooling")
C0_fd = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "fd")
C0_fe = plm(model_linear1, data = c_panel, index = c("country", "year"), model = "within")
summary(C0_pool)
summary(C0_fd)
summary(C0_fe)
C2_pool = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "pooling")
C2_fd = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "fd")
C2_fe = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "within", effect = "individual")
C2_fe_tw = plm(model_linear2, data = c_panel, index = c("country", "year"), model = "within", effect = "twoway")
summary(C2_pool)
summary(C2_fd)
summary(C2_fe)
summary(C2_fe_tw)
}
# Country industry panel --------------------------------------------------
#AS: Industry-by-country fixed effects are already implicitly taken out by first-differencing in the stacked firstdifference model.
ci_panel = rbind(DK_ind, SE_ind, US_ind, NL_ind, DE_ind, AT_ind, BE_ind, CZ_ind, EL_ind, FI_ind, FR_ind, IT_ind , LU_ind, SI_ind, SK_ind) #, LV_ind)
ci_panel = ci_panel %>% select(year, country, code, desc, emp_logchanges, prod_logchanges, wgt)
ci_panel$id = ci_panel %>% group_indices(code, country)
ci_panel$prod_logchanges_wgt = ci_panel$prod_logchanges*ci_panel$wgt
ci_panel$emp_logchanges_wgt = ci_panel$emp_logchanges*ci_panel$wgt
ci_panel = na.omit(ci_panel) #obs vigtigt at køre efter unødvendige variable er fjernet
model_linear1 = emp_logchanges_wgt ~ prod_logchanges_wgt
model_linear1 = emp_logchanges ~ prod_logchanges
ci.reg <- plm(model_linear1, data = ci_panel, index = c("id", "year"), model = "within")
summary(ci.reg)
fixed.dum = lm(emp_logchanges ~ prod_logchanges, data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt, data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(code), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(code) + factor(year), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(code) + factor(year), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + factor(country) + factor(year), data=ci_panel)
summary(fixed.dum)
#options(digits = 3)
#pols = coeftest(poolOLS, vcov. = vcovHC, method = "arellano")
#FE-modeller
FixedEffects_indi <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "individual")
FixedEffects_time <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "time")
FixedEffects_twoway <- plm(model_linear1, data = dk, index = c("code", "year"), weight=wgt, model = "within", effect = "twoway")
summary(FixedEffects_indi)
summary(FixedEffects_time)
summary(FixedEffects_twoway)
options(digits = 3)
options("scipen"=100, "digits"=4)
coeftest(FixedEffects_indi, vcov. = vcovHC, type = "HC1")
fe = coeftest(FixedEffects_indi, vcov. = vcovHC, method = "arellano")
attributes(Arellano)
Arellano
# Sammensætning af mikro og makroelasticiteter --------------------------------------------------
#hvad gør vi med lande hvor nogle industrier mangler?
#model_linear2 = emp_logchanges ~ prod_logchanges + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) + factor(code) + factor(year), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) + factor(year), data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 + factor(country) , data=ci_panel)
fixed.dum = lm(emp_logchanges_wgt ~ prod_logchanges_wgt + avgLP_oi + avgLP_oi_lag1 + avgLP_oi_lag2 + avgLP_oi_lag3 , data=ci_panel)
summary(fixed.dum)
# Sector spillover -------------------------------------------------
# How to deal with NA in a panel data regression? Link: https://stackoverflow.com/questions/14427781/how-to-deal-with-na-in-a-panel-data-regression------
#Skal det vægtes? Og hvad skal vægtes?
ci_panel_ss = rbind(DK_ind, SE_ind, US_ind, NL_ind, DE_ind, AT_ind, BE_ind, CZ_ind, EL_ind, FI_ind, FR_ind, IT_ind , LU_ind, SI_ind, SK_ind) #, LV_ind)
ci_panel_ss = ci_panel_ss%>% select(year, country, code, desc, branche, branche_desc, wgt_i, wgt_b, emp_logchanges,
dLP_I_b1, dLP_I_b2, dLP_I_b3, dLP_I_b4, dLP_I_b5,
dLP_I_b1_dum, dLP_I_b2_dum, dLP_I_b3_dum, dLP_I_b4_dum, dLP_I_b5_dum,
dLP_BwoI_b1, dLP_BwoI_b1_lag1, dLP_BwoI_b1_lag2, dLP_BwoI_b1_lag3,
dLP_BwoI_b2, dLP_BwoI_b2_lag1, dLP_BwoI_b2_lag2, dLP_BwoI_b2_lag3,
dLP_BwoI_b3, dLP_BwoI_b3_lag1, dLP_BwoI_b3_lag2, dLP_BwoI_b3_lag3,
dLP_BwoI_b4, dLP_BwoI_b4_lag1, dLP_BwoI_b4_lag2, dLP_BwoI_b4_lag3,
dLP_BwoI_b5, dLP_BwoI_b5_lag1, dLP_BwoI_b5_lag2, dLP_BwoI_b5_lag3)
ci_panel_ss = as.data.frame(ci_panel_ss)
is.pconsecutive(ci_panel_ss)
ci_panel_ss$id = ci_panel_ss %>% group_indices(code, country)
base_model = {emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3}
is.pconsecutive(ci_panel_ss)
pTestData <- pdata.frame(TestData, index=c("ID", "Time"))
pTestData$Y_diff <- plm::lag(pTestData$Y) - pTestData$Y
pTestData$X_diff <- plm::lag(pTestData$X) - pTestData$X
fdmod <- plm(Y_diff ~ X_diff, data = pTestData, model = "pooling")
length(residuals(fdmod)) # 10
nrow(fdmod$model) # 10
#ci_panel_ss$id = ci_panel_ss %>% group_indices(code, country)
#ci_panel_ss$prod_logchanges_wgt = ci_panel_ss$prod_logchanges*ci_panel$wgt
#ci_panel_ss$emp_logchanges_wgt = ci_panel_ss$emp_logchanges*ci_panel$wgt
lsdv.ss_pool = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3, data=ci_panel_ss)}
lsdv.ss_fecy = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
factor(country) + factor(year) -1, data=ci_panel_ss)}
summary(lsdv.ss_fecy)
lsdv.ss_feci = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
factor(country) + factor(code), data=ci_panel_ss)}
lsdv.ss_feyi = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
factor(year) + factor(code), data=ci_panel_ss)}
lsdv.ss_fecyi = {lm(emp_logchanges ~ dLP_I_b1 + dLP_I_b2 + dLP_I_b3 + dLP_I_b4 + dLP_I_b5 +
dLP_BwoI_b1 + dLP_BwoI_b1_lag1 + dLP_BwoI_b1_lag2 + dLP_BwoI_b1_lag3 +
dLP_BwoI_b2 + dLP_BwoI_b2_lag1 + dLP_BwoI_b2_lag2 + dLP_BwoI_b2_lag3 +
dLP_BwoI_b3 + dLP_BwoI_b3_lag1 + dLP_BwoI_b3_lag2 + dLP_BwoI_b3_lag3 +
dLP_BwoI_b4 + dLP_BwoI_b4_lag1 + dLP_BwoI_b4_lag2 + dLP_BwoI_b4_lag3 +
dLP_BwoI_b5 + dLP_BwoI_b5_lag1 + dLP_BwoI_b5_lag2 + dLP_BwoI_b5_lag3 +
factor(country) + factor(year) + factor(code), data=ci_panel_ss)}
options(digits = 3)
options("scipen"=100, "digits"=4)
library(lmtest)
lsdv.ss_pool_coef = coeftest(lsdv.ss_pool, vcov. = vcovHC, type = "HC1")
lsdv.ss_feci_coef = coeftest(lsdv.ss_feci, vcov. = vcovHC, type = "HC1")
lsdv.ss_fecy_coef = coeftest(lsdv.ss_fecy, vcov. = vcovHC, type = "HC1")
lsdv.ss_feyi_coef = coeftest(lsdv.ss_feyi, vcov. = vcovHC, type = "HC1")
lsdv.ss_fecyi_coef = coeftest(lsdv.ss_feyi, vcov. = vcovHC, type = "HC1")
#coeftest(fixed.dum, vcov. = vcovHC, method = "arellano")
write.csv(cbind(lsdv.ss_pool_coef, lsdv.ss_feci_coef, lsdv.ss_fecy_coef, lsdv.ss_feyi_coef), "fixeddum_ci_panel.csv")
# Skills..... --------------------------------------------------
# TIME SERIES - Import and preparation of data --------------------------------------------------------------------
EMP <- read_excel("DK_output_17ii.xlsx", sheet = "EMP_2")
GO <- read_excel("DK_output_17ii.xlsx", sheet = "GO_2")
GO_QI <- read_excel("DK_output_17ii.xlsx", sheet = "GO_QI_2")
data <- data.frame(emp_tot = EMP$TOT,
emp_markt = EMP$MARKT,
emp = (EMP$TOT-EMP$A-EMP$O-EMP$T),
go_tot = GO$TOT,
go_markt = GO$MARKT,
go = (GO$TOT-GO$A-GO$O-GO$T),
goqi_tot=GO_QI$TOT,
goqi_markt=GO_QI$MARKT,
go_real = (GO_QI$TOT-GO_QI$A-GO_QI$O-GO_QI$T))
data$emp_log <- log(data$emp)
data$emp_diff <- diff.xts(data$emp)
data$emp_ldiff <- diff.xts(data$emp_log)
data$emp_changes <- data$emp_diff/lag.xts(data$emp,1)*100
data$emp_lchanges <- (data$emp_ldiff/lag.xts(data$emp_log,1))*100
data <- data %>% mutate(prod=go/emp)
data$prod_log <- log(data$prod)
data$prod_diff <- diff.xts(data$prod)
data$prod_ldiff <- diff.xts(data$prod_log)
data$prod_changes <- data$prod_diff/lag.xts(data$prod,1)*100
data$prod_lchanges <- data$prod_ldiff/lag.xts(data$prod_log,1)*100
data.ts <- ts(data, start=1975)
data.ts <- na.omit(data.ts)
autoplot(data.ts[,c("prod_lchanges","prod_changes")])
autoplot(data.ts[,c("emp_lchanges","emp_changes")])
autoplot(data.ts[,c("prod_lchanges","emp_lchanges")])
autoplot(data.ts[,c("prod_changes","emp_changes")])
reg_dk <- lm(emp_lchanges~ prod_lchanges+lag(prod_lchanges,1)+lag(prod_lchanges,2)+lag(prod_lchanges,3), data = data.ts)
reg_dk2 <- lm(emp_lchanges~ prod_lchanges, data = data.ts)
summary(reg_dk)
summary(reg_dk2)
|
#Network construction and consensus module detection
##Use data prepared in WGCNA script
library(WGCNA)
setwd('/Volumes/nordborg/pub/forPieter/WGCNA/')
nSets = 2
setLabels = c("16C", "6C")
#Form multi-set expression data
multiExpr = vector(mode = "list", length = nSets)
multiExpr[[1]] = list(data = as.data.frame(expr16C))
multiExpr[[2]] = list(data = as.data.frame(expr6C))
# Check that the data has the correct format for many functions operating on multiple sets:
exprSize = checkSets(multiExpr)
# Check that all genes and samples have sufficiently low numbers of missing values.
gsg = goodSamplesGenesMS(multiExpr, verbose = 3);
gsg$allOK #it's false so we have to correct it
if (!gsg$allOK) {
# Print information about the removed genes:
if (sum(!gsg$goodGenes) > 0)
printFlush(paste("Removing genes:", paste(names(multiExpr[[1]]$data)[!gsg$goodGenes],
collapse = ", ")))
for (set in 1:exprSize$nSets) {
if (sum(!gsg$goodSamples[[set]]))
printFlush(paste("In set", setLabels[set], "removing samples",
paste(rownames(multiExpr[[set]]$data)[!gsg$goodSamples[[set]]], collapse = ", ")))
# Remove the offending genes and samples
multiExpr[[set]]$data = multiExpr[[set]]$data[gsg$goodSamples[[set]], gsg$goodGenes];
}
# Update exprSize
exprSize = checkSets(multiExpr)
}
#cluster on distance every sample
sampleTrees = list()
for (set in 1:nSets) {
sampleTrees[[set]] = hclust(dist(multiExpr[[set]]$data), method = "average")
}
#plot dendogram
#pdf(file = "SampleClustering.pdf", width = 12, height = 12);
par(mfrow=c(2,1))
par(mar = c(0, 4, 2, 0))
for (set in 1:nSets) {
plot(sampleTrees[[set]], main = paste("Sample clustering on all genes in", setLabels[set]),
xlab="", sub="", cex = 0.7)}
dev.off()
collectGarbage()
# Choose a set of soft-thresholding powers
powers = c(seq(4,10,by=1), seq(12,20, by=2));
# Initialize a list to hold the results of scale-free analysis
powerTables = vector(mode = "list", length = nSets);
# Call the network topology analysis function for each set in turn
for (set in 1:nSets) {
powerTables[[set]] = list(data = pickSoftThreshold(multiExpr[[set]]$data, powerVector=powers, verbose = 2)[[2]])
}
collectGarbage()
# Plot the results:
colors = c("black", "red")
# Will plot these columns of the returned scale free analysis tables
plotCols = c(2,5,6,7)
colNames = c("Scale Free Topology Model Fit", "Mean connectivity", "Median connectivity",
"Max connectivity")
# Get the minima and maxima of the plotted points
ylim = matrix(NA, nrow = 2, ncol = 4)
for (set in 1:nSets) {
for (col in 1:length(plotCols)) {
ylim[1, col] = min(ylim[1, col], powerTables[[set]]$data[, plotCols[col]], na.rm = TRUE);
ylim[2, col] = max(ylim[2, col], powerTables[[set]]$data[, plotCols[col]], na.rm = TRUE);
}
}
# Plot the quantities in the chosen columns vs. the soft thresholding power
sizeGrWindow(8, 6)
#pdf(file = "Plots/scaleFreeAnalysis.pdf", wi = 8, he = 6)
par(mfcol = c(2,2));
par(mar = c(4.2, 4.2 , 2.2, 0.5))
cex1 = 0.7;
for (col in 1:length(plotCols)) for (set in 1:nSets) {
if (set==1) {
plot(powerTables[[set]]$data[,1], -sign(powerTables[[set]]$data[,3])*powerTables[[set]]$data[,2],
xlab="Soft Threshold (power)", ylab=colNames[col],type="n", ylim = ylim[, col],
main = colNames[col]);
addGrid();
}
if (col==1) {
text(powerTables[[set]]$data[,1], -sign(powerTables[[set]]$data[,3])*powerTables[[set]]$data[,2],
labels=powers, cex=cex1, col=colors[set])
} else {
text(powerTables[[set]]$data[,1], powerTables[[set]]$data[,plotCols[col]],
labels=powers,cex=cex1,col=colors[set])
}
if (col==1) {
legend("bottomright", legend = setLabels, col = colors, pch = 20)
} else {
legend("topright", legend = setLabels, col = colors, pch = 20)
}
}
dev.off()
#network construction
bnet = blockwiseConsensusModules(multiExpr, maxBlockSize = 20000,
power = 14, TOMType = "unsigned", minModuleSize = 30, deepSplit = 2, pamRespectsDendro = FALSE,
reassignThreshold = 0, mergeCutHeight = 0.25,
numericLabels = TRUE,
saveTOMs = TRUE,
verbose = 5)
#see the result
consMEs = bnet$multiMEs;
moduleLabels = bnet$colors;
# Convert the numeric labels to color labels
moduleColors = labels2colors(moduleLabels)
consTree = bnet$dendrograms[[1]];
bwLabels = matchLabels(bnet$colors, moduleLabels, pThreshold = 1e-7);
bwColors = labels2colors(bwLabels)
table(bwLabels)
bwLabels
#plot the dendrogram and module color
# Here we show a more flexible way of plotting several trees and colors on one page
sizeGrWindow(12,6)
#pdf(file = "Plots/BlockwiseGeneDendrosAndColors.pdf", wi = 12, he = 6);
# Use the layout function for more involved screen sectioning
layout(matrix(c(1:4), 2, 2), heights = c(0.8, 0.2), widths = c(1,1))
#layout.show(4);
nBlocks = length(bnet$dendrograms)
# Plot the dendrogram and the module colors underneath for each block
for (block in 1:nBlocks) {
plotDendroAndColors(bnet$dendrograms[[block]], moduleColors[bnet$blockGenes[[block]]],
"Module colors",
main = paste("Gene dendrogram and module colors in block", block),
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,
setLayout = FALSE)
}
dev.off()
| /Scripts/Consensus total.R | no_license | picla/coExpression | R | false | false | 5,508 | r | #Network construction and consensus module detection
##Use data prepared in WGCNA script
library(WGCNA)
setwd('/Volumes/nordborg/pub/forPieter/WGCNA/')
nSets = 2
setLabels = c("16C", "6C")
#Form multi-set expression data
multiExpr = vector(mode = "list", length = nSets)
multiExpr[[1]] = list(data = as.data.frame(expr16C))
multiExpr[[2]] = list(data = as.data.frame(expr6C))
# Check that the data has the correct format for many functions operating on multiple sets:
exprSize = checkSets(multiExpr)
# Check that all genes and samples have sufficiently low numbers of missing values.
gsg = goodSamplesGenesMS(multiExpr, verbose = 3);
gsg$allOK #it's false so we have to correct it
if (!gsg$allOK) {
# Print information about the removed genes:
if (sum(!gsg$goodGenes) > 0)
printFlush(paste("Removing genes:", paste(names(multiExpr[[1]]$data)[!gsg$goodGenes],
collapse = ", ")))
for (set in 1:exprSize$nSets) {
if (sum(!gsg$goodSamples[[set]]))
printFlush(paste("In set", setLabels[set], "removing samples",
paste(rownames(multiExpr[[set]]$data)[!gsg$goodSamples[[set]]], collapse = ", ")))
# Remove the offending genes and samples
multiExpr[[set]]$data = multiExpr[[set]]$data[gsg$goodSamples[[set]], gsg$goodGenes];
}
# Update exprSize
exprSize = checkSets(multiExpr)
}
#cluster on distance every sample
sampleTrees = list()
for (set in 1:nSets) {
sampleTrees[[set]] = hclust(dist(multiExpr[[set]]$data), method = "average")
}
#plot dendogram
#pdf(file = "SampleClustering.pdf", width = 12, height = 12);
par(mfrow=c(2,1))
par(mar = c(0, 4, 2, 0))
for (set in 1:nSets) {
plot(sampleTrees[[set]], main = paste("Sample clustering on all genes in", setLabels[set]),
xlab="", sub="", cex = 0.7)}
dev.off()
collectGarbage()
# Choose a set of soft-thresholding powers
powers = c(seq(4,10,by=1), seq(12,20, by=2));
# Initialize a list to hold the results of scale-free analysis
powerTables = vector(mode = "list", length = nSets);
# Call the network topology analysis function for each set in turn
for (set in 1:nSets) {
powerTables[[set]] = list(data = pickSoftThreshold(multiExpr[[set]]$data, powerVector=powers, verbose = 2)[[2]])
}
collectGarbage()
# Plot the results:
colors = c("black", "red")
# Will plot these columns of the returned scale free analysis tables
plotCols = c(2,5,6,7)
colNames = c("Scale Free Topology Model Fit", "Mean connectivity", "Median connectivity",
"Max connectivity")
# Get the minima and maxima of the plotted points
ylim = matrix(NA, nrow = 2, ncol = 4)
for (set in 1:nSets) {
for (col in 1:length(plotCols)) {
ylim[1, col] = min(ylim[1, col], powerTables[[set]]$data[, plotCols[col]], na.rm = TRUE);
ylim[2, col] = max(ylim[2, col], powerTables[[set]]$data[, plotCols[col]], na.rm = TRUE);
}
}
# Plot the quantities in the chosen columns vs. the soft thresholding power
sizeGrWindow(8, 6)
#pdf(file = "Plots/scaleFreeAnalysis.pdf", wi = 8, he = 6)
par(mfcol = c(2,2));
par(mar = c(4.2, 4.2 , 2.2, 0.5))
cex1 = 0.7;
for (col in 1:length(plotCols)) for (set in 1:nSets) {
if (set==1) {
plot(powerTables[[set]]$data[,1], -sign(powerTables[[set]]$data[,3])*powerTables[[set]]$data[,2],
xlab="Soft Threshold (power)", ylab=colNames[col],type="n", ylim = ylim[, col],
main = colNames[col]);
addGrid();
}
if (col==1) {
text(powerTables[[set]]$data[,1], -sign(powerTables[[set]]$data[,3])*powerTables[[set]]$data[,2],
labels=powers, cex=cex1, col=colors[set])
} else {
text(powerTables[[set]]$data[,1], powerTables[[set]]$data[,plotCols[col]],
labels=powers,cex=cex1,col=colors[set])
}
if (col==1) {
legend("bottomright", legend = setLabels, col = colors, pch = 20)
} else {
legend("topright", legend = setLabels, col = colors, pch = 20)
}
}
dev.off()
#network construction
bnet = blockwiseConsensusModules(multiExpr, maxBlockSize = 20000,
power = 14, TOMType = "unsigned", minModuleSize = 30, deepSplit = 2, pamRespectsDendro = FALSE,
reassignThreshold = 0, mergeCutHeight = 0.25,
numericLabels = TRUE,
saveTOMs = TRUE,
verbose = 5)
#see the result
consMEs = bnet$multiMEs;
moduleLabels = bnet$colors;
# Convert the numeric labels to color labels
moduleColors = labels2colors(moduleLabels)
consTree = bnet$dendrograms[[1]];
bwLabels = matchLabels(bnet$colors, moduleLabels, pThreshold = 1e-7);
bwColors = labels2colors(bwLabels)
table(bwLabels)
bwLabels
#plot the dendrogram and module color
# Here we show a more flexible way of plotting several trees and colors on one page
sizeGrWindow(12,6)
#pdf(file = "Plots/BlockwiseGeneDendrosAndColors.pdf", wi = 12, he = 6);
# Use the layout function for more involved screen sectioning
layout(matrix(c(1:4), 2, 2), heights = c(0.8, 0.2), widths = c(1,1))
#layout.show(4);
nBlocks = length(bnet$dendrograms)
# Plot the dendrogram and the module colors underneath for each block
for (block in 1:nBlocks) {
plotDendroAndColors(bnet$dendrograms[[block]], moduleColors[bnet$blockGenes[[block]]],
"Module colors",
main = paste("Gene dendrogram and module colors in block", block),
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,
setLayout = FALSE)
}
dev.off()
|
.julia <- new.env(parent = emptyenv())
.julia$initialized <- FALSE
julia <- new.env(parent = .julia)
julia_locate <- function(JULIA_HOME = NULL){
if (is.null(JULIA_HOME)) {
JULIA_HOME <- getOption("JULIA_HOME")
}
if (is.null(JULIA_HOME)) {
JULIA_HOME <- if (Sys.getenv("JULIA_HOME") == "") {
NULL
} else{
Sys.getenv("JULIA_HOME")
}
}
if (is.null(JULIA_HOME)) {
## In macOS, the environment variables, e.g., PATH of a GUI is set by launchctl not the SHELL.
## You may need to do bash -l -c "which julia" to determine the path to julia.
## This fixes the issue that in macOS, R.app GUI cannot find julia.
## Thank @randy3k
julia_bin <- Sys.which("julia")
if (julia_bin == "") {
if (.Platform$OS.type == "unix") {
julia_bin <- system2("bash", "-l -c 'which julia'", stdout = TRUE)[1]
} else {
julia_bin <- "julia"
}
}
tryCatch(system2(julia_bin, "-E \"try println(JULIA_HOME) catch e println(Sys.BINDIR) end;\"", stdout = TRUE)[1],
warning = function(war) {},
error = function(err) NULL)
}
else {
tryCatch(system2(file.path(JULIA_HOME, "julia"),
"-E \"try println(JULIA_HOME) catch e println(Sys.BINDIR) end;\"", stdout = TRUE)[1],
warning = function(war) {},
error = function(err) NULL)
}
}
## This function exists because of issue # 14577
## <https://github.com/JuliaLang/julia/issues/14577> in julia v0.6.0,
## which is fixed now.
## We need to call julia from the command line to precompile packages.
## It is currently used in julia_setup in zzz.R and julia_library in package.R
julia_line <- function(command, ...){
system2(file.path(.julia$bin_dir, "julia"), shQuote(command), ...)
}
newer <- function(x, y){
x <- substring(x, 1, 5)
y <- substring(y, 1, 5)
utils::compareVersion(x, y) >= 0
}
| /R/aaa.R | permissive | alexewd/JuliaCall | R | false | false | 2,042 | r | .julia <- new.env(parent = emptyenv())
.julia$initialized <- FALSE
julia <- new.env(parent = .julia)
julia_locate <- function(JULIA_HOME = NULL){
if (is.null(JULIA_HOME)) {
JULIA_HOME <- getOption("JULIA_HOME")
}
if (is.null(JULIA_HOME)) {
JULIA_HOME <- if (Sys.getenv("JULIA_HOME") == "") {
NULL
} else{
Sys.getenv("JULIA_HOME")
}
}
if (is.null(JULIA_HOME)) {
## In macOS, the environment variables, e.g., PATH of a GUI is set by launchctl not the SHELL.
## You may need to do bash -l -c "which julia" to determine the path to julia.
## This fixes the issue that in macOS, R.app GUI cannot find julia.
## Thank @randy3k
julia_bin <- Sys.which("julia")
if (julia_bin == "") {
if (.Platform$OS.type == "unix") {
julia_bin <- system2("bash", "-l -c 'which julia'", stdout = TRUE)[1]
} else {
julia_bin <- "julia"
}
}
tryCatch(system2(julia_bin, "-E \"try println(JULIA_HOME) catch e println(Sys.BINDIR) end;\"", stdout = TRUE)[1],
warning = function(war) {},
error = function(err) NULL)
}
else {
tryCatch(system2(file.path(JULIA_HOME, "julia"),
"-E \"try println(JULIA_HOME) catch e println(Sys.BINDIR) end;\"", stdout = TRUE)[1],
warning = function(war) {},
error = function(err) NULL)
}
}
## This function exists because of issue # 14577
## <https://github.com/JuliaLang/julia/issues/14577> in julia v0.6.0,
## which is fixed now.
## We need to call julia from the command line to precompile packages.
## It is currently used in julia_setup in zzz.R and julia_library in package.R
julia_line <- function(command, ...){
system2(file.path(.julia$bin_dir, "julia"), shQuote(command), ...)
}
newer <- function(x, y){
x <- substring(x, 1, 5)
y <- substring(y, 1, 5)
utils::compareVersion(x, y) >= 0
}
|
BestSlope = function(x, y, adm="Extravascular", TOL=1e-4)
{
# Author: Kyun-Seop Bae k@acr.kr
# Last modification: 2017.7.19
# Called by : sNCA
# Calls : Slope, UT
# INPUT
# x: time or similar vector
# y: concentration or similar vector
# adm: method of drug administration "Bolus", "Infusion", or "Extravascular"
# TOL: Tolerance, see Phoneix WinNonlin 6.4 User's Guide p33
# RETURNS
Result = c(R2 = NA, # R square
R2ADJ = NA, # R square adjusted
LAMZNPT = 0, # Number of points for Lambda z
LAMZ = NA, # Lambda z, terminal slope as a positive number
b0 = NA, # intercept from OLS, i.e. simple linear regression
CORRXY = NA, # Correlation of x, y
LAMZLL = NA, # Lower time for lambda z
LAMZUL = NA, # Upper time for lambda z
CLSTP = NA) # Concentration last predicted in original scale
# Input Check
n = length(x)
if (n != length(y) | !is.numeric(x) | !is.numeric(y) | length(y[y < 0]) > 0) {
Result["LAMZNPT"] = 0
return(Result)
}
if (length(unique(y)) == 1) { # Case of all the same values
Result["LAMZNPT"] = 0
Result["b0"] = unique(y)
return(Result)
}
if (UT(adm) == "BOLUS") {
locStart = which.max(y) # From Tmax (for Bolus)
} else {
locStart = which.max(y) + 1 # From next to Tmax (for the others)
}
locLast = max(which(y > 0)) # Till non-zero concentration
if (locLast - locStart < 2) { # Too few to fit, if this is 2, R2ADJ becomes NaN.
Result["LAMZNPT"] = 0
return(Result)
}
tmpMat = matrix(nrow=(locLast - locStart - 1), ncol=length(Result))
colnames(tmpMat) = names(Result)
for (i in locStart:(locLast - 2)) {
tmpMat[i - locStart + 1,] = Slope(x[i:locLast], log(y[i:locLast]))
}
tmpMat = tmpMat[tmpMat[,"LAMZNPT"] > 2,,drop=FALSE]
if (is.matrix(tmpMat) & nrow(tmpMat) > 0) {
maxAdjRsq = max(tmpMat[,"R2ADJ"])
OKs = ifelse(abs(maxAdjRsq - tmpMat[,"R2ADJ"]) < TOL, TRUE, FALSE)
nMax = max(tmpMat[OKs,"LAMZNPT"])
Result = tmpMat[OKs & tmpMat[,"LAMZNPT"]==nMax,]
} else {
Result["LAMZNPT"] = 0
}
return(Result)
}
| /R/BestSlope.R | no_license | asancpt/pkr | R | false | false | 2,228 | r | BestSlope = function(x, y, adm="Extravascular", TOL=1e-4)
{
# Author: Kyun-Seop Bae k@acr.kr
# Last modification: 2017.7.19
# Called by : sNCA
# Calls : Slope, UT
# INPUT
# x: time or similar vector
# y: concentration or similar vector
# adm: method of drug administration "Bolus", "Infusion", or "Extravascular"
# TOL: Tolerance, see Phoneix WinNonlin 6.4 User's Guide p33
# RETURNS
# Named numeric vector of fit diagnostics; LAMZNPT == 0 signals that no
# usable terminal-slope fit could be made.
Result = c(R2 = NA, # R square
R2ADJ = NA, # R square adjusted
LAMZNPT = 0, # Number of points for Lambda z
LAMZ = NA, # Lambda z, terminal slope as a positive number
b0 = NA, # intercept from OLS, i.e. simple linear regression
CORRXY = NA, # Correlation of x, y
LAMZLL = NA, # Lower time for lambda z
LAMZUL = NA, # Upper time for lambda z
CLSTP = NA) # Concentration last predicted in original scale
# Input Check
# Reject mismatched lengths, non-numeric input, or negative concentrations.
n = length(x)
if (n != length(y) | !is.numeric(x) | !is.numeric(y) | length(y[y < 0]) > 0) {
Result["LAMZNPT"] = 0
return(Result)
}
if (length(unique(y)) == 1) { # Case of all the same values
Result["LAMZNPT"] = 0
Result["b0"] = unique(y)
return(Result)
}
if (UT(adm) == "BOLUS") {
locStart = which.max(y) # From Tmax (for Bolus)
} else {
locStart = which.max(y) + 1 # From next to Tmax (for the others)
}
locLast = max(which(y > 0)) # Till non-zero concentration
if (locLast - locStart < 2) { # Too few to fit, if this is 2, R2ADJ becomes NaN.
Result["LAMZNPT"] = 0
return(Result)
}
# One candidate fit per possible start index: each row holds the result of a
# log-linear regression from x[i] to x[locLast]. Slope() is assumed to return
# its values in the same order as names(Result).
tmpMat = matrix(nrow=(locLast - locStart - 1), ncol=length(Result))
colnames(tmpMat) = names(Result)
for (i in locStart:(locLast - 2)) {
tmpMat[i - locStart + 1,] = Slope(x[i:locLast], log(y[i:locLast]))
}
# Keep only candidates fit on more than two points.
tmpMat = tmpMat[tmpMat[,"LAMZNPT"] > 2,,drop=FALSE]
if (is.matrix(tmpMat) & nrow(tmpMat) > 0) {
# Best adjusted R^2 within TOL wins; among those, prefer the fit that
# uses the most points (WinNonlin convention).
maxAdjRsq = max(tmpMat[,"R2ADJ"])
OKs = ifelse(abs(maxAdjRsq - tmpMat[,"R2ADJ"]) < TOL, TRUE, FALSE)
nMax = max(tmpMat[OKs,"LAMZNPT"])
# NOTE(review): if several candidates tie on both criteria, this indexing
# returns a multi-row matrix rather than a named vector -- confirm callers
# handle that case.
Result = tmpMat[OKs & tmpMat[,"LAMZNPT"]==nMax,]
} else {
Result["LAMZNPT"] = 0
}
return(Result)
}
|
# NOTE(review): dev.off() errors when no graphics device is open; guard with
# if (dev.cur() > 1) dev.off() if this script may run in a fresh session.
dev.off() # Clear the graph window
cat('\014') # Clear the console
rm(list=ls()) # Clear all user objects from the environment
# Import dataset
# Set working directory, change it to your work folder
setwd("~/Google Drive/ADS Master/2019 Fall/IST687/Final_Project")
# Adding jsonlite to our R library
library(jsonlite)
# Creating a dataframe from the json data.
df <- jsonlite::fromJSON("fall2019-survey-M03.json")
# Structure of the dataset
str(df)
# Summary of the dataset
summary(df)
# Viewing the dataset
View(df)
# Text Mining -- Sentiment Analysis
# NOTE(review): install.packages() runs on every execution; consider
# installing once interactively and keeping only the library() calls here.
install.packages("tm")
library(tm)
install.packages("quanteda")
library(quanteda)
install.packages("tidyverse")
library(tidyverse)
install.packages("sentimentr")
library(sentimentr)
# Import dictionaries for positive and negative words
# (drops the first 34 lines of each file -- presumably header text; verify
# against the lexicon files used).
posWords <- scan("positive-words.txt",character(0),sep = "\n")
posWords <- posWords[-1:-34]
negWords <- scan("negative-words.txt",character(0),sep = "\n")
negWords <- negWords[-1:-34]
# Get the text from the dataset and create a word corpus
# NOTE(review): if freeText contains no NA, -which(is.na(...)) indexes with an
# empty vector and drops EVERY row; df$freeText[!is.na(df$freeText)] is safer.
dfText <- df$freeText[-which(is.na(df$freeText))]
words.vec <- VectorSource(dfText)
words.corpus <- Corpus(words.vec)
# Normalize the corpus: lower-case, then strip punctuation, numbers, stopwords.
words.corpus <- tm_map(words.corpus, content_transformer(tolower))
words.corpus <- tm_map(words.corpus, removePunctuation)
words.corpus <- tm_map(words.corpus, removeNumbers)
words.corpus <- tm_map(words.corpus, removeWords, stopwords("english"))
# Create the term-document matrix
tdm <- TermDocumentMatrix(words.corpus)
inspect(tdm)
# Count positive and negative word numbers
m <- as.matrix(tdm)
wordCounts <- rowSums(m)
wordCounts <- sort(wordCounts, decreasing = TRUE)
# Keep only the corpus words that also appear in each sentiment lexicon.
matchedP <- match(names(wordCounts), posWords, nomatch = 0)
matchedN <- match(names(wordCounts), negWords, nomatch = 0)
wordsP <- wordCounts[which(matchedP != 0)]
wordsN <- wordCounts[which(matchedN != 0)]
# Calculate the percentage of positive and negative words
totWordsNum <- sum(wordCounts)
posWordsNum <- sum(wordsP)
ratioP <- posWordsNum/totWordsNum
ratioP
negWordsNum <- sum(wordsN)
ratioN <- negWordsNum/totWordsNum
ratioN
# Analysis for each record:
sentence_freetext <- get_sentences(df$freeText) # To get sentences from the string in each freeText
sentence_freetext
sentiment_for_all <- sentence_freetext %>% sentiment_by(by = NULL) # to get average sentiment for the text
View(sentiment_for_all)
sentence_freetext %>% sentiment_by(by = NULL) %>% highlight() # html file highlighting the text into 2 colors based on sentiment and the avg sentiment for that particular text
# Word Cloud:
toParas <- Corpus(words.vec)
toParas <- corpus(toParas)
paras <- corpus_reshape(toParas, to="paragraphs")
dfTextClean <- dfm(paras, stem=TRUE, remove_punct=TRUE, remove=stopwords("english"))
dfTextClean <- dfm_trim(dfTextClean, min_termfreq=3)
textplot_wordcloud(dfTextClean, color = rainbow(8))
| /SEAirlines_TextMining.R | no_license | ssingh56/customer_churn_prediction | R | false | false | 2,918 | r | dev.off() # Clear the graph window
cat('\014') # Clear the console
rm(list=ls()) # Clear all user objects from the environment
# Import dataset
# Set working directory, change it to your work folder
setwd("~/Google Drive/ADS Master/2019 Fall/IST687/Final_Project")
# Adding jsonlite to our R library
library(jsonlite)
# Creating a dataframe from the json data.
df <- jsonlite::fromJSON("fall2019-survey-M03.json")
# Structure of the dataset
str(df)
# Summary of the dataset
summary(df)
# Viewing the dataset
View(df)
# Text Mining -- Sentiment Analysis
# NOTE(review): install.packages() runs on every execution; consider
# installing once interactively and keeping only the library() calls here.
install.packages("tm")
library(tm)
install.packages("quanteda")
library(quanteda)
install.packages("tidyverse")
library(tidyverse)
install.packages("sentimentr")
library(sentimentr)
# Import dictionaries for positive and negative words
# (drops the first 34 lines of each file -- presumably header text; verify
# against the lexicon files used).
posWords <- scan("positive-words.txt",character(0),sep = "\n")
posWords <- posWords[-1:-34]
negWords <- scan("negative-words.txt",character(0),sep = "\n")
negWords <- negWords[-1:-34]
# Get the text from the dataset and create a word corpus
# NOTE(review): if freeText contains no NA, -which(is.na(...)) indexes with an
# empty vector and drops EVERY row; df$freeText[!is.na(df$freeText)] is safer.
dfText <- df$freeText[-which(is.na(df$freeText))]
words.vec <- VectorSource(dfText)
words.corpus <- Corpus(words.vec)
# Normalize the corpus: lower-case, then strip punctuation, numbers, stopwords.
words.corpus <- tm_map(words.corpus, content_transformer(tolower))
words.corpus <- tm_map(words.corpus, removePunctuation)
words.corpus <- tm_map(words.corpus, removeNumbers)
words.corpus <- tm_map(words.corpus, removeWords, stopwords("english"))
# Create the term-document matrix
tdm <- TermDocumentMatrix(words.corpus)
inspect(tdm)
# Count positive and negative word numbers
m <- as.matrix(tdm)
wordCounts <- rowSums(m)
wordCounts <- sort(wordCounts, decreasing = TRUE)
# Keep only the corpus words that also appear in each sentiment lexicon.
matchedP <- match(names(wordCounts), posWords, nomatch = 0)
matchedN <- match(names(wordCounts), negWords, nomatch = 0)
wordsP <- wordCounts[which(matchedP != 0)]
wordsN <- wordCounts[which(matchedN != 0)]
# Calculate the percentage of positive and negative words
totWordsNum <- sum(wordCounts)
posWordsNum <- sum(wordsP)
ratioP <- posWordsNum/totWordsNum
ratioP
negWordsNum <- sum(wordsN)
ratioN <- negWordsNum/totWordsNum
ratioN
# Analysis for each record:
sentence_freetext <- get_sentences(df$freeText) # To get sentences from the string in each freeText
sentence_freetext
sentiment_for_all <- sentence_freetext %>% sentiment_by(by = NULL) # to get average sentiment for the text
View(sentiment_for_all)
sentence_freetext %>% sentiment_by(by = NULL) %>% highlight() # html file highlighting the text into 2 colors based on sentiment and the avg sentiment for that particular text
# Word Cloud:
toParas <- Corpus(words.vec)
toParas <- corpus(toParas)
paras <- corpus_reshape(toParas, to="paragraphs")
dfTextClean <- dfm(paras, stem=TRUE, remove_punct=TRUE, remove=stopwords("english"))
dfTextClean <- dfm_trim(dfTextClean, min_termfreq=3)
textplot_wordcloud(dfTextClean, color = rainbow(8))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{zero.bounded.density}
\alias{zero.bounded.density}
\title{Zero Bounded Density}
\usage{
zero.bounded.density(x, bw = "SJ", n = 1001)
}
\arguments{
\item{x}{data, as a numeric vector}
\item{bw}{The smoothing bandwidth to be used. See 'bw.nrd'}
\item{n}{number of points to use in kernel density estimate. See \code{\link[stats]{density}}}
}
\value{
data frame with back-transformed log density estimate
}
\description{
Zero bounded density using log density transform
}
\details{
Provides a zero bounded density estimate of a parameter.
Kernel Density Estimation used by the \code{\link[stats]{density}} function will cause problems
at the left hand end because it will put some weight on negative values.
One useful approach is to transform to logs, estimate the density using KDE, and then transform back.
}
\references{
M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569}
}
\author{
\href{http://stats.stackexchange.com/q/6588/2750}{Rob Hyndman}
}
| /base/utils/man/zero.bounded.density.Rd | permissive | ashiklom/pecan | R | false | true | 1,185 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{zero.bounded.density}
\alias{zero.bounded.density}
\title{Zero Bounded Density}
\usage{
zero.bounded.density(x, bw = "SJ", n = 1001)
}
\arguments{
\item{x}{data, as a numeric vector}
\item{bw}{The smoothing bandwidth to be used. See 'bw.nrd'}
\item{n}{number of points to use in kernel density estimate. See \code{\link[stats]{density}}}
}
\value{
data frame with back-transformed log density estimate
}
\description{
Zero bounded density using log density transform
}
\details{
Provides a zero bounded density estimate of a parameter.
Kernel Density Estimation used by the \code{\link[stats]{density}} function will cause problems
at the left hand end because it will put some weight on negative values.
One useful approach is to transform to logs, estimate the density using KDE, and then transform back.
}
\references{
M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569}
}
\author{
\href{http://stats.stackexchange.com/q/6588/2750}{Rob Hyndman}
}
|
library(janitor)
library(tidyverse)
library(lmerTest)
library(emmeans)
library(openxlsx)
# Module definition, new method
anova_UI <- function(id) {
  # Shiny module UI: two workbook-upload controls, a grouping-variable
  # selector, diagnostic plot tabs, and tabbed ANOVA result tables.
  # All input/output ids are namespaced with the module id.
  ns <- NS(id)

  upload_row <- fluidRow(
    column(6,
           fileInput(ns("anova_input_file"),
                     "Choose your file containing the outcome variable",
                     accept = c(".xlsx", ".xls"))),
    column(6,
           fileInput(ns("anova_input_group"),
                     "Choose your file containing the groups",
                     accept = c(".xlsx", ".xls"))))

  diagnostic_tabs <- tabsetPanel(
    tabPanel("Box plot", plotOutput(ns("bar_plot"))),
    tabPanel("Normality check", plotOutput(ns("normcheck"))))

  result_tabs <- tabsetPanel(
    tabPanel("ANOVA table", tableOutput(ns("anova_tab"))),
    tabPanel("Estimated marginal means", tableOutput(ns("group_means"))),
    tabPanel("Tukey Pairwise comparison", tableOutput(ns("pairwise_compare"))))

  tagList(
    fluidRow(
      column(10,
             wellPanel(
               h4("Data inputs"),
               upload_row,
               h4("Select a group variable to perform ANOVA test"),
               uiOutput(ns("select_group")),
               h4("Diagnostic plots"),
               diagnostic_tabs,
               h4("ANOVA results"),
               result_tabs))))
}
anova_Server <- function(id) {
# Server half of the ANOVA Shiny module: reads the two uploaded workbooks,
# lets the user pick a grouping variable, fits a one-way ANOVA of
# ability_theta on that group, and renders diagnostics and result tables.
moduleServer(
id,
function(input, output, session) {
ns <- NS(id)
# Data from the report ----------------------------------------------
anova_data <-
reactive({
file <- input$anova_input_file
req(file)
read.xlsx(file$datapath, sheet = "Person Results")
})
# User input data containing the groups to compare -----------------------
group_data <-
reactive({
file <- input$anova_input_group
req(file)
read.xlsx(file$datapath, sheet = 1)
})
# Create drop-down menu for group comparison -------------------------------
# The first column is excluded from the choices (hence the [-1]); the
# remaining cleaned column names become the selectable grouping variables.
output$select_group <-
renderUI({
group_dat <-
group_data() %>% clean_names()
selectInput(ns("category"), "", names(group_dat)[-1])
})
# Get the final data -----------------------------------------------------
final_dat <- reactive({
anova_dat <- anova_data() %>%
clean_names() %>%
select(participant_id, ability_theta)
# NOTE(review): matches() treats the selected name as a regex, so a
# category name that is a substring of another column could select more
# than one column -- confirm the group workbooks avoid this.
group_dat <- group_data()%>%
clean_names() %>%
select(participant_id, matches(req(input$category)))
# Rename the chosen category column to the fixed name "Group" expected
# by the model formula and plots below.
names(group_dat) <-
gsub(input$category, "Group", names(group_dat))
# Natural join on the shared participant_id column (no explicit `by`).
final_dat <-
anova_dat %>% left_join(group_dat)
final_dat
})
# Boxplot ----------------------------------------------------------------
output$bar_plot <- renderPlot({
ggplot(final_dat(), aes(x = Group, y = ability_theta)) +
geom_boxplot() +
xlab(req(input$category)) +
ylab("Ability measure") +
theme_classic()
})
# Fitting the ANOVA model ------------------------------------------------
# One-way ANOVA expressed as a linear model: ability ~ group.
fit_anova <-
reactive({
lm(ability_theta ~ Group,
data = final_dat())
})
# Testing for normality plot ---------------------------------------------
output$normcheck <- renderPlot({
s20x::normcheck(resid(fit_anova()), s = TRUE)
})
# ANOVA results ----------------------------------------------------------
output$anova_tab <- renderTable({
tab <- anova(fit_anova()) %>% as.data.frame()
tab
}, rownames = TRUE, digits = 4, hover = TRUE, striped = TRUE,
bordered = TRUE)
# Estimated marginal means -----------------------------------------------
output$group_means <- renderTable({
tab <-
emmeans(fit_anova(), ~ Group) %>%
as.data.frame()
tab
}, digits = 4, hover = TRUE, striped = TRUE, bordered = TRUE)
## Tukey Honest Significant Differences ----------------------------------
# Unadjusted pairwise contrasts, then a Tukey multiplicity adjustment
# applied across the family via rbind(adjust = "tukey").
output$pairwise_compare <- renderTable({
tab <-
pairs(emmeans(fit_anova(), ~ Group),
adjust = "none") %>%
rbind(adjust = "tukey") %>%
as.data.frame()
tab
}, digits = 4, hover = TRUE, striped = TRUE,bordered = TRUE)
}
)
}
| /R/anova_module.R | no_license | kcha193/anova_test | R | false | false | 4,387 | r |
library(janitor)
library(tidyverse)
library(lmerTest)
library(emmeans)
library(openxlsx)
# Module definition, new method
anova_UI <- function(id) {
  # Shiny module UI: two workbook-upload controls, a grouping-variable
  # selector, diagnostic plot tabs, and tabbed ANOVA result tables.
  # All input/output ids are namespaced with the module id.
  ns <- NS(id)

  upload_row <- fluidRow(
    column(6,
           fileInput(ns("anova_input_file"),
                     "Choose your file containing the outcome variable",
                     accept = c(".xlsx", ".xls"))),
    column(6,
           fileInput(ns("anova_input_group"),
                     "Choose your file containing the groups",
                     accept = c(".xlsx", ".xls"))))

  diagnostic_tabs <- tabsetPanel(
    tabPanel("Box plot", plotOutput(ns("bar_plot"))),
    tabPanel("Normality check", plotOutput(ns("normcheck"))))

  result_tabs <- tabsetPanel(
    tabPanel("ANOVA table", tableOutput(ns("anova_tab"))),
    tabPanel("Estimated marginal means", tableOutput(ns("group_means"))),
    tabPanel("Tukey Pairwise comparison", tableOutput(ns("pairwise_compare"))))

  tagList(
    fluidRow(
      column(10,
             wellPanel(
               h4("Data inputs"),
               upload_row,
               h4("Select a group variable to perform ANOVA test"),
               uiOutput(ns("select_group")),
               h4("Diagnostic plots"),
               diagnostic_tabs,
               h4("ANOVA results"),
               result_tabs))))
}
anova_Server <- function(id) {
# Server half of the ANOVA Shiny module: reads the two uploaded workbooks,
# lets the user pick a grouping variable, fits a one-way ANOVA of
# ability_theta on that group, and renders diagnostics and result tables.
moduleServer(
id,
function(input, output, session) {
ns <- NS(id)
# Data from the report ----------------------------------------------
anova_data <-
reactive({
file <- input$anova_input_file
req(file)
read.xlsx(file$datapath, sheet = "Person Results")
})
# User input data containing the groups to compare -----------------------
group_data <-
reactive({
file <- input$anova_input_group
req(file)
read.xlsx(file$datapath, sheet = 1)
})
# Create drop-down menu for group comparison -------------------------------
# The first column is excluded from the choices (hence the [-1]); the
# remaining cleaned column names become the selectable grouping variables.
output$select_group <-
renderUI({
group_dat <-
group_data() %>% clean_names()
selectInput(ns("category"), "", names(group_dat)[-1])
})
# Get the final data -----------------------------------------------------
final_dat <- reactive({
anova_dat <- anova_data() %>%
clean_names() %>%
select(participant_id, ability_theta)
# NOTE(review): matches() treats the selected name as a regex, so a
# category name that is a substring of another column could select more
# than one column -- confirm the group workbooks avoid this.
group_dat <- group_data()%>%
clean_names() %>%
select(participant_id, matches(req(input$category)))
# Rename the chosen category column to the fixed name "Group" expected
# by the model formula and plots below.
names(group_dat) <-
gsub(input$category, "Group", names(group_dat))
# Natural join on the shared participant_id column (no explicit `by`).
final_dat <-
anova_dat %>% left_join(group_dat)
final_dat
})
# Boxplot ----------------------------------------------------------------
output$bar_plot <- renderPlot({
ggplot(final_dat(), aes(x = Group, y = ability_theta)) +
geom_boxplot() +
xlab(req(input$category)) +
ylab("Ability measure") +
theme_classic()
})
# Fitting the ANOVA model ------------------------------------------------
# One-way ANOVA expressed as a linear model: ability ~ group.
fit_anova <-
reactive({
lm(ability_theta ~ Group,
data = final_dat())
})
# Testing for normality plot ---------------------------------------------
output$normcheck <- renderPlot({
s20x::normcheck(resid(fit_anova()), s = TRUE)
})
# ANOVA results ----------------------------------------------------------
output$anova_tab <- renderTable({
tab <- anova(fit_anova()) %>% as.data.frame()
tab
}, rownames = TRUE, digits = 4, hover = TRUE, striped = TRUE,
bordered = TRUE)
# Estimated marginal means -----------------------------------------------
output$group_means <- renderTable({
tab <-
emmeans(fit_anova(), ~ Group) %>%
as.data.frame()
tab
}, digits = 4, hover = TRUE, striped = TRUE, bordered = TRUE)
## Tukey Honest Significant Differences ----------------------------------
# Unadjusted pairwise contrasts, then a Tukey multiplicity adjustment
# applied across the family via rbind(adjust = "tukey").
output$pairwise_compare <- renderTable({
tab <-
pairs(emmeans(fit_anova(), ~ Group),
adjust = "none") %>%
rbind(adjust = "tukey") %>%
as.data.frame()
tab
}, digits = 4, hover = TRUE, striped = TRUE,bordered = TRUE)
}
)
}
|
plotcomp <-
function(xp, groups, y, alpha, col=rainbow(length(groupsun)),
         xlim, ylim, ...) {
  # Scatter plot of 2-column data coloured by group, with a 5-level
  # kernel-density contour overlaid per group.
  #
  # xp:     numeric matrix/data frame; columns 1-2 are plotted.
  # groups: factor giving the group of each row of xp.
  # y:      optional two-level factor; when given, its second level is
  #         drawn with plotting character "2", the first with "1".
  # alpha:  transparency used for the contour colours when col is missing.
  # col:    one colour per group; when missing, rainbow() colours are used
  #         and the contours get an alpha-faded version of them.
  # Returns (invisibly meaningful) list(col = colours used).
  if(missing(xlim)) xlim <- range(xp[,1])
  if(missing(ylim)) ylim <- range(xp[,2])
  groupsun <- levels(groups)
  if(missing(col)) {
    col <- rainbow(length(groupsun))
    colconv <- rainbow(length(groupsun), alpha=alpha)
  } else {
    # User-supplied colours are used as-is for both points and contours.
    colconv <- col
  }
  # Helper: overlay a kernel-density contour for one group's points,
  # padding the estimation window by one SD on each side.
  addcontour <- function(xpsub, contcol) {
    dens2 <- MASS::kde2d(xpsub[,1], xpsub[,2],
                         lims=c(min(xpsub[,1])-sd(xpsub[,1]), max(xpsub[,1])+sd(xpsub[,1]),
                                min(xpsub[,2])-sd(xpsub[,2]), max(xpsub[,2])+sd(xpsub[,2])))
    contour(dens2, add=TRUE, col=contcol, nlevels=5)
  }
  if(!missing(y)) {
    yun <- levels(y)
    # "1" for the first level of y, "2" for the second.
    pchall <- as.character(as.numeric(as.numeric(y==yun[2])) + 1)
  }
  # seq_along() (rather than the previous 2:length()) also behaves
  # correctly when there is only a single group.
  for(i in seq_along(groupsun)) {
    xpsub <- xp[groups==groupsun[i],]
    if(i == 1) {
      # First group sets up the plotting region; extra args go to plot().
      if(!missing(y))
        plot(xpsub, xlim=xlim, ylim=ylim, col=col[i], cex=0.7, pch=pchall[groups==groupsun[i]], ...)
      else
        plot(xpsub, xlim=xlim, ylim=ylim, col=col[i], pch=20, ...)
    } else {
      if(!missing(y))
        points(xpsub[,1], xpsub[,2], col=col[i], cex=0.7, pch=pchall[groups==groupsun[i]])
      else
        points(xpsub[,1], xpsub[,2], col=col[i], pch=20)
    }
    addcontour(xpsub, colconv[i])
  }
  return(list(col=col))
}
| /R/plotcomp.R | no_license | cran/bapred | R | false | false | 1,466 | r | plotcomp <-
function(xp, groups, y, alpha, col=rainbow(length(groupsun)),
         xlim, ylim, ...) {
  # Scatter plot of 2-column data coloured by group, with a 5-level
  # kernel-density contour overlaid per group.
  #
  # xp:     numeric matrix/data frame; columns 1-2 are plotted.
  # groups: factor giving the group of each row of xp.
  # y:      optional two-level factor; when given, its second level is
  #         drawn with plotting character "2", the first with "1".
  # alpha:  transparency used for the contour colours when col is missing.
  # col:    one colour per group; when missing, rainbow() colours are used
  #         and the contours get an alpha-faded version of them.
  # Returns list(col = colours used).
  if(missing(xlim)) xlim <- range(xp[,1])
  if(missing(ylim)) ylim <- range(xp[,2])
  groupsun <- levels(groups)
  if(missing(col)) {
    col <- rainbow(length(groupsun))
    colconv <- rainbow(length(groupsun), alpha=alpha)
  } else {
    # User-supplied colours are used as-is for both points and contours.
    colconv <- col
  }
  # Helper: overlay a kernel-density contour for one group's points,
  # padding the estimation window by one SD on each side.
  addcontour <- function(xpsub, contcol) {
    dens2 <- MASS::kde2d(xpsub[,1], xpsub[,2],
                         lims=c(min(xpsub[,1])-sd(xpsub[,1]), max(xpsub[,1])+sd(xpsub[,1]),
                                min(xpsub[,2])-sd(xpsub[,2]), max(xpsub[,2])+sd(xpsub[,2])))
    contour(dens2, add=TRUE, col=contcol, nlevels=5)
  }
  if(!missing(y)) {
    yun <- levels(y)
    # "1" for the first level of y, "2" for the second.
    pchall <- as.character(as.numeric(as.numeric(y==yun[2])) + 1)
  }
  # seq_along() (rather than the previous 2:length()) also behaves
  # correctly when there is only a single group.
  for(i in seq_along(groupsun)) {
    xpsub <- xp[groups==groupsun[i],]
    if(i == 1) {
      # First group sets up the plotting region; extra args go to plot().
      if(!missing(y))
        plot(xpsub, xlim=xlim, ylim=ylim, col=col[i], cex=0.7, pch=pchall[groups==groupsun[i]], ...)
      else
        plot(xpsub, xlim=xlim, ylim=ylim, col=col[i], pch=20, ...)
    } else {
      if(!missing(y))
        points(xpsub[,1], xpsub[,2], col=col[i], cex=0.7, pch=pchall[groups==groupsun[i]])
      else
        points(xpsub[,1], xpsub[,2], col=col[i], pch=20)
    }
    addcontour(xpsub, colconv[i])
  }
  return(list(col=col))
}
|
# Martin Holdrege
# Script started May 26, 2021
# Purpose of this code is to take the data compiled in
# '04_compile_ChemCorrect_output.R' and calculate proportional water
# uptake
# Next steps--filter out cool samples with a negative slope (i.e. those
# that shouldn't be hot, but have memory).
# dependencies ------------------------------------------------------------
library(tidyverse)
# read in data ------------------------------------------------------------
# Paths are relative to the working directory (project root).
# compiled chemcorrect output
cc1 <- read_csv("data-processed/hw_combined_cc_output.csv")
# compiled uncorrected data (for determining which samples are 'good')
raw1 <- read_csv("data-processed/hw_combined_picarro_output.csv") %>%
janitor::clean_names()
# parsing -----------------------------------------------------------------
# Label the two sampling events (May injections = event 1, otherwise 2)
# and strip a trailing "_<digit>" suffix from the run identifier.
cc2 <- cc1 %>%
# 2 sampling events
mutate(sample_event = ifelse(lubridate::month(date_inject) == 5,
1 , 2),
run = str_replace(run, "_\\d$", ""))
# categorizing good/bad samples ------------------------------------------
# calculate slope
# Ordinary least squares slope of y against its observation index
# (1, 2, ..., length(y)).
#
# Returns NA_real_ when fewer than two non-missing values are present
# (a line cannot be fit); otherwise the fitted slope, which keeps the
# "x" coefficient name assigned by lm().
calc_slope <- function(y) {
  # Need at least two usable (non-NA) points to fit a line.
  if (sum(!is.na(y)) < 2) {
    return(NA_real_)
  }
  x <- seq_along(y) # index as predictor; seq_along() is safe for length-0 input
  mod <- lm(y ~ x)
  mod$coefficients[2] # slope (named "x")
}
# creating a lookup vector, to convert line number into
# the unique sampling occasion. I.e. this is the nth vial, to be sampled
n_lines <- 694
vial_inj_lookup <- vector(mode = "numeric", length = n_lines)
# first vial is measured 10 times
vial_inj_lookup[1:10] <- 1
# remaining samples are measured 6 times
vial_inj_lookup[11:n_lines] <- 0:(n_lines-11) %/% 6 + 2
names(vial_inj_lookup) <- 1:n_lines
# Per-vial summaries of the last three injections: slope across
# injections, spread, and mean of the d_d_h_mean signal.
raw_means1 <- raw1 %>%
# vial number (i.e. consecutive numbering that vials were actually measured,
# so have an identifier to tell what the 'previous' vial was, to measure
# memory of hot samples)
mutate(vial_nr = vial_inj_lookup[line]) %>%
# only keep last 3 samples
filter(inj_nr > 3) %>%
group_by(run, port, vial_nr, identifier_1, identifier_2) %>%
summarize(slope = calc_slope(y = d_d_h_mean),
d_d_h_sd = sd(d_d_h_mean, na.rm = TRUE),
d_d_h_mean = mean(d_d_h_mean, na.rm = TRUE)) %>%
arrange(run, vial_nr) %>%
group_by(run) %>%
# difference between mean of this sample and the previous sample
mutate(d_h_diff = c(NA, diff(d_d_h_mean)))
# samples are bad if the previous sample was very hot and there
# is a lot of difference between injections
# NOTE(review): the 10 / -200 cutoffs are hard-coded heuristics --
# confirm they still suit new datasets.
raw_means2 <- raw_means1 %>%
mutate(is_bad = ifelse(d_d_h_sd > 10 & slope < 0 & d_h_diff < -200,
TRUE, FALSE))
sum(raw_means2$is_bad, na.rm = TRUE)
# * removing bad values ---------------------------------------------------
cc3 <- raw_means2 %>%
filter(!(identifier_2 %in% c("Dummy", "Standard", "Tap"))) %>%
select(run, identifier_1, is_bad) %>%
mutate(identifier_1 = as.numeric(identifier_1)) %>%
right_join(cc2, by = c("run", "identifier_1" = "vial_num")) %>%
# excluding samples affected by drift from previous hot sample
# NOTE(review): filter() also drops rows where is_bad is NA (e.g. rows of
# cc2 without a matching raw summary after the right_join) -- confirm
# that is intended.
filter(!is_bad)
# checking control data ---------------------------------------------------
# data was collected prior to event 1 to make sure no tracer was detected
control <- cc3 %>%
filter(date == "2020-07-12")
control %>%
group_by(depth) %>%
summarize(cal_2h_mean = mean(cal_2h_mean))
hist(control$raw_2h_mean) # some high (contaminated?) values
hist(control$raw_18o_mean)
# Drop the pre-event control samples from the analysis set.
cc4 <- cc3 %>%
filter(date != "2020-07-12")
| /scripts/05_calc_prop_water_uptake.R | no_license | MartinHoldrege/tracer | R | false | false | 3,481 | r | # Martin Holdrege
# Script started May 26, 2021
# Purpose of this code is to take the data compiled in
# '04_compile_ChemCorrect_output.R' and calculate proportional water
# uptake
# Next steps--filter out cool samples with a negative slope (i.e. those
# that shouldn't be hot, but have memory).
# dependencies ------------------------------------------------------------
library(tidyverse)
# read in data ------------------------------------------------------------
# Paths are relative to the working directory (project root).
# compiled chemcorrect output
cc1 <- read_csv("data-processed/hw_combined_cc_output.csv")
# compiled uncorrected data (for determining which samples are 'good')
raw1 <- read_csv("data-processed/hw_combined_picarro_output.csv") %>%
janitor::clean_names()
# parsing -----------------------------------------------------------------
# Label the two sampling events (May injections = event 1, otherwise 2)
# and strip a trailing "_<digit>" suffix from the run identifier.
cc2 <- cc1 %>%
# 2 sampling events
mutate(sample_event = ifelse(lubridate::month(date_inject) == 5,
1 , 2),
run = str_replace(run, "_\\d$", ""))
# categorizing good/bad samples ------------------------------------------
# calculate slope
# Ordinary least squares slope of y against its observation index
# (1, 2, ..., length(y)).
#
# Returns NA_real_ when fewer than two non-missing values are present
# (a line cannot be fit); otherwise the fitted slope, which keeps the
# "x" coefficient name assigned by lm().
calc_slope <- function(y) {
  # Need at least two usable (non-NA) points to fit a line.
  if (sum(!is.na(y)) < 2) {
    return(NA_real_)
  }
  x <- seq_along(y) # index as predictor; seq_along() is safe for length-0 input
  mod <- lm(y ~ x)
  mod$coefficients[2] # slope (named "x")
}
# creating a lookup vector, to convert line number into
# the unique sampling occasion. I.e. this is the nth vial, to be sampled
n_lines <- 694
vial_inj_lookup <- vector(mode = "numeric", length = n_lines)
# first vial is measured 10 times
vial_inj_lookup[1:10] <- 1
# remaining samples are measured 6 times
vial_inj_lookup[11:n_lines] <- 0:(n_lines-11) %/% 6 + 2
names(vial_inj_lookup) <- 1:n_lines
# Per-vial summaries of the last three injections: slope across
# injections, spread, and mean of the d_d_h_mean signal.
raw_means1 <- raw1 %>%
# vial number (i.e. consecutive numbering that vials were actually measured,
# so have an identifier to tell what the 'previous' vial was, to measure
# memory of hot samples)
mutate(vial_nr = vial_inj_lookup[line]) %>%
# only keep last 3 samples
filter(inj_nr > 3) %>%
group_by(run, port, vial_nr, identifier_1, identifier_2) %>%
summarize(slope = calc_slope(y = d_d_h_mean),
d_d_h_sd = sd(d_d_h_mean, na.rm = TRUE),
d_d_h_mean = mean(d_d_h_mean, na.rm = TRUE)) %>%
arrange(run, vial_nr) %>%
group_by(run) %>%
# difference between mean of this sample and the previous sample
mutate(d_h_diff = c(NA, diff(d_d_h_mean)))
# samples are bad if the previous sample was very hot and there
# is a lot of difference between injections
# NOTE(review): the 10 / -200 cutoffs are hard-coded heuristics --
# confirm they still suit new datasets.
raw_means2 <- raw_means1 %>%
mutate(is_bad = ifelse(d_d_h_sd > 10 & slope < 0 & d_h_diff < -200,
TRUE, FALSE))
sum(raw_means2$is_bad, na.rm = TRUE)
# * removing bad values ---------------------------------------------------
cc3 <- raw_means2 %>%
filter(!(identifier_2 %in% c("Dummy", "Standard", "Tap"))) %>%
select(run, identifier_1, is_bad) %>%
mutate(identifier_1 = as.numeric(identifier_1)) %>%
right_join(cc2, by = c("run", "identifier_1" = "vial_num")) %>%
# excluding samples affected by drift from previous hot sample
# NOTE(review): filter() also drops rows where is_bad is NA (e.g. rows of
# cc2 without a matching raw summary after the right_join) -- confirm
# that is intended.
filter(!is_bad)
# checking control data ---------------------------------------------------
# data was collected prior to event 1 to make sure no tracer was detected
control <- cc3 %>%
filter(date == "2020-07-12")
control %>%
group_by(depth) %>%
summarize(cal_2h_mean = mean(cal_2h_mean))
hist(control$raw_2h_mean) # some high (contaminated?) values
hist(control$raw_18o_mean)
# Drop the pre-event control samples from the analysis set.
cc4 <- cc3 %>%
filter(date != "2020-07-12")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/joins.R
\name{shape_for_route}
\alias{shape_for_route}
\title{Get a set of shapes for a route}
\usage{
shape_for_route(g1, select_route_id, select_service_id)
}
\arguments{
\item{g1}{dataframe output by join_mega_and_hf_routes()}
\item{select_route_id}{the id of the route}
\item{select_service_id}{the service for which to get shapes}
}
\value{
shapes for a route
}
\description{
Get a set of shapes for a route
}
\keyword{internal}
| /man/shape_for_route.Rd | no_license | r-transit/trread | R | false | true | 498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/joins.R
\name{shape_for_route}
\alias{shape_for_route}
\title{Get a set of shapes for a route}
\usage{
shape_for_route(g1, select_route_id, select_service_id)
}
\arguments{
\item{g1}{dataframe output by join_mega_and_hf_routes()}
\item{select_route_id}{the id of the route}
\item{select_service_id}{the service for which to get shapes}
}
\value{
shapes for a route
}
\description{
Get a set of shapes for a route
}
\keyword{internal}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/influxdb_explore_schema.R
\name{show_tag_keys}
\alias{show_tag_keys}
\title{Show tag keys}
\usage{
show_tag_keys(con, db, from = NULL)
}
\arguments{
\item{con}{An influx_connection object (s. \code{influx_connection}).}
\item{db}{Sets the target database for the query.}
\item{from}{Query a specific measurement.}
}
\value{
A list of character vectors containing tag keys.
}
\description{
Show tag keys
}
\details{
This function is a convenient wrapper for showing all unique tag keys
associated with each measurement by calling
\code{influx_query} with the corresponding query.
The query can include a measurement (\code{from}) and tag key value (\code{where})
conditions, so only certain tag keys are shown.
}
\author{
Dominik Leutnant (\email{leutnant@fh-muenster.de})
}
\references{
\url{https://influxdb.com/docs/v0.9/query_language/schema_exploration.html}
}
\seealso{
\code{\link[influxdbr]{influx_connection}}
}
| /man/show_tag_keys.Rd | no_license | openanalytics/influxdbr | R | false | false | 1,007 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/influxdb_explore_schema.R
\name{show_tag_keys}
\alias{show_tag_keys}
\title{Show tag keys}
\usage{
show_tag_keys(con, db, from = NULL)
}
\arguments{
\item{con}{An influx_connection object (s. \code{influx_connection}).}
\item{db}{Sets the target database for the query.}
\item{from}{Query a specific measurement.}
}
\value{
A list of character vectors containing tag keys.
}
\description{
Show tag keys
}
\details{
This function is a convenient wrapper for showing all unique tag keys
associated with each measurement by calling
\code{influx_query} with the corresponding query.
The query can include a measurement (\code{from}) and tag key value (\code{where})
conditions, so only certain tag keys are shown.
}
\author{
Dominik Leutnant (\email{leutnant@fh-muenster.de})
}
\references{
\url{https://influxdb.com/docs/v0.9/query_language/schema_exploration.html}
}
\seealso{
\code{\link[influxdbr]{influx_connection}}
}
|
# Pick a readable foreground colour (dark or light) for a given background.
#
# hex_color:   background colour(s) as "#RRGGBB" hex strings (vectorized).
# dark_color / light_color: colours returned for bright / dark backgrounds.
# na_color:    returned when a hex string cannot be parsed.
#
# Brightness is judged with the YIQ luma formula; backgrounds with
# luma >= 128 count as bright and therefore get the dark foreground.
.sb_invert <- function(hex_color, dark_color="black", light_color="white",
                       na_color="white") {
  hex_color <- gsub("#", "", hex_color)
  # Parse each channel; unparseable input yields NA (warnings suppressed).
  R <- suppressWarnings(as.integer(paste0("0x", substr(hex_color, 1, 2))))
  G <- suppressWarnings(as.integer(paste0("0x", substr(hex_color, 3, 4))))
  B <- suppressWarnings(as.integer(paste0("0x", substr(hex_color, 5, 6))))
  YIQ <- ((R*299) + (G*587) + (B*114)) / 1000
  ifelse(is.na(YIQ), na_color,
         ifelse(YIQ >= 128, dark_color, light_color))
}
# sanity checks for country values
# Sanity-check the country column of a user-supplied data frame.
#
# country_data: data frame containing a country column.
# country_col:  name/index of that column.
# merge.x:      column of the package's country_coords lookup to match against.
# ignore_dupes: if FALSE (default), duplicate country rows are dropped.
#
# Rows whose country value is not found in country_coords are removed with a
# warning; unless ignore_dupes, repeated countries are also removed (keeping
# the first occurrence) with a warning. Returns the cleaned data frame.
validate_countries <- function(country_data, country_col, merge.x, ignore_dupes=FALSE) {
  good_country <- country_data[, country_col] %in% country_coords[, merge.x]
  if (any(!good_country)) {
    invalid <- country_data[, country_col][which(!good_country)]
    country_data <- country_data[which(good_country), ]
    # Collapse the offending values so the warning lists all of them readably
    # (multiple arguments to warning() are concatenated with no separator).
    warning("Found invalid country values: ",
            paste(invalid, collapse = ", "))
  }
  if (!ignore_dupes) {
    dupes <- duplicated(country_data[, country_col])
    if (any(dupes)) {
      country_data <- country_data[which(!dupes), ]
      warning("Removing duplicate country rows")
    }
  }
  return(country_data)
}
# Null-coalescing infix operator: return `a` unless it is NULL, else `b`.
"%||%" <- function(a, b) { if (!is.null(a)) a else b }
# NOTE(review): value matches ggplot2's .pt (points-per-mm text sizing
# constant) -- presumably used the same way here; confirm against callers.
.pt <- 2.84527559055118
| /R/utils.R | no_license | delabj/AfricaCountryBins | R | false | false | 1,294 | r | .sb_invert <- function(hex_color, dark_color="black", light_color="white",
na_color="white") {
# Strip any leading "#" so the channel substrings start at offset 1.
hex_color <- gsub("#", "", hex_color)
# Parse the RGB channels; unparseable hex yields NA (warnings suppressed).
R <- suppressWarnings(as.integer(paste("0x", substr(hex_color,1,2), sep="")))
G <- suppressWarnings(as.integer(paste("0x", substr(hex_color,3,4), sep="")))
B <- suppressWarnings(as.integer(paste("0x", substr(hex_color,5,6), sep="")))
# YIQ luma: weighted brightness of the background colour.
YIQ <- ((R*299) + (G*587) + (B*114)) / 1000
return(
ifelse(is.na(YIQ), na_color,
ifelse(
# Bright background (luma >= 128) gets the dark foreground, else the light one.
YIQ >= 128, dark_color, light_color)
)
)
}
# sanity checks for country values
# Sanity-check the country column of a user-supplied data frame.
#
# country_data: data frame containing a country column.
# country_col:  name/index of that column.
# merge.x:      column of the package's country_coords lookup to match against.
# ignore_dupes: if FALSE (default), duplicate country rows are dropped.
#
# Rows whose country value is not found in country_coords are removed with a
# warning; unless ignore_dupes, repeated countries are also removed (keeping
# the first occurrence) with a warning. Returns the cleaned data frame.
validate_countries <- function(country_data, country_col, merge.x, ignore_dupes=FALSE) {
  good_country <- country_data[, country_col] %in% country_coords[, merge.x]
  if (any(!good_country)) {
    invalid <- country_data[, country_col][which(!good_country)]
    country_data <- country_data[which(good_country), ]
    # Collapse the offending values so the warning lists all of them readably
    # (multiple arguments to warning() are concatenated with no separator).
    warning("Found invalid country values: ",
            paste(invalid, collapse = ", "))
  }
  if (!ignore_dupes) {
    dupes <- duplicated(country_data[, country_col])
    if (any(dupes)) {
      country_data <- country_data[which(!dupes), ]
      warning("Removing duplicate country rows")
    }
  }
  return(country_data)
}
# Null-coalescing infix operator: return `a` unless it is NULL, else `b`.
"%||%" <- function(a, b) { if (!is.null(a)) a else b }
# NOTE(review): value matches ggplot2's .pt (points-per-mm text sizing
# constant) -- presumably used the same way here; confirm against callers.
.pt <- 2.84527559055118
|
\name{Mean}
\alias{Mean}
\title{Mean without NA}
\description{
mean without \code{NA} values.
}
\usage{
Mean(x)
}
\arguments{
\item{x}{a vector of numerics}
}
\details{
It removes \code{NA} values from the input vector before computing the mean.
}
\value{
mean value
}
\author{Kyun-Seop Bae k@acr.kr}
| /man/Mean.Rd | no_license | asancpt/sasLM | R | false | false | 296 | rd | \name{Mean}
\alias{Mean}
\title{Mean without NA}
\description{
mean without \code{NA} values.
}
\usage{
Mean(x)
}
\arguments{
\item{x}{a vector of numerics}
}
\details{
It removes \code{NA} values from the input vector before computing the mean.
}
\value{
mean value
}
\author{Kyun-Seop Bae k@acr.kr}
|
# Example script (extracted from the qat package docs) for plotting a
# NOC-rule quality-check result.
library(qat)
### Name: qat_plot_noc_rule_1d
### Title: Plot a NOC rule result
### Aliases: qat_plot_noc_rule_1d
### Keywords: ts
### ** Examples
# Toy measurement series containing one missing value (NaN).
vec <- c(1,2,3,4,4,4,5,5,4,3,NaN,3,2,1)
result <- qat_analyse_noc_rule_1d(vec, 1)
# this example produces a file exampleplot_noc.png in the current directory
qat_plot_noc_rule_1d(result$flagvector, "exampleplot_noc", measurement_vector=vec,
max_return_elements=result$max_return_elements, measurement_name="Result of Check")
| /data/genthat_extracted_code/qat/examples/qat_plot_noc_rule_1d.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 477 | r | library(qat)
### Name: qat_plot_noc_rule_1d
### Title: Plot a NOC rule result
### Aliases: qat_plot_noc_rule_1d
### Keywords: ts
### ** Examples
# Toy measurement series containing one missing value (NaN).
vec <- c(1,2,3,4,4,4,5,5,4,3,NaN,3,2,1)
result <- qat_analyse_noc_rule_1d(vec, 1)
# this example produces a file exampleplot_noc.png in the current directory
qat_plot_noc_rule_1d(result$flagvector, "exampleplot_noc", measurement_vector=vec,
max_return_elements=result$max_return_elements, measurement_name="Result of Check")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/full_alpha_hex.R
\name{full_alpha_hex}
\alias{full_alpha_hex}
\title{Convert to full alpha hex}
\usage{
full_alpha_hex(color, alpha)
}
\description{
Convert color with transparency to the equivalent color without transparency.
}
\author{
Anthony Reinhard (Twitter: reinhurdler)
}
| /man/full_alpha_hex.Rd | permissive | norment/normentR | R | false | true | 358 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/full_alpha_hex.R
\name{full_alpha_hex}
\alias{full_alpha_hex}
\title{Convert to full alpha hex}
\usage{
full_alpha_hex(color, alpha)
}
\description{
Convert color with transparency to the equivalent color without transparency.
}
\author{
Anthony Reinhard (Twitter: reinhurdler)
}
|
# Fuzzer-generated regression input for MGDrivE::calcCos: a 2x7 matrix of
# extreme doubles (-Inf, denormals, very large values) plus r = 0.
testlist <- list(latLongs = structure(c(-Inf, 4.94065645841247e-324, -Inf, -Inf, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40), .Dim = c(2L, 7L)), r = 0)
# Invoke calcCos with the generated arguments.
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727428-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 388 | r | testlist <- list(latLongs = structure(c(-Inf, 4.94065645841247e-324, -Inf, -Inf, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40, 1.65257130664663e+40), .Dim = c(2L, 7L)), r = 0)
# Invoke MGDrivE::calcCos with the fuzzer-generated argument list defined
# above and print the structure of whatever it returns.
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
## Yiling Chen
## Created: February 4, 2021
## HW#1
#####################################################################
#1
#####################################################################
# NOTE(review): rm(list = ls()) and setwd() make this script destructive and
# machine-specific; adjust the path before running it elsewhere.
rm(list = ls())
#set working directory
##1a~b
setwd("/Users/elenachen/Desktop/DataMiningII/Hw1")
library(ISLR)
summary(College)
View(College)
pairs(College[2:18])
##1c~e
# Flag schools where Top10perc exceeds 50 as "Elite".
Elite = rep("No",nrow(College))
Elite[College$Top10perc > 50] ="Yes"
Elite = as.factor(Elite)
College = data.frame(College, Elite)
View(College)
table(College$Elite)
#78 Elite schools
table(College$Elite,College$Private)
#67/78 Elite schools are private
##1f
# Compare the mean graduation rate of elite vs non-elite schools.
EliteYes <- subset(College, College$Elite == 'Yes')
View(EliteYes)
result.meanYes <- mean(EliteYes$Grad.Rate)
print(result.meanYes)
#83.38
EliteNo <- subset(College, College$Elite == "No")
result.meanNo <- mean(EliteNo$Grad.Rate)
print(result.meanNo)
#63.46
#####################################################################
#2
#####################################################################
library(ISLR)
library(corrplot)
library(dplyr)
View(Auto)
#2a~b
y <- c(Auto)
is.na(y)
sapply(Auto, class)
summary(Auto)
continous <- sapply(Auto, is.numeric)
#2c
# Mean and SD of every numeric column, rounded to 2 significant digits.
twoC<-sapply(Auto[,continous], function(x) signif(c(mean(x),sd(x)),2))
rownames(twoC) <- c("Mean","SD")
twoC
#2d
# Same summary after dropping observations 5 through 55.
newset <-sapply(Auto[-5:-55,continous], function(x) signif(c(range(x),mean(x),sd(x)),2))
rownames(newset) <- c("Min","Max","Mean","SD")
newset
#2e
# Recode origin from 1/2/3 to region names.
# NOTE(review): df[df == "1"] replaces the value "1" ANYWHERE in the data
# frame, not only in origin; df$origin[df$origin == "1"] would be safer.
df <-Auto
df$origin <- as.character(df$origin)
df[df == "1"] <-"American"
df[df == "2"] <-"European"
df[df == "3"] <-"Japanese"
df = subset(df, select=-c(name))
#View(df)
head(df)
#2f
pairs(Auto[1:7])
auto <- as.matrix(Auto[1:7])
corralation <-cor(auto)
corrplot(corralation,method = "number")
#2g
twog <-sapply(Auto[,continous], function(x) signif(c(range(x),mean(x),sd(x)),2))
rownames(twog) <- c("Min","Max","Mean","SD")
twog
# mpg bins: low <= 22 < med <= 35 < High
#22low,35med,48high
# NOTE(review): "High" is capitalized while "low"/"med" are not -- the
# inconsistent factor level casing may surprise downstream code.
my_mpg <- rep("low",nrow(Auto))
my_mpg[Auto$mpg >22] = "med"
my_mpg[Auto$mpg >35] = "High"
my_mpg <- as.factor(my_mpg)
new_Auto <-data.frame(Auto,my_mpg)
#View(new_Auto)
#saveRDS(new_Auto,"new_Auto.rds")
| /USML_HW1.R | no_license | c95019124/DataMining_R | R | false | false | 2,110 | r | ## Yiling Chen
## Created: February 4, 2021
## HW#1
#####################################################################
#1
#####################################################################
rm(list = ls())
#set working directory
##1a~b
setwd("/Users/elenachen/Desktop/DataMiningII/Hw1")
library(ISLR)
summary(College)
View(College)
pairs(College[2:18])
##1c~e
Elite = rep("No",nrow(College))
Elite[College$Top10perc > 50] ="Yes"
Elite = as.factor(Elite)
College = data.frame(College, Elite)
View(College)
table(College$Elite)
#78Elite schools
table(College$Elite,College$Private)
#67/78 Elite schools are private
##1f
EliteYes <- subset(College, College$Elite == 'Yes')
View(EliteYes)
result.meanYes <- mean(EliteYes$Grad.Rate)
print(result.meanYes)
#83.38
EliteNo <- subset(College, College$Elite == "No")
result.meanNo <- mean(EliteNo$Grad.Rate)
print(result.meanNo)
#63.46
#####################################################################
#2
#####################################################################
library(ISLR)
library(corrplot)
library(dplyr)
View(Auto)
#2a~b
y <- c(Auto)
is.na(y)
sapply(Auto, class)
summary(Auto)
continous <- sapply(Auto, is.numeric)
#2c
twoC<-sapply(Auto[,continous], function(x) signif(c(mean(x),sd(x)),2))
rownames(twoC) <- c("Mean","SD")
twoC
#2d
newset <-sapply(Auto[-5:-55,continous], function(x) signif(c(range(x),mean(x),sd(x)),2))
rownames(newset) <- c("Min","Max","Mean","SD")
newset
#2e
df <-Auto
df$origin <- as.character(df$origin)
df[df == "1"] <-"American"
df[df == "2"] <-"European"
df[df == "3"] <-"Japanese"
df = subset(df, select=-c(name))
#View(df)
head(df)
#2f
pairs(Auto[1:7])
auto <- as.matrix(Auto[1:7])
corralation <-cor(auto)
corrplot(corralation,method = "number")
#2g
twog <-sapply(Auto[,continous], function(x) signif(c(range(x),mean(x),sd(x)),2))
rownames(twog) <- c("Min","Max","Mean","SD")
twog
#22low,35med,48high
my_mpg <- rep("low",nrow(Auto))
my_mpg[Auto$mpg >22] = "med"
my_mpg[Auto$mpg >35] = "High"
my_mpg <- as.factor(my_mpg)
new_Auto <-data.frame(Auto,my_mpg)
#View(new_Auto)
#saveRDS(new_Auto,"new_Auto.rds")
|
#################################
# use qualtrics data in R object
#################################
# use.qualtrics: reshape a Qualtrics conjoint-survey export (already loaded
# as an R object) into long format: one row per respondent x task x profile,
# with one column per attribute plus a `selected` indicator.
# Column names are expected to follow the Conjoint SDT naming scheme:
#   "<letter>-<task>-<attribute>"            -> attribute-name columns
#   "<letter>-<task>-<profile>-<attribute>"  -> level columns
#
# Arguments:
#   object       - raw Qualtrics results (coerced below via as.vector)
#   responses    - per-task columns (names or indices) holding the chosen
#                  profile number; either `responses` or `ranks` is required
#   ranks        - per task-x-profile columns holding rank values, used in
#                  place of `responses`
#   covariates   - respondent-level columns carried through unchanged
#   respondentID - column that identifies respondents; if NULL, row order
#                  serves as the respondent index
#   letter       - prefix letter of the conjoint columns (default "F")
# Returns: a long-format data frame with respondent, respondentIndex, task,
#   profile, selected, the covariates, one factor column per attribute, and
#   <attribute>.rowpos columns giving each attribute's row position.
use.qualtrics <- function(object,responses=NULL,ranks=NULL,covariates = NULL,respondentID = NULL,letter="F"){
###### Load data and detect dimensions of things
# Load CSV Results
qualtrics_results <- as.vector(object)
# Extract variable names/question names
var_names <- q_names <- colnames(qualtrics_results)
# The rest is the raw data
qualtrics_data <- qualtrics_results
colnames(qualtrics_data) <- var_names
# Make respondent index
# NOTE(review): this value is unconditionally recomputed in the
# respondentID branch below, so this first assignment is redundant.
respondent_index <- 1:nrow(qualtrics_data)
# Find the attribute names and number of tasks
# Matches "<letter>-<task>-<attr>"; the negative lookahead (?!-) (perl=TRUE)
# keeps four-part level columns "<letter>-<task>-<profile>-<attr>" out.
attr_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+(?!-)"),collapse="")
attr_name_cols <- grep(attr_regexp, var_names, perl=TRUE)
# remove trailing whitespace
qualtrics_data[attr_name_cols] <- lapply(qualtrics_data[attr_name_cols], function (x) sub("\\s+$", "", x))
# Parse to matrix: rows are the components of each "<letter>-<task>-<attr>" name
attr_name_matrix <- matrix(unlist(strsplit(var_names[attr_name_cols],"-")),nrow=3,ncol=length(attr_name_cols))
colnames(attr_name_matrix) <- var_names[attr_name_cols]
attr_name_matrix <- attr_name_matrix[2:nrow(attr_name_matrix),]
attr_name_matrix <- as.data.frame(t(attr_name_matrix))
num_tasks <-unique(as.integer(attr_name_matrix[,1]))
# Find the level names and number of profiles
level_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+-[0-9]"),collapse="")
level_name_cols <- grep(level_regexp, var_names, perl=TRUE)
num_profiles <- length(unique(do.call(rbind,strsplit(var_names[level_name_cols],"-"))[,3]))
# Convert to matrix (rows: letter / task / profile / attribute)
level_name_matrix <- matrix(unlist(strsplit(var_names[level_name_cols],"-")),nrow=4,ncol=length(level_name_cols))
colnames(level_name_matrix) <- var_names[level_name_cols]
level_name_matrix <- level_name_matrix[2:nrow(level_name_matrix),]
level_name_matrix <- as.data.frame(t(level_name_matrix))
# Unique attributes: pool every attribute name observed across respondents
all_attr <- c()
for (attr_vec in attr_name_cols) {
all_attr <- c(all_attr,qualtrics_data[,attr_vec])
}
## Remove any trailing white spaces in strings
all_attr <- gsub(pattern = "\\s+$", replacement = "", all_attr)
#no missing values
unique_attr <- unique(all_attr)[nchar(unique(all_attr)) != 0]
#and no na's
unique_attr <- unique_attr[!is.na(unique_attr)]
####### Checks on input
# Are there any responses or ranks
if (is.null(responses) & is.null(ranks)) {
stop("Either responses or ranks must be non-NULL")
return(NULL)
}
# If there are responses, are there the right number?
if (!is.null(responses) && length(num_tasks) != length(responses)) {
# If number of responses doesn't match num_tasks
stop("Error: Number of response columns doesn't equal number of tasks in data")
return(NULL)
}
# If there are ranks, are there the right number?
if (!is.null(ranks) && length(num_tasks) != length(ranks)/num_profiles) {
# If number of ranks doesn't match num_tasks
stop("Error: Number of rank columns doesn't equal number of tasks times number of profiles in data")
return(NULL)
}
# If no attributes fit the description
if (length(attr_name_cols) == 0) {
stop("Error: Cannot find any columns designating attributes and levels. Please make sure the input file originated from a Qualtrics survey designed using the Conjoint SDT")
return(NULL)
}
# Check whether attribute columns are empty or not
for (attr_column in attr_name_cols) {
if (is.null(unique(qualtrics_data[,attr_column]))) {
stop(paste("Error, attribute column ", var_names[attr_column], " has no attribute names - recommend deleting this column"))
} else if (unique(qualtrics_data[,attr_column])[1] == "" & length(unique(qualtrics_data[,attr_column])) == 1) {
stop(paste("Error, attribute column ", var_names[attr_column], " has no attribute names - recommend deleting this column"))
}
}
# Check whether level columns are empty or not
for (lev_column in level_name_cols) {
if (is.null(unique(qualtrics_data[,lev_column]))) {
stop(paste("Error, level column ", var_names[lev_column], " has no attribute names - recommend deleting this column"))
} else if (unique(qualtrics_data[,lev_column])[1] == "" & length(unique(qualtrics_data[,lev_column])) == 1) {
stop(paste("Error, level column ", var_names[lev_column], " has no attribute names - recommend deleting this column"))
}
}
# If respondentID is not null, use it; otherwise fall back to row order
if (!is.null(respondentID)){
respondent_index <- qualtrics_data[,which(q_names %in% respondentID)]
}else{
respondent_index <- 1:nrow(qualtrics_data)
}
# Get the response rows (accept either column names or numeric indices)
if (is.character(responses[1])){
response_vars <- which(q_names %in% responses)
}else{
response_vars <- responses
}
# Make Sure no reserved characters are used in attribute names.
# This screening matters because attribute names are later spliced into
# strings that are run through eval(parse(text=...)).
if (sum(sum(grepl("\\[|\\]", unique(all_attr))),
sum(grepl("[\\$\\*\\+\\?\\^\\{\\}\\|\\(\\)\\/\\'\"]",unique(all_attr))))>0){
stop(paste("Error, attribute level has special characters"))
} else {
#grepl(paste0("^",unique(all_attr[unique(all_attr)!=""]),"_[0-9]+-[0-9]+$"), )
if (sum(grepl("^attribute_[0-9]+$",unique(all_attr)))>0) {
stop (paste("Error, attribute_[0-9]+ is reserved for the function."))
}
if (sum(grepl("^selected_[0-9]+-[0-9]$",unique(all_attr)))>0) {
stop (paste("Error, selected_[0-9]+-[0-9] is reserved for the function."))
}
}
# Initialize output dataframe: keep ID, covariates, and conjoint columns
colnames(qualtrics_data)[which(q_names %in% covariates)] <- covariates
out_data_set_cols <- c(which(q_names %in% respondentID),
which(q_names %in% covariates),
(attr_name_cols),
(level_name_cols)
)
out_data_dataset <- qualtrics_data[,out_data_set_cols]
id_var_name <- colnames(out_data_dataset)[which(q_names %in% respondentID)]
# Parameters
num_tasks <- unique(as.integer(attr_name_matrix[,1]))
num_profiles <- as.integer(unique(level_name_matrix[,2]))
num_attr <- unique(as.integer(attr_name_matrix[,2]))
# Replace all - with _ in "F-X-Y" (reshape() below uses "-" and "_" as
# separators, so the conjoint columns must not contain stray dashes)
attr_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]$"),collapse="")
temp.col.index <- grep(attr_regexp,colnames(out_data_dataset), perl=TRUE)
colnames(out_data_dataset)[temp.col.index] <- gsub("-","_",colnames(out_data_dataset)[temp.col.index])
# Replace all - with _ in "F-X-Y-Z"
level_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+-[0-9]$"),collapse="")
temp.col.index <- grep(level_regexp,colnames(out_data_dataset), perl=TRUE)
colnames(out_data_dataset)[temp.col.index] <- gsub("-","_",colnames(out_data_dataset)[temp.col.index])
# Clean attribute names: copy the task-1 attribute labels into attribute_<k>.
# NOTE(review): dynamic columns are built via eval(parse(text=...)) here and
# below; inputs were screened for regex/quote metacharacters above.
for (i in num_attr){
temp.cmd <- paste0("out_data_dataset$attribute_",i,"<-","out_data_dataset$'",letter,"_1_",i,"'")
eval(parse(text=temp.cmd))
}
temp_regexp <- paste(c("^",letter,"_[0-9]+_[0-9]$"),collapse="")
temp.col.index <- grep(temp_regexp,colnames(out_data_dataset), perl=TRUE)
out_data_dataset <- out_data_dataset[,-temp.col.index]
# Test Selected: responses may only be "" or a valid profile number
test.selected <- sum(!unique(unlist(qualtrics_data[,response_vars])) %in% c("",num_profiles))==0
if (!test.selected){
stop(paste0("Responses can only take values among (",paste(num_profiles, collapse = ","),")"))
return (NULL)
}
# Generate Selected: selected_<profile>-<task> is 1 when that profile was
# chosen in that task, 0 otherwise ("" responses stay "").
# NOTE(review): response_vars is indexed by the task number i, which assumes
# tasks are numbered 1..T and responses are given in task order -- confirm.
if (is.null(ranks)){
for (i in num_tasks){
temp.cmd <- paste0("temp.selected","<-","qualtrics_data[,",response_vars[i],"]")
eval(parse(text=temp.cmd))
for (j in num_profiles){
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'<-","ifelse(temp.selected==j,1,0)")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'[temp.selected","=='']","<-","''")
eval(parse(text=temp.cmd))
}
}
} else {
# With ranks, selected_<profile>-<task> carries the raw rank value instead
ranks_col <- which(q_names %in% ranks)
for (i in num_tasks){
for (j in num_profiles){
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'<-","qualtrics_data[,",ranks_col[(i-1)*length(num_profiles)+j],"]")
eval(parse(text=temp.cmd))
}
}
}
# Remove row if attribute name is empty and trim all attribute entries
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
for (i in num_attr){
temp.cmd <- paste0("out_data_dataset","<-subset(out_data_dataset, attribute_",i,"!='')")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$attribute_",i," <- ","trim(out_data_dataset$attribute_",i,")")
eval(parse(text=temp.cmd))
}
# Generate Attributes: pre-create empty "<attr>_<profile>-<task>" value
# columns and "<attr>.rowpos_<profile>-<task>" row-position columns
attribute_var_names <- unique(unlist(out_data_dataset[,grep("attribute_[0-9]+$", colnames(out_data_dataset))]))
attribute_var_names_label <- gsub(" ",".", attribute_var_names)
for (i in num_tasks){
for (j in num_profiles){
for (r in length(attribute_var_names)){
temp.cmd <- paste0("out_data_dataset$'",attribute_var_names[r],"_",j,"-",i,"'<-''")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'",attribute_var_names[r],".rowpos_",j,"-",i,"'<-''")
eval(parse(text=temp.cmd))
}
}
}
# Fill the attribute columns: for each task/profile/attribute-row, copy the
# level shown to rows whose attribute_<k> matches attribute name r, and
# record k as that attribute's row position
for (i in num_tasks){
for (j in num_profiles){
for (k in num_attr){
for (r in attribute_var_names){
temp.cmd <- paste0("out_data_dataset$'",r,"_",j,"-",i,"'",
"[out_data_dataset$attribute_",k,"=='",r,
"']<-out_data_dataset$",letter,"_",i,"_",j,"_",k,
"[out_data_dataset$attribute_",k,"=='",r,"']")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'",r,".rowpos","_",j,"-",i,"'",
"[out_data_dataset$attribute_",k,"=='",r,
"']<-k")
eval(parse(text=temp.cmd))
}
}
}
}
# Drop the raw level columns, now copied into the attribute columns
temp_regexp <- paste(c("^",letter,"_[0-9]+_[0-9]+_[0-9]$"),collapse="")
temp.col.index <- grep(temp_regexp,colnames(out_data_dataset), perl=TRUE)
out_data_dataset <- out_data_dataset[,-temp.col.index]
# Delete attribute names
regex.temp <- paste0("^attribute","_[0-9]+","$")
out_data_dataset <- out_data_dataset[,-grep(regex.temp, colnames(out_data_dataset))]
# Reshape the dataset Batch 1 - Round/Task: wide -> long over tasks
# (the "-<task>" suffix becomes the `task` column)
regex.temp <- paste(paste0("^",attribute_var_names,"_[0-9]+-[0-9]+","$"),collapse="|")
regex.temp.2 <- paste(paste0("^",attribute_var_names,".rowpos_[0-9]+-[0-9]+","$"),collapse="|")
varying.temp <- colnames(out_data_dataset)[grep(regex.temp, colnames(out_data_dataset))]
varying.temp.2 <- colnames(out_data_dataset)[grep(regex.temp.2, colnames(out_data_dataset))]
varying.temp.3 <- colnames(out_data_dataset)[grep("^selected_[0-9]+-[0-9]+$", colnames(out_data_dataset))]
varying.temp <- c(varying.temp, varying.temp.2, varying.temp.3)
out_data_dataset <- reshape(out_data_dataset,
idvar = id_var_name,
varying = varying.temp,
sep = "-",
timevar = "task",
times = num_tasks,
new.row.names = 1:(length(num_tasks)*nrow(out_data_dataset)),
direction = "long")
# Reshape the dataset Batch 2 - Profile: long again over profiles
# (the "_<profile>" suffix becomes the `profile` column)
regex.temp <- paste(paste0("^",attribute_var_names,"_[0-9]+","$"), collapse="|")
regex.temp.2 <- paste(paste0("^",attribute_var_names,".rowpos_[0-9]+","$"), collapse="|")
varying.temp <- colnames(out_data_dataset)[grep(regex.temp, colnames(out_data_dataset))]
varying.temp.2 <- colnames(out_data_dataset)[grep(regex.temp.2, colnames(out_data_dataset))]
varying.temp.3 <- colnames(out_data_dataset)[grep("^selected_[0-9]+$", colnames(out_data_dataset))]
varying.temp <- c(varying.temp, varying.temp.2, varying.temp.3)
out_data_dataset <- reshape(out_data_dataset,
idvar = id_var_name,
varying = varying.temp,
sep = "_",
timevar = "profile",
times = num_profiles,
new.row.names = 1:(length(num_profiles)*nrow(out_data_dataset)),
direction = "long")
## Post-processing: coerce attribute columns to factors and index columns
## to integers, and standardize the respondent identifier column name
colnames(out_data_dataset)<- gsub(" ",".",colnames(out_data_dataset))
for (m in attribute_var_names_label){
out_data_dataset[[m]] <- as.factor(out_data_dataset[[m]])
}
colnames(out_data_dataset)[which(colnames(out_data_dataset)==id_var_name)] <- "respondent"
out_data_dataset$respondentIndex <- as.factor(out_data_dataset$respondent)
out_data_dataset$respondentIndex <- as.integer(out_data_dataset$respondentIndex)
out_data_dataset$selected <- as.integer(out_data_dataset$selected)
out_data_dataset$task <- as.integer(out_data_dataset$task)
out_data_dataset$profile <- as.integer(out_data_dataset$profile)
# Return dataset
return(out_data_dataset)
} | /usequaltrics.R | no_license | cdermont/r_cjoint_cd | R | false | false | 13,055 | r | #################################
# use qualtrics data in R object
#################################
use.qualtrics <- function(object,responses=NULL,ranks=NULL,covariates = NULL,respondentID = NULL,letter="F"){
###### Load data and detect dimensions of things
# Load CSV Results
qualtrics_results <- as.vector(object)
# Extract variable names/question names
var_names <- q_names <- colnames(qualtrics_results)
# The rest is the raw data
qualtrics_data <- qualtrics_results
colnames(qualtrics_data) <- var_names
# Make respondent index
respondent_index <- 1:nrow(qualtrics_data)
# Find the attribute names and number of tasks
attr_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+(?!-)"),collapse="")
attr_name_cols <- grep(attr_regexp, var_names, perl=TRUE)
# remove trailing whitespace
qualtrics_data[attr_name_cols] <- lapply(qualtrics_data[attr_name_cols], function (x) sub("\\s+$", "", x))
# Parse to matrix
attr_name_matrix <- matrix(unlist(strsplit(var_names[attr_name_cols],"-")),nrow=3,ncol=length(attr_name_cols))
colnames(attr_name_matrix) <- var_names[attr_name_cols]
attr_name_matrix <- attr_name_matrix[2:nrow(attr_name_matrix),]
attr_name_matrix <- as.data.frame(t(attr_name_matrix))
num_tasks <-unique(as.integer(attr_name_matrix[,1]))
# Find the level names and number of profiles
level_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+-[0-9]"),collapse="")
level_name_cols <- grep(level_regexp, var_names, perl=TRUE)
num_profiles <- length(unique(do.call(rbind,strsplit(var_names[level_name_cols],"-"))[,3]))
# Convert to matrix
level_name_matrix <- matrix(unlist(strsplit(var_names[level_name_cols],"-")),nrow=4,ncol=length(level_name_cols))
colnames(level_name_matrix) <- var_names[level_name_cols]
level_name_matrix <- level_name_matrix[2:nrow(level_name_matrix),]
level_name_matrix <- as.data.frame(t(level_name_matrix))
# Unique attributes
all_attr <- c()
for (attr_vec in attr_name_cols) {
all_attr <- c(all_attr,qualtrics_data[,attr_vec])
}
## Remove any trailing white spaces in strings
all_attr <- gsub(pattern = "\\s+$", replacement = "", all_attr)
#no missing values
unique_attr <- unique(all_attr)[nchar(unique(all_attr)) != 0]
#and no na's
unique_attr <- unique_attr[!is.na(unique_attr)]
####### Checks on input
# Are there any responses or ranks
if (is.null(responses) & is.null(ranks)) {
stop("Either responses or ranks must be non-NULL")
return(NULL)
}
# If there are responses, are there the right number?
if (!is.null(responses) && length(num_tasks) != length(responses)) {
# If number of responses doesn't match num_tasks
stop("Error: Number of response columns doesn't equal number of tasks in data")
return(NULL)
}
# If there are ranks, are there the right number?
if (!is.null(ranks) && length(num_tasks) != length(ranks)/num_profiles) {
# If number of ranks doesn't match num_tasks
stop("Error: Number of rank columns doesn't equal number of tasks times number of profiles in data")
return(NULL)
}
# If no attributes fit the description
if (length(attr_name_cols) == 0) {
stop("Error: Cannot find any columns designating attributes and levels. Please make sure the input file originated from a Qualtrics survey designed using the Conjoint SDT")
return(NULL)
}
# Check whether attribute columns are empty or not
for (attr_column in attr_name_cols) {
if (is.null(unique(qualtrics_data[,attr_column]))) {
stop(paste("Error, attribute column ", var_names[attr_column], " has no attribute names - recommend deleting this column"))
} else if (unique(qualtrics_data[,attr_column])[1] == "" & length(unique(qualtrics_data[,attr_column])) == 1) {
stop(paste("Error, attribute column ", var_names[attr_column], " has no attribute names - recommend deleting this column"))
}
}
# Check whether level columns are empty or not
for (lev_column in level_name_cols) {
if (is.null(unique(qualtrics_data[,lev_column]))) {
stop(paste("Error, level column ", var_names[lev_column], " has no attribute names - recommend deleting this column"))
} else if (unique(qualtrics_data[,lev_column])[1] == "" & length(unique(qualtrics_data[,lev_column])) == 1) {
stop(paste("Error, level column ", var_names[lev_column], " has no attribute names - recommend deleting this column"))
}
}
# If respondentID is not null
if (!is.null(respondentID)){
respondent_index <- qualtrics_data[,which(q_names %in% respondentID)]
}else{
respondent_index <- 1:nrow(qualtrics_data)
}
# Get the response rows
if (is.character(responses[1])){
response_vars <- which(q_names %in% responses)
}else{
response_vars <- responses
}
# Make Sure no reserved characters are used in attribute names
if (sum(sum(grepl("\\[|\\]", unique(all_attr))),
sum(grepl("[\\$\\*\\+\\?\\^\\{\\}\\|\\(\\)\\/\\'\"]",unique(all_attr))))>0){
stop(paste("Error, attribute level has special characters"))
} else {
#grepl(paste0("^",unique(all_attr[unique(all_attr)!=""]),"_[0-9]+-[0-9]+$"), )
if (sum(grepl("^attribute_[0-9]+$",unique(all_attr)))>0) {
stop (paste("Error, attribute_[0-9]+ is reserved for the function."))
}
if (sum(grepl("^selected_[0-9]+-[0-9]$",unique(all_attr)))>0) {
stop (paste("Error, selected_[0-9]+-[0-9] is reserved for the function."))
}
}
# Initialize output dataframe
colnames(qualtrics_data)[which(q_names %in% covariates)] <- covariates
out_data_set_cols <- c(which(q_names %in% respondentID),
which(q_names %in% covariates),
(attr_name_cols),
(level_name_cols)
)
out_data_dataset <- qualtrics_data[,out_data_set_cols]
id_var_name <- colnames(out_data_dataset)[which(q_names %in% respondentID)]
# Parameters
num_tasks <- unique(as.integer(attr_name_matrix[,1]))
num_profiles <- as.integer(unique(level_name_matrix[,2]))
num_attr <- unique(as.integer(attr_name_matrix[,2]))
# Replace all - with _ in "F-X-Y"
attr_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]$"),collapse="")
temp.col.index <- grep(attr_regexp,colnames(out_data_dataset), perl=TRUE)
colnames(out_data_dataset)[temp.col.index] <- gsub("-","_",colnames(out_data_dataset)[temp.col.index])
# Replace all - with _ in "F-X-Y-Z"
level_regexp <- paste(c("^",letter,"-[0-9]+-[0-9]+-[0-9]$"),collapse="")
temp.col.index <- grep(level_regexp,colnames(out_data_dataset), perl=TRUE)
colnames(out_data_dataset)[temp.col.index] <- gsub("-","_",colnames(out_data_dataset)[temp.col.index])
# Clean attribute names
for (i in num_attr){
temp.cmd <- paste0("out_data_dataset$attribute_",i,"<-","out_data_dataset$'",letter,"_1_",i,"'")
eval(parse(text=temp.cmd))
}
temp_regexp <- paste(c("^",letter,"_[0-9]+_[0-9]$"),collapse="")
temp.col.index <- grep(temp_regexp,colnames(out_data_dataset), perl=TRUE)
out_data_dataset <- out_data_dataset[,-temp.col.index]
# Test Selected
test.selected <- sum(!unique(unlist(qualtrics_data[,response_vars])) %in% c("",num_profiles))==0
if (!test.selected){
stop(paste0("Responses can only take values among (",paste(num_profiles, collapse = ","),")"))
return (NULL)
}
# Generate Selected
if (is.null(ranks)){
for (i in num_tasks){
temp.cmd <- paste0("temp.selected","<-","qualtrics_data[,",response_vars[i],"]")
eval(parse(text=temp.cmd))
for (j in num_profiles){
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'<-","ifelse(temp.selected==j,1,0)")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'[temp.selected","=='']","<-","''")
eval(parse(text=temp.cmd))
}
}
} else {
ranks_col <- which(q_names %in% ranks)
for (i in num_tasks){
for (j in num_profiles){
temp.cmd <- paste0("out_data_dataset$'selected_",j,"-",i,"'<-","qualtrics_data[,",ranks_col[(i-1)*length(num_profiles)+j],"]")
eval(parse(text=temp.cmd))
}
}
}
# Remove row if attribute name is empty and trim all attribute entries
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
for (i in num_attr){
temp.cmd <- paste0("out_data_dataset","<-subset(out_data_dataset, attribute_",i,"!='')")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$attribute_",i," <- ","trim(out_data_dataset$attribute_",i,")")
eval(parse(text=temp.cmd))
}
# Generate Attributes
attribute_var_names <- unique(unlist(out_data_dataset[,grep("attribute_[0-9]+$", colnames(out_data_dataset))]))
attribute_var_names_label <- gsub(" ",".", attribute_var_names)
for (i in num_tasks){
for (j in num_profiles){
for (r in length(attribute_var_names)){
temp.cmd <- paste0("out_data_dataset$'",attribute_var_names[r],"_",j,"-",i,"'<-''")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'",attribute_var_names[r],".rowpos_",j,"-",i,"'<-''")
eval(parse(text=temp.cmd))
}
}
}
for (i in num_tasks){
for (j in num_profiles){
for (k in num_attr){
for (r in attribute_var_names){
temp.cmd <- paste0("out_data_dataset$'",r,"_",j,"-",i,"'",
"[out_data_dataset$attribute_",k,"=='",r,
"']<-out_data_dataset$",letter,"_",i,"_",j,"_",k,
"[out_data_dataset$attribute_",k,"=='",r,"']")
eval(parse(text=temp.cmd))
temp.cmd <- paste0("out_data_dataset$'",r,".rowpos","_",j,"-",i,"'",
"[out_data_dataset$attribute_",k,"=='",r,
"']<-k")
eval(parse(text=temp.cmd))
}
}
}
}
temp_regexp <- paste(c("^",letter,"_[0-9]+_[0-9]+_[0-9]$"),collapse="")
temp.col.index <- grep(temp_regexp,colnames(out_data_dataset), perl=TRUE)
out_data_dataset <- out_data_dataset[,-temp.col.index]
# Delete attribute names
regex.temp <- paste0("^attribute","_[0-9]+","$")
out_data_dataset <- out_data_dataset[,-grep(regex.temp, colnames(out_data_dataset))]
# Reshape the dataset Batch 1 - Round/Task
regex.temp <- paste(paste0("^",attribute_var_names,"_[0-9]+-[0-9]+","$"),collapse="|")
regex.temp.2 <- paste(paste0("^",attribute_var_names,".rowpos_[0-9]+-[0-9]+","$"),collapse="|")
varying.temp <- colnames(out_data_dataset)[grep(regex.temp, colnames(out_data_dataset))]
varying.temp.2 <- colnames(out_data_dataset)[grep(regex.temp.2, colnames(out_data_dataset))]
varying.temp.3 <- colnames(out_data_dataset)[grep("^selected_[0-9]+-[0-9]+$", colnames(out_data_dataset))]
varying.temp <- c(varying.temp, varying.temp.2, varying.temp.3)
out_data_dataset <- reshape(out_data_dataset,
idvar = id_var_name,
varying = varying.temp,
sep = "-",
timevar = "task",
times = num_tasks,
new.row.names = 1:(length(num_tasks)*nrow(out_data_dataset)),
direction = "long")
# Reshape the dataset Batch 2 - Profile
regex.temp <- paste(paste0("^",attribute_var_names,"_[0-9]+","$"), collapse="|")
regex.temp.2 <- paste(paste0("^",attribute_var_names,".rowpos_[0-9]+","$"), collapse="|")
varying.temp <- colnames(out_data_dataset)[grep(regex.temp, colnames(out_data_dataset))]
varying.temp.2 <- colnames(out_data_dataset)[grep(regex.temp.2, colnames(out_data_dataset))]
varying.temp.3 <- colnames(out_data_dataset)[grep("^selected_[0-9]+$", colnames(out_data_dataset))]
varying.temp <- c(varying.temp, varying.temp.2, varying.temp.3)
out_data_dataset <- reshape(out_data_dataset,
idvar = id_var_name,
varying = varying.temp,
sep = "_",
timevar = "profile",
times = num_profiles,
new.row.names = 1:(length(num_profiles)*nrow(out_data_dataset)),
direction = "long")
## Post-processiong
colnames(out_data_dataset)<- gsub(" ",".",colnames(out_data_dataset))
for (m in attribute_var_names_label){
out_data_dataset[[m]] <- as.factor(out_data_dataset[[m]])
}
colnames(out_data_dataset)[which(colnames(out_data_dataset)==id_var_name)] <- "respondent"
out_data_dataset$respondentIndex <- as.factor(out_data_dataset$respondent)
out_data_dataset$respondentIndex <- as.integer(out_data_dataset$respondentIndex)
out_data_dataset$selected <- as.integer(out_data_dataset$selected)
out_data_dataset$task <- as.integer(out_data_dataset$task)
out_data_dataset$profile <- as.integer(out_data_dataset$profile)
# Return dataset
return(out_data_dataset)
} |
# runall: execute the pipeline scripts in their original order, then plot
# the delta results produced by the reporting steps.
pipeline_dir <- "~/Projects/covid19-dq-dpc/R"
pipeline_scripts <- c(
  "00_prepare_session.R",
  "01_prepare_data.R",
  "02_apply_checks.R",
  "03_report_results.R",
  "99_initial_loading.R",
  "04_tweet_report.R"
)
for (script_file in pipeline_scripts) {
  source(file.path(pipeline_dir, script_file))
}
barplot(results_delta)
| /R/_runall.R | no_license | covid19-dq-monitor/covid19-dq-dpc | R | false | false | 379 | r | # runall
source("~/Projects/covid19-dq-dpc/R/00_prepare_session.R")
source("~/Projects/covid19-dq-dpc/R/01_prepare_data.R")
source("~/Projects/covid19-dq-dpc/R/02_apply_checks.R")
source("~/Projects/covid19-dq-dpc/R/03_report_results.R")
source("~/Projects/covid19-dq-dpc/R/99_initial_loading.R")
source("~/Projects/covid19-dq-dpc/R/04_tweet_report.R")
barplot(results_delta)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DE_PART_results_functions.R
\name{volcanoplot_alt}
\alias{volcanoplot_alt}
\title{Create a volcano plot}
\usage{
volcanoplot_alt(
DE_res,
genes_of_interest = c(),
filter_choice = "padj",
l2FC_thresh = 1,
p_thresh = 0.05,
plot_title = "Volcano plot",
label_top_n = 0,
show_non_sig_interest = TRUE
)
}
\arguments{
\item{DE_res}{The differential expression results to be plotted}
\item{genes_of_interest}{A vector containing gene names to be labelled in the plot
To not label any genes, leave as default or provide an empty vector.}
\item{filter_choice}{Either padj or pvalue, the choice will be used to filter for
significance}
\item{l2FC_thresh}{The log2FoldChange threshold used to establish significance}
\item{p_thresh}{The pvalue or padj threshold used to establish significance}
\item{plot_title}{The title to be given to the plot}
\item{label_top_n}{The number of genes to label. Genes will be taken in order of
significance where the genes with the lowest adjusted p-value are taken first.}
\item{show_non_sig_interest}{A boolean to indicate if the non-significant genes of
interest should be shown.}
}
\value{
The ggplot object for the volcano plot
}
\description{
The function creates a 'four quadrant' volcano plot, where the FDR and log2FoldChange
thresholds dictate the significant up-regulated category, the significant downregulated
category, the significant low regulation category and the non-significant category
Genes of interest are labeled in a rectangle for visibility and will have the same
color as the category in which they are in.
Top significant genes are also labelled in rectangles but will have black text
in order to distinguish them from the genes of interest
The plot is created using ggplot2, to save a plot the ggsave() function is recommended
It is also recommended to use the following parameters to save the plot.
dpi=300 width=21 height=19 units='cm'
}
\examples{
TS_object<-create_example_object_for_R()
TS_object <- normalize_timeSeries_with_deseq2(time_object=TS_object)
#Perform conditional differential gene expression analysis
TS_object<-conditional_DE_wrapper(TS_object,vignette_run=TRUE)
DE_res<-slot(TS_object,'DE_results')$conditional$IgM_vs_LPS_TP_1$DE_raw_data
v_plot<-volcanoplot_alt(DE_res = DE_res)
}
| /man/volcanoplot_alt.Rd | permissive | Ylefol/TimeSeriesAnalysis | R | false | true | 2,357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DE_PART_results_functions.R
\name{volcanoplot_alt}
\alias{volcanoplot_alt}
\title{Create a volcano plot}
\usage{
volcanoplot_alt(
DE_res,
genes_of_interest = c(),
filter_choice = "padj",
l2FC_thresh = 1,
p_thresh = 0.05,
plot_title = "Volcano plot",
label_top_n = 0,
show_non_sig_interest = TRUE
)
}
\arguments{
\item{DE_res}{The differential expression results to be plotted}
\item{genes_of_interest}{A vector containing gene names to be labelled in the plot
To not label any genes, leave as default or provide an empty vector.}
\item{filter_choice}{Either padj or pvalue, the choice will be used to filter for
significance}
\item{l2FC_thresh}{The log2FoldChange threshold used to establish significance}
\item{p_thresh}{The pvalue or padj threshold used to establish significance}
\item{plot_title}{The title to be give to the plot}
\item{label_top_n}{The number of genes to label. Genes will be taken in order of
significance where the genes with the lowest adjusted p-value are taken first.}
\item{show_non_sig_interest}{A boolean to indicate if the non-significant genes of
interest should be shown.}
}
\value{
The ggplot object for the volcano plot
}
\description{
The function creates a 'four quadrant' volcano plot, where the FDR and log2FoldChange
thresholds dictate the significant up-regulated category, the significant downregulated
category, the significant low regulation category and the non-significant category
Genes of interest are labeled in a rectangle for visibility and will have the same
color as the category in which they are in.
Top significant genes are also labelled in rectangles but will have black text
in order to distinguish them from the genes of interest
The plot is created using ggplot2, to save a plot the ggsave() function is recommended
It is also recommended to use the following parameters to save the plot.
dpi=300 width=21 height=19 units='cm'
}
\examples{
TS_object<-create_example_object_for_R()
TS_object <- normalize_timeSeries_with_deseq2(time_object=TS_object)
#Perform conditional differential gene expression analysis
TS_object<-conditional_DE_wrapper(TS_object,vignette_run=TRUE)
DE_res<-slot(TS_object,'DE_results')$conditional$IgM_vs_LPS_TP_1$DE_raw_data
v_plot<-volcanoplot_alt(DE_res = DE_res)
}
|
#Get descriptives
library(xtable)
library(psych)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) #remove this code before chtc
dir()[grep(".csv", dir())]
data <- read.csv("pisa.2018.usa.imp.read.csv")
library(mctest)
mod <- lm(PV1READ ~ ., data = data)
mctest(mod)
imcdiag(mod)
data$Female <- ifelse(data$Female=="Female",1,0)
xtable(describe(data)[,c("mean", "sd", "median", "min", "max", "skew", "kurtosis")])
#results table
dir("./Results/")
N200 <- read.xlsx("./Results/all.results.xlsx", sheetIndex = 1, as.data.frame = TRUE)
N200_res <- N200[1:11,]
rownames(N200_res) <- N200_res[,1]
N200_res$NA. <- NULL
N200_res <- N200_res[c("pred.cov", "bias", "rmspe", "KL", "Pred Error"),]
methods <- colnames(N200_res)
N200_res <- apply(N200_res, 1, as.numeric)
row.names(N200_res) <- methods
xtable(N200_res, digits = 3)
N200_pmp <- N200[13:15,]
colnames(N200_pmp) <- N200_pmp[1,]
N200_pmp <- N200_pmp[2:3,]
rownames(N200_pmp) <- N200_pmp[,1]
N200_pmp$'NA' <- NULL
N200_pmp <- N200_pmp[,c("mean", "sd", "median", "min", "max")]
N200_pmp <- apply(N200_pmp, 2, as.numeric)
rownames(N200_pmp) <- c("Tot.PMP", "Best.PMP")
#
N1000 <- read.xlsx("./Results/all.results.xlsx", sheetIndex = 2, as.data.frame = TRUE)
N1000_res <- N1000[1:11,]
rownames(N1000_res) <- N1000_res[,1]
N1000_res$NA. <- NULL
N1000_res <- N1000_res[c("pred.cov", "bias", "rmspe", "KL", "Pred Error"),]
methods <- colnames(N1000_res)
N1000_res <- apply(N1000_res, 1, as.numeric)
row.names(N1000_res) <- methods
xtable(N1000_res, digits = 3)
N1000_pmp <- N1000[13:15,]
colnames(N1000_pmp) <- N1000_pmp[1,]
N1000_pmp <- N1000_pmp[2:3,]
rownames(N1000_pmp) <- N1000_pmp[,1]
N1000_pmp$'NA' <- NULL
N1000_pmp <- N1000_pmp[,c("mean", "sd", "median", "min", "max")]
N1000_pmp <- apply(N1000_pmp, 2, as.numeric)
rownames(N1000_pmp) <- c("Tot.PMP", "Best.PMP")
xtable(rbind(N200_pmp,N1000_pmp))
desc_study1 <- read.xlsx("./../../../../Box/BDB/Data/Copy of descriptive.xlsx", sheetIndex = 2)
xtable(desc_study1)
| /Descriptive.R | no_license | Sinan-Yavuz/RegressionModelComparison | R | false | false | 2,003 | r |
#Get descriptives
library(xtable)
library(psych)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) #remove this code before chtc
dir()[grep(".csv", dir())]
data <- read.csv("pisa.2018.usa.imp.read.csv")
library(mctest)
mod <- lm(PV1READ ~ ., data = data)
mctest(mod)
imcdiag(mod)
data$Female <- ifelse(data$Female=="Female",1,0)
xtable(describe(data)[,c("mean", "sd", "median", "min", "max", "skew", "kurtosis")])
#results table
dir("./Results/")
N200 <- read.xlsx("./Results/all.results.xlsx", sheetIndex = 1, as.data.frame = TRUE)
N200_res <- N200[1:11,]
rownames(N200_res) <- N200_res[,1]
N200_res$NA. <- NULL
N200_res <- N200_res[c("pred.cov", "bias", "rmspe", "KL", "Pred Error"),]
methods <- colnames(N200_res)
N200_res <- apply(N200_res, 1, as.numeric)
row.names(N200_res) <- methods
xtable(N200_res, digits = 3)
N200_pmp <- N200[13:15,]
colnames(N200_pmp) <- N200_pmp[1,]
N200_pmp <- N200_pmp[2:3,]
rownames(N200_pmp) <- N200_pmp[,1]
N200_pmp$'NA' <- NULL
N200_pmp <- N200_pmp[,c("mean", "sd", "median", "min", "max")]
N200_pmp <- apply(N200_pmp, 2, as.numeric)
rownames(N200_pmp) <- c("Tot.PMP", "Best.PMP")
#
N1000 <- read.xlsx("./Results/all.results.xlsx", sheetIndex = 2, as.data.frame = TRUE)
N1000_res <- N1000[1:11,]
rownames(N1000_res) <- N1000_res[,1]
N1000_res$NA. <- NULL
N1000_res <- N1000_res[c("pred.cov", "bias", "rmspe", "KL", "Pred Error"),]
methods <- colnames(N1000_res)
N1000_res <- apply(N1000_res, 1, as.numeric)
row.names(N1000_res) <- methods
xtable(N1000_res, digits = 3)
N1000_pmp <- N1000[13:15,]
colnames(N1000_pmp) <- N1000_pmp[1,]
N1000_pmp <- N1000_pmp[2:3,]
rownames(N1000_pmp) <- N1000_pmp[,1]
N1000_pmp$'NA' <- NULL
N1000_pmp <- N1000_pmp[,c("mean", "sd", "median", "min", "max")]
N1000_pmp <- apply(N1000_pmp, 2, as.numeric)
rownames(N1000_pmp) <- c("Tot.PMP", "Best.PMP")
xtable(rbind(N200_pmp,N1000_pmp))
desc_study1 <- read.xlsx("./../../../../Box/BDB/Data/Copy of descriptive.xlsx", sheetIndex = 2)
xtable(desc_study1)
|
#' Mathieu's Custom Theme
#'
#' Custom ggplot2 themes for plots
#'
#' @param title.label Title
#' @param x.label X Label
#' @param y.label Y Label
#' @param subtitle.label Subtitle
#' @param base_size Font size
#' @param watermark.coord Coordinates for watermark
#' @param watermark.size Size of the watermark
#' @param watermark.alpha Transparency of the watermark
#' @param watermark.angle Angle of the watermark
#' @param watermark.color Color of the watermark
#'
#' @return The ggplot2 theme
#' @export
#'
#' @examples
#' ggplot(mtcars, aes(x=wt,y=qsec)) +
#' geom_point() +
#' theme_mathieu(title.label="mathieuR Theme Example",
#' subtitle.label="Compares wt and qsec of the mtcars dataset",
#' x.label = "Weight",
#' y.label = "QSEC",
#' watermark.coord=c(mean(mtcars$wt), mean(mtcars$qsec)))
theme_mathieu <- function(title.label,
x.label="",
y.label="",
subtitle.label="",
base_size=14,
watermark.coord=NULL,
watermark.size=15,
watermark.alpha=0.15,
watermark.angle=30,
watermark.color="red"){
if (!is.null(watermark.coord)){
watermark <- paste0("@mathieubray ",lubridate::year(lubridate::today()))
} else {
watermark <- ""
}
list(ggplot2::theme_bw(base_size = base_size),
ggplot2::ggtitle(title.label, subtitle=subtitle.label),
xlab(x.label),
ylab(y.label),
ggplot2::annotate("text",x=watermark.coord[1],y=watermark.coord[2],col=watermark.color,label=watermark,
alpha=watermark.alpha,cex=watermark.size,fontface="bold",angle=watermark.angle)
)
}
| /R/theme_mathieu.R | no_license | mathieubray/mathieuR | R | false | false | 1,756 | r | #' Mathieu's Custom Theme
#'
#' Custom ggplot2 themes for plots
#'
#' @param title.label Title
#' @param x.label X Label
#' @param y.label Y Label
#' @param subtitle.label Subtitle
#' @param base_size Font size
#' @param watermark.coord Coordinates for watermark
#' @param watermark.size Size of the watermark
#' @param watermark.alpha Transparency of the watermark
#' @param watermark.angle Angle of the watermark
#' @param watermark.color Color of the watermark
#'
#' @return The ggplot2 theme
#' @export
#'
#' @examples
#' ggplot(mtcars, aes(x=wt,y=qsec)) +
#' geom_point() +
#' theme_mathieu(title.label="mathieuR Theme Example",
#' subtitle.label="Compares wt and qsec of the mtcars dataset",
#' x.label = "Weight",
#' y.label = "QSEC",
#' watermark.coord=c(mean(mtcars$wt), mean(mtcars$qsec)))
theme_mathieu <- function(title.label,
x.label="",
y.label="",
subtitle.label="",
base_size=14,
watermark.coord=NULL,
watermark.size=15,
watermark.alpha=0.15,
watermark.angle=30,
watermark.color="red"){
if (!is.null(watermark.coord)){
watermark <- paste0("@mathieubray ",lubridate::year(lubridate::today()))
} else {
watermark <- ""
}
list(ggplot2::theme_bw(base_size = base_size),
ggplot2::ggtitle(title.label, subtitle=subtitle.label),
xlab(x.label),
ylab(y.label),
ggplot2::annotate("text",x=watermark.coord[1],y=watermark.coord[2],col=watermark.color,label=watermark,
alpha=watermark.alpha,cex=watermark.size,fontface="bold",angle=watermark.angle)
)
}
|
# Function to sum up all the same names
sumNames <- function(state,cnames){
state_new <- as.data.frame(matrix(NA, length(cnames), length(names(state))))
names(state_new) <- names(state)
state_new$cnames <- cnames
is.nan.data.frame <- function(x){
do.call(cbind, lapply(x, is.nan))}
state[is.nan.data.frame(state)] <- NA
for (i in 1:length(cnames)){
tmpidx <- which(cnames[i] == state$cnames)
state_new$F0[i] <- mean(state$F0[tmpidx], na.rm = T)
state_new$Catch[i] <- sum(state$Catch[tmpidx], na.rm = T)
state_new$Landings[i] <- sum(state$Landings[tmpidx], na.rm = T)
state_new$Biomass[i] <- sum(state$Biomass[tmpidx], na.rm = T)
state_new$SSB[i] <- sum(state$SSB[tmpidx], na.rm = T)
state_new$M[i] <- mean(state$M[tmpidx], na.rm = T)
state_new$Fmsy[i] <- mean(state$Fmsy[tmpidx], na.rm = T)
state_new$k[i] <- mean(state$k[tmpidx], na.rm = T)
state_new$t0[i] <- mean(state$t0[tmpidx], na.rm = T)
state_new$wInf[i] <- mean(state$wInf[tmpidx], na.rm = T)
}
state_new$Biomass[state_new$Biomass == 0] <- NA
state_new$Catch[state_new$Catch == 0] <- NA
state_new$Landings[state_new$Landings == 0] <- NA
state_new$SSB[state_new$SSB == 0] <- NA
return(state_new)
}
| /analysis/includes/size_spectra/sumNames.R | permissive | Philipp-Neubauer/large-marine-sizespectrum | R | false | false | 1,294 | r | # Function to sum up all the same names
sumNames <- function(state,cnames){
state_new <- as.data.frame(matrix(NA, length(cnames), length(names(state))))
names(state_new) <- names(state)
state_new$cnames <- cnames
is.nan.data.frame <- function(x){
do.call(cbind, lapply(x, is.nan))}
state[is.nan.data.frame(state)] <- NA
for (i in 1:length(cnames)){
tmpidx <- which(cnames[i] == state$cnames)
state_new$F0[i] <- mean(state$F0[tmpidx], na.rm = T)
state_new$Catch[i] <- sum(state$Catch[tmpidx], na.rm = T)
state_new$Landings[i] <- sum(state$Landings[tmpidx], na.rm = T)
state_new$Biomass[i] <- sum(state$Biomass[tmpidx], na.rm = T)
state_new$SSB[i] <- sum(state$SSB[tmpidx], na.rm = T)
state_new$M[i] <- mean(state$M[tmpidx], na.rm = T)
state_new$Fmsy[i] <- mean(state$Fmsy[tmpidx], na.rm = T)
state_new$k[i] <- mean(state$k[tmpidx], na.rm = T)
state_new$t0[i] <- mean(state$t0[tmpidx], na.rm = T)
state_new$wInf[i] <- mean(state$wInf[tmpidx], na.rm = T)
}
state_new$Biomass[state_new$Biomass == 0] <- NA
state_new$Catch[state_new$Catch == 0] <- NA
state_new$Landings[state_new$Landings == 0] <- NA
state_new$SSB[state_new$SSB == 0] <- NA
return(state_new)
}
|
context("names_RLum")
test_that("Test whether function works", {
testthat::skip_on_cran()
data(ExampleData.RLum.Analysis, envir = environment())
expect_silent(names_RLum(IRSAR.RF.Data))
expect_is(names_RLum(IRSAR.RF.Data), "character")
})
| /data/genthat_extracted_code/Luminescence/tests/test_names_RLum.R | no_license | surayaaramli/typeRrh | R | false | false | 250 | r | context("names_RLum")
test_that("Test whether function works", {
testthat::skip_on_cran()
data(ExampleData.RLum.Analysis, envir = environment())
expect_silent(names_RLum(IRSAR.RF.Data))
expect_is(names_RLum(IRSAR.RF.Data), "character")
})
|
## A pair of functions that cache the inverse of a matrix
## Creates a matrix object that can cache its inverse
makeCacheMatrix <- function( m = matrix() ) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function() {
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse using matrix multiplication
m <- solve(data) %*% data
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
| /cachematrix.R | no_license | vinindersingh/ProgrammingAssignment2 | R | false | false | 1,458 | r | ## A pair of functions that cache the inverse of a matrix
## Creates a matrix object that can cache its inverse
makeCacheMatrix <- function( m = matrix() ) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function() {
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
## Calculate the inverse using matrix multiplication
m <- solve(data) %*% data
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
|
trail_msg <- "Trailing semicolons are not needed."
comp_msg <- "Compound semicolons are not needed. Replace them by a newline."
test_that("Lint all semicolons", {
linter <- semicolon_terminator_linter()
# No semicolon
expect_lint("", NULL, linter)
expect_lint("a <- 1", NULL, linter)
expect_lint("function() {a <- 1}", NULL, linter)
expect_lint("a <- \"foo;bar\"", NULL, linter)
expect_lint("function() {a <- \"foo;bar\"}", NULL, linter)
expect_lint("a <- FALSE # ok; cool!", NULL, linter)
expect_lint("function() {\na <- FALSE # ok; cool!\n}", NULL, linter)
# Trailing semicolons
expect_lint("a <- 1;",
list(message = trail_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function(){a <- 1;}",
list(message = trail_msg, line_number = 1L, column_number = 18L),
linter)
expect_lint("a <- 1; \n",
list(message = trail_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function(){a <- 1; \n}",
list(message = trail_msg, line_number = 1L, column_number = 18L),
linter)
# Compound semicolons
expect_lint("a <- 1;b <- 2",
list(message = comp_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function() {a <- 1;b <- 2}\n",
list(message = comp_msg, line_number = 1L, column_number = 19L),
linter)
expect_lint("foo <-\n 1 ; foo <- 1.23",
list(message = comp_msg, line_number = 2L, column_number = 6L),
linter)
expect_lint("function(){\nfoo <-\n 1 ; foo <- 1.23\n}",
list(message = comp_msg, line_number = 3L, column_number = 6L),
linter)
# Multiple, mixed semicolons", {
expect_lint("a <- 1 ; b <- 2;\nc <- 3;",
list(
list(message = comp_msg, line_number = 1L, column_number = 8L),
list(message = trail_msg, line_number = 1L, column_number = 16L),
list(message = trail_msg, line_number = 2L, column_number = 7L)
),
linter)
expect_lint("function() { a <- 1 ; b <- 2;\nc <- 3;}",
list(
list(message = comp_msg, line_number = 1L, column_number = 21L),
list(message = trail_msg, line_number = 1L, column_number = 29L),
list(message = trail_msg, line_number = 2L, column_number = 7L)
),
linter)
})
test_that("Compound semicolons only", {
linter <- semicolon_terminator_linter(semicolon = "compound")
expect_lint("a <- 1;", NULL, linter)
expect_lint("function(){a <- 1;}", NULL, linter)
expect_lint("a <- 1; \n", NULL, linter)
expect_lint("function(){a <- 1; \n}", NULL, linter)
})
test_that("Trailing semicolons only", {
linter <- semicolon_terminator_linter(semicolon = "trailing")
expect_lint("a <- 1;b <- 2", NULL, linter)
expect_lint("function() {a <- 1;b <- 2}\n", NULL, linter)
expect_lint("f <-\n 1 ;f <- 1.23", NULL, linter)
expect_lint("function(){\nf <-\n 1 ;f <- 1.23\n}", NULL, linter)
})
| /tests/testthat/test-semicolon_terminator_linter.R | permissive | fabian-s/lintr | R | false | false | 3,120 | r | trail_msg <- "Trailing semicolons are not needed."
comp_msg <- "Compound semicolons are not needed. Replace them by a newline."
test_that("Lint all semicolons", {
linter <- semicolon_terminator_linter()
# No semicolon
expect_lint("", NULL, linter)
expect_lint("a <- 1", NULL, linter)
expect_lint("function() {a <- 1}", NULL, linter)
expect_lint("a <- \"foo;bar\"", NULL, linter)
expect_lint("function() {a <- \"foo;bar\"}", NULL, linter)
expect_lint("a <- FALSE # ok; cool!", NULL, linter)
expect_lint("function() {\na <- FALSE # ok; cool!\n}", NULL, linter)
# Trailing semicolons
expect_lint("a <- 1;",
list(message = trail_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function(){a <- 1;}",
list(message = trail_msg, line_number = 1L, column_number = 18L),
linter)
expect_lint("a <- 1; \n",
list(message = trail_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function(){a <- 1; \n}",
list(message = trail_msg, line_number = 1L, column_number = 18L),
linter)
# Compound semicolons
expect_lint("a <- 1;b <- 2",
list(message = comp_msg, line_number = 1L, column_number = 7L),
linter)
expect_lint("function() {a <- 1;b <- 2}\n",
list(message = comp_msg, line_number = 1L, column_number = 19L),
linter)
expect_lint("foo <-\n 1 ; foo <- 1.23",
list(message = comp_msg, line_number = 2L, column_number = 6L),
linter)
expect_lint("function(){\nfoo <-\n 1 ; foo <- 1.23\n}",
list(message = comp_msg, line_number = 3L, column_number = 6L),
linter)
# Multiple, mixed semicolons", {
expect_lint("a <- 1 ; b <- 2;\nc <- 3;",
list(
list(message = comp_msg, line_number = 1L, column_number = 8L),
list(message = trail_msg, line_number = 1L, column_number = 16L),
list(message = trail_msg, line_number = 2L, column_number = 7L)
),
linter)
expect_lint("function() { a <- 1 ; b <- 2;\nc <- 3;}",
list(
list(message = comp_msg, line_number = 1L, column_number = 21L),
list(message = trail_msg, line_number = 1L, column_number = 29L),
list(message = trail_msg, line_number = 2L, column_number = 7L)
),
linter)
})
test_that("Compound semicolons only", {
linter <- semicolon_terminator_linter(semicolon = "compound")
expect_lint("a <- 1;", NULL, linter)
expect_lint("function(){a <- 1;}", NULL, linter)
expect_lint("a <- 1; \n", NULL, linter)
expect_lint("function(){a <- 1; \n}", NULL, linter)
})
test_that("Trailing semicolons only", {
linter <- semicolon_terminator_linter(semicolon = "trailing")
expect_lint("a <- 1;b <- 2", NULL, linter)
expect_lint("function() {a <- 1;b <- 2}\n", NULL, linter)
expect_lint("f <-\n 1 ;f <- 1.23", NULL, linter)
expect_lint("function(){\nf <-\n 1 ;f <- 1.23\n}", NULL, linter)
})
|
# =================================================================================================
# get_full_data
# =================================================================================================
#
#' Get the full trajectory data (raw)
#'
#' Get all available trajectory data for the years 2018-2020, in raw format.
#'
#' @title get_full_data
#'
#' @return A 65,975,278 x 6 tibble of all available trajectory data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' }
#'
#' @export
get_full_data <- function() {
root <- "https://nhorton.people.amherst.edu/valleybikes/"
file_pattern <- "VB_Routes_Data_[0-9]{4}_[0-9]{2}_[0-9]{2}\\.csv\\.gz"
files <- root %>%
readLines() %>%
stringr::str_extract_all(pattern = file_pattern) %>%
unlist()
file_urls <- paste0(root, files)
clust <- parallel::makeCluster(parallel::detectCores())
full_data <- parallel::parLapply(clust, file_urls, data.table::fread, skip = 2,
colClasses = c("character", "character", "character",
"numeric", "numeric", "character")) %>%
data.table::rbindlist() %>%
janitor::clean_names() %>%
dplyr::distinct() %>%
tibble::as_tibble()
parallel::stopCluster(clust)
return(full_data)
}
# =================================================================================================
# aggregate_trips
# =================================================================================================
#
#' Aggregate trip data.
#'
#' Create a one-row-per-trip dataset from the output of \code{get_full_data}.
#'
#' @title aggregate_trips
#'
#' @param full_data The full trajectory data (as output by \code{get_full_data}).
#'
#' @return A tibble of all available trip data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' trips <- aggregate_trips(full_data)
#' }
#'
#' @import data.table
#'
#' @export
aggregate_trips <- function(full_data) {
# using data.table for efficiency
data.table::setDT(full_data)
full_data[, date := fasttime::fastPOSIXct(date)]
full_data_clean <- stats::na.omit(full_data)
full_data_clean <- full_data_clean[data.table::between(date, as.POSIXct("2018-06-28"), Sys.Date())]
trips <- full_data_clean[, list(user_id = data.table::first(user_id),
bike = data.table::first(bike),
start_time = data.table::first(date),
end_time = data.table::last(date),
start_latitude = data.table::first(latitude),
start_longitude = data.table::first(longitude),
end_latitude = data.table::last(latitude),
end_longitude = data.table::last(longitude)),
by = route_id]
trips[, duration := as.numeric(end_time) - as.numeric(start_time)]
utils::data("stations", package = "valleybikeData", envir = environment())
station_locations <- dplyr::select(stations, name, latitude, longitude)
trips <- trips %>%
fuzzyjoin::geo_left_join(
station_locations,
by = c("start_latitude" = "latitude", "start_longitude" = "longitude"),
method = "haversine",
unit = "km",
max_dist = 0.05
) %>%
fuzzyjoin::geo_left_join(
station_locations,
by = c("end_latitude" = "latitude", "end_longitude" = "longitude"),
method = "haversine",
unit = "km",
max_dist = 0.05
) %>%
dplyr::select(
route_id,
user_id,
bike,
start_time,
end_time,
start_station = name.x,
start_latitude,
start_longitude,
end_station = name.y,
end_latitude,
end_longitude,
duration
) %>%
tibble::as_tibble()
return(trips)
}
# =================================================================================================
# aggregate_users
# =================================================================================================
#
#' Aggregate user data.
#'
#' Create a one-row-per-user dataset from the output of \code{aggregate_trips}.
#'
#' @title aggregate_users
#'
#' @param trip_data The one-row-per-trip data (as output by \code{aggregate_trips}).
#'
#' @return A tibble of all available user data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' trips <- aggregate_trips(full_data)
#' users <- aggregate_users(trips)
#' }
#'
#' @export
aggregate_users <- function(trip_data) {
users <- trip_data %>%
dplyr::group_by(user_id) %>%
dplyr::summarize(
trips = dplyr::n(),
min_trip_duration = min(duration, na.rm = TRUE),
mean_trip_duration = mean(duration, na.rm = TRUE),
median_trip_duration = stats::median(duration, na.rm = TRUE),
max_trip_duration = max(duration, na.rm = TRUE),
first_trip_time = min(start_time, na.rm = TRUE),
last_trip_time = max(start_time, na.rm = TRUE),
top_start_station = names(which.max(table(start_station))) %>%
{ifelse(is.null(.), NA, .)},
top_start_station_trips = max(table(start_station)) %>%
{ifelse(. == -Inf, NA, .)},
top_end_station = names(which.max(table(end_station))) %>%
{ifelse(is.null(.), NA, .)},
top_end_station_trips = max(table(end_station)) %>%
{ifelse(. == -Inf, NA, .)}
)
return(users)
}
| /R/aggregate.R | permissive | Amherst-Statistics/valleybikeData | R | false | false | 5,442 | r | # =================================================================================================
# get_full_data
# =================================================================================================
#
#' Get the full trajectory data (raw)
#'
#' Get all available trajectory data for the years 2018-2020, in raw format.
#'
#' @title get_full_data
#'
#' @return A 65,975,278 x 6 tibble of all available trajectory data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' }
#'
#' @export
get_full_data <- function() {
root <- "https://nhorton.people.amherst.edu/valleybikes/"
file_pattern <- "VB_Routes_Data_[0-9]{4}_[0-9]{2}_[0-9]{2}\\.csv\\.gz"
files <- root %>%
readLines() %>%
stringr::str_extract_all(pattern = file_pattern) %>%
unlist()
file_urls <- paste0(root, files)
clust <- parallel::makeCluster(parallel::detectCores())
full_data <- parallel::parLapply(clust, file_urls, data.table::fread, skip = 2,
colClasses = c("character", "character", "character",
"numeric", "numeric", "character")) %>%
data.table::rbindlist() %>%
janitor::clean_names() %>%
dplyr::distinct() %>%
tibble::as_tibble()
parallel::stopCluster(clust)
return(full_data)
}
# =================================================================================================
# aggregate_trips
# =================================================================================================
#
#' Aggregate trip data.
#'
#' Create a one-row-per-trip dataset from the output of \code{get_full_data}.
#'
#' @title aggregate_trips
#'
#' @param full_data The full trajectory data (as output by \code{get_full_data}).
#'
#' @return A tibble of all available trip data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' trips <- aggregate_trips(full_data)
#' }
#'
#' @import data.table
#'
#' @export
aggregate_trips <- function(full_data) {
# using data.table for efficiency
data.table::setDT(full_data)
full_data[, date := fasttime::fastPOSIXct(date)]
full_data_clean <- stats::na.omit(full_data)
full_data_clean <- full_data_clean[data.table::between(date, as.POSIXct("2018-06-28"), Sys.Date())]
trips <- full_data_clean[, list(user_id = data.table::first(user_id),
bike = data.table::first(bike),
start_time = data.table::first(date),
end_time = data.table::last(date),
start_latitude = data.table::first(latitude),
start_longitude = data.table::first(longitude),
end_latitude = data.table::last(latitude),
end_longitude = data.table::last(longitude)),
by = route_id]
trips[, duration := as.numeric(end_time) - as.numeric(start_time)]
utils::data("stations", package = "valleybikeData", envir = environment())
station_locations <- dplyr::select(stations, name, latitude, longitude)
trips <- trips %>%
fuzzyjoin::geo_left_join(
station_locations,
by = c("start_latitude" = "latitude", "start_longitude" = "longitude"),
method = "haversine",
unit = "km",
max_dist = 0.05
) %>%
fuzzyjoin::geo_left_join(
station_locations,
by = c("end_latitude" = "latitude", "end_longitude" = "longitude"),
method = "haversine",
unit = "km",
max_dist = 0.05
) %>%
dplyr::select(
route_id,
user_id,
bike,
start_time,
end_time,
start_station = name.x,
start_latitude,
start_longitude,
end_station = name.y,
end_latitude,
end_longitude,
duration
) %>%
tibble::as_tibble()
return(trips)
}
# =================================================================================================
# aggregate_users
# =================================================================================================
#
#' Aggregate user data.
#'
#' Create a one-row-per-user dataset from the output of \code{aggregate_trips}.
#'
#' @title aggregate_users
#'
#' @param trip_data The one-row-per-trip data (as output by \code{aggregate_trips}).
#'
#' @return A tibble of all available user data.
#'
#' @examples
#' \dontrun{
#' full_data <- get_full_data()
#' trips <- aggregate_trips(full_data)
#' users <- aggregate_users(trips)
#' }
#'
#' @export
aggregate_users <- function(trip_data) {
users <- trip_data %>%
dplyr::group_by(user_id) %>%
dplyr::summarize(
trips = dplyr::n(),
min_trip_duration = min(duration, na.rm = TRUE),
mean_trip_duration = mean(duration, na.rm = TRUE),
median_trip_duration = stats::median(duration, na.rm = TRUE),
max_trip_duration = max(duration, na.rm = TRUE),
first_trip_time = min(start_time, na.rm = TRUE),
last_trip_time = max(start_time, na.rm = TRUE),
top_start_station = names(which.max(table(start_station))) %>%
{ifelse(is.null(.), NA, .)},
top_start_station_trips = max(table(start_station)) %>%
{ifelse(. == -Inf, NA, .)},
top_end_station = names(which.max(table(end_station))) %>%
{ifelse(is.null(.), NA, .)},
top_end_station_trips = max(table(end_station)) %>%
{ifelse(. == -Inf, NA, .)}
)
return(users)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalisation.R
\name{library_size_scaling_factors}
\alias{library_size_scaling_factors}
\title{calculate scaling factors for library size}
\usage{
library_size_scaling_factors(se)
}
\arguments{
\item{se}{a SummarizedExperiment object such as 'bait_windows' from atacr::make_counts()}
}
\description{
calculate scaling factors for library size
}
| /man/library_size_scaling_factors.Rd | no_license | TeamMacLean/atacr | R | false | true | 424 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalisation.R
\name{library_size_scaling_factors}
\alias{library_size_scaling_factors}
\title{calculate scaling factors for library size}
\usage{
library_size_scaling_factors(se)
}
\arguments{
\item{se}{a SummarizedExperiment object such as 'bait_windows' from atacr::make_counts()}
}
\description{
calculate scaling factors for library size
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saveModel.R
\name{getModelQueryObjectFromModel}
\alias{getModelQueryObjectFromModel}
\title{getModelQueryObjectFromModel: return query object from a model.
This is the object we use to generate our unique ids.}
\usage{
getModelQueryObjectFromModel(model, model_type = "inla",
latent = FALSE)
}
\arguments{
\item{model}{= Model object to get query object for}
\item{model_type}{= Model Type string. Default to inla}
\item{latent}{= Bool determing if we are saving a latent model or a smooth model}
}
\value{
An object containing the observed and the model_type fields
}
\description{
getModelQueryObjectFromModel: return query object from a model.
This is the object we use to generate our unique ids.
}
| /modelServR/man/getModelQueryObjectFromModel.Rd | permissive | vdedyukhin/FluMapModel | R | false | true | 785 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saveModel.R
\name{getModelQueryObjectFromModel}
\alias{getModelQueryObjectFromModel}
\title{getModelQueryObjectFromModel: return query object from a model.
This is the object we use to generate our unique ids.}
\usage{
getModelQueryObjectFromModel(model, model_type = "inla",
latent = FALSE)
}
\arguments{
\item{model}{= Model object to get query object for}
\item{model_type}{= Model Type string. Default to inla}
\item{latent}{= Bool determing if we are saving a latent model or a smooth model}
}
\value{
An object containing the observed and the model_type fields
}
\description{
getModelQueryObjectFromModel: return query object from a model.
This is the object we use to generate our unique ids.
}
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.4,family="gaussian",standardize=FALSE)
sink('./breast_050.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/breast/breast_050.R | no_license | esbgkannan/QSMART | R | false | false | 346 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.4,family="gaussian",standardize=FALSE)
sink('./breast_050.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/man/neighbors.Rd | no_license | hillhillll/LSAfun | R | false | false | 2,374 | rd | ||
loss <- read.csv("loss")
labels <- read.csv("labels.csv", header = FALSE)
data <- read.csv("data.csv", header = FALSE)
quants <- read.csv("quants.csv", header = FALSE)
preds <- read.csv("preds",header = FALSE)
loss <- loss[,1]
labels <- labels[,1]
quants <- quants[,1]
preds <- preds[,1]
max.epoc <- 1999
sig <- 1/100
post_med <- 0.5*rowMeans(data)
quant = as.numeric(rep(0,1500))
losses = as.numeric(rep(0,1500))
pred_losses = as.numeric(rep(0,1500))
for(i in 1:1500){
quant[i] <- post_med[i] + qnorm(quants[i])/sqrt(200)
losses[i] <- (1-quants[i]) * pmax(quant[i]-labels[i],0)+(quants[i])*pmax(labels[i] - quant[i],0)
pred_losses[i] <- (1-quants[i]) * pmax(preds[i]-labels[i],0)+(quants[i])*pmax(labels[i] - preds[i],0)
}
post_loss = mean(losses)
width <- 2
pred_se = sd(losses)/sqrt(1500)
colors <- c(rgb(56.25/255,34.50/255,113.25/255,0.3), rgb(0,0,1,0.8))
inds.to.use <- round(seq(from = 1, to = max.epoc, length.out = 1999))*1000
pdf("gauss_cont.pdf", width = 6, height = 4)
plot(inds.to.use,loss, type = 'n', yaxs ='i', xlab = "Number of Simulated Datasets", ylab = "Standardized Risk", ylim = c(0.00,10))
lines(inds.to.use,predict(loess(loss/post_loss~c(1:1999), span = 0.1,degree = 1 )), lwd = width, col = colors[1])
abline(h= 1, lwd= width,lty = 2, col = "red")
legend(x = 'topright', legend=c("RNN", "Posterior Quantiles"),
col=c(colors[1],"red"), lty=c(1,2), cex=0.8)
arrows(max(inds.to.use), (tail(loss, n=1)-1.96*pred_se)/post_loss,max(inds.to.use),(tail(loss, n=1)+1.96*pred_se)/post_loss,length=0.05, angle=90, code=3)
dev.off()
| /gaussian/cont/makeplots.R | no_license | thayerf/post-RNN | R | false | false | 1,566 | r | loss <- read.csv("loss")
labels <- read.csv("labels.csv", header = FALSE)
data <- read.csv("data.csv", header = FALSE)
quants <- read.csv("quants.csv", header = FALSE)
preds <- read.csv("preds",header = FALSE)
loss <- loss[,1]
labels <- labels[,1]
quants <- quants[,1]
preds <- preds[,1]
max.epoc <- 1999
sig <- 1/100
post_med <- 0.5*rowMeans(data)
quant = as.numeric(rep(0,1500))
losses = as.numeric(rep(0,1500))
pred_losses = as.numeric(rep(0,1500))
for(i in 1:1500){
quant[i] <- post_med[i] + qnorm(quants[i])/sqrt(200)
losses[i] <- (1-quants[i]) * pmax(quant[i]-labels[i],0)+(quants[i])*pmax(labels[i] - quant[i],0)
pred_losses[i] <- (1-quants[i]) * pmax(preds[i]-labels[i],0)+(quants[i])*pmax(labels[i] - preds[i],0)
}
post_loss = mean(losses)
width <- 2
pred_se = sd(losses)/sqrt(1500)
colors <- c(rgb(56.25/255,34.50/255,113.25/255,0.3), rgb(0,0,1,0.8))
inds.to.use <- round(seq(from = 1, to = max.epoc, length.out = 1999))*1000
pdf("gauss_cont.pdf", width = 6, height = 4)
plot(inds.to.use,loss, type = 'n', yaxs ='i', xlab = "Number of Simulated Datasets", ylab = "Standardized Risk", ylim = c(0.00,10))
lines(inds.to.use,predict(loess(loss/post_loss~c(1:1999), span = 0.1,degree = 1 )), lwd = width, col = colors[1])
abline(h= 1, lwd= width,lty = 2, col = "red")
legend(x = 'topright', legend=c("RNN", "Posterior Quantiles"),
col=c(colors[1],"red"), lty=c(1,2), cex=0.8)
arrows(max(inds.to.use), (tail(loss, n=1)-1.96*pred_se)/post_loss,max(inds.to.use),(tail(loss, n=1)+1.96*pred_se)/post_loss,length=0.05, angle=90, code=3)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_migration_bornSameStateMCA_1991.R
\name{build_migration_bornSameStateMCA_1991}
\alias{build_migration_bornSameStateMCA_1991}
\title{Builds a synthetic variable for migration - born in the same state of residence (minimum comparable areas) - 1991}
\usage{
build_migration_bornSameStateMCA_1991(CensusData)
}
\description{
Builds a synthetic variable for migration - born in the same state of residence (minimum comparable areas) - 1991
}
| /man/build_migration_bornSameStateMCA_1991.Rd | no_license | antrologos/harmonizeIBGE | R | false | true | 413 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_migration_bornSameStateMCA_1991.R
\name{build_migration_bornSameStateMCA_1991}
\alias{build_migration_bornSameStateMCA_1991}
\title{Builds a synthetic variable for education attainment - 2010}
\usage{
build_migration_bornSameStateMCA_1991(CensusData)
}
\description{
Builds a synthetic variable for education attainment - 2010
}
|
require("fftwtools")
require("pracma")
require("data.table")
require("gstat")
require(sp)
require("stringr")
require(gridExtra)
require(ggplot2)
#require(reshape2)
# for reporting
require(png)
require(grid)
if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("r", "roughness", "dir.hor", "id","name","press","ang1","ang2","ang3","anis1","anis2","id"))
#' Generate a pdf report for all AFM images in a directory
#'
#' A function to generate a pdf report for each \code{\link{AFMImage}} in a directory. Images should be in export Nanoscope format as the \code{\link{importFromNanoscope}} function will be used.
#'
#' @param imageDirectory a directory where are located image as Nanoscope export format
#' @param imageNumber (optional) an image number in the directory. If it is set only the selected image will be processed.
#' @return TRUE when the selected image(s) were processed, FALSE when imageNumber is out of range
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' # A report will be generated for all the images in imageDirectory directory
#' # imageDirectory="c:/images"
#' imageDirectory=tempdir()
#' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory)
#'
#' # A report will be generated for the fifth image in the imageDirectory directory
#' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory,5)
#' }
generateReportFromNanoscopeImageDirectory<-function(imageDirectory, imageNumber) {
  # only Nanoscope text exports (*.txt) are considered
  filesToProcess<-list.files(imageDirectory, include.dirs = FALSE, recursive = FALSE,full.names = TRUE,pattern = "\\.txt$")
  if (!missing(imageNumber)) {
    # Guard both bounds: the original only checked the upper bound, so
    # imageNumber = 0 silently selected nothing and a negative value
    # dropped an element (R negative indexing) instead of failing.
    if (imageNumber>=1 && imageNumber<=length(filesToProcess)){
      filesToProcess<-filesToProcess[imageNumber]
    }else{
      print(paste("Selected image number",imageNumber,paste("is not in the valid range of images in directory (1 -",length(filesToProcess),")", sep="")))
      return(FALSE)
    }
  }
  # import each selected file and generate its full report
  for(fullfilename in filesToProcess){
    AFMImage<-importFromNanoscope(fullfilename)
    generateReport(AFMImage)
  }
  return(TRUE)
}
#' Generate an analysis report for one AFMImage
#'
#' A function to analyse an \code{\link{AFMImage}} and save on disk the analysis. The analysis are saved in outputs directory located in the image directory.
#' All the rdata and image files in the reportDirectory directory are loaded to generate one report for one \code{\link{AFMImage}}.
#'
#' @param AFMImage an \code{\link{AFMImage}} to be analysed
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#'
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start to check if your sample is normaly distributed and isotropic.
#' generateCheckReport(AFMImage)
#' # If the sample is normaly distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#'
#'
#' # Analyse your own AFM image from nanoscope analysis (TM) software tool
#' anotherAFMImage<-importFromNanoscope("c:/users/my_windows_login/myimage.txt")
#' # Start to check if your sample is normaly distributed and isotropic.
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normaly distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateReport <- function(AFMImage) {
  imageBasename <- basename(AFMImage@fullfilename)
  imageDirname <- dirname(AFMImage@fullfilename)
  print(paste("generating a full Report for", imageBasename, "in", imageDirname))

  # all analysis artefacts go into an "outputs" subdirectory next to the image
  outputDirectory <- paste(imageDirname, "outputs", sep = "/")
  createReportDirectory(outputDirectory)

  # run the full analysis chain and persist the results on disk
  analyser <- new("AFMImageAnalyser",
                  AFMImage = AFMImage,
                  fullfilename = AFMImage@fullfilename)
  analyser <- analyse(AFMImageAnalyser = analyser)
  putAnalysisOnDisk(AFMImageAnalyser = analyser, AFMImage = AFMImage)

  # assemble the pdf report from the saved analysis
  pdfFullfilename <- paste(outputDirectory,
                           paste(imageBasename, "fullreport.pdf", sep = "-"),
                           sep = "/")
  generateAFMImageReport(analyser, pdfFullfilename, isCheckReport = FALSE)
  print("done")
}
#' Generate a check report for one AFMImage
#'
#' Generate a check report in pdf format in order to analyse the distribution and the isotropy of heights of the \code{\link{AFMImage}}.
#'
#' @param AFMImage an \code{\link{AFMImage}} imported from Nanoscope Analysis(TM) with \code{\link{importFromNanoscope}} or created manually \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start to check if your sample is normaly distributed and isotropic.
#' generateCheckReport(AFMImage)
#' # If the sample is normaly distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#'
#' # Analyse your own AFM image from nanoscope analysis (TM) software tool
#' anotherAFMImage<-importFromNanoscope("c:/users/me/myimage.txt")
#' # Start to check if your sample is normaly distributed and isotropic.
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normaly distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateCheckReport <- function(AFMImage) {
  imageBasename <- basename(AFMImage@fullfilename)
  imageDirname <- dirname(AFMImage@fullfilename)
  print(paste("Generating a check report for", imageBasename, "in", imageDirname))

  # analysis artefacts live in an "outputs" subdirectory next to the image
  outputDirectory <- paste(imageDirname, "outputs", sep = "/")
  createReportDirectory(outputDirectory)

  # only the isotropy check is run for a check report (no full analysis)
  analyser <- new("AFMImageAnalyser",
                  AFMImage = AFMImage,
                  fullfilename = AFMImage@fullfilename)
  analyser <- checkIsotropy(AFMImage, analyser)
  putAnalysisOnDisk(analyser, AFMImage)

  # assemble the pdf check report from the saved analysis
  pdfFullfilename <- paste(outputDirectory,
                           paste(imageBasename, "checkreport.pdf", sep = "-"),
                           sep = "/")
  generateAFMImageReport(analyser, pdfFullfilename, isCheckReport = TRUE)
  print("done")
}
#' @title Generate an analysis report from an AFMImageAnalyser object
#'
#' @description \code{generateAFMImageReport} generates a report from an AFMImageAnalyser object
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce report
#' @param reportFullfilename location on disk where to save the generated report
#' @param isCheckReport TRUE to generate a check report, FALSE to generate a full report; defaults to TRUE when missing
#' @author M.Beauvais
#' @export
generateAFMImageReport<-function(AFMImageAnalyser, reportFullfilename, isCheckReport){
  # number of variogram model evaluations displayed per pdf page
  numberOfModelsPerPage=3
  # default to the lighter check report when the flag is not given
  if (missing(isCheckReport)) {
    isCheckReport = TRUE
  }
  AFMImage<-AFMImageAnalyser@AFMImage
  fullfilename <- AFMImageAnalyser@AFMImage@fullfilename
  sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  reportDirectory=dirname(AFMImageAnalyser@fullfilename)
  createReportDirectory(reportDirectory)

  # save all images necessary for the report on disk
  putImagesFromAnalysisOnDisk(AFMImageAnalyser, AFMImage, reportDirectory)
  print(paste("creating", basename(reportFullfilename), "..."))
  # A4 portrait pdf device; every page below is drawn into it
  pdf(reportFullfilename, width=8.27, height=11.69)

  # first page: 3D snapshot plus basic image information table
  rglImagefullfilename<-get3DImageFullfilename(reportDirectory, sampleName)
  print(paste("reading", basename(rglImagefullfilename), "..."))
  img <- readPNG(rglImagefullfilename)
  roughnesses<-getRoughnessParameters(AFMImageAnalyser@AFMImage)
  basicImageInfo<-data.table(name=c("Scan size",
                                    "Samples per line",
                                    "Lines",
                                    "Total Rrms",
                                    "Ra (mean roughness)"),
                             values=c(paste(AFMImageAnalyser@AFMImage@scansize,"nm"),
                                      paste(as.character(AFMImageAnalyser@AFMImage@samplesperline)),
                                      paste(as.character(AFMImageAnalyser@AFMImage@lines)),
                                      paste(round(roughnesses$totalRMSRoughness_TotalRrms, digits=4),"nm"),
                                      paste(round(roughnesses$MeanRoughness_Ra, digits=4),"nm")))
  imageInformationDTPlot<-getGgplotFromDataTable(basicImageInfo,
                                                 removeRowNames= TRUE,
                                                 removeColNames=TRUE)
  grid.newpage() # Open a new page on grid device
  pushViewport(viewport(layout = grid.layout(5, 4)))
  vp1<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4)
  grid.raster(img,vp=vp1)
  vp0<-viewport(layout.pos.row = 1, layout.pos.col = 2:3)
  grid.text(sampleName, vp=vp0, gp=gpar(fontsize=20, col="black"))
  vp2<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:4)
  print(imageInformationDTPlot,vp=vp2)

  # checking page: normality (QQ + density) and isotropy (directional variograms)
  if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) {
    exportpng2FullFilename<-getDirectionalVarioPngFullfilename(reportDirectory, sampleName)
    print(paste("reading",basename(exportpng2FullFilename)))
    directionalVariograms<-readPNG(exportpng2FullFilename)
    grid.newpage() # Open a new page on grid device
    pushViewport(viewport(layout = grid.layout(4, 4)))
    qq <- checkNormalityQQ(AFMImage)
    m <- checkNormalityDensity(AFMImage)
    vp2<- viewport(layout.pos.row = 1:2, layout.pos.col = 1:2)
    print(qq, vp = vp2)
    vp3<-viewport(layout.pos.row = 1:2, layout.pos.col = 3:4)
    print(m, vp = vp3)
    vp4<-viewport(layout.pos.row = 3:4, layout.pos.col = 1:4)
    grid.raster(directionalVariograms,vp=vp4)
  }

  # the remaining pages only belong to the full report
  if (!isCheckReport) {
    # variogram model evaluation table
    if (!length(AFMImageAnalyser@variogramAnalysis@variogramModels)==0) {
      mergedDT<-getDTModelEvaluation(AFMImageAnalyser@variogramAnalysis)
      print(mergedDT)
      sillrangeDT<-getDTModelSillRange(AFMImageAnalyser@variogramAnalysis)
      setkey(sillrangeDT, "model")
      # silence R CMD check notes about data.table non-standard evaluation symbols
      name<-press<-NULL
      sampleDT <- mergedDT[name==basename(AFMImageAnalyser@AFMImage@fullfilename)]
      setkey(sampleDT, "model")
      sampleDT<-merge(sampleDT, sillrangeDT, by="model")
      sampleDT<-sampleDT[,name:=NULL]
      sampleDT <- unique(sampleDT)
      # best models first: highest correlation, then lowest PRESS
      sampleDT <- sampleDT[order(-rank(cor), rank(press))]
      print(basename(AFMImageAnalyser@AFMImage@fullfilename))
      print(basename(AFMImageAnalyser@fullfilename))
      print(sampleDT)
      summarySampleDT<-copy(sampleDT)
      summarySampleDT$press<-round(sampleDT$press)
      summarySampleDT$sill<-round(sampleDT$sill)
      summarySampleDT$range<-round(sampleDT$range)
      print("plotting variogram table...")
      existsVariogramModel<-TRUE
      if (nrow(sampleDT)!=0) {
        plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT,
                                                             removeRowNames=TRUE,
                                                             removeColNames=FALSE)
      }else{
        print("no good variogram table...")
        existsVariogramModel<-FALSE
        # fall back to the unmerged model evaluations for this sample
        sampleDT <- mergedDT[name==basename(fullfilename)]
        sampleDT <- unique(sampleDT)
        sampleDT <- sampleDT[order(-rank(cor), rank(press))]
        # bug fix: the original plotted the stale 'summarySampleDT' built from the
        # empty table above; plot the recomputed fallback table instead
        summarySampleDT<-copy(sampleDT)
        plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT,
                                                             removeRowNames=TRUE,
                                                             removeColNames=FALSE)
      }
    }
    # best variogram models page
    if (!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) {
      grid.newpage() # Open a new page on grid device
      pushViewport(viewport(layout = grid.layout(7, 2)))
      sampleSpplotfullfilename<-getSpplotImagefullfilename(reportDirectory, sampleName)
      print(paste("reading",basename(sampleSpplotfullfilename)))
      sampleImg <- readPNG(sampleSpplotfullfilename)
      sampleSpplotfullfilename<-getVarioPngchosenFitSample(reportDirectory, sampleName)
      print(paste("reading",basename(sampleSpplotfullfilename)))
      chosenFitSampleImg <- readPNG(sampleSpplotfullfilename)
      vp0<- viewport(layout.pos.row = 1, layout.pos.col = 1:2)
      grid.text("Variogram analysis", vp=vp0, gp=gpar(fontsize=20, col="black"))
      # original image on the left, the fitting sample on the right
      vp1<- viewport(layout.pos.row = 2:3, layout.pos.col = 1)
      grid.raster(sampleImg,vp=vp1)
      vp2<- viewport(layout.pos.row = 2:3, layout.pos.col = 2)
      grid.raster(chosenFitSampleImg,vp=vp2)
      totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels)
      if (totalVariogramModels>0) {
        vp5<-viewport(layout.pos.row = 4:7, layout.pos.col = 1:2)
        print(plotBestVariogramModelsTable,vp=vp5)
        # one page (or more) of per-model evaluation plots
        printVariogramModelEvaluations(AFMImageAnalyser, sampleDT, numberOfModelsPerPage)
      }
    }
    # Roughness against length scale
    if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) {
      grid.newpage() # Open a new page on grid device
      pushViewport(viewport(layout = grid.layout(7, 2)))
      vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:2)
      grid.text("Roughness vs. lengthscale", vp=vp0, gp=gpar(fontsize=20, col="black"))
      exportCsvFullFilename<-getRoughnessAgainstLengthscale(reportDirectory, sampleName)
      print(paste("reading",basename(exportCsvFullFilename)))
      samplePredictedImg <- readPNG(exportCsvFullFilename)
      vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 1)
      grid.raster(samplePredictedImg,vp=vp1)
      # zoom on the first 10 nm next to the full-range plot
      exportCsvFullFilename<-getRoughnessAgainstLengthscale10nm(reportDirectory, sampleName)
      print(paste("reading",basename(exportCsvFullFilename)))
      samplePredictedImg <- readPNG(exportCsvFullFilename)
      vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 2)
      grid.raster(samplePredictedImg,vp=vp1)
      # optional intersection images (suffixes "-0" and "-2"), inserted when present
      for(i in c(0,1)) {
        exportpng2FullFilename=getRoughnessAgainstLengthscaleIntersection(reportDirectory, paste(sampleName,i*2,sep="-"))
        if (file.exists(exportpng2FullFilename)) {
          print("intersection inserted...")
          img<-readPNG(exportpng2FullFilename)
          vp2<-viewport(layout.pos.row = 5:7, layout.pos.col = i+1)
          grid.raster(img,vp=vp2)
        }
      }
    }
    # fractal dimension page
    if (!length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)==0) {
      grid.newpage() # Open a new page on grid device
      pushViewport(viewport(layout = grid.layout(7, 4)))
      vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:4)
      grid.text("Fractal dimension analysis", vp=vp0, gp=gpar(fontsize=20, col="black"))
      n=length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)
      print(n)
      if (n>0) {
        # flatten the list of S4 method results into one table
        sampleDT <- data.table(
          fd_method= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_method)),
          fd= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd)),
          fd_scale= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_scale)))
        print(sampleDT)
        setkey(sampleDT, "fd_method")
        sampleDT <- unique(sampleDT)
        name<-NULL
        # NOTE(review): sampleDT has no 'name' column here; ':=NULL' on a missing
        # column may warn or error depending on the data.table version — confirm
        plotFractalDimensionTable<-getGgplotFromDataTable(sampleDT[, name:=NULL])
        vp3<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4)
        print(plotFractalDimensionTable,vp=vp3)
        # insert the per-method diagnostic images when they exist on disk
        exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "isotropic")
        if (file.exists(exportpng2FullFilename)) {
          img<-readPNG(exportpng2FullFilename)
          vp4<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:2)
          grid.raster(img,vp=vp4)
        }
        exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "squareincr")
        if (file.exists(exportpng2FullFilename)) {
          img<-readPNG(exportpng2FullFilename)
          vp5<-viewport(layout.pos.row = 4:5, layout.pos.col = 3:4)
          grid.raster(img,vp=vp5)
        }
        exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "filter1")
        if (file.exists(exportpng2FullFilename)) {
          img<-readPNG(exportpng2FullFilename)
          vp6<-viewport(layout.pos.row = 6:7, layout.pos.col = 1:2)
          grid.raster(img,vp=vp6)
        }
      }
    }
  }
  dev.off()
  rm(AFMImageAnalyser)
}
#' @title printVariogramModelEvaluations
#'
#' @description \code{printVariogramModelEvaluations} generates a graphic element containing the evaluation of all variogram models
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce report
#' @param sampleDT a data.table containing the evaluation information
#' @param numberOfModelsPerPage numeric to specify the number of model evaluations per pages
#' @author M.Beauvais
#' @export
printVariogramModelEvaluations<-function(AFMImageAnalyser, sampleDT, numberOfModelsPerPage){
  # silence R CMD check notes for data.table/ggplot2 non-standard evaluation symbols
  error<-predicted<-realH<-nbPointsPercent<-numberOfPoints<-NULL

  # experimental omnidirectional variogram, stripped of gstat bookkeeping columns
  experimentalVariogramDT<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
  experimentalVariogramDT$name<-"Experimental"
  experimentalVariogramDT<-experimentalVariogramDT[,c("dir.hor","dir.ver","id","np"):=NULL]

  # spplot of the real sample, reused on every evaluation row for comparison
  sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  sampleSpplotfullfilename<-getSpplotImagefullfilename(tempdir(), sampleName)
  saveSpplotFromAFMImage(AFMImageAnalyser@AFMImage, sampleSpplotfullfilename, withoutLegend=TRUE)
  sampleImg <- readPNG(sampleSpplotfullfilename)

  # model identifiers are the last three characters of the model column
  allVarioModels<-str_sub(sampleDT$model,-3)
  for (i in seq_along(allVarioModels)) {
    indexInPage<-i%%numberOfModelsPerPage
    if (indexInPage==1) {
      # Open a new page
      grid.newpage()
      pushViewport(viewport(layout = grid.layout(numberOfModelsPerPage*2, 3)))
    }
    if (indexInPage==0) indexInPage=numberOfModelsPerPage

    # real sample image in the middle column of this evaluation row
    vp1<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 2)
    grid.raster(sampleImg,vp=vp1)

    # find the stored model evaluation whose fitted model matches the current name
    allVariogramModelEvaluation<-AFMImageAnalyser@variogramAnalysis@variogramModels
    for (j in seq_along(allVariogramModelEvaluation)) {
      if (allVariogramModelEvaluation[j][[1]]@fit.v[2]$model==allVarioModels[i]) break;
    }
    modelName<-allVariogramModelEvaluation[j][[1]]@fit.v[2]$model
    # NOTE(review): mykrige is indexed with 'i' (the sampleDT row) while modelName
    # comes from the matched index 'j' — confirm both lists share the same ordering
    part_valid_pr<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige
    cuts<-AFMImageAnalyser@variogramAnalysis@cuts
    withoutLegend<-TRUE
    colLimit<-length(cuts)+3
    cols <- getSpplotColors(colLimit)

    # density statistics of the prediction error, relative to the total height amplitude
    amplitudeReal<-abs(max(AFMImageAnalyser@AFMImage@data$h)-min(AFMImageAnalyser@AFMImage@data$h))
    statsHeightsDT<-data.table(realH=AFMImageAnalyser@AFMImage@data$h, predicted = as.vector(unlist(part_valid_pr["var1.pred"]@data)))
    statsHeightsDT[,error:=(predicted-realH)/amplitudeReal,]
    resStatsDT<-data.table(step=c(0), numberOfPoints=c(0))
    totalPoints<-length(statsHeightsDT$error)
    # bug fix: the original reused 'i' as this loop's variable, clobbering the outer
    # model index that is still needed below in 'sampleDT[i]'
    for (errStep in seq(0,max(statsHeightsDT$error), by=(max(statsHeightsDT$error)/20))) {
      nbPoints<-length(statsHeightsDT$error[abs(statsHeightsDT$error)<errStep])
      resStatsDT<-rbind(resStatsDT, data.table(step=c(errStep), numberOfPoints=c(nbPoints)))
    }
    resStatsDT<-resStatsDT[-c(1,2),]
    resStatsDT[, nbPointsPercent:=numberOfPoints/totalPoints,]
    errorData<-data.frame(error=statsHeightsDT$error)

    # predicted sample image (kriging result) in the right column
    predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(tempdir(), sampleName, modelName)
    saveSpplotFromKrige(predictedspplotfullfilename, modelName, part_valid_pr,cuts, withoutLegend = TRUE)
    samplePredictedImg <- readPNG(predictedspplotfullfilename)
    vp2<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 3)
    grid.raster(samplePredictedImg,vp=vp2)

    # rebuild the fitted (possibly nested) variogram model and tabulate its parameters
    ang1<-ang2<-ang3<-anis1<-anis2<-name<-NULL
    fit.v<-allVariogramModelEvaluation[j][[1]]@fit.v
    vgm1<-vgm(fit.v[2]$psill, fit.v[2]$model, fit.v[2]$range, kappa = fit.v[2]$kappa, anis = c(fit.v[2]$anis1, fit.v[2]$anis2), add.to = vgm(fit.v[1]$psill, fit.v[1]$model, fit.v[1]$range,kappa = fit.v[1]$kappa,anis = c(fit.v[1]$anis1, fit.v[1]$anis2)))
    newModelDT<-data.table(vgm1)
    setnames(newModelDT, "psill", "sill" )
    newModelDT<-rbind(newModelDT, sampleDT[i], fill=TRUE)
    # drop the anisotropy columns, which are not informative for the report
    newModelDT<- newModelDT[, ang1:=NULL]
    newModelDT<- newModelDT[, ang2:=NULL]
    newModelDT<- newModelDT[, ang3:=NULL]
    newModelDT<- newModelDT[, anis1:=NULL]
    newModelDT<- newModelDT[, anis2:=NULL]
    plotVariogramModelTable<-getGgplotFromDataTable(newModelDT[,name:=NULL])
    vp4<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
    print(vp=vp4, plotVariogramModelTable, row.names= FALSE, include.rownames=FALSE)

    # recreate a gstatVariogram from the experimental data so it can be plotted
    myvgm<-experimentalVariogramDT
    experimentalVariogramDTnrow=nrow(myvgm)
    class(myvgm) = c("gstatVariogram", "data.frame")
    myvgm$np=rep(1,experimentalVariogramDTnrow)
    myvgm$dir.hor=rep(0,experimentalVariogramDTnrow)
    myvgm$dir.ver=rep(0,experimentalVariogramDTnrow)
    myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow)
    begin<-(indexInPage-1)*2+1
    vp3<-viewport(layout.pos.row = begin:(begin+1), layout.pos.col = 1, width=100, height=100)
    vgLine <- rbind(
      cbind(variogramLine(vgm1, maxdist = max(myvgm$dist)), id = "Raw")
    )
    # experimental points overlaid with the fitted model line
    p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_line(data = vgLine) + geom_point()
    p1 <- p1 + ylab("semivariance")
    p1 <- p1 + xlab("distance (nm)")
    p1 <- p1 + ggtitle("Semivariogram")
    p1 <- p1 + guides(colour=FALSE)
    p1 <- p1 + expand_limits(y = 0)
    print(p1,vp=vp3)

    # second page per model: error density plot and cumulative error statistics table
    grid.newpage()
    vp5<-viewport(layout.pos.row = begin:(begin+2), layout.pos.col = 1, width=200, height=200)
    p1 <- ggplot(errorData,aes(error, fill =c(1))) + geom_density(alpha = 0.2) +
      guides(fill=FALSE)+
      theme(legend.position="none")
    print(p1,vp=vp5)
    plotVariogramModelTable<-getGgplotFromDataTable(resStatsDT)
    vp6<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
    print(vp=vp6, plotVariogramModelTable, row.names= FALSE, include.rownames=FALSE)
  }
}
# Render a data.table as a ggplot graphic (a themed table drawn on a blank plot).
# DT: the table to display; removeRowNames (default TRUE) hides row names;
# removeColNames (default FALSE) hides column headers.
getGgplotFromDataTable<-function(DT, removeRowNames, removeColNames) {
  if (missing(removeRowNames)) removeRowNames <- TRUE
  if (missing(removeColNames)) removeColNames <- FALSE

  tableTheme <- gridExtra::ttheme_default(
    core = list(fg_params = list(cex = 0.8)),
    colhead = list(fg_params = list(cex = 0.9)),
    rowhead = list(fg_params = list(cex = 0.9)))

  # Build the tableGrob call dynamically instead of branching four ways:
  # an explicit NULL for 'rows'/'cols' suppresses that dimension's labels.
  grobArgs <- list(DT, theme = tableTheme)
  if (removeRowNames) grobArgs["rows"] <- list(NULL)
  if (removeColNames) grobArgs["cols"] <- list(NULL)
  tableAnnotation <- annotation_custom(grob = do.call(gridExtra::tableGrob, grobArgs))

  # Blank canvas: axes, lines and text are all suppressed.
  blankCanvas <- qplot(1:10, 1:10, geom = "blank") +
    theme_bw() +
    theme(line = element_blank(), text = element_blank())

  return(blankCanvas + tableAnnotation)
}
#' Put the images from all analysis on disk
#'
#' A function to put on disk all the images from variogram, PSD Analysis of an \code{\link{AFMImage}}
#' An AFM Image 3D representation is saved on disk thanks to the \code{\link{rgl}} package.
#' On Unix system, it is necessary to have a X server connection to be able to use the \code{\link{rgl}} package.
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} holding the analysis results to export
#' @param AFMImage an \code{\link{AFMImage}} whose images are exported
#' @param exportDirectory where the images will be stored
#' @export
#' @author M.Beauvais
putImagesFromAnalysisOnDisk<-function(AFMImageAnalyser, AFMImage, exportDirectory) {
# fractal dimension diagnostic images
exportFractalDimImagesForReport(AFMImage, exportDirectory)
# PSD / roughness-against-lengthscale images
exportPSDImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
# variogram images (directional, omnidirectional, per-model predictions)
exportVariogramImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
# 3D snapshot of the image (rgl; needs an X server on Unix)
export3DImageForReport(AFMImage, exportDirectory)
}
# Export on disk all variogram-related images and csv files for one AFMImage:
# directional variogram plot/csv, omnidirectional variogram plot/csv, the
# chosen fitting sample plot, and per-model predicted images.
exportVariogramImagesForReport<- function(AFMImageAnalyser, AFMImage, exportDirectory) {
  class(AFMImageAnalyser)="AFMImageAnalyser"
  class(AFMImageAnalyser@variogramAnalysis)="AFMImageVariogramAnalysis"
  sampleName<-basename(AFMImage@fullfilename)

  # spplot of real sample for comparison with predicted sample from each variogram model
  spplotImagefullfilename<-getSpplotImagefullfilename(exportDirectory, sampleName)
  saveSpplotFromAFMImage(AFMImage, spplotImagefullfilename, withoutLegend=TRUE)

  # directional variograms files
  if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) {
    exportCsvFullFilename<-getDirectionalVarioCsvFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportCsvFullFilename)))
    tryCatch({
      write.table(AFMImageAnalyser@variogramAnalysis@directionalVariograms, exportCsvFullFilename, sep=",")
    }, error = function(e){
      # bug fix: print("error", e) passed the condition object as print.default's
      # 'digits' argument and failed inside the handler
      message("error: ", conditionMessage(e))
    })
    dvarios<-AFMImageAnalyser@variogramAnalysis@directionalVariograms
    # silence R CMD check notes for ggplot2 non-standard evaluation symbols
    dist<-gamma<-dir.hor<-NULL
    p1 <- ggplot(dvarios, aes(x=dist, y=gamma, color=as.factor(dir.hor) , shape=as.factor(dir.hor)))
    p1 <- p1 + geom_point()
    p1 <- p1 + ylab("semivariance")
    p1 <- p1 + xlab("distance (nm)")
    p1 <- p1 + ggtitle("Semivariogram")
    p1 <- p1 + expand_limits(y = 0)
    p1 <- p1 + guides(colour=FALSE)
    exportpng2FullFilename<-getDirectionalVarioPngFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportpng2FullFilename)))
    png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
    print(p1)
    dev.off()
  }

  # omnidirectional variogram files
  if (!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) {
    exportCsvFullFilename<-getOmnidirectionalVarioCsvFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportCsvFullFilename)))
    AFMImageVariogram<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
    class(AFMImageVariogram)=c("gstatVariogram","data.frame")
    tryCatch({
      write.table(AFMImageVariogram, exportCsvFullFilename, sep=",")
    }, error = function(e){
      # bug fix: same broken print("error", e) as above
      message("error: ", conditionMessage(e))
    })
    # rebuild the gstatVariogram bookkeeping columns so it can be plotted
    myvgm<-AFMImageVariogram
    experimentalVariogramDTnrow=nrow(myvgm)
    class(myvgm) = c("gstatVariogram", "data.frame")
    myvgm$np=rep(1,experimentalVariogramDTnrow)
    myvgm$dir.hor=rep(0,experimentalVariogramDTnrow)
    myvgm$dir.ver=rep(0,experimentalVariogramDTnrow)
    myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow)
    dist<-gamma<-id<-NULL
    p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_point()
    p1 <- p1 + ylab("semivariance")
    p1 <- p1 + xlab("distance (nm)")
    p1 <- p1 + ggtitle("Semivariogram")
    p1 <- p1 + expand_limits(y = 0)
    p1 <- p1 + guides(colour=FALSE)
    exportpng2FullFilename<-getOmnidirectionalVarioPngFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportpng2FullFilename)))
    png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
    print(p1)
    dev.off()

    # chosen sample plot (subset of points used to fit the variogram models)
    TheData<-as.data.frame(AFMImage@data)
    TheData=na.omit(TheData)
    part_model <- TheData[AFMImageAnalyser@variogramAnalysis@chosenFitSample, ]
    coordinates(part_model) = ~x+y
    # NOTE(review): "+init" is an incomplete proj4 string — confirm the intended CRS
    proj4string(part_model)=CRS("+init")
    is.projected(part_model)
    pchosenFitSample<-spplot(part_model, col.regions="black",contour=TRUE,key=list(lines=FALSE, col="transparent"))
    expectedWidth = 400
    expectHeight = 300
    exportpngFullFilename<-getVarioPngchosenFitSample(exportDirectory, sampleName)
    print(paste("saving", basename(exportpngFullFilename)))
    png(filename=exportpngFullFilename, units = "px", width=expectedWidth, height=expectHeight)
    print(pchosenFitSample)
    dev.off()

    # save images from variogram modeling
    totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels)
    if (totalVariogramModels>0) {
      fullfilename<-AFMImage@fullfilename
      cuts<-AFMImageAnalyser@variogramAnalysis@cuts
      for (i in seq(1,totalVariogramModels)) {
        testedModel<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@model
        print(testedModel)
        # "Wav2" is a nested exponential + wave model; everything else is a plain model
        # (the local 'vgm' object intentionally shadows gstat::vgm after assignment)
        if (testedModel=="Wav2") {
          vgm<-vgm( 5, "Exp", 1, add.to = vgm(5, "Wav", 1, nugget = 2.5))
        }else{
          vgm<-vgm(5,testedModel,1,0)
        }
        mykrige<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige
        predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(exportDirectory, sampleName, testedModel)
        saveSpplotFromKrige(predictedspplotfullfilename, vgm,mykrige,cuts, withoutLegend = TRUE)
        predictedAFMImage<-getAFMImageFromKrige(AFMImage, vgm, mykrige)
        class(predictedAFMImage) = c("AFMImage")
        export3DImageForReport(predictedAFMImage, exportDirectory)
      }
    }
  }
}
# Export the PSD-analysis plots of an AFMImage as PNG files into
# exportDirectory: the roughness-vs-lengthscale curve (full range and a
# zoom on the first 10 nm), plus intersection images when available.
# Does nothing when the PSD analysis holds no roughnessAgainstLengthscale data.
exportPSDImagesForReport<-function(AFMImageAnalyser, AFMImage, exportDirectory) {
#class(AFMImageAnalyser)="AFMImageAnalyser"
#class(AFMImageAnalyser@psdAnalysis)="AFMImagePSDAnalysis"
filename<-basename(AFMImage@fullfilename)
# export Roughness against lengthscale graph
if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) {
data<-AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale
# NULL assignments silence R CMD check notes about non-standard evaluation
# of column names (see utils::suppressForeignCheck at the top of the file);
# inside aes()/data.table's `[` these names resolve to columns of `data`.
r<-roughness<-NULL
p1 <- ggplot(data, aes(x=r, y=roughness, colour= basename(filename)))
p1 <- p1 + geom_point()
p1 <- p1 + geom_line()
p1 <- p1 + ylab("roughness (nm)")
p1 <- p1 + xlab("lengthscale (nm)")
p1 <- p1 + guides(colour=FALSE)
# NOTE(review): the two slots below are read BEFORE the emptiness guard on
# the next if; if AFMImagePSDSlopesAnalysis2 can be empty these reads may
# fail first — confirm slot defaults in the class definition.
aIntercept<-AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@yintersept
aSlope<-AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@slope
if (length(AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2)!=0){
p1 <- p1 + geom_abline(intercept = aIntercept,
slope = aSlope,
size=1.2)
}
print(paste(aIntercept, aSlope))
pngFilename=paste(filename,"roughness-against-lengthscale.png",sep="-")
exportpngFullFilename<-paste(exportDirectory, pngFilename, sep="/")
print(paste("saving", basename(exportpngFullFilename)))
png(filename=exportpngFullFilename, units = "px", width=800, height=800)
print(p1)
dev.off()
# focus on the first 10nm
# data.table scoping: `r` below refers to the column of `data`
# (assumes roughnessAgainstLengthscale is a data.table — TODO confirm)
newdata<-data[r<10,]
r<-roughness<-NULL
p1 <- ggplot(newdata, aes(x=r, y=roughness, colour= basename(filename)))
p1 <- p1 + geom_point()
p1 <- p1 + geom_line()
p1 <- p1 + ylab("roughness (nm)")
p1 <- p1 + xlab("lengthscale (nm)")
p1 <- p1 + guides(colour=FALSE)
# if (length(AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1)!=0){
#   p1 <- p1 + geom_abline(intercept = AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@yintersept,
#                          slope = AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@slope,
#                          size=1.2)
# }
pngFilename=paste(filename,"roughness-against-lengthscale-10nm.png",sep="-")
exportpngFullFilename<-paste(exportDirectory, pngFilename, sep="/")
print(paste("saving", basename(exportpngFullFilename)))
png(filename=exportpngFullFilename, units = "px", width=800, height=800)
print(p1)
dev.off()
# save intersections images
if (!length(AFMImageAnalyser@psdAnalysis@intersections)==0) {
saveOnDiskIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, exportDirectory)
}
}
}
# Render a data table as a table grob embedded in a blank ggplot object so
# it can be printed into a grid viewport like any other plot.
# removeRowNames (default TRUE) hides the row names; removeColNames
# (default FALSE) hides the column headers.
getGgplotFromDataTable<-function(DT, removeRowNames, removeColNames) {
if (missing(removeRowNames)) removeRowNames<-TRUE
if (missing(removeColNames)) removeColNames<-FALSE
tableTheme <- gridExtra::ttheme_default(
core = list(fg_params=list(cex = 0.8)),
colhead = list(fg_params=list(cex = 0.9)),
rowhead = list(fg_params=list(cex = 0.9)))
# Blank canvas the table grob is drawn onto
blankCanvas <- qplot(1:15, 1:15, geom = "blank") +
theme_bw() +
theme(line = element_blank(), text = element_blank())
# Build the grob with the requested row/column-name suppression
if (removeRowNames && removeColNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, rows = NULL, cols = NULL)
} else if (removeRowNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, rows = NULL)
} else if (removeColNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, cols = NULL)
} else {
dtGrob <- tableGrob(DT, theme = tableTheme)
}
return(blankCanvas + annotation_custom(grob = dtGrob))
}
# Save a 3D rgl snapshot of AFMImage into exportDirectory, with a changed
# viewpoint and configurable lighting.
# NOTE(review): this three-argument definition is immediately overwritten by
# the two-argument export3DImageForReport defined just below it, so this
# version is effectively dead code in the sourced package.
export3DImageForReport<-function(AFMImage, exportDirectory, noLight) {
# library() calls inside a function are unusual; kept as-is for safety
library(AFM)
library(rgl)
sampleName<-basename(AFMImage@fullfilename)
rglImagefullfilename<-get3DImageFullfilename(exportDirectory, sampleName)
# displayIn3D is expected to return TRUE when the snapshot was written
if (displayIn3D(AFMImage, width=1024, fullfilename=rglImagefullfilename,changeViewpoint=TRUE, noLight= noLight)) {
rgl.viewpoint(zoom=2)
rgl.close()
}
}
# Save a 3D rgl snapshot of AFMImage (with lighting enabled) into
# exportDirectory, closing the rgl device once the image is on disk.
export3DImageForReport<-function(AFMImage, exportDirectory) {
imageBasename<-basename(AFMImage@fullfilename)
snapshotFullfilename<-get3DImageFullfilename(exportDirectory, imageBasename)
snapshotSaved<-displayIn3D(AFMImage, width=1024, fullfilename=snapshotFullfilename,noLight=FALSE)
if (snapshotSaved) {
rgl.close()
}
}
# Ensure reportDirectory exists and is writable; stop() with an error when
# the write-permission probe fails.
createReportDirectory<-function(reportDirectory) {
directoryMissing<-!file.exists(reportDirectory)
if (directoryMissing) {
print(paste("creating report directory",reportDirectory))
dir.create(file.path(reportDirectory), showWarnings = FALSE)
}
# Abort early when the directory cannot actually be written to
writable<-isReportDirectoryWritePermissionCorrect(reportDirectory)
if (!writable) {
stop(paste("Error: can't write to output directory", reportDirectory))
}
print(paste("report directory is", reportDirectory))
}
# Check whether reportDirectory is writable by creating, writing to and
# removing a small probe file ("permCheck.txt").
# Returns TRUE when the whole round trip succeeds, FALSE otherwise.
# Fix: the original error handler called close(fileConn) where fileConn may
# never have been assigned (if file() itself failed), throwing a second
# error from inside the handler instead of returning FALSE; it also left
# the probe file and connection behind on partial failure.
isReportDirectoryWritePermissionCorrect<-function(reportDirectory) {
fullfilename<-paste(reportDirectory, "permCheck.txt", sep="/")
fileConn<-NULL
ok<-tryCatch({
fileConn<-file(fullfilename)
writeLines(c("Hello","World"), fileConn)
close(fileConn)
fileConn<-NULL   # mark as already closed
file.remove(fullfilename)
TRUE
}, error = function(e){
FALSE
})
# Best-effort cleanup when the probe failed part-way through
if (!is.null(fileConn)) try(close(fileConn), silent=TRUE)
if (!ok && file.exists(fullfilename)) try(file.remove(fullfilename), silent=TRUE)
return(ok)
}
| /R/AFMReportMaker.R | no_license | cran/AFM | R | false | false | 40,145 | r | require("fftwtools")
require("pracma")
require("data.table")
require("gstat")
require(sp)
require("stringr")
require(gridExtra)
require(ggplot2)
#require(reshape2)
# for reporting
require(png)
require(grid)
if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("r", "roughness", "dir.hor", "id","name","press","ang1","ang2","ang3","anis1","anis2","id"))
#' Generate a pdf report for all AFM images in a directory
#'
#' A function to generate a pdf report for each \code{\link{AFMImage}} in a directory. Images should be in export Nanoscope format as the \code{\link{importFromNanoscope}} function will be used.
#'
#' @param imageDirectory a directory where are located image as Nanoscope export format
#' @param imageNumber (optional) an image number in the directory. If it is set only the selected image will be processed.
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' # A report will be generated for all the images in imageDirectory directory
#' # imageDirectory="c:/images"
#' imageDirectory=tempdir()
#' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory)
#'
#' # A report will be generated for the fifth image in the imageDirectory directory
#' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory,5)
#' }
generateReportFromNanoscopeImageDirectory<-function(imageDirectory, imageNumber) {
# Collect the Nanoscope export files (*.txt) at the top of imageDirectory
candidateFiles<-list.files(imageDirectory, include.dirs = FALSE, recursive = FALSE,full.names = TRUE,pattern = "\\.txt$")
# When a specific image is requested, restrict processing to that file
if (!missing(imageNumber)) {
if (imageNumber>length(candidateFiles)) {
print(paste("Selected image number",imageNumber,paste("exceeds the number of image in directory (",length(candidateFiles),")", sep="")))
return(FALSE)
}
candidateFiles<-candidateFiles[imageNumber]
}
# Import each image and produce its full pdf report
for(oneFullfilename in candidateFiles){
generateReport(importFromNanoscope(oneFullfilename))
}
return(TRUE)
}
#' Generate an analysis report for one AFMImage
#'
#' A function to analyse an \code{\link{AFMImage}} and save on disk the analysis. The analysis are saved in outputs directory located in the image directory.
#' All the rdata and image files in the reportDirectory directory are loaded to generate one report for one \code{\link{AFMImage}}.
#'
#' @param AFMImage an \code{\link{AFMImage}} to be analysed
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#'
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start to check if your sample is normally distributed and isotropic.
#' generateCheckReport(AFMImage)
#' # If the sample is normally distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#'
#'
#' # Analyse your own AFM image from nanoscope analysis (TM) software tool
#' anotherAFMImage<-importFromNanoscope("c:/users/my_windows_login/myimage.txt")
#' # Start to check if your sample is normally distributed and isotropic.
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normally distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateReport <- function(AFMImage) {
imageName<-basename(AFMImage@fullfilename)
imageDir<-dirname(AFMImage@fullfilename)
print(paste("generating a full Report for", imageName, "in", imageDir))
# Reports go into an "outputs" subdirectory next to the image file
outputDir<-paste(imageDir, "outputs", sep="/")
createReportDirectory(outputDir)
# Run the full analysis and persist it to disk before rendering the pdf
analyser<-new("AFMImageAnalyser", AFMImage=AFMImage, fullfilename= AFMImage@fullfilename)
analyser<-analyse(AFMImageAnalyser=analyser)
putAnalysisOnDisk(AFMImageAnalyser=analyser, AFMImage=AFMImage)
# Render the complete report (isCheckReport = FALSE selects the full layout)
reportFullfilename<-paste(outputDir, paste(imageName,"fullreport.pdf",sep="-"),sep="/")
generateAFMImageReport(analyser, reportFullfilename, isCheckReport = FALSE)
print("done")
}
#' Generate a check report for one AFMImage
#'
#' Generate a check report in pdf format in order to analyse the distribution and the isotropy of heights of the \code{\link{AFMImage}}.
#'
#' @param AFMImage an \code{\link{AFMImage}} imported from Nanoscope Analysis(TM) with \code{\link{importFromNanoscope}} or created manually \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start to check if your sample is normally distributed and isotropic.
#' generateCheckReport(AFMImage)
#' # If the sample is normally distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#'
#' # Analyse your own AFM image from nanoscope analysis (TM) software tool
#' anotherAFMImage<-importFromNanoscope("c:/users/me/myimage.txt")
#' # Start to check if your sample is normally distributed and isotropic.
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normally distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateCheckReport <- function(AFMImage) {
imageName<-basename(AFMImage@fullfilename)
imageDir<-dirname(AFMImage@fullfilename)
print(paste("Generating a check report for", imageName, "in", imageDir))
# Reports go into an "outputs" subdirectory next to the image file
outputDir<-paste(imageDir, "outputs", sep="/")
createReportDirectory(outputDir)
# For a check report only the isotropy analysis is performed, then saved
analyser<-new("AFMImageAnalyser", AFMImage= AFMImage, fullfilename = AFMImage@fullfilename)
analyser<-checkIsotropy(AFMImage,analyser)
putAnalysisOnDisk(analyser, AFMImage)
# Render the lightweight report (isCheckReport = TRUE)
reportFullfilename<-paste(outputDir, paste(imageName,"checkreport.pdf",sep="-"),sep="/")
generateAFMImageReport(analyser, reportFullfilename, isCheckReport = TRUE)
print("done")
}
#' @title Generate an analysis report from an AFMImageAnalyser object
#'
#' @description \code{generateAFMImageReport} generates a report from an AFMImageAnalyser object
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce report
#' @param reportFullfilename location on disk where to save the generated report
#' @param isCheckReport TRUE to generate a check report must be generated, FALSE to generate a full report
#' @author M.Beauvais
#' @export
# Render the pdf report for one analysed AFM image.
# Pages are produced in order: title page (3D snapshot + basic info table),
# normality/isotropy page, then — for a full report only — variogram model
# evaluation, roughness-vs-lengthscale and fractal dimension pages.
# Each optional section is skipped when its analysis slot is empty.
generateAFMImageReport<-function(AFMImageAnalyser, reportFullfilename, isCheckReport){
numberOfModelsPerPage=3
# Default to the lightweight check report when not specified
if (missing(isCheckReport)) {
isCheckReport = TRUE
}
AFMImage<-AFMImageAnalyser@AFMImage
# # load AFMImageAnalyser in rda format
# print(paste("loading", basename(oneAFMImageAnalyserFile)))
#
# x = load(file = oneAFMImageAnalyserFile)
# AFMImageAnalyser= get(x)
# rm(x)
#
fullfilename <- AFMImageAnalyser@AFMImage@fullfilename
sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
reportDirectory=dirname(AFMImageAnalyser@fullfilename)
createReportDirectory(reportDirectory)
#
# # load image in rda format
# afmImageFullfilename<-paste(dirname(oneAFMImageAnalyserFile) ,paste(sampleName, "AFMImage.rda", sep="-"),sep="/")
# print(paste("loading", basename(afmImageFullfilename)))
# x = load(file = afmImageFullfilename)
# AFMImage= get(x)
# rm(x)
#
#
#
# print(paste("processing image", sampleName))
#
# save all images necessary for the report on disk
putImagesFromAnalysisOnDisk(AFMImageAnalyser, AFMImage, reportDirectory)
print(paste("creating", basename(reportFullfilename), "..."))
# 8.27 x 11.69 inches = A4 portrait
pdf(reportFullfilename, width=8.27, height=11.69)
# first page
rglImagefullfilename<-get3DImageFullfilename(reportDirectory, sampleName)
print(paste("reading", basename(rglImagefullfilename), "..."))
img <- readPNG(rglImagefullfilename)
roughnesses<-getRoughnessParameters(AFMImageAnalyser@AFMImage)
# Basic image information shown as a two-column (name, value) table
basicImageInfo<-data.table(name=c("Scan size",
"Samples per line",
"Lines",
"Total Rrms",
"Ra (mean roughness)"),
values=c(paste(AFMImageAnalyser@AFMImage@scansize,"nm"),
paste(as.character(AFMImageAnalyser@AFMImage@samplesperline)),
paste(as.character(AFMImageAnalyser@AFMImage@lines)),
paste(round(roughnesses$totalRMSRoughness_TotalRrms, digits=4),"nm"),
paste(round(roughnesses$MeanRoughness_Ra, digits=4),"nm")))
imageInformationDTPlot<-getGgplotFromDataTable(basicImageInfo,
removeRowNames= TRUE,
removeColNames=TRUE)
grid.newpage() # Open a new page on grid device
pushViewport(viewport(layout = grid.layout(5, 4)))
vp1<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4)
grid.raster(img,vp=vp1)
vp0<-viewport(layout.pos.row = 1, layout.pos.col = 2:3)
grid.text(sampleName, vp=vp0, gp=gpar(fontsize=20, col="black"))
vp2<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:4)
print(imageInformationDTPlot,vp=vp2)
# page for checking
# normality / omni direction of samples
if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) {
exportpng2FullFilename<-getDirectionalVarioPngFullfilename(reportDirectory, sampleName)
print(paste("reading",basename(exportpng2FullFilename)))
directionalVariograms<-readPNG(exportpng2FullFilename)
grid.newpage() # Open a new page on grid device
pushViewport(viewport(layout = grid.layout(4, 4)))
qq <- checkNormalityQQ(AFMImage)
m <- checkNormalityDensity(AFMImage)
vp2<- viewport(layout.pos.row = 1:2, layout.pos.col = 1:2)
print(qq, vp = vp2)
vp3<-viewport(layout.pos.row = 1:2, layout.pos.col = 3:4)
print(m, vp = vp3)
vp4<-viewport(layout.pos.row = 3:4, layout.pos.col = 1:4)
grid.raster(directionalVariograms,vp=vp4)
}
# The remaining sections only appear in the full report
if (!isCheckReport) {
# get variogram model evaluation
if (!length(AFMImageAnalyser@variogramAnalysis@variogramModels)==0) {
mergedDT<-getDTModelEvaluation(AFMImageAnalyser@variogramAnalysis)
print(mergedDT)
sillrangeDT<-getDTModelSillRange(AFMImageAnalyser@variogramAnalysis)
setkey(sillrangeDT, "model")
# silence R CMD check notes for data.table NSE column names
name<-press<-NULL
sampleDT <- mergedDT[name==basename(AFMImageAnalyser@AFMImage@fullfilename)]
setkey(sampleDT, "model")
#sampleDT <- sampleDT[cor>0.98]
sampleDT<-merge(sampleDT, sillrangeDT, by="model")
sampleDT<-sampleDT[,name:=NULL]
sampleDT <- unique(sampleDT)
# best models first: highest correlation, then lowest press
sampleDT <- sampleDT[order(-rank(cor), rank(press))]
print(basename(AFMImageAnalyser@AFMImage@fullfilename))
print(basename(AFMImageAnalyser@fullfilename))
print(sampleDT)
summarySampleDT<-copy(sampleDT)
summarySampleDT$press<-round(sampleDT$press)
summarySampleDT$sill<-round(sampleDT$sill)
summarySampleDT$range<-round(sampleDT$range)
print("plotting variogram table...")
existsVariogramModel<-TRUE
if (nrow(sampleDT)!=0) {
plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT,
removeRowNames=TRUE,
removeColNames=FALSE)
}else{
print("no good variogram table...")
existsVariogramModel<-FALSE
# NOTE(review): this fallback rebuilds sampleDT from mergedDT but then
# still plots summarySampleDT, which was derived from the empty table
# above — looks like a copy/paste slip; confirm intended behavior.
sampleDT <- mergedDT[name==basename(fullfilename)]
sampleDT <- unique(sampleDT)
sampleDT <- sampleDT[order(-rank(cor), rank(press))]
plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT,
removeRowNames=TRUE,
removeColNames=FALSE)
}
#print(plotBestVariogramModelsTable)
}
# best variogram models page
if (!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) {
# chosen sample
grid.newpage() # Open a new page on grid device
pushViewport(viewport(layout = grid.layout(7, 2)))
sampleSpplotfullfilename<-getSpplotImagefullfilename(reportDirectory, sampleName)
print(paste("reading",basename(sampleSpplotfullfilename)))
sampleImg <- readPNG(sampleSpplotfullfilename)
sampleSpplotfullfilename<-getVarioPngchosenFitSample(reportDirectory, sampleName)
print(paste("reading",basename(sampleSpplotfullfilename)))
chosenFitSampleImg <- readPNG(sampleSpplotfullfilename)
vp0<- viewport(layout.pos.row = 1, layout.pos.col = 1:2)
grid.text("Variogram analysis", vp=vp0, gp=gpar(fontsize=20, col="black"))
vp1<- viewport(layout.pos.row = 2:3, layout.pos.col = 1)
grid.raster(sampleImg,vp=vp1)
#vp3<-viewport(layout.pos.row = 9, layout.pos.col = 1)
#grid.text("Original", vp=vp3, gp=gpar(fontsize=10, col="black"))
vp2<- viewport(layout.pos.row = 2:3, layout.pos.col = 2)
grid.raster(chosenFitSampleImg,vp=vp2)
#vp4<-viewport(layout.pos.row = 9, layout.pos.col = 2)
#grid.text("Sample", vp=vp4, gp=gpar(fontsize=10, col="black"))
totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels)
#print(totalVariogramModels)
if (totalVariogramModels>0) {
vp5<-viewport(layout.pos.row = 4:7, layout.pos.col = 1:2)
print(plotBestVariogramModelsTable,vp=vp5)
printVariogramModelEvaluations(AFMImageAnalyser, sampleDT, numberOfModelsPerPage)
}
}
# Roughness against length scale
if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) {
grid.newpage() # Open a new page on grid device
pushViewport(viewport(layout = grid.layout(7, 2)))
vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:2)
grid.text("Roughness vs. lengthscale", vp=vp0, gp=gpar(fontsize=20, col="black"))
exportCsvFullFilename<-getRoughnessAgainstLengthscale(reportDirectory, sampleName)
print(paste("reading",basename(exportCsvFullFilename)))
samplePredictedImg <- readPNG(exportCsvFullFilename)
vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 1)
grid.raster(samplePredictedImg,vp=vp1)
exportCsvFullFilename<-getRoughnessAgainstLengthscale10nm(reportDirectory, sampleName)
print(paste("reading",basename(exportCsvFullFilename)))
samplePredictedImg <- readPNG(exportCsvFullFilename)
vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 2)
grid.raster(samplePredictedImg,vp=vp1)
# Up to two intersection images (suffixes "-0" and "-2"), when present
for(i in c(0,1)) {
exportpng2FullFilename=getRoughnessAgainstLengthscaleIntersection(reportDirectory, paste(sampleName,i*2,sep="-"))
if (file.exists(exportpng2FullFilename)) {
print("intersection inserted...")
img<-readPNG(exportpng2FullFilename)
vp2<-viewport(layout.pos.row = 5:7, layout.pos.col = i+1)
grid.raster(img,vp=vp2)
}
}
}
# export fractal dimension
if (!length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)==0) {
grid.newpage() # Open a new page on grid device
pushViewport(viewport(layout = grid.layout(7, 4)))
vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:4)
grid.text("Fractal dimension analysis", vp=vp0, gp=gpar(fontsize=20, col="black"))
n=length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)
print(n)
if (n>0) {
# One row per fractal dimension method: (method, fd, fd_scale)
sampleDT <- data.table(
fd_method= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_method)),
fd= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd)),
fd_scale= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_scale)))
# sampleDT <- data.table( AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)
# setnames(sampleDT,c("method","scale"),c("fd_method","fd_scale"))
print(sampleDT)
#sampleDT <- sampleDT[,c(2,13,14,15), with = FALSE]
setkey(sampleDT, "fd_method")
sampleDT <- unique(sampleDT)
name<-NULL
plotFractalDimensionTable<-getGgplotFromDataTable(sampleDT[, name:=NULL])
vp3<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4)
print(plotFractalDimensionTable,vp=vp3)
exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "isotropic")
if (file.exists(exportpng2FullFilename)) {
img<-readPNG(exportpng2FullFilename)
vp4<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:2)
grid.raster(img,vp=vp4)
}
exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "squareincr")
if (file.exists(exportpng2FullFilename)) {
img<-readPNG(exportpng2FullFilename)
vp5<-viewport(layout.pos.row = 4:5, layout.pos.col = 3:4)
grid.raster(img,vp=vp5)
}
exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "filter1")
if (file.exists(exportpng2FullFilename)) {
img<-readPNG(exportpng2FullFilename)
vp6<-viewport(layout.pos.row = 6:7, layout.pos.col = 1:2)
grid.raster(img,vp=vp6)
}
}
}
}
dev.off()
# removes only the local binding; caller's object is unaffected
rm(AFMImageAnalyser)
}
#' @title printVariogramModelEvaluations
#'
#' @description \code{printVariogramModelEvaluations} generates a graphic element containing the evaluation of all variogram models
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce report
#' @param numberOfModelsPerPage numeric to specify the number of model evaluations per pages
#' @param sampleDT a data.table containing the evaluation information
#' @author M.Beauvais
#' @export
printVariogramModelEvaluations<-function(AFMImageAnalyser, sampleDT, numberOfModelsPerPage){
# NULL assignments silence R CMD check notes about data.table/ggplot NSE names
error<-predicted<-realH<-nbPointsPercent<-numberOfPoints<-NULL
#####################
# new page for experimental variogram and models
experimentalVariogramDT<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
experimentalVariogramDT$name<-"Experimental"
#drops <- c("dir.hor","dir.ver","id","np")
experimentalVariogramDT<-experimentalVariogramDT[,c("dir.hor","dir.ver","id","np"):=NULL]
#names(experimentalVariogramDT)
sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
# Render the original sample spplot to a temp file for side-by-side display
sampleSpplotfullfilename<-getSpplotImagefullfilename(tempdir(), sampleName)
saveSpplotFromAFMImage(AFMImageAnalyser@AFMImage, sampleSpplotfullfilename, withoutLegend=TRUE)
#print(paste("reading",basename(sampleSpplotfullfilename)))
#sampleImg<-getSpplotFromAFMImage(AFMImage=AFMImageAnalyser@AFMImage, expectedWidth=80, expectHeight=60, withoutLegend=TRUE)
sampleImg <- readPNG(sampleSpplotfullfilename)
# model names truncated to the last 3 characters for matching below
allVarioModels<-str_sub(sampleDT$model,-3)
i<-1
# One sub-layout (numberOfModelsPerPage models, 2 rows each) per page
for (i in seq(1:length(allVarioModels))) {
indexInPage<-i%%numberOfModelsPerPage
if (indexInPage==1) {
# Open a new page
grid.newpage()
pushViewport(viewport(layout = grid.layout(numberOfModelsPerPage*2, 3)))
}
if (indexInPage==0)indexInPage=numberOfModelsPerPage
#print(indexInPage)
#plot experimental variogram and model variogram
vp1<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 2)
grid.raster(sampleImg,vp=vp1)
# pushViewport(vp1)
# print(sampleImg,newpage=FALSE)
# popViewport(1)
#print(i)
# find the evaluated model whose name matches the current sampleDT row
allVariogramModelEvaluation<-AFMImageAnalyser@variogramAnalysis@variogramModels
for (j in seq(1:length(allVariogramModelEvaluation))) {
if (allVariogramModelEvaluation[j][[1]]@fit.v[2]$model==allVarioModels[i]) break;
}
#print(j)
#print(allVariogramModelEvaluation[j][[1]]@fit.v[2]$model)
#predictedfullfilename<-getSpplotPredictedImageFullfilename(reportDirectory, sampleName, allVarioModels[i])
modelName<-allVariogramModelEvaluation[j][[1]]@fit.v[2]$model
# NOTE(review): [[i]] indexes by the sampleDT position while the matching
# model index found above is j — these agree only when sampleDT keeps the
# original model order; confirm whether [[j]] was intended here.
part_valid_pr<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige
cuts<-AFMImageAnalyser@variogramAnalysis@cuts
withoutLegend<-TRUE
colLimit<-length(cuts)+3
cols <- getSpplotColors(colLimit)
# density plot for percent error (over total amplitude)
amplitudeReal<-abs(max(AFMImageAnalyser@AFMImage@data$h)-min(AFMImageAnalyser@AFMImage@data$h))
statsHeightsDT<-data.table(realH=AFMImageAnalyser@AFMImage@data$h, predicted = as.vector(unlist(part_valid_pr["var1.pred"]@data)))
statsHeightsDT[,error:=(predicted-realH)/amplitudeReal,]
statsHeightsDT
# cumulative count of points whose relative error is below each threshold
resStatsDT<-data.table(step=c(0), numberOfPoints=c(0))
totalPoints<-length(statsHeightsDT$error)
# NOTE(review): this inner loop reuses `i`, clobbering the outer loop
# index; `sampleDT[i]` further below therefore reads a row selected by the
# last error threshold, not the current model — suspected bug, confirm.
for (i in seq(0,max(statsHeightsDT$error), by=(max(statsHeightsDT$error)/20))) {
nbPoints<-length(statsHeightsDT$error[abs(statsHeightsDT$error)<i])
resStatsDT<-rbind(resStatsDT, data.table(step=c(i), numberOfPoints=c(nbPoints)))
}
resStatsDT<-resStatsDT[-c(1,2),]
resStatsDT
resStatsDT[, nbPointsPercent:=numberOfPoints/totalPoints,]
resStatsDT
errorData<-data.frame(error=statsHeightsDT$error)
# Render the kriging-predicted image to a temp file and read it back
predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(tempdir(), sampleName, modelName)
saveSpplotFromKrige(predictedspplotfullfilename, modelName, part_valid_pr,cuts, withoutLegend = TRUE)
# TODO save on disk as png and read
# read image on disk
#print(paste("reading", basename(predictedspplotfullfilename)))
samplePredictedImg <- readPNG(predictedspplotfullfilename)
#samplePredictedImg<-spplot(part_valid_pr["var1.pred"], cuts=cuts, col.regions=cols,key=list(lines=FALSE, col="transparent"))
vp2<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 3)
grid.raster(samplePredictedImg,vp=vp2)
# pushViewport(vp2)
# print(samplePredictedImg,newpage=FALSE)
# popViewport(1)
ang1<-ang2<-ang3<-anis1<-anis2<-name<-NULL
# rebuild the fitted two-structure variogram model for plotting/tabulation
fit.v<-allVariogramModelEvaluation[j][[1]]@fit.v
vgm1<-vgm(fit.v[2]$psill, fit.v[2]$model, fit.v[2]$range, kappa = fit.v[2]$kappa, anis = c(fit.v[2]$anis1, fit.v[2]$anis2), add.to = vgm(fit.v[1]$psill, fit.v[1]$model, fit.v[1]$range,kappa = fit.v[1]$kappa,anis = c(fit.v[1]$anis1, fit.v[1]$anis2)))
newModelDT<-data.table(vgm1)
setnames(newModelDT, "psill", "sill" )
newModelDT<-rbind(newModelDT, sampleDT[i], fill=TRUE)
# drop the anisotropy columns from the displayed table
newModelDT<- newModelDT[, ang1:=NULL]
newModelDT<- newModelDT[, ang2:=NULL]
newModelDT<- newModelDT[, ang3:=NULL]
newModelDT<- newModelDT[, anis1:=NULL]
newModelDT<- newModelDT[, anis2:=NULL]
plotVariogramModelTable<-getGgplotFromDataTable(newModelDT[,name:=NULL])
vp4<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
print(vp=vp4, plotVariogramModelTable, row.names= FALSE, include.rownames=FALSE)
# variogram from model
# re-add the gstatVariogram columns stripped from experimentalVariogramDT
myvgm<-experimentalVariogramDT
experimentalVariogramDTnrow=nrow(myvgm)
class(myvgm) = c("gstatVariogram", "data.frame")
myvgm$np=rep(1,experimentalVariogramDTnrow)
myvgm$dir.hor=rep(0,experimentalVariogramDTnrow)
myvgm$dir.ver=rep(0,experimentalVariogramDTnrow)
myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow)
begin<-(indexInPage-1)*2+1
vp3<-viewport(layout.pos.row = begin:(begin+1), layout.pos.col = 1, width=100, height=100)
vgLine <- rbind(
cbind(variogramLine(vgm1, maxdist = max(myvgm$dist)), id = "Raw")
)
p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_line(data = vgLine) + geom_point()
p1 <- p1 + ylab("semivariance")
p1 <- p1 + xlab("distance (nm)")
p1 <- p1 + ggtitle("Semivariogram")
p1 <- p1 + guides(colour=FALSE)
p1 <- p1 + expand_limits(y = 0)
print(p1,vp=vp3)
grid.newpage()
vp5<-viewport(layout.pos.row = begin:(begin+2), layout.pos.col = 1, width=200, height=200)
# NOTE(review): the first p1 assignment below is immediately overwritten by
# the error-density plot on the next statement — dead code, confirm intent.
p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_line(data = vgLine) + geom_point()
p1 <- ggplot(errorData,aes(error, fill =c(1))) + geom_density(alpha = 0.2) +
guides(fill=FALSE)+
theme(legend.position="none")
# p1 + ylab("semivariance")
# p1 <- p1 + xlab("distance (nm)")
# p1 <- p1 + ggtitle("Semivariogram")
# p1 <- p1 + guides(colour=FALSE)
# p1 <- p1 + expand_limits(y = 0)
print(p1,vp=vp5)
plotVariogramModelTable<-getGgplotFromDataTable(resStatsDT)
vp6<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
print(vp=vp6, plotVariogramModelTable, row.names= FALSE, include.rownames=FALSE)
}
}
# Render a data table as a table grob embedded in a blank ggplot object so
# it can be printed into a grid viewport like any other plot.
# removeRowNames (default TRUE) hides the row names; removeColNames
# (default FALSE) hides the column headers.
# NOTE(review): this definition duplicates (and in the sourced file is
# superseded by) the other getGgplotFromDataTable, which uses a 1:15 canvas.
getGgplotFromDataTable<-function(DT, removeRowNames, removeColNames) {
if (missing(removeRowNames)) removeRowNames<-TRUE
if (missing(removeColNames)) removeColNames<-FALSE
tableTheme <- gridExtra::ttheme_default(
core = list(fg_params=list(cex = 0.8)),
colhead = list(fg_params=list(cex = 0.9)),
rowhead = list(fg_params=list(cex = 0.9)))
# Blank canvas the table grob is drawn onto
blankCanvas <- qplot(1:10, 1:10, geom = "blank") +
theme_bw() +
theme(line = element_blank(), text = element_blank())
# Build the grob with the requested row/column-name suppression
if (removeRowNames && removeColNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, rows = NULL, cols = NULL)
} else if (removeRowNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, rows = NULL)
} else if (removeColNames) {
dtGrob <- tableGrob(DT, theme = tableTheme, cols = NULL)
} else {
dtGrob <- tableGrob(DT, theme = tableTheme)
}
return(blankCanvas + annotation_custom(grob = dtGrob))
}
#' Put the images from all analysis on disk
#'
#' A function to put on disk all the images from variogram, PSD Analysis of an \code{\link{AFMImage}}
#' An AFM Image 3D representation is saved on disk thanks to the \code{\link{rgl}} package.
#' On Unix system, it is necessary to have a X server connection to be able to use the \code{\link{rgl}} package.
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}}
#' @param AFMImage an \code{\link{AFMImage}}
#' @param exportDirectory where the images will be stored
#' @export
#' @author M.Beauvais
putImagesFromAnalysisOnDisk<-function(AFMImageAnalyser, AFMImage, exportDirectory) {
# Write every report image (fractal dimension, PSD, variogram, 3D snapshot)
# into exportDirectory; each helper reads the relevant analysis slots.
exportFractalDimImagesForReport(AFMImage, exportDirectory)
exportPSDImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
exportVariogramImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
export3DImageForReport(AFMImage, exportDirectory)
}
# Export the variogram-related artefacts of an AFM image analysis to the
# report directory: CSV dumps and PNG plots of the directional and
# omnidirectional variograms, an spplot of the chosen fit sample, and the
# predicted images for each fitted variogram model.
#
# AFMImageAnalyser: AFMImageAnalyser whose variogramAnalysis slot is populated
# AFMImage: the analysed AFMImage (used for the sample name and raw data)
# exportDirectory: directory where the CSV and PNG files are written
exportVariogramImagesForReport<- function(AFMImageAnalyser, AFMImage, exportDirectory) {
# Force the expected S4 class tags before dereferencing the slots.
class(AFMImageAnalyser)="AFMImageAnalyser"
class(AFMImageAnalyser@variogramAnalysis)="AFMImageVariogramAnalysis"
sampleName<-basename(AFMImage@fullfilename)
# spplot of real sample for comparison with predicted sample from each variogram model
spplotImagefullfilename<-getSpplotImagefullfilename(exportDirectory, sampleName)
saveSpplotFromAFMImage(AFMImage, spplotImagefullfilename, withoutLegend=TRUE)
# Directional variograms: CSV dump plus a semivariance-vs-distance plot,
# only when the analysis produced directional variograms.
if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) {
exportCsvFullFilename<-getDirectionalVarioCsvFullfilename(exportDirectory, sampleName)
print(paste("saving", basename(exportCsvFullFilename)))
tryCatch({
write.table(AFMImageAnalyser@variogramAnalysis@directionalVariograms, exportCsvFullFilename, sep=",")
}, error = function(e){
# NOTE(review): print("error", e) does not format the condition (the
# condition object is passed as print()'s second argument);
# message(conditionMessage(e)) was probably intended -- confirm.
print("error",e)
})
dvarios<-AFMImageAnalyser@variogramAnalysis@directionalVariograms
# Dummy bindings to silence R CMD check notes about ggplot2 aesthetics.
dist<-gamma<-dir.hor<-NULL
p1 <- ggplot(dvarios, aes(x=dist, y=gamma, color=as.factor(dir.hor) , shape=as.factor(dir.hor)))
p1 <- p1 + geom_point()
p1 <- p1 + ylab("semivariance")
p1 <- p1 + xlab("distance (nm)")
p1 <- p1 + ggtitle("Semivariogram")
p1 <- p1 + expand_limits(y = 0)
p1 <- p1 + guides(colour=FALSE)
#print(p1)
exportpng2FullFilename<-getDirectionalVarioPngFullfilename(exportDirectory, sampleName)
print(paste("saving", basename(exportpng2FullFilename)))
png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
print(p1)
dev.off()
}
# Omnidirectional variogram: CSV dump, plot, chosen-fit-sample spplot and
# the predicted images for every fitted model.
if (!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) {
exportCsvFullFilename<-getOmnidirectionalVarioCsvFullfilename(exportDirectory, sampleName)
print(paste("saving", basename(exportCsvFullFilename)))
AFMImageVariogram<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
class(AFMImageVariogram)=c("gstatVariogram","data.frame")
tryCatch({
write.table(AFMImageVariogram, exportCsvFullFilename, sep=",")
}, error = function(e){
# NOTE(review): same print("error", e) misuse as above -- confirm.
print("error",e)
})
myvgm<-AFMImageVariogram
experimentalVariogramDTnrow=nrow(myvgm)
class(myvgm) = c("gstatVariogram", "data.frame")
# Fill the gstatVariogram bookkeeping columns so plotting helpers accept it.
myvgm$np=rep(1,experimentalVariogramDTnrow)
myvgm$dir.hor=rep(0,experimentalVariogramDTnrow)
myvgm$dir.ver=rep(0,experimentalVariogramDTnrow)
myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow)
# Dummy bindings for the ggplot2 aesthetics below.
dist<-gamma<-id<-NULL
p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_point()
p1 <- p1 + ylab("semivariance")
p1 <- p1 + xlab("distance (nm)")
p1 <- p1 + ggtitle("Semivariogram")
p1 <- p1 + expand_limits(y = 0)
p1 <- p1 + guides(colour=FALSE)
exportpng2FullFilename<-getOmnidirectionalVarioPngFullfilename(exportDirectory, sampleName)
print(paste("saving", basename(exportpng2FullFilename)))
png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
print(p1)
dev.off()
# Plot of the sample points that were used to fit the variogram models.
TheData<-as.data.frame(AFMImage@data)
TheData=na.omit(TheData)
part_model <- TheData[AFMImageAnalyser@variogramAnalysis@chosenFitSample, ]
coordinates(part_model) = ~x+y
# NOTE(review): "+init" alone is not a complete proj4 string; sp may warn
# or produce an undefined CRS -- confirm the intended projection.
proj4string(part_model)=CRS("+init")
is.projected(part_model)
pchosenFitSample<-spplot(part_model, col.regions="black",contour=TRUE,key=list(lines=FALSE, col="transparent"))
expectedWidth = 400
expectHeight = 300
exportpngFullFilename<-getVarioPngchosenFitSample(exportDirectory, sampleName)
print(paste("saving", basename(exportpngFullFilename)))
png(filename=exportpngFullFilename, units = "px", width=expectedWidth, height=expectHeight)
print(pchosenFitSample)
dev.off()
# Save the predicted image (spplot + 3D snapshot) for each fitted
# variogram model.
totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels)
#print(totalVariogramModels)
if (totalVariogramModels>0) {
fullfilename<-AFMImage@fullfilename
cuts<-AFMImageAnalyser@variogramAnalysis@cuts
for (i in seq(1,totalVariogramModels)) {
#print(AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@res)
testedModel<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@model
print(testedModel)
# "Wav2" is a composite model (exponential added to a wave model);
# every other model name is passed straight to gstat::vgm().
if (testedModel=="Wav2") {
vgm<-vgm( 5, "Exp", 1, add.to = vgm(5, "Wav", 1, nugget = 2.5))
}else{
vgm<-vgm(5,testedModel,1,0)
}
mykrige<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige
predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(exportDirectory, sampleName, testedModel)
saveSpplotFromKrige(predictedspplotfullfilename, vgm,mykrige,cuts, withoutLegend = TRUE)
predictedAFMImage<-getAFMImageFromKrige(AFMImage, vgm, mykrige)
class(predictedAFMImage) = c("AFMImage")
#displayIn3D(predictedAFMImage,1024, full2Dfilename,noLight=TRUE))
export3DImageForReport(predictedAFMImage, exportDirectory)
}
}
}
}
# Export the PSD (power spectral density) report images of an AFM image:
# the roughness-against-lengthscale graph over the full range, a second
# graph focused on the first 10 nm, and the intersection images when they
# were computed.
#
# AFMImageAnalyser: AFMImageAnalyser whose psdAnalysis slot is populated
# AFMImage: the analysed AFMImage (used for the sample name)
# exportDirectory: directory where the PNG files are written
exportPSDImagesForReport <- function(AFMImageAnalyser, AFMImage, exportDirectory) {
  filename <- basename(AFMImage@fullfilename)
  # Nothing to export when the roughness-against-lengthscale data is absent.
  if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale) == 0) {
    data <- AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale
    # Dummy bindings so R CMD check does not flag the ggplot2 aesthetics.
    r <- roughness <- NULL
    p1 <- ggplot(data, aes(x = r, y = roughness, colour = basename(filename)))
    p1 <- p1 + geom_point()
    p1 <- p1 + geom_line()
    p1 <- p1 + ylab("roughness (nm)")
    p1 <- p1 + xlab("lengthscale (nm)")
    p1 <- p1 + guides(colour = FALSE)
    # Bug fix: read the slope-analysis slots only after checking that the
    # slot is populated; the original dereferenced @yintersept/@slope
    # unconditionally, before the length guard.
    if (length(AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2) != 0) {
      aIntercept <- AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@yintersept
      aSlope <- AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@slope
      p1 <- p1 + geom_abline(intercept = aIntercept,
                             slope = aSlope,
                             size = 1.2)
      print(paste(aIntercept, aSlope))
    }
    pngFilename <- paste(filename, "roughness-against-lengthscale.png", sep = "-")
    exportpngFullFilename <- paste(exportDirectory, pngFilename, sep = "/")
    print(paste("saving", basename(exportpngFullFilename)))
    png(filename = exportpngFullFilename, units = "px", width = 800, height = 800)
    print(p1)
    dev.off()
    # Second plot: focus on the first 10 nm of lengthscale.
    # Bug fix: subset on the data column explicitly. The bare `r` here was
    # the NULL dummy binding created above, which silently selected zero
    # rows for a plain data.frame (data.table evaluated it as the column).
    newdata <- data[data$r < 10, ]
    p1 <- ggplot(newdata, aes(x = r, y = roughness, colour = basename(filename)))
    p1 <- p1 + geom_point()
    p1 <- p1 + geom_line()
    p1 <- p1 + ylab("roughness (nm)")
    p1 <- p1 + xlab("lengthscale (nm)")
    p1 <- p1 + guides(colour = FALSE)
    pngFilename <- paste(filename, "roughness-against-lengthscale-10nm.png", sep = "-")
    exportpngFullFilename <- paste(exportDirectory, pngFilename, sep = "/")
    print(paste("saving", basename(exportpngFullFilename)))
    png(filename = exportpngFullFilename, units = "px", width = 800, height = 800)
    print(p1)
    dev.off()
    # Save intersection images when they were computed.
    if (!length(AFMImageAnalyser@psdAnalysis@intersections) == 0) {
      saveOnDiskIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, exportDirectory)
    }
  }
}
# Render a data table as a ggplot object so it can be exported like any
# other plot.
#
# DT: the data.frame / data.table to render
# removeRowNames: hide the row-name column (default TRUE)
# removeColNames: hide the column headers (default FALSE)
#
# Returns a ggplot object with the table drawn as an annotation.
getGgplotFromDataTable <- function(DT, removeRowNames = TRUE, removeColNames = FALSE) {
  # The missing()-based defaults of the original are replaced by regular
  # default argument values; callers may still omit either flag.
  mytheme <- gridExtra::ttheme_default(
    core = list(fg_params = list(cex = 0.8)),
    colhead = list(fg_params = list(cex = 0.9)),
    rowhead = list(fg_params = list(cex = 0.9)))
  # Blank 15x15 canvas on which the table grob is drawn.
  qplotFromDataTable <- qplot(1:15, 1:15, geom = "blank") +
    theme_bw() +
    theme(line = element_blank(), text = element_blank())
  # Build the tableGrob() argument list once instead of enumerating the
  # four removeRowNames/removeColNames combinations. Assigning list(NULL)
  # stores an explicit NULL element (plain `<- NULL` would drop it).
  grobArgs <- list(DT, theme = mytheme)
  if (removeRowNames) grobArgs["rows"] <- list(NULL)
  if (removeColNames) grobArgs["cols"] <- list(NULL)
  qplotFromDataTable <- qplotFromDataTable +
    annotation_custom(grob = do.call(tableGrob, grobArgs))
  return(qplotFromDataTable)
}
# Render an AFM image in 3D with rgl and save the snapshot on disk,
# adjusting the viewpoint before closing the device.
#
# NOTE(review): this three-argument definition is immediately shadowed by
# the two-argument definition that follows it in this file -- confirm
# which one is meant to survive.
#
# AFMImage: the AFMImage to render
# exportDirectory: directory where the snapshot is written
# noLight: disable lighting in the 3D scene (default FALSE; the original
#          had no default, so two-argument calls failed)
export3DImageForReport <- function(AFMImage, exportDirectory, noLight = FALSE) {
  # library(AFM)/library(rgl) calls removed: attaching packages inside a
  # package function is discouraged, and rgl is addressed via :: below.
  sampleName <- basename(AFMImage@fullfilename)
  rglImagefullfilename <- get3DImageFullfilename(exportDirectory, sampleName)
  # displayIn3D() returns TRUE when the snapshot could be taken.
  if (displayIn3D(AFMImage, width = 1024, fullfilename = rglImagefullfilename, changeViewpoint = TRUE, noLight = noLight)) {
    rgl::rgl.viewpoint(zoom = 2)
    rgl::rgl.close()
  }
}
# Render an AFM image in 3D (with lighting) and save the snapshot on disk.
#
# AFMImage: the AFMImage to render
# exportDirectory: directory where the snapshot is written
export3DImageForReport <- function(AFMImage, exportDirectory) {
  imageBasename <- basename(AFMImage@fullfilename)
  snapshotFile <- get3DImageFullfilename(exportDirectory, imageBasename)
  # displayIn3D() returns TRUE when the snapshot could be taken; only then
  # is there an rgl device to close.
  snapshotTaken <- displayIn3D(AFMImage, width = 1024, fullfilename = snapshotFile, noLight = FALSE)
  if (snapshotTaken) {
    rgl.close()
  }
}
# Ensure the report directory exists and is writable, stopping with an
# error when it cannot be written to.
#
# reportDirectory: path of the directory the report files will go into
createReportDirectory <- function(reportDirectory) {
  dirMissing <- !file.exists(reportDirectory)
  if (dirMissing) {
    print(paste("creating report directory", reportDirectory))
    dir.create(file.path(reportDirectory), showWarnings = FALSE)
  }
  # Probe write permission with a throwaway file before any report output.
  writable <- isReportDirectoryWritePermissionCorrect(reportDirectory)
  if (!writable) {
    stop(paste("Error: can't write to output directory", reportDirectory))
  }
  print(paste("report directory is", reportDirectory))
}
# Check whether the report directory is writable by writing (and then
# removing) a small probe file.
#
# reportDirectory: directory to probe
#
# Returns TRUE when the probe file could be written, FALSE otherwise.
#
# Bug fix: the original error handler called close(fileConn), which raised
# a secondary error whenever file() itself had failed (fileConn undefined
# in that case) instead of returning FALSE. Cleanup now happens outside
# the tryCatch on a connection that is known to exist.
isReportDirectoryWritePermissionCorrect <- function(reportDirectory) {
  fullfilename <- file.path(reportDirectory, "permCheck.txt")
  fileConn <- NULL
  result <- tryCatch({
    fileConn <- file(fullfilename)
    writeLines(c("Hello", "World"), fileConn)
    TRUE
  }, error = function(e) {
    FALSE
  })
  # Close the connection (if one was created) before removing the file, so
  # the removal also works on platforms that lock open files.
  if (!is.null(fileConn)) {
    try(close(fileConn), silent = TRUE)
  }
  if (result) {
    try(file.remove(fullfilename), silent = TRUE)
  }
  result
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coarsen_POS_tags.R
\name{coarsen_POS_tags}
\alias{coarsen_POS_tags}
\title{Coarsen POS tags}
\usage{
coarsen_POS_tags(tag_vector)
}
\arguments{
\item{tag_vector}{A vector of POS tags.}
}
\value{
A vector of coarse tags.
}
\description{
Coarsens PTB or Petrov/Gimpel coarse tags into one of eight
categories:
'A' = adjective, 'D' = determiner, 'P' = preposition,
'N' = common/proper noun, 'M' = verb modifiers, 'V' = verbs,
'C' = coordinating conjunction, 'O' = all else
NOTE: 'M', 'C', and 'V' tags are currently only compatible with the PTB tagset.
}
\examples{
pos_tags <- c("VB", "JJ", "NN", "NN")
coarsen_POS_tags(pos_tags)
}
| /R/phrasemachine/man/coarsen_POS_tags.Rd | permissive | JasonKessler/phrasemachine | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coarsen_POS_tags.R
\name{coarsen_POS_tags}
\alias{coarsen_POS_tags}
\title{Coarsen POS tags}
\usage{
coarsen_POS_tags(tag_vector)
}
\arguments{
\item{tag_vector}{A vector of POS tags.}
}
\value{
A vector of coarse tags.
}
\description{
Coarsens PTB or Petrov/Gimpel coarse tags into one of eight
categories:
'A' = adjective, 'D' = determiner, 'P' = preposition,
'N' = common/proper noun, 'M' = verb modifiers, 'V' = verbs,
'C' = coordinating conjunction, 'O' = all else
NOTE: 'M', 'C', and 'V' tags are currently only compatible with the PTB tagset.
}
\examples{
pos_tags <- c("VB", "JJ", "NN", "NN")
coarsen_POS_tags(pos_tags)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpl_morphy_objects.R
\name{GapHandler}
\alias{GapHandler}
\title{Read how a Morphy Object handles the inapplicable token}
\usage{
GapHandler(morphyObj)
}
\arguments{
\item{morphyObj}{Object of class \code{morphy}, perhaps created with
\code{\link[=PhyDat2Morphy]{PhyDat2Morphy()}}.}
}
\value{
\code{GapHandler()} returns a character string stating how
gaps are handled by \code{morphyObj}.
}
\description{
Gaps represented by the inapplicable token can be treated as "missing data",
i.e. as equivalent to the ambiguous token \verb{?}; as an extra state, equivalent
to other states such as \code{0} or \code{1}; or as "inapplicable data" using the
algorithm of Brazeau, Guillerme and Smith (2019).
}
\examples{
morphyObj <- SingleCharMorphy("-0-0", "Extra")
GapHandler(morphyObj)
morphyObj <- UnloadMorphy(morphyObj)
}
\seealso{
Other Morphy API functions:
\code{\link{MorphyErrorCheck}()},
\code{\link{MorphyWeights}()},
\code{\link{PhyDat2Morphy}()},
\code{\link{SingleCharMorphy}()},
\code{\link{UnloadMorphy}()},
\code{\link{is.morphyPtr}()},
\code{\link{mpl_apply_tipdata}()},
\code{\link{mpl_attach_rawdata}()},
\code{\link{mpl_attach_symbols}()},
\code{\link{mpl_delete_Morphy}()},
\code{\link{mpl_delete_rawdata}()},
\code{\link{mpl_first_down_recon}()},
\code{\link{mpl_first_up_recon}()},
\code{\link{mpl_get_charac_weight}()},
\code{\link{mpl_get_gaphandl}()},
\code{\link{mpl_get_num_charac}()},
\code{\link{mpl_get_num_internal_nodes}()},
\code{\link{mpl_get_numtaxa}()},
\code{\link{mpl_get_symbols}()},
\code{\link{mpl_init_Morphy}()},
\code{\link{mpl_new_Morphy}()},
\code{\link{mpl_second_down_recon}()},
\code{\link{mpl_second_up_recon}()},
\code{\link{mpl_set_charac_weight}()},
\code{\link{mpl_set_num_internal_nodes}()},
\code{\link{mpl_set_parsim_t}()},
\code{\link{mpl_translate_error}()},
\code{\link{mpl_update_lower_root}()},
\code{\link{mpl_update_tip}()},
\code{\link{summary.morphyPtr}()}
}
\author{
\href{https://smithlabdurham.github.io/}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{Morphy API functions}
| /man/GapHandler.Rd | no_license | cran/TreeSearch | R | false | true | 2,232 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpl_morphy_objects.R
\name{GapHandler}
\alias{GapHandler}
\title{Read how a Morphy Object handles the inapplicable token}
\usage{
GapHandler(morphyObj)
}
\arguments{
\item{morphyObj}{Object of class \code{morphy}, perhaps created with
\code{\link[=PhyDat2Morphy]{PhyDat2Morphy()}}.}
}
\value{
\code{GapHandler()} returns a character string stating how
gaps are handled by \code{morphyObj}.
}
\description{
Gaps represented by the inapplicable token can be treated as "missing data",
i.e. as equivalent to the ambiguous token \verb{?}; as an extra state, equivalent
to other states such as \code{0} or \code{1}; or as "inapplicable data" using the
algorithm of Brazeau, Guillerme and Smith (2019).
}
\examples{
morphyObj <- SingleCharMorphy("-0-0", "Extra")
GapHandler(morphyObj)
morphyObj <- UnloadMorphy(morphyObj)
}
\seealso{
Other Morphy API functions:
\code{\link{MorphyErrorCheck}()},
\code{\link{MorphyWeights}()},
\code{\link{PhyDat2Morphy}()},
\code{\link{SingleCharMorphy}()},
\code{\link{UnloadMorphy}()},
\code{\link{is.morphyPtr}()},
\code{\link{mpl_apply_tipdata}()},
\code{\link{mpl_attach_rawdata}()},
\code{\link{mpl_attach_symbols}()},
\code{\link{mpl_delete_Morphy}()},
\code{\link{mpl_delete_rawdata}()},
\code{\link{mpl_first_down_recon}()},
\code{\link{mpl_first_up_recon}()},
\code{\link{mpl_get_charac_weight}()},
\code{\link{mpl_get_gaphandl}()},
\code{\link{mpl_get_num_charac}()},
\code{\link{mpl_get_num_internal_nodes}()},
\code{\link{mpl_get_numtaxa}()},
\code{\link{mpl_get_symbols}()},
\code{\link{mpl_init_Morphy}()},
\code{\link{mpl_new_Morphy}()},
\code{\link{mpl_second_down_recon}()},
\code{\link{mpl_second_up_recon}()},
\code{\link{mpl_set_charac_weight}()},
\code{\link{mpl_set_num_internal_nodes}()},
\code{\link{mpl_set_parsim_t}()},
\code{\link{mpl_translate_error}()},
\code{\link{mpl_update_lower_root}()},
\code{\link{mpl_update_tip}()},
\code{\link{summary.morphyPtr}()}
}
\author{
\href{https://smithlabdurham.github.io/}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{Morphy API functions}
|
# Dash Bootstrap example: one pill badge per Bootstrap contextual colour.
library(dashBootstrapComponents)
library(dashHtmlComponents)
# "mr-1" adds a small right margin between consecutive badges; the last
# badge omits it because nothing follows it.
badges <- htmlSpan(
list(
dbcBadge("Primary", pill = TRUE, color = "primary", className = "mr-1"),
dbcBadge("Secondary", pill = TRUE, color = "secondary", className = "mr-1"),
dbcBadge("Success", pill = TRUE, color = "success", className = "mr-1"),
dbcBadge("Warning", pill = TRUE, color = "warning", className = "mr-1"),
dbcBadge("Danger", pill = TRUE, color = "danger", className = "mr-1"),
dbcBadge("Info", pill = TRUE, color = "info", className = "mr-1"),
dbcBadge("Light", pill = TRUE, color = "light", className = "mr-1"),
dbcBadge("Dark", pill = TRUE, color = "dark")
)
)
| /docs/components/badge/links.R | no_license | AnnMarieW/R-dash-bootstrap-components | R | false | false | 678 | r |
# Dash Bootstrap example: one pill badge per Bootstrap contextual colour.
library(dashBootstrapComponents)
library(dashHtmlComponents)
# "mr-1" adds a small right margin between consecutive badges; the last
# badge omits it because nothing follows it.
badges <- htmlSpan(
list(
dbcBadge("Primary", pill = TRUE, color = "primary", className = "mr-1"),
dbcBadge("Secondary", pill = TRUE, color = "secondary", className = "mr-1"),
dbcBadge("Success", pill = TRUE, color = "success", className = "mr-1"),
dbcBadge("Warning", pill = TRUE, color = "warning", className = "mr-1"),
dbcBadge("Danger", pill = TRUE, color = "danger", className = "mr-1"),
dbcBadge("Info", pill = TRUE, color = "info", className = "mr-1"),
dbcBadge("Light", pill = TRUE, color = "light", className = "mr-1"),
dbcBadge("Dark", pill = TRUE, color = "dark")
)
)
|
# Getting & Cleaning Data course project.
# Downloads the UCI HAR smartphone dataset, merges the training and test
# sets, keeps only the mean/std measurements, relabels activities and
# variables, and writes a tidy per-subject/per-activity summary table.

# Download and unpack the raw dataset into ./c3.
# NOTE(review): method="curl" requires curl on the PATH; confirm the
# target platforms before changing it.
if(!file.exists("./c3")){dir.create("./c3")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./c3/Dataset.zip",method="curl")
unzip(zipfile="./c3/Dataset.zip",exdir="./c3")
fpath <- file.path("./c3" , "UCI HAR Dataset")
## Read all the files
DataActivityTest <- read.table(file.path(fpath, "test" , "Y_test.txt" ),header = FALSE)
DataActivityTrain <- read.table(file.path(fpath, "train", "Y_train.txt"),header = FALSE)
DataSubjectTrain <- read.table(file.path(fpath, "train", "subject_train.txt"),header = FALSE)
DataSubjectTest <- read.table(file.path(fpath, "test" , "subject_test.txt"),header = FALSE)
DataFeaturesTest <- read.table(file.path(fpath, "test" , "X_test.txt" ),header = FALSE)
DataFeaturesTrain <- read.table(file.path(fpath, "train", "X_train.txt"),header = FALSE)
## Combine different Training and Testing Data (rows line up because the
## subject, activity and feature files share the same row order).
DataSubject <- rbind(DataSubjectTrain, DataSubjectTest)
DataActivity <- rbind(DataActivityTrain, DataActivityTest)
DataFeatures <- rbind(DataFeaturesTrain, DataFeaturesTest)
## Name the colums and combine Subjects and Activity Data
names(DataSubject) <- c("subject")
names(DataActivity) <- c("activity")
DataFeaturesNames <- read.table(file.path(fpath, "features.txt"),header = FALSE)
names(DataFeatures) <- DataFeaturesNames$V2
DataCombine <- cbind(DataSubject, DataActivity)
##Merges the training and the test sets to create one Data set.
MainData <- cbind(DataFeatures, DataCombine)
##Extracts only the measurements on the mean and standard deviation for
##each measurement (features whose name contains mean() or std()).
subDataFeaturesNames <- DataFeaturesNames$V2[grep("(mean|std)\\(\\)", DataFeaturesNames$V2)]
##Uses descriptive activity names to name the activities in the Data set
selectedNames <- c(as.character(subDataFeaturesNames), "subject", "activity" )
MainData <- subset(MainData,select=selectedNames)
# NOTE(review): activityLabels is read but never applied to the activity
# column below -- confirm whether the join was intended.
activityLabels <- read.table(file.path(fpath, "activity_labels.txt"),header = FALSE)
##Appropriately labels the Data set with descriptive variable names.
names(MainData) <- gsub("^t", "time", names(MainData))
names(MainData) <- gsub("^f", "frequency", names(MainData))
names(MainData) <- gsub("Acc", "Accelerometer", names(MainData))
names(MainData) <- gsub("Gyro", "Gyroscope", names(MainData))
names(MainData) <- gsub("Mag", "Magnitude", names(MainData))
names(MainData) <- gsub("BodyBody", "Body", names(MainData))
##From the Data set in step 4, creates a second, independent tidy Data set
##with the average of each variable for each activity and each subject.
# NOTE(review): plyr is attached but only base aggregate() is used below.
library(plyr);
MainData2 <- aggregate(. ~subject + activity, MainData, mean)
MainData2 <- MainData2[order(MainData2$subject,MainData2$activity),]
# Bug fix: spell out row.names; the original "row.name" only worked via
# partial argument matching.
write.table(MainData2, file = "tidyMainData.txt",row.names = FALSE)
| /c3/run_analysis.R | no_license | karmenr/datasciencecoursera | R | false | false | 2,771 | r | if(!file.exists("./c3")){dir.create("./c3")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./c3/Dataset.zip",method="curl")
unzip(zipfile="./c3/Dataset.zip",exdir="./c3")
fpath <- file.path("./c3" , "UCI HAR Dataset")
## Read all the files
DataActivityTest <- read.table(file.path(fpath, "test" , "Y_test.txt" ),header = FALSE)
DataActivityTrain <- read.table(file.path(fpath, "train", "Y_train.txt"),header = FALSE)
DataSubjectTrain <- read.table(file.path(fpath, "train", "subject_train.txt"),header = FALSE)
DataSubjectTest <- read.table(file.path(fpath, "test" , "subject_test.txt"),header = FALSE)
DataFeaturesTest <- read.table(file.path(fpath, "test" , "X_test.txt" ),header = FALSE)
DataFeaturesTrain <- read.table(file.path(fpath, "train", "X_train.txt"),header = FALSE)
## Combine different Training and Testing Data
DataSubject <- rbind(DataSubjectTrain, DataSubjectTest)
DataActivity <- rbind(DataActivityTrain, DataActivityTest)
DataFeatures <- rbind(DataFeaturesTrain, DataFeaturesTest)
## Name the colums and combine Subjects and Activity Data
names(DataSubject) <- c("subject")
names(DataActivity) <- c("activity")
DataFeaturesNames <- read.table(file.path(fpath, "features.txt"),header = FALSE)
names(DataFeatures) <- DataFeaturesNames$V2
DataCombine <- cbind(DataSubject, DataActivity)
##Merges the training and the test sets to create one Data set.
MainData <- cbind(DataFeatures, DataCombine)
##Extracts only the measurements on the mean and standard deviation for each measurement.
subDataFeaturesNames <- DataFeaturesNames$V2[grep("(mean|std)\\(\\)", DataFeaturesNames$V2)]
##Uses descriptive activity names to name the activities in the Data set
selectedNames <- c(as.character(subDataFeaturesNames), "subject", "activity" )
MainData <- subset(MainData,select=selectedNames)
activityLabels <- read.table(file.path(fpath, "activity_labels.txt"),header = FALSE)
##Appropriately labels the Data set with descriptive variable names.
names(MainData) <- gsub("^t", "time", names(MainData))
names(MainData) <- gsub("^f", "frequency", names(MainData))
names(MainData) <- gsub("Acc", "Accelerometer", names(MainData))
names(MainData) <- gsub("Gyro", "Gyroscope", names(MainData))
names(MainData) <- gsub("Mag", "Magnitude", names(MainData))
names(MainData) <- gsub("BodyBody", "Body", names(MainData))
##From the Data set in step 4, creates a second, independent tidy Data set with the average of each variable for each activity and each subject.
library(plyr);
MainData2 <- aggregate(. ~subject + activity, MainData, mean)
MainData2 <- MainData2[order(MainData2$subject,MainData2$activity),]
write.table(MainData2, file = "tidyMainData.txt",row.name = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daily.data.R
\name{daily.data}
\alias{daily.data}
\title{Calculation of daily statistics for dendrometer data}
\usage{
daily.data(df, TreeNum)
}
\arguments{
\item{df}{dataframe with first column containing date and time in the format \code{yyyy-mm-dd HH:MM:SS} and the dendrometer data in following columns.}
\item{TreeNum}{numerical value indicating the tree to be analysed. E.g. '1' refers to the first dendrometer data column in \emph{df}.}
}
\value{
A dataframe with the daily statistics of the dendrometer data that contains:
\tabular{llll}{
\strong{Columns}\tab\tab \strong{Description}\cr
\code{DOY}\tab\tab The day of year.\cr
\code{min}\tab\tab The minimum value record for the corresponding day.\cr
\code{Time_min}\tab\tab The time when minimum value recorded for the corresponding day.\cr
\code{max}\tab\tab The maximum value record for the corresponding day.\cr
\code{Time_max}\tab\tab The time when maximum value recorded for the corresponding day.\cr
\code{mean}\tab\tab The daily average value of the dendrometer reading.\cr
\code{median}\tab\tab The daily median value of the dendrometer reading.\cr
\code{amplitude}\tab\tab The difference between daily maximum and daily minimum.\cr
}
}
\description{
This function calculates various statistics of dendrometer data on a daily basis. The daily statistics includes the daily maximum and minimum with their corresponding times and daily amplitude (difference between daily maximum and minimum). See \href{https://doi.org/10.1016/j.agrformet.2012.08.002}{King et al. (2013)} for details.
}
\examples{
library(dendRoAnalyst)
data(nepa17)
daily_stats<-daily.data(df=nepa17, TreeNum=1)
head(daily_stats,10)
}
\references{
King G, Fonti P, Nievergelt D, Büntgen U, Frank D (2013) Climatic drivers of hourly to yearly tree radius variations along a 6°C natural warming gradient. Agricultural and Forest Meteorology 168:36–46. https://doi.org/10.1016/j.agrformet.2012.08.002
}
| /man/daily.data.Rd | no_license | sugam72-os/dendRoAnalyst-1 | R | false | true | 2,043 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daily.data.R
\name{daily.data}
\alias{daily.data}
\title{Calculation of daily statistics for dendrometer data}
\usage{
daily.data(df, TreeNum)
}
\arguments{
\item{df}{dataframe with first column containing date and time in the format \code{yyyy-mm-dd HH:MM:SS} and the dendrometer data in following columns.}
\item{TreeNum}{numerical value indicating the tree to be analysed. E.g. '1' refers to the first dendrometer data column in \emph{df}.}
}
\value{
A dataframe with the daily statistics of the dendrometer data that contains:
\tabular{llll}{
\strong{Columns}\tab\tab \strong{Description}\cr
\code{DOY}\tab\tab The day of year.\cr
\code{min}\tab\tab The minimum value record for the corresponding day.\cr
\code{Time_min}\tab\tab The time when minimum value recorded for the corresponding day.\cr
\code{max}\tab\tab The maximum value record for the corresponding day.\cr
\code{Time_max}\tab\tab The time when maximum value recorded for the corresponding day.\cr
\code{mean}\tab\tab The daily average value of the dendrometer reading.\cr
\code{median}\tab\tab The daily median value of the dendrometer reading.\cr
\code{amplitude}\tab\tab The difference between daily maximum and daily minimum.\cr
}
}
\description{
This function calculates various statistics of dendrometer data on a daily basis. The daily statistics includes the daily maximum and minimum with their corresponding times and daily amplitude (difference between daily maximum and minimum). See \href{https://doi.org/10.1016/j.agrformet.2012.08.002}{King et al. (2013)} for details.
}
\examples{
library(dendRoAnalyst)
data(nepa17)
daily_stats<-daily.data(df=nepa17, TreeNum=1)
head(daily_stats,10)
}
\references{
King G, Fonti P, Nievergelt D, Büntgen U, Frank D (2013) Climatic drivers of hourly to yearly tree radius variations along a 6°C natural warming gradient. Agricultural and Forest Meteorology 168:36–46. https://doi.org/10.1016/j.agrformet.2012.08.002
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{setMod}
\alias{setMod}
\title{Define a model}
\usage{
setMod(mf, model_name, var_names)
}
\arguments{
\item{mf}{data.frame}
\item{model_name}{character}
\item{var_names}{character}
}
\value{
data.frame
}
\description{
Define a model
}
| /man/setMod.Rd | permissive | leonstirk/modman | R | false | true | 329 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{setMod}
\alias{setMod}
\title{Define a model}
\usage{
setMod(mf, model_name, var_names)
}
\arguments{
\item{mf}{data.frame}
\item{model_name}{character}
\item{var_names}{character}
}
\value{
data.frame
}
\description{
Define a model
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spagi2_master.R
\name{convert_geneID_to_geneSymbol}
\alias{convert_geneID_to_geneSymbol}
\title{convert_geneID_to_geneSymbol}
\usage{
convert_geneID_to_geneSymbol(
user.cell.data,
species = "hsapiens",
geneSymbol.toMapping = "external_gene_name",
geneID.forMapping = "ensembl_gene_id",
collapse.method = "max"
)
}
\arguments{
\item{user.cell.data}{Cell/tissue profile with valid gene IDs}
\item{species}{The species of the given gene IDs or cell/tissue profile, default is "hsapiens"}
\item{geneSymbol.toMapping}{The valid gene ID names (that has ensembl id mapping in the biomaRt database) to which the user.cell.data will be converted, default is "external_gene_name". For more details to get the valid gene ids for a species please see the find_valid_geneID() function of the package.}
\item{geneID.forMapping}{The valid gene ID names (that has ensembl id mapping in the biomaRt database) of the user.cell.data, default is "ensembl_gene_id". For more details to get the valid gene ids for a species please see the find_valid_geneID() function of the package.}
\item{collapse.method}{Used when one external_gene_name has more than one probe; usually two options are used: maximum probe value (i.e., "max") or average of probe values (i.e., "mean"). The default is "max".}
}
\value{
This function returns cell/tissue profiles with converted gene symbols
}
\description{
This function converts the gene IDs of a vector, a matrix, or a named cell/tissue expression profile to gene symbols (by default, from Ensembl gene IDs)
}
\details{
This function converts gene IDs of a named cell/tissue expression profile to gene symbols
}
\examples{
query.data<-matrix(sample(1:10, 100, replace=TRUE),10,10)
rownames(query.data)<-c("ENSG00000160202", "ENSG00000109846", "ENSG00000100053", "ENSG00000007372", "ENSG00000181449", "ENSG00000117707", "ENSG00000138083", "ENSG00000150938", "ENSG00000244752", "ENSG00000101144")
colnames(query.data)<-c("cell1", "cell1", "cell1", "cell2", "cell2", "cell2", "cell3", "cell3", "cell3", "cell3")
convert_geneID_to_geneSymbol(query.data)
}
| /man/convert_geneID_to_geneSymbol.Rd | no_license | humayun2017/SPAGI2 | R | false | true | 2,143 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spagi2_master.R
\name{convert_geneID_to_geneSymbol}
\alias{convert_geneID_to_geneSymbol}
\title{convert_geneID_to_geneSymbol}
\usage{
convert_geneID_to_geneSymbol(
user.cell.data,
species = "hsapiens",
geneSymbol.toMapping = "external_gene_name",
geneID.forMapping = "ensembl_gene_id",
collapse.method = "max"
)
}
\arguments{
\item{user.cell.data}{Cell/tissue profile with valid gene IDs}
\item{species}{The species of the given gene IDs or cell/tissue profile, default is "hsapiens"}
\item{geneSymbol.toMapping}{The valid gene ID names (that has ensembl id mapping in the biomaRt database) to which the user.cell.data will be converted, default is "external_gene_name". For more details to get the valid gene ids for a species please see the find_valid_geneID() function of the package.}
\item{geneID.forMapping}{The valid gene ID names (that has ensembl id mapping in the biomaRt database) of the user.cell.data, default is "ensembl_gene_id". For more details to get the valid gene ids for a species please see the find_valid_geneID() function of the package.}
\item{collapse.method}{Used when one external_gene_name has more then one probes, usually two options are used, maximum probe value (i.e., "max") or average of probe values (i.e., "mean"), default is "max".}
}
\value{
This function returns cell/tissue profiles with converted gene symbols
}
\description{
This function converts either a vector of gene IDs, a matrix of gene or a named cell / tissue expression profile or matrix to ensembl IDs
}
\details{
This function converts gene IDs of a named cell/tissue expression profile to gene symbols
}
\examples{
query.data<-matrix(sample(1:10, 100, replace=TRUE),10,10)
rownames(query.data)<-c("ENSG00000160202", "ENSG00000109846", "ENSG00000100053", "ENSG00000007372", "ENSG00000181449", "ENSG00000117707", "ENSG00000138083", "ENSG00000150938", "ENSG00000244752", "ENSG00000101144")
colnames(query.data)<-c("cell1", "cell1", "cell1", "cell2", "cell2", "cell2", "cell3", "cell3", "cell3", "cell3")
convert_geneID_to_geneSymbol(query.data)
}
|
# Evaluate the two-dimensional "peaks" test function on a set of points.
#
# x: numeric matrix whose first two columns are the (x1, x2) coordinates
#
# Returns a one-column matrix of function values, carrying over the row
# names of x and with the column named "y1".
peaks <- function(x)
{
  a <- x[, 1]
  b <- x[, 2]
  # Sum of three Gaussian-weighted terms (MATLAB-style peaks surface).
  term1 <- 10 * (1 - a)^2 * exp(-(a^2) - (b + 1)^2)
  term2 <- 20 * (a / 5 - a^3 - b^5) * exp(-a^2 - b^2)
  term3 <- (1 / 3) * exp(-(a + 1)^2 - b^2)
  values <- term1 - term2 - term3
  out <- matrix(values, nrow = nrow(x))
  dimnames(out) <- list(rownames(x), paste("y", seq_len(ncol(out)), sep = ""))
  return(out)
}
| /emma/R/peaks.R | no_license | ingted/R-Examples | R | false | false | 293 | r | peaks <- function(x)
{
x1 <- x[,1]
x2 <- x[,2]
y <- 10*(1-x1)^2*exp(-(x1^2)-(x2+1)^2)- 20*(x1/5 - x1^3 - x2^5)*exp(-x1^2-x2^2)- 1/3*exp(-(x1+1)^2 - x2^2)
y <- matrix(y,nrow=nrow(x))
rownames(y) <- rownames(x)
colnames(y) <- paste("y",seq(1,ncol(y)),sep="")
return(y)
}
|
# Exploratory look at member reputation in the BIO community.
# NOTE(review): the input path is a hard-coded Windows location; adjust it
# before running this script elsewhere.
dados = read.csv("C:\\Users\\thiag_000\\Desktop\\analises\\participacaoComunidades.csv", header = TRUE, sep = ';')
# Keep only the rows belonging to the BIO community.
dadosBio = subset(dados, dados$comunidade == 'BIO')
# Five-number summary of the reputation scores.
resumo<- summary(dadosBio$reputacao)
print(resumo)
# Distribution plots of the reputation scores.
hist(dadosBio$reputacao, breaks = 50)
boxplot(dadosBio$reputacao)
# Toy vectors for the Wilcoxon rank-sum test printed just below.
x <- c(1,2,3)
y <- c(1,2,3,4)
print(wilcox.test(x,y)) | /analysis/geral/reputacao.R | no_license | thiagoprocaci/experts-semantic-analysis | R | false | false | 342 | r | dados = read.csv("C:\\Users\\thiag_000\\Desktop\\analises\\participacaoComunidades.csv", header = TRUE, sep = ';')
dadosBio = subset(dados, dados$comunidade == 'BIO')
resumo<- summary(dadosBio$reputacao)
print(resumo)
hist(dadosBio$reputacao, breaks = 50)
boxplot(dadosBio$reputacao)
x <- c(1,2,3)
y <- c(1,2,3,4)
print(wilcox.test(x,y)) |
# Compare mouse-brain scATAC cluster gene scores against the DropViz
# (Saunders et al.) metacell expression atlas: correlate gene scores with
# DropViz metacells on the shared marker genes and draw a heatmap of the
# row-rescaled correlations, split by broad cell class.
library(matchingR)
library(data.table)
library(dplyr)
library(ComplexHeatmap)
library(BuenColors)
# Import gene scores (rows = genes, columns = ATAC clusters).
atac_gene_scores <- fread("../output/revision-mousebrain_rawGeneScores.tsv") %>% data.frame()
rownames(atac_gene_scores) <- as.character(fread("../output/mousebrain_geneAnnotations.tsv", header = FALSE)[[1]])
# Rename the ATAC clusters from the old to the new annotation labels.
mb_anno <- read.table("../data/revision-cluster_annotations.tsv", header = TRUE, stringsAsFactors = FALSE, sep = "\t", comment.char = "")
mb_trans <- as.character(mb_anno$new); names(mb_trans) <- as.character(mb_anno$old)
colnames(atac_gene_scores) <- mb_trans[as.character(colnames(atac_gene_scores))]
ordering <- as.character(mb_anno$new)
# NOTE(review): `class` shadows base::class here and is not used again
# below -- confirm it can be removed.
class <- as.character(mb_anno$type)
atac_gene_scores <- atac_gene_scores[,ordering]
# Import DropViz metacell annotation and expression matrices.
anno_dropviz <- readRDS("../data/dropviz/annotation.BrainCellAtlas_Saunders_version_2018.04.01.RDS")
data_dropviz <- readRDS("../data/dropviz/metacells.BrainCellAtlas_Saunders_version_2018.04.01.RDS")
# Determine overlapping genes: split the "-"/"." separated marker strings
# into individual gene symbols, then keep markers present in both matrices.
type_markers <- lapply(as.character(anno_dropviz$type_marker), function(x) strsplit(x, "-|[.]")[[1]]) %>% unlist() %>% unique() %>% as.character()
class_markers <- lapply(as.character(anno_dropviz$class_marker), function(x) strsplit(x, "-|[.]")[[1]]) %>% unlist() %>% unique() %>% as.character()
gene_set <- unique(intersect(rownames(data_dropviz), rownames(atac_gene_scores)))
gene_set2 <- intersect(c(as.character(type_markers), as.character(class_markers)), gene_set)
atac_gene_scores_s <- atac_gene_scores[gene_set2, ]
dropviz_s <- data_dropviz[gene_set2,]
# Spearman correlation; rows = DropViz metacells, columns = ATAC clusters.
cormat <- t(cor(atac_gene_scores_s, dropviz_s, method = "spearman"))
# Sanity check: metacell columns must line up with the annotation rows.
stopifnot(all(colnames(data_dropviz) == anno_dropviz$tissue_subcluster))
keep_c <- c("Excite-NEURON", "Inhibit-NEURON", "MICROGLIA", "ENDOTHELIAL", "ASTROCYTE", "OLIGODENDROCYTE", "POLYDENDROCYTE")
# Split NEURON into excitatory/inhibitory using the GABAergic class marker
# (a "G" in class_marker), and strip the _TIP/_STALK endothelial suffixes.
str_b <- as.character(anno_dropviz$class)
str_b <- ifelse(str_b == "NEURON", ifelse(grepl("G", anno_dropviz$class_marker), "Inhibit-NEURON", "Excite-NEURON"), str_b)
str_b <- gsub("_TIP", "", gsub("_STALK", "", str_b))
# Restrict to the broad classes of interest.
boo <- str_b %in% keep_c
str_go <- str_b[boo]
# Min-max rescale each metacell row of the correlation matrix to [0, 1].
cormat.mm <- t(apply(cormat, 1, function(x)(x-min(x))/(max(x)-min(x))))
splitMe2 <- as.numeric(factor(str_go, levels = keep_c))
# Draw the heatmap split by broad class and save it as a PDF.
pdf(paste0("../output/mouse-brain-global-assign.pdf"), width=4.2, height=3)
hm <- Heatmap(cormat.mm[boo,],
col=as.character(jdb_palette("brewer_spectra",type="continuous")),
show_row_names = FALSE,
cluster_columns = FALSE,
cluster_rows = TRUE,
split = splitMe2,
show_column_names = FALSE)
hm
dev.off()
| /mousebrain/code/05_optimal_pairing_dropviz-heatmap.R | no_license | ning-liang/dscATAC_analysis_code | R | false | false | 2,625 | r | library(matchingR)
# (Continuation of the DropViz-vs-snATAC comparison; library(matchingR) is
# loaded on the preceding line.) Correlates snATAC gene scores with DropViz
# metacells and draws a row-scaled Spearman-correlation heatmap.
library(data.table)
library(dplyr)
library(ComplexHeatmap)
library(BuenColors)
# Import gene scores
atac_gene_scores <- fread("../output/revision-mousebrain_rawGeneScores.tsv") %>% data.frame()
rownames(atac_gene_scores) <- as.character(fread("../output/mousebrain_geneAnnotations.tsv", header = FALSE)[[1]])
# Cluster annotation table: translate old cluster names to new ones and
# reorder the score columns accordingly.
mb_anno <- read.table("../data/revision-cluster_annotations.tsv", header = TRUE, stringsAsFactors = FALSE, sep = "\t", comment.char = "")
mb_trans <- as.character(mb_anno$new); names(mb_trans) <- as.character(mb_anno$old)
colnames(atac_gene_scores) <- mb_trans[as.character(colnames(atac_gene_scores))]
ordering <- as.character(mb_anno$new)
# NOTE(review): `class` is not used again in this script and shadows
# base::class(); consider renaming or removing.
class <- as.character(mb_anno$type)
atac_gene_scores <- atac_gene_scores[,ordering]
# Import DropViz
anno_dropviz <- readRDS("../data/dropviz/annotation.BrainCellAtlas_Saunders_version_2018.04.01.RDS")
data_dropviz <- readRDS("../data/dropviz/metacells.BrainCellAtlas_Saunders_version_2018.04.01.RDS")
# Determine overlapping genes
# Marker fields are "-"/"." separated lists; split them into individual genes.
type_markers <- lapply(as.character(anno_dropviz$type_marker), function(x) strsplit(x, "-|[.]")[[1]]) %>% unlist() %>% unique() %>% as.character()
class_markers <- lapply(as.character(anno_dropviz$class_marker), function(x) strsplit(x, "-|[.]")[[1]]) %>% unlist() %>% unique() %>% as.character()
gene_set <- unique(intersect(rownames(data_dropviz), rownames(atac_gene_scores)))
gene_set2 <- intersect(c(as.character(type_markers), as.character(class_markers)), gene_set)
# Restrict both matrices to the shared marker genes and correlate; after t(),
# rows of cormat are DropViz metacells and columns are ATAC clusters.
atac_gene_scores_s <- atac_gene_scores[gene_set2, ]
dropviz_s <- data_dropviz[gene_set2,]
cormat <- t(cor(atac_gene_scores_s, dropviz_s, method = "spearman"))
stopifnot(all(colnames(data_dropviz) == anno_dropviz$tissue_subcluster))
# Collapse DropViz classes to seven broad groups; NEURON is split into
# inhibitory vs excitatory by the presence of "G" in its class marker
# (assumption: "G" marks GABAergic/inhibitory entries — TODO confirm).
keep_c <- c("Excite-NEURON", "Inhibit-NEURON", "MICROGLIA", "ENDOTHELIAL", "ASTROCYTE", "OLIGODENDROCYTE", "POLYDENDROCYTE")
str_b <- as.character(anno_dropviz$class)
str_b <- ifelse(str_b == "NEURON", ifelse(grepl("G", anno_dropviz$class_marker), "Inhibit-NEURON", "Excite-NEURON"), str_b)
str_b <- gsub("_TIP", "", gsub("_STALK", "", str_b))
boo <- str_b %in% keep_c
str_go <- str_b[boo]
# Min-max scale each row so every metacell's correlations span [0, 1].
cormat.mm <- t(apply(cormat, 1, function(x)(x-min(x))/(max(x)-min(x))))
splitMe2 <- as.numeric(factor(str_go, levels = keep_c))
pdf(paste0("../output/mouse-brain-global-assign.pdf"), width=4.2, height=3)
hm <- Heatmap(cormat.mm[boo,],
col=as.character(jdb_palette("brewer_spectra",type="continuous")),
show_row_names = FALSE,
cluster_columns = FALSE,
cluster_rows = TRUE,
split = splitMe2,
show_column_names = FALSE)
hm
dev.off()
|
# Sample skewness of a numeric vector.
#
# type selects the estimator: 1 = g1 (plain moment ratio), 2 = g1 with a
# sqrt(n(n-1))/(n-2) adjustment (needs n >= 3), 3 = g1 scaled by
# ((n-1)/n)^(3/2), the default.
#
# x     : numeric vector.
# na.rm : drop NA values first? If FALSE and any NA is present, NA is
#         returned immediately (before the 'type' argument is validated).
# Returns a single numeric value, or NA.
skewness <- function(x, na.rm = FALSE, type = 3)
{
  na_mask <- is.na(x)
  if (any(na_mask)) {
    if (!na.rm) {
      return(NA)
    }
    x <- x[!na_mask]
  }
  if (!(type %in% 1:3)) {
    stop("Invalid 'type' argument.")
  }
  n <- length(x)
  centered <- x - mean(x)
  # g1 = m3 / m2^(3/2) written with raw sums:
  # sqrt(n) * sum(c^3) / sum(c^2)^(3/2)
  g1 <- sqrt(n) * sum(centered^3) / (sum(centered^2)^(3/2))
  if (type == 1) {
    g1
  } else if (type == 2) {
    # The adjusted estimator is undefined for fewer than 3 observations.
    if (n < 3) {
      stop("Need at least 3 complete observations.")
    }
    g1 * sqrt(n * (n - 1)) / (n - 2)
  } else {
    g1 * (1 - 1/n)^(3/2)
  }
}
| /R/skewness.R | no_license | baotong/code-recipes | R | false | false | 546 | r | skewness <- function(x, na.rm = FALSE, type = 3)
{
# Handle missing values before anything else: drop them when na.rm is
# TRUE, otherwise bail out with NA (note: this happens before the 'type'
# argument is validated).
if(any(ina <- is.na(x))) {
if(na.rm)
x <- x[!ina]
else
return(NA)
}
if(!(type %in% (1 : 3)))
stop("Invalid 'type' argument.")
n <- length(x)
# Center the data; y below is g1 = m3 / m2^(3/2) written with raw sums.
x <- x - mean(x)
y <- sqrt(n) * sum(x ^ 3) / (sum(x ^ 2) ^ (3/2))
if(type == 2) {
# Adjusted estimator sqrt(n(n-1))/(n-2) * g1; needs at least 3 values.
if(n < 3)
stop("Need at least 3 complete observations.")
y <- y * sqrt(n * (n - 1)) / (n - 2)
} else if(type == 3)
# Default estimator: g1 rescaled by ((n-1)/n)^(3/2).
y <- y * ((1 - 1 / n)) ^ (3/2)
y
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/libpaths.R
\name{with_libpaths}
\alias{local_libpaths}
\alias{with_libpaths}
\title{Library paths}
\usage{
with_libpaths(new, code, action = "replace")
local_libpaths(new, code, action = "replace",
.local_envir = parent.frame())
}
\arguments{
\item{new}{\code{[character]}\cr New library paths}
\item{code}{\code{[any]}\cr Code to execute in the temporary environment}
\item{action}{\code{[character(1)]}\cr should new values \code{"replace"}, \code{"prefix"} or
\code{"suffix"} existing paths.}
\item{.local_envir}{\code{[environment]}\cr The environment to use for scoping.}
}
\value{
\code{[any]}\cr The results of the evaluation of the \code{code}
argument.
}
\description{
Temporarily change library paths.
}
\seealso{
\code{\link{withr}} for examples
\code{\link{.libPaths}}
Other libpaths: \code{\link{with_temp_libpaths}}
}
| /man/with_libpaths.Rd | no_license | lionel-/withr | R | false | true | 921 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/libpaths.R
\name{with_libpaths}
\alias{local_libpaths}
\alias{with_libpaths}
\title{Library paths}
\usage{
with_libpaths(new, code, action = "replace")
local_libpaths(new, code, action = "replace",
.local_envir = parent.frame())
}
\arguments{
\item{new}{\code{[character]}\cr New library paths}
\item{code}{\code{[any]}\cr Code to execute in the temporary environment}
\item{action}{\code{[character(1)]}\cr should new values \code{"replace"}, \code{"prefix"} or
\code{"suffix"} existing paths.}
\item{.local_envir}{\code{[environment]}\cr The environment to use for scoping.}
}
\value{
\code{[any]}\cr The results of the evaluation of the \code{code}
argument.
}
\description{
Temporarily change library paths.
}
\seealso{
\code{\link{withr}} for examples
\code{\link{.libPaths}}
Other libpaths: \code{\link{with_temp_libpaths}}
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{charFileHash}
\alias{charFileHash}
\title{Character File Hash Function}
\usage{
charFileHash(keys, conn)
}
\arguments{
\item{keys}{keys to be hashed}
\item{conn}{a "localDiskConn" object}
}
\description{
Function to be used to specify the file where key-value pairs get stored for local disk connections, useful when keys are scalar strings. Should be passed as the argument \code{fileHashFn} to \code{\link{localDiskConn}}.
}
\details{
You shouldn't need to call this directly other than to experiment with what the output looks like or to get ideas on how to write your own custom hash.
}
\examples{
# connect to empty localDisk directory
path <- file.path(tempdir(), "irisSplit")
unlink(path, recursive = TRUE)
conn <- localDiskConn(path, autoYes = TRUE, fileHashFn = charFileHash)
# add some data
addData(conn, list(list("key1", iris[1:10,])))
addData(conn, list(list("key2", iris[11:110,])))
addData(conn, list(list("key3", iris[111:150,])))
# see that files were stored by their key
list.files(path)
}
\author{
Ryan Hafen
}
\seealso{
\code{localDiskConn}, \code{\link{digestFileHash}}
}
| /man/charFileHash.Rd | permissive | linearregression/datadr | R | false | false | 1,155 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{charFileHash}
\alias{charFileHash}
\title{Character File Hash Function}
\usage{
charFileHash(keys, conn)
}
\arguments{
\item{keys}{keys to be hashed}
\item{conn}{a "localDiskConn" object}
}
\description{
Function to be used to specify the file where key-value pairs get stored for local disk connections, useful when keys are scalar strings. Should be passed as the argument \code{fileHashFn} to \code{\link{localDiskConn}}.
}
\details{
You shouldn't need to call this directly other than to experiment with what the output looks like or to get ideas on how to write your own custom hash.
}
\examples{
# connect to empty localDisk directory
path <- file.path(tempdir(), "irisSplit")
unlink(path, recursive = TRUE)
conn <- localDiskConn(path, autoYes = TRUE, fileHashFn = charFileHash)
# add some data
addData(conn, list(list("key1", iris[1:10,])))
addData(conn, list(list("key2", iris[11:110,])))
addData(conn, list(list("key3", iris[111:150,])))
# see that files were stored by their key
list.files(path)
}
\author{
Ryan Hafen
}
\seealso{
\code{localDiskConn}, \code{\link{digestFileHash}}
}
|
\name{HDF5ArraySeed-class}
\docType{class}
\alias{class:HDF5ArraySeed}
\alias{HDF5ArraySeed-class}
\alias{HDF5ArraySeed}
\alias{path,HDF5ArraySeed-method}
\alias{path<-,HDF5ArraySeed-method}
\alias{type,HDF5ArraySeed-method}
\alias{dim,HDF5ArraySeed-method}
\alias{dimnames,HDF5ArraySeed-method}
\alias{extract_array,HDF5ArraySeed-method}
\alias{is_sparse,HDF5ArraySeed-method}
\alias{is_sparse<-,HDF5ArraySeed-method}
\alias{OLD_extract_sparse_array,HDF5ArraySeed-method}
\alias{chunkdim,HDF5ArraySeed-method}
\alias{updateObject,HDF5ArraySeed-method}
\title{HDF5ArraySeed objects}
\description{
HDF5ArraySeed is a low-level helper class for representing a
pointer to an HDF5 dataset.
Note that an HDF5ArraySeed object is not intended to be used directly.
Most end users will typically create and manipulate a higher-level
\link{HDF5Array} object instead. See \code{?\link{HDF5Array}} for
more information.
}
\usage{
## --- Constructor function ---
HDF5ArraySeed(filepath, name, as.sparse=FALSE, type=NA)
## --- Accessors --------------
\S4method{path}{HDF5ArraySeed}(object)
\S4method{path}{HDF5ArraySeed}(object) <- value
\S4method{dim}{HDF5ArraySeed}(x)
\S4method{dimnames}{HDF5ArraySeed}(x)
\S4method{type}{HDF5ArraySeed}(x)
\S4method{is_sparse}{HDF5ArraySeed}(x)
\S4method{is_sparse}{HDF5ArraySeed}(x) <- value
\S4method{chunkdim}{HDF5ArraySeed}(x)
## --- Data extraction --------
\S4method{extract_array}{HDF5ArraySeed}(x, index)
\S4method{OLD_extract_sparse_array}{HDF5ArraySeed}(x, index)
}
\arguments{
\item{filepath, name, as.sparse, type}{
See \code{?\link{HDF5Array}} for a description of these arguments.
}
\item{object, x}{
An HDF5ArraySeed object or derivative.
}
\item{value}{
For the \code{path()} setter: The new path (as a single string) to the
HDF5 file where the dataset is located.
For the \code{is_sparse()} setter: \code{TRUE} or \code{FALSE}.
}
\item{index}{
See \code{?\link[S4Arrays]{extract_array}} in the \pkg{S4Arrays}
package.
}
}
\details{
The HDF5ArraySeed class has one direct subclass: \link{Dense_H5ADMatrixSeed}.
See \code{?\link{Dense_H5ADMatrixSeed}} for more information.
Note that the implementation of HDF5ArraySeed objects follows the widely
adopted convention of transposing HDF5 matrices when they get loaded into R.
Finally note that an HDF5ArraySeed object supports a very limited set
of methods:
\itemize{
\item \code{path()}: Returns the path to the HDF5 file where the dataset
is located.
\item \code{dim()}, \code{dimnames()}.
\item \code{type()}, \code{extract_array()}, \code{is_sparse()},
\code{OLD_extract_sparse_array()}, \code{chunkdim()}:
These generics are defined and documented in other packages e.g.
in \pkg{S4Arrays} for \code{\link[S4Arrays]{extract_array}()}
and \code{\link[S4Arrays]{is_sparse}()}, and in \pkg{DelayedArray}
for \code{\link[DelayedArray]{OLD_extract_sparse_array}()} and
\code{\link[DelayedArray]{chunkdim}()}.
}
}
\value{
\code{HDF5ArraySeed()} returns an HDF5ArraySeed object.
}
\section{HDF5ArraySeed vs HDF5Array objects}{
In order to have access to the full set of operations that are available
for \link[DelayedArray]{DelayedArray} objects, an HDF5ArraySeed object
first needs to be wrapped in a \link[DelayedArray]{DelayedArray} object,
typically by calling the \code{\link[DelayedArray]{DelayedArray}()}
constructor on it.
This is what the \code{\link{HDF5Array}()} constructor function does.
Note that the result of this wrapping is an \link{HDF5Array} object,
which is just an HDF5ArraySeed object wrapped in a
\link[DelayedArray]{DelayedArray} object.
}
\seealso{
\itemize{
\item \link{HDF5Array} objects.
\item \code{\link[S4Arrays]{type}}, \code{\link[S4Arrays]{extract_array}},
and \code{\link[S4Arrays]{is_sparse}}, in the \pkg{S4Arrays}
package.
\item \code{\link[DelayedArray]{OLD_extract_sparse_array}} and
\code{\link[DelayedArray]{chunkdim}} in the \pkg{DelayedArray}
package.
\item \code{\link{h5ls}} to list the content of an HDF5 file.
}
}
\examples{
library(h5vcData)
tally_file <- system.file("extdata", "example.tally.hfs5",
package="h5vcData")
h5ls(tally_file)
name <- "/ExampleStudy/16/Coverages" # name of the dataset of interest
seed1 <- HDF5ArraySeed(tally_file, name)
seed1
path(seed1)
dim(seed1)
chunkdim(seed1)
seed2 <- HDF5ArraySeed(tally_file, name, as.sparse=TRUE)
seed2
## Alternatively:
is_sparse(seed1) <- TRUE
seed1 # same as 'seed2'
DelayedArray(seed1)
stopifnot(class(DelayedArray(seed1)) == "HDF5Array")
}
\keyword{classes}
\keyword{methods}
| /man/HDF5ArraySeed-class.Rd | no_license | Bioconductor/HDF5Array | R | false | false | 4,774 | rd | \name{HDF5ArraySeed-class}
\docType{class}
\alias{class:HDF5ArraySeed}
\alias{HDF5ArraySeed-class}
\alias{HDF5ArraySeed}
\alias{path,HDF5ArraySeed-method}
\alias{path<-,HDF5ArraySeed-method}
\alias{type,HDF5ArraySeed-method}
\alias{dim,HDF5ArraySeed-method}
\alias{dimnames,HDF5ArraySeed-method}
\alias{extract_array,HDF5ArraySeed-method}
\alias{is_sparse,HDF5ArraySeed-method}
\alias{is_sparse<-,HDF5ArraySeed-method}
\alias{OLD_extract_sparse_array,HDF5ArraySeed-method}
\alias{chunkdim,HDF5ArraySeed-method}
\alias{updateObject,HDF5ArraySeed-method}
\title{HDF5ArraySeed objects}
\description{
HDF5ArraySeed is a low-level helper class for representing a
pointer to an HDF5 dataset.
Note that an HDF5ArraySeed object is not intended to be used directly.
Most end users will typically create and manipulate a higher-level
\link{HDF5Array} object instead. See \code{?\link{HDF5Array}} for
more information.
}
\usage{
## --- Constructor function ---
HDF5ArraySeed(filepath, name, as.sparse=FALSE, type=NA)
## --- Accessors --------------
\S4method{path}{HDF5ArraySeed}(object)
\S4method{path}{HDF5ArraySeed}(object) <- value
\S4method{dim}{HDF5ArraySeed}(x)
\S4method{dimnames}{HDF5ArraySeed}(x)
\S4method{type}{HDF5ArraySeed}(x)
\S4method{is_sparse}{HDF5ArraySeed}(x)
\S4method{is_sparse}{HDF5ArraySeed}(x) <- value
\S4method{chunkdim}{HDF5ArraySeed}(x)
## --- Data extraction --------
\S4method{extract_array}{HDF5ArraySeed}(x, index)
\S4method{OLD_extract_sparse_array}{HDF5ArraySeed}(x, index)
}
\arguments{
\item{filepath, name, as.sparse, type}{
See \code{?\link{HDF5Array}} for a description of these arguments.
}
\item{object, x}{
An HDF5ArraySeed object or derivative.
}
\item{value}{
For the \code{path()} setter: The new path (as a single string) to the
HDF5 file where the dataset is located.
For the \code{is_sparse()} setter: \code{TRUE} or \code{FALSE}.
}
\item{index}{
See \code{?\link[S4Arrays]{extract_array}} in the \pkg{S4Arrays}
package.
}
}
\details{
The HDF5ArraySeed class has one direct subclass: \link{Dense_H5ADMatrixSeed}.
See \code{?\link{Dense_H5ADMatrixSeed}} for more information.
Note that the implementation of HDF5ArraySeed objects follows the widely
adopted convention of transposing HDF5 matrices when they get loaded into R.
Finally note that an HDF5ArraySeed object supports a very limited set
of methods:
\itemize{
\item \code{path()}: Returns the path to the HDF5 file where the dataset
is located.
\item \code{dim()}, \code{dimnames()}.
\item \code{type()}, \code{extract_array()}, \code{is_sparse()},
\code{OLD_extract_sparse_array()}, \code{chunkdim()}:
These generics are defined and documented in other packages e.g.
in \pkg{S4Arrays} for \code{\link[S4Arrays]{extract_array}()}
and \code{\link[S4Arrays]{is_sparse}()}, and in \pkg{DelayedArray}
for \code{\link[DelayedArray]{OLD_extract_sparse_array}()} and
\code{\link[DelayedArray]{chunkdim}()}.
}
}
\value{
\code{HDF5ArraySeed()} returns an HDF5ArraySeed object.
}
\section{HDF5ArraySeed vs HDF5Array objects}{
In order to have access to the full set of operations that are available
for \link[DelayedArray]{DelayedArray} objects, an HDF5ArraySeed object
first needs to be wrapped in a \link[DelayedArray]{DelayedArray} object,
typically by calling the \code{\link[DelayedArray]{DelayedArray}()}
constructor on it.
This is what the \code{\link{HDF5Array}()} constructor function does.
Note that the result of this wrapping is an \link{HDF5Array} object,
which is just an HDF5ArraySeed object wrapped in a
\link[DelayedArray]{DelayedArray} object.
}
\seealso{
\itemize{
\item \link{HDF5Array} objects.
\item \code{\link[S4Arrays]{type}}, \code{\link[S4Arrays]{extract_array}},
and \code{\link[S4Arrays]{is_sparse}}, in the \pkg{S4Arrays}
package.
\item \code{\link[DelayedArray]{OLD_extract_sparse_array}} and
\code{\link[DelayedArray]{chunkdim}} in the \pkg{DelayedArray}
package.
\item \code{\link{h5ls}} to list the content of an HDF5 file.
}
}
\examples{
library(h5vcData)
tally_file <- system.file("extdata", "example.tally.hfs5",
package="h5vcData")
h5ls(tally_file)
name <- "/ExampleStudy/16/Coverages" # name of the dataset of interest
seed1 <- HDF5ArraySeed(tally_file, name)
seed1
path(seed1)
dim(seed1)
chunkdim(seed1)
seed2 <- HDF5ArraySeed(tally_file, name, as.sparse=TRUE)
seed2
## Alternatively:
is_sparse(seed1) <- TRUE
seed1 # same as 'seed2'
DelayedArray(seed1)
stopifnot(class(DelayedArray(seed1)) == "HDF5Array")
}
\keyword{classes}
\keyword{methods}
|
##plot2.R
## This file creates a time-series line chart of Global Active Power
## (the plot() call below uses type="l"), based on data downloaded from
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## downloaded on 4-March_2015.
## It is expected that the unzipped file "household_power_consumption.txt" resides in
## the working directory of R.
##
##
## read the textfile into a dataframe ("?" marks missing values in this file)
x<-read.table("household_power_consumption.txt",sep=";",header=TRUE,na.strings="?")
## keep only 1-2 Feb 2007 and build a combined POSIX date-time column
y<-x[(x$Date=="1/2/2007" | x$Date=="2/2/2007"),]
y$DateTime<-strptime(paste(y$Date,y$Time),"%d/%m/%Y %H:%M:%S")
## clear x , release memory
x<-NULL
## write the line chart of Global Active Power over time to plot2.png
## x11()
png(filename="plot2.png", width=480, height=480, units="px",bg="white")
plot(y$DateTime,y$Global_active_power,main="",xlab="", ylab="", type="l")
title(ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | pk28831/ExData_Plotting1 | R | false | false | 817 | r | ##plot2.R
## This file creates a time-series line chart of Global Active Power
## (the plot() call below uses type="l"), based on data downloaded from
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## downloaded on 4-March_2015.
## It is expected that the unzipped file "household_power_consumption.txt" resides in
## the working directory of R.
##
##
## read the textfile into a dataframe ("?" marks missing values in this file)
x<-read.table("household_power_consumption.txt",sep=";",header=TRUE,na.strings="?")
## keep only 1-2 Feb 2007 and build a combined POSIX date-time column
y<-x[(x$Date=="1/2/2007" | x$Date=="2/2/2007"),]
y$DateTime<-strptime(paste(y$Date,y$Time),"%d/%m/%Y %H:%M:%S")
## clear x , release memory
x<-NULL
## write the line chart of Global Active Power over time to plot2.png
## x11()
png(filename="plot2.png", width=480, height=480, units="px",bg="white")
plot(y$DateTime,y$Global_active_power,main="",xlab="", ylab="", type="l")
title(ylab="Global Active Power (kilowatts)")
dev.off()
# Yige Wu @WashU Aug 2021
## make barcode to cell type mapping table for the C3L-00079-N immune
## reclustered subset (per the input file paths below)
# set up libraries and output directory -----------------------------------
## set working directory
# NOTE(review): setwd() inside a script ties it to this machine/layout.
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
library(dplyr)
## set run id (date-stamped version string used in output paths)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies --------------------------------------------------
## input cluster-to-cell-type assignment table
cluster2celltype_df <- readxl::read_excel(path = "./Resources/snRNA_Processed_Data/Cell_Type_Assignment/Individual_Subset/C3L-00079-N_immune_reclustered.20210802.xlsx")
## input seurat object
srat <- readRDS(file = "./Resources/Analysis_Results/recluster/recluster_cell_groups_in_individual_samples/subset_recluster/subset_C3L-00079-N_immune_recluster/20210802.v1/C3L-00079-N.Immune.Reclustered.Res2.RDS")
# map cell type to barcode ------------------------------------------------
# Per-barcode metadata from the Seurat object; join to the cluster
# annotations on (aliquot, cluster id).
barcode2cluster_df <- srat@meta.data
#nrow(barcode2cluster_df)
barcode2cluster_df$individual_barcode <- rownames(barcode2cluster_df)
#unique(barcode2cluster_df$seurat_clusters)
barcode2cluster_df$seurat_clusters <- as.numeric(as.vector(barcode2cluster_df$seurat_clusters))
#unique(barcode2cluster_df$seurat_clusters)
barcode2celltype_df <- merge(barcode2cluster_df,
cluster2celltype_df,
by.x = c("orig.ident", "seurat_clusters"), by.y = c("Aliquot", "Cluster"), all.x = T)
## format: add placeholder columns so the output schema matches the
## integrated-dataset table, then order the columns.
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_type1 = NA) %>%
mutate(Cell_type2 = NA) %>%
mutate(Cell_type3 = NA) %>%
mutate(Cell_type4 = NA) %>%
mutate(Id_TumorManualCluster = NA) %>%
mutate(Id_SeuratCluster = NA) %>%
mutate(integrated_barcode = NA) %>%
mutate(Comment = NA) %>%
mutate(Cell_group13 = NA) %>%
mutate(Cell_group14_w_transitional = NA) %>%
mutate(Cell_group_w_epithelialcelltypes = NA) %>%
select(orig.ident, Cell_type.shorter, Cell_type.detailed,
Cell_group4, Cell_group5, Cell_type1, Cell_type2, Cell_type3, Cell_type4,
Id_TumorManualCluster, Id_SeuratCluster,
individual_barcode, integrated_barcode, Comment,
Cell_group13, Cell_group14_w_transitional, Cell_group_w_epithelialcelltypes)
table(barcode2celltype_df$Cell_type.detailed)
# group detailed immune cell types into major immune cell groups ----------
table(barcode2celltype_df$Cell_type.shorter)
barcode2celltype_df$Cell_group13 <- barcode2celltype_df$Cell_type.shorter
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Macrophages", "Macrophages proliferating", "TRM", "Monocytes")] <- "Macrophages"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("B-cells", "Plasma")] <- "B-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("CD4 CTL", "CD4 T-cells", "CD4 T-cells activated", "CD4 T-cells naive", "Tregs")] <- "CD4+ T-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("CD8 CTL", "CD8 CTL exhausted", "CD8 T-cells preexhausted")] <- "CD8+ T-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("cDC", "pDC")] <- "DC"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("NK cells strong", "NK cells weak")] <- "NK cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Basophils", "CD4/CD8 proliferating", "Mixed myeloid/lymphoid", "Mast cells")] <- "Immune others"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Transitional cells", "Tumor-like cells", "EMT tumor cells")] <- "Tumor cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Normal-like cells")] <- "Normal epithelial cells"
table(barcode2celltype_df$Cell_group13)
# make a new group for the transitional cells -----------------------------
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_group14_w_transitional = ifelse(Cell_type.shorter == "EMT tumor cells", "EMT tumor cells", Cell_group13))
table(barcode2celltype_df$Cell_group14_w_transitional)
# make a new group for the epithelial cell types -----------------------------
# NOTE(review): this guard tests Cell_type.shorter == "Normal epithelial cells",
# but above only Cell_group13 (not Cell_type.shorter) is set to that value
# ("Normal-like cells" is the shorter label) — confirm Cell_type.shorter can
# actually carry "Normal epithelial cells" here.
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_group_w_epithelialcelltypes = ifelse(Cell_type.shorter == "Normal epithelial cells", Cell_type.detailed, Cell_group13))
table(barcode2celltype_df$Cell_group_w_epithelialcelltypes)
# write output ------------------------------------------------------------
file2write <- paste0(dir_out, "C3L-00079-N.", "Barcode2CellType.", run_id, ".tsv")
write.table(x = barcode2celltype_df, file = file2write, quote = F, sep = "\t", row.names = F)
| /annotate_barcode/individual_sample/map_celltype_for_C3L-00079-N_immune_reclustered.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 4,965 | r | # Yige Wu @WashU Aug 2021
## make barcode to cell type mapping table for the C3L-00079-N immune
## reclustered subset (per the input file paths below)
# set up libraries and output directory -----------------------------------
## set working directory
# NOTE(review): setwd() inside a script ties it to this machine/layout.
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
library(dplyr)
## set run id (date-stamped version string used in output paths)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies --------------------------------------------------
## input cluster-to-cell-type assignment table
cluster2celltype_df <- readxl::read_excel(path = "./Resources/snRNA_Processed_Data/Cell_Type_Assignment/Individual_Subset/C3L-00079-N_immune_reclustered.20210802.xlsx")
## input seurat object
srat <- readRDS(file = "./Resources/Analysis_Results/recluster/recluster_cell_groups_in_individual_samples/subset_recluster/subset_C3L-00079-N_immune_recluster/20210802.v1/C3L-00079-N.Immune.Reclustered.Res2.RDS")
# map cell type to barcode ------------------------------------------------
# Per-barcode metadata from the Seurat object; join to the cluster
# annotations on (aliquot, cluster id).
barcode2cluster_df <- srat@meta.data
#nrow(barcode2cluster_df)
barcode2cluster_df$individual_barcode <- rownames(barcode2cluster_df)
#unique(barcode2cluster_df$seurat_clusters)
barcode2cluster_df$seurat_clusters <- as.numeric(as.vector(barcode2cluster_df$seurat_clusters))
#unique(barcode2cluster_df$seurat_clusters)
barcode2celltype_df <- merge(barcode2cluster_df,
cluster2celltype_df,
by.x = c("orig.ident", "seurat_clusters"), by.y = c("Aliquot", "Cluster"), all.x = T)
## format: add placeholder columns so the output schema matches the
## integrated-dataset table, then order the columns.
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_type1 = NA) %>%
mutate(Cell_type2 = NA) %>%
mutate(Cell_type3 = NA) %>%
mutate(Cell_type4 = NA) %>%
mutate(Id_TumorManualCluster = NA) %>%
mutate(Id_SeuratCluster = NA) %>%
mutate(integrated_barcode = NA) %>%
mutate(Comment = NA) %>%
mutate(Cell_group13 = NA) %>%
mutate(Cell_group14_w_transitional = NA) %>%
mutate(Cell_group_w_epithelialcelltypes = NA) %>%
select(orig.ident, Cell_type.shorter, Cell_type.detailed,
Cell_group4, Cell_group5, Cell_type1, Cell_type2, Cell_type3, Cell_type4,
Id_TumorManualCluster, Id_SeuratCluster,
individual_barcode, integrated_barcode, Comment,
Cell_group13, Cell_group14_w_transitional, Cell_group_w_epithelialcelltypes)
table(barcode2celltype_df$Cell_type.detailed)
# group detailed immune cell types into major immune cell groups ----------
table(barcode2celltype_df$Cell_type.shorter)
barcode2celltype_df$Cell_group13 <- barcode2celltype_df$Cell_type.shorter
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Macrophages", "Macrophages proliferating", "TRM", "Monocytes")] <- "Macrophages"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("B-cells", "Plasma")] <- "B-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("CD4 CTL", "CD4 T-cells", "CD4 T-cells activated", "CD4 T-cells naive", "Tregs")] <- "CD4+ T-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("CD8 CTL", "CD8 CTL exhausted", "CD8 T-cells preexhausted")] <- "CD8+ T-cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("cDC", "pDC")] <- "DC"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("NK cells strong", "NK cells weak")] <- "NK cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Basophils", "CD4/CD8 proliferating", "Mixed myeloid/lymphoid", "Mast cells")] <- "Immune others"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Transitional cells", "Tumor-like cells", "EMT tumor cells")] <- "Tumor cells"
barcode2celltype_df$Cell_group13[barcode2celltype_df$Cell_group13 %in% c("Normal-like cells")] <- "Normal epithelial cells"
table(barcode2celltype_df$Cell_group13)
# make a new group for the transitional cells -----------------------------
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_group14_w_transitional = ifelse(Cell_type.shorter == "EMT tumor cells", "EMT tumor cells", Cell_group13))
table(barcode2celltype_df$Cell_group14_w_transitional)
# make a new group for the epithelial cell types -----------------------------
# NOTE(review): this guard tests Cell_type.shorter == "Normal epithelial cells",
# but above only Cell_group13 (not Cell_type.shorter) is set to that value
# ("Normal-like cells" is the shorter label) — confirm Cell_type.shorter can
# actually carry "Normal epithelial cells" here.
barcode2celltype_df <- barcode2celltype_df %>%
mutate(Cell_group_w_epithelialcelltypes = ifelse(Cell_type.shorter == "Normal epithelial cells", Cell_type.detailed, Cell_group13))
table(barcode2celltype_df$Cell_group_w_epithelialcelltypes)
# write output ------------------------------------------------------------
file2write <- paste0(dir_out, "C3L-00079-N.", "Barcode2CellType.", run_id, ".tsv")
write.table(x = barcode2celltype_df, file = file2write, quote = F, sep = "\t", row.names = F)
|
# Customer-churn study: build a "three-month churn" label for January 2016
# users by matching them against the March 2016 snapshot, then split the
# selected features into train/test sets.
# NOTE(review): library(MASS) is loaded twice below; the hardcoded Windows
# path ties this script to one machine.
library(leaps)
library(MASS)
library('DMwR')
library(randomForest)
library(MASS)
library(caret)
library(ROCR)
library(e1071)
# Load the full panel and drop the first (index) column.
analysisdata<-read.csv('C:/Users/sshss-pc/Desktop/project/processed/analysisdata.csv', header = TRUE)
analysisdata<-analysisdata[,-1]
# Monthly snapshots keyed by the Timetag column.
january<-analysisdata[which(analysisdata[,"Timetag"]=="2016/1/1"),]
october<-analysisdata[which(analysisdata[,"Timetag"]=="2015/10/1"),]
november<-analysisdata[which(analysisdata[,"Timetag"]=="2015/11/1"),]
december<-analysisdata[which(analysisdata[,"Timetag"]=="2015/12/1"),]
february<-analysisdata[which(analysisdata[,"Timetag"]=="2016/2/1"),]
march<-analysisdata[which(analysisdata[,"Timetag"]=="2016/3/1"),]
# Column 1 is presumably the user id (TODO confirm): `persist` = january rows
# whose user still appears in march; `later` = march rows for january users.
jid<-january[,1]
mid<-march[,1]
persist<-match(mid,jid)
persist<-na.omit(persist)
later<-match(jid,mid)
later<-na.omit(later)
# Users flagged churned in march (column 68 == 1); `churnadd` marks their
# january rows as churners as well.
mchurn<-which(march[later,68]==1)
mchurnid<-march[mchurn,1]
churnadd<-match(mchurnid,jid)
churnadd<-na.omit(churnadd)
# Drop id/date and unused columns by position (magic indices — fragile if
# the input schema changes).
january<-january[,c(-1,-2,-4,-5,-6,-8,-10,-12,-14,-16,-18,-20,-21,-22,-24,-25,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65,-66,-67)]
#january<-january[,c(-1,-2,-4,-5,-6,-25,-68)]
names(january)
# Convert categorical columns to factors; column 40 becomes the label.
january[,40]<-as.factor(january[,40])
january[,42]<-as.factor(january[,42])
january[,43]<-as.factor(january[,43])
january[,44]<-as.factor(january[,44])
january[,45]<-as.factor(january[,45])
january[,46]<-as.factor(january[,46])
january[,48]<-as.factor(january[,48])
january[,49]<-as.factor(january[,49])
january[,50]<-as.factor(january[,50])
january[,51]<-as.factor(january[,51])
# Label: 0 = user persists to march, 1 = disappeared or flagged churned.
# NOTE(review): these assignments write into a factor column — they only
# work if its existing levels include "0" and "1"; confirm on the raw data.
january[persist,40]<-0
january[-persist,40]<-1
january[churnadd,40]<-1
names(january)[40]<-"three_month_churn"
str(january)
nrow(january)
prechurn<-which(january[,40]=="1")
length(prechurn)
# Keep 12 predictor columns plus the label (13th) and make an 80/20
# stratified split.
selejan<-january[,c(49,50,39,3,9,35,5,7,31,20,37,41,40)]
names(selejan)
targetVar<-"three_month_churn"
xVars<-names(selejan[,-13])
set.seed(600)
inTrain <- createDataPartition(y = selejan[,targetVar], list = FALSE, p = .8)
train_jan <- selejan[inTrain,]
test_jan <- selejan[-inTrain,]
stopifnot(nrow(train_jan) + nrow(test_jan) == nrow(selejan))
# Build a model formula of the form `target ~ x1 + x2 + ...`.
#
# targetVar: name of the response variable (character scalar).
# xVars: character vector of predictor names.
# includeIntercept: when FALSE, appends -1 so the intercept is dropped.
# Returns a formula object.
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE) {
  rhs <- paste(xVars, collapse = '+ ')
  if (includeIntercept) {
    as.formula(paste(targetVar, "~", rhs))
  } else {
    as.formula(paste(targetVar, "~", rhs, -1))
  }
}
# Fit and compare five classifiers on the January training split (random
# forest, weighted linear SVM, linear SVM, gradient boosting, SGD-trained
# MLP), evaluating each on the held-out test split.
modelForm <- createModelFormula(targetVar, xVars)
# 10-fold CV with up-sampling inside each fold to counter class imbalance.
ctrl <- trainControl(method = "cv",
                     number = 10,
                     sampling = "up")
# --- Random forest ---
balanced_rf <- caret::train(modelForm,
                            data = train_jan,
                            method = "rf",
                            trControl = ctrl)
Prediction_jan <- predict(balanced_rf, test_jan, type = "prob")
jan_pred <- ifelse(Prediction_jan[,2]> 0.5,1,0)
Actual_jan <- test_jan$three_month_churn
# Fixed: confusionMatrix() requires both arguments to be factors with the
# same levels; the raw 0/1 numeric predictions must be converted first.
confusionMatrix(reference = Actual_jan,
                data = factor(jan_pred, levels = levels(Actual_jan)))
varImp(balanced_rf)
# --- Linear SVM with class weights ---
balanced_svmweight <- caret::train(modelForm,
                                   data = train_jan,
                                   method = "svmLinearWeights",
                                   trControl = ctrl)
Prediction_jan <- predict(balanced_svmweight, test_jan)
# Fixed: test_jan has no one_month_churn column (the target was named
# three_month_churn above), so the original `test_jan$one_month_churn`
# silently yielded NULL.
Actual_jan <- test_jan$three_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_svmweight)
# --- Linear SVM (e1071 backend) ---
balanced_svmlk <- caret::train(modelForm,
                               data = train_jan,
                               method = "svmLinear2",
                               trControl = ctrl)
Prediction_jan <- predict(balanced_svmlk, test_jan)
Actual_jan <- test_jan$three_month_churn  # fixed: was one_month_churn (NULL)
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
# --- Gradient boosting ---
balanced_boosting <- caret::train(modelForm,
                                  data = train_jan,
                                  method = "gbm",
                                  trControl = ctrl)
Prediction_jan <- predict(balanced_boosting, test_jan)
Actual_jan <- test_jan$three_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_boosting)
# --- Multilayer perceptron trained by SGD ---
balanced_descent <- caret::train(modelForm,
                                 data = train_jan,
                                 method = "mlpSGD",
                                 trControl = ctrl)
# Fixed: the original predicted with balanced_boosting here, so the mlpSGD
# model was trained but never actually evaluated.
Prediction_jan <- predict(balanced_descent, test_jan)
Actual_jan <- test_jan$three_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_descent)
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
# Build a three-month churn label for November customers using the January
# roster, then pick predictors via forward stepwise logistic regression.
# Re-derive january from the raw data (the earlier january frame had columns dropped).
january<-analysisdata[which(analysisdata[,"Timetag"]=="2016/1/1"),]
nid<-november[,1]
jid<-january[,1]
# november rows whose customer still exists in january (retained).
persist<-match(jid,nid)
persist<-na.omit(persist)
# january rows whose customer existed in november.
later<-match(nid,jid)
later<-na.omit(later)
# Column 68 is presumably the raw churn flag -- confirm.
jchurn<-which(january[later,68]==1)
# NOTE(review): jchurn indexes january, but the ID is taken from march here;
# this looks like a copy-paste remnant of the section above and should
# presumably be january[...] -- verify before trusting the labels.
jchurnid<-march[jchurn,1]
churnadd<-match(jchurnid,nid)
churnadd<-na.omit(churnadd)
# Same column pruning as the january section; indices below refer to the reduced frame.
november<-november[,c(-1,-2,-4,-5,-6,-8,-10,-12,-14,-16,-18,-20,-21,-22,-24,-25,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65,-66,-67)]
names(november)
november[,40]<-as.factor(november[,40])
november[,42]<-as.factor(november[,42])
november[,43]<-as.factor(november[,43])
november[,44]<-as.factor(november[,44])
november[,45]<-as.factor(november[,45])
november[,46]<-as.factor(november[,46])
november[,48]<-as.factor(november[,48])
november[,49]<-as.factor(november[,49])
november[,50]<-as.factor(november[,50])
november[,51]<-as.factor(november[,51])
# Overwrite column 40 with the churn label (0 retained / 1 churned).
# NOTE(review): as above, assigning 0/1 into a freshly created factor only
# works if "0"/"1" are existing levels -- verify.
november[persist,40]<-0
november[-persist,40]<-1
november[churnadd,40]<-1
names(november)[40]<-"three_month_churn"
str(november)
nrow(november)
prechurn<-which(november[,40]=="1")
length(prechurn)
# Forward stepwise selection between the intercept-only and full logistic models.
nullModel <- glm(formula = three_month_churn ~ 1,family=binomial,data=november)
fullModel <- glm(formula = three_month_churn ~ .,family=binomial,data=november)
nove_Selection <- step(nullModel, scope=list(lower=nullModel, upper=fullModel), direction="forward")
summary(nove_Selection)
# Selected predictor names (drop the intercept row).
nove_variable <- rownames(summary(nove_Selection)$coefficients)[-1]
match(nove_variable,names(november))
# Manually transcribed column positions of the selected variables; col 20 is the target.
selenove<-november[,c(49,1,47,5,26,12,22,28,31,15,34,7,44,3,29,10,48,46,30,40)]
names(selenove)
targetVar<-"three_month_churn"
xVars<-names(selenove[,-20])
set.seed(600)
inTrain <- createDataPartition(y = selenove[,targetVar], list = FALSE, p = .8)
train_nove <- selenove[inTrain,]
test_nove <- selenove[-inTrain,]
stopifnot(nrow(train_nove) + nrow(test_nove) == nrow(selenove))
# NOTE(review): trainweight hard-codes the training-set size (11012) and is
# never passed to any model below -- dead code unless wired into glm(weights=).
trainweight<-as.vector(rep(NA,11012))
trainchurn<-which(train_nove[,20]=="1")
trainweight[trainchurn]<-2
trainweight[-trainchurn]<-1
trainweight
testchurn<-which(test_nove[,20]=="1")
# Assemble a formula `target ~ x1 + x2 + ...` from character inputs.
#
# targetVar: response name; xVars: predictor names;
# includeIntercept = FALSE appends -1 to suppress the intercept.
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE) {
  pieces <- c(targetVar, "~", paste(xVars, collapse = '+ '))
  if (!includeIntercept) {
    pieces <- c(pieces, -1)
  }
  as.formula(paste(pieces, collapse = " "))
}
# Fit logistic regression on the stepwise-selected features and evaluate on
# the held-out split (confusion matrix, PR curve, ROC curve, AUC).
modelForm <- createModelFormula(targetVar, xVars)
nove_model <- glm(modelForm,family=binomial(link='logit'), data=train_nove)
nove_fitted <- predict(nove_model
,newdata = test_nove[,xVars]
# Specifying response means we want the probabilities
,type='response')
# Threshold probabilities at 0.5 to get hard 0/1 predictions.
nove_pred <- ifelse(nove_fitted > 0.5,1,0)
# NOTE(review): caret::confusionMatrix expects factors; nove_pred is numeric
# while the reference is a factor -- this presumably needs factor(nove_pred)
# with matching levels. Verify it runs as written.
confusion <- confusionMatrix(data = nove_pred
, reference = test_nove[,targetVar]
, dnn = c("Predicted Churn", 'Actual Churn')
)
confusion
# Precision-recall curve (DMwR) and ROC/AUC (ROCR).
PRcurve(preds = nove_pred, trues = test_nove$three_month_churn)
pr <- prediction(nove_pred, test_nove$three_month_churn)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
plot(prf)
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
# Second, smaller hand-picked feature set; column 10 is the target.
selenove2<-november[,c(49,1,6,50,3,35,51,46,15,40)]
targetVar<-"three_month_churn"
xVars<-names(selenove2[,-10])
set.seed(600)
inTrain <- createDataPartition(y = selenove2[,targetVar], list = FALSE, p = .8)
train_nove <- selenove2[inTrain,]
test_nove <- selenove2[-inTrain,]
stopifnot(nrow(train_nove) + nrow(test_nove) == nrow(selenove2))
# NOTE(review): trainweight (hard-coded length 11012, churn weight 15) is
# computed but never used by the models below.
trainweight<-as.vector(rep(NA,11012))
trainchurn<-which(train_nove[,10]=="1")
trainweight[trainchurn]<-15
trainweight[-trainchurn]<-1
trainweight
testchurn<-which(test_nove[,10]=="1")
# Turn a target name plus predictor names into a model formula.
#
# When includeIntercept is FALSE the spec gains a trailing -1,
# which tells the modelling functions to drop the intercept.
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE) {
  rhs <- paste(xVars, collapse = '+ ')
  spec <- if (includeIntercept) {
    sprintf("%s ~ %s", targetVar, rhs)
  } else {
    sprintf("%s ~ %s -1", targetVar, rhs)
  }
  as.formula(spec)
}
# Refit logistic regression on the reduced feature set, then compare against
# a randomForest model on the same split.
modelForm <- createModelFormula(targetVar, xVars)
nove_model <- glm(modelForm,family=binomial(link='logit'), data=train_nove)
nove_fitted <- predict(nove_model
,newdata = test_nove[,xVars]
# Specifying response means we want the probabilities
,type='response')
nove_pred <- ifelse(nove_fitted > 0.5,1,0)
# NOTE(review): as above, confusionMatrix expects factor inputs; nove_pred is numeric.
confusion <- confusionMatrix(data = nove_pred
, reference = test_nove[,targetVar]
, dnn = c("Predicted Churn", 'Actual Churn')
)
confusion
# Random forest on the same predictors; factor response => classification.
y_nove<-as.factor(train_nove[,targetVar])
x_nove<-train_nove[,xVars]
# NOTE(review): data= is redundant when x and y are given explicitly --
# presumably ignored by randomForest; confirm.
RF_nov <- randomForest(x = x_nove, y = y_nove
, data=train_nove,
importance=TRUE,
# fit 2000 decision trees!
ntree=2000)
varImpPlot(RF_nov)
# type="response" on a classification forest returns predicted class labels.
Prediction_nov <- predict(RF_nov, test_nove, type = "response")
Actual_nov <- test_nove$three_month_churn
confusionMatrix(reference = Actual_nov, data = Prediction_nov)
| /code/model/threemonthapproach_advmodel.R | no_license | sravi9/captainu-csp572 | R | false | false | 10,160 | r | library(leaps)
library(MASS)
library('DMwR')
library(randomForest)
library(MASS)
library(caret)
library(ROCR)
library(e1071)
# Duplicate rendering of the churn script above (this dataset row stores the
# file in both its `content` and `text` columns). Labels January customers
# with a 3-month churn flag and compares several caret classifiers.
analysisdata<-read.csv('C:/Users/sshss-pc/Desktop/project/processed/analysisdata.csv', header = TRUE)
analysisdata<-analysisdata[,-1]
# Monthly snapshots keyed on Timetag; column 1 is presumably the customer ID.
january<-analysisdata[which(analysisdata[,"Timetag"]=="2016/1/1"),]
october<-analysisdata[which(analysisdata[,"Timetag"]=="2015/10/1"),]
november<-analysisdata[which(analysisdata[,"Timetag"]=="2015/11/1"),]
december<-analysisdata[which(analysisdata[,"Timetag"]=="2015/12/1"),]
february<-analysisdata[which(analysisdata[,"Timetag"]=="2016/2/1"),]
march<-analysisdata[which(analysisdata[,"Timetag"]=="2016/3/1"),]
jid<-january[,1]
mid<-march[,1]
# Customers still present in March are retained; column 68 is presumably a churn flag.
persist<-match(mid,jid)
persist<-na.omit(persist)
later<-match(jid,mid)
later<-na.omit(later)
mchurn<-which(march[later,68]==1)
mchurnid<-march[mchurn,1]
churnadd<-match(mchurnid,jid)
churnadd<-na.omit(churnadd)
january<-january[,c(-1,-2,-4,-5,-6,-8,-10,-12,-14,-16,-18,-20,-21,-22,-24,-25,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65,-66,-67)]
#january<-january[,c(-1,-2,-4,-5,-6,-25,-68)]
names(january)
january[,40]<-as.factor(january[,40])
january[,42]<-as.factor(january[,42])
january[,43]<-as.factor(january[,43])
january[,44]<-as.factor(january[,44])
january[,45]<-as.factor(january[,45])
january[,46]<-as.factor(january[,46])
january[,48]<-as.factor(january[,48])
january[,49]<-as.factor(january[,49])
january[,50]<-as.factor(january[,50])
january[,51]<-as.factor(january[,51])
# NOTE(review): assigning 0/1 into the freshly factored column 40 only works
# if "0"/"1" are existing levels -- verify.
january[persist,40]<-0
january[-persist,40]<-1
january[churnadd,40]<-1
names(january)[40]<-"three_month_churn"
str(january)
nrow(january)
prechurn<-which(january[,40]=="1")
length(prechurn)
selejan<-january[,c(49,50,39,3,9,35,5,7,31,20,37,41,40)]
names(selejan)
targetVar<-"three_month_churn"
xVars<-names(selejan[,-13])
set.seed(600)
inTrain <- createDataPartition(y = selejan[,targetVar], list = FALSE, p = .8)
train_jan <- selejan[inTrain,]
test_jan <- selejan[-inTrain,]
stopifnot(nrow(train_jan) + nrow(test_jan) == nrow(selejan))
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE){
if(includeIntercept){
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ ')))
} else {
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ '), -1))
}
return(modelForm)
}
modelForm <- createModelFormula(targetVar, xVars)
# 10-fold CV with up-sampling to counter class imbalance.
ctrl <- trainControl(method = "cv",
number = 10,
sampling = "up")
balanced_rf <- caret::train(modelForm,
data = train_jan,
method = "rf",
trControl = ctrl)
Prediction_jan <- predict(balanced_rf, test_jan, type = "prob")
jan_pred <- ifelse(Prediction_jan[,2]> 0.5,1,0)
Actual_jan <- test_jan$three_month_churn
# NOTE(review): confusionMatrix expects factors; jan_pred is numeric here.
confusionMatrix(reference = Actual_jan, data = jan_pred)
varImp(balanced_rf)
balanced_svmweight <- caret::train(modelForm,
data = train_jan,
method = "svmLinearWeights",
trControl = ctrl)
Prediction_jan <- predict(balanced_svmweight, test_jan)
# NOTE(review): test_jan has no one_month_churn column (target is
# three_month_churn), so this yields NULL -- likely a copy-paste bug.
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_svmweight)
balanced_svmlk <- caret::train(modelForm,
data = train_jan,
method = "svmLinear2",
trControl = ctrl)
Prediction_jan <- predict(balanced_svmlk, test_jan)
# NOTE(review): same one_month_churn issue as above.
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
balanced_boosting <- caret::train(modelForm,
data = train_jan,
method = "gbm",
trControl = ctrl)
Prediction_jan <- predict(balanced_boosting, test_jan)
Actual_jan <- test_jan$three_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_boosting)
balanced_descent <- caret::train(modelForm,
data = train_jan,
method = "mlpSGD",
trControl = ctrl)
# NOTE(review): predicts with balanced_boosting, not balanced_descent, so the
# mlpSGD model is trained but never evaluated -- likely a bug.
Prediction_jan <- predict(balanced_boosting, test_jan)
Actual_jan <- test_jan$three_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_descent)
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
# Duplicate rendering of the November churn section above: label November
# customers, select features via forward stepwise glm, evaluate glm and
# randomForest models.
january<-analysisdata[which(analysisdata[,"Timetag"]=="2016/1/1"),]
nid<-november[,1]
jid<-january[,1]
persist<-match(jid,nid)
persist<-na.omit(persist)
later<-match(nid,jid)
later<-na.omit(later)
jchurn<-which(january[later,68]==1)
# NOTE(review): indexes march although jchurn came from january -- likely a
# copy-paste bug; presumably should be january[...].
jchurnid<-march[jchurn,1]
churnadd<-match(jchurnid,nid)
churnadd<-na.omit(churnadd)
november<-november[,c(-1,-2,-4,-5,-6,-8,-10,-12,-14,-16,-18,-20,-21,-22,-24,-25,-56,-57,-58,-59,-60,-61,-62,-63,-64,-65,-66,-67)]
names(november)
november[,40]<-as.factor(november[,40])
november[,42]<-as.factor(november[,42])
november[,43]<-as.factor(november[,43])
november[,44]<-as.factor(november[,44])
november[,45]<-as.factor(november[,45])
november[,46]<-as.factor(november[,46])
november[,48]<-as.factor(november[,48])
november[,49]<-as.factor(november[,49])
november[,50]<-as.factor(november[,50])
november[,51]<-as.factor(november[,51])
# Overwrite column 40 with the churn label (0 retained / 1 churned).
november[persist,40]<-0
november[-persist,40]<-1
november[churnadd,40]<-1
names(november)[40]<-"three_month_churn"
str(november)
nrow(november)
prechurn<-which(november[,40]=="1")
length(prechurn)
# Forward stepwise selection between intercept-only and full logistic models.
nullModel <- glm(formula = three_month_churn ~ 1,family=binomial,data=november)
fullModel <- glm(formula = three_month_churn ~ .,family=binomial,data=november)
nove_Selection <- step(nullModel, scope=list(lower=nullModel, upper=fullModel), direction="forward")
summary(nove_Selection)
nove_variable <- rownames(summary(nove_Selection)$coefficients)[-1]
match(nove_variable,names(november))
selenove<-november[,c(49,1,47,5,26,12,22,28,31,15,34,7,44,3,29,10,48,46,30,40)]
names(selenove)
targetVar<-"three_month_churn"
xVars<-names(selenove[,-20])
set.seed(600)
inTrain <- createDataPartition(y = selenove[,targetVar], list = FALSE, p = .8)
train_nove <- selenove[inTrain,]
test_nove <- selenove[-inTrain,]
stopifnot(nrow(train_nove) + nrow(test_nove) == nrow(selenove))
# NOTE(review): trainweight hard-codes the sample size (11012) and is unused below.
trainweight<-as.vector(rep(NA,11012))
trainchurn<-which(train_nove[,20]=="1")
trainweight[trainchurn]<-2
trainweight[-trainchurn]<-1
trainweight
testchurn<-which(test_nove[,20]=="1")
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE){
if(includeIntercept){
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ ')))
} else {
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ '), -1))
}
return(modelForm)
}
modelForm <- createModelFormula(targetVar, xVars)
nove_model <- glm(modelForm,family=binomial(link='logit'), data=train_nove)
nove_fitted <- predict(nove_model
,newdata = test_nove[,xVars]
# Specifying response means we want the probabilities
,type='response')
nove_pred <- ifelse(nove_fitted > 0.5,1,0)
# NOTE(review): confusionMatrix expects factors; nove_pred is numeric.
confusion <- confusionMatrix(data = nove_pred
, reference = test_nove[,targetVar]
, dnn = c("Predicted Churn", 'Actual Churn')
)
confusion
# Precision-recall curve (DMwR) and ROC/AUC (ROCR).
PRcurve(preds = nove_pred, trues = test_nove$three_month_churn)
pr <- prediction(nove_pred, test_nove$three_month_churn)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
plot(prf)
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
# Second, smaller feature set; column 10 is the target.
selenove2<-november[,c(49,1,6,50,3,35,51,46,15,40)]
targetVar<-"three_month_churn"
xVars<-names(selenove2[,-10])
set.seed(600)
inTrain <- createDataPartition(y = selenove2[,targetVar], list = FALSE, p = .8)
train_nove <- selenove2[inTrain,]
test_nove <- selenove2[-inTrain,]
stopifnot(nrow(train_nove) + nrow(test_nove) == nrow(selenove2))
trainweight<-as.vector(rep(NA,11012))
trainchurn<-which(train_nove[,10]=="1")
trainweight[trainchurn]<-15
trainweight[-trainchurn]<-1
trainweight
testchurn<-which(test_nove[,10]=="1")
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE){
if(includeIntercept){
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ ')))
} else {
modelForm <- as.formula(paste(targetVar, "~", paste(xVars, collapse = '+ '), -1))
}
return(modelForm)
}
modelForm <- createModelFormula(targetVar, xVars)
nove_model <- glm(modelForm,family=binomial(link='logit'), data=train_nove)
nove_fitted <- predict(nove_model
,newdata = test_nove[,xVars]
# Specifying response means we want the probabilities
,type='response')
nove_pred <- ifelse(nove_fitted > 0.5,1,0)
confusion <- confusionMatrix(data = nove_pred
, reference = test_nove[,targetVar]
, dnn = c("Predicted Churn", 'Actual Churn')
)
confusion
# Random forest on the same predictors; factor response => classification.
y_nove<-as.factor(train_nove[,targetVar])
x_nove<-train_nove[,xVars]
RF_nov <- randomForest(x = x_nove, y = y_nove
, data=train_nove,
importance=TRUE,
# fit 2000 decision trees!
ntree=2000)
varImpPlot(RF_nov)
Prediction_nov <- predict(RF_nov, test_nove, type = "response")
Actual_nov <- test_nove$three_month_churn
confusionMatrix(reference = Actual_nov, data = Prediction_nov)
|
#' Design a fieldbook.
#'
#' Dispatches on a design code embedded in \code{design} (e.g. "(CRD)",
#' "(RCBD)", "(SPCRD)") and builds a randomized fieldbook data frame for
#' that layout.
#'
#' @param design statistical design
#' @param matl list of germplasm
#' @param reps number of plot repetitions
#' @param msite logical, is this a mother site in a mother baby trial
#' @param lbls labels for germplasm
#' @param checkl list of check germplasm
#' @param bsize block size
#' @param adfn name of additional factor
#' @param adfl levels of additional factor
#' @param startn start number
#' @param seed random seed
#' @param randM randomization method
#' @author Raul Eyzaguirre, Reinhard Simon
#' @return data.frame
#' @export
randomize.design = function(design="(CRD)",
matl, # Vector of material list
reps, # number of repetitions
msite=FALSE, #is mother site in a M & B design
lbls, # short names of labels of genotypes
checkl = NULL, # check genotypes for ABD
bsize=2,# block size only BIB/A01D
adfn=NULL, # name for additional factor
adfl=NULL, # vector of additional factor levels
startn = 1,
seed = 0,
randM="Super-duper"
) {
# NOTE(review): str_detect() needs stringr and the design.*() generators are
# presumably from agricolae -- both must be attached by the caller; confirm.
# NOTE(review): msite, seed and randM are accepted but never used in the
# active branches (msite appears only in the commented-out MBCRD section).
#library(stringr)
# abb is a leftover alias for lbls; it is never used below.
abb=lbls
diseno = NULL
# Unreplicated, non-randomized list: plots numbered sequentially from startn.
if (str_detect(design,"(UDNR)")){
diseno = as.data.frame(matrix(NA, nrow=length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- cbind(Plot=seq(startn, startn+length(matl)-1),Rep=rep(1,length(matl)),X=matl)
colnames(diseno)[1:3] <- c("PLOT", "REP", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# Completely randomized design.
# NOTE(review): this file mixes two generations of the agricolae API --
# here number= without $book, while the RCBD branch uses serie= with $book.
# Only one of these can match the installed agricolae version; verify.
if (str_detect(design,"(CRD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- design.crd(matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# Randomized complete block design (newer agricolae API: serie= and $book).
if (str_detect(design,"(RCBD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3]<- design.rcbd(matl, reps, serie=startn)$book
colnames(diseno) <- c("PLOT", "REP", lbls)
#colnames(diseno) <- c("Plot", "Block", abbreviate(inst[3]), abb)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# if (str_detect(design,"(BIBD)")){
# bib <- design.bib(matl, bsize, number=startn)
# diseno = as.data.frame(matrix(NA, nrow=dim(bib)[1], ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- bib
# colnames(diseno) <- c("PLOT", "REP", lbls)
# # labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
# }
# Latin square design: row blocks (REP) and column blocks (CBLOCK).
if (str_detect(design,"(LSD)")){
diseno = as.data.frame(matrix(NA, nrow=length(matl)^2, ncol=4), stringsAsFactors=F)
diseno[,1:4]<- design.lsd(matl, number=startn)
colnames(diseno) <- c("PLOT", "REP", "CBLOCK", lbls)
# labs = as.data.frame(matrix(c("Plot", "Row Block", "Column Block", lbls), nrow=1), strings.as.factors=F)
}
# Two-factor factorial in a CRD: randomize treatment indices, then map each
# index back to its (additional factor level, genotype) combination.
if (str_detect(design,"(F2CRD)")){
nt <- length(matl)*length(adfl)
tr <- 1:nt
est <- cbind(1:nt, rep(adfl, length(matl)), rep(matl, each=length(adfl)))
diseno = as.data.frame(matrix(NA, nrow=reps*nt, ncol=4), stringsAsFactors=F)
fdcrd <- design.crd(tr, reps)$book
diseno[,1:2] <- fdcrd[,1:2]
ord <- fdcrd[,3]
for (i in 1:(nt*reps)){
diseno[i,3] <- est[est[,1]==ord[i],2]
diseno[i,4] <- est[est[,1]==ord[i],3]
}
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
# We change the adfn label to FACTOR to avoid errors; the previous
# colnames assignment is immediately overwritten (dead code).
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor A", "Factor B", lab), nrow=1), strings.as.factors=F)
}
# Two-factor factorial in an RCBD.
if (str_detect(design,"(F2RCBD)")){
nt <- length(matl)*length(adfl)
tr <- 1:nt
est <- cbind(1:nt, rep(adfl, length(matl)), rep(matl, each=length(adfl)))
diseno = as.data.frame(matrix(NA, nrow=reps*nt, ncol=4), stringsAsFactors=F)
# NOTE(review): unlike the F2CRD branch this does not extract $book from the
# design.rcbd() result -- with the newer agricolae API the subsequent
# fdrcbd[,1:2] indexing would fail; likely a bug. Verify.
fdrcbd <- design.rcbd(tr, reps, number=startn)
diseno[,1:2] <- fdrcbd[,1:2]
ord <- fdrcbd[,3]
for (i in 1:(nt*reps)){
diseno[i,3] <- est[est[,1]==ord[i],2]
diseno[i,4] <- est[est[,1]==ord[i],3]
}
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
# First colnames assignment above is dead code; FACTOR is the final label.
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor A", "Factor B", lab), nrow=1), strings.as.factors=F)
}
# Split plot with plots in a CRD.
if (str_detect(design,"(SPCRD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.split(adfl, matl, reps, "crd", number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Split plot with plots in an RCBD.
if (str_detect(design,"(SPRCBD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.split(adfl,matl,reps, "rcbd", number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP","FACTOR",lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Augmented block design: checks replicated, new material unreplicated.
if (str_detect(design,"(ABD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(checkl)+length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- design.dau(checkl, matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", lbls)
}
# Alpha(0,1) lattice design; bsize is the incomplete-block size.
if (str_detect(design,"(A01D)")){
# GTDM-396
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.alpha(matl, bsize, reps, number=startn)$book[,c(1,5,3,4)]
colnames(diseno) <- c("PLOT", "REP", "BLOCK", lbls)
}
# if (str_detect(design,"(MBCRD)")){
# if(msite){ # use CRD design
# diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- design.crd(matl, reps, number=startn)
# colnames(diseno) <- c("PLOT", "REP", lbls)
# }
# if(!msite){# use unreplicated non-randomized design
# diseno = as.data.frame(matrix(NA, nrow=length(matl), ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- cbind(Plot=seq(startn, startn+length(matl)-1),Rep=rep(1,length(matl)),X=matl)
# colnames(diseno)[1:3] <- c("PLOT", "REP", lbls)
# }
# }
# Strip plot (split block) design.
if (str_detect(design,"(STRIP)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.strip(adfl, matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Returns NULL when no recognized design code matched.
diseno
}
# getMaterialList = function(fpath, id){
# data = read.xlsx2(fpath, sheetName="GermplasmList")
# matl = unique(as.character(data[,id]))
# if(length(matl)!=nrow(data)) matl=NULL
# matl
# }
| /R/randomize.design.R | no_license | CIPTOOLS/fbdesign | R | false | false | 6,834 | r |
#' Design a fieldbook.
#'
#' Dispatches on a design code embedded in \code{design} and builds a
#' randomized fieldbook data frame for that layout.
#'
#' @param design statistical design
#' @param matl list of germplasm
#' @param reps number of plot repetitions
#' @param msite logical, is this a mother site in a mother baby trial
#' @param lbls labels for germplasm
#' @param checkl list of check germplasm
#' @param bsize block size
#' @param adfn name of additional factor
#' @param adfl levels of additional factor
#' @param startn start number
#' @param seed random seed
#' @param randM randomization method
#' @author Raul Eyzaguirre, Reinhard Simon
#' @return data.frame
#' @export
randomize.design = function(design="(CRD)",
matl, # Vector of material list
reps, # number of repetitions
msite=FALSE, #is mother site in a M & B design
lbls, # short names of labels of genotypes
checkl = NULL, # check genotypes for ABD
bsize=2,# block size only BIB/A01D
adfn=NULL, # name for additional factor
adfl=NULL, # vector of additional factor levels
startn = 1,
seed = 0,
randM="Super-duper"
) {
# NOTE(review): str_detect() needs stringr; design.*() presumably come from
# agricolae. msite, seed and randM are accepted but unused in active branches.
#library(stringr)
abb=lbls
diseno = NULL
# Unreplicated, non-randomized list.
if (str_detect(design,"(UDNR)")){
diseno = as.data.frame(matrix(NA, nrow=length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- cbind(Plot=seq(startn, startn+length(matl)-1),Rep=rep(1,length(matl)),X=matl)
colnames(diseno)[1:3] <- c("PLOT", "REP", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# Completely randomized design.
# NOTE(review): mixes agricolae API generations (number= here vs serie=/$book
# in the RCBD branch); only one can match the installed version.
if (str_detect(design,"(CRD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- design.crd(matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# Randomized complete block design.
if (str_detect(design,"(RCBD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3]<- design.rcbd(matl, reps, serie=startn)$book
colnames(diseno) <- c("PLOT", "REP", lbls)
#colnames(diseno) <- c("Plot", "Block", abbreviate(inst[3]), abb)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
}
# if (str_detect(design,"(BIBD)")){
# bib <- design.bib(matl, bsize, number=startn)
# diseno = as.data.frame(matrix(NA, nrow=dim(bib)[1], ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- bib
# colnames(diseno) <- c("PLOT", "REP", lbls)
# # labs = as.data.frame(matrix(c("Plot", "Block or repetition", lbls), nrow=1), strings.as.factors=F)
# }
# Latin square design.
if (str_detect(design,"(LSD)")){
diseno = as.data.frame(matrix(NA, nrow=length(matl)^2, ncol=4), stringsAsFactors=F)
diseno[,1:4]<- design.lsd(matl, number=startn)
colnames(diseno) <- c("PLOT", "REP", "CBLOCK", lbls)
# labs = as.data.frame(matrix(c("Plot", "Row Block", "Column Block", lbls), nrow=1), strings.as.factors=F)
}
# Two-factor factorial in a CRD: randomize indices, map back to combinations.
if (str_detect(design,"(F2CRD)")){
nt <- length(matl)*length(adfl)
tr <- 1:nt
est <- cbind(1:nt, rep(adfl, length(matl)), rep(matl, each=length(adfl)))
diseno = as.data.frame(matrix(NA, nrow=reps*nt, ncol=4), stringsAsFactors=F)
fdcrd <- design.crd(tr, reps)$book
diseno[,1:2] <- fdcrd[,1:2]
ord <- fdcrd[,3]
for (i in 1:(nt*reps)){
diseno[i,3] <- est[est[,1]==ord[i],2]
diseno[i,4] <- est[est[,1]==ord[i],3]
}
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
# We change the adfn label to FACTOR to avoid errors; the assignment above is dead code.
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor A", "Factor B", lab), nrow=1), strings.as.factors=F)
}
# Two-factor factorial in an RCBD.
if (str_detect(design,"(F2RCBD)")){
nt <- length(matl)*length(adfl)
tr <- 1:nt
est <- cbind(1:nt, rep(adfl, length(matl)), rep(matl, each=length(adfl)))
diseno = as.data.frame(matrix(NA, nrow=reps*nt, ncol=4), stringsAsFactors=F)
# NOTE(review): missing $book (compare the F2CRD branch) -- likely a bug
# with the newer agricolae API; verify.
fdrcbd <- design.rcbd(tr, reps, number=startn)
diseno[,1:2] <- fdrcbd[,1:2]
ord <- fdrcbd[,3]
for (i in 1:(nt*reps)){
diseno[i,3] <- est[est[,1]==ord[i],2]
diseno[i,4] <- est[est[,1]==ord[i],3]
}
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor A", "Factor B", lab), nrow=1), strings.as.factors=F)
}
# Split plot with plots in a CRD.
if (str_detect(design,"(SPCRD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.split(adfl, matl, reps, "crd", number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Split plot with plots in an RCBD.
if (str_detect(design,"(SPRCBD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.split(adfl,matl,reps, "rcbd", number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP","FACTOR",lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Augmented block design: checks replicated, new material unreplicated.
if (str_detect(design,"(ABD)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(checkl)+length(matl), ncol=3), stringsAsFactors=F)
diseno[,1:3] <- design.dau(checkl, matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", lbls)
}
# Alpha(0,1) lattice design; bsize is the incomplete-block size.
if (str_detect(design,"(A01D)")){
# GTDM-396
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.alpha(matl, bsize, reps, number=startn)$book[,c(1,5,3,4)]
colnames(diseno) <- c("PLOT", "REP", "BLOCK", lbls)
}
# if (str_detect(design,"(MBCRD)")){
# if(msite){ # use CRD design
# diseno = as.data.frame(matrix(NA, nrow=reps*length(matl), ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- design.crd(matl, reps, number=startn)
# colnames(diseno) <- c("PLOT", "REP", lbls)
# }
# if(!msite){# use unreplicated non-randomized design
# diseno = as.data.frame(matrix(NA, nrow=length(matl), ncol=3), stringsAsFactors=F)
# diseno[,1:3] <- cbind(Plot=seq(startn, startn+length(matl)-1),Rep=rep(1,length(matl)),X=matl)
# colnames(diseno)[1:3] <- c("PLOT", "REP", lbls)
# }
# }
# Strip plot (split block) design.
if (str_detect(design,"(STRIP)")){
diseno = as.data.frame(matrix(NA, nrow=reps*length(matl)*length(adfl), ncol=4), stringsAsFactors=F)
diseno[,1:4] <- design.strip(adfl, matl, reps, number=startn)
colnames(diseno) <- c("PLOT", "REP", adfn, lbls)
colnames(diseno) <- c("PLOT", "REP", "FACTOR", lbls)
# labs = as.data.frame(matrix(c("Plot", "Block or repetition", "Factor in Plots", "Factor in SubPlots", lab), nrow=1), strings.as.factors=F)
}
# Returns NULL when no recognized design code matched.
diseno
}
# getMaterialList = function(fpath, id){
# data = read.xlsx2(fpath, sheetName="GermplasmList")
# matl = unique(as.character(data[,id]))
# if(length(matl)!=nrow(data)) matl=NULL
# matl
# }
|
# Define ratio() function
# Divide x by y elementwise.
#
# x, y: numeric vectors (usual R recycling applies).
# Returns the elementwise quotient x / y.
ratio <- function(x, y) {
  x / y
}
# DataCamp "Writing Functions in R" practice snippets.
# NOTE(review): tricky_list and df are supplied by the exercise environment,
# not defined in this file -- this section only runs inside that environment.
# Call ratio() with arguments 3 and 4
ratio(3,4)
# Rewrite the call to follow best practices
mean(c(1:9, NA),trim = 0.1,na.rm = TRUE)
# 2nd element in tricky_list
typeof(tricky_list[[2]])
# Element called x in tricky_list
typeof(tricky_list[["x"]])
# 2nd element inside the element called x in tricky_list
typeof(tricky_list[['x']][[2]])
# Guess where the regression model is stored
names(tricky_list)
# Use names() and str() on the model element
names(tricky_list$model)
str(tricky_list$model)
# Subset the coefficients element
tricky_list$model$coefficients
# Subset the wt element
tricky_list$model$coefficients[["wt"]]
# Replace the 1:ncol(df) sequence
# seq_along(df) is safe for zero-column data frames, unlike 1:ncol(df).
for (i in seq_along(df)) {
print(median(df[[i]]))
}
# Create new double vector: output
output <- vector("double", ncol(df))
# Alter the loop
# Preallocated output avoids growing a vector inside the loop.
for (i in seq_along(df)) {
output[i] <- median(df[[i]])
}
# Print output
print(output)
########################################################################################33
# Define example vectors x and y
x <- c( 1, 2, NA, 3, NA)
y <- c(NA, 3, NA, 3, 4)
# Count how many elements are missing in both x and y
sum(is.na(x) & is.na(y))
# Turn this snippet into a function: both_na()
# Count the positions that are missing (NA) in both x and y.
#
# x, y: vectors of equal length (recycled otherwise).
# Returns an integer count.
both_na <- function(x, y) {
  shared_missing <- is.na(x) & is.na(y)
  sum(shared_missing)
}
# Replace every NA in x with `replacement`, reporting how many were replaced.
#
# x: a vector possibly containing NAs.
# replacement: the value substituted for each NA.
# Emits a message() with the replacement count and returns the repaired vector.
replace_missings <- function(x, replacement) {
  missing_mask <- is.na(x)
  n_missing <- sum(missing_mask)
  x[missing_mask] <- replacement
  message(n_missing, " missings replaced by the value ", replacement)
  x
}
# Check your new function by running on df$z
# NOTE(review): df is supplied by the exercise environment, not this file.
df$z <- replace_missings(df$z, 0)
######################################################################################################
# Initialize output vector
output <- vector("double", ncol(df))
# Fill in the body of the for loop
for (i in seq_along(df)) {
output[i] <- median(df[[i]])
}
# View the result
output
# Function
# Compute the median of every column of a data frame.
# Returns an unnamed double vector, one entry per column.
col_median <- function(df) {
  medians <- vector("double", ncol(df))
  for (col in seq_along(df)) {
    medians[col] <- median(df[[col]])
  }
  medians
}
# function with function as parameter
# Apply the summary function `fun` to every column of `df`.
# Returns an unnamed numeric vector with one entry per column.
col_summary <- function(df, fun) {
  result <- vector("numeric", length(df))
  for (idx in seq_along(df)) {
    result[idx] <- fun(df[[idx]])
  }
  result
}
col_median(df)
col_summary(df, fun = median) # same results
# Find the column IQRs using col_summary()
col_summary(df, fun = IQR)
# Load the purrr package
library(purrr)
# Use map_dbl() to find column means
map_dbl(df, mean)
# Use map_dbl() to column medians
map_dbl(df, median)
# Find the mean of each column
map_dbl(planes, mean)
# Find the mean of each column, excluding missing values
map_dbl(planes, mean, na.rm = TRUE)
# Find the 5th percentile of each column, excluding missing values
map_dbl(planes, quantile, probs = 0.05, na.rm = TRUE)
# Find the columns that are numeric
map_lgl(df3, is.numeric)
# Find the type of each column
map_chr(df3, typeof)
# Find a summary of each column
map(df3, summary)
# Examine the structure of cyl
str(cyl)
# Extract the first element into four_cyls
four_cyls <- cyl[[1]]
# Fit a linear regression of mpg on wt using four_cyls
lm(four_cyls$mpg ~ four_cyls$wt)
# Rewrite to call an anonymous function
map(cyl, function(df) lm(mpg ~ wt, data = df))
# Rewrite to use the formula shortcut instead
map(cyl, ~lm(mpg ~ wt, data = .))
# Save the result from the previous exercise to the variable models
models <- map(cyl, ~ lm(mpg ~ wt, data = .))
# Use map and coef to get the coefficients for each model: coefs
coefs <- map(models, coef)
# Use string shortcut to extract the wt coefficient
map(coefs, "wt")
# use map_dbl with the numeric shortcut to pull out the second element
map_dbl(coefs, 2)
# Define models (don't change)
models <- mtcars %>%
split(mtcars$cyl) %>%
map(~ lm(mpg ~ wt, data = .))
# Rewrite to be a single command using pipes
models %>%
map(summary) %>%
map_dbl("r.squared")
###########################################################################################
# Create safe_readLines() by passing readLines() to safely()
safe_readLines <- safely(readLines)
# Call safe_readLines() on "http://example.org"
safe_readLines("http://example.org")
# Call safe_readLines() on "http://asdfasdasdkfjlda"
safe_readLines("http://asdfasdasdkfjlda")
# Define safe_readLines()
safe_readLines <- safely(readLines)
# Use the safe_readLines() function with map(): html
html <- map(urls, safe_readLines)
# Call str() on html
str(html)
# Extract the result from one of the successful elements
html$example[["result"]]
# Extract the error from the element that was unsuccessful
html$asdf[["error"]]
# Define safe_readLines() and html
safe_readLines <- safely(readLines)
html <- map(urls, safe_readLines)
# Examine the structure of transpose(html)
str(transpose(html))
# Extract the results: res
res <- transpose(html)[["result"]]
# Extract the errors: errs
errs <- transpose(html)[["error"]]
# Initialize some objects
safe_readLines <- safely(readLines)
html <- map(urls, safe_readLines)
res <- transpose(html)[["result"]]
errs <- transpose(html)[["error"]]
# Create a logical vector is_ok
is_ok <- map_lgl(errs, is_null)
# Extract the successful results
res[is_ok]
# Find the URLs that were unsuccessful
urls[!is_ok]
# Use map_dbl() to find column standard deviations
map_dbl(df, sd)
##
# Create a list n containing the values: 5, 10, and 20
n <- list(5,10,20)
# Call map() on n with rnorm() to simulate three samples
map(n, rnorm)
# Create a list mu containing the values: 1, 5, and 10
mu <- list(1, 5, 10)
# Edit to call map2() on n and mu with rnorm() to simulate three samples
map2(n, mu, rnorm)
# Create a sd list with the values: 0.1, 1 and 0.1
sd <- list(0.1, 1, 0.1)
# Edit this call to pmap() to iterate over the sd list as well
pmap(list(n, mu, sd), rnorm)
# Define list of functions
f <- list("rnorm", "runif", "rexp")
# Parameter list for rnorm()
rnorm_params <- list(mean = 10)
# Add a min element with value 0 and max element with value 5
runif_params <- list(min = 0, max = 5)
# Add a rate element with value 5
rexp_params <- list(rate = 5)
# Define params for each function
params <- list(
rnorm_params,
runif_params,
rexp_params
)
# Call invoke_map() on f supplying params as the second argument
invoke_map(f, params, n = 5)
# Assign the simulated samples to sims
sims <- invoke_map(f, params, n = 50)
# Use walk() to make a histogram of each element in sims
walk(sims, hist)
breaks_list <- list(
Normal = seq(6,16,0.5),
Uniform = seq(0,5,0.25),
Exp = seq(0,1.5,0.1)
)
# Use walk2() to make histograms with the right breaks
walk2(sims, breaks_list, hist)
# Turn this snippet into find_breaks()
# Build 30 evenly spaced histogram breakpoints spanning the (NA-free) range of x.
find_breaks <- function(x) {
  lo_hi <- range(x, na.rm = TRUE)
  seq(from = lo_hi[1], to = lo_hi[2], length.out = 30)
}
# Call find_breaks() on sims[[1]]
find_breaks(sims[[1]])
# Use map() to iterate find_breaks() over sims: nice_breaks
nice_breaks <- map(sims, find_breaks)
# Use nice_breaks as the second argument to walk2()
walk2(sims, nice_breaks, hist)
# Increase sample size to 1000
sims <- invoke_map(f, params, n = 1000)
# Compute nice_breaks (don't change this)
nice_breaks <- map(sims, find_breaks)
# Create a vector nice_titles
nice_titles <- c("Normal(10, 1)", "Uniform(0, 5)", "Exp(5)")
# Use pwalk() instead of walk2()
pwalk(list(x = sims, breaks = nice_breaks,main = nice_titles), hist, xlab = "")
# Pipe this along to map(), using summary() as .f
sims %>%
walk(hist) %>%
map(summary)
###########################################################################################
# Define troublesome x and y
x <- c(NA, NA, NA)
y <- c( 1, NA, NA, NA)
# Count positions where both x and y are NA.
# Precondition (enforced): the inputs must be the same length.
both_na <- function(x, y) {
  stopifnot(length(x) == length(y))
  na_both <- is.na(x) & is.na(y)
  sum(na_both)
}
# Call both_na() on x and y
both_na(x, y)
# Define troublesome x and y
x <- c(NA, NA, NA)
y <- c( 1, NA, NA, NA)
# Count positions where both inputs are NA, raising a friendly error
# (without the call in the message) when the lengths differ.
both_na <- function(x, y) {
  n <- length(x)
  if (n != length(y)) {
    stop("x and y must have the same length", call. = FALSE)
  }
  sum(is.na(x) & is.na(y))
}
# Return the (first) class of every column of df as a character vector.
# Columns with several classes contribute only their first class name,
# so the result is always type-consistent.
col_classes <- function(df) {
  map_chr(map(df, class), 1)
}
# Check that our new function is type consistent
df %>% col_classes() %>% str()
df[3:4] %>% col_classes() %>% str()
df[1:2] %>% col_classes() %>% str()
# Return the class of every column of df as a character vector, erroring
# early if any column carries more than one class (which would make the
# flattened result ambiguous).
col_classes <- function(df) {
  class_list <- map(df, class)
  n_classes <- map_dbl(class_list, length)
  if (any(n_classes > 1)) {
    stop("Some columns have more than one class", call. = FALSE)
  }
  flatten_chr(class_list)
}
# Check that our new function is type consistent
df %>% col_classes() %>% str()
df[3:4] %>% col_classes() %>% str()
df[1:2] %>% col_classes() %>% str()
# Keep the rows of df whose column x exceeds threshold.
# NOTE(review): dplyr::filter() uses non-standard evaluation. If df has no
# column named x, the bare `x` silently resolves to a variable in the calling
# environment; and a column named `threshold` in df would shadow the function
# argument. The exercise lines following this definition demonstrate both
# failure modes; the hardened big_x() further below adds explicit guards.
big_x <- function(df, threshold) {
dplyr::filter(df, x > threshold)
}
big_x(diamonds_sub, 7)
# Remove the x column from diamonds
diamonds_sub$x <- NULL
# Create variable x with value 1
x <- 1
# Use big_x() to find rows in diamonds_sub where x > 7
big_x(diamonds_sub, 7)
# Create a threshold column with value 100
diamonds_sub$threshold <- 100
# Use big_x() to find rows in diamonds_sub where x > 7
big_x(diamonds_sub, 7)
# Hardened big_x(): keep rows of df where column x exceeds threshold,
# guarding against the two non-standard-evaluation surprises first.
big_x <- function(df, threshold) {
  cols <- names(df)
  # Without a real x column, filter() would silently capture an outside `x`.
  if (!("x" %in% cols)) {
    stop("df must contain variable called x", call. = FALSE)
  }
  # A threshold column would shadow the `threshold` argument inside filter().
  if ("threshold" %in% cols) {
    stop("df must not contain variable called threshold", call. = FALSE)
  }
  dplyr::filter(df, x > threshold)
}
# Read in the swimming_pools.csv to pools
pools <- read.csv("swimming_pools.csv")
# Examine the structure of pools
str(pools)
# Change the global stringsAsFactors option to FALSE
options("stringsAsFactors" = FALSE)
# Read in the swimming_pools.csv to pools2
pools2 <- read.csv("swimming_pools.csv")
# Examine the structure of pools2
str(pools2)
# Fit a regression model
fit <- lm(mpg ~ wt, data = mtcars)
# Look at the summary of the model
summary(fit)
# Set the global digits option to 2
options("digits" = 2)
# Take another look at the summary
summary(fit)
| /Basics_of_R/writing_functions.R | no_license | venkatk89/Data_Science_Algorithms | R | false | false | 10,829 | r | # Define ratio() function
ratio <- function(x, y) {
return(x / y)
}
# Call ratio() with arguments 3 and 4
ratio(3,4)
# Rewrite the call to follow best practices
mean(c(1:9, NA),trim = 0.1,na.rm = TRUE)
# 2nd element in tricky_list
typeof(tricky_list[[2]])
# Element called x in tricky_list
typeof(tricky_list[["x"]])
# 2nd element inside the element called x in tricky_list
typeof(tricky_list[['x']][[2]])
# Guess where the regression model is stored
names(tricky_list)
# Use names() and str() on the model element
names(tricky_list$model)
str(tricky_list$model)
# Subset the coefficients element
tricky_list$model$coefficients
# Subset the wt element
tricky_list$model$coefficients[["wt"]]
# Replace the 1:ncol(df) sequence
for (i in seq_along(df)) {
print(median(df[[i]]))
}
# Create new double vector: output
output <- vector("double", ncol(df))
# Alter the loop
for (i in seq_along(df)) {
output[i] <- median(df[[i]])
}
# Print output
print(output)
########################################################################################33
# Define example vectors x and y
x <- c( 1, 2, NA, 3, NA)
y <- c(NA, 3, NA, 3, 4)
# Count how many elements are missing in both x and y
sum(is.na(x) & is.na(y))
# Turn this snippet into a function: both_na()
both_na <- function(x , y){
sum(is.na(x) & is.na(y))
}
replace_missings <- function(x, replacement) {
is_miss <- is.na(x)
x[is_miss] <- replacement
# Rewrite to use message()
message(sum(is_miss)," missings replaced by the value ", replacement)
x
}
# Check your new function by running on df$z
df$z <- replace_missings(df$z, 0)
######################################################################################################
# Initialize output vector
output <- vector("double", ncol(df))
# Fill in the body of the for loop
for (i in seq_along(df)) {
output[i] <- median(df[[i]])
}
# View the result
output
# Function
col_median <- function(df){
output <- vector("double", ncol(df))
for (i in seq_along(df)) {
output[[i]] <- median(df[[i]])
}
return(output)
}
# function with function as parameter
col_summary <- function(df, fun) {
output <- vector("numeric", length(df))
for (i in seq_along(df)) {
output[[i]] <- fun(df[[i]])
}
output
}
col_median(df)
col_summary(df, fun = median) # same results
# Find the column IQRs using col_summary()
col_summary(df, fun = IQR)
# Load the purrr package
library(purrr)
# Use map_dbl() to find column means
map_dbl(df, mean)
# Use map_dbl() to column medians
map_dbl(df, median)
# Find the mean of each column
map_dbl(planes, mean)
# Find the mean of each column, excluding missing values
map_dbl(planes, mean, na.rm = TRUE)
# Find the 5th percentile of each column, excluding missing values
map_dbl(planes, quantile, probs = 0.05, na.rm = TRUE)
# Find the columns that are numeric
map_lgl(df3, is.numeric)
# Find the type of each column
map_chr(df3, typeof)
# Find a summary of each column
map(df3, summary)
# Examine the structure of cyl
str(cyl)
# Extract the first element into four_cyls
four_cyls <- cyl[[1]]
# Fit a linear regression of mpg on wt using four_cyls
lm(four_cyls$mpg ~ four_cyls$wt)
# Rewrite to call an anonymous function
map(cyl, function(df) lm(mpg ~ wt, data = df))
# Rewrite to use the formula shortcut instead
map(cyl, ~lm(mpg ~ wt, data = .))
# Save the result from the previous exercise to the variable models
models <- map(cyl, ~ lm(mpg ~ wt, data = .))
# Use map and coef to get the coefficients for each model: coefs
coefs <- map(models, coef)
# Use string shortcut to extract the wt coefficient
map(coefs, "wt")
# use map_dbl with the numeric shortcut to pull out the second element
map_dbl(coefs, 2)
# Define models (don't change)
models <- mtcars %>%
split(mtcars$cyl) %>%
map(~ lm(mpg ~ wt, data = .))
# Rewrite to be a single command using pipes
models %>%
map(summary) %>%
map_dbl("r.squared")
###########################################################################################
# Create safe_readLines() by passing readLines() to safely()
safe_readLines <- safely(readLines)
# Call safe_readLines() on "http://example.org"
safe_readLines("http://example.org")
# Call safe_readLines() on "http://asdfasdasdkfjlda"
safe_readLines("http://asdfasdasdkfjlda")
# Define safe_readLines()
safe_readLines <- safely(readLines)
# Use the safe_readLines() function with map(): html
html <- map(urls, safe_readLines)
# Call str() on html
str(html)
# Extract the result from one of the successful elements
html$example[["result"]]
# Extract the error from the element that was unsuccessful
html$asdf[["error"]]
# Define safe_readLines() and html
safe_readLines <- safely(readLines)
html <- map(urls, safe_readLines)
# Examine the structure of transpose(html)
str(transpose(html))
# Extract the results: res
res <- transpose(html)[["result"]]
# Extract the errors: errs
errs <- transpose(html)[["error"]]
# Initialize some objects
safe_readLines <- safely(readLines)
html <- map(urls, safe_readLines)
res <- transpose(html)[["result"]]
errs <- transpose(html)[["error"]]
# Create a logical vector is_ok
is_ok <- map_lgl(errs, is_null)
# Extract the successful results
res[is_ok]
# Find the URLs that were unsuccessful
urls[!is_ok]
# Use map_dbl() to find column standard deviations
map_dbl(df, sd)
##
# Create a list n containing the values: 5, 10, and 20
n <- list(5,10,20)
# Call map() on n with rnorm() to simulate three samples
map(n, rnorm)
# Create a list mu containing the values: 1, 5, and 10
mu <- list(1, 5, 10)
# Edit to call map2() on n and mu with rnorm() to simulate three samples
map2(n, mu, rnorm)
# Create a sd list with the values: 0.1, 1 and 0.1
sd <- list(0.1, 1, 0.1)
# Edit this call to pmap() to iterate over the sd list as well
pmap(list(n, mu, sd), rnorm)
# Define list of functions
f <- list("rnorm", "runif", "rexp")
# Parameter list for rnorm()
rnorm_params <- list(mean = 10)
# Add a min element with value 0 and max element with value 5
runif_params <- list(min = 0, max = 5)
# Add a rate element with value 5
rexp_params <- list(rate = 5)
# Define params for each function
params <- list(
rnorm_params,
runif_params,
rexp_params
)
# Call invoke_map() on f supplying params as the second argument
invoke_map(f, params, n = 5)
# Assign the simulated samples to sims
sims <- invoke_map(f, params, n = 50)
# Use walk() to make a histogram of each element in sims
walk(sims, hist)
breaks_list <- list(
Normal = seq(6,16,0.5),
Uniform = seq(0,5,0.25),
Exp = seq(0,1.5,0.1)
)
# Use walk2() to make histograms with the right breaks
walk2(sims, breaks_list, hist)
# Turn this snippet into find_breaks()
find_breaks <- function(x){
rng <- range(x, na.rm = TRUE)
seq(rng[1], rng[2], length.out = 30)
}
# Call find_breaks() on sims[[1]]
find_breaks(sims[[1]])
# Use map() to iterate find_breaks() over sims: nice_breaks
nice_breaks <- map(sims, find_breaks)
# Use nice_breaks as the second argument to walk2()
walk2(sims, nice_breaks, hist)
# Increase sample size to 1000
sims <- invoke_map(f, params, n = 1000)
# Compute nice_breaks (don't change this)
nice_breaks <- map(sims, find_breaks)
# Create a vector nice_titles
nice_titles <- c("Normal(10, 1)", "Uniform(0, 5)", "Exp(5)")
# Use pwalk() instead of walk2()
pwalk(list(x = sims, breaks = nice_breaks,main = nice_titles), hist, xlab = "")
# Pipe this along to map(), using summary() as .f
sims %>%
walk(hist) %>%
map(summary)
###########################################################################################
# Define troublesome x and y
x <- c(NA, NA, NA)
y <- c( 1, NA, NA, NA)
both_na <- function(x, y) {
# Add stopifnot() to check length of x and y
stopifnot(length(x) == length(y))
sum(is.na(x) & is.na(y))
}
# Call both_na() on x and y
both_na(x, y)
# Define troublesome x and y
x <- c(NA, NA, NA)
y <- c( 1, NA, NA, NA)
both_na <- function(x, y) {
# Replace condition with logical
if (length(x) != length(y)) {
# Replace "Error" with better message
stop("x and y must have the same length", call. = FALSE)
}
sum(is.na(x) & is.na(y))
}
col_classes <- function(df) {
# Assign list output to class_list
class_list <- map(df, class)
# Use map_chr() to extract first element in class_list
map_chr(class_list, 1)
}
# Check that our new function is type consistent
df %>% col_classes() %>% str()
df[3:4] %>% col_classes() %>% str()
df[1:2] %>% col_classes() %>% str()
col_classes <- function(df) {
class_list <- map(df, class)
# Add a check that no element of class_list has length > 1
if (any(map_dbl(class_list, length) > 1)) {
stop("Some columns have more than one class", call. = FALSE)
}
# Use flatten_chr() to return a character vector
flatten_chr(class_list)
}
# Check that our new function is type consistent
df %>% col_classes() %>% str()
df[3:4] %>% col_classes() %>% str()
df[1:2] %>% col_classes() %>% str()
big_x <- function(df, threshold) {
dplyr::filter(df, x > threshold)
}
big_x(diamonds_sub, 7)
# Remove the x column from diamonds
diamonds_sub$x <- NULL
# Create variable x with value 1
x <- 1
# Use big_x() to find rows in diamonds_sub where x > 7
big_x(diamonds_sub, 7)
# Create a threshold column with value 100
diamonds_sub$threshold <- 100
# Use big_x() to find rows in diamonds_sub where x > 7
big_x(diamonds_sub, 7)
big_x <- function(df, threshold) {
# Write a check for x not being in df
if(!"x" %in% names(df)){
stop("df must contain variable called x", call. = FALSE)
}
# Write a check for threshold being in df
if("threshold" %in% names(df)){
stop("df must not contain variable called threshold", call. = FALSE)
}
dplyr::filter(df, x > threshold)
}
# Read in the swimming_pools.csv to pools
pools <- read.csv("swimming_pools.csv")
# Examine the structure of pools
str(pools)
# Change the global stringsAsFactors option to FALSE
options("stringsAsFactors" = FALSE)
# Read in the swimming_pools.csv to pools2
pools2 <- read.csv("swimming_pools.csv")
# Examine the structure of pools2
str(pools2)
# Fit a regression model
fit <- lm(mpg ~ wt, data = mtcars)
# Look at the summary of the model
summary(fit)
# Set the global digits option to 2
options("digits" = 2)
# Take another look at the summary
summary(fit)
|
testlist <- list(score = NULL, id = NULL, item_score = integer(0), person_id = c(-50529028L, 1L, 452984892L, 16742144L, 252L, -65211140L, -50529028L, -50593792L, 72448L, 3932415L, 1996488704L, 19995L, 452984836L, 457179136L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::im_booklet_score,testlist)
str(result) | /dexterMST/inst/testfiles/im_booklet_score/AFL_im_booklet_score/im_booklet_score_valgrind_files/1615944777-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 577 | r | testlist <- list(score = NULL, id = NULL, item_score = integer(0), person_id = c(-50529028L, 1L, 452984892L, 16742144L, 252L, -65211140L, -50529028L, -50593792L, 72448L, 3932415L, 1996488704L, 19995L, 452984836L, 457179136L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::im_booklet_score,testlist)
str(result) |
#
# cum-prob-distrib.R, 30 Dec 15
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
#
# Draws a 2x2 grid of cumulative distribution functions: a hand-drawn discrete
# uniform CDF, the geometric CDF for several success probabilities, the
# binomial CDF for several size/prob pairs, and (further below) the Poisson
# CDF for several rates. plot_layout() comes from ESEUR_config.r.
source("ESEUR_config.r")
plot_layout(2, 2)
# Three colors reused across the multi-series panels.
brew_col=rainbow(3)
# Panel 1: staircase CDF of a discrete uniform on {1,2,3,4}, drawn explicitly
# from its (x, F(x)) corner points.
plot(c(0, 1,1, 2,2, 3,3, 4,4, 5), c(0, 0, 0.25,0.25, 0.5,0.5, 0.75,0.75, 1, 1), type="l",
col=brew_col[1],
xlab="x", ylab="")
# Panel 2: geometric CDF at x = 0..10 for prob = 0.2, 0.4, 0.6.
x_geom=0:10
plot(x_geom, pgeom(x_geom, prob=0.2),
col=brew_col[1],
xlim=range(x_geom), ylim=c(0, 1),
xlab="x", ylab="")
points(x_geom, pgeom(x_geom, prob=0.4), col=brew_col[2])
points(x_geom, pgeom(x_geom, prob=0.6), col=brew_col[3])
legend(x="bottomright", legend=c("prob=0.2", "prob=0.4", "prob=0.6"), bty="n", fill=brew_col, cex=1.3)
# Panel 3: binomial CDF at x = 0..10 for three size/prob combinations.
x_binom=0:10
plot(x_binom, pbinom(x_binom, size=10, prob=0.2),
bty="n", yaxt="n",
col=brew_col[1],
xlim=range(x_binom), ylim=c(0, 1),
xlab="x", ylab="")
points(x_binom, pbinom(x_binom, size=10, prob=0.4), col=brew_col[2])
points(x_binom, pbinom(x_binom, size=25, prob=0.2), col=brew_col[3])
legend(x="bottomright", legend=c("size=10\nprob=0.2\n", "size=10\nprob=0.4\n", "size=25\nprob=0.2"), bty="n", fill=brew_col, cex=1.3)
# Build a plotmath expression "lambda == <num>" for use as a legend label.
lambda_str <- function(num) {
  as.expression(substitute(lambda == num))
}
# Panel 4: Poisson CDF at x = 0..10 for rates lambda = 2, 4, 7, with legend
# labels rendered as Greek-lambda plotmath expressions via lambda_str().
x_pois=0:10
plot(x_pois, ppois(x_pois, lambda=2),
bty="n", yaxt="n",
col=brew_col[1],
xlim=range(x_pois), ylim=c(0, 1),
xlab="x", ylab="")
points(x_pois, ppois(x_pois, lambda=4), col=brew_col[2])
points(x_pois, ppois(x_pois, lambda=7), col=brew_col[3])
legend(x="bottomright", legend=c(lambda_str(2), lambda_str(4), lambda_str(7)),
bty="n", fill=brew_col, cex=1.3)
| /probability/cum-prob-distrib.R | no_license | vinodrajendran001/ESEUR-code-data | R | false | false | 1,518 | r | #
# cum-prob-distrib.R, 30 Dec 15
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
plot_layout(2, 2)
brew_col=rainbow(3)
plot(c(0, 1,1, 2,2, 3,3, 4,4, 5), c(0, 0, 0.25,0.25, 0.5,0.5, 0.75,0.75, 1, 1), type="l",
col=brew_col[1],
xlab="x", ylab="")
x_geom=0:10
plot(x_geom, pgeom(x_geom, prob=0.2),
col=brew_col[1],
xlim=range(x_geom), ylim=c(0, 1),
xlab="x", ylab="")
points(x_geom, pgeom(x_geom, prob=0.4), col=brew_col[2])
points(x_geom, pgeom(x_geom, prob=0.6), col=brew_col[3])
legend(x="bottomright", legend=c("prob=0.2", "prob=0.4", "prob=0.6"), bty="n", fill=brew_col, cex=1.3)
x_binom=0:10
plot(x_binom, pbinom(x_binom, size=10, prob=0.2),
bty="n", yaxt="n",
col=brew_col[1],
xlim=range(x_binom), ylim=c(0, 1),
xlab="x", ylab="")
points(x_binom, pbinom(x_binom, size=10, prob=0.4), col=brew_col[2])
points(x_binom, pbinom(x_binom, size=25, prob=0.2), col=brew_col[3])
legend(x="bottomright", legend=c("size=10\nprob=0.2\n", "size=10\nprob=0.4\n", "size=25\nprob=0.2"), bty="n", fill=brew_col, cex=1.3)
lambda_str=function(num) as.expression(substitute(lambda == num))
x_pois=0:10
plot(x_pois, ppois(x_pois, lambda=2),
bty="n", yaxt="n",
col=brew_col[1],
xlim=range(x_pois), ylim=c(0, 1),
xlab="x", ylab="")
points(x_pois, ppois(x_pois, lambda=4), col=brew_col[2])
points(x_pois, ppois(x_pois, lambda=7), col=brew_col[3])
legend(x="bottomright", legend=c(lambda_str(2), lambda_str(4), lambda_str(7)),
bty="n", fill=brew_col, cex=1.3)
|
#' Return the description of the stacked bar plot that visualizes
#' the groups of InteractionActions that are linked with the user given compound input.
#' @param chemical The chemical that triggered the InteractionActions
#' @return Text that explains the bar plot structure
#' Return the description of the stacked bar plot that visualizes
#' the groups of InteractionActions that are linked with the user given compound input.
#' @param chemical The chemical that triggered the InteractionActions
#' @return Text that explains the bar plot structure
get_generalInformationDescription <- function( chemical){
  # Text fix: the original ran two sentences together
  # ("...with regard to <chemical> IAs group..."); a sentence break is added
  # after the chemical name.
  string <- paste0( "In the figure below, all mentioned 'InteractionActions' (IAs) are clustered that are collected by the CTDbase with regard to ",
chemical,
". IAs group interactions into classes and can be combined such that multiple
IAs can be used to describe chemical-gene relations. The different IAs contain one or more interaction terms,
separated by '|' character, from a dictionary of 79 entries. Each of these terms are mostly prefixed by an
attribute such as 'increases', 'decreases', 'affects', yielding IAs in the form of
'increases^expression | affects^activity'. All IAs that are connected with chemical-gene interactions triggered by ",
chemical,
" are displayed in the following plot, where the number of references of each IA is shown in a stacked bar plot.")
  return( string)
}
#' Return the description of the sideways bar plot that displays
#' the top40 genes and their respective number of references.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
#' Return the description of the sideways bar plot that displays
#' the top40 genes and their respective number of references.
#' @param chemical The chemical that was used as search term (currently unused;
#'   kept so the signature matches the other description helpers)
#' @return Text that explains the structure of the plot
get_chemicalGeneDescription <- function( chemical){
  # Text fixes: "deacrease" -> "decrease", "where plotted" -> "were plotted",
  # "addtionally" -> "additionally".
  string <- "With a focus on the effects triggered by the exposure, the number of references that mention either a decrease
or increase in expression changes were plotted for the top40 genes (highest number of references). Publications
that merely mention an 'affect' rather than specifying the direction are additionally visualized at the right side of the figure."
  return( string)
}
#' Return the description of the bar plot illustrating the most
#' referenced diseases that were linked with a certain compound.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
#' Return the description of the bar plot illustrating the most
#' referenced diseases that were linked with a certain compound.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
get_diseasesDescription <- function( chemical){
  # Assemble and return the text in one expression; the string pieces are
  # unchanged from the original wording.
  return( paste0(
    "The following figure summarizes those diseases that have the largest reference counts and/or inference score associating a connection between ",
    chemical,
    " and the disease. Each of the CTDbase collected associations is either curated or inferred (via a curated gene interaction)",
    " The unique list of both top40 diseases with the most reference counts AND the top40 list of diseases with the highest inference score,
is displayed as bar plot. The bar length indicates the actual number of references while the color scheme indicates the inference score.
In brief, the inference score reflects the degree of similarity between CTD chemical–gene–disease networks and a similar scale-free random network.
The higher the score, the more likely the inference network has atypical connectivity. For more information about inference scores, please see:
King et al, 2012, PLoS One, 'Ranking Transitive Chemical-Disease Inferences Using Local Network Topology in the Comparative Toxicogenomics Database.'"))
}
#' Return the description of the bar plot illustrating the most
#' enriched KEGG pathways.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
#' Return the description of the bar plot illustrating the most
#' enriched KEGG pathways.
#' @param chemical The chemical that was used as search term (currently unused;
#'   kept so the signature matches the other description helpers)
#' @return Text that explains the structure of the plot
get_pathwaysDescription <- function( chemical){
  # Text fix: "ration" -> "ratio".
  string <- paste0( "Finally, the top40 KEGG and REACTOME represented pathways, based on their adjusted P-Values are shown. The length of each bar
reflects the ratio between enriched genes and the total number of annotated genes in this respective pathway. Based on the CTDbase,
a pathway is considered 'enriched' if the proportion of genes annotated to it in a test set is significantly larger than the
proportion of all genes annotated to it in the genome.",
" The color scale of each bar reflects the amount of annotated genes in each pathway. The actual adjusted P-Value is stated in the
pathway label on the y axis.")
  return( string)
}
#' the groups of InteractionActions that are linked with the user given compound input.
#' @param chemical The chemical that triggered the InteractionActions
#' @return Text that explains the bar plot structure
get_generalInformationDescription <- function( chemical){
string <- paste0( "In the figure below, all mentioned 'InteractionActions' (IAs) are clustered that are collected by the CTDbase with regard to ",
chemical,
" IAs group interactions into classes and can be combined such that multiple
IAs can be used to describe chemical-gene relations. The different IAs contain one or more interaction terms,
separated by '|' character, from a dictionary of 79 entries. Each of these terms are mostly prefixed by an
attribute such as 'increases', 'decreases', 'affects', yielding IAs in the form of
'increases^expression | affects^activity'. All IAs that are connected with chemical-gene interactions triggered by ",
chemical,
" are displayed in the following plot, where the number of references of each IA is shown in a stacked bar plot.")
return( string)
}
#' Return the description of the sideways bar plot that displays
#' the top40 genes and their respective number of references.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
get_chemicalGeneDescription <- function( chemical){
string <-"With a focus on the effects triggered by the exposure, the number of references that mention either a deacrease
or increase in expression changes where plotted for the top40 genes (highest number of references). Publications
that merely mention an 'affect' rather than specifying the direction are addtionally visualized at the right side of the figure."
return( string)
}
#' Return the description of the bar plot illustrating the most
#' referenced diseases that were linked with a certain compound.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
get_diseasesDescription <- function( chemical){
string <- paste0("The following figure summarizes those diseases that have the largest reference counts and/or inference score associating a connection between ", chemical,
" and the disease. Each of the CTDbase collected associations is either curated or inferred (via a curated gene interaction)",
" The unique list of both top40 diseases with the most reference counts AND the top40 list of diseases with the highest inference score,
is displayed as bar plot. The bar length indicates the actual number of references while the color scheme indicates the inference score.
In brief, the inference score reflects the degree of similarity between CTD chemical–gene–disease networks and a similar scale-free random network.
The higher the score, the more likely the inference network has atypical connectivity. For more information about inference scores, please see:
King et al, 2012, PLoS One, 'Ranking Transitive Chemical-Disease Inferences Using Local Network Topology in the Comparative Toxicogenomics Database.'")
return( string)
}
#' Return the description of the bar plot illustrating the most
#' enriched KEGG pathways.
#' @param chemical The chemical that was used as search term
#' @return Text that explains the structure of the plot
get_pathwaysDescription <- function( chemical){
string <- paste0( "Finally, the top40 KEGG and REACTOME represented pathways, based on their adjusted P-Values are shown. The length of each bar
reflects the ration between enriched genes and the total number of annotated genes in this respective pathway. Based on the CTDbase,
a pathway is considered 'enriched' if the proportion of genes annotated to it in a test set is significantly larger than the
proportion of all genes annotated to it in the genome.",
" The color scale of each bar reflects the amount of annotated genes in each pathway. The actual adjusted P-Value is stated in the
pathway label on the y axis.")
return( string)
} |
# dataprocess.R -- exploratory plots over an admissions dataset.
# Usage: Rscript dataprocess.R <path-to-csv>. The file must contain the
# columns PID, cost, cost_b, LOS_b, agyradm, gender, race_grp referenced below
# -- presumably one row per hospital admission keyed by patient ID; TODO confirm.
library(data.table)
# First trailing command-line argument is the input file path.
path = commandArgs(trailingOnly = TRUE)[1]
# data.table=F so df is a plain data.frame.
df = fread(path, data.table=F)
# remove patients with only 1 admission
tb = table(df$PID)
pids = names(tb[tb==1])
df = df[!df$PID %in% pids,]
# patients age aggregation
# (minimum age per patient; assumes agyradm is age at admission -- TODO confirm)
ageagg = aggregate(agyradm~PID, FUN=min, data=df)
# gender aggregation
# (first recorded value per patient)
genagg = aggregate(gender~PID, FUN=function(vals) vals[1], data=df)
# race aggregation
# (first recorded value per patient)
racagg = aggregate(race_grp~PID, FUN=function(vals) vals[1], data=df)
# plot heatmap
# Cross-tabulate bucketed cost vs bucketed length of stay; row order is
# reversed so the highest cost bucket appears at the top of the heatmap.
library(gplots)
tb = table(df$cost_b, df$LOS_b)
tb = tb[order(rownames(tb), decreasing=T),]
heatmap.2(tb, dendrogram='none', Rowv='none', Colv='none', cexRow=2, cexCol=2, col=grey(16:1/16), trace='none', xlab='LOS in Buckets', ylab='Cost in Buckets')
# plot seq-len distribution
# table(table(PID)) = number of patients having each admission count.
plot(log10(table(table(df$PID))), ylab='Number of Patients(Log10)', xlab='Number of Admissions', cex.lab=1.5, cex.axis=1.5)
# plot cost distribution
# Histogram of per-admission cost in units of $100K; empty bins are dropped
# before plotting counts on a log10 scale.
# NOTE(review): if NO bin is empty, `non` is integer(0) and the `[-non]`
# subscripts below would drop everything instead of nothing.
myhist = hist(df$cost/100000, breaks=500)
non = which(myhist$counts == 0)
myhist$counts = myhist$counts[-non]
myhist$breaks = myhist$breaks[-non]
plot(y=log10(myhist$counts), x=myhist$breaks[-1], type='h', xlab='Admission Cost(100K)', ylab='Number of Patients(Log10)', cex.lab=1.5, cex.axis=1.5)
| /bcb/dataprocess.R | no_license | darrenhon/projects | R | false | false | 1,237 | r | library(data.table)
path = commandArgs(trailingOnly = TRUE)[1]
df = fread(path, data.table=F)
# remove patients with only 1 admission
tb = table(df$PID)
pids = names(tb[tb==1])
df = df[!df$PID %in% pids,]
# patients age aggregation
ageagg = aggregate(agyradm~PID, FUN=min, data=df)
# gender aggregation
genagg = aggregate(gender~PID, FUN=function(vals) vals[1], data=df)
# race aggregation
racagg = aggregate(race_grp~PID, FUN=function(vals) vals[1], data=df)
# plot heatmap
library(gplots)
tb = table(df$cost_b, df$LOS_b)
tb = tb[order(rownames(tb), decreasing=T),]
heatmap.2(tb, dendrogram='none', Rowv='none', Colv='none', cexRow=2, cexCol=2, col=grey(16:1/16), trace='none', xlab='LOS in Buckets', ylab='Cost in Buckets')
# plot seq-len distribution
plot(log10(table(table(df$PID))), ylab='Number of Patients(Log10)', xlab='Number of Admissions', cex.lab=1.5, cex.axis=1.5)
# plot cost distribution
myhist = hist(df$cost/100000, breaks=500)
non = which(myhist$counts == 0)
myhist$counts = myhist$counts[-non]
myhist$breaks = myhist$breaks[-non]
plot(y=log10(myhist$counts), x=myhist$breaks[-1], type='h', xlab='Admission Cost(100K)', ylab='Number of Patients(Log10)', cex.lab=1.5, cex.axis=1.5)
|
## These functions allow you to cache the inverse of a matrix
## and access that inverse from other functions
## makeCacheMatrix() provides a set of functions for storing
## and access the initial matrix as well as the inverse once
## the inverse has been solved vie cacheSolve() or via direct
## assignment
## Build a special "matrix" object: a list of accessor closures that share a
## cached inverse through their common enclosing environment. setinv() stores
## a value (typically computed by cacheSolve()) and getinv() retrieves it.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## cacheSolve() accepts a makeCacheMatric() object and either
## solves, stores and returns a newly computed inverse or simply
## returns an inverse if already cached
## Return the inverse of the special "matrix" x (built by makeCacheMatrix),
## reusing the cached value when present (announced via a message) and
## otherwise computing it with solve() and storing it back in the cache.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | apeletz512/ProgrammingAssignment2 | R | false | false | 914 | r | ## These functions allow you to cache the inverse of a matrix
## and access that inverse from other functions
## makeCacheMatrix() provides a set of functions for storing
## and access the initial matrix as well as the inverse once
## the inverse has been solved vie cacheSolve() or via direct
## assignment
## Construct a special "matrix": a list of closures sharing a cached inverse
## (`inv`) through their enclosing environment. setinv() uses `<<-` so the
## cache survives across calls; getinv() returns NULL until an inverse is set.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve() accepts a makeCacheMatric() object and either
## solves, stores and returns a newly computed inverse or simply
## returns an inverse if already cached
## Return the inverse of the special "matrix" x (from makeCacheMatrix above):
## short-circuits with the cached inverse when one exists, otherwise computes
## it via solve() (extra args forwarded) and stores it in the cache.
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
# Multiple linear regression on the 50 Startups dataset:
# load, encode the categorical State column, split 80/20, fit, predict.
# Created by: Chris
# Created on: 2019/07/16
dataset = read.csv("50_Startups.csv")
# Encoding categorical data
dataset$State = factor(dataset$State,
levels = c("New York", "California", "Florida"),
labels = c(1, 2, 3))
# Split dataset into test set and training set
#install.packages('caTools')
library(caTools)
set.seed(123)  # fixed seed so the split is reproducible
split = sample.split(dataset$Profit, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Fit on all predictors; "Profit ~ ." is shorthand for listing each column:
# regressor = lm(formula=Profit ~ R.D.Spend + Administration + Marketing.Spend + State, data = training_set)
regressor = lm(formula= Profit ~ ., data = training_set)
y_pred = predict(regressor, test_set)
# Building backward elimination
# NOTE(review): this refit uses the full dataset, not training_set -- confirm intended.
regressor = lm(formula=Profit ~ R.D.Spend + Administration + Marketing.Spend + State, data = dataset)
# Backward elimination: repeatedly refit Profit ~ . on x and drop the
# predictor with the largest p-value while that p-value exceeds the
# significance level sl. Returns the summary of the last fitted model.
# NOTE(review): coefficient rows 2:numVars are matched to columns of x by
# position; this assumes every predictor yields exactly one coefficient row.
# A factor column (e.g. State) expands to several dummy rows, which would
# misalign j -- confirm before reusing on data with factors.
backwardElimination <- function(x, sl) {
numVars = length(x)  # number of columns in x (response included)
for (i in c(1:numVars)){
regressor = lm(formula = Profit ~ ., data = x)
# largest p-value among the non-intercept coefficients
maxVar = max(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"])
if (maxVar > sl){
# locate that predictor and drop the matching column from the data
j = which(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"] == maxVar)
x = x[, -j]
}
numVars = numVars - 1
}
return(summary(regressor))
}
SL = 0.05  # significance threshold for keeping a predictor
# Keep the first five columns (all predictors plus Profit) before eliminating.
dataset = dataset[, c(1,2,3,4,5)]
backwardElimination(training_set, SL)
| /Regression/Multiple Linear Regression/MultipleLinearRegression.R | permissive | Maelstrom6/MachineLearning3 | R | false | false | 1,384 | r | # Title : TODO
# Multiple linear regression on the 50 Startups dataset:
# load, encode the categorical State column, split 80/20, fit, predict.
# Created by: Chris
# Created on: 2019/07/16
dataset = read.csv("50_Startups.csv")
# Encoding categorical data
dataset$State = factor(dataset$State,
levels = c("New York", "California", "Florida"),
labels = c(1, 2, 3))
# Split dataset into test set and training set
#install.packages('caTools')
library(caTools)
set.seed(123)  # fixed seed so the split is reproducible
split = sample.split(dataset$Profit, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Fit on all predictors; "Profit ~ ." is shorthand for listing each column:
# regressor = lm(formula=Profit ~ R.D.Spend + Administration + Marketing.Spend + State, data = training_set)
regressor = lm(formula= Profit ~ ., data = training_set)
y_pred = predict(regressor, test_set)
# Building backward elimination
# NOTE(review): this refit uses the full dataset, not training_set -- confirm intended.
regressor = lm(formula=Profit ~ R.D.Spend + Administration + Marketing.Spend + State, data = dataset)
# Backward elimination: repeatedly refit Profit ~ . on x and drop the
# predictor with the largest p-value while that p-value exceeds the
# significance level sl. Returns the summary of the last fitted model.
# NOTE(review): coefficient rows 2:numVars are matched to columns of x by
# position; this assumes every predictor yields exactly one coefficient row.
# A factor column (e.g. State) expands to several dummy rows, which would
# misalign j -- confirm before reusing on data with factors.
backwardElimination <- function(x, sl) {
numVars = length(x)  # number of columns in x (response included)
for (i in c(1:numVars)){
regressor = lm(formula = Profit ~ ., data = x)
# largest p-value among the non-intercept coefficients
maxVar = max(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"])
if (maxVar > sl){
# locate that predictor and drop the matching column from the data
j = which(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"] == maxVar)
x = x[, -j]
}
numVars = numVars - 1
}
return(summary(regressor))
}
SL = 0.05  # significance threshold for keeping a predictor
# Keep the first five columns (all predictors plus Profit) before eliminating.
dataset = dataset[, c(1,2,3,4,5)]
backwardElimination(training_set, SL)
|
library(ggplot2)
library(magrittr)
# Numerically invert a function f on [lower, upper].
# The returned closure maps y to the first element of the uniroot() result
# for f(x) = y (a one-element list holding the root), using tolerance 1e-3.
inverse <- function(f, lower, upper) {
  function(y) {
    shifted <- function(x) f(x) - y
    uniroot(shifted, lower = lower, upper = upper, tol = 1e-3)[1]
  }
}
# ch2
# problem 1
# Posterior inference: suppose you have a Beta(4, 4) prior distribution on the probability θ
# that a coin will yield a ‘head’ when spun in a specified manner. The coin is independently
# spun ten times, and ‘heads’ appear fewer than 3 times. You are not told how many heads
# were seen, only that the number is less than 3. Calculate your exact posterior density
# (up to a proportionality constant) for θ and sketch it.
#
# Answer: Average the beta distributions for every possible world. The different worlds...
# might have different weights on them? I think they do? Since there are more ways to
# flip three heads than ways to flip one head - but I don't want to think about that
# so I'm gonna skip it.
xs <- seq(0, 1, 0.01)
priors <- list(a = 4, b = 4)
possible_heads <- 0:3
flips <- 10
possible_distributions <- lapply(
possible_heads,
function(nheads) dbeta(xs, priors$a + nheads,
priors$b + flips - nheads))
average_distribution <- (
Reduce(function(d1, d2) d1 + d2, possible_distributions) /
length(possible_distributions))
plot(xs, possible_distributions[[1]], type = 'l', col = 'gray')
for (d in possible_distributions) {
lines(xs, d, col = 'gray')
}
lines(xs, average_distribution, col = 'red')
# problem 2
# Predictive distributions: consider two coins, C1 and C2, with the following characteristics:
# Pr(heads|C1)=0.6 and Pr(heads|C2)=0.4. Choose one of the coins at random and
# imagine spinning it repeatedly. Given that the first two spins from the chosen coin are
# tails, what is the expectation of the number of additional spins until a head shows up?
#
# Answer: Two parts to this one. First: find the posterior after the two Tails spins.
# Second: compute the mean of the .. predictive distribution of that posterior, I think?
# P(C1|tails) = (P(tails|C1) * P(C1) / a) = 0.4 * 0.5 / a, where
# a =([P(tails|C1) * P(C1) + P(tails|C2) * P(C2)]) = 0.4 * 0.5 + 0.6 * 0.5 =
# 4/10 * 1/2 + 6/10 * 1/2 = 4/20 + 6/20 = 10/20 = 1/2, so we have
# the full expression (4/10 * 5/10) / 1/2 = (1/5) / (1/2) = 2/5.
# P(C1|T,T) = P(tails|C1) * P(C1|tails) / b = 4/10 * 2/5 / b, where b =
# P(tails|C1) * P(C1|tails) + P(tails|C2) * P(C2|tails) =
# 4/10 * 2/5 + 6/10 * 3/5 = 8/50 + 18/50 = 26/50 = b, so the full expression
# is 4/10 * 2/5 / 26/50 = 8/50 * 50/26 = 8/26 = 4/13!
# So now... we think it's a 4/13 chance that it's P(heads|all the above) = 0.6,
# and a 9/13 chance it's P(heads|all the above = 0.4). We can just weight these worlds,
# so our probability of heads on any flip is 4/13 * 6/10 + 9/13 * 4/10 = (24 + 36)/130 =
# 60/130.
# So we want... the number of flips so ... oh!! use exponential distribution maybe!
# the rate is 60/130!
# Or... this is where hierarchical modeling comes in.
# We want to simulate a draw from the 4/13 vs. 9/13, then the draw from the proper exponential,
# many times. Maybe?
outer_ndraws <- 100000
outer_draws <- runif(outer_ndraws) < 4/13 # TRUE means "picked C1"
coin_probas <- sapply(outer_draws, function(draw) if (draw) 0.6 else 0.4)
mean(sapply(coin_probas, function(r) rexp(1, r)))
# problem 3
times <- 1000
xs <- seq(0, 0.3, 0.001)
mu <- 1/6
variance <- sqrt(times * 1/6 * 5/6) / times
plot(xs, dnorm(xs, mu, variance), type='l')
x_locations <- c(0.05, 0.25, 0.50, 0.75, 0.95)
print(qnorm(x_locations, mu, variance))
# problem 4a
xs <- seq(0, 1, 0.001)
means <- c(1/12, 1/6, 1/4)
weights <- c(0.25, 0.5, 0.25)
distributions <- Map(function(mu, weight) weight * dnorm(xs, mu, variance) / times,
means, weights)
mixed <- Reduce(`+`, distributions)
plot(xs, mixed, type = 'l')
cumul_curve <- cumsum(mixed)
x_locations <- c(0.1, 0.2, 0.25, 0.3, 0.4)
quantiles <- quantile(cumul_curve, x_locations)
plot(xs, cumul_curve, type = 'l', xlim = c(0, 0.4))
for (i in seq_along(x_locations)) {
x_loc <- x_locations[i]
q <- quantiles[i]
abline(v = x_loc, h = q, col = 'gray')
}
# problem 8c
# Normal-normal conjugate posterior update (BDA problem 8c).
#
# Fixed quantities: known sampling sd sigma = 20, prior N(mu0 = 180,
# tau0 = 40), observed sample mean ybar = 150 from n observations.
#
# @param n number of observations.
# @return list(mu = posterior mean, sigma = posterior standard deviation).
compute_posterior <- function(n) {
  sigma <- 20   # known sampling sd
  mu0 <- 180    # prior mean
  tau0 <- 40    # prior sd
  ybar <- 150   # observed sample mean
  # Posterior precision is the sum of prior and data precisions:
  # 1/tau_n^2 = 1/tau0^2 + n/sigma^2.
  precision <- (1 / tau0^2) + (n / sigma^2)
  # Posterior mean is the precision-weighted average of mu0 and ybar.
  posterior_mean <- ((mu0 / tau0^2) + ((ybar * n) / sigma^2)) / precision
  # Fix: tau_n = sqrt(1/precision). The original returned 1/precision --
  # the posterior *variance* -- labeled as sigma.
  posterior_sd <- sqrt(1 / precision)
  list(mu = posterior_mean, sigma = posterior_sd)
}
# 90% central posterior interval (5th and 95th percentiles) for the
# posterior returned by compute_posterior(n).
get_posterior_interval <- function(n) {
  post <- compute_posterior(n)
  with(post, qnorm(c(0.05, 0.95), mu, sigma))
}
n <- 100
results <- list(posterior = compute_posterior(n), interval = get_posterior_interval(n))
xs <- seq(0, 350, 1)
distr <- results %$% posterior %$% dnorm(xs, mu, sigma)
plot(xs, distr, type = 'l')
abline(v=results$interval[[1]])
abline(v=results$interval[[2]])
results
# problem 9
alpha <- 1
beta <- 2/3
xs <- seq(0, 1, 0.0001)
dens <- dbeta(xs, alpha, beta)
points <- rbeta(1000, alpha, beta)
mean(points)
plot(xs, dens, type = 'l')
posterior <- list(a = 651, b = 350.67)
dens <- posterior %$% dbeta(xs, a, b)
rvec <- posterior %$% rbeta(1000, a, b)
mean(rvec)
sd(rvec)
plot(xs, dens, type='l')
# problem 10
# Expected value of a discrete distribution: sum over n in xs of
# n * posterior(n), where posterior is a (normalized) pmf function.
get_posterior_mean <- function(xs, posterior)
sapply(xs, function(n) n * posterior(n)) %>% sum()
# Geometric prior on N with success probability 1/100:
# P(N) = (1/100) * (99/100)^(N - 1) for N = 1, 2, ...
prior <- function(N) {
  p <- 1 / 100
  p * (1 - p)^(N - 1)
}
xs <- 1:500
dens <- sapply(xs, prior)
plot(xs, dens, type='l')
number_seen <- 5
p_data_given_n <- function(n) if (n < number_seen) 0 else 1 / n
prior_on_n <- function(n) (1/100) * (99/100)^(n-1)
posterior <- function(n) p_data_given_n(n) * prior_on_n(n)
max_n <- 1000
xs <- 0:max_n
dens <- sapply(xs, posterior)
c <- 1 / sum(dens)
x_range <- number_seen:max_n
posterior_mean <- sapply(x_range, function(n) c * n * posterior(n)) %>% sum()
posterior_mean
posterior_variance <- x_range %>%
sapply(function(n) ((n - posterior_mean)^2) * (c/n) * (1/100) * (99/100)^n) %>%
sum()
posterior_sd <- sqrt(posterior_variance)
posterior_sd
plot(xs, dens, type='l')
abline(v=posterior_mean, col='blue')
# 10c
# Expected value of a discrete distribution: sum over n in xs of
# n * posterior(n), where posterior is a (normalized) pmf function.
get_posterior_mean <- function(xs, posterior) {
sapply(xs, function(n) n * posterior(n)) %>% sum()
}
number_seen <- 203
prior_max <- 1000
xs <- 1:prior_max
# uniform prior
prior <- Vectorize(function(n) if (n <= prior_max) 1/prior_max else 0)
# geometric prior
prior <- Vectorize(function(n) (1/100) * (99/100)^(n - 1))
## These two are broken..
# gaussian prior
# prior <- Vectorize(
# function(n) {
# dens <- dnorm(n, 500, 300)
# dens[dens < 0] <- 0
# function(n) dens[n]
# }())
# # exponential prior
# prior <- Vectorize(function(n) {
# dens <- dexp(xs, 0.2)
# function(n) dens[n]
# })
# plot(xs, prior(xs), type='l')
p_data_given_n <- Vectorize(function(n)
if (n >= number_seen) 1/n
else 0
)
unnormalized_posterior <- Vectorize(function(n)
prior(n) * p_data_given_n(n)
)
c <- sum(unnormalized_posterior(xs))
posterior_fn <- Vectorize(function(n) unnormalized_posterior(n) / c)
posterior_vec <- posterior_fn(xs)
stopifnot(sum(posterior_vec) == 1)
posterior_mean <- get_posterior_mean(xs, posterior_fn)
plot_settings <- list(fn = plot, color = 'black')
plot_settings <- list(fn = lines, color = 'red')
plot_settings %$% fn(xs, posterior_vec, col=color, type='l')
plot_settings %$% abline(v=posterior_mean, col=color)
posterior_mean
plot_settings %$% lines(xs, prior(xs), type='l', col=color)
abline(v=get_posterior_mean(xs, prior))
# OK so woooo the answer to the taxicab problem definitely depends
# a lot on the prior! Which makes sense cuz one datum but still!
# We could make a fun site where you can play with this problem <hug emoji>
# with different priors, and drawing taxis seen, and, and...
# or a blog post! We can use the evolving-distribution gif style
# from the CLT post. One main insight is that you're moving probability
# from < 203 to the LHS of 203+ with the update.
# Does seeing 503, then 203, differ from seeing 503 alone? Probably no?
#####
# 11
y <- c(43, 44, 45, 46.5, 47.5)
prior <- Vectorize(
function(theta) if (theta >= 0 && theta <= 100) 1/theta else 0
)
# Cauchy(theta, 1) likelihood of the data vector y evaluated at each
# candidate location in thetas; returns one likelihood per theta.
dens <- function (y, thetas) {
  vapply(thetas,
         function(location) prod(dcauchy(y, location, 1)),
         numeric(1))
}
dtheta <- .01
theta <- seq(0, 100, dtheta)
unnormalized_posterior <- dens(y, theta)
normalized_posterior <- unnormalized_posterior / sum(unnormalized_posterior)
plot(theta, normalized_posterior, type='l',
ylim=c(0, 1.1 * max(normalized_posterior)))
sum(theta * normalized_posterior)
?dcauchy
# 11b
posterior_draws <- sample(theta, 1000, replace=TRUE, prob=normalized_posterior)
hist(posterior_draws, breaks=50)
# 11c
# So to get the posterior predictive distribution for a parameter theta...
# we take many draws of theta from the posterior... and generate a random
# number from the "likelihood"... is what the website said... so does
# that mean the cauchy is the likelihood here? Hmh.
# WRITE THIS DOWN THOOO :) here's getting closer to knowing how to
# draw from predictive distributions.
posterior_predictive <- rcauchy(length(posterior_draws), posterior_draws, 1)
hist(posterior_predictive, breaks=100)
# 13a
# Method-of-moments gamma parameters (shape alpha, rate beta) matching a
# target mean mu and variance var: scale = var/mu, alpha = mu/scale,
# beta = 1/scale, so that alpha/beta = mu and alpha/beta^2 = var.
compute_gamma_parameters <- function(mu, var) {
  gamma_scale <- ((mu + var) / mu) - 1  # algebraically var / mu
  shape <- mu / gamma_scale
  list(alpha = shape, beta = 1 / gamma_scale)
}
data_str <- "1976 24 734 0.19
1977 25 516 0.12
1978 31 754 0.15
1979 31 877 0.16
1980 22 814 0.14
1981 21 362 0.06
1982 26 764 0.13
1983 20 809 0.13
1984 16 223 0.03
1985 22 1066 0.15"
dat <- read.table(text=data_str, sep = ' ',
col.names = c('year', 'accidents', 'deaths', 'death_rate'))
# P(H | data, X) =~ P(data|X) * P(X)
variable <- list(name='accidents', xs=0:200, prior_mean=30, prior_stddev=27.5)
variable <- list(name='deaths', xs=0:4000, prior_mean=1000, prior_stddev=500)
y <- dat[,variable$name]
gamma_params <- variable %$% compute_gamma_parameters(prior_mean, prior_stddev^2)
prior <- gamma_params %$% dgamma(variable$xs, shape=alpha, scale=1/beta)
plot(variable$xs, prior, type = 'l')
# By conjugacy - a gamma prior with a poisson likelihood
# gives a gamma posterior with parameters given by these sums.
posterior_alpha <- gamma_params %$% {alpha + sum(y)}
posterior_beta <- gamma_params %$% beta + length(y)
posterior <- dgamma(variable$xs, shape=posterior_alpha, scale=1/posterior_beta)
posterior_draws <- sample(variable$xs, 100000, replace=TRUE, prob=posterior)
# here... we wanna find... the probability of each possible new data point,
# given the location parameter.
# maybe it's: draw a location from the posterior, then create a new
# distribution of the same form as the posterior but with a new location,
# then draw a data point from it? And do this many times?
#
# OK!
# The posterior here is the posterior _over the location parameter_.
# So I need the probability of the predicted point y` for a bunch of y`s
# generated from distributions with locations drawn from that posterior.
# So I did it right!
pospred_ndraws <- 100000
locations <- sample(variable$xs, pospred_ndraws, replace=TRUE, prob=posterior)
# The mean is gamma-distributed, but the data are poisson distributed.
# So when doing simulated data draws, use the poisson - because we're drawing
# from the data distribution!
posterior_predictive <- sapply(
locations,
function(location) {
dens <- rpois(1, location)
dens
})
# for comparison
draws_from_posterior <- rgamma(
pospred_ndraws, shape=posterior_alpha, scale=1/posterior_beta)
hist(draws_from_posterior, breaks=80)
posterior[posterior == Inf] <- 0
posterior %<>% {. / sum(.)}
alpha = 0.4
ggplot() +
geom_histogram(aes(x=draws_from_posterior), bins=100, fill = 'red', alpha=alpha) +
geom_histogram(aes(x=posterior_predictive), bins=30, fill = 'blue', alpha=0.2) +
theme_bw()
# these two are same!
qgamma(c(0.025, 0.975), shape=posterior_alpha, scale=1/posterior_beta)
quantile(locations, probs = c(0.025, 0.975))
# and the wider predictive distribution:
quantile(posterior_predictive, probs = c(0.025, 0.975))
# Wow... so if you do have a posterior and want to predict from it,
# you need to do this extra thing! Don't accidentally talk about
# location parameters when you mean to talk about distributions!
sum(variable$xs * prior)
sum(variable$xs * posterior)
mean(y)
# 13b
# getting miles_flown algebraically:
# death_rate = (deaths / miles_flown) * 1e8
# miles_flown = (deaths / death_rate) * 1e8
dat %<>% dplyr::mutate(miles_flown = (deaths / death_rate) * 1e8)
variable <- list(name = "rate", xs = seq(0, 0.01, 0.0000001))
prior <- list(alpha = 0, beta = 0)
posterior_alpha <- (prior$alpha + sum(y))
posterior_beta <- (prior$beta + sum(dat$miles_flown))
miles_flown_to_predict <- 8e10
n_posterior_draws <- 10000
posterior_draws <- rgamma(n_posterior_draws,
shape=posterior_alpha, scale=1/posterior_beta)
# Unlike the next problem, here we are plugging in a single value for
# the explanatory variable - miles flown - and seeing what the prediction
# of deaths for that number of miles flown is. And this number is hiiiigh!
# So we get a distribution with a higher mean.
posterior_predictive <- rpois(1000, posterior_draws * miles_flown_to_predict)
hist(posterior_predictive)
alpha = 0.4
ggplot() +
geom_histogram(aes(x=posterior_draws * miles_flown_to_predict),
bins=100, fill = 'red', alpha=alpha) +
geom_histogram(aes(x=posterior_predictive), bins=30, fill = 'blue', alpha=0.2) +
theme_bw()
quantile(posterior_predictive, probs=c(0.025, 0.975))
# 20
# a
# Average over all the posteriors for y >= 100.
xs <- 0:2000
prior <- exp()
#guh
# 21
# Estimate the percentage of the (adult) population in each state (excluding
# Alaska, Hawaii, and the District of Columbia) who label themselves as ‘very liberal’.
remove_names <- c('alaska', 'hawaii', 'washington dc')
xs <- seq(0, 1, 0.0001)
survey_df <- foreign::read.dta('data/pew_research_center_june_elect_wknd_data.dta')
variable <- "very_liberal"
survey_df[,variable] <- survey_df['ideo'] == 'very liberal'
states_responses <- survey_df %>%
{base::split(., .[,"state"])}
states_responses %<>% {.[!(names(.) %in% remove_names)]}
yeses <- sapply(states_responses,
function(dat) sum(dat[,variable], na.rm = TRUE))
noes <- sapply(states_responses,
function(dat) sum(!dat[,variable], na.rm = TRUE))
respondents <- yeses + noes
data_df <- tibble::tibble(
state = names(states_responses),
respondents = respondents,
prop = yeses / (yeses + noes))
prior_alpha <- 3
prior_beta <- 20
prior <- dbeta(xs, prior_alpha, prior_beta)
plot(xs, prior, type='l')
posteriors <- Map(
function(nyes, nno) dbeta(xs, prior_alpha + nyes, prior_beta + nno) / length(xs),
yeses, noes)
intervals <- Map(
function(nyes, nno)
# bayestestR::hdi(posterior, ci=0.95),
qbeta(c(0.025, 0.975), prior_alpha + nyes, prior_beta + nno),
yeses, noes)
means <- sapply(posteriors, function(p) sum(xs * p))
lbs <- sapply(intervals, function(tuple) tuple[1])
ubs <- sapply(intervals, function(tuple) tuple[2])
intervals_df <- tibble::tibble(state = names(intervals),
lb = lbs, ub = ubs, mid = means)
ordering <- intervals_df %>% dplyr::arrange(mid) %$% state
intervals_df$state %<>% factor(levels = rev(ordering))
ggplot(intervals_df, aes(y = state)) +
geom_segment(aes(yend = state, x = lb, xend = ub)) +
geom_point(aes(x = mid)) +
theme_bw()
results_df <- read.csv('data/2008ElectionResult.csv')
respondents_df <- survey_df %>% dplyr::select(state, )
states_df <- tibble::tibble(state = tolower(state.name), state_code = state.abb)
results_df$state %<>% tolower()
together_df <- intervals_df %>%
dplyr::inner_join(results_df, by = 'state') %>%
dplyr::inner_join(states_df, by = 'state') %>%
dplyr::inner_join(data_df, by = 'state')
ggplot(together_df, aes(x = vote_Obama_pct)) +
geom_segment(aes(xend = vote_Obama_pct, y = lb, yend = ub),
color='blue', alpha = 0.10) +
geom_text(aes(y = mid, label = state_code), color = 'blue') +
geom_text(aes(y = prop, label = state_code), color = 'red') +
theme_bw() +
labs(y = "% very liberal") +
theme(panel.grid = element_blank())
| /R/ch2.R | no_license | allswellthatsmaxwell/book_exercises | R | false | false | 16,336 | r | library(ggplot2)
library(magrittr)
# Build a numerical inverse of f on [lower, upper].
# The returned closure solves f(x) = y with uniroot() (tol 1e-3) and
# returns the first element of the uniroot result (a list holding $root).
inverse <- function(f, lower, upper){
function(y){
uniroot(function(x){f(x) - y}, lower = lower, upper = upper, tol=1e-3)[1]
}
}
# ch2
# problem 1
# Posterior inference: suppose you have a Beta(4, 4) prior distribution on the probability θ
# that a coin will yield a ‘head’ when spun in a specified manner. The coin is independently
# spun ten times, and ‘heads’ appear fewer than 3 times. You are not told how many heads
# were seen, only that the number is less than 3. Calculate your exact posterior density
# (up to a proportionality constant) for θ and sketch it.
#
# Answer: Average the beta distributions for every possible world. The different worlds...
# might have different weights on them? I think they do? Since there are more ways to
# flip three heads than ways to flip one head - but I don't want to think about that
# so I'm gonna skip it.
xs <- seq(0, 1, 0.01)
priors <- list(a = 4, b = 4)
possible_heads <- 0:3
flips <- 10
possible_distributions <- lapply(
possible_heads,
function(nheads) dbeta(xs, priors$a + nheads,
priors$b + flips - nheads))
average_distribution <- (
Reduce(function(d1, d2) d1 + d2, possible_distributions) /
length(possible_distributions))
plot(xs, possible_distributions[[1]], type = 'l', col = 'gray')
for (d in possible_distributions) {
lines(xs, d, col = 'gray')
}
lines(xs, average_distribution, col = 'red')
# problem 2
# Predictive distributions: consider two coins, C1 and C2, with the following characteristics:
# Pr(heads|C1)=0.6 and Pr(heads|C2)=0.4. Choose one of the coins at random and
# imagine spinning it repeatedly. Given that the first two spins from the chosen coin are
# tails, what is the expectation of the number of additional spins until a head shows up?
#
# Answer: Two parts to this one. First: find the posterior after the two Tails spins.
# Second: compute the mean of the .. predictive distribution of that posterior, I think?
# P(C1|tails) = (P(tails|C1) * P(C1) / a) = 0.4 * 0.5 / a, where
# a =([P(tails|C1) * P(C1) + P(tails|C2) * P(C2)]) = 0.4 * 0.5 + 0.6 * 0.5 =
# 4/10 * 1/2 + 6/10 * 1/2 = 4/20 + 6/20 = 10/20 = 1/2, so we have
# the full expression (4/10 * 5/10) / 1/2 = (1/5) / (1/2) = 2/5.
# P(C1|T,T) = P(tails|C1) * P(C1|tails) / b = 4/10 * 2/5 / b, where b =
# P(tails|C1) * P(C1|tails) + P(tails|C2) * P(C2|tails) =
# 4/10 * 2/5 + 6/10 * 3/5 = 8/50 + 18/50 = 26/50 = b, so the full expression
# is 4/10 * 2/5 / 26/50 = 8/50 * 50/26 = 8/26 = 4/13!
# So now... we think it's a 4/13 chance that it's P(heads|all the above) = 0.6,
# and a 9/13 chance it's P(heads|all the above = 0.4). We can just weight these worlds,
# so our probability of heads on any flip is 4/13 * 6/10 + 9/13 * 4/10 = (24 + 36)/130 =
# 60/130.
# So we want... the number of flips so ... oh!! use exponential distribution maybe!
# the rate is 60/130!
# Or... this is where hierarchical modeling comes in.
# We want to simulate a draw from the 4/13 vs. 9/13, then the draw from the proper exponential,
# many times. Maybe?
outer_ndraws <- 100000
outer_draws <- runif(outer_ndraws) < 4/13 # TRUE means "picked C1"
coin_probas <- sapply(outer_draws, function(draw) if (draw) 0.6 else 0.4)
mean(sapply(coin_probas, function(r) rexp(1, r)))
# problem 3
times <- 1000
xs <- seq(0, 0.3, 0.001)
mu <- 1/6
variance <- sqrt(times * 1/6 * 5/6) / times
plot(xs, dnorm(xs, mu, variance), type='l')
x_locations <- c(0.05, 0.25, 0.50, 0.75, 0.95)
print(qnorm(x_locations, mu, variance))
# problem 4a
xs <- seq(0, 1, 0.001)
means <- c(1/12, 1/6, 1/4)
weights <- c(0.25, 0.5, 0.25)
distributions <- Map(function(mu, weight) weight * dnorm(xs, mu, variance) / times,
means, weights)
mixed <- Reduce(`+`, distributions)
plot(xs, mixed, type = 'l')
cumul_curve <- cumsum(mixed)
x_locations <- c(0.1, 0.2, 0.25, 0.3, 0.4)
quantiles <- quantile(cumul_curve, x_locations)
plot(xs, cumul_curve, type = 'l', xlim = c(0, 0.4))
for (i in seq_along(x_locations)) {
x_loc <- x_locations[i]
q <- quantiles[i]
abline(v = x_loc, h = q, col = 'gray')
}
# problem 8c
# Normal-normal conjugate posterior update (BDA problem 8c).
#
# Fixed quantities: known sampling sd sigma = 20, prior N(mu0 = 180,
# tau0 = 40), observed sample mean ybar = 150 from n observations.
#
# @param n number of observations.
# @return list(mu = posterior mean, sigma = posterior standard deviation).
compute_posterior <- function(n) {
  sigma <- 20   # known sampling sd
  mu0 <- 180    # prior mean
  tau0 <- 40    # prior sd
  ybar <- 150   # observed sample mean
  # Posterior precision is the sum of prior and data precisions:
  # 1/tau_n^2 = 1/tau0^2 + n/sigma^2.
  precision <- (1 / tau0^2) + (n / sigma^2)
  # Posterior mean is the precision-weighted average of mu0 and ybar.
  posterior_mean <- ((mu0 / tau0^2) + ((ybar * n) / sigma^2)) / precision
  # Fix: tau_n = sqrt(1/precision). The original returned 1/precision --
  # the posterior *variance* -- labeled as sigma.
  posterior_sd <- sqrt(1 / precision)
  list(mu = posterior_mean, sigma = posterior_sd)
}
# 90% central posterior interval (5th and 95th percentiles) for the
# posterior returned by compute_posterior(n).
# %$% (magrittr) exposes the list's mu and sigma to the qnorm() call.
get_posterior_interval <- function(n) {
posterior <- compute_posterior(n)
posterior %$% qnorm(c(0.05, 0.95), mu, sigma)
}
n <- 100
results <- list(posterior = compute_posterior(n), interval = get_posterior_interval(n))
xs <- seq(0, 350, 1)
distr <- results %$% posterior %$% dnorm(xs, mu, sigma)
plot(xs, distr, type = 'l')
abline(v=results$interval[[1]])
abline(v=results$interval[[2]])
results
# problem 9
alpha <- 1
beta <- 2/3
xs <- seq(0, 1, 0.0001)
dens <- dbeta(xs, alpha, beta)
points <- rbeta(1000, alpha, beta)
mean(points)
plot(xs, dens, type = 'l')
posterior <- list(a = 651, b = 350.67)
dens <- posterior %$% dbeta(xs, a, b)
rvec <- posterior %$% rbeta(1000, a, b)
mean(rvec)
sd(rvec)
plot(xs, dens, type='l')
# problem 10
# Expected value of a discrete distribution: sum over n in xs of
# n * posterior(n), where posterior is a (normalized) pmf function.
get_posterior_mean <- function(xs, posterior)
sapply(xs, function(n) n * posterior(n)) %>% sum()
# Geometric prior on N with success probability 1/100:
# P(N) = (1/100) * (99/100)^(N - 1) for N = 1, 2, ...
prior <- function(N) {
(1/100) * (99/100)^(N - 1)
}
xs <- 1:500
dens <- sapply(xs, prior)
plot(xs, dens, type='l')
number_seen <- 5
p_data_given_n <- function(n) if (n < number_seen) 0 else 1 / n
prior_on_n <- function(n) (1/100) * (99/100)^(n-1)
posterior <- function(n) p_data_given_n(n) * prior_on_n(n)
max_n <- 1000
xs <- 0:max_n
dens <- sapply(xs, posterior)
c <- 1 / sum(dens)
x_range <- number_seen:max_n
posterior_mean <- sapply(x_range, function(n) c * n * posterior(n)) %>% sum()
posterior_mean
posterior_variance <- x_range %>%
sapply(function(n) ((n - posterior_mean)^2) * (c/n) * (1/100) * (99/100)^n) %>%
sum()
posterior_sd <- sqrt(posterior_variance)
posterior_sd
plot(xs, dens, type='l')
abline(v=posterior_mean, col='blue')
# 10c
# Expected value of a discrete distribution: sum over n in xs of
# n * posterior(n), where posterior is a (normalized) pmf function.
get_posterior_mean <- function(xs, posterior) {
sapply(xs, function(n) n * posterior(n)) %>% sum()
}
number_seen <- 203
prior_max <- 1000
xs <- 1:prior_max
# uniform prior
prior <- Vectorize(function(n) if (n <= prior_max) 1/prior_max else 0)
# geometric prior
prior <- Vectorize(function(n) (1/100) * (99/100)^(n - 1))
## These two are broken..
# gaussian prior
# prior <- Vectorize(
# function(n) {
# dens <- dnorm(n, 500, 300)
# dens[dens < 0] <- 0
# function(n) dens[n]
# }())
# # exponential prior
# prior <- Vectorize(function(n) {
# dens <- dexp(xs, 0.2)
# function(n) dens[n]
# })
# plot(xs, prior(xs), type='l')
p_data_given_n <- Vectorize(function(n)
if (n >= number_seen) 1/n
else 0
)
unnormalized_posterior <- Vectorize(function(n)
prior(n) * p_data_given_n(n)
)
c <- sum(unnormalized_posterior(xs))
posterior_fn <- Vectorize(function(n) unnormalized_posterior(n) / c)
posterior_vec <- posterior_fn(xs)
stopifnot(sum(posterior_vec) == 1)
posterior_mean <- get_posterior_mean(xs, posterior_fn)
plot_settings <- list(fn = plot, color = 'black')
plot_settings <- list(fn = lines, color = 'red')
plot_settings %$% fn(xs, posterior_vec, col=color, type='l')
plot_settings %$% abline(v=posterior_mean, col=color)
posterior_mean
plot_settings %$% lines(xs, prior(xs), type='l', col=color)
abline(v=get_posterior_mean(xs, prior))
# OK so woooo the answer to the taxicab problem definitely depends
# a lot on the prior! Which makes sense cuz one datum but still!
# We could make a fun site where you can play with this problem <hug emoji>
# with different priors, and drawing taxis seen, and, and...
# or a blog post! We can use the evolving-distribution gif style
# from the CLT post. One main insight is that you're moving probability
# from < 203 to the LHS of 203+ with the update.
# Does seeing 503, then 203, differ from seeing 503 alone? Probably no?
#####
# 11
y <- c(43, 44, 45, 46.5, 47.5)
prior <- Vectorize(
function(theta) if (theta >= 0 && theta <= 100) 1/theta else 0
)
# Cauchy(theta, 1) likelihood of the data vector y evaluated at each
# candidate location in thetas; returns one likelihood per theta.
dens <- function (y, thetas) {
sapply(thetas, function(th) prod(dcauchy(y, th, 1)))
}
dtheta <- .01
theta <- seq(0, 100, dtheta)
unnormalized_posterior <- dens(y, theta)
normalized_posterior <- unnormalized_posterior / sum(unnormalized_posterior)
plot(theta, normalized_posterior, type='l',
ylim=c(0, 1.1 * max(normalized_posterior)))
sum(theta * normalized_posterior)
?dcauchy
# 11b
posterior_draws <- sample(theta, 1000, replace=TRUE, prob=normalized_posterior)
hist(posterior_draws, breaks=50)
# 11c
# So to get the posterior predictive distribution for a parameter theta...
# we take many draws of theta from the posterior... and generate a random
# number from the "likelihood"... is what the website said... so does
# that mean the cauchy is the likelihood here? Hmh.
# WRITE THIS DOWN THOOO :) here's getting closer to knowing how to
# draw from predictive distributions.
posterior_predictive <- rcauchy(length(posterior_draws), posterior_draws, 1)
hist(posterior_predictive, breaks=100)
# 13a
# Method-of-moments gamma parameters for a target mean mu and variance var.
# theta (scale) = var/mu; alpha (shape) = mu/theta; beta (rate) = 1/theta,
# so that alpha/beta = mu and alpha/beta^2 = var.
compute_gamma_parameters <- function(mu, var) {
theta <- ((mu + var) / mu) - 1  # simplifies to var / mu
k <- mu / theta
beta <- 1 / theta
alpha <- k
list(alpha = alpha, beta = beta)
}
data_str <- "1976 24 734 0.19
1977 25 516 0.12
1978 31 754 0.15
1979 31 877 0.16
1980 22 814 0.14
1981 21 362 0.06
1982 26 764 0.13
1983 20 809 0.13
1984 16 223 0.03
1985 22 1066 0.15"
dat <- read.table(text=data_str, sep = ' ',
col.names = c('year', 'accidents', 'deaths', 'death_rate'))
# P(H | data, X) =~ P(data|X) * P(X)
variable <- list(name='accidents', xs=0:200, prior_mean=30, prior_stddev=27.5)
variable <- list(name='deaths', xs=0:4000, prior_mean=1000, prior_stddev=500)
y <- dat[,variable$name]
gamma_params <- variable %$% compute_gamma_parameters(prior_mean, prior_stddev^2)
prior <- gamma_params %$% dgamma(variable$xs, shape=alpha, scale=1/beta)
plot(variable$xs, prior, type = 'l')
# By conjugacy - a gamma prior with a poisson likelihood
# gives a gamma posterior with parameters given by these sums.
posterior_alpha <- gamma_params %$% {alpha + sum(y)}
posterior_beta <- gamma_params %$% beta + length(y)
posterior <- dgamma(variable$xs, shape=posterior_alpha, scale=1/posterior_beta)
posterior_draws <- sample(variable$xs, 100000, replace=TRUE, prob=posterior)
# here... we wanna find... the probability of each possible new data point,
# given the location parameter.
# maybe it's: draw a location from the posterior, then create a new
# distribution of the same form as the posterior but with a new location,
# then draw a data point from it? And do this many times?
#
# OK!
# The posterior here is the posterior _over the location parameter_.
# So I need the probability of the predicted point y` for a bunch of y`s
# generated from distributions with locations drawn from that posterior.
# So I did it right!
pospred_ndraws <- 100000
locations <- sample(variable$xs, pospred_ndraws, replace=TRUE, prob=posterior)
# The mean is gamma-distributed, but the data are poisson distributed.
# So when doing simulated data draws, use the poisson - because we're drawing
# from the data distribution!
posterior_predictive <- sapply(
locations,
function(location) {
dens <- rpois(1, location)
dens
})
# for comparison
draws_from_posterior <- rgamma(
pospred_ndraws, shape=posterior_alpha, scale=1/posterior_beta)
hist(draws_from_posterior, breaks=80)
posterior[posterior == Inf] <- 0
posterior %<>% {. / sum(.)}
alpha = 0.4
ggplot() +
geom_histogram(aes(x=draws_from_posterior), bins=100, fill = 'red', alpha=alpha) +
geom_histogram(aes(x=posterior_predictive), bins=30, fill = 'blue', alpha=0.2) +
theme_bw()
# these two are same!
qgamma(c(0.025, 0.975), shape=posterior_alpha, scale=1/posterior_beta)
quantile(locations, probs = c(0.025, 0.975))
# and the wider predictive distribution:
quantile(posterior_predictive, probs = c(0.025, 0.975))
# Wow... so if you do have a posterior and want to predict from it,
# you need to do this extra thing! Don't accidentally talk about
# location parameters when you mean to talk about distributions!
sum(variable$xs * prior)
sum(variable$xs * posterior)
mean(y)
# 13b
# getting miles_flown algebraically:
# death_rate = (deaths / miles_flown) * 1e8
# miles_flown = (deaths / death_rate) * 1e8
dat %<>% dplyr::mutate(miles_flown = (deaths / death_rate) * 1e8)
variable <- list(name = "rate", xs = seq(0, 0.01, 0.0000001))
prior <- list(alpha = 0, beta = 0)
posterior_alpha <- (prior$alpha + sum(y))
posterior_beta <- (prior$beta + sum(dat$miles_flown))
miles_flown_to_predict <- 8e10
n_posterior_draws <- 10000
posterior_draws <- rgamma(n_posterior_draws,
shape=posterior_alpha, scale=1/posterior_beta)
# Unlike the next problem, here we are plugging in a single value for
# the explanatory variable - miles flown - and seeing what the prediction
# of deaths for that number of miles flown is. And this number is hiiiigh!
# So we get a distribution with a higher mean.
posterior_predictive <- rpois(1000, posterior_draws * miles_flown_to_predict)
hist(posterior_predictive)
alpha = 0.4
ggplot() +
geom_histogram(aes(x=posterior_draws * miles_flown_to_predict),
bins=100, fill = 'red', alpha=alpha) +
geom_histogram(aes(x=posterior_predictive), bins=30, fill = 'blue', alpha=0.2) +
theme_bw()
quantile(posterior_predictive, probs=c(0.025, 0.975))
# 20
# a
# Average over all the posteriors for y >= 100.
xs <- 0:2000
prior <- exp()
#guh
# 21
# Estimate the percentage of the (adult) population in each state (excluding
# Alaska, Hawaii, and the District of Columbia) who label themselves as ‘very liberal’.
remove_names <- c('alaska', 'hawaii', 'washington dc')
xs <- seq(0, 1, 0.0001)
survey_df <- foreign::read.dta('data/pew_research_center_june_elect_wknd_data.dta')
variable <- "very_liberal"
survey_df[,variable] <- survey_df['ideo'] == 'very liberal'
states_responses <- survey_df %>%
{base::split(., .[,"state"])}
states_responses %<>% {.[!(names(.) %in% remove_names)]}
yeses <- sapply(states_responses,
function(dat) sum(dat[,variable], na.rm = TRUE))
noes <- sapply(states_responses,
function(dat) sum(!dat[,variable], na.rm = TRUE))
respondents <- yeses + noes
data_df <- tibble::tibble(
state = names(states_responses),
respondents = respondents,
prop = yeses / (yeses + noes))
prior_alpha <- 3
prior_beta <- 20
prior <- dbeta(xs, prior_alpha, prior_beta)
plot(xs, prior, type='l')
posteriors <- Map(
function(nyes, nno) dbeta(xs, prior_alpha + nyes, prior_beta + nno) / length(xs),
yeses, noes)
intervals <- Map(
function(nyes, nno)
# bayestestR::hdi(posterior, ci=0.95),
qbeta(c(0.025, 0.975), prior_alpha + nyes, prior_beta + nno),
yeses, noes)
means <- sapply(posteriors, function(p) sum(xs * p))
lbs <- sapply(intervals, function(tuple) tuple[1])
ubs <- sapply(intervals, function(tuple) tuple[2])
intervals_df <- tibble::tibble(state = names(intervals),
lb = lbs, ub = ubs, mid = means)
ordering <- intervals_df %>% dplyr::arrange(mid) %$% state
intervals_df$state %<>% factor(levels = rev(ordering))
ggplot(intervals_df, aes(y = state)) +
geom_segment(aes(yend = state, x = lb, xend = ub)) +
geom_point(aes(x = mid)) +
theme_bw()
results_df <- read.csv('data/2008ElectionResult.csv')
respondents_df <- survey_df %>% dplyr::select(state, )
states_df <- tibble::tibble(state = tolower(state.name), state_code = state.abb)
results_df$state %<>% tolower()
together_df <- intervals_df %>%
dplyr::inner_join(results_df, by = 'state') %>%
dplyr::inner_join(states_df, by = 'state') %>%
dplyr::inner_join(data_df, by = 'state')
ggplot(together_df, aes(x = vote_Obama_pct)) +
geom_segment(aes(xend = vote_Obama_pct, y = lb, yend = ub),
color='blue', alpha = 0.10) +
geom_text(aes(y = mid, label = state_code), color = 'blue') +
geom_text(aes(y = prop, label = state_code), color = 'red') +
theme_bw() +
labs(y = "% very liberal") +
theme(panel.grid = element_blank())
|
library(MicrobiomeR)
library(testthat)
context("Testing of metacoder formatting functions")
# which_format() should report the MicrobiomeR format name of a taxmap object
# for each of the four supported formats.
test_that("which_format works", {
  expect_equal(which_format(create_taxmap(phyloseq_silva_2)), "phyloseq_format")
  expect_equal(which_format(raw_silva_2), "raw_format")
  expect_equal(which_format(as_basic_format(raw_silva_2)), "basic_format")
  expect_equal(which_format(as_analyzed_format(raw_silva_2)), "analyzed_format")
})
# as_raw_format() should convert a phyloseq-format taxmap into raw format.
test_that("as_*_format works", {
  expect_true(is_raw_format(as_raw_format(create_taxmap(phyloseq_silva_2))))
})
# Each is_*_format() predicate should accept its own format and reject others.
test_that("is_*_format works", {
  expect_true(is_phyloseq_format(create_taxmap(phyloseq_silva_2)))
  expect_false(is_phyloseq_format(raw_silva_2))
  expect_true(is_raw_format(raw_silva_2))
  expect_false(is_raw_format(create_taxmap(phyloseq_silva_2)))
  expect_true(is_basic_format(as_basic_format(raw_silva_2)))
  expect_false(is_basic_format(raw_silva_2))
  expect_true(is_analyzed_format(as_analyzed_format(raw_silva_2)))
  expect_false(is_analyzed_format(raw_silva_2))
})
| /tests/testthat/test-metacoder-formatting.R | permissive | sekingsley/MicrobiomeR | R | false | false | 1,017 | r | library(MicrobiomeR)
library(testthat)
context("Testing of metacoder formatting functions")
# which_format() should report the MicrobiomeR format name of a taxmap object
# for each of the four supported formats.
test_that("which_format works", {
  expect_equal(which_format(create_taxmap(phyloseq_silva_2)), "phyloseq_format")
  expect_equal(which_format(raw_silva_2), "raw_format")
  expect_equal(which_format(as_basic_format(raw_silva_2)), "basic_format")
  expect_equal(which_format(as_analyzed_format(raw_silva_2)), "analyzed_format")
})
# as_raw_format() should convert a phyloseq-format taxmap into raw format.
test_that("as_*_format works", {
  expect_true(is_raw_format(as_raw_format(create_taxmap(phyloseq_silva_2))))
})
# Each is_*_format() predicate should accept its own format and reject others.
test_that("is_*_format works", {
  expect_true(is_phyloseq_format(create_taxmap(phyloseq_silva_2)))
  expect_false(is_phyloseq_format(raw_silva_2))
  expect_true(is_raw_format(raw_silva_2))
  expect_false(is_raw_format(create_taxmap(phyloseq_silva_2)))
  expect_true(is_basic_format(as_basic_format(raw_silva_2)))
  expect_false(is_basic_format(raw_silva_2))
  expect_true(is_analyzed_format(as_analyzed_format(raw_silva_2)))
  expect_false(is_analyzed_format(raw_silva_2))
})
|
library(cepp)
data(Colon)
alondat <- Colon$X
alony <- Colon$Y
alonnames <- Colon$gene.names
alondatlognorm <- apply(alondat,2,log)
alondatlognorm <- apply(alondatlognorm,2,function(x) (x - mean(x))/sd(x))
zscores <- rep(NA,2000)
pvals <- rep(NA,2000)
for (i in 1:2000){
X.i <- alondatlognorm[,i]
my.summary <- summary(glm(y ~ X.i + 0,
data = data.frame(X.i,y = alony),
family = "binomial"))$coefficients
zscores[i] <- my.summary[3]
pvals[i] <- my.summary[4]
}
alonX <- alondatlognorm[,order(pvals)[1:500]]
alonnames.pared <- alonnames[order(pvals)[1:500]]
aloncass <- stan("cass_logistic.stan",
data = list(N = 62,
ncov = 500,
y = alony,
x = alonX,
sigma_indic = 10,
mu_indic = 0,
tau = 5),
chains = 4,
iter = 1000,
control = list(adapt_delta = 0.99))
## LOOCV
indices.pos <- which(alony == 1)
indices.neg <- which(alony == 0)
to.remove <- rep(NA,62)
for (i in 1:62){
if (alony[i] == 1){
to.remove[i] <- sample(indices.neg,1)
} else{
to.remove[i] <- sample(indices.pos,1)
}
}
loolist <- list()
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
loolist[[i]] <- stan("cass_logistic.stan",
data = list(N = 60,
ncov = 500,
y = newy,
x = newX,
sigma_indic = 10,
mu_indic = -2,
tau = 5),
chains = 1,
iter = 500,
control = list(adapt_delta = 0.99))
}
## posterior predictions
inv_logit <- function(x) 1/(1 + exp(-x))
mean.pred <- rep(NA,62)
pred <- rep(NA,250)
for (i in 1:62){
beta.matrix <- extract(loolist[[i]],pars = "beta")$beta
X.loo <- alonX[i,]
for (j in 1:250){
pred[j] <- rbinom(1,1,inv_logit(X.loo%*%beta.matrix[j,]))
}
mean.pred[i] <- mean(pred)
}
## loo cv for random forest
library(randomForest)
mean.pred.rf <- rep(NA,62)
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
mean.pred.rf[i] <- predict(randomForest(newX,as.factor(newy),ntree = 1000),newdata = alonX[i,],type = "prob")[2]
}
## loo cv for lasso
library(glmnet)
mean.pred.lasso <- rep(NA,62)
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
mean.pred.lasso[i] <- predict(cv.glmnet(newX,newy,family = "binomial"),newx = t(as.matrix(alonX[i,])),type = "response")
}
## loo cv for NN
library(MicrosoftML)
mean.pred.nn <- rep(NA,62)
nnform <- paste0("newy ~ ", paste0("X",1:500,collapse = "+"))
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
nnfit <- rxNeuralNet(formula = nnform,data = data.frame(newX,newy),numHiddenNodes = 200,numIterations = 10000,reportProgress = 0, verbose = 0)
mean.pred.nn[i] <- as.numeric(rxPredict(nnfit,data = data.frame(t(alonX[i,]),newy = alony[i])))[3]
}
## compute AUCS
library(pROC)
alon.auc.cass <- auc(roc(alony,mean.pred))
alon.auc.lasso <- auc(roc(alony,mean.pred.lasso))
alon.auc.rf <- auc(roc(alony,mean.pred.rf))
alon.auc.nn <- auc(roc(alony,mean.pred.nn))
| /alon_script.R | no_license | willthomson1/RS-Interface-code | R | false | false | 3,655 | r | library(cepp)
data(Colon)
alondat <- Colon$X
alony <- Colon$Y
alonnames <- Colon$gene.names
alondatlognorm <- apply(alondat,2,log)
alondatlognorm <- apply(alondatlognorm,2,function(x) (x - mean(x))/sd(x))
zscores <- rep(NA,2000)
pvals <- rep(NA,2000)
for (i in 1:2000){
X.i <- alondatlognorm[,i]
my.summary <- summary(glm(y ~ X.i + 0,
data = data.frame(X.i,y = alony),
family = "binomial"))$coefficients
zscores[i] <- my.summary[3]
pvals[i] <- my.summary[4]
}
alonX <- alondatlognorm[,order(pvals)[1:500]]
alonnames.pared <- alonnames[order(pvals)[1:500]]
aloncass <- stan("cass_logistic.stan",
data = list(N = 62,
ncov = 500,
y = alony,
x = alonX,
sigma_indic = 10,
mu_indic = 0,
tau = 5),
chains = 4,
iter = 1000,
control = list(adapt_delta = 0.99))
## LOOCV
indices.pos <- which(alony == 1)
indices.neg <- which(alony == 0)
to.remove <- rep(NA,62)
for (i in 1:62){
if (alony[i] == 1){
to.remove[i] <- sample(indices.neg,1)
} else{
to.remove[i] <- sample(indices.pos,1)
}
}
loolist <- list()
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
loolist[[i]] <- stan("cass_logistic.stan",
data = list(N = 60,
ncov = 500,
y = newy,
x = newX,
sigma_indic = 10,
mu_indic = -2,
tau = 5),
chains = 1,
iter = 500,
control = list(adapt_delta = 0.99))
}
## posterior predictions
inv_logit <- function(x) 1/(1 + exp(-x))
mean.pred <- rep(NA,62)
pred <- rep(NA,250)
for (i in 1:62){
beta.matrix <- extract(loolist[[i]],pars = "beta")$beta
X.loo <- alonX[i,]
for (j in 1:250){
pred[j] <- rbinom(1,1,inv_logit(X.loo%*%beta.matrix[j,]))
}
mean.pred[i] <- mean(pred)
}
## loo cv for random forest
library(randomForest)
mean.pred.rf <- rep(NA,62)
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
mean.pred.rf[i] <- predict(randomForest(newX,as.factor(newy),ntree = 1000),newdata = alonX[i,],type = "prob")[2]
}
## loo cv for lasso
library(glmnet)
mean.pred.lasso <- rep(NA,62)
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
mean.pred.lasso[i] <- predict(cv.glmnet(newX,newy,family = "binomial"),newx = t(as.matrix(alonX[i,])),type = "response")
}
## loo cv for NN
library(MicrosoftML)
mean.pred.nn <- rep(NA,62)
nnform <- paste0("newy ~ ", paste0("X",1:500,collapse = "+"))
for (i in 1:62){
newX <- alonX[-c(i,to.remove[i]),]
newy <- alony[-c(i,to.remove[i])]
nnfit <- rxNeuralNet(formula = nnform,data = data.frame(newX,newy),numHiddenNodes = 200,numIterations = 10000,reportProgress = 0, verbose = 0)
mean.pred.nn[i] <- as.numeric(rxPredict(nnfit,data = data.frame(t(alonX[i,]),newy = alony[i])))[3]
}
## compute AUCS
library(pROC)
alon.auc.cass <- auc(roc(alony,mean.pred))
alon.auc.lasso <- auc(roc(alony,mean.pred.lasso))
alon.auc.rf <- auc(roc(alony,mean.pred.rf))
alon.auc.nn <- auc(roc(alony,mean.pred.nn))
|
# Transforms the original distribution to a (near-)normal one -- very close to
# normal, since we clip the tails -- expressed on a 0-2 scale with mean 1.
f_transform <- function(x){
  # empirical CDF of the input sample
  f_tmp <- ecdf(x)
  # fun_norm: reusable transformer for new values on the same scale;
  # unif: x mapped to the probability (uniform) scale;
  # normal: x mapped through qnorm(., mean = 1, sd = 1/3) and squeezed into
  # [0, 2] so extreme quantiles cannot leave the scale.
  # The 1/(2*length(x)) shift keeps the largest observation strictly below
  # probability 1, where qnorm would return Inf.
  return(list(fun_norm = function(y)
    mice::squeeze(qnorm(f_tmp(y) - 1/(2*length(x)), 1, 1/3), c(0, 2)),
    unif = f_tmp(x), normal = mice::squeeze(qnorm(f_tmp(x) - 1/(2*length(x)), 1, 1/3), c(0, 2))))
} | /R/ad-hocowe/f_transform.R | no_license | basiaczarnota/przydatne-w-pracy | R | false | false | 398 | r | # funkcja transformuje rozkład pierwotny na normalny (bardzo zbliżony do niego, bo obcinamy ogony) wyrażony na skali 0-2 ze średnią 1.
# Transforms the original distribution to a (near-)normal one -- very close to
# normal, since we clip the tails -- expressed on a 0-2 scale with mean 1.
f_transform <- function(x){
  # empirical CDF of the input sample
  f_tmp <- ecdf(x)
  # fun_norm: reusable transformer for new values; unif: x on the probability
  # scale; normal: x through qnorm(., mean = 1, sd = 1/3) squeezed into [0, 2].
  # The 1/(2*length(x)) shift keeps the top observation below probability 1
  # (qnorm(1) would be Inf).
  return(list(fun_norm = function(y)
    mice::squeeze(qnorm(f_tmp(y) - 1/(2*length(x)), 1, 1/3), c(0, 2)),
    unif = f_tmp(x), normal = mice::squeeze(qnorm(f_tmp(x) - 1/(2*length(x)), 1, 1/3), c(0, 2))))
} |
## Coursera RPROG-011 FEB 2015
## Programming assignment 2 - pskelton
## The follwing functions make a special matrix which can cache the inverse of itself for future use.
## This functions specifies the fucntions of the special matrix
## Build a matrix wrapper that can memoise its own inverse.
## Returns a list of accessor closures sharing one enclosing environment,
## which holds the matrix `x` and the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; NULL means "not computed yet"
  cached_inverse <- NULL
  list(
    # replace the stored matrix and drop any stale cached inverse;
    # "<<-" writes into the shared closure environment
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # return the stored matrix
    get = function() x,
    # store a freshly computed inverse
    setinv = function(inv) cached_inverse <<- inv,
    # return the cached inverse (NULL if not yet computed)
    getinv = function() cached_inverse
  )
}
## This function calculates and caches the inverse of our sepcial matrix
## If the inverse has already been calculated it will return the cached value
## Return the inverse of a cache-matrix created by makeCacheMatrix(),
## computing it with solve() only on a cache miss and reusing the stored
## value on later calls. The matrix is assumed to be invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # cache miss: invert the stored matrix and remember the result
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("Getting cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | pskelton/ProgrammingAssignment2 | R | false | false | 1,644 | r | ## Coursera RPROG-011 FEB 2015
## Programming assignment 2 - pskelton
## The follwing functions make a special matrix which can cache the inverse of itself for future use.
## This functions specifies the fucntions of the special matrix
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing one environment.
makeCacheMatrix <- function(x = matrix()) {
  # creates empty holder for the cached inverse
  minv <- NULL
  # function to set the value of the matrix
  # "<<-" assigns in the enclosing closure environment (not the global
  # environment), so all accessor functions below see the update
  set <- function(y) {
    x <<- y
    # any previously cached inverse is now stale
    minv <<- NULL
  }
  # function to retrieve the matrix
  get <- function() {
    x
  }
  # function to set the inverse
  setinv <- function(inv) {
    minv <<- inv
  }
  # function to retrieve the inverse (NULL until cacheSolve() stores it)
  getinv <- function() {
    minv
  }
  # return a list of the functions
  list(set=set, get=get, setinv=setinv, getinv=getinv)
}
## This function calculates and caches the inverse of our sepcial matrix
## If the inverse has already been calculated it will return the cached value
## Return the inverse of a special matrix made by makeCacheMatrix(),
## using the cached value when available and otherwise computing it
## with solve() and storing it in the cache.
cacheSolve <- function(x, ...) {
  # try to retrieve the inverse
  minv <- x$getinv()
  # check if the inverse is already there and return it if so
  if (!is.null(minv)) {
    message("Getting cached inverse")
    return(minv)
  }
  # inverse is NULL so it needs to be calculated
  # get the matrix
  mat <- x$get()
  # solve the matrix - we are assuming the matrix is always invertible here
  minv <- solve(mat, ...)
  # set the inverse in the cache
  x$setinv(minv)
  # return the inverse
  minv
}
|
#!/usr/bin/env Rscript
library(pROC, quietly = T)
library(data.table, quietly = T)
library(readr, quietly = T)
Args <- commandArgs(T)
target <- Args[1]
data_path1 <- Args[2]
data_path2 <- Args[3]
output_path <- Args[4]
## Fit a univariate, intercept-free logistic regression of `judge` on one
## marker column and summarise its discriminative performance.
##
## Arguments:
##   df_train - data.frame with a `judge` column (factor "TRUE"/"FALSE") and
##              the marker column named by ProbeID; rows with NA are dropped
##   df_test  - held-out data.frame with the same two columns
##   ProbeID  - name (string) of the marker column to test
##
## Returns a one-row data.frame with: marker name, absolute median/mean group
## differences, the coefficient and its p-value, McFadden pseudo-R^2, the
## likelihood-ratio test p-value, the training ROC AUC and the test ROC AUC.
## Numeric summaries are formatted as fixed-width strings; literal "NA"
## strings are emitted when the model yields no usable coefficient (kept for
## output compatibility with downstream parsing).
do_logistic_ML <-
  function(df_train,
           df_test,
           ProbeID) {
    formula_string <- paste("judge ~ ", ProbeID, sep = "")
    df_train <- na.omit(df_train)
    df_test <- na.omit(df_test)
    # absolute location differences between the two judge groups
    median_diff <-
      abs(median(df_train[df_train$judge == TRUE, ProbeID]) -
            median(df_train[df_train$judge == FALSE, ProbeID]))
    mean_diff <-
      abs(mean(df_train[df_train$judge == TRUE, ProbeID]) -
            mean(df_train[df_train$judge == FALSE, ProbeID]))
    res_logistic <-
      glm(as.formula(formula_string), df_train, family = "binomial")
    # training AUC is only defined when glm kept a fitted value for every row
    if (length(df_train$judge) == length(res_logistic$fitted.values)) {
      rocauc <- format(round(
        roc(
          df_train$judge,
          res_logistic$fitted.values,
          levels = c(FALSE, TRUE),
          direction = "<"
        )$auc,
        5
      ), nsmall = 5)
    } else {
      rocauc <- NA
    }
    # test AUC from the linear predictor on the held-out samples
    testauc <- format(round(
      roc(
        df_test$judge,
        predict(res_logistic, df_test, type = "link"),
        levels = c(FALSE, TRUE),
        direction = "<"
      )$auc,
      5
    ), nsmall = 5)
    if (nrow(summary(res_logistic)$coefficients) > 1) {
      coefs <- summary(res_logistic)$coefficients
      # log-likelihoods recovered from deviances: ll = deviance / -2
      null_ll <- summary(res_logistic)$null.deviance / -2
      model_ll <- summary(res_logistic)$deviance / -2
      return(as.data.frame(
        cbind(
          ProbeID,
          median_diff,
          mean_diff,
          format(round(coefs[2, 1], 5), nsmall = 5),
          format(round(coefs[2, 4], 5), nsmall = 5),
          # McFadden pseudo-R^2
          format(round((null_ll - model_ll) / null_ll, 5), nsmall = 5),
          # likelihood-ratio chi-square p-value, 1 df
          format(round(1 - pchisq(2 * (model_ll - null_ll), df = 1), 10),
                 nsmall = 10),
          rocauc,
          testauc
        ),
        # was misspelled "stringAsFactor" and silently ignored via "..."
        stringsAsFactors = FALSE
      ))
    } else {
      # no marker coefficient estimated: emit placeholder "NA" strings
      return(as.data.frame(
        cbind(
          ProbeID,
          median_diff,
          mean_diff,
          "NA",
          "NA",
          "NA",
          "NA",
          rocauc,
          testauc
        ),
        stringsAsFactors = FALSE
      ))
    }
  }
train_df <-
read.csv(data_path1,
header = T,
row.names = 1,
sep = "\t")
test_df <-
read.csv(data_path2,
header = T,
row.names = 1,
sep = "\t")
train_df$judge <- F
train_df[train_df$status == target,]$judge <- T
train_df$judge <- as.factor(train_df$judge)
test_df$judge <- F
test_df[test_df$status == target,]$judge <- T
test_df$judge <- as.factor(test_df$judge)
write_delim(
data.frame(t(
c(
"#marker",
"median_diff",
"mean_diff",
"coef",
"coef_p",
"R_2",
"reg_p",
"rocauc",
"testauc"
)
)),
output_path,
append = TRUE,
col_names = F,
delim = "\t"
)
for (ProbeID in colnames(train_df)[3:ncol(train_df) - 1]) {
output <-
do_logistic_ML(train_df[, which(names(train_df) %in% c(ProbeID, "judge"))],
test_df[, which(names(test_df) %in% c(ProbeID, "judge"))],
ProbeID)
write_delim(
output,
output_path,
append = TRUE,
col_names = F,
delim = "\t"
)
} | /230215/univariate_Twist.R | no_license | IvanWoo22/DayDayUp | R | false | false | 3,625 | r | #!/usr/bin/env Rscript
library(pROC, quietly = T)
library(data.table, quietly = T)
library(readr, quietly = T)
Args <- commandArgs(T)
target <- Args[1]
data_path1 <- Args[2]
data_path2 <- Args[3]
output_path <- Args[4]
## Fit a univariate, intercept-free logistic regression of `judge` on one
## marker column (ProbeID) and summarise its discriminative performance:
## group location differences, coefficient and p-value, McFadden pseudo-R^2,
## likelihood-ratio p-value, training and test ROC AUC. Returns a one-row
## data.frame; literal "NA" strings are used when no coefficient was fit.
do_logistic_ML <-
  function(df_train,
           df_test,
           ProbeID) {
    formula_string <- paste("judge ~ ", ProbeID, sep = "")
    df_train <- na.omit(df_train)
    df_test <- na.omit(df_test)
    # absolute location differences between the two judge groups
    median_diff <-
      abs(median(df_train[df_train$judge == T, ProbeID]) - median(df_train[df_train$judge ==
        F, ProbeID]))
    mean_diff <-
      abs(mean(df_train[df_train$judge == T, ProbeID]) - mean(df_train[df_train$judge ==
        F, ProbeID]))
    res_logistic <-
      glm(as.formula(formula_string), df_train, family = "binomial")
    # training AUC only when glm kept a fitted value for every row
    if (length(df_train$judge) == length(res_logistic$fitted.values)) {
      rocauc <- format(round(
        roc(
          df_train$judge,
          res_logistic$fitted.values,
          levels = c(F, T),
          direction = "<"
        )$auc,
        5
      ), nsmall = 5)
    } else {
      rocauc <- NA
    }
    # test AUC from the linear predictor on held-out samples
    testauc <- format(round(
      roc(
        df_test$judge,
        predict(res_logistic, df_test, type = "link"),
        levels = c(F, T),
        direction = "<"
      )$auc,
      5
    ), nsmall = 5)
    if (nrow(summary(res_logistic)$coefficients) > 1) {
      return(as.data.frame(
        cbind(
          ProbeID,
          median_diff,
          mean_diff,
          format(round(
            summary(res_logistic)$coefficients[2, 1], 5
          ), nsmall = 5),
          format(round(
            summary(res_logistic)$coefficients[2, 4], 5
          ), nsmall = 5),
          # McFadden pseudo-R^2 from null/model deviances (ll = deviance / -2)
          format(round(((
            summary(res_logistic)$null.deviance / -2
          ) - (summary(res_logistic)$deviance / -2)) / (summary(res_logistic)$null.deviance / -2), 5
          ), nsmall = 5),
          # likelihood-ratio chi-square p-value, 1 df
          format(round(1 - pchisq(2 * (
            (summary(res_logistic)$deviance / -2) - (summary(res_logistic)$null.deviance / -2)
          ), df = 1), 10), nsmall = 10),
          rocauc,
          testauc
        ),
        # NOTE(review): "stringAsFactor" is misspelled (should be
        # stringsAsFactors) and is silently swallowed by as.data.frame's "..."
        stringAsFactor = F
      ))
    } else{
      # no marker coefficient estimated: emit placeholder "NA" strings
      return(as.data.frame(
        cbind(
          ProbeID,
          median_diff,
          mean_diff,
          "NA",
          "NA",
          "NA",
          "NA",
          rocauc,
          testauc
        ),
        stringAsFactor = F
      ))
    }
  }
train_df <-
read.csv(data_path1,
header = T,
row.names = 1,
sep = "\t")
test_df <-
read.csv(data_path2,
header = T,
row.names = 1,
sep = "\t")
train_df$judge <- F
train_df[train_df$status == target,]$judge <- T
train_df$judge <- as.factor(train_df$judge)
test_df$judge <- F
test_df[test_df$status == target,]$judge <- T
test_df$judge <- as.factor(test_df$judge)
write_delim(
data.frame(t(
c(
"#marker",
"median_diff",
"mean_diff",
"coef",
"coef_p",
"R_2",
"reg_p",
"rocauc",
"testauc"
)
)),
output_path,
append = TRUE,
col_names = F,
delim = "\t"
)
for (ProbeID in colnames(train_df)[3:ncol(train_df) - 1]) {
output <-
do_logistic_ML(train_df[, which(names(train_df) %in% c(ProbeID, "judge"))],
test_df[, which(names(test_df) %in% c(ProbeID, "judge"))],
ProbeID)
write_delim(
output,
output_path,
append = TRUE,
col_names = F,
delim = "\t"
)
} |
devtools::install_github("rmcelreath/rethinking")
library(rethinking)
# note, rstan is necessary and needs to be set up appropriately see:
# https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started | /install_load_packages.R | permissive | L-Marley/Statistical_Rethinking | R | false | false | 203 | r | devtools::install_github("rmcelreath/rethinking")
library(rethinking)
# note, rstan is necessary and needs to be set up appropriately see:
# https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started |
# Timed integration test: download FungiDB metadata via EuPathDB and check
# basic expectations about its size and a known species entry.
start <- as.POSIXlt(Sys.time())
library(testthat)
library(EuPathDB)
# NOTE(review): this context string contains an embedded newline and a stray
# "12\n", and its label ("001") disagrees with the message below ("002") --
# worth confirming against the intended test name.
context("001_metadata_fungidb
12\n")
meta <- download_eupath_metadata(overwrite=TRUE, webservice="fungidb")
all_metadata <- meta[["valid"]]
# NOTE(review): `actual` is computed but never used below
actual <- nrow(all_metadata)
expected <- 150
test_that("Do we get sufficient metadata?", {
  expect_gt(nrow(all_metadata), expected)
})
wanted <- "pombe"
entry <- get_eupath_entry(species=wanted, metadata=meta)
test_that("Did we get expected metadata for Schizosaccharomyces pombe?", {
  expect_equal(entry[["DataProvider"]], "FungiDB")
  expect_equal(entry[["TaxonUnmodified"]], "Schizosaccharomyces pombe 972h-")
})
end <- as.POSIXlt(Sys.time())
elapsed <- round(x=as.numeric(end) - as.numeric(start))
message(paste0("\nFinished 002_metadata_fungidb in ", elapsed, " seconds."))
| /tests/testthat/test_002_metadata_fungidb.R | no_license | khughitt/EuPathDB | R | false | false | 797 | r | start <- as.POSIXlt(Sys.time())
library(testthat)
library(EuPathDB)
context("001_metadata_fungidb
12\n")
meta <- download_eupath_metadata(overwrite=TRUE, webservice="fungidb")
all_metadata <- meta[["valid"]]
actual <- nrow(all_metadata)
expected <- 150
test_that("Do we get sufficient metadata?", {
expect_gt(nrow(all_metadata), expected)
})
wanted <- "pombe"
entry <- get_eupath_entry(species=wanted, metadata=meta)
test_that("Did we get expected metadata for Schizosaccharomyces pombe?", {
expect_equal(entry[["DataProvider"]], "FungiDB")
expect_equal(entry[["TaxonUnmodified"]], "Schizosaccharomyces pombe 972h-")
})
end <- as.POSIXlt(Sys.time())
elapsed <- round(x=as.numeric(end) - as.numeric(start))
message(paste0("\nFinished 002_metadata_fungidb in ", elapsed, " seconds."))
|
###### Functions used to modify y-axes in Figure 4 (as asked in 1st review)----
# These functions have been written by Dewey Dunnington and details about what they do can be find here: https://fishandwhistle.net/post/2018/modifying-facet-scales-in-ggplot2/
# Pair a panel index with a replacement position scale. The tagged list this
# returns is consumed by facet_wrap_custom() / CustomFacetWrap's init_scales().
scale_override <- function(which, scale) {
  # the panel index must be a single whole number
  valid_index <- is.numeric(which) && length(which) == 1 && which %% 1 == 0
  if (!valid_index) {
    stop("which must be an integer of length 1")
  }
  # only x/y position scales can replace a facet's panel scale
  is_position_scale <- !is.null(scale$aesthetics) &&
    any(c("x", "y") %in% scale$aesthetics)
  if (!is_position_scale) {
    stop("scale must be an x or y position scale")
  }
  structure(list(which = which, scale = scale), class = "scale_override")
}
# FacetWrap subclass whose init_scales() honours per-panel scale overrides
# supplied in params$scale_overrides (a list of scale_override objects).
CustomFacetWrap <- ggproto(
  "CustomFacetWrap", FacetWrap,
  init_scales = function(self, layout, x_scale = NULL, y_scale = NULL, params) {
    # make the initial x, y scales list via the stock FacetWrap behaviour
    scales <- ggproto_parent(FacetWrap, self)$init_scales(layout, x_scale, y_scale, params)
    # no overrides supplied: behave exactly like FacetWrap
    if(is.null(params$scale_overrides)) return(scales)
    max_scale_x <- length(scales$x)
    max_scale_y <- length(scales$y)
    # replace entries of scales$x / scales$y according to params$scale_overrides
    for(scale_override in params$scale_overrides) {
      which <- scale_override$which
      scale <- scale_override$scale
      if("x" %in% scale$aesthetics) {
        if(!is.null(scales$x)) {
          if(which < 0 || which > max_scale_x) stop("Invalid index of x scale: ", which)
          # clone() so panels do not share mutable scale state
          scales$x[[which]] <- scale$clone()
        }
      } else if("y" %in% scale$aesthetics) {
        if(!is.null(scales$y)) {
          if(which < 0 || which > max_scale_y) stop("Invalid index of y scale: ", which)
          scales$y[[which]] <- scale$clone()
        }
      } else {
        # scale_override() should already have rejected non-position scales
        stop("Invalid scale")
      }
    }
    # return scales
    scales
  }
)
# Drop-in wrapper around ggplot2::facet_wrap() that additionally accepts
# scale_overrides: a scale_override object (or list of them) replacing the
# position scale of individual panels.
facet_wrap_custom <- function(..., scale_overrides = NULL) {
  # take advantage of the sanitizing that happens in facet_wrap
  facet_super <- facet_wrap(...)
  # sanitize scale overrides: a lone override is wrapped in a list; anything
  # that is not a list of scale_override objects is rejected.
  # NOTE(review): the NULL default falls through to the error branch, so
  # callers must always supply scale_overrides -- confirm this is intended.
  if(inherits(scale_overrides, "scale_override")) {
    scale_overrides <- list(scale_overrides)
  } else if(!is.list(scale_overrides) ||
            !all(vapply(scale_overrides, inherits, "scale_override", FUN.VALUE = logical(1)))) {
    stop("scale_overrides must be a scale_override object or a list of scale_override objects")
  }
  facet_super$params$scale_overrides <- scale_overrides
  # rebuild the facet as a CustomFacetWrap carrying the overrides in params
  ggproto(NULL, CustomFacetWrap,
          shrink = facet_super$shrink,
          params = facet_super$params
  )
}
| /modifying-facet-scales-in-ggplot2.R | no_license | Nmoiroux/diopKdrBiting | R | false | false | 2,392 | r | ###### Functions used to modify y-axes in Figure 4 (as asked in 1st review)----
# These functions have been written by Dewey Dunnington and details about what they do can be find here: https://fishandwhistle.net/post/2018/modifying-facet-scales-in-ggplot2/
# Pair a panel index (`which`) with a replacement ggplot2 position scale.
# The tagged result is consumed by facet_wrap_custom() / CustomFacetWrap.
scale_override <- function(which, scale) {
  # the panel index must be a single whole number
  if(!is.numeric(which) || (length(which) != 1) || (which %% 1 != 0)) {
    stop("which must be an integer of length 1")
  }
  # only x/y position scales can replace a facet's panel scale
  if(is.null(scale$aesthetics) || !any(c("x", "y") %in% scale$aesthetics)) {
    stop("scale must be an x or y position scale")
  }
  structure(list(which = which, scale = scale), class = "scale_override")
}
# FacetWrap subclass whose init_scales() honours per-panel scale overrides
# supplied in params$scale_overrides (a list of scale_override objects).
CustomFacetWrap <- ggproto(
  "CustomFacetWrap", FacetWrap,
  init_scales = function(self, layout, x_scale = NULL, y_scale = NULL, params) {
    # make the initial x, y scales list via the stock FacetWrap behaviour
    scales <- ggproto_parent(FacetWrap, self)$init_scales(layout, x_scale, y_scale, params)
    # no overrides supplied: behave exactly like FacetWrap
    if(is.null(params$scale_overrides)) return(scales)
    max_scale_x <- length(scales$x)
    max_scale_y <- length(scales$y)
    # replace entries of scales$x / scales$y according to params$scale_overrides
    for(scale_override in params$scale_overrides) {
      which <- scale_override$which
      scale <- scale_override$scale
      if("x" %in% scale$aesthetics) {
        if(!is.null(scales$x)) {
          if(which < 0 || which > max_scale_x) stop("Invalid index of x scale: ", which)
          # clone() so panels do not share mutable scale state
          scales$x[[which]] <- scale$clone()
        }
      } else if("y" %in% scale$aesthetics) {
        if(!is.null(scales$y)) {
          if(which < 0 || which > max_scale_y) stop("Invalid index of y scale: ", which)
          scales$y[[which]] <- scale$clone()
        }
      } else {
        # scale_override() should already have rejected non-position scales
        stop("Invalid scale")
      }
    }
    # return scales
    scales
  }
)
# Drop-in wrapper around ggplot2::facet_wrap() that additionally accepts
# scale_overrides: a scale_override object (or list of them) replacing the
# position scale of individual panels.
facet_wrap_custom <- function(..., scale_overrides = NULL) {
  # take advantage of the sanitizing that happens in facet_wrap
  facet_super <- facet_wrap(...)
  # sanitize scale overrides: a lone override is wrapped in a list; anything
  # that is not a list of scale_override objects is rejected.
  # NOTE(review): the NULL default falls through to the error branch, so
  # callers must always supply scale_overrides -- confirm this is intended.
  if(inherits(scale_overrides, "scale_override")) {
    scale_overrides <- list(scale_overrides)
  } else if(!is.list(scale_overrides) ||
            !all(vapply(scale_overrides, inherits, "scale_override", FUN.VALUE = logical(1)))) {
    stop("scale_overrides must be a scale_override object or a list of scale_override objects")
  }
  facet_super$params$scale_overrides <- scale_overrides
  # rebuild the facet as a CustomFacetWrap carrying the overrides in params
  ggproto(NULL, CustomFacetWrap,
          shrink = facet_super$shrink,
          params = facet_super$params
  )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.